use super::functions::*;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::{Duration, Instant, SystemTime};
#[allow(dead_code)]
#[derive(Debug, Default)]
pub struct WatchConfigBuilder {
debounce_ms: Option<u64>,
recursive: Option<bool>,
extensions: Vec<String>,
ignore_dirs: Vec<String>,
action: Option<WatchAction>,
}
#[allow(dead_code)]
impl WatchConfigBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn debounce_ms(mut self, ms: u64) -> Self {
self.debounce_ms = Some(ms);
self
}
pub fn recursive(mut self, r: bool) -> Self {
self.recursive = Some(r);
self
}
pub fn extension(mut self, ext: impl Into<String>) -> Self {
self.extensions.push(ext.into());
self
}
pub fn ignore_dir(mut self, dir: impl Into<String>) -> Self {
self.ignore_dirs.push(dir.into());
self
}
pub fn action(mut self, a: WatchAction) -> Self {
self.action = Some(a);
self
}
pub fn build(self) -> WatchConfig {
let mut base = WatchConfig::default();
if let Some(d) = self.debounce_ms {
base.debounce_ms = d;
}
if let Some(r) = self.recursive {
base.recursive = r;
}
if !self.extensions.is_empty() {
base.extensions = self.extensions;
}
if !self.ignore_dirs.is_empty() {
base.ignore_dirs = self.ignore_dirs;
}
if let Some(a) = self.action {
base.action = a;
}
base
}
}
#[allow(dead_code)]
#[derive(Clone, Debug, Default)]
/// Cumulative counters describing everything a watcher has observed.
pub struct WatcherStatistics {
    pub total_polls: u64,
    pub total_events: u64,
    pub created_events: u64,
    pub modified_events: u64,
    pub deleted_events: u64,
    pub files_watched: usize,
    pub directories_watched: usize,
    pub errors: u64,
}
impl WatcherStatistics {
    #[allow(dead_code)]
    /// Zeroed statistics.
    pub fn new() -> Self {
        Self::default()
    }
    #[allow(dead_code)]
    /// Folds one poll's events into the counters. A `Renamed` event is
    /// booked as a delete + create pair and therefore contributes 2 to
    /// `total_events`, keeping created + modified + deleted equal to
    /// `total_events`.
    pub fn record_poll(&mut self, events: &[WatchEventKind]) {
        self.total_polls += 1;
        for kind in events {
            self.total_events += 1;
            match kind {
                WatchEventKind::Created => self.created_events += 1,
                WatchEventKind::Modified => self.modified_events += 1,
                WatchEventKind::Deleted => self.deleted_events += 1,
                WatchEventKind::Renamed => {
                    self.deleted_events += 1;
                    self.created_events += 1;
                    // Second bump: a rename counts as two logical events.
                    self.total_events += 1;
                }
            }
        }
    }
    #[allow(dead_code)]
    /// Mean events per poll (0.0 before the first poll).
    pub fn events_per_poll(&self) -> f64 {
        match self.total_polls {
            0 => 0.0,
            polls => self.total_events as f64 / polls as f64,
        }
    }
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
/// The kind of filesystem change a watcher observed.
pub enum WatchEventKind {
    /// A path appeared that was not in the previous scan.
    Created,
    /// An existing path's mtime or size changed.
    Modified,
    /// A previously seen path disappeared.
    Deleted,
    /// A path moved; `WatcherStatistics` books this as a delete + create pair.
    Renamed,
}
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
/// Lifecycle of a hot-reload cycle, driven by `HotReloadSession`.
/// NOTE(review): `HotReloadSession::status_line` formats this with `{}`
/// (`Display`), whose impl is not in this file — presumably provided by
/// `super::functions`; confirm it exists.
pub enum HotReloadState {
    /// No reload requested.
    Idle,
    /// A change was seen; reload not yet started.
    Pending,
    /// Reload in progress.
    Reloading,
    /// Last reload succeeded.
    Done,
    /// Last reload failed.
    Failed,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
/// What the watcher should do in response to a detected change.
/// NOTE(review): `BuildRequest::describe` formats this with `{}` (`Display`);
/// the impl is not in this file — presumably in `super::functions`.
pub enum WatchAction {
    /// Re-run checks only.
    Recheck,
    /// Trigger a full rebuild.
    Rebuild,
    /// Only notify; take no build action.
    Notify,
    /// Caller-defined behavior.
    Custom,
}
#[derive(Clone, Debug)]
/// Append-only history of processed change records.
pub struct ChangeTracker {
    /// Records in arrival order, oldest first.
    history: Vec<ChangeRecord>,
}
impl ChangeTracker {
    /// Empty tracker.
    pub fn new() -> Self {
        Self { history: Vec::new() }
    }
    /// Appends one record to the history.
    pub fn record_change(&mut self, record: ChangeRecord) {
        self.history.push(record);
    }
    /// Every record, oldest first.
    pub fn all_changes(&self) -> &[ChangeRecord] {
        self.history.as_slice()
    }
    /// The most recent `n` records (fewer when the history is shorter).
    pub fn recent_changes(&self, n: usize) -> &[ChangeRecord] {
        let skip = self.history.len().saturating_sub(n);
        &self.history[skip..]
    }
    /// Records stamped at or after `since`.
    pub fn changes_since(&self, since: Instant) -> Vec<&ChangeRecord> {
        self.history
            .iter()
            .filter(|rec| rec.timestamp >= since)
            .collect()
    }
    /// Number of recorded changes.
    pub fn len(&self) -> usize {
        self.history.len()
    }
    /// True when nothing has been recorded yet.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Drops the entire history.
    pub fn clear(&mut self) {
        self.history.clear();
    }
}
#[derive(Clone, Debug)]
/// Polling-based file monitor. Each `poll_changes` call re-scans the watched
/// paths and diffs the result against the previous scan.
pub struct FileMonitor {
    /// Files and directories to scan (directories are walked recursively).
    watched_paths: Vec<PathBuf>,
    /// Suggested delay between polls (reported, not enforced here).
    poll_interval_ms: u64,
    /// State from the previous scan, keyed by lossy path string.
    last_seen: HashMap<String, FileState>,
}
impl FileMonitor {
    /// New monitor with no watched paths.
    pub fn new(poll_interval_ms: u64) -> Self {
        Self {
            watched_paths: Vec::new(),
            poll_interval_ms,
            last_seen: HashMap::new(),
        }
    }
    /// Registers a file or directory to watch; duplicates are ignored.
    pub fn add_watch(&mut self, path: impl Into<PathBuf>) {
        let p = path.into();
        if !self.watched_paths.contains(&p) {
            self.watched_paths.push(p);
        }
    }
    /// Stops watching `path` (no-op if it was never added).
    pub fn remove_watch(&mut self, path: &Path) {
        self.watched_paths.retain(|p| p != path);
    }
    /// Drops all watches and the remembered scan state.
    pub fn clear_watches(&mut self) {
        self.watched_paths.clear();
        self.last_seen.clear();
    }
    /// Number of registered watch roots.
    pub fn watch_count(&self) -> usize {
        self.watched_paths.len()
    }
    /// Suggested polling interval.
    pub fn poll_interval(&self) -> Duration {
        Duration::from_millis(self.poll_interval_ms)
    }
    /// Scans all watched paths, diffs against the previous scan, remembers
    /// the new scan, and returns the detected events.
    pub fn poll_changes(&mut self) -> Vec<WatchEvent> {
        let current = self.scan_all();
        // Fixed: this borrow was mangled to `¤t` in the checked-in text.
        let events = self.detect_changes(&current);
        self.last_seen = current
            .into_iter()
            .map(|fs| (fs.path.to_string_lossy().to_string(), fs))
            .collect();
        events
    }
    /// Recursively collects the state of every file under `dir`; unreadable
    /// directories are silently skipped.
    pub fn scan_directory(dir: &Path) -> Vec<FileState> {
        let mut results = Vec::new();
        Self::scan_directory_inner(dir, &mut results);
        results
    }
    /// Depth-first walk appending file states to `results`.
    fn scan_directory_inner(dir: &Path, results: &mut Vec<FileState>) {
        let entries = match std::fs::read_dir(dir) {
            Ok(e) => e,
            Err(_) => return,
        };
        for entry in entries.flatten() {
            let path = entry.path();
            if path.is_dir() {
                Self::scan_directory_inner(&path, results);
            } else if let Ok(meta) = entry.metadata() {
                results.push(FileState::from_metadata(path, &meta));
            }
        }
    }
    /// Scans every watched root: directories recursively, plain files directly.
    fn scan_all(&self) -> Vec<FileState> {
        let mut all = Vec::new();
        for p in &self.watched_paths {
            if p.is_dir() {
                all.extend(Self::scan_directory(p));
            } else if let Ok(meta) = std::fs::metadata(p) {
                all.push(FileState::from_metadata(p.clone(), &meta));
            }
        }
        all
    }
    /// Diffs `current` against the last remembered scan: unseen paths are
    /// `Created`, a changed mtime or size is `Modified`, vanished paths are
    /// `Deleted`. Renames therefore surface as a delete + create pair.
    pub fn detect_changes(&self, current: &[FileState]) -> Vec<WatchEvent> {
        let mut events = Vec::new();
        let current_map: HashMap<String, &FileState> = current
            .iter()
            .map(|fs| (fs.path.to_string_lossy().to_string(), fs))
            .collect();
        // Fixed: this borrow was mangled to `¤t_map` in the checked-in text.
        for (key, state) in &current_map {
            match self.last_seen.get(key) {
                None => {
                    events.push(WatchEvent::new(state.path.clone(), WatchEventKind::Created));
                }
                Some(prev) => {
                    if state.modified != prev.modified || state.size != prev.size {
                        events.push(WatchEvent::new(
                            state.path.clone(),
                            WatchEventKind::Modified,
                        ));
                    }
                }
            }
        }
        for key in self.last_seen.keys() {
            if !current_map.contains_key(key) {
                events.push(WatchEvent::new(PathBuf::from(key), WatchEventKind::Deleted));
            }
        }
        events
    }
}
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
/// Verbosity of watcher console output.
/// NOTE(review): nothing in this file consumes this enum — semantics live
/// with the caller.
pub enum WatchDisplayMode {
    /// Terse output.
    Compact,
    /// Detailed output.
    Verbose,
    /// No output.
    Silent,
}
#[allow(dead_code)]
#[derive(Debug, Clone, Default)]
/// Content-hash cache keyed by (lossy) path string; lets callers detect
/// content changes independent of mtime.
pub struct HashCache {
    pub(crate) cache: HashMap<String, u64>,
}
#[allow(dead_code)]
impl HashCache {
    /// Empty cache.
    pub fn new() -> Self {
        Self::default()
    }
    /// Re-hashes `path`, stores the result, and reports whether the hash
    /// differs from the previously cached value. An unseen path always
    /// counts as changed; unreadable files hash to 0.
    pub fn update(&mut self, path: &std::path::Path) -> bool {
        let key = path.to_string_lossy().into_owned();
        let fresh = compute_file_hash(path).unwrap_or(0);
        let previous = self.cache.insert(key, fresh);
        previous.unwrap_or(u64::MAX) != fresh
    }
    /// Forgets the cached hash for `path`.
    pub fn remove(&mut self, path: &std::path::Path) {
        self.cache.remove(path.to_string_lossy().as_ref());
    }
    /// Cached hash for `path`, if any.
    pub fn get(&self, path: &std::path::Path) -> Option<u64> {
        self.cache.get(path.to_string_lossy().as_ref()).copied()
    }
    /// Number of cached paths.
    pub fn len(&self) -> usize {
        self.cache.len()
    }
    /// True when nothing is cached.
    pub fn is_empty(&self) -> bool {
        self.cache.is_empty()
    }
    /// Drops every cached hash.
    pub fn clear(&mut self) {
        self.cache.clear();
    }
}
#[allow(dead_code)]
/// Bounded log of watcher diagnostics; the oldest entry is evicted when the
/// cap is reached. A `max_entries` of 0 disables the cap (unbounded), the
/// same convention `WatchEventLog` uses for capacity 0.
pub struct WatcherLog {
    entries: Vec<WatcherLogEntry>,
    /// Maximum retained entries; 0 = unbounded.
    max_entries: usize,
}
impl WatcherLog {
    #[allow(dead_code)]
    /// Empty log retaining at most `max_entries` entries (0 = unbounded).
    pub fn new(max_entries: usize) -> Self {
        Self {
            entries: vec![],
            max_entries,
        }
    }
    #[allow(dead_code)]
    /// Appends an entry stamped with the current Unix time (seconds),
    /// evicting the oldest entry first when the log is full.
    pub fn add(
        &mut self,
        level: WatcherLogLevel,
        message: impl Into<String>,
        path: Option<PathBuf>,
    ) {
        // The `max_entries > 0` guard matters: without it, a cap of 0 would
        // call `remove(0)` on an empty Vec and panic on the first `add`.
        if self.max_entries > 0 && self.entries.len() >= self.max_entries {
            self.entries.remove(0);
        }
        self.entries.push(WatcherLogEntry {
            timestamp_secs: std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs(),
            level,
            message: message.into(),
            path,
        });
    }
    #[allow(dead_code)]
    /// Entries whose level equals `level`, oldest first.
    pub fn entries_at_level(&self, level: &WatcherLogLevel) -> Vec<&WatcherLogEntry> {
        self.entries.iter().filter(|e| &e.level == level).collect()
    }
    #[allow(dead_code)]
    /// Every retained entry, oldest first.
    pub fn all_entries(&self) -> &[WatcherLogEntry] {
        &self.entries
    }
}
#[derive(Debug)]
/// Cooperative cancellation flag for a running watch loop.
pub struct WatchHandle {
    /// Set once `stop` has been requested.
    stopped: bool,
}
impl WatchHandle {
    /// Handle in the running (not-stopped) state.
    pub fn new() -> Self {
        Self { stopped: false }
    }
    /// Requests the watch loop to stop; calling it again is a no-op.
    pub fn stop(&mut self) {
        self.stopped = true;
    }
    /// Whether `stop` has been requested.
    pub fn is_stopped(&self) -> bool {
        self.stopped
    }
}
#[allow(dead_code)]
/// Directory names that scanning should skip.
pub struct DirectoryExcludeFilter {
    pub excluded: Vec<String>,
}
impl DirectoryExcludeFilter {
    #[allow(dead_code)]
    /// Takes owned copies of the given directory names.
    pub fn new(excluded: Vec<&str>) -> Self {
        let owned = excluded.iter().map(|name| name.to_string()).collect();
        Self { excluded: owned }
    }
}
#[allow(dead_code)]
/// Bounded FIFO of pending build requests.
pub struct BuildRequestQueue {
    /// Oldest request at the front. A `VecDeque` makes `pop` O(1);
    /// `Vec::remove(0)` was O(n).
    pending: std::collections::VecDeque<BuildRequest>,
    /// Capacity cap, always >= 1; requests past it are dropped.
    max_pending: usize,
}
#[allow(dead_code)]
impl BuildRequestQueue {
    /// New queue; `max_pending` is clamped up to at least 1.
    pub fn new(max_pending: usize) -> Self {
        Self {
            pending: std::collections::VecDeque::new(),
            max_pending: max_pending.max(1),
        }
    }
    /// Enqueues `req`. A superseding request clears the queue first;
    /// requests arriving while the queue is full are silently dropped.
    pub fn push(&mut self, req: BuildRequest) {
        if req.supersedes_pending {
            self.pending.clear();
        }
        if self.pending.len() < self.max_pending {
            self.pending.push_back(req);
        }
    }
    /// Dequeues the oldest request, if any.
    pub fn pop(&mut self) -> Option<BuildRequest> {
        self.pending.pop_front()
    }
    /// Number of queued requests.
    pub fn len(&self) -> usize {
        self.pending.len()
    }
    /// True when nothing is queued.
    pub fn is_empty(&self) -> bool {
        self.pending.is_empty()
    }
}
#[allow(dead_code)]
/// Maps watch events to the action they should trigger, by filename pattern.
pub struct RebuildTrigger {
    /// Patterns (or exact file names) that force a full rebuild.
    pub full_rebuild_patterns: Vec<String>,
    /// Patterns that only require a re-check.
    pub recheck_patterns: Vec<String>,
    /// Fallback when no pattern matches.
    pub default_action: WatchAction,
}
#[allow(dead_code)]
impl RebuildTrigger {
    /// Stock configuration for Oxilean: manifest changes rebuild, source
    /// changes recheck, everything else just notifies.
    pub fn default_oxilean() -> Self {
        Self {
            full_rebuild_patterns: vec!["Oxilean.toml".to_string()],
            recheck_patterns: vec!["*.lean".to_string(), "*.oxilean".to_string()],
            default_action: WatchAction::Notify,
        }
    }
    /// Decides the action for `event`: full-rebuild patterns (pattern match
    /// or exact file-name match) win over recheck patterns; otherwise
    /// `default_action`.
    pub fn action_for(&self, event: &WatchEvent) -> WatchAction {
        let file_name = event.path.file_name().and_then(|n| n.to_str()).unwrap_or("");
        let wants_rebuild = self
            .full_rebuild_patterns
            .iter()
            .any(|pat| path_matches_pattern(&event.path, pat) || file_name == pat.as_str());
        if wants_rebuild {
            return WatchAction::Rebuild;
        }
        let wants_recheck = self
            .recheck_patterns
            .iter()
            .any(|pat| path_matches_pattern(&event.path, pat));
        if wants_recheck {
            WatchAction::Recheck
        } else {
            self.default_action
        }
    }
    /// Buckets `events` by the action each one maps to.
    pub fn group_by_action<'a>(
        &self,
        events: &'a [WatchEvent],
    ) -> HashMap<WatchAction, Vec<&'a WatchEvent>> {
        let mut grouped: HashMap<WatchAction, Vec<&'a WatchEvent>> = HashMap::new();
        for ev in events {
            grouped.entry(self.action_for(ev)).or_default().push(ev);
        }
        grouped
    }
}
#[allow(dead_code)]
#[derive(Clone, Debug)]
/// One entry in a `WatcherLog`.
pub struct WatcherLogEntry {
    /// Unix timestamp (seconds) when the entry was recorded.
    pub timestamp_secs: u64,
    /// Severity of the entry.
    pub level: WatcherLogLevel,
    /// Human-readable description.
    pub message: String,
    /// File the entry refers to, when applicable.
    pub path: Option<PathBuf>,
}
#[allow(dead_code)]
/// Whitelist of file extensions.
pub struct ExtensionFilter {
    pub extensions: Vec<String>,
}
impl ExtensionFilter {
    #[allow(dead_code)]
    /// Takes owned copies of the given extension names.
    pub fn new(extensions: Vec<&str>) -> Self {
        let owned = extensions.iter().map(|ext| ext.to_string()).collect();
        Self { extensions: owned }
    }
}
#[allow(dead_code)]
#[derive(Clone, Debug)]
/// Per-target watcher tuning: polling cadence, filtering, and batching.
pub struct WatcherConfig {
    /// Milliseconds between polls.
    pub poll_interval_ms: u64,
    /// Debounce window in milliseconds.
    pub debounce_ms: u64,
    /// Maximum directory recursion depth.
    /// NOTE(review): not consulted anywhere in this file.
    pub max_depth: u32,
    /// Extensions to include; empty means everything (see `should_include`).
    pub include_extensions: Vec<String>,
    /// Path-component names that exclude a path (see `should_exclude`).
    pub exclude_patterns: Vec<String>,
    /// Whether symlinks are followed.
    /// NOTE(review): not consulted anywhere in this file.
    pub follow_symlinks: bool,
    /// Whether events should be batched.
    pub batch_events: bool,
    /// Batch window in milliseconds.
    pub batch_window_ms: u64,
}
impl WatcherConfig {
    #[allow(dead_code)]
    /// Stock configuration for Oxilean.
    /// NOTE(review): relies on a `Default` impl for `WatcherConfig` that is
    /// not in this file (there is no `Default` derive here) — presumably
    /// provided by `super::functions`; confirm it exists.
    pub fn for_oxilean() -> Self {
        Self::default()
    }
    #[allow(dead_code)]
    /// True when `path` passes the extension whitelist: an empty list
    /// accepts everything; otherwise the path's extension must appear in
    /// `include_extensions` (extension-less paths are rejected).
    pub fn should_include(&self, path: &Path) -> bool {
        if self.include_extensions.is_empty() {
            return true;
        }
        if let Some(ext) = path.extension() {
            let ext_str = ext.to_string_lossy();
            return self
                .include_extensions
                .iter()
                .any(|e| e == ext_str.as_ref());
        }
        false
    }
    #[allow(dead_code)]
    /// True when any single path component exactly equals one of
    /// `exclude_patterns` (plain name comparison, not a glob).
    pub fn should_exclude(&self, path: &Path) -> bool {
        for component in path.components() {
            let name = component.as_os_str().to_string_lossy();
            if self.exclude_patterns.iter().any(|p| p == name.as_ref()) {
                return true;
            }
        }
        false
    }
}
#[allow(dead_code)]
/// Event subscriber that only counts how many notifications it received.
pub struct CountingSubscriber {
    /// Total notifications observed; relaxed ordering suffices for a counter.
    pub count: std::sync::atomic::AtomicU64,
}
impl CountingSubscriber {
    #[allow(dead_code)]
    /// Fresh subscriber with a zeroed counter.
    pub fn new() -> Self {
        let count = std::sync::atomic::AtomicU64::new(0);
        Self { count }
    }
    #[allow(dead_code)]
    /// Current notification count.
    pub fn get_count(&self) -> u64 {
        use std::sync::atomic::Ordering;
        self.count.load(Ordering::Relaxed)
    }
}
#[allow(dead_code)]
/// Collection of watch targets with per-target enable/disable.
pub struct WatcherRegistry {
    targets: Vec<WatchTarget>,
}
impl WatcherRegistry {
    #[allow(dead_code)]
    /// Empty registry.
    pub fn new() -> Self {
        Self {
            targets: Vec::new(),
        }
    }
    #[allow(dead_code)]
    /// Registers a target (duplicates are allowed).
    pub fn add(&mut self, target: WatchTarget) {
        self.targets.push(target);
    }
    #[allow(dead_code)]
    /// Drops every target whose path equals `path`.
    pub fn remove(&mut self, path: &Path) {
        self.targets.retain(|t| t.path != path);
    }
    #[allow(dead_code)]
    /// Enables or disables every target with the given path.
    pub fn set_enabled(&mut self, path: &Path, enabled: bool) {
        self.targets
            .iter_mut()
            .filter(|t| t.path == path)
            .for_each(|t| t.enabled = enabled);
    }
    #[allow(dead_code)]
    /// Currently enabled targets, in insertion order.
    pub fn enabled_targets(&self) -> Vec<&WatchTarget> {
        self.targets.iter().filter(|t| t.enabled).collect()
    }
    #[allow(dead_code)]
    /// Total registered targets, enabled or not.
    pub fn count(&self) -> usize {
        self.targets.len()
    }
}
#[derive(Clone, Debug)]
/// Behavior settings for a `WatchSession`.
/// NOTE(review): `WatchConfigBuilder::build` calls `WatchConfig::default()`,
/// but no `Default` impl is visible in this file (no derive) — presumably
/// provided by `super::functions`; confirm it exists.
pub struct WatchConfig {
    /// Debounce window in milliseconds; `WatchSession::new` also seeds the
    /// monitor's poll interval from this value.
    pub debounce_ms: u64,
    /// Recursive-watch flag.
    /// NOTE(review): not consulted by `FileMonitor` in this file — its
    /// directory scans are always recursive.
    pub recursive: bool,
    /// Extension list; copied into `WatchFilter::extensions` by `WatchSession`.
    pub extensions: Vec<String>,
    /// Directory names to ignore; copied into `WatchFilter::ignore_patterns`.
    pub ignore_dirs: Vec<String>,
    /// Action recorded against each processed change.
    pub action: WatchAction,
}
#[derive(Clone, Debug, Default)]
/// Aggregate counters for the polling loop.
pub struct MonitorStats {
    pub poll_cycles: u64,
    pub events_detected: u64,
    pub events_processed: u64,
    pub events_debounced: u64,
}
impl MonitorStats {
    /// Zeroed statistics.
    pub fn new() -> Self {
        Self::default()
    }
    /// Records one poll cycle; whatever was detected but not processed is
    /// counted as debounced (saturating, so processed > detected is safe).
    pub fn record_poll(&mut self, detected: u64, processed: u64) {
        self.poll_cycles += 1;
        self.events_detected += detected;
        self.events_processed += processed;
        self.events_debounced += detected.saturating_sub(processed);
    }
    /// Mean detected events per cycle (0.0 before the first cycle).
    pub fn avg_events_per_cycle(&self) -> f64 {
        match self.poll_cycles {
            0 => 0.0,
            cycles => self.events_detected as f64 / cycles as f64,
        }
    }
    /// Fraction of detected events that were debounced away (0.0 when
    /// nothing has been detected).
    pub fn debounce_ratio(&self) -> f64 {
        match self.events_detected {
            0 => 0.0,
            detected => self.events_debounced as f64 / detected as f64,
        }
    }
}
#[allow(dead_code)]
#[derive(Debug, Clone)]
/// A watcher failure plus whether it is worth retrying.
pub struct WatchError {
    /// Human-readable description of the failure.
    pub message: String,
    /// True when the caller may retry.
    pub recoverable: bool,
    /// Retries attempted so far.
    pub retry_count: u32,
}
#[allow(dead_code)]
impl WatchError {
    /// Error that callers may retry.
    pub fn recoverable(msg: impl Into<String>) -> Self {
        Self {
            recoverable: true,
            retry_count: 0,
            message: msg.into(),
        }
    }
    /// Error that should abort the watch loop.
    pub fn fatal(msg: impl Into<String>) -> Self {
        Self {
            recoverable: false,
            retry_count: 0,
            message: msg.into(),
        }
    }
    /// Bumps the retry counter after a failed attempt.
    pub fn increment_retry(&mut self) {
        self.retry_count += 1;
    }
}
#[allow(dead_code)]
/// Coalesces rapid-fire events per path into a single event within a
/// fixed time window.
pub struct WatchEventBatcher {
    /// Latest kind and window-start instant per path.
    pending: HashMap<PathBuf, (WatchEventKind, Instant)>,
    /// Coalescing window length.
    window_ms: u64,
}
impl WatchEventBatcher {
    #[allow(dead_code)]
    /// New batcher with the given coalescing window.
    pub fn new(window_ms: u64) -> Self {
        Self {
            pending: HashMap::new(),
            window_ms,
        }
    }
    #[allow(dead_code)]
    /// Records an event. Returns true when it was coalesced into a still-open
    /// window for the same path (the stored kind is overwritten but the
    /// original window start is kept); false when a new window was opened.
    pub fn add(&mut self, path: PathBuf, kind: WatchEventKind) -> bool {
        let now = Instant::now();
        if let Some((pending_kind, started)) = self.pending.get_mut(&path) {
            let window_open = (started.elapsed().as_millis() as u64) < self.window_ms;
            if window_open {
                *pending_kind = kind;
                return true;
            }
        }
        self.pending.insert(path, (kind, now));
        false
    }
    #[allow(dead_code)]
    /// Removes and returns every entry whose window has fully elapsed.
    pub fn drain_ready(&mut self) -> Vec<(PathBuf, WatchEventKind)> {
        let window = self.window_ms;
        let mut expired = Vec::new();
        self.pending.retain(|path, (kind, started)| {
            let done = started.elapsed().as_millis() as u64 >= window;
            if done {
                expired.push((path.clone(), *kind));
            }
            !done
        });
        expired
    }
}
#[allow(dead_code)]
#[derive(Clone, Debug, PartialEq, Eq)]
/// Severity levels for `WatcherLog` entries, most severe first.
pub enum WatcherLogLevel {
    /// A failure.
    Error,
    /// A suspicious but non-fatal condition.
    Warning,
    /// Routine information.
    Info,
    /// Diagnostic detail.
    Debug,
}
#[allow(dead_code)]
/// Terminal spinner cycling through braille animation frames.
pub struct WatchSpinner {
    frames: Vec<&'static str>,
    /// Monotonically increasing tick counter (wraps at usize::MAX).
    idx: usize,
}
#[allow(dead_code)]
impl WatchSpinner {
    /// Spinner positioned at the first frame.
    pub fn new() -> Self {
        let frames = vec!["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"];
        Self { frames, idx: 0 }
    }
    /// Returns the current frame, then advances to the next one.
    pub fn tick(&mut self) -> &str {
        let at = self.idx % self.frames.len();
        self.idx = self.idx.wrapping_add(1);
        self.frames[at]
    }
    /// Current frame without advancing.
    pub fn current(&self) -> &str {
        self.frames[self.idx % self.frames.len()]
    }
}
#[derive(Clone, Debug, Default)]
/// Capped log of (event, description) pairs; capacity 0 means unbounded.
pub struct WatchEventLog {
    entries: Vec<(WatchEvent, String)>,
    capacity: usize,
}
impl WatchEventLog {
    /// Log retaining at most `capacity` entries (0 = no limit).
    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            capacity,
            entries: Vec::new(),
        }
    }
    /// Appends an entry, evicting the oldest one when the cap is reached.
    pub fn append(&mut self, event: WatchEvent, description: impl Into<String>) {
        let at_cap = self.capacity > 0 && self.entries.len() >= self.capacity;
        if at_cap {
            self.entries.remove(0);
        }
        self.entries.push((event, description.into()));
    }
    /// Number of retained entries.
    pub fn len(&self) -> usize {
        self.entries.len()
    }
    /// True when no entries are retained.
    pub fn is_empty(&self) -> bool {
        self.entries.is_empty()
    }
    /// Drops every entry.
    pub fn clear(&mut self) {
        self.entries.clear();
    }
    /// Entries whose event matches `kind`, oldest first.
    pub fn by_kind(&self, kind: WatchEventKind) -> Vec<&(WatchEvent, String)> {
        self.entries
            .iter()
            .filter(|(ev, _)| ev.kind == kind)
            .collect()
    }
    /// All descriptions, oldest first.
    pub fn descriptions(&self) -> Vec<&str> {
        self.entries.iter().map(|(_, text)| text.as_str()).collect()
    }
}
#[allow(dead_code)]
#[derive(Debug, Clone)]
/// Retry policy for re-establishing a broken watcher.
pub struct ReconnectPolicy {
    /// Maximum number of attempts before giving up.
    pub max_retries: u32,
    /// Base delay between attempts, in milliseconds.
    pub retry_delay_ms: u64,
    /// When true, the delay doubles per retry (exponent capped at 10).
    pub exponential_backoff: bool,
}
#[allow(dead_code)]
impl ReconnectPolicy {
    /// Default: 5 retries, 500 ms base delay, exponential backoff.
    pub fn default_policy() -> Self {
        Self {
            max_retries: 5,
            retry_delay_ms: 500,
            exponential_backoff: true,
        }
    }
    /// Delay (ms) before retry number `n` (0-based). With backoff enabled
    /// this is base * 2^min(n, 10); the multiplication saturates at
    /// `u64::MAX` instead of overflowing for extreme base delays.
    pub fn delay_for_retry(&self, n: u32) -> u64 {
        if self.exponential_backoff {
            self.retry_delay_ms.saturating_mul(1u64 << n.min(10))
        } else {
            self.retry_delay_ms
        }
    }
    /// Whether another attempt is allowed after `attempts` failures.
    pub fn can_retry(&self, attempts: u32) -> bool {
        attempts < self.max_retries
    }
}
#[derive(Clone, Debug)]
/// One processed change, as stored by `ChangeTracker`.
pub struct ChangeRecord {
    /// File the change applied to.
    pub path: PathBuf,
    /// What happened to the file.
    pub kind: WatchEventKind,
    /// When the change was recorded (monotonic clock).
    pub timestamp: Instant,
    /// Action the watcher chose in response.
    pub action_taken: WatchAction,
}
#[allow(dead_code)]
/// Tracks the lifecycle and success/failure tallies of hot reloads.
pub struct HotReloadSession {
    state: HotReloadState,
    success_count: u32,
    fail_count: u32,
    /// Path whose change triggered the latest reload, if any.
    last_changed: Option<std::path::PathBuf>,
}
#[allow(dead_code)]
impl HotReloadSession {
    /// Idle session with zeroed counters.
    pub fn new() -> Self {
        Self {
            state: HotReloadState::Idle,
            success_count: 0,
            fail_count: 0,
            last_changed: None,
        }
    }
    /// Marks a pending reload caused by a change to `path`.
    pub fn trigger(&mut self, path: std::path::PathBuf) {
        self.last_changed = Some(path);
        self.state = HotReloadState::Pending;
    }
    /// Transitions into the reloading state.
    pub fn start_reload(&mut self) {
        self.state = HotReloadState::Reloading;
    }
    /// Records a reload outcome, bumping the matching counter.
    pub fn finish_reload(&mut self, success: bool) {
        self.state = if success {
            self.success_count += 1;
            HotReloadState::Done
        } else {
            self.fail_count += 1;
            HotReloadState::Failed
        };
    }
    /// Returns to idle without touching the counters.
    pub fn reset(&mut self) {
        self.state = HotReloadState::Idle;
    }
    /// Current lifecycle state.
    pub fn state(&self) -> HotReloadState {
        self.state
    }
    /// One-line status summary.
    /// NOTE(review): formats `HotReloadState` with `{}` — the `Display` impl
    /// is not in this file, presumably in `super::functions`; confirm.
    pub fn status_line(&self) -> String {
        format!(
            "[hot-reload] state={} ok={} fail={}",
            self.state, self.success_count, self.fail_count
        )
    }
}
#[allow(dead_code)]
#[derive(Debug, Clone)]
/// A request for the build system, derived from a batch of file changes.
pub struct BuildRequest {
    /// Files whose changes triggered this request.
    pub changed_files: Vec<std::path::PathBuf>,
    /// What the build system should do.
    pub action: WatchAction,
    /// When the request was created (monotonic clock).
    pub created_at: std::time::Instant,
    /// When true, this request replaces anything still queued.
    pub supersedes_pending: bool,
}
#[allow(dead_code)]
impl BuildRequest {
    /// New superseding request stamped with the current instant.
    pub fn new(changed_files: Vec<std::path::PathBuf>, action: WatchAction) -> Self {
        Self {
            supersedes_pending: true,
            created_at: std::time::Instant::now(),
            changed_files,
            action,
        }
    }
    /// Number of attached files.
    pub fn file_count(&self) -> usize {
        self.changed_files.len()
    }
    /// One-line human summary, e.g. "<action> 3 file(s)".
    /// NOTE(review): formats `WatchAction` with `{}` — the `Display` impl is
    /// not in this file, presumably in `super::functions`; confirm.
    pub fn describe(&self) -> String {
        format!("{} {} file(s)", self.action, self.changed_files.len())
    }
}
#[derive(Clone, Debug)]
/// Criteria for deciding which events a `WatchSession` processes.
/// NOTE(review): interpretation happens in `should_process_event`
/// (`super::functions`), not in this file.
pub struct WatchFilter {
    /// Extension whitelist.
    pub extensions: Vec<String>,
    /// Patterns causing a path to be ignored.
    pub ignore_patterns: Vec<String>,
    /// Whether hidden files are considered.
    pub include_hidden: bool,
}
impl WatchFilter {
    /// A filter with no extension or ignore constraints and hidden files
    /// included.
    pub fn accept_all() -> Self {
        Self {
            extensions: Vec::new(),
            ignore_patterns: Vec::new(),
            include_hidden: true,
        }
    }
}
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
/// Change-detection mechanism.
/// NOTE(review): nothing in this file selects a backend; only polling logic
/// (`FileMonitor`) is implemented here.
pub enum WatchBackend {
    /// Periodic re-scan (portable).
    Polling,
    /// Linux inotify.
    Inotify,
    /// BSD/macOS kqueue.
    Kqueue,
    /// Windows ReadDirectoryChangesW.
    ReadDirChanges,
}
#[allow(dead_code)]
#[derive(Clone, Debug, Default)]
/// Include/exclude pattern lists; exclusion always wins over inclusion.
pub struct PatternSet {
    pub include: Vec<String>,
    pub exclude: Vec<String>,
}
#[allow(dead_code)]
impl PatternSet {
    /// Empty set (matches everything).
    pub fn new() -> Self {
        Self::default()
    }
    /// Adds an include pattern (builder style).
    pub fn include(mut self, pat: impl Into<String>) -> Self {
        self.include.push(pat.into());
        self
    }
    /// Adds an exclude pattern (builder style).
    pub fn exclude(mut self, pat: impl Into<String>) -> Self {
        self.exclude.push(pat.into());
        self
    }
    /// True when `path` survives every exclude (a pattern match OR a plain
    /// substring hit rejects it) and matches at least one include pattern;
    /// an empty include list accepts everything.
    pub fn matches(&self, path: &std::path::Path) -> bool {
        let lossy = path.to_string_lossy();
        let excluded = self
            .exclude
            .iter()
            .any(|pat| path_matches_pattern(path, pat) || lossy.contains(pat.as_str()));
        if excluded {
            return false;
        }
        self.include.is_empty() || self.include.iter().any(|pat| path_matches_pattern(path, pat))
    }
    /// Pattern set covering Oxilean source files while skipping VCS and
    /// build directories.
    pub fn oxilean_sources() -> Self {
        Self::new()
            .include("*.lean")
            .include("*.oxilean")
            .exclude(".git")
            .exclude("build")
    }
}
#[allow(dead_code)]
#[derive(Clone, Debug)]
/// A path under observation together with its configuration.
pub struct WatchTarget {
    pub path: PathBuf,
    pub config: WatcherConfig,
    /// Disabled targets are skipped by `WatcherRegistry::enabled_targets`.
    pub enabled: bool,
}
impl WatchTarget {
    #[allow(dead_code)]
    /// New target, enabled by default.
    pub fn new(path: impl Into<PathBuf>, config: WatcherConfig) -> Self {
        let path = path.into();
        Self {
            path,
            config,
            enabled: true,
        }
    }
}
#[allow(dead_code)]
#[derive(Clone, Debug, Default)]
/// Aggregate view of one directory's files (non-recursive).
pub struct WatcherSnapshot {
    /// Files counted after filtering.
    pub file_count: usize,
    /// Sum of their sizes in bytes.
    pub total_size_bytes: u64,
    /// Largest Unix mtime (seconds) seen; 0 when unavailable.
    pub newest_modification_secs: u64,
}
impl WatcherSnapshot {
    #[allow(dead_code)]
    /// Summarizes the direct children of `dir` that pass `config`'s
    /// include/exclude rules. An unreadable directory yields an empty
    /// (default) snapshot; subdirectories are not descended into.
    pub fn from_dir(dir: &Path, config: &WatcherConfig) -> Self {
        let mut snap = Self::default();
        let entries = match std::fs::read_dir(dir) {
            Ok(e) => e,
            Err(_) => return snap,
        };
        for entry in entries.flatten() {
            let path = entry.path();
            if config.should_exclude(&path) || !config.should_include(&path) {
                continue;
            }
            let Ok(meta) = entry.metadata() else {
                continue;
            };
            if !meta.is_file() {
                continue;
            }
            snap.file_count += 1;
            snap.total_size_bytes += meta.len();
            // Missing or pre-epoch mtimes collapse to 0, which never wins `max`.
            let modified_secs = meta
                .modified()
                .ok()
                .and_then(|t| t.duration_since(std::time::UNIX_EPOCH).ok())
                .map(|d| d.as_secs())
                .unwrap_or(0);
            snap.newest_modification_secs = snap.newest_modification_secs.max(modified_secs);
        }
        snap
    }
}
#[derive(Clone, Debug)]
/// Snapshot of one file's observable state, used for change diffing.
pub struct FileState {
    pub path: PathBuf,
    /// Modification time as an offset from the Unix epoch (zero when
    /// unavailable or pre-epoch).
    pub modified: Duration,
    pub size: u64,
    /// Content-hash slot; `from_metadata` always leaves it 0.
    pub hash: u64,
}
impl FileState {
    /// Builds a state from filesystem metadata. Missing or pre-epoch mtimes
    /// collapse to `Duration::ZERO`; the hash starts at 0.
    pub fn from_metadata(path: PathBuf, meta: &std::fs::Metadata) -> Self {
        let modified = match meta.modified() {
            Ok(t) => t.duration_since(SystemTime::UNIX_EPOCH).unwrap_or(Duration::ZERO),
            Err(_) => Duration::ZERO,
        };
        Self {
            path,
            modified,
            size: meta.len(),
            hash: 0,
        }
    }
}
#[derive(Clone, Debug)]
/// A single detected filesystem change.
pub struct WatchEvent {
    /// File the event refers to.
    pub path: PathBuf,
    /// What happened.
    pub kind: WatchEventKind,
    /// When the event object was created (monotonic clock).
    pub timestamp: Instant,
}
impl WatchEvent {
    /// Event stamped with the current instant.
    pub fn new(path: PathBuf, kind: WatchEventKind) -> Self {
        let timestamp = Instant::now();
        Self { path, kind, timestamp }
    }
    /// The path's extension as UTF-8, if it has one.
    pub fn extension(&self) -> Option<&str> {
        self.path.extension()?.to_str()
    }
}
#[derive(Debug)]
/// High-level watch loop state: a polling monitor plus filtering,
/// debouncing, and change-history bookkeeping.
pub struct WatchSession {
    monitor: FileMonitor,
    config: WatchConfig,
    filter: WatchFilter,
    tracker: ChangeTracker,
}
impl WatchSession {
    /// Builds a session whose filter mirrors the config's extension and
    /// ignore lists; hidden files are excluded.
    /// NOTE(review): the monitor's poll interval is seeded from
    /// `config.debounce_ms` — looks intentional, but worth confirming.
    pub fn new(config: WatchConfig) -> Self {
        Self {
            monitor: FileMonitor::new(config.debounce_ms),
            filter: WatchFilter {
                extensions: config.extensions.clone(),
                ignore_patterns: config.ignore_dirs.clone(),
                include_hidden: false,
            },
            tracker: ChangeTracker::new(),
            config,
        }
    }
    /// Adds a directory to the monitored set.
    pub fn watch_directory(&mut self, dir: impl Into<PathBuf>) {
        self.monitor.add_watch(dir);
    }
    /// Primes the monitor with an initial scan (its events are discarded so
    /// pre-existing files don't surface as "created") and returns a handle.
    pub fn start_watch(&mut self) -> WatchHandle {
        let _ = self.monitor.poll_changes();
        WatchHandle::new()
    }
    /// Polls the monitor, filters and debounces the events, records each
    /// survivor in the tracker, and returns the survivors.
    pub fn process_events(&mut self) -> Vec<WatchEvent> {
        let raw = self.monitor.poll_changes();
        let filtered: Vec<WatchEvent> = raw
            .into_iter()
            .filter(|ev| should_process_event(ev, &self.filter))
            .collect();
        let ready = debounce(&filtered, self.config.debounce_ms);
        for ev in &ready {
            self.tracker.record_change(ChangeRecord {
                path: ev.path.clone(),
                kind: ev.kind,
                timestamp: Instant::now(),
                action_taken: self.config.action,
            });
        }
        ready
    }
    /// Read access to the change history.
    pub fn tracker(&self) -> &ChangeTracker {
        &self.tracker
    }
    /// Read access to the session configuration.
    pub fn config(&self) -> &WatchConfig {
        &self.config
    }
}
#[allow(dead_code)]
/// Collects events into a time-boxed batch keyed by path; the latest event
/// for each path wins.
pub struct EventDebouncer {
    /// Batch window length in milliseconds.
    window_ms: u64,
    /// Latest event per (lossy) path string.
    pending: HashMap<String, WatchEvent>,
    /// When the current batch opened; None while idle.
    batch_start: Option<std::time::Instant>,
}
#[allow(dead_code)]
impl EventDebouncer {
    /// Idle debouncer with the given batch window.
    pub fn new(window_ms: u64) -> Self {
        Self {
            window_ms,
            pending: HashMap::new(),
            batch_start: None,
        }
    }
    /// Adds an event, opening a batch if none is active. A later event for
    /// the same path replaces the earlier one.
    pub fn push(&mut self, event: WatchEvent) {
        self.batch_start.get_or_insert_with(std::time::Instant::now);
        let key = event.path.to_string_lossy().into_owned();
        self.pending.insert(key, event);
    }
    /// True once the batch window has fully elapsed.
    pub fn is_ready(&self) -> bool {
        match self.batch_start {
            Some(start) => start.elapsed().as_millis() >= u128::from(self.window_ms),
            None => false,
        }
    }
    /// Closes the batch and returns its events (arbitrary order).
    pub fn flush(&mut self) -> Vec<WatchEvent> {
        self.batch_start = None;
        self.pending.drain().map(|(_, ev)| ev).collect()
    }
    /// Number of distinct paths currently pending.
    pub fn pending_count(&self) -> usize {
        self.pending.len()
    }
    /// Drops the batch without emitting anything.
    pub fn discard(&mut self) {
        self.pending.clear();
        self.batch_start = None;
    }
}