// memvid_cli/analytics/queue.rs

1//! Local JSONL queue for analytics events
2//!
3//! Events are appended to a local file and periodically flushed
4//! to the server in batches.
5
6use serde::{Deserialize, Serialize};
7use std::fs::{self, OpenOptions};
8use std::io::{BufRead, BufReader, Write};
9use std::path::PathBuf;
10use std::sync::Mutex;
11
/// Analytics event structure
///
/// One event is serialized as a single JSON line in the local queue file
/// and later flushed to the server in a batch. Fields marked
/// `#[serde(default)]` tolerate older queue entries written before the
/// field existed.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnalyticsEvent {
    /// Anonymous installation/user identifier.
    pub anon_id: String,
    /// Hash identifying the file the command operated on.
    pub file_hash: String,
    /// Client identifier (e.g. CLI name/version) — presumably set by the caller; verify at call sites.
    pub client: String,
    /// The CLI command that was executed.
    pub command: String,
    /// Whether the command completed successfully.
    pub success: bool,
    /// Event timestamp as a string (format determined by the producer — TODO confirm).
    pub timestamp: String,
    // Defaults to false when absent in older queue entries.
    #[serde(default)]
    pub file_created: bool,
    // Defaults to false when absent in older queue entries.
    #[serde(default)]
    pub file_opened: bool,
    /// User tier: "free", or plan name like "starter", "teams", "enterprise"
    #[serde(default = "default_tier")]
    pub user_tier: String,
}
29
/// Serde default for [`AnalyticsEvent::user_tier`]: the "free" tier.
fn default_tier() -> String {
    String::from("free")
}
33
/// Global queue file lock
///
/// Serializes all access (append / read / clear) to the queue file within
/// this process. Note this does not protect against other processes
/// touching the same file concurrently.
static QUEUE_LOCK: Mutex<()> = Mutex::new(());
36
37/// Get the analytics queue directory
38fn get_analytics_dir() -> Option<PathBuf> {
39    dirs::data_local_dir().map(|d| d.join("memvid").join("analytics"))
40}
41
42/// Get the queue file path
43fn get_queue_path() -> Option<PathBuf> {
44    get_analytics_dir().map(|d| d.join("queue.jsonl"))
45}
46
47/// Ensure analytics directory exists
48fn ensure_dir() -> Option<PathBuf> {
49    let dir = get_analytics_dir()?;
50    fs::create_dir_all(&dir).ok()?;
51    Some(dir)
52}
53
54/// Track an analytics event (append to local queue)
55/// This is fire-and-forget - errors are silently ignored
56pub fn track_event(event: AnalyticsEvent) {
57    if let Err(_e) = track_event_inner(event) {
58        // Silently ignore errors - analytics should never impact UX
59        #[cfg(debug_assertions)]
60        eprintln!("[analytics] Failed to queue event: {}", _e);
61    }
62}
63
64fn track_event_inner(event: AnalyticsEvent) -> std::io::Result<()> {
65    let _lock = QUEUE_LOCK.lock().map_err(|_| {
66        std::io::Error::new(std::io::ErrorKind::Other, "Failed to acquire queue lock")
67    })?;
68
69    let queue_path = get_queue_path()
70        .ok_or_else(|| std::io::Error::new(std::io::ErrorKind::NotFound, "Cannot find data dir"))?;
71
72    ensure_dir();
73
74    let mut file = OpenOptions::new()
75        .create(true)
76        .append(true)
77        .open(&queue_path)?;
78
79    let json = serde_json::to_string(&event).map_err(|e| {
80        std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string())
81    })?;
82
83    writeln!(file, "{}", json)?;
84    Ok(())
85}
86
87/// Read all pending events from the queue
88pub fn read_pending_events() -> Vec<AnalyticsEvent> {
89    let _lock = match QUEUE_LOCK.lock() {
90        Ok(lock) => lock,
91        Err(_) => return vec![],
92    };
93
94    let queue_path = match get_queue_path() {
95        Some(p) => p,
96        None => return vec![],
97    };
98
99    if !queue_path.exists() {
100        return vec![];
101    }
102
103    let file = match fs::File::open(&queue_path) {
104        Ok(f) => f,
105        Err(_) => return vec![],
106    };
107
108    let reader = BufReader::new(file);
109    let mut events = Vec::new();
110
111    for line in reader.lines() {
112        if let Ok(line) = line {
113            if let Ok(event) = serde_json::from_str::<AnalyticsEvent>(&line) {
114                events.push(event);
115            }
116        }
117    }
118
119    events
120}
121
122/// Clear the queue after successful flush
123pub fn clear_queue() {
124    let _lock = match QUEUE_LOCK.lock() {
125        Ok(lock) => lock,
126        Err(_) => return,
127    };
128
129    if let Some(queue_path) = get_queue_path() {
130        let _ = fs::remove_file(&queue_path);
131    }
132}
133
134/// Get the number of pending events
135pub fn pending_count() -> usize {
136    read_pending_events().len()
137}
138
139/// Get the queue file size in bytes
140#[allow(dead_code)]
141pub fn queue_size_bytes() -> u64 {
142    get_queue_path()
143        .and_then(|p| fs::metadata(p).ok())
144        .map(|m| m.len())
145        .unwrap_or(0)
146}