1use crate::db_healthcheck::DbHealthChecker;
2use crate::error::Error;
3use heed::types::Bytes;
4use heed::{Database, Env, EnvOpenOptions};
5use heed::{EnvFlags, types::SerdeBincode};
6use serde::{Deserialize, Serialize};
7use std::collections::VecDeque;
8use std::fs;
9use std::path::{Path, PathBuf};
10use std::time::{SystemTime, UNIX_EPOCH};
11
/// Maximum number of history entries retained per project; once exceeded,
/// the oldest entries are evicted FIFO (see `append_to_history`).
const MAX_HISTORY_ENTRIES: usize = 128;
13
/// Record of which file a query last opened, with usage statistics.
/// Stored bincode-serialized, keyed by hash(project path, query).
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct QueryMatchEntry {
    /// File the query most recently resolved to.
    pub file_path: PathBuf,
    /// Number of consecutive times this query opened the same file
    /// (reset to 1 when a different file is opened).
    pub open_count: u32,
    /// Unix timestamp (seconds) of the most recent open.
    pub last_opened: u64,
}
21
/// A single recorded query in a per-project history queue.
#[derive(Debug, Serialize, Deserialize, Clone)]
struct HistoryEntry {
    /// The raw query string as typed.
    query: String,
    /// Unix timestamp (seconds) when the query was recorded.
    timestamp: u64,
}
28
/// Persistent query tracker backed by an LMDB (heed) environment with three
/// named databases.
#[derive(Debug)]
pub struct QueryTracker {
    env: Env,
    /// hash(project, query) -> last matched file entry.
    query_file_db: Database<Bytes, SerdeBincode<QueryMatchEntry>>,
    /// hash(project) -> bounded FIFO of fuzzy-find query history.
    query_history_db: Database<Bytes, SerdeBincode<VecDeque<HistoryEntry>>>,
    /// hash(project) -> bounded FIFO of grep query history.
    grep_query_history_db: Database<Bytes, SerdeBincode<VecDeque<HistoryEntry>>>,
}
39
40impl DbHealthChecker for QueryTracker {
41 fn get_env(&self) -> &Env {
42 &self.env
43 }
44
45 fn count_entries(&self) -> Result<Vec<(&'static str, u64)>, Error> {
46 let rtxn = self.env.read_txn().map_err(Error::DbStartReadTxn)?;
47
48 let count_queries = self.query_file_db.len(&rtxn).map_err(Error::DbRead)?;
49 let count_histories = self.query_history_db.len(&rtxn).map_err(Error::DbRead)?;
50 let count_grep_histories = self
51 .grep_query_history_db
52 .len(&rtxn)
53 .map_err(Error::DbRead)?;
54
55 Ok(vec![
56 ("query_file_entries", count_queries),
57 ("query_history_entries", count_histories),
58 ("grep_query_history_entries", count_grep_histories),
59 ])
60 }
61}
62
63impl QueryTracker {
64 pub fn db_path(&self) -> &Path {
66 self.env.path()
67 }
68
69 pub fn new(db_path: impl AsRef<Path>, use_unsafe_no_lock: bool) -> Result<Self, Error> {
70 let db_path = db_path.as_ref();
71 fs::create_dir_all(db_path).map_err(Error::CreateDir)?;
72
73 let env = unsafe {
74 let mut opts = EnvOpenOptions::new();
75 opts.map_size(10 * 1024 * 1024); opts.max_dbs(16); if use_unsafe_no_lock {
78 opts.flags(EnvFlags::NO_LOCK | EnvFlags::NO_SYNC | EnvFlags::NO_META_SYNC);
79 }
80 opts.open(db_path).map_err(Error::EnvOpen)?
81 };
82
83 env.clear_stale_readers()
84 .map_err(Error::DbClearStaleReaders)?;
85
86 let mut wtxn = env.write_txn().map_err(Error::DbStartWriteTxn)?;
87
88 let query_file_db = env
90 .create_database(&mut wtxn, Some("query_file_associations"))
91 .map_err(Error::DbCreate)?;
92 let query_history_db = env
93 .create_database(&mut wtxn, Some("query_history"))
94 .map_err(Error::DbCreate)?;
95 let grep_query_history_db = env
96 .create_database(&mut wtxn, Some("grep_query_history"))
97 .map_err(Error::DbCreate)?;
98
99 wtxn.commit().map_err(Error::DbCommit)?;
100
101 Ok(QueryTracker {
102 env,
103 query_file_db,
104 query_history_db,
105 grep_query_history_db,
106 })
107 }
108
109 fn get_now(&self) -> u64 {
110 SystemTime::now()
111 .duration_since(UNIX_EPOCH)
112 .unwrap()
113 .as_secs()
114 }
115
116 fn create_query_key(project_path: &Path, query: &str) -> Result<[u8; 32], Error> {
117 let project_str = project_path
118 .to_str()
119 .ok_or_else(|| Error::InvalidPath(project_path.to_path_buf()))?;
120
121 let mut hasher = blake3::Hasher::default();
122 hasher.update(project_str.as_bytes());
123 hasher.update(b"::");
124 hasher.update(query.as_bytes());
125
126 Ok(*hasher.finalize().as_bytes())
127 }
128
129 fn create_project_key(project_path: &Path) -> Result<[u8; 32], Error> {
130 let project_str = project_path
131 .to_str()
132 .ok_or_else(|| Error::InvalidPath(project_path.to_path_buf()))?;
133
134 Ok(*blake3::hash(project_str.as_bytes()).as_bytes())
135 }
136
137 fn append_to_history(
139 db: &Database<Bytes, SerdeBincode<VecDeque<HistoryEntry>>>,
140 wtxn: &mut heed::RwTxn,
141 project_key: &[u8; 32],
142 query: &str,
143 now: u64,
144 ) -> Result<(), Error> {
145 let mut history = db
146 .get(wtxn, project_key)
147 .map_err(Error::DbRead)?
148 .unwrap_or_default();
149
150 history.push_back(HistoryEntry {
151 query: query.to_string(),
152 timestamp: now,
153 });
154 while history.len() > MAX_HISTORY_ENTRIES {
155 history.pop_front();
156 }
157
158 db.put(wtxn, project_key, &history)
159 .map_err(Error::DbWrite)?;
160 Ok(())
161 }
162
163 fn read_history_at_offset(
166 db: &Database<Bytes, SerdeBincode<VecDeque<HistoryEntry>>>,
167 env: &Env,
168 project_key: &[u8; 32],
169 offset: usize,
170 ) -> Result<Option<String>, Error> {
171 let rtxn = env.read_txn().map_err(Error::DbStartReadTxn)?;
172
173 let mut history = db
174 .get(&rtxn, project_key)
175 .map_err(Error::DbRead)?
176 .unwrap_or_default();
177
178 if history.len() > offset {
180 let index = history.len() - 1 - offset;
181 let record = history.remove(index);
182 Ok(record.map(|r| r.query))
183 } else {
184 Ok(None)
185 }
186 }
187
188 pub fn track_query_completion(
189 &mut self,
190 query: &str,
191 project_path: &Path,
192 file_path: &Path,
193 ) -> Result<(), Error> {
194 let now = self.get_now();
195 let file_path_buf = file_path.to_path_buf();
196
197 let query_key = Self::create_query_key(project_path, query)?;
198 let mut wtxn = self.env.write_txn().map_err(Error::DbStartWriteTxn)?;
199
200 let mut entry = self
201 .query_file_db
202 .get(&wtxn, &query_key)
203 .map_err(Error::DbRead)?
204 .unwrap_or_else(|| QueryMatchEntry {
205 file_path: file_path_buf.clone(),
206 open_count: 0,
207 last_opened: now,
208 });
209
210 if entry.file_path == file_path_buf {
211 tracing::debug!(
212 ?query,
213 ?file_path,
214 "Query completed for same file as last time"
215 );
216
217 entry.open_count += 1;
219 } else {
220 tracing::debug!(
221 ?query,
222 ?file_path,
223 "Query completed for different file than last time"
224 );
225
226 entry.file_path = file_path_buf;
228 entry.open_count = 1;
229 }
230
231 entry.last_opened = now;
232
233 self.query_file_db
234 .put(&mut wtxn, &query_key, &entry)
235 .map_err(Error::DbWrite)?;
236
237 let project_key = Self::create_project_key(project_path)?;
239 Self::append_to_history(&self.query_history_db, &mut wtxn, &project_key, query, now)?;
240
241 wtxn.commit().map_err(Error::DbCommit)?;
242
243 tracing::debug!(?query, ?file_path, "Tracked query completion");
244 Ok(())
245 }
246
247 pub fn get_last_query_entry(
248 &self,
249 query: &str,
250 project_path: &Path,
251 min_combo_count: u32,
252 ) -> Result<Option<QueryMatchEntry>, Error> {
253 let query_key = Self::create_query_key(project_path, query)?;
254 let rtxn = self.env.read_txn().map_err(Error::DbStartReadTxn)?;
255
256 let last_match = self
257 .query_file_db
258 .get(&rtxn, &query_key)
259 .map_err(Error::DbRead)?;
260
261 Ok(last_match.filter(|entry| entry.open_count >= min_combo_count))
262 }
263
264 pub fn get_last_query_path(
265 &self,
266 query: &str,
267 project_path: &Path,
268 file_path: &Path,
269 combo_boost: i32,
270 ) -> Result<i32, Error> {
271 let query_key = Self::create_query_key(project_path, query)?;
272 tracing::debug!(?query_key, "HASH");
273 let rtxn = self.env.read_txn().map_err(Error::DbStartReadTxn)?;
274
275 match self
276 .query_file_db
277 .get(&rtxn, &query_key)
278 .map_err(Error::DbRead)?
279 {
280 Some(entry) => {
281 if entry.file_path == file_path && entry.open_count >= 2 {
283 Ok(combo_boost)
284 } else {
285 Ok(0)
286 }
287 }
288 None => Ok(0), }
290 }
291
292 pub fn get_historical_query(
295 &self,
296 project_path: &Path,
297 offset: usize,
298 ) -> Result<Option<String>, Error> {
299 let project_key = Self::create_project_key(project_path)?;
300 Self::read_history_at_offset(&self.query_history_db, &self.env, &project_key, offset)
301 }
302
303 pub fn track_grep_query(&mut self, query: &str, project_path: &Path) -> Result<(), Error> {
306 let now = self.get_now();
307 let project_key = Self::create_project_key(project_path)?;
308 let mut wtxn = self.env.write_txn().map_err(Error::DbStartWriteTxn)?;
309
310 Self::append_to_history(
311 &self.grep_query_history_db,
312 &mut wtxn,
313 &project_key,
314 query,
315 now,
316 )?;
317
318 wtxn.commit().map_err(Error::DbCommit)?;
319
320 tracing::debug!(?query, "Tracked grep query");
321 Ok(())
322 }
323
324 pub fn get_historical_grep_query(
327 &self,
328 project_path: &Path,
329 offset: usize,
330 ) -> Result<Option<String>, Error> {
331 let project_key = Self::create_project_key(project_path)?;
332 Self::read_history_at_offset(&self.grep_query_history_db, &self.env, &project_key, offset)
333 }
334}
335
#[cfg(test)]
mod tests {
    use super::*;
    use std::env;

    #[test]
    fn test_query_tracking() {
        let temp_dir = env::temp_dir().join("fff_test_query_tracking_new");
        let _ = std::fs::remove_dir_all(&temp_dir);

        let mut tracker = QueryTracker::new(temp_dir.to_str().unwrap(), true).unwrap();

        let project = PathBuf::from("/test/project");
        let main_file = PathBuf::from("/test/project/src/main.rs");
        let other_file = PathBuf::from("/test/project/src/lib.rs");

        // A single completion does not establish a combo yet.
        tracker
            .track_query_completion("main", &project, &main_file)
            .unwrap();
        assert_eq!(
            tracker
                .get_last_query_path("main", &project, &main_file, 10000)
                .unwrap(),
            0,
            "First completion should not boost"
        );

        // Repeating the same query/file pair unlocks the boost.
        tracker
            .track_query_completion("main", &project, &main_file)
            .unwrap();
        assert_eq!(
            tracker
                .get_last_query_path("main", &project, &main_file, 10000)
                .unwrap(),
            10000,
            "Second completion should boost"
        );

        // Completing with a different file resets the streak...
        tracker
            .track_query_completion("main", &project, &other_file)
            .unwrap();
        assert_eq!(
            tracker
                .get_last_query_path("main", &project, &other_file, 10000)
                .unwrap(),
            0,
            "Different file should reset boost"
        );

        // ...and the previously boosted file loses its boost too.
        assert_eq!(
            tracker
                .get_last_query_path("main", &project, &main_file, 10000)
                .unwrap(),
            0,
            "Original file should not boost after replacement"
        );

        let _ = std::fs::remove_dir_all(&temp_dir);
    }

    #[test]
    fn test_hashing_functions() {
        let project = PathBuf::from("/test/project");
        let other_project = PathBuf::from("/other/project");

        // Project keys are deterministic.
        assert_eq!(
            QueryTracker::create_project_key(&project).unwrap(),
            QueryTracker::create_project_key(&project).unwrap(),
            "Same project should hash to same key"
        );

        let key_test = QueryTracker::create_query_key(&project, "test").unwrap();

        // Query keys are deterministic and sensitive to both inputs.
        assert_eq!(
            key_test,
            QueryTracker::create_query_key(&project, "test").unwrap(),
            "Same project+query should hash to same key"
        );
        assert_ne!(
            key_test,
            QueryTracker::create_query_key(&project, "different").unwrap(),
            "Different queries should hash to different keys"
        );
        assert_ne!(
            key_test,
            QueryTracker::create_query_key(&other_project, "test").unwrap(),
            "Different projects should hash to different keys"
        );
    }
}