1use crate::db_healthcheck::DbHealthChecker;
2use crate::error::Error;
3use heed::types::Bytes;
4use heed::{Database, Env, EnvOpenOptions};
5use heed::{EnvFlags, types::SerdeBincode};
6use serde::{Deserialize, Serialize};
7use std::collections::VecDeque;
8use std::fs;
9use std::path::{Path, PathBuf};
10use std::time::{SystemTime, UNIX_EPOCH};
11
/// Upper bound on entries kept per project in each history database;
/// once exceeded, the oldest entries are evicted front-first.
const MAX_HISTORY_ENTRIES: usize = 128;
13
14#[derive(Debug, Serialize, Deserialize, Clone)]
16pub struct QueryMatchEntry {
17 pub file_path: PathBuf, pub open_count: u32, pub last_opened: u64, }
21
/// One recorded query in a project's bounded history.
/// NOTE: field order must stay stable — values are persisted via bincode.
#[derive(Debug, Serialize, Deserialize, Clone)]
struct HistoryEntry {
    // The query string exactly as entered.
    query: String,
    // Seconds since the UNIX epoch when the query was recorded.
    timestamp: u64,
}
28
/// Persistent tracker of file-picker and grep query activity, backed by
/// an LMDB (heed) environment with three named databases.
#[derive(Debug)]
pub struct QueryTracker {
    // LMDB environment shared by all databases below.
    env: Env,
    // blake3(project "::" query) -> most recent file opened for that query.
    query_file_db: Database<Bytes, SerdeBincode<QueryMatchEntry>>,
    // blake3(project) -> bounded FIFO of file-picker queries.
    query_history_db: Database<Bytes, SerdeBincode<VecDeque<HistoryEntry>>>,
    // blake3(project) -> bounded FIFO of grep queries.
    grep_query_history_db: Database<Bytes, SerdeBincode<VecDeque<HistoryEntry>>>,
}
39
40impl DbHealthChecker for QueryTracker {
41 fn get_env(&self) -> &Env {
42 &self.env
43 }
44
45 fn count_entries(&self) -> Result<Vec<(&'static str, u64)>, Error> {
46 let rtxn = self.env.read_txn().map_err(Error::DbStartReadTxn)?;
47
48 let count_queries = self.query_file_db.len(&rtxn).map_err(Error::DbRead)?;
49 let count_histories = self.query_history_db.len(&rtxn).map_err(Error::DbRead)?;
50 let count_grep_histories = self
51 .grep_query_history_db
52 .len(&rtxn)
53 .map_err(Error::DbRead)?;
54
55 Ok(vec![
56 ("query_file_entries", count_queries),
57 ("query_history_entries", count_histories),
58 ("grep_query_history_entries", count_grep_histories),
59 ])
60 }
61}
62
63impl QueryTracker {
64 pub fn new(db_path: &str, use_unsafe_no_lock: bool) -> Result<Self, Error> {
65 fs::create_dir_all(db_path).map_err(Error::CreateDir)?;
66 let env = unsafe {
67 let mut opts = EnvOpenOptions::new();
68 opts.map_size(10 * 1024 * 1024); opts.max_dbs(16); if use_unsafe_no_lock {
71 opts.flags(EnvFlags::NO_LOCK | EnvFlags::NO_SYNC | EnvFlags::NO_META_SYNC);
72 }
73 opts.open(db_path).map_err(Error::EnvOpen)?
74 };
75
76 env.clear_stale_readers()
77 .map_err(Error::DbClearStaleReaders)?;
78
79 let mut wtxn = env.write_txn().map_err(Error::DbStartWriteTxn)?;
80
81 let query_file_db = env
83 .create_database(&mut wtxn, Some("query_file_associations"))
84 .map_err(Error::DbCreate)?;
85 let query_history_db = env
86 .create_database(&mut wtxn, Some("query_history"))
87 .map_err(Error::DbCreate)?;
88 let grep_query_history_db = env
89 .create_database(&mut wtxn, Some("grep_query_history"))
90 .map_err(Error::DbCreate)?;
91
92 wtxn.commit().map_err(Error::DbCommit)?;
93
94 Ok(QueryTracker {
95 env,
96 query_file_db,
97 query_history_db,
98 grep_query_history_db,
99 })
100 }
101
102 fn get_now(&self) -> u64 {
103 SystemTime::now()
104 .duration_since(UNIX_EPOCH)
105 .unwrap()
106 .as_secs()
107 }
108
109 fn create_query_key(project_path: &Path, query: &str) -> Result<[u8; 32], Error> {
110 let project_str = project_path
111 .to_str()
112 .ok_or_else(|| Error::InvalidPath(project_path.to_path_buf()))?;
113
114 let mut hasher = blake3::Hasher::default();
115 hasher.update(project_str.as_bytes());
116 hasher.update(b"::");
117 hasher.update(query.as_bytes());
118
119 Ok(*hasher.finalize().as_bytes())
120 }
121
122 fn create_project_key(project_path: &Path) -> Result<[u8; 32], Error> {
123 let project_str = project_path
124 .to_str()
125 .ok_or_else(|| Error::InvalidPath(project_path.to_path_buf()))?;
126
127 Ok(*blake3::hash(project_str.as_bytes()).as_bytes())
128 }
129
130 fn append_to_history(
132 db: &Database<Bytes, SerdeBincode<VecDeque<HistoryEntry>>>,
133 wtxn: &mut heed::RwTxn,
134 project_key: &[u8; 32],
135 query: &str,
136 now: u64,
137 ) -> Result<(), Error> {
138 let mut history = db
139 .get(wtxn, project_key)
140 .map_err(Error::DbRead)?
141 .unwrap_or_default();
142
143 history.push_back(HistoryEntry {
144 query: query.to_string(),
145 timestamp: now,
146 });
147 while history.len() > MAX_HISTORY_ENTRIES {
148 history.pop_front();
149 }
150
151 db.put(wtxn, project_key, &history)
152 .map_err(Error::DbWrite)?;
153 Ok(())
154 }
155
156 fn read_history_at_offset(
159 db: &Database<Bytes, SerdeBincode<VecDeque<HistoryEntry>>>,
160 env: &Env,
161 project_key: &[u8; 32],
162 offset: usize,
163 ) -> Result<Option<String>, Error> {
164 let rtxn = env.read_txn().map_err(Error::DbStartReadTxn)?;
165
166 let mut history = db
167 .get(&rtxn, project_key)
168 .map_err(Error::DbRead)?
169 .unwrap_or_default();
170
171 if history.len() > offset {
173 let index = history.len() - 1 - offset;
174 let record = history.remove(index);
175 Ok(record.map(|r| r.query))
176 } else {
177 Ok(None)
178 }
179 }
180
181 pub fn track_query_completion(
182 &mut self,
183 query: &str,
184 project_path: &Path,
185 file_path: &Path,
186 ) -> Result<(), Error> {
187 let now = self.get_now();
188 let file_path_buf = file_path.to_path_buf();
189
190 let query_key = Self::create_query_key(project_path, query)?;
191 let mut wtxn = self.env.write_txn().map_err(Error::DbStartWriteTxn)?;
192
193 let mut entry = self
194 .query_file_db
195 .get(&wtxn, &query_key)
196 .map_err(Error::DbRead)?
197 .unwrap_or_else(|| QueryMatchEntry {
198 file_path: file_path_buf.clone(),
199 open_count: 0,
200 last_opened: now,
201 });
202
203 if entry.file_path == file_path_buf {
204 tracing::debug!(
205 ?query,
206 ?file_path,
207 "Query completed for same file as last time"
208 );
209
210 entry.open_count += 1;
212 } else {
213 tracing::debug!(
214 ?query,
215 ?file_path,
216 "Query completed for different file than last time"
217 );
218
219 entry.file_path = file_path_buf;
221 entry.open_count = 1;
222 }
223
224 entry.last_opened = now;
225
226 self.query_file_db
227 .put(&mut wtxn, &query_key, &entry)
228 .map_err(Error::DbWrite)?;
229
230 let project_key = Self::create_project_key(project_path)?;
232 Self::append_to_history(&self.query_history_db, &mut wtxn, &project_key, query, now)?;
233
234 wtxn.commit().map_err(Error::DbCommit)?;
235
236 tracing::debug!(?query, ?file_path, "Tracked query completion");
237 Ok(())
238 }
239
240 pub fn get_last_query_entry(
241 &self,
242 query: &str,
243 project_path: &Path,
244 min_combo_count: u32,
245 ) -> Result<Option<QueryMatchEntry>, Error> {
246 let query_key = Self::create_query_key(project_path, query)?;
247 tracing::debug!(?query_key, "HASH");
248 let rtxn = self.env.read_txn().map_err(Error::DbStartReadTxn)?;
249
250 let last_match = self
251 .query_file_db
252 .get(&rtxn, &query_key)
253 .map_err(Error::DbRead)?;
254
255 Ok(last_match.filter(|entry| entry.open_count >= min_combo_count))
256 }
257
258 pub fn get_last_query_path(
259 &self,
260 query: &str,
261 project_path: &Path,
262 file_path: &Path,
263 combo_boost: i32,
264 ) -> Result<i32, Error> {
265 let query_key = Self::create_query_key(project_path, query)?;
266 tracing::debug!(?query_key, "HASH");
267 let rtxn = self.env.read_txn().map_err(Error::DbStartReadTxn)?;
268
269 match self
270 .query_file_db
271 .get(&rtxn, &query_key)
272 .map_err(Error::DbRead)?
273 {
274 Some(entry) => {
275 if entry.file_path == file_path && entry.open_count >= 2 {
277 Ok(combo_boost)
278 } else {
279 Ok(0)
280 }
281 }
282 None => Ok(0), }
284 }
285
286 pub fn get_historical_query(
289 &self,
290 project_path: &Path,
291 offset: usize,
292 ) -> Result<Option<String>, Error> {
293 let project_key = Self::create_project_key(project_path)?;
294 Self::read_history_at_offset(&self.query_history_db, &self.env, &project_key, offset)
295 }
296
297 pub fn track_grep_query(&mut self, query: &str, project_path: &Path) -> Result<(), Error> {
300 let now = self.get_now();
301 let project_key = Self::create_project_key(project_path)?;
302 let mut wtxn = self.env.write_txn().map_err(Error::DbStartWriteTxn)?;
303
304 Self::append_to_history(
305 &self.grep_query_history_db,
306 &mut wtxn,
307 &project_key,
308 query,
309 now,
310 )?;
311
312 wtxn.commit().map_err(Error::DbCommit)?;
313
314 tracing::debug!(?query, "Tracked grep query");
315 Ok(())
316 }
317
318 pub fn get_historical_grep_query(
321 &self,
322 project_path: &Path,
323 offset: usize,
324 ) -> Result<Option<String>, Error> {
325 let project_key = Self::create_project_key(project_path)?;
326 Self::read_history_at_offset(&self.grep_query_history_db, &self.env, &project_key, offset)
327 }
328}
329
#[cfg(test)]
mod tests {
    use super::*;
    use std::env;

    /// End-to-end check of the combo-boost lifecycle against a real
    /// (temporary, unlocked) database.
    #[test]
    fn test_query_tracking() {
        // Use a fresh, isolated database directory for this run.
        let temp_dir = env::temp_dir().join("fff_test_query_tracking_new");
        let _ = std::fs::remove_dir_all(&temp_dir);

        let mut tracker = QueryTracker::new(temp_dir.to_str().unwrap(), true).unwrap();

        let project = PathBuf::from("/test/project");
        let main_rs = PathBuf::from("/test/project/src/main.rs");

        // A single completion is not yet a combo.
        tracker
            .track_query_completion("main", &project, &main_rs)
            .unwrap();
        let boost = tracker
            .get_last_query_path("main", &project, &main_rs, 10000)
            .unwrap();
        assert_eq!(boost, 0, "First completion should not boost");

        // The second consecutive completion earns the boost.
        tracker
            .track_query_completion("main", &project, &main_rs)
            .unwrap();
        let boost = tracker
            .get_last_query_path("main", &project, &main_rs, 10000)
            .unwrap();
        assert_eq!(boost, 10000, "Second completion should boost");

        // Completing with a different file replaces the association.
        let lib_rs = PathBuf::from("/test/project/src/lib.rs");
        tracker
            .track_query_completion("main", &project, &lib_rs)
            .unwrap();
        let boost = tracker
            .get_last_query_path("main", &project, &lib_rs, 10000)
            .unwrap();
        assert_eq!(boost, 0, "Different file should reset boost");

        // The previously boosted file lost its combo as well.
        let boost = tracker
            .get_last_query_path("main", &project, &main_rs, 10000)
            .unwrap();
        assert_eq!(boost, 0, "Original file should not boost after replacement");

        let _ = std::fs::remove_dir_all(&temp_dir);
    }

    /// The key-derivation helpers must be deterministic and must separate
    /// distinct projects and queries.
    #[test]
    fn test_hashing_functions() {
        let project = PathBuf::from("/test/project");

        let key_a = QueryTracker::create_project_key(&project).unwrap();
        let key_b = QueryTracker::create_project_key(&project).unwrap();
        assert_eq!(key_a, key_b, "Same project should hash to same key");

        let qk_a = QueryTracker::create_query_key(&project, "test").unwrap();
        let qk_b = QueryTracker::create_query_key(&project, "test").unwrap();
        assert_eq!(
            qk_a, qk_b,
            "Same project+query should hash to same key"
        );

        let qk_other_query = QueryTracker::create_query_key(&project, "different").unwrap();
        assert_ne!(
            qk_a, qk_other_query,
            "Different queries should hash to different keys"
        );

        let other_project = PathBuf::from("/other/project");
        let qk_other_project = QueryTracker::create_query_key(&other_project, "test").unwrap();
        assert_ne!(
            qk_a, qk_other_project,
            "Different projects should hash to different keys"
        );
    }
}