// agtrace_runtime/ops/index.rs

1use crate::{Error, Result};
2use agtrace_index::{Database, LogFileRecord, ProjectRecord, SessionRecord};
3use agtrace_providers::ProviderAdapter;
4use std::collections::HashSet;
5use std::path::PathBuf;
6
/// Progress events emitted by `IndexService::run` so callers can render
/// status (CLI output, spinners, logs) without coupling to indexing logic.
#[derive(Debug, Clone)]
pub enum IndexProgress {
    /// Incremental mode only: number of files already indexed and unchanged,
    /// which will be skipped during this run.
    IncrementalHint {
        indexed_files: usize,
    },
    /// A provider's configured log root does not exist on disk; the provider
    /// was skipped entirely.
    LogRootMissing {
        provider_name: String,
        log_root: PathBuf,
    },
    /// Scanning of a provider's log root has started.
    ProviderScanning {
        provider_name: String,
    },
    /// Session count discovered for a provider after project-scope filtering.
    ProviderSessionCount {
        provider_name: String,
        count: usize,
        /// The scoped project hash, or the literal `"<all>"` when indexing
        /// every project.
        project_hash: String,
        all_projects: bool,
    },
    /// A session was (re)registered in the database.
    SessionRegistered {
        session_id: String,
    },
    /// The run finished; summary counters for the whole pass.
    Completed {
        total_sessions: usize,
        scanned_files: usize,
        skipped_files: usize,
    },
}
34
/// Scans provider log roots and registers discovered sessions and their log
/// files in the index database, reporting progress via a caller callback.
pub struct IndexService<'a> {
    // Borrowed index database; all records are upserted through it.
    db: &'a Database,
    // Each provider adapter paired with the log root directory it scans.
    providers: Vec<(ProviderAdapter, PathBuf)>,
}
39
40impl<'a> IndexService<'a> {
41    pub fn new(db: &'a Database, providers: Vec<(ProviderAdapter, PathBuf)>) -> Self {
42        Self { db, providers }
43    }
44
45    pub fn run<F>(
46        &self,
47        scope: agtrace_types::ProjectScope,
48        force: bool,
49        mut on_progress: F,
50    ) -> Result<()>
51    where
52        F: FnMut(IndexProgress),
53    {
54        let indexed_files = if force {
55            HashSet::new()
56        } else {
57            self.db
58                .get_all_log_files()?
59                .into_iter()
60                .filter_map(|f| {
61                    if should_skip_indexed_file(&f) {
62                        Some(f.path)
63                    } else {
64                        None
65                    }
66                })
67                .collect::<HashSet<_>>()
68        };
69
70        if !force {
71            on_progress(IndexProgress::IncrementalHint {
72                indexed_files: indexed_files.len(),
73            });
74        }
75
76        let mut total_sessions = 0;
77        let mut scanned_files = 0;
78        let mut skipped_files = 0;
79
80        for (provider, log_root) in &self.providers {
81            let provider_name = provider.id();
82
83            if !log_root.exists() {
84                on_progress(IndexProgress::LogRootMissing {
85                    provider_name: provider_name.to_string(),
86                    log_root: log_root.clone(),
87                });
88                continue;
89            }
90
91            on_progress(IndexProgress::ProviderScanning {
92                provider_name: provider_name.to_string(),
93            });
94
95            let sessions = provider
96                .discovery
97                .scan_sessions(log_root)
98                .map_err(Error::Provider)?;
99
100            // Filter sessions by project_hash if specified
101            let filtered_sessions: Vec<_> = sessions
102                .into_iter()
103                .filter(|session| {
104                    if let Some(expected_hash) = scope.hash() {
105                        if let Some(session_root) = &session.project_root {
106                            let session_hash = agtrace_core::project_hash_from_root(&session_root.to_string_lossy());
107                            &session_hash == expected_hash
108                        } else {
109                            // Gemini sessions might not have project_root, compute hash from file
110                            if provider_name == "gemini" {
111                                use agtrace_providers::gemini::io::extract_project_hash_from_gemini_file;
112                                if let Some(session_hash) = extract_project_hash_from_gemini_file(&session.main_file) {
113                                    &session_hash == expected_hash
114                                } else {
115                                    false
116                                }
117                            } else {
118                                false
119                            }
120                        }
121                    } else {
122                        true
123                    }
124                })
125                .collect();
126
127            on_progress(IndexProgress::ProviderSessionCount {
128                provider_name: provider_name.to_string(),
129                count: filtered_sessions.len(),
130                project_hash: match &scope {
131                    agtrace_types::ProjectScope::All => "<all>".to_string(),
132                    agtrace_types::ProjectScope::Specific(hash) => hash.to_string(),
133                },
134                all_projects: matches!(scope, agtrace_types::ProjectScope::All),
135            });
136
137            for session in filtered_sessions {
138                // Collect all file paths for this session
139                let mut all_files = vec![session.main_file.display().to_string()];
140                for side_file in &session.sidechain_files {
141                    all_files.push(side_file.display().to_string());
142                }
143
144                let all_files_unchanged =
145                    !force && all_files.iter().all(|f| indexed_files.contains(f));
146
147                if all_files_unchanged {
148                    skipped_files += all_files.len();
149                    continue;
150                }
151
152                on_progress(IndexProgress::SessionRegistered {
153                    session_id: session.session_id.clone(),
154                });
155
156                // Calculate project_hash from session data
157                let session_project_hash = if let Some(ref root) = session.project_root {
158                    agtrace_core::project_hash_from_root(&root.to_string_lossy())
159                } else if provider_name == "gemini" {
160                    // For Gemini, extract project_hash directly from the file
161                    use agtrace_providers::gemini::io::extract_project_hash_from_gemini_file;
162                    extract_project_hash_from_gemini_file(&session.main_file).unwrap_or_else(|| {
163                        agtrace_core::project_hash_from_log_path(&session.main_file)
164                    })
165                } else {
166                    // Generate unique hash from log path for orphaned sessions
167                    agtrace_core::project_hash_from_log_path(&session.main_file)
168                };
169
170                let project_record = ProjectRecord {
171                    hash: session_project_hash.clone(),
172                    root_path: session
173                        .project_root
174                        .as_ref()
175                        .map(|p| p.to_string_lossy().to_string()),
176                    last_scanned_at: Some(chrono::Utc::now().to_rfc3339()),
177                };
178                self.db.insert_or_update_project(&project_record)?;
179
180                let session_record = SessionRecord {
181                    id: session.session_id.clone(),
182                    project_hash: session_project_hash,
183                    provider: provider_name.to_string(),
184                    start_ts: session.timestamp.clone(),
185                    end_ts: None,
186                    snippet: session.snippet.clone(),
187                    is_valid: true,
188                };
189                self.db.insert_or_update_session(&session_record)?;
190
191                // Register main file
192                let to_log_file_record = |path: &PathBuf, role: &str| -> Result<LogFileRecord> {
193                    let meta = std::fs::metadata(path).ok();
194                    Ok(LogFileRecord {
195                        path: path.display().to_string(),
196                        session_id: session.session_id.clone(),
197                        role: role.to_string(),
198                        file_size: meta.as_ref().map(|m| m.len() as i64),
199                        mod_time: meta
200                            .and_then(|m| m.modified().ok())
201                            .map(|t| format!("{:?}", t)),
202                    })
203                };
204
205                scanned_files += 1;
206                let main_log_file = to_log_file_record(&session.main_file, "main")?;
207                self.db.insert_or_update_log_file(&main_log_file)?;
208
209                // Register sidechain files
210                for side_file in &session.sidechain_files {
211                    scanned_files += 1;
212                    let side_log_file = to_log_file_record(side_file, "sidechain")?;
213                    self.db.insert_or_update_log_file(&side_log_file)?;
214                }
215
216                total_sessions += 1;
217            }
218        }
219
220        on_progress(IndexProgress::Completed {
221            total_sessions,
222            scanned_files,
223            skipped_files,
224        });
225
226        Ok(())
227    }
228}
229
230fn should_skip_indexed_file(indexed: &LogFileRecord) -> bool {
231    use std::path::Path;
232
233    let path = Path::new(&indexed.path);
234
235    if !path.exists() {
236        return false;
237    }
238
239    let metadata = match std::fs::metadata(path) {
240        Ok(m) => m,
241        Err(_) => return false,
242    };
243
244    if let Some(db_size) = indexed.file_size {
245        if db_size != metadata.len() as i64 {
246            return false;
247        }
248    } else {
249        return false;
250    }
251
252    if let Some(db_mod_time) = &indexed.mod_time {
253        if let Ok(fs_mod_time) = metadata.modified() {
254            let fs_mod_time_str = format!("{:?}", fs_mod_time);
255            if db_mod_time != &fs_mod_time_str {
256                return false;
257            }
258        } else {
259            return false;
260        }
261    } else {
262        return false;
263    }
264
265    true
266}