1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
//! Temporal signal integration for recency and churn scores.
//!
//! This module retrieves recency and churn scores from the chunks table
//! and combines them into a unified signal score.
use crate::db::traits::StoreGraph;
use crate::db::SqliteStore;
use crate::search::executor_types::{RankedResult, RankedResults, SearchSource};
use tracing::{debug, instrument};
/// Signal weights for combining recency and churn.
#[derive(Debug, Clone, Copy)]
pub struct SignalWeights {
    /// Weight for recency score (default: 0.3)
    pub recency: f32,
    /// Weight for churn score (default: 0.2)
    pub churn: f32,
}

impl Default for SignalWeights {
    /// Standard weighting: recency counts slightly more than churn.
    fn default() -> Self {
        SignalWeights {
            churn: 0.2,
            recency: 0.3,
        }
    }
}
/// Temporal signal executor.
///
/// Retrieves and combines recency_score and churn_score from chunks table.
/// Returns scored chunks for flexible fusion (note: `execute_with_weights`
/// caps results at an internal limit rather than being truly unlimited).
///
/// Stateless: all entry points are associated functions that borrow a
/// `SqliteStore`.
pub struct SignalExecutor;
impl SignalExecutor {
/// Execute signal query with default weights.
///
/// # Parameters
/// - `client`: Database client
/// - `repo_id`: Repository ID to filter results
/// - `worktree_id`: Optional worktree ID for additional filtering
///
/// # Returns
/// RankedResults with combined signal scores (0.0-1.0 range)
#[instrument(skip(store))]
pub async fn execute(
store: &SqliteStore,
repo_id: i64,
worktree_id: Option<i64>,
) -> Result<RankedResults, SignalError> {
Self::execute_with_weights(store, repo_id, worktree_id, SignalWeights::default()).await
}
/// Execute signal query with custom weights.
///
/// # Parameters
/// - `client`: Database client
/// - `repo_id`: Repository ID to filter results
/// - `worktree_id`: Optional worktree ID for additional filtering
/// - `weights`: Custom weights for recency and churn
///
/// # SQL Query
/// ```sql
/// SELECT
/// c.id,
/// c.recency_score,
/// c.churn_score,
/// (c.recency_score * $3 + c.churn_score * $4) as combined_signal
/// FROM maproom.chunks c
/// JOIN maproom.files f ON f.id = c.file_id
/// WHERE f.repo_id = $1
/// AND ($2::bigint IS NULL OR f.worktree_id = $2)
/// ORDER BY combined_signal DESC;
/// ```
#[instrument(skip(store))]
pub async fn execute_with_weights(
store: &SqliteStore,
repo_id: i64,
worktree_id: Option<i64>,
weights: SignalWeights,
) -> Result<RankedResults, SignalError> {
debug!(
"Executing signal query (recency: {}, churn: {})",
weights.recency, weights.churn
);
// Get a reasonable limit for signal scores (signals don't have natural limit like search)
let limit = 1000;
// Delegate to SqliteStore's signal score calculation
let hits = store
.calculate_signal_scores(repo_id, worktree_id, weights.recency, weights.churn, limit)
.await
.map_err(|e| SignalError::Database(e.to_string()))?;
// Convert SearchHit to RankedResult
let results: Vec<RankedResult> = hits
.into_iter()
.enumerate()
.map(|(i, hit)| RankedResult::new(hit.chunk_id, hit.score as f32, i + 1))
.collect();
debug!("Signal search returned {} results", results.len());
Ok(RankedResults::new(results, SearchSource::Signals))
}
/// Execute signal query for specific chunk IDs.
///
/// This variant calculates signal scores only for a given set of chunks,
/// useful when combining with other search results.
#[instrument(skip(store, chunk_ids), fields(chunk_count = chunk_ids.len()))]
pub async fn execute_for_chunks(
store: &SqliteStore,
chunk_ids: &[i64],
repo_id: i64,
worktree_id: Option<i64>,
weights: SignalWeights,
) -> Result<RankedResults, SignalError> {
if chunk_ids.is_empty() {
return Ok(RankedResults::empty(SearchSource::Signals));
}
debug!(
"Executing signal query for {} specific chunks",
chunk_ids.len()
);
// Delegate to SqliteStore's signal score calculation for specific chunks
let hits = store
.calculate_signal_scores_for_chunks(
chunk_ids,
repo_id,
worktree_id,
weights.recency,
weights.churn,
)
.await
.map_err(|e| SignalError::Database(e.to_string()))?;
// Convert SearchHit to RankedResult
let results: Vec<RankedResult> = hits
.into_iter()
.enumerate()
.map(|(i, hit)| RankedResult::new(hit.chunk_id, hit.score as f32, i + 1))
.collect();
debug!(
"Signal search for chunks returned {} results",
results.len()
);
Ok(RankedResults::new(results, SearchSource::Signals))
}
}
/// Errors that can occur during signal query execution.
#[derive(Debug, thiserror::Error)]
pub enum SignalError {
    /// Database query error, carrying the stringified error from the
    /// underlying store call.
    #[error("Database error: {0}")]
    Database(String),
    /// Invalid signal weights (e.g. negative or non-finite values), with a
    /// human-readable description of which weight was rejected.
    #[error("Invalid signal weights: {0}")]
    InvalidWeights(String),
}
#[cfg(test)]
mod tests {
    use super::*;

    /// The default weighting favors recency (0.3) over churn (0.2).
    #[test]
    fn test_signal_weights_default() {
        let SignalWeights { recency, churn } = SignalWeights::default();
        assert_eq!(recency, 0.3);
        assert_eq!(churn, 0.2);
    }

    /// Smoke test: the unit-struct executor can be instantiated.
    #[test]
    fn test_signal_executor_exists() {
        let _executor: SignalExecutor = SignalExecutor;
    }

    // Note: Full integration tests with real database are in tests/search/executors_test.rs
}