// engine/routing.rs
//! Semantic Routing Engine for Dakera AI Agent Memory Platform.
//!
//! Agents query Dakera without knowing which namespace holds the answer.
//! Dakera figures it out by comparing the query embedding against cached
//! namespace centroids (averaged embeddings sampled from each namespace).
//!
//! The centroid cache is refreshed periodically in the background.

use std::collections::HashMap;
use std::sync::Arc;

use parking_lot::RwLock;
use storage::VectorStorage;

use crate::distance::calculate_distance;
use common::DistanceMetric;

/// A route result: which namespace matched and how strongly.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct RouteMatch {
    /// Name of the matched namespace (e.g. `_dakera_agent_dev`).
    pub namespace: String,
    /// Score from `calculate_distance` with the cosine metric; higher = closer.
    pub similarity: f32,
    /// Number of vectors in the namespace at the time its centroid was cached.
    pub memory_count: usize,
}

/// Configuration for the semantic router.
#[derive(Debug, Clone)]
pub struct SemanticRouterConfig {
    /// Maximum number of memories to sample per namespace for centroid calculation
    pub sample_size: usize,
    /// How often to refresh centroids (seconds)
    pub refresh_interval_secs: u64,
}

impl Default for SemanticRouterConfig {
    fn default() -> Self {
        Self {
            sample_size: 20,
            refresh_interval_secs: 1800, // 30 minutes
        }
    }
}

impl SemanticRouterConfig {
    /// Build a config from environment variables, falling back to
    /// [`SemanticRouterConfig::default`] for anything unset or unparseable.
    ///
    /// * `DAKERA_ROUTE_SAMPLE_SIZE`  — overrides `sample_size`
    /// * `DAKERA_ROUTE_REFRESH_SECS` — overrides `refresh_interval_secs`
    pub fn from_env() -> Self {
        // Single source of truth for the fallbacks: reuse `Default` instead of
        // duplicating the magic numbers 20 / 1800 here (they previously had to
        // be kept in sync by hand).
        let defaults = Self::default();

        let sample_size: usize = std::env::var("DAKERA_ROUTE_SAMPLE_SIZE")
            .ok()
            .and_then(|v| v.parse().ok())
            .unwrap_or(defaults.sample_size);

        let refresh_interval_secs: u64 = std::env::var("DAKERA_ROUTE_REFRESH_SECS")
            .ok()
            .and_then(|v| v.parse().ok())
            .unwrap_or(defaults.refresh_interval_secs);

        Self {
            sample_size,
            refresh_interval_secs,
        }
    }
}

/// Cached centroid for a namespace: average embedding + vector count.
#[derive(Clone)]
struct CentroidEntry {
    // Average of the sampled embeddings, L2-normalized by the refresh path.
    centroid: Vec<f32>,
    // Total vector count in the namespace at the time of the refresh.
    count: usize,
}

/// Semantic router that maintains a centroid cache per namespace.
pub struct SemanticRouter {
    // Sampling size and refresh cadence; fixed at construction time.
    config: SemanticRouterConfig,
    /// Namespace → averaged centroid embedding + count
    cache: RwLock<HashMap<String, CentroidEntry>>,
}

76impl SemanticRouter {
77    pub fn new(config: SemanticRouterConfig) -> Self {
78        Self {
79            config,
80            cache: RwLock::new(HashMap::new()),
81        }
82    }
83
84    /// Route a query embedding to the most relevant namespaces.
85    ///
86    /// Returns namespaces sorted by similarity (descending), filtered
87    /// by `min_similarity`.
88    pub fn route(&self, query: &[f32], top_k: usize, min_similarity: f32) -> Vec<RouteMatch> {
89        let cache = self.cache.read();
90        let mut matches: Vec<RouteMatch> = cache
91            .iter()
92            .filter_map(|(ns, entry)| {
93                if entry.centroid.len() != query.len() {
94                    return None; // dimension mismatch, skip
95                }
96                let sim = calculate_distance(query, &entry.centroid, DistanceMetric::Cosine);
97                if sim >= min_similarity {
98                    Some(RouteMatch {
99                        namespace: ns.clone(),
100                        similarity: sim,
101                        memory_count: entry.count,
102                    })
103                } else {
104                    None
105                }
106            })
107            .collect();
108
109        matches.sort_by(|a, b| {
110            b.similarity
111                .partial_cmp(&a.similarity)
112                .unwrap_or(std::cmp::Ordering::Equal)
113        });
114        matches.truncate(top_k);
115        matches
116    }
117
118    /// Refresh the centroid cache by sampling memories from each agent namespace.
119    ///
120    /// For each `_dakera_agent_*` namespace, sample up to `sample_size` vectors,
121    /// average their embeddings into a single centroid.
122    pub async fn refresh_centroids(&self, storage: &Arc<dyn VectorStorage>) {
123        let namespaces = match storage.list_namespaces().await {
124            Ok(ns) => ns,
125            Err(e) => {
126                tracing::warn!(error = %e, "Failed to list namespaces for centroid refresh");
127                return;
128            }
129        };
130
131        let mut new_cache: HashMap<String, CentroidEntry> = HashMap::new();
132
133        for namespace in &namespaces {
134            if !namespace.starts_with("_dakera_agent_") {
135                continue;
136            }
137
138            let vectors = match storage.get_all(namespace).await {
139                Ok(v) => v,
140                Err(_) => continue,
141            };
142
143            if vectors.is_empty() {
144                continue;
145            }
146
147            let count = vectors.len();
148
149            // Sample up to sample_size vectors (take first N — they're stored in insertion order)
150            let sample: Vec<&Vec<f32>> = vectors
151                .iter()
152                .filter(|v| !v.values.is_empty())
153                .take(self.config.sample_size)
154                .map(|v| &v.values)
155                .collect();
156
157            if sample.is_empty() {
158                continue;
159            }
160
161            // Compute centroid (average embedding)
162            let dim = sample[0].len();
163            let mut centroid = vec![0.0f32; dim];
164            let mut valid = 0usize;
165            for embedding in &sample {
166                if embedding.len() == dim {
167                    for (i, val) in embedding.iter().enumerate() {
168                        centroid[i] += val;
169                    }
170                    valid += 1;
171                }
172            }
173
174            if valid > 0 {
175                for val in &mut centroid {
176                    *val /= valid as f32;
177                }
178                // Normalize centroid for cosine comparison
179                let norm: f32 = centroid.iter().map(|x| x * x).sum::<f32>().sqrt();
180                if norm > 1e-8 {
181                    for val in &mut centroid {
182                        *val /= norm;
183                    }
184                }
185                new_cache.insert(namespace.clone(), CentroidEntry { centroid, count });
186            }
187        }
188
189        let refreshed_count = new_cache.len();
190        *self.cache.write() = new_cache;
191
192        tracing::info!(
193            namespaces_cached = refreshed_count,
194            "Semantic router centroid cache refreshed"
195        );
196    }
197
198    /// Spawn the centroid refresh as a background tokio task.
199    pub fn spawn_refresh(
200        router: Arc<SemanticRouter>,
201        storage: Arc<dyn VectorStorage>,
202    ) -> tokio::task::JoinHandle<()> {
203        let interval_secs = router.config.refresh_interval_secs;
204        tokio::spawn(async move {
205            // Initial refresh on startup (small delay to let storage warm up)
206            tokio::time::sleep(std::time::Duration::from_secs(5)).await;
207            router.refresh_centroids(&storage).await;
208
209            let mut interval = tokio::time::interval(std::time::Duration::from_secs(interval_secs));
210            loop {
211                interval.tick().await;
212                router.refresh_centroids(&storage).await;
213            }
214        })
215    }
216}
217
// ============================================================================
// CE-12a: Query Classifier for smart routing
// ============================================================================

/// Inferred query kind used for smart routing decisions.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum QueryKind {
    /// Short / keyword-based query → prefer BM25 full-text search
    Keyword,
    /// Long / natural-language query → prefer vector similarity search
    Semantic,
    /// Mixed signal → hybrid (vector + BM25)
    Hybrid,
}

/// Heuristic classifier that determines the best retrieval strategy for a
/// free-text query without any model inference.
pub struct QueryClassifier;

impl QueryClassifier {
    /// Classify a raw query string into a [`QueryKind`].
    ///
    /// Heuristics (in priority order):
    /// 1. Sentence-marker present **or** ≥ 8 words → [`QueryKind::Semantic`]
    /// 2. ≤ 3 words otherwise → [`QueryKind::Keyword`]
    /// 3. Everything else → [`QueryKind::Hybrid`]
    pub fn classify(query: &str) -> QueryKind {
        // Leading phrases that signal a natural-language question.
        const QUESTION_STARTERS: [&str; 9] = [
            "what ", "how ", "why ", "when ", "where ", "who ",
            "tell me", "explain", "describe",
        ];

        let text = query.trim();
        let words = text.split_whitespace().count();

        let lowered = text.to_lowercase();
        let sentence_marker = text.contains('?')
            || text.contains('.')
            || QUESTION_STARTERS.iter().any(|p| lowered.starts_with(p));

        // Guard clauses in priority order; a sentence-like query is semantic
        // regardless of length, so the short-query branch never needs to
        // re-check the marker.
        if sentence_marker || words >= 8 {
            return QueryKind::Semantic;
        }
        if words <= 3 {
            return QueryKind::Keyword;
        }
        QueryKind::Hybrid
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // An unpopulated cache must yield no matches (and must not panic).
    #[test]
    fn test_route_empty_cache() {
        let router = SemanticRouter::new(SemanticRouterConfig::default());
        let results = router.route(&[1.0, 0.0, 0.0], 3, 0.5);
        assert!(results.is_empty());
    }

    // Three hand-built centroids; the one aligned with the query must rank first.
    #[test]
    fn test_route_with_cached_centroids() {
        let router = SemanticRouter::new(SemanticRouterConfig::default());

        // Manually populate cache
        {
            let mut cache = router.cache.write();
            cache.insert(
                "_dakera_agent_dev".to_string(),
                CentroidEntry {
                    centroid: vec![1.0, 0.0, 0.0],
                    count: 100,
                },
            );
            cache.insert(
                "_dakera_agent_ops".to_string(),
                CentroidEntry {
                    centroid: vec![0.0, 1.0, 0.0],
                    count: 50,
                },
            );
            cache.insert(
                "_dakera_agent_sec".to_string(),
                CentroidEntry {
                    // 45° between dev and ops → second-best match for a dev query.
                    centroid: vec![0.707, 0.707, 0.0],
                    count: 30,
                },
            );
        }

        // Query aligned with "dev" namespace
        let results = router.route(&[1.0, 0.0, 0.0], 3, 0.0);
        assert_eq!(results.len(), 3);
        assert_eq!(results[0].namespace, "_dakera_agent_dev");
        assert!(results[0].similarity > results[1].similarity);
    }

    // min_similarity must drop centroids below the threshold.
    #[test]
    fn test_route_min_similarity_filter() {
        let router = SemanticRouter::new(SemanticRouterConfig::default());

        {
            let mut cache = router.cache.write();
            cache.insert(
                "_dakera_agent_a".to_string(),
                CentroidEntry {
                    centroid: vec![1.0, 0.0, 0.0],
                    count: 10,
                },
            );
            cache.insert(
                "_dakera_agent_b".to_string(),
                CentroidEntry {
                    // Orthogonal to the query → similarity ~0, filtered out below.
                    centroid: vec![0.0, 1.0, 0.0],
                    count: 10,
                },
            );
        }

        // High min_similarity should filter out the orthogonal namespace
        let results = router.route(&[1.0, 0.0, 0.0], 5, 0.9);
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].namespace, "_dakera_agent_a");
    }

    // With 10 cached namespaces and top_k = 3, exactly 3 results come back.
    #[test]
    fn test_route_top_k_truncation() {
        let router = SemanticRouter::new(SemanticRouterConfig::default());

        {
            let mut cache = router.cache.write();
            for i in 0..10 {
                // Build 10 unit vectors fanning away from the query direction.
                let mut centroid = vec![0.0f32; 3];
                centroid[0] = 1.0 - (i as f32 * 0.05);
                centroid[1] = i as f32 * 0.05;
                let norm = (centroid[0] * centroid[0] + centroid[1] * centroid[1]).sqrt();
                centroid[0] /= norm;
                centroid[1] /= norm;
                cache.insert(
                    format!("_dakera_agent_{}", i),
                    CentroidEntry {
                        centroid,
                        count: 10,
                    },
                );
            }
        }

        let results = router.route(&[1.0, 0.0, 0.0], 3, 0.0);
        assert_eq!(results.len(), 3);
    }

    // Centroids whose dimensionality differs from the query are skipped, not compared.
    #[test]
    fn test_route_dimension_mismatch_skipped() {
        let router = SemanticRouter::new(SemanticRouterConfig::default());

        {
            let mut cache = router.cache.write();
            cache.insert(
                "_dakera_agent_3d".to_string(),
                CentroidEntry {
                    centroid: vec![1.0, 0.0, 0.0],
                    count: 10,
                },
            );
            cache.insert(
                "_dakera_agent_5d".to_string(),
                CentroidEntry {
                    centroid: vec![1.0, 0.0, 0.0, 0.0, 0.0],
                    count: 10,
                },
            );
        }

        // Query is 3D, should only match the 3D centroid
        let results = router.route(&[1.0, 0.0, 0.0], 5, 0.0);
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].namespace, "_dakera_agent_3d");
    }

    // Pins the documented defaults (20 samples, 30-minute refresh).
    #[test]
    fn test_config_defaults() {
        let config = SemanticRouterConfig::default();
        assert_eq!(config.sample_size, 20);
        assert_eq!(config.refresh_interval_secs, 1800);
    }

    // --- QueryClassifier tests ---

    #[test]
    fn test_classify_keyword_short() {
        assert_eq!(QueryClassifier::classify("rust async"), QueryKind::Keyword);
        assert_eq!(QueryClassifier::classify("HNSW"), QueryKind::Keyword);
        assert_eq!(
            QueryClassifier::classify("memory importance"),
            QueryKind::Keyword
        );
    }

    #[test]
    fn test_classify_semantic_long() {
        // ≥ 8 words → semantic, even without punctuation.
        assert_eq!(
            QueryClassifier::classify(
                "what is the best way to store long term memories in an AI system"
            ),
            QueryKind::Semantic
        );
        assert_eq!(
            QueryClassifier::classify("tell me about the agent memory architecture"),
            QueryKind::Semantic
        );
    }

    #[test]
    fn test_classify_semantic_question_mark() {
        // A '?' marks sentence structure regardless of word count.
        assert_eq!(
            QueryClassifier::classify("how does HNSW work?"),
            QueryKind::Semantic
        );
    }

    #[test]
    fn test_classify_hybrid_middle() {
        // 4 words, no sentence markers → neither clearly keyword nor semantic.
        assert_eq!(
            QueryClassifier::classify("vector search memory agent"),
            QueryKind::Hybrid
        );
    }
}