Skip to main content

zeph_memory/semantic/
recall.rs

1// SPDX-FileCopyrightText: 2026 Andrei G <bug-ops>
2// SPDX-License-Identifier: MIT OR Apache-2.0
3
4use std::sync::Arc;
5use std::sync::atomic::{AtomicU64, Ordering};
6
7use futures::{StreamExt as _, TryStreamExt as _};
8use zeph_llm::provider::{LlmProvider as _, Message};
9
/// Approximate characters per token (conservative estimate for mixed content).
const CHARS_PER_TOKEN: usize = 4;

/// Target chunk size in characters (~400 tokens).
const CHUNK_CHARS: usize = 400 * CHARS_PER_TOKEN;

/// Overlap between adjacent chunks in characters (~80 tokens).
const CHUNK_OVERLAP_CHARS: usize = 80 * CHARS_PER_TOKEN;

/// Split `text` into overlapping chunks suitable for embedding.
///
/// For text shorter than `CHUNK_CHARS`, returns a single chunk.
/// Splits at UTF-8 character boundaries on paragraph (`\n\n`), line (`\n`),
/// space (` `), or raw character boundaries as a last resort.
fn chunk_text(text: &str) -> Vec<&str> {
    if text.len() <= CHUNK_CHARS {
        return vec![text];
    }

    let mut chunks = Vec::new();
    let mut start = 0;

    while start < text.len() {
        let end = if start + CHUNK_CHARS >= text.len() {
            text.len()
        } else {
            // Largest UTF-8 char boundary at or before `start + CHUNK_CHARS`.
            // Stable replacement for the unstable `str::floor_char_boundary`
            // (a boundary is at most 3 bytes back from any index).
            let mut boundary = start + CHUNK_CHARS;
            while !text.is_char_boundary(boundary) {
                boundary -= 1;
            }
            // Prefer to split at a paragraph or line break for cleaner chunks.
            let slice = &text[start..boundary];
            slice
                .rfind("\n\n")
                .map(|pos| start + pos + 2)
                .or_else(|| slice.rfind('\n').map(|pos| start + pos + 1))
                .or_else(|| slice.rfind(' ').map(|pos| start + pos + 1))
                .unwrap_or(boundary)
        };

        chunks.push(&text[start..end]);
        if end >= text.len() {
            break;
        }
        // Next chunk starts with overlap, but must always advance past the
        // current position to prevent infinite loops when rfind finds a match
        // very early in the slice (end barely advances, overlap rewinds start).
        // Smallest UTF-8 char boundary at or after the overlap point — stable
        // replacement for the unstable `str::ceil_char_boundary`.
        let mut next = end.saturating_sub(CHUNK_OVERLAP_CHARS);
        while next < text.len() && !text.is_char_boundary(next) {
            next += 1;
        }
        start = if next > start { next } else { end };
    }

    chunks
}
65
66use crate::admission::log_admission_decision;
67use crate::embedding_store::{MessageKind, SearchFilter};
68use crate::error::MemoryError;
69use crate::types::{ConversationId, MessageId};
70
71use super::SemanticMemory;
72use super::algorithms::{apply_mmr, apply_temporal_decay};
73
/// Tool execution metadata stored as Qdrant payload fields alongside embeddings.
///
/// Stored as payload — NOT prepended to content — to avoid corrupting embedding vectors.
#[derive(Debug, Clone, Default)]
pub struct EmbedContext {
    // Name of the tool that produced the output; when `None`, the message is
    // stored as a plain regular vector with no tool payload.
    pub tool_name: Option<String>,
    // Process exit code of the tool invocation, when applicable.
    pub exit_code: Option<i32>,
    // Invocation timestamp, stored verbatim as provided by the caller.
    pub timestamp: Option<String>,
}
83
/// A message returned from recall, paired with its relevance score.
#[derive(Debug)]
pub struct RecalledMessage {
    // The recalled conversation message.
    pub message: Message,
    // Combined relevance score from the weighted merge; higher ranks first.
    pub score: f32,
}
89
/// Maximum number of concurrent background embed tasks per `SemanticMemory` instance.
const MAX_EMBED_BG_TASKS: usize = 64;

/// Shared arguments for background embed tasks.
///
/// Owned copies of everything a detached task needs, so the task does not
/// borrow from `SemanticMemory`.
struct EmbedBgArgs {
    // Vector store handle; the collection is lazily ensured before writes.
    qdrant: Arc<crate::embedding_store::EmbeddingStore>,
    // Provider used to compute embeddings for content chunks.
    embed_provider: zeph_llm::any::AnyProvider,
    // Model identifier recorded alongside each stored vector.
    embedding_model: String,
    // Identifier of the message being embedded (from the SQLite save).
    message_id: MessageId,
    // Conversation the message belongs to.
    conversation_id: ConversationId,
    // Message role string stored in the vector payload.
    role: String,
    // Full message content; chunked via `chunk_text` before embedding.
    content: String,
    // Unix-seconds timestamp of the last Qdrant warning, used to rate-limit
    // repeated `ensure_collection` failure logs to roughly one per 10 seconds.
    last_qdrant_warn: Arc<AtomicU64>,
}
104
105/// Background task: embed chunks and store as regular message vectors.
106///
107/// All errors are logged as warnings; the function never panics.
108async fn embed_and_store_regular_bg(args: EmbedBgArgs) {
109    let EmbedBgArgs {
110        qdrant,
111        embed_provider,
112        embedding_model,
113        message_id,
114        conversation_id,
115        role,
116        content,
117        last_qdrant_warn,
118    } = args;
119    let chunks = chunk_text(&content);
120    let chunk_count = chunks.len();
121
122    let vectors = match embed_provider.embed_batch(&chunks).await {
123        Ok(v) => v,
124        Err(e) => {
125            tracing::warn!("bg embed_regular: failed to embed chunks for msg {message_id}: {e:#}");
126            return;
127        }
128    };
129
130    let Some(first) = vectors.first() else {
131        return;
132    };
133    let vector_size = first.len() as u64;
134    if let Err(e) = qdrant.ensure_collection(vector_size).await {
135        let now = std::time::SystemTime::now()
136            .duration_since(std::time::UNIX_EPOCH)
137            .unwrap_or_default()
138            .as_secs();
139        let last = last_qdrant_warn.load(Ordering::Relaxed);
140        if now.saturating_sub(last) >= 10 {
141            last_qdrant_warn.store(now, Ordering::Relaxed);
142            tracing::warn!("bg embed_regular: failed to ensure Qdrant collection: {e:#}");
143        } else {
144            tracing::debug!(
145                "bg embed_regular: failed to ensure Qdrant collection (suppressed): {e:#}"
146            );
147        }
148        return;
149    }
150
151    for (chunk_index, vector) in vectors.into_iter().enumerate() {
152        let chunk_index_u32 = u32::try_from(chunk_index).unwrap_or(u32::MAX);
153        if let Err(e) = qdrant
154            .store(
155                message_id,
156                conversation_id,
157                &role,
158                vector,
159                MessageKind::Regular,
160                &embedding_model,
161                chunk_index_u32,
162            )
163            .await
164        {
165            tracing::warn!(
166                "bg embed_regular: failed to store chunk {chunk_index}/{chunk_count} \
167                 for msg {message_id}: {e:#}"
168            );
169        }
170    }
171}
172
173/// Background task: embed chunks with tool context metadata and store in Qdrant.
174///
175/// All errors are logged as warnings; the function never panics.
176async fn embed_chunks_with_tool_context_bg(args: EmbedBgArgs, embed_ctx: EmbedContext) {
177    let EmbedBgArgs {
178        qdrant,
179        embed_provider,
180        embedding_model,
181        message_id,
182        conversation_id,
183        role,
184        content,
185        last_qdrant_warn,
186    } = args;
187    let chunks = chunk_text(&content);
188    let chunk_count = chunks.len();
189
190    let vectors = match embed_provider.embed_batch(&chunks).await {
191        Ok(v) => v,
192        Err(e) => {
193            tracing::warn!(
194                "bg embed_tool: failed to embed tool-output chunks for msg {message_id}: {e:#}"
195            );
196            return;
197        }
198    };
199
200    if let Some(first) = vectors.first() {
201        let vector_size = first.len() as u64;
202        if let Err(e) = qdrant.ensure_collection(vector_size).await {
203            let now = std::time::SystemTime::now()
204                .duration_since(std::time::UNIX_EPOCH)
205                .unwrap_or_default()
206                .as_secs();
207            let last = last_qdrant_warn.load(Ordering::Relaxed);
208            if now.saturating_sub(last) >= 10 {
209                last_qdrant_warn.store(now, Ordering::Relaxed);
210                tracing::warn!("bg embed_tool: failed to ensure Qdrant collection: {e:#}");
211            } else {
212                tracing::debug!(
213                    "bg embed_tool: failed to ensure Qdrant collection (suppressed): {e:#}"
214                );
215            }
216            return;
217        }
218    }
219
220    for (chunk_index, vector) in vectors.into_iter().enumerate() {
221        let chunk_index_u32 = u32::try_from(chunk_index).unwrap_or(u32::MAX);
222        let result = if let Some(ref tool_name) = embed_ctx.tool_name {
223            qdrant
224                .store_with_tool_context(
225                    message_id,
226                    conversation_id,
227                    &role,
228                    vector,
229                    MessageKind::Regular,
230                    &embedding_model,
231                    chunk_index_u32,
232                    tool_name,
233                    embed_ctx.exit_code,
234                    embed_ctx.timestamp.as_deref(),
235                )
236                .await
237                .map(|_| ())
238        } else {
239            qdrant
240                .store(
241                    message_id,
242                    conversation_id,
243                    &role,
244                    vector,
245                    MessageKind::Regular,
246                    &embedding_model,
247                    chunk_index_u32,
248                )
249                .await
250                .map(|_| ())
251        };
252        if let Err(e) = result {
253            tracing::warn!(
254                "bg embed_tool: failed to store chunk {chunk_index}/{chunk_count} \
255                 for msg {message_id}: {e:#}"
256            );
257        }
258    }
259}
260
261/// Background task: embed chunks with optional category and store in Qdrant.
262///
263/// All errors are logged as warnings; the function never panics.
264async fn embed_and_store_with_category_bg(args: EmbedBgArgs, category: Option<String>) {
265    let EmbedBgArgs {
266        qdrant,
267        embed_provider,
268        embedding_model,
269        message_id,
270        conversation_id,
271        role,
272        content,
273        last_qdrant_warn,
274    } = args;
275    let chunks = chunk_text(&content);
276    let chunk_count = chunks.len();
277
278    let vectors = match embed_provider.embed_batch(&chunks).await {
279        Ok(v) => v,
280        Err(e) => {
281            tracing::warn!(
282                "bg embed_category: failed to embed categorized chunks for msg {message_id}: {e:#}"
283            );
284            return;
285        }
286    };
287
288    let Some(first) = vectors.first() else {
289        return;
290    };
291    let vector_size = first.len() as u64;
292    if let Err(e) = qdrant.ensure_collection(vector_size).await {
293        let now = std::time::SystemTime::now()
294            .duration_since(std::time::UNIX_EPOCH)
295            .unwrap_or_default()
296            .as_secs();
297        let last = last_qdrant_warn.load(Ordering::Relaxed);
298        if now.saturating_sub(last) >= 10 {
299            last_qdrant_warn.store(now, Ordering::Relaxed);
300            tracing::warn!("bg embed_category: failed to ensure Qdrant collection: {e:#}");
301        } else {
302            tracing::debug!(
303                "bg embed_category: failed to ensure Qdrant collection (suppressed): {e:#}"
304            );
305        }
306        return;
307    }
308
309    for (chunk_index, vector) in vectors.into_iter().enumerate() {
310        let chunk_index_u32 = u32::try_from(chunk_index).unwrap_or(u32::MAX);
311        if let Err(e) = qdrant
312            .store_with_category(
313                message_id,
314                conversation_id,
315                &role,
316                vector,
317                MessageKind::Regular,
318                &embedding_model,
319                chunk_index_u32,
320                category.as_deref(),
321            )
322            .await
323        {
324            tracing::warn!(
325                "bg embed_category: failed to store chunk {chunk_index}/{chunk_count} \
326                 for msg {message_id}: {e:#}"
327            );
328        }
329    }
330}
331
332impl SemanticMemory {
333    /// Save a message to `SQLite` and optionally embed and store in Qdrant.
334    ///
335    /// Returns `Ok(Some(message_id))` when admitted and persisted.
336    /// Returns `Ok(None)` when A-MAC admission control rejects the message (not an error).
337    ///
338    /// # Errors
339    ///
340    /// Returns an error if the `SQLite` save fails. Embedding failures are logged but not
341    /// propagated.
342    #[cfg_attr(
343        feature = "profiling",
344        tracing::instrument(name = "memory.remember", skip_all, fields(content_len = %content.len()))
345    )]
346    pub async fn remember(
347        &self,
348        conversation_id: ConversationId,
349        role: &str,
350        content: &str,
351        goal_text: Option<&str>,
352    ) -> Result<Option<MessageId>, MemoryError> {
353        // A-MAC admission gate.
354        if let Some(ref admission) = self.admission_control {
355            let decision = admission
356                .evaluate(
357                    content,
358                    role,
359                    self.effective_embed_provider(),
360                    self.qdrant.as_ref(),
361                    goal_text,
362                )
363                .await;
364            let preview: String = content.chars().take(100).collect();
365            log_admission_decision(&decision, &preview, role, admission.threshold());
366            if !decision.admitted {
367                return Ok(None);
368            }
369        }
370
371        if let Some(gate) = &self.quality_gate
372            && gate
373                .evaluate(content, self.effective_embed_provider(), &[])
374                .await
375                .is_some()
376        {
377            return Ok(None);
378        }
379
380        let message_id = self
381            .sqlite
382            .save_message(conversation_id, role, content)
383            .await?;
384
385        self.embed_and_store_regular(message_id, conversation_id, role, content);
386
387        Ok(Some(message_id))
388    }
389
390    /// Save a message with pre-serialized parts JSON to `SQLite` and optionally embed in Qdrant.
391    ///
392    /// Returns `Ok((Some(message_id), embedding_stored))` when admitted and persisted.
393    /// Returns `Ok((None, false))` when A-MAC admission control rejects the message.
394    ///
395    /// # Errors
396    ///
397    /// Returns an error if the `SQLite` save fails.
398    #[cfg_attr(
399        feature = "profiling",
400        tracing::instrument(name = "memory.remember", skip_all, fields(content_len = %content.len()))
401    )]
402    pub async fn remember_with_parts(
403        &self,
404        conversation_id: ConversationId,
405        role: &str,
406        content: &str,
407        parts_json: &str,
408        goal_text: Option<&str>,
409    ) -> Result<(Option<MessageId>, bool), MemoryError> {
410        // A-MAC admission gate.
411        if let Some(ref admission) = self.admission_control {
412            let decision = admission
413                .evaluate(
414                    content,
415                    role,
416                    self.effective_embed_provider(),
417                    self.qdrant.as_ref(),
418                    goal_text,
419                )
420                .await;
421            let preview: String = content.chars().take(100).collect();
422            log_admission_decision(&decision, &preview, role, admission.threshold());
423            if !decision.admitted {
424                return Ok((None, false));
425            }
426        }
427
428        if let Some(gate) = &self.quality_gate
429            && gate
430                .evaluate(content, self.effective_embed_provider(), &[])
431                .await
432                .is_some()
433        {
434            return Ok((None, false));
435        }
436
437        let message_id = self
438            .sqlite
439            .save_message_with_parts(conversation_id, role, content, parts_json)
440            .await?;
441
442        let embedding_stored =
443            self.embed_and_store_regular(message_id, conversation_id, role, content);
444
445        Ok((Some(message_id), embedding_stored))
446    }
447
448    /// Save a tool output to `SQLite` and embed with tool metadata in Qdrant payload.
449    ///
450    /// Tool metadata (`tool_name`, `exit_code`, `timestamp`) is stored as Qdrant payload fields
451    /// so it is available for filtering without corrupting the embedding vector.
452    ///
453    /// Returns `Ok(Some(message_id))` when admitted and persisted.
454    /// Returns `Ok(None)` when A-MAC admission control rejects the message.
455    ///
456    /// # Errors
457    ///
458    /// Returns an error if the `SQLite` save fails.
459    #[cfg_attr(
460        feature = "profiling",
461        tracing::instrument(name = "memory.remember", skip_all, fields(content_len = %content.len()))
462    )]
463    pub async fn remember_tool_output(
464        &self,
465        conversation_id: ConversationId,
466        role: &str,
467        content: &str,
468        parts_json: &str,
469        embed_ctx: EmbedContext,
470    ) -> Result<(Option<MessageId>, bool), MemoryError> {
471        if let Some(ref admission) = self.admission_control {
472            let decision = admission
473                .evaluate(
474                    content,
475                    role,
476                    self.effective_embed_provider(),
477                    self.qdrant.as_ref(),
478                    None,
479                )
480                .await;
481            let preview: String = content.chars().take(100).collect();
482            log_admission_decision(&decision, &preview, role, admission.threshold());
483            if !decision.admitted {
484                return Ok((None, false));
485            }
486        }
487
488        let message_id = self
489            .sqlite
490            .save_message_with_parts(conversation_id, role, content, parts_json)
491            .await?;
492
493        let embedding_stored = self.embed_chunks_with_tool_context(
494            message_id,
495            conversation_id,
496            role,
497            content,
498            embed_ctx,
499        );
500
501        Ok((Some(message_id), embedding_stored))
502    }
503
504    /// Save a categorized message to `SQLite` and embed with category payload in Qdrant.
505    ///
506    /// The `category` is stored in both the `messages.category` column and as a Qdrant payload
507    /// field for recall filtering. Uses A-MAC admission gate.
508    ///
509    /// Returns `Ok(Some(message_id))` when admitted; `Ok(None)` when rejected.
510    ///
511    /// # Errors
512    ///
513    /// Returns an error if the `SQLite` save fails.
514    #[cfg_attr(
515        feature = "profiling",
516        tracing::instrument(name = "memory.remember", skip_all, fields(content_len = %content.len()))
517    )]
518    pub async fn remember_categorized(
519        &self,
520        conversation_id: ConversationId,
521        role: &str,
522        content: &str,
523        category: Option<&str>,
524        goal_text: Option<&str>,
525    ) -> Result<Option<MessageId>, MemoryError> {
526        if let Some(ref admission) = self.admission_control {
527            let decision = admission
528                .evaluate(
529                    content,
530                    role,
531                    self.effective_embed_provider(),
532                    self.qdrant.as_ref(),
533                    goal_text,
534                )
535                .await;
536            let preview: String = content.chars().take(100).collect();
537            log_admission_decision(&decision, &preview, role, admission.threshold());
538            if !decision.admitted {
539                return Ok(None);
540            }
541        }
542
543        let message_id = self
544            .sqlite
545            .save_message_with_category(conversation_id, role, content, category)
546            .await?;
547
548        self.embed_and_store_with_category(message_id, conversation_id, role, content, category);
549
550        Ok(Some(message_id))
551    }
552
553    /// Recall messages filtered by category.
554    ///
555    /// When `category` is `None`, behaves identically to [`Self::recall`].
556    ///
557    /// # Errors
558    ///
559    /// Returns an error if the search fails.
560    pub async fn recall_with_category(
561        &self,
562        query: &str,
563        limit: usize,
564        filter: Option<SearchFilter>,
565        category: Option<&str>,
566    ) -> Result<Vec<RecalledMessage>, MemoryError> {
567        let filter_with_category = filter.map(|mut f| {
568            f.category = category.map(str::to_owned);
569            f
570        });
571        self.recall(query, limit, filter_with_category).await
572    }
573
574    /// Reap completed background embed tasks (non-blocking).
575    ///
576    /// Call at turn boundaries to release handles for finished tasks.
577    pub fn reap_embed_tasks(&self) {
578        if let Ok(mut tasks) = self.embed_tasks.lock() {
579            while tasks.try_join_next().is_some() {}
580        }
581    }
582
583    /// Spawn `fut` as a bounded background embed task.
584    ///
585    /// If the task limit is reached, the task is dropped and a debug message is logged.
586    fn spawn_embed_bg<F>(&self, fut: F) -> bool
587    where
588        F: std::future::Future<Output = ()> + Send + 'static,
589    {
590        let Ok(mut tasks) = self.embed_tasks.lock() else {
591            return false;
592        };
593        // Reap any finished tasks before checking capacity.
594        while tasks.try_join_next().is_some() {}
595        if tasks.len() >= MAX_EMBED_BG_TASKS {
596            tracing::debug!("background embed task limit reached, skipping");
597            return false;
598        }
599        tasks.spawn(fut);
600        true
601    }
602
603    /// Embed content chunks and store each with an optional category payload field.
604    ///
605    /// Spawns a bounded background task; returns immediately.
606    fn embed_and_store_with_category(
607        &self,
608        message_id: MessageId,
609        conversation_id: ConversationId,
610        role: &str,
611        content: &str,
612        category: Option<&str>,
613    ) -> bool {
614        let Some(qdrant) = self.qdrant.clone() else {
615            return false;
616        };
617        let embed_provider = self.effective_embed_provider().clone();
618        if !embed_provider.supports_embeddings() {
619            return false;
620        }
621        self.spawn_embed_bg(embed_and_store_with_category_bg(
622            EmbedBgArgs {
623                qdrant,
624                embed_provider,
625                embedding_model: self.embedding_model.clone(),
626                message_id,
627                conversation_id,
628                role: role.to_owned(),
629                content: content.to_owned(),
630                last_qdrant_warn: Arc::clone(&self.last_qdrant_warn),
631            },
632            category.map(str::to_owned),
633        ))
634    }
635
636    /// Embed content chunks and store each as a regular (non-tool) message vector.
637    ///
638    /// Spawns a bounded background task; returns immediately.
639    fn embed_and_store_regular(
640        &self,
641        message_id: MessageId,
642        conversation_id: ConversationId,
643        role: &str,
644        content: &str,
645    ) -> bool {
646        let Some(qdrant) = self.qdrant.clone() else {
647            return false;
648        };
649        let embed_provider = self.effective_embed_provider().clone();
650        if !embed_provider.supports_embeddings() {
651            return false;
652        }
653        self.spawn_embed_bg(embed_and_store_regular_bg(EmbedBgArgs {
654            qdrant,
655            embed_provider,
656            embedding_model: self.embedding_model.clone(),
657            message_id,
658            conversation_id,
659            role: role.to_owned(),
660            content: content.to_owned(),
661            last_qdrant_warn: Arc::clone(&self.last_qdrant_warn),
662        }))
663    }
664
665    /// Embed content chunks, enriching Qdrant payload with tool metadata when present.
666    ///
667    /// Spawns a bounded background task; returns immediately.
668    fn embed_chunks_with_tool_context(
669        &self,
670        message_id: MessageId,
671        conversation_id: ConversationId,
672        role: &str,
673        content: &str,
674        embed_ctx: EmbedContext,
675    ) -> bool {
676        let Some(qdrant) = self.qdrant.clone() else {
677            return false;
678        };
679        let embed_provider = self.effective_embed_provider().clone();
680        if !embed_provider.supports_embeddings() {
681            return false;
682        }
683        self.spawn_embed_bg(embed_chunks_with_tool_context_bg(
684            EmbedBgArgs {
685                qdrant,
686                embed_provider,
687                embedding_model: self.embedding_model.clone(),
688                message_id,
689                conversation_id,
690                role: role.to_owned(),
691                content: content.to_owned(),
692                last_qdrant_warn: Arc::clone(&self.last_qdrant_warn),
693            },
694            embed_ctx,
695        ))
696    }
697
    /// Save a message to `SQLite` without generating an embedding.
    ///
    /// Use this when embedding is intentionally skipped (e.g. autosave disabled for assistant).
    ///
    /// `parts_json` is passed through to storage verbatim; no validation happens here.
    ///
    /// # Errors
    ///
    /// Returns an error if the `SQLite` save fails.
    pub async fn save_only(
        &self,
        conversation_id: ConversationId,
        role: &str,
        content: &str,
        parts_json: &str,
    ) -> Result<MessageId, MemoryError> {
        // Thin delegation: persistence only, no Qdrant interaction.
        self.sqlite
            .save_message_with_parts(conversation_id, role, content, parts_json)
            .await
    }
716
717    /// Recall relevant messages using hybrid search (vector + FTS5 keyword).
718    ///
719    /// When Qdrant is available, runs both vector and keyword searches, then merges
720    /// results using weighted scoring. When Qdrant is unavailable, falls back to
721    /// FTS5-only keyword search.
722    ///
723    /// # Errors
724    ///
725    /// Returns an error if embedding generation, Qdrant search, or FTS5 query fails.
726    #[cfg_attr(
727        feature = "profiling",
728        tracing::instrument(name = "memory.recall", skip_all, fields(query_len = %query.len(), result_count = tracing::field::Empty, top_score = tracing::field::Empty))
729    )]
730    pub async fn recall(
731        &self,
732        query: &str,
733        limit: usize,
734        filter: Option<SearchFilter>,
735    ) -> Result<Vec<RecalledMessage>, MemoryError> {
736        let conversation_id = filter.as_ref().and_then(|f| f.conversation_id);
737
738        tracing::debug!(
739            query_len = query.len(),
740            limit,
741            has_filter = filter.is_some(),
742            conversation_id = conversation_id.map(|c| c.0),
743            has_qdrant = self.qdrant.is_some(),
744            "recall: starting hybrid search"
745        );
746
747        let keyword_results = match self
748            .sqlite
749            .keyword_search(query, limit * 2, conversation_id)
750            .await
751        {
752            Ok(results) => results,
753            Err(e) => {
754                tracing::warn!("FTS5 keyword search failed: {e:#}");
755                Vec::new()
756            }
757        };
758
759        let vector_results = if let Some(qdrant) = &self.qdrant
760            && self.effective_embed_provider().supports_embeddings()
761        {
762            let query_vector = self.effective_embed_provider().embed(query).await?;
763            let vector_size = u64::try_from(query_vector.len()).unwrap_or(896);
764            qdrant.ensure_collection(vector_size).await?;
765            qdrant.search(&query_vector, limit * 2, filter).await?
766        } else {
767            Vec::new()
768        };
769
770        let results = self
771            .recall_merge_and_rank(keyword_results, vector_results, limit)
772            .await?;
773        #[cfg(feature = "profiling")]
774        {
775            let span = tracing::Span::current();
776            span.record("result_count", results.len());
777            if let Some(top) = results.first() {
778                span.record("top_score", top.score);
779            }
780        }
781        Ok(results)
782    }
783
784    pub(super) async fn recall_fts5_raw(
785        &self,
786        query: &str,
787        limit: usize,
788        conversation_id: Option<ConversationId>,
789    ) -> Result<Vec<(MessageId, f64)>, MemoryError> {
790        self.sqlite
791            .keyword_search(query, limit * 2, conversation_id)
792            .await
793    }
794
795    pub(super) async fn recall_vectors_raw(
796        &self,
797        query: &str,
798        limit: usize,
799        filter: Option<SearchFilter>,
800    ) -> Result<Vec<crate::embedding_store::SearchResult>, MemoryError> {
801        let Some(qdrant) = &self.qdrant else {
802            return Ok(Vec::new());
803        };
804        if !self.effective_embed_provider().supports_embeddings() {
805            return Ok(Vec::new());
806        }
807        let query_vector = self.effective_embed_provider().embed(query).await?;
808        let vector_size = u64::try_from(query_vector.len()).unwrap_or(896);
809        qdrant.ensure_collection(vector_size).await?;
810        qdrant.search(&query_vector, limit * 2, filter).await
811    }
812
813    /// Merge raw keyword and vector results, apply weighted scoring, temporal decay, and MMR
814    /// re-ranking, then resolve to `RecalledMessage` objects.
815    ///
816    /// This is the shared post-processing step used by all recall paths.
817    ///
818    /// # Errors
819    ///
820    /// Returns an error if the `SQLite` `messages_by_ids` query fails.
    #[allow(clippy::cast_possible_truncation, clippy::too_many_lines)]
    // Shared ranking pipeline for all recall paths: normalizes and weight-blends
    // keyword (BM25) and vector scores, then applies the optional re-ranking
    // stages (temporal decay, MMR diversification, importance blending, semantic
    // tier boost) before hydrating the surviving IDs into full messages.
    pub(super) async fn recall_merge_and_rank(
        &self,
        keyword_results: Vec<(MessageId, f64)>,
        vector_results: Vec<crate::embedding_store::SearchResult>,
        limit: usize,
    ) -> Result<Vec<RecalledMessage>, MemoryError> {
        tracing::debug!(
            vector_count = vector_results.len(),
            keyword_count = keyword_results.len(),
            limit,
            "recall: merging search results"
        );

        // Accumulator: message ID -> blended score. A message hit by both
        // backends receives the sum of its weighted contributions.
        let mut scores: std::collections::HashMap<MessageId, f64> =
            std::collections::HashMap::new();

        if !vector_results.is_empty() {
            // Normalize vector scores by the batch maximum so both backends
            // contribute on a comparable [0, 1]-ish scale before weighting.
            let max_vs = vector_results
                .iter()
                .map(|r| r.score)
                .fold(f32::NEG_INFINITY, f32::max);
            // Guard against a non-positive max (would flip signs / divide by 0).
            let norm = if max_vs > 0.0 { max_vs } else { 1.0 };
            for r in &vector_results {
                let normalized = f64::from(r.score / norm);
                *scores.entry(r.message_id).or_default() += normalized * self.vector_weight;
            }
        }

        if !keyword_results.is_empty() {
            // Same max-normalization for BM25 scores (already f64).
            let max_ks = keyword_results
                .iter()
                .map(|r| r.1)
                .fold(f64::NEG_INFINITY, f64::max);
            let norm = if max_ks > 0.0 { max_ks } else { 1.0 };
            for &(msg_id, score) in &keyword_results {
                let normalized = score / norm;
                *scores.entry(msg_id).or_default() += normalized * self.keyword_weight;
            }
        }

        // Both input sets were empty — nothing to rank.
        if scores.is_empty() {
            tracing::debug!("recall: empty merge, no overlapping scores");
            return Ok(Vec::new());
        }

        // Descending by blended score; NaN comparisons fall back to Equal.
        let mut ranked: Vec<(MessageId, f64)> = scores.into_iter().collect();
        ranked.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));

        tracing::debug!(
            merged = ranked.len(),
            top_score = ranked.first().map(|r| r.1),
            bottom_score = ranked.last().map(|r| r.1),
            vector_weight = %self.vector_weight,
            keyword_weight = %self.keyword_weight,
            "recall: weighted merge complete"
        );

        // Stage: temporal decay — down-weight older messages by half-life.
        // Timestamp fetch failure is non-fatal; ranking proceeds undecayed.
        if self.temporal_decay_enabled && self.temporal_decay_half_life_days > 0 {
            let ids: Vec<MessageId> = ranked.iter().map(|r| r.0).collect();
            match self.sqlite.message_timestamps(&ids).await {
                Ok(timestamps) => {
                    apply_temporal_decay(
                        &mut ranked,
                        &timestamps,
                        self.temporal_decay_half_life_days,
                    );
                    // Re-sort: decay changes scores non-uniformly.
                    ranked
                        .sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
                    tracing::debug!(
                        half_life_days = self.temporal_decay_half_life_days,
                        top_score_after = ranked.first().map(|r| r.1),
                        "recall: temporal decay applied"
                    );
                }
                Err(e) => {
                    tracing::warn!("temporal decay: failed to fetch timestamps: {e:#}");
                }
            }
        }

        // Stage: MMR diversification — only meaningful when vector hits exist
        // and a vector store is available. apply_mmr truncates to `limit`
        // itself; every other path must truncate explicitly.
        if self.mmr_enabled && !vector_results.is_empty() {
            if let Some(qdrant) = &self.qdrant {
                let ids: Vec<MessageId> = ranked.iter().map(|r| r.0).collect();
                match qdrant.get_vectors(&ids).await {
                    Ok(vec_map) if !vec_map.is_empty() => {
                        let ranked_len_before = ranked.len();
                        ranked = apply_mmr(&ranked, &vec_map, self.mmr_lambda, limit);
                        tracing::debug!(
                            before = ranked_len_before,
                            after = ranked.len(),
                            lambda = %self.mmr_lambda,
                            "recall: mmr re-ranked"
                        );
                    }
                    // No vectors returned — skip MMR, keep weighted order.
                    Ok(_) => {
                        ranked.truncate(limit);
                    }
                    // Vector fetch failure is non-fatal.
                    Err(e) => {
                        tracing::warn!("MMR: failed to fetch vectors: {e:#}");
                        ranked.truncate(limit);
                    }
                }
            } else {
                ranked.truncate(limit);
            }
        } else {
            ranked.truncate(limit);
        }

        // Stage: importance blending — additive bonus per stored importance
        // score, weighted by `importance_weight`. Fetch failure is non-fatal.
        if self.importance_enabled && !ranked.is_empty() {
            let ids: Vec<MessageId> = ranked.iter().map(|r| r.0).collect();
            match self.sqlite.fetch_importance_scores(&ids).await {
                Ok(scores) => {
                    for (msg_id, score) in &mut ranked {
                        if let Some(&imp) = scores.get(msg_id) {
                            *score += imp * self.importance_weight;
                        }
                    }
                    ranked
                        .sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
                    tracing::debug!(
                        importance_weight = %self.importance_weight,
                        "recall: importance scores blended"
                    );
                }
                Err(e) => {
                    tracing::warn!("importance scoring: failed to fetch scores: {e:#}");
                }
            }
        }

        // Apply tier boost: semantic-tier messages receive an additive bonus so distilled facts
        // rank above episodic messages with the same base score. Additive (not multiplicative)
        // so the effect is consistent regardless of base score magnitude.
        // Skipped entirely when the configured boost is (effectively) 1.0.
        if (self.tier_boost_semantic - 1.0).abs() > f64::EPSILON && !ranked.is_empty() {
            let ids: Vec<MessageId> = ranked.iter().map(|r| r.0).collect();
            match self.sqlite.fetch_tiers(&ids).await {
                Ok(tiers) => {
                    let bonus = self.tier_boost_semantic - 1.0;
                    let mut boosted = false;
                    for (msg_id, score) in &mut ranked {
                        if tiers.get(msg_id).map(String::as_str) == Some("semantic") {
                            *score += bonus;
                            boosted = true;
                        }
                    }
                    // Only re-sort (and log) if at least one score changed.
                    if boosted {
                        ranked.sort_by(|a, b| {
                            b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal)
                        });
                        tracing::debug!(
                            tier_boost = %self.tier_boost_semantic,
                            "recall: semantic tier boost applied"
                        );
                    }
                }
                Err(e) => {
                    tracing::warn!("tier boost: failed to fetch tiers: {e:#}");
                }
            }
        }

        let ids: Vec<MessageId> = ranked.iter().map(|r| r.0).collect();

        // Side effect: bump access counters for everything recalled (best-effort).
        if !ids.is_empty()
            && let Err(e) = self.batch_increment_access_count(ids.clone()).await
        {
            tracing::warn!("recall: failed to increment access counts: {e:#}");
        }

        // Update RL admission training data: mark recalled messages as positive examples.
        if let Err(e) = self.sqlite.mark_training_recalled(&ids).await {
            tracing::debug!(
                error = %e,
                "recall: failed to mark training data as recalled (non-fatal)"
            );
        }

        // Hydrate IDs into full messages; ranked order is preserved because we
        // iterate `ranked` and look rows up in the map (missing rows are dropped).
        let messages = self.sqlite.messages_by_ids(&ids).await?;
        let msg_map: std::collections::HashMap<MessageId, _> = messages.into_iter().collect();

        let recalled: Vec<RecalledMessage> = ranked
            .iter()
            .filter_map(|(msg_id, score)| {
                msg_map.get(msg_id).map(|msg| RecalledMessage {
                    message: msg.clone(),
                    #[expect(clippy::cast_possible_truncation)]
                    score: *score as f32,
                })
            })
            .collect();

        tracing::debug!(final_count = recalled.len(), "recall: final results");

        Ok(recalled)
    }
1018
1019    /// Recall messages using query-aware routing.
1020    ///
1021    /// Delegates to FTS5-only, vector-only, or hybrid search based on the router decision,
1022    /// then runs the shared merge and ranking pipeline.
1023    ///
1024    /// # Errors
1025    ///
1026    /// Returns an error if any underlying search or database operation fails.
1027    #[cfg_attr(
1028        feature = "profiling",
1029        tracing::instrument(name = "memory.recall", skip_all, fields(query_len = %query.len(), result_count = tracing::field::Empty))
1030    )]
1031    pub async fn recall_routed(
1032        &self,
1033        query: &str,
1034        limit: usize,
1035        filter: Option<SearchFilter>,
1036        router: &dyn crate::router::MemoryRouter,
1037    ) -> Result<Vec<RecalledMessage>, MemoryError> {
1038        use crate::router::MemoryRoute;
1039
1040        let route = router.route(query);
1041        tracing::debug!(?route, query_len = query.len(), "memory routing decision");
1042
1043        let conversation_id = filter.as_ref().and_then(|f| f.conversation_id);
1044
1045        let (keyword_results, vector_results): (
1046            Vec<(MessageId, f64)>,
1047            Vec<crate::embedding_store::SearchResult>,
1048        ) = match route {
1049            MemoryRoute::Keyword => {
1050                let kw = self.recall_fts5_raw(query, limit, conversation_id).await?;
1051                (kw, Vec::new())
1052            }
1053            MemoryRoute::Semantic => {
1054                let vr = self.recall_vectors_raw(query, limit, filter).await?;
1055                (Vec::new(), vr)
1056            }
1057            MemoryRoute::Hybrid => {
1058                let kw = match self.recall_fts5_raw(query, limit, conversation_id).await {
1059                    Ok(r) => r,
1060                    Err(e) => {
1061                        tracing::warn!("FTS5 keyword search failed: {e:#}");
1062                        Vec::new()
1063                    }
1064                };
1065                let vr = self.recall_vectors_raw(query, limit, filter).await?;
1066                (kw, vr)
1067            }
1068            // Episodic: FTS5 keyword search with an optional timestamp-range filter.
1069            // Temporal keywords are stripped from the query before passing to FTS5 to
1070            // prevent BM25 score distortion (e.g. "yesterday" matching messages that
1071            // literally contain the word "yesterday" regardless of actual relevance).
1072            // Vector search is skipped for speed; temporal decay in recall_merge_and_rank
1073            // provides recency boosting for the FTS5 results.
1074            // Known trade-off (MVP): semantically similar but lexically different messages
1075            // may be missed. See issue #1629 for a future hybrid_temporal mode.
1076            MemoryRoute::Episodic => {
1077                let range = crate::router::resolve_temporal_range(query, chrono::Utc::now());
1078                let cleaned = crate::router::strip_temporal_keywords(query);
1079                let search_query = if cleaned.is_empty() { query } else { &cleaned };
1080                let kw = if let Some(ref r) = range {
1081                    self.sqlite
1082                        .keyword_search_with_time_range(
1083                            search_query,
1084                            limit,
1085                            conversation_id,
1086                            r.after.as_deref(),
1087                            r.before.as_deref(),
1088                        )
1089                        .await?
1090                } else {
1091                    self.recall_fts5_raw(search_query, limit, conversation_id)
1092                        .await?
1093                };
1094                tracing::debug!(
1095                    has_range = range.is_some(),
1096                    cleaned_query = %search_query,
1097                    keyword_count = kw.len(),
1098                    "recall: episodic path"
1099                );
1100                (kw, Vec::new())
1101            }
1102            // Graph routing triggers graph_recall separately in agent/context.rs.
1103            // For the message-based recall, behave like Hybrid.
1104            MemoryRoute::Graph => {
1105                let kw = match self.recall_fts5_raw(query, limit, conversation_id).await {
1106                    Ok(r) => r,
1107                    Err(e) => {
1108                        tracing::warn!("FTS5 keyword search failed (graph→hybrid fallback): {e:#}");
1109                        Vec::new()
1110                    }
1111                };
1112                let vr = self.recall_vectors_raw(query, limit, filter).await?;
1113                (kw, vr)
1114            }
1115        };
1116
1117        tracing::debug!(
1118            keyword_count = keyword_results.len(),
1119            vector_count = vector_results.len(),
1120            "recall: routed search results"
1121        );
1122
1123        self.recall_merge_and_rank(keyword_results, vector_results, limit)
1124            .await
1125    }
1126
1127    /// Async variant of [`recall_routed`](Self::recall_routed) that uses
1128    /// [`AsyncMemoryRouter::route_async`](crate::router::AsyncMemoryRouter::route_async) when
1129    /// available, enabling LLM-based routing for `LlmRouter` and `HybridRouter`.
1130    ///
1131    /// Falls back to [`recall_routed`](Self::recall_routed) for routers that only implement
1132    /// the sync `MemoryRouter` trait (e.g. `HeuristicRouter`).
1133    ///
1134    /// # Errors
1135    ///
1136    /// Returns an error if any underlying search or database operation fails.
1137    #[cfg_attr(
1138        feature = "profiling",
1139        tracing::instrument(name = "memory.recall", skip_all, fields(query_len = %query.len(), result_count = tracing::field::Empty))
1140    )]
1141    pub async fn recall_routed_async(
1142        &self,
1143        query: &str,
1144        limit: usize,
1145        filter: Option<crate::embedding_store::SearchFilter>,
1146        router: &dyn crate::router::AsyncMemoryRouter,
1147    ) -> Result<Vec<RecalledMessage>, MemoryError> {
1148        use crate::router::MemoryRoute;
1149
1150        let decision = router.route_async(query).await;
1151        let route = decision.route;
1152        tracing::debug!(
1153            ?route,
1154            confidence = decision.confidence,
1155            query_len = query.len(),
1156            "memory routing decision (async)"
1157        );
1158
1159        let conversation_id = filter.as_ref().and_then(|f| f.conversation_id);
1160
1161        let (keyword_results, vector_results): (
1162            Vec<(crate::types::MessageId, f64)>,
1163            Vec<crate::embedding_store::SearchResult>,
1164        ) = match route {
1165            MemoryRoute::Keyword => {
1166                let kw = self.recall_fts5_raw(query, limit, conversation_id).await?;
1167                (kw, Vec::new())
1168            }
1169            MemoryRoute::Semantic => {
1170                let vr = self.recall_vectors_raw(query, limit, filter).await?;
1171                (Vec::new(), vr)
1172            }
1173            MemoryRoute::Hybrid => {
1174                let kw = match self.recall_fts5_raw(query, limit, conversation_id).await {
1175                    Ok(r) => r,
1176                    Err(e) => {
1177                        tracing::warn!("FTS5 keyword search failed: {e:#}");
1178                        Vec::new()
1179                    }
1180                };
1181                let vr = self.recall_vectors_raw(query, limit, filter).await?;
1182                (kw, vr)
1183            }
1184            MemoryRoute::Episodic => {
1185                let range = crate::router::resolve_temporal_range(query, chrono::Utc::now());
1186                let cleaned = crate::router::strip_temporal_keywords(query);
1187                let search_query = if cleaned.is_empty() { query } else { &cleaned };
1188                let kw = if let Some(ref r) = range {
1189                    self.sqlite
1190                        .keyword_search_with_time_range(
1191                            search_query,
1192                            limit,
1193                            conversation_id,
1194                            r.after.as_deref(),
1195                            r.before.as_deref(),
1196                        )
1197                        .await?
1198                } else {
1199                    self.recall_fts5_raw(search_query, limit, conversation_id)
1200                        .await?
1201                };
1202                (kw, Vec::new())
1203            }
1204            MemoryRoute::Graph => {
1205                let kw = match self.recall_fts5_raw(query, limit, conversation_id).await {
1206                    Ok(r) => r,
1207                    Err(e) => {
1208                        tracing::warn!("FTS5 keyword search failed (graph→hybrid fallback): {e:#}");
1209                        Vec::new()
1210                    }
1211                };
1212                let vr = self.recall_vectors_raw(query, limit, filter).await?;
1213                (kw, vr)
1214            }
1215        };
1216
1217        tracing::debug!(
1218            keyword_count = keyword_results.len(),
1219            vector_count = vector_results.len(),
1220            "recall: routed search results (async)"
1221        );
1222
1223        self.recall_merge_and_rank(keyword_results, vector_results, limit)
1224            .await
1225    }
1226
1227    /// Retrieve graph facts relevant to `query` via BFS traversal.
1228    ///
1229    /// Returns an empty `Vec` if no `graph_store` is configured.
1230    ///
1231    /// # Parameters
1232    ///
1233    /// - `at_timestamp`: when `Some`, only edges valid at that `SQLite` datetime string are returned.
1234    ///   When `None`, only currently active edges are used.
1235    /// - `temporal_decay_rate`: non-negative decay rate (1/day). `0.0` preserves original ordering.
1236    ///
1237    /// # Errors
1238    ///
1239    /// Returns an error if the underlying graph query fails.
1240    #[cfg_attr(
1241        feature = "profiling",
1242        tracing::instrument(name = "memory.recall_graph", skip_all, fields(result_count = tracing::field::Empty))
1243    )]
1244    pub async fn recall_graph(
1245        &self,
1246        query: &str,
1247        limit: usize,
1248        max_hops: u32,
1249        at_timestamp: Option<&str>,
1250        temporal_decay_rate: f64,
1251        edge_types: &[crate::graph::EdgeType],
1252    ) -> Result<Vec<crate::graph::types::GraphFact>, MemoryError> {
1253        let Some(store) = &self.graph_store else {
1254            return Ok(Vec::new());
1255        };
1256
1257        tracing::debug!(
1258            query_len = query.len(),
1259            limit,
1260            max_hops,
1261            "graph: starting recall"
1262        );
1263
1264        let results = crate::graph::retrieval::graph_recall(
1265            store,
1266            self.qdrant.as_deref(),
1267            &self.provider,
1268            query,
1269            limit,
1270            max_hops,
1271            at_timestamp,
1272            temporal_decay_rate,
1273            edge_types,
1274        )
1275        .await?;
1276
1277        tracing::debug!(result_count = results.len(), "graph: recall complete");
1278        #[cfg(feature = "profiling")]
1279        tracing::Span::current().record("result_count", results.len());
1280
1281        Ok(results)
1282    }
1283
1284    /// Retrieve graph facts via SYNAPSE spreading activation.
1285    ///
1286    /// Delegates to [`crate::graph::retrieval::graph_recall_activated`].
1287    /// Used in place of [`Self::recall_graph`] when `spreading_activation.enabled = true`.
1288    ///
1289    /// # Errors
1290    ///
1291    /// Returns an error if the underlying graph query fails.
1292    #[cfg_attr(
1293        feature = "profiling",
1294        tracing::instrument(name = "memory.recall_graph", skip_all, fields(result_count = tracing::field::Empty))
1295    )]
1296    pub async fn recall_graph_activated(
1297        &self,
1298        query: &str,
1299        limit: usize,
1300        params: crate::graph::SpreadingActivationParams,
1301        edge_types: &[crate::graph::EdgeType],
1302    ) -> Result<Vec<crate::graph::activation::ActivatedFact>, MemoryError> {
1303        let Some(store) = &self.graph_store else {
1304            return Ok(Vec::new());
1305        };
1306
1307        tracing::debug!(
1308            query_len = query.len(),
1309            limit,
1310            "spreading activation: starting graph recall"
1311        );
1312
1313        let embeddings = self.qdrant.as_deref();
1314        let results = crate::graph::retrieval::graph_recall_activated(
1315            store,
1316            embeddings,
1317            &self.provider,
1318            query,
1319            limit,
1320            params,
1321            edge_types,
1322        )
1323        .await?;
1324
1325        tracing::debug!(
1326            result_count = results.len(),
1327            "spreading activation: graph recall complete"
1328        );
1329
1330        Ok(results)
1331    }
1332
1333    /// Increment access count and update `last_accessed` for a batch of message IDs.
1334    ///
1335    /// Skips the update if `message_ids` is empty to avoid an invalid `IN ()` clause.
1336    ///
1337    /// # Errors
1338    ///
1339    /// Returns an error if the `SQLite` update fails.
1340    async fn batch_increment_access_count(
1341        &self,
1342        message_ids: Vec<MessageId>,
1343    ) -> Result<(), MemoryError> {
1344        if message_ids.is_empty() {
1345            return Ok(());
1346        }
1347        self.sqlite.increment_access_counts(&message_ids).await
1348    }
1349
1350    /// Check whether an embedding exists for a given message ID.
1351    ///
1352    /// # Errors
1353    ///
1354    /// Returns an error if the `SQLite` query fails.
1355    pub async fn has_embedding(&self, message_id: MessageId) -> Result<bool, MemoryError> {
1356        match &self.qdrant {
1357            Some(qdrant) => qdrant.has_embedding(message_id).await,
1358            None => Ok(false),
1359        }
1360    }
1361
1362    /// Embed all messages that do not yet have embeddings.
1363    ///
1364    /// Processes unembedded messages in micro-batches of 32, using `buffer_unordered(4)` for
1365    /// concurrent embedding within each batch. Bounded peak memory: at most 32 messages of content
1366    /// plus their embedding vectors are live at any time.
1367    ///
1368    /// When `progress_tx` is `Some`, sends `Some(BackfillProgress)` after each message and
1369    /// `None` on completion (or on timeout/error in the caller).
1370    ///
1371    /// Returns the count of successfully embedded messages.
1372    ///
1373    /// # Errors
1374    ///
1375    /// Returns an error if collection initialization or the streaming query setup fails.
1376    /// Individual embedding failures are logged but do not stop processing.
1377    pub async fn embed_missing(
1378        &self,
1379        progress_tx: Option<tokio::sync::watch::Sender<Option<super::BackfillProgress>>>,
1380    ) -> Result<usize, MemoryError> {
1381        if self.qdrant.is_none() || !self.effective_embed_provider().supports_embeddings() {
1382            return Ok(0);
1383        }
1384
1385        let total = self.sqlite.count_unembedded_messages().await?;
1386        if total == 0 {
1387            return Ok(0);
1388        }
1389
1390        if let Some(tx) = &progress_tx {
1391            let _ = tx.send(Some(super::BackfillProgress { done: 0, total }));
1392        }
1393
1394        let mut done = 0usize;
1395        let mut succeeded = 0usize;
1396
1397        loop {
1398            const BATCH_SIZE: usize = 32;
1399            const BATCH_SIZE_I64: i64 = 32;
1400            let rows: Vec<_> = self
1401                .sqlite
1402                .stream_unembedded_messages(BATCH_SIZE_I64)
1403                .try_collect()
1404                .await?;
1405
1406            if rows.is_empty() {
1407                break;
1408            }
1409
1410            let batch_len = rows.len();
1411
1412            let results: Vec<bool> = futures::stream::iter(rows)
1413                .map(|(msg_id, conv_id, role, content)| async move {
1414                    self.embed_and_store_regular(msg_id, conv_id, &role, &content)
1415                })
1416                .buffer_unordered(4)
1417                .collect()
1418                .await;
1419
1420            for ok in &results {
1421                done += 1;
1422                if *ok {
1423                    succeeded += 1;
1424                }
1425                if let Some(tx) = &progress_tx {
1426                    let _ = tx.send(Some(super::BackfillProgress { done, total }));
1427                }
1428            }
1429
1430            let batch_succeeded = results.iter().filter(|&&b| b).count();
1431            if batch_succeeded > 0 {
1432                tracing::debug!("Backfill batch: {batch_succeeded}/{batch_len} embedded");
1433            }
1434
1435            if batch_len < BATCH_SIZE {
1436                break;
1437            }
1438        }
1439
1440        if let Some(tx) = &progress_tx {
1441            let _ = tx.send(None);
1442        }
1443
1444        if done > 0 {
1445            tracing::info!("Embedded {succeeded}/{total} missing messages");
1446        }
1447        Ok(succeeded)
1448    }
1449}
1450
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn embed_context_default_all_none() {
        // A default context carries no tool metadata at all.
        let ctx = EmbedContext::default();
        assert!(ctx.timestamp.is_none());
        assert!(ctx.exit_code.is_none());
        assert!(ctx.tool_name.is_none());
    }

    #[test]
    fn embed_context_fields_set_correctly() {
        let ctx = EmbedContext {
            tool_name: Some(String::from("shell")),
            exit_code: Some(0),
            timestamp: Some(String::from("2026-04-04T00:00:00Z")),
        };
        assert_eq!(ctx.tool_name.as_deref(), Some("shell"));
        assert_eq!(ctx.exit_code, Some(0));
        assert_eq!(ctx.timestamp.as_deref(), Some("2026-04-04T00:00:00Z"));
    }

    #[test]
    fn embed_context_non_zero_exit_code() {
        let ctx = EmbedContext {
            tool_name: Some(String::from("shell")),
            exit_code: Some(1),
            timestamp: None,
        };
        assert_eq!(ctx.exit_code, Some(1));
        assert!(ctx.timestamp.is_none());
    }

    /// Build a minimal `SemanticMemory` over an in-memory `SQLite` store with
    /// every optional subsystem disabled or absent.
    async fn make_semantic_memory() -> crate::semantic::SemanticMemory {
        use std::sync::Arc;
        use std::sync::atomic::AtomicU64;
        use zeph_llm::any::AnyProvider;
        use zeph_llm::mock::MockProvider;

        let store = crate::store::SqliteStore::new(":memory:").await.unwrap();
        crate::semantic::SemanticMemory {
            sqlite: store,
            qdrant: None,
            provider: AnyProvider::Mock(MockProvider::default()),
            embed_provider: None,
            embedding_model: "test-model".into(),
            vector_weight: 0.7,
            keyword_weight: 0.3,
            temporal_decay_enabled: false,
            temporal_decay_half_life_days: 30,
            mmr_enabled: false,
            mmr_lambda: 0.7,
            importance_enabled: false,
            importance_weight: 0.15,
            token_counter: Arc::new(crate::token_counter::TokenCounter::new()),
            graph_store: None,
            community_detection_failures: Arc::new(AtomicU64::new(0)),
            graph_extraction_count: Arc::new(AtomicU64::new(0)),
            graph_extraction_failures: Arc::new(AtomicU64::new(0)),
            last_qdrant_warn: Arc::new(AtomicU64::new(0)),
            tier_boost_semantic: 1.3,
            admission_control: None,
            quality_gate: None,
            key_facts_dedup_threshold: 0.95,
            embed_tasks: std::sync::Mutex::new(tokio::task::JoinSet::new()),
        }
    }

    #[tokio::test]
    async fn spawn_embed_bg_returns_true_when_capacity_available() {
        let memory = make_semantic_memory().await;
        assert!(
            memory.spawn_embed_bg(std::future::ready(())),
            "spawn_embed_bg must return true when a task was successfully spawned"
        );
    }

    #[tokio::test]
    async fn spawn_embed_bg_returns_false_at_capacity() {
        let memory = make_semantic_memory().await;

        // Saturate the JoinSet with futures that never resolve.
        {
            let mut tasks = memory.embed_tasks.lock().unwrap();
            for _ in 0..MAX_EMBED_BG_TASKS {
                tasks.spawn(std::future::pending::<()>());
            }
        }

        assert!(
            !memory.spawn_embed_bg(std::future::ready(())),
            "spawn_embed_bg must return false when the task limit is reached"
        );
    }

    #[test]
    fn qdrant_warn_rate_limit_suppresses_within_window() {
        use std::sync::Arc;
        use std::sync::atomic::{AtomicU64, Ordering};

        let last_warn = Arc::new(AtomicU64::new(0));
        let window_secs = 10u64;

        // Mirrors the production check: emit (and record `now`) only when a
        // full window has elapsed since the last recorded warning.
        let try_warn = |now: u64| {
            let last = last_warn.load(Ordering::Relaxed);
            let emit = now.saturating_sub(last) >= window_secs;
            if emit {
                last_warn.store(now, Ordering::Relaxed);
            }
            emit
        };

        // t=100 with last=0: diff >= 10, so the warning is emitted.
        assert!(try_warn(100), "first call must not be suppressed");
        // t=105, only 5s after the recorded warning: suppressed.
        assert!(!try_warn(105), "call within 10s window must be suppressed");
        // t=110, exactly one window after the first warning: emits again.
        assert!(try_warn(110), "call after window expiry must not be suppressed");
    }

    #[test]
    fn qdrant_warn_rate_limit_shared_across_concurrent_sites() {
        use std::sync::Arc;
        use std::sync::atomic::{AtomicU64, Ordering};

        // All 3 WARN sites share one Arc<AtomicU64>: once site A records a
        // warning at t=100, site B's attempt at t=105 lands inside the window.
        let shared = Arc::new(AtomicU64::new(0));
        let window_secs = 10u64;

        let site_a = Arc::clone(&shared);
        let site_b = Arc::clone(&shared);

        // Site A at t=100: last=0, window elapsed, so it records the warning.
        if 100u64.saturating_sub(site_a.load(Ordering::Relaxed)) >= window_secs {
            site_a.store(100, Ordering::Relaxed);
        }

        // Site B at t=105: only 5s since site A's warning.
        let warn_b = 105u64.saturating_sub(site_b.load(Ordering::Relaxed)) >= window_secs;
        assert!(
            !warn_b,
            "site B must be suppressed because site A already warned within the window"
        );
    }
}
1611}