//! Recall scoring and planning for `mnemara_core` (`scorer.rs`).
1use crate::config::{
2    EngineConfig, RecallPlanningProfile, RecallPolicyProfile, RecallScorerKind,
3    RecallScoringProfile,
4};
5use crate::embedding::{ConfiguredSemanticEmbedder, SemanticEmbedder};
6use crate::model::{
7    EpisodeContext, MemoryHistoricalState, MemoryQualityState, MemoryRecord, MemoryTrustLevel,
8};
9use crate::query::{
10    RecallCandidateSource, RecallHit, RecallPlannerStage, RecallQuery, RecallScoreBreakdown,
11};
12use std::collections::{BTreeMap, BTreeSet};
13use std::sync::Arc;
14use std::time::{Instant, SystemTime, UNIX_EPOCH};
15
/// A recall hit paired with the query terms that matched it, as produced by
/// a [`RecallScorer`] implementation.
#[derive(Debug, Clone, PartialEq)]
pub struct ScoredRecallCandidate {
    pub hit: RecallHit,
    /// Lowercased query terms found in the record's searchable text.
    pub matched_terms: Vec<String>,
}
21
/// A scored candidate after planning, annotated with the signal sources that
/// surfaced it and the planner stage that emitted it.
#[derive(Debug, Clone, PartialEq)]
pub struct PlannedRecallCandidate {
    pub hit: RecallHit,
    /// Lowercased query terms found in the record's searchable text.
    pub matched_terms: Vec<String>,
    /// Sorted, deduplicated signal sources (lexical, semantic, metadata,
    /// episode, temporal) — see [`seed_candidate_sources`].
    pub candidate_sources: Vec<RecallCandidateSource>,
    /// Planner stage that produced this candidate.
    pub planner_stage: RecallPlannerStage,
}
29
/// Plans recall candidate generation and graph expansion.
/// NOTE(review): the `impl` for this type is outside this chunk; field roles
/// below are inferred from the free functions in this file — confirm there.
#[derive(Debug, Clone)]
pub struct RecallPlanner {
    profile: RecallPlanningProfile,
    // Cap on graph-expansion hops (expansion only applies to the
    // continuity-aware profile; see `graph_relations`).
    graph_expansion_max_hops: u8,
    scorer: ConfiguredRecallScorer,
}
36
/// Timing and volume metrics captured while planning a recall query.
/// Durations are nanoseconds (u128, presumably from `Instant::elapsed` —
/// `Instant` is imported above; confirm against the planner impl).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct RecallPlannerMetrics {
    /// Time spent generating seed candidates.
    pub candidate_generation_ns: u128,
    /// Time spent expanding candidates across graph relations.
    pub graph_expansion_ns: u128,
    /// End-to-end planning time.
    pub total_ns: u128,
    /// Candidates produced by the seeding stage.
    pub seeded_candidates: usize,
    /// Candidates added by graph expansion.
    pub expanded_candidates: usize,
    /// Number of expansion hops actually performed.
    pub hops_applied: u8,
}
46
/// Kind of graph edge linking an expansion candidate back to the current
/// frontier (collected by `graph_relations`, weighted by
/// `graph_relation_bonus`). `Ord` is derived so relation lists can be
/// sorted and deduplicated; declaration order fixes the sort order.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum GraphExpansionRelation {
    /// Record belongs to an episode already on the frontier.
    EpisodeMembership,
    /// Record is a previous/next chronology link of a frontier record.
    Chronology,
    /// Record appears in a frontier record's causal chain.
    Causal,
    /// Record is listed as related by a frontier record's episode.
    Related,
    /// Record shares a lineage link with a frontier record.
    Lineage,
}
55
/// Scores a single memory record against a recall query.
///
/// `Send + Sync` so implementations can be shared across threads.
pub trait RecallScorer: Send + Sync {
    /// Scores `record` for `query`; `None` excludes the record entirely.
    fn score(&self, record: &MemoryRecord, query: &RecallQuery) -> Option<ScoredRecallCandidate>;
    /// Which scorer implementation this is.
    fn scorer_kind(&self) -> RecallScorerKind;
    /// The scoring (weight) profile in effect.
    fn scoring_profile(&self) -> RecallScoringProfile;
    /// The provenance policy profile in effect.
    fn policy_profile(&self) -> RecallPolicyProfile;
    /// Static diagnostic tag for the scoring profile.
    fn profile_note(&self) -> &'static str;
    /// Static diagnostic tag for the policy profile.
    fn policy_profile_note(&self) -> &'static str;
    /// Provider note for the semantic embedder, if one is configured.
    fn embedding_note(&self) -> Option<String>;
}
65
/// Weight table consumed by `provenance_bonus`; one instance per
/// [`RecallPolicyProfile`] (see `provenance_policy_weights`). Negative
/// values are penalties.
#[derive(Debug, Clone, Copy)]
struct ProvenancePolicyWeights {
    // Applied when the record's source matches the query scope's source.
    source_bonus: f32,
    // Trust-level adjustments (one per `MemoryTrustLevel` variant).
    pinned_bonus: f32,
    verified_bonus: f32,
    observed_bonus: f32,
    derived_bonus: f32,
    untrusted_penalty: f32,
    // Quality-state adjustments (`MemoryQualityState`). Archived carries two
    // variants: harsher for fast-path planning, softer for continuity-aware
    // planning where archived records may still complete an episode chain.
    verified_state_bonus: f32,
    active_state_bonus: f32,
    draft_penalty: f32,
    archived_penalty_fast_path: f32,
    archived_penalty_continuity: f32,
    suppressed_penalty: f32,
    deleted_penalty: f32,
    // Historical-state adjustments (`MemoryHistoricalState`); the *_bonus
    // values apply only when the query asks for historical-only results.
    current_bonus: f32,
    historical_bonus: f32,
    historical_penalty: f32,
    superseded_bonus: f32,
    superseded_penalty: f32,
}
87
/// Returns the provenance weight table for `profile`.
///
/// Each profile tunes how strongly source match, trust level, quality state,
/// and historical state shift a record's score in `provenance_bonus`. The
/// constants below are the tuned behavior — change them deliberately.
fn provenance_policy_weights(profile: RecallPolicyProfile) -> ProvenancePolicyWeights {
    match profile {
        // Balanced defaults.
        RecallPolicyProfile::General => ProvenancePolicyWeights {
            source_bonus: 0.12,
            pinned_bonus: 0.14,
            verified_bonus: 0.09,
            observed_bonus: 0.03,
            derived_bonus: 0.0,
            untrusted_penalty: -0.08,
            verified_state_bonus: 0.08,
            active_state_bonus: 0.03,
            draft_penalty: -0.03,
            archived_penalty_fast_path: -0.08,
            archived_penalty_continuity: -0.02,
            suppressed_penalty: -0.2,
            deleted_penalty: -0.3,
            current_bonus: 0.06,
            historical_bonus: 0.04,
            historical_penalty: -0.05,
            superseded_bonus: -0.02,
            superseded_penalty: -0.12,
        },
        // Strongest preference for trusted, verified, current material;
        // harshest on suppressed/deleted records.
        RecallPolicyProfile::Support => ProvenancePolicyWeights {
            source_bonus: 0.16,
            pinned_bonus: 0.2,
            verified_bonus: 0.13,
            observed_bonus: 0.01,
            derived_bonus: -0.03,
            untrusted_penalty: -0.14,
            verified_state_bonus: 0.12,
            active_state_bonus: 0.05,
            draft_penalty: -0.06,
            archived_penalty_fast_path: -0.12,
            archived_penalty_continuity: -0.05,
            suppressed_penalty: -0.28,
            deleted_penalty: -0.4,
            current_bonus: 0.09,
            historical_bonus: 0.01,
            historical_penalty: -0.09,
            superseded_bonus: -0.05,
            superseded_penalty: -0.16,
        },
        // Most tolerant profile: mild penalties, and the only one that
        // rewards archived-in-continuity, historical, and superseded records.
        RecallPolicyProfile::Research => ProvenancePolicyWeights {
            source_bonus: 0.04,
            pinned_bonus: 0.1,
            verified_bonus: 0.07,
            observed_bonus: 0.04,
            derived_bonus: 0.03,
            untrusted_penalty: -0.04,
            verified_state_bonus: 0.05,
            active_state_bonus: 0.02,
            draft_penalty: -0.01,
            archived_penalty_fast_path: -0.02,
            archived_penalty_continuity: 0.01,
            suppressed_penalty: -0.12,
            deleted_penalty: -0.18,
            current_bonus: 0.03,
            historical_bonus: 0.08,
            historical_penalty: -0.01,
            superseded_bonus: 0.02,
            superseded_penalty: -0.04,
        },
        // Slightly softer than General.
        RecallPolicyProfile::Assistant => ProvenancePolicyWeights {
            source_bonus: 0.1,
            pinned_bonus: 0.13,
            verified_bonus: 0.1,
            observed_bonus: 0.03,
            derived_bonus: 0.01,
            untrusted_penalty: -0.07,
            verified_state_bonus: 0.09,
            active_state_bonus: 0.04,
            draft_penalty: -0.03,
            archived_penalty_fast_path: -0.06,
            archived_penalty_continuity: -0.01,
            suppressed_penalty: -0.18,
            deleted_penalty: -0.28,
            current_bonus: 0.07,
            historical_bonus: 0.04,
            historical_penalty: -0.03,
            superseded_bonus: -0.01,
            superseded_penalty: -0.1,
        },
        // Between General and Support in strictness.
        RecallPolicyProfile::AutonomousAgent => ProvenancePolicyWeights {
            source_bonus: 0.14,
            pinned_bonus: 0.18,
            verified_bonus: 0.12,
            observed_bonus: 0.02,
            derived_bonus: 0.0,
            untrusted_penalty: -0.1,
            verified_state_bonus: 0.1,
            active_state_bonus: 0.05,
            draft_penalty: -0.04,
            archived_penalty_fast_path: -0.08,
            archived_penalty_continuity: -0.02,
            suppressed_penalty: -0.24,
            deleted_penalty: -0.36,
            current_bonus: 0.08,
            historical_bonus: 0.03,
            historical_penalty: -0.06,
            superseded_bonus: -0.03,
            superseded_penalty: -0.14,
        },
    }
}
192
193fn semantic_similarity(
194    embedder: &ConfiguredSemanticEmbedder,
195    query_text: &str,
196    haystack: &str,
197) -> f32 {
198    if embedder.dimensions() == 0 {
199        return 0.0;
200    }
201
202    embedder
203        .embed(query_text)
204        .cosine_similarity(&embedder.embed(haystack))
205        .max(0.0)
206}
207
208fn episode_haystack(episode: &EpisodeContext) -> String {
209    format!(
210        "{} {} {} {} {} {} {} {}",
211        episode.summary.clone().unwrap_or_default(),
212        episode.goal.clone().unwrap_or_default(),
213        episode.outcome.clone().unwrap_or_default(),
214        episode.actor_ids.join(" "),
215        episode.recurrence_key.clone().unwrap_or_default(),
216        episode.boundary_label.clone().unwrap_or_default(),
217        episode.related_record_ids.join(" "),
218        episode.linked_artifact_uris.join(" "),
219    )
220    .to_ascii_lowercase()
221}
222
/// True when the query text suggests an ordering/chronology question
/// (e.g. "timeline", "before", "after"). Case-insensitive substring match.
fn has_sequence_intent(query_text: &str) -> bool {
    const CUES: [&str; 6] = ["timeline", "before", "after", "first", "last", "sequence"];
    let lowered = query_text.to_ascii_lowercase();
    CUES.into_iter().any(|cue| lowered.contains(cue))
}
229
/// True when the query text asks about elapsed time or duration
/// (e.g. "how long", "duration"). Case-insensitive substring match.
fn has_duration_intent(query_text: &str) -> bool {
    const CUES: [&str; 5] = [
        "how long",
        "duration",
        "lasting",
        "long-running",
        "long running",
    ];
    let lowered = query_text.to_ascii_lowercase();
    CUES.into_iter().any(|cue| lowered.contains(cue))
}
242
/// True when the query text suggests recurrence or repetition
/// (e.g. "recurring", "every ..."). Case-insensitive substring match.
/// Note "every " carries a trailing space to avoid matching e.g. "everyone".
fn has_recurrence_intent(query_text: &str) -> bool {
    const CUES: [&str; 6] = ["recurring", "repeat", "repeats", "again", "every ", "habit"];
    let lowered = query_text.to_ascii_lowercase();
    CUES.into_iter().any(|cue| lowered.contains(cue))
}
249
/// True when the query text references session/episode boundaries
/// (e.g. "checkpoint", "handoff", "session"). Case-insensitive substring
/// match.
fn has_boundary_intent(query_text: &str) -> bool {
    const CUES: [&str; 5] = ["boundary", "handoff", "checkpoint", "session", "window"];
    let lowered = query_text.to_ascii_lowercase();
    CUES.into_iter().any(|cue| lowered.contains(cue))
}
256
/// Extra score when the record's episode context matches temporal intent
/// detected in the query text (sequence, duration, recurrence, or boundary
/// questions). Returns 0.0 for records without episode context.
fn temporal_context_bonus(record: &MemoryRecord, query: &RecallQuery) -> f32 {
    let Some(episode) = record.episode.as_ref() else {
        return 0.0;
    };

    let mut score = 0.0;
    // Sequence questions reward records linked into a chronology or causal
    // chain.
    if has_sequence_intent(&query.query_text)
        && (episode.previous_record_id.is_some()
            || episode.next_record_id.is_some()
            || !episode.causal_record_ids.is_empty())
    {
        score += 0.2;
    }
    // Duration questions: flat 0.15 plus up to 0.2 scaled by the hinted
    // duration, capped at 24 hours.
    if has_duration_intent(&query.query_text) {
        if let Some(duration_ms) = episode.duration_hint_ms() {
            let normalized_hours = (duration_ms as f32 / (60.0 * 60.0 * 1000.0)).min(24.0) / 24.0;
            score += 0.15 + (normalized_hours * 0.2);
        }
    }
    // Recurrence questions: reward having a recurrence key and, separately,
    // a known interval (flat 0.1 plus up to 0.1, interval capped at 30 days).
    if has_recurrence_intent(&query.query_text) {
        if episode.recurrence_key.is_some() {
            score += 0.2;
        }
        if let Some(interval_ms) = episode.recurrence_interval_ms {
            let normalized_days =
                (interval_ms as f32 / (24.0 * 60.0 * 60.0 * 1000.0)).min(30.0) / 30.0;
            score += 0.1 + (normalized_days * 0.1);
        }
    }
    // Boundary questions: reward an explicit boundary label and any
    // session/conversation scoping on the record.
    if has_boundary_intent(&query.query_text) {
        if episode.boundary_label.is_some() {
            score += 0.18;
        }
        if record.scope.session_id.is_some() || record.scope.conversation_id.is_some() {
            score += 0.08;
        }
    }
    score
}
296
/// Episode-derived relevance: exact episode-id filter match (+1.0),
/// unresolved-only matching (+0.35), query-term hits in the episode
/// haystack (+0.2 each), and continuity phrasings that are backed by the
/// corresponding link. Returns 0.0 for records without episode context.
fn episodic_score(record: &MemoryRecord, query: &RecallQuery, query_terms: &[String]) -> f32 {
    let Some(episode) = record.episode.as_ref() else {
        return 0.0;
    };

    let mut score = 0.0;
    // Strong boost when the query explicitly filters on this episode.
    if let Some(filter_episode_id) = &query.filters.episode_id
        && &episode.episode_id == filter_episode_id
    {
        score += 1.0;
    }

    if query.filters.unresolved_only && episode.continuity_state.is_unresolved() {
        score += 0.35;
    }

    // Substring match of each lowercased query term against the episode's
    // searchable text (see `episode_haystack`).
    let haystack = episode_haystack(episode);
    let term_matches = query_terms
        .iter()
        .filter(|term| haystack.contains(term.as_str()))
        .count() as f32;
    score += term_matches * 0.2;

    // Continuity phrasings only count when the matching link actually
    // exists on the episode.
    let lowered_query = query.query_text.to_ascii_lowercase();
    if lowered_query.contains("what changed") && episode.previous_record_id.is_some() {
        score += 0.25;
    }
    if lowered_query.contains("what happened next") && episode.next_record_id.is_some() {
        score += 0.25;
    }
    if lowered_query.contains("what led") && !episode.causal_record_ids.is_empty() {
        score += 0.25;
    }

    score
}
333
/// Salience contribution from episode metadata: reuse count (capped at 10),
/// novelty, goal relevance, and unresolved weight, plus small affective
/// urgency/tension bonuses when present. Returns 0.0 without episode
/// context.
fn salience_score(record: &MemoryRecord) -> f32 {
    let Some(episode) = record.episode.as_ref() else {
        return 0.0;
    };

    // Weighted blend; each fractional component is clamped to [0, 1] before
    // weighting, so the base sum stays bounded.
    let mut score = ((episode.salience.reuse_count.min(10) as f32) / 10.0) * 0.35
        + episode.salience.novelty_score.clamp(0.0, 1.0) * 0.15
        + episode.salience.goal_relevance.clamp(0.0, 1.0) * 0.35
        + episode.salience.unresolved_weight.clamp(0.0, 1.0) * 0.25;
    if let Some(affective) = &episode.affective {
        score += affective.urgency.clamp(0.0, 1.0) * 0.1;
        score += affective.tension.clamp(0.0, 1.0) * 0.05;
    }
    score
}
349
/// Splits `query_text` on whitespace and lowercases each term (ASCII only).
/// Whitespace-only input yields an empty vector.
fn tokenize_query(query_text: &str) -> Vec<String> {
    let mut terms = Vec::new();
    for raw_term in query_text.split_whitespace() {
        terms.push(raw_term.to_ascii_lowercase());
    }
    terms
}
356
/// True when the query text asks a continuity-style question
/// ("what changed", "what led", unresolved/follow-up phrasing).
/// Case-insensitive substring match.
fn has_continuity_intent(query_text: &str) -> bool {
    const CUES: [&str; 7] = [
        "what led",
        "what changed",
        "what happened next",
        "unresolved",
        "follow up",
        "follow-up",
        "same episode",
    ];
    let lowered = query_text.to_ascii_lowercase();
    CUES.into_iter().any(|cue| lowered.contains(cue))
}
371
372fn scope_matches_query(record: &MemoryRecord, query: &RecallQuery) -> bool {
373    if record.scope.tenant_id != query.scope.tenant_id
374        || record.scope.namespace != query.scope.namespace
375        || record.scope.actor_id != query.scope.actor_id
376    {
377        return false;
378    }
379
380    if query.scope.conversation_id.is_some()
381        && record.scope.conversation_id != query.scope.conversation_id
382    {
383        return false;
384    }
385
386    if query.scope.session_id.is_some() && record.scope.session_id != query.scope.session_id {
387        return false;
388    }
389
390    true
391}
392
/// Collects the graph relations connecting `record` to the current
/// expansion frontier (record ids in `frontier_ids`, episode ids in
/// `frontier_episode_ids`).
///
/// Graph expansion is a continuity-aware feature: any other planning
/// profile short-circuits to an empty list. The result is sorted (by the
/// enum's derived `Ord`) and deduplicated so callers see a canonical list.
fn graph_relations(
    record: &MemoryRecord,
    frontier_ids: &BTreeSet<String>,
    frontier_episode_ids: &BTreeSet<String>,
    profile: RecallPlanningProfile,
) -> Vec<GraphExpansionRelation> {
    if !matches!(profile, RecallPlanningProfile::ContinuityAware) {
        return Vec::new();
    }

    let mut relations = Vec::new();
    if let Some(episode) = &record.episode {
        // Shares an episode with something already on the frontier.
        if frontier_episode_ids.contains(&episode.episode_id) {
            relations.push(GraphExpansionRelation::EpisodeMembership);
        }
        // Previous/next chronology link points into the frontier.
        if episode
            .previous_record_id
            .as_ref()
            .is_some_and(|value| frontier_ids.contains(value))
            || episode
                .next_record_id
                .as_ref()
                .is_some_and(|value| frontier_ids.contains(value))
        {
            relations.push(GraphExpansionRelation::Chronology);
        }
        // Any causal link into the frontier.
        if episode
            .causal_record_ids
            .iter()
            .any(|value| frontier_ids.contains(value))
        {
            relations.push(GraphExpansionRelation::Causal);
        }
        // Any related-record link into the frontier.
        if episode
            .related_record_ids
            .iter()
            .any(|value| frontier_ids.contains(value))
        {
            relations.push(GraphExpansionRelation::Related);
        }
    }
    // Lineage links apply even for records without episode context.
    if record
        .lineage
        .iter()
        .any(|link| frontier_ids.contains(&link.record_id))
    {
        relations.push(GraphExpansionRelation::Lineage);
    }
    relations.sort();
    relations.dedup();
    relations
}
445
446fn graph_relation_bonus(relations: &[GraphExpansionRelation], hop: u8) -> f32 {
447    let strongest = relations
448        .iter()
449        .map(|relation| match relation {
450            GraphExpansionRelation::EpisodeMembership => 0.45,
451            GraphExpansionRelation::Chronology => 0.35,
452            GraphExpansionRelation::Causal => 0.32,
453            GraphExpansionRelation::Related => 0.24,
454            GraphExpansionRelation::Lineage => 0.28,
455        })
456        .fold(0.0, f32::max);
457    let stacked_bonus = relations.len().saturating_sub(1) as f32 * 0.04;
458    let hop_penalty = hop.saturating_sub(1) as f32 * 0.08;
459    (strongest + stacked_bonus - hop_penalty).max(0.0)
460}
461
462fn seed_candidate_sources(
463    hit: &RecallHit,
464    matched_terms: &[String],
465    empty_query: bool,
466) -> Vec<RecallCandidateSource> {
467    let mut sources = Vec::new();
468    if !matched_terms.is_empty() {
469        sources.push(RecallCandidateSource::Lexical);
470    }
471    if hit.breakdown.semantic > 0.0 {
472        sources.push(RecallCandidateSource::Semantic);
473    }
474    if hit.breakdown.metadata > 0.0 {
475        sources.push(RecallCandidateSource::Metadata);
476    }
477    if hit.breakdown.episodic > 0.0 {
478        sources.push(RecallCandidateSource::Episode);
479    }
480    if empty_query || hit.breakdown.temporal > 0.0 {
481        sources.push(RecallCandidateSource::Temporal);
482    }
483    sources.sort();
484    sources.dedup();
485    sources
486}
487
/// Combined provenance adjustment for a record: source match + trust level
/// + quality state + historical state, using the weight table for
/// `policy_profile`. The result may be negative (untrusted, suppressed,
/// or deleted records are penalized).
fn provenance_bonus(
    record: &MemoryRecord,
    query: &RecallQuery,
    profile: RecallPlanningProfile,
    policy_profile: RecallPolicyProfile,
) -> f32 {
    let weights = provenance_policy_weights(policy_profile);
    // Reward records originating from the same source as the query scope.
    let source_bonus = if record.scope.source == query.scope.source {
        weights.source_bonus
    } else {
        0.0
    };
    let trust_bonus = match record.scope.trust_level {
        MemoryTrustLevel::Pinned => weights.pinned_bonus,
        MemoryTrustLevel::Verified => weights.verified_bonus,
        MemoryTrustLevel::Observed => weights.observed_bonus,
        MemoryTrustLevel::Derived => weights.derived_bonus,
        MemoryTrustLevel::Untrusted => weights.untrusted_penalty,
    };
    let quality_bonus = match record.quality_state {
        MemoryQualityState::Verified => weights.verified_state_bonus,
        MemoryQualityState::Active => weights.active_state_bonus,
        MemoryQualityState::Draft => weights.draft_penalty,
        // Continuity-aware planning is softer on archived records, since
        // they may still complete an episode chain.
        MemoryQualityState::Archived => {
            if matches!(profile, RecallPlanningProfile::ContinuityAware) {
                weights.archived_penalty_continuity
            } else {
                weights.archived_penalty_fast_path
            }
        }
        MemoryQualityState::Suppressed => weights.suppressed_penalty,
        MemoryQualityState::Deleted => weights.deleted_penalty,
    };
    // Non-current records only get the bonus variant when the query
    // explicitly asks for historical-only results; otherwise they are
    // penalized.
    let historical_bonus = match record.historical_state {
        MemoryHistoricalState::Current => weights.current_bonus,
        MemoryHistoricalState::Historical => {
            if matches!(
                query.filters.historical_mode,
                crate::query::RecallHistoricalMode::HistoricalOnly
            ) {
                weights.historical_bonus
            } else {
                weights.historical_penalty
            }
        }
        MemoryHistoricalState::Superseded => {
            if matches!(
                query.filters.historical_mode,
                crate::query::RecallHistoricalMode::HistoricalOnly
            ) {
                weights.superseded_bonus
            } else {
                weights.superseded_penalty
            }
        }
    };
    source_bonus + trust_bonus + quality_bonus + historical_bonus
}
546
/// Default scorer: blends lexical, semantic, metadata, temporal, episodic,
/// salience, and importance signals, with weights chosen per
/// [`RecallScoringProfile`].
#[derive(Debug, Clone)]
pub struct ProfileRecallScorer {
    profile: RecallScoringProfile,
    policy_profile: RecallPolicyProfile,
    embedder: ConfiguredSemanticEmbedder,
}
553
impl ProfileRecallScorer {
    /// Creates a scorer with the `General` policy profile and semantic
    /// scoring disabled.
    pub fn new(profile: RecallScoringProfile) -> Self {
        Self::with_embedder(
            profile,
            RecallPolicyProfile::General,
            ConfiguredSemanticEmbedder::Disabled(Default::default()),
        )
    }

    /// Creates a scorer with an explicit policy profile and embedder
    /// configuration.
    pub fn with_embedder(
        profile: RecallScoringProfile,
        policy_profile: RecallPolicyProfile,
        embedder: ConfiguredSemanticEmbedder,
    ) -> Self {
        Self {
            profile,
            policy_profile,
            embedder,
        }
    }

    /// Convenience constructor wrapping a shared embedder implementation
    /// together with a provider note used for diagnostics.
    pub fn with_shared_embedder(
        profile: RecallScoringProfile,
        policy_profile: RecallPolicyProfile,
        embedder: Arc<dyn SemanticEmbedder>,
        provider_note: impl Into<String>,
    ) -> Self {
        Self::with_embedder(
            profile,
            policy_profile,
            ConfiguredSemanticEmbedder::shared(embedder, provider_note),
        )
    }

    /// The scoring profile this scorer was built with.
    pub fn profile(&self) -> RecallScoringProfile {
        self.profile
    }

    /// Per-profile component weights as `(lexical, semantic, policy)`.
    fn weights(&self) -> (f32, f32, f32) {
        match self.profile {
            RecallScoringProfile::Balanced => (1.0, 0.45, 1.0),
            RecallScoringProfile::LexicalFirst => (1.25, 0.25, 0.35),
            RecallScoringProfile::ImportanceFirst => (0.75, 0.35, 1.5),
        }
    }

    /// Metadata relevance: one point per record label containing any query
    /// term, plus a flat 0.5 when any term appears in the record's source.
    fn metadata_score(&self, record: &MemoryRecord, query_terms: &[String]) -> f32 {
        let source = record.scope.source.to_ascii_lowercase();
        let label_matches = record
            .scope
            .labels
            .iter()
            .map(|label| label.to_ascii_lowercase())
            .filter(|label| query_terms.iter().any(|term| label.contains(term)))
            .count() as f32;
        let source_bonus = if query_terms.iter().any(|term| source.contains(term)) {
            0.5
        } else {
            0.0
        };
        label_matches + source_bonus
    }

    /// Freshness in [0, 1] over a 7-day window (newer is higher), plus the
    /// temporal-context bonus. If the system clock reads before the Unix
    /// epoch, the record's own timestamp is used as "now" (age 0 -> full
    /// freshness).
    fn temporal_score(&self, record: &MemoryRecord, query: &RecallQuery) -> f32 {
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map(|duration| duration.as_millis() as u64)
            .unwrap_or(record.updated_at_unix_ms);
        let freshness_window_ms = 7 * 24 * 60 * 60 * 1000u64;
        // Age is clamped to the window so freshness bottoms out at 0.
        let age_ms = now
            .saturating_sub(record.updated_at_unix_ms)
            .min(freshness_window_ms) as f32;
        (1.0 - (age_ms / freshness_window_ms as f32)) + temporal_context_bonus(record, query)
    }
}
629
impl RecallScorer for ProfileRecallScorer {
    /// Scores `record` against `query`.
    ///
    /// An empty (whitespace-only) query is treated as a browse request:
    /// every record matches with a fixed temporal component of 1.0.
    /// Otherwise, a record with neither a lexical nor a semantic match is
    /// excluded (`None`).
    fn score(&self, record: &MemoryRecord, query: &RecallQuery) -> Option<ScoredRecallCandidate> {
        let (lexical_weight, semantic_weight, policy_weight) = self.weights();

        // Browse path: rank by the recency placeholder, episodic/salience
        // context, and record importance only.
        if query.query_text.trim().is_empty() {
            let episodic = episodic_score(record, query, &[]);
            let salience = salience_score(record);
            return Some(ScoredRecallCandidate {
                hit: RecallHit {
                    record: record.clone(),
                    breakdown: RecallScoreBreakdown {
                        lexical: 0.0,
                        semantic: 0.0,
                        graph: 0.0,
                        temporal: 1.0,
                        metadata: 0.0,
                        episodic,
                        salience,
                        curation: 0.0,
                        policy: record.importance_score,
                        total: 1.0
                            + (episodic * 0.35)
                            + (salience * 0.25)
                            + (record.importance_score * policy_weight),
                    },
                    explanation: None,
                },
                matched_terms: Vec::new(),
            });
        }

        // Lowercased query terms, matched as substrings against the
        // record's content plus summary.
        let query_terms = query
            .query_text
            .split_whitespace()
            .map(|term| term.to_ascii_lowercase())
            .collect::<Vec<_>>();
        let haystack = format!(
            "{} {}",
            record.content.to_ascii_lowercase(),
            record
                .summary
                .clone()
                .unwrap_or_default()
                .to_ascii_lowercase()
        );
        let semantic = semantic_similarity(&self.embedder, &query.query_text, &haystack);
        let matched_terms = query_terms
            .iter()
            .filter(|term| haystack.contains(term.as_str()))
            .cloned()
            .collect::<Vec<_>>();
        let lexical = matched_terms.len() as f32;
        // No lexical and no semantic signal: not a hit at all.
        if lexical == 0.0 && semantic == 0.0 {
            return None;
        }

        let metadata = self.metadata_score(record, &query_terms);
        let temporal = self.temporal_score(record, query);
        let episodic = episodic_score(record, query, &query_terms);
        let salience = salience_score(record);
        let policy = record.importance_score;
        // Fixed component weights, except lexical/semantic/policy which
        // come from the scoring profile.
        let total = (lexical * lexical_weight)
            + (semantic * semantic_weight)
            + (metadata * 0.4)
            + (temporal * 0.2)
            + (episodic * 0.35)
            + (salience * 0.25)
            + (policy * policy_weight);
        Some(ScoredRecallCandidate {
            hit: RecallHit {
                record: record.clone(),
                breakdown: RecallScoreBreakdown {
                    lexical,
                    semantic,
                    graph: 0.0,
                    temporal,
                    metadata,
                    episodic,
                    salience,
                    curation: 0.0,
                    policy,
                    total,
                },
                explanation: None,
            },
            matched_terms,
        })
    }

    fn scorer_kind(&self) -> RecallScorerKind {
        RecallScorerKind::Profile
    }

    fn scoring_profile(&self) -> RecallScoringProfile {
        self.profile
    }

    fn policy_profile(&self) -> RecallPolicyProfile {
        self.policy_profile
    }

    /// Static diagnostic tag for the active scoring profile.
    fn profile_note(&self) -> &'static str {
        match self.profile {
            RecallScoringProfile::Balanced => "scoring_profile=balanced",
            RecallScoringProfile::LexicalFirst => "scoring_profile=lexical_first",
            RecallScoringProfile::ImportanceFirst => "scoring_profile=importance_first",
        }
    }

    /// Static diagnostic tag for the active policy profile.
    fn policy_profile_note(&self) -> &'static str {
        match self.policy_profile {
            RecallPolicyProfile::General => "policy_profile=general",
            RecallPolicyProfile::Support => "policy_profile=support",
            RecallPolicyProfile::Research => "policy_profile=research",
            RecallPolicyProfile::Assistant => "policy_profile=assistant",
            RecallPolicyProfile::AutonomousAgent => "policy_profile=autonomous_agent",
        }
    }

    /// Provider note from the configured embedder, if any.
    fn embedding_note(&self) -> Option<String> {
        self.embedder.provider_note()
    }
}
753
/// Scorer variant that additionally folds curation signals (trust level and
/// quality state) into both a dedicated `curation` breakdown component and
/// the policy score.
#[derive(Debug, Clone)]
pub struct CuratedRecallScorer {
    profile: RecallScoringProfile,
    policy_profile: RecallPolicyProfile,
    embedder: ConfiguredSemanticEmbedder,
}
760
impl CuratedRecallScorer {
    /// Creates a scorer with the `General` policy profile and semantic
    /// scoring disabled.
    pub fn new(profile: RecallScoringProfile) -> Self {
        Self::with_embedder(
            profile,
            RecallPolicyProfile::General,
            ConfiguredSemanticEmbedder::Disabled(Default::default()),
        )
    }

    /// Creates a scorer with an explicit policy profile and embedder
    /// configuration.
    pub fn with_embedder(
        profile: RecallScoringProfile,
        policy_profile: RecallPolicyProfile,
        embedder: ConfiguredSemanticEmbedder,
    ) -> Self {
        Self {
            profile,
            policy_profile,
            embedder,
        }
    }

    /// Convenience constructor wrapping a shared embedder implementation
    /// together with a provider note used for diagnostics.
    pub fn with_shared_embedder(
        profile: RecallScoringProfile,
        policy_profile: RecallPolicyProfile,
        embedder: Arc<dyn SemanticEmbedder>,
        provider_note: impl Into<String>,
    ) -> Self {
        Self::with_embedder(
            profile,
            policy_profile,
            ConfiguredSemanticEmbedder::shared(embedder, provider_note),
        )
    }

    /// Per-profile component weights as `(lexical, semantic, policy)`.
    /// These differ from `ProfileRecallScorer::weights` — notably a higher
    /// policy weight, since the curated policy score carries the curation
    /// bonus.
    fn weights(&self) -> (f32, f32, f32) {
        match self.profile {
            RecallScoringProfile::Balanced => (1.0, 0.50, 1.0),
            RecallScoringProfile::LexicalFirst => (1.15, 0.30, 0.65),
            RecallScoringProfile::ImportanceFirst => (0.75, 0.40, 1.7),
        }
    }

    /// Curation adjustment from trust level plus quality state; may be
    /// negative (untrusted, archived, suppressed, or deleted records).
    fn curation_bonus(&self, record: &MemoryRecord) -> f32 {
        let trust_bonus = match record.scope.trust_level {
            MemoryTrustLevel::Pinned => 0.45,
            MemoryTrustLevel::Verified => 0.25,
            MemoryTrustLevel::Observed => 0.10,
            MemoryTrustLevel::Derived => 0.05,
            MemoryTrustLevel::Untrusted => -0.10,
        };
        let quality_bonus = match record.quality_state {
            MemoryQualityState::Verified => 0.20,
            MemoryQualityState::Active => 0.0,
            MemoryQualityState::Draft => -0.05,
            MemoryQualityState::Archived => -0.15,
            MemoryQualityState::Suppressed => -0.40,
            MemoryQualityState::Deleted => -0.50,
        };
        trust_bonus + quality_bonus
    }

    /// Metadata relevance: one point per record label containing any query
    /// term, plus 0.5 per query term found in the record source. Unlike
    /// `ProfileRecallScorer::metadata_score`, source hits are counted per
    /// term rather than granted as a single flat bonus.
    fn metadata_score(&self, record: &MemoryRecord, query_terms: &[String]) -> f32 {
        let source = record.scope.source.to_ascii_lowercase();
        let label_hits = record
            .scope
            .labels
            .iter()
            .map(|label| label.to_ascii_lowercase())
            .filter(|label| query_terms.iter().any(|term| label.contains(term)))
            .count() as f32;
        let source_hits = query_terms
            .iter()
            .filter(|term| source.contains(term.as_str()))
            .count() as f32;
        label_hits + (source_hits * 0.5)
    }

    /// Freshness in [0, 1] over a 30-day window — a longer horizon than the
    /// 7-day window used by `ProfileRecallScorer` — plus the
    /// temporal-context bonus. Falls back to the record's own timestamp
    /// when the system clock reads before the Unix epoch.
    fn temporal_score(&self, record: &MemoryRecord, query: &RecallQuery) -> f32 {
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map(|duration| duration.as_millis() as u64)
            .unwrap_or(record.updated_at_unix_ms);
        let freshness_window_ms = 30 * 24 * 60 * 60 * 1000u64;
        // Age is clamped to the window so freshness bottoms out at 0.
        let age_ms = now
            .saturating_sub(record.updated_at_unix_ms)
            .min(freshness_window_ms) as f32;
        (1.0 - (age_ms / freshness_window_ms as f32)) + temporal_context_bonus(record, query)
    }
}
850
impl RecallScorer for CuratedRecallScorer {
    /// Scores `record` against `query`, layering the curation bonus on top of
    /// the profile-derived weights.
    ///
    /// Returns `None` only for non-empty queries where the record matches no
    /// query term lexically AND has zero semantic similarity; empty queries
    /// always yield a candidate with temporal pinned to 1.0.
    fn score(&self, record: &MemoryRecord, query: &RecallQuery) -> Option<ScoredRecallCandidate> {
        // Profile-dependent (lexical, semantic, policy) weight triple.
        let (lexical_weight, semantic_weight, policy_weight) = self.weights();
        // Policy component: record importance plus the curation bonus, floored at zero.
        let curated_policy = (record.importance_score + self.curation_bonus(record)).max(0.0);

        if query.query_text.trim().is_empty() {
            // Browse-style (empty) query: no text to match, so lexical,
            // semantic, and metadata stay at zero; temporal is pinned to 1.0
            // and the total is built from episodic/salience/policy only.
            let episodic = episodic_score(record, query, &[]);
            let salience = salience_score(record);
            return Some(ScoredRecallCandidate {
                hit: RecallHit {
                    record: record.clone(),
                    breakdown: RecallScoreBreakdown {
                        lexical: 0.0,
                        semantic: 0.0,
                        graph: 0.0,
                        temporal: 1.0,
                        metadata: 0.0,
                        episodic,
                        salience,
                        curation: self.curation_bonus(record),
                        policy: curated_policy,
                        total: 1.0
                            + (episodic * 0.35)
                            + (salience * 0.25)
                            + (curated_policy * policy_weight),
                    },
                    explanation: None,
                },
                matched_terms: Vec::new(),
            });
        }

        // Lowercased whitespace tokens of the query text.
        let query_terms = query
            .query_text
            .split_whitespace()
            .map(|term| term.to_ascii_lowercase())
            .collect::<Vec<_>>();
        // Lexical haystack: content, summary (if any), and scope source, all lowercased.
        let haystack = format!(
            "{} {} {}",
            record.content.to_ascii_lowercase(),
            record
                .summary
                .clone()
                .unwrap_or_default()
                .to_ascii_lowercase(),
            record.scope.source.to_ascii_lowercase(),
        );
        // NOTE(review): the raw (un-lowercased) query text is embedded while the
        // haystack is lowercased — presumably the embedder normalizes case; confirm.
        let semantic = semantic_similarity(&self.embedder, &query.query_text, &haystack);
        // Substring containment per term; matched term count drives the lexical score.
        let matched_terms = query_terms
            .iter()
            .filter(|term| haystack.contains(term.as_str()))
            .cloned()
            .collect::<Vec<_>>();
        let lexical = matched_terms.len() as f32;
        if lexical == 0.0 && semantic == 0.0 {
            // No lexical or semantic evidence at all: not a candidate.
            return None;
        }

        let metadata = self.metadata_score(record, &query_terms);
        let temporal = self.temporal_score(record, query);
        let episodic = episodic_score(record, query, &query_terms);
        let salience = salience_score(record);
        let curation = self.curation_bonus(record);
        // Weighted sum; lexical/semantic/policy weights come from the scoring
        // profile, the remaining component weights are fixed constants.
        let total = (lexical * lexical_weight)
            + (semantic * semantic_weight)
            + (metadata * 0.45)
            + (temporal * 0.2)
            + (episodic * 0.35)
            + (salience * 0.25)
            + (curation * 0.5)
            + (curated_policy * policy_weight);
        Some(ScoredRecallCandidate {
            hit: RecallHit {
                record: record.clone(),
                breakdown: RecallScoreBreakdown {
                    lexical,
                    semantic,
                    graph: 0.0,
                    temporal,
                    metadata,
                    episodic,
                    salience,
                    curation,
                    policy: curated_policy,
                    total,
                },
                explanation: None,
            },
            matched_terms,
        })
    }

    fn scorer_kind(&self) -> RecallScorerKind {
        RecallScorerKind::Curated
    }

    fn scoring_profile(&self) -> RecallScoringProfile {
        self.profile
    }

    fn policy_profile(&self) -> RecallPolicyProfile {
        self.policy_profile
    }

    /// Stable diagnostic note identifying the active scoring profile.
    fn profile_note(&self) -> &'static str {
        match self.profile {
            RecallScoringProfile::Balanced => "scoring_profile=balanced",
            RecallScoringProfile::LexicalFirst => "scoring_profile=lexical_first",
            RecallScoringProfile::ImportanceFirst => "scoring_profile=importance_first",
        }
    }

    /// Stable diagnostic note identifying the active policy profile.
    fn policy_profile_note(&self) -> &'static str {
        match self.policy_profile {
            RecallPolicyProfile::General => "policy_profile=general",
            RecallPolicyProfile::Support => "policy_profile=support",
            RecallPolicyProfile::Research => "policy_profile=research",
            RecallPolicyProfile::Assistant => "policy_profile=assistant",
            RecallPolicyProfile::AutonomousAgent => "policy_profile=autonomous_agent",
        }
    }

    /// Provider note from the configured embedder, if it supplies one.
    fn embedding_note(&self) -> Option<String> {
        self.embedder.provider_note()
    }
}
977
/// Statically-dispatched wrapper over the two concrete recall scorer
/// implementations; the variant is selected from engine configuration.
#[derive(Debug, Clone)]
pub enum ConfiguredRecallScorer {
    /// Profile-weighted scoring; contributes no curation bonus.
    Profile(ProfileRecallScorer),
    /// Profile-weighted scoring plus per-record curation bonuses.
    Curated(CuratedRecallScorer),
}
983
984impl ConfiguredRecallScorer {
985    pub fn from_engine_config(config: &EngineConfig) -> Self {
986        let embedder = ConfiguredSemanticEmbedder::from_engine_config(config);
987        match config.recall_scorer_kind {
988            RecallScorerKind::Profile => Self::Profile(ProfileRecallScorer::with_embedder(
989                config.recall_scoring_profile,
990                config.recall_policy_profile,
991                embedder,
992            )),
993            RecallScorerKind::Curated => Self::Curated(CuratedRecallScorer::with_embedder(
994                config.recall_scoring_profile,
995                config.recall_policy_profile,
996                embedder,
997            )),
998        }
999    }
1000
1001    pub fn with_embedder(
1002        scorer_kind: RecallScorerKind,
1003        scoring_profile: RecallScoringProfile,
1004        policy_profile: RecallPolicyProfile,
1005        embedder: ConfiguredSemanticEmbedder,
1006    ) -> Self {
1007        match scorer_kind {
1008            RecallScorerKind::Profile => Self::Profile(ProfileRecallScorer::with_embedder(
1009                scoring_profile,
1010                policy_profile,
1011                embedder,
1012            )),
1013            RecallScorerKind::Curated => Self::Curated(CuratedRecallScorer::with_embedder(
1014                scoring_profile,
1015                policy_profile,
1016                embedder,
1017            )),
1018        }
1019    }
1020
1021    pub fn with_shared_embedder(
1022        scorer_kind: RecallScorerKind,
1023        scoring_profile: RecallScoringProfile,
1024        policy_profile: RecallPolicyProfile,
1025        embedder: Arc<dyn SemanticEmbedder>,
1026        provider_note: impl Into<String>,
1027    ) -> Self {
1028        Self::with_embedder(
1029            scorer_kind,
1030            scoring_profile,
1031            policy_profile,
1032            ConfiguredSemanticEmbedder::shared(embedder, provider_note),
1033        )
1034    }
1035
1036    fn curation_bonus_for(&self, record: &MemoryRecord) -> f32 {
1037        match self {
1038            Self::Profile(_) => 0.0,
1039            Self::Curated(scorer) => scorer.curation_bonus(record),
1040        }
1041    }
1042
1043    fn temporal_score_for(&self, record: &MemoryRecord, query: &RecallQuery) -> f32 {
1044        match self {
1045            Self::Profile(scorer) => scorer.temporal_score(record, query),
1046            Self::Curated(scorer) => scorer.temporal_score(record, query),
1047        }
1048    }
1049}
1050
1051impl RecallScorer for ConfiguredRecallScorer {
1052    fn score(&self, record: &MemoryRecord, query: &RecallQuery) -> Option<ScoredRecallCandidate> {
1053        match self {
1054            Self::Profile(scorer) => scorer.score(record, query),
1055            Self::Curated(scorer) => scorer.score(record, query),
1056        }
1057    }
1058
1059    fn scorer_kind(&self) -> RecallScorerKind {
1060        match self {
1061            Self::Profile(scorer) => scorer.scorer_kind(),
1062            Self::Curated(scorer) => scorer.scorer_kind(),
1063        }
1064    }
1065
1066    fn scoring_profile(&self) -> RecallScoringProfile {
1067        match self {
1068            Self::Profile(scorer) => scorer.scoring_profile(),
1069            Self::Curated(scorer) => scorer.scoring_profile(),
1070        }
1071    }
1072
1073    fn policy_profile(&self) -> RecallPolicyProfile {
1074        match self {
1075            Self::Profile(scorer) => scorer.policy_profile(),
1076            Self::Curated(scorer) => scorer.policy_profile(),
1077        }
1078    }
1079
1080    fn profile_note(&self) -> &'static str {
1081        match self {
1082            Self::Profile(scorer) => scorer.profile_note(),
1083            Self::Curated(scorer) => scorer.profile_note(),
1084        }
1085    }
1086
1087    fn policy_profile_note(&self) -> &'static str {
1088        match self {
1089            Self::Profile(scorer) => scorer.policy_profile_note(),
1090            Self::Curated(scorer) => scorer.policy_profile_note(),
1091        }
1092    }
1093
1094    fn embedding_note(&self) -> Option<String> {
1095        match self {
1096            Self::Profile(scorer) => scorer.embedding_note(),
1097            Self::Curated(scorer) => scorer.embedding_note(),
1098        }
1099    }
1100}
1101
impl RecallPlanner {
    /// Builds a planner from engine configuration: planning profile, graph
    /// expansion hop budget, and the configured scorer.
    pub fn from_engine_config(config: &EngineConfig) -> Self {
        Self {
            profile: config.recall_planning_profile,
            graph_expansion_max_hops: config.graph_expansion_max_hops,
            scorer: ConfiguredRecallScorer::from_engine_config(config),
        }
    }

    /// Constructor with an explicit, pre-built scorer.
    pub fn with_scorer(
        profile: RecallPlanningProfile,
        graph_expansion_max_hops: u8,
        scorer: ConfiguredRecallScorer,
    ) -> Self {
        Self {
            profile,
            graph_expansion_max_hops,
            scorer,
        }
    }

    /// Convenience constructor that builds the scorer around a shared
    /// embedder trait object plus a provider note.
    pub fn with_shared_embedder(
        profile: RecallPlanningProfile,
        graph_expansion_max_hops: u8,
        scorer_kind: RecallScorerKind,
        scoring_profile: RecallScoringProfile,
        policy_profile: RecallPolicyProfile,
        embedder: Arc<dyn SemanticEmbedder>,
        provider_note: impl Into<String>,
    ) -> Self {
        Self::with_scorer(
            profile,
            graph_expansion_max_hops,
            ConfiguredRecallScorer::with_shared_embedder(
                scorer_kind,
                scoring_profile,
                policy_profile,
                embedder,
                provider_note,
            ),
        )
    }

    /// Returns a clone of the planner's configured scorer.
    pub fn scorer(&self) -> ConfiguredRecallScorer {
        self.scorer.clone()
    }

    /// Resolves the planning profile for this query: continuity-aware when
    /// the planner is configured that way, or when the query targets an
    /// episode, asks for unresolved items only, or its text carries
    /// continuity intent; otherwise the fast path.
    pub fn effective_profile(&self, query: &RecallQuery) -> RecallPlanningProfile {
        if matches!(self.profile, RecallPlanningProfile::ContinuityAware)
            || query.filters.episode_id.is_some()
            || query.filters.unresolved_only
            || has_continuity_intent(&query.query_text)
        {
            RecallPlanningProfile::ContinuityAware
        } else {
            RecallPlanningProfile::FastPath
        }
    }

    /// Folds planner-stage adjustments into an already-scored hit.
    ///
    /// `graph_bonus` is added to both the graph component and the total only
    /// when strictly positive. `provenance_delta` may be negative: the policy
    /// component is floored at zero after applying it, but the total always
    /// takes the full, unclamped delta — note the asymmetry when the floor kicks in.
    fn apply_overlay(&self, hit: &mut RecallHit, graph_bonus: f32, provenance_delta: f32) {
        if graph_bonus > 0.0 {
            hit.breakdown.graph += graph_bonus;
            hit.breakdown.total += graph_bonus;
        }
        if provenance_delta != 0.0 {
            hit.breakdown.policy = (hit.breakdown.policy + provenance_delta).max(0.0);
            hit.breakdown.total += provenance_delta;
        }
    }

    /// Builds a candidate for a record reached purely through graph expansion
    /// (i.e. the scorer produced no lexical/semantic match): lexical,
    /// semantic, and metadata are zero, and the total is assembled from the
    /// graph bonus plus fixed-weight temporal/episodic/salience/curation/
    /// policy contributions.
    fn contextual_candidate(
        &self,
        record: &MemoryRecord,
        query: &RecallQuery,
        query_terms: &[String],
        graph_bonus: f32,
        provenance_delta: f32,
    ) -> PlannedRecallCandidate {
        let temporal = self.scorer.temporal_score_for(record, query);
        let episodic = episodic_score(record, query, query_terms);
        let salience = salience_score(record);
        let curation = self.scorer.curation_bonus_for(record);
        // Policy mirrors the overlay rule: importance plus delta, floored at zero.
        let policy = (record.importance_score + provenance_delta).max(0.0);
        let total = graph_bonus
            + (temporal * 0.15)
            + (episodic * 0.35)
            + (salience * 0.25)
            + (curation * 0.25)
            + (policy * 0.5);

        // Record every source that contributed; sorted + deduped for stable output.
        let mut candidate_sources = vec![RecallCandidateSource::Graph];
        if episodic > 0.0 {
            candidate_sources.push(RecallCandidateSource::Episode);
        }
        if temporal > 0.0 {
            candidate_sources.push(RecallCandidateSource::Temporal);
        }
        if provenance_delta != 0.0 {
            candidate_sources.push(RecallCandidateSource::Provenance);
        }
        candidate_sources.sort();
        candidate_sources.dedup();

        PlannedRecallCandidate {
            hit: RecallHit {
                record: record.clone(),
                breakdown: RecallScoreBreakdown {
                    lexical: 0.0,
                    semantic: 0.0,
                    graph: graph_bonus,
                    temporal,
                    metadata: 0.0,
                    episodic,
                    salience,
                    curation,
                    policy,
                    total,
                },
                explanation: None,
            },
            matched_terms: Vec::new(),
            candidate_sources,
            planner_stage: RecallPlannerStage::GraphExpansion,
        }
    }

    /// Plans recall candidates, discarding the timing/count metrics.
    pub fn plan(
        &self,
        records: &[MemoryRecord],
        query: &RecallQuery,
    ) -> Vec<PlannedRecallCandidate> {
        self.plan_with_metrics(records, query).0
    }

    /// Two-stage recall planning with per-stage metrics.
    ///
    /// Stage 1 (candidate generation): every scope-matching record the scorer
    /// accepts becomes a seed candidate, with a provenance delta overlaid.
    ///
    /// Stage 2 (graph expansion; continuity-aware profile with a non-zero hop
    /// budget only): starting from the seed records/episodes, up to
    /// `graph_expansion_max_hops` breadth-first hops admit records related by
    /// graph/episode links; each hop's newly admitted candidates become the
    /// next frontier. Records the scorer rejects are still admitted via
    /// `contextual_candidate` when they carry a positive graph bonus.
    pub fn plan_with_metrics(
        &self,
        records: &[MemoryRecord],
        query: &RecallQuery,
    ) -> (Vec<PlannedRecallCandidate>, RecallPlannerMetrics) {
        let total_started = Instant::now();
        let empty_query = query.query_text.trim().is_empty();
        let effective_profile = self.effective_profile(query);
        let query_terms = tokenize_query(&query.query_text);
        // Keyed by record id; BTreeMap gives deterministic iteration/output order.
        let mut candidates = BTreeMap::<String, PlannedRecallCandidate>::new();

        let candidate_generation_started = Instant::now();

        for record in records {
            if !scope_matches_query(record, query) {
                continue;
            }
            if let Some(scored) = self.scorer.score(record, query) {
                let provenance_delta = provenance_bonus(
                    record,
                    query,
                    effective_profile,
                    self.scorer.policy_profile(),
                );
                let mut hit = scored.hit;
                // Seed stage: no graph bonus yet, only the provenance overlay.
                self.apply_overlay(&mut hit, 0.0, provenance_delta);
                let mut candidate_sources =
                    seed_candidate_sources(&hit, &scored.matched_terms, empty_query);
                if provenance_delta != 0.0 {
                    candidate_sources.push(RecallCandidateSource::Provenance);
                }
                candidate_sources.sort();
                candidate_sources.dedup();
                candidates.insert(
                    record.id.clone(),
                    PlannedRecallCandidate {
                        hit,
                        matched_terms: scored.matched_terms,
                        candidate_sources,
                        planner_stage: RecallPlannerStage::CandidateGeneration,
                    },
                );
            }
        }

        let candidate_generation_ns = candidate_generation_started.elapsed().as_nanos();
        let seeded_candidates = candidates.len();
        let mut graph_expansion_ns = 0;
        let mut expanded_candidates = 0usize;
        let mut hops_applied = 0u8;

        if matches!(effective_profile, RecallPlanningProfile::ContinuityAware)
            && self.graph_expansion_max_hops > 0
        {
            let graph_expansion_started = Instant::now();
            // Frontier tracks both the record ids and the episode ids of the
            // most recently admitted candidates.
            let mut frontier_ids = candidates.keys().cloned().collect::<BTreeSet<_>>();
            let mut frontier_episode_ids = candidates
                .values()
                .filter_map(|candidate| {
                    candidate
                        .hit
                        .record
                        .episode
                        .as_ref()
                        .map(|episode| episode.episode_id.clone())
                })
                .collect::<BTreeSet<_>>();

            for hop in 1..=self.graph_expansion_max_hops {
                if frontier_ids.is_empty() {
                    break;
                }

                let mut next_frontier_ids = BTreeSet::new();
                let mut next_frontier_episode_ids = BTreeSet::new();

                for record in records {
                    // Already-admitted records are never revisited on later hops.
                    if !scope_matches_query(record, query) || candidates.contains_key(&record.id) {
                        continue;
                    }

                    let relations = graph_relations(
                        record,
                        &frontier_ids,
                        &frontier_episode_ids,
                        effective_profile,
                    );
                    if relations.is_empty() {
                        continue;
                    }

                    // Bonus decays with hop distance; zero bonus means skip.
                    let graph_bonus = graph_relation_bonus(&relations, hop);
                    if graph_bonus <= 0.0 {
                        continue;
                    }

                    let provenance_delta = provenance_bonus(
                        record,
                        query,
                        effective_profile,
                        self.scorer.policy_profile(),
                    );
                    // Prefer the scorer's own breakdown when it accepts the
                    // record; otherwise fall back to a context-only candidate.
                    let planned = if let Some(scored) = self.scorer.score(record, query) {
                        let mut hit = scored.hit;
                        self.apply_overlay(&mut hit, graph_bonus, provenance_delta);
                        let mut candidate_sources =
                            seed_candidate_sources(&hit, &scored.matched_terms, empty_query);
                        candidate_sources.push(RecallCandidateSource::Graph);
                        if hit.breakdown.episodic > 0.0 {
                            candidate_sources.push(RecallCandidateSource::Episode);
                        }
                        if provenance_delta != 0.0 {
                            candidate_sources.push(RecallCandidateSource::Provenance);
                        }
                        candidate_sources.sort();
                        candidate_sources.dedup();
                        PlannedRecallCandidate {
                            hit,
                            matched_terms: scored.matched_terms,
                            candidate_sources,
                            planner_stage: RecallPlannerStage::GraphExpansion,
                        }
                    } else {
                        self.contextual_candidate(
                            record,
                            query,
                            &query_terms,
                            graph_bonus,
                            provenance_delta,
                        )
                    };

                    // Newly admitted candidates feed the next hop's frontier.
                    if let Some(episode) = &planned.hit.record.episode {
                        next_frontier_episode_ids.insert(episode.episode_id.clone());
                    }
                    next_frontier_ids.insert(planned.hit.record.id.clone());
                    candidates.insert(record.id.clone(), planned);
                    expanded_candidates += 1;
                }

                if next_frontier_ids.is_empty() {
                    break;
                }

                // A hop only counts when it admitted at least one new candidate.
                hops_applied = hop;
                frontier_ids = next_frontier_ids;
                frontier_episode_ids = next_frontier_episode_ids;
            }

            graph_expansion_ns = graph_expansion_started.elapsed().as_nanos();
        }

        (
            candidates.into_values().collect(),
            RecallPlannerMetrics {
                candidate_generation_ns,
                graph_expansion_ns,
                total_ns: total_started.elapsed().as_nanos(),
                seeded_candidates,
                expanded_candidates,
                hops_applied,
            },
        )
    }
}
1401
1402#[cfg(test)]
1403mod tests {
1404    #![allow(clippy::field_reassign_with_default)]
1405
1406    use super::{
1407        ConfiguredRecallScorer, CuratedRecallScorer, ProfileRecallScorer, RecallPlanner,
1408        RecallScorer, provenance_bonus,
1409    };
1410    use crate::config::{
1411        EmbeddingProviderKind, EngineConfig, RecallPlanningProfile, RecallPolicyProfile,
1412        RecallScorerKind, RecallScoringProfile,
1413    };
1414    use crate::embedding::{EmbeddingVector, SemanticEmbedder};
1415    use crate::model::{
1416        EPISODE_SCHEMA_VERSION, EpisodeContext, EpisodeContinuityState, EpisodeSalience,
1417        LineageLink, LineageRelationKind, MemoryHistoricalState, MemoryQualityState, MemoryRecord,
1418        MemoryRecordKind, MemoryScope, MemoryTrustLevel,
1419    };
1420    use crate::query::{RecallCandidateSource, RecallFilters, RecallPlannerStage, RecallQuery};
1421    use std::collections::BTreeMap;
1422    use std::sync::Arc;
1423
    // Deterministic two-dimensional test embedder; the `embed` impl below maps
    // "storm" text and everything else onto orthogonal axes.
    #[derive(Debug)]
    struct FixtureEmbedder;
1426
1427    impl SemanticEmbedder for FixtureEmbedder {
1428        fn provider_kind(&self) -> EmbeddingProviderKind {
1429            EmbeddingProviderKind::Disabled
1430        }
1431
1432        fn dimensions(&self) -> usize {
1433            2
1434        }
1435
1436        fn embed(&self, text: &str) -> EmbeddingVector {
1437            if text.to_ascii_lowercase().contains("storm") {
1438                EmbeddingVector {
1439                    values: vec![1.0, 0.0],
1440                }
1441            } else {
1442                EmbeddingVector {
1443                    values: vec![0.0, 1.0],
1444                }
1445            }
1446        }
1447    }
1448
1449    fn scope() -> MemoryScope {
1450        MemoryScope {
1451            tenant_id: "default".to_string(),
1452            namespace: "conversation".to_string(),
1453            actor_id: "ava".to_string(),
1454            conversation_id: Some("thread-a".to_string()),
1455            session_id: Some("session-a".to_string()),
1456            source: "test".to_string(),
1457            labels: vec![],
1458            trust_level: MemoryTrustLevel::Verified,
1459        }
1460    }
1461
1462    fn query() -> RecallQuery {
1463        RecallQuery {
1464            scope: scope(),
1465            query_text: "storm checklist".to_string(),
1466            max_items: 5,
1467            token_budget: None,
1468            filters: RecallFilters::default(),
1469            include_explanation: false,
1470        }
1471    }
1472
    // The engine-config constructor must propagate the configured recall
    // policy profile into the scorer and into its diagnostic note string.
    #[test]
    fn configured_scorer_uses_engine_policy_profile() {
        let mut config = EngineConfig::default();
        config.recall_policy_profile = RecallPolicyProfile::Research;

        let scorer = ConfiguredRecallScorer::from_engine_config(&config);
        assert_eq!(scorer.policy_profile(), RecallPolicyProfile::Research);
        assert_eq!(scorer.policy_profile_note(), "policy_profile=research");
    }
1482
    // Under the Support policy, a current + verified record must receive a
    // larger provenance bonus than the same record in a historical/archived state.
    #[test]
    fn support_policy_prefers_current_verified_records() {
        let mut current = record("current", "storm guidance", "storm guidance", 0.5);
        current.scope.trust_level = MemoryTrustLevel::Verified;
        current.quality_state = MemoryQualityState::Verified;
        current.historical_state = MemoryHistoricalState::Current;

        // Identical record except for historical/archived state.
        let mut historical = current.clone();
        historical.id = "historical".to_string();
        historical.historical_state = MemoryHistoricalState::Historical;
        historical.quality_state = MemoryQualityState::Archived;

        let query = query();
        let current_bonus = provenance_bonus(
            &current,
            &query,
            RecallPlanningProfile::FastPath,
            RecallPolicyProfile::Support,
        );
        let historical_bonus = provenance_bonus(
            &historical,
            &query,
            RecallPlanningProfile::FastPath,
            RecallPolicyProfile::Support,
        );

        assert!(current_bonus > historical_bonus);
    }
1511
    // The Research policy must grant a historical/archived record a larger
    // provenance bonus than the Support policy does for the same record.
    #[test]
    fn research_policy_is_more_tolerant_of_historical_context() {
        let mut historical = record("historical", "storm history", "storm history", 0.5);
        historical.historical_state = MemoryHistoricalState::Historical;
        historical.quality_state = MemoryQualityState::Archived;

        let query = query();
        let support_bonus = provenance_bonus(
            &historical,
            &query,
            RecallPlanningProfile::FastPath,
            RecallPolicyProfile::Support,
        );
        let research_bonus = provenance_bonus(
            &historical,
            &query,
            RecallPlanningProfile::FastPath,
            RecallPolicyProfile::Research,
        );

        assert!(research_bonus > support_bonus);
    }
1534
    // Under the Support policy a superseded record that conflicts with the
    // current one is penalized by default, and recovers part of its bonus only
    // when the query explicitly asks for historical-only results.
    #[test]
    fn support_policy_penalizes_superseded_conflicts_until_history_is_requested() {
        let mut current = record(
            "current",
            "storm rollback is disabled",
            "rollback disabled",
            0.7,
        );
        current.scope.trust_level = MemoryTrustLevel::Verified;
        current.quality_state = MemoryQualityState::Verified;
        current.historical_state = MemoryHistoricalState::Current;

        // Superseded clone that explicitly conflicts with the current record.
        let mut superseded = current.clone();
        superseded.id = "superseded".to_string();
        superseded.historical_state = MemoryHistoricalState::Superseded;
        superseded.quality_state = MemoryQualityState::Archived;
        superseded.lineage = vec![LineageLink {
            record_id: current.id.clone(),
            relation: LineageRelationKind::ConflictsWith,
            confidence: 0.92,
        }];

        let current_query = query();
        let mut historical_query = query();
        historical_query.filters.historical_mode =
            crate::query::RecallHistoricalMode::HistoricalOnly;

        let current_bonus = provenance_bonus(
            &current,
            &current_query,
            RecallPlanningProfile::FastPath,
            RecallPolicyProfile::Support,
        );
        let superseded_default_bonus = provenance_bonus(
            &superseded,
            &current_query,
            RecallPlanningProfile::FastPath,
            RecallPolicyProfile::Support,
        );
        let superseded_historical_bonus = provenance_bonus(
            &superseded,
            &historical_query,
            RecallPlanningProfile::FastPath,
            RecallPolicyProfile::Support,
        );

        assert!(current_bonus > superseded_default_bonus);
        assert!(superseded_historical_bonus > superseded_default_bonus);
        assert_eq!(
            superseded.lineage[0].relation,
            LineageRelationKind::ConflictsWith
        );
    }
1588
1589    fn record(id: &str, content: &str, summary: &str, importance_score: f32) -> MemoryRecord {
1590        MemoryRecord {
1591            id: id.to_string(),
1592            scope: scope(),
1593            kind: MemoryRecordKind::Fact,
1594            content: content.to_string(),
1595            summary: Some(summary.to_string()),
1596            source_id: None,
1597            metadata: BTreeMap::new(),
1598            quality_state: MemoryQualityState::Active,
1599            created_at_unix_ms: 1,
1600            updated_at_unix_ms: 1,
1601            expires_at_unix_ms: None,
1602            importance_score,
1603            artifact: None,
1604            episode: None,
1605            historical_state: MemoryHistoricalState::Current,
1606            lineage: Vec::new(),
1607            conflict: None,
1608        }
1609    }
1610
    // A record carrying an open episode with non-zero salience must surface
    // positive episodic and salience components in the score breakdown when
    // the query targets that episode with unresolved_only set.
    #[test]
    fn episodic_and_salience_scores_are_reported_when_present() {
        let mut episodic = record(
            "episodic",
            "storm response open loop",
            "storm response",
            0.3,
        );
        episodic.episode = Some(EpisodeContext {
            schema_version: EPISODE_SCHEMA_VERSION,
            episode_id: "storm-episode".to_string(),
            summary: Some("storm response episode".to_string()),
            continuity_state: EpisodeContinuityState::Open,
            actor_ids: vec!["ava".to_string()],
            goal: Some("close the storm remediation checklist".to_string()),
            outcome: None,
            started_at_unix_ms: Some(1),
            ended_at_unix_ms: None,
            last_active_unix_ms: Some(2),
            recurrence_key: None,
            recurrence_interval_ms: None,
            boundary_label: None,
            previous_record_id: Some("storm-previous".to_string()),
            next_record_id: Some("storm-next".to_string()),
            causal_record_ids: vec!["storm-root".to_string()],
            related_record_ids: vec!["storm-related".to_string()],
            linked_artifact_uris: vec![],
            salience: EpisodeSalience {
                reuse_count: 5,
                novelty_score: 0.4,
                goal_relevance: 0.9,
                unresolved_weight: 0.8,
            },
            affective: None,
        });
        let mut query = query();
        query.filters.episode_id = Some("storm-episode".to_string());
        query.filters.unresolved_only = true;

        let scored = ProfileRecallScorer::new(RecallScoringProfile::Balanced)
            .score(&episodic, &query)
            .unwrap();

        assert!(scored.hit.breakdown.episodic > 0.0);
        assert!(scored.hit.breakdown.salience > 0.0);
    }
1657
    // Continuity-flavored query text ("what led to…", "what changed…",
    // "what happened next…") and the unresolved_only filter must both upgrade
    // the planner to the continuity-aware profile and produce a positive
    // episodic component for an open-episode record.
    #[test]
    fn continuity_intent_queries_trigger_episode_reasoning_and_continuity_planning() {
        let mut episodic = record(
            "episodic",
            "storm response open loop",
            "storm response",
            0.3,
        );
        episodic.episode = Some(EpisodeContext {
            schema_version: EPISODE_SCHEMA_VERSION,
            episode_id: "storm-episode".to_string(),
            summary: Some("storm response episode".to_string()),
            continuity_state: EpisodeContinuityState::Open,
            actor_ids: vec!["ava".to_string()],
            goal: Some("close the storm remediation checklist".to_string()),
            outcome: None,
            started_at_unix_ms: Some(1),
            ended_at_unix_ms: None,
            last_active_unix_ms: Some(2),
            recurrence_key: None,
            recurrence_interval_ms: None,
            boundary_label: None,
            previous_record_id: Some("storm-previous".to_string()),
            next_record_id: Some("storm-next".to_string()),
            causal_record_ids: vec!["storm-root".to_string()],
            related_record_ids: vec!["storm-related".to_string()],
            linked_artifact_uris: vec![],
            salience: EpisodeSalience {
                reuse_count: 5,
                novelty_score: 0.4,
                goal_relevance: 0.9,
                unresolved_weight: 0.8,
            },
            affective: None,
        });

        let planner = RecallPlanner::from_engine_config(&EngineConfig::default());
        for query_text in [
            "what led to this storm state",
            "what changed in the storm episode",
            "what happened next in the storm episode",
        ] {
            let query = RecallQuery {
                query_text: query_text.to_string(),
                ..query()
            };
            let scored = ProfileRecallScorer::new(RecallScoringProfile::Balanced)
                .score(&episodic, &query)
                .unwrap();
            assert_eq!(
                planner.effective_profile(&query),
                RecallPlanningProfile::ContinuityAware
            );
            assert!(scored.hit.breakdown.episodic > 0.0);
        }

        // The unresolved_only filter alone must also trigger continuity planning.
        let mut unresolved_query = query();
        unresolved_query.filters.unresolved_only = true;
        let unresolved_scored = ProfileRecallScorer::new(RecallScoringProfile::Balanced)
            .score(&episodic, &unresolved_query)
            .unwrap();
        assert_eq!(
            planner.effective_profile(&unresolved_query),
            RecallPlanningProfile::ContinuityAware
        );
        assert!(unresolved_scored.hit.breakdown.episodic > 0.0);
    }
1725
1726    #[test]
1727    fn profile_scorer_changes_total_weighting() {
1728        let lexical = record(
1729            "lexical",
1730            "storm mitigation storm checklist",
1731            "storm checklist",
1732            0.1,
1733        );
1734        let importance = record("importance", "storm memo", "storm memo", 0.95);
1735        let query = query();
1736
1737        let lexical_first = ProfileRecallScorer::new(RecallScoringProfile::LexicalFirst);
1738        let importance_first = ProfileRecallScorer::new(RecallScoringProfile::ImportanceFirst);
1739
1740        let lexical_first_lexical = lexical_first.score(&lexical, &query).unwrap();
1741        let lexical_first_importance = lexical_first.score(&importance, &query).unwrap();
1742        assert!(
1743            lexical_first_lexical.hit.breakdown.total
1744                > lexical_first_importance.hit.breakdown.total
1745        );
1746
1747        let importance_first_lexical = importance_first.score(&lexical, &query).unwrap();
1748        let importance_first_importance = importance_first.score(&importance, &query).unwrap();
1749        assert!(
1750            importance_first_importance.hit.breakdown.total
1751                > importance_first_lexical.hit.breakdown.total
1752        );
1753    }
1754
1755    #[test]
1756    fn curated_scorer_rewards_verified_high_trust_records() {
1757        let lexical = record(
1758            "lexical",
1759            "storm mitigation storm checklist",
1760            "storm checklist",
1761            0.1,
1762        );
1763        let mut curated = record("curated", "storm memo", "storm memo", 0.4);
1764        curated.scope.trust_level = crate::model::MemoryTrustLevel::Pinned;
1765        curated.quality_state = crate::model::MemoryQualityState::Verified;
1766        let query = query();
1767
1768        let scorer = CuratedRecallScorer::new(RecallScoringProfile::ImportanceFirst);
1769        let lexical_score = scorer.score(&lexical, &query).unwrap();
1770        let curated_score = scorer.score(&curated, &query).unwrap();
1771        assert!(curated_score.hit.breakdown.total > lexical_score.hit.breakdown.total);
1772    }
1773
1774    #[test]
1775    fn configured_scorer_uses_engine_config_kind() {
1776        let mut config = EngineConfig::default();
1777        config.recall_scorer_kind = RecallScorerKind::Curated;
1778        config.recall_scoring_profile = RecallScoringProfile::ImportanceFirst;
1779        let scorer = ConfiguredRecallScorer::from_engine_config(&config);
1780        assert_eq!(scorer.scorer_kind(), RecallScorerKind::Curated);
1781        assert_eq!(
1782            scorer.scoring_profile(),
1783            RecallScoringProfile::ImportanceFirst
1784        );
1785    }
1786
1787    #[test]
1788    fn deterministic_embedder_populates_semantic_score() {
1789        let mut config = EngineConfig::default();
1790        config.embedding_provider_kind = EmbeddingProviderKind::DeterministicLocal;
1791        config.embedding_dimensions = 64;
1792        let scorer = ConfiguredRecallScorer::from_engine_config(&config);
1793        let scored = scorer
1794            .score(
1795                &record(
1796                    "semantic",
1797                    "storm checklist remediation notes",
1798                    "storm remediation",
1799                    0.4,
1800                ),
1801                &query(),
1802            )
1803            .unwrap();
1804        assert!(scored.hit.breakdown.semantic > 0.0);
1805        assert_eq!(
1806            scorer.embedding_note(),
1807            Some("embedding_provider=deterministic_local".to_string())
1808        );
1809    }
1810
1811    #[test]
1812    fn planner_can_use_shared_embedder_and_report_provider_note() {
1813        let planner = RecallPlanner::with_shared_embedder(
1814            RecallPlanningProfile::FastPath,
1815            0,
1816            RecallScorerKind::Profile,
1817            RecallScoringProfile::Balanced,
1818            RecallPolicyProfile::General,
1819            Arc::new(FixtureEmbedder),
1820            "embedding_provider=fixture_custom",
1821        );
1822
1823        let planned = planner.plan(
1824            &[record(
1825                "semantic",
1826                "storm checklist remediation notes",
1827                "storm remediation",
1828                0.4,
1829            )],
1830            &query(),
1831        );
1832
1833        assert_eq!(
1834            planner.scorer().embedding_note(),
1835            Some("embedding_provider=fixture_custom".to_string())
1836        );
1837        assert_eq!(planned.len(), 1);
1838        assert!(planned[0].hit.breakdown.semantic > 0.0);
1839    }
1840
    #[test]
    fn continuity_planner_expands_graph_neighbors() {
        let mut config = EngineConfig::default();
        config.recall_planning_profile = RecallPlanningProfile::ContinuityAware;
        let planner = RecallPlanner::from_engine_config(&config);

        // Seed matches the query lexically and links forward to "neighbor"
        // via next_record_id and related_record_ids.
        let mut seed = record(
            "seed",
            "storm checklist remediation",
            "storm checklist",
            0.5,
        );
        seed.episode = Some(EpisodeContext {
            schema_version: EPISODE_SCHEMA_VERSION,
            episode_id: "storm-episode".to_string(),
            summary: Some("storm remediation thread".to_string()),
            continuity_state: EpisodeContinuityState::Open,
            actor_ids: vec!["ava".to_string()],
            goal: Some("close follow-up actions".to_string()),
            outcome: None,
            started_at_unix_ms: Some(1),
            ended_at_unix_ms: None,
            last_active_unix_ms: Some(2),
            recurrence_key: None,
            recurrence_interval_ms: None,
            boundary_label: None,
            previous_record_id: None,
            next_record_id: Some("neighbor".to_string()),
            causal_record_ids: vec![],
            related_record_ids: vec!["neighbor".to_string()],
            linked_artifact_uris: vec![],
            salience: EpisodeSalience::default(),
            affective: None,
        });

        // Neighbor has no lexical overlap with the query; it can only enter
        // the result set through graph expansion.
        let mut neighbor = record("neighbor", "follow-up note", "follow-up note", 0.2);
        neighbor.episode = Some(EpisodeContext {
            schema_version: EPISODE_SCHEMA_VERSION,
            episode_id: "storm-episode".to_string(),
            summary: Some("storm remediation thread".to_string()),
            continuity_state: EpisodeContinuityState::Open,
            actor_ids: vec!["ava".to_string()],
            goal: Some("close follow-up actions".to_string()),
            outcome: None,
            started_at_unix_ms: Some(2),
            ended_at_unix_ms: None,
            last_active_unix_ms: Some(3),
            recurrence_key: None,
            recurrence_interval_ms: None,
            boundary_label: None,
            previous_record_id: Some("seed".to_string()),
            next_record_id: None,
            causal_record_ids: vec!["seed".to_string()],
            related_record_ids: vec![],
            linked_artifact_uris: vec![],
            salience: EpisodeSalience::default(),
            affective: None,
        });

        let planned = planner.plan(&[seed, neighbor], &query());
        assert_eq!(planned.len(), 2);
        let expanded = planned
            .iter()
            .find(|candidate| candidate.hit.record.id == "neighbor")
            .unwrap();
        // The expanded candidate must be attributed to the graph-expansion
        // stage, carry the Graph candidate source, and earn a graph score.
        assert_eq!(expanded.planner_stage, RecallPlannerStage::GraphExpansion);
        assert!(
            expanded
                .candidate_sources
                .contains(&RecallCandidateSource::Graph)
        );
        assert!(expanded.hit.breakdown.graph > 0.0);
    }
1914
    #[test]
    fn fast_path_planner_skips_graph_expansion() {
        // Default engine config — no graph-expansion stage is applied.
        let planner = RecallPlanner::from_engine_config(&EngineConfig::default());

        // Seed matches the query lexically and links to "neighbor" through
        // the episode graph (next_record_id + related_record_ids).
        let mut seed = record(
            "seed",
            "storm checklist remediation",
            "storm checklist",
            0.5,
        );
        seed.episode = Some(EpisodeContext {
            schema_version: EPISODE_SCHEMA_VERSION,
            episode_id: "storm-episode".to_string(),
            summary: Some("storm remediation thread".to_string()),
            continuity_state: EpisodeContinuityState::Open,
            actor_ids: vec!["ava".to_string()],
            goal: Some("close follow-up actions".to_string()),
            outcome: None,
            started_at_unix_ms: Some(1),
            ended_at_unix_ms: None,
            last_active_unix_ms: Some(2),
            recurrence_key: None,
            recurrence_interval_ms: None,
            boundary_label: None,
            previous_record_id: None,
            next_record_id: Some("neighbor".to_string()),
            causal_record_ids: vec![],
            related_record_ids: vec!["neighbor".to_string()],
            linked_artifact_uris: vec![],
            salience: EpisodeSalience::default(),
            affective: None,
        });

        // Neighbor has no lexical overlap with the query, so it could only be
        // surfaced via graph expansion — which this planner must skip.
        let mut neighbor = record("neighbor", "follow-up note", "follow-up note", 0.2);
        neighbor.episode = Some(EpisodeContext {
            schema_version: EPISODE_SCHEMA_VERSION,
            episode_id: "storm-episode".to_string(),
            summary: Some("storm remediation thread".to_string()),
            continuity_state: EpisodeContinuityState::Open,
            actor_ids: vec!["ava".to_string()],
            goal: Some("close follow-up actions".to_string()),
            outcome: None,
            started_at_unix_ms: Some(2),
            ended_at_unix_ms: None,
            last_active_unix_ms: Some(3),
            recurrence_key: None,
            recurrence_interval_ms: None,
            boundary_label: None,
            previous_record_id: Some("seed".to_string()),
            next_record_id: None,
            causal_record_ids: vec!["seed".to_string()],
            related_record_ids: vec![],
            linked_artifact_uris: vec![],
            salience: EpisodeSalience::default(),
            affective: None,
        });

        // Only the lexical seed survives; the linked neighbor is not pulled in.
        let planned = planner.plan(&[seed, neighbor], &query());
        assert_eq!(planned.len(), 1);
        assert_eq!(planned[0].hit.record.id, "seed");
    }
1976
    #[test]
    fn continuity_planner_respects_graph_hop_limit() {
        let mut config = EngineConfig::default();
        config.recall_planning_profile = RecallPlanningProfile::ContinuityAware;
        config.graph_expansion_max_hops = 1;
        let one_hop_planner = RecallPlanner::from_engine_config(&config);

        config.graph_expansion_max_hops = 2;
        let two_hop_planner = RecallPlanner::from_engine_config(&config);

        // Chain: seed -> middle -> far. Only "seed" matches the query
        // lexically; "middle" is one hop away and "far" two hops away (in a
        // different episode, linked only via chronology/causal edges).
        let mut seed = record(
            "seed",
            "storm checklist remediation",
            "storm checklist",
            0.5,
        );
        seed.episode = Some(EpisodeContext {
            schema_version: EPISODE_SCHEMA_VERSION,
            episode_id: "storm-episode".to_string(),
            summary: Some("storm remediation thread".to_string()),
            continuity_state: EpisodeContinuityState::Open,
            actor_ids: vec!["ava".to_string()],
            goal: Some("close follow-up actions".to_string()),
            outcome: None,
            started_at_unix_ms: Some(1),
            ended_at_unix_ms: None,
            last_active_unix_ms: Some(2),
            recurrence_key: None,
            recurrence_interval_ms: None,
            boundary_label: None,
            previous_record_id: None,
            next_record_id: Some("middle".to_string()),
            causal_record_ids: vec![],
            related_record_ids: vec![],
            linked_artifact_uris: vec![],
            salience: EpisodeSalience::default(),
            affective: None,
        });

        let mut middle = record("middle", "follow-up note", "follow-up note", 0.2);
        middle.episode = Some(EpisodeContext {
            schema_version: EPISODE_SCHEMA_VERSION,
            episode_id: "storm-episode".to_string(),
            summary: Some("storm remediation thread".to_string()),
            continuity_state: EpisodeContinuityState::Open,
            actor_ids: vec!["ava".to_string()],
            goal: Some("close follow-up actions".to_string()),
            outcome: None,
            started_at_unix_ms: Some(2),
            ended_at_unix_ms: None,
            last_active_unix_ms: Some(3),
            recurrence_key: None,
            recurrence_interval_ms: None,
            boundary_label: None,
            previous_record_id: Some("seed".to_string()),
            next_record_id: Some("far".to_string()),
            causal_record_ids: vec!["seed".to_string()],
            related_record_ids: vec![],
            linked_artifact_uris: vec![],
            salience: EpisodeSalience::default(),
            affective: None,
        });

        let mut far = record("far", "final action", "final action", 0.1);
        far.episode = Some(EpisodeContext {
            schema_version: EPISODE_SCHEMA_VERSION,
            episode_id: "storm-secondary-episode".to_string(),
            summary: Some("storm secondary thread".to_string()),
            continuity_state: EpisodeContinuityState::Open,
            actor_ids: vec!["ava".to_string()],
            goal: Some("close follow-up actions".to_string()),
            outcome: None,
            started_at_unix_ms: Some(3),
            ended_at_unix_ms: None,
            last_active_unix_ms: Some(4),
            recurrence_key: None,
            recurrence_interval_ms: None,
            boundary_label: None,
            previous_record_id: Some("middle".to_string()),
            next_record_id: None,
            causal_record_ids: vec!["middle".to_string()],
            related_record_ids: vec![],
            linked_artifact_uris: vec![],
            salience: EpisodeSalience::default(),
            affective: None,
        });

        // One hop: "middle" is reachable; "far" must stay out.
        let one_hop = one_hop_planner.plan(&[seed.clone(), middle.clone(), far.clone()], &query());
        assert!(
            one_hop
                .iter()
                .any(|candidate| candidate.hit.record.id == "middle")
        );
        assert!(
            !one_hop
                .iter()
                .any(|candidate| candidate.hit.record.id == "far")
        );

        // Two hops: "far" becomes reachable and the metrics report both hops.
        let (two_hop, metrics) = two_hop_planner.plan_with_metrics(&[seed, middle, far], &query());
        assert!(
            two_hop
                .iter()
                .any(|candidate| candidate.hit.record.id == "far")
        );
        assert_eq!(metrics.hops_applied, 2);
        // NOTE(review): assumes the expansion stage takes a measurable nonzero
        // amount of time; could be flaky on a platform with a coarse
        // monotonic clock — confirm the metric is always > 0 in CI.
        assert!(metrics.graph_expansion_ns > 0);
    }
2085
    #[test]
    fn continuity_planner_stays_within_query_scope_boundaries() {
        let mut config = EngineConfig::default();
        config.recall_planning_profile = RecallPlanningProfile::ContinuityAware;
        config.graph_expansion_max_hops = 2;
        let planner = RecallPlanner::from_engine_config(&config);

        // Seed matches the query and links to "cross-scope" through the
        // episode graph (next_record_id + related_record_ids).
        let mut seed = record(
            "seed",
            "storm checklist remediation",
            "storm checklist",
            0.5,
        );
        seed.episode = Some(EpisodeContext {
            schema_version: EPISODE_SCHEMA_VERSION,
            episode_id: "storm-episode".to_string(),
            summary: Some("storm remediation thread".to_string()),
            continuity_state: EpisodeContinuityState::Open,
            actor_ids: vec!["ava".to_string()],
            goal: Some("close follow-up actions".to_string()),
            outcome: None,
            started_at_unix_ms: Some(1),
            ended_at_unix_ms: None,
            last_active_unix_ms: Some(2),
            recurrence_key: None,
            recurrence_interval_ms: None,
            boundary_label: None,
            previous_record_id: None,
            next_record_id: Some("cross-scope".to_string()),
            causal_record_ids: vec![],
            related_record_ids: vec!["cross-scope".to_string()],
            linked_artifact_uris: vec![],
            salience: EpisodeSalience::default(),
            affective: None,
        });

        // The linked record lives in a different namespace; graph expansion
        // must not cross the query's scope boundary to reach it.
        let mut cross_scope = record("cross-scope", "storm follow-up note", "follow-up", 0.2);
        cross_scope.scope.namespace = "other".to_string();
        cross_scope.episode = Some(EpisodeContext {
            schema_version: EPISODE_SCHEMA_VERSION,
            episode_id: "storm-episode".to_string(),
            summary: Some("storm remediation thread".to_string()),
            continuity_state: EpisodeContinuityState::Open,
            actor_ids: vec!["ava".to_string()],
            goal: Some("close follow-up actions".to_string()),
            outcome: None,
            started_at_unix_ms: Some(2),
            ended_at_unix_ms: None,
            last_active_unix_ms: Some(3),
            recurrence_key: None,
            recurrence_interval_ms: None,
            boundary_label: None,
            previous_record_id: Some("seed".to_string()),
            next_record_id: None,
            causal_record_ids: vec!["seed".to_string()],
            related_record_ids: vec![],
            linked_artifact_uris: vec![],
            salience: EpisodeSalience::default(),
            affective: None,
        });

        // Only the in-scope seed survives planning.
        let planned = planner.plan(&[seed, cross_scope], &query());
        assert_eq!(planned.len(), 1);
        assert_eq!(planned[0].hit.record.id, "seed");
    }
2151
    #[test]
    fn temporal_score_rewards_recurrence_duration_and_boundary_cues() {
        // Recurring fixture: weekly recurrence key/interval, a 4-hour span,
        // and an explicit boundary label.
        let mut recurring = record(
            "recurring",
            "weekly release retrospective handoff",
            "weekly release retrospective",
            0.4,
        );
        recurring.episode = Some(EpisodeContext {
            schema_version: EPISODE_SCHEMA_VERSION,
            episode_id: "release-retro".to_string(),
            summary: Some("Recurring release retrospective".to_string()),
            continuity_state: EpisodeContinuityState::Open,
            actor_ids: vec!["ava".to_string()],
            goal: Some("review each release boundary".to_string()),
            outcome: None,
            started_at_unix_ms: Some(1),
            ended_at_unix_ms: Some(1 + (4 * 60 * 60 * 1000)), // 4 hours after start
            last_active_unix_ms: Some(1 + (4 * 60 * 60 * 1000)),
            recurrence_key: Some("weekly-release-retro".to_string()),
            recurrence_interval_ms: Some(7 * 24 * 60 * 60 * 1000), // one week
            boundary_label: Some("weekly-release-boundary".to_string()),
            previous_record_id: Some("retro-previous".to_string()),
            next_record_id: Some("retro-next".to_string()),
            causal_record_ids: vec!["retro-root".to_string()],
            related_record_ids: vec![],
            linked_artifact_uris: vec![],
            salience: EpisodeSalience::default(),
            affective: None,
        });

        // Control record: identical text and importance, but no episode data.
        let plain = record(
            "plain",
            "weekly release retrospective handoff",
            "weekly release retrospective",
            0.4,
        );

        // Query text carries recurrence ("recurring"), duration ("how long"),
        // and boundary cues that the temporal component should reward.
        let query = RecallQuery {
            query_text: "how long does the recurring weekly release handoff boundary last"
                .to_string(),
            ..query()
        };

        let scorer = ProfileRecallScorer::new(RecallScoringProfile::Balanced);
        let recurring_scored = scorer.score(&recurring, &query).unwrap();
        let plain_scored = scorer.score(&plain, &query).unwrap();
        assert!(recurring_scored.hit.breakdown.temporal > plain_scored.hit.breakdown.temporal);
    }
2201}