1use std::collections::HashMap;
5use std::sync::Arc;
6#[allow(unused_imports)]
7use zeph_db::sql;
8
9use futures::TryStreamExt as _;
10use petgraph::Graph;
11use petgraph::graph::NodeIndex;
12use tokio::sync::Semaphore;
13use tokio::task::JoinSet;
14use zeph_llm::LlmProvider as _;
15use zeph_llm::any::AnyProvider;
16use zeph_llm::provider::{Message, Role};
17
18use crate::error::MemoryError;
19
20use super::store::GraphStore;
21use super::types::Entity;
22
/// Safety cap on label-propagation sweeps; `run_label_propagation` exits
/// earlier as soon as a sweep makes no changes, so this only bounds
/// non-converging (oscillating) inputs.
const MAX_LABEL_PROPAGATION_ITERATIONS: usize = 50;
24
/// Strips characters that could smuggle hidden instructions or spoof display
/// when the text is later embedded in an LLM prompt: all Unicode control
/// characters, zero-width characters (U+200B..U+200F), bidi overrides
/// (U+202A..U+202E), bidi isolates (U+2066..U+2069), and the BOM (U+FEFF).
fn scrub_content(s: &str) -> String {
    let is_invisible = |cp: u32| {
        matches!(
            cp,
            0x200B..=0x200F | 0x202A..=0x202E | 0x2066..=0x2069 | 0xFEFF
        )
    };
    let mut cleaned = String::with_capacity(s.len());
    for ch in s.chars() {
        if !ch.is_control() && !is_invisible(ch as u32) {
            cleaned.push(ch);
        }
    }
    cleaned
}
43
/// Per-pass deletion counts reported by [`run_graph_eviction`].
#[derive(Debug, Default)]
pub struct GraphEvictionStats {
    /// Rows removed by `GraphStore::delete_expired_edges`.
    pub expired_edges_deleted: usize,
    /// Rows removed by `GraphStore::delete_orphan_entities`.
    pub orphan_entities_deleted: usize,
    /// Rows removed by `GraphStore::cap_entities` (0 when no cap is set).
    pub capped_entities_deleted: usize,
}
51
/// Caps `prompt` at `max_bytes` bytes, appending `"..."` when it had to cut.
///
/// The cut always lands on a UTF-8 character boundary at or below
/// `max_bytes`, so the result is valid UTF-8. `max_bytes == 0` yields an
/// empty string; a prompt already within the limit is returned unchanged.
///
/// Note: the previous implementation used `str::floor_char_boundary`, which
/// is a nightly-only API (feature `round_char_boundary`); the backward walk
/// over `is_char_boundary` below is the stable equivalent (at most 3 steps,
/// since a UTF-8 sequence is at most 4 bytes).
fn truncate_prompt(prompt: String, max_bytes: usize) -> String {
    if max_bytes == 0 {
        return String::new();
    }
    if prompt.len() <= max_bytes {
        return prompt;
    }
    // Walk back from the byte limit until we land on a char boundary.
    let mut boundary = max_bytes;
    while !prompt.is_char_boundary(boundary) {
        boundary -= 1;
    }
    format!("{}...", &prompt[..boundary])
}
67
68fn compute_partition_fingerprint(entity_ids: &[i64], intra_edge_ids: &[i64]) -> String {
74 let mut hasher = blake3::Hasher::new();
75 let mut sorted_entities = entity_ids.to_vec();
76 sorted_entities.sort_unstable();
77 hasher.update(b"entities");
78 for id in &sorted_entities {
79 hasher.update(&id.to_le_bytes());
80 }
81 let mut sorted_edges = intra_edge_ids.to_vec();
82 sorted_edges.sort_unstable();
83 hasher.update(b"edges");
84 for id in &sorted_edges {
85 hasher.update(&id.to_le_bytes());
86 }
87 hasher.finalize().to_hex().to_string()
88}
89
/// Working data for one community that needs (re-)summarization.
struct CommunityData {
    /// Ids of the member entities.
    entity_ids: Vec<i64>,
    /// Scrubbed display names of the member entities.
    entity_names: Vec<String>,
    /// Scrubbed facts from edges whose endpoints are both members.
    intra_facts: Vec<String>,
    /// Partition fingerprint from `compute_partition_fingerprint`.
    fingerprint: String,
    /// Community name: up to three member names plus a label-index suffix.
    name: String,
}
98
/// Undirected entity graph: node weights are entity ids, edges carry no data.
type UndirectedGraph = Graph<i64, (), petgraph::Undirected>;
100
/// Loads the graph topology into memory: one node per entity (weighted by
/// its id) plus, for every active edge whose two endpoints are both known,
/// an undirected graph edge.
///
/// Also returns two maps keyed by `(source_entity_id, target_entity_id)`:
/// the facts and the edge row ids recorded for that pair. Edges referencing
/// an entity not present in `entities` still populate the maps — they just
/// add no graph edge.
///
/// `edge_chunk_size == 0` streams and collects all active edges at once;
/// otherwise edges are paged by ascending id, `edge_chunk_size` rows per
/// query (clamped to `i64::MAX`).
async fn build_entity_graph_and_maps(
    store: &GraphStore,
    entities: &[Entity],
    edge_chunk_size: usize,
) -> Result<
    (
        UndirectedGraph,
        HashMap<(i64, i64), Vec<String>>,
        HashMap<(i64, i64), Vec<i64>>,
    ),
    MemoryError,
> {
    let mut graph = UndirectedGraph::new_undirected();
    let mut node_map: HashMap<i64, NodeIndex> = HashMap::new();

    for entity in entities {
        let idx = graph.add_node(entity.id);
        node_map.insert(entity.id, idx);
    }

    let mut edge_facts_map: HashMap<(i64, i64), Vec<String>> = HashMap::new();
    let mut edge_id_map: HashMap<(i64, i64), Vec<i64>> = HashMap::new();

    if edge_chunk_size == 0 {
        // Single-shot path: pull the whole active-edge stream into memory.
        let edges: Vec<_> = store.all_active_edges_stream().try_collect().await?;
        for edge in &edges {
            if let (Some(&src_idx), Some(&tgt_idx)) = (
                node_map.get(&edge.source_entity_id),
                node_map.get(&edge.target_entity_id),
            ) {
                graph.add_edge(src_idx, tgt_idx, ());
            }
            let key = (edge.source_entity_id, edge.target_entity_id);
            edge_facts_map
                .entry(key)
                .or_default()
                .push(edge.fact.clone());
            edge_id_map.entry(key).or_default().push(edge.id);
        }
    } else {
        // Paged path: keyset-paginate by edge id to bound per-query memory.
        let limit = i64::try_from(edge_chunk_size).unwrap_or(i64::MAX);
        let mut last_id: i64 = 0;
        loop {
            let chunk = store.edges_after_id(last_id, limit).await?;
            if chunk.is_empty() {
                break;
            }
            last_id = chunk.last().expect("non-empty chunk has a last element").id;
            for edge in &chunk {
                if let (Some(&src_idx), Some(&tgt_idx)) = (
                    node_map.get(&edge.source_entity_id),
                    node_map.get(&edge.target_entity_id),
                ) {
                    graph.add_edge(src_idx, tgt_idx, ());
                }
                let key = (edge.source_entity_id, edge.target_entity_id);
                edge_facts_map
                    .entry(key)
                    .or_default()
                    .push(edge.fact.clone());
                edge_id_map.entry(key).or_default().push(edge.id);
            }
        }
    }

    Ok((graph, edge_facts_map, edge_id_map))
}
168
169fn run_label_propagation(graph: &UndirectedGraph) -> HashMap<usize, Vec<i64>> {
170 let mut labels: Vec<usize> = (0..graph.node_count()).collect();
171
172 for _ in 0..MAX_LABEL_PROPAGATION_ITERATIONS {
173 let mut changed = false;
174 for node_idx in graph.node_indices() {
175 let neighbors: Vec<NodeIndex> = graph.neighbors(node_idx).collect();
176 if neighbors.is_empty() {
177 continue;
178 }
179 let mut freq: HashMap<usize, usize> = HashMap::new();
180 for &nbr in &neighbors {
181 *freq.entry(labels[nbr.index()]).or_insert(0) += 1;
182 }
183 let max_count = *freq.values().max().unwrap_or(&0);
184 let best_label = freq
185 .iter()
186 .filter(|&(_, count)| *count == max_count)
187 .map(|(&label, _)| label)
188 .min()
189 .unwrap_or(labels[node_idx.index()]);
190 if labels[node_idx.index()] != best_label {
191 labels[node_idx.index()] = best_label;
192 changed = true;
193 }
194 }
195 if !changed {
196 break;
197 }
198 }
199
200 let mut communities: HashMap<usize, Vec<i64>> = HashMap::new();
201 for node_idx in graph.node_indices() {
202 let entity_id = graph[node_idx];
203 communities
204 .entry(labels[node_idx.index()])
205 .or_default()
206 .push(entity_id);
207 }
208 communities.retain(|_, members| members.len() >= 2);
209 communities
210}
211
/// Outcome of comparing the freshly detected partition against the stored
/// community fingerprints.
struct ClassifyResult {
    /// Communities whose membership or intra-edges changed; need a new summary.
    to_summarize: Vec<CommunityData>,
    /// Number of communities whose fingerprint matched a stored one.
    unchanged_count: usize,
    /// Fingerprints of every community in the new partition.
    new_fingerprints: std::collections::HashSet<String>,
}
217
218fn classify_communities(
219 communities: &HashMap<usize, Vec<i64>>,
220 edge_facts_map: &HashMap<(i64, i64), Vec<String>>,
221 edge_id_map: &HashMap<(i64, i64), Vec<i64>>,
222 entity_name_map: &HashMap<i64, &str>,
223 stored_fingerprints: &HashMap<String, i64>,
224 sorted_labels: &[usize],
225) -> ClassifyResult {
226 let mut to_summarize: Vec<CommunityData> = Vec::new();
227 let mut unchanged_count = 0usize;
228 let mut new_fingerprints: std::collections::HashSet<String> = std::collections::HashSet::new();
229
230 for (label_index, &label) in sorted_labels.iter().enumerate() {
231 let entity_ids = communities[&label].as_slice();
232 let member_set: std::collections::HashSet<i64> = entity_ids.iter().copied().collect();
233
234 let mut intra_facts: Vec<String> = Vec::new();
235 let mut intra_edge_ids: Vec<i64> = Vec::new();
236 for (&(src, tgt), facts) in edge_facts_map {
237 if member_set.contains(&src) && member_set.contains(&tgt) {
238 intra_facts.extend(facts.iter().map(|f| scrub_content(f)));
239 if let Some(ids) = edge_id_map.get(&(src, tgt)) {
240 intra_edge_ids.extend_from_slice(ids);
241 }
242 }
243 }
244
245 let fingerprint = compute_partition_fingerprint(entity_ids, &intra_edge_ids);
246 new_fingerprints.insert(fingerprint.clone());
247
248 if stored_fingerprints.contains_key(&fingerprint) {
249 unchanged_count += 1;
250 continue;
251 }
252
253 let entity_names: Vec<String> = entity_ids
254 .iter()
255 .filter_map(|id| entity_name_map.get(id).map(|&s| scrub_content(s)))
256 .collect();
257
258 let base_name = entity_names
261 .iter()
262 .take(3)
263 .cloned()
264 .collect::<Vec<_>>()
265 .join(", ");
266 let name = format!("{base_name} [{label_index}]");
267
268 to_summarize.push(CommunityData {
269 entity_ids: entity_ids.to_vec(),
270 entity_names,
271 intra_facts,
272 fingerprint,
273 name,
274 });
275 }
276
277 ClassifyResult {
278 to_summarize,
279 unchanged_count,
280 new_fingerprints,
281 }
282}
283
/// Generates summaries for the given communities with bounded LLM
/// concurrency, then upserts each result into the store in name order.
/// Returns the number of communities upserted.
///
/// A failed summary generation degrades to an empty summary (the community
/// row is still written); a panicked or cancelled task is logged and its
/// community is dropped from this run.
async fn summarize_and_upsert_communities(
    store: &GraphStore,
    provider: &AnyProvider,
    to_summarize: Vec<CommunityData>,
    concurrency: usize,
    community_summary_max_prompt_bytes: usize,
) -> Result<usize, MemoryError> {
    // Cap in-flight LLM calls; concurrency of 0 is treated as 1.
    let semaphore = Arc::new(Semaphore::new(concurrency.max(1)));
    let mut join_set: JoinSet<(String, String, Vec<i64>, String)> = JoinSet::new();

    for data in to_summarize {
        let provider = provider.clone();
        let sem = Arc::clone(&semaphore);
        let max_bytes = community_summary_max_prompt_bytes;
        join_set.spawn(async move {
            let _permit = sem.acquire().await.expect("semaphore is never closed");
            let summary = match generate_community_summary(
                &provider,
                &data.entity_names,
                &data.intra_facts,
                max_bytes,
            )
            .await
            {
                Ok(text) => text,
                Err(e) => {
                    // Best-effort: keep the community, just without a summary.
                    tracing::warn!(community = %data.name, "community summary generation failed: {e:#}");
                    String::new()
                }
            };
            (data.name, summary, data.entity_ids, data.fingerprint)
        });
    }

    let mut results: Vec<(String, String, Vec<i64>, String)> = Vec::new();
    while let Some(outcome) = join_set.join_next().await {
        match outcome {
            Ok(tuple) => results.push(tuple),
            Err(e) => {
                tracing::error!(
                    panicked = e.is_panic(),
                    cancelled = e.is_cancelled(),
                    "community summary task failed"
                );
            }
        }
    }

    // Tasks complete in arbitrary order; sort by name for deterministic
    // upsert order.
    results.sort_unstable_by(|a, b| a.0.cmp(&b.0));

    let mut count = 0usize;
    for (name, summary, entity_ids, fingerprint) in results {
        store
            .upsert_community(&name, &summary, &entity_ids, Some(&fingerprint))
            .await?;
        count += 1;
    }

    Ok(count)
}
345
346pub async fn detect_communities(
365 store: &GraphStore,
366 provider: &AnyProvider,
367 community_summary_max_prompt_bytes: usize,
368 concurrency: usize,
369 edge_chunk_size: usize,
370) -> Result<usize, MemoryError> {
371 let edge_chunk_size = if edge_chunk_size == 0 {
372 tracing::warn!(
373 "edge_chunk_size is 0, which would load all edges into memory; \
374 using safe default of 10_000"
375 );
376 10_000_usize
377 } else {
378 edge_chunk_size
379 };
380
381 let entities = store.all_entities().await?;
382 if entities.len() < 2 {
383 return Ok(0);
384 }
385
386 let (graph, edge_facts_map, edge_id_map) =
387 build_entity_graph_and_maps(store, &entities, edge_chunk_size).await?;
388
389 let communities = run_label_propagation(&graph);
390
391 let entity_name_map: HashMap<i64, &str> =
392 entities.iter().map(|e| (e.id, e.name.as_str())).collect();
393 let stored_fingerprints = store.community_fingerprints().await?;
394
395 let mut sorted_labels: Vec<usize> = communities.keys().copied().collect();
396 sorted_labels.sort_unstable();
397
398 let ClassifyResult {
399 to_summarize,
400 unchanged_count,
401 new_fingerprints,
402 } = classify_communities(
403 &communities,
404 &edge_facts_map,
405 &edge_id_map,
406 &entity_name_map,
407 &stored_fingerprints,
408 &sorted_labels,
409 );
410
411 tracing::debug!(
412 total = sorted_labels.len(),
413 unchanged = unchanged_count,
414 to_summarize = to_summarize.len(),
415 "community detection: partition classification complete"
416 );
417
418 for (stored_fp, community_id) in &stored_fingerprints {
420 if !new_fingerprints.contains(stored_fp.as_str()) {
421 store.delete_community_by_id(*community_id).await?;
422 }
423 }
424
425 let new_count = summarize_and_upsert_communities(
426 store,
427 provider,
428 to_summarize,
429 concurrency,
430 community_summary_max_prompt_bytes,
431 )
432 .await?;
433
434 Ok(unchanged_count + new_count)
435}
436
/// Assigns an entity to an existing community by majority vote over its
/// direct neighbors' community memberships.
///
/// Returns the chosen community id, or `None` when the entity has no edges,
/// none of its neighbors belong to a community, or the winning community row
/// no longer exists. When the entity is newly added to a community, that
/// community's fingerprint is cleared so the next `detect_communities` run
/// re-summarizes it.
pub async fn assign_to_community(
    store: &GraphStore,
    entity_id: i64,
) -> Result<Option<i64>, MemoryError> {
    let edges = store.edges_for_entity(entity_id).await?;
    if edges.is_empty() {
        return Ok(None);
    }

    // The "other" endpoint of each incident edge.
    let neighbor_ids: Vec<i64> = edges
        .iter()
        .map(|e| {
            if e.source_entity_id == entity_id {
                e.target_entity_id
            } else {
                e.source_entity_id
            }
        })
        .collect();

    // NOTE(review): one store query per neighbor (N+1); acceptable for the
    // small degrees expected here — confirm if degrees grow.
    let mut community_votes: HashMap<i64, usize> = HashMap::new();
    for &nbr_id in &neighbor_ids {
        if let Some(community) = store.community_for_entity(nbr_id).await? {
            *community_votes.entry(community.id).or_insert(0) += 1;
        }
    }

    if community_votes.is_empty() {
        return Ok(None);
    }

    // Highest vote count wins; on a tie the reversed id comparison makes the
    // smaller community id compare as greater, so ties break deterministically
    // toward the smaller id. (The `else` arm is unreachable: the map is
    // non-empty here.)
    let Some((&best_community_id, _)) =
        community_votes
            .iter()
            .max_by(|&(&id_a, &count_a), &(&id_b, &count_b)| {
                count_a.cmp(&count_b).then(id_b.cmp(&id_a))
            })
    else {
        return Ok(None);
    };

    if let Some(mut target) = store.find_community_by_id(best_community_id).await? {
        if !target.entity_ids.contains(&entity_id) {
            target.entity_ids.push(entity_id);
            store
                .upsert_community(&target.name, &target.summary, &target.entity_ids, None)
                .await?;
            // Force re-summarization of the grown community next detect run.
            store.clear_community_fingerprint(best_community_id).await?;
        }
        return Ok(Some(best_community_id));
    }

    Ok(None)
}
504
505pub async fn cleanup_stale_entity_embeddings(
513 store: &GraphStore,
514 embeddings: &crate::embedding_store::EmbeddingStore,
515) -> Result<usize, MemoryError> {
516 const ENTITY_COLLECTION: &str = "zeph_graph_entities";
517
518 let pairs = embeddings.scroll_all_entity_ids(ENTITY_COLLECTION).await?;
522 if pairs.is_empty() {
523 return Ok(0);
524 }
525
526 let qdrant_ids: Vec<i64> = pairs.iter().map(|(_, eid)| *eid).collect();
527 let live: std::collections::HashSet<i64> = store
528 .entity_ids_in(&qdrant_ids)
529 .await?
530 .into_iter()
531 .collect();
532
533 let stale_point_ids: Vec<String> = pairs
534 .into_iter()
535 .filter_map(|(pid, eid)| (!live.contains(&eid)).then_some(pid))
536 .collect();
537
538 if stale_point_ids.is_empty() {
539 return Ok(0);
540 }
541
542 let count = stale_point_ids.len();
543 embeddings
544 .delete_from_collection(ENTITY_COLLECTION, stale_point_ids)
545 .await?;
546 Ok(count)
547}
548
549pub async fn run_graph_eviction(
555 store: &GraphStore,
556 expired_edge_retention_days: u32,
557 max_entities: usize,
558) -> Result<GraphEvictionStats, MemoryError> {
559 let expired_edges_deleted = store
560 .delete_expired_edges(expired_edge_retention_days)
561 .await?;
562 let orphan_entities_deleted = store
563 .delete_orphan_entities(expired_edge_retention_days)
564 .await?;
565 let capped_entities_deleted = if max_entities > 0 {
566 store.cap_entities(max_entities).await?
567 } else {
568 0
569 };
570
571 Ok(GraphEvictionStats {
572 expired_edges_deleted,
573 orphan_entities_deleted,
574 capped_entities_deleted,
575 })
576}
577
578async fn generate_community_summary(
579 provider: &AnyProvider,
580 entity_names: &[String],
581 edge_facts: &[String],
582 max_prompt_bytes: usize,
583) -> Result<String, MemoryError> {
584 let entities_str = entity_names.join(", ");
585 let facts_str = edge_facts
587 .iter()
588 .take(20)
589 .map(|f| format!("- {f}"))
590 .collect::<Vec<_>>()
591 .join("\n");
592
593 let raw_prompt = format!(
594 "Summarize the following group of related entities and their relationships \
595 into a single paragraph (2-3 sentences). Focus on the theme that connects \
596 them and the key relationships.\n\nEntities: {entities_str}\n\
597 Relationships:\n{facts_str}\n\nSummary:"
598 );
599
600 let original_bytes = raw_prompt.len();
601 let truncated = raw_prompt.len() > max_prompt_bytes;
602 let prompt = truncate_prompt(raw_prompt, max_prompt_bytes);
603 if prompt.is_empty() {
604 return Ok(String::new());
605 }
606 if truncated {
607 tracing::warn!(
608 entity_count = entity_names.len(),
609 original_bytes,
610 truncated_bytes = prompt.len(),
611 "community summary prompt truncated"
612 );
613 }
614
615 let messages = [Message::from_legacy(Role::User, prompt)];
616 let response: String = provider.chat(&messages).await.map_err(MemoryError::Llm)?;
617 Ok(response)
618}
619
620#[cfg(test)]
621mod tests {
622 use std::sync::{Arc, Mutex};
623
624 use super::*;
625 use crate::graph::types::EntityType;
626 use crate::store::SqliteStore;
627
    /// Fresh in-memory SQLite-backed graph store for each test.
    async fn setup() -> GraphStore {
        let store = SqliteStore::new(":memory:").await.unwrap();
        GraphStore::new(store.pool().clone())
    }

    /// Mock LLM provider (no real model calls).
    fn mock_provider() -> AnyProvider {
        AnyProvider::Mock(zeph_llm::mock::MockProvider::default())
    }

    /// Mock provider plus a shared buffer capturing every chat call's
    /// messages — used by tests that count LLM invocations.
    fn recording_provider() -> (
        AnyProvider,
        Arc<Mutex<Vec<Vec<zeph_llm::provider::Message>>>>,
    ) {
        let (mock, buf) = zeph_llm::mock::MockProvider::default().with_recording();
        (AnyProvider::Mock(mock), buf)
    }
644
    // With no entities at all, detection is a no-op.
    #[tokio::test]
    async fn test_detect_communities_empty_graph() {
        let store = setup().await;
        let provider = mock_provider();
        let count = detect_communities(&store, &provider, usize::MAX, 4, 0)
            .await
            .unwrap();
        assert_eq!(count, 0);
    }

    // A lone entity has no edges, so no community can form.
    #[tokio::test]
    async fn test_detect_communities_single_entity() {
        let store = setup().await;
        let provider = mock_provider();
        store
            .upsert_entity("Solo", "Solo", EntityType::Concept, None)
            .await
            .unwrap();
        let count = detect_communities(&store, &provider, usize::MAX, 4, 0)
            .await
            .unwrap();
        assert_eq!(count, 0, "single isolated entity must not form a community");
    }
668
    // Connected triple A-B-C forms one community; the isolated node stays out.
    #[tokio::test]
    async fn test_single_entity_community_filtered() {
        let store = setup().await;
        let provider = mock_provider();

        let a = store
            .upsert_entity("A", "A", EntityType::Concept, None)
            .await
            .unwrap();
        let b = store
            .upsert_entity("B", "B", EntityType::Concept, None)
            .await
            .unwrap();
        let c = store
            .upsert_entity("C", "C", EntityType::Concept, None)
            .await
            .unwrap();
        let iso = store
            .upsert_entity("Isolated", "Isolated", EntityType::Concept, None)
            .await
            .unwrap();

        store
            .insert_edge(a, b, "r", "A relates B", 1.0, None)
            .await
            .unwrap();
        store
            .insert_edge(b, c, "r", "B relates C", 1.0, None)
            .await
            .unwrap();

        let count = detect_communities(&store, &provider, usize::MAX, 4, 0)
            .await
            .unwrap();
        assert_eq!(count, 1, "only the 3-entity cluster should be detected");

        let communities = store.all_communities().await.unwrap();
        assert_eq!(communities.len(), 1);
        assert!(
            !communities[0].entity_ids.contains(&iso),
            "isolated entity must not be in any community"
        );
    }
714
    // Four disjoint 3-node chains must yield four separate communities, with
    // each chain's nodes grouped together.
    #[tokio::test]
    async fn test_label_propagation_basic() {
        let store = setup().await;
        let provider = mock_provider();

        let mut cluster_ids: Vec<Vec<i64>> = Vec::new();
        for cluster in 0..4_i64 {
            let mut ids = Vec::new();
            for node in 0..3_i64 {
                let name = format!("c{cluster}_n{node}");
                let id = store
                    .upsert_entity(&name, &name, EntityType::Concept, None)
                    .await
                    .unwrap();
                ids.push(id);
            }
            store
                .insert_edge(ids[0], ids[1], "r", "f", 1.0, None)
                .await
                .unwrap();
            store
                .insert_edge(ids[1], ids[2], "r", "f", 1.0, None)
                .await
                .unwrap();
            cluster_ids.push(ids);
        }

        let count = detect_communities(&store, &provider, usize::MAX, 4, 0)
            .await
            .unwrap();
        assert_eq!(count, 4, "expected 4 communities, one per cluster");

        let communities = store.all_communities().await.unwrap();
        assert_eq!(communities.len(), 4);

        for ids in &cluster_ids {
            // Each cluster's nodes must appear in exactly one community.
            let found = communities
                .iter()
                .filter(|c| ids.iter().any(|id| c.entity_ids.contains(id)))
                .count();
            assert_eq!(
                found, 1,
                "all nodes of a cluster must be in the same community"
            );
        }
    }
764
    // A zero-edge graph leaves every node a singleton, so nothing survives
    // the >=2-member community filter.
    #[tokio::test]
    async fn test_all_isolated_nodes() {
        let store = setup().await;
        let provider = mock_provider();

        for i in 0..5_i64 {
            store
                .upsert_entity(
                    &format!("iso_{i}"),
                    &format!("iso_{i}"),
                    EntityType::Concept,
                    None,
                )
                .await
                .unwrap();
        }

        let count = detect_communities(&store, &provider, usize::MAX, 4, 0)
            .await
            .unwrap();
        assert_eq!(count, 0, "zero-edge graph must produce no communities");
        assert_eq!(store.community_count().await.unwrap(), 0);
    }
789
    // An invalidated edge whose expiry predates the retention window is purged.
    #[tokio::test]
    async fn test_eviction_expired_edges() {
        let store = setup().await;

        let a = store
            .upsert_entity("EA", "EA", EntityType::Concept, None)
            .await
            .unwrap();
        let b = store
            .upsert_entity("EB", "EB", EntityType::Concept, None)
            .await
            .unwrap();
        let edge_id = store.insert_edge(a, b, "r", "f", 1.0, None).await.unwrap();
        store.invalidate_edge(edge_id).await.unwrap();

        // Backdate the expiry well past the 90-day retention used below.
        zeph_db::query(sql!(
            "UPDATE graph_edges SET expired_at = datetime('now', '-200 days') WHERE id = ?1"
        ))
        .bind(edge_id)
        .execute(store.pool())
        .await
        .unwrap();

        let stats = run_graph_eviction(&store, 90, 0).await.unwrap();
        assert_eq!(stats.expired_edges_deleted, 1);
    }

    // An edgeless entity not seen within the retention window is purged.
    #[tokio::test]
    async fn test_eviction_orphan_entities() {
        let store = setup().await;

        let iso = store
            .upsert_entity("Orphan", "Orphan", EntityType::Concept, None)
            .await
            .unwrap();

        // Backdate last_seen_at past the retention window.
        zeph_db::query(sql!(
            "UPDATE graph_entities SET last_seen_at = datetime('now', '-200 days') WHERE id = ?1"
        ))
        .bind(iso)
        .execute(store.pool())
        .await
        .unwrap();

        let stats = run_graph_eviction(&store, 90, 0).await.unwrap();
        assert_eq!(stats.orphan_entities_deleted, 1);
    }

    // A non-zero max_entities caps the table; the overflow is deleted.
    #[tokio::test]
    async fn test_eviction_entity_cap() {
        let store = setup().await;

        for i in 0..5_i64 {
            let name = format!("cap_entity_{i}");
            store
                .upsert_entity(&name, &name, EntityType::Concept, None)
                .await
                .unwrap();
        }

        let stats = run_graph_eviction(&store, 90, 3).await.unwrap();
        assert_eq!(
            stats.capped_entities_deleted, 2,
            "should delete 5-3=2 entities"
        );
        assert_eq!(store.entity_count().await.unwrap(), 3);
    }
860
    // No edges -> no neighbor votes -> no assignment.
    #[tokio::test]
    async fn test_assign_to_community_no_neighbors() {
        let store = setup().await;
        let entity_id = store
            .upsert_entity("Loner", "Loner", EntityType::Concept, None)
            .await
            .unwrap();

        let result = assign_to_community(&store, entity_id).await.unwrap();
        assert!(result.is_none());
    }

    // Metadata written through one store handle survives reopening the same
    // on-disk database file through a second handle.
    #[tokio::test]
    async fn test_extraction_count_persistence() {
        use tempfile::NamedTempFile;
        let tmp = NamedTempFile::new().unwrap();
        let path = tmp.path().to_str().unwrap().to_owned();

        let store1 = {
            let s = crate::store::SqliteStore::new(&path).await.unwrap();
            GraphStore::new(s.pool().clone())
        };

        store1.set_metadata("extraction_count", "0").await.unwrap();
        for i in 1..=5_i64 {
            store1
                .set_metadata("extraction_count", &i.to_string())
                .await
                .unwrap();
        }

        let store2 = {
            let s = crate::store::SqliteStore::new(&path).await.unwrap();
            GraphStore::new(s.pool().clone())
        };
        assert_eq!(store2.extraction_count().await.unwrap(), 5);
    }
900
    // ASCII control characters (including newline, CR, NUL, tab) are stripped.
    #[test]
    fn test_scrub_content_ascii_control() {
        let input = "hello\nworld\r\x00\x01\x09end";
        assert_eq!(scrub_content(input), "helloworldend");
    }

    // Bidi override/isolate marks (text-direction spoofing) are stripped.
    #[test]
    fn test_scrub_content_bidi_overrides() {
        let input = "safe\u{202A}inject\u{202E}end\u{2066}iso\u{2069}done".to_string();
        assert_eq!(scrub_content(&input), "safeinjectendisodone");
    }

    // Zero-width characters are stripped.
    #[test]
    fn test_scrub_content_zero_width() {
        let input = "a\u{200B}b\u{200C}c\u{200D}d\u{200F}e".to_string();
        assert_eq!(scrub_content(&input), "abcde");
    }

    // The byte-order mark is stripped.
    #[test]
    fn test_scrub_content_bom() {
        let input = "\u{FEFF}hello".to_string();
        assert_eq!(scrub_content(&input), "hello");
    }

    // Ordinary printable text (including non-ASCII punctuation) is untouched.
    #[test]
    fn test_scrub_content_clean_string_unchanged() {
        let input = "Hello, World! 123 — normal text.";
        assert_eq!(scrub_content(input), input);
    }
936
    // A prompt under the byte limit is returned unchanged.
    #[test]
    fn test_truncate_prompt_within_limit() {
        let result = truncate_prompt("short".into(), 100);
        assert_eq!(result, "short");
    }

    // A zero byte budget yields an empty prompt.
    #[test]
    fn test_truncate_prompt_zero_max_bytes() {
        let result = truncate_prompt("hello".into(), 0);
        assert_eq!(result, "");
    }

    // Over-limit prompts are cut and marked with a trailing ellipsis, and
    // the result stays valid UTF-8.
    #[test]
    fn test_truncate_prompt_long_facts() {
        let facts: Vec<String> = (0..20)
            .map(|i| format!("fact_{i}_{}", "x".repeat(20)))
            .collect();
        let prompt = facts.join("\n");
        let result = truncate_prompt(prompt, 200);
        assert!(
            result.ends_with("..."),
            "truncated prompt must end with '...'"
        );
        assert!(result.len() <= 203);
        assert!(std::str::from_utf8(result.as_bytes()).is_ok());
    }

    // The cut must land on a char boundary, never splitting a multi-byte char.
    #[test]
    fn test_truncate_prompt_utf8_boundary() {
        let prompt = "🔥".repeat(100);
        let result = truncate_prompt(prompt, 10);
        assert!(
            result.ends_with("..."),
            "truncated prompt must end with '...'"
        );
        assert_eq!(result.len(), 8 + 3, "2 emojis (8 bytes) + '...' (3 bytes)");
        assert!(std::str::from_utf8(result.as_bytes()).is_ok());
    }
978
    // D connects to two members of an existing community; the majority vote
    // must assign D there and clear the community's fingerprint.
    #[tokio::test]
    async fn test_assign_to_community_majority_vote() {
        let store = setup().await;

        let a = store
            .upsert_entity("AA", "AA", EntityType::Concept, None)
            .await
            .unwrap();
        let b = store
            .upsert_entity("BB", "BB", EntityType::Concept, None)
            .await
            .unwrap();
        let d = store
            .upsert_entity("DD", "DD", EntityType::Concept, None)
            .await
            .unwrap();

        store
            .upsert_community("test_community", "summary", &[a, b], None)
            .await
            .unwrap();

        store.insert_edge(d, a, "r", "f", 1.0, None).await.unwrap();
        store.insert_edge(d, b, "r", "f", 1.0, None).await.unwrap();

        let result = assign_to_community(&store, d).await.unwrap();
        assert!(result.is_some());

        let returned_id = result.unwrap();
        let community = store
            .find_community_by_id(returned_id)
            .await
            .unwrap()
            .expect("returned community_id must reference an existing row");
        assert!(
            community.entity_ids.contains(&d),
            "D should be added to the community"
        );
        assert!(
            community.fingerprint.is_none(),
            "fingerprint must be cleared after assign_to_community"
        );
    }
1025
    // An unchanged fingerprint must short-circuit before any LLM call.
    #[tokio::test]
    async fn test_incremental_detection_no_changes_skips_llm() {
        let store = setup().await;
        let (provider, call_buf) = recording_provider();

        let a = store
            .upsert_entity("X", "X", EntityType::Concept, None)
            .await
            .unwrap();
        let b = store
            .upsert_entity("Y", "Y", EntityType::Concept, None)
            .await
            .unwrap();
        store
            .insert_edge(a, b, "r", "X relates Y", 1.0, None)
            .await
            .unwrap();

        detect_communities(&store, &provider, usize::MAX, 4, 0)
            .await
            .unwrap();
        let first_calls = call_buf.lock().unwrap().len();
        assert_eq!(first_calls, 1, "first run must produce exactly 1 LLM call");

        detect_communities(&store, &provider, usize::MAX, 4, 0)
            .await
            .unwrap();
        let second_calls = call_buf.lock().unwrap().len();
        assert_eq!(
            second_calls, first_calls,
            "second run with no graph changes must produce 0 additional LLM calls"
        );
    }

    // Adding an edge changes the fingerprint and forces one re-summary.
    #[tokio::test]
    async fn test_incremental_detection_edge_change_triggers_resummary() {
        let store = setup().await;
        let (provider, call_buf) = recording_provider();

        let a = store
            .upsert_entity("P", "P", EntityType::Concept, None)
            .await
            .unwrap();
        let b = store
            .upsert_entity("Q", "Q", EntityType::Concept, None)
            .await
            .unwrap();
        store
            .insert_edge(a, b, "r", "P relates Q", 1.0, None)
            .await
            .unwrap();

        detect_communities(&store, &provider, usize::MAX, 4, 0)
            .await
            .unwrap();
        let after_first = call_buf.lock().unwrap().len();
        assert_eq!(after_first, 1);

        store
            .insert_edge(b, a, "r2", "Q also relates P", 1.0, None)
            .await
            .unwrap();

        detect_communities(&store, &provider, usize::MAX, 4, 0)
            .await
            .unwrap();
        let after_second = call_buf.lock().unwrap().len();
        assert_eq!(
            after_second, 2,
            "edge change must trigger one additional LLM call"
        );
    }

    // Invalidating the only edge dissolves the community; the stored row
    // must be deleted by the next detection run.
    #[tokio::test]
    async fn test_incremental_detection_dissolved_community_deleted() {
        let store = setup().await;
        let provider = mock_provider();

        let a = store
            .upsert_entity("M1", "M1", EntityType::Concept, None)
            .await
            .unwrap();
        let b = store
            .upsert_entity("M2", "M2", EntityType::Concept, None)
            .await
            .unwrap();
        let edge_id = store
            .insert_edge(a, b, "r", "M1 relates M2", 1.0, None)
            .await
            .unwrap();

        detect_communities(&store, &provider, usize::MAX, 4, 0)
            .await
            .unwrap();
        assert_eq!(store.community_count().await.unwrap(), 1);

        store.invalidate_edge(edge_id).await.unwrap();

        detect_communities(&store, &provider, usize::MAX, 4, 0)
            .await
            .unwrap();
        assert_eq!(
            store.community_count().await.unwrap(),
            0,
            "dissolved community must be deleted on next refresh"
        );
    }
1140
    // The semaphore-bounded summarization path must work with a single permit.
    #[tokio::test]
    async fn test_detect_communities_concurrency_one() {
        let store = setup().await;
        let provider = mock_provider();

        let a = store
            .upsert_entity("C1A", "C1A", EntityType::Concept, None)
            .await
            .unwrap();
        let b = store
            .upsert_entity("C1B", "C1B", EntityType::Concept, None)
            .await
            .unwrap();
        store.insert_edge(a, b, "r", "f", 1.0, None).await.unwrap();

        let count = detect_communities(&store, &provider, usize::MAX, 1, 0)
            .await
            .unwrap();
        assert_eq!(count, 1, "concurrency=1 must still detect the community");
        assert_eq!(store.community_count().await.unwrap(), 1);
    }
1163
    // Same id sets in any order -> same hash; any id change -> different hash.
    #[test]
    fn test_compute_fingerprint_deterministic() {
        let fp1 = compute_partition_fingerprint(&[1, 2, 3], &[10, 20]);
        let fp2 = compute_partition_fingerprint(&[3, 1, 2], &[20, 10]);
        assert_eq!(fp1, fp2, "fingerprint must be order-independent");

        let fp3 = compute_partition_fingerprint(&[1, 2, 3], &[10, 30]);
        assert_ne!(
            fp1, fp3,
            "different edge IDs must produce different fingerprint"
        );

        let fp4 = compute_partition_fingerprint(&[1, 2, 4], &[10, 20]);
        assert_ne!(
            fp1, fp4,
            "different entity IDs must produce different fingerprint"
        );
    }

    // The "entities"/"edges" domain tags prevent cross-list collisions when
    // an id moves from one list to the other.
    #[test]
    fn test_compute_fingerprint_domain_separation() {
        let fp_a = compute_partition_fingerprint(&[1, 2], &[3]);
        let fp_b = compute_partition_fingerprint(&[1], &[2, 3]);
        assert_ne!(
            fp_a, fp_b,
            "entity/edge sequences with same raw bytes must produce different fingerprints"
        );
    }
1196
    // Chunked edge loading (chunk size 1) must see every edge and produce
    // the same community membership as a single load.
    #[tokio::test]
    async fn test_detect_communities_chunked_correct_membership() {
        let store = setup().await;
        let provider = mock_provider();

        let node_alpha = store
            .upsert_entity("CA", "CA", EntityType::Concept, None)
            .await
            .unwrap();
        let node_beta = store
            .upsert_entity("CB", "CB", EntityType::Concept, None)
            .await
            .unwrap();
        let node_gamma = store
            .upsert_entity("CC", "CC", EntityType::Concept, None)
            .await
            .unwrap();
        let node_delta = store
            .upsert_entity("CD", "CD", EntityType::Concept, None)
            .await
            .unwrap();
        let node_epsilon = store
            .upsert_entity("CE", "CE", EntityType::Concept, None)
            .await
            .unwrap();

        store
            .insert_edge(node_alpha, node_beta, "r", "A-B fact", 1.0, None)
            .await
            .unwrap();
        store
            .insert_edge(node_beta, node_gamma, "r", "B-C fact", 1.0, None)
            .await
            .unwrap();
        store
            .insert_edge(node_delta, node_epsilon, "r", "D-E fact", 1.0, None)
            .await
            .unwrap();

        let count_chunked = detect_communities(&store, &provider, usize::MAX, 4, 1)
            .await
            .unwrap();
        assert_eq!(
            count_chunked, 2,
            "chunked loading must detect both communities"
        );

        let communities = store.all_communities().await.unwrap();
        assert_eq!(communities.len(), 2);

        let abc_ids = [node_alpha, node_beta, node_gamma];
        let de_ids = [node_delta, node_epsilon];
        let has_abc = communities
            .iter()
            .any(|comm| abc_ids.iter().all(|id| comm.entity_ids.contains(id)));
        let has_de = communities
            .iter()
            .any(|comm| de_ids.iter().all(|id| comm.entity_ids.contains(id)));
        assert!(has_abc, "cluster A-B-C must form a community");
        assert!(has_de, "cluster D-E must form a community");
    }

    // chunk_size = usize::MAX saturates the i64 page limit and loads
    // everything in one page.
    #[tokio::test]
    async fn test_detect_communities_chunk_size_max() {
        let store = setup().await;
        let provider = mock_provider();

        let x = store
            .upsert_entity("MX", "MX", EntityType::Concept, None)
            .await
            .unwrap();
        let y = store
            .upsert_entity("MY", "MY", EntityType::Concept, None)
            .await
            .unwrap();
        store
            .insert_edge(x, y, "r", "X-Y fact", 1.0, None)
            .await
            .unwrap();

        let count = detect_communities(&store, &provider, usize::MAX, 4, usize::MAX)
            .await
            .unwrap();
        assert_eq!(count, 1, "chunk_size=usize::MAX must detect the community");
    }

    // chunk_size = 0 is coerced to a safe default inside detect_communities
    // and must still find the community.
    #[tokio::test]
    async fn test_detect_communities_chunk_size_zero_fallback() {
        let store = setup().await;
        let provider = mock_provider();

        let p = store
            .upsert_entity("ZP", "ZP", EntityType::Concept, None)
            .await
            .unwrap();
        let q = store
            .upsert_entity("ZQ", "ZQ", EntityType::Concept, None)
            .await
            .unwrap();
        store
            .insert_edge(p, q, "r", "P-Q fact", 1.0, None)
            .await
            .unwrap();

        let count = detect_communities(&store, &provider, usize::MAX, 4, 0)
            .await
            .unwrap();
        assert_eq!(
            count, 1,
            "chunk_size=0 must detect the community via stream fallback"
        );
    }
1319
1320 #[tokio::test]
1324 async fn test_detect_communities_chunked_edge_map_complete() {
1325 let store = setup().await;
1326 let (provider, call_buf) = recording_provider();
1327
1328 let a = store
1329 .upsert_entity("FA", "FA", EntityType::Concept, None)
1330 .await
1331 .unwrap();
1332 let b = store
1333 .upsert_entity("FB", "FB", EntityType::Concept, None)
1334 .await
1335 .unwrap();
1336 store
1337 .insert_edge(a, b, "r", "edge1 fact", 1.0, None)
1338 .await
1339 .unwrap();
1340
1341 detect_communities(&store, &provider, usize::MAX, 4, 1)
1343 .await
1344 .unwrap();
1345 let calls_after_first = call_buf.lock().unwrap().len();
1346 assert_eq!(calls_after_first, 1, "first run must trigger 1 LLM call");
1347
1348 store
1350 .insert_edge(b, a, "r2", "edge2 fact", 1.0, None)
1351 .await
1352 .unwrap();
1353
1354 detect_communities(&store, &provider, usize::MAX, 4, 1)
1355 .await
1356 .unwrap();
1357 let calls_after_second = call_buf.lock().unwrap().len();
1358 assert_eq!(
1359 calls_after_second, 2,
1360 "adding an edge must change fingerprint and trigger re-summarization"
1361 );
1362 }
1363
1364 #[tokio::test]
1366 async fn cleanup_stale_empty_collection() {
1367 let store = setup().await;
1368 let sqlite_store = crate::store::SqliteStore::new(":memory:").await.unwrap();
1369 let pool = sqlite_store.pool().clone();
1370 let mem_store = Box::new(crate::in_memory_store::InMemoryVectorStore::new());
1371 let emb_store = crate::embedding_store::EmbeddingStore::with_store(mem_store, pool);
1372 emb_store
1373 .ensure_named_collection("zeph_graph_entities", 4)
1374 .await
1375 .unwrap();
1376
1377 let deleted = cleanup_stale_entity_embeddings(&store, &emb_store)
1378 .await
1379 .unwrap();
1380 assert_eq!(deleted, 0, "nothing to delete from empty collection");
1381 }
1382
1383 #[tokio::test]
1386 async fn cleanup_stale_deletes_orphaned_points() {
1387 use crate::graph::types::EntityType;
1388
1389 let sqlite_store = crate::store::SqliteStore::new(":memory:").await.unwrap();
1390 let pool = sqlite_store.pool().clone();
1391 let graph_store = GraphStore::new(pool.clone());
1392
1393 let mem_store = Box::new(crate::in_memory_store::InMemoryVectorStore::new());
1394 let emb_store = crate::embedding_store::EmbeddingStore::with_store(mem_store, pool.clone());
1395 emb_store
1396 .ensure_named_collection("zeph_graph_entities", 4)
1397 .await
1398 .unwrap();
1399
1400 let live_id = graph_store
1402 .upsert_entity("Live", "live", EntityType::Person, None)
1403 .await
1404 .unwrap();
1405 let stale_id = graph_store
1406 .upsert_entity("Stale", "stale", EntityType::Person, None)
1407 .await
1408 .unwrap();
1409
1410 let live_payload = serde_json::json!({
1412 "entity_id": live_id,
1413 "entity_id_str": live_id.to_string(),
1414 "name": "Live",
1415 });
1416 let stale_payload = serde_json::json!({
1417 "entity_id": stale_id,
1418 "entity_id_str": stale_id.to_string(),
1419 "name": "Stale",
1420 });
1421 emb_store
1422 .store_to_collection(
1423 "zeph_graph_entities",
1424 live_payload,
1425 vec![1.0, 0.0, 0.0, 0.0],
1426 )
1427 .await
1428 .unwrap();
1429 emb_store
1430 .store_to_collection(
1431 "zeph_graph_entities",
1432 stale_payload,
1433 vec![0.0, 1.0, 0.0, 0.0],
1434 )
1435 .await
1436 .unwrap();
1437
1438 zeph_db::query(zeph_db::sql!("DELETE FROM graph_entities WHERE id = ?"))
1440 .bind(stale_id)
1441 .execute(&pool)
1442 .await
1443 .unwrap();
1444
1445 let deleted = cleanup_stale_entity_embeddings(&graph_store, &emb_store)
1446 .await
1447 .unwrap();
1448 assert_eq!(deleted, 1, "exactly one stale point should be removed");
1449
1450 let remaining = emb_store
1452 .scroll_all_entity_ids("zeph_graph_entities")
1453 .await
1454 .unwrap();
1455 assert_eq!(remaining.len(), 1);
1456 assert_eq!(remaining[0].1, live_id);
1457 }
1458}