//! GraphRAG: knowledge-graph construction and retrieval-augmented generation.
//!
//! The crate is organized as a set of feature-gated modules; the [`GraphRAG`]
//! struct at the root ties configuration, graph building, retrieval, and
//! (optionally) LLM answer generation together.
#![warn(missing_docs)]
#![warn(clippy::all)]
/// Configuration types for the library (see [`Config`]).
pub mod config;
/// Core data model: documents, chunks, entities, relationships, the
/// knowledge graph, and error types.
pub mod core;
/// Text processing and chunking utilities.
pub mod text;
/// Vector support for the GraphRAG pipeline.
pub mod vector;
/// Graph structures and algorithms (PageRank and Leiden community
/// detection behind their respective features).
pub mod graph;
/// Entity extraction: pattern-based and LLM gleaning-based extractors.
pub mod entity;
/// Retrieval system: hybrid queries, search results, and PageRank-based
/// retrieval.
pub mod retrieval;
/// Answer generation support; available with the `async` feature.
#[cfg(feature = "async")]
pub mod generation;
/// Storage backends (e.g. in-memory storage).
#[cfg(any(feature = "memory-storage", feature = "persistent-storage", feature = "async"))]
pub mod storage;

/// Persistence support.
pub mod persistence;
/// Query handling.
pub mod query;
/// Builder-style construction helpers.
pub mod builder;
/// Summarization support.
pub mod summarization;
/// Ollama LLM client integration.
pub mod ollama;
/// Natural-language processing helpers.
pub mod nlp;
/// Embedding support.
pub mod embeddings;

/// Processing pipeline support.
pub mod pipeline;

/// Parallel processing support (requires the `parallel-processing` feature).
#[cfg(feature = "parallel-processing")]
pub mod parallel;

/// LightRAG-style dual-level retrieval (requires the `lightrag` feature).
#[cfg(feature = "lightrag")]
pub mod lightrag;

/// Result reranking (cross-encoder reranking behind `cross-encoder`).
pub mod reranking;

/// Monitoring support.
pub mod monitoring;

/// Evaluation support.
pub mod evaluation;

/// API support (requires the `api` feature).
#[cfg(feature = "api")]
pub mod api;

/// Inference support.
pub mod inference;

/// Corpus processing (requires the `corpus-processing` feature).
#[cfg(feature = "corpus-processing")]
pub mod corpus;

/// Async GraphRAG interface (requires the `async` feature).
#[cfg(feature = "async")]
pub mod async_graphrag;

/// Async processing utilities (requires the `async` feature).
#[cfg(feature = "async")]
pub mod async_processing;

/// Caching support (requires the `caching` feature).
#[cfg(feature = "caching")]
pub mod caching;

/// Function-calling support (requires the `function-calling` feature).
#[cfg(feature = "function-calling")]
pub mod function_calling;

/// Incremental update support (requires the `incremental` feature).
#[cfg(feature = "incremental")]
pub mod incremental;

/// RoGRAG support (requires the `rograg` feature).
#[cfg(feature = "rograg")]
pub mod rograg;
/// Convenience re-exports of the most commonly used types.
///
/// `use graphrag::prelude::*;` brings the configuration type and the core
/// document/graph/error types into scope in one line.
pub mod prelude {
    pub use crate::config::Config;
    pub use crate::core::{
        Document, DocumentId, Entity, EntityId, KnowledgeGraph,
        GraphRAGError, Result,
    };
}
162
// Root-level re-exports so the most common types are reachable without
// spelling out the module path.
pub use crate::config::Config;
pub use crate::core::{
    ChunkId, Document, DocumentId, Entity, EntityId, EntityMention,
    ErrorContext, ErrorSeverity, GraphRAGError, KnowledgeGraph,
    Relationship, Result, TextChunk,
};

// Core abstraction traits; only compiled with the `async` feature.
#[cfg(feature = "async")]
pub use crate::core::traits::{
    Embedder, EntityExtractor, GraphStore, LanguageModel,
    Retriever, Storage, VectorStore,
};

#[cfg(feature = "memory-storage")]
pub use crate::storage::MemoryStorage;

// LightRAG dual-level retrieval types.
#[cfg(feature = "lightrag")]
pub use crate::lightrag::{
    DualLevelRetriever, DualRetrievalConfig, DualRetrievalResults,
    KeywordExtractor, KeywordExtractorConfig, DualLevelKeywords,
    MergeStrategy, SemanticSearcher,
};

// Graph-algorithm re-exports, gated per algorithm feature.
#[cfg(feature = "pagerank")]
pub use crate::graph::pagerank::{
    PageRankConfig, PersonalizedPageRank,
};

#[cfg(feature = "leiden")]
pub use crate::graph::leiden::{
    HierarchicalCommunities, LeidenConfig, LeidenCommunityDetector,
};

#[cfg(feature = "cross-encoder")]
pub use crate::reranking::cross_encoder::{
    CrossEncoder, CrossEncoderConfig, ConfidenceCrossEncoder,
    RankedResult, RerankingStats,
};

// PageRank-based retrieval entry points.
#[cfg(feature = "pagerank")]
pub use crate::retrieval::pagerank_retrieval::{
    PageRankRetrievalSystem, ScoredResult,
};

#[cfg(feature = "pagerank")]
pub use crate::retrieval::hipporag_ppr::{
    HippoRAGConfig, HippoRAGRetriever, Fact,
};
/// High-level entry point for building a knowledge graph from documents and
/// querying it.
///
/// Construct with [`GraphRAG::new`], call [`GraphRAG::initialize`], add
/// documents, then query (e.g. via `ask`). The graph and retrieval system are
/// held as `Option`s because they are created lazily by `initialize`.
pub struct GraphRAG {
    // Configuration driving chunking, extraction, and retrieval.
    config: Config,
    // `None` until `initialize` is called.
    knowledge_graph: Option<KnowledgeGraph>,
    // `None` until `initialize` is called.
    retrieval_system: Option<retrieval::RetrievalSystem>,
    // Reserved for parallel processing; currently unused (hence dead_code).
    #[cfg(feature = "parallel-processing")]
    #[allow(dead_code)]
    parallel_processor: Option<parallel::ParallelProcessor>,
}
261
262impl GraphRAG {
263 pub fn new(config: Config) -> Result<Self> {
265 Ok(Self {
266 config,
267 knowledge_graph: None,
268 retrieval_system: None,
269 #[cfg(feature = "parallel-processing")]
270 parallel_processor: None,
271 })
272 }
273
274 pub fn initialize(&mut self) -> Result<()> {
282 self.knowledge_graph = Some(KnowledgeGraph::new());
283 self.retrieval_system = Some(retrieval::RetrievalSystem::new(&self.config)?);
284 Ok(())
285 }
286
287 pub fn add_document_from_text(&mut self, text: &str) -> Result<()> {
289 use crate::text::TextProcessor;
290 use indexmap::IndexMap;
291
292 let doc_id = DocumentId::new(
294 format!("doc_{}", uuid::Uuid::new_v4().simple())
295 );
296
297 let document = Document {
298 id: doc_id,
299 title: "Document".to_string(),
300 content: text.to_string(),
301 metadata: IndexMap::new(),
302 chunks: Vec::new(),
303 };
304
305 let text_processor = TextProcessor::new(
306 self.config.text.chunk_size,
307 self.config.text.chunk_overlap
308 )?;
309 let chunks = text_processor.chunk_text(&document)?;
310
311 let document_with_chunks = Document {
312 chunks,
313 ..document
314 };
315
316 self.add_document(document_with_chunks)
317 }
318
319 pub fn add_document(&mut self, document: Document) -> Result<()> {
321 let graph = self.knowledge_graph.as_mut()
322 .ok_or_else(|| GraphRAGError::Config {
323 message: "Knowledge graph not initialized".to_string(),
324 })?;
325
326 graph.add_document(document)
327 }
328
329 pub fn clear_graph(&mut self) -> Result<()> {
334 let graph = self.knowledge_graph.as_mut()
335 .ok_or_else(|| GraphRAGError::Config {
336 message: "Knowledge graph not initialized".to_string(),
337 })?;
338
339 #[cfg(feature = "tracing")]
340 tracing::info!("Clearing knowledge graph (preserving documents and chunks)");
341
342 graph.clear_entities_and_relationships();
343 Ok(())
344 }
345
346 #[cfg(feature = "async")]
357 pub async fn build_graph(&mut self) -> Result<()> {
358 use indicatif::{ProgressBar, ProgressStyle};
359
360 let graph = self.knowledge_graph.as_mut()
361 .ok_or_else(|| GraphRAGError::Config {
362 message: "Knowledge graph not initialized".to_string(),
363 })?;
364
365 let chunks: Vec<_> = graph.chunks().cloned().collect();
366 let total_chunks = chunks.len();
367
368 #[cfg(feature = "tracing")]
376 tracing::info!(
377 "build_graph() - Config state: approach='{}', use_gleaning={}, ollama.enabled={}",
378 self.config.approach,
379 self.config.entities.use_gleaning,
380 self.config.ollama.enabled
381 );
382
383 if self.config.entities.use_gleaning && self.config.ollama.enabled {
384 #[cfg(feature = "async")]
386 {
387 use crate::entity::GleaningEntityExtractor;
388 use crate::ollama::OllamaClient;
389
390 #[cfg(feature = "tracing")]
391 tracing::info!(
392 "Using LLM-based entity extraction with gleaning (max_rounds: {})",
393 self.config.entities.max_gleaning_rounds
394 );
395
396 let client = OllamaClient::new(self.config.ollama.clone());
398
399 let gleaning_config = crate::entity::GleaningConfig {
401 max_gleaning_rounds: self.config.entities.max_gleaning_rounds,
402 completion_threshold: 0.8,
403 entity_confidence_threshold: self.config.entities.min_confidence as f64,
404 use_llm_completion_check: true,
405 entity_types: if self.config.entities.entity_types.is_empty() {
406 vec![
407 "PERSON".to_string(),
408 "ORGANIZATION".to_string(),
409 "LOCATION".to_string(),
410 ]
411 } else {
412 self.config.entities.entity_types.clone()
413 },
414 temperature: 0.1,
415 max_tokens: 1500,
416 };
417
418 let extractor = GleaningEntityExtractor::new(client, gleaning_config);
420
421 let pb = ProgressBar::new(total_chunks as u64);
423 pb.set_style(
424 ProgressStyle::default_bar()
425 .template(" [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len} chunks ({eta})")
426 .expect("Invalid progress bar template")
427 .progress_chars("=>-")
428 );
429 pb.set_message("Extracting entities with LLM");
430
431 for (idx, chunk) in chunks.iter().enumerate() {
433 pb.set_message(format!("Chunk {}/{} (gleaning with {} rounds)",
434 idx + 1, total_chunks, self.config.entities.max_gleaning_rounds));
435
436 let (entities, relationships) = extractor.extract_with_gleaning(chunk).await?;
437
438 for entity in entities {
440 graph.add_entity(entity)?;
441 }
442
443 for relationship in relationships {
445 if let Err(e) = graph.add_relationship(relationship) {
446 #[cfg(feature = "tracing")]
447 tracing::warn!(
448 "Failed to add relationship: {} -> {} ({}). Error: {}",
449 e.to_string().split("entity ").nth(1).unwrap_or("unknown"),
450 e.to_string().split("entity ").nth(2).unwrap_or("unknown"),
451 "relationship",
452 e
453 );
454 }
455 }
456
457 pb.inc(1);
458 }
459
460 pb.finish_with_message("Entity extraction complete");
461 }
462
463 } else {
464 use crate::entity::EntityExtractor;
466
467 #[cfg(feature = "tracing")]
468 tracing::info!("Using pattern-based entity extraction");
469
470 let extractor = EntityExtractor::new(self.config.entities.min_confidence)?;
471
472 let pb = ProgressBar::new(total_chunks as u64);
474 pb.set_style(
475 ProgressStyle::default_bar()
476 .template(" [{elapsed_precise}] [{bar:40.green/blue}] {pos}/{len} chunks ({eta})")
477 .expect("Invalid progress bar template")
478 .progress_chars("=>-")
479 );
480 pb.set_message("Extracting entities (pattern-based)");
481
482 for (idx, chunk) in chunks.iter().enumerate() {
483 pb.set_message(format!("Chunk {}/{} (pattern-based)", idx + 1, total_chunks));
484
485 let entities = extractor.extract_from_chunk(chunk)?;
486 for entity in entities {
487 graph.add_entity(entity)?;
488 }
489
490 pb.inc(1);
491 }
492
493 pb.finish_with_message("Entity extraction complete");
494
495 if self.config.graph.extract_relationships {
499 let all_entities: Vec<_> = graph.entities().cloned().collect();
500
501 let rel_pb = ProgressBar::new(total_chunks as u64);
503 rel_pb.set_style(
504 ProgressStyle::default_bar()
505 .template(" [{elapsed_precise}] [{bar:40.yellow/blue}] {pos}/{len} chunks ({eta})")
506 .expect("Invalid progress bar template")
507 .progress_chars("=>-")
508 );
509 rel_pb.set_message("Extracting relationships");
510
511 for (idx, chunk) in chunks.iter().enumerate() {
512 rel_pb.set_message(format!("Chunk {}/{} (relationships)", idx + 1, total_chunks));
513 let chunk_entities: Vec<_> = all_entities
515 .iter()
516 .filter(|e| e.mentions.iter().any(|m| m.chunk_id == chunk.id))
517 .cloned()
518 .collect();
519
520 if chunk_entities.len() < 2 {
521 rel_pb.inc(1);
522 continue; }
524
525 let relationships = extractor.extract_relationships(&chunk_entities, chunk)?;
527
528 for (source_id, target_id, relation_type) in relationships {
530 let relationship = Relationship {
531 source: source_id.clone(),
532 target: target_id.clone(),
533 relation_type: relation_type.clone(),
534 confidence: self.config.graph.relationship_confidence_threshold,
535 context: vec![chunk.id.clone()],
536 };
537
538 if let Err(_e) = graph.add_relationship(relationship) {
540 #[cfg(feature = "tracing")]
541 tracing::debug!(
542 "Failed to add relationship: {} -> {} ({}). Error: {}",
543 source_id,
544 target_id,
545 relation_type,
546 _e
547 );
548 }
549 }
550
551 rel_pb.inc(1);
552 }
553
554 rel_pb.finish_with_message("Relationship extraction complete");
555 } } Ok(())
559 }
560
561 #[cfg(not(feature = "async"))]
566 pub fn build_graph(&mut self) -> Result<()> {
567 use crate::entity::EntityExtractor;
568
569 let graph = self.knowledge_graph.as_mut()
570 .ok_or_else(|| GraphRAGError::Config {
571 message: "Knowledge graph not initialized".to_string(),
572 })?;
573
574 let chunks: Vec<_> = graph.chunks().cloned().collect();
575
576 #[cfg(feature = "tracing")]
577 tracing::info!("Using pattern-based entity extraction (sync mode)");
578
579 let extractor = EntityExtractor::new(self.config.entities.min_confidence)?;
580
581 for chunk in &chunks {
582 let entities = extractor.extract_from_chunk(chunk)?;
583 for entity in entities {
584 graph.add_entity(entity)?;
585 }
586 }
587
588 if self.config.graph.extract_relationships {
590 let all_entities: Vec<_> = graph.entities().cloned().collect();
591
592 for chunk in &chunks {
593 let chunk_entities: Vec<_> = all_entities
594 .iter()
595 .filter(|e| e.mentions.iter().any(|m| m.chunk_id == chunk.id))
596 .cloned()
597 .collect();
598
599 if chunk_entities.len() < 2 {
600 continue;
601 }
602
603 let relationships = extractor.extract_relationships(&chunk_entities, chunk)?;
604
605 for (source_id, target_id, relation_type) in relationships {
606 let relationship = Relationship {
607 source: source_id.clone(),
608 target: target_id.clone(),
609 relation_type: relation_type.clone(),
610 confidence: self.config.graph.relationship_confidence_threshold,
611 context: vec![chunk.id.clone()],
612 };
613
614 if let Err(_e) = graph.add_relationship(relationship) {
615 #[cfg(feature = "tracing")]
616 tracing::debug!(
617 "Failed to add relationship: {} -> {} ({}). Error: {}",
618 source_id,
619 target_id,
620 relation_type,
621 _e
622 );
623 }
624 }
625 }
626 }
627
628 Ok(())
629 }
630
631 #[cfg(feature = "async")]
633 pub async fn ask(&mut self, query: &str) -> Result<String> {
634 self.ensure_initialized()?;
635
636 if self.has_documents() && !self.has_graph() {
637 self.build_graph().await?;
638 }
639
640 let search_results = self.query_internal_with_results(query)?;
642
643 if self.config.ollama.enabled {
645 return self.generate_semantic_answer_from_results(query, &search_results).await;
646 }
647
648 let formatted: Vec<String> = search_results
650 .into_iter()
651 .map(|r| format!("{} (score: {:.2})", r.content, r.score))
652 .collect();
653 Ok(formatted.join("\n"))
654 }
655
656 #[cfg(not(feature = "async"))]
658 pub fn ask(&mut self, query: &str) -> Result<String> {
659 self.ensure_initialized()?;
660
661 if self.has_documents() && !self.has_graph() {
662 self.build_graph()?;
663 }
664
665 let results = self.query_internal(query)?;
666 Ok(results.join("\n"))
667 }
668
669 pub fn query_internal(&mut self, query: &str) -> Result<Vec<String>> {
671 let retrieval = self.retrieval_system.as_mut()
672 .ok_or_else(|| GraphRAGError::Config {
673 message: "Retrieval system not initialized".to_string(),
674 })?;
675
676 let graph = self.knowledge_graph.as_mut()
677 .ok_or_else(|| GraphRAGError::Config {
678 message: "Knowledge graph not initialized".to_string(),
679 })?;
680
681 retrieval.add_embeddings_to_graph(graph)?;
683
684 let search_results = retrieval.hybrid_query(query, graph)?;
686
687 let result_strings: Vec<String> = search_results
689 .into_iter()
690 .map(|r| format!("{} (score: {:.2})", r.content, r.score))
691 .collect();
692
693 Ok(result_strings)
694 }
695
696 fn query_internal_with_results(&mut self, query: &str) -> Result<Vec<retrieval::SearchResult>> {
698 let retrieval = self.retrieval_system.as_mut()
699 .ok_or_else(|| GraphRAGError::Config {
700 message: "Retrieval system not initialized".to_string(),
701 })?;
702
703 let graph = self.knowledge_graph.as_mut()
704 .ok_or_else(|| GraphRAGError::Config {
705 message: "Knowledge graph not initialized".to_string(),
706 })?;
707
708 retrieval.add_embeddings_to_graph(graph)?;
710
711 retrieval.hybrid_query(query, graph)
713 }
714
715
716 #[cfg(feature = "async")]
718 async fn generate_semantic_answer_from_results(&self, query: &str, search_results: &[retrieval::SearchResult]) -> Result<String> {
719 use crate::ollama::OllamaClient;
720
721 let graph = self.knowledge_graph.as_ref()
722 .ok_or_else(|| GraphRAGError::Config {
723 message: "Knowledge graph not initialized".to_string(),
724 })?;
725
726 let mut context_parts = Vec::new();
728
729 for result in search_results.iter().take(5) {
730 if result.result_type == retrieval::ResultType::Entity && !result.source_chunks.is_empty() {
732 for chunk_id_str in result.source_chunks.iter().take(2) {
734 let chunk_id = ChunkId::new(chunk_id_str.clone());
735 if let Some(chunk) = graph.chunks().find(|c| c.id == chunk_id) {
736 let chunk_excerpt = if chunk.content.len() > 400 {
737 format!("{}...", &chunk.content[..400])
738 } else {
739 chunk.content.clone()
740 };
741
742 context_parts.push(format!(
743 "[Entity: {} | Relevance: {:.2}]\n{}",
744 result.content.split(" (score:").next().unwrap_or(&result.content),
745 result.score,
746 chunk_excerpt
747 ));
748 }
749 }
750 }
751 else if result.result_type == retrieval::ResultType::Chunk {
753 let chunk_excerpt = if result.content.len() > 400 {
754 format!("{}...", &result.content[..400])
755 } else {
756 result.content.clone()
757 };
758
759 context_parts.push(format!(
760 "[Chunk | Relevance: {:.2}]\n{}",
761 result.score,
762 chunk_excerpt
763 ));
764 }
765 else {
767 context_parts.push(format!(
768 "[{:?} | Relevance: {:.2}]\n{}",
769 result.result_type,
770 result.score,
771 result.content
772 ));
773 }
774 }
775
776 let context = context_parts.join("\n\n---\n\n");
777
778 if context.trim().is_empty() {
779 return Ok("No relevant information found in the knowledge graph.".to_string());
780 }
781
782 let client = OllamaClient::new(self.config.ollama.clone());
784
785 let prompt = format!(
787 "You are a knowledgeable assistant specialized in answering questions based on a knowledge graph.\n\n\
788 IMPORTANT INSTRUCTIONS:\n\
789 - Answer ONLY using information from the provided context below\n\
790 - Provide direct, conversational, and natural responses\n\
791 - Do NOT show your reasoning process or use <think> tags\n\
792 - If the context lacks sufficient information, clearly state: \"I don't have enough information to answer this question.\"\n\
793 - Keep answers concise but complete (2-4 sentences)\n\
794 - Use a natural, helpful tone as if speaking to a person\n\n\
795 CONTEXT:\n\
796 {}\n\n\
797 QUESTION: {}\n\n\
798 ANSWER (direct response only, no reasoning):",
799 context, query
800 );
801
802 match client.generate(&prompt).await {
804 Ok(answer) => {
805 let cleaned_answer = Self::remove_thinking_tags(&answer);
807 Ok(cleaned_answer.trim().to_string())
808 },
809 Err(e) => {
810 #[cfg(feature = "tracing")]
811 tracing::warn!("LLM generation failed: {}. Falling back to search results.", e);
812
813 Ok(format!("Relevant information from knowledge graph:\n\n{}", context))
815 }
816 }
817 }
818
819 #[cfg(feature = "async")]
824 fn remove_thinking_tags(text: &str) -> String {
825 let mut result = text.to_string();
828
829 loop {
830 if let Some(start) = result.find("<think>") {
832 if let Some(end) = result[start..].find("</think>") {
834 let end_pos = start + end + "</think>".len();
836 result.replace_range(start..end_pos, "");
837 } else {
838 result.replace_range(start..start + "<think>".len(), "");
840 break;
841 }
842 } else {
843 break;
845 }
846 }
847
848 result.trim().to_string()
849 }
850
851 pub fn is_initialized(&self) -> bool {
853 self.knowledge_graph.is_some() && self.retrieval_system.is_some()
854 }
855
856 pub fn has_documents(&self) -> bool {
858 if let Some(graph) = &self.knowledge_graph {
859 graph.chunks().count() > 0
860 } else {
861 false
862 }
863 }
864
865 pub fn has_graph(&self) -> bool {
867 if let Some(graph) = &self.knowledge_graph {
868 graph.entities().count() > 0
869 } else {
870 false
871 }
872 }
873
874 pub fn knowledge_graph(&self) -> Option<&KnowledgeGraph> {
876 self.knowledge_graph.as_ref()
877 }
878
879 pub fn get_entity(&self, entity_id: &str) -> Option<&Entity> {
881 if let Some(graph) = &self.knowledge_graph {
882 graph.entities().find(|e| e.id.0 == entity_id)
883 } else {
884 None
885 }
886 }
887
888 pub fn get_entity_relationships(&self, entity_id: &str) -> Vec<&Relationship> {
890 if let Some(graph) = &self.knowledge_graph {
891 let entity_id_obj = EntityId::new(entity_id.to_string());
892 graph.relationships()
893 .filter(|r| r.source == entity_id_obj || r.target == entity_id_obj)
894 .collect()
895 } else {
896 Vec::new()
897 }
898 }
899
900 pub fn get_chunk(&self, chunk_id: &str) -> Option<&TextChunk> {
902 if let Some(graph) = &self.knowledge_graph {
903 graph.chunks().find(|c| c.id.0 == chunk_id)
904 } else {
905 None
906 }
907 }
908
909 #[cfg(all(feature = "pagerank", feature = "async"))]
911 pub async fn ask_with_pagerank(&mut self, query: &str) -> Result<Vec<retrieval::pagerank_retrieval::ScoredResult>> {
912 use crate::retrieval::pagerank_retrieval::PageRankRetrievalSystem;
913
914 self.ensure_initialized()?;
915
916 if self.has_documents() && !self.has_graph() {
917 self.build_graph().await?;
918 }
919
920 let graph = self.knowledge_graph.as_ref()
921 .ok_or_else(|| GraphRAGError::Config {
922 message: "Knowledge graph not initialized".to_string(),
923 })?;
924
925 let pagerank_system = PageRankRetrievalSystem::new(10);
926 pagerank_system.search_with_pagerank(query, graph, Some(5))
927 }
928
929 #[cfg(all(feature = "pagerank", not(feature = "async")))]
931 pub fn ask_with_pagerank(&mut self, query: &str) -> Result<Vec<retrieval::pagerank_retrieval::ScoredResult>> {
932 use crate::retrieval::pagerank_retrieval::PageRankRetrievalSystem;
933
934 self.ensure_initialized()?;
935
936 if self.has_documents() && !self.has_graph() {
937 self.build_graph()?;
938 }
939
940 let graph = self.knowledge_graph.as_ref()
941 .ok_or_else(|| GraphRAGError::Config {
942 message: "Knowledge graph not initialized".to_string(),
943 })?;
944
945 let pagerank_system = PageRankRetrievalSystem::new(10);
946 pagerank_system.search_with_pagerank(query, graph, Some(5))
947 }
948
949 pub fn knowledge_graph_mut(&mut self) -> Option<&mut KnowledgeGraph> {
951 self.knowledge_graph.as_mut()
952 }
953
954 #[cfg(feature = "json5-support")]
974 pub fn from_json5_file<P: AsRef<std::path::Path>>(path: P) -> Result<Self> {
975 use crate::config::json5_loader::load_json5_config;
976 use crate::config::setconfig::SetConfig;
977
978 let set_config = load_json5_config::<SetConfig, _>(path)?;
979 let config = set_config.to_graphrag_config();
980 Self::new(config)
981 }
982
983 pub fn from_config_file<P: AsRef<std::path::Path>>(path: P) -> Result<Self> {
1006 use crate::config::setconfig::SetConfig;
1007
1008 let set_config = SetConfig::from_file(path)?;
1009 let config = set_config.to_graphrag_config();
1010 Self::new(config)
1011 }
1012
1013 #[cfg(feature = "async")]
1043 pub async fn from_config_and_document<P1, P2>(
1044 config_path: P1,
1045 document_path: P2
1046 ) -> Result<Self>
1047 where
1048 P1: AsRef<std::path::Path>,
1049 P2: AsRef<std::path::Path>,
1050 {
1051 let mut graphrag = Self::from_config_file(config_path)?;
1053
1054 graphrag.initialize()?;
1056
1057 let content = std::fs::read_to_string(document_path)
1059 .map_err(GraphRAGError::Io)?;
1060
1061 graphrag.add_document_from_text(&content)?;
1062
1063 graphrag.build_graph().await?;
1065
1066 Ok(graphrag)
1067 }
1068
1069 fn ensure_initialized(&mut self) -> Result<()> {
1071 if !self.is_initialized() {
1072 self.initialize()
1073 } else {
1074 Ok(())
1075 }
1076 }
1077}
1078
#[cfg(test)]
mod tests {
    use super::*;

    /// Constructing a `GraphRAG` from the default configuration must succeed.
    #[test]
    fn test_graphrag_creation() {
        let result = GraphRAG::new(Config::default());
        assert!(result.is_ok());
    }
}