pub struct LLMBrain { /* private fields */ }

Expand description
Core library struct holding initialized clients and configuration.
This is the main entry point for interacting with the LLMBrain library.
It manages database connections, LLM client, and ConceptNet integration.
Implementations§
Source § impl LLMBrain
impl LLMBrain
Source
pub async fn launch() -> Result<Self>
pub async fn launch() -> Result<Self>
Loads configuration, initializes clients, and returns an instance of
LLMBrain.
This should be called once at application startup. It performs:
- Loading and parsing configuration
- Setting up the SurrealDB connection
- Initializing the LLM client
- Initializing the ConceptNet client (if configured)
Examples found in repository:
examples/comprehensive_demo.rs (line 226)
223async fn main() -> Result<()> {
224 // Initialize LLMBrain instance
225 println!("Initializing LLMBrain...");
226 let llm_brain = LLMBrain::launch().await?;
227
228 // Demonstrate different memory types
229 demonstrate_memory_types(&llm_brain).await?;
230
231 // Demonstrate memory recall
232 demonstrate_memory_recall(&llm_brain).await?;
233
234 // Demonstrate memory lookup
235 demonstrate_memory_lookup(&llm_brain).await?;
236
237 println!("\nComprehensive demonstration completed successfully!");
238 Ok(())
239}

More examples
examples/file_rag.rs (line 162)
159async fn main() -> Result<()> {
160 println!("--- Starting File RAG Example ---");
161 ensure_config_exists()?;
162 let llm_brain = LLMBrain::launch().await?;
163
164 // --- Ingestion Step ---
165 // IMPORTANT: Adjust this path to your actual test PDF file
166 // Using the path relative to workspace root as seen in python example context
167 let pdf_path = PathBuf::from("tests/document.pdf");
168
169 if !pdf_path.exists() {
170 eprintln!("Error: PDF file not found at {}", pdf_path.display());
171 eprintln!("Please place a PDF file at that location or update the path in the code.");
172 return Ok(()); // Exit gracefully
173 }
174
175 // Ingest the PDF content
176 if let Err(e) = ingest_pdf(&llm_brain, &pdf_path).await {
177 eprintln!("Error during PDF ingestion: {e}");
178 // Decide if program should terminate or continue
179 }
180
181 // --- Query Step ---
182 let test_queries = [
183 "What is the main topic of the document?",
184 "Summarize the key points from the document.",
185 "What are the main conclusions drawn in the document?",
186 "what is silha center",
187 "who is Charlotte Higgins",
188 "Explain the lawsuits",
189 "Explain OpenAI's Involvement",
190 "who is Mike Masnick",
191 "content moderation liability shield", // Added query
192 ];
193
194 if let Err(e) = run_queries(&llm_brain, &test_queries).await {
195 eprintln!("Error during querying: {e}");
196 }
197
198 println!("\n--- File RAG Example Finished ---");
199 println!("Note: Database is at ./llm_brain_file_rag_db");
200
201 Ok(())
202}

examples/llm_brain_rag.rs (line 85)
82async fn main() -> Result<()> {
83 println!("--- Starting LLMBrain RAG Example (Rust/LLMBrain Version) ---");
84 ensure_config_exists()?;
85 let llm_brain = LLMBrain::launch().await?;
86
87 // --- Add Memories Step ---
88 println!("\nAdding example memories...");
89 let examples = vec![
90 TestMemoryData {
91 content: "Python is a programming language created by Guido van Rossum in 1991. It supports object-oriented, imperative, and functional programming. Commonly used for web development, data science, automation.".to_owned(),
92 metadata: json!({
93 "entity_name": "Python_Language",
94 "memory_type": "Semantic",
95 "properties": {"creator": "Guido van Rossum", "year": 1991, "paradigms": ["OOP", "Imperative", "Functional"], "uses": ["web dev", "data science", "automation"]},
96 }),
97 },
98 TestMemoryData {
99 content: "Completed first Python project today in home office. Took 2 hours, successful. Reviewed code.".to_owned(),
100 metadata: json!({
101 "entity_name": "First_Python_Project",
102 "memory_type": "Episodic",
103 "properties": {"timestamp": Utc::now().timestamp(), "location": "home office", "duration_hours": 2, "outcome": "successful"},
104 }),
105 },
106 TestMemoryData {
107 content: "Tesla Model 3 is red, made in 2023, parked in the garage. Range 358 miles, 0-60 mph in 3.1 seconds.".to_owned(),
108 metadata: json!({
109 "entity_name": "Tesla_Model_3_Red",
110 "memory_type": "Semantic",
111 "properties": {"color": "red", "year": 2023, "location": "garage", "range_miles": 358, "acceleration_0_60_sec": 3.1},
112 }),
113 },
114 ];
115
116 for (i, data) in examples.into_iter().enumerate() {
117 println!("\nAdding Example {}", i + 1);
118 match llm_brain
119 .add_memory(data.content.clone(), data.metadata.clone())
120 .await
121 {
122 Ok(id) => println!(" Added successfully. ID: {id}"),
123 Err(e) => eprintln!(" Failed to add: {e}"),
124 }
125 }
126
127 // --- Query Step (Simulating RAG Retrieval) ---
128 let test_queries = [
129 "What programming language was created by Guido van Rossum?",
130 "Tell me about the Tesla Model 3's specifications.",
131 "What happened during the first Python project?",
132 ];
133
134 if let Err(e) = run_rag_queries(&llm_brain, &test_queries).await {
135 eprintln!("Error during querying: {e}");
136 }
137
138 println!("\n--- LLMBrain RAG Example Finished ---");
139 println!("Note: Database is at ./llm_brain_rag_db");
140
141 Ok(())
142}

examples/basic_demo.rs (line 17)
10async fn main() -> Result<()> {
11 println!("Initializing LLMBrain for basic demo...");
12
13 // Ensure config exists before launching
14 ensure_config_exists()?;
15
16 // Initialize LLMBrain
17 let llm_brain = LLMBrain::launch().await?;
18 println!("LLMBrain initialized.");
19
20 // Add a semantic memory
21 println!("\nAdding semantic memory...");
22 let cat_data = json!({
23 "name": "Cat",
24 "column": "Semantic",
25 "properties": {
26 "type": "Animal",
27 "features": ["fur", "whiskers", "tail"],
28 "diet": "carnivore"
29 },
30 "relationships": {
31 "preys_on": ["mice", "birds"],
32 "related_to": ["tiger", "lion"]
33 }
34 });
35
36 let cat_content = "The cat is a small carnivorous mammal with fur, whiskers, and a tail. It preys on mice and birds, and is related to tigers and lions.";
37 let result = llm_brain
38 .add_memory(cat_content.to_owned(), cat_data)
39 .await?;
40 println!("Semantic memory added with ID: {result}");
41
42 // Add an episodic memory
43 println!("\nAdding episodic memory...");
44 let event_data = json!({
45 "name": "First Pet",
46 "column": "Episodic",
47 "properties": {
48 "timestamp": Utc::now().to_rfc3339(),
49 "action": "Got my first cat",
50 "location": "Pet Store",
51 "emotion": "happy",
52 "participants": ["family", "pet store staff"]
53 }
54 });
55
56 let event_content = "Today I went to the pet store with my family and got my first cat. Everyone was very happy.";
57 let result = llm_brain
58 .add_memory(event_content.to_owned(), event_data)
59 .await?;
60 println!("Episodic memory added with ID: {result}");
61
62 // Add a procedural memory
63 println!("\nAdding procedural memory...");
64 let procedure_data = json!({
65 "name": "Feed Cat",
66 "column": "Procedural",
67 "properties": {
68 "steps": [
69 "Get cat food from cabinet",
70 "Fill bowl with appropriate amount",
71 "Add fresh water to water bowl",
72 "Call cat for feeding"
73 ],
74 "frequency": "twice daily",
75 "importance": "high"
76 }
77 });
78
79 let procedure_content = "To feed a cat: First, get cat food from the cabinet. Then fill the bowl with an appropriate amount. Add fresh water to the water bowl. Finally, call the cat for feeding. This should be done twice daily.";
80 let result = llm_brain
81 .add_memory(procedure_content.to_owned(), procedure_data)
82 .await?;
83 println!("Procedural memory added with ID: {result}");
84
85 // Query memories
86 println!("\nQuerying cat-related memories...");
87 let cat_results = llm_brain.recall("cat animal features", 3).await?;
88 println!("Cat-related memories:");
89 for (memory, score) in cat_results {
90 println!("Score: {:.4}, Content: {}", score, memory.content);
91 println!(
92 "Metadata: {}",
93 serde_json::to_string_pretty(&memory.metadata)?
94 );
95 println!();
96 }
97
98 println!("\nQuerying feeding procedure...");
99 let feeding_results = llm_brain.recall("how to feed a cat", 2).await?;
100 println!("Feeding procedure results:");
101 for (memory, score) in feeding_results {
102 println!("Score: {:.4}, Content: {}", score, memory.content);
103 println!(
104 "Metadata: {}",
105 serde_json::to_string_pretty(&memory.metadata)?
106 );
107 println!();
108 }
109
110 println!("\nDemo completed.");
111 println!("Note: Database file created at ./llm_brain_example_db (if it didn't exist).");
112
113 Ok(())
114}

examples/llm_brain_basic_demo.rs (line 50)
43async fn main() -> Result<()> {
44 println!("\nStarting LLMBrain Basic Demo (Rust Version)...");
45
46 // Ensure config exists
47 ensure_config_exists()?;
48
49 // Initialize LLMBrain (handles DB connection based on config)
50 let llm_brain = LLMBrain::launch().await?;
51 println!("LLMBrain initialized.");
52
53 // --- Add Semantic Memory (Cat) ---
54 println!("\nAdding basic semantic memory for 'cat'...");
55 let cat_content = "A cat is a medium-sized, furry, agile, carnivorous animal often found in homes or outdoors. Common behaviors include hunting, sleeping, and grooming.".to_owned();
56 let cat_metadata = json!({
57 "entity_name": "cat",
58 "memory_type": "Semantic",
59 "properties": {
60 "type": "animal",
61 "size": "medium",
62 "characteristics": ["furry", "agile", "carnivorous"]
63 },
64 "relationships": {
65 "habitat": ["homes", "outdoors"],
66 "behavior": ["hunting", "sleeping", "grooming"]
67 }
68 });
69 match llm_brain
70 .add_memory(cat_content.clone(), cat_metadata.clone())
71 .await
72 {
73 Ok(id) => println!("Semantic memory added successfully. ID: {id}"),
74 Err(e) => println!("Failed to add semantic memory: {e}"),
75 }
76
77 // --- Add Episodic Memory (Cat Observation) ---
78 println!("\nAdding episodic memory...");
79 let episode_content = format!(
80 "Timestamp: {}. Observed cat behavior in the Garden: Cat was chasing a butterfly. Observed by human.",
81 Utc::now().to_rfc3339()
82 );
83 let episode_metadata = json!({
84 "entity_name": "cat_observation",
85 "memory_type": "Episodic",
86 "properties": {
87 "action": "Observed cat behavior",
88 "location": "Garden",
89 "details": "Cat was chasing a butterfly"
90 },
91 "relationships": {
92 "relates_to": ["cat"],
93 "observed_by": ["human"]
94 }
95 });
96 match llm_brain
97 .add_memory(episode_content.clone(), episode_metadata.clone())
98 .await
99 {
100 Ok(id) => println!("Episodic memory added successfully. ID: {id}"),
101 Err(e) => println!("Failed to add episodic memory: {e}"),
102 }
103
104 // --- Query and Display Results ---
105 println!("\nQuerying semantic memory for 'cat':");
106 let cat_query = "information about cats";
107 match llm_brain.recall(cat_query, 2).await {
108 // Recall top 2
109 Ok(results) => {
110 println!("Found {} memories:", results.len());
111 for (fragment, score) in results {
112 println!(
113 "- Score: {:.4}, Content: {:.80}...",
114 score, fragment.content
115 );
116 println!(
117 " Metadata: {}",
118 serde_json::to_string_pretty(&fragment.metadata)?
119 );
120 }
121 }
122 Err(e) => println!("Failed to recall memories for '{cat_query}': {e}"),
123 }
124
125 println!("\nQuerying episodic memory:");
126 let episode_query = "cat observation in garden";
127 match llm_brain.recall(episode_query, 1).await {
128 Ok(results) => {
129 println!("Found {} memories:", results.len());
130 for (fragment, score) in results {
131 println!(
132 "- Score: {:.4}, Content: {:.80}...",
133 score, fragment.content
134 );
135 println!(
136 " Metadata: {}",
137 serde_json::to_string_pretty(&fragment.metadata)?
138 );
139 }
140 }
141 Err(e) => println!("Failed to recall memories for '{episode_query}': {e}"),
142 }
143
144 println!("\nDemo completed successfully!");
145 println!("Note: Database file created at ./llm_brain_basic_demo_db (if it didn't exist).");
146
147 Ok(())
148}

examples/embedding_demo.rs (line 9)
6async fn main() -> Result<()> {
7 // Initialize LLMBrain instance
8 println!("Initializing LLMBrain...");
9 let llm_brain = LLMBrain::launch().await?;
10
11 // Demo 1: Basic embedding generation and query
12 println!("\n--- Basic Embedding Demo ---");
13
14 // Add several memories
15 let memories = vec![
16 (
17 "Cats are small mammals with fur, whiskers, and tails. They eat mice and birds, and are related to tigers and lions.",
18 json!({
19 "name": "Cat",
20 "type": "Animal",
21 "properties": {
22 "characteristics": ["fur", "whiskers", "tail"],
23 "diet": "carnivorous"
24 }
25 }),
26 ),
27 (
28 "Dogs are one of the earliest domesticated animals, loyal companions to humans, with various breeds and purposes.",
29 json!({
30 "name": "Dog",
31 "type": "Animal",
32 "properties": {
33 "characteristics": ["fur", "loyalty", "keen sense of smell"],
34 "uses": ["pet", "working dog", "guide dog"]
35 }
36 }),
37 ),
38 (
39 "Birds are warm-blooded vertebrates, covered with feathers, with forelimbs evolved into wings, and most can fly.",
40 json!({
41 "name": "Bird",
42 "type": "Animal",
43 "properties": {
44 "characteristics": ["feathers", "wings", "beak"],
45 "abilities": ["flying", "nest building", "singing"]
46 }
47 }),
48 ),
49 ];
50
51 // Add memories to database
52 for (content, metadata) in memories {
53 let memory_id = llm_brain.add_memory(content.to_owned(), metadata).await?;
54 println!("Added memory: ID={memory_id}");
55 }
56
57 // Wait for embedding processing to complete
58 tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
59
60 // Query related memories using semantic search
61 println!("\nQuerying memories about 'pets':");
62 let results = llm_brain.recall("pets and human relationships", 2).await?;
63 for (i, (memory, score)) in results.iter().enumerate() {
64 println!("\nResult {}: Similarity {:.4}", i + 1, score);
65 println!("Content: {}", memory.content);
66 println!(
67 "Metadata: {}",
68 serde_json::to_string_pretty(&memory.metadata)?
69 );
70 }
71
72 // Demo 2: Long text processing
73 println!("\n--- Long Text Processing Demo ---");
74
75 // Prepare a longer text
76 let long_text = r#"
77 Artificial Intelligence (AI) is a branch of computer science aimed at creating systems capable of simulating human intelligence behaviors.
78 AI research includes multiple sub-fields such as machine learning, deep learning, natural language processing, computer vision, expert systems, etc.
79 Machine learning is one of the core technologies of AI, using statistical techniques to enable computer systems to "learn" (i.e., progressively improve performance) without explicit programming.
80 Deep learning is a subset of machine learning that uses multi-layered neural networks to process data. These networks extract features from data, with each layer building on the output of the previous one.
81 Large Language Models (LLMs) are a significant breakthrough in AI in recent years, generating human-like text, understanding context, and performing various language tasks by learning from vast amounts of text data.
82 Vector embeddings are techniques for representing text, images, or other data as points in a multi-dimensional vector space, playing an important role in information retrieval, recommendation systems, and semantic search.
83 "#;
84
85 println!("Long text content:\n{long_text}");
86
87 // Process long text using different strategies
88 println!("\nUsing default strategy (no chunking):");
89 let embedding1 = llm_brain.process_long_text(long_text, None).await?;
90 println!("Generated embedding vector length: {}", embedding1.len());
91
92 println!("\nUsing chunk and average strategy:");
93 let chunk_strategy = ChunkingStrategy::ChunkAndAverage {
94 chunk_size: 100,
95 chunk_overlap: 20,
96 };
97 let embedding2 = llm_brain
98 .process_long_text(long_text, Some(chunk_strategy))
99 .await?;
100 println!("Generated embedding vector length: {}", embedding2.len());
101
102 // Calculate similarity between embeddings generated by different strategies
103 let similarity = cosine_similarity(&embedding1, &embedding2);
104 println!("\nCosine similarity between embeddings from two strategies: {similarity:.4}");
105
106 // Demo 3: Adding long text to memory
107 println!("\n--- Adding Long Text Memory Demo ---");
108
109 let metadata = json!({
110 "name": "Artificial Intelligence Overview",
111 "type": "Knowledge",
112 "tags": ["AI", "Machine Learning", "Deep Learning"]
113 });
114
115 let memory_id = llm_brain.add_memory(long_text.to_owned(), metadata).await?;
116 println!("Added long text memory: ID={memory_id}");
117
118 // Wait for embedding processing to complete
119 tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
120
121 // Query related content
122 println!("\nQuerying memories about 'large language models':");
123 let results = llm_brain
124 .recall("large language models and vector embeddings", 1)
125 .await?;
126 for (i, (memory, score)) in results.iter().enumerate() {
127 println!("\nResult {}: Similarity {:.4}", i + 1, score);
128 println!("Content summary: {}", truncate_text(&memory.content, 100));
129 println!(
130 "Metadata: {}",
131 serde_json::to_string_pretty(&memory.metadata)?
132 );
133 }
134
135 println!("\nDemo completed.");
136 Ok(())
137}

Source
pub async fn add_memory(
&self,
content: String,
metadata: Value,
) -> Result<Thing>
pub async fn add_memory( &self, content: String, metadata: Value, ) -> Result<Thing>
Adds a text fragment to the memory store.
This method:
- Generates an embedding for the content using the LLM client
- Creates a MemoryFragment with the content, embedding, and metadata
- Stores it in the database
Returns the SurrealDB ID of the created record.
Examples found in repository:
examples/file_rag.rs (line 84)
47async fn ingest_pdf(llm_brain: &LLMBrain, pdf_path: &Path) -> Result<Vec<String>> {
48 let start_time = Instant::now();
49 println!("Processing PDF: {}", pdf_path.display());
50
51 // 1. Extract text
52 let text = extract_text(pdf_path).context("Failed to extract text from PDF")?;
53 println!("Extracted {} characters.", text.len());
54
55 // 2. Chunk text
56 let chunks = chunk_text_by_words(&text, DEFAULT_CHUNK_SIZE);
57 println!("Split into {} chunks.", chunks.len());
58
59 // 3. Add chunks as MemoryFragments
60 let filename = pdf_path
61 .file_name()
62 .unwrap_or_default()
63 .to_string_lossy()
64 .to_string();
65 let file_stem = pdf_path
66 .file_stem()
67 .unwrap_or_default()
68 .to_string_lossy()
69 .to_string();
70
71 let mut chunk_ids = Vec::new();
72
73 for (i, chunk_content) in chunks.into_iter().enumerate() {
74 let chunk_num = i + 1;
75 let metadata = json!({
76 "memory_type": "DocumentChunk",
77 "source_file": filename,
78 "chunk_number": chunk_num,
79 // Optionally add entity_name for direct reference if needed
80 "entity_name": format!("{}_chunk_{}", file_stem, chunk_num)
81 });
82
83 // Add memory (handle potential errors)
84 match llm_brain.add_memory(chunk_content, metadata).await {
85 Ok(id) => {
86 chunk_ids.push(id.to_string());
87 if chunk_num % 10 == 0 {
88 println!("Added chunk {chunk_num}...");
89 }
90 }
91 Err(e) => {
92 eprintln!("Failed to add chunk {chunk_num}: {e}");
93 // Decide whether to continue or stop on error
94 }
95 }
96 }
97
98 // 4. (Optional) Add a MemoryFragment for the document itself, linking to chunks
99 let doc_content = format!(
100 "Metadata for document: {}. Contains {} chunks.",
101 filename,
102 chunk_ids.len()
103 );
104 let doc_metadata = json!({
105 "memory_type": "DocumentMeta",
106 "entity_name": file_stem,
107 "properties": {
108 "file_path": pdf_path.to_string_lossy(),
109 "file_name": filename,
110 "chunk_count": chunk_ids.len()
111 },
112 "relationships": {
113 "contains_chunks": chunk_ids // Store the actual DB IDs
114 }
115 });
116 llm_brain.add_memory(doc_content, doc_metadata).await?;
117 println!("Added document metadata entry.");
118
119 println!("Ingestion completed in {:?}.", start_time.elapsed());
120 Ok(chunk_ids) // Return chunk IDs if needed elsewhere
121}

More examples
examples/llm_brain_rag.rs (line 119)
82async fn main() -> Result<()> {
83 println!("--- Starting LLMBrain RAG Example (Rust/LLMBrain Version) ---");
84 ensure_config_exists()?;
85 let llm_brain = LLMBrain::launch().await?;
86
87 // --- Add Memories Step ---
88 println!("\nAdding example memories...");
89 let examples = vec![
90 TestMemoryData {
91 content: "Python is a programming language created by Guido van Rossum in 1991. It supports object-oriented, imperative, and functional programming. Commonly used for web development, data science, automation.".to_owned(),
92 metadata: json!({
93 "entity_name": "Python_Language",
94 "memory_type": "Semantic",
95 "properties": {"creator": "Guido van Rossum", "year": 1991, "paradigms": ["OOP", "Imperative", "Functional"], "uses": ["web dev", "data science", "automation"]},
96 }),
97 },
98 TestMemoryData {
99 content: "Completed first Python project today in home office. Took 2 hours, successful. Reviewed code.".to_owned(),
100 metadata: json!({
101 "entity_name": "First_Python_Project",
102 "memory_type": "Episodic",
103 "properties": {"timestamp": Utc::now().timestamp(), "location": "home office", "duration_hours": 2, "outcome": "successful"},
104 }),
105 },
106 TestMemoryData {
107 content: "Tesla Model 3 is red, made in 2023, parked in the garage. Range 358 miles, 0-60 mph in 3.1 seconds.".to_owned(),
108 metadata: json!({
109 "entity_name": "Tesla_Model_3_Red",
110 "memory_type": "Semantic",
111 "properties": {"color": "red", "year": 2023, "location": "garage", "range_miles": 358, "acceleration_0_60_sec": 3.1},
112 }),
113 },
114 ];
115
116 for (i, data) in examples.into_iter().enumerate() {
117 println!("\nAdding Example {}", i + 1);
118 match llm_brain
119 .add_memory(data.content.clone(), data.metadata.clone())
120 .await
121 {
122 Ok(id) => println!(" Added successfully. ID: {id}"),
123 Err(e) => eprintln!(" Failed to add: {e}"),
124 }
125 }
126
127 // --- Query Step (Simulating RAG Retrieval) ---
128 let test_queries = [
129 "What programming language was created by Guido van Rossum?",
130 "Tell me about the Tesla Model 3's specifications.",
131 "What happened during the first Python project?",
132 ];
133
134 if let Err(e) = run_rag_queries(&llm_brain, &test_queries).await {
135 eprintln!("Error during querying: {e}");
136 }
137
138 println!("\n--- LLMBrain RAG Example Finished ---");
139 println!("Note: Database is at ./llm_brain_rag_db");
140
141 Ok(())
142}

examples/basic_demo.rs (line 38)
10async fn main() -> Result<()> {
11 println!("Initializing LLMBrain for basic demo...");
12
13 // Ensure config exists before launching
14 ensure_config_exists()?;
15
16 // Initialize LLMBrain
17 let llm_brain = LLMBrain::launch().await?;
18 println!("LLMBrain initialized.");
19
20 // Add a semantic memory
21 println!("\nAdding semantic memory...");
22 let cat_data = json!({
23 "name": "Cat",
24 "column": "Semantic",
25 "properties": {
26 "type": "Animal",
27 "features": ["fur", "whiskers", "tail"],
28 "diet": "carnivore"
29 },
30 "relationships": {
31 "preys_on": ["mice", "birds"],
32 "related_to": ["tiger", "lion"]
33 }
34 });
35
36 let cat_content = "The cat is a small carnivorous mammal with fur, whiskers, and a tail. It preys on mice and birds, and is related to tigers and lions.";
37 let result = llm_brain
38 .add_memory(cat_content.to_owned(), cat_data)
39 .await?;
40 println!("Semantic memory added with ID: {result}");
41
42 // Add an episodic memory
43 println!("\nAdding episodic memory...");
44 let event_data = json!({
45 "name": "First Pet",
46 "column": "Episodic",
47 "properties": {
48 "timestamp": Utc::now().to_rfc3339(),
49 "action": "Got my first cat",
50 "location": "Pet Store",
51 "emotion": "happy",
52 "participants": ["family", "pet store staff"]
53 }
54 });
55
56 let event_content = "Today I went to the pet store with my family and got my first cat. Everyone was very happy.";
57 let result = llm_brain
58 .add_memory(event_content.to_owned(), event_data)
59 .await?;
60 println!("Episodic memory added with ID: {result}");
61
62 // Add a procedural memory
63 println!("\nAdding procedural memory...");
64 let procedure_data = json!({
65 "name": "Feed Cat",
66 "column": "Procedural",
67 "properties": {
68 "steps": [
69 "Get cat food from cabinet",
70 "Fill bowl with appropriate amount",
71 "Add fresh water to water bowl",
72 "Call cat for feeding"
73 ],
74 "frequency": "twice daily",
75 "importance": "high"
76 }
77 });
78
79 let procedure_content = "To feed a cat: First, get cat food from the cabinet. Then fill the bowl with an appropriate amount. Add fresh water to the water bowl. Finally, call the cat for feeding. This should be done twice daily.";
80 let result = llm_brain
81 .add_memory(procedure_content.to_owned(), procedure_data)
82 .await?;
83 println!("Procedural memory added with ID: {result}");
84
85 // Query memories
86 println!("\nQuerying cat-related memories...");
87 let cat_results = llm_brain.recall("cat animal features", 3).await?;
88 println!("Cat-related memories:");
89 for (memory, score) in cat_results {
90 println!("Score: {:.4}, Content: {}", score, memory.content);
91 println!(
92 "Metadata: {}",
93 serde_json::to_string_pretty(&memory.metadata)?
94 );
95 println!();
96 }
97
98 println!("\nQuerying feeding procedure...");
99 let feeding_results = llm_brain.recall("how to feed a cat", 2).await?;
100 println!("Feeding procedure results:");
101 for (memory, score) in feeding_results {
102 println!("Score: {:.4}, Content: {}", score, memory.content);
103 println!(
104 "Metadata: {}",
105 serde_json::to_string_pretty(&memory.metadata)?
106 );
107 println!();
108 }
109
110 println!("\nDemo completed.");
111 println!("Note: Database file created at ./llm_brain_example_db (if it didn't exist).");
112
113 Ok(())
114}

examples/comprehensive_demo.rs (line 34)
9async fn demonstrate_memory_types(llm_brain: &LLMBrain) -> Result<()> {
10 println!("\n=== Demonstrating Memory Types ===");
11
12 // Create semantic memory - a Tesla car entity
13 println!("\nAdding semantic memory (Tesla Model 3)...");
14 let car_data = json!({
15 "name": "Tesla_Model_3",
16 "column": "Semantic",
17 "properties": {
18 "color": "red",
19 "year": "2023",
20 "mileage": "1000 miles",
21 "features": ["autopilot capabilities", "glass roof"],
22 "type": "electric vehicle"
23 },
24 "relationships": {
25 "type_of": ["Vehicle"],
26 "location": "garage"
27 }
28 });
29
30 let car_content = "The Tesla Model 3 is a red electric vehicle from 2023 with 1000 miles. \
31 It features autopilot capabilities and a glass roof. It is parked in the garage.";
32
33 let car_id = llm_brain
34 .add_memory(car_content.to_owned(), car_data)
35 .await?;
36 println!("Added car memory with ID: {car_id}");
37
38 // Create episodic memory - buying the car
39 println!("\nAdding episodic memory (Buying the car)...");
40 let purchase_data = json!({
41 "name": "Tesla_Purchase",
42 "column": "Episodic",
43 "timestamp": Utc::now().to_rfc3339(),
44 "properties": {
45 "action": "Bought a new car",
46 "location": "Tesla Dealership",
47 "participants": ["Me", "Tesla Sales Representative"],
48 "emotion": "excited"
49 },
50 "relationships": {
51 "related_to": ["Tesla_Model_3"]
52 }
53 });
54
55 let purchase_content = "I bought a new Tesla Model 3 today at the Tesla Dealership. \
56 The sales representative was very helpful. I was very excited about getting my new car.";
57
58 let purchase_id = llm_brain
59 .add_memory(purchase_content.to_owned(), purchase_data)
60 .await?;
61 println!("Added purchase memory with ID: {purchase_id}");
62
63 // Create procedural memory - charging the car
64 println!("\nAdding procedural memory (Charging the car)...");
65 let charging_data = json!({
66 "name": "Charge_Tesla",
67 "column": "Procedural",
68 "properties": {
69 "steps": [
70 "Park near charging station",
71 "Open charging port on car",
72 "Connect charging cable",
73 "Initiate charging via app or car interface",
74 "Wait for sufficient charge",
75 "Disconnect charging cable",
76 "Close charging port"
77 ],
78 "required_tools": ["Charging cable", "Tesla app"],
79 "estimated_time": "30-60 minutes",
80 "frequency": "weekly"
81 },
82 "relationships": {
83 "applies_to": ["Tesla_Model_3"]
84 }
85 });
86
87 let charging_content = "To charge a Tesla Model 3: First, park near a charging station. \
88 Open the charging port on the car and connect the charging cable. \
89 Then initiate charging via the app or car interface. Wait for 30-60 minutes \
90 until there is sufficient charge. Finally, disconnect the charging cable \
91 and close the charging port. This procedure should be performed weekly \
92 and requires a charging cable and the Tesla app.";
93
94 let charging_id = llm_brain
95 .add_memory(charging_content.to_owned(), charging_data)
96 .await?;
97 println!("Added charging memory with ID: {charging_id}");
98
99 // Allow time for embeddings to be processed
100 sleep(Duration::from_millis(200)).await;
101
102 Ok(())
103}
104
105/// Demonstrate querying and memory recall capabilities
106async fn demonstrate_memory_recall(llm_brain: &LLMBrain) -> Result<()> {
107 println!("\n=== Demonstrating Memory Recall ===");
108
109 // Query for Tesla car
110 println!("\nQuerying for Tesla Model 3 information:");
111 let tesla_query = "What do I know about my Tesla Model 3?";
112 let tesla_results = llm_brain.recall(tesla_query, 3).await?;
113
114 for (i, (memory, score)) in tesla_results.iter().enumerate() {
115 println!("\nResult {}: Score {:.4}", i + 1, score);
116 println!("Content: {}", memory.content);
117 println!(
118 "Metadata: {}",
119 serde_json::to_string_pretty(&memory.metadata)?
120 );
121 }
122
123 // Query for car purchase experience
124 println!("\nQuerying for car purchase experience:");
125 let purchase_query = "When did I buy my Tesla?";
126 let purchase_results = llm_brain.recall(purchase_query, 2).await?;
127
128 for (i, (memory, score)) in purchase_results.iter().enumerate() {
129 println!("\nResult {}: Score {:.4}", i + 1, score);
130 println!("Content: {}", memory.content);
131 println!(
132 "Metadata: {}",
133 serde_json::to_string_pretty(&memory.metadata)?
134 );
135 }
136
137 // Query for charging instructions
138 println!("\nQuerying for charging instructions:");
139 let charging_query = "How do I charge my car?";
140 let charging_results = llm_brain.recall(charging_query, 2).await?;
141
142 for (i, (memory, score)) in charging_results.iter().enumerate() {
143 println!("\nResult {}: Score {:.4}", i + 1, score);
144 println!("Content: {}", memory.content);
145 println!(
146 "Metadata: {}",
147 serde_json::to_string_pretty(&memory.metadata)?
148 );
149 }
150
151 Ok(())
152}
153
154/// Demonstrate adding a memory and then finding it by similarity
155async fn demonstrate_memory_lookup(llm_brain: &LLMBrain) -> Result<()> {
156 println!("\n=== Demonstrating Memory Lookup ===");
157
158 // Add a new memory about a trip
159 println!("\nAdding memory about a road trip...");
160 let trip_data = json!({
161 "name": "Road_Trip_Tahoe",
162 "column": "Episodic",
163 "timestamp": Utc::now().to_rfc3339(),
164 "properties": {
165 "action": "Road trip to Lake Tahoe",
166 "location": "Lake Tahoe",
167 "participants": ["Me", "Family"],
168 "duration": "3 days",
169 "transportation": "Tesla Model 3",
170 "highlights": ["Beautiful scenery", "Hiking", "Swimming"]
171 }
172 });
173
174 let trip_content = "Last weekend, we took our Tesla Model 3 on a road trip to Lake Tahoe. \
175 The drive was smooth and we enjoyed the beautiful scenery. \
176 We spent 3 days there with the family, hiking and swimming in the lake. \
177 The car performed excellently on the mountain roads.";
178
179 let trip_id = llm_brain
180 .add_memory(trip_content.to_owned(), trip_data)
181 .await?;
182 println!("Added trip memory with ID: {trip_id}");
183
184 // Allow time for embeddings to be processed
185 sleep(Duration::from_millis(200)).await;
186
187 // Now try to find this memory with various related queries
188 let queries = [
189 "Did we go on any trips with our Tesla?",
190 "What activities did we do at Lake Tahoe?",
191 "How did the Tesla perform on our road trip?",
192 ];
193
194 for (i, query) in queries.iter().enumerate() {
195 println!("\nQuery {}: \"{}\"", i + 1, query);
196 let results = llm_brain.recall(query, 1).await?;
197
198 if !results.is_empty() {
199 let (memory, score) = &results[0];
200 println!("Found memory (score: {score:.4}):");
201 println!("Content: {}", memory.content);
202 } else {
203 println!("No relevant memories found.");
204 }
205 }
206
207 // Direct lookup by ID
208 println!("\nDirect lookup by ID:");
209 if let Some(memory) = llm_brain
210 .get_memory_by_id_string(&trip_id.to_string())
211 .await?
212 {
213 println!("Successfully retrieved memory");
214 println!("Content: {}", memory.content);
215 } else {
216 println!("Failed to retrieve memory by ID");
217 }
218
219 Ok(())
220}

examples/llm_brain_basic_demo.rs (line 70)
43async fn main() -> Result<()> {
44 println!("\nStarting LLMBrain Basic Demo (Rust Version)...");
45
46 // Ensure config exists
47 ensure_config_exists()?;
48
49 // Initialize LLMBrain (handles DB connection based on config)
50 let llm_brain = LLMBrain::launch().await?;
51 println!("LLMBrain initialized.");
52
53 // --- Add Semantic Memory (Cat) ---
54 println!("\nAdding basic semantic memory for 'cat'...");
55 let cat_content = "A cat is a medium-sized, furry, agile, carnivorous animal often found in homes or outdoors. Common behaviors include hunting, sleeping, and grooming.".to_owned();
56 let cat_metadata = json!({
57 "entity_name": "cat",
58 "memory_type": "Semantic",
59 "properties": {
60 "type": "animal",
61 "size": "medium",
62 "characteristics": ["furry", "agile", "carnivorous"]
63 },
64 "relationships": {
65 "habitat": ["homes", "outdoors"],
66 "behavior": ["hunting", "sleeping", "grooming"]
67 }
68 });
69 match llm_brain
70 .add_memory(cat_content.clone(), cat_metadata.clone())
71 .await
72 {
73 Ok(id) => println!("Semantic memory added successfully. ID: {id}"),
74 Err(e) => println!("Failed to add semantic memory: {e}"),
75 }
76
77 // --- Add Episodic Memory (Cat Observation) ---
78 println!("\nAdding episodic memory...");
79 let episode_content = format!(
80 "Timestamp: {}. Observed cat behavior in the Garden: Cat was chasing a butterfly. Observed by human.",
81 Utc::now().to_rfc3339()
82 );
83 let episode_metadata = json!({
84 "entity_name": "cat_observation",
85 "memory_type": "Episodic",
86 "properties": {
87 "action": "Observed cat behavior",
88 "location": "Garden",
89 "details": "Cat was chasing a butterfly"
90 },
91 "relationships": {
92 "relates_to": ["cat"],
93 "observed_by": ["human"]
94 }
95 });
96 match llm_brain
97 .add_memory(episode_content.clone(), episode_metadata.clone())
98 .await
99 {
100 Ok(id) => println!("Episodic memory added successfully. ID: {id}"),
101 Err(e) => println!("Failed to add episodic memory: {e}"),
102 }
103
104 // --- Query and Display Results ---
105 println!("\nQuerying semantic memory for 'cat':");
106 let cat_query = "information about cats";
107 match llm_brain.recall(cat_query, 2).await {
108 // Recall top 2
109 Ok(results) => {
110 println!("Found {} memories:", results.len());
111 for (fragment, score) in results {
112 println!(
113 "- Score: {:.4}, Content: {:.80}...",
114 score, fragment.content
115 );
116 println!(
117 " Metadata: {}",
118 serde_json::to_string_pretty(&fragment.metadata)?
119 );
120 }
121 }
122 Err(e) => println!("Failed to recall memories for '{cat_query}': {e}"),
123 }
124
125 println!("\nQuerying episodic memory:");
126 let episode_query = "cat observation in garden";
127 match llm_brain.recall(episode_query, 1).await {
128 Ok(results) => {
129 println!("Found {} memories:", results.len());
130 for (fragment, score) in results {
131 println!(
132 "- Score: {:.4}, Content: {:.80}...",
133 score, fragment.content
134 );
135 println!(
136 " Metadata: {}",
137 serde_json::to_string_pretty(&fragment.metadata)?
138 );
139 }
140 }
141 Err(e) => println!("Failed to recall memories for '{episode_query}': {e}"),
142 }
143
144 println!("\nDemo completed successfully!");
145 println!("Note: Database file created at ./llm_brain_basic_demo_db (if it didn't exist).");
146
147 Ok(())
148}

examples/embedding_demo.rs (line 53)
6async fn main() -> Result<()> {
7 // Initialize LLMBrain instance
8 println!("Initializing LLMBrain...");
9 let llm_brain = LLMBrain::launch().await?;
10
11 // Demo 1: Basic embedding generation and query
12 println!("\n--- Basic Embedding Demo ---");
13
14 // Add several memories
15 let memories = vec![
16 (
17 "Cats are small mammals with fur, whiskers, and tails. They eat mice and birds, and are related to tigers and lions.",
18 json!({
19 "name": "Cat",
20 "type": "Animal",
21 "properties": {
22 "characteristics": ["fur", "whiskers", "tail"],
23 "diet": "carnivorous"
24 }
25 }),
26 ),
27 (
28 "Dogs are one of the earliest domesticated animals, loyal companions to humans, with various breeds and purposes.",
29 json!({
30 "name": "Dog",
31 "type": "Animal",
32 "properties": {
33 "characteristics": ["fur", "loyalty", "keen sense of smell"],
34 "uses": ["pet", "working dog", "guide dog"]
35 }
36 }),
37 ),
38 (
39 "Birds are warm-blooded vertebrates, covered with feathers, with forelimbs evolved into wings, and most can fly.",
40 json!({
41 "name": "Bird",
42 "type": "Animal",
43 "properties": {
44 "characteristics": ["feathers", "wings", "beak"],
45 "abilities": ["flying", "nest building", "singing"]
46 }
47 }),
48 ),
49 ];
50
51 // Add memories to database
52 for (content, metadata) in memories {
53 let memory_id = llm_brain.add_memory(content.to_owned(), metadata).await?;
54 println!("Added memory: ID={memory_id}");
55 }
56
57 // Wait for embedding processing to complete
58 tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
59
60 // Query related memories using semantic search
61 println!("\nQuerying memories about 'pets':");
62 let results = llm_brain.recall("pets and human relationships", 2).await?;
63 for (i, (memory, score)) in results.iter().enumerate() {
64 println!("\nResult {}: Similarity {:.4}", i + 1, score);
65 println!("Content: {}", memory.content);
66 println!(
67 "Metadata: {}",
68 serde_json::to_string_pretty(&memory.metadata)?
69 );
70 }
71
72 // Demo 2: Long text processing
73 println!("\n--- Long Text Processing Demo ---");
74
75 // Prepare a longer text
76 let long_text = r#"
77 Artificial Intelligence (AI) is a branch of computer science aimed at creating systems capable of simulating human intelligence behaviors.
78 AI research includes multiple sub-fields such as machine learning, deep learning, natural language processing, computer vision, expert systems, etc.
79 Machine learning is one of the core technologies of AI, using statistical techniques to enable computer systems to "learn" (i.e., progressively improve performance) without explicit programming.
80 Deep learning is a subset of machine learning that uses multi-layered neural networks to process data. These networks extract features from data, with each layer building on the output of the previous one.
81 Large Language Models (LLMs) are a significant breakthrough in AI in recent years, generating human-like text, understanding context, and performing various language tasks by learning from vast amounts of text data.
82 Vector embeddings are techniques for representing text, images, or other data as points in a multi-dimensional vector space, playing an important role in information retrieval, recommendation systems, and semantic search.
83 "#;
84
85 println!("Long text content:\n{long_text}");
86
87 // Process long text using different strategies
88 println!("\nUsing default strategy (no chunking):");
89 let embedding1 = llm_brain.process_long_text(long_text, None).await?;
90 println!("Generated embedding vector length: {}", embedding1.len());
91
92 println!("\nUsing chunk and average strategy:");
93 let chunk_strategy = ChunkingStrategy::ChunkAndAverage {
94 chunk_size: 100,
95 chunk_overlap: 20,
96 };
97 let embedding2 = llm_brain
98 .process_long_text(long_text, Some(chunk_strategy))
99 .await?;
100 println!("Generated embedding vector length: {}", embedding2.len());
101
102 // Calculate similarity between embeddings generated by different strategies
103 let similarity = cosine_similarity(&embedding1, &embedding2);
104 println!("\nCosine similarity between embeddings from two strategies: {similarity:.4}");
105
106 // Demo 3: Adding long text to memory
107 println!("\n--- Adding Long Text Memory Demo ---");
108
109 let metadata = json!({
110 "name": "Artificial Intelligence Overview",
111 "type": "Knowledge",
112 "tags": ["AI", "Machine Learning", "Deep Learning"]
113 });
114
115 let memory_id = llm_brain.add_memory(long_text.to_owned(), metadata).await?;
116 println!("Added long text memory: ID={memory_id}");
117
118 // Wait for embedding processing to complete
119 tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
120
121 // Query related content
122 println!("\nQuerying memories about 'large language models':");
123 let results = llm_brain
124 .recall("large language models and vector embeddings", 1)
125 .await?;
126 for (i, (memory, score)) in results.iter().enumerate() {
127 println!("\nResult {}: Similarity {:.4}", i + 1, score);
128 println!("Content summary: {}", truncate_text(&memory.content, 100));
129 println!(
130 "Metadata: {}",
131 serde_json::to_string_pretty(&memory.metadata)?
132 );
133 }
134
135 println!("\nDemo completed.");
136 Ok(())
137}

Source
pub async fn recall(
&self,
query_text: &str,
top_k: usize,
) -> Result<Vec<(MemoryFragment, f32)>>
pub async fn recall( &self, query_text: &str, top_k: usize, ) -> Result<Vec<(MemoryFragment, f32)>>
Retrieves similar memories based on a text query.
This method:
- Generates an embedding for the query text
- Finds memory fragments with similar embeddings
Returns a vec of (MemoryFragment, similarity_score) tuples.
Examples found in repository:
examples/file_rag.rs (line 128)
123async fn run_queries(llm_brain: &LLMBrain, queries: &[&str]) -> Result<()> {
124 println!("\n--- Running Queries ---");
125 for query in queries {
126 println!("\nQuery: {query}");
127 let start_time = Instant::now();
128 match llm_brain.recall(query, 3).await {
129 // Retrieve top 3 relevant chunks
130 Ok(results) => {
131 println!(
132 "Found {} relevant chunks in {:?}:",
133 results.len(),
134 start_time.elapsed()
135 );
136 if results.is_empty() {
137 println!(" (No relevant chunks found)");
138 } else {
139 for (fragment, score) in results {
140 println!(
141 " - Score: {:.4}, Source: {:?}, Chunk: {:?}\n Content: {:.150}...",
142 score,
143 fragment.metadata.get("source_file"),
144 fragment.metadata.get("chunk_number"),
145 fragment.content.replace('\n', " ") // Basic formatting
146 );
147 }
148 }
149 }
150 Err(e) => {
151 eprintln!(" Error during recall: {e}");
152 }
153 }
154 }
155 Ok(())
156}More examples
examples/llm_brain_rag.rs (line 47)
42async fn run_rag_queries(llm_brain: &LLMBrain, queries: &[&str]) -> Result<()> {
43 println!("\n--- Running Simulated RAG Queries (Retrieval Only) ---");
44 for query in queries {
45 println!("\nQuery: {query}");
46 let start_time = Instant::now();
47 match llm_brain.recall(query, 3).await {
48 // Retrieve top 3 relevant fragments
49 Ok(results) => {
50 println!(
51 "Retrieved {} relevant fragments in {:?}:",
52 results.len(),
53 start_time.elapsed()
54 );
55 if results.is_empty() {
56 println!(" (No relevant fragments found for RAG context)");
57 } else {
58 println!(" --- Potential RAG Context: ---");
59 for (fragment, score) in results {
60 println!(
61 " - Score: {:.4}, Entity: {:?}\n Content: {:.150}...",
62 score,
63 fragment.metadata.get("entity_name"), // Display name if available
64 fragment.content.replace('\n', " ")
65 );
66 }
67 println!(" --- End Context ---");
68 println!(
69 " (Note: In a full RAG system, these fragments would be passed to an LLM with the query to generate a final answer.)"
70 );
71 }
72 }
73 Err(e) => {
74 eprintln!(" Error during recall: {e}");
75 }
76 }
77 }
78 Ok(())
79}examples/comprehensive_demo.rs (line 112)
106async fn demonstrate_memory_recall(llm_brain: &LLMBrain) -> Result<()> {
107 println!("\n=== Demonstrating Memory Recall ===");
108
109 // Query for Tesla car
110 println!("\nQuerying for Tesla Model 3 information:");
111 let tesla_query = "What do I know about my Tesla Model 3?";
112 let tesla_results = llm_brain.recall(tesla_query, 3).await?;
113
114 for (i, (memory, score)) in tesla_results.iter().enumerate() {
115 println!("\nResult {}: Score {:.4}", i + 1, score);
116 println!("Content: {}", memory.content);
117 println!(
118 "Metadata: {}",
119 serde_json::to_string_pretty(&memory.metadata)?
120 );
121 }
122
123 // Query for car purchase experience
124 println!("\nQuerying for car purchase experience:");
125 let purchase_query = "When did I buy my Tesla?";
126 let purchase_results = llm_brain.recall(purchase_query, 2).await?;
127
128 for (i, (memory, score)) in purchase_results.iter().enumerate() {
129 println!("\nResult {}: Score {:.4}", i + 1, score);
130 println!("Content: {}", memory.content);
131 println!(
132 "Metadata: {}",
133 serde_json::to_string_pretty(&memory.metadata)?
134 );
135 }
136
137 // Query for charging instructions
138 println!("\nQuerying for charging instructions:");
139 let charging_query = "How do I charge my car?";
140 let charging_results = llm_brain.recall(charging_query, 2).await?;
141
142 for (i, (memory, score)) in charging_results.iter().enumerate() {
143 println!("\nResult {}: Score {:.4}", i + 1, score);
144 println!("Content: {}", memory.content);
145 println!(
146 "Metadata: {}",
147 serde_json::to_string_pretty(&memory.metadata)?
148 );
149 }
150
151 Ok(())
152}
153
154/// Demonstrate adding a memory and then finding it by similarity
155async fn demonstrate_memory_lookup(llm_brain: &LLMBrain) -> Result<()> {
156 println!("\n=== Demonstrating Memory Lookup ===");
157
158 // Add a new memory about a trip
159 println!("\nAdding memory about a road trip...");
160 let trip_data = json!({
161 "name": "Road_Trip_Tahoe",
162 "column": "Episodic",
163 "timestamp": Utc::now().to_rfc3339(),
164 "properties": {
165 "action": "Road trip to Lake Tahoe",
166 "location": "Lake Tahoe",
167 "participants": ["Me", "Family"],
168 "duration": "3 days",
169 "transportation": "Tesla Model 3",
170 "highlights": ["Beautiful scenery", "Hiking", "Swimming"]
171 }
172 });
173
174 let trip_content = "Last weekend, we took our Tesla Model 3 on a road trip to Lake Tahoe. \
175 The drive was smooth and we enjoyed the beautiful scenery. \
176 We spent 3 days there with the family, hiking and swimming in the lake. \
177 The car performed excellently on the mountain roads.";
178
179 let trip_id = llm_brain
180 .add_memory(trip_content.to_owned(), trip_data)
181 .await?;
182 println!("Added trip memory with ID: {trip_id}");
183
184 // Allow time for embeddings to be processed
185 sleep(Duration::from_millis(200)).await;
186
187 // Now try to find this memory with various related queries
188 let queries = [
189 "Did we go on any trips with our Tesla?",
190 "What activities did we do at Lake Tahoe?",
191 "How did the Tesla perform on our road trip?",
192 ];
193
194 for (i, query) in queries.iter().enumerate() {
195 println!("\nQuery {}: \"{}\"", i + 1, query);
196 let results = llm_brain.recall(query, 1).await?;
197
198 if !results.is_empty() {
199 let (memory, score) = &results[0];
200 println!("Found memory (score: {score:.4}):");
201 println!("Content: {}", memory.content);
202 } else {
203 println!("No relevant memories found.");
204 }
205 }
206
207 // Direct lookup by ID
208 println!("\nDirect lookup by ID:");
209 if let Some(memory) = llm_brain
210 .get_memory_by_id_string(&trip_id.to_string())
211 .await?
212 {
213 println!("Successfully retrieved memory");
214 println!("Content: {}", memory.content);
215 } else {
216 println!("Failed to retrieve memory by ID");
217 }
218
219 Ok(())
220}

examples/basic_demo.rs (line 87)
10async fn main() -> Result<()> {
11 println!("Initializing LLMBrain for basic demo...");
12
13 // Ensure config exists before launching
14 ensure_config_exists()?;
15
16 // Initialize LLMBrain
17 let llm_brain = LLMBrain::launch().await?;
18 println!("LLMBrain initialized.");
19
20 // Add a semantic memory
21 println!("\nAdding semantic memory...");
22 let cat_data = json!({
23 "name": "Cat",
24 "column": "Semantic",
25 "properties": {
26 "type": "Animal",
27 "features": ["fur", "whiskers", "tail"],
28 "diet": "carnivore"
29 },
30 "relationships": {
31 "preys_on": ["mice", "birds"],
32 "related_to": ["tiger", "lion"]
33 }
34 });
35
36 let cat_content = "The cat is a small carnivorous mammal with fur, whiskers, and a tail. It preys on mice and birds, and is related to tigers and lions.";
37 let result = llm_brain
38 .add_memory(cat_content.to_owned(), cat_data)
39 .await?;
40 println!("Semantic memory added with ID: {result}");
41
42 // Add an episodic memory
43 println!("\nAdding episodic memory...");
44 let event_data = json!({
45 "name": "First Pet",
46 "column": "Episodic",
47 "properties": {
48 "timestamp": Utc::now().to_rfc3339(),
49 "action": "Got my first cat",
50 "location": "Pet Store",
51 "emotion": "happy",
52 "participants": ["family", "pet store staff"]
53 }
54 });
55
56 let event_content = "Today I went to the pet store with my family and got my first cat. Everyone was very happy.";
57 let result = llm_brain
58 .add_memory(event_content.to_owned(), event_data)
59 .await?;
60 println!("Episodic memory added with ID: {result}");
61
62 // Add a procedural memory
63 println!("\nAdding procedural memory...");
64 let procedure_data = json!({
65 "name": "Feed Cat",
66 "column": "Procedural",
67 "properties": {
68 "steps": [
69 "Get cat food from cabinet",
70 "Fill bowl with appropriate amount",
71 "Add fresh water to water bowl",
72 "Call cat for feeding"
73 ],
74 "frequency": "twice daily",
75 "importance": "high"
76 }
77 });
78
79 let procedure_content = "To feed a cat: First, get cat food from the cabinet. Then fill the bowl with an appropriate amount. Add fresh water to the water bowl. Finally, call the cat for feeding. This should be done twice daily.";
80 let result = llm_brain
81 .add_memory(procedure_content.to_owned(), procedure_data)
82 .await?;
83 println!("Procedural memory added with ID: {result}");
84
85 // Query memories
86 println!("\nQuerying cat-related memories...");
87 let cat_results = llm_brain.recall("cat animal features", 3).await?;
88 println!("Cat-related memories:");
89 for (memory, score) in cat_results {
90 println!("Score: {:.4}, Content: {}", score, memory.content);
91 println!(
92 "Metadata: {}",
93 serde_json::to_string_pretty(&memory.metadata)?
94 );
95 println!();
96 }
97
98 println!("\nQuerying feeding procedure...");
99 let feeding_results = llm_brain.recall("how to feed a cat", 2).await?;
100 println!("Feeding procedure results:");
101 for (memory, score) in feeding_results {
102 println!("Score: {:.4}, Content: {}", score, memory.content);
103 println!(
104 "Metadata: {}",
105 serde_json::to_string_pretty(&memory.metadata)?
106 );
107 println!();
108 }
109
110 println!("\nDemo completed.");
111 println!("Note: Database file created at ./llm_brain_example_db (if it didn't exist).");
112
113 Ok(())
114}

examples/llm_brain_basic_demo.rs (line 107)
43async fn main() -> Result<()> {
44 println!("\nStarting LLMBrain Basic Demo (Rust Version)...");
45
46 // Ensure config exists
47 ensure_config_exists()?;
48
49 // Initialize LLMBrain (handles DB connection based on config)
50 let llm_brain = LLMBrain::launch().await?;
51 println!("LLMBrain initialized.");
52
53 // --- Add Semantic Memory (Cat) ---
54 println!("\nAdding basic semantic memory for 'cat'...");
55 let cat_content = "A cat is a medium-sized, furry, agile, carnivorous animal often found in homes or outdoors. Common behaviors include hunting, sleeping, and grooming.".to_owned();
56 let cat_metadata = json!({
57 "entity_name": "cat",
58 "memory_type": "Semantic",
59 "properties": {
60 "type": "animal",
61 "size": "medium",
62 "characteristics": ["furry", "agile", "carnivorous"]
63 },
64 "relationships": {
65 "habitat": ["homes", "outdoors"],
66 "behavior": ["hunting", "sleeping", "grooming"]
67 }
68 });
69 match llm_brain
70 .add_memory(cat_content.clone(), cat_metadata.clone())
71 .await
72 {
73 Ok(id) => println!("Semantic memory added successfully. ID: {id}"),
74 Err(e) => println!("Failed to add semantic memory: {e}"),
75 }
76
77 // --- Add Episodic Memory (Cat Observation) ---
78 println!("\nAdding episodic memory...");
79 let episode_content = format!(
80 "Timestamp: {}. Observed cat behavior in the Garden: Cat was chasing a butterfly. Observed by human.",
81 Utc::now().to_rfc3339()
82 );
83 let episode_metadata = json!({
84 "entity_name": "cat_observation",
85 "memory_type": "Episodic",
86 "properties": {
87 "action": "Observed cat behavior",
88 "location": "Garden",
89 "details": "Cat was chasing a butterfly"
90 },
91 "relationships": {
92 "relates_to": ["cat"],
93 "observed_by": ["human"]
94 }
95 });
96 match llm_brain
97 .add_memory(episode_content.clone(), episode_metadata.clone())
98 .await
99 {
100 Ok(id) => println!("Episodic memory added successfully. ID: {id}"),
101 Err(e) => println!("Failed to add episodic memory: {e}"),
102 }
103
104 // --- Query and Display Results ---
105 println!("\nQuerying semantic memory for 'cat':");
106 let cat_query = "information about cats";
107 match llm_brain.recall(cat_query, 2).await {
108 // Recall top 2
109 Ok(results) => {
110 println!("Found {} memories:", results.len());
111 for (fragment, score) in results {
112 println!(
113 "- Score: {:.4}, Content: {:.80}...",
114 score, fragment.content
115 );
116 println!(
117 " Metadata: {}",
118 serde_json::to_string_pretty(&fragment.metadata)?
119 );
120 }
121 }
122 Err(e) => println!("Failed to recall memories for '{cat_query}': {e}"),
123 }
124
125 println!("\nQuerying episodic memory:");
126 let episode_query = "cat observation in garden";
127 match llm_brain.recall(episode_query, 1).await {
128 Ok(results) => {
129 println!("Found {} memories:", results.len());
130 for (fragment, score) in results {
131 println!(
132 "- Score: {:.4}, Content: {:.80}...",
133 score, fragment.content
134 );
135 println!(
136 " Metadata: {}",
137 serde_json::to_string_pretty(&fragment.metadata)?
138 );
139 }
140 }
141 Err(e) => println!("Failed to recall memories for '{episode_query}': {e}"),
142 }
143
144 println!("\nDemo completed successfully!");
145 println!("Note: Database file created at ./llm_brain_basic_demo_db (if it didn't exist).");
146
147 Ok(())
148}

examples/embedding_demo.rs (line 62)
6async fn main() -> Result<()> {
7 // Initialize LLMBrain instance
8 println!("Initializing LLMBrain...");
9 let llm_brain = LLMBrain::launch().await?;
10
11 // Demo 1: Basic embedding generation and query
12 println!("\n--- Basic Embedding Demo ---");
13
14 // Add several memories
15 let memories = vec![
16 (
17 "Cats are small mammals with fur, whiskers, and tails. They eat mice and birds, and are related to tigers and lions.",
18 json!({
19 "name": "Cat",
20 "type": "Animal",
21 "properties": {
22 "characteristics": ["fur", "whiskers", "tail"],
23 "diet": "carnivorous"
24 }
25 }),
26 ),
27 (
28 "Dogs are one of the earliest domesticated animals, loyal companions to humans, with various breeds and purposes.",
29 json!({
30 "name": "Dog",
31 "type": "Animal",
32 "properties": {
33 "characteristics": ["fur", "loyalty", "keen sense of smell"],
34 "uses": ["pet", "working dog", "guide dog"]
35 }
36 }),
37 ),
38 (
39 "Birds are warm-blooded vertebrates, covered with feathers, with forelimbs evolved into wings, and most can fly.",
40 json!({
41 "name": "Bird",
42 "type": "Animal",
43 "properties": {
44 "characteristics": ["feathers", "wings", "beak"],
45 "abilities": ["flying", "nest building", "singing"]
46 }
47 }),
48 ),
49 ];
50
51 // Add memories to database
52 for (content, metadata) in memories {
53 let memory_id = llm_brain.add_memory(content.to_owned(), metadata).await?;
54 println!("Added memory: ID={memory_id}");
55 }
56
57 // Wait for embedding processing to complete
58 tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
59
60 // Query related memories using semantic search
61 println!("\nQuerying memories about 'pets':");
62 let results = llm_brain.recall("pets and human relationships", 2).await?;
63 for (i, (memory, score)) in results.iter().enumerate() {
64 println!("\nResult {}: Similarity {:.4}", i + 1, score);
65 println!("Content: {}", memory.content);
66 println!(
67 "Metadata: {}",
68 serde_json::to_string_pretty(&memory.metadata)?
69 );
70 }
71
72 // Demo 2: Long text processing
73 println!("\n--- Long Text Processing Demo ---");
74
75 // Prepare a longer text
76 let long_text = r#"
77 Artificial Intelligence (AI) is a branch of computer science aimed at creating systems capable of simulating human intelligence behaviors.
78 AI research includes multiple sub-fields such as machine learning, deep learning, natural language processing, computer vision, expert systems, etc.
79 Machine learning is one of the core technologies of AI, using statistical techniques to enable computer systems to "learn" (i.e., progressively improve performance) without explicit programming.
80 Deep learning is a subset of machine learning that uses multi-layered neural networks to process data. These networks extract features from data, with each layer building on the output of the previous one.
81 Large Language Models (LLMs) are a significant breakthrough in AI in recent years, generating human-like text, understanding context, and performing various language tasks by learning from vast amounts of text data.
82 Vector embeddings are techniques for representing text, images, or other data as points in a multi-dimensional vector space, playing an important role in information retrieval, recommendation systems, and semantic search.
83 "#;
84
85 println!("Long text content:\n{long_text}");
86
87 // Process long text using different strategies
88 println!("\nUsing default strategy (no chunking):");
89 let embedding1 = llm_brain.process_long_text(long_text, None).await?;
90 println!("Generated embedding vector length: {}", embedding1.len());
91
92 println!("\nUsing chunk and average strategy:");
93 let chunk_strategy = ChunkingStrategy::ChunkAndAverage {
94 chunk_size: 100,
95 chunk_overlap: 20,
96 };
97 let embedding2 = llm_brain
98 .process_long_text(long_text, Some(chunk_strategy))
99 .await?;
100 println!("Generated embedding vector length: {}", embedding2.len());
101
102 // Calculate similarity between embeddings generated by different strategies
103 let similarity = cosine_similarity(&embedding1, &embedding2);
104 println!("\nCosine similarity between embeddings from two strategies: {similarity:.4}");
105
106 // Demo 3: Adding long text to memory
107 println!("\n--- Adding Long Text Memory Demo ---");
108
109 let metadata = json!({
110 "name": "Artificial Intelligence Overview",
111 "type": "Knowledge",
112 "tags": ["AI", "Machine Learning", "Deep Learning"]
113 });
114
115 let memory_id = llm_brain.add_memory(long_text.to_owned(), metadata).await?;
116 println!("Added long text memory: ID={memory_id}");
117
118 // Wait for embedding processing to complete
119 tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
120
121 // Query related content
122 println!("\nQuerying memories about 'large language models':");
123 let results = llm_brain
124 .recall("large language models and vector embeddings", 1)
125 .await?;
126 for (i, (memory, score)) in results.iter().enumerate() {
127 println!("\nResult {}: Similarity {:.4}", i + 1, score);
128 println!("Content summary: {}", truncate_text(&memory.content, 100));
129 println!(
130 "Metadata: {}",
131 serde_json::to_string_pretty(&memory.metadata)?
132 );
133 }
134
135 println!("\nDemo completed.");
136 Ok(())
137}

Source
pub async fn process_long_text(
&self,
text: &str,
strategy: Option<ChunkingStrategy>,
) -> Result<Vec<f32>>
pub async fn process_long_text( &self, text: &str, strategy: Option<ChunkingStrategy>, ) -> Result<Vec<f32>>
Process long text using appropriate chunking strategy
Examples found in repository:
examples/embedding_demo.rs (line 89)
6async fn main() -> Result<()> {
7 // Initialize LLMBrain instance
8 println!("Initializing LLMBrain...");
9 let llm_brain = LLMBrain::launch().await?;
10
11 // Demo 1: Basic embedding generation and query
12 println!("\n--- Basic Embedding Demo ---");
13
14 // Add several memories
15 let memories = vec![
16 (
17 "Cats are small mammals with fur, whiskers, and tails. They eat mice and birds, and are related to tigers and lions.",
18 json!({
19 "name": "Cat",
20 "type": "Animal",
21 "properties": {
22 "characteristics": ["fur", "whiskers", "tail"],
23 "diet": "carnivorous"
24 }
25 }),
26 ),
27 (
28 "Dogs are one of the earliest domesticated animals, loyal companions to humans, with various breeds and purposes.",
29 json!({
30 "name": "Dog",
31 "type": "Animal",
32 "properties": {
33 "characteristics": ["fur", "loyalty", "keen sense of smell"],
34 "uses": ["pet", "working dog", "guide dog"]
35 }
36 }),
37 ),
38 (
39 "Birds are warm-blooded vertebrates, covered with feathers, with forelimbs evolved into wings, and most can fly.",
40 json!({
41 "name": "Bird",
42 "type": "Animal",
43 "properties": {
44 "characteristics": ["feathers", "wings", "beak"],
45 "abilities": ["flying", "nest building", "singing"]
46 }
47 }),
48 ),
49 ];
50
51 // Add memories to database
52 for (content, metadata) in memories {
53 let memory_id = llm_brain.add_memory(content.to_owned(), metadata).await?;
54 println!("Added memory: ID={memory_id}");
55 }
56
57 // Wait for embedding processing to complete
58 tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
59
60 // Query related memories using semantic search
61 println!("\nQuerying memories about 'pets':");
62 let results = llm_brain.recall("pets and human relationships", 2).await?;
63 for (i, (memory, score)) in results.iter().enumerate() {
64 println!("\nResult {}: Similarity {:.4}", i + 1, score);
65 println!("Content: {}", memory.content);
66 println!(
67 "Metadata: {}",
68 serde_json::to_string_pretty(&memory.metadata)?
69 );
70 }
71
72 // Demo 2: Long text processing
73 println!("\n--- Long Text Processing Demo ---");
74
75 // Prepare a longer text
76 let long_text = r#"
77 Artificial Intelligence (AI) is a branch of computer science aimed at creating systems capable of simulating human intelligence behaviors.
78 AI research includes multiple sub-fields such as machine learning, deep learning, natural language processing, computer vision, expert systems, etc.
79 Machine learning is one of the core technologies of AI, using statistical techniques to enable computer systems to "learn" (i.e., progressively improve performance) without explicit programming.
80 Deep learning is a subset of machine learning that uses multi-layered neural networks to process data. These networks extract features from data, with each layer building on the output of the previous one.
81 Large Language Models (LLMs) are a significant breakthrough in AI in recent years, generating human-like text, understanding context, and performing various language tasks by learning from vast amounts of text data.
82 Vector embeddings are techniques for representing text, images, or other data as points in a multi-dimensional vector space, playing an important role in information retrieval, recommendation systems, and semantic search.
83 "#;
84
85 println!("Long text content:\n{long_text}");
86
87 // Process long text using different strategies
88 println!("\nUsing default strategy (no chunking):");
89 let embedding1 = llm_brain.process_long_text(long_text, None).await?;
90 println!("Generated embedding vector length: {}", embedding1.len());
91
92 println!("\nUsing chunk and average strategy:");
93 let chunk_strategy = ChunkingStrategy::ChunkAndAverage {
94 chunk_size: 100,
95 chunk_overlap: 20,
96 };
97 let embedding2 = llm_brain
98 .process_long_text(long_text, Some(chunk_strategy))
99 .await?;
100 println!("Generated embedding vector length: {}", embedding2.len());
101
102 // Calculate similarity between embeddings generated by different strategies
103 let similarity = cosine_similarity(&embedding1, &embedding2);
104 println!("\nCosine similarity between embeddings from two strategies: {similarity:.4}");
105
106 // Demo 3: Adding long text to memory
107 println!("\n--- Adding Long Text Memory Demo ---");
108
109 let metadata = json!({
110 "name": "Artificial Intelligence Overview",
111 "type": "Knowledge",
112 "tags": ["AI", "Machine Learning", "Deep Learning"]
113 });
114
115 let memory_id = llm_brain.add_memory(long_text.to_owned(), metadata).await?;
116 println!("Added long text memory: ID={memory_id}");
117
118 // Wait for embedding processing to complete
119 tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
120
121 // Query related content
122 println!("\nQuerying memories about 'large language models':");
123 let results = llm_brain
124 .recall("large language models and vector embeddings", 1)
125 .await?;
126 for (i, (memory, score)) in results.iter().enumerate() {
127 println!("\nResult {}: Similarity {:.4}", i + 1, score);
128 println!("Content summary: {}", truncate_text(&memory.content, 100));
129 println!(
130 "Metadata: {}",
131 serde_json::to_string_pretty(&memory.metadata)?
132 );
133 }
134
135 println!("\nDemo completed.");
136 Ok(())
137}
Source§pub async fn get_memory_by_id_string(
    &self,
    id_string: &str,
) -> Result<Option<MemoryFragment>>
pub async fn get_memory_by_id_string(&self, id_string: &str) -> Result<Option<MemoryFragment>>
Retrieves a specific memory by its SurrealDB ID string.
Returns None if no memory with the given ID exists.
Examples found in repository?
examples/comprehensive_demo.rs (line 210)
155async fn demonstrate_memory_lookup(llm_brain: &LLMBrain) -> Result<()> {
156 println!("\n=== Demonstrating Memory Lookup ===");
157
158 // Add a new memory about a trip
159 println!("\nAdding memory about a road trip...");
160 let trip_data = json!({
161 "name": "Road_Trip_Tahoe",
162 "column": "Episodic",
163 "timestamp": Utc::now().to_rfc3339(),
164 "properties": {
165 "action": "Road trip to Lake Tahoe",
166 "location": "Lake Tahoe",
167 "participants": ["Me", "Family"],
168 "duration": "3 days",
169 "transportation": "Tesla Model 3",
170 "highlights": ["Beautiful scenery", "Hiking", "Swimming"]
171 }
172 });
173
174 let trip_content = "Last weekend, we took our Tesla Model 3 on a road trip to Lake Tahoe. \
175 The drive was smooth and we enjoyed the beautiful scenery. \
176 We spent 3 days there with the family, hiking and swimming in the lake. \
177 The car performed excellently on the mountain roads.";
178
179 let trip_id = llm_brain
180 .add_memory(trip_content.to_owned(), trip_data)
181 .await?;
182 println!("Added trip memory with ID: {trip_id}");
183
184 // Allow time for embeddings to be processed
185 sleep(Duration::from_millis(200)).await;
186
187 // Now try to find this memory with various related queries
188 let queries = [
189 "Did we go on any trips with our Tesla?",
190 "What activities did we do at Lake Tahoe?",
191 "How did the Tesla perform on our road trip?",
192 ];
193
194 for (i, query) in queries.iter().enumerate() {
195 println!("\nQuery {}: \"{}\"", i + 1, query);
196 let results = llm_brain.recall(query, 1).await?;
197
198 if !results.is_empty() {
199 let (memory, score) = &results[0];
200 println!("Found memory (score: {score:.4}):");
201 println!("Content: {}", memory.content);
202 } else {
203 println!("No relevant memories found.");
204 }
205 }
206
207 // Direct lookup by ID
208 println!("\nDirect lookup by ID:");
209 if let Some(memory) = llm_brain
210 .get_memory_by_id_string(&trip_id.to_string())
211 .await?
212 {
213 println!("Successfully retrieved memory");
214 println!("Content: {}", memory.content);
215 } else {
216 println!("Failed to retrieve memory by ID");
217 }
218
219 Ok(())
220}
Trait Implementations§
Auto Trait Implementations§
impl Freeze for LLMBrain
impl !RefUnwindSafe for LLMBrain
impl Send for LLMBrain
impl Sync for LLMBrain
impl Unpin for LLMBrain
impl !UnwindSafe for LLMBrain
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T where T: ?Sized
impl<T> BorrowMut<T> for T where T: ?Sized
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
Source§impl<T> CloneToUninit for T where T: Clone
impl<T> CloneToUninit for T where T: Clone
Source§impl<T> Instrument for T
impl<T> Instrument for T
Source§fn instrument(self, span: Span) -> Instrumented<Self>
fn instrument(self, span: Span) -> Instrumented<Self>
Source§fn in_current_span(self) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true.
Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self> otherwise. Read more