// codetether_agent/tool/rlm.rs
use super::{Tool, ToolResult};
use anyhow::Result;
use async_trait::async_trait;
use serde_json::{Value, json};

/// Tool that chunk-processes content too large for a single context window,
/// exposing `analyze`, `summarize`, and `search` actions.
pub struct RlmTool {
    /// Maximum size in bytes of each chunk produced by `chunk_content`.
    max_chunk_size: usize,
}
16
impl Default for RlmTool {
    /// Equivalent to [`RlmTool::new`] (8192-byte chunks).
    fn default() -> Self {
        Self::new()
    }
}
22
23impl RlmTool {
24 pub fn new() -> Self {
25 Self {
26 max_chunk_size: 8192,
27 }
28 }
29
30 #[allow(dead_code)]
31 pub fn with_chunk_size(max_chunk_size: usize) -> Self {
32 Self { max_chunk_size }
33 }
34}
35
36#[async_trait]
37impl Tool for RlmTool {
38 fn id(&self) -> &str {
39 "rlm"
40 }
41
42 fn name(&self) -> &str {
43 "RLM"
44 }
45
46 fn description(&self) -> &str {
47 "Recursive Language Model for processing large codebases. Use this when you need to analyze files or content that exceeds the context window. RLM chunks the content, processes each chunk, and synthesizes results. Actions: 'analyze' (analyze large content), 'summarize' (summarize large files), 'search' (semantic search across large codebase)."
48 }
49
50 fn parameters(&self) -> Value {
51 json!({
52 "type": "object",
53 "properties": {
54 "action": {
55 "type": "string",
56 "description": "Action: 'analyze' (deep analysis), 'summarize' (generate summary), 'search' (semantic search)",
57 "enum": ["analyze", "summarize", "search"]
58 },
59 "query": {
60 "type": "string",
61 "description": "The question or query to answer (for analyze/search)"
62 },
63 "paths": {
64 "type": "array",
65 "items": {"type": "string"},
66 "description": "File or directory paths to process"
67 },
68 "content": {
69 "type": "string",
70 "description": "Direct content to analyze (alternative to paths)"
71 },
72 "max_depth": {
73 "type": "integer",
74 "description": "Maximum recursion depth (default: 3)",
75 "default": 3
76 }
77 },
78 "required": ["action"]
79 })
80 }
81
82 async fn execute(&self, args: Value) -> Result<ToolResult> {
83 let action = args["action"]
84 .as_str()
85 .ok_or_else(|| anyhow::anyhow!("action is required"))?;
86
87 let query = args["query"].as_str().unwrap_or("");
88 let paths: Vec<&str> = args["paths"]
89 .as_array()
90 .map(|arr| arr.iter().filter_map(|v| v.as_str()).collect())
91 .unwrap_or_default();
92 let content = args["content"].as_str();
93 let max_depth = args["max_depth"].as_u64().unwrap_or(3) as usize;
94
95 match action {
96 "analyze" => {
97 if query.is_empty() {
98 return Ok(ToolResult::error("query is required for 'analyze' action"));
99 }
100
101 let all_content = if let Some(c) = content {
103 c.to_string()
104 } else if !paths.is_empty() {
105 let mut collected = String::new();
106 for path in &paths {
107 match tokio::fs::read_to_string(path).await {
108 Ok(c) => {
109 collected.push_str(&format!("=== {} ===\n{}\n\n", path, c));
110 }
111 Err(e) => {
112 collected.push_str(&format!("=== {} (error: {}) ===\n\n", path, e));
113 }
114 }
115 }
116 collected
117 } else {
118 return Ok(ToolResult::error("Either 'paths' or 'content' is required"));
119 };
120
121 let chunks = self.chunk_content(&all_content);
124 let output = format!(
125 "RLM Analysis\n\
126 Query: {}\n\
127 Paths: {:?}\n\
128 Content size: {} bytes\n\
129 Chunks: {}\n\
130 Max depth: {}\n\n\
131 [Full RLM processing would analyze each chunk and synthesize results]\n\n\
132 Content preview (first chunk):\n{}",
133 query,
134 paths,
135 all_content.len(),
136 chunks.len(),
137 max_depth,
138 chunks
139 .first()
140 .map(|c| if c.len() > 500 { &c[..500] } else { c })
141 .unwrap_or("")
142 );
143
144 Ok(ToolResult::success(output))
145 }
146 "summarize" => {
147 if paths.is_empty() && content.is_none() {
148 return Ok(ToolResult::error("Either 'paths' or 'content' is required"));
149 }
150
151 let all_content = if let Some(c) = content {
152 c.to_string()
153 } else {
154 let mut collected = String::new();
155 for path in &paths {
156 match tokio::fs::read_to_string(path).await {
157 Ok(c) => collected.push_str(&c),
158 Err(e) => {
159 collected.push_str(&format!("[Error reading {}: {}]\n", path, e))
160 }
161 }
162 }
163 collected
164 };
165
166 let chunks = self.chunk_content(&all_content);
167 let output = format!(
168 "RLM Summary\n\
169 Paths: {:?}\n\
170 Content size: {} bytes\n\
171 Chunks: {}\n\n\
172 [Full RLM would summarize each chunk and combine summaries]",
173 paths,
174 all_content.len(),
175 chunks.len()
176 );
177
178 Ok(ToolResult::success(output))
179 }
180 "search" => {
181 if query.is_empty() {
182 return Ok(ToolResult::error("query is required for 'search' action"));
183 }
184
185 let output = format!(
186 "RLM Semantic Search\n\
187 Query: {}\n\
188 Paths: {:?}\n\n\
189 [Full RLM would perform semantic search across chunks]",
190 query, paths
191 );
192
193 Ok(ToolResult::success(output))
194 }
195 _ => Ok(ToolResult::error(format!(
196 "Unknown action: {}. Use 'analyze', 'summarize', or 'search'.",
197 action
198 ))),
199 }
200 }
201}
202
203impl RlmTool {
204 fn chunk_content(&self, content: &str) -> Vec<String> {
205 let mut chunks = Vec::new();
206 let lines: Vec<&str> = content.lines().collect();
207 let mut current_chunk = String::new();
208
209 for line in lines {
210 if current_chunk.len() + line.len() + 1 > self.max_chunk_size
211 && !current_chunk.is_empty()
212 {
213 chunks.push(current_chunk);
214 current_chunk = String::new();
215 }
216 current_chunk.push_str(line);
217 current_chunk.push('\n');
218 }
219
220 if !current_chunk.is_empty() {
221 chunks.push(current_chunk);
222 }
223
224 chunks
225 }
226}