//! RLM tool (`codetether_agent/tool/rlm.rs`).

use super::{Tool, ToolResult};
use anyhow::Result;
use async_trait::async_trait;
use serde_json::{json, Value};

/// Recursive Language Model tool: chunks oversized content so it can be
/// analyzed, summarized, or searched piece by piece.
pub struct RlmTool {
    /// Upper bound, in bytes, for each chunk produced by `chunk_content`.
    max_chunk_size: usize,
}
16
17impl Default for RlmTool {
18 fn default() -> Self {
19 Self::new()
20 }
21}
22
23impl RlmTool {
24 pub fn new() -> Self {
25 Self {
26 max_chunk_size: 8192,
27 }
28 }
29
30 #[allow(dead_code)]
31 pub fn with_chunk_size(max_chunk_size: usize) -> Self {
32 Self { max_chunk_size }
33 }
34}
35
36#[async_trait]
37impl Tool for RlmTool {
38 fn id(&self) -> &str {
39 "rlm"
40 }
41
42 fn name(&self) -> &str {
43 "RLM"
44 }
45
46 fn description(&self) -> &str {
47 "Recursive Language Model for processing large codebases. Use this when you need to analyze files or content that exceeds the context window. RLM chunks the content, processes each chunk, and synthesizes results. Actions: 'analyze' (analyze large content), 'summarize' (summarize large files), 'search' (semantic search across large codebase)."
48 }
49
50 fn parameters(&self) -> Value {
51 json!({
52 "type": "object",
53 "properties": {
54 "action": {
55 "type": "string",
56 "description": "Action: 'analyze' (deep analysis), 'summarize' (generate summary), 'search' (semantic search)",
57 "enum": ["analyze", "summarize", "search"]
58 },
59 "query": {
60 "type": "string",
61 "description": "The question or query to answer (for analyze/search)"
62 },
63 "paths": {
64 "type": "array",
65 "items": {"type": "string"},
66 "description": "File or directory paths to process"
67 },
68 "content": {
69 "type": "string",
70 "description": "Direct content to analyze (alternative to paths)"
71 },
72 "max_depth": {
73 "type": "integer",
74 "description": "Maximum recursion depth (default: 3)",
75 "default": 3
76 }
77 },
78 "required": ["action"]
79 })
80 }
81
82 async fn execute(&self, args: Value) -> Result<ToolResult> {
83 let action = args["action"]
84 .as_str()
85 .ok_or_else(|| anyhow::anyhow!("action is required"))?;
86
87 let query = args["query"].as_str().unwrap_or("");
88 let paths: Vec<&str> = args["paths"]
89 .as_array()
90 .map(|arr| arr.iter().filter_map(|v| v.as_str()).collect())
91 .unwrap_or_default();
92 let content = args["content"].as_str();
93 let max_depth = args["max_depth"].as_u64().unwrap_or(3) as usize;
94
95 match action {
96 "analyze" => {
97 if query.is_empty() {
98 return Ok(ToolResult::error("query is required for 'analyze' action"));
99 }
100
101 let all_content = if let Some(c) = content {
103 c.to_string()
104 } else if !paths.is_empty() {
105 let mut collected = String::new();
106 for path in &paths {
107 match tokio::fs::read_to_string(path).await {
108 Ok(c) => {
109 collected.push_str(&format!("=== {} ===\n{}\n\n", path, c));
110 }
111 Err(e) => {
112 collected.push_str(&format!("=== {} (error: {}) ===\n\n", path, e));
113 }
114 }
115 }
116 collected
117 } else {
118 return Ok(ToolResult::error("Either 'paths' or 'content' is required"));
119 };
120
121 let chunks = self.chunk_content(&all_content);
124 let output = format!(
125 "RLM Analysis\n\
126 Query: {}\n\
127 Paths: {:?}\n\
128 Content size: {} bytes\n\
129 Chunks: {}\n\
130 Max depth: {}\n\n\
131 [Full RLM processing would analyze each chunk and synthesize results]\n\n\
132 Content preview (first chunk):\n{}",
133 query,
134 paths,
135 all_content.len(),
136 chunks.len(),
137 max_depth,
138 chunks.first().map(|c| if c.len() > 500 { &c[..500] } else { c }).unwrap_or("")
139 );
140
141 Ok(ToolResult::success(output))
142 }
143 "summarize" => {
144 if paths.is_empty() && content.is_none() {
145 return Ok(ToolResult::error("Either 'paths' or 'content' is required"));
146 }
147
148 let all_content = if let Some(c) = content {
149 c.to_string()
150 } else {
151 let mut collected = String::new();
152 for path in &paths {
153 match tokio::fs::read_to_string(path).await {
154 Ok(c) => collected.push_str(&c),
155 Err(e) => collected.push_str(&format!("[Error reading {}: {}]\n", path, e)),
156 }
157 }
158 collected
159 };
160
161 let chunks = self.chunk_content(&all_content);
162 let output = format!(
163 "RLM Summary\n\
164 Paths: {:?}\n\
165 Content size: {} bytes\n\
166 Chunks: {}\n\n\
167 [Full RLM would summarize each chunk and combine summaries]",
168 paths,
169 all_content.len(),
170 chunks.len()
171 );
172
173 Ok(ToolResult::success(output))
174 }
175 "search" => {
176 if query.is_empty() {
177 return Ok(ToolResult::error("query is required for 'search' action"));
178 }
179
180 let output = format!(
181 "RLM Semantic Search\n\
182 Query: {}\n\
183 Paths: {:?}\n\n\
184 [Full RLM would perform semantic search across chunks]",
185 query, paths
186 );
187
188 Ok(ToolResult::success(output))
189 }
190 _ => Ok(ToolResult::error(format!("Unknown action: {}. Use 'analyze', 'summarize', or 'search'.", action))),
191 }
192 }
193}
194
195impl RlmTool {
196 fn chunk_content(&self, content: &str) -> Vec<String> {
197 let mut chunks = Vec::new();
198 let lines: Vec<&str> = content.lines().collect();
199 let mut current_chunk = String::new();
200
201 for line in lines {
202 if current_chunk.len() + line.len() + 1 > self.max_chunk_size
203 && !current_chunk.is_empty() {
204 chunks.push(current_chunk);
205 current_chunk = String::new();
206 }
207 current_chunk.push_str(line);
208 current_chunk.push('\n');
209 }
210
211 if !current_chunk.is_empty() {
212 chunks.push(current_chunk);
213 }
214
215 chunks
216 }
217}