// codetether_agent/rlm/mod.rs
//! Recursive Language Model (RLM) processing
//!
//! Handles large contexts that exceed model context windows by:
//! 1. Loading context into a REPL environment as a variable
//! 2. Having the LLM write code to analyze it
//! 3. Supporting recursive sub-LM calls for semantic analysis
//!
//! Based on "Recursive Language Models" (Zhang et al. 2025)

pub mod chunker;
pub mod repl;
pub mod router;

pub use chunker::{Chunk, ChunkOptions, ContentType, RlmChunker};
pub use repl::{ReplRuntime, RlmAnalysisResult, RlmExecutor, RlmRepl, SubQuery};
pub use router::{RlmRouter, RoutingContext, RoutingResult};

use serde::{Deserialize, Serialize};

20/// RLM processing statistics
21#[derive(Debug, Clone, Default, Serialize, Deserialize)]
22pub struct RlmStats {
23    pub input_tokens: usize,
24    pub output_tokens: usize,
25    pub iterations: usize,
26    pub subcalls: usize,
27    pub elapsed_ms: u64,
28    pub compression_ratio: f64,
29}
30
31/// RLM processing result
32#[derive(Debug, Clone, Serialize, Deserialize)]
33pub struct RlmResult {
34    pub processed: String,
35    pub stats: RlmStats,
36    pub success: bool,
37    pub error: Option<String>,
38}
39
40/// RLM configuration
41#[derive(Debug, Clone, Serialize, Deserialize)]
42pub struct RlmConfig {
43    /// Mode: "auto", "off", or "always"
44    #[serde(default = "default_mode")]
45    pub mode: String,
46
47    /// Threshold ratio of context window to trigger RLM (0.0-1.0)
48    #[serde(default = "default_threshold")]
49    pub threshold: f64,
50
51    /// Maximum iterations for RLM processing
52    #[serde(default = "default_max_iterations")]
53    pub max_iterations: usize,
54
55    /// Maximum recursive sub-calls
56    #[serde(default = "default_max_subcalls")]
57    pub max_subcalls: usize,
58
59    /// Preferred runtime: "rust", "bun", or "python"
60    #[serde(default = "default_runtime")]
61    pub runtime: String,
62
63    /// Model reference for root processing (provider:model)
64    pub root_model: Option<String>,
65
66    /// Model reference for subcalls (provider:model)
67    pub subcall_model: Option<String>,
68}
69
/// Serde default for [`RlmConfig::mode`]: "auto".
fn default_mode() -> String {
    "auto".to_string()
}

/// Serde default for [`RlmConfig::threshold`]: 0.35 of the context window.
fn default_threshold() -> f64 {
    0.35
}

/// Serde default for [`RlmConfig::max_iterations`]: 15.
fn default_max_iterations() -> usize {
    15
}

/// Serde default for [`RlmConfig::max_subcalls`]: 50.
fn default_max_subcalls() -> usize {
    50
}

/// Serde default for [`RlmConfig::runtime`]: "rust".
fn default_runtime() -> String {
    "rust".to_string()
}

90impl Default for RlmConfig {
91    fn default() -> Self {
92        Self {
93            mode: default_mode(),
94            threshold: default_threshold(),
95            max_iterations: default_max_iterations(),
96            max_subcalls: default_max_subcalls(),
97            runtime: default_runtime(),
98            root_model: None,
99            subcall_model: None,
100        }
101    }
102}