infiniloom_engine/lib.rs
//! # Infiniloom Engine - Repository Context Generation for LLMs
//!
//! `infiniloom_engine` is a high-performance library for generating optimized
//! repository context for Large Language Models. It transforms codebases into
//! structured formats optimized for Claude, GPT-4, Gemini, and other LLMs.
//!
//! ## Features
//!
//! - **AST-based symbol extraction** via Tree-sitter (21 programming languages)
//! - **PageRank-based importance ranking** for intelligent code prioritization
//! - **Model-specific output formats** (XML for Claude, Markdown for GPT, YAML for Gemini)
//! - **Automatic secret detection** and redaction (API keys, credentials, tokens)
//! - **Accurate token counting**: exact counts via tiktoken-rs for OpenAI models, calibrated estimation (~95% accuracy) for other model families
//! - **Full dependency resolution** with transitive dependency analysis
//! - **Remote Git repository support** (GitHub, GitLab, Bitbucket)
//! - **Incremental scanning** with content-addressed caching
//! - **Semantic compression** for intelligent code summarization
//! - **Token budget enforcement** with smart truncation strategies
//!
//! ## Quick Start
//!
//! ```rust,ignore
//! use infiniloom_engine::{Repository, RepoMapGenerator, OutputFormatter, OutputFormat};
//!
//! // Create a repository from scanned files
//! let repo = Repository::new("my-project", "/path/to/project");
//!
//! // Generate a repository map with key symbols ranked by importance
//! let map = RepoMapGenerator::new(2000).generate(&repo);
//!
//! // Format for Claude (XML output)
//! let formatter = OutputFormatter::by_format(OutputFormat::Xml);
//! let output = formatter.format(&repo, &map);
//! ```
//!
//! ## Output Formats
//!
//! Each LLM has an optimal input format:
//!
//! | Format | Best For | Notes |
//! |--------|----------|-------|
//! | XML | Claude | Optimized structure, CDATA sections |
//! | Markdown | GPT-4 | Fenced code blocks with syntax highlighting |
//! | YAML | Gemini | Query at end (Gemini best practice) |
//! | TOON | All | Token-efficient, 30-40% fewer tokens |
//! | JSON | APIs | Machine-readable, fully structured |
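//!
//! Targeting a different model just means selecting a different formatter.
//! A sketch mirroring the Quick Start (assumes a `Markdown` variant exists
//! alongside the `Xml` variant shown above):
//!
//! ```rust,ignore
//! use infiniloom_engine::{OutputFormat, OutputFormatter};
//!
//! // Same flow as the Quick Start, with Markdown as the target format
//! let formatter = OutputFormatter::by_format(OutputFormat::Markdown);
//! let output = formatter.format(&repo, &map);
//! ```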
//!
//! ## Token Counting
//!
//! The library provides accurate token counts for multiple LLM families:
//!
//! ```rust,ignore
//! use infiniloom_engine::{Tokenizer, TokenModel};
//!
//! let tokenizer = Tokenizer::new();
//! let content = "fn main() { println!(\"Hello\"); }";
//!
//! // Exact counts via tiktoken for OpenAI models
//! let gpt4o_tokens = tokenizer.count(content, TokenModel::Gpt4o);
//!
//! // Calibrated estimation for other models
//! let claude_tokens = tokenizer.count(content, TokenModel::Claude);
//! ```
//!
//! ## Security Scanning
//!
//! Automatically detect and redact sensitive information:
//!
//! ```rust,ignore
//! use infiniloom_engine::SecurityScanner;
//!
//! let scanner = SecurityScanner::new();
//! let content = "AWS_KEY=AKIAIOSFODNN7EXAMPLE";
//!
//! // Check if content is safe
//! if !scanner.is_safe(content, "config.env") {
//!     // Redact sensitive content
//!     let redacted = scanner.redact_content(content, "config.env");
//! }
//! ```
//!
//! ## Feature Flags
//!
//! Enable optional functionality:
//!
//! - `async` - Async/await support with Tokio
//! - `embeddings` - Character-frequency similarity (NOT neural - see semantic module docs)
//! - `watch` - File watching for incremental updates
//! - `full` - All features enabled
//!
//! Note: Git operations use the system `git` CLI via `std::process::Command`.
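//!
//! Features are enabled in the dependent crate's `Cargo.toml` as usual
//! (the version below is a placeholder):
//!
//! ```toml
//! [dependencies]
//! infiniloom_engine = { version = "0.1", features = ["async", "watch"] }
//! ```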
//!
//! ## Module Overview
//!
//! | Module | Description |
//! |--------|-------------|
//! | [`parser`] | AST-based symbol extraction using Tree-sitter |
//! | [`repomap`] | PageRank-based symbol importance ranking |
//! | [`output`] | Model-specific formatters (XML, Markdown, etc.) |
//! | [`security`] | Secret detection and redaction |
//! | [`tokenizer`] | Multi-model token counting |
//! | [`chunking`] | Semantic code chunking |
//! | [`budget`] | Token budget enforcement |
//! | [`incremental`] | Caching and incremental scanning |
//! | [`semantic`] | Heuristic-based compression (char-frequency, NOT neural) |
//! | [`error`] | Unified error types |
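//!
//! ## Error Handling
//!
//! Fallible APIs across these modules share the unified error type,
//! re-exported at the crate root as [`InfiniloomError`] alongside the
//! [`InfiniloomResult`] alias. A minimal sketch (both helper functions are
//! hypothetical, shown only to illustrate `?`-propagation through the
//! unified type):
//!
//! ```rust,ignore
//! use infiniloom_engine::InfiniloomResult;
//!
//! fn build_context() -> InfiniloomResult<String> {
//!     let repo = scan_repository("/path/to/project")?; // hypothetical helper
//!     let output = format_repository(&repo)?;          // hypothetical helper
//!     Ok(output)
//! }
//! ```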

// Core modules
pub mod chunking;
pub mod default_ignores;
pub mod output;
pub mod parser;
pub mod ranking;
pub mod repomap;
pub mod security;
pub mod types;

// New modules
pub mod config;
pub mod dependencies;
pub mod git;
pub mod remote;
pub mod tokenizer;

// Git context index module
pub mod index;

// Memory-mapped file scanner for large files
pub mod mmap_scanner;

// Semantic analysis module (always available; the embeddings feature enables character-frequency similarity, NOT neural - see module docs)
pub mod semantic;

// Smart token budget enforcement
pub mod budget;

// Incremental scanning and caching
pub mod incremental;

// Unified error types
pub mod error;

// Re-exports from core modules
pub use chunking::{Chunk, ChunkStrategy, Chunker};
pub use output::{OutputFormat, OutputFormatter};
pub use parser::{Language, Parser, ParserError};
pub use ranking::{count_symbol_references, rank_files, sort_files_by_importance, SymbolRanker};
pub use repomap::{RepoMap, RepoMapGenerator};
pub use security::SecurityScanner;
pub use types::*;

// Re-exports from new modules
pub use budget::{BudgetConfig, BudgetEnforcer, EnforcementResult, TruncationStrategy};
pub use config::{
    Config, OutputConfig, PerformanceConfig, ScanConfig, SecurityConfig, SymbolConfig,
};
pub use dependencies::{DependencyEdge, DependencyGraph, DependencyNode, ResolvedImport};
pub use git::{ChangedFile, Commit, FileStatus, GitError, GitRepo};
pub use incremental::{CacheError, CacheStats, CachedFile, CachedSymbol, RepoCache};
pub use mmap_scanner::{MappedFile, MmapScanner, ScanStats, ScannedFile, StreamingProcessor};
pub use remote::{GitProvider, RemoteError, RemoteRepo};
pub use semantic::{
    CodeChunk,
    HeuristicCompressionConfig,
    // Note: SemanticAnalyzer and CharacterFrequencyAnalyzer are available via semantic:: module
    // but not re-exported at top level since they're primarily internal implementation details
    // Honest type aliases - recommended for new code
    HeuristicCompressor,
    SemanticCompressor,
    SemanticConfig,
    SemanticError,
};
/// Backward-compatible alias for TokenCounts
pub use tokenizer::TokenCounts as AccurateTokenCounts;
pub use tokenizer::Tokenizer;
// Note: IncrementalScanner is available via incremental:: module but not re-exported
// at top level since CLI uses RepoCache directly
pub use error::{InfiniloomError, Result as InfiniloomResult};

/// Library version
pub const VERSION: &str = env!("CARGO_PKG_VERSION");

/// Default token budget for repository maps
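///
/// Matches the budget used in the Quick Start; a sketch:
///
/// ```rust,ignore
/// use infiniloom_engine::{RepoMapGenerator, DEFAULT_MAP_BUDGET};
///
/// // Equivalent to RepoMapGenerator::new(2000)
/// let map = RepoMapGenerator::new(DEFAULT_MAP_BUDGET).generate(&repo);
/// ```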
pub const DEFAULT_MAP_BUDGET: u32 = 2000;

/// Default chunk size in tokens
pub const DEFAULT_CHUNK_SIZE: u32 = 8000;

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_version() {
        // Verify version follows semver format (at least has a number)
        assert!(VERSION.chars().any(|c| c.is_ascii_digit()));
    }
}
198}