pub mod error;
pub mod streaming;
pub mod caching;
pub mod parallel;
pub mod adaptive;
pub mod signatures;
pub mod profiling;
pub mod memory;
pub mod metrics;

pub mod positioning;

pub mod engine;

pub mod selector;

pub use engine::{ScalingEngine, ScalingConfig, ProcessingResult};
pub use selector::{ScalingSelector, ScalingSelectionConfig, ScalingSelectionResult, SelectionAlgorithm};
pub use positioning::{ContextPositioner, ContextPositioningConfig, PositionedSelection, ContextPositioning};
pub use streaming::{StreamingConfig, FileMetadata, FileChunk};
pub use caching::CacheConfig;
pub use parallel::ParallelConfig;
pub use adaptive::AdaptiveConfig;
pub use signatures::{SignatureLevel, SignatureConfig};
pub use profiling::{RepositoryProfiler, RepositoryProfile, RepositoryType};
pub use memory::{MemoryConfig, MemoryStats};
pub use metrics::{ScalingMetrics, BenchmarkResult};

pub use error::{ScalingError, ScalingResult};

/// Crate version string, taken from `CARGO_PKG_VERSION` at compile time.
pub const VERSION: &str = env!("CARGO_PKG_VERSION");

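/// Returns a `ScalingConfig` with all settings at their defaults; equivalent to
/// calling `ScalingConfig::default()` directly.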
pub fn default_scaling_config() -> ScalingConfig {
    ScalingConfig::default()
}

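/// Builds a `ScalingEngine` tuned to the repository at `repo_path`: the repository is
/// profiled with `RepositoryProfiler`, and the resulting profile is converted into the
/// `ScalingConfig` the engine is constructed with.
///
/// # Example
///
/// A minimal usage sketch; the crate name `scaling` is an assumption (this file does not
/// show the actual crate name), so the doctest is marked `ignore`:
///
/// ```ignore
/// use scaling::create_scaling_engine;
///
/// # async fn demo() -> scaling::ScalingResult<()> {
/// // Profile the repository and construct an engine with a config derived from it.
/// let engine = create_scaling_engine("./my-repo").await?;
/// assert!(engine.is_ready());
/// # Ok(())
/// # }
/// ```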
pub async fn create_scaling_engine<P: AsRef<std::path::Path>>(
    repo_path: P,
) -> ScalingResult<ScalingEngine> {
    let profiler = RepositoryProfiler::new();
    let profile = profiler.profile_repository(repo_path.as_ref()).await?;
    let config = profile.to_scaling_config();

    Ok(ScalingEngine::with_config(config))
}

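/// Runs `RepositoryProfiler::quick_estimate` on the repository at `repo_path` and returns
/// the resulting `(file_count, estimated_duration, memory_usage)` tuple.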
pub async fn quick_scale_estimate<P: AsRef<std::path::Path>>(
    repo_path: P,
) -> ScalingResult<(usize, std::time::Duration, usize)> {
    let profiler = RepositoryProfiler::new();
    let (file_count, estimated_duration, memory_usage) = profiler.quick_estimate(repo_path.as_ref()).await?;
    Ok((file_count, estimated_duration, memory_usage))
}

#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;
    use std::fs;

    #[tokio::test]
    async fn test_scaling_engine_creation() {
        let temp_dir = TempDir::new().unwrap();

        fs::write(temp_dir.path().join("test.rs"), "fn main() {}").unwrap();
        fs::write(temp_dir.path().join("lib.rs"), "pub fn test() {}").unwrap();

        let engine = create_scaling_engine(temp_dir.path()).await.unwrap();
        assert!(engine.is_ready());
    }

    #[tokio::test]
    async fn test_quick_scale_estimate() {
        let temp_dir = TempDir::new().unwrap();

        for i in 0..10 {
            fs::write(temp_dir.path().join(format!("file_{}.rs", i)), "// test file").unwrap();
        }

        let (file_count, duration, memory) = quick_scale_estimate(temp_dir.path()).await.unwrap();
        assert!(file_count >= 10);
        assert!(duration.as_millis() > 0);
        assert!(memory > 0);
    }

    #[test]
    fn test_default_config() {
        let config = default_scaling_config();
        assert!(config.streaming.chunk_size > 0);
        assert!(config.caching.enable_persistent_cache);
        assert!(config.parallel.max_concurrent_tasks > 0);
    }
}