1use serde::{Deserialize, Serialize};
4
/// Top-level tuning configuration for the runtime's optimization subsystems.
///
/// Each section is optional in serialized form: the per-field
/// `#[serde(default)]` fills a missing section from that section's
/// `Default` impl, so a partial config file still deserializes.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct OptimizationConfig {
    /// Memory/object pooling settings.
    #[serde(default)]
    pub memory_pool: MemoryPoolConfig,

    /// Tool registry settings (concurrency, hot cache, timeouts).
    #[serde(default)]
    pub tool_registry: ToolRegistryConfig,

    /// Async pipeline settings (batching and caching).
    #[serde(default)]
    pub async_pipeline: AsyncPipelineConfig,

    /// LLM client settings (pooling, caching, rate limiting).
    #[serde(default)]
    pub llm_client: LLMClientConfig,

    /// Agent execution-loop settings (limits, idle handling).
    #[serde(default)]
    pub agent_execution: AgentExecutionConfig,

    /// Performance profiling settings.
    #[serde(default)]
    pub profiling: ProfilingConfig,
}
33
34#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
36#[derive(Debug, Clone, Serialize, Deserialize)]
37pub struct MemoryPoolConfig {
38 pub enabled: bool,
40
41 pub max_string_pool_size: usize,
43
44 pub max_value_pool_size: usize,
46
47 pub max_vec_pool_size: usize,
49}
50
51#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
53#[derive(Debug, Clone, Serialize, Deserialize)]
54pub struct ToolRegistryConfig {
55 pub use_optimized_registry: bool,
57
58 pub max_concurrent_tools: usize,
60
61 pub hot_cache_size: usize,
63
64 pub default_timeout_secs: u64,
66}
67
68#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
70#[derive(Debug, Clone, Serialize, Deserialize)]
71pub struct AsyncPipelineConfig {
72 pub enable_batching: bool,
74
75 pub enable_caching: bool,
77
78 pub max_batch_size: usize,
80
81 pub batch_timeout_ms: u64,
83
84 pub cache_size: usize,
86}
87
88#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
90#[derive(Debug, Clone, Serialize, Deserialize)]
91pub struct LLMClientConfig {
92 pub enable_connection_pooling: bool,
94
95 pub enable_response_caching: bool,
97
98 pub enable_request_batching: bool,
100
101 pub connection_pool_size: usize,
103
104 pub response_cache_size: usize,
106
107 pub cache_ttl_secs: u64,
109
110 pub rate_limit_rps: f64,
112
113 pub rate_limit_burst: usize,
115}
116
117#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
119#[derive(Debug, Clone, Serialize, Deserialize)]
120pub struct AgentExecutionConfig {
121 pub use_optimized_loop: bool,
123
124 pub enable_performance_prediction: bool,
126
127 pub state_history_size: usize,
129
130 pub resource_monitor_interval_ms: u64,
132
133 pub max_memory_mb: u64,
135
136 pub max_execution_time_secs: u64,
138
139 pub idle_timeout_ms: u64,
142
143 pub idle_backoff_ms: u64,
146
147 pub max_idle_cycles: usize,
149}
150
151#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
153#[derive(Debug, Clone, Serialize, Deserialize)]
154pub struct ProfilingConfig {
155 pub enabled: bool,
157
158 pub monitor_interval_ms: u64,
160
161 pub max_history_size: usize,
163
164 pub auto_export_results: bool,
166
167 pub export_file_path: String,
169
170 pub enable_regression_testing: bool,
172
173 pub max_regression_percent: f64,
175}
176
177impl Default for MemoryPoolConfig {
178 fn default() -> Self {
179 Self {
180 enabled: true,
181 max_string_pool_size: 64,
182 max_value_pool_size: 32,
183 max_vec_pool_size: 16,
184 }
185 }
186}
187
188impl Default for ToolRegistryConfig {
189 fn default() -> Self {
190 Self {
191 use_optimized_registry: true, max_concurrent_tools: 4,
193 hot_cache_size: 16,
194 default_timeout_secs: 180,
195 }
196 }
197}
198
199impl Default for AsyncPipelineConfig {
200 fn default() -> Self {
201 Self {
202 enable_batching: false, enable_caching: true,
204 max_batch_size: 5,
205 batch_timeout_ms: 100,
206 cache_size: 100,
207 }
208 }
209}
210
211impl Default for LLMClientConfig {
212 fn default() -> Self {
213 Self {
214 enable_connection_pooling: false, enable_response_caching: true,
216 enable_request_batching: false, connection_pool_size: 4,
218 response_cache_size: 50,
219 cache_ttl_secs: 300,
220 rate_limit_rps: 10.0,
221 rate_limit_burst: 20,
222 }
223 }
224}
225
226impl Default for AgentExecutionConfig {
227 fn default() -> Self {
228 Self {
229 use_optimized_loop: true, enable_performance_prediction: false, state_history_size: 1000,
232 resource_monitor_interval_ms: 100,
233 max_memory_mb: 1024,
234 max_execution_time_secs: 300,
235 idle_timeout_ms: 5000, idle_backoff_ms: 100, max_idle_cycles: 10, }
239 }
240}
241
242impl Default for ProfilingConfig {
243 fn default() -> Self {
244 Self {
245 enabled: false, monitor_interval_ms: 100,
247 max_history_size: 1000,
248 auto_export_results: false,
249 export_file_path: "benchmark_results.json".to_string(),
250 enable_regression_testing: false,
251 max_regression_percent: 10.0,
252 }
253 }
254}
255
256impl OptimizationConfig {
257 pub fn development() -> Self {
259 Self {
260 memory_pool: MemoryPoolConfig {
261 enabled: true,
262 ..Default::default()
263 },
264 tool_registry: ToolRegistryConfig {
265 use_optimized_registry: true,
266 max_concurrent_tools: 2,
267 ..Default::default()
268 },
269 async_pipeline: AsyncPipelineConfig {
270 enable_batching: true,
271 enable_caching: true,
272 max_batch_size: 3,
273 ..Default::default()
274 },
275 llm_client: LLMClientConfig {
276 enable_connection_pooling: true,
277 enable_response_caching: true,
278 connection_pool_size: 2,
279 rate_limit_rps: 5.0,
280 ..Default::default()
281 },
282 agent_execution: AgentExecutionConfig {
283 use_optimized_loop: true,
284 enable_performance_prediction: false, max_memory_mb: 512,
286 idle_timeout_ms: 2000, idle_backoff_ms: 50, max_idle_cycles: 5, ..Default::default()
290 },
291 profiling: ProfilingConfig {
292 enabled: true, auto_export_results: true,
294 ..Default::default()
295 },
296 }
297 }
298
299 pub fn production() -> Self {
301 Self {
302 memory_pool: MemoryPoolConfig {
303 enabled: true,
304 max_string_pool_size: 128,
305 max_value_pool_size: 64,
306 max_vec_pool_size: 32,
307 },
308 tool_registry: ToolRegistryConfig {
309 use_optimized_registry: true,
310 max_concurrent_tools: 8,
311 hot_cache_size: 32,
312 default_timeout_secs: 300,
313 },
314 async_pipeline: AsyncPipelineConfig {
315 enable_batching: true,
316 enable_caching: true,
317 max_batch_size: 10,
318 batch_timeout_ms: 50,
319 cache_size: 200,
320 },
321 llm_client: LLMClientConfig {
322 enable_connection_pooling: true,
323 enable_response_caching: true,
324 enable_request_batching: true,
325 connection_pool_size: 8,
326 response_cache_size: 100,
327 cache_ttl_secs: 600,
328 rate_limit_rps: 20.0,
329 rate_limit_burst: 50,
330 },
331 agent_execution: AgentExecutionConfig {
332 use_optimized_loop: true,
333 enable_performance_prediction: true,
334 state_history_size: 2000,
335 resource_monitor_interval_ms: 50,
336 max_memory_mb: 2048,
337 max_execution_time_secs: 600,
338 idle_timeout_ms: 10000, idle_backoff_ms: 200, max_idle_cycles: 20, },
342 profiling: ProfilingConfig {
343 enabled: false, monitor_interval_ms: 1000,
345 max_history_size: 500,
346 auto_export_results: false,
347 export_file_path: "/var/log/vtcode/benchmark_results.json".to_string(),
348 enable_regression_testing: true,
349 max_regression_percent: 5.0,
350 },
351 }
352 }
353}