use serde::{Deserialize, Serialize};
/// Aggregated configuration for all runtime optimization subsystems.
///
/// Every section carries `#[serde(default)]`, so a config file may
/// specify any subset of sections and the rest fall back to their
/// `Default` values.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct OptimizationConfig {
/// Object-pool sizing and toggle; see [`MemoryPoolConfig`].
#[serde(default)]
pub memory_pool: MemoryPoolConfig,
/// Tool registry behavior; see [`ToolRegistryConfig`].
#[serde(default)]
pub tool_registry: ToolRegistryConfig,
/// Async pipeline batching/caching; see [`AsyncPipelineConfig`].
#[serde(default)]
pub async_pipeline: AsyncPipelineConfig,
/// LLM client pooling, caching, and rate limiting; see [`LLMClientConfig`].
#[serde(default)]
pub llm_client: LLMClientConfig,
/// Agent execution loop limits; see [`AgentExecutionConfig`].
#[serde(default)]
pub agent_execution: AgentExecutionConfig,
/// Performance profiling/export settings; see [`ProfilingConfig`].
#[serde(default)]
pub profiling: ProfilingConfig,
/// File-read caching thresholds; see [`FileReadCacheConfig`].
#[serde(default)]
pub file_read_cache: FileReadCacheConfig,
/// Command-result caching; see [`CommandCacheConfig`].
#[serde(default)]
pub command_cache: CommandCacheConfig,
}
/// Settings for the file-read cache: size eligibility bounds, entry
/// time-to-live, and overall capacity.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileReadCacheConfig {
/// Master switch for the file-read cache.
pub enabled: bool,
/// Lower size bound in bytes — presumably the smallest file worth
/// caching; confirm against the cache consumer.
pub min_size_bytes: usize,
/// Upper size bound in bytes for cacheable files.
pub max_size_bytes: usize,
/// Entry time-to-live, in seconds.
pub ttl_secs: u64,
/// Maximum number of entries held in the cache.
pub max_entries: usize,
}
/// Settings for caching command results.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CommandCacheConfig {
/// Master switch for the command cache.
pub enabled: bool,
/// Entry time-to-live, in milliseconds.
pub ttl_ms: u64,
/// Maximum number of cached entries.
pub max_entries: usize,
/// Commands eligible for caching. NOTE(review): exact-match vs
/// prefix-match semantics are decided by the consumer — verify there.
pub allowlist: Vec<String>,
}
/// Sizing knobs for the internal object pools.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryPoolConfig {
/// Master switch for memory pooling.
pub enabled: bool,
/// Capacity of the pooled-`String` pool.
pub max_string_pool_size: usize,
/// Capacity of the pooled-value pool.
pub max_value_pool_size: usize,
/// Capacity of the pooled-`Vec` pool.
pub max_vec_pool_size: usize,
}
/// Tool registry behavior: registry implementation choice, concurrency
/// cap, hot-cache size, and per-tool timeout.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolRegistryConfig {
/// Select the optimized registry implementation instead of the basic one.
pub use_optimized_registry: bool,
/// Maximum number of tools allowed to run concurrently.
pub max_concurrent_tools: usize,
/// Number of slots in the hot tool cache.
pub hot_cache_size: usize,
/// Default per-tool timeout, in seconds.
pub default_timeout_secs: u64,
}
/// Async pipeline tuning: request batching and result caching.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AsyncPipelineConfig {
/// Enable batching of pipeline requests.
pub enable_batching: bool,
/// Enable caching of pipeline results.
pub enable_caching: bool,
/// Maximum requests collected into one batch.
pub max_batch_size: usize,
/// How long to wait for a batch to fill, in milliseconds.
pub batch_timeout_ms: u64,
/// Capacity of the pipeline result cache.
pub cache_size: usize,
}
/// LLM client optimizations: connection pooling, response caching,
/// request batching, and rate limiting.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LLMClientConfig {
/// Reuse connections to the LLM provider via a pool.
pub enable_connection_pooling: bool,
/// Cache LLM responses for repeated requests.
pub enable_response_caching: bool,
/// Batch multiple requests into fewer provider calls.
pub enable_request_batching: bool,
/// Number of pooled connections.
pub connection_pool_size: usize,
/// Capacity of the response cache.
pub response_cache_size: usize,
/// Response cache entry time-to-live, in seconds.
pub cache_ttl_secs: u64,
/// Sustained rate limit, in requests per second.
pub rate_limit_rps: f64,
/// Burst allowance on top of the sustained rate.
pub rate_limit_burst: usize,
}
/// Agent execution loop settings: resource limits, monitoring cadence,
/// and idle-handling behavior.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AgentExecutionConfig {
/// Use the optimized execution loop implementation.
pub use_optimized_loop: bool,
/// Enable predictive performance modeling for the loop.
pub enable_performance_prediction: bool,
/// Number of past states retained for analysis.
pub state_history_size: usize,
/// Interval between resource-monitor samples, in milliseconds.
pub resource_monitor_interval_ms: u64,
/// Memory ceiling for an execution, in megabytes.
pub max_memory_mb: u64,
/// Wall-clock ceiling for an execution, in seconds.
pub max_execution_time_secs: u64,
/// Idle threshold before back-off kicks in, in milliseconds.
pub idle_timeout_ms: u64,
/// Back-off sleep applied while idle, in milliseconds.
pub idle_backoff_ms: u64,
/// Idle cycles tolerated before the loop gives up — confirm exact
/// semantics against the loop implementation.
pub max_idle_cycles: usize,
}
/// Performance profiling settings, including optional export of results
/// and regression gating.
#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProfilingConfig {
/// Master switch for profiling.
pub enabled: bool,
/// Interval between profiling samples, in milliseconds.
pub monitor_interval_ms: u64,
/// Number of profiling samples retained.
pub max_history_size: usize,
/// Automatically write results to `export_file_path`.
pub auto_export_results: bool,
/// Destination path for exported benchmark results.
pub export_file_path: String,
/// Fail runs whose performance regresses beyond the threshold below.
pub enable_regression_testing: bool,
/// Allowed regression before failure, as a percentage.
pub max_regression_percent: f64,
}
impl Default for MemoryPoolConfig {
fn default() -> Self {
Self {
enabled: true,
max_string_pool_size: 64,
max_value_pool_size: 32,
max_vec_pool_size: 16,
}
}
}
impl Default for FileReadCacheConfig {
fn default() -> Self {
Self {
enabled: true,
min_size_bytes: crate::constants::optimization::FILE_READ_CACHE_MIN_SIZE_BYTES,
max_size_bytes: crate::constants::optimization::FILE_READ_CACHE_MAX_SIZE_BYTES,
ttl_secs: crate::constants::optimization::FILE_READ_CACHE_TTL_SECS,
max_entries: crate::constants::optimization::FILE_READ_CACHE_MAX_ENTRIES,
}
}
}
impl Default for CommandCacheConfig {
fn default() -> Self {
Self {
enabled: true,
ttl_ms: crate::constants::optimization::COMMAND_CACHE_TTL_MS,
max_entries: crate::constants::optimization::COMMAND_CACHE_MAX_ENTRIES,
allowlist: crate::constants::optimization::COMMAND_CACHE_ALLOWLIST
.iter()
.map(|s| s.to_string())
.collect(),
}
}
}
impl Default for ToolRegistryConfig {
fn default() -> Self {
Self {
use_optimized_registry: true, max_concurrent_tools: 4,
hot_cache_size: 16,
default_timeout_secs: 180,
}
}
}
impl Default for AsyncPipelineConfig {
fn default() -> Self {
Self {
enable_batching: false, enable_caching: true,
max_batch_size: 5,
batch_timeout_ms: 100,
cache_size: 100,
}
}
}
impl Default for LLMClientConfig {
fn default() -> Self {
Self {
enable_connection_pooling: false, enable_response_caching: true,
enable_request_batching: false, connection_pool_size: 4,
response_cache_size: 50,
cache_ttl_secs: 300,
rate_limit_rps: 10.0,
rate_limit_burst: 20,
}
}
}
impl Default for AgentExecutionConfig {
fn default() -> Self {
Self {
use_optimized_loop: true, enable_performance_prediction: false, state_history_size: 1000,
resource_monitor_interval_ms: 100,
max_memory_mb: 1024,
max_execution_time_secs: 300,
idle_timeout_ms: 5000, idle_backoff_ms: 100, max_idle_cycles: 10, }
}
}
impl Default for ProfilingConfig {
fn default() -> Self {
Self {
enabled: false, monitor_interval_ms: 100,
max_history_size: 1000,
auto_export_results: false,
export_file_path: "benchmark_results.json".to_string(),
enable_regression_testing: false,
max_regression_percent: 10.0,
}
}
}
impl OptimizationConfig {
/// Profile tuned for local development: low concurrency and memory
/// limits, batching and profiling enabled (with auto-export) so
/// behavior is observable, and default file/command cache settings.
pub fn development() -> Self {
Self {
memory_pool: MemoryPoolConfig {
enabled: true,
..Default::default()
},
tool_registry: ToolRegistryConfig {
use_optimized_registry: true,
// Keep tool concurrency low on developer machines.
max_concurrent_tools: 2,
..Default::default()
},
async_pipeline: AsyncPipelineConfig {
enable_batching: true,
enable_caching: true,
max_batch_size: 3,
..Default::default()
},
llm_client: LLMClientConfig {
enable_connection_pooling: true,
enable_response_caching: true,
connection_pool_size: 2,
// Half the default rate limit for development use.
rate_limit_rps: 5.0,
..Default::default()
},
agent_execution: AgentExecutionConfig {
use_optimized_loop: true,
enable_performance_prediction: false, max_memory_mb: 512,
idle_timeout_ms: 2000, idle_backoff_ms: 50, max_idle_cycles: 5, ..Default::default()
},
profiling: ProfilingConfig {
// Profiling is on in development so regressions surface early.
enabled: true, auto_export_results: true,
..Default::default()
},
file_read_cache: FileReadCacheConfig::default(),
command_cache: CommandCacheConfig::default(),
}
}
/// Profile tuned for production: higher concurrency, larger pools and
/// caches, stricter regression threshold, and production cache limits
/// from the shared constants. Every field is spelled out explicitly so
/// production behavior does not shift if `Default` impls change.
pub fn production() -> Self {
Self {
memory_pool: MemoryPoolConfig {
enabled: true,
max_string_pool_size: 128,
max_value_pool_size: 64,
max_vec_pool_size: 32,
},
tool_registry: ToolRegistryConfig {
use_optimized_registry: true,
max_concurrent_tools: 8,
hot_cache_size: 32,
default_timeout_secs: 300,
},
async_pipeline: AsyncPipelineConfig {
enable_batching: true,
enable_caching: true,
max_batch_size: 10,
batch_timeout_ms: 50,
cache_size: 200,
},
llm_client: LLMClientConfig {
enable_connection_pooling: true,
enable_response_caching: true,
enable_request_batching: true,
connection_pool_size: 8,
response_cache_size: 100,
cache_ttl_secs: 600,
rate_limit_rps: 20.0,
rate_limit_burst: 50,
},
agent_execution: AgentExecutionConfig {
use_optimized_loop: true,
enable_performance_prediction: true,
state_history_size: 2000,
resource_monitor_interval_ms: 50,
max_memory_mb: 2048,
max_execution_time_secs: 600,
idle_timeout_ms: 10000, idle_backoff_ms: 200, max_idle_cycles: 20, },
profiling: ProfilingConfig {
// Continuous profiling off in production; regression testing stays
// on with a tighter 5% threshold than the 10% default.
enabled: false, monitor_interval_ms: 1000,
max_history_size: 500,
auto_export_results: false,
export_file_path: "/var/log/vtcode/benchmark_results.json".to_string(),
enable_regression_testing: true,
max_regression_percent: 5.0,
},
file_read_cache: FileReadCacheConfig {
enabled: true,
min_size_bytes: crate::constants::optimization::FILE_READ_CACHE_PROD_MIN_SIZE_BYTES,
max_size_bytes: crate::constants::optimization::FILE_READ_CACHE_PROD_MAX_SIZE_BYTES,
ttl_secs: crate::constants::optimization::FILE_READ_CACHE_PROD_TTL_SECS,
max_entries: crate::constants::optimization::FILE_READ_CACHE_PROD_MAX_ENTRIES,
},
command_cache: CommandCacheConfig {
enabled: true,
ttl_ms: crate::constants::optimization::COMMAND_CACHE_PROD_TTL_MS,
max_entries: crate::constants::optimization::COMMAND_CACHE_PROD_MAX_ENTRIES,
allowlist: crate::constants::optimization::COMMAND_CACHE_PROD_ALLOWLIST
.iter()
.map(|s| s.to_string())
.collect(),
},
}
}
}