use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fs;
use std::path::Path;
use std::time::Duration;
/// Top-level production configuration, aggregating all subsystem settings.
///
/// Loadable from / writable to TOML via `from_file` / `to_file`; sanity-check
/// with `validate()`. Per-PLC overrides live in `plc_settings`, keyed by PLC
/// address (see `get_plc_config` / `set_plc_config`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProductionConfig {
/// Connection timeouts, limits, and retry behaviour.
pub connection: ConnectionConfig,
/// Packet sizing, batching, pooling, and memory limits.
pub performance: PerformanceConfig,
/// Metrics collection and alerting thresholds.
pub monitoring: MonitoringConfig,
/// Encryption, validation, and rate-limiting switches.
pub security: SecurityConfig,
/// Log level, format, destinations, and rotation.
pub logging: LoggingConfig,
/// Per-PLC overrides, keyed by PLC address.
pub plc_settings: HashMap<String, PlcSpecificConfig>,
}
/// Connection-level timeouts, limits, and retry behaviour.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConnectionConfig {
/// Maximum time to wait when establishing a connection
/// (must be non-zero — enforced by `ProductionConfig::validate`).
pub connection_timeout: Duration,
/// Maximum time to wait for a read operation.
pub read_timeout: Duration,
/// Maximum time to wait for a write operation.
pub write_timeout: Duration,
/// Maximum number of simultaneous connections (must be non-zero).
pub max_connections: u32,
/// Number of retry attempts after a failed operation.
pub retry_attempts: u32,
/// Delay between successive retry attempts.
pub retry_delay: Duration,
/// Interval between keep-alive probes on idle connections.
pub keep_alive_interval: Duration,
}
/// Performance tuning: packet sizing, batching, pooling, and memory limits.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceConfig {
/// Maximum packet size in bytes (must be at least 100 — see `validate`).
pub max_packet_size: usize,
/// Settings for batched read/write operations.
pub batch_config: BatchConfig,
/// Connection-pool sizing and lifecycle settings.
pub connection_pool: ConnectionPoolConfig,
/// Process memory limits and monitoring switch.
pub memory_limits: MemoryLimits,
}
/// Settings controlling batched operations.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchConfig {
/// Upper bound on operations grouped into one batch (must be non-zero).
pub max_operations_per_batch: usize,
/// Maximum time to wait for a batch to complete.
pub batch_timeout: Duration,
/// When true, keep processing remaining batch items after one fails.
pub continue_on_error: bool,
/// When true, pack operations to make better use of packet space.
pub optimize_packet_packing: bool,
}
/// Connection-pool sizing and maintenance settings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConnectionPoolConfig {
/// Number of connections created when the pool starts.
pub initial_size: u32,
/// Hard cap on pooled connections.
pub max_size: u32,
/// How many connections to add when the pool grows.
pub growth_increment: u32,
/// How long an idle connection may live before being eligible for cleanup.
pub idle_timeout: Duration,
/// How often the pool runs its cleanup pass.
pub cleanup_interval: Duration,
}
/// Memory ceilings for the process, in megabytes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryLimits {
/// Hard memory limit in MB.
pub max_memory_mb: usize,
/// Soft limit in MB at which a warning is expected (should be below
/// `max_memory_mb`).
pub warning_threshold_mb: usize,
/// When true, memory usage monitoring is enabled.
pub enable_monitoring: bool,
}
/// Metrics collection, health checks, and alerting configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MonitoringConfig {
/// Master switch for monitoring.
pub enabled: bool,
/// Interval between metric collection passes (must be non-zero).
pub collection_interval: Duration,
/// Interval between health checks.
pub health_check_interval: Duration,
/// How long collected metrics are retained.
pub retention_period: Duration,
/// When true, profiling data is collected as well.
pub enable_profiling: bool,
/// Thresholds that trigger alerts.
pub alert_thresholds: AlertThresholds,
}
/// Thresholds above which monitoring raises alerts.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlertThresholds {
/// Error rate (fraction, e.g. 0.05 = 5%) that triggers an alert.
pub error_rate_threshold: f64,
/// Latency in milliseconds that triggers an alert.
pub latency_threshold_ms: f64,
/// Memory usage in MB that triggers an alert.
pub memory_threshold_mb: usize,
/// Consecutive connection failures that trigger an alert.
pub connection_failure_threshold: u32,
}
/// Security-related switches and rate limiting.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SecurityConfig {
/// When true, connections are encrypted (off by default — see `Default`).
pub enable_encryption: bool,
/// When true, incoming connections are validated.
pub validate_connections: bool,
/// When true, inputs are validated before use.
pub validate_inputs: bool,
/// Request rate-limiting settings.
pub rate_limiting: RateLimitingConfig,
}
/// Token-bucket-style request rate limiting parameters.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RateLimitingConfig {
/// Master switch; when enabled, `max_requests_per_second` must be
/// non-zero (enforced by `ProductionConfig::validate`).
pub enabled: bool,
/// Sustained request rate allowed per second.
pub max_requests_per_second: u32,
/// Short-term burst allowance above the sustained rate.
pub burst_capacity: u32,
}
/// Logging destinations, verbosity, and rotation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LoggingConfig {
/// Log level: one of "trace", "debug", "info", "warn", "error"
/// (validated by `ProductionConfig::validate`).
pub level: String,
/// Log output format (default profile uses "json").
pub format: String,
/// Optional log file path; `None` disables file logging.
pub file_path: Option<String>,
/// When true, logs are also written to the console.
pub enable_console: bool,
/// When true, structured logging is enabled.
pub enable_structured: bool,
/// Log-file rotation settings.
pub rotation: LogRotationConfig,
}
/// Log-file rotation policy.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LogRotationConfig {
/// Master switch for rotation.
pub enabled: bool,
/// Rotate when a file exceeds this size in MB.
pub max_file_size_mb: usize,
/// Number of rotated files kept before the oldest is dropped.
pub max_files: usize,
/// Rotation schedule as free-form text (default profile uses "daily").
pub schedule: String,
}
/// Per-PLC overrides, stored in `ProductionConfig::plc_settings` keyed by
/// PLC address.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PlcSpecificConfig {
/// PLC model identifier (free-form string).
pub model: String,
/// Free-form key/value connection settings for this PLC.
pub connection_settings: HashMap<String, String>,
/// Tag-discovery behaviour for this PLC.
pub tag_discovery: TagDiscoveryConfig,
/// Free-form key/value performance tuning knobs for this PLC.
pub performance_tuning: HashMap<String, String>,
}
/// Settings for automatic PLC tag discovery.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TagDiscoveryConfig {
/// Master switch for tag discovery.
pub enabled: bool,
/// How often discovery runs.
pub interval: Duration,
/// When true, discovered tags are cached.
pub cache_tags: bool,
/// Upper bound on the number of tags tracked.
pub max_tags: usize,
}
impl Default for ProductionConfig {
    /// Baseline configuration with conservative, out-of-the-box values.
    /// `development()` and `production()` layer profile-specific tweaks
    /// on top of this.
    fn default() -> Self {
        let connection = ConnectionConfig {
            connection_timeout: Duration::from_secs(10),
            read_timeout: Duration::from_secs(5),
            write_timeout: Duration::from_secs(5),
            max_connections: 10,
            retry_attempts: 3,
            retry_delay: Duration::from_secs(1),
            keep_alive_interval: Duration::from_secs(30),
        };

        let batch_config = BatchConfig {
            max_operations_per_batch: 50,
            batch_timeout: Duration::from_secs(10),
            continue_on_error: true,
            optimize_packet_packing: true,
        };

        let connection_pool = ConnectionPoolConfig {
            initial_size: 2,
            max_size: 10,
            growth_increment: 2,
            idle_timeout: Duration::from_secs(300),
            cleanup_interval: Duration::from_secs(60),
        };

        let memory_limits = MemoryLimits {
            max_memory_mb: 100,
            warning_threshold_mb: 80,
            enable_monitoring: true,
        };

        let performance = PerformanceConfig {
            max_packet_size: 4000,
            batch_config,
            connection_pool,
            memory_limits,
        };

        let alert_thresholds = AlertThresholds {
            error_rate_threshold: 0.05,
            latency_threshold_ms: 1000.0,
            memory_threshold_mb: 80,
            connection_failure_threshold: 5,
        };

        let monitoring = MonitoringConfig {
            enabled: true,
            collection_interval: Duration::from_secs(30),
            health_check_interval: Duration::from_secs(60),
            // 86400 s = 24 h of metric retention.
            retention_period: Duration::from_secs(86400),
            enable_profiling: false,
            alert_thresholds,
        };

        let security = SecurityConfig {
            enable_encryption: false,
            validate_connections: true,
            validate_inputs: true,
            rate_limiting: RateLimitingConfig {
                enabled: true,
                max_requests_per_second: 100,
                burst_capacity: 200,
            },
        };

        let logging = LoggingConfig {
            level: "info".to_string(),
            format: "json".to_string(),
            file_path: Some("logs/ethernet_ip.log".to_string()),
            enable_console: true,
            enable_structured: true,
            rotation: LogRotationConfig {
                enabled: true,
                max_file_size_mb: 100,
                max_files: 10,
                schedule: "daily".to_string(),
            },
        };

        Self {
            connection,
            performance,
            monitoring,
            security,
            logging,
            plc_settings: HashMap::new(),
        }
    }
}
impl ProductionConfig {
    /// Loads a configuration from a TOML file at `path`.
    ///
    /// # Errors
    /// Returns an error if the file cannot be read or fails TOML
    /// deserialization. Note this does NOT run `validate()`; callers
    /// should validate the loaded config separately.
    pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, Box<dyn std::error::Error>> {
        let content = fs::read_to_string(path)?;
        let config: ProductionConfig = toml::from_str(&content)?;
        Ok(config)
    }

    /// Serializes this configuration as pretty-printed TOML and writes it
    /// to `path`, overwriting any existing file.
    ///
    /// # Errors
    /// Returns an error if serialization or the filesystem write fails.
    pub fn to_file<P: AsRef<Path>>(&self, path: P) -> Result<(), Box<dyn std::error::Error>> {
        let content = toml::to_string_pretty(self)?;
        fs::write(path, content)?;
        Ok(())
    }

    /// Checks the configuration for invalid or inconsistent values.
    ///
    /// # Errors
    /// Returns `Err` with ALL problems found (not just the first), so a
    /// user can fix a bad config file in one pass.
    pub fn validate(&self) -> Result<(), Vec<String>> {
        let mut errors = Vec::new();
        if self.connection.connection_timeout.as_secs() == 0 {
            errors.push("Connection timeout must be greater than 0".to_string());
        }
        if self.connection.max_connections == 0 {
            errors.push("Maximum connections must be greater than 0".to_string());
        }
        if self.performance.max_packet_size < 100 {
            errors.push("Maximum packet size must be at least 100 bytes".to_string());
        }
        if self.performance.batch_config.max_operations_per_batch == 0 {
            errors.push("Maximum operations per batch must be greater than 0".to_string());
        }
        // Cross-field consistency: the pool must be able to hold its
        // initial connections, and the soft memory threshold must sit
        // below the hard limit or it can never fire before the limit.
        if self.performance.connection_pool.initial_size > self.performance.connection_pool.max_size
        {
            errors.push(
                "Connection pool initial size must not exceed its maximum size".to_string(),
            );
        }
        if self.performance.memory_limits.warning_threshold_mb
            >= self.performance.memory_limits.max_memory_mb
        {
            errors.push(
                "Memory warning threshold must be below the maximum memory limit".to_string(),
            );
        }
        if self.monitoring.collection_interval.as_secs() == 0 {
            errors.push("Collection interval must be greater than 0".to_string());
        }
        if self.security.rate_limiting.enabled
            && self.security.rate_limiting.max_requests_per_second == 0
        {
            errors.push(
                "Max requests per second must be greater than 0 when rate limiting is enabled"
                    .to_string(),
            );
        }
        let valid_levels = ["trace", "debug", "info", "warn", "error"];
        if !valid_levels.contains(&self.logging.level.as_str()) {
            errors.push(format!(
                "Invalid log level: {}. Must be one of: {:?}",
                self.logging.level, valid_levels
            ));
        }
        if errors.is_empty() {
            Ok(())
        } else {
            Err(errors)
        }
    }

    /// Returns the per-PLC override for `plc_address`, if one is set.
    pub fn get_plc_config(&self, plc_address: &str) -> Option<&PlcSpecificConfig> {
        self.plc_settings.get(plc_address)
    }

    /// Inserts or replaces the per-PLC override for `plc_address`.
    pub fn set_plc_config(&mut self, plc_address: String, config: PlcSpecificConfig) {
        self.plc_settings.insert(plc_address, config);
    }

    /// Default configuration tuned for local development: verbose logging,
    /// monitoring, rate limiting, and memory monitoring disabled.
    pub fn development() -> Self {
        let mut config = Self::default();
        config.logging.level = "debug".to_string();
        config.monitoring.enabled = false;
        config.security.rate_limiting.enabled = false;
        config.performance.memory_limits.enable_monitoring = false;
        config
    }

    /// Default configuration tuned for production: monitoring, rate
    /// limiting, and memory monitoring enabled, with a raised memory cap.
    pub fn production() -> Self {
        let mut config = Self::default();
        config.logging.level = "info".to_string();
        config.monitoring.enabled = true;
        config.security.rate_limiting.enabled = true;
        config.performance.memory_limits.enable_monitoring = true;
        config.performance.memory_limits.max_memory_mb = 500;
        config
    }
}