scirs2_metrics/optimization/distributed_advanced/scaling.rs

use crate::error::{MetricsError, Result};
use std::collections::HashMap;
use std::time::{Duration, Instant};

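/// Coordinates auto-scaling for distributed metric services on a single node:
/// it holds per-service scaling policies, the latest resource measurements,
/// and a history of executed scaling events.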
#[derive(Debug, Clone)]
pub struct AdvancedScalingManager {
    node_id: String,
    scaling_policies: HashMap<String, ScalingPolicy>,
    resource_monitors: HashMap<String, ResourceMonitor>,
    scaling_history: Vec<ScalingEvent>,
    auto_scaling_enabled: bool,
}

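/// Per-service scaling configuration: instance bounds, utilization targets,
/// cooldown windows, and the thresholds (fractions in the range 0.0..=1.0)
/// that trigger scale-up and scale-down decisions.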
#[derive(Debug, Clone)]
pub struct ScalingPolicy {
    min_instances: u32,
    max_instances: u32,
    target_cpu_utilization: f64,
    target_memory_utilization: f64,
    scale_up_cooldown: Duration,
    scale_down_cooldown: Duration,
    scale_up_threshold: f64,
    scale_down_threshold: f64,
}

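/// A point-in-time snapshot of resource utilization for one service, with
/// each utilization expressed as a fraction in the range 0.0..=1.0.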
#[derive(Debug, Clone)]
pub struct ResourceMonitor {
    cpu_utilization: f64,
    memory_utilization: f64,
    network_utilization: f64,
    storage_utilization: f64,
    last_updated: Instant,
}

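/// A record of one executed scaling action, kept in the manager's history.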
#[derive(Debug, Clone)]
pub struct ScalingEvent {
    timestamp: Instant,
    event_type: ScalingEventType,
    service_name: String,
    instances_before: u32,
    instances_after: u32,
    trigger_reason: String,
}

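/// The kind of scaling action recorded in a `ScalingEvent`.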
#[derive(Debug, Clone)]
pub enum ScalingEventType {
    ScaleUp,
    ScaleDown,
    AutoScale,
    ManualScale,
}

impl AdvancedScalingManager {
    pub fn new(node_id: String) -> Self {
        Self {
            node_id,
            scaling_policies: HashMap::new(),
            resource_monitors: HashMap::new(),
            scaling_history: Vec::new(),
            auto_scaling_enabled: false,
        }
    }

    pub fn add_scaling_policy(&mut self, service_name: String, policy: ScalingPolicy) {
        self.scaling_policies.insert(service_name, policy);
    }

    pub fn enable_auto_scaling(&mut self) {
        self.auto_scaling_enabled = true;
    }

    pub fn disable_auto_scaling(&mut self) {
        self.auto_scaling_enabled = false;
    }

    pub fn update_resource_metrics(&mut self, service_name: String, monitor: ResourceMonitor) {
        self.resource_monitors.insert(service_name, monitor);
    }

    /// Evaluates every registered policy against the latest resource sample
    /// for that service and returns the resulting scaling decisions. Returns
    /// an empty list when auto-scaling is disabled.
    pub fn evaluate_scaling_needs(&mut self) -> Result<Vec<ScalingDecision>> {
        if !self.auto_scaling_enabled {
            return Ok(Vec::new());
        }

        let mut decisions = Vec::new();

        for (service_name, policy) in &self.scaling_policies {
            if let Some(monitor) = self.resource_monitors.get(service_name) {
                if let Some(decision) = self.should_scale(service_name, policy, monitor)? {
                    decisions.push(decision);
                }
            }
        }

        Ok(decisions)
    }

    /// Decides whether a single service should scale, based on its latest
    /// resource sample. Utilizations and thresholds are fractions in the
    /// range 0.0..=1.0. Cooldown windows from the policy are not checked here.
    fn should_scale(
        &self,
        service_name: &str,
        policy: &ScalingPolicy,
        monitor: &ResourceMonitor,
    ) -> Result<Option<ScalingDecision>> {
        let cpu_pressure = monitor.cpu_utilization > policy.scale_up_threshold;
        let memory_pressure = monitor.memory_utilization > policy.scale_up_threshold;
        let under_utilized = monitor.cpu_utilization < policy.scale_down_threshold
            && monitor.memory_utilization < policy.scale_down_threshold;

        if cpu_pressure || memory_pressure {
            return Ok(Some(ScalingDecision {
                service_name: service_name.to_string(),
                action: ScalingAction::ScaleUp,
                target_instances: self.calculate_scale_up_target(policy, monitor),
                reason: format!(
                    "CPU: {:.2}%, Memory: {:.2}%",
                    monitor.cpu_utilization * 100.0,
                    monitor.memory_utilization * 100.0
                ),
            }));
        }

        if under_utilized {
            return Ok(Some(ScalingDecision {
                service_name: service_name.to_string(),
                action: ScalingAction::ScaleDown,
                target_instances: self.calculate_scale_down_target(policy, monitor),
                reason: format!(
                    "Under-utilized - CPU: {:.2}%, Memory: {:.2}%",
                    monitor.cpu_utilization * 100.0,
                    monitor.memory_utilization * 100.0
                ),
            }));
        }

        Ok(None)
    }

    fn calculate_scale_up_target(&self, policy: &ScalingPolicy, _monitor: &ResourceMonitor) -> u32 {
        // The manager does not track per-service instance counts, so scale up by
        // one step above the configured minimum, capped at the configured maximum.
        policy.max_instances.min(policy.min_instances + 1)
    }

    fn calculate_scale_down_target(
        &self,
        policy: &ScalingPolicy,
        _monitor: &ResourceMonitor,
    ) -> u32 {
        // Never scale below the configured minimum; without a tracked per-service
        // instance count, the scale-down target is simply the policy minimum.
        policy.min_instances
    }

    pub fn execute_scaling(&mut self, decision: ScalingDecision) -> Result<()> {
        // Infer the pre-scaling instance count from the most recent event recorded
        // for this service; fall back to a single instance when no history exists.
        let instances_before = self
            .scaling_history
            .iter()
            .rev()
            .find(|event| event.service_name == decision.service_name)
            .map(|event| event.instances_after)
            .unwrap_or(1);

        let event = ScalingEvent {
            timestamp: Instant::now(),
            event_type: match decision.action {
                ScalingAction::ScaleUp => ScalingEventType::ScaleUp,
                ScalingAction::ScaleDown => ScalingEventType::ScaleDown,
            },
            service_name: decision.service_name.clone(),
            instances_before,
            instances_after: decision.target_instances,
            trigger_reason: decision.reason,
        };

        self.scaling_history.push(event);
        Ok(())
    }

    pub fn get_scaling_history(&self) -> &[ScalingEvent] {
        &self.scaling_history
    }
}

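/// A proposed scaling action for one service, produced by
/// `evaluate_scaling_needs` and consumed by `execute_scaling`.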
#[derive(Debug, Clone)]
pub struct ScalingDecision {
    pub service_name: String,
    pub action: ScalingAction,
    pub target_instances: u32,
    pub reason: String,
}

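/// The direction of a proposed scaling action.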
#[derive(Debug, Clone)]
pub enum ScalingAction {
    ScaleUp,
    ScaleDown,
}

impl Default for ScalingPolicy {
    fn default() -> Self {
        Self {
            min_instances: 1,
            max_instances: 10,
            target_cpu_utilization: 0.7,
            target_memory_utilization: 0.8,
            scale_up_cooldown: Duration::from_secs(300),
            scale_down_cooldown: Duration::from_secs(600),
            scale_up_threshold: 0.8,
            scale_down_threshold: 0.3,
        }
    }
}
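
// A minimal usage sketch of the API above, written as an in-file test. The
// service name "ingest" and node id "node-1" are illustrative placeholders,
// and the `unwrap` calls assume the crate's error type implements `Debug`.
#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Instant;

    #[test]
    fn proposes_scale_up_under_cpu_pressure() {
        let mut manager = AdvancedScalingManager::new("node-1".to_string());
        manager.add_scaling_policy("ingest".to_string(), ScalingPolicy::default());
        manager.enable_auto_scaling();

        // CPU above the default scale-up threshold (0.8) should trigger a decision.
        manager.update_resource_metrics(
            "ingest".to_string(),
            ResourceMonitor {
                cpu_utilization: 0.95,
                memory_utilization: 0.50,
                network_utilization: 0.20,
                storage_utilization: 0.10,
                last_updated: Instant::now(),
            },
        );

        let decisions = manager.evaluate_scaling_needs().unwrap();
        assert_eq!(decisions.len(), 1);
        assert!(matches!(decisions[0].action, ScalingAction::ScaleUp));

        // Executing the decision appends a ScalingEvent to the history.
        manager.execute_scaling(decisions[0].clone()).unwrap();
        assert_eq!(manager.get_scaling_history().len(), 1);
    }
}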