use scirs2_core::metrics::{Counter, Gauge, Histogram, MetricsRegistry, Timer};
use scirs2_core::profiling::Profiler;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use std::time::{Instant, SystemTime, UNIX_EPOCH};
use tokio::sync::RwLock;
use tracing::{error, info, warn};
use super::functions::CloudStorageProvider;
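/// Object storage tier, ordered from fastest access (Hot) to cheapest
/// long-term retention (Archive).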
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum StorageTier {
Hot,
Warm,
Cold,
Archive,
}
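/// Elastic scaling policy: node-count bounds, utilization targets,
/// scale-up/down thresholds, cooldown, and spot-instance limits.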
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ElasticScalingConfig {
pub min_nodes: u32,
pub max_nodes: u32,
pub target_cpu_utilization: f64,
pub target_memory_utilization: f64,
pub scale_up_threshold: f64,
pub scale_down_threshold: f64,
pub cooldown_seconds: u32,
pub use_spot_instances: bool,
pub max_spot_ratio: f64,
pub instance_types: Vec<InstanceType>,
pub provider: CloudProvider,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CostPrediction {
pub predicted_hourly_cost: f64,
pub confidence: f64,
pub recommended_instance_type: String,
pub recommended_spot_ratio: f64,
pub estimated_monthly_savings: f64,
pub timestamp: u64,
}
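/// In-memory stand-in for a Google Cloud Storage client: objects and their
/// metadata live in process-local maps rather than a real bucket.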
pub(super) struct GCSClient {
#[allow(dead_code)]
project: String,
#[allow(dead_code)]
bucket: String,
pub(super) objects: HashMap<String, Vec<u8>>,
pub(super) metadata: HashMap<String, ObjectMetadata>,
}
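/// Errors surfaced by cloud storage, scaling, and disaster-recovery operations.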
#[derive(Debug, Clone, thiserror::Error)]
pub enum CloudError {
#[error("Authentication failed: {0}")]
AuthenticationError(String),
#[error("Bucket not found: {0}")]
BucketNotFound(String),
#[error("Object not found: {0}")]
ObjectNotFound(String),
#[error("Permission denied: {0}")]
PermissionDenied(String),
#[error("Network error: {0}")]
NetworkError(String),
#[error("Timeout: {0}")]
Timeout(String),
#[error("Provider error: {0}")]
ProviderError(String),
#[error("Configuration error: {0}")]
ConfigurationError(String),
#[error("Rate limited: {0}")]
RateLimited(String),
}
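/// Profiler for cloud operations. The start/stop hooks and Prometheus export
/// are currently stubs; only the metrics map behind `get_metrics` is live.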
pub struct CloudOperationProfiler {
#[allow(dead_code)]
profiler: Profiler,
operation_metrics: Arc<RwLock<HashMap<String, OperationMetrics>>>,
#[allow(dead_code)]
metric_registry: Arc<MetricsRegistry>,
}
impl CloudOperationProfiler {
pub fn new() -> Self {
Self {
profiler: Profiler::new(),
operation_metrics: Arc::new(RwLock::new(HashMap::new())),
metric_registry: Arc::new(MetricsRegistry::new()),
}
}
/// Placeholder hook: per-operation timing is not yet recorded.
pub fn start_operation(&self, _operation: &str) {}
/// Placeholder hook: per-operation byte counts and outcomes are not yet recorded.
pub fn stop_operation(&self, _operation: &str, _bytes: u64, _success: bool) {}
pub async fn get_metrics(&self, operation: &str) -> Option<OperationMetrics> {
let metrics = self.operation_metrics.read().await;
metrics.get(operation).cloned()
}
/// Returns a placeholder Prometheus exposition; live registry export is not yet wired up.
pub fn export_prometheus(&self) -> String {
"# Cloud operations metrics\n# Registry active".to_string()
}
}
pub(super) struct GCSMetrics {
pub(super) uploads: Counter,
pub(super) downloads: Counter,
pub(super) errors: Counter,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StorageOperationResult {
pub success: bool,
pub duration_ms: u64,
pub bytes_transferred: u64,
pub error: Option<String>,
pub etag: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct S3MetricsSummary {
pub total_uploads: u64,
pub total_downloads: u64,
pub total_upload_bytes: u64,
pub total_download_bytes: u64,
pub total_errors: u64,
pub avg_latency_ms: f64,
pub compression_ratio: f64,
pub gpu_operations: u64,
}
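/// Coordinates multi-cloud disaster recovery: per-provider health checks and
/// failure counting, automatic failover to the least-failing secondary once
/// the primary reaches `failover_threshold`, best-effort replication to
/// secondaries, and a bounded event history. A minimal usage sketch
/// (async context and placeholder values assumed, marked `ignore`):
///
/// ```ignore
/// let config = DisasterRecoveryConfig {
///     primary_provider: CloudProvider::AWS,
///     secondary_providers: vec![CloudProvider::GCP, CloudProvider::Azure],
///     rto_seconds: 300,
///     rpo_seconds: 60,
///     auto_failover_enabled: true,
///     health_check_interval_secs: 30,
///     failover_threshold: 3,
///     continuous_replication: true,
///     replication_batch_size: 128,
/// };
/// let mut dr = DisasterRecoveryManager::new(config);
/// // dr.register_provider(CloudProvider::AWS, aws_backend);
/// let statuses = dr.health_check_all().await;
/// ```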
pub struct DisasterRecoveryManager {
config: DisasterRecoveryConfig,
providers: HashMap<CloudProvider, Arc<dyn CloudStorageProvider>>,
current_primary: Arc<RwLock<CloudProvider>>,
failure_counts: Arc<RwLock<HashMap<CloudProvider, u32>>>,
event_history: Arc<RwLock<VecDeque<(u64, DREvent)>>>,
replication_lag: Arc<RwLock<HashMap<CloudProvider, u64>>>,
}
impl DisasterRecoveryManager {
pub fn new(config: DisasterRecoveryConfig) -> Self {
let mut failure_counts = HashMap::new();
failure_counts.insert(config.primary_provider, 0);
for provider in &config.secondary_providers {
failure_counts.insert(*provider, 0);
}
Self {
config: config.clone(),
providers: HashMap::new(),
current_primary: Arc::new(RwLock::new(config.primary_provider)),
failure_counts: Arc::new(RwLock::new(failure_counts)),
event_history: Arc::new(RwLock::new(VecDeque::new())),
replication_lag: Arc::new(RwLock::new(HashMap::new())),
}
}
pub fn register_provider(
&mut self,
provider: CloudProvider,
backend: Arc<dyn CloudStorageProvider>,
) {
self.providers.insert(provider, backend);
}
pub async fn get_primary(&self) -> CloudProvider {
*self.current_primary.read().await
}
pub async fn health_check_all(&self) -> HashMap<CloudProvider, HealthStatus> {
let mut results = HashMap::new();
for (provider, backend) in &self.providers {
match backend.health_check().await {
Ok(status) => {
self.record_event(DREvent::HealthCheck {
provider: *provider,
healthy: status.healthy,
latency_ms: status.latency_ms,
})
.await;
let mut counts = self.failure_counts.write().await;
if status.healthy {
counts.insert(*provider, 0);
} else {
let count = counts.entry(*provider).or_insert(0);
*count += 1;
}
results.insert(*provider, status);
}
Err(e) => {
let mut counts = self.failure_counts.write().await;
let count = counts.entry(*provider).or_insert(0);
*count += 1;
results.insert(
*provider,
HealthStatus {
healthy: false,
latency_ms: 0,
error_rate: 1.0,
last_check: SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("system time should be after UNIX_EPOCH")
.as_secs(),
message: e.to_string(),
},
);
}
}
}
if self.config.auto_failover_enabled {
self.check_and_perform_failover().await;
}
results
}
async fn check_and_perform_failover(&self) {
let current_primary = *self.current_primary.read().await;
let counts = self.failure_counts.read().await;
if let Some(&failure_count) = counts.get(&current_primary) {
if failure_count >= self.config.failover_threshold {
let mut best_secondary = None;
let mut lowest_failures = u32::MAX;
for provider in &self.config.secondary_providers {
if let Some(&count) = counts.get(provider) {
if count < lowest_failures {
lowest_failures = count;
best_secondary = Some(*provider);
}
}
}
if let Some(new_primary) = best_secondary {
drop(counts);
if let Err(e) = self.perform_failover(new_primary).await {
error!("Failover failed: {}", e);
}
}
}
}
}
pub async fn perform_failover(&self, new_primary: CloudProvider) -> Result<(), CloudError> {
let old_primary = *self.current_primary.read().await;
let start = Instant::now();
info!(
"Initiating failover from {:?} to {:?}",
old_primary, new_primary
);
self.record_event(DREvent::FailoverInitiated {
from: old_primary,
to: new_primary,
reason: "Primary provider failure threshold exceeded".to_string(),
})
.await;
*self.current_primary.write().await = new_primary;
self.failure_counts.write().await.insert(new_primary, 0);
let duration = start.elapsed().as_millis() as u64;
self.record_event(DREvent::FailoverCompleted {
from: old_primary,
to: new_primary,
duration_ms: duration,
})
.await;
info!(
"Failover completed in {}ms. New primary: {:?}",
duration, new_primary
);
Ok(())
}
pub async fn replicate_to_secondaries(&self, key: &str, data: &[u8]) -> Result<(), CloudError> {
let _primary = *self.current_primary.read().await;
for provider in &self.config.secondary_providers {
if let Some(backend) = self.providers.get(provider) {
match backend.upload(key, data, StorageTier::Hot).await {
Ok(_) => {
info!("Replicated {} to {:?}", key, provider);
}
Err(e) => {
warn!("Failed to replicate {} to {:?}: {}", key, provider, e);
}
}
}
}
Ok(())
}
pub async fn get_status(&self) -> DisasterRecoveryStatus {
let current_primary = *self.current_primary.read().await;
let failure_counts = self.failure_counts.read().await.clone();
let replication_lag = self.replication_lag.read().await.clone();
let mut provider_status = HashMap::new();
for (provider, backend) in &self.providers {
if let Ok(health) = backend.health_check().await {
provider_status.insert(
*provider,
ProviderStatus {
healthy: health.healthy,
latency_ms: health.latency_ms,
failure_count: *failure_counts.get(provider).unwrap_or(&0),
replication_lag_ms: *replication_lag.get(provider).unwrap_or(&0),
},
);
}
}
let event_history = self.event_history.read().await;
let recent_events: Vec<DREvent> = event_history
.iter()
.rev()
.take(10)
.map(|(_, e)| e.clone())
.collect();
DisasterRecoveryStatus {
current_primary,
provider_status,
rto_seconds: self.config.rto_seconds,
rpo_seconds: self.config.rpo_seconds,
auto_failover_enabled: self.config.auto_failover_enabled,
recent_events,
}
}
async fn record_event(&self, event: DREvent) {
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("system time should be after UNIX_EPOCH")
.as_secs();
let mut history = self.event_history.write().await;
history.push_back((timestamp, event));
while history.len() > 1000 {
history.pop_front();
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScalingPrediction {
pub timestamp: u64,
pub horizon_minutes: u32,
pub predicted_cpu: f64,
pub predicted_memory: f64,
pub predicted_nodes_needed: u32,
pub confidence: f64,
pub trend: Trend,
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum Trend {
Increasing,
Decreasing,
Stable,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MLCostRecommendation {
pub action: String,
pub predicted_savings: f64,
pub confidence: f64,
pub impact: String,
pub ml_based: bool,
pub description: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DisasterRecoveryStatus {
pub current_primary: CloudProvider,
pub provider_status: HashMap<CloudProvider, ProviderStatus>,
pub rto_seconds: u32,
pub rpo_seconds: u32,
pub auto_failover_enabled: bool,
pub recent_events: Vec<DREvent>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ObjectMetadata {
pub key: String,
pub size: u64,
pub last_modified: u64,
pub content_type: String,
pub storage_tier: StorageTier,
pub etag: String,
pub custom_metadata: HashMap<String, String>,
}
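/// Google Cloud Storage backend backed by the in-memory `GCSClient` mock.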
pub struct GCSBackend {
#[allow(dead_code)]
config: CloudStorageConfig,
pub(super) client: Arc<RwLock<GCSClient>>,
pub(super) metrics: Arc<GCSMetrics>,
}
impl GCSBackend {
pub fn new(config: CloudStorageConfig) -> Self {
let client = GCSClient {
// The configured access key stands in for the GCP project id in this mock.
project: config.access_key.clone(),
bucket: config.bucket.clone(),
objects: HashMap::new(),
metadata: HashMap::new(),
};
let metrics = GCSMetrics {
uploads: Counter::new("gcs_uploads_total".to_string()),
downloads: Counter::new("gcs_downloads_total".to_string()),
errors: Counter::new("gcs_errors_total".to_string()),
};
Self {
config,
client: Arc::new(RwLock::new(client)),
metrics: Arc::new(metrics),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DisasterRecoveryConfig {
pub primary_provider: CloudProvider,
pub secondary_providers: Vec<CloudProvider>,
pub rto_seconds: u32,
pub rpo_seconds: u32,
pub auto_failover_enabled: bool,
pub health_check_interval_secs: u32,
pub failover_threshold: u32,
pub continuous_replication: bool,
pub replication_batch_size: usize,
}
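/// Azure Blob Storage backend backed by the in-memory `AzureClient` mock.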
pub struct AzureBlobBackend {
#[allow(dead_code)]
config: CloudStorageConfig,
pub(super) client: Arc<RwLock<AzureClient>>,
pub(super) metrics: Arc<AzureMetrics>,
}
impl AzureBlobBackend {
pub fn new(config: CloudStorageConfig) -> Self {
let client = AzureClient {
// The configured access key stands in for the storage account name in this mock.
account: config.access_key.clone(),
container: config.bucket.clone(),
objects: HashMap::new(),
metadata: HashMap::new(),
};
let metrics = AzureMetrics {
uploads: Counter::new("azure_uploads_total".to_string()),
downloads: Counter::new("azure_downloads_total".to_string()),
errors: Counter::new("azure_errors_total".to_string()),
};
Self {
config,
client: Arc::new(RwLock::new(client)),
metrics: Arc::new(metrics),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProviderStatus {
pub healthy: bool,
pub latency_ms: u64,
pub failure_count: u32,
pub replication_lag_ms: u64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LifecycleRule {
pub id: String,
pub prefix: Option<String>,
pub transition_days: u32,
pub target_tier: StorageTier,
pub expiration_days: u32,
}
pub struct S3Metrics {
pub uploads: Counter,
pub downloads: Counter,
pub upload_bytes: Counter,
pub download_bytes: Counter,
pub errors: Counter,
pub latency_sum: Gauge,
#[allow(dead_code)]
latency_histogram: Histogram,
#[allow(dead_code)]
operation_timer: Timer,
pub compression_ratio: Gauge,
pub gpu_acceleration_count: Counter,
}
#[derive(Debug, Clone)]
pub struct OperationMetrics {
pub operation_name: String,
pub total_count: u64,
pub success_count: u64,
pub failure_count: u64,
pub total_bytes: u64,
pub total_duration_ms: u64,
pub avg_latency_ms: f64,
pub p95_latency_ms: f64,
pub p99_latency_ms: f64,
pub compression_ratio: f64,
pub gpu_accelerated: bool,
}
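/// Outcome of a scaling evaluation: add nodes, remove specific instances, or hold.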
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ScalingDecision {
ScaleUp {
count: u32,
instance_type: String,
use_spot: bool,
reason: String,
},
ScaleDown {
count: u32,
instance_ids: Vec<String>,
reason: String,
},
NoAction { reason: String },
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClusterMetrics {
pub timestamp: u64,
pub avg_cpu_utilization: f64,
pub avg_memory_utilization: f64,
pub queries_per_second: f64,
pub node_count: u32,
pub error_rate: f64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CostOptimization {
pub current_hourly_cost: f64,
pub current_monthly_cost: f64,
pub on_demand_count: u32,
pub spot_count: u32,
pub potential_monthly_savings: f64,
pub recommendations: Vec<CostRecommendation>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ElasticScalingStatus {
pub current_node_count: u32,
pub min_nodes: u32,
pub max_nodes: u32,
pub spot_count: u32,
pub on_demand_count: u32,
pub target_cpu: f64,
pub target_memory: f64,
pub cooldown_seconds: u32,
pub recent_events: Vec<ScalingEvent>,
}
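/// Heuristic cost optimizer. `predict_cost` runs a nearest-neighbor lookup
/// over recorded samples: points whose CPU and memory utilization are within
/// 0.2 of the current metrics are averaged, and confidence is derived from
/// their spread as `1 - stddev/mean`, clamped to [0, 1]. A minimal sketch
/// (placeholder values assumed, marked `ignore`):
///
/// ```ignore
/// let optimizer = MLCostOptimizer::new();
/// optimizer.add_training_data(sample).await; // `sample`: a CostTrainingData
/// let prediction = optimizer.predict_cost(&cluster_metrics, &scaling_config).await;
/// println!("${:.2}/hr at {:.0}% confidence",
///     prediction.predicted_hourly_cost, prediction.confidence * 100.0);
/// ```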
pub struct MLCostOptimizer {
training_data: Arc<RwLock<Vec<CostTrainingData>>>,
}
impl MLCostOptimizer {
pub fn new() -> Self {
Self {
training_data: Arc::new(RwLock::new(Vec::new())),
}
}
pub async fn add_training_data(&self, data: CostTrainingData) {
let mut training = self.training_data.write().await;
training.push(data);
// Cap retained history: once past 10,000 samples, drop the oldest 1,000.
if training.len() > 10000 {
training.drain(0..1000);
}
}
pub async fn train_model(&mut self) -> Result<(), CloudError> {
let training_data = self.training_data.read().await;
if training_data.len() < 100 {
return Err(CloudError::ConfigurationError(
"Insufficient training data".to_string(),
));
}
// Model fitting is not yet implemented; this only validates sample volume.
info!(
"Training ML cost model with {} samples",
training_data.len()
);
Ok(())
}
pub async fn predict_cost(
&self,
current_metrics: &ClusterMetrics,
current_config: &ElasticScalingConfig,
) -> CostPrediction {
let training_data = self.training_data.read().await;
let similar_points: Vec<&CostTrainingData> = training_data
.iter()
.filter(|d| {
(d.cpu_utilization - current_metrics.avg_cpu_utilization).abs() < 0.2
&& (d.memory_utilization - current_metrics.avg_memory_utilization).abs() < 0.2
})
.collect();
let (predicted_cost, confidence) = if !similar_points.is_empty() {
let avg_cost = similar_points.iter().map(|p| p.actual_cost).sum::<f64>()
/ similar_points.len() as f64;
let variance = similar_points
.iter()
.map(|p| (p.actual_cost - avg_cost).powi(2))
.sum::<f64>()
/ similar_points.len() as f64;
let confidence = (1.0 - variance.sqrt() / avg_cost).clamp(0.0, 1.0);
(avg_cost, confidence)
} else {
// No comparable history: fall back to a nominal $0.10/hr at low confidence.
(0.10, 0.3)
};
let recommended_instance_type = if current_metrics.avg_cpu_utilization > 0.7 {
"large".to_string()
} else if current_metrics.avg_cpu_utilization > 0.4 {
"medium".to_string()
} else {
"small".to_string()
};
let recommended_spot_ratio = if confidence > 0.7 {
current_config.max_spot_ratio
} else {
current_config.max_spot_ratio * 0.7
};
// Hourly cost across the cluster; spot coverage is assumed to save ~70%
// on the covered fraction, extrapolated to a 30-day month.
let current_cost = predicted_cost * current_metrics.node_count as f64;
let spot_savings = current_cost * recommended_spot_ratio * 0.7;
let estimated_monthly_savings = spot_savings * 24.0 * 30.0;
CostPrediction {
predicted_hourly_cost: predicted_cost,
confidence,
recommended_instance_type,
recommended_spot_ratio,
estimated_monthly_savings,
timestamp: SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("system time should be after UNIX_EPOCH")
.as_secs(),
}
}
pub async fn get_recommendations(
&self,
current_status: &ElasticScalingStatus,
cost_optimization: &CostOptimization,
) -> Vec<MLCostRecommendation> {
let mut recommendations = Vec::new();
if current_status.spot_count < current_status.current_node_count / 2 {
recommendations.push(MLCostRecommendation {
action: "Increase spot instance usage".to_string(),
predicted_savings: cost_optimization.potential_monthly_savings * 0.6,
confidence: 0.85,
impact: "Medium".to_string(),
ml_based: true,
description: "ML model predicts stable workload suitable for spot instances"
.to_string(),
});
}
let training_data = self.training_data.read().await;
if !training_data.is_empty() {
// Average CPU utilization over the most recent samples (up to 100);
// divide by the actual sample count rather than a fixed 100.
let recent: Vec<f64> = training_data
.iter()
.rev()
.take(100)
.map(|d| d.cpu_utilization)
.collect();
let recent_avg_cpu = recent.iter().sum::<f64>() / recent.len() as f64;
if recent_avg_cpu < 0.3 {
recommendations.push(MLCostRecommendation {
action: "Downsize instance types".to_string(),
predicted_savings: cost_optimization.current_monthly_cost * 0.3,
confidence: 0.90,
impact: "High".to_string(),
ml_based: true,
description: "ML analysis shows consistent low utilization".to_string(),
});
}
}
if training_data.len() > 1000 {
recommendations.push(MLCostRecommendation {
action: "Implement time-based scaling".to_string(),
predicted_savings: cost_optimization.current_monthly_cost * 0.15,
confidence: 0.75,
impact: "Medium".to_string(),
ml_based: true,
description: "ML detected workload patterns suitable for scheduled scaling"
.to_string(),
});
}
recommendations
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeInstance {
pub instance_id: String,
pub node_id: u64,
pub instance_type: String,
pub is_spot: bool,
pub launch_time: u64,
pub cpu_utilization: f64,
pub memory_utilization: f64,
pub provider: CloudProvider,
pub region: String,
}
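/// Compressor with a planned GPU path. GPU acceleration is currently always
/// disabled, so compress/decompress fall back to CPU zstd (level 3).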
pub struct GpuCompressor {
enabled: bool,
}
impl GpuCompressor {
pub fn new() -> Self {
Self { enabled: false }
}
pub async fn compress(&mut self, data: &[u8]) -> Result<Vec<u8>, CloudError> {
// GPU path not yet implemented; always falls back to CPU zstd.
self.cpu_compress(data)
}
pub async fn decompress(&mut self, data: &[u8]) -> Result<Vec<u8>, CloudError> {
// GPU path not yet implemented; always falls back to CPU zstd.
self.cpu_decompress(data)
}
fn cpu_compress(&self, data: &[u8]) -> Result<Vec<u8>, CloudError> {
oxiarc_zstd::encode_all(data, 3)
.map_err(|e| CloudError::ProviderError(format!("Compression failed: {}", e)))
}
fn cpu_decompress(&self, data: &[u8]) -> Result<Vec<u8>, CloudError> {
oxiarc_zstd::decode_all(data)
.map_err(|e| CloudError::ProviderError(format!("Decompression failed: {}", e)))
}
pub fn is_gpu_enabled(&self) -> bool {
self.enabled
}
}
pub(super) struct AzureMetrics {
pub(super) uploads: Counter,
pub(super) downloads: Counter,
pub(super) errors: Counter,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum CloudProvider {
AWS,
GCP,
Azure,
OnPremises,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HealthStatus {
pub healthy: bool,
pub latency_ms: u64,
pub error_rate: f64,
pub last_check: u64,
pub message: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CostRecommendation {
pub action: String,
pub estimated_savings: f64,
pub risk_level: String,
pub description: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum DREvent {
HealthCheck {
provider: CloudProvider,
healthy: bool,
latency_ms: u64,
},
FailoverInitiated {
from: CloudProvider,
to: CloudProvider,
reason: String,
},
FailoverCompleted {
from: CloudProvider,
to: CloudProvider,
duration_ms: u64,
},
ReplicationCompleted {
source: CloudProvider,
target: CloudProvider,
objects: usize,
bytes: u64,
},
RecoveryStarted { provider: CloudProvider },
RecoveryCompleted {
provider: CloudProvider,
duration_ms: u64,
},
}
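/// S3-compatible backend backed by the in-memory `S3Client` mock. Tracks
/// upload/download counts and bytes, errors, latency, compression ratio,
/// and GPU-accelerated operation counts via scirs2_core metrics.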
pub struct S3Backend {
#[allow(dead_code)]
config: CloudStorageConfig,
pub(super) client: Arc<RwLock<S3Client>>,
pub(super) metrics: Arc<S3Metrics>,
}
impl S3Backend {
pub fn new(config: CloudStorageConfig) -> Self {
let client = S3Client {
endpoint: config
.endpoint
.clone()
.unwrap_or_else(|| format!("https://s3.{}.amazonaws.com", config.region)),
region: config.region.clone(),
bucket: config.bucket.clone(),
objects: HashMap::new(),
metadata: HashMap::new(),
};
let metrics = S3Metrics {
uploads: Counter::new("s3_uploads_total".to_string()),
downloads: Counter::new("s3_downloads_total".to_string()),
upload_bytes: Counter::new("s3_upload_bytes_total".to_string()),
download_bytes: Counter::new("s3_download_bytes_total".to_string()),
errors: Counter::new("s3_errors_total".to_string()),
latency_sum: Gauge::new("s3_latency_sum_ms".to_string()),
latency_histogram: Histogram::new("s3_latency_ms".to_string()),
operation_timer: Timer::new("s3_operations".to_string()),
compression_ratio: Gauge::new("s3_compression_ratio".to_string()),
gpu_acceleration_count: Counter::new("s3_gpu_operations_total".to_string()),
};
Self {
config,
client: Arc::new(RwLock::new(client)),
metrics: Arc::new(metrics),
}
}
pub fn get_metrics(&self) -> &S3Metrics {
&self.metrics
}
pub fn get_metrics_summary(&self) -> S3MetricsSummary {
S3MetricsSummary {
total_uploads: self.metrics.uploads.get(),
total_downloads: self.metrics.downloads.get(),
total_upload_bytes: self.metrics.upload_bytes.get(),
total_download_bytes: self.metrics.download_bytes.get(),
total_errors: self.metrics.errors.get(),
avg_latency_ms: self.metrics.latency_sum.get(),
compression_ratio: self.metrics.compression_ratio.get(),
gpu_operations: self.metrics.gpu_acceleration_count.get(),
}
}
}
pub(super) struct AzureClient {
#[allow(dead_code)]
account: String,
#[allow(dead_code)]
container: String,
pub(super) objects: HashMap<String, Vec<u8>>,
pub(super) metadata: HashMap<String, ObjectMetadata>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScalingEvent {
pub timestamp: u64,
pub decision: ScalingDecision,
pub success: bool,
pub duration_ms: u64,
pub error: Option<String>,
}
pub(super) struct S3Client {
#[allow(dead_code)]
endpoint: String,
#[allow(dead_code)]
region: String,
#[allow(dead_code)]
bucket: String,
pub(super) objects: HashMap<String, Vec<u8>>,
pub(super) metadata: HashMap<String, ObjectMetadata>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CloudStorageConfig {
pub provider: CloudProvider,
pub region: String,
pub bucket: String,
pub access_key: String,
pub secret_key: String,
pub endpoint: Option<String>,
pub default_tier: StorageTier,
pub encryption_enabled: bool,
pub versioning_enabled: bool,
pub lifecycle_rules: Vec<LifecycleRule>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InstanceType {
pub name: String,
pub vcpus: u32,
pub memory_gb: u32,
pub hourly_cost: f64,
pub spot_hourly_cost: f64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CostTrainingData {
pub instance_type: String,
pub cpu_utilization: f64,
pub memory_utilization: f64,
pub queries_per_second: f64,
pub actual_cost: f64,
pub is_spot: bool,
pub timestamp: u64,
}