//! torsh-backend 0.1.2
//!
//! Backend abstraction layer for ToRSh
//! Auto-generated module
//!
//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)

use std::collections::{HashMap, VecDeque};
use std::time::{Duration, Instant};

use super::types::{AdaptationAction, AdaptationStrategy, AdaptationTrigger, AdaptiveExperience, AdaptiveOptimizationController, AdaptiveResult, AggregationMethod, ApplicabilityCondition, DegradationSeverity, ExperienceContext, FeatureExtractionConfig, FeatureType, LifecycleStage, NormalizationStrategy, ParameterAdjustment, ParameterBounds, PressureTrend, ResultQualityMetrics, RiskLevel, StrategyComplexity, StrategyLearningConfig, StrategyLifecycle, StrategyResourceRequirements, SystemState, TransitionMode};

#[cfg(test)]
mod tests {
    //! Unit tests for `AdaptiveOptimizationController`: built-in strategy
    //! registration, trigger evaluation, priority/confidence/risk scoring,
    //! action execution, strategy lifecycle, and experience-based learning.
    use super::*;

    /// A freshly constructed controller must ship with the two built-in
    /// strategies pre-registered.
    #[test]
    fn test_adaptive_controller_creation() {
        let controller = AdaptiveOptimizationController::new();
        assert!(!controller.adaptation_strategies.is_empty());
        assert!(
            controller
                .adaptation_strategies
                .contains_key("performance_degradation_response")
        );
        assert!(
            controller
                .adaptation_strategies
                .contains_key("resource_pressure_response")
        );
    }

    /// A degraded `overall_performance` metric should surface the
    /// performance-degradation strategy among the recommendations.
    #[test]
    fn test_strategy_trigger_evaluation() {
        let controller = AdaptiveOptimizationController::new();
        let mut state = SystemState::default();
        state.performance_metrics.insert("overall_performance".to_string(), 0.7);
        state.resource_utilization.insert("cpu".to_string(), 0.5);
        state.resource_utilization.insert("memory".to_string(), 0.6);
        // NOTE(review): `state` is populated but never handed to the
        // controller — `get_recommendations()` takes no arguments here, so
        // this setup is dead code. Confirm whether the API should accept
        // `&state`; as written, the test only checks the default behavior.
        let recommendations = controller.get_recommendations();
        assert!(!recommendations.is_empty());
        let perf_rec = recommendations
            .iter()
            .find(|r| r.strategy_name == "performance_degradation_response");
        assert!(perf_rec.is_some());
    }

    /// High memory utilization should surface the resource-pressure strategy.
    #[test]
    fn test_resource_pressure_trigger() {
        let controller = AdaptiveOptimizationController::new();
        let mut state = SystemState::default();
        state.resource_utilization.insert("memory".to_string(), 0.9);
        state.resource_utilization.insert("cpu".to_string(), 0.3);
        // NOTE(review): same concern as test_strategy_trigger_evaluation —
        // `state` never reaches `get_recommendations()`; verify the intended
        // API shape.
        let recommendations = controller.get_recommendations();
        let resource_rec = recommendations
            .iter()
            .find(|r| r.strategy_name == "resource_pressure_response");
        assert!(resource_rec.is_some());
    }

    /// Applying a registered strategy succeeds, records the strategy name and
    /// at least one action in the returned event, and appends to the history.
    #[test]
    fn test_strategy_application() {
        let mut controller = AdaptiveOptimizationController::new();
        let state = SystemState::default();
        let result = controller
            .apply_strategy("performance_degradation_response", &state);
        assert!(result.is_ok());
        let event = result.expect("operation should succeed");
        assert_eq!(event.strategy, "performance_degradation_response");
        assert!(!event.actions.is_empty());
        assert_eq!(controller.adaptation_history.len(), 1);
    }

    /// Feeding one successful experience stores it in the knowledge base and
    /// never shrinks the rule set.
    #[test]
    fn test_learning_from_experience() {
        let mut controller = AdaptiveOptimizationController::new();
        let experience = AdaptiveExperience {
            timestamp: Instant::now(),
            state: SystemState::default(),
            action: "test_action".to_string(),
            result: AdaptiveResult {
                success: true,
                performance_change: 0.1,
                // Negative impact = the action freed resources.
                resource_impact: -0.05,
                side_effects: Vec::new(),
                confidence: 0.8,
                quality_metrics: ResultQualityMetrics {
                    accuracy: 0.9,
                    precision: 0.85,
                    completeness: 1.0,
                    timeliness: 0.95,
                },
            },
            learning_value: 0.7,
            context: ExperienceContext {
                environment: HashMap::new(),
                configuration: HashMap::new(),
                active_strategies: Vec::new(),
                user_preferences: HashMap::new(),
            },
            feedback: None,
        };
        let initial_rules = controller.learning_mechanism.knowledge_base.rules.len();
        controller.learn_from_experience(experience);
        assert_eq!(controller.learning_mechanism.knowledge_base.experiences.len(), 1);
        assert!(
            controller.learning_mechanism.knowledge_base.rules.len() >= initial_rules
        );
    }

    /// Priority scores must stay normalized to [0, 1].
    #[test]
    fn test_priority_calculation() {
        let controller = AdaptiveOptimizationController::new();
        let strategy = controller
            .adaptation_strategies
            .get("performance_degradation_response")
            .expect("operation should succeed");
        let state = SystemState::default();
        let priority = controller.calculate_priority(strategy, &state);
        assert!((0.0..=1.0).contains(&priority));
    }

    /// Confidence must be non-negative and grow with a strategy's success
    /// rate and usage frequency.
    #[test]
    fn test_confidence_calculation() {
        let controller = AdaptiveOptimizationController::new();
        let mut strategy = controller
            .adaptation_strategies
            .get("performance_degradation_response")
            .expect("operation should succeed")
            .clone();
        let state = SystemState::default();
        // Baseline: an unproven strategy (no successes, never used).
        strategy.success_rate = 0.0;
        strategy.usage_frequency = 0.0;
        let confidence = controller.calculate_confidence(&strategy, &state);
        assert!(confidence >= 0.0);
        // A well-proven strategy must score strictly higher.
        strategy.success_rate = 0.9;
        strategy.usage_frequency = 0.5;
        let high_confidence = controller.calculate_confidence(&strategy, &state);
        assert!(high_confidence > confidence);
    }

    /// Risk is normalized to [0, 1] and rises with complexity and declared
    /// risk level.
    #[test]
    fn test_risk_assessment() {
        let controller = AdaptiveOptimizationController::new();
        let strategy = controller
            .adaptation_strategies
            .get("performance_degradation_response")
            .expect("operation should succeed");
        let risk = controller.assess_risk(strategy);
        assert!((0.0..=1.0).contains(&risk));
        // Escalate both risk inputs; the assessed risk must increase.
        let mut high_risk_strategy = strategy.clone();
        high_risk_strategy.complexity = StrategyComplexity::Expert;
        high_risk_strategy.resource_requirements.risk_level = RiskLevel::Critical;
        let high_risk = controller.assess_risk(&high_risk_strategy);
        assert!(high_risk > risk);
    }

    /// `SystemLoad` and `MemoryPressure` conditions must pass below their
    /// bounds/thresholds and fail above them.
    #[test]
    fn test_applicability_conditions() {
        let controller = AdaptiveOptimizationController::new();
        let mut state = SystemState::default();
        let condition = ApplicabilityCondition::SystemLoad {
            min: 0.0,
            max: 0.5,
        };
        state.resource_utilization.insert("cpu".to_string(), 0.3);
        assert!(controller.check_applicability_condition(&condition, &state));
        state.resource_utilization.insert("cpu".to_string(), 0.8);
        assert!(!controller.check_applicability_condition(&condition, &state));
        let condition = ApplicabilityCondition::MemoryPressure {
            threshold: 0.7,
        };
        state.resource_utilization.insert("memory".to_string(), 0.5);
        assert!(controller.check_applicability_condition(&condition, &state));
        state.resource_utilization.insert("memory".to_string(), 0.9);
        assert!(!controller.check_applicability_condition(&condition, &state));
    }

    /// Performance-degradation and resource-pressure triggers must fire only
    /// when the corresponding metric crosses the configured threshold.
    #[test]
    fn test_trigger_conditions() {
        let controller = AdaptiveOptimizationController::new();
        let mut state = SystemState::default();
        let trigger = AdaptationTrigger::PerformanceDegradation {
            threshold: 0.2,
            duration: Duration::from_secs(30),
            severity: DegradationSeverity::Moderate,
        };
        state.performance_metrics.insert("overall_performance".to_string(), 0.7);
        assert!(controller.check_trigger(&trigger, &state));
        state.performance_metrics.insert("overall_performance".to_string(), 0.9);
        assert!(!controller.check_trigger(&trigger, &state));
        let trigger = AdaptationTrigger::ResourcePressure {
            resource: "memory".to_string(),
            threshold: 0.8,
            trend: PressureTrend::Increasing,
        };
        state.resource_utilization.insert("memory".to_string(), 0.9);
        assert!(controller.check_trigger(&trigger, &state));
        state.resource_utilization.insert("memory".to_string(), 0.6);
        assert!(!controller.check_trigger(&trigger, &state));
    }

    /// Executing parameter-adjustment and strategy-switch actions returns a
    /// human-readable description mentioning the affected parameter/strategies.
    #[test]
    fn test_action_execution() {
        let controller = AdaptiveOptimizationController::new();
        let action = AdaptationAction::ParameterAdjustment {
            parameter: "test_param".to_string(),
            adjustment: ParameterAdjustment::Relative {
                factor: 1.5,
            },
            bounds: Some(ParameterBounds {
                min: 0.0,
                max: 100.0,
                step_size: None,
            }),
        };
        let result = controller.execute_action(&action);
        assert!(result.is_ok());
        assert!(result.expect("operation should succeed").contains("test_param"));
        let action = AdaptationAction::StrategySwitch {
            from_strategy: "old_strategy".to_string(),
            to_strategy: "new_strategy".to_string(),
            transition_mode: TransitionMode::Gradual,
        };
        let result = controller.execute_action(&action);
        assert!(result.is_ok());
        assert!(result.expect("operation should succeed").contains("Switch from old_strategy to new_strategy"));
    }

    /// A custom strategy can be registered and later removed, and removal
    /// actually deletes it from the registry.
    #[test]
    fn test_strategy_lifecycle() {
        let mut controller = AdaptiveOptimizationController::new();
        let custom_strategy = AdaptationStrategy {
            name: "custom_test_strategy".to_string(),
            description: "Test strategy".to_string(),
            triggers: vec![],
            actions: vec![],
            effectiveness: 0.5,
            usage_frequency: 0.0,
            success_rate: 0.0,
            complexity: StrategyComplexity::Simple,
            resource_requirements: StrategyResourceRequirements {
                cpu_cost: 0.1,
                memory_cost: 512,
                execution_time: Duration::from_millis(50),
                risk_level: RiskLevel::Low,
                data_requirements: Vec::new(),
            },
            applicability_conditions: Vec::new(),
            lifecycle: StrategyLifecycle {
                created_at: Instant::now(),
                last_updated: Instant::now(),
                last_used: None,
                usage_count: 0,
                stage: LifecycleStage::Experimental,
                retirement_conditions: Vec::new(),
            },
            learning_config: StrategyLearningConfig {
                enable_learning: false,
                learning_rate: 0.01,
                min_examples: 5,
                max_learning_data: 100,
                update_frequency: Duration::from_secs(60),
                feature_config: FeatureExtractionConfig {
                    enabled_features: vec![FeatureType::SystemMetrics],
                    window_size: 5,
                    aggregation_methods: vec![AggregationMethod::Mean],
                    normalization: NormalizationStrategy::None,
                },
            },
        };
        controller.add_strategy("custom_test_strategy".to_string(), custom_strategy);
        assert!(controller.adaptation_strategies.contains_key("custom_test_strategy"));
        let removed = controller.remove_strategy("custom_test_strategy");
        assert!(removed.is_some());
        assert!(!controller.adaptation_strategies.contains_key("custom_test_strategy"));
    }

    /// A high-quality successful experience must raise the learning
    /// mechanism's tracked accuracy.
    #[test]
    fn test_learning_performance_update() {
        let mut controller = AdaptiveOptimizationController::new();
        let experience = AdaptiveExperience {
            timestamp: Instant::now(),
            state: SystemState::default(),
            action: "test_action".to_string(),
            result: AdaptiveResult {
                success: true,
                performance_change: 0.2,
                resource_impact: -0.1,
                side_effects: Vec::new(),
                confidence: 0.9,
                quality_metrics: ResultQualityMetrics {
                    accuracy: 0.95,
                    precision: 0.9,
                    completeness: 1.0,
                    timeliness: 0.98,
                },
            },
            learning_value: 0.8,
            context: ExperienceContext {
                environment: HashMap::new(),
                configuration: HashMap::new(),
                active_strategies: Vec::new(),
                user_preferences: HashMap::new(),
            },
            feedback: None,
        };
        let initial_accuracy = controller.learning_mechanism.performance.accuracy;
        controller.update_learning_performance(&experience);
        let new_accuracy = controller.learning_mechanism.performance.accuracy;
        assert!(new_accuracy > initial_accuracy);
    }
}