use super::{
ApplicationError, ApplicationResult, Duration, ExpectedMetrics, HashMap, PlatformAvailability,
PlatformConfig, PlatformType, ProblemSpecification, ProblemType, TestExecutionResult,
};
/// Runs identical test suites on multiple execution platforms and compares
/// the results to quantify cross-platform compatibility.
#[derive(Debug)]
pub struct CrossPlatformValidator {
    /// Platforms under validation (seeded with a default catalog by `new`).
    pub platforms: Vec<Platform>,
    /// Registered test suites, keyed by suite id.
    pub test_suites: HashMap<String, CrossPlatformTestSuite>,
    /// Known pairwise feature/performance compatibility data.
    pub compatibility_matrix: CompatibilityMatrix,
    /// Per-platform configuration — presumably keyed by platform id; not
    /// read anywhere in this chunk (TODO confirm against callers).
    pub platform_configs: HashMap<String, PlatformConfig>,
}
/// A single execution target (simulator or hardware backend) that test
/// suites can be run against.
#[derive(Debug, Clone)]
pub struct Platform {
    /// Unique identifier, e.g. "classical_simulator".
    pub id: String,
    /// Backend family (Classical, DWave, AWSBraket, ...).
    pub platform_type: PlatformType,
    /// Whether the platform can be used right now; only `Available`
    /// platforms participate in `run_validation`.
    pub availability: PlatformAvailability,
    /// What the platform can solve (sizes, problem types, features).
    pub capabilities: PlatformCapabilities,
    /// Expected performance envelope for this platform.
    pub performance: PlatformPerformance,
}
/// Describes what problems a platform can accept.
#[derive(Debug, Clone)]
pub struct PlatformCapabilities {
    /// Largest problem size the platform accepts — NOTE(review): not
    /// enforced anywhere in this chunk; confirm where it is checked.
    pub max_problem_size: usize,
    /// Problem types the platform supports; consulted by
    /// `is_test_supported` to skip incompatible test cases.
    pub supported_types: Vec<ProblemType>,
    /// Whether constraints are handled natively (vs. penalty encoding) —
    /// presumably; field is data-only in this chunk.
    pub native_constraints: bool,
    /// Whether problems must be minor-embedded onto the hardware graph —
    /// presumably; field is data-only in this chunk.
    pub requires_embedding: bool,
}
/// Expected performance envelope of a platform.
#[derive(Debug, Clone)]
pub struct PlatformPerformance {
    /// (min, max) expected runtime per problem.
    pub runtime_range: (Duration, Duration),
    /// (min, max) expected solution quality; defaults use a 0.0–1.0 scale.
    pub quality_range: (f64, f64),
    /// Fraction of runs expected to succeed (0.0–1.0).
    pub reliability: f64,
    /// Monetary cost per problem; `None` when unknown. Units are not
    /// stated in this chunk — TODO confirm (defaults suggest dollars).
    pub cost_per_problem: Option<f64>,
}
/// A set of test cases plus the criteria used to compare the results
/// obtained on different platforms.
#[derive(Debug)]
pub struct CrossPlatformTestSuite {
    /// Unique suite identifier; also the key under which the suite is
    /// stored in the validator.
    pub id: String,
    /// Test cases executed on every available platform.
    pub test_cases: Vec<CrossPlatformTestCase>,
    /// One comparison is produced per criterion in `run_validation`.
    pub comparison_criteria: Vec<ComparisonCriterion>,
    /// Differences that are known/expected up front — presumably keyed by
    /// a platform-pair string; not read in this chunk (TODO confirm).
    pub expected_differences: HashMap<String, ExpectedDifference>,
}
/// One problem instance to run on each platform, with optional
/// platform-specific tuning and expectations.
#[derive(Debug, Clone)]
pub struct CrossPlatformTestCase {
    /// Unique test-case identifier; results are stored under this key.
    pub id: String,
    /// The problem to solve (type and size range).
    pub problem: ProblemSpecification,
    /// Per-platform solver parameters — presumably keyed by platform id;
    /// not read in this chunk (TODO confirm).
    pub platform_params: HashMap<String, PlatformSpecificParams>,
    /// Expected metrics — presumably keyed by platform id; not read in
    /// this chunk (TODO confirm).
    pub expected_results: HashMap<String, ExpectedMetrics>,
}
/// Solver tuning knobs for one specific platform.
///
/// All fields are free-form key/value maps; the keys' meaning is defined by
/// the target platform, not by this module.
#[derive(Debug, Clone, Default, PartialEq)]
pub struct PlatformSpecificParams {
    /// Numeric annealing-schedule parameters (e.g. annealing time).
    pub annealing_params: HashMap<String, f64>,
    /// Free-form string solver settings.
    pub solver_settings: HashMap<String, String>,
    /// Numeric resource limits (e.g. max runtime, memory).
    pub resource_limits: HashMap<String, f64>,
}
/// One rule for comparing a single metric across platforms.
#[derive(Debug, Clone)]
pub struct ComparisonCriterion {
    /// Unique criterion identifier, echoed into the `ComparisonResult`.
    pub id: String,
    /// Metric name; recognized values are "solution_quality",
    /// "execution_time", "final_energy" and "memory_used" (see
    /// `extract_metric_value`).
    pub metric: String,
    /// How the per-platform values are compared.
    pub comparison_type: ComparisonType,
    /// Maximum allowed difference for the comparison to count as
    /// "within tolerance".
    pub tolerance: f64,
    /// Critical criteria carry double weight in the compatibility score.
    pub critical: bool,
}
/// How two platforms' metric values are compared.
///
/// `Copy` and `Hash` are derived: the enum is fieldless, so copying is free
/// and it can be used directly as a map/set key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ComparisonType {
    /// |a - b|.
    AbsoluteDifference,
    /// |a - b| normalized by the magnitude of the values.
    RelativeDifference,
    /// Statistical-equivalence test — NOTE(review): currently falls back
    /// to absolute difference in `compare_platforms`.
    StatisticalEquivalence,
    /// Rank-based comparison — NOTE(review): currently falls back to
    /// absolute difference in `compare_platforms`.
    Ranking,
}
/// A difference between two platforms that is known and expected up front.
#[derive(Debug, Clone, PartialEq)]
pub struct ExpectedDifference {
    /// The two platform ids the difference applies to.
    pub platform_pair: (String, String),
    /// (min, max) magnitude of the expected difference.
    pub difference_range: (f64, f64),
    /// Human-readable explanation of why the difference exists.
    pub reason: String,
    /// Whether the difference is acceptable (vs. a known defect).
    pub acceptable: bool,
}
/// Pairwise compatibility knowledge between platforms.
#[derive(Debug)]
pub struct CompatibilityMatrix {
    /// platform id -> platform id -> feature compatibility level.
    /// Lookups are directional; (a, b) and (b, a) are separate entries.
    pub feature_compatibility: HashMap<String, HashMap<String, CompatibilityLevel>>,
    /// platform id -> platform id -> performance compatibility score
    /// (0.5 is used as the "unknown" fallback in `get_compatibility`).
    pub performance_compatibility: HashMap<String, HashMap<String, f64>>,
    /// Known issues, keyed by the "{platform1}_{platform2}" pair string
    /// built in `get_compatibility`.
    pub known_issues: HashMap<String, Vec<CompatibilityIssue>>,
}
/// Degree of feature compatibility between two platforms.
///
/// `Copy` and `Hash` are derived: the enum is fieldless, so copying is free
/// and it can be used directly as a map/set key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum CompatibilityLevel {
    /// Features behave identically on both platforms.
    Full,
    /// Some features differ or are missing.
    Partial,
    /// The platforms cannot run the same workloads.
    Incompatible,
    /// No compatibility data recorded (the lookup fallback).
    Unknown,
}
/// A documented incompatibility between two platforms.
#[derive(Debug, Clone)]
pub struct CompatibilityIssue {
    /// Unique issue identifier.
    pub id: String,
    /// Human-readable description of the problem.
    pub description: String,
    /// Impact classification.
    pub severity: IssueSeverity,
    /// Suggested workaround, if one is known.
    pub workaround: Option<String>,
    /// Names of the features affected by this issue.
    pub affected_features: Vec<String>,
}
/// Impact classification for a `CompatibilityIssue`.
///
/// `Copy` and `Hash` are derived: the enum is fieldless, so copying is free
/// and it can be used directly as a map/set key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum IssueSeverity {
    /// Blocks correct operation.
    Critical,
    /// Significant functional impact.
    Major,
    /// Small functional impact.
    Minor,
    /// No functional impact (presentation only).
    Cosmetic,
}
impl CrossPlatformValidator {
    /// Creates a validator pre-loaded with the built-in platform catalog and
    /// empty test-suite / compatibility-matrix state.
    #[must_use]
    pub fn new() -> Self {
        Self {
            platforms: Self::create_default_platforms(),
            test_suites: HashMap::new(),
            compatibility_matrix: CompatibilityMatrix {
                feature_compatibility: HashMap::new(),
                performance_compatibility: HashMap::new(),
                known_issues: HashMap::new(),
            },
            platform_configs: HashMap::new(),
        }
    }

    /// Built-in platform catalog: a local classical simulator (always
    /// available) plus two auth-gated backends (D-Wave-style and AWS Braket).
    fn create_default_platforms() -> Vec<Platform> {
        vec![
            Platform {
                id: "classical_simulator".to_string(),
                platform_type: PlatformType::Classical,
                availability: PlatformAvailability::Available,
                capabilities: PlatformCapabilities {
                    max_problem_size: 10_000,
                    supported_types: vec![
                        ProblemType::RandomIsing,
                        ProblemType::MaxCut,
                        ProblemType::VertexCover,
                        ProblemType::TSP,
                        ProblemType::Portfolio,
                    ],
                    native_constraints: true,
                    requires_embedding: false,
                },
                performance: PlatformPerformance {
                    runtime_range: (Duration::from_millis(1), Duration::from_secs(3600)),
                    quality_range: (0.8, 1.0),
                    reliability: 0.99,
                    cost_per_problem: Some(0.0),
                },
            },
            Platform {
                id: "dwave_simulator".to_string(),
                platform_type: PlatformType::DWave,
                availability: PlatformAvailability::RequiresAuth,
                capabilities: PlatformCapabilities {
                    max_problem_size: 5000,
                    supported_types: vec![ProblemType::RandomIsing, ProblemType::MaxCut],
                    native_constraints: false,
                    requires_embedding: true,
                },
                performance: PlatformPerformance {
                    runtime_range: (Duration::from_millis(20), Duration::from_secs(20)),
                    quality_range: (0.7, 0.95),
                    reliability: 0.95,
                    cost_per_problem: Some(0.00_037),
                },
            },
            Platform {
                id: "aws_braket".to_string(),
                platform_type: PlatformType::AWSBraket,
                availability: PlatformAvailability::RequiresAuth,
                capabilities: PlatformCapabilities {
                    max_problem_size: 2000,
                    supported_types: vec![ProblemType::RandomIsing, ProblemType::MaxCut],
                    native_constraints: false,
                    requires_embedding: true,
                },
                performance: PlatformPerformance {
                    runtime_range: (Duration::from_secs(1), Duration::from_secs(300)),
                    quality_range: (0.6, 0.9),
                    reliability: 0.92,
                    cost_per_problem: Some(0.001),
                },
            },
        ]
    }

    /// Registers an additional platform with the validator.
    pub fn add_platform(&mut self, platform: Platform) {
        self.platforms.push(platform);
    }

    /// Returns the platform with the given id, if registered.
    #[must_use]
    pub fn get_platform(&self, platform_id: &str) -> Option<&Platform> {
        self.platforms.iter().find(|p| p.id == platform_id)
    }

    /// Registers a test suite, keyed by its id. A previously registered
    /// suite with the same id is replaced.
    pub fn add_test_suite(&mut self, suite: CrossPlatformTestSuite) {
        self.test_suites.insert(suite.id.clone(), suite);
    }

    /// Runs the suite `suite_id` on every currently `Available` platform,
    /// then evaluates each of the suite's comparison criteria across the
    /// collected per-platform results.
    ///
    /// # Errors
    ///
    /// Returns `ApplicationError::ConfigurationError` when `suite_id` is not
    /// registered, and propagates any error from test execution/comparison.
    pub fn run_validation(
        &self,
        suite_id: &str,
    ) -> ApplicationResult<CrossPlatformValidationResult> {
        // Measure real wall-clock time instead of reporting a placeholder.
        let started = std::time::Instant::now();
        let suite = self.test_suites.get(suite_id).ok_or_else(|| {
            ApplicationError::ConfigurationError(format!("Test suite not found: {suite_id}"))
        })?;
        // Only immediately-available platforms take part; auth-gated or
        // offline platforms are skipped silently.
        let mut platform_results = HashMap::new();
        for platform in &self.platforms {
            if platform.availability == PlatformAvailability::Available {
                let results = self.run_suite_on_platform(suite, platform)?;
                platform_results.insert(platform.id.clone(), results);
            }
        }
        let mut comparison_results = Vec::with_capacity(suite.comparison_criteria.len());
        for criterion in &suite.comparison_criteria {
            comparison_results.push(self.compare_platforms(&platform_results, criterion)?);
        }
        let compatibility_score = self.calculate_compatibility_score(&comparison_results);
        Ok(CrossPlatformValidationResult {
            suite_id: suite_id.to_string(),
            platform_results,
            comparison_results,
            compatibility_score,
            validation_time: started.elapsed(),
        })
    }

    /// Runs every supported test case of `suite` on `platform`, keyed by
    /// test-case id. Unsupported cases (problem type not in the platform's
    /// capabilities) are skipped rather than failed.
    fn run_suite_on_platform(
        &self,
        suite: &CrossPlatformTestSuite,
        platform: &Platform,
    ) -> ApplicationResult<PlatformTestResults> {
        let started = std::time::Instant::now();
        let mut test_results = HashMap::new();
        for test_case in &suite.test_cases {
            if !self.is_test_supported(test_case, platform) {
                continue;
            }
            let result = self.run_test_case_on_platform(test_case, platform)?;
            test_results.insert(test_case.id.clone(), result);
        }
        Ok(PlatformTestResults {
            platform_id: platform.id.clone(),
            test_results,
            platform_info: platform.clone(),
            // Report the time actually spent, not a fixed placeholder.
            execution_time: started.elapsed(),
        })
    }

    /// A test case is supported iff its problem type is listed in the
    /// platform's capabilities.
    fn is_test_supported(&self, test_case: &CrossPlatformTestCase, platform: &Platform) -> bool {
        platform
            .capabilities
            .supported_types
            .contains(&test_case.problem.problem_type)
    }

    /// Simulates running one test case on one platform.
    ///
    /// This is a deterministic model, not a real execution: quality starts
    /// from a per-platform baseline and degrades with problem size (up to
    /// 20% at size >= 1000); runtime scales with size, capped at 5 s.
    fn run_test_case_on_platform(
        &self,
        test_case: &CrossPlatformTestCase,
        platform: &Platform,
    ) -> ApplicationResult<TestExecutionResult> {
        // Per-platform baseline solution quality for the simulation model.
        let base_quality = match platform.platform_type {
            PlatformType::Classical => 0.95,
            PlatformType::DWave => 0.85,
            PlatformType::AWSBraket => 0.80,
            PlatformType::FujitsuDA => 0.88,
            PlatformType::Custom(_) => 0.75,
        };
        // Use the midpoint of the specified size range as the nominal size.
        let problem_size = usize::midpoint(
            test_case.problem.size_range.0,
            test_case.problem.size_range.1,
        );
        let size_factor = (problem_size as f64 / 1000.0).min(1.0);
        let quality = base_quality * (1.0 - size_factor * 0.2);
        Ok(TestExecutionResult {
            solution_quality: quality,
            execution_time: Duration::from_millis((problem_size as u64).min(5000)),
            final_energy: -quality * problem_size as f64,
            best_solution: vec![1; problem_size],
            convergence_achieved: true,
            memory_used: problem_size * 8,
        })
    }

    /// Evaluates one comparison criterion over the per-platform results.
    ///
    /// Each platform's metric is averaged over its test cases, then every
    /// platform pair is compared. Platforms without any applicable results
    /// are excluded from the comparison.
    fn compare_platforms(
        &self,
        platform_results: &HashMap<String, PlatformTestResults>,
        criterion: &ComparisonCriterion,
    ) -> ApplicationResult<ComparisonResult> {
        let mut metric_values = HashMap::new();
        for (platform_id, results) in platform_results {
            let values: Vec<f64> = results
                .test_results
                .values()
                .map(|result| self.extract_metric_value(result, &criterion.metric))
                .collect();
            if !values.is_empty() {
                let mean_value = values.iter().sum::<f64>() / values.len() as f64;
                metric_values.insert(platform_id.clone(), mean_value);
            }
        }
        // Sort platform ids so pair keys ("a_b") are deterministic; raw
        // HashMap iteration order varies between runs.
        let mut platforms: Vec<_> = metric_values.keys().collect();
        platforms.sort();
        let mut differences = HashMap::new();
        for i in 0..platforms.len() {
            for j in (i + 1)..platforms.len() {
                let platform1 = platforms[i];
                let platform2 = platforms[j];
                let value1 = metric_values[platform1];
                let value2 = metric_values[platform2];
                let difference = match criterion.comparison_type {
                    ComparisonType::AbsoluteDifference => (value1 - value2).abs(),
                    ComparisonType::RelativeDifference => {
                        // Normalize by the larger magnitude so the result is
                        // well-defined for negative metrics (e.g. energies)
                        // and never NaN; two exact zeros compare as equal.
                        let denom = value1.abs().max(value2.abs());
                        if denom == 0.0 {
                            0.0
                        } else {
                            ((value1 - value2) / denom).abs()
                        }
                    }
                    // StatisticalEquivalence / Ranking are not implemented
                    // yet; fall back to the absolute difference.
                    _ => (value1 - value2).abs(),
                };
                let pair_key = format!("{platform1}_{platform2}");
                differences.insert(pair_key, difference);
            }
        }
        let max_difference = differences
            .values()
            .fold(0.0f64, |max, &diff| max.max(diff));
        let within_tolerance = max_difference <= criterion.tolerance;
        Ok(ComparisonResult {
            criterion_id: criterion.id.clone(),
            metric: criterion.metric.clone(),
            platform_values: metric_values,
            differences,
            max_difference,
            within_tolerance,
            critical: criterion.critical,
        })
    }

    /// Maps a metric name onto the corresponding field of a test result.
    /// Unrecognized names yield 0.0 (kept for backward compatibility; a
    /// typo in a criterion's metric therefore compares as "no difference").
    fn extract_metric_value(&self, result: &TestExecutionResult, metric: &str) -> f64 {
        match metric {
            "solution_quality" => result.solution_quality,
            "execution_time" => result.execution_time.as_secs_f64(),
            "final_energy" => result.final_energy,
            "memory_used" => result.memory_used as f64,
            _ => 0.0,
        }
    }

    /// Weighted fraction of comparisons that were within tolerance;
    /// critical criteria count double. An empty input scores a perfect 1.0.
    fn calculate_compatibility_score(&self, comparison_results: &[ComparisonResult]) -> f64 {
        if comparison_results.is_empty() {
            return 1.0;
        }
        let total_weight: f64 = comparison_results
            .iter()
            .map(|r| if r.critical { 2.0 } else { 1.0 })
            .sum();
        let weighted_score: f64 = comparison_results
            .iter()
            .map(|r| {
                let score = if r.within_tolerance { 1.0 } else { 0.0 };
                let weight = if r.critical { 2.0 } else { 1.0 };
                score * weight
            })
            .sum();
        weighted_score / total_weight
    }

    /// Summarizes recorded compatibility between two platforms.
    ///
    /// Missing matrix entries fall back to `CompatibilityLevel::Unknown`,
    /// a neutral 0.5 performance score, and no known issues. The lookup is
    /// directional: (a, b) and (b, a) may be recorded separately.
    #[must_use]
    pub fn get_compatibility(&self, platform1: &str, platform2: &str) -> CompatibilityInfo {
        let feature_compatibility = self
            .compatibility_matrix
            .feature_compatibility
            .get(platform1)
            .and_then(|map| map.get(platform2))
            .cloned()
            .unwrap_or(CompatibilityLevel::Unknown);
        let performance_compatibility = self
            .compatibility_matrix
            .performance_compatibility
            .get(platform1)
            .and_then(|map| map.get(platform2))
            .copied()
            .unwrap_or(0.5);
        let known_issues = self
            .compatibility_matrix
            .known_issues
            .get(&format!("{platform1}_{platform2}"))
            .cloned()
            .unwrap_or_default();
        CompatibilityInfo {
            platform_pair: (platform1.to_string(), platform2.to_string()),
            feature_compatibility,
            performance_compatibility,
            known_issues,
        }
    }
}

/// `new()` takes no arguments, so `Default` should exist too
/// (clippy::new_without_default).
impl Default for CrossPlatformValidator {
    fn default() -> Self {
        Self::new()
    }
}
/// Outcome of running one suite across all available platforms.
#[derive(Debug)]
pub struct CrossPlatformValidationResult {
    /// Id of the suite that was validated.
    pub suite_id: String,
    /// Per-platform results, keyed by platform id.
    pub platform_results: HashMap<String, PlatformTestResults>,
    /// One entry per comparison criterion in the suite.
    pub comparison_results: Vec<ComparisonResult>,
    /// Weighted fraction of comparisons within tolerance (0.0–1.0);
    /// critical criteria count double.
    pub compatibility_score: f64,
    /// Total time spent on the validation run.
    pub validation_time: Duration,
}
/// All results obtained on a single platform for one suite run.
#[derive(Debug)]
pub struct PlatformTestResults {
    /// Id of the platform the results belong to.
    pub platform_id: String,
    /// Individual results, keyed by test-case id. Unsupported test cases
    /// are absent rather than recorded as failures.
    pub test_results: HashMap<String, TestExecutionResult>,
    /// Snapshot of the platform definition at execution time.
    pub platform_info: Platform,
    /// Time spent running the suite on this platform.
    pub execution_time: Duration,
}
/// Outcome of evaluating one comparison criterion across platforms.
#[derive(Debug, Clone, PartialEq)]
pub struct ComparisonResult {
    /// Id of the criterion that was evaluated.
    pub criterion_id: String,
    /// Name of the metric that was compared.
    pub metric: String,
    /// Mean metric value per platform, keyed by platform id.
    pub platform_values: HashMap<String, f64>,
    /// Pairwise differences, keyed by a "{platform1}_{platform2}" string.
    pub differences: HashMap<String, f64>,
    /// Largest pairwise difference observed.
    pub max_difference: f64,
    /// Whether `max_difference` is within the criterion's tolerance.
    pub within_tolerance: bool,
    /// Copied from the criterion; critical results carry double weight in
    /// the overall compatibility score.
    pub critical: bool,
}
/// Compatibility summary for one ordered platform pair, as returned by
/// `CrossPlatformValidator::get_compatibility`.
#[derive(Debug, Clone)]
pub struct CompatibilityInfo {
    /// The (first, second) platform ids that were looked up.
    pub platform_pair: (String, String),
    /// Recorded feature compatibility; `Unknown` when no entry exists.
    pub feature_compatibility: CompatibilityLevel,
    /// Recorded performance compatibility score; 0.5 when no entry exists.
    pub performance_compatibility: f64,
    /// Known issues for this pair; empty when none are recorded.
    pub known_issues: Vec<CompatibilityIssue>,
}