use std::collections::{HashMap, HashSet};
use crate::composite::{BackendRole, CompositeConfig, QueryAnalyzer, QueryFeature, QueryRouter};
use crate::core::{BackendCapability, BackendKind};
use crate::types::SearchQuery;
/// Static analyzer for composite backend configurations.
///
/// Validates structural wiring (primary role, IDs, failover chains), reports
/// capability coverage, gaps, and redundancy, and can simulate how a query
/// would be routed and what it would roughly cost.
pub struct ConfigurationAnalyzer {
    /// Reused to extract features / complexity scores in `simulate_query`.
    query_analyzer: QueryAnalyzer,
}
impl ConfigurationAnalyzer {
pub fn new() -> Self {
Self {
query_analyzer: QueryAnalyzer::new(),
}
}
pub fn analyze(&self, config: &CompositeConfig) -> AnalysisResult {
let capability_coverage = self.analyze_capability_coverage(config);
let gap_analysis = self.analyze_gaps(config, &capability_coverage);
let redundancy_report = self.analyze_redundancy(config);
let issues = self.find_issues(config, &capability_coverage, &redundancy_report);
let recommendations = self.generate_recommendations(config, &issues, &gap_analysis);
AnalysisResult {
is_valid: issues.iter().all(|i| i.severity != IssueSeverity::Error),
capability_coverage,
gap_analysis,
redundancy_report,
issues,
recommendations,
}
}
/// Performs structural validation of `config` without running the full
/// analysis pipeline.
///
/// Errors (fail validation): not exactly one `Primary` backend, duplicate
/// backend IDs, failover targets naming unknown backends, and circular
/// failover chains.
/// Warnings (do not fail validation): only one enabled backend, and
/// failover targets pointing at disabled backends.
pub fn validate(&self, config: &CompositeConfig) -> ValidationResult {
    let mut errors = Vec::new();
    let mut warnings = Vec::new();
    // Exactly one backend must hold the Primary role (enabled or not).
    let primaries: Vec<_> = config
        .backends
        .iter()
        .filter(|b| b.role == BackendRole::Primary)
        .collect();
    if primaries.is_empty() {
        errors.push("Configuration must have exactly one primary backend".to_string());
    } else if primaries.len() > 1 {
        errors.push(format!(
            "Configuration has {} primary backends (expected 1)",
            primaries.len()
        ));
    }
    // Backend IDs must be unique; `insert` returns false on a repeat.
    let mut seen_ids = HashSet::new();
    for backend in &config.backends {
        if !seen_ids.insert(&backend.id) {
            errors.push(format!("Duplicate backend ID: {}", backend.id));
        }
    }
    // Every failover target must name a configured backend.
    let backend_ids: HashSet<_> = config.backends.iter().map(|b| &b.id).collect();
    for backend in &config.backends {
        if let Some(ref failover) = backend.failover_to {
            if !backend_ids.contains(failover) {
                errors.push(format!(
                    "Backend '{}' has invalid failover target: '{}'",
                    backend.id, failover
                ));
            }
        }
    }
    // Reject failover chains that loop back on themselves.
    if let Some(cycle) = self.find_failover_cycle(config) {
        errors.push(format!("Circular failover chain detected: {}", cycle));
    }
    // A single enabled backend means failover/redundancy is impossible.
    if config.backends.iter().filter(|b| b.enabled).count() == 1 {
        warnings.push("Only one backend enabled - no redundancy".to_string());
    }
    // Failing over to a disabled backend would be useless at runtime.
    for backend in &config.backends {
        if let Some(ref failover) = backend.failover_to {
            if let Some(target) = config.backends.iter().find(|b| &b.id == failover) {
                if !target.enabled {
                    warnings.push(format!(
                        "Failover target '{}' for backend '{}' is disabled",
                        failover, backend.id
                    ));
                }
            }
        }
    }
    ValidationResult {
        is_valid: errors.is_empty(),
        errors,
        warnings,
    }
}
/// Simulates routing `query` against `config` without executing it.
///
/// Returns the query's extracted features and complexity, the routing
/// decision (or error), and a rough cost estimate: a per-backend-kind base
/// cost (SQLite 1.0, Elasticsearch 2.0, other 1.5) scaled by complexity,
/// or a flat 10.0 when routing fails.
pub fn simulate_query(&self, query: &SearchQuery, config: &CompositeConfig) -> QuerySimulation {
    let analysis = self.query_analyzer.analyze(query);
    let router = QueryRouter::new(config.clone());
    let routing = router.route(query);
    let estimated_cost = match &routing {
        Ok(decision) => {
            // Look the target backend up once instead of re-scanning the
            // backend list inside each match guard.
            let target_kind = config
                .backends
                .iter()
                .find(|b| b.id == decision.primary_target)
                .map(|b| &b.kind);
            let base_cost = match target_kind {
                Some(BackendKind::Sqlite) => 1.0,
                Some(BackendKind::Elasticsearch) => 2.0,
                // Unknown target or other backend kinds get a middle cost.
                _ => 1.5,
            };
            // Each point of complexity adds 10% to the base cost.
            base_cost * (1.0 + analysis.complexity_score as f64 * 0.1)
        }
        // Unroutable queries get a punitive flat cost.
        Err(_) => 10.0,
    };
    QuerySimulation {
        query_features: analysis.features.iter().cloned().collect(),
        complexity_score: analysis.complexity_score,
        routing_decision: routing.as_ref().map(|d| d.primary_target.clone()).ok(),
        auxiliary_targets: routing
            .as_ref()
            .map(|d| d.auxiliary_targets.values().cloned().collect())
            .unwrap_or_default(),
        estimated_cost,
        routing_error: routing.as_ref().err().map(|e| format!("{:?}", e)),
    }
}
/// Computes which capabilities the enabled backends collectively provide,
/// which known capabilities are missing, which backends provide each
/// capability, and the percentage of the known capability set covered.
fn analyze_capability_coverage(&self, config: &CompositeConfig) -> CapabilityCoverage {
    let mut covered_capabilities = HashSet::new();
    let mut capability_backends: HashMap<BackendCapability, Vec<String>> = HashMap::new();
    for backend in &config.backends {
        // Disabled backends contribute nothing to coverage.
        if !backend.enabled {
            continue;
        }
        for cap in backend.effective_capabilities() {
            covered_capabilities.insert(cap);
            capability_backends
                .entry(cap)
                .or_default()
                .push(backend.id.clone());
        }
    }
    // The full set of capabilities this analyzer knows how to reason about.
    let all_capabilities: HashSet<_> = [
        BackendCapability::Crud,
        BackendCapability::Versioning,
        BackendCapability::BasicSearch,
        BackendCapability::InstanceHistory,
        BackendCapability::TypeHistory,
        BackendCapability::Transactions,
        BackendCapability::ChainedSearch,
        BackendCapability::FullTextSearch,
        BackendCapability::TerminologySearch,
        BackendCapability::Include,
        BackendCapability::Revinclude,
    ]
    .into_iter()
    .collect();
    let missing_capabilities: HashSet<_> = all_capabilities
        .difference(&covered_capabilities)
        .cloned()
        .collect();
    // Fix: this was hard-coded to 0.0. Report the actual share of the known
    // capability set that is covered, as a percentage in [0, 100].
    let coverage_percentage = covered_capabilities
        .intersection(&all_capabilities)
        .count() as f64
        / all_capabilities.len() as f64
        * 100.0;
    CapabilityCoverage {
        covered: covered_capabilities,
        missing: missing_capabilities,
        capability_backends,
        coverage_percentage,
    }
}
fn analyze_gaps(
&self,
_config: &CompositeConfig,
coverage: &CapabilityCoverage,
) -> GapAnalysis {
let mut feature_gaps = Vec::new();
let critical = [BackendCapability::Crud, BackendCapability::BasicSearch];
for cap in critical {
if coverage.missing.contains(&cap) {
feature_gaps.push(FeatureGap {
capability: cap,
impact: GapImpact::High,
suggestion: format!("Add a backend that supports {:?}", cap),
});
}
}
let advanced = [
BackendCapability::ChainedSearch,
BackendCapability::FullTextSearch,
BackendCapability::TerminologySearch,
];
for cap in advanced {
if coverage.missing.contains(&cap) {
feature_gaps.push(FeatureGap {
capability: cap,
impact: GapImpact::Medium,
suggestion: format!("Consider adding a specialized backend for {:?}", cap),
});
}
}
let total_features = critical.len() + advanced.len();
let covered_features = total_features - feature_gaps.len();
let completeness = covered_features as f64 / total_features as f64;
GapAnalysis {
feature_gaps,
completeness_score: completeness,
recommendations: Vec::new(), }
}
/// Reports capabilities provided by more than one enabled backend, and
/// flags non-primary backends whose every capability is also provided by
/// some other backend (i.e. they could be removed without losing coverage).
///
/// The redundancy score is the fraction of capability assignments (among
/// overlapping capabilities) beyond the first provider of each capability.
fn analyze_redundancy(&self, config: &CompositeConfig) -> RedundancyReport {
    let mut overlapping_capabilities: HashMap<BackendCapability, Vec<String>> = HashMap::new();
    let mut redundant_backends = Vec::new();
    for backend in &config.backends {
        if !backend.enabled {
            continue;
        }
        for cap in backend.effective_capabilities() {
            overlapping_capabilities
                .entry(cap)
                .or_default()
                .push(backend.id.clone());
        }
    }
    // Keep only capabilities provided by more than one backend.
    overlapping_capabilities.retain(|_, v| v.len() > 1);
    for backend in &config.backends {
        // The primary backend is never considered redundant.
        if !backend.enabled || backend.role == BackendRole::Primary {
            continue;
        }
        let capabilities = backend.effective_capabilities();
        // Redundant iff every capability of this backend is also provided
        // by at least one other backend.
        let all_covered = capabilities.iter().all(|cap| {
            overlapping_capabilities
                .get(cap)
                .map(|backends| backends.iter().any(|b| b != &backend.id))
                .unwrap_or(false)
        });
        if all_covered && !capabilities.is_empty() {
            // Fix: only list backends that actually share one of THIS
            // backend's capabilities (previously any backend appearing in
            // any overlapping capability was listed), and sort so the
            // output is deterministic (HashMap/HashSet order varies).
            let mut covered_by: Vec<String> = capabilities
                .iter()
                .filter_map(|cap| overlapping_capabilities.get(cap))
                .flatten()
                .filter(|b| *b != &backend.id)
                .cloned()
                .collect::<HashSet<_>>()
                .into_iter()
                .collect();
            covered_by.sort();
            redundant_backends.push(RedundantBackend {
                backend_id: backend.id.clone(),
                covered_by,
                reason: "All capabilities provided by other backends".to_string(),
            });
        }
    }
    let total_capability_assignments: usize =
        overlapping_capabilities.values().map(|v| v.len()).sum();
    let unique_capabilities = overlapping_capabilities.len();
    // Every retained list has len > 1, so the divisor is nonzero whenever
    // unique_capabilities > 0.
    let redundancy_score = if unique_capabilities > 0 {
        (total_capability_assignments - unique_capabilities) as f64
            / total_capability_assignments as f64
    } else {
        0.0
    };
    RedundancyReport {
        overlapping_capabilities,
        redundant_backends,
        redundancy_score,
    }
}
/// Translates the analysis artifacts into a flat list of issues, each with
/// a severity, a category, and an optional remediation suggestion.
fn find_issues(
    &self,
    config: &CompositeConfig,
    coverage: &CapabilityCoverage,
    redundancy: &RedundancyReport,
) -> Vec<ConfigurationIssue> {
    let mut issues = Vec::new();
    let primary_count = config
        .backends
        .iter()
        .filter(|b| b.role == BackendRole::Primary)
        .count();
    // Exactly one primary backend is required.
    match primary_count {
        0 => issues.push(ConfigurationIssue {
            severity: IssueSeverity::Error,
            category: IssueCategory::MissingRequirement,
            message: "No primary backend configured".to_string(),
            suggestion: Some("Add a backend with role Primary".to_string()),
        }),
        1 => {}
        _ => issues.push(ConfigurationIssue {
            severity: IssueSeverity::Error,
            category: IssueCategory::Configuration,
            message: "Multiple primary backends configured".to_string(),
            suggestion: Some("Only one backend should have role Primary".to_string()),
        }),
    }
    // Missing CRUD is fatal, missing basic search degrades service, and
    // everything else is merely informational.
    issues.extend(coverage.missing.iter().map(|cap| {
        let severity = match cap {
            BackendCapability::Crud => IssueSeverity::Error,
            BackendCapability::BasicSearch => IssueSeverity::Warning,
            _ => IssueSeverity::Info,
        };
        ConfigurationIssue {
            severity,
            category: IssueCategory::MissingCapability,
            message: format!("Missing capability: {:?}", cap),
            suggestion: Some(format!("Add a backend that supports {:?}", cap)),
        }
    }));
    // Warn when more than half of the capability assignments are duplicates.
    if redundancy.redundancy_score > 0.5 {
        issues.push(ConfigurationIssue {
            severity: IssueSeverity::Warning,
            category: IssueCategory::Redundancy,
            message: format!(
                "High redundancy detected ({:.0}%)",
                redundancy.redundancy_score * 100.0
            ),
            suggestion: Some("Consider consolidating backends".to_string()),
        });
    }
    issues
}
fn generate_recommendations(
&self,
_config: &CompositeConfig,
issues: &[ConfigurationIssue],
gap_analysis: &GapAnalysis,
) -> Vec<Recommendation> {
let mut recommendations = Vec::new();
for issue in issues {
if let Some(ref suggestion) = issue.suggestion {
let priority = match issue.severity {
IssueSeverity::Error => RecommendationPriority::Critical,
IssueSeverity::Warning => RecommendationPriority::High,
IssueSeverity::Info => RecommendationPriority::Medium,
};
recommendations.push(Recommendation {
priority,
title: format!("Fix: {}", issue.message),
description: suggestion.clone(),
impact: format!("Resolves {:?} issue", issue.category),
});
}
}
for gap in &gap_analysis.feature_gaps {
recommendations.push(Recommendation {
priority: match gap.impact {
GapImpact::High => RecommendationPriority::High,
GapImpact::Medium => RecommendationPriority::Medium,
GapImpact::Low => RecommendationPriority::Low,
},
title: format!("Add support for {:?}", gap.capability),
description: gap.suggestion.clone(),
impact: format!("Enables {:?} operations", gap.capability),
});
}
recommendations
}
fn find_failover_cycle(&self, config: &CompositeConfig) -> Option<String> {
for backend in &config.backends {
let mut visited = HashSet::new();
let mut current = backend.id.clone();
while let Some(next) = config
.backends
.iter()
.find(|b| b.id == current)
.and_then(|b| b.failover_to.clone())
{
if !visited.insert(current.clone()) {
return Some(format!("{} -> {}", current, next));
}
current = next;
}
}
None
}
}
impl Default for ConfigurationAnalyzer {
    /// Equivalent to [`ConfigurationAnalyzer::new`].
    fn default() -> Self {
        Self::new()
    }
}
/// Aggregated output of [`ConfigurationAnalyzer::analyze`].
#[derive(Debug, Clone)]
pub struct AnalysisResult {
    /// True when no `Error`-severity issue was found.
    pub is_valid: bool,
    /// Which capabilities are provided, missing, and by which backends.
    pub capability_coverage: CapabilityCoverage,
    /// Functional gaps derived from missing capabilities.
    pub gap_analysis: GapAnalysis,
    /// Overlapping capabilities and potentially removable backends.
    pub redundancy_report: RedundancyReport,
    /// All detected issues, across severities.
    pub issues: Vec<ConfigurationIssue>,
    /// Actionable suggestions derived from issues and gaps.
    pub recommendations: Vec<Recommendation>,
}
/// Output of [`ConfigurationAnalyzer::validate`]: structural errors (which
/// fail validation) and warnings (which do not).
#[derive(Debug, Clone)]
pub struct ValidationResult {
    /// True iff `errors` is empty.
    pub is_valid: bool,
    /// Hard configuration errors.
    pub errors: Vec<String>,
    /// Non-fatal configuration concerns.
    pub warnings: Vec<String>,
}
/// Which capabilities the enabled backends collectively provide.
#[derive(Debug, Clone)]
pub struct CapabilityCoverage {
    /// Capabilities provided by at least one enabled backend.
    pub covered: HashSet<BackendCapability>,
    /// Known capabilities no enabled backend provides.
    pub missing: HashSet<BackendCapability>,
    /// Capability -> IDs of the enabled backends providing it.
    pub capability_backends: HashMap<BackendCapability, Vec<String>>,
    /// Share of the known capability set that is covered, in percent.
    pub coverage_percentage: f64,
}
/// Missing-capability gaps and an overall completeness score.
#[derive(Debug, Clone)]
pub struct GapAnalysis {
    /// One entry per missing critical/advanced capability.
    pub feature_gaps: Vec<FeatureGap>,
    /// Covered fraction of the tracked capability sets, in [0, 1].
    pub completeness_score: f64,
    /// Free-form suggestions (currently unpopulated by `analyze_gaps`).
    pub recommendations: Vec<String>,
}
/// A single missing capability and how to remedy it.
#[derive(Debug, Clone)]
pub struct FeatureGap {
    /// The capability no enabled backend provides.
    pub capability: BackendCapability,
    /// How strongly the gap affects functionality.
    pub impact: GapImpact,
    /// Human-readable remediation advice.
    pub suggestion: String,
}
/// Severity of a capability gap's impact on functionality.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GapImpact {
    High,
    Medium,
    Low,
}
/// Where enabled backends duplicate each other's capabilities.
#[derive(Debug, Clone)]
pub struct RedundancyReport {
    /// Capabilities provided by more than one backend, with provider IDs.
    pub overlapping_capabilities: HashMap<BackendCapability, Vec<String>>,
    /// Non-primary backends whose every capability is provided elsewhere.
    pub redundant_backends: Vec<RedundantBackend>,
    /// Fraction of overlapping capability assignments that are duplicates.
    pub redundancy_score: f64,
}
/// A backend that could be removed without losing capability coverage.
#[derive(Debug, Clone)]
pub struct RedundantBackend {
    /// ID of the redundant backend.
    pub backend_id: String,
    /// IDs of the backends that provide its capabilities.
    pub covered_by: Vec<String>,
    /// Human-readable explanation of why it is considered redundant.
    pub reason: String,
}
/// A single problem detected in the configuration.
#[derive(Debug, Clone)]
pub struct ConfigurationIssue {
    /// How serious the issue is; `Error` makes the analysis invalid.
    pub severity: IssueSeverity,
    /// What kind of problem this is.
    pub category: IssueCategory,
    /// Human-readable description.
    pub message: String,
    /// Optional remediation advice; feeds `generate_recommendations`.
    pub suggestion: Option<String>,
}
/// Severity of a [`ConfigurationIssue`]; only `Error` fails the analysis.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum IssueSeverity {
    Error,
    Warning,
    Info,
}
/// Classification of a [`ConfigurationIssue`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum IssueCategory {
    /// A mandatory element (e.g. the primary backend) is absent.
    MissingRequirement,
    /// A capability no enabled backend provides.
    MissingCapability,
    /// A structural misconfiguration (e.g. multiple primaries).
    Configuration,
    /// Excessive capability duplication across backends.
    Redundancy,
}
/// An actionable suggestion produced from issues and feature gaps.
#[derive(Debug, Clone)]
pub struct Recommendation {
    /// Urgency, derived from issue severity or gap impact.
    pub priority: RecommendationPriority,
    /// Short headline for the action.
    pub title: String,
    /// What to do.
    pub description: String,
    /// What doing it would achieve.
    pub impact: String,
}
/// Urgency of a [`Recommendation`]. Variant order gives `Ord`:
/// `Critical < High < Medium < Low`, so sorting ascending puts the most
/// urgent recommendations first.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum RecommendationPriority {
    Critical,
    High,
    Medium,
    Low,
}
/// Output of [`ConfigurationAnalyzer::simulate_query`]: how a query would
/// be routed and roughly what it would cost, without executing it.
#[derive(Debug, Clone)]
pub struct QuerySimulation {
    /// Features extracted from the query.
    pub query_features: Vec<QueryFeature>,
    /// Complexity score assigned by the query analyzer.
    pub complexity_score: u8,
    /// ID of the chosen primary target, if routing succeeded.
    pub routing_decision: Option<String>,
    /// IDs of any auxiliary backends the router selected.
    pub auxiliary_targets: Vec<String>,
    /// Relative cost estimate (backend base cost scaled by complexity;
    /// flat 10.0 when routing failed).
    pub estimated_cost: f64,
    /// Debug-formatted routing error, if routing failed.
    pub routing_error: Option<String>,
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::composite::{BackendEntry, CompositeConfigBuilder};

    // Smoke test: the analyzer constructs and its inner query analyzer
    // can process a simple query without panicking.
    #[test]
    fn test_analyzer_creation() {
        let analyzer = ConfigurationAnalyzer::new();
        let _analysis = analyzer
            .query_analyzer
            .analyze(&SearchQuery::new("Patient"));
    }

    // A config with only a non-primary backend must fail validation with
    // an error mentioning "primary".
    #[test]
    fn test_validation_no_primary() {
        let analyzer = ConfigurationAnalyzer::new();
        let config = CompositeConfig {
            backends: vec![BackendEntry::new(
                "secondary",
                BackendRole::Search,
                BackendKind::Elasticsearch,
            )],
            ..Default::default()
        };
        let result = analyzer.validate(&config);
        assert!(!result.is_valid);
        assert!(result.errors.iter().any(|e| e.contains("primary")));
    }

    // A builder-produced config with exactly one primary passes validation.
    #[test]
    fn test_validation_valid_config() {
        let analyzer = ConfigurationAnalyzer::new();
        let config = CompositeConfigBuilder::new()
            .primary("sqlite", BackendKind::Sqlite)
            .build()
            .unwrap();
        let result = analyzer.validate(&config);
        assert!(result.is_valid);
    }

    // With two enabled backends, at least one capability must be covered.
    #[test]
    fn test_analysis_capability_coverage() {
        let analyzer = ConfigurationAnalyzer::new();
        let config = CompositeConfigBuilder::new()
            .primary("sqlite", BackendKind::Sqlite)
            .search_backend("es", BackendKind::Elasticsearch)
            .build()
            .unwrap();
        let result = analyzer.analyze(&config);
        assert!(!result.capability_coverage.covered.is_empty());
    }

    // Simulating a simple query against a valid config yields a positive
    // cost estimate (base cost is at least 1.0 even at zero complexity).
    #[test]
    fn test_query_simulation() {
        let analyzer = ConfigurationAnalyzer::new();
        let config = CompositeConfigBuilder::new()
            .primary("sqlite", BackendKind::Sqlite)
            .build()
            .unwrap();
        let query = SearchQuery::new("Patient");
        let simulation = analyzer.simulate_query(&query, &config);
        assert!(simulation.estimated_cost > 0.0);
    }
}