1use async_trait::async_trait;
41use serde::{Deserialize, Serialize};
42use std::collections::{HashMap, HashSet};
43use std::sync::Arc;
44use std::time::Duration;
45use tokio::sync::RwLock;
46
/// Lifecycle policy controlling when a plugin is loaded, unloaded, or suspended.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum PluginPolicy {
    /// Loaded at startup and never unloaded or suspended.
    AlwaysOn,
    /// Loaded lazily when one of its capabilities is needed; unloadable when idle.
    OnDemand,
    /// Loaded lazily; intended to persist once loaded — NOTE(review): semantics inferred from the name, confirm.
    LazyPersistent,
    /// Scoped to a workflow's lifetime — NOTE(review): semantics inferred from the name, confirm.
    WorkflowScoped,
    /// Can be suspended to free memory and later resumed.
    Suspendable,
}
65
/// Runtime state of a managed plugin.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum PluginState {
    /// Registered but not loaded; consumes no memory budget.
    Unloaded,
    /// Transitional state while the plugin is being initialized.
    Loading,
    /// Loaded and usable; counted against the memory budget.
    Active,
    /// Temporarily parked; its memory is released from the budget but its
    /// capabilities stay registered.
    Suspended,
    /// The plugin failed to load or run.
    Error,
}
80
/// Bookkeeping record for a single plugin tracked by the manager.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PluginLifecycle {
    /// Unique plugin name.
    pub name: String,
    /// Load/unload policy applied to this plugin.
    pub policy: PluginPolicy,
    /// Current runtime state.
    pub state: PluginState,
    /// Capabilities this plugin provides when active.
    pub capabilities: Vec<Capability>,
    /// Last time the plugin was loaded or used (`None` if never).
    pub last_used: Option<chrono::DateTime<chrono::Utc>>,
    /// Estimated memory footprint in megabytes.
    pub memory_mb: f64,
    /// Whether initialization has completed at least once.
    pub initialized: bool,
    /// Last error message, if any.
    pub error: Option<String>,
}
101
102impl PluginLifecycle {
103 pub fn always_on(name: &str, capabilities: Vec<Capability>) -> Self {
105 Self {
106 name: name.to_string(),
107 policy: PluginPolicy::AlwaysOn,
108 state: PluginState::Active,
109 capabilities,
110 last_used: Some(chrono::Utc::now()),
111 memory_mb: 0.0,
112 initialized: true,
113 error: None,
114 }
115 }
116
117 pub fn on_demand(name: &str, capabilities: Vec<Capability>, memory_mb: f64) -> Self {
119 Self {
120 name: name.to_string(),
121 policy: PluginPolicy::OnDemand,
122 state: PluginState::Unloaded,
123 capabilities,
124 last_used: None,
125 memory_mb,
126 initialized: false,
127 error: None,
128 }
129 }
130
131 pub fn can_unload(&self) -> bool {
133 self.policy != PluginPolicy::AlwaysOn && self.state == PluginState::Active
134 }
135
136 pub fn can_suspend(&self) -> bool {
138 matches!(
139 self.policy,
140 PluginPolicy::Suspendable | PluginPolicy::OnDemand
141 ) && self.state == PluginState::Active
142 }
143}
144
145pub struct AlwaysOnPlugins;
147
148impl AlwaysOnPlugins {
149 pub fn required() -> Vec<&'static str> {
151 vec![
152 "bootstrap", "judgment", "explainability", ]
156 }
157
158 pub fn required_capabilities() -> Vec<Capability> {
160 vec![Capability::PIIDetection, Capability::TamperEvidentAudit]
161 }
162
163 pub fn is_always_on(plugin_name: &str) -> bool {
165 Self::required().contains(&plugin_name)
166 }
167
168 pub fn is_required_capability(cap: Capability) -> bool {
170 Self::required_capabilities().contains(&cap)
171 }
172}
173
/// Coarse category of what the user is asking for; used to decide which
/// capabilities (and therefore plugins) must be provisioned.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum IntentCategory {
    /// Plain chat; requires no extra capabilities.
    Conversation,
    /// Running an existing model (predict / classify / inference).
    MLInference,
    /// Training or fine-tuning a model.
    MLTraining,
    /// Building or executing multi-step workflows and pipelines.
    WorkflowOrchestration,
    /// Deploying or serving models.
    ModelDeployment,
    /// GPU/hardware tuning and performance work.
    HardwareOptimization,
    /// Learning from feedback over time.
    AdaptiveLearning,
    /// Knowledge-graph / source-attribution queries.
    KnowledgeGraph,
}
194
195impl IntentCategory {
196 pub fn required_capabilities(&self) -> Vec<Capability> {
198 match self {
199 Self::Conversation => vec![],
200 Self::MLInference => vec![Capability::MLInference],
201 Self::MLTraining => vec![
202 Capability::MLTraining,
203 Capability::WorkflowEngine, ],
205 Self::WorkflowOrchestration => {
206 vec![Capability::WorkflowEngine, Capability::TaskScheduling]
207 }
208 Self::ModelDeployment => vec![
209 Capability::CascadeInference,
210 Capability::TensorFlowBackend, ],
212 Self::HardwareOptimization => vec![Capability::HardwareOptimization],
213 Self::AdaptiveLearning => vec![Capability::AdaptiveLearning, Capability::HumanFeedback],
214 Self::KnowledgeGraph => vec![Capability::SourceAttribution],
215 }
216 }
217
218 pub fn suggested_plugins(&self) -> Vec<&'static str> {
220 match self {
221 Self::Conversation => vec![],
222 Self::MLInference => vec!["ml", "local-llm"],
223 Self::MLTraining => vec!["ml", "pytorch", "workflow"],
224 Self::WorkflowOrchestration => vec!["workflow"],
225 Self::ModelDeployment => vec!["tensorflow", "production"],
226 Self::HardwareOptimization => vec!["hardware"],
227 Self::AdaptiveLearning => vec!["adaptive"],
228 Self::KnowledgeGraph => vec!["knowledge"],
229 }
230 }
231}
232
233pub struct IntentDetector;
235
236impl IntentDetector {
237 pub fn detect(text: &str) -> Vec<IntentCategory> {
239 let text_lower = text.to_lowercase();
240 let mut intents = Vec::new();
241
242 if text_lower.contains("predict")
244 || text_lower.contains("classify")
245 || text_lower.contains("inference")
246 || text_lower.contains("run model")
247 || text_lower.contains("analyze with")
248 {
249 intents.push(IntentCategory::MLInference);
250 }
251
252 if text_lower.contains("train")
254 || text_lower.contains("fine-tune")
255 || text_lower.contains("fit model")
256 || text_lower.contains("learning rate")
257 || text_lower.contains("epoch")
258 {
259 intents.push(IntentCategory::MLTraining);
260 }
261
262 if text_lower.contains("workflow")
264 || text_lower.contains("pipeline")
265 || text_lower.contains("orchestrat")
266 || text_lower.contains("schedule")
267 || text_lower.contains("automat")
268 {
269 intents.push(IntentCategory::WorkflowOrchestration);
270 }
271
272 if text_lower.contains("deploy")
274 || text_lower.contains("production")
275 || text_lower.contains("serve model")
276 || text_lower.contains("scale")
277 {
278 intents.push(IntentCategory::ModelDeployment);
279 }
280
281 if text_lower.contains("gpu")
283 || text_lower.contains("hardware")
284 || text_lower.contains("optimize")
285 || text_lower.contains("performance")
286 {
287 intents.push(IntentCategory::HardwareOptimization);
288 }
289
290 if text_lower.contains("feedback")
292 || text_lower.contains("improve")
293 || text_lower.contains("learn from")
294 || text_lower.contains("adapt")
295 {
296 intents.push(IntentCategory::AdaptiveLearning);
297 }
298
299 if intents.is_empty() {
301 intents.push(IntentCategory::Conversation);
302 }
303
304 intents
305 }
306
307 pub fn capabilities_for_intents(intents: &[IntentCategory]) -> HashSet<Capability> {
309 intents
310 .iter()
311 .flat_map(|i| i.required_capabilities())
312 .collect()
313 }
314
315 pub fn plugins_for_intents(intents: &[IntentCategory]) -> HashSet<&'static str> {
317 intents.iter().flat_map(|i| i.suggested_plugins()).collect()
318 }
319}
320
/// Fine-grained feature flags advertised by plugins and providers.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum Capability {
    // Core machine-learning features.
    MLCore,
    MLInference,
    MLTraining,
    MLCompression,
    MLHyperparameterOptimization,

    // Framework backends.
    PyTorchBackend,
    TensorFlowBackend,
    TFLiteEdge,
    DistributedTraining,

    // Workflow / orchestration.
    WorkflowEngine,
    TaskScheduling,
    PipelineOrchestration,

    // Production and serving.
    CascadeInference,
    MemoryManagement,
    ComplianceAutomation,

    // Adaptive learning.
    AdaptiveLearning,
    HumanFeedback,
    KnowledgeDistillation,

    // Explainability and audit.
    ReasoningChains,
    SourceAttribution,
    TamperEvidentAudit,

    // Local / hardware execution.
    HardwareOptimization,
    LocalVectorDB,
    LocalLLMInference,

    // Privacy / compliance.
    PIIDetection,
    HIPAACompliance,
}
392
393#[derive(Debug, Default)]
395pub struct CapabilityRegistry {
396 capabilities: HashSet<Capability>,
397 plugin_capabilities: HashMap<String, Vec<Capability>>,
398}
399
400impl CapabilityRegistry {
401 pub fn new() -> Self {
403 Self::default()
404 }
405
406 pub fn register(&mut self, capability: Capability) {
408 self.capabilities.insert(capability);
409 }
410
411 pub fn register_from_plugin(&mut self, plugin_name: &str, capability: Capability) {
413 self.capabilities.insert(capability);
414 self.plugin_capabilities
415 .entry(plugin_name.to_string())
416 .or_default()
417 .push(capability);
418 }
419
420 pub fn has(&self, capability: Capability) -> bool {
422 self.capabilities.contains(&capability)
423 }
424
425 pub fn has_all(&self, capabilities: &[Capability]) -> bool {
427 capabilities.iter().all(|c| self.capabilities.contains(c))
428 }
429
430 pub fn has_any(&self, capabilities: &[Capability]) -> bool {
432 capabilities.iter().any(|c| self.capabilities.contains(c))
433 }
434
435 pub fn all(&self) -> &HashSet<Capability> {
437 &self.capabilities
438 }
439
440 pub fn get_plugin_capabilities(&self, plugin_name: &str) -> Option<&Vec<Capability>> {
442 self.plugin_capabilities.get(plugin_name)
443 }
444
445 pub fn unregister_plugin(&mut self, plugin_name: &str) {
447 if let Some(caps) = self.plugin_capabilities.remove(plugin_name) {
448 for cap in caps {
449 let still_provided = self.plugin_capabilities.values().any(|v| v.contains(&cap));
451 if !still_provided {
452 self.capabilities.remove(&cap);
453 }
454 }
455 }
456 }
457}
458
/// Implemented by providers that can run ML inference.
#[async_trait]
pub trait MLCapable: Send + Sync {
    /// Run inference with `model_name` on `input`; returns the model output
    /// or an error message.
    async fn infer(
        &self,
        model_name: &str,
        input: serde_json::Value,
    ) -> Result<serde_json::Value, String>;

    /// Names of the models this provider can serve.
    async fn list_models(&self) -> Vec<String>;

    /// Status of a specific model, or `None` if unknown.
    async fn model_status(&self, model_name: &str) -> Option<ModelStatus>;
}
475
/// Implemented by providers that can create and execute workflows.
#[async_trait]
pub trait WorkflowCapable: Send + Sync {
    /// Create a workflow from `config`; returns the new workflow's id.
    async fn create_workflow(&self, config: WorkflowConfig) -> Result<String, String>;

    /// Execute a previously created workflow to completion.
    async fn execute_workflow(&self, workflow_id: &str) -> Result<WorkflowResult, String>;

    /// Progress of a workflow, or `None` if unknown.
    async fn workflow_status(&self, workflow_id: &str) -> Option<WorkflowStatus>;
}
488
/// Implemented by providers that can inspect and tune the host hardware.
#[async_trait]
pub trait HardwareCapable: Send + Sync {
    /// Snapshot of the host's CPU / memory / GPU configuration.
    async fn get_hardware_info(&self) -> HardwareInfo;

    /// Tuning recommendations for the current hardware.
    async fn get_recommendations(&self) -> Vec<OptimizationRecommendation>;
}
498
/// Implemented by providers that can explain decisions and record audits.
#[async_trait]
pub trait ExplainabilityCapable: Send + Sync {
    /// Explanation for a previously recorded decision, or `None` if unknown.
    async fn explain(&self, decision_id: &str) -> Option<Explanation>;

    /// Persist an audit entry; returns its record id.
    async fn record_for_audit(&self, context: AuditContext) -> Result<String, String>;
}
508
/// Implemented by providers that learn from accumulated examples.
#[async_trait]
pub trait AdaptiveCapable: Send + Sync {
    /// Queue a training example for future fine-tuning.
    async fn add_example(&self, example: TrainingExample) -> Result<(), String>;

    /// Fine-tune if the provider decides enough data has accumulated;
    /// returns `None` when no fine-tune was performed.
    async fn maybe_finetune(&self) -> Option<FinetuneResult>;
}
518
/// Snapshot of a model's availability as reported by an ML provider.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelStatus {
    /// Model identifier.
    pub name: String,
    /// Provider-defined state string.
    pub state: String,
    /// Framework the model runs on.
    pub framework: String,
    /// Model version label.
    pub version: String,
    /// Optional provider-defined metrics.
    pub metrics: Option<serde_json::Value>,
}
533
/// Declarative description of a workflow to be created by a provider.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WorkflowConfig {
    /// Human-readable workflow name.
    pub name: String,
    /// Tasks making up the workflow.
    pub tasks: Vec<TaskConfig>,
}
542
/// One task inside a [`WorkflowConfig`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskConfig {
    /// Task name, used as a dependency key by other tasks.
    pub name: String,
    /// Provider-interpreted task type identifier.
    pub task_type: String,
    /// Names of tasks that must complete before this one runs.
    pub depends_on: Vec<String>,
    /// Free-form task parameters.
    pub params: serde_json::Value,
}
555
/// Outcome of executing a workflow.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WorkflowResult {
    /// Identifier of the executed workflow.
    pub workflow_id: String,
    /// Provider-defined overall status string.
    pub status: String,
    /// Per-task output values, keyed by task name.
    pub task_results: HashMap<String, serde_json::Value>,
    /// Wall-clock execution time in milliseconds.
    pub execution_time_ms: u64,
}
568
/// Point-in-time progress of a running workflow.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WorkflowStatus {
    /// Workflow identifier.
    pub id: String,
    /// Provider-defined status string.
    pub status: String,
    /// Completion fraction — NOTE(review): scale (0-1 vs 0-100) is provider-defined; confirm.
    pub progress: f64,
    /// Name of the task currently executing, if any.
    pub current_task: Option<String>,
}
581
/// Snapshot of the host's compute resources.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HardwareInfo {
    /// CPU details.
    pub cpu: CpuInfo,
    /// Memory details.
    pub memory: MemoryInfo,
    /// GPU details, if a GPU is present.
    pub gpu: Option<GpuInfo>,
}
592
/// CPU description.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CpuInfo {
    /// Physical core count.
    pub cores: usize,
    /// Logical thread count.
    pub threads: usize,
    /// Architecture string (e.g. as reported by the platform).
    pub architecture: String,
}
603
/// System memory description, in gigabytes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryInfo {
    /// Total installed memory.
    pub total_gb: f64,
    /// Memory currently available.
    pub available_gb: f64,
}
612
/// GPU description.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GpuInfo {
    /// Device name.
    pub name: String,
    /// Video memory in gigabytes.
    pub vram_gb: f64,
    /// Compute capability / API string.
    pub compute: String,
}
623
/// A single hardware-tuning suggestion.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationRecommendation {
    /// Provider-defined category of the recommendation.
    pub recommendation_type: String,
    /// Human-readable description of the issue.
    pub description: String,
    /// Expected impact of applying the recommendation.
    pub impact: String,
    /// Concrete action to take.
    pub action: String,
}
636
/// Explanation of a recorded decision, produced by an explainability provider.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Explanation {
    /// Identifier of the explained decision.
    pub decision_id: String,
    /// Ordered reasoning steps leading to the decision.
    pub reasoning_steps: Vec<String>,
    /// Confidence in the decision.
    pub confidence: f64,
    /// Sources consulted.
    pub sources: Vec<String>,
    /// Options that were considered and rejected.
    pub alternatives: Vec<AlternativeOption>,
}
651
/// A considered-but-rejected option inside an [`Explanation`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlternativeOption {
    /// What the alternative was.
    pub description: String,
    /// Why it was rejected.
    pub rejection_reason: String,
    /// Confidence it would have had, had it been chosen.
    pub hypothetical_confidence: f64,
}
662
/// Context captured when recording a decision for audit.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditContext {
    /// Category of the decision being audited.
    pub decision_type: String,
    /// Hash of the decision's input (the raw input is not stored here).
    pub input_hash: String,
    /// Hash of the decision's output.
    pub output_hash: String,
    /// Identifier of the agent that made the decision.
    pub agent_id: String,
    /// Free-form additional metadata.
    pub metadata: serde_json::Value,
}
677
/// One input/output pair queued for adaptive learning.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TrainingExample {
    /// Example input text.
    pub input: String,
    /// Desired output text.
    pub output: String,
    /// Quality score assigned to the example.
    pub quality: f64,
    /// Where the example came from.
    pub source: String,
}
690
/// Outcome of a fine-tuning run performed by an adaptive provider.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FinetuneResult {
    /// Name of the adapter produced by the run.
    pub adapter_name: String,
    /// Number of training examples consumed.
    pub examples_used: usize,
    /// Final training loss.
    pub final_loss: f64,
    /// Validation accuracy, if a validation pass was performed.
    pub validation_accuracy: Option<f64>,
}
703
/// Central hub connecting capability providers (ML, workflow, hardware,
/// explainability, adaptive learning) to consumers via a shared
/// [`CapabilityRegistry`].
pub struct IntegrationBridge {
    // Shared registry, updated as providers register.
    registry: Arc<RwLock<CapabilityRegistry>>,
    // Provider lists; the `try_*` helpers use the first registered provider.
    ml_providers: Arc<RwLock<Vec<Arc<dyn MLCapable>>>>,
    workflow_providers: Arc<RwLock<Vec<Arc<dyn WorkflowCapable>>>>,
    hardware_providers: Arc<RwLock<Vec<Arc<dyn HardwareCapable>>>>,
    explainability_providers: Arc<RwLock<Vec<Arc<dyn ExplainabilityCapable>>>>,
    adaptive_providers: Arc<RwLock<Vec<Arc<dyn AdaptiveCapable>>>>,
}
713
impl Default for IntegrationBridge {
    /// Equivalent to [`IntegrationBridge::new`].
    fn default() -> Self {
        Self::new()
    }
}
719
720impl IntegrationBridge {
721 pub fn new() -> Self {
723 Self {
724 registry: Arc::new(RwLock::new(CapabilityRegistry::new())),
725 ml_providers: Arc::new(RwLock::new(Vec::new())),
726 workflow_providers: Arc::new(RwLock::new(Vec::new())),
727 hardware_providers: Arc::new(RwLock::new(Vec::new())),
728 explainability_providers: Arc::new(RwLock::new(Vec::new())),
729 adaptive_providers: Arc::new(RwLock::new(Vec::new())),
730 }
731 }
732
733 pub fn registry(&self) -> Arc<RwLock<CapabilityRegistry>> {
735 self.registry.clone()
736 }
737
738 pub async fn register_ml_provider(&self, provider: Arc<dyn MLCapable>) {
740 self.ml_providers.write().await.push(provider);
741 self.registry.write().await.register(Capability::MLCore);
742 self.registry
743 .write()
744 .await
745 .register(Capability::MLInference);
746 }
747
748 pub async fn register_workflow_provider(&self, provider: Arc<dyn WorkflowCapable>) {
750 self.workflow_providers.write().await.push(provider);
751 self.registry
752 .write()
753 .await
754 .register(Capability::WorkflowEngine);
755 }
756
757 pub async fn register_hardware_provider(&self, provider: Arc<dyn HardwareCapable>) {
759 self.hardware_providers.write().await.push(provider);
760 self.registry
761 .write()
762 .await
763 .register(Capability::HardwareOptimization);
764 }
765
766 pub async fn register_explainability_provider(&self, provider: Arc<dyn ExplainabilityCapable>) {
768 self.explainability_providers.write().await.push(provider);
769 self.registry
770 .write()
771 .await
772 .register(Capability::ReasoningChains);
773 self.registry
774 .write()
775 .await
776 .register(Capability::TamperEvidentAudit);
777 }
778
779 pub async fn register_adaptive_provider(&self, provider: Arc<dyn AdaptiveCapable>) {
781 self.adaptive_providers.write().await.push(provider);
782 self.registry
783 .write()
784 .await
785 .register(Capability::AdaptiveLearning);
786 }
787
788 pub async fn try_infer(
792 &self,
793 model_name: &str,
794 input: serde_json::Value,
795 ) -> Option<serde_json::Value> {
796 let providers = self.ml_providers.read().await;
797 if let Some(provider) = providers.first() {
798 provider.infer(model_name, input).await.ok()
799 } else {
800 tracing::debug!("ML inference not available - no ML provider registered");
801 None
802 }
803 }
804
805 pub async fn try_run_workflow(&self, config: WorkflowConfig) -> Option<WorkflowResult> {
807 let providers = self.workflow_providers.read().await;
808 if let Some(provider) = providers.first() {
809 if let Ok(workflow_id) = provider.create_workflow(config).await {
810 return provider.execute_workflow(&workflow_id).await.ok();
811 }
812 } else {
813 tracing::debug!("Workflow not available - no workflow provider registered");
814 }
815 None
816 }
817
818 pub async fn try_get_hardware_recommendations(&self) -> Vec<OptimizationRecommendation> {
820 let providers = self.hardware_providers.read().await;
821 if let Some(provider) = providers.first() {
822 provider.get_recommendations().await
823 } else {
824 tracing::debug!("Hardware optimization not available");
825 Vec::new()
826 }
827 }
828
829 pub async fn try_record_audit(&self, context: AuditContext) -> Option<String> {
831 let providers = self.explainability_providers.read().await;
832 if let Some(provider) = providers.first() {
833 provider.record_for_audit(context).await.ok()
834 } else {
835 tracing::debug!("Audit recording not available - no explainability provider");
836 None
837 }
838 }
839
840 pub async fn try_add_training_example(&self, example: TrainingExample) -> bool {
842 let providers = self.adaptive_providers.read().await;
843 if let Some(provider) = providers.first() {
844 provider.add_example(example).await.is_ok()
845 } else {
846 tracing::debug!("Adaptive learning not available");
847 false
848 }
849 }
850
851 pub async fn integration_summary(&self) -> IntegrationSummary {
853 let registry = self.registry.read().await;
854 let capabilities = registry.all().clone();
855
856 IntegrationSummary {
857 ml_available: registry.has(Capability::MLCore),
858 workflow_available: registry.has(Capability::WorkflowEngine),
859 hardware_optimization: registry.has(Capability::HardwareOptimization),
860 explainability_available: registry
861 .has_any(&[Capability::ReasoningChains, Capability::TamperEvidentAudit]),
862 adaptive_learning_available: registry.has(Capability::AdaptiveLearning),
863 total_capabilities: capabilities.len(),
864 capabilities: capabilities.into_iter().collect(),
865 }
866 }
867}
868
/// Serializable snapshot of available integrations, produced by
/// [`IntegrationBridge::integration_summary`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IntegrationSummary {
    /// An ML provider is registered.
    pub ml_available: bool,
    /// A workflow provider is registered.
    pub workflow_available: bool,
    /// A hardware provider is registered.
    pub hardware_optimization: bool,
    /// At least one explainability capability is present.
    pub explainability_available: bool,
    /// An adaptive-learning provider is registered.
    pub adaptive_learning_available: bool,
    /// Total number of distinct registered capabilities.
    pub total_capabilities: usize,
    /// The registered capabilities themselves (unordered).
    pub capabilities: Vec<Capability>,
}
887
/// Policy-driven plugin manager that loads plugins on demand within a
/// memory budget, evicting idle plugins when space is needed.
pub struct DynamicPluginManager {
    // Lifecycle record per registered plugin, keyed by name.
    lifecycles: Arc<RwLock<HashMap<String, PluginLifecycle>>>,
    // Count of currently-active plugins per policy.
    active_by_policy: Arc<RwLock<HashMap<PluginPolicy, usize>>>,
    // Upper bound on the combined memory of active plugins, in MB.
    memory_budget_mb: f64,
    // Memory currently attributed to active plugins, in MB.
    current_memory_mb: Arc<RwLock<f64>>,
    // Bridge whose capability registry is kept in sync with loaded plugins.
    bridge: Arc<IntegrationBridge>,
    // Plugins idle longer than this become eviction candidates.
    idle_timeout: Duration,
}
907
908impl DynamicPluginManager {
909 pub fn new(bridge: Arc<IntegrationBridge>, memory_budget_mb: f64) -> Self {
911 Self {
912 lifecycles: Arc::new(RwLock::new(HashMap::new())),
913 active_by_policy: Arc::new(RwLock::new(HashMap::new())),
914 memory_budget_mb,
915 current_memory_mb: Arc::new(RwLock::new(0.0)),
916 bridge,
917 idle_timeout: Duration::from_secs(300), }
919 }
920
921 pub async fn register_always_on(&self, name: &str, capabilities: Vec<Capability>) {
923 let lifecycle = PluginLifecycle::always_on(name, capabilities.clone());
924
925 for cap in &capabilities {
927 self.bridge
928 .registry()
929 .write()
930 .await
931 .register_from_plugin(name, *cap);
932 }
933
934 self.lifecycles
935 .write()
936 .await
937 .insert(name.to_string(), lifecycle);
938
939 let mut counts = self.active_by_policy.write().await;
940 *counts.entry(PluginPolicy::AlwaysOn).or_insert(0) += 1;
941
942 tracing::info!(
943 "Registered always-on plugin: {} with {:?}",
944 name,
945 capabilities
946 );
947 }
948
949 pub async fn register_on_demand(
951 &self,
952 name: &str,
953 capabilities: Vec<Capability>,
954 memory_mb: f64,
955 ) {
956 let lifecycle = PluginLifecycle::on_demand(name, capabilities.clone(), memory_mb);
957 self.lifecycles
958 .write()
959 .await
960 .insert(name.to_string(), lifecycle);
961
962 tracing::info!(
963 "Registered on-demand plugin: {} ({:.1}MB) with {:?}",
964 name,
965 memory_mb,
966 capabilities
967 );
968 }
969
970 pub async fn ensure_capabilities(
972 &self,
973 required: &[Capability],
974 ) -> Result<Vec<String>, String> {
975 let mut loaded_plugins = Vec::new();
976
977 for cap in required {
978 if self.bridge.registry().read().await.has(*cap) {
980 continue;
981 }
982
983 let lifecycles = self.lifecycles.read().await;
985 let provider = lifecycles
986 .values()
987 .find(|l| l.capabilities.contains(cap) && l.state != PluginState::Active);
988
989 if let Some(lifecycle) = provider {
990 let plugin_name = lifecycle.name.clone();
991 drop(lifecycles);
992
993 self.load_plugin(&plugin_name).await?;
995 loaded_plugins.push(plugin_name);
996 }
997 }
998
999 Ok(loaded_plugins)
1000 }
1001
1002 pub async fn load_plugin(&self, name: &str) -> Result<(), String> {
1004 let (already_active, memory_needed) = {
1006 let mut lifecycles = self.lifecycles.write().await;
1007 let lifecycle = lifecycles
1008 .get_mut(name)
1009 .ok_or_else(|| format!("Plugin not registered: {}", name))?;
1010
1011 if lifecycle.state == PluginState::Active {
1012 lifecycle.last_used = Some(chrono::Utc::now());
1013 return Ok(());
1014 }
1015
1016 (lifecycle.state == PluginState::Active, lifecycle.memory_mb)
1017 };
1018
1019 if already_active {
1020 return Ok(());
1021 }
1022
1023 let current = *self.current_memory_mb.read().await;
1025 if current + memory_needed > self.memory_budget_mb {
1026 self.free_memory(memory_needed).await?;
1027 }
1028
1029 let mut lifecycles = self.lifecycles.write().await;
1031 let lifecycle = lifecycles.get_mut(name).unwrap();
1032 lifecycle.state = PluginState::Loading;
1033 let capabilities = lifecycle.capabilities.clone();
1034 let memory = lifecycle.memory_mb;
1035 let policy = lifecycle.policy;
1036
1037 lifecycle.state = PluginState::Active;
1039 lifecycle.initialized = true;
1040 lifecycle.last_used = Some(chrono::Utc::now());
1041 drop(lifecycles);
1042
1043 for cap in &capabilities {
1045 self.bridge
1046 .registry()
1047 .write()
1048 .await
1049 .register_from_plugin(name, *cap);
1050 }
1051
1052 *self.current_memory_mb.write().await += memory;
1054
1055 let mut counts = self.active_by_policy.write().await;
1056 *counts.entry(policy).or_insert(0) += 1;
1057
1058 tracing::info!("Loaded plugin: {} ({:.1}MB)", name, memory);
1059 Ok(())
1060 }
1061
1062 pub async fn unload_plugin(&self, name: &str) -> Result<(), String> {
1064 let mut lifecycles = self.lifecycles.write().await;
1065 let lifecycle = lifecycles
1066 .get_mut(name)
1067 .ok_or_else(|| format!("Plugin not registered: {}", name))?;
1068
1069 if lifecycle.policy == PluginPolicy::AlwaysOn {
1071 return Err(format!("Cannot unload always-on plugin: {}", name));
1072 }
1073
1074 if lifecycle.state != PluginState::Active {
1075 return Ok(());
1076 }
1077
1078 let memory = lifecycle.memory_mb;
1079 lifecycle.state = PluginState::Unloaded;
1080
1081 self.bridge.registry().write().await.unregister_plugin(name);
1083
1084 *self.current_memory_mb.write().await -= memory;
1086
1087 let mut counts = self.active_by_policy.write().await;
1088 if let Some(count) = counts.get_mut(&lifecycle.policy) {
1089 *count = count.saturating_sub(1);
1090 }
1091
1092 tracing::info!("Unloaded plugin: {} (freed {:.1}MB)", name, memory);
1093 Ok(())
1094 }
1095
1096 pub async fn suspend_plugin(&self, name: &str) -> Result<(), String> {
1098 let mut lifecycles = self.lifecycles.write().await;
1099 let lifecycle = lifecycles
1100 .get_mut(name)
1101 .ok_or_else(|| format!("Plugin not registered: {}", name))?;
1102
1103 if !lifecycle.can_suspend() {
1104 return Err(format!("Cannot suspend plugin: {}", name));
1105 }
1106
1107 let memory = lifecycle.memory_mb;
1108 lifecycle.state = PluginState::Suspended;
1109
1110 *self.current_memory_mb.write().await -= memory;
1111
1112 tracing::info!("Suspended plugin: {} (freed {:.1}MB)", name, memory);
1113 Ok(())
1114 }
1115
1116 pub async fn resume_plugin(&self, name: &str) -> Result<(), String> {
1118 let memory_needed = {
1120 let lifecycles = self.lifecycles.read().await;
1121 let lifecycle = lifecycles
1122 .get(name)
1123 .ok_or_else(|| format!("Plugin not registered: {}", name))?;
1124
1125 if lifecycle.state != PluginState::Suspended {
1126 return Err(format!("Plugin is not suspended: {}", name));
1127 }
1128
1129 lifecycle.memory_mb
1130 };
1131
1132 let current = *self.current_memory_mb.read().await;
1134 if current + memory_needed > self.memory_budget_mb {
1135 self.free_memory(memory_needed).await?;
1136 }
1137
1138 let mut lifecycles = self.lifecycles.write().await;
1140 let lifecycle = lifecycles.get_mut(name).unwrap();
1141 lifecycle.state = PluginState::Active;
1142 lifecycle.last_used = Some(chrono::Utc::now());
1143
1144 *self.current_memory_mb.write().await += lifecycle.memory_mb;
1145
1146 tracing::info!("Resumed plugin: {}", name);
1147 Ok(())
1148 }
1149
1150 async fn free_memory(&self, needed_mb: f64) -> Result<(), String> {
1152 let mut freed = 0.0;
1153 let now = chrono::Utc::now();
1154
1155 let lifecycles = self.lifecycles.read().await;
1156 let mut candidates: Vec<_> = lifecycles
1157 .values()
1158 .filter(|l| l.can_suspend() || l.can_unload())
1159 .filter(|l| {
1160 l.last_used
1161 .map(|t| (now - t).num_seconds() > self.idle_timeout.as_secs() as i64)
1162 .unwrap_or(true)
1163 })
1164 .map(|l| (l.name.clone(), l.memory_mb, l.policy))
1165 .collect();
1166 drop(lifecycles);
1167
1168 candidates.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
1170
1171 for (name, memory, policy) in candidates {
1172 if freed >= needed_mb {
1173 break;
1174 }
1175
1176 if matches!(policy, PluginPolicy::Suspendable) {
1178 if self.suspend_plugin(&name).await.is_ok() {
1179 freed += memory;
1180 }
1181 } else if self.unload_plugin(&name).await.is_ok() {
1182 freed += memory;
1183 }
1184 }
1185
1186 if freed >= needed_mb {
1187 Ok(())
1188 } else {
1189 Err(format!(
1190 "Could not free enough memory: needed {:.1}MB, freed {:.1}MB",
1191 needed_mb, freed
1192 ))
1193 }
1194 }
1195
1196 pub async fn process_intent(&self, message: &str) -> ProcessedIntent {
1198 let intents = IntentDetector::detect(message);
1199 let required_caps = IntentDetector::capabilities_for_intents(&intents);
1200 let suggested_plugins = IntentDetector::plugins_for_intents(&intents);
1201
1202 let mut all_caps: Vec<_> = required_caps.into_iter().collect();
1204 for cap in AlwaysOnPlugins::required_capabilities() {
1205 if !all_caps.contains(&cap) {
1206 all_caps.push(cap);
1207 }
1208 }
1209
1210 let loaded = self
1212 .ensure_capabilities(&all_caps)
1213 .await
1214 .unwrap_or_default();
1215
1216 ProcessedIntent {
1217 detected_intents: intents,
1218 required_capabilities: all_caps,
1219 suggested_plugins: suggested_plugins.into_iter().map(String::from).collect(),
1220 plugins_loaded: loaded,
1221 }
1222 }
1223
1224 pub async fn status(&self) -> PluginManagerStatus {
1226 let lifecycles = self.lifecycles.read().await;
1227
1228 let always_on: Vec<_> = lifecycles
1229 .values()
1230 .filter(|l| l.policy == PluginPolicy::AlwaysOn)
1231 .map(|l| l.name.clone())
1232 .collect();
1233
1234 let active: Vec<_> = lifecycles
1235 .values()
1236 .filter(|l| l.state == PluginState::Active && l.policy != PluginPolicy::AlwaysOn)
1237 .map(|l| l.name.clone())
1238 .collect();
1239
1240 let suspended: Vec<_> = lifecycles
1241 .values()
1242 .filter(|l| l.state == PluginState::Suspended)
1243 .map(|l| l.name.clone())
1244 .collect();
1245
1246 let unloaded: Vec<_> = lifecycles
1247 .values()
1248 .filter(|l| l.state == PluginState::Unloaded)
1249 .map(|l| l.name.clone())
1250 .collect();
1251
1252 PluginManagerStatus {
1253 always_on_plugins: always_on,
1254 active_plugins: active,
1255 suspended_plugins: suspended,
1256 unloaded_plugins: unloaded,
1257 memory_used_mb: *self.current_memory_mb.read().await,
1258 memory_budget_mb: self.memory_budget_mb,
1259 }
1260 }
1261}
1262
/// Result of routing one user message through intent detection and
/// capability provisioning.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProcessedIntent {
    /// Intents detected in the message.
    pub detected_intents: Vec<IntentCategory>,
    /// Capabilities required for the intents plus the always-on set.
    pub required_capabilities: Vec<Capability>,
    /// Plugin names suggested for the detected intents.
    pub suggested_plugins: Vec<String>,
    /// Plugins actually loaded while ensuring capabilities.
    pub plugins_loaded: Vec<String>,
}
1275
/// Snapshot of the plugin manager's plugin states and memory usage.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PluginManagerStatus {
    /// Plugins pinned for the process lifetime.
    pub always_on_plugins: Vec<String>,
    /// Active plugins, excluding the always-on set.
    pub active_plugins: Vec<String>,
    /// Plugins currently suspended.
    pub suspended_plugins: Vec<String>,
    /// Plugins registered but not loaded.
    pub unloaded_plugins: Vec<String>,
    /// Memory currently attributed to active plugins, in MB.
    pub memory_used_mb: f64,
    /// Configured memory budget, in MB.
    pub memory_budget_mb: f64,
}
1292
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_capability_registry() {
        let mut reg = CapabilityRegistry::new();
        for cap in [Capability::MLCore, Capability::MLInference] {
            reg.register(cap);
        }

        // Individual lookups.
        assert!(reg.has(Capability::MLCore));
        assert!(reg.has(Capability::MLInference));
        assert!(!reg.has(Capability::WorkflowEngine));

        // Conjunctive lookups.
        assert!(reg.has_all(&[Capability::MLCore, Capability::MLInference]));
        assert!(!reg.has_all(&[Capability::MLCore, Capability::WorkflowEngine]));

        // Disjunctive lookups.
        assert!(reg.has_any(&[Capability::MLCore, Capability::WorkflowEngine]));
        assert!(!reg.has_any(&[Capability::WorkflowEngine, Capability::PyTorchBackend]));
    }

    #[test]
    fn test_plugin_capability_tracking() {
        let mut reg = CapabilityRegistry::new();
        reg.register_from_plugin("ml", Capability::MLCore);
        reg.register_from_plugin("ml", Capability::MLInference);
        reg.register_from_plugin("pytorch", Capability::PyTorchBackend);

        assert_eq!(reg.get_plugin_capabilities("ml").map(Vec::len), Some(2));
        assert_eq!(reg.get_plugin_capabilities("pytorch").map(Vec::len), Some(1));

        // Unregistering "pytorch" withdraws its capability but must not
        // disturb what "ml" still provides.
        reg.unregister_plugin("pytorch");
        assert!(!reg.has(Capability::PyTorchBackend));
        assert!(reg.has(Capability::MLCore));
    }

    #[tokio::test]
    async fn test_integration_bridge() {
        let bridge = IntegrationBridge::new();

        // With no ML provider registered, inference is unavailable.
        let inference = bridge.try_infer("test", serde_json::json!({})).await;
        assert!(inference.is_none());

        // The summary reflects the empty registry.
        let summary = bridge.integration_summary().await;
        assert!(!summary.ml_available);
        assert!(!summary.workflow_available);
        assert_eq!(summary.total_capabilities, 0);
    }
}