use std::sync::Arc;
#[cfg(feature = "a2a")]
use crate::backend::ExecutionBackendFactory;
use crate::plugins::Plugin;
use awaken_contract::contract::executor::LlmExecutor;
use awaken_contract::contract::tool::Tool;
use awaken_contract::registry_spec::{AgentSpec, ModelBindingSpec};
/// Lookup source for tool implementations, keyed by string id.
///
/// `Send + Sync` so an implementation can be shared across threads
/// (e.g. behind an `Arc`, as in [`RegistrySet`]).
pub trait ToolRegistry: Send + Sync {
    /// Returns the tool registered under `id`, or `None` if no such tool exists.
    fn get_tool(&self, id: &str) -> Option<Arc<dyn Tool>>;
    /// Lists the ids of all registered tools.
    fn tool_ids(&self) -> Vec<String>;
}
/// Runtime binding of a logical model id to a concrete provider and the
/// model name that provider understands.
///
/// An owned, cheap-to-clone value type; produced from a `ModelBindingSpec`
/// via the `From<&ModelBindingSpec>` impl below.
// `PartialEq`/`Eq` added: a plain value struct of `String`s should be
// comparable (useful in tests and deduplication); fully backward compatible.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ModelBinding {
    /// Id used to resolve the executor via a `ProviderRegistry`.
    pub provider_id: String,
    /// Model name as understood by the upstream provider API.
    pub upstream_model: String,
}
impl From<&ModelBindingSpec> for ModelBinding {
fn from(spec: &ModelBindingSpec) -> Self {
Self {
provider_id: spec.provider_id.clone(),
upstream_model: spec.upstream_model.clone(),
}
}
}
/// Lookup source for model bindings, keyed by logical model id.
pub trait ModelRegistry: Send + Sync {
    /// Returns the binding registered under `id`, or `None` if no such model exists.
    /// Returned by value: `ModelBinding` is a small, cloneable struct.
    fn get_model(&self, id: &str) -> Option<ModelBinding>;
    /// Lists the ids of all registered models.
    fn model_ids(&self) -> Vec<String>;
}
/// Lookup source for LLM executors, keyed by provider id
/// (the id a [`ModelBinding::provider_id`] refers to).
pub trait ProviderRegistry: Send + Sync {
    /// Returns the executor registered under `id`, or `None` if no such provider exists.
    fn get_provider(&self, id: &str) -> Option<Arc<dyn LlmExecutor>>;
    /// Lists the ids of all registered providers.
    fn provider_ids(&self) -> Vec<String>;
}
/// Lookup source for agent specifications, keyed by agent id.
pub trait AgentSpecRegistry: Send + Sync {
    /// Returns the spec registered under `id`, or `None` if no such agent exists.
    /// Returned by value, so `AgentSpec` is expected to be cloneable by implementors.
    fn get_agent(&self, id: &str) -> Option<AgentSpec>;
    /// Lists the ids of all registered agents.
    fn agent_ids(&self) -> Vec<String>;
}
/// Lookup source for plugins, keyed by plugin id.
pub trait PluginSource: Send + Sync {
    /// Returns the plugin registered under `id`, or `None` if no such plugin exists.
    fn get_plugin(&self, id: &str) -> Option<Arc<dyn Plugin>>;
    /// Lists the ids of all registered plugins.
    fn plugin_ids(&self) -> Vec<String>;
}
/// Lookup source for execution-backend factories, keyed by backend name.
///
/// Only compiled when the `a2a` feature is enabled (matching the cfg-gated
/// `ExecutionBackendFactory` import at the top of the file).
#[cfg(feature = "a2a")]
pub trait BackendRegistry: Send + Sync {
    /// Returns the factory registered under `backend`, or `None` if unknown.
    fn get_backend_factory(&self, backend: &str) -> Option<Arc<dyn ExecutionBackendFactory>>;
    /// Lists the names of all registered backends.
    fn backend_ids(&self) -> Vec<String>;
}
/// Bundles every registry the runtime needs into one handle.
///
/// All fields are `Arc`s to trait objects, so `Clone` only bumps reference
/// counts — the set is cheap to pass around and share between threads.
#[derive(Clone)]
pub struct RegistrySet {
    // Agent specifications, by agent id.
    pub agents: Arc<dyn AgentSpecRegistry>,
    // Tool implementations, by tool id.
    pub tools: Arc<dyn ToolRegistry>,
    // Model bindings, by logical model id.
    pub models: Arc<dyn ModelRegistry>,
    // LLM executors, by provider id.
    pub providers: Arc<dyn ProviderRegistry>,
    // Plugins, by plugin id.
    pub plugins: Arc<dyn PluginSource>,
    // Backend factories; present only when the `a2a` feature is enabled.
    #[cfg(feature = "a2a")]
    pub backends: Arc<dyn BackendRegistry>,
}
#[cfg(test)]
mod tests {
    use super::*;

    /// A `ModelBindingSpec` must map field-for-field onto the runtime
    /// `ModelBinding` (the spec's `id` is intentionally dropped).
    #[test]
    fn model_binding_spec_converts_to_runtime_model_binding() {
        let source = ModelBindingSpec {
            id: "default".into(),
            provider_id: "openai".into(),
            upstream_model: "gpt-4o-mini".into(),
        };
        let converted: ModelBinding = (&source).into();
        assert_eq!(converted.provider_id, "openai");
        assert_eq!(converted.upstream_model, "gpt-4o-mini");
    }
}