// symbi_runtime/src/lib.rs

1//! Symbiont Agent Runtime System
2//!
3//! The Agent Runtime System is the core orchestration layer of the Symbiont platform,
4//! responsible for managing the complete lifecycle of autonomous agents.
5
6pub mod communication;
7pub mod config;
8pub mod context;
9pub mod crypto;
10pub mod error_handler;
11pub mod integrations;
12pub mod lifecycle;
13pub mod logging;
14pub mod models;
15pub mod rag;
16pub mod resource;
17pub mod routing;
18pub mod sandbox;
19pub mod scheduler;
20pub mod secrets;
21pub mod types;
22
23#[cfg(feature = "http-api")]
24pub mod api;
25
26#[cfg(feature = "http-api")]
27use api::traits::RuntimeApiProvider;
28#[cfg(feature = "http-api")]
29use api::types::{
30    AddIdentityMappingRequest, AgentStatusResponse, ChannelActionResponse, ChannelAuditResponse,
31    ChannelDetail, ChannelHealthResponse, ChannelSummary, CreateAgentRequest, CreateAgentResponse,
32    CreateScheduleRequest, CreateScheduleResponse, DeleteAgentResponse, DeleteChannelResponse,
33    DeleteScheduleResponse, ExecuteAgentRequest, ExecuteAgentResponse, GetAgentHistoryResponse,
34    IdentityMappingEntry, NextRunsResponse, RegisterChannelRequest, RegisterChannelResponse,
35    ScheduleActionResponse, ScheduleDetail, ScheduleHistoryResponse, ScheduleSummary,
36    UpdateAgentRequest, UpdateAgentResponse, UpdateChannelRequest, UpdateScheduleRequest,
37    WorkflowExecutionRequest,
38};
39#[cfg(feature = "http-api")]
40use async_trait::async_trait;
41#[cfg(feature = "http-api")]
42use std::time::SystemTime;
43
44#[cfg(feature = "http-input")]
45pub mod http_input;
46
47// Re-export commonly used types
48pub use communication::{CommunicationBus, CommunicationConfig, DefaultCommunicationBus};
49pub use config::SecurityConfig;
50pub use context::{ContextManager, ContextManagerConfig, StandardContextManager};
51pub use error_handler::{DefaultErrorHandler, ErrorHandler, ErrorHandlerConfig};
52pub use lifecycle::{DefaultLifecycleController, LifecycleConfig, LifecycleController};
53pub use logging::{LoggingConfig, ModelInteractionType, ModelLogger, RequestData, ResponseData};
54pub use models::{ModelCatalog, ModelCatalogError, SlmRunner, SlmRunnerError};
55pub use resource::{DefaultResourceManager, ResourceManager, ResourceManagerConfig};
56pub use routing::{
57    DefaultRoutingEngine, RouteDecision, RoutingConfig, RoutingContext, RoutingEngine, TaskType,
58};
59pub use sandbox::{E2BSandbox, ExecutionResult, SandboxRunner, SandboxTier};
60#[cfg(feature = "cron")]
61pub use scheduler::{
62    cron_scheduler::{
63        CronMetrics, CronScheduler, CronSchedulerConfig, CronSchedulerError, CronSchedulerHealth,
64    },
65    cron_types::{
66        AuditLevel, CronJobDefinition, CronJobId, CronJobStatus, DeliveryChannel, DeliveryConfig,
67        DeliveryReceipt, JobRunRecord, JobRunStatus,
68    },
69    delivery::{CustomDeliveryHandler, DefaultDeliveryRouter, DeliveryResult, DeliveryRouter},
70    heartbeat::{
71        HeartbeatAssessment, HeartbeatConfig, HeartbeatContextMode, HeartbeatSeverity,
72        HeartbeatState,
73    },
74    job_store::{JobStore, JobStoreError, SqliteJobStore},
75    policy_gate::{
76        PolicyGate, ScheduleContext, SchedulePolicyCondition, SchedulePolicyDecision,
77        SchedulePolicyEffect, SchedulePolicyRule,
78    },
79};
80pub use scheduler::{AgentScheduler, DefaultAgentScheduler, SchedulerConfig};
81pub use secrets::{SecretStore, SecretsConfig};
82pub use types::*;
83
84use std::sync::Arc;
85use tokio::sync::RwLock;
86
/// Main Agent Runtime System
///
/// Owns every core runtime component behind `Arc<dyn …>` trait objects, so
/// the runtime is cheap to `Clone` (reference-count bumps only) and concrete
/// implementations can be swapped without changing this type.
#[derive(Clone)]
pub struct AgentRuntime {
    /// Schedules agents and reports per-agent and system-wide status.
    pub scheduler: Arc<dyn scheduler::AgentScheduler + Send + Sync>,
    /// Drives agent state transitions (start, shutdown, auto-recovery).
    pub lifecycle: Arc<dyn lifecycle::LifecycleController + Send + Sync>,
    /// Tracks and limits resource consumption across agents.
    pub resource_manager: Arc<dyn resource::ResourceManager + Send + Sync>,
    /// Message bus used to deliver `SecureMessage`s to agents.
    pub communication: Arc<dyn communication::CommunicationBus + Send + Sync>,
    /// Centralized error-handling component.
    pub error_handler: Arc<dyn error_handler::ErrorHandler + Send + Sync>,
    /// Persists and restores agent context/state.
    pub context_manager: Arc<dyn context::ContextManager + Send + Sync>,
    /// Model-interaction logger; `None` when logging is disabled or failed to init.
    pub model_logger: Option<Arc<logging::ModelLogger>>,
    /// SLM model catalog; `None` when no SLM config is present or it is disabled.
    pub model_catalog: Option<Arc<models::ModelCatalog>>,
    // Shared, mutable runtime configuration (read via get_config / update_config).
    config: Arc<RwLock<RuntimeConfig>>,
}
100
101impl AgentRuntime {
102    /// Create a new Agent Runtime System instance
103    pub async fn new(config: RuntimeConfig) -> Result<Self, RuntimeError> {
104        let config = Arc::new(RwLock::new(config));
105
106        // Initialize components
107        let scheduler = Arc::new(
108            scheduler::DefaultAgentScheduler::new(config.read().await.scheduler.clone()).await?,
109        );
110
111        let resource_manager = Arc::new(
112            resource::DefaultResourceManager::new(config.read().await.resource_manager.clone())
113                .await?,
114        );
115
116        let communication = Arc::new(
117            communication::DefaultCommunicationBus::new(config.read().await.communication.clone())
118                .await?,
119        );
120
121        let error_handler = Arc::new(
122            error_handler::DefaultErrorHandler::new(config.read().await.error_handler.clone())
123                .await?,
124        );
125
126        let lifecycle_config = lifecycle::LifecycleConfig {
127            max_agents: 1000,
128            initialization_timeout: std::time::Duration::from_secs(30),
129            termination_timeout: std::time::Duration::from_secs(30),
130            state_check_interval: std::time::Duration::from_secs(10),
131            enable_auto_recovery: true,
132            max_restart_attempts: 3,
133        };
134        let lifecycle =
135            Arc::new(lifecycle::DefaultLifecycleController::new(lifecycle_config).await?);
136
137        let context_manager = Arc::new(
138            context::StandardContextManager::new(
139                config.read().await.context_manager.clone(),
140                "runtime-system",
141            )
142            .await
143            .map_err(|e| {
144                RuntimeError::Internal(format!("Failed to create context manager: {}", e))
145            })?,
146        );
147
148        // Initialize context manager
149        context_manager.initialize().await.map_err(|e| {
150            RuntimeError::Internal(format!("Failed to initialize context manager: {}", e))
151        })?;
152
153        // Initialize model logger if enabled
154        let model_logger = if config.read().await.logging.enabled {
155            // For now, initialize without secret store to avoid type conversion issues
156            match logging::ModelLogger::new(config.read().await.logging.clone(), None) {
157                Ok(logger) => {
158                    tracing::info!("Model logging initialized successfully");
159                    Some(Arc::new(logger))
160                }
161                Err(e) => {
162                    tracing::warn!("Failed to initialize model logger: {}", e);
163                    None
164                }
165            }
166        } else {
167            tracing::info!("Model logging is disabled");
168            None
169        };
170
171        // Initialize model catalog if SLM is enabled
172        let model_catalog = if let Some(ref slm_config) = config.read().await.slm {
173            if slm_config.enabled {
174                match models::ModelCatalog::new(slm_config.clone()) {
175                    Ok(catalog) => {
176                        tracing::info!(
177                            "Model catalog initialized with {} models",
178                            catalog.list_models().len()
179                        );
180                        Some(Arc::new(catalog))
181                    }
182                    Err(e) => {
183                        tracing::warn!("Failed to initialize model catalog: {}", e);
184                        None
185                    }
186                }
187            } else {
188                tracing::info!("SLM support is disabled");
189                None
190            }
191        } else {
192            tracing::info!("No SLM configuration provided");
193            None
194        };
195
196        Ok(Self {
197            scheduler,
198            lifecycle,
199            resource_manager,
200            communication,
201            error_handler,
202            context_manager,
203            model_logger,
204            model_catalog,
205            config,
206        })
207    }
208
209    /// Get the current runtime configuration
210    pub async fn get_config(&self) -> RuntimeConfig {
211        self.config.read().await.clone()
212    }
213
214    /// Update the runtime configuration
215    pub async fn update_config(&self, config: RuntimeConfig) -> Result<(), RuntimeError> {
216        *self.config.write().await = config;
217        Ok(())
218    }
219
220    /// Shutdown the runtime system gracefully
221    pub async fn shutdown(&self) -> Result<(), RuntimeError> {
222        tracing::info!("Starting Agent Runtime shutdown sequence");
223
224        // Shutdown components in reverse order of initialization
225        self.lifecycle
226            .shutdown()
227            .await
228            .map_err(RuntimeError::Lifecycle)?;
229        self.communication
230            .shutdown()
231            .await
232            .map_err(RuntimeError::Communication)?;
233        self.resource_manager
234            .shutdown()
235            .await
236            .map_err(RuntimeError::Resource)?;
237        self.scheduler
238            .shutdown()
239            .await
240            .map_err(RuntimeError::Scheduler)?;
241        self.error_handler
242            .shutdown()
243            .await
244            .map_err(RuntimeError::ErrorHandler)?;
245
246        // Shutdown context manager last to ensure all contexts are saved
247        self.context_manager.shutdown().await.map_err(|e| {
248            RuntimeError::Internal(format!("Context manager shutdown failed: {}", e))
249        })?;
250
251        tracing::info!("Agent Runtime shutdown completed successfully");
252        Ok(())
253    }
254
255    /// Get system status
256    pub async fn get_status(&self) -> SystemStatus {
257        self.scheduler.get_system_status().await
258    }
259}
260
/// Runtime configuration
///
/// Aggregates the per-component configuration sections consumed by
/// `AgentRuntime::new`. `Default` yields each section's default with the
/// optional `slm` and `routing` sections unset.
#[derive(Debug, Clone, Default)]
pub struct RuntimeConfig {
    /// Agent scheduler settings.
    pub scheduler: scheduler::SchedulerConfig,
    /// Resource manager settings.
    pub resource_manager: resource::ResourceManagerConfig,
    /// Communication bus settings.
    pub communication: communication::CommunicationConfig,
    /// Context manager settings.
    pub context_manager: context::ContextManagerConfig,
    /// Security settings.
    pub security: SecurityConfig,
    /// Audit settings.
    pub audit: AuditConfig,
    /// Error handler settings.
    pub error_handler: error_handler::ErrorHandlerConfig,
    /// Model-interaction logging settings (`logging.enabled` gates the logger).
    pub logging: logging::LoggingConfig,
    /// Optional SLM configuration; when `None` no model catalog is created.
    pub slm: Option<config::Slm>,
    /// Optional routing engine configuration.
    pub routing: Option<routing::RoutingConfig>,
}
275
276/// Implementation of RuntimeApiProvider for AgentRuntime
277#[cfg(feature = "http-api")]
278#[async_trait]
279impl RuntimeApiProvider for AgentRuntime {
    /// Execute a workflow described in the platform DSL.
    ///
    /// Parses the DSL, schedules an ephemeral agent for it, waits briefly,
    /// and returns a JSON summary of the scheduling outcome plus a system
    /// status snapshot. It does NOT wait for the workflow to complete.
    ///
    /// NOTE(review): `request.workflow_id` is treated as the DSL source text
    /// itself, not as a lookup key — confirm this is intended. Also, `dsl` is
    /// not imported in this file's visible header; presumably it comes from a
    /// sibling crate — verify.
    async fn execute_workflow(
        &self,
        request: WorkflowExecutionRequest,
    ) -> Result<serde_json::Value, RuntimeError> {
        tracing::info!("Executing workflow: {}", request.workflow_id);

        // Step 1: Parse the workflow DSL and extract metadata (before any await)
        // The parse tree lives only inside this block, presumably so the
        // parser state is dropped before the first `.await` — confirm.
        let workflow_dsl = &request.workflow_id; // For now, treat workflow_id as DSL source
        let (metadata, agent_config) = {
            let parsed_tree = dsl::parse_dsl(workflow_dsl)
                .map_err(|e| RuntimeError::Internal(format!("DSL parsing failed: {}", e)))?;

            // Extract metadata from the parsed workflow
            let metadata = dsl::extract_metadata(&parsed_tree, workflow_dsl);

            // Check for parsing errors
            let root_node = parsed_tree.root_node();
            if root_node.has_error() {
                return Err(RuntimeError::Internal(
                    "DSL contains syntax errors".to_string(),
                ));
            }

            // Create agent configuration from the workflow; the caller may
            // supply an agent id, otherwise a default one is used.
            let agent_id = request.agent_id.unwrap_or_default();
            let agent_config = AgentConfig {
                id: agent_id,
                name: metadata
                    .get("name")
                    .cloned()
                    .unwrap_or_else(|| "workflow_agent".to_string()),
                dsl_source: workflow_dsl.to_string(),
                execution_mode: ExecutionMode::Ephemeral,
                security_tier: SecurityTier::Tier1,
                resource_limits: ResourceLimits::default(),
                capabilities: vec![Capability::Computation], // Basic capability for workflow execution
                policies: vec![],
                metadata: metadata.clone(),
                priority: Priority::Normal,
            };

            (metadata, agent_config)
        };

        // Step 2: Schedule the agent for execution
        let scheduled_agent_id = self
            .scheduler
            .schedule_agent(agent_config)
            .await
            .map_err(RuntimeError::Scheduler)?;

        // Step 3: Wait briefly and check initial status (simple implementation)
        tokio::time::sleep(std::time::Duration::from_millis(100)).await;

        // Step 4: Collect basic execution information
        let system_status = self.scheduler.get_system_status().await;

        // Step 5: Prepare and return result
        let mut result = serde_json::json!({
            "status": "success",
            "workflow_id": request.workflow_id,
            "agent_id": scheduled_agent_id.to_string(),
            "execution_started": true,
            "metadata": metadata,
            "system_status": {
                "total_agents": system_status.total_agents,
                "running_agents": system_status.running_agents,
                "resource_utilization": {
                    "memory_used": system_status.resource_utilization.memory_used,
                    "cpu_utilization": system_status.resource_utilization.cpu_utilization,
                    "disk_io_rate": system_status.resource_utilization.disk_io_rate,
                    "network_io_rate": system_status.resource_utilization.network_io_rate
                }
            }
        });

        // Add parameters if provided (echoed back verbatim)
        if !request.parameters.is_null() {
            result["parameters"] = request.parameters;
        }

        tracing::info!(
            "Workflow execution initiated for agent: {}",
            scheduled_agent_id
        );
        Ok(result)
    }
367
368    async fn get_agent_status(
369        &self,
370        agent_id: AgentId,
371    ) -> Result<AgentStatusResponse, RuntimeError> {
372        // Call the scheduler to get agent status
373        match self.scheduler.get_agent_status(agent_id).await {
374            Ok(agent_status) => {
375                // Convert SystemTime to DateTime<Utc>
376                let last_activity =
377                    chrono::DateTime::<chrono::Utc>::from(agent_status.last_activity);
378
379                Ok(AgentStatusResponse {
380                    agent_id: agent_status.agent_id,
381                    state: agent_status.state,
382                    last_activity,
383                    resource_usage: api::types::ResourceUsage {
384                        memory_bytes: agent_status.memory_usage,
385                        cpu_percent: agent_status.cpu_usage,
386                        active_tasks: agent_status.active_tasks,
387                    },
388                })
389            }
390            Err(scheduler_error) => {
391                tracing::warn!(
392                    "Failed to get agent status for {}: {}",
393                    agent_id,
394                    scheduler_error
395                );
396                Err(RuntimeError::Internal(format!(
397                    "Agent {} not found",
398                    agent_id
399                )))
400            }
401        }
402    }
403
    /// Aggregate a system-wide health report from the four checkable
    /// components (scheduler, lifecycle, resource manager, communication).
    ///
    /// Overall status is "healthy" only if every component is healthy,
    /// "unhealthy" if any component is unhealthy, otherwise "degraded".
    /// Any single component's failed health *check* aborts with an error.
    async fn get_system_health(&self) -> Result<serde_json::Value, RuntimeError> {
        // Check health of all components
        let scheduler_health =
            self.scheduler.check_health().await.map_err(|e| {
                RuntimeError::Internal(format!("Scheduler health check failed: {}", e))
            })?;

        let lifecycle_health =
            self.lifecycle.check_health().await.map_err(|e| {
                RuntimeError::Internal(format!("Lifecycle health check failed: {}", e))
            })?;

        let resource_health = self.resource_manager.check_health().await.map_err(|e| {
            RuntimeError::Internal(format!("Resource manager health check failed: {}", e))
        })?;

        let communication_health = self.communication.check_health().await.map_err(|e| {
            RuntimeError::Internal(format!("Communication health check failed: {}", e))
        })?;

        // Determine overall system status from the component set.
        let component_healths = vec![
            ("scheduler", &scheduler_health),
            ("lifecycle", &lifecycle_health),
            ("resource_manager", &resource_health),
            ("communication", &communication_health),
        ];

        let overall_status = if component_healths
            .iter()
            .all(|(_, h)| h.status == HealthStatus::Healthy)
        {
            "healthy"
        } else if component_healths
            .iter()
            .any(|(_, h)| h.status == HealthStatus::Unhealthy)
        {
            "unhealthy"
        } else {
            "degraded"
        };

        // Build response with detailed component information.
        // Timestamps are reported as seconds since the Unix epoch.
        let mut components = serde_json::Map::new();
        for (name, health) in component_healths {
            let component_info = serde_json::json!({
                "status": match health.status {
                    HealthStatus::Healthy => "healthy",
                    HealthStatus::Degraded => "degraded",
                    HealthStatus::Unhealthy => "unhealthy",
                },
                "message": health.message,
                "last_check": health.last_check
                    .duration_since(std::time::UNIX_EPOCH)
                    .unwrap_or_default()
                    .as_secs(),
                "uptime_seconds": health.uptime.as_secs(),
                "metrics": health.metrics
            });
            components.insert(name.to_string(), component_info);
        }

        Ok(serde_json::json!({
            "status": overall_status,
            "timestamp": std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs(),
            "components": components
        }))
    }
475
476    async fn list_agents(&self) -> Result<Vec<AgentId>, RuntimeError> {
477        Ok(self.scheduler.list_agents().await)
478    }
479
480    async fn shutdown_agent(&self, agent_id: AgentId) -> Result<(), RuntimeError> {
481        self.scheduler
482            .shutdown_agent(agent_id)
483            .await
484            .map_err(RuntimeError::Scheduler)
485    }
486
487    async fn get_metrics(&self) -> Result<serde_json::Value, RuntimeError> {
488        let status = self.get_status().await;
489        Ok(serde_json::json!({
490            "agents": {
491                "total": status.total_agents,
492                "running": status.running_agents,
493                "idle": status.total_agents - status.running_agents,
494                "error": 0
495            },
496            "system": {
497                "uptime": 0,
498                "memory_usage": status.resource_utilization.memory_used,
499                "cpu_usage": status.resource_utilization.cpu_utilization
500            }
501        }))
502    }
503
504    async fn create_agent(
505        &self,
506        request: CreateAgentRequest,
507    ) -> Result<CreateAgentResponse, RuntimeError> {
508        // Validate input
509        if request.name.is_empty() {
510            return Err(RuntimeError::Internal(
511                "Agent name cannot be empty".to_string(),
512            ));
513        }
514
515        if request.dsl.is_empty() {
516            return Err(RuntimeError::Internal(
517                "Agent DSL cannot be empty".to_string(),
518            ));
519        }
520
521        // Create agent configuration
522        let agent_id = AgentId::new();
523        let agent_config = AgentConfig {
524            id: agent_id,
525            name: request.name,
526            dsl_source: request.dsl,
527            execution_mode: ExecutionMode::Ephemeral,
528            security_tier: SecurityTier::Tier1,
529            resource_limits: ResourceLimits::default(),
530            capabilities: vec![Capability::Computation],
531            policies: vec![],
532            metadata: std::collections::HashMap::new(),
533            priority: Priority::Normal,
534        };
535
536        // Schedule the agent for execution
537        let scheduled_agent_id = self
538            .scheduler
539            .schedule_agent(agent_config)
540            .await
541            .map_err(RuntimeError::Scheduler)?;
542
543        tracing::info!("Created and scheduled agent: {}", scheduled_agent_id);
544
545        Ok(CreateAgentResponse {
546            id: scheduled_agent_id.to_string(),
547            status: "scheduled".to_string(),
548        })
549    }
550
551    async fn update_agent(
552        &self,
553        agent_id: AgentId,
554        request: UpdateAgentRequest,
555    ) -> Result<UpdateAgentResponse, RuntimeError> {
556        // Validate that at least one field is provided for update
557        if request.name.is_none() && request.dsl.is_none() {
558            return Err(RuntimeError::Internal(
559                "At least one field (name or dsl) must be provided for update".to_string(),
560            ));
561        }
562
563        // Validate optional fields if provided
564        if let Some(ref name) = request.name {
565            if name.is_empty() {
566                return Err(RuntimeError::Internal(
567                    "Agent name cannot be empty".to_string(),
568                ));
569            }
570        }
571
572        if let Some(ref dsl) = request.dsl {
573            if dsl.is_empty() {
574                return Err(RuntimeError::Internal(
575                    "Agent DSL cannot be empty".to_string(),
576                ));
577            }
578        }
579
580        // Call the scheduler to update the agent
581        self.scheduler
582            .update_agent(agent_id, request)
583            .await
584            .map_err(RuntimeError::Scheduler)?;
585
586        tracing::info!("Successfully updated agent: {}", agent_id);
587
588        Ok(UpdateAgentResponse {
589            id: agent_id.to_string(),
590            status: "updated".to_string(),
591        })
592    }
593
594    async fn delete_agent(&self, agent_id: AgentId) -> Result<DeleteAgentResponse, RuntimeError> {
595        // Placeholder implementation - validate input and return success
596
597        Ok(DeleteAgentResponse {
598            id: agent_id.to_string(),
599            status: "deleted".to_string(),
600        })
601    }
602
    /// Fire-and-forget execution: ensure the agent is running, then deliver
    /// the serialized request to it over the communication bus.
    ///
    /// Returns a freshly generated execution id immediately; it does not
    /// wait for the agent to process the message.
    ///
    /// NOTE(review): the payload is sent unencrypted and unsigned
    /// (`EncryptionAlgorithm::None` / `SignatureAlgorithm::None`) — confirm
    /// this is acceptable for the deployment's security tiers.
    async fn execute_agent(
        &self,
        agent_id: AgentId,
        request: ExecuteAgentRequest,
    ) -> Result<ExecuteAgentResponse, RuntimeError> {
        // Start the agent first if it is not already running.
        let status = self.get_agent_status(agent_id).await?;
        if status.state != AgentState::Running {
            self.lifecycle
                .start_agent(agent_id)
                .await
                .map_err(RuntimeError::Lifecycle)?;
        }
        let execution_id = uuid::Uuid::new_v4().to_string();
        // Wrap the JSON-serialized request in the (currently plaintext) envelope.
        let payload = types::EncryptedPayload {
            data: serde_json::to_vec(&request)
                .map_err(|e| RuntimeError::Internal(e.to_string()))?
                .into(),
            encryption_algorithm: types::EncryptionAlgorithm::None,
            nonce: vec![],
        };
        // Empty signature placeholder matching the unsigned algorithm.
        let signature = types::MessageSignature {
            signature: vec![],
            algorithm: types::SignatureAlgorithm::None,
            public_key: vec![],
        };
        // Direct message to the target agent with a 5-minute TTL.
        let message = types::SecureMessage {
            id: types::MessageId::new(),
            sender: AgentId::new(), // System sender
            recipient: Some(agent_id),
            topic: None,
            payload,
            signature,
            timestamp: SystemTime::now(),
            ttl: std::time::Duration::from_secs(300),
            message_type: types::MessageType::Direct(agent_id),
        };
        self.communication
            .send_message(message)
            .await
            .map_err(RuntimeError::Communication)?;
        Ok(ExecuteAgentResponse {
            execution_id,
            status: "execution_started".to_string(),
        })
    }
648
649    async fn get_agent_history(
650        &self,
651        _agent_id: AgentId,
652    ) -> Result<GetAgentHistoryResponse, RuntimeError> {
653        // For now, return empty history as the model logger API is not yet implemented
654        let history = vec![];
655        Ok(GetAgentHistoryResponse { history })
656    }
657
    // ── Schedule endpoints ──────────────────────────────────────────
    //
    // Fallback implementations used when no CronScheduler is wired into the
    // runtime: every query/mutation fails with an explanatory error, except
    // the health probe, which reports a not-running scheduler with zeroed
    // counters.

    async fn list_schedules(&self) -> Result<Vec<ScheduleSummary>, RuntimeError> {
        Err(RuntimeError::Internal(
            "Schedule API requires a running CronScheduler (use `symbi up` with --features cron)"
                .to_string(),
        ))
    }

    async fn create_schedule(
        &self,
        _request: CreateScheduleRequest,
    ) -> Result<CreateScheduleResponse, RuntimeError> {
        Err(RuntimeError::Internal(
            "Schedule API requires a running CronScheduler".to_string(),
        ))
    }

    async fn get_schedule(&self, _job_id: &str) -> Result<ScheduleDetail, RuntimeError> {
        Err(RuntimeError::Internal(
            "Schedule API requires a running CronScheduler".to_string(),
        ))
    }

    async fn update_schedule(
        &self,
        _job_id: &str,
        _request: UpdateScheduleRequest,
    ) -> Result<ScheduleDetail, RuntimeError> {
        Err(RuntimeError::Internal(
            "Schedule API requires a running CronScheduler".to_string(),
        ))
    }

    async fn delete_schedule(&self, _job_id: &str) -> Result<DeleteScheduleResponse, RuntimeError> {
        Err(RuntimeError::Internal(
            "Schedule API requires a running CronScheduler".to_string(),
        ))
    }

    async fn pause_schedule(&self, _job_id: &str) -> Result<ScheduleActionResponse, RuntimeError> {
        Err(RuntimeError::Internal(
            "Schedule API requires a running CronScheduler".to_string(),
        ))
    }

    async fn resume_schedule(&self, _job_id: &str) -> Result<ScheduleActionResponse, RuntimeError> {
        Err(RuntimeError::Internal(
            "Schedule API requires a running CronScheduler".to_string(),
        ))
    }

    async fn trigger_schedule(
        &self,
        _job_id: &str,
    ) -> Result<ScheduleActionResponse, RuntimeError> {
        Err(RuntimeError::Internal(
            "Schedule API requires a running CronScheduler".to_string(),
        ))
    }

    async fn get_schedule_history(
        &self,
        _job_id: &str,
        _limit: usize,
    ) -> Result<ScheduleHistoryResponse, RuntimeError> {
        Err(RuntimeError::Internal(
            "Schedule API requires a running CronScheduler".to_string(),
        ))
    }

    async fn get_schedule_next_runs(
        &self,
        _job_id: &str,
        _count: usize,
    ) -> Result<NextRunsResponse, RuntimeError> {
        Err(RuntimeError::Internal(
            "Schedule API requires a running CronScheduler".to_string(),
        ))
    }

    async fn get_scheduler_health(
        &self,
    ) -> Result<api::types::SchedulerHealthResponse, RuntimeError> {
        // Without a CronScheduler instance, return a minimal response.
        Ok(api::types::SchedulerHealthResponse {
            is_running: false,
            store_accessible: false,
            jobs_total: 0,
            jobs_active: 0,
            jobs_paused: 0,
            jobs_dead_letter: 0,
            global_active_runs: 0,
            max_concurrent: 0,
            runs_total: 0,
            runs_succeeded: 0,
            runs_failed: 0,
            average_execution_time_ms: 0.0,
            longest_run_ms: 0,
        })
    }
759
    // ── Channel endpoints ──────────────────────────────────────────
    //
    // Fallback implementations: channel CRUD/start/stop/health requires a
    // running ChannelAdapterManager, and identity mappings / audit logs
    // require the enterprise edition, so each endpoint returns an
    // explanatory error.

    async fn list_channels(&self) -> Result<Vec<ChannelSummary>, RuntimeError> {
        Err(RuntimeError::Internal(
            "Channel API requires a running ChannelAdapterManager".to_string(),
        ))
    }

    async fn register_channel(
        &self,
        _request: RegisterChannelRequest,
    ) -> Result<RegisterChannelResponse, RuntimeError> {
        Err(RuntimeError::Internal(
            "Channel API requires a running ChannelAdapterManager".to_string(),
        ))
    }

    async fn get_channel(&self, _id: &str) -> Result<ChannelDetail, RuntimeError> {
        Err(RuntimeError::Internal(
            "Channel API requires a running ChannelAdapterManager".to_string(),
        ))
    }

    async fn update_channel(
        &self,
        _id: &str,
        _request: UpdateChannelRequest,
    ) -> Result<ChannelDetail, RuntimeError> {
        Err(RuntimeError::Internal(
            "Channel API requires a running ChannelAdapterManager".to_string(),
        ))
    }

    async fn delete_channel(&self, _id: &str) -> Result<DeleteChannelResponse, RuntimeError> {
        Err(RuntimeError::Internal(
            "Channel API requires a running ChannelAdapterManager".to_string(),
        ))
    }

    async fn start_channel(&self, _id: &str) -> Result<ChannelActionResponse, RuntimeError> {
        Err(RuntimeError::Internal(
            "Channel API requires a running ChannelAdapterManager".to_string(),
        ))
    }

    async fn stop_channel(&self, _id: &str) -> Result<ChannelActionResponse, RuntimeError> {
        Err(RuntimeError::Internal(
            "Channel API requires a running ChannelAdapterManager".to_string(),
        ))
    }

    async fn get_channel_health(&self, _id: &str) -> Result<ChannelHealthResponse, RuntimeError> {
        Err(RuntimeError::Internal(
            "Channel API requires a running ChannelAdapterManager".to_string(),
        ))
    }

    async fn list_channel_mappings(
        &self,
        _id: &str,
    ) -> Result<Vec<IdentityMappingEntry>, RuntimeError> {
        Err(RuntimeError::Internal(
            "Channel identity mappings require enterprise edition".to_string(),
        ))
    }

    async fn add_channel_mapping(
        &self,
        _id: &str,
        _request: AddIdentityMappingRequest,
    ) -> Result<IdentityMappingEntry, RuntimeError> {
        Err(RuntimeError::Internal(
            "Channel identity mappings require enterprise edition".to_string(),
        ))
    }

    async fn remove_channel_mapping(&self, _id: &str, _user_id: &str) -> Result<(), RuntimeError> {
        Err(RuntimeError::Internal(
            "Channel identity mappings require enterprise edition".to_string(),
        ))
    }

    async fn get_channel_audit(
        &self,
        _id: &str,
        _limit: usize,
    ) -> Result<ChannelAuditResponse, RuntimeError> {
        Err(RuntimeError::Internal(
            "Channel audit log requires enterprise edition".to_string(),
        ))
    }
851}