forge/runtime.rs

//! FORGE - The Rust Full-Stack Framework
//!
//! Single-binary runtime that provides:
//! - HTTP Gateway with RPC endpoints
//! - SSE server for real-time subscriptions
//! - Background job workers
//! - Cron scheduler
//! - Workflow engine
//! - Cluster coordination
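//!
//! A minimal end-to-end sketch, assuming the crate is consumed as `forge` and that
//! `ListUsers` is a user-defined query type (neither is defined in this file):
//!
//! ```ignore
//! use forge::prelude::*;
//!
//! #[tokio::main]
//! async fn main() -> Result<()> {
//!     let config = ForgeConfig::default_with_database_url("postgres://localhost/myapp");
//!     Forge::builder()
//!         .config(config)
//!         .register_query::<ListUsers>()
//!         .build()?
//!         .run()
//!         .await
//! }
//! ```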

use std::future::Future;
use std::net::IpAddr;
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;

use axum::Router;
use axum::body::Body;
use axum::http::Request;
use axum::response::Response;
use tokio::sync::broadcast;

use forge_core::CircuitBreakerClient;
use forge_core::cluster::{LeaderRole, NodeId, NodeInfo, NodeRole, NodeStatus};
use forge_core::config::{ForgeConfig, NodeRole as ConfigNodeRole};
use forge_core::error::{ForgeError, Result};
use forge_core::function::{ForgeMutation, ForgeQuery};
use forge_core::mcp::ForgeMcpTool;
use forge_runtime::migrations::{Migration, MigrationRunner, load_migrations_from_dir};

use forge_runtime::cluster::{
    GracefulShutdown, HeartbeatConfig, HeartbeatLoop, LeaderConfig, LeaderElection, NodeRegistry,
    ShutdownConfig,
};
use forge_runtime::cron::{CronRegistry, CronRunner, CronRunnerConfig};
use forge_runtime::daemon::{DaemonRegistry, DaemonRunner};
use forge_runtime::db::Database;
use forge_runtime::function::FunctionRegistry;
use forge_runtime::gateway::{AuthConfig, GatewayConfig as RuntimeGatewayConfig, GatewayServer};
use forge_runtime::jobs::{JobDispatcher, JobQueue, JobRegistry, Worker, WorkerConfig};
use forge_runtime::mcp::McpToolRegistry;
use forge_runtime::webhook::{WebhookRegistry, WebhookState, webhook_handler};
use forge_runtime::workflow::{
    EventStore, WorkflowExecutor, WorkflowRegistry, WorkflowScheduler, WorkflowSchedulerConfig,
};
use tokio_util::sync::CancellationToken;

/// Type alias for frontend handler function.
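///
/// A minimal handler sketch matching this signature; `serve_embedded_asset` is a
/// hypothetical helper standing in for whatever asset lookup the application uses:
///
/// ```ignore
/// fn spa_handler(req: Request<Body>) -> Pin<Box<dyn Future<Output = Response> + Send>> {
///     Box::pin(async move {
///         // Hypothetical embedded-asset lookup; a real handler would fall back to
///         // index.html to support client-side routing.
///         serve_embedded_asset(req.uri().path()).await
///     })
/// }
/// ```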
pub type FrontendHandler = fn(Request<Body>) -> Pin<Box<dyn Future<Output = Response> + Send>>;

/// Prelude module for common imports.
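///
/// Typical usage in application code (assuming the crate is consumed as `forge`):
///
/// ```ignore
/// use forge::prelude::*;
/// ```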
pub mod prelude {
    // Common types
    pub use chrono::{DateTime, Utc};
    pub use uuid::Uuid;

    // Serde re-exports for user code
    pub use serde::{Deserialize, Serialize};
    pub use serde_json;

    /// Timestamp type alias for convenience.
    pub type Timestamp = DateTime<Utc>;

    // Core types
    pub use forge_core::cluster::NodeRole;
    pub use forge_core::config::ForgeConfig;
    pub use forge_core::cron::{CronContext, ForgeCron};
    pub use forge_core::daemon::{DaemonContext, ForgeDaemon};
    pub use forge_core::env::EnvAccess;
    pub use forge_core::error::{ForgeError, Result};
    pub use forge_core::function::{
        AuthContext, ForgeMutation, ForgeQuery, MutationContext, QueryContext,
    };
    pub use forge_core::job::{ForgeJob, JobContext, JobPriority};
    pub use forge_core::mcp::{ForgeMcpTool, McpToolContext, McpToolResult};
    pub use forge_core::realtime::Delta;
    pub use forge_core::schema::{FieldDef, ModelMeta, SchemaRegistry, TableDef};
    pub use forge_core::schemars::JsonSchema;
    pub use forge_core::types::Upload;
    pub use forge_core::webhook::{ForgeWebhook, WebhookContext, WebhookResult, WebhookSignature};
    pub use forge_core::workflow::{ForgeWorkflow, WorkflowContext};

    // Re-export the same axum version the runtime uses; avoids type mismatches in custom handlers
    pub use axum;

    pub use crate::{Forge, ForgeBuilder};
}

/// The main FORGE runtime.
pub struct Forge {
    config: ForgeConfig,
    db: Option<Database>,
    node_id: NodeId,
    function_registry: FunctionRegistry,
    mcp_registry: McpToolRegistry,
    job_registry: JobRegistry,
    cron_registry: Arc<CronRegistry>,
    workflow_registry: WorkflowRegistry,
    daemon_registry: Arc<DaemonRegistry>,
    webhook_registry: Arc<WebhookRegistry>,
    shutdown_tx: broadcast::Sender<()>,
    /// Path to user migrations directory (default: ./migrations).
    migrations_dir: PathBuf,
    /// Additional migrations provided programmatically.
    extra_migrations: Vec<Migration>,
    /// Optional frontend handler for embedded SPA.
    frontend_handler: Option<FrontendHandler>,
    /// Custom axum routes merged into the top-level router.
    custom_routes: Option<Router>,
}

impl Forge {
    /// Create a new builder for configuring FORGE.
    pub fn builder() -> ForgeBuilder {
        ForgeBuilder::new()
    }

    /// Get the node ID.
    pub fn node_id(&self) -> NodeId {
        self.node_id
    }

    /// Get the configuration.
    pub fn config(&self) -> &ForgeConfig {
        &self.config
    }

    /// Get the function registry.
    pub fn function_registry(&self) -> &FunctionRegistry {
        &self.function_registry
    }

    /// Get the function registry mutably.
    pub fn function_registry_mut(&mut self) -> &mut FunctionRegistry {
        &mut self.function_registry
    }

    /// Get the MCP tool registry mutably.
    pub fn mcp_registry_mut(&mut self) -> &mut McpToolRegistry {
        &mut self.mcp_registry
    }

    /// Register an MCP tool without manually accessing the registry.
    pub fn register_mcp_tool<T: ForgeMcpTool>(&mut self) -> &mut Self {
        self.mcp_registry.register::<T>();
        self
    }

    /// Get the job registry.
    pub fn job_registry(&self) -> &JobRegistry {
        &self.job_registry
    }

    /// Get the job registry mutably.
    pub fn job_registry_mut(&mut self) -> &mut JobRegistry {
        &mut self.job_registry
    }

    /// Get the cron registry.
    pub fn cron_registry(&self) -> Arc<CronRegistry> {
        self.cron_registry.clone()
    }

    /// Get the workflow registry.
    pub fn workflow_registry(&self) -> &WorkflowRegistry {
        &self.workflow_registry
    }

    /// Get the workflow registry mutably.
    pub fn workflow_registry_mut(&mut self) -> &mut WorkflowRegistry {
        &mut self.workflow_registry
    }

    /// Get the daemon registry.
    pub fn daemon_registry(&self) -> Arc<DaemonRegistry> {
        self.daemon_registry.clone()
    }

    /// Get the webhook registry.
    pub fn webhook_registry(&self) -> Arc<WebhookRegistry> {
        self.webhook_registry.clone()
    }

    /// Run the FORGE server.
    pub async fn run(mut self) -> Result<()> {
        // Users shouldn't need tracing_subscriber boilerplate to see logs
        let telemetry_config = forge_runtime::TelemetryConfig::from_observability_config(
            &self.config.observability,
            &self.config.project.name,
            &self.config.project.version,
        );
        match forge_runtime::init_telemetry(
            &telemetry_config,
            &self.config.project.name,
            &self.config.observability.log_level,
        ) {
            Ok(true) => {}
            Ok(false) => {
                // Subscriber already exists, user set one up manually
            }
            Err(e) => {
                eprintln!("forge: failed to initialize telemetry: {e}");
            }
        }

        tracing::debug!("Connecting to database");

        // Connect to database
        let db =
            Database::from_config_with_service(&self.config.database, &self.config.project.name)
                .await?;
        let pool = db.primary().clone();
        let jobs_pool = db.jobs_pool().clone();
        let observability_pool = db.observability_pool().clone();
        if let Some(handle) = db.start_health_monitor() {
            let mut shutdown_rx = self.shutdown_tx.subscribe();
            tokio::spawn(async move {
                tokio::select! {
                    _ = shutdown_rx.recv() => {}
                    _ = handle => {}
                }
            });
        }
        self.db = Some(db);

        tracing::debug!("Database connected");

        // Run migrations with mesh-safe locking
        // This acquires an advisory lock, so only one node runs migrations at a time
        let runner = MigrationRunner::new(pool.clone());

        // Load user migrations from directory + any programmatic ones
        let mut user_migrations = load_migrations_from_dir(&self.migrations_dir)?;
        user_migrations.extend(self.extra_migrations.clone());

        runner.run(user_migrations).await?;
        tracing::debug!("Migrations applied");

        // Get local node info
        let hostname = get_hostname();

        let ip_address: IpAddr = "127.0.0.1".parse().expect("valid IP literal");
        let roles: Vec<NodeRole> = self
            .config
            .node
            .roles
            .iter()
            .map(config_role_to_node_role)
            .collect();

        let node_info = NodeInfo::new_local(
            hostname,
            ip_address,
            self.config.gateway.port,
            self.config.gateway.grpc_port,
            roles.clone(),
            self.config.node.worker_capabilities.clone(),
            env!("CARGO_PKG_VERSION").to_string(),
        );

        let node_id = node_info.id;
        self.node_id = node_id;

        // Create node registry
        let node_registry = Arc::new(NodeRegistry::new(pool.clone(), node_info));

        // Register node in cluster
        if let Err(e) = node_registry.register().await {
            tracing::debug!("Failed to register node (tables may not exist): {}", e);
        }

        // Set node status to active
        if let Err(e) = node_registry.set_status(NodeStatus::Active).await {
            tracing::debug!("Failed to set node status: {}", e);
        }

        // Create leader election for scheduler role
        let leader_election = if roles.contains(&NodeRole::Scheduler) {
            let election = Arc::new(LeaderElection::new(
                pool.clone(),
                node_id,
                LeaderRole::Scheduler,
                LeaderConfig::default(),
            ));

            // Try to become leader
            if let Err(e) = election.try_become_leader().await {
                tracing::debug!("Failed to acquire leadership: {}", e);
            }

            Some(election)
        } else {
            None
        };

        // Create graceful shutdown coordinator
        let shutdown = Arc::new(GracefulShutdown::new(
            node_registry.clone(),
            leader_election.clone(),
            ShutdownConfig::default(),
        ));

        // Create HTTP client with circuit breaker for actions and crons
        let http_client = CircuitBreakerClient::with_defaults(reqwest::Client::new());

        // Start background tasks based on roles
        let mut handles = Vec::new();

        // Start heartbeat loop
        {
            let heartbeat_pool = pool.clone();
            let heartbeat_node_id = node_id;
            let config = HeartbeatConfig::default();
            handles.push(tokio::spawn(async move {
                let heartbeat = HeartbeatLoop::new(heartbeat_pool, heartbeat_node_id, config);
                heartbeat.run().await;
            }));
        }

        // Start leader election loop if scheduler role
        if let Some(ref election) = leader_election {
            let election = election.clone();
            handles.push(tokio::spawn(async move {
                election.run().await;
            }));
        }

        // Start job worker if worker role
        if roles.contains(&NodeRole::Worker) {
            let job_queue = JobQueue::new(jobs_pool.clone());
            let worker_config = WorkerConfig {
                id: Some(node_id.as_uuid()),
                capabilities: self.config.node.worker_capabilities.clone(),
                max_concurrent: self.config.worker.max_concurrent_jobs,
                poll_interval: Duration::from_millis(self.config.worker.poll_interval_ms),
                ..Default::default()
            };

            let mut worker = Worker::new(
                worker_config,
                job_queue,
                self.job_registry.clone(),
                jobs_pool.clone(),
            );

            handles.push(tokio::spawn(async move {
                if let Err(e) = worker.run().await {
                    tracing::error!("Worker error: {}", e);
                }
            }));

            tracing::debug!("Job worker started");
        }

        // Start cron runner if scheduler role (crons fire only while this node holds leadership)
        if roles.contains(&NodeRole::Scheduler) {
            let cron_registry = self.cron_registry.clone();
            let cron_pool = jobs_pool.clone();
            let cron_http = http_client.clone();
            let cron_leader_election = leader_election.clone();

            let cron_config = CronRunnerConfig {
                poll_interval: Duration::from_secs(1),
                node_id: node_id.as_uuid(),
                is_leader: cron_leader_election.is_none(),
                leader_election: cron_leader_election,
                run_stale_threshold: Duration::from_secs(15 * 60),
            };

            let cron_runner = CronRunner::new(cron_registry, cron_pool, cron_http, cron_config);

            handles.push(tokio::spawn(async move {
                if let Err(e) = cron_runner.run().await {
                    tracing::error!("Cron runner error: {}", e);
                }
            }));

            tracing::debug!("Cron scheduler started");
        }

        // Start workflow scheduler if scheduler role
        let workflow_shutdown_token = CancellationToken::new();
        if roles.contains(&NodeRole::Scheduler) {
            let scheduler_executor = Arc::new(WorkflowExecutor::new(
                Arc::new(self.workflow_registry.clone()),
                jobs_pool.clone(),
                http_client.clone(),
            ));
            let event_store = Arc::new(EventStore::new(jobs_pool.clone()));
            let scheduler = WorkflowScheduler::new(
                jobs_pool.clone(),
                scheduler_executor,
                event_store,
                WorkflowSchedulerConfig::default(),
            );

            let shutdown_token = workflow_shutdown_token.clone();
            handles.push(tokio::spawn(async move {
                scheduler.run(shutdown_token).await;
            }));

            tracing::debug!("Workflow scheduler started");
        }

        // Create job dispatcher and workflow executor for dispatch capabilities
        let job_queue_for_dispatch = JobQueue::new(jobs_pool.clone());
        let job_dispatcher = Arc::new(JobDispatcher::new(
            job_queue_for_dispatch,
            self.job_registry.clone(),
        ));
        let workflow_executor = Arc::new(WorkflowExecutor::new(
            Arc::new(self.workflow_registry.clone()),
            jobs_pool.clone(),
            http_client.clone(),
        ));

        // Start daemon runner if scheduler role (daemons run as singletons)
        if roles.contains(&NodeRole::Scheduler) && !self.daemon_registry.is_empty() {
            let daemon_registry = self.daemon_registry.clone();
            let daemon_pool = jobs_pool.clone();
            let daemon_http = http_client.clone();
            let daemon_shutdown_rx = self.shutdown_tx.subscribe();

            let daemon_runner = DaemonRunner::new(
                daemon_registry,
                daemon_pool,
                daemon_http,
                node_id.as_uuid(),
                daemon_shutdown_rx,
            )
            .with_job_dispatch(job_dispatcher.clone())
            .with_workflow_dispatch(workflow_executor.clone());

            handles.push(tokio::spawn(async move {
                if let Err(e) = daemon_runner.run().await {
                    tracing::error!("Daemon runner error: {}", e);
                }
            }));

            tracing::debug!("Daemon runner started");
        }

        // Reactor handle for shutdown
        let mut reactor_handle = None;

        // Start HTTP gateway if gateway role
        if roles.contains(&NodeRole::Gateway) {
            let gateway_config = RuntimeGatewayConfig {
                port: self.config.gateway.port,
                max_connections: self.config.gateway.max_connections,
                request_timeout_secs: self.config.gateway.request_timeout_secs,
                cors_enabled: self.config.gateway.cors_enabled
                    || !self.config.gateway.cors_origins.is_empty(),
                cors_origins: self.config.gateway.cors_origins.clone(),
                auth: AuthConfig::from_forge_config(&self.config.auth)
                    .map_err(|e| ForgeError::Config(e.to_string()))?,
                mcp: self.config.mcp.clone(),
                quiet_routes: self.config.gateway.quiet_routes.clone(),
            };

            // Build gateway server (pass Database wrapper for read replica routing)
            let gateway = GatewayServer::new(
                gateway_config,
                self.function_registry.clone(),
                self.db.clone().expect("Database must be initialized"),
            )
            .with_job_dispatcher(job_dispatcher.clone())
            .with_workflow_dispatcher(workflow_executor.clone())
            .with_mcp_registry(self.mcp_registry.clone());

            // Start the reactor for real-time updates
            let reactor = gateway.reactor();
            if let Err(e) = reactor.start().await {
                tracing::error!("Failed to start reactor: {}", e);
            } else {
                tracing::debug!("Reactor started");
                reactor_handle = Some(reactor);
            }

            // Build API router (all under /_api)
            let api_router = gateway.router();

            // Build final router with API
            let mut router = Router::new().nest("/_api", api_router);

            // Mount webhook routes under /_api (bypasses gateway auth middleware)
            if !self.webhook_registry.is_empty() {
                use axum::routing::post;
                use tower_http::cors::{Any, CorsLayer};

                let webhook_state = Arc::new(
                    WebhookState::new(self.webhook_registry.clone(), pool.clone())
                        .with_job_dispatcher(job_dispatcher.clone()),
                );

                // Webhook routes need their own CORS layer since they're outside the API router.
                // Reuse gateway CORS policy rather than forcing wildcard access.
                let webhook_cors = if self.config.gateway.cors_enabled
                    || !self.config.gateway.cors_origins.is_empty()
                {
                    if self.config.gateway.cors_origins.iter().any(|o| o == "*") {
                        CorsLayer::new()
                            .allow_origin(Any)
                            .allow_methods(Any)
                            .allow_headers(Any)
                    } else {
                        let origins: Vec<_> = self
                            .config
                            .gateway
                            .cors_origins
                            .iter()
                            .filter_map(|o| o.parse().ok())
                            .collect();
                        CorsLayer::new()
                            .allow_origin(origins)
                            .allow_methods(Any)
                            .allow_headers(Any)
                    }
                } else {
                    CorsLayer::new()
                };

                let webhook_router = Router::new()
                    .route("/{*path}", post(webhook_handler).with_state(webhook_state))
                    .layer(axum::extract::DefaultBodyLimit::max(1024 * 1024))
                    .layer(
                        tower::ServiceBuilder::new()
                            .layer(axum::error_handling::HandleErrorLayer::new(
                                |err: tower::BoxError| async move {
                                    if err.is::<tower::timeout::error::Elapsed>() {
                                        return (
                                            axum::http::StatusCode::REQUEST_TIMEOUT,
                                            "Request timed out",
                                        );
                                    }
                                    (
                                        axum::http::StatusCode::SERVICE_UNAVAILABLE,
                                        "Server overloaded",
                                    )
                                },
                            ))
                            .layer(tower::limit::ConcurrencyLimitLayer::new(
                                self.config.gateway.max_connections,
                            ))
                            .layer(tower::timeout::TimeoutLayer::new(Duration::from_secs(
                                self.config.gateway.request_timeout_secs,
                            ))),
                    )
                    .layer(webhook_cors);

                router = router.nest("/_api/webhooks", webhook_router);

                tracing::debug!(
                    webhooks = ?self.webhook_registry.paths().collect::<Vec<_>>(),
                    "Webhook routes registered"
                );
            }

            // Merge custom routes before frontend fallback so they take precedence
            if let Some(custom) = self.custom_routes.take() {
                router = router.merge(custom);
                tracing::debug!("Custom routes merged");
            }

            // Add frontend handler as fallback if configured
            if let Some(handler) = self.frontend_handler {
                use axum::routing::get;
                router = router.fallback(get(handler));
                tracing::debug!("Frontend handler enabled");
            }

            let addr = gateway.addr();

            handles.push(tokio::spawn(async move {
                tracing::debug!(addr = %addr, "Gateway server binding");
                let listener = tokio::net::TcpListener::bind(addr)
                    .await
                    .expect("Failed to bind");
                if let Err(e) = axum::serve(listener, router).await {
                    tracing::error!("Gateway server error: {}", e);
                }
            }));
        }

        tracing::info!(
            queries = self.function_registry.queries().count(),
            mutations = self.function_registry.mutations().count(),
            jobs = self.job_registry.len(),
            crons = self.cron_registry.len(),
            workflows = self.workflow_registry.len(),
            daemons = self.daemon_registry.len(),
            webhooks = self.webhook_registry.len(),
            mcp_tools = self.mcp_registry.len(),
            "Functions registered"
        );

        {
            let metrics_pool = observability_pool;
            tokio::spawn(async move {
                loop {
                    tokio::time::sleep(Duration::from_secs(15)).await;
                    forge_runtime::observability::record_pool_metrics(&metrics_pool);
                }
            });
        }

        tracing::info!(
            node_id = %node_id,
            roles = ?roles,
            port = self.config.gateway.port,
            "Forge started"
        );

        // Wait for shutdown signal
        let mut shutdown_rx = self.shutdown_tx.subscribe();

        tokio::select! {
            _ = tokio::signal::ctrl_c() => {
                tracing::debug!("Received ctrl-c");
            }
            _ = shutdown_rx.recv() => {
                tracing::debug!("Received shutdown notification");
            }
        }

        // Graceful shutdown
        tracing::debug!("Graceful shutdown starting");

        // Stop workflow scheduler
        workflow_shutdown_token.cancel();

        if let Err(e) = shutdown.shutdown().await {
            tracing::warn!(error = %e, "Shutdown error");
        }

        // Stop leader election
        if let Some(ref election) = leader_election {
            election.stop();
        }

        // Stop reactor before closing database
        if let Some(ref reactor) = reactor_handle {
            reactor.stop();
        }

        // Close database connections
        if let Some(ref db) = self.db {
            db.close().await;
        }

        forge_runtime::shutdown_telemetry();
        tracing::info!("Forge stopped");
        Ok(())
    }

    /// Request shutdown.
    pub fn shutdown(&self) {
        let _ = self.shutdown_tx.send(());
    }
}

/// Builder for configuring the FORGE runtime.
pub struct ForgeBuilder {
    config: Option<ForgeConfig>,
    function_registry: FunctionRegistry,
    mcp_registry: McpToolRegistry,
    job_registry: JobRegistry,
    cron_registry: CronRegistry,
    workflow_registry: WorkflowRegistry,
    daemon_registry: DaemonRegistry,
    webhook_registry: WebhookRegistry,
    migrations_dir: PathBuf,
    extra_migrations: Vec<Migration>,
    frontend_handler: Option<FrontendHandler>,
    custom_routes: Option<Router>,
}

impl ForgeBuilder {
    /// Create a new builder.
    pub fn new() -> Self {
        Self {
            config: None,
            function_registry: FunctionRegistry::new(),
            mcp_registry: McpToolRegistry::new(),
            job_registry: JobRegistry::new(),
            cron_registry: CronRegistry::new(),
            workflow_registry: WorkflowRegistry::new(),
            daemon_registry: DaemonRegistry::new(),
            webhook_registry: WebhookRegistry::new(),
            migrations_dir: PathBuf::from("migrations"),
            extra_migrations: Vec::new(),
            frontend_handler: None,
            custom_routes: None,
        }
    }

    /// Set the directory to load migrations from.
    ///
    /// Defaults to `./migrations`. Migration files should be named like:
    /// - `0001_create_users.sql`
    /// - `0002_add_posts.sql`
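    ///
    /// A usage sketch (the `db/migrations` path is illustrative):
    ///
    /// ```ignore
    /// let builder = Forge::builder().migrations_dir("db/migrations");
    /// ```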
    pub fn migrations_dir(mut self, path: impl Into<PathBuf>) -> Self {
        self.migrations_dir = path.into();
        self
    }

    /// Add a migration programmatically.
    ///
    /// Use this for migrations that need to be generated at runtime,
    /// or for testing. For most cases, use migration files instead.
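    ///
    /// A sketch of an inline migration (the table and SQL are illustrative):
    ///
    /// ```ignore
    /// let builder = Forge::builder()
    ///     .migration("0003_add_sessions", "CREATE TABLE sessions (id UUID PRIMARY KEY)");
    /// ```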
    pub fn migration(mut self, name: impl Into<String>, sql: impl Into<String>) -> Self {
        self.extra_migrations.push(Migration::new(name, sql));
        self
    }

    /// Set a frontend handler for serving embedded SPA assets.
    ///
    /// Use with the `embedded-frontend` feature to build a single binary
    /// that includes both backend and frontend.
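    ///
    /// A wiring sketch, where `spa_handler` is a user-provided function matching
    /// [`FrontendHandler`]:
    ///
    /// ```ignore
    /// let builder = Forge::builder().frontend_handler(spa_handler);
    /// ```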
    pub fn frontend_handler(mut self, handler: FrontendHandler) -> Self {
        self.frontend_handler = Some(handler);
        self
    }

    /// Add custom axum routes to the server.
    ///
    /// Routes are merged at the top level, outside `/_api`, giving full
    /// control over headers, extractors, and response types. Avoid paths
    /// starting with `/_api` as they conflict with internal routes.
    ///
    /// ```ignore
    /// use axum::{Router, routing::get};
    ///
    /// let routes = Router::new()
    ///     .route("/custom/health", get(|| async { "ok" }));
    ///
    /// builder.custom_routes(routes);
    /// ```
    pub fn custom_routes(mut self, router: Router) -> Self {
        self.custom_routes = Some(router);
        self
    }

    /// Set the configuration.
    pub fn config(mut self, config: ForgeConfig) -> Self {
        self.config = Some(config);
        self
    }

    /// Get mutable access to the function registry.
    pub fn function_registry_mut(&mut self) -> &mut FunctionRegistry {
        &mut self.function_registry
    }

    /// Get mutable access to the job registry.
    pub fn job_registry_mut(&mut self) -> &mut JobRegistry {
        &mut self.job_registry
    }

    /// Get mutable access to the MCP tool registry.
    pub fn mcp_registry_mut(&mut self) -> &mut McpToolRegistry {
        &mut self.mcp_registry
    }

    /// Register an MCP tool without manually accessing the registry.
    pub fn register_mcp_tool<T: ForgeMcpTool>(mut self) -> Self {
        self.mcp_registry.register::<T>();
        self
    }

    /// Get mutable access to the cron registry.
    pub fn cron_registry_mut(&mut self) -> &mut CronRegistry {
        &mut self.cron_registry
    }

    /// Get mutable access to the workflow registry.
    pub fn workflow_registry_mut(&mut self) -> &mut WorkflowRegistry {
        &mut self.workflow_registry
    }

    /// Get mutable access to the daemon registry.
    pub fn daemon_registry_mut(&mut self) -> &mut DaemonRegistry {
        &mut self.daemon_registry
    }

    /// Get mutable access to the webhook registry.
    pub fn webhook_registry_mut(&mut self) -> &mut WebhookRegistry {
        &mut self.webhook_registry
    }

    /// Register a query function.
    pub fn register_query<Q: ForgeQuery>(mut self) -> Self
    where
        Q::Args: serde::de::DeserializeOwned + Send + 'static,
        Q::Output: serde::Serialize + Send + 'static,
    {
        self.function_registry.register_query::<Q>();
        self
    }

    /// Register a mutation function.
    pub fn register_mutation<M: ForgeMutation>(mut self) -> Self
    where
        M::Args: serde::de::DeserializeOwned + Send + 'static,
        M::Output: serde::Serialize + Send + 'static,
    {
        self.function_registry.register_mutation::<M>();
        self
    }

    /// Register a background job.
    pub fn register_job<J: forge_core::ForgeJob>(mut self) -> Self
    where
        J::Args: serde::de::DeserializeOwned + Send + 'static,
        J::Output: serde::Serialize + Send + 'static,
    {
        self.job_registry.register::<J>();
        self
    }

    /// Register a cron handler.
    pub fn register_cron<C: forge_core::ForgeCron>(mut self) -> Self {
        self.cron_registry.register::<C>();
        self
    }

    /// Register a workflow.
    pub fn register_workflow<W: forge_core::ForgeWorkflow>(mut self) -> Self
    where
        W::Input: serde::de::DeserializeOwned,
        W::Output: serde::Serialize,
    {
        self.workflow_registry.register::<W>();
        self
    }

    /// Register a daemon.
    pub fn register_daemon<D: forge_core::ForgeDaemon>(mut self) -> Self {
        self.daemon_registry.register::<D>();
        self
    }

    /// Register a webhook.
    pub fn register_webhook<W: forge_core::ForgeWebhook>(mut self) -> Self {
        self.webhook_registry.register::<W>();
        self
    }

    /// Build the FORGE runtime.
    pub fn build(self) -> Result<Forge> {
        let config = self
            .config
            .ok_or_else(|| ForgeError::Config("Configuration is required".to_string()))?;

        let (shutdown_tx, _) = broadcast::channel(1);

        Ok(Forge {
            config,
            db: None,
            node_id: NodeId::new(),
            function_registry: self.function_registry,
            mcp_registry: self.mcp_registry,
            job_registry: self.job_registry,
            cron_registry: Arc::new(self.cron_registry),
            workflow_registry: self.workflow_registry,
            daemon_registry: Arc::new(self.daemon_registry),
            webhook_registry: Arc::new(self.webhook_registry),
            shutdown_tx,
            migrations_dir: self.migrations_dir,
            extra_migrations: self.extra_migrations,
            frontend_handler: self.frontend_handler,
            custom_routes: self.custom_routes,
        })
    }
}

impl Default for ForgeBuilder {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(unix)]
fn get_hostname() -> String {
    nix::unistd::gethostname()
        .map(|h| h.to_string_lossy().to_string())
        .unwrap_or_else(|_| "unknown".to_string())
}

#[cfg(not(unix))]
fn get_hostname() -> String {
    std::env::var("COMPUTERNAME")
        .or_else(|_| std::env::var("HOSTNAME"))
        .unwrap_or_else(|_| "unknown".to_string())
}

/// Convert config NodeRole to cluster NodeRole.
fn config_role_to_node_role(role: &ConfigNodeRole) -> NodeRole {
    match role {
        ConfigNodeRole::Gateway => NodeRole::Gateway,
        ConfigNodeRole::Function => NodeRole::Function,
        ConfigNodeRole::Worker => NodeRole::Worker,
        ConfigNodeRole::Scheduler => NodeRole::Scheduler,
    }
}

#[cfg(test)]
#[allow(clippy::unwrap_used, clippy::indexing_slicing)]
mod tests {
    use super::*;
    use std::future::Future;
    use std::pin::Pin;

    use forge_core::mcp::{McpToolAnnotations, McpToolInfo};

    struct TestMcpTool;

    impl ForgeMcpTool for TestMcpTool {
        type Args = serde_json::Value;
        type Output = serde_json::Value;

        fn info() -> McpToolInfo {
            McpToolInfo {
                name: "test.mcp.tool",
                title: None,
                description: None,
                required_role: None,
                is_public: false,
                timeout: None,
                rate_limit_requests: None,
                rate_limit_per_secs: None,
                rate_limit_key: None,
                annotations: McpToolAnnotations::default(),
                icons: &[],
            }
        }

        fn execute(
            _ctx: &forge_core::McpToolContext,
            _args: Self::Args,
        ) -> Pin<Box<dyn Future<Output = forge_core::Result<Self::Output>> + Send + '_>> {
            Box::pin(async { Ok(serde_json::json!({ "ok": true })) })
        }
    }

    #[test]
    fn test_forge_builder_new() {
        let builder = ForgeBuilder::new();
        assert!(builder.config.is_none());
    }

    #[test]
    fn test_forge_builder_requires_config() {
        let builder = ForgeBuilder::new();
        let result = builder.build();
        assert!(result.is_err());
    }

    #[test]
    fn test_forge_builder_with_config() {
        let config = ForgeConfig::default_with_database_url("postgres://localhost/test");
        let result = ForgeBuilder::new().config(config).build();
        assert!(result.is_ok());
    }

    #[test]
    fn test_forge_builder_register_mcp_tool() {
        let builder = ForgeBuilder::new().register_mcp_tool::<TestMcpTool>();
        assert_eq!(builder.mcp_registry.len(), 1);
    }

    #[test]
    fn test_config_role_conversion() {
        assert_eq!(
            config_role_to_node_role(&ConfigNodeRole::Gateway),
            NodeRole::Gateway
        );
        assert_eq!(
            config_role_to_node_role(&ConfigNodeRole::Worker),
            NodeRole::Worker
        );
        assert_eq!(
            config_role_to_node_role(&ConfigNodeRole::Scheduler),
            NodeRole::Scheduler
        );
        assert_eq!(
            config_role_to_node_role(&ConfigNodeRole::Function),
            NodeRole::Function
        );
    }
}