// forge_runtime/gateway/server.rs — Gateway HTTP server.

1use std::sync::Arc;
2use std::time::Duration;
3
4use axum::{
5    Json, Router,
6    error_handling::HandleErrorLayer,
7    extract::DefaultBodyLimit,
8    http::StatusCode,
9    middleware,
10    response::IntoResponse,
11    routing::{get, post},
12};
13use serde::Serialize;
14use tower::BoxError;
15use tower::ServiceBuilder;
16use tower::limit::ConcurrencyLimitLayer;
17use tower::timeout::TimeoutLayer;
18use tower_http::cors::{Any, CorsLayer};
19
20use forge_core::cluster::NodeId;
21use forge_core::config::McpConfig;
22use forge_core::function::{JobDispatch, WorkflowDispatch};
23use opentelemetry::global;
24use opentelemetry::propagation::Extractor;
25use tracing::Instrument;
26use tracing_opentelemetry::OpenTelemetrySpanExt;
27
28use super::auth::{AuthConfig, AuthMiddleware, auth_middleware};
29use super::mcp::{McpState, mcp_get_handler, mcp_post_handler};
30use super::multipart::rpc_multipart_handler;
31use super::response::{RpcError, RpcResponse};
32use super::rpc::{RpcHandler, rpc_function_handler, rpc_handler};
33use super::sse::{
34    SseState, sse_handler, sse_job_subscribe_handler, sse_subscribe_handler,
35    sse_unsubscribe_handler, sse_workflow_subscribe_handler,
36};
37use super::tracing::{REQUEST_ID_HEADER, SPAN_ID_HEADER, TRACE_ID_HEADER, TracingState};
38use crate::db::Database;
39use crate::function::FunctionRegistry;
40use crate::mcp::McpToolRegistry;
41use crate::realtime::{Reactor, ReactorConfig};
42
/// Maximum accepted JSON request body (1 MiB); enforced via
/// `DefaultBodyLimit` on the `/rpc` routes to keep oversized payloads
/// from exhausting memory.
const MAX_JSON_BODY_SIZE: usize = 1024 * 1024;
/// Maximum accepted multipart upload body (20 MiB); enforced on the
/// `/rpc/{function}/upload` route.
const MAX_MULTIPART_BODY_SIZE: usize = 20 * 1024 * 1024;
/// Cap on concurrent multipart uploads; each request buffers its payload
/// in memory, so this bounds worst-case memory use.
const MAX_MULTIPART_CONCURRENCY: usize = 32;
46
/// Gateway server configuration.
#[derive(Debug, Clone)]
pub struct GatewayConfig {
    /// Port to listen on (the server binds `0.0.0.0:port`).
    pub port: u16,
    /// Maximum number of concurrent in-flight requests, enforced by a
    /// `ConcurrencyLimitLayer` in the middleware stack.
    pub max_connections: usize,
    /// Request timeout in seconds; requests that exceed it receive a
    /// 408 JSON error from the middleware error handler.
    pub request_timeout_secs: u64,
    /// Enable CORS.
    pub cors_enabled: bool,
    /// Allowed CORS origins. A literal `"*"` entry allows any origin;
    /// entries that fail to parse as header values are silently dropped.
    pub cors_origins: Vec<String>,
    /// Authentication configuration.
    pub auth: AuthConfig,
    /// MCP configuration.
    pub mcp: McpConfig,
    /// Routes excluded from request logs, metrics, and traces.
    pub quiet_routes: Vec<String>,
}
67
impl Default for GatewayConfig {
    /// Defaults: port 8080, 512 concurrent requests, 30 s timeout,
    /// CORS disabled, default auth/MCP settings, and no quiet routes.
    fn default() -> Self {
        Self {
            port: 8080,
            max_connections: 512,
            request_timeout_secs: 30,
            cors_enabled: false,
            cors_origins: Vec::new(),
            auth: AuthConfig::default(),
            mcp: McpConfig::default(),
            quiet_routes: Vec::new(),
        }
    }
}
82
/// Health check response (liveness probe payload).
#[derive(Debug, Serialize)]
pub struct HealthResponse {
    /// Always `"healthy"` when the process can respond at all.
    pub status: String,
    /// Crate version, taken from `CARGO_PKG_VERSION` at compile time.
    pub version: String,
}
89
/// Readiness check response (readiness probe payload).
#[derive(Debug, Serialize)]
pub struct ReadinessResponse {
    /// True only when both `database` and `reactor` are healthy.
    pub ready: bool,
    /// Database connectivity: a `SELECT 1` round-trip succeeded.
    pub database: bool,
    /// Reactor health: its change listener is running.
    pub reactor: bool,
    /// Crate version, taken from `CARGO_PKG_VERSION` at compile time.
    pub version: String,
}
98
/// State for readiness check.
#[derive(Clone)]
pub struct ReadinessState {
    // Primary DB pool; the readiness handler probes it with `SELECT 1`.
    db_pool: sqlx::PgPool,
    // Reactor whose change-listener status feeds the readiness check.
    reactor: Arc<Reactor>,
}
105
/// Gateway HTTP server.
///
/// Owns the routing and middleware configuration plus the real-time
/// [`Reactor`]; construct with [`GatewayServer::new`] and the `with_*`
/// builder methods, then call `run`.
pub struct GatewayServer {
    // Server configuration (ports, limits, CORS, auth, MCP, quiet routes).
    config: GatewayConfig,
    // Registry of callable functions served over `/rpc`.
    registry: FunctionRegistry,
    // Database handle providing primary and read pools.
    db: Database,
    // Real-time change reactor backing the SSE endpoints.
    reactor: Arc<Reactor>,
    // Optional dispatcher for background jobs (None until configured).
    job_dispatcher: Option<Arc<dyn JobDispatch>>,
    // Optional dispatcher for workflows (None until configured).
    workflow_dispatcher: Option<Arc<dyn WorkflowDispatch>>,
    // Optional MCP tool registry; a default one is used if MCP is enabled
    // without an explicit registry.
    mcp_registry: Option<McpToolRegistry>,
}
116
117impl GatewayServer {
118    /// Create a new gateway server.
119    pub fn new(config: GatewayConfig, registry: FunctionRegistry, db: Database) -> Self {
120        let node_id = NodeId::new();
121        let reactor = Arc::new(Reactor::new(
122            node_id,
123            db.read_pool().clone(),
124            registry.clone(),
125            ReactorConfig::default(),
126        ));
127
128        Self {
129            config,
130            registry,
131            db,
132            reactor,
133            job_dispatcher: None,
134            workflow_dispatcher: None,
135            mcp_registry: None,
136        }
137    }
138
139    /// Set the job dispatcher.
140    pub fn with_job_dispatcher(mut self, dispatcher: Arc<dyn JobDispatch>) -> Self {
141        self.job_dispatcher = Some(dispatcher);
142        self
143    }
144
145    /// Set the workflow dispatcher.
146    pub fn with_workflow_dispatcher(mut self, dispatcher: Arc<dyn WorkflowDispatch>) -> Self {
147        self.workflow_dispatcher = Some(dispatcher);
148        self
149    }
150
151    /// Set the MCP tool registry.
152    pub fn with_mcp_registry(mut self, registry: McpToolRegistry) -> Self {
153        self.mcp_registry = Some(registry);
154        self
155    }
156
157    /// Get a reference to the reactor.
158    pub fn reactor(&self) -> Arc<Reactor> {
159        self.reactor.clone()
160    }
161
162    /// Build the Axum router.
163    pub fn router(&self) -> Router {
164        let rpc_handler_state = Arc::new(RpcHandler::with_dispatch(
165            self.registry.clone(),
166            self.db.clone(),
167            self.job_dispatcher.clone(),
168            self.workflow_dispatcher.clone(),
169        ));
170
171        let auth_middleware_state = Arc::new(AuthMiddleware::new(self.config.auth.clone()));
172
173        // Build CORS layer
174        let cors = if self.config.cors_enabled {
175            if self.config.cors_origins.iter().any(|o| o == "*") {
176                CorsLayer::new()
177                    .allow_origin(Any)
178                    .allow_methods(Any)
179                    .allow_headers(Any)
180            } else {
181                let origins: Vec<_> = self
182                    .config
183                    .cors_origins
184                    .iter()
185                    .filter_map(|o| o.parse().ok())
186                    .collect();
187                CorsLayer::new()
188                    .allow_origin(origins)
189                    .allow_methods(Any)
190                    .allow_headers(Any)
191            }
192        } else {
193            CorsLayer::new()
194        };
195
196        // SSE state for Server-Sent Events
197        let sse_state = Arc::new(SseState::new(
198            self.reactor.clone(),
199            auth_middleware_state.clone(),
200        ));
201
202        // Readiness state for DB + reactor health check
203        let readiness_state = Arc::new(ReadinessState {
204            db_pool: self.db.primary().clone(),
205            reactor: self.reactor.clone(),
206        });
207
208        // Build the main router with middleware
209        let mut main_router = Router::new()
210            // Health check endpoint (liveness)
211            .route("/health", get(health_handler))
212            // Readiness check endpoint (checks DB)
213            .route("/ready", get(readiness_handler).with_state(readiness_state))
214            // RPC endpoint
215            .route("/rpc", post(rpc_handler))
216            // REST-style function endpoint (JSON)
217            .route("/rpc/{function}", post(rpc_function_handler))
218            // Prevent oversized JSON payloads from exhausting memory.
219            .layer(DefaultBodyLimit::max(MAX_JSON_BODY_SIZE))
220            // Add state
221            .with_state(rpc_handler_state.clone());
222
223        // Multipart RPC router (separate state needed for multipart)
224        let multipart_router = Router::new()
225            .route("/rpc/{function}/upload", post(rpc_multipart_handler))
226            .layer(DefaultBodyLimit::max(MAX_MULTIPART_BODY_SIZE))
227            // Cap upload fan-out; each request buffers data in memory.
228            .layer(ConcurrencyLimitLayer::new(MAX_MULTIPART_CONCURRENCY))
229            .with_state(rpc_handler_state);
230
231        // SSE router
232        let sse_router = Router::new()
233            .route("/events", get(sse_handler))
234            .route("/subscribe", post(sse_subscribe_handler))
235            .route("/unsubscribe", post(sse_unsubscribe_handler))
236            .route("/subscribe-job", post(sse_job_subscribe_handler))
237            .route("/subscribe-workflow", post(sse_workflow_subscribe_handler))
238            .with_state(sse_state);
239
240        let mut mcp_router = Router::new();
241        if self.config.mcp.enabled {
242            let path = self.config.mcp.path.clone();
243            let mcp_state = Arc::new(McpState::new(
244                self.config.mcp.clone(),
245                self.mcp_registry.clone().unwrap_or_default(),
246                self.db.primary().clone(),
247                self.job_dispatcher.clone(),
248                self.workflow_dispatcher.clone(),
249            ));
250            mcp_router = mcp_router.route(
251                &path,
252                post(mcp_post_handler)
253                    .get(mcp_get_handler)
254                    .with_state(mcp_state),
255            );
256        }
257
258        main_router = main_router
259            .merge(multipart_router)
260            .merge(sse_router)
261            .merge(mcp_router);
262
263        // Build middleware stack
264        let service_builder = ServiceBuilder::new()
265            .layer(HandleErrorLayer::new(handle_middleware_error))
266            .layer(ConcurrencyLimitLayer::new(self.config.max_connections))
267            .layer(TimeoutLayer::new(Duration::from_secs(
268                self.config.request_timeout_secs,
269            )))
270            .layer(cors.clone())
271            .layer(middleware::from_fn_with_state(
272                auth_middleware_state,
273                auth_middleware,
274            ))
275            .layer(middleware::from_fn_with_state(
276                Arc::new(self.config.quiet_routes.clone()),
277                tracing_middleware,
278            ));
279
280        // Apply the remaining middleware layers
281        main_router.layer(service_builder)
282    }
283
284    /// Get the socket address to bind to.
285    pub fn addr(&self) -> std::net::SocketAddr {
286        std::net::SocketAddr::from(([0, 0, 0, 0], self.config.port))
287    }
288
289    /// Run the server (blocking).
290    pub async fn run(self) -> Result<(), std::io::Error> {
291        let addr = self.addr();
292        let router = self.router();
293
294        // Start the reactor for real-time updates
295        self.reactor
296            .start()
297            .await
298            .map_err(|e| std::io::Error::other(format!("Failed to start reactor: {}", e)))?;
299        tracing::info!("Reactor started for real-time updates");
300
301        tracing::info!("Gateway server listening on {}", addr);
302
303        let listener = tokio::net::TcpListener::bind(addr).await?;
304        axum::serve(listener, router.into_make_service()).await
305    }
306}
307
308/// Health check handler (liveness probe).
309async fn health_handler() -> Json<HealthResponse> {
310    Json(HealthResponse {
311        status: "healthy".to_string(),
312        version: env!("CARGO_PKG_VERSION").to_string(),
313    })
314}
315
316/// Readiness check handler (readiness probe).
317async fn readiness_handler(
318    axum::extract::State(state): axum::extract::State<Arc<ReadinessState>>,
319) -> (axum::http::StatusCode, Json<ReadinessResponse>) {
320    // Check database connectivity
321    let db_ok = sqlx::query("SELECT 1")
322        .fetch_one(&state.db_pool)
323        .await
324        .is_ok();
325
326    // Check reactor health (change listener must be running for real-time updates)
327    let reactor_stats = state.reactor.stats().await;
328    let reactor_ok = reactor_stats.listener_running;
329
330    let ready = db_ok && reactor_ok;
331    let status_code = if ready {
332        axum::http::StatusCode::OK
333    } else {
334        axum::http::StatusCode::SERVICE_UNAVAILABLE
335    };
336
337    (
338        status_code,
339        Json(ReadinessResponse {
340            ready,
341            database: db_ok,
342            reactor: reactor_ok,
343            version: env!("CARGO_PKG_VERSION").to_string(),
344        }),
345    )
346}
347
348async fn handle_middleware_error(err: BoxError) -> axum::response::Response {
349    let (status, code, message) = if err.is::<tower::timeout::error::Elapsed>() {
350        (StatusCode::REQUEST_TIMEOUT, "TIMEOUT", "Request timed out")
351    } else {
352        (
353            StatusCode::SERVICE_UNAVAILABLE,
354            "SERVICE_UNAVAILABLE",
355            "Server overloaded",
356        )
357    };
358    (
359        status,
360        Json(RpcResponse::error(RpcError::new(code, message))),
361    )
362        .into_response()
363}
364
365fn set_tracing_headers(response: &mut axum::response::Response, trace_id: &str, request_id: &str) {
366    if let Ok(val) = trace_id.parse() {
367        response.headers_mut().insert(TRACE_ID_HEADER, val);
368    }
369    if let Ok(val) = request_id.parse() {
370        response.headers_mut().insert(REQUEST_ID_HEADER, val);
371    }
372}
373
374/// Extracts W3C traceparent context from HTTP headers.
375struct HeaderExtractor<'a>(&'a axum::http::HeaderMap);
376
377impl<'a> Extractor for HeaderExtractor<'a> {
378    fn get(&self, key: &str) -> Option<&str> {
379        self.0.get(key).and_then(|v| v.to_str().ok())
380    }
381
382    fn keys(&self) -> Vec<&str> {
383        self.0.keys().map(|k| k.as_str()).collect()
384    }
385}
386
/// Wraps each request in a span with HTTP semantics and OpenTelemetry
/// context propagation. Incoming `traceparent` headers are extracted so
/// that spans join the caller's distributed trace.
/// Quiet routes skip spans, logs, and metrics to avoid noise from
/// probes or high-frequency internal endpoints.
///
/// Also guarantees downstream handlers find a `TracingState` and an
/// `AuthContext` in the request extensions, and mirrors the trace and
/// request IDs onto the response headers.
async fn tracing_middleware(
    axum::extract::State(quiet_routes): axum::extract::State<Arc<Vec<String>>>,
    req: axum::extract::Request,
    next: axum::middleware::Next,
) -> axum::response::Response {
    let headers = req.headers();

    // Extract W3C traceparent from incoming headers for distributed tracing
    let parent_cx =
        global::get_text_map_propagator(|propagator| propagator.extract(&HeaderExtractor(headers)));

    // Honor a caller-supplied trace ID header; otherwise mint a fresh UUID
    // so every request is correlatable.
    let trace_id = headers
        .get(TRACE_ID_HEADER)
        .and_then(|v| v.to_str().ok())
        .map(String::from)
        .unwrap_or_else(|| uuid::Uuid::new_v4().to_string());

    let parent_span_id = headers
        .get(SPAN_ID_HEADER)
        .and_then(|v| v.to_str().ok())
        .map(String::from);

    // Capture method/path up front: `req` is moved into `next.run` below.
    let method = req.method().to_string();
    let path = req.uri().path().to_string();

    let mut tracing_state = TracingState::with_trace_id(trace_id.clone());
    if let Some(span_id) = parent_span_id {
        tracing_state = tracing_state.with_parent_span(span_id);
    }

    // Make the tracing state available to downstream handlers via extensions.
    let mut req = req;
    req.extensions_mut().insert(tracing_state.clone());

    // Ensure an AuthContext extension always exists; default to
    // unauthenticated when nothing upstream has set one.
    // NOTE(review): whether the auth middleware runs before this one depends
    // on the layer ordering in `router()` — confirm if relying on it here.
    if req
        .extensions()
        .get::<forge_core::function::AuthContext>()
        .is_none()
    {
        req.extensions_mut()
            .insert(forge_core::function::AuthContext::unauthenticated());
    }

    // Config uses full paths (/_api/health) but axum strips the prefix
    // for nested routers, so the middleware sees /health not /_api/health.
    let full_path = format!("/_api{}", path);
    let is_quiet = quiet_routes.iter().any(|r| *r == full_path || *r == path);

    // Quiet routes still get correlation headers, but no span/log/metrics.
    if is_quiet {
        let mut response = next.run(req).await;
        set_tracing_headers(&mut response, &trace_id, &tracing_state.request_id);
        return response;
    }

    let span = tracing::info_span!(
        "http.request",
        http.method = %method,
        http.route = %path,
        http.status_code = tracing::field::Empty,
        trace_id = %trace_id,
        request_id = %tracing_state.request_id,
    );

    // Link this span to the incoming distributed trace context so
    // fn.execute and all downstream spans share the caller's trace ID
    span.set_parent(parent_cx);

    let mut response = next.run(req).instrument(span.clone()).await;

    let status = response.status().as_u16();
    let elapsed = tracing_state.elapsed();

    // Fill in the status field that was left Empty at span creation.
    span.record("http.status_code", status);
    // RPC calls already log at info via fn.execute with richer context
    if path.starts_with("/rpc") {
        tracing::debug!(parent: &span, duration_ms = elapsed.as_millis() as u64, "Request completed");
    } else {
        tracing::info!(parent: &span, duration_ms = elapsed.as_millis() as u64, "Request completed");
    }
    crate::observability::record_http_request(&method, &path, status, elapsed.as_secs_f64());

    set_tracing_headers(&mut response, &trace_id, &tracing_state.request_id);
    response
}
475
#[cfg(test)]
#[allow(clippy::unwrap_used, clippy::indexing_slicing, clippy::panic)]
mod tests {
    use super::*;

    /// Defaults must match the documented out-of-the-box configuration.
    #[test]
    fn test_gateway_config_default() {
        let cfg = GatewayConfig::default();
        assert_eq!(cfg.port, 8080);
        assert_eq!(cfg.max_connections, 512);
        assert!(!cfg.cors_enabled);
    }

    /// The health payload serializes its status string verbatim.
    #[test]
    fn test_health_response_serialization() {
        let response = HealthResponse {
            status: String::from("healthy"),
            version: String::from("0.1.0"),
        };
        let encoded = serde_json::to_string(&response).unwrap();
        assert!(encoded.contains("healthy"));
    }
}