Skip to main content

rustkernel_ecosystem/
common.rs

1//! Common types and utilities for ecosystem integrations
2
3use serde::{Deserialize, Serialize};
4use std::sync::Arc;
5use std::time::{Duration, Instant};
6
7/// Service configuration shared across integrations
8#[derive(Debug, Clone, Serialize, Deserialize)]
9pub struct ServiceConfig {
10    /// Service name
11    pub name: String,
12    /// Service version
13    pub version: String,
14    /// Listen address
15    pub address: String,
16    /// Port
17    pub port: u16,
18    /// Enable request logging
19    pub request_logging: bool,
20    /// Enable metrics
21    pub metrics_enabled: bool,
22    /// Default timeout
23    pub default_timeout: Duration,
24    /// Max request body size
25    pub max_body_size: usize,
26    /// Enable CORS
27    pub cors_enabled: bool,
28    /// CORS allowed origins
29    pub cors_origins: Vec<String>,
30}
31
32impl Default for ServiceConfig {
33    fn default() -> Self {
34        Self {
35            name: "rustkernels".to_string(),
36            version: env!("CARGO_PKG_VERSION").to_string(),
37            address: "0.0.0.0".to_string(),
38            port: 8080,
39            request_logging: true,
40            metrics_enabled: true,
41            default_timeout: Duration::from_secs(30),
42            max_body_size: 10 * 1024 * 1024, // 10MB
43            cors_enabled: true,
44            cors_origins: vec!["*".to_string()],
45        }
46    }
47}
48
49impl ServiceConfig {
50    /// Create development configuration
51    pub fn development() -> Self {
52        Self {
53            name: "rustkernels-dev".to_string(),
54            address: "127.0.0.1".to_string(),
55            request_logging: true,
56            ..Default::default()
57        }
58    }
59
60    /// Create production configuration
61    pub fn production() -> Self {
62        Self {
63            request_logging: false, // Use structured logging
64            cors_origins: vec![],   // Configure explicitly
65            ..Default::default()
66        }
67    }
68
69    /// Get the full bind address
70    pub fn bind_address(&self) -> String {
71        format!("{}:{}", self.address, self.port)
72    }
73}
74
/// Request context passed through the service stack.
///
/// Carries identity and tracing metadata for a single in-flight
/// request, plus its start time for latency measurement.
#[derive(Debug, Clone)]
pub struct RequestContext {
    /// Unique request ID (UUID v4).
    pub request_id: String,
    /// Instant at which the request started.
    pub start_time: Instant,
    /// Distributed-tracing trace ID, when propagated.
    pub trace_id: Option<String>,
    /// Distributed-tracing span ID, when propagated.
    pub span_id: Option<String>,
    /// Tenant ID, when the request is tenant-scoped.
    pub tenant_id: Option<String>,
    /// User ID, when the request is authenticated.
    pub user_id: Option<String>,
    /// Request path.
    pub path: String,
    /// Request HTTP method.
    pub method: String,
}
95
96impl RequestContext {
97    /// Create a new request context
98    pub fn new(path: impl Into<String>, method: impl Into<String>) -> Self {
99        Self {
100            request_id: uuid::Uuid::new_v4().to_string(),
101            start_time: Instant::now(),
102            trace_id: None,
103            span_id: None,
104            tenant_id: None,
105            user_id: None,
106            path: path.into(),
107            method: method.into(),
108        }
109    }
110
111    /// Set trace ID from header
112    pub fn with_trace_id(mut self, trace_id: impl Into<String>) -> Self {
113        self.trace_id = Some(trace_id.into());
114        self
115    }
116
117    /// Set tenant ID
118    pub fn with_tenant_id(mut self, tenant_id: impl Into<String>) -> Self {
119        self.tenant_id = Some(tenant_id.into());
120        self
121    }
122
123    /// Set user ID
124    pub fn with_user_id(mut self, user_id: impl Into<String>) -> Self {
125        self.user_id = Some(user_id.into());
126        self
127    }
128
129    /// Get elapsed time since request start
130    pub fn elapsed(&self) -> Duration {
131        self.start_time.elapsed()
132    }
133
134    /// Get elapsed time in microseconds
135    pub fn elapsed_us(&self) -> u64 {
136        self.start_time.elapsed().as_micros() as u64
137    }
138}
139
140/// Rate limiter configuration
141#[derive(Debug, Clone, Serialize, Deserialize)]
142pub struct RateLimitConfig {
143    /// Enable rate limiting
144    pub enabled: bool,
145    /// Requests per second
146    pub requests_per_second: u32,
147    /// Burst size
148    pub burst_size: u32,
149    /// Per-tenant rate limiting
150    pub per_tenant: bool,
151}
152
153impl Default for RateLimitConfig {
154    fn default() -> Self {
155        Self {
156            enabled: true,
157            requests_per_second: 100,
158            burst_size: 200,
159            per_tenant: true,
160        }
161    }
162}
163
/// Metrics collector for service endpoints.
///
/// All fields are lock-free atomic counters so handlers can record
/// through a shared reference without synchronization.
pub struct ServiceMetrics {
    /// Count of all requests observed.
    total_requests: std::sync::atomic::AtomicU64,
    /// Count of requests that ended in error.
    total_errors: std::sync::atomic::AtomicU64,
    /// Sum of all observed latencies, in microseconds.
    total_latency_us: std::sync::atomic::AtomicU64,
    /// Smallest observed latency in microseconds (u64::MAX until first request).
    min_latency_us: std::sync::atomic::AtomicU64,
    /// Largest observed latency, in microseconds.
    max_latency_us: std::sync::atomic::AtomicU64,
}
177
178impl ServiceMetrics {
179    /// Create new metrics
180    pub fn new() -> Arc<Self> {
181        Arc::new(Self {
182            total_requests: std::sync::atomic::AtomicU64::new(0),
183            total_errors: std::sync::atomic::AtomicU64::new(0),
184            total_latency_us: std::sync::atomic::AtomicU64::new(0),
185            min_latency_us: std::sync::atomic::AtomicU64::new(u64::MAX),
186            max_latency_us: std::sync::atomic::AtomicU64::new(0),
187        })
188    }
189
190    /// Record a request
191    pub fn record_request(&self, latency_us: u64, is_error: bool) {
192        use std::sync::atomic::Ordering;
193        self.total_requests.fetch_add(1, Ordering::Relaxed);
194        self.total_latency_us
195            .fetch_add(latency_us, Ordering::Relaxed);
196        if is_error {
197            self.total_errors.fetch_add(1, Ordering::Relaxed);
198        }
199        // Update min latency
200        self.min_latency_us.fetch_min(latency_us, Ordering::Relaxed);
201        // Update max latency
202        self.max_latency_us.fetch_max(latency_us, Ordering::Relaxed);
203    }
204
205    /// Get request count
206    pub fn request_count(&self) -> u64 {
207        self.total_requests
208            .load(std::sync::atomic::Ordering::Relaxed)
209    }
210
211    /// Get error count
212    pub fn error_count(&self) -> u64 {
213        self.total_errors.load(std::sync::atomic::Ordering::Relaxed)
214    }
215
216    /// Get average latency in microseconds
217    pub fn avg_latency_us(&self) -> f64 {
218        use std::sync::atomic::Ordering;
219        let total = self.total_latency_us.load(Ordering::Relaxed) as f64;
220        let count = self.total_requests.load(Ordering::Relaxed) as f64;
221        if count > 0.0 { total / count } else { 0.0 }
222    }
223
224    /// Get minimum latency in microseconds (returns 0 if no requests)
225    pub fn min_latency_us(&self) -> u64 {
226        let val = self
227            .min_latency_us
228            .load(std::sync::atomic::Ordering::Relaxed);
229        if val == u64::MAX { 0 } else { val }
230    }
231
232    /// Get maximum latency in microseconds
233    pub fn max_latency_us(&self) -> u64 {
234        self.max_latency_us
235            .load(std::sync::atomic::Ordering::Relaxed)
236    }
237}
238
239impl Default for ServiceMetrics {
240    fn default() -> Self {
241        Self {
242            total_requests: std::sync::atomic::AtomicU64::new(0),
243            total_errors: std::sync::atomic::AtomicU64::new(0),
244            total_latency_us: std::sync::atomic::AtomicU64::new(0),
245            min_latency_us: std::sync::atomic::AtomicU64::new(u64::MAX),
246            max_latency_us: std::sync::atomic::AtomicU64::new(0),
247        }
248    }
249}
250
/// Standard API paths exposed by every integration.
pub mod paths {
    // Health and observability endpoints.

    /// Aggregate health check.
    pub const HEALTH: &str = "/health";
    /// Kubernetes-style liveness probe.
    pub const LIVENESS: &str = "/health/live";
    /// Kubernetes-style readiness probe.
    pub const READINESS: &str = "/health/ready";
    /// Prometheus-style metrics scrape endpoint.
    pub const METRICS: &str = "/metrics";

    // Kernel API endpoints (`:kernel_id` is a path parameter).

    /// Execute a kernel by ID.
    pub const KERNEL_EXECUTE: &str = "/api/v1/kernels/:kernel_id/execute";
    /// List all available kernels.
    pub const KERNEL_LIST: &str = "/api/v1/kernels";
    /// Fetch metadata for one kernel.
    pub const KERNEL_INFO: &str = "/api/v1/kernels/:kernel_id";
}
268
/// Standard HTTP header names used across integrations.
pub mod headers {
    /// Per-request correlation ID.
    pub const X_REQUEST_ID: &str = "X-Request-ID";
    /// W3C Trace Context header carrying trace/span IDs.
    pub const TRACEPARENT: &str = "traceparent";
    /// Tenant identifier for multi-tenant routing.
    pub const X_TENANT_ID: &str = "X-Tenant-ID";
    /// API key for authentication.
    pub const X_API_KEY: &str = "X-API-Key";
}
280
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_service_config() {
        // Default config binds all interfaces on port 8080.
        assert_eq!(ServiceConfig::default().bind_address(), "0.0.0.0:8080");
    }

    #[test]
    fn test_request_context() {
        let context = RequestContext::new("/api/v1/kernels", "POST")
            .with_tenant_id("tenant-123")
            .with_user_id("user-456");

        assert!(!context.request_id.is_empty());
        assert_eq!(context.tenant_id.as_deref(), Some("tenant-123"));
        assert_eq!(context.user_id.as_deref(), Some("user-456"));
    }

    #[test]
    fn test_service_metrics() {
        let metrics = ServiceMetrics::new();

        // Before any request, the extremes report zero.
        assert_eq!(metrics.min_latency_us(), 0);
        assert_eq!(metrics.max_latency_us(), 0);

        for (latency_us, failed) in [(1000, false), (2000, false), (3000, true)] {
            metrics.record_request(latency_us, failed);
        }

        assert_eq!(metrics.request_count(), 3);
        assert_eq!(metrics.error_count(), 1);
        assert!((metrics.avg_latency_us() - 2000.0).abs() < 0.1);
        assert_eq!(metrics.min_latency_us(), 1000);
        assert_eq!(metrics.max_latency_us(), 3000);
    }
}