// mockforge_http/handlers/contract_health.rs

1//! Unified contract health timeline
2//!
3//! This module provides a unified endpoint that combines:
4//! - Structural drift incidents
5//! - Semantic drift incidents
6//! - Threat assessments
7//! - Forecast predictions
8
9use axum::{
10    extract::{Query, State},
11    http::StatusCode,
12    response::Json,
13};
14use chrono::{DateTime, Utc};
15use serde::{Deserialize, Serialize};
16use std::sync::Arc;
17
18/// State for contract health handlers
19#[derive(Clone)]
20pub struct ContractHealthState {
21    /// Incident manager for structural incidents
22    pub incident_manager: Arc<mockforge_core::incidents::IncidentManager>,
23    /// Semantic incident manager
24    pub semantic_manager: Arc<mockforge_core::incidents::SemanticIncidentManager>,
25    /// Database connection (optional)
26    pub database: Option<crate::database::Database>,
27}
28
29/// Query parameters for timeline
30#[derive(Debug, Deserialize)]
31pub struct TimelineQuery {
32    /// Workspace ID filter
33    pub workspace_id: Option<String>,
34    /// Endpoint filter
35    pub endpoint: Option<String>,
36    /// Method filter
37    pub method: Option<String>,
38    /// Start date (ISO 8601)
39    pub start_date: Option<String>,
40    /// End date (ISO 8601)
41    pub end_date: Option<String>,
42    /// Filter by type: "structural", "semantic", "threat", "forecast", or "all"
43    pub event_type: Option<String>,
44    /// Limit results
45    pub limit: Option<usize>,
46}
47
48/// Timeline event
49#[derive(Debug, Clone, Serialize, Deserialize)]
50#[serde(tag = "type")]
51pub enum TimelineEvent {
52    /// Structural drift incident
53    #[serde(rename = "structural_drift")]
54    StructuralDrift {
55        /// Incident ID
56        id: String,
57        /// Endpoint path
58        endpoint: String,
59        /// HTTP method
60        method: String,
61        /// Type of incident
62        incident_type: String,
63        /// Severity level
64        severity: String,
65        /// Current status
66        status: String,
67        /// Detection timestamp
68        detected_at: i64,
69        /// Additional details
70        details: serde_json::Value,
71    },
72    /// Semantic drift incident
73    #[serde(rename = "semantic_drift")]
74    SemanticDrift {
75        /// Incident ID
76        id: String,
77        /// Endpoint path
78        endpoint: String,
79        /// HTTP method
80        method: String,
81        /// Type of semantic change
82        change_type: String,
83        /// Severity level
84        severity: String,
85        /// Current status
86        status: String,
87        /// Semantic confidence score
88        semantic_confidence: f64,
89        /// Soft-breaking score
90        soft_breaking_score: f64,
91        /// Detection timestamp
92        detected_at: i64,
93        /// Additional details
94        details: serde_json::Value,
95    },
96    /// Threat assessment
97    #[serde(rename = "threat_assessment")]
98    ThreatAssessment {
99        /// Assessment ID
100        id: String,
101        /// Endpoint path (optional for workspace-level)
102        endpoint: Option<String>,
103        /// HTTP method (optional for workspace-level)
104        method: Option<String>,
105        /// Threat level
106        threat_level: String,
107        /// Threat score (0.0-1.0)
108        threat_score: f64,
109        /// Assessment timestamp
110        assessed_at: i64,
111        /// Number of findings
112        findings_count: usize,
113    },
114    /// Forecast prediction
115    #[serde(rename = "forecast")]
116    Forecast {
117        /// Forecast ID
118        id: String,
119        /// Endpoint path
120        endpoint: String,
121        /// HTTP method
122        method: String,
123        /// Forecast window in days
124        window_days: u32,
125        /// Change probability (0.0-1.0)
126        change_probability: f64,
127        /// Break probability (0.0-1.0)
128        break_probability: f64,
129        /// Next expected change timestamp (optional)
130        next_expected_change: Option<i64>,
131        /// Confidence score (0.0-1.0)
132        confidence: f64,
133        /// Prediction timestamp
134        predicted_at: i64,
135    },
136}
137
138/// Timeline response
139#[derive(Debug, Serialize)]
140pub struct TimelineResponse {
141    /// Timeline events
142    pub events: Vec<TimelineEvent>,
143    /// Total count
144    pub total: usize,
145}
146
147/// Get unified contract health timeline
148///
149/// GET /api/v1/contract-health/timeline
150pub async fn get_timeline(
151    State(state): State<ContractHealthState>,
152    Query(params): Query<TimelineQuery>,
153) -> Result<Json<TimelineResponse>, StatusCode> {
154    let mut events = Vec::new();
155
156    let event_type_filter = params.event_type.as_deref().unwrap_or("all");
157
158    // Get structural drift incidents
159    if event_type_filter == "all" || event_type_filter == "structural" {
160        let mut query = mockforge_core::incidents::types::IncidentQuery::default();
161        query.workspace_id = params.workspace_id.clone();
162        query.endpoint = params.endpoint.clone();
163        query.method = params.method.clone();
164
165        let incidents = state.incident_manager.query_incidents(query).await;
166        for incident in incidents {
167            events.push(TimelineEvent::StructuralDrift {
168                id: incident.id,
169                endpoint: incident.endpoint,
170                method: incident.method,
171                incident_type: format!("{:?}", incident.incident_type),
172                severity: format!("{:?}", incident.severity),
173                status: format!("{:?}", incident.status),
174                detected_at: incident.detected_at,
175                details: incident.details,
176            });
177        }
178    }
179
180    // Get semantic drift incidents
181    if event_type_filter == "all" || event_type_filter == "semantic" {
182        let status = None; // Get all statuses for timeline
183        let semantic_incidents = state
184            .semantic_manager
185            .list_incidents(
186                params.workspace_id.as_deref(),
187                params.endpoint.as_deref(),
188                params.method.as_deref(),
189                status,
190                params.limit,
191            )
192            .await;
193
194        for incident in semantic_incidents {
195            events.push(TimelineEvent::SemanticDrift {
196                id: incident.id,
197                endpoint: incident.endpoint,
198                method: incident.method,
199                change_type: format!("{:?}", incident.semantic_change_type),
200                severity: format!("{:?}", incident.severity),
201                status: format!("{:?}", incident.status),
202                semantic_confidence: incident.semantic_confidence,
203                soft_breaking_score: incident.soft_breaking_score,
204                detected_at: incident.detected_at,
205                details: incident.details,
206            });
207        }
208    }
209
210    // Add threat assessments and forecasts from database
211    #[cfg(feature = "database")]
212    {
213        use sqlx::Row;
214        if let Some(pool) = state.database.as_ref().and_then(|db| db.pool()) {
215        // Query threat assessments
216        if let Ok(ta_rows) = sqlx::query(
217            "SELECT id, workspace_id, service_id, service_name, endpoint, method, aggregation_level,
218             threat_level, threat_score, threat_categories, findings, remediation_suggestions, assessed_at
219             FROM contract_threat_assessments
220             WHERE workspace_id = $1 OR workspace_id IS NULL
221             ORDER BY assessed_at DESC LIMIT 50"
222        )
223        .bind(params.workspace_id.as_deref())
224        .fetch_all(pool)
225        .await
226        {
227            use mockforge_core::contract_drift::threat_modeling::{ThreatLevel, AggregationLevel};
228            for row in ta_rows {
229                let id: uuid::Uuid = match row.try_get("id") {
230                    Ok(id) => id,
231                    Err(_) => continue,
232                };
233                let threat_level_str: String = match row.try_get("threat_level") {
234                    Ok(s) => s,
235                    Err(_) => continue,
236                };
237                let threat_score: f64 = match row.try_get("threat_score") {
238                    Ok(s) => s,
239                    Err(_) => continue,
240                };
241                let assessed_at: DateTime<Utc> = match row.try_get("assessed_at") {
242                    Ok(dt) => dt,
243                    Err(_) => continue,
244                };
245                let endpoint: Option<String> = row.try_get("endpoint").ok();
246                let method: Option<String> = row.try_get("method").ok();
247
248                let threat_level = match threat_level_str.as_str() {
249                    "low" => ThreatLevel::Low,
250                    "medium" => ThreatLevel::Medium,
251                    "high" => ThreatLevel::High,
252                    "critical" => ThreatLevel::Critical,
253                    _ => continue,
254                };
255
256                // Count findings from the findings JSON field
257                let findings_count = row.try_get::<serde_json::Value, _>("findings")
258                    .ok()
259                    .and_then(|v| v.as_array().map(|arr| arr.len()))
260                    .unwrap_or(0);
261
262                events.push(TimelineEvent::ThreatAssessment {
263                    id: id.to_string(),
264                    endpoint,
265                    method,
266                    threat_level: format!("{:?}", threat_level),
267                    threat_score,
268                    assessed_at: assessed_at.timestamp(),
269                    findings_count,
270                });
271            }
272        }
273
274        // Query forecasts
275        if let Ok(forecast_rows) = sqlx::query(
276            "SELECT id, service_id, service_name, endpoint, method, forecast_window_days,
277             predicted_change_probability, predicted_break_probability, next_expected_change_date,
278             confidence, predicted_at
279             FROM api_change_forecasts
280             WHERE workspace_id = $1 OR workspace_id IS NULL
281             ORDER BY predicted_at DESC LIMIT 50",
282        )
283        .bind(params.workspace_id.as_deref())
284        .fetch_all(pool)
285        .await
286        {
287            use sqlx::Row;
288            for row in forecast_rows {
289                let id: uuid::Uuid = match row.try_get("id") {
290                    Ok(id) => id,
291                    Err(_) => continue,
292                };
293                let endpoint: String = match row.try_get("endpoint") {
294                    Ok(e) => e,
295                    Err(_) => continue,
296                };
297                let method: String = match row.try_get("method") {
298                    Ok(m) => m,
299                    Err(_) => continue,
300                };
301                let forecast_window_days: i32 = match row.try_get("forecast_window_days") {
302                    Ok(d) => d,
303                    Err(_) => continue,
304                };
305                let predicted_change_probability: f64 =
306                    match row.try_get("predicted_change_probability") {
307                        Ok(p) => p,
308                        Err(_) => continue,
309                    };
310                let predicted_break_probability: f64 =
311                    match row.try_get("predicted_break_probability") {
312                        Ok(p) => p,
313                        Err(_) => continue,
314                    };
315                let next_expected_change_date: Option<DateTime<Utc>> =
316                    row.try_get("next_expected_change_date").ok();
317                let predicted_at: DateTime<Utc> = match row.try_get("predicted_at") {
318                    Ok(dt) => dt,
319                    Err(_) => continue,
320                };
321                let confidence: f64 = match row.try_get("confidence") {
322                    Ok(c) => c,
323                    Err(_) => continue,
324                };
325
326                events.push(TimelineEvent::Forecast {
327                    id: id.to_string(),
328                    endpoint,
329                    method,
330                    window_days: forecast_window_days as u32,
331                    change_probability: predicted_change_probability,
332                    break_probability: predicted_break_probability,
333                    next_expected_change: next_expected_change_date.map(|d| d.timestamp()),
334                    confidence,
335                    predicted_at: predicted_at.timestamp(),
336                });
337            }
338        }
339        }
340    }
341
342    // Sort by timestamp (most recent first)
343    events.sort_by_key(|e| {
344        std::cmp::Reverse(match e {
345            TimelineEvent::StructuralDrift { detected_at, .. } => *detected_at,
346            TimelineEvent::SemanticDrift { detected_at, .. } => *detected_at,
347            TimelineEvent::ThreatAssessment { assessed_at, .. } => *assessed_at,
348            TimelineEvent::Forecast { predicted_at, .. } => *predicted_at,
349        })
350    });
351
352    // Apply limit
353    let total = events.len();
354    if let Some(limit) = params.limit {
355        events.truncate(limit);
356    }
357
358    Ok(Json(TimelineResponse { events, total }))
359}
360
361/// Create router for contract health endpoints
362pub fn contract_health_router(state: ContractHealthState) -> axum::Router {
363    use axum::routing::get;
364    use axum::Router;
365
366    Router::new()
367        .route("/api/v1/contract-health/timeline", get(get_timeline))
368        .with_state(state)
369}