// waypoint_core/safety.rs

1//! Migration safety analysis: lock levels, impact estimation, and verdicts.
2
3use serde::Serialize;
4
5use crate::error::{Result, WaypointError};
6use crate::sql_parser::DdlOperation;
7
/// PostgreSQL lock levels, ordered from least to most restrictive.
///
/// The ordering matches PostgreSQL's internal lock hierarchy so that
/// comparisons (e.g. `lock > LockLevel::ShareLock`) work correctly.
/// The derived `Ord` relies on the declaration order below — do not
/// reorder variants.
// NOTE(review): Serialize emits the variant identifiers (e.g.
// "AccessExclusiveLock"), not the Display strings — confirm consumers
// expect that.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize)]
pub enum LockLevel {
    /// No lock acquired (new objects, functions, enums).
    None,
    /// ACCESS SHARE — acquired by SELECT.
    AccessShareLock,
    /// ROW SHARE — acquired by SELECT FOR UPDATE/SHARE.
    RowShareLock,
    /// ROW EXCLUSIVE — acquired by INSERT/UPDATE/DELETE.
    RowExclusiveLock,
    /// SHARE UPDATE EXCLUSIVE — acquired by VACUUM, CREATE INDEX CONCURRENTLY.
    ShareUpdateExclusiveLock,
    /// SHARE — acquired by CREATE INDEX (non-concurrent).
    ShareLock,
    /// SHARE ROW EXCLUSIVE — acquired by some constraint triggers.
    ShareRowExclusiveLock,
    /// EXCLUSIVE — blocks all reads/writes except ACCESS SHARE.
    ExclusiveLock,
    /// ACCESS EXCLUSIVE — the strongest lock; blocks everything.
    AccessExclusiveLock,
}
33
34impl std::fmt::Display for LockLevel {
35    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
36        match self {
37            LockLevel::None => write!(f, "None"),
38            LockLevel::AccessShareLock => write!(f, "ACCESS SHARE"),
39            LockLevel::RowShareLock => write!(f, "ROW SHARE"),
40            LockLevel::RowExclusiveLock => write!(f, "ROW EXCLUSIVE"),
41            LockLevel::ShareUpdateExclusiveLock => write!(f, "SHARE UPDATE EXCLUSIVE"),
42            LockLevel::ShareLock => write!(f, "SHARE"),
43            LockLevel::ShareRowExclusiveLock => write!(f, "SHARE ROW EXCLUSIVE"),
44            LockLevel::ExclusiveLock => write!(f, "EXCLUSIVE"),
45            LockLevel::AccessExclusiveLock => write!(f, "ACCESS EXCLUSIVE"),
46        }
47    }
48}
49
/// Rough classification of table size based on estimated row count.
// NOTE(review): the row-count ranges in the variant docs describe the
// *default* thresholds; `SafetyConfig` can shift the Large/Huge
// boundaries (see `classify_row_count`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]
pub enum TableSize {
    /// Fewer than 10,000 rows.
    Small,
    /// 10,000 to 1,000,000 rows.
    Medium,
    /// 1,000,000 to 100,000,000 rows.
    Large,
    /// More than 100,000,000 rows.
    Huge,
}
62
63impl std::fmt::Display for TableSize {
64    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
65        match self {
66            TableSize::Small => write!(f, "Small (<10k rows)"),
67            TableSize::Medium => write!(f, "Medium (10k-1M rows)"),
68            TableSize::Large => write!(f, "Large (1M-100M rows)"),
69            TableSize::Huge => write!(f, "Huge (>100M rows)"),
70        }
71    }
72}
73
/// Overall safety verdict for a migration statement or script.
///
/// The derived `Ord` (Safe < Caution < Danger) is relied on for
/// worst-case aggregation — keep the variant order.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize)]
pub enum SafetyVerdict {
    /// No significant risk detected.
    Safe,
    /// Moderate risk — review recommended.
    Caution,
    /// High risk — may cause downtime or data loss.
    Danger,
}
84
85impl std::fmt::Display for SafetyVerdict {
86    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
87        match self {
88            SafetyVerdict::Safe => write!(f, "SAFE"),
89            SafetyVerdict::Caution => write!(f, "CAUTION"),
90            SafetyVerdict::Danger => write!(f, "DANGER"),
91        }
92    }
93}
94
/// Safety analysis for a single SQL statement within a migration.
#[derive(Debug, Clone, Serialize)]
pub struct StatementAnalysis {
    /// A short preview of the analyzed statement (truncated to 120 chars).
    pub statement_preview: String,
    /// The PostgreSQL lock level this statement acquires.
    pub lock_level: LockLevel,
    /// The table affected by this statement, if identifiable.
    pub affected_table: Option<String>,
    /// Estimated table size classification, if known.
    pub table_size: Option<TableSize>,
    /// Estimated live row count, if available from statistics.
    pub estimated_rows: Option<i64>,
    /// The safety verdict for this statement.
    pub verdict: SafetyVerdict,
    /// Actionable suggestions for reducing risk.
    pub suggestions: Vec<String>,
    /// Whether this statement causes irreversible data loss.
    pub data_loss: bool,
}
115
/// Full safety report for a migration script.
///
/// Produced by [`analyze_migration`]; one [`StatementAnalysis`] per
/// recognized DDL operation.
#[derive(Debug, Clone, Serialize)]
pub struct SafetyReport {
    /// The migration script filename or identifier.
    pub script: String,
    /// The worst-case verdict across all statements.
    pub overall_verdict: SafetyVerdict,
    /// Per-statement analysis results.
    pub statements: Vec<StatementAnalysis>,
    /// Aggregated, de-duplicated suggestions across all statements.
    pub suggestions: Vec<String>,
}
128
/// Configuration for safety analysis.
// NOTE(review): `enabled` and `block_on_danger` are not consulted inside
// this module — presumably the caller gates `analyze_migration` with
// them; verify at the call site.
#[derive(Debug, Clone)]
pub struct SafetyConfig {
    /// Whether safety analysis is enabled.
    pub enabled: bool,
    /// Whether to block migrations that receive a DANGER verdict.
    pub block_on_danger: bool,
    /// Row count threshold for classifying a table as Large.
    pub large_table_threshold: i64,
    /// Row count threshold for classifying a table as Huge.
    pub huge_table_threshold: i64,
}
141
impl Default for SafetyConfig {
    /// Defaults: analysis on, non-blocking, Large at 1M rows, Huge at 100M rows.
    fn default() -> Self {
        Self {
            enabled: true,
            block_on_danger: false,
            large_table_threshold: 1_000_000,
            huge_table_threshold: 100_000_000,
        }
    }
}
152
153/// Determine the PostgreSQL lock level required by a DDL operation.
154pub fn lock_level_for_ddl(op: &DdlOperation) -> LockLevel {
155    match op {
156        DdlOperation::CreateTable { .. } => LockLevel::None,
157        DdlOperation::AlterTableAddColumn { .. } => LockLevel::AccessExclusiveLock,
158        DdlOperation::AlterTableDropColumn { .. } => LockLevel::AccessExclusiveLock,
159        DdlOperation::AlterTableAlterColumn { .. } => LockLevel::AccessExclusiveLock,
160        DdlOperation::CreateIndex { is_concurrent, .. } => {
161            if *is_concurrent {
162                LockLevel::ShareUpdateExclusiveLock
163            } else {
164                LockLevel::ShareLock
165            }
166        }
167        DdlOperation::DropTable { .. } => LockLevel::AccessExclusiveLock,
168        DdlOperation::DropIndex { .. } => LockLevel::AccessExclusiveLock,
169        DdlOperation::CreateView { .. } => LockLevel::None,
170        DdlOperation::DropView { .. } => LockLevel::AccessExclusiveLock,
171        DdlOperation::CreateFunction { .. } => LockLevel::None,
172        DdlOperation::DropFunction { .. } => LockLevel::None,
173        DdlOperation::AddConstraint { .. } => LockLevel::AccessExclusiveLock,
174        DdlOperation::DropConstraint { .. } => LockLevel::AccessExclusiveLock,
175        DdlOperation::CreateEnum { .. } => LockLevel::None,
176        DdlOperation::TruncateTable { .. } => LockLevel::AccessExclusiveLock,
177        DdlOperation::Other { .. } => LockLevel::None,
178    }
179}
180
181/// Classify a table's size by querying PostgreSQL statistics.
182///
183/// Returns the classification and the estimated row count from
184/// `pg_stat_user_tables.n_live_tup`.
185pub async fn classify_table_size(
186    client: &tokio_postgres::Client,
187    schema: &str,
188    table: &str,
189    large_threshold: i64,
190    huge_threshold: i64,
191) -> Result<(TableSize, i64)> {
192    let row = client
193        .query_opt(
194            "SELECT n_live_tup FROM pg_stat_user_tables \
195             WHERE schemaname = $1 AND relname = $2",
196            &[&schema, &table],
197        )
198        .await
199        .map_err(WaypointError::DatabaseError)?;
200
201    let estimated_rows: i64 = match row {
202        Some(r) => r.get::<_, i64>(0),
203        None => 0,
204    };
205
206    let size = classify_row_count(estimated_rows, large_threshold, huge_threshold);
207    Ok((size, estimated_rows))
208}
209
210/// Classify a row count into a [`TableSize`] using the given thresholds.
211fn classify_row_count(rows: i64, large_threshold: i64, huge_threshold: i64) -> TableSize {
212    if rows > huge_threshold {
213        TableSize::Huge
214    } else if rows > large_threshold {
215        TableSize::Large
216    } else if rows >= 10_000 {
217        TableSize::Medium
218    } else {
219        TableSize::Small
220    }
221}
222
223/// Determine the safety verdict for a statement given its lock level,
224/// affected table size, and whether it causes data loss.
225fn compute_verdict(lock: LockLevel, size: TableSize, data_loss: bool) -> SafetyVerdict {
226    // AccessExclusiveLock on Large/Huge tables is always dangerous
227    if lock == LockLevel::AccessExclusiveLock
228        && (size == TableSize::Large || size == TableSize::Huge)
229    {
230        return SafetyVerdict::Danger;
231    }
232
233    // Data loss operations on Large/Huge tables are dangerous
234    if data_loss && (size == TableSize::Large || size == TableSize::Huge) {
235        return SafetyVerdict::Danger;
236    }
237
238    // AccessExclusiveLock on Small/Medium tables warrants caution
239    if lock == LockLevel::AccessExclusiveLock {
240        return SafetyVerdict::Caution;
241    }
242
243    // ShareLock (non-concurrent index) on Large/Huge warrants caution
244    if lock == LockLevel::ShareLock && (size == TableSize::Large || size == TableSize::Huge) {
245        return SafetyVerdict::Caution;
246    }
247
248    SafetyVerdict::Safe
249}
250
251/// Generate actionable suggestions for a DDL operation based on table size.
252fn generate_suggestions(op: &DdlOperation, size: TableSize) -> Vec<String> {
253    let mut suggestions = Vec::new();
254
255    match op {
256        DdlOperation::CreateIndex {
257            is_concurrent: false,
258            ..
259        } if size == TableSize::Large || size == TableSize::Huge => {
260            suggestions.push("Use CREATE INDEX CONCURRENTLY".to_string());
261        }
262        DdlOperation::AlterTableAddColumn {
263            is_not_null: true,
264            has_default: true,
265            ..
266        } if size == TableSize::Large || size == TableSize::Huge => {
267            suggestions.push("Split into: add nullable column, backfill, set NOT NULL".to_string());
268        }
269        DdlOperation::AlterTableAlterColumn { .. }
270            if size == TableSize::Large || size == TableSize::Huge =>
271        {
272            suggestions.push("Use add-column + backfill + swap pattern".to_string());
273        }
274        DdlOperation::DropTable { .. } | DdlOperation::AlterTableDropColumn { .. } => {
275            suggestions.push("Consider soft-delete pattern for reversibility".to_string());
276        }
277        DdlOperation::TruncateTable { .. } => {
278            suggestions.push("Consider DELETE with batching for large tables".to_string());
279        }
280        _ => {}
281    }
282
283    suggestions
284}
285
286/// Check whether a DDL operation causes irreversible data loss.
287fn is_data_loss(op: &DdlOperation) -> bool {
288    matches!(
289        op,
290        DdlOperation::DropTable { .. }
291            | DdlOperation::AlterTableDropColumn { .. }
292            | DdlOperation::TruncateTable { .. }
293    )
294}
295
296/// Extract the affected table name from a DDL operation, if applicable.
297fn affected_table(op: &DdlOperation) -> Option<String> {
298    match op {
299        DdlOperation::CreateTable { table, .. }
300        | DdlOperation::DropTable { table }
301        | DdlOperation::AlterTableAddColumn { table, .. }
302        | DdlOperation::AlterTableDropColumn { table, .. }
303        | DdlOperation::AlterTableAlterColumn { table, .. }
304        | DdlOperation::CreateIndex { table, .. }
305        | DdlOperation::AddConstraint { table, .. }
306        | DdlOperation::DropConstraint { table, .. }
307        | DdlOperation::TruncateTable { table } => Some(table.clone()),
308        DdlOperation::DropIndex { .. }
309        | DdlOperation::CreateView { .. }
310        | DdlOperation::DropView { .. }
311        | DdlOperation::CreateFunction { .. }
312        | DdlOperation::DropFunction { .. }
313        | DdlOperation::CreateEnum { .. }
314        | DdlOperation::Other { .. } => None,
315    }
316}
317
318/// Analyze a migration script for safety concerns.
319///
320/// Parses the SQL into individual DDL operations, queries the database
321/// for table size statistics, and produces a [`SafetyReport`] with
322/// per-statement verdicts and suggestions.
323pub async fn analyze_migration(
324    client: &tokio_postgres::Client,
325    schema: &str,
326    sql: &str,
327    script: &str,
328    config: &SafetyConfig,
329) -> Result<SafetyReport> {
330    let ops = crate::sql_parser::extract_ddl_operations(sql);
331    let mut statements = Vec::new();
332    let mut all_suggestions = Vec::new();
333    let mut worst_verdict = SafetyVerdict::Safe;
334
335    for op in &ops {
336        let lock = lock_level_for_ddl(op);
337        let table = affected_table(op);
338        let data_loss = is_data_loss(op);
339
340        let (table_size, estimated_rows) = if let Some(ref t) = table {
341            match classify_table_size(
342                client,
343                schema,
344                t,
345                config.large_table_threshold,
346                config.huge_table_threshold,
347            )
348            .await
349            {
350                Ok((size, rows)) => (Some(size), Some(rows)),
351                // Table may not exist yet (CREATE TABLE) — treat as Small
352                Err(_) => (Some(TableSize::Small), None),
353            }
354        } else {
355            (None, None)
356        };
357
358        let size_for_verdict = table_size.unwrap_or(TableSize::Small);
359        let verdict = compute_verdict(lock, size_for_verdict, data_loss);
360
361        let suggestions = generate_suggestions(op, size_for_verdict);
362        all_suggestions.extend(suggestions.clone());
363
364        // Track the worst verdict
365        if verdict == SafetyVerdict::Danger
366            || (verdict == SafetyVerdict::Caution && worst_verdict == SafetyVerdict::Safe)
367        {
368            worst_verdict = verdict;
369        }
370
371        let preview: String = op.to_string().chars().take(120).collect();
372
373        statements.push(StatementAnalysis {
374            statement_preview: preview,
375            lock_level: lock,
376            affected_table: table,
377            table_size,
378            estimated_rows,
379            verdict,
380            suggestions,
381            data_loss,
382        });
383    }
384
385    // De-duplicate suggestions
386    all_suggestions.sort();
387    all_suggestions.dedup();
388
389    Ok(SafetyReport {
390        script: script.to_string(),
391        overall_verdict: worst_verdict,
392        statements,
393        suggestions: all_suggestions,
394    })
395}
396
#[cfg(test)]
mod tests {
    use super::*;

    // All tests below exercise the pure helpers only — no database
    // connection is required.

    // ── Lock level mapping tests ──────────────────────────────────────

    #[test]
    fn test_lock_create_table_is_none() {
        let op = DdlOperation::CreateTable {
            table: "users".into(),
            if_not_exists: false,
        };
        assert_eq!(lock_level_for_ddl(&op), LockLevel::None);
    }

    #[test]
    fn test_lock_alter_table_add_column() {
        let op = DdlOperation::AlterTableAddColumn {
            table: "users".into(),
            column: "email".into(),
            data_type: "text".into(),
            has_default: false,
            is_not_null: false,
        };
        assert_eq!(lock_level_for_ddl(&op), LockLevel::AccessExclusiveLock);
    }

    #[test]
    fn test_lock_alter_table_drop_column() {
        let op = DdlOperation::AlterTableDropColumn {
            table: "users".into(),
            column: "email".into(),
        };
        assert_eq!(lock_level_for_ddl(&op), LockLevel::AccessExclusiveLock);
    }

    #[test]
    fn test_lock_alter_table_alter_column() {
        let op = DdlOperation::AlterTableAlterColumn {
            table: "users".into(),
            column: "name".into(),
        };
        assert_eq!(lock_level_for_ddl(&op), LockLevel::AccessExclusiveLock);
    }

    #[test]
    fn test_lock_create_index_concurrent() {
        let op = DdlOperation::CreateIndex {
            name: "idx_email".into(),
            table: "users".into(),
            is_concurrent: true,
            is_unique: false,
        };
        assert_eq!(lock_level_for_ddl(&op), LockLevel::ShareUpdateExclusiveLock);
    }

    #[test]
    fn test_lock_create_index_non_concurrent() {
        let op = DdlOperation::CreateIndex {
            name: "idx_email".into(),
            table: "users".into(),
            is_concurrent: false,
            is_unique: false,
        };
        assert_eq!(lock_level_for_ddl(&op), LockLevel::ShareLock);
    }

    #[test]
    fn test_lock_drop_table() {
        let op = DdlOperation::DropTable {
            table: "users".into(),
        };
        assert_eq!(lock_level_for_ddl(&op), LockLevel::AccessExclusiveLock);
    }

    #[test]
    fn test_lock_drop_index() {
        let op = DdlOperation::DropIndex {
            name: "idx_email".into(),
        };
        assert_eq!(lock_level_for_ddl(&op), LockLevel::AccessExclusiveLock);
    }

    #[test]
    fn test_lock_create_view() {
        let op = DdlOperation::CreateView {
            name: "user_stats".into(),
            is_materialized: false,
        };
        assert_eq!(lock_level_for_ddl(&op), LockLevel::None);
    }

    #[test]
    fn test_lock_drop_view() {
        let op = DdlOperation::DropView {
            name: "user_stats".into(),
        };
        assert_eq!(lock_level_for_ddl(&op), LockLevel::AccessExclusiveLock);
    }

    #[test]
    fn test_lock_create_function() {
        let op = DdlOperation::CreateFunction {
            name: "my_func".into(),
        };
        assert_eq!(lock_level_for_ddl(&op), LockLevel::None);
    }

    #[test]
    fn test_lock_drop_function() {
        let op = DdlOperation::DropFunction {
            name: "my_func".into(),
        };
        assert_eq!(lock_level_for_ddl(&op), LockLevel::None);
    }

    #[test]
    fn test_lock_add_constraint() {
        let op = DdlOperation::AddConstraint {
            table: "users".into(),
            constraint_type: "FOREIGN KEY".into(),
        };
        assert_eq!(lock_level_for_ddl(&op), LockLevel::AccessExclusiveLock);
    }

    #[test]
    fn test_lock_drop_constraint() {
        let op = DdlOperation::DropConstraint {
            table: "users".into(),
            name: "fk_user_org".into(),
        };
        assert_eq!(lock_level_for_ddl(&op), LockLevel::AccessExclusiveLock);
    }

    #[test]
    fn test_lock_create_enum() {
        let op = DdlOperation::CreateEnum {
            name: "mood".into(),
        };
        assert_eq!(lock_level_for_ddl(&op), LockLevel::None);
    }

    #[test]
    fn test_lock_truncate_table() {
        let op = DdlOperation::TruncateTable {
            table: "logs".into(),
        };
        assert_eq!(lock_level_for_ddl(&op), LockLevel::AccessExclusiveLock);
    }

    #[test]
    fn test_lock_other_is_none() {
        let op = DdlOperation::Other {
            statement_preview: "INSERT INTO ...".into(),
        };
        assert_eq!(lock_level_for_ddl(&op), LockLevel::None);
    }

    // ── Lock level ordering ───────────────────────────────────────────

    #[test]
    fn test_lock_level_ordering() {
        assert!(LockLevel::None < LockLevel::AccessShareLock);
        assert!(LockLevel::AccessShareLock < LockLevel::RowShareLock);
        assert!(LockLevel::RowShareLock < LockLevel::RowExclusiveLock);
        assert!(LockLevel::RowExclusiveLock < LockLevel::ShareUpdateExclusiveLock);
        assert!(LockLevel::ShareUpdateExclusiveLock < LockLevel::ShareLock);
        assert!(LockLevel::ShareLock < LockLevel::ShareRowExclusiveLock);
        assert!(LockLevel::ShareRowExclusiveLock < LockLevel::ExclusiveLock);
        assert!(LockLevel::ExclusiveLock < LockLevel::AccessExclusiveLock);
    }

    // ── Verdict computation tests ─────────────────────────────────────

    #[test]
    fn test_verdict_access_exclusive_large_is_danger() {
        assert_eq!(
            compute_verdict(LockLevel::AccessExclusiveLock, TableSize::Large, false),
            SafetyVerdict::Danger
        );
    }

    #[test]
    fn test_verdict_access_exclusive_huge_is_danger() {
        assert_eq!(
            compute_verdict(LockLevel::AccessExclusiveLock, TableSize::Huge, false),
            SafetyVerdict::Danger
        );
    }

    #[test]
    fn test_verdict_data_loss_on_large_is_danger() {
        assert_eq!(
            compute_verdict(LockLevel::AccessExclusiveLock, TableSize::Large, true),
            SafetyVerdict::Danger
        );
    }

    #[test]
    fn test_verdict_data_loss_on_huge_is_danger() {
        assert_eq!(
            compute_verdict(LockLevel::None, TableSize::Huge, true),
            SafetyVerdict::Danger
        );
    }

    #[test]
    fn test_verdict_access_exclusive_small_is_caution() {
        assert_eq!(
            compute_verdict(LockLevel::AccessExclusiveLock, TableSize::Small, false),
            SafetyVerdict::Caution
        );
    }

    #[test]
    fn test_verdict_access_exclusive_medium_is_caution() {
        assert_eq!(
            compute_verdict(LockLevel::AccessExclusiveLock, TableSize::Medium, false),
            SafetyVerdict::Caution
        );
    }

    #[test]
    fn test_verdict_share_lock_large_is_caution() {
        assert_eq!(
            compute_verdict(LockLevel::ShareLock, TableSize::Large, false),
            SafetyVerdict::Caution
        );
    }

    #[test]
    fn test_verdict_share_lock_huge_is_caution() {
        assert_eq!(
            compute_verdict(LockLevel::ShareLock, TableSize::Huge, false),
            SafetyVerdict::Caution
        );
    }

    #[test]
    fn test_verdict_share_lock_small_is_safe() {
        assert_eq!(
            compute_verdict(LockLevel::ShareLock, TableSize::Small, false),
            SafetyVerdict::Safe
        );
    }

    #[test]
    fn test_verdict_none_lock_small_is_safe() {
        assert_eq!(
            compute_verdict(LockLevel::None, TableSize::Small, false),
            SafetyVerdict::Safe
        );
    }

    #[test]
    fn test_verdict_concurrent_index_large_is_safe() {
        // ShareUpdateExclusiveLock should be safe even on large tables
        assert_eq!(
            compute_verdict(LockLevel::ShareUpdateExclusiveLock, TableSize::Large, false),
            SafetyVerdict::Safe
        );
    }

    // ── Data loss detection ───────────────────────────────────────────

    #[test]
    fn test_data_loss_drop_table() {
        let op = DdlOperation::DropTable {
            table: "users".into(),
        };
        assert!(is_data_loss(&op));
    }

    #[test]
    fn test_data_loss_drop_column() {
        let op = DdlOperation::AlterTableDropColumn {
            table: "users".into(),
            column: "email".into(),
        };
        assert!(is_data_loss(&op));
    }

    #[test]
    fn test_data_loss_truncate() {
        let op = DdlOperation::TruncateTable {
            table: "logs".into(),
        };
        assert!(is_data_loss(&op));
    }

    #[test]
    fn test_no_data_loss_create_table() {
        let op = DdlOperation::CreateTable {
            table: "users".into(),
            if_not_exists: false,
        };
        assert!(!is_data_loss(&op));
    }

    #[test]
    fn test_no_data_loss_add_column() {
        let op = DdlOperation::AlterTableAddColumn {
            table: "users".into(),
            column: "email".into(),
            data_type: "text".into(),
            has_default: false,
            is_not_null: false,
        };
        assert!(!is_data_loss(&op));
    }

    #[test]
    fn test_no_data_loss_create_index() {
        let op = DdlOperation::CreateIndex {
            name: "idx".into(),
            table: "users".into(),
            is_concurrent: true,
            is_unique: false,
        };
        assert!(!is_data_loss(&op));
    }

    // ── Suggestion generation tests ───────────────────────────────────

    #[test]
    fn test_suggestion_non_concurrent_index_large() {
        let op = DdlOperation::CreateIndex {
            name: "idx_email".into(),
            table: "users".into(),
            is_concurrent: false,
            is_unique: false,
        };
        let suggestions = generate_suggestions(&op, TableSize::Large);
        assert_eq!(suggestions.len(), 1);
        assert!(suggestions[0].contains("CONCURRENTLY"));
    }

    #[test]
    fn test_suggestion_non_concurrent_index_huge() {
        let op = DdlOperation::CreateIndex {
            name: "idx_email".into(),
            table: "users".into(),
            is_concurrent: false,
            is_unique: false,
        };
        let suggestions = generate_suggestions(&op, TableSize::Huge);
        assert_eq!(suggestions.len(), 1);
        assert!(suggestions[0].contains("CONCURRENTLY"));
    }

    #[test]
    fn test_suggestion_non_concurrent_index_small_no_suggestion() {
        let op = DdlOperation::CreateIndex {
            name: "idx_email".into(),
            table: "users".into(),
            is_concurrent: false,
            is_unique: false,
        };
        let suggestions = generate_suggestions(&op, TableSize::Small);
        assert!(suggestions.is_empty());
    }

    #[test]
    fn test_suggestion_concurrent_index_large_no_suggestion() {
        let op = DdlOperation::CreateIndex {
            name: "idx_email".into(),
            table: "users".into(),
            is_concurrent: true,
            is_unique: false,
        };
        let suggestions = generate_suggestions(&op, TableSize::Large);
        assert!(suggestions.is_empty());
    }

    #[test]
    fn test_suggestion_add_not_null_default_large() {
        let op = DdlOperation::AlterTableAddColumn {
            table: "users".into(),
            column: "status".into(),
            data_type: "text".into(),
            has_default: true,
            is_not_null: true,
        };
        let suggestions = generate_suggestions(&op, TableSize::Large);
        assert_eq!(suggestions.len(), 1);
        assert!(suggestions[0].contains("nullable column"));
    }

    #[test]
    fn test_suggestion_add_nullable_column_large_no_suggestion() {
        let op = DdlOperation::AlterTableAddColumn {
            table: "users".into(),
            column: "bio".into(),
            data_type: "text".into(),
            has_default: false,
            is_not_null: false,
        };
        let suggestions = generate_suggestions(&op, TableSize::Large);
        assert!(suggestions.is_empty());
    }

    #[test]
    fn test_suggestion_alter_column_type_huge() {
        let op = DdlOperation::AlterTableAlterColumn {
            table: "users".into(),
            column: "name".into(),
        };
        let suggestions = generate_suggestions(&op, TableSize::Huge);
        assert_eq!(suggestions.len(), 1);
        assert!(suggestions[0].contains("backfill"));
    }

    #[test]
    fn test_suggestion_alter_column_type_small_no_suggestion() {
        let op = DdlOperation::AlterTableAlterColumn {
            table: "users".into(),
            column: "name".into(),
        };
        let suggestions = generate_suggestions(&op, TableSize::Small);
        assert!(suggestions.is_empty());
    }

    #[test]
    fn test_suggestion_drop_table() {
        let op = DdlOperation::DropTable {
            table: "users".into(),
        };
        let suggestions = generate_suggestions(&op, TableSize::Small);
        assert_eq!(suggestions.len(), 1);
        assert!(suggestions[0].contains("soft-delete"));
    }

    #[test]
    fn test_suggestion_drop_column() {
        let op = DdlOperation::AlterTableDropColumn {
            table: "users".into(),
            column: "email".into(),
        };
        let suggestions = generate_suggestions(&op, TableSize::Medium);
        assert_eq!(suggestions.len(), 1);
        assert!(suggestions[0].contains("soft-delete"));
    }

    #[test]
    fn test_suggestion_truncate() {
        let op = DdlOperation::TruncateTable {
            table: "logs".into(),
        };
        let suggestions = generate_suggestions(&op, TableSize::Huge);
        assert_eq!(suggestions.len(), 1);
        assert!(suggestions[0].contains("DELETE with batching"));
    }

    // ── Affected table extraction ─────────────────────────────────────

    #[test]
    fn test_affected_table_create_table() {
        let op = DdlOperation::CreateTable {
            table: "orders".into(),
            if_not_exists: false,
        };
        assert_eq!(affected_table(&op), Some("orders".into()));
    }

    #[test]
    fn test_affected_table_create_view_is_none() {
        let op = DdlOperation::CreateView {
            name: "v_stats".into(),
            is_materialized: false,
        };
        assert_eq!(affected_table(&op), None);
    }

    #[test]
    fn test_affected_table_create_function_is_none() {
        let op = DdlOperation::CreateFunction {
            name: "my_func".into(),
        };
        assert_eq!(affected_table(&op), None);
    }

    #[test]
    fn test_affected_table_other_is_none() {
        let op = DdlOperation::Other {
            statement_preview: "GRANT SELECT ON ...".into(),
        };
        assert_eq!(affected_table(&op), None);
    }

    // ── Display impls ─────────────────────────────────────────────────

    #[test]
    fn test_lock_level_display() {
        assert_eq!(LockLevel::None.to_string(), "None");
        assert_eq!(LockLevel::AccessShareLock.to_string(), "ACCESS SHARE");
        assert_eq!(LockLevel::RowShareLock.to_string(), "ROW SHARE");
        assert_eq!(LockLevel::RowExclusiveLock.to_string(), "ROW EXCLUSIVE");
        assert_eq!(
            LockLevel::ShareUpdateExclusiveLock.to_string(),
            "SHARE UPDATE EXCLUSIVE"
        );
        assert_eq!(LockLevel::ShareLock.to_string(), "SHARE");
        assert_eq!(
            LockLevel::ShareRowExclusiveLock.to_string(),
            "SHARE ROW EXCLUSIVE"
        );
        assert_eq!(LockLevel::ExclusiveLock.to_string(), "EXCLUSIVE");
        assert_eq!(
            LockLevel::AccessExclusiveLock.to_string(),
            "ACCESS EXCLUSIVE"
        );
    }

    #[test]
    fn test_safety_verdict_display() {
        assert_eq!(SafetyVerdict::Safe.to_string(), "SAFE");
        assert_eq!(SafetyVerdict::Caution.to_string(), "CAUTION");
        assert_eq!(SafetyVerdict::Danger.to_string(), "DANGER");
    }

    #[test]
    fn test_table_size_display() {
        assert_eq!(TableSize::Small.to_string(), "Small (<10k rows)");
        assert_eq!(TableSize::Medium.to_string(), "Medium (10k-1M rows)");
        assert_eq!(TableSize::Large.to_string(), "Large (1M-100M rows)");
        assert_eq!(TableSize::Huge.to_string(), "Huge (>100M rows)");
    }

    // ── Row count classification ──────────────────────────────────────

    #[test]
    fn test_classify_row_count_small() {
        assert_eq!(
            classify_row_count(0, 1_000_000, 100_000_000),
            TableSize::Small
        );
        assert_eq!(
            classify_row_count(9_999, 1_000_000, 100_000_000),
            TableSize::Small
        );
    }

    #[test]
    fn test_classify_row_count_medium() {
        assert_eq!(
            classify_row_count(10_000, 1_000_000, 100_000_000),
            TableSize::Medium
        );
        assert_eq!(
            classify_row_count(500_000, 1_000_000, 100_000_000),
            TableSize::Medium
        );
        assert_eq!(
            classify_row_count(1_000_000, 1_000_000, 100_000_000),
            TableSize::Medium
        );
    }

    #[test]
    fn test_classify_row_count_large() {
        assert_eq!(
            classify_row_count(1_000_001, 1_000_000, 100_000_000),
            TableSize::Large
        );
        assert_eq!(
            classify_row_count(50_000_000, 1_000_000, 100_000_000),
            TableSize::Large
        );
        assert_eq!(
            classify_row_count(100_000_000, 1_000_000, 100_000_000),
            TableSize::Large
        );
    }

    #[test]
    fn test_classify_row_count_huge() {
        assert_eq!(
            classify_row_count(100_000_001, 1_000_000, 100_000_000),
            TableSize::Huge
        );
        assert_eq!(
            classify_row_count(1_000_000_000, 1_000_000, 100_000_000),
            TableSize::Huge
        );
    }

    #[test]
    fn test_classify_custom_thresholds() {
        // With lower thresholds: large=1_000, huge=10_000
        assert_eq!(classify_row_count(500, 1_000, 10_000), TableSize::Small);
        assert_eq!(classify_row_count(1_001, 1_000, 10_000), TableSize::Large);
        assert_eq!(classify_row_count(10_000, 1_000, 10_000), TableSize::Large);
        assert_eq!(classify_row_count(10_001, 1_000, 10_000), TableSize::Huge);
    }

    // ── SafetyConfig defaults ─────────────────────────────────────────

    #[test]
    fn test_safety_config_defaults() {
        let config = SafetyConfig::default();
        assert!(config.enabled);
        assert!(!config.block_on_danger);
        assert_eq!(config.large_table_threshold, 1_000_000);
        assert_eq!(config.huge_table_threshold, 100_000_000);
    }
}