pg_dbmigrator 0.1.0

Library for migrating PostgreSQL databases (offline dump/restore + online logical replication)
//! High-level migration driver.
//!
//! [`Migrator`] takes a [`MigrationConfig`] and runs the appropriate sequence
//! of dump → restore → (optional) streaming apply.
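//!
//! # Example
//!
//! A minimal sketch of an offline run. The module paths, public field
//! access, and placeholder connection strings are assumptions for
//! illustration, not guarantees of this crate's public API:
//!
//! ```no_run
//! use pg_dbmigrator::config::{EndpointConfig, MigrationConfig};
//! use pg_dbmigrator::migrator::Migrator;
//! use tokio_util::sync::CancellationToken;
//!
//! # async fn demo() {
//! let config = MigrationConfig {
//!     source: EndpointConfig::parse("postgres://user:pass@source-host/db").expect("source URL"),
//!     target: EndpointConfig::parse("postgres://user:pass@target-host/db").expect("target URL"),
//!     ..MigrationConfig::default()
//! };
//! let outcome = Migrator::new(config)
//!     .run(CancellationToken::new())
//!     .await
//!     .expect("migration failed");
//! println!("dump archive kept at {}", outcome.dump_path.display());
//! # }
//! ```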

use std::path::{Path, PathBuf};
use std::sync::Arc;

use tokio_postgres::Client;
use tokio_util::sync::CancellationToken;
use tracing::info;

use crate::config::{MigrationConfig, MigrationMode};
use crate::cutover::CutoverHandle;
use crate::dump::{run_pg_dump, CommandRunner, DumpFormat, DumpRequest, TokioCommandRunner};
use crate::error::{MigrationError, Result};
use crate::native_apply::{
    disable_target_subscription, force_clean_stale_state, run_native_apply, wait_for_slot_inactive,
    ApplyStats, PgSubscriptionLagProvider,
};
use crate::preflight::{
    ensure_pglogical_not_interfering, ensure_target_database_exists, verify_pg_tools_installed,
    verify_publication_exists, verify_source_logical_replication_ready,
};
use crate::progress::{MigrationStage, ProgressEvent, ProgressReporter, TracingReporter};
use crate::restore::{run_pg_restore, run_pg_restore_in_sections, RestoreRequest};
use crate::resume::{default_resume_path, CompletedStage, ResumeToken};
use crate::sequences::sync_sequences;
use crate::snapshot::prepare_replication_slot;
use crate::tls::connect_with_sslmode;

/// High-level migration driver.
#[derive(Debug)]
pub struct Migrator {
    config: MigrationConfig,
    runner: Arc<dyn CommandRunner>,
    reporter: Arc<dyn ProgressReporter>,
    /// Optional override for the dump archive path. Defaults to a
    /// process-unique path inside `std::env::temp_dir()`.
    dump_path: Option<PathBuf>,
    /// Operator-facing handle for triggering cutover.
    cutover_handle: CutoverHandle,
}

impl Migrator {
    /// Construct a [`Migrator`] with the production defaults: the dump and
    /// restore are spawned via [`tokio::process::Command`] and progress is
    /// logged through the `tracing` subscriber.
    pub fn new(config: MigrationConfig) -> Self {
        Self {
            config,
            runner: Arc::new(TokioCommandRunner),
            reporter: Arc::new(TracingReporter),
            dump_path: None,
            cutover_handle: CutoverHandle::new(),
        }
    }

    /// Replace the [`CommandRunner`] used to invoke `pg_dump` / `pg_restore`.
    pub fn with_runner(mut self, runner: Arc<dyn CommandRunner>) -> Self {
        self.runner = runner;
        self
    }

    /// Replace the [`ProgressReporter`].
    pub fn with_reporter(mut self, reporter: Arc<dyn ProgressReporter>) -> Self {
        self.reporter = reporter;
        self
    }

    /// Pin the dump archive output path (otherwise it is generated in
    /// `std::env::temp_dir()`).
    pub fn with_dump_path(mut self, path: PathBuf) -> Self {
        self.dump_path = Some(path);
        self
    }

    /// Get a clone of the cutover handle. Hand this to a signal handler / RPC
    /// endpoint / UI so the operator can call
    /// [`CutoverHandle::request`] when ready to switch traffic to the target.
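    ///
    /// A minimal sketch of wiring cutover to Ctrl-C; the `tokio::signal`
    /// plumbing is an assumption about the embedding application:
    ///
    /// ```no_run
    /// # async fn demo(migrator: &pg_dbmigrator::migrator::Migrator) {
    /// let handle = migrator.cutover_handle();
    /// tokio::spawn(async move {
    ///     tokio::signal::ctrl_c().await.expect("install Ctrl-C handler");
    ///     // Ask the running migration to proceed to cutover.
    ///     handle.request();
    /// });
    /// # }
    /// ```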
    pub fn cutover_handle(&self) -> CutoverHandle {
        self.cutover_handle.clone()
    }

    /// Get a read-only reference to the currently active configuration.
    pub fn config(&self) -> &MigrationConfig {
        &self.config
    }

    /// Run the migration pipeline.
    ///
    /// `cancel` lets the caller request a graceful shutdown — particularly
    /// important during the long-running streaming apply phase of an online
    /// migration.
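    ///
    /// A minimal sketch of time-boxed cancellation (the four-hour window
    /// is an arbitrary illustration, not a crate default):
    ///
    /// ```no_run
    /// # use tokio_util::sync::CancellationToken;
    /// # async fn demo(migrator: pg_dbmigrator::migrator::Migrator) {
    /// let cancel = CancellationToken::new();
    /// let child = cancel.clone();
    /// tokio::spawn(async move {
    ///     // Give up gracefully if the migration window elapses.
    ///     tokio::time::sleep(std::time::Duration::from_secs(4 * 3600)).await;
    ///     child.cancel();
    /// });
    /// let outcome = migrator.run(cancel).await;
    /// # let _ = outcome;
    /// # }
    /// ```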
    pub async fn run(&self, cancel: CancellationToken) -> Result<MigrationOutcome> {
        self.config.validate()?;
        verify_pg_tools_installed().await?;
        self.report(MigrationStage::Validate, "configuration valid")
            .await;

        match self.config.mode {
            MigrationMode::Offline => self.run_offline(cancel).await,
            MigrationMode::Online => self.run_online(cancel).await,
        }
    }

    /// Offline path: `pg_dump` → `pg_restore`.
    async fn run_offline(&self, cancel: CancellationToken) -> Result<MigrationOutcome> {
        let dump_path = self.dump_path_or_default("dump_offline");
        let mut token = self.load_or_init_resume(&dump_path).await?;

        if !token.has(CompletedStage::Dump) {
            self.report(MigrationStage::Dump, "starting pg_dump").await;
            run_pg_dump(
                self.runner.as_ref(),
                &self.dump_request(&dump_path, None),
                &cancel,
            )
            .await?;
            if cancel.is_cancelled() {
                return Err(MigrationError::Cancelled);
            }
            token.mark(CompletedStage::Dump);
            self.save_resume(&token, &dump_path).await;
        } else {
            self.report(
                MigrationStage::Dump,
                "skipped (resume): pg_dump already complete",
            )
            .await;
        }

        if !token.has(CompletedStage::Restore) {
            self.report(MigrationStage::Restore, "starting pg_restore")
                .await;
            self.restore(&dump_path, &cancel).await?;
            token.mark(CompletedStage::Restore);
            self.save_resume(&token, &dump_path).await;
        } else {
            self.report(
                MigrationStage::Restore,
                "skipped (resume): pg_restore already complete",
            )
            .await;
        }

        self.report(MigrationStage::Complete, "offline migration finished")
            .await;
        Ok(MigrationOutcome {
            stats: None,
            dump_path,
        })
    }

    /// Online path: slot + snapshot → snapshot-aligned dump → restore →
    /// streaming apply.
    async fn run_online(&self, cancel: CancellationToken) -> Result<MigrationOutcome> {
        // 0. Optional best-effort cleanup of leftovers from a previous run.
        if self.config.online.force_clean {
            self.report(
                MigrationStage::Validate,
                "force-clean: dropping any stale subscription/slot",
            )
            .await;
            force_clean_stale_state(
                &self.config.source.connection_string,
                &self.config.target.connection_string,
                &self.config.online,
            )
            .await?;
        }

        // 0.5. Ensure the target database exists — pg_restore needs it.
        self.report(
            MigrationStage::Validate,
            format!(
                "ensuring target database `{}` exists",
                self.config.target.database
            ),
        )
        .await;
        ensure_target_database_exists(
            &self.config.target.connection_string,
            &self.config.target.database,
        )
        .await?;

        // 0.6. Verify the source is configured for logical replication.
        // Doing this *before* slot creation gives the operator a clean,
        // actionable error instead of a confusing libpq error 30 s into
        // CREATE_REPLICATION_SLOT.
        self.report(
            MigrationStage::Validate,
            "verifying source is configured for logical replication",
        )
        .await;
        verify_source_logical_replication_ready(&self.config.source.connection_string).await?;

        let dump_path = self.dump_path_or_default("dump_online");
        let mut token = self.load_or_init_resume(&dump_path).await?;

        // When resuming past Dump, the slot was created in a previous run
        // and the exported snapshot is already gone — there is no live
        // stream to keep. We only call `prepare_replication_slot` (and
        // hold a stream) when we still need to run pg_dump.
        let mut prepared_stream = None;
        let snapshot_name = if !token.has(CompletedStage::Dump) {
            // Fail fast if the publication is missing. Without this check
            // the apply worker would only error out 10+ minutes later
            // (after dump+restore) from inside `CREATE SUBSCRIPTION`.
            self.report(
                MigrationStage::Validate,
                format!(
                    "verifying publication `{}` exists on source",
                    self.config.online.publication
                ),
            )
            .await;
            verify_publication_exists(
                &self.config.source.connection_string,
                &self.config.online.publication,
            )
            .await?;

            // 1. Prepare slot + snapshot (must happen *before* pg_dump runs).
            self.report(MigrationStage::PrepareSnapshot, "creating replication slot")
                .await;
            let prepared = prepare_replication_slot(
                &self.config.source.connection_string,
                &self.config.online,
            )
            .await?;
            let snap = prepared.snapshot_name.clone();
            prepared_stream = Some(prepared.stream);
            token.mark(CompletedStage::PrepareSnapshot);
            token.snapshot_name = snap.clone();
            self.save_resume(&token, &dump_path).await;
            snap
        } else {
            self.report(
                MigrationStage::PrepareSnapshot,
                "skipped (resume): slot/snapshot already prepared in previous run",
            )
            .await;
            token.snapshot_name.clone()
        };

        // 2. Snapshot-aligned dump.
        if !token.has(CompletedStage::Dump) {
            self.report(
                MigrationStage::Dump,
                format!(
                    "starting pg_dump with snapshot {}",
                    snapshot_name.as_deref().unwrap_or("<unknown>")
                ),
            )
            .await;
            run_pg_dump(
                self.runner.as_ref(),
                &self.dump_request(&dump_path, snapshot_name.clone()),
                &cancel,
            )
            .await?;
            if cancel.is_cancelled() {
                return Err(MigrationError::Cancelled);
            }
            token.mark(CompletedStage::Dump);
            self.save_resume(&token, &dump_path).await;
        } else {
            self.report(
                MigrationStage::Dump,
                "skipped (resume): pg_dump already complete",
            )
            .await;
        }

        // 3. Restore.
        if !token.has(CompletedStage::Restore) {
            self.report(MigrationStage::Restore, "starting pg_restore")
                .await;
            self.restore(&dump_path, &cancel).await?;
            if cancel.is_cancelled() {
                return Err(MigrationError::Cancelled);
            }
            token.mark(CompletedStage::Restore);
            self.save_resume(&token, &dump_path).await;
        } else {
            self.report(
                MigrationStage::Restore,
                "skipped (resume): pg_restore already complete",
            )
            .await;
        }

        // 4. Streaming apply via `CREATE SUBSCRIPTION` on the target. The
        // replication stream's only job was to keep the exported snapshot
        // alive across pg_dump; the slot itself persists on the source
        // independently of the stream connection, so we drop the stream
        // before handing the slot to the native apply worker.
        drop(prepared_stream);

        // When resuming into the apply phase a previous (crashed) run may
        // already have created the subscription. We leave it in place so
        // run_native_apply can re-enable it (preserving the replication
        // origin). Dropping it would lose origin tracking and cause
        // duplicate key violations when the new subscription replays WAL.
        // However, we must disable it first so the old apply worker
        // releases the slot before we re-enable with a fresh connection.
        if self.config.resume {
            disable_target_subscription(&self.config.target.connection_string, &self.config.online)
                .await;
        }

        // Wait for the slot to become inactive. On resume the old apply
        // worker may still hold the walsender connection briefly after
        // being disabled; on first run the slot should already be free.
        wait_for_slot_inactive(
            &self.config.source.connection_string,
            &self.config.online.slot_name,
            self.reporter.as_ref(),
        )
        .await?;

        // 4.5. Verify pglogical is NOT interfering with native logical replication.
        self.report(
            MigrationStage::Validate,
            "checking pglogical is not blocking native replication on target",
        )
        .await;
        ensure_pglogical_not_interfering(&self.config.target.connection_string).await?;

        let stats = self.run_native_engine(cancel).await?;
        token.last_applied_lsn = Some(stats.last_applied_lsn);
        self.save_resume(&token, &dump_path).await;

        // After a cutover-driven exit, sync sequences so the target's
        // `last_value`s match the source. PostgreSQL logical replication
        // does NOT replay nextval(), so without this step the first
        // post-cutover INSERT … DEFAULT nextval(...) would collide with
        // a row already replicated by the apply worker.
        if stats.cutover_triggered && self.config.online.sync_sequences_on_cutover {
            self.report(
                MigrationStage::Cutover,
                "syncing sequences from source to target",
            )
            .await;
            match sync_sequences(
                &self.config.source.connection_string,
                &self.config.target.connection_string,
                &self.config.schemas,
            )
            .await
            {
                Ok(applied) => {
                    self.report(
                        MigrationStage::Cutover,
                        format!("synced {applied} sequence(s) from source to target"),
                    )
                    .await;
                }
                Err(e) => {
                    // Sequence sync is best-effort — if a managed-PG
                    // role can't write to one of the target sequences,
                    // we should not roll back the otherwise-successful
                    // cutover. Surface a loud warning so the operator
                    // can fix it manually before re-pointing traffic.
                    tracing::warn!(
                        error = %e,
                        "sequence sync failed — manually run \
                         `SELECT setval('<seq>', <value>, true)` on the target \
                         for each sequence before resuming application traffic",
                    );
                    self.report(
                        MigrationStage::Cutover,
                        format!(
                            "sequence sync failed: {e} (manual sync required \
                             before re-enabling traffic)"
                        ),
                    )
                    .await;
                }
            }
        }

        self.report(MigrationStage::Complete, "online migration finished")
            .await;
        Ok(MigrationOutcome {
            stats: Some(stats),
            dump_path,
        })
    }

    /// Native PostgreSQL logical-replication apply path
    /// (`CREATE SUBSCRIPTION` on target).
    async fn run_native_engine(&self, cancel: CancellationToken) -> Result<ApplyStats> {
        let target_client = self.connect_target().await?;
        self.report(
            MigrationStage::StreamApply,
            "starting native logical-replication apply (CREATE SUBSCRIPTION)",
        )
        .await;

        let lag_provider = PgSubscriptionLagProvider::connect(
            &self.config.source.connection_string,
            &self.config.online.slot_name,
        )
        .await?;

        // The CONNECTION clause inside CREATE SUBSCRIPTION is dialed by the
        // target's apply worker, not by us — its network view of the source
        // may not match ours (e.g. operator on host vs. target in container).
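        //
        // For example (hypothetical addresses): the operator may reach the
        // source as `host=localhost port=5433` via a tunnel, while the
        // target's apply worker must dial `host=source-db port=5432` on the
        // database network; `subscription_source_conn` carries the latter.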
        let subscription_source = self
            .config
            .online
            .subscription_source_conn
            .as_deref()
            .unwrap_or(&self.config.source.connection_string);

        run_native_apply(
            &target_client,
            &lag_provider,
            &self.config.online,
            subscription_source,
            self.cutover_handle.clone(),
            self.reporter.as_ref(),
            cancel,
        )
        .await
    }

    fn dump_request(&self, dump_path: &Path, snapshot: Option<String>) -> DumpRequest {
        // Parallel `pg_dump` (`--jobs` > 1) requires the directory archive
        // format; with a single job we default to the custom format, which
        // keeps the dump a single file and still restores quickly.
        let format = if self.config.jobs > 1 {
            DumpFormat::Directory
        } else {
            DumpFormat::Custom
        };
        DumpRequest {
            source: self.config.source.clone(),
            scope: self.config.dump_scope,
            jobs: self.config.jobs,
            snapshot,
            schemas: self.config.schemas.clone(),
            tables: self.config.tables.clone(),
            exclude_schemas: self.config.exclude_schemas.clone(),
            exclude_tables: self.config.exclude_tables.clone(),
            output_path: dump_path.to_path_buf(),
            format,
            no_publications: self.config.no_publications,
            no_subscriptions: self.config.no_subscriptions,
            compress: self.config.dump_compress.clone(),
            no_sync: self.config.no_sync,
            no_comments: self.config.no_comments,
            no_security_labels: self.config.no_security_labels,
            no_table_access_method: self.config.no_table_access_method,
        }
    }

    fn restore_request(&self, dump_path: &Path) -> RestoreRequest {
        RestoreRequest {
            target: self.config.target.clone(),
            input_path: dump_path.to_path_buf(),
            jobs: self.config.jobs,
            clean: self.config.drop_target_first,
            no_owner: true,
            no_acl: true,
            tolerate_errors: self.config.allow_restore_errors,
            section: None,
        }
    }

    /// Issue `pg_restore` either as a single all-in-one call or, when
    /// `split_sections` is enabled, as three section-restricted calls
    /// (pre-data → data → post-data).
    async fn restore(&self, dump_path: &Path, cancel: &CancellationToken) -> Result<()> {
        let req = self.restore_request(dump_path);
        if self.config.split_sections {
            run_pg_restore_in_sections(self.runner.as_ref(), &req, cancel).await
        } else {
            run_pg_restore(self.runner.as_ref(), &req, cancel).await
        }
    }

    fn dump_path_or_default(&self, prefix: &str) -> PathBuf {
        if let Some(p) = &self.dump_path {
            return p.clone();
        }
        if let Some(p) = &self.config.dump_path {
            return p.clone();
        }
        let mut p = std::env::temp_dir();
        p.push(format!("{prefix}-{}", std::process::id()));
        p
    }

    fn resume_path(&self, dump_path: &Path) -> PathBuf {
        self.config
            .resume_file
            .clone()
            .unwrap_or_else(|| default_resume_path(dump_path))
    }

    /// Load (or freshly create) the resume token used to skip already-
    /// completed stages. When `--resume` is off this returns a brand-new
    /// in-memory token; the token is still persisted after every
    /// successful stage, so a future run *can* resume even if the
    /// operator forgot to opt in this time. A token already on disk is
    /// only read back when `config.resume == true`.
    async fn load_or_init_resume(&self, dump_path: &Path) -> Result<ResumeToken> {
        let path = self.resume_path(dump_path);
        if self.config.resume {
            match ResumeToken::load(&path).await? {
                Some(token) => {
                    token.check_compatible(&self.config)?;
                    info!(
                        path = %path.display(),
                        completed = ?token.completed,
                        "resume token loaded — skipping completed stages"
                    );
                    Ok(token)
                }
                None => {
                    info!(
                        path = %path.display(),
                        "--resume set but no token on disk; running from scratch"
                    );
                    Ok(ResumeToken::new(&self.config, dump_path.to_path_buf()))
                }
            }
        } else {
            Ok(ResumeToken::new(&self.config, dump_path.to_path_buf()))
        }
    }

    async fn save_resume(&self, token: &ResumeToken, dump_path: &Path) {
        let path = self.resume_path(dump_path);
        if let Err(e) = token.save(&path).await {
            // Resume is a best-effort accelerator — never abort the
            // real migration because we couldn't write the token.
            tracing::warn!(error = %e, path = %path.display(), "failed to save resume token");
        }
    }

    async fn connect_target(&self) -> Result<Client> {
        info!("connecting to target {}", self.config.target.redacted());
        connect_with_sslmode(&self.config.target.connection_string).await
    }

    async fn report(&self, stage: MigrationStage, message: impl Into<String>) {
        self.reporter
            .report(ProgressEvent::new(stage, message.into()))
            .await;
    }
}

/// Aggregate result of a single migration run.
#[derive(Debug, Clone)]
pub struct MigrationOutcome {
    /// Streaming apply statistics (only present for online migrations).
    pub stats: Option<ApplyStats>,
    /// Final dump archive path (kept on disk for inspection / re-runs).
    pub dump_path: PathBuf,
}

impl MigrationOutcome {
    /// Whether the online apply loop ended because cutover was triggered
    /// (operator-driven or auto). Always `false` for offline migrations.
    pub fn cutover_triggered(&self) -> bool {
        self.stats.as_ref().map_or(false, |s| s.cutover_triggered)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::{EndpointConfig, OnlineOptions};
    use crate::dump::{CommandRunner, DumpFormat};
    use crate::progress::CollectingReporter;
    use async_trait::async_trait;
    use std::sync::Mutex;

    /// Records every command dispatched without spawning real processes.
    #[derive(Debug, Default)]
    struct RecordingRunner {
        calls: Mutex<Vec<(String, Vec<String>)>>,
    }

    impl RecordingRunner {
        fn snapshot(&self) -> Vec<(String, Vec<String>)> {
            self.calls.lock().unwrap().clone()
        }
    }

    #[async_trait]
    impl CommandRunner for RecordingRunner {
        async fn run(
            &self,
            program: &str,
            args: &[String],
            _env: &[(String, String)],
            _cancel: &CancellationToken,
        ) -> Result<()> {
            self.calls
                .lock()
                .unwrap()
                .push((program.to_string(), args.to_vec()));
            Ok(())
        }
    }

    fn baseline_config() -> MigrationConfig {
        MigrationConfig {
            source: EndpointConfig::parse("postgres://u:p@src/db").unwrap(),
            target: EndpointConfig::parse("postgres://u:p@dst/db").unwrap(),
            ..MigrationConfig::default()
        }
    }

    #[tokio::test]
    async fn offline_run_invokes_dump_then_restore() {
        let runner = Arc::new(RecordingRunner::default());
        let reporter = Arc::new(CollectingReporter::new());
        let migrator = Migrator::new(MigrationConfig {
            split_sections: false,
            ..baseline_config()
        })
        .with_runner(runner.clone())
        .with_reporter(reporter.clone())
        .with_dump_path(PathBuf::from("/tmp/pg_dbmigrator_test_dump"));

        migrator
            .run(CancellationToken::new())
            .await
            .expect("offline migration should succeed");

        let calls = runner.snapshot();
        assert_eq!(calls.len(), 2, "expected 2 calls (dump+restore)");
        assert_eq!(calls[0].0, "pg_dump");
        assert_eq!(calls[1].0, "pg_restore");

        let stages: Vec<_> = reporter
            .events()
            .await
            .into_iter()
            .map(|e| e.stage)
            .collect();
        assert!(stages.contains(&MigrationStage::Validate));
        assert!(stages.contains(&MigrationStage::Dump));
        assert!(stages.contains(&MigrationStage::Restore));
        assert!(stages.contains(&MigrationStage::Complete));
    }

    #[tokio::test]
    async fn offline_run_with_split_sections_invokes_pg_restore_three_times() {
        let runner = Arc::new(RecordingRunner::default());
        let reporter = Arc::new(CollectingReporter::new());
        let cfg = MigrationConfig {
            split_sections: true,
            ..baseline_config()
        };
        let migrator = Migrator::new(cfg)
            .with_runner(runner.clone())
            .with_reporter(reporter)
            .with_dump_path(PathBuf::from("/tmp/pg_dbmigrator_split_dump"));

        migrator
            .run(CancellationToken::new())
            .await
            .expect("split-section restore should succeed");

        let calls = runner.snapshot();
        assert_eq!(calls.len(), 4, "1 dump + 3 restore expected");
        assert_eq!(calls[0].0, "pg_dump");
        let sections: Vec<_> = calls[1..]
            .iter()
            .map(|(prog, args)| {
                assert_eq!(prog, "pg_restore");
                args.iter()
                    .find(|a| a.starts_with("--section="))
                    .cloned()
                    .unwrap_or_default()
            })
            .collect();
        assert_eq!(
            sections,
            vec![
                "--section=pre-data".to_string(),
                "--section=data".to_string(),
                "--section=post-data".to_string(),
            ]
        );
    }

    #[tokio::test]
    async fn validation_failure_short_circuits() {
        let cfg = MigrationConfig {
            jobs: 0,
            ..baseline_config()
        };
        let migrator = Migrator::new(cfg);
        let err = migrator.run(CancellationToken::new()).await.unwrap_err();
        assert!(matches!(err, MigrationError::Config(_)));
    }

    #[tokio::test]
    async fn offline_run_skips_dump_when_resume_token_says_dump_complete() {
        let dir = tempfile::tempdir().unwrap();
        let dump = dir.path().join("dump");
        let resume = dir.path().join("dump.resume.json");

        let cfg = MigrationConfig {
            resume: true,
            dump_path: Some(dump.clone()),
            resume_file: Some(resume.clone()),
            split_sections: false,
            ..baseline_config()
        };

        // Pre-seed the token: Dump already complete, Restore not yet.
        let mut t = crate::resume::ResumeToken::new(&cfg, dump.clone());
        t.mark(crate::resume::CompletedStage::Dump);
        t.save(&resume).await.unwrap();

        let runner = Arc::new(RecordingRunner::default());
        let migrator = Migrator::new(cfg)
            .with_runner(runner.clone())
            .with_reporter(Arc::new(CollectingReporter::new()));

        migrator.run(CancellationToken::new()).await.unwrap();

        let calls = runner.snapshot();
        assert_eq!(calls.len(), 1, "expected 1 call (restore only)");
        assert_eq!(calls[0].0, "pg_restore");
    }

    #[tokio::test]
    async fn validation_rejects_resume_without_dump_path() {
        let cfg = MigrationConfig {
            resume: true,
            dump_path: None,
            ..baseline_config()
        };
        let err = cfg.validate().unwrap_err();
        assert!(matches!(err, MigrationError::Config(_)));
    }

    #[test]
    fn dump_request_uses_directory_format_for_parallel_jobs() {
        let cfg = MigrationConfig {
            jobs: 4,
            ..baseline_config()
        };
        let m = Migrator::new(cfg);
        let req = m.dump_request(Path::new("/tmp/dump"), None);
        assert_eq!(req.format, DumpFormat::Directory);
    }

    #[test]
    fn dump_request_uses_custom_format_for_single_job() {
        let cfg = MigrationConfig {
            jobs: 1,
            ..baseline_config()
        };
        let m = Migrator::new(cfg);
        let req = m.dump_request(Path::new("/tmp/dump"), None);
        assert_eq!(req.format, DumpFormat::Custom);
    }

    #[test]
    fn dump_request_propagates_perf_flags() {
        let cfg = MigrationConfig {
            dump_compress: Some("zstd:3".into()),
            no_sync: true,
            no_comments: true,
            no_security_labels: true,
            no_table_access_method: true,
            ..baseline_config()
        };
        let m = Migrator::new(cfg);
        let req = m.dump_request(Path::new("/tmp/dump"), None);
        assert_eq!(req.compress.as_deref(), Some("zstd:3"));
        assert!(req.no_sync);
        assert!(req.no_comments);
        assert!(req.no_security_labels);
        assert!(req.no_table_access_method);
    }

    #[test]
    fn online_validation_inherits_offline_checks() {
        let cfg = MigrationConfig {
            mode: MigrationMode::Online,
            online: OnlineOptions {
                slot_name: "".into(),
                ..OnlineOptions::default()
            },
            ..baseline_config()
        };
        let err = cfg.validate().unwrap_err();
        assert!(matches!(err, MigrationError::Config(_)));
    }

    #[test]
    fn cutover_handle_is_clonable_and_stable_across_calls() {
        let m = Migrator::new(baseline_config());
        let h1 = m.cutover_handle();
        let h2 = m.cutover_handle();
        assert!(!h1.is_requested());
        h1.request();
        // Both clones share state with the migrator's internal handle.
        assert!(h2.is_requested());
    }

    #[test]
    fn migration_outcome_cutover_triggered_reflects_stats() {
        let mut out = MigrationOutcome {
            stats: None,
            dump_path: PathBuf::from("/tmp/x"),
        };
        assert!(!out.cutover_triggered()); // offline: always false

        out.stats = Some(ApplyStats {
            cutover_triggered: true,
            ..ApplyStats::default()
        });
        assert!(out.cutover_triggered());
    }
}