1mod analyze;
2mod convert;
3mod diff;
4mod glob_util;
5mod graph;
6mod merge;
7mod order;
8mod redact;
9mod sample;
10mod shard;
11mod split;
12mod validate;
13
14use clap::{CommandFactory, Parser, Subcommand, ValueHint};
15use clap_complete::{generate, Shell};
16use std::io;
17use std::path::PathBuf;
18
// Shown below the auto-generated top-level help (`sql-splitter --help`).
// The \x1b[1m / \x1b[0m escape sequences render the section headings in
// bold on ANSI-capable terminals.
const AFTER_HELP: &str = "\x1b[1mCommon workflows:\x1b[0m
  Split a dump into per-table files:
    sql-splitter split dump.sql -o tables/

  Create a 10% sample for development:
    sql-splitter sample dump.sql -o dev.sql --percent 10 --preserve-relations

  Convert MySQL to PostgreSQL:
    sql-splitter convert mysql.sql --to postgres -o pg.sql

  Compare two dumps for changes:
    sql-splitter diff old.sql new.sql --format sql -o migration.sql

\x1b[1mMore info:\x1b[0m
  Run 'sql-splitter <command> --help' for command-specific options.
  Documentation: https://github.com/helgesverre/sql-splitter
  Enable completions: sql-splitter completions <shell>";
36
// Top-level argument parser.
//
// NOTE: `//` comments are used deliberately throughout the clap structs in
// this file — `///` doc comments would be picked up by clap-derive as help
// text and change the generated `--help` output.
//
// `arg_required_else_help` makes a bare `sql-splitter` invocation print the
// help instead of erroring; `after_help` appends the common-workflow examples
// defined in AFTER_HELP above.
#[derive(Parser)]
#[command(name = "sql-splitter")]
#[command(author = "Helge Sverre <helge.sverre@gmail.com>")]
#[command(version)]
#[command(about = "High-performance CLI for splitting, merging, converting, and analyzing SQL dump files")]
#[command(after_help = AFTER_HELP)]
#[command(arg_required_else_help = true)]
#[command(max_term_width = 100)]
pub struct Cli {
    // The subcommand to execute; see `Commands` for the full set.
    #[command(subcommand)]
    pub command: Commands,
}
49
// Help-heading labels used via `help_heading = ...` to group related flags
// in each subcommand's `--help` output.
const INPUT_OUTPUT: &str = "Input/Output";
const FILTERING: &str = "Filtering";
const MODE: &str = "Mode";
const BEHAVIOR: &str = "Behavior";
const LIMITS: &str = "Limits";
const OUTPUT_FORMAT: &str = "Output";
57
// All subcommands. Field identifiers double as clap arg ids, which is why
// `conflicts_with = "..."` strings reference the snake_case field names.
// (Plain `//` comments only — doc comments would become clap help text.)
#[derive(Subcommand)]
pub enum Commands {
    // Split a dump into per-table SQL files (supports .gz input and globs,
    // per the examples below).
    #[command(visible_alias = "sp")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter split dump.sql -o tables/
  sql-splitter split dump.sql.gz -o tables/ --tables users,orders
  sql-splitter split dump.sql -o schema/ --schema-only
  sql-splitter split \"backups/*.sql\" -o out/ --fail-fast")]
    Split {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        // Defaults to ./output when -o is omitted.
        #[arg(short, long, default_value = "output", value_hint = ValueHint::DirPath, help_heading = INPUT_OUTPUT)]
        output: PathBuf,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,

        // --schema-only and --data-only are mutually exclusive.
        #[arg(long, conflicts_with = "data_only", help_heading = FILTERING)]
        schema_only: bool,

        #[arg(long, conflicts_with = "schema_only", help_heading = FILTERING)]
        data_only: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        verbose: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        fail_fast: bool,
    },

    // Analyze a dump and report on it (JSON output and globs supported,
    // per the examples).
    #[command(visible_alias = "an")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter analyze dump.sql
  sql-splitter analyze dump.sql.gz --progress
  sql-splitter analyze \"dumps/*.sql\" --json")]
    Analyze {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        fail_fast: bool,
    },

    // Merge a directory of per-table files back into a single dump —
    // the inverse of `split`.
    #[command(visible_alias = "mg")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter merge tables/ -o restored.sql
  sql-splitter merge tables/ -o restored.sql --transaction
  sql-splitter merge tables/ -o partial.sql --tables users,orders
  sql-splitter merge tables/ -o clean.sql --exclude logs,cache")]
    Merge {
        #[arg(value_hint = ValueHint::DirPath, help_heading = INPUT_OUTPUT)]
        input_dir: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        // NOTE(review): unlike the other commands, merge defaults its dialect
        // ("mysql"), so this Option is always Some after parsing.
        #[arg(short, long, default_value = "mysql", help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        exclude: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        transaction: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        no_header: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,
    },

    // Produce a smaller sample of a dump (by percentage or row count),
    // optionally preserving FK relations.
    #[command(visible_alias = "sa")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter sample dump.sql -o dev.sql --percent 10
  sql-splitter sample dump.sql -o dev.sql --rows 1000 --preserve-relations
  sql-splitter sample dump.sql -o dev.sql --percent 5 --seed 42
  sql-splitter sample dump.sql -o dev.sql --tables users,orders --percent 20")]
    Sample {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        config: Option<PathBuf>,

        // Exactly one sampling mode: --percent or --rows.
        #[arg(long, conflicts_with = "rows", help_heading = MODE)]
        percent: Option<u32>,

        #[arg(long, conflicts_with = "percent", help_heading = MODE)]
        rows: Option<usize>,

        // Seed for deterministic sampling.
        #[arg(long, help_heading = MODE)]
        seed: Option<u64>,

        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        exclude: Option<String>,

        #[arg(long, help_heading = FILTERING)]
        root_tables: Option<String>,

        #[arg(long, default_value = "lookups", help_heading = FILTERING)]
        include_global: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        preserve_relations: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        strict_fk: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        no_schema: bool,

        // 0 or --no-limit disables the cap (resolved in `run`).
        #[arg(long, help_heading = LIMITS)]
        max_total_rows: Option<usize>,

        #[arg(long, help_heading = LIMITS)]
        no_limit: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,
    },

    // Extract the rows belonging to one or more tenant values from a
    // multi-tenant dump.
    #[command(visible_alias = "sh")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter shard dump.sql -o tenant.sql --tenant-value 123
  sql-splitter shard dump.sql -o tenant.sql --tenant-column company_id --tenant-value 42
  sql-splitter shard dump.sql -o shards/ --tenant-values \"1,2,3\"")]
    Shard {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        config: Option<PathBuf>,

        #[arg(long, help_heading = MODE)]
        tenant_column: Option<String>,

        // Single tenant vs. comma-separated list — mutually exclusive.
        #[arg(long, conflicts_with = "tenant_values", help_heading = MODE)]
        tenant_value: Option<String>,

        #[arg(long, conflicts_with = "tenant_value", help_heading = MODE)]
        tenant_values: Option<String>,

        #[arg(long, help_heading = FILTERING)]
        root_tables: Option<String>,

        #[arg(long, default_value = "lookups", help_heading = FILTERING)]
        include_global: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        strict_fk: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        no_schema: bool,

        // 0 or --no-limit disables the cap (resolved in `run`).
        #[arg(long, help_heading = LIMITS)]
        max_selected_rows: Option<usize>,

        #[arg(long, help_heading = LIMITS)]
        no_limit: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,
    },

    // Convert a dump from one SQL dialect to another; writes to stdout when
    // no -o is given (pipeable, per the last example).
    #[command(visible_alias = "cv")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter convert mysql.sql --to postgres -o pg.sql
  sql-splitter convert pg_dump.sql --to mysql -o mysql.sql
  sql-splitter convert dump.sql --from mysql --to sqlite -o sqlite.sql
  sql-splitter convert mysql.sql --to postgres | psql mydb")]
    Convert {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        // Source dialect; optional (presumably auto-detected when omitted —
        // see convert module).
        #[arg(long, help_heading = MODE)]
        from: Option<String>,

        // Target dialect; the only required flag on this command.
        #[arg(long, help_heading = MODE)]
        to: String,

        #[arg(long, help_heading = BEHAVIOR)]
        strict: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        fail_fast: bool,
    },

    // Validate a dump (optionally skipping FK checks or running strict mode).
    #[command(visible_alias = "val")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter validate dump.sql
  sql-splitter validate dump.sql --strict
  sql-splitter validate \"dumps/*.sql\" --json --fail-fast
  sql-splitter validate dump.sql --no-fk-checks")]
    Validate {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        strict: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        no_fk_checks: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        fail_fast: bool,

        // 0 or --no-limit lifts the cap (resolved to usize::MAX in `run`).
        #[arg(long, default_value = "1000000", help_heading = LIMITS)]
        max_rows_per_table: usize,

        #[arg(long, help_heading = LIMITS)]
        no_limit: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,
    },

    // Compare two dumps; can emit a text report or SQL migration output.
    #[command(visible_alias = "df")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter diff old.sql new.sql
  sql-splitter diff old.sql new.sql --schema-only
  sql-splitter diff old.sql new.sql --format sql -o migration.sql
  sql-splitter diff old.sql new.sql --verbose --ignore-columns \"*.updated_at\"
  sql-splitter diff old.sql new.sql --primary-key logs:timestamp+message")]
    Diff {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        old_file: PathBuf,

        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        new_file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        exclude: Option<String>,

        // Column patterns to ignore, e.g. "*.updated_at".
        #[arg(long, help_heading = FILTERING)]
        ignore_columns: Option<String>,

        #[arg(long, conflicts_with = "data_only", help_heading = MODE)]
        schema_only: bool,

        #[arg(long, conflicts_with = "schema_only", help_heading = MODE)]
        data_only: bool,

        // Per-table PK override, e.g. "logs:timestamp+message".
        #[arg(long, help_heading = MODE)]
        primary_key: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        allow_no_pk: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        ignore_order: bool,

        #[arg(long, default_value = "10000000", help_heading = LIMITS)]
        max_pk_entries: usize,

        // Defaults to "text"; "sql" emits migration statements.
        #[arg(short, long, default_value = "text", help_heading = OUTPUT_FORMAT)]
        format: Option<String>,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        verbose: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,
    },

    // Redact sensitive columns via a YAML config or inline strategy flags.
    #[command(visible_alias = "rd")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter redact dump.sql -o safe.sql --config redact.yaml
  sql-splitter redact dump.sql -o safe.sql --null \"*.ssn\" --hash \"*.email\"
  sql-splitter redact dump.sql --generate-config -o redact.yaml
  sql-splitter redact dump.sql -o safe.sql --config redact.yaml --seed 42")]
    Redact {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        config: Option<PathBuf>,

        // Emit a starter config file instead of redacting.
        #[arg(long, help_heading = MODE)]
        generate_config: bool,

        // Inline column-pattern strategies; each flag takes a comma-separated
        // list of "table.column" patterns.
        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        null: Vec<String>,

        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        hash: Vec<String>,

        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        fake: Vec<String>,

        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        mask: Vec<String>,

        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        constant: Vec<String>,

        // Seed for deterministic redaction output.
        #[arg(long, help_heading = MODE)]
        seed: Option<u64>,

        #[arg(long, default_value = "en", help_heading = MODE)]
        locale: String,

        #[arg(short, long, value_delimiter = ',', help_heading = FILTERING)]
        tables: Vec<String>,

        // -e is not available here, hence the explicit -x short flag.
        #[arg(short = 'x', long, value_delimiter = ',', help_heading = FILTERING)]
        exclude: Vec<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        strict: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        validate: bool,
    },

    // Emit the schema's relationship graph (html/mermaid/rendered image),
    // with optional cycle or per-table focus.
    #[command(visible_alias = "gr")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter graph dump.sql -o schema.html
  sql-splitter graph dump.sql -o schema.mmd --format mermaid
  sql-splitter graph dump.sql -o schema.png --render
  sql-splitter graph dump.sql --cycles-only
  sql-splitter graph dump.sql --table users --transitive")]
    Graph {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        format: Option<String>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        // Graph layout direction; defaults to left-to-right.
        #[arg(long, default_value = "lr", help_heading = OUTPUT_FORMAT)]
        layout: Option<String>,

        #[arg(long, help_heading = FILTERING)]
        cycles_only: bool,

        // Focus the graph on a single table.
        #[arg(long, help_heading = FILTERING)]
        table: Option<String>,

        #[arg(long, help_heading = FILTERING)]
        transitive: bool,

        #[arg(long, help_heading = FILTERING)]
        reverse: bool,

        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        exclude: Option<String>,

        #[arg(long, help_heading = FILTERING)]
        max_depth: Option<usize>,

        #[arg(long, help_heading = BEHAVIOR)]
        render: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,
    },

    // Reorder a dump's statements (presumably by dependency order — see the
    // order module); supports check-only and reverse modes.
    #[command(visible_alias = "ord")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter order dump.sql -o ordered.sql
  sql-splitter order dump.sql --check
  sql-splitter order dump.sql --dry-run
  sql-splitter order dump.sql --reverse")]
    Order {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        check: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        reverse: bool,
    },

    // Generate shell completion scripts to stdout (see `run`).
    #[command(after_help = "\x1b[1mInstallation:\x1b[0m
  Bash:
    sql-splitter completions bash > /etc/bash_completion.d/sql-splitter
    # or: sql-splitter completions bash >> ~/.bashrc

  Zsh:
    sql-splitter completions zsh > \"${fpath[1]}/_sql-splitter\"
    # or for oh-my-zsh: sql-splitter completions zsh > ~/.oh-my-zsh/completions/_sql-splitter

  Fish:
    sql-splitter completions fish > ~/.config/fish/completions/sql-splitter.fish

  PowerShell:
    sql-splitter completions powershell >> $PROFILE")]
    Completions {
        #[arg(value_enum)]
        shell: Shell,
    },
}
720
721pub fn run(cli: Cli) -> anyhow::Result<()> {
722 match cli.command {
723 Commands::Split {
724 file,
725 output,
726 dialect,
727 verbose,
728 dry_run,
729 progress,
730 tables,
731 schema_only,
732 data_only,
733 fail_fast,
734 json,
735 } => split::run(
736 file,
737 output,
738 dialect,
739 verbose,
740 dry_run,
741 progress,
742 tables,
743 schema_only,
744 data_only,
745 fail_fast,
746 json,
747 ),
748 Commands::Analyze {
749 file,
750 dialect,
751 progress,
752 fail_fast,
753 json,
754 } => analyze::run(file, dialect, progress, fail_fast, json),
755 Commands::Merge {
756 input_dir,
757 output,
758 dialect,
759 tables,
760 exclude,
761 transaction,
762 no_header,
763 progress,
764 dry_run,
765 json,
766 } => merge::run(
767 input_dir,
768 output,
769 dialect,
770 tables,
771 exclude,
772 transaction,
773 no_header,
774 progress,
775 dry_run,
776 json,
777 ),
778 Commands::Sample {
779 file,
780 output,
781 dialect,
782 percent,
783 rows,
784 preserve_relations,
785 tables,
786 exclude,
787 root_tables,
788 include_global,
789 seed,
790 config,
791 max_total_rows,
792 no_limit,
793 strict_fk,
794 no_schema,
795 progress,
796 dry_run,
797 json,
798 } => {
799 let effective_limit = if no_limit || max_total_rows == Some(0) {
800 None
801 } else {
802 max_total_rows
803 };
804 sample::run(
805 file,
806 output,
807 dialect,
808 percent,
809 rows,
810 preserve_relations,
811 tables,
812 exclude,
813 root_tables,
814 include_global,
815 seed,
816 config,
817 effective_limit,
818 strict_fk,
819 no_schema,
820 progress,
821 dry_run,
822 json,
823 )
824 }
825 Commands::Shard {
826 file,
827 output,
828 dialect,
829 tenant_column,
830 tenant_value,
831 tenant_values,
832 root_tables,
833 include_global,
834 config,
835 max_selected_rows,
836 no_limit,
837 strict_fk,
838 no_schema,
839 progress,
840 dry_run,
841 json,
842 } => {
843 let effective_limit = if no_limit || max_selected_rows == Some(0) {
844 None
845 } else {
846 max_selected_rows
847 };
848 shard::run(
849 file,
850 output,
851 dialect,
852 tenant_column,
853 tenant_value,
854 tenant_values,
855 root_tables,
856 include_global,
857 config,
858 effective_limit,
859 strict_fk,
860 no_schema,
861 progress,
862 dry_run,
863 json,
864 )
865 }
866 Commands::Convert {
867 file,
868 output,
869 from,
870 to,
871 strict,
872 progress,
873 dry_run,
874 fail_fast,
875 json,
876 } => convert::run(
877 file, output, from, to, strict, progress, dry_run, fail_fast, json,
878 ),
879 Commands::Validate {
880 file,
881 dialect,
882 progress,
883 strict,
884 json,
885 max_rows_per_table,
886 no_limit,
887 no_fk_checks,
888 fail_fast,
889 } => {
890 let effective_limit = if no_limit || max_rows_per_table == 0 {
891 usize::MAX
892 } else {
893 max_rows_per_table
894 };
895 validate::run(
896 file,
897 dialect,
898 progress,
899 strict,
900 json,
901 effective_limit,
902 no_fk_checks,
903 fail_fast,
904 )
905 }
906 Commands::Diff {
907 old_file,
908 new_file,
909 output,
910 tables,
911 exclude,
912 schema_only,
913 data_only,
914 format,
915 dialect,
916 verbose,
917 progress,
918 max_pk_entries,
919 allow_no_pk,
920 ignore_order,
921 primary_key,
922 ignore_columns,
923 } => diff::run(
924 old_file,
925 new_file,
926 output,
927 tables,
928 exclude,
929 schema_only,
930 data_only,
931 format,
932 dialect,
933 verbose,
934 progress,
935 max_pk_entries,
936 allow_no_pk,
937 ignore_order,
938 primary_key,
939 ignore_columns,
940 ),
941 Commands::Redact {
942 file,
943 output,
944 dialect,
945 config,
946 generate_config,
947 null,
948 hash,
949 fake,
950 mask,
951 constant,
952 seed,
953 locale,
954 tables,
955 exclude,
956 strict,
957 progress,
958 dry_run,
959 json,
960 validate,
961 } => redact::run(
962 file,
963 output,
964 dialect,
965 config,
966 generate_config,
967 null,
968 hash,
969 fake,
970 mask,
971 constant,
972 seed,
973 locale,
974 tables,
975 exclude,
976 strict,
977 progress,
978 dry_run,
979 json,
980 validate,
981 ),
982 Commands::Graph {
983 file,
984 output,
985 format,
986 dialect,
987 layout,
988 cycles_only,
989 table,
990 transitive,
991 reverse,
992 tables,
993 exclude,
994 max_depth,
995 render,
996 progress,
997 json,
998 } => graph::run(
999 file, output, format, dialect, layout, cycles_only, table, transitive, reverse,
1000 tables, exclude, max_depth, render, progress, json,
1001 ),
1002 Commands::Order {
1003 file,
1004 output,
1005 dialect,
1006 check,
1007 dry_run,
1008 reverse,
1009 } => order::run(file, output, dialect, check, dry_run, reverse),
1010 Commands::Completions { shell } => {
1011 generate(
1012 shell,
1013 &mut Cli::command(),
1014 "sql-splitter",
1015 &mut io::stdout(),
1016 );
1017 Ok(())
1018 }
1019 }
1020}