1mod analyze;
2mod convert;
3mod diff;
4mod glob_util;
5mod graph;
6mod merge;
7mod order;
8mod query;
9mod redact;
10mod sample;
11mod shard;
12mod split;
13mod validate;
14
15use clap::{CommandFactory, Parser, Subcommand, ValueHint};
16use clap_complete::{generate, Shell};
17use std::io;
18use std::path::PathBuf;
19
/// Epilogue appended to the top-level `--help` output (wired up via
/// `after_help` on [`Cli`]). The `\x1b[1m` / `\x1b[0m` sequences are ANSI
/// bold-on / reset escape codes used to highlight section headers.
const AFTER_HELP: &str = "\x1b[1mCommon workflows:\x1b[0m
  Split a dump into per-table files:
    sql-splitter split dump.sql -o tables/

  Create a 10% sample for development:
    sql-splitter sample dump.sql -o dev.sql --percent 10 --preserve-relations

  Convert MySQL to PostgreSQL:
    sql-splitter convert mysql.sql --to postgres -o pg.sql

  Compare two dumps for changes:
    sql-splitter diff old.sql new.sql --format sql -o migration.sql

\x1b[1mMore info:\x1b[0m
  Run 'sql-splitter <command> --help' for command-specific options.
  Documentation: https://github.com/helgesverre/sql-splitter
  Enable completions: sql-splitter completions <shell>";
37
38#[derive(Parser)]
39#[command(name = "sql-splitter")]
40#[command(author = "Helge Sverre <helge.sverre@gmail.com>")]
41#[command(version)]
42#[command(
43 about = "High-performance CLI for splitting, merging, converting, and analyzing SQL dump files"
44)]
45#[command(after_help = AFTER_HELP)]
46#[command(arg_required_else_help = true)]
47#[command(max_term_width = 100)]
48pub struct Cli {
49 #[command(subcommand)]
50 pub command: Commands,
51}
52
/// Help-heading labels used to group arguments within each subcommand's
/// `--help` output (referenced via `help_heading = ...` in `Commands`).
/// Kept as shared consts so the group names stay consistent across all
/// subcommands.
const INPUT_OUTPUT: &str = "Input/Output";
const FILTERING: &str = "Filtering";
const MODE: &str = "Mode";
const BEHAVIOR: &str = "Behavior";
const LIMITS: &str = "Limits";
const OUTPUT_FORMAT: &str = "Output";
60
// All sql-splitter subcommands. Field names become flag names under clap
// derive, so they are part of the CLI's public interface.
//
// NOTE: comments in here deliberately use `//`, not `///` — clap derive
// turns doc comments into help text, which would change the program's
// `--help` output.
#[derive(Subcommand)]
pub enum Commands {
    // Split one dump (or a glob of dumps) into one file per table.
    #[command(visible_alias = "sp")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter split dump.sql -o tables/
  sql-splitter split dump.sql.gz -o tables/ --tables users,orders
  sql-splitter split dump.sql -o schema/ --schema-only
  sql-splitter split \"backups/*.sql\" -o out/ --fail-fast")]
    Split {
        // Input dump file (a glob pattern is accepted — see examples).
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        // Output directory; defaults to ./output.
        #[arg(short, long, default_value = "output", value_hint = ValueHint::DirPath, help_heading = INPUT_OUTPUT)]
        output: PathBuf,

        // SQL dialect override; presumably auto-detected when omitted — TODO confirm in split::run.
        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        // Comma-separated table list to include (see examples).
        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,

        // --schema-only and --data-only are mutually exclusive.
        #[arg(long, conflicts_with = "data_only", help_heading = FILTERING)]
        schema_only: bool,

        #[arg(long, conflicts_with = "schema_only", help_heading = FILTERING)]
        data_only: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        verbose: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        fail_fast: bool,
    },

    // Inspect a dump and report statistics without writing output files.
    #[command(visible_alias = "an")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter analyze dump.sql
  sql-splitter analyze dump.sql.gz --progress
  sql-splitter analyze \"dumps/*.sql\" --json")]
    Analyze {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        fail_fast: bool,
    },

    // Recombine a directory of per-table files back into a single dump.
    #[command(visible_alias = "mg")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter merge tables/ -o restored.sql
  sql-splitter merge tables/ -o restored.sql --transaction
  sql-splitter merge tables/ -o partial.sql --tables users,orders
  sql-splitter merge tables/ -o clean.sql --exclude logs,cache")]
    Merge {
        #[arg(value_hint = ValueHint::DirPath, help_heading = INPUT_OUTPUT)]
        input_dir: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        // NOTE(review): default_value on an Option means this is always
        // Some("mysql") — the Option wrapper is effectively dead, and this is
        // inconsistent with the other subcommands, where dialect has no
        // default. Confirm merge::run's expectations before changing.
        #[arg(short, long, default_value = "mysql", help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        exclude: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        transaction: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        no_header: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,
    },

    // Produce a smaller, statistically sampled dump (by percent or row count).
    #[command(visible_alias = "sa")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter sample dump.sql -o dev.sql --percent 10
  sql-splitter sample dump.sql -o dev.sql --rows 1000 --preserve-relations
  sql-splitter sample dump.sql -o dev.sql --percent 5 --seed 42
  sql-splitter sample dump.sql -o dev.sql --tables users,orders --percent 20")]
    Sample {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        config: Option<PathBuf>,

        // Exactly one sampling mode: percentage or absolute row count.
        #[arg(long, conflicts_with = "rows", help_heading = MODE)]
        percent: Option<u32>,

        #[arg(long, conflicts_with = "percent", help_heading = MODE)]
        rows: Option<usize>,

        // RNG seed for reproducible samples (see --seed example above).
        #[arg(long, help_heading = MODE)]
        seed: Option<u64>,

        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        exclude: Option<String>,

        #[arg(long, help_heading = FILTERING)]
        root_tables: Option<String>,

        // Defaults to "lookups"; presumably a category of tables copied in
        // full — TODO confirm semantics in sample::run.
        #[arg(long, default_value = "lookups", help_heading = FILTERING)]
        include_global: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        preserve_relations: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        strict_fk: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        no_schema: bool,

        // Row cap across all tables; 0 or --no-limit disables it (see run()).
        #[arg(long, help_heading = LIMITS)]
        max_total_rows: Option<usize>,

        #[arg(long, help_heading = LIMITS)]
        no_limit: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,
    },

    // Extract the rows belonging to one tenant (or several) from a
    // multi-tenant dump.
    #[command(visible_alias = "sh")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter shard dump.sql -o tenant.sql --tenant-value 123
  sql-splitter shard dump.sql -o tenant.sql --tenant-column company_id --tenant-value 42
  sql-splitter shard dump.sql -o shards/ --tenant-values \"1,2,3\"")]
    Shard {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        config: Option<PathBuf>,

        #[arg(long, help_heading = MODE)]
        tenant_column: Option<String>,

        // Single tenant vs. comma-separated list — mutually exclusive.
        #[arg(long, conflicts_with = "tenant_values", help_heading = MODE)]
        tenant_value: Option<String>,

        #[arg(long, conflicts_with = "tenant_value", help_heading = MODE)]
        tenant_values: Option<String>,

        #[arg(long, help_heading = FILTERING)]
        root_tables: Option<String>,

        #[arg(long, default_value = "lookups", help_heading = FILTERING)]
        include_global: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        strict_fk: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        no_schema: bool,

        // Row cap; 0 or --no-limit disables it (normalized in run()).
        #[arg(long, help_heading = LIMITS)]
        max_selected_rows: Option<usize>,

        #[arg(long, help_heading = LIMITS)]
        no_limit: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,
    },

    // Translate a dump between SQL dialects (writes to stdout when no -o,
    // per the pipe example).
    #[command(visible_alias = "cv")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter convert mysql.sql --to postgres -o pg.sql
  sql-splitter convert pg_dump.sql --to mysql -o mysql.sql
  sql-splitter convert dump.sql --from mysql --to sqlite -o sqlite.sql
  sql-splitter convert mysql.sql --to postgres | psql mydb")]
    Convert {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        // Source dialect; optional (see the examples without --from).
        #[arg(long, help_heading = MODE)]
        from: Option<String>,

        // Target dialect; the only required flag of this subcommand.
        #[arg(long, help_heading = MODE)]
        to: String,

        #[arg(long, help_heading = BEHAVIOR)]
        strict: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        fail_fast: bool,
    },

    // Check a dump for problems (syntax, FK consistency) without modifying it.
    #[command(visible_alias = "val")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter validate dump.sql
  sql-splitter validate dump.sql --strict
  sql-splitter validate \"dumps/*.sql\" --json --fail-fast
  sql-splitter validate dump.sql --no-fk-checks")]
    Validate {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        strict: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        no_fk_checks: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        fail_fast: bool,

        // Per-table row cap; 0 or --no-limit maps to usize::MAX in run().
        #[arg(long, default_value = "1000000", help_heading = LIMITS)]
        max_rows_per_table: usize,

        #[arg(long, help_heading = LIMITS)]
        no_limit: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,
    },

    // Compare two dumps; can emit a SQL migration (--format sql).
    #[command(visible_alias = "df")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter diff old.sql new.sql
  sql-splitter diff old.sql new.sql --schema-only
  sql-splitter diff old.sql new.sql --format sql -o migration.sql
  sql-splitter diff old.sql new.sql --verbose --ignore-columns \"*.updated_at\"
  sql-splitter diff old.sql new.sql --primary-key logs:timestamp+message")]
    Diff {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        old_file: PathBuf,

        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        new_file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        exclude: Option<String>,

        // Column patterns to skip when comparing rows, e.g. "*.updated_at".
        #[arg(long, help_heading = FILTERING)]
        ignore_columns: Option<String>,

        #[arg(long, conflicts_with = "data_only", help_heading = MODE)]
        schema_only: bool,

        #[arg(long, conflicts_with = "schema_only", help_heading = MODE)]
        data_only: bool,

        // Per-table PK override, e.g. "logs:timestamp+message" (see example).
        #[arg(long, help_heading = MODE)]
        primary_key: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        allow_no_pk: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        ignore_order: bool,

        #[arg(long, default_value = "10000000", help_heading = LIMITS)]
        max_pk_entries: usize,

        #[arg(short, long, default_value = "text", help_heading = OUTPUT_FORMAT)]
        format: Option<String>,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        verbose: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,
    },

    // Scrub sensitive data, driven by a YAML config and/or inline
    // column-pattern strategies (--null/--hash/--fake/--mask/--constant).
    #[command(visible_alias = "rd")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter redact dump.sql -o safe.sql --config redact.yaml
  sql-splitter redact dump.sql -o safe.sql --null \"*.ssn\" --hash \"*.email\"
  sql-splitter redact dump.sql --generate-config -o redact.yaml
  sql-splitter redact dump.sql -o safe.sql --config redact.yaml --seed 42")]
    Redact {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        config: Option<PathBuf>,

        // Emit a starter config instead of redacting (see example 3).
        #[arg(long, help_heading = MODE)]
        generate_config: bool,

        // Inline strategies: each flag takes comma-separated column patterns.
        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        null: Vec<String>,

        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        hash: Vec<String>,

        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        fake: Vec<String>,

        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        mask: Vec<String>,

        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        constant: Vec<String>,

        // Seed for deterministic redaction output (see --seed example).
        #[arg(long, help_heading = MODE)]
        seed: Option<u64>,

        #[arg(long, default_value = "en", help_heading = MODE)]
        locale: String,

        #[arg(short, long, value_delimiter = ',', help_heading = FILTERING)]
        tables: Vec<String>,

        // -x: short -e would presumably collide with another flag; TODO
        // confirm why exclude is Vec here but Option<String> elsewhere.
        #[arg(short = 'x', long, value_delimiter = ',', help_heading = FILTERING)]
        exclude: Vec<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        strict: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        validate: bool,
    },

    // Visualize the FK relationship graph (HTML/Mermaid/rendered image).
    #[command(visible_alias = "gr")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter graph dump.sql -o schema.html
  sql-splitter graph dump.sql -o schema.mmd --format mermaid
  sql-splitter graph dump.sql -o schema.png --render
  sql-splitter graph dump.sql --cycles-only
  sql-splitter graph dump.sql --table users --transitive")]
    Graph {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        format: Option<String>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        // Graph layout direction, default "lr" (presumably left-to-right).
        #[arg(long, default_value = "lr", help_heading = OUTPUT_FORMAT)]
        layout: Option<String>,

        #[arg(long, help_heading = FILTERING)]
        cycles_only: bool,

        // Focus on a single table; --transitive/--reverse refine traversal.
        #[arg(long, help_heading = FILTERING)]
        table: Option<String>,

        #[arg(long, help_heading = FILTERING)]
        transitive: bool,

        #[arg(long, help_heading = FILTERING)]
        reverse: bool,

        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        exclude: Option<String>,

        #[arg(long, help_heading = FILTERING)]
        max_depth: Option<usize>,

        #[arg(long, help_heading = BEHAVIOR)]
        render: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,
    },

    // Reorder statements in a dump (e.g. FK-safe order); --check only
    // reports, --reverse inverts the ordering.
    #[command(visible_alias = "ord")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter order dump.sql -o ordered.sql
  sql-splitter order dump.sql --check
  sql-splitter order dump.sql --dry-run
  sql-splitter order dump.sql --reverse")]
    Order {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        check: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        reverse: bool,
    },

    // Run SQL queries against a dump; args are complex enough that they live
    // in their own struct (query::QueryArgs, flattened as a tuple variant).
    #[command(visible_alias = "qy")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter query dump.sql \"SELECT COUNT(*) FROM users\"
  sql-splitter query dump.sql \"SELECT * FROM orders WHERE total > 100\" -f json
  sql-splitter query dump.sql \"SELECT * FROM users LIMIT 10\" -o results.csv -f csv
  sql-splitter query dump.sql --interactive
  sql-splitter query huge.sql \"SELECT ...\" --disk
  sql-splitter query dump.sql \"SELECT ...\" --cache
  sql-splitter query --list-cache")]
    Query(query::QueryArgs),

    // Print a shell-completion script for the given shell to stdout.
    #[command(after_help = "\x1b[1mInstallation:\x1b[0m
  Bash:
    sql-splitter completions bash > /etc/bash_completion.d/sql-splitter
    # or: sql-splitter completions bash >> ~/.bashrc

  Zsh:
    sql-splitter completions zsh > \"${fpath[1]}/_sql-splitter\"
    # or for oh-my-zsh: sql-splitter completions zsh > ~/.oh-my-zsh/completions/_sql-splitter

  Fish:
    sql-splitter completions fish > ~/.config/fish/completions/sql-splitter.fish

  PowerShell:
    sql-splitter completions powershell >> $PROFILE")]
    Completions {
        #[arg(value_enum)]
        shell: Shell,
    },
}
735
/// Dispatch the parsed CLI to the matching subcommand module.
///
/// Each arm destructures the variant and forwards every field positionally
/// to `<module>::run(...)`. The Sample/Shard/Validate arms first normalize
/// their row-limit flags: `--no-limit`, or an explicit limit of 0, means
/// "unlimited" (`None` for the `Option<usize>` limits, `usize::MAX` for
/// Validate's plain `usize`).
///
/// # Errors
/// Propagates whatever error the invoked subcommand returns.
pub fn run(cli: Cli) -> anyhow::Result<()> {
    match cli.command {
        Commands::Split {
            file,
            output,
            dialect,
            verbose,
            dry_run,
            progress,
            tables,
            schema_only,
            data_only,
            fail_fast,
            json,
        } => split::run(
            file,
            output,
            dialect,
            verbose,
            dry_run,
            progress,
            tables,
            schema_only,
            data_only,
            fail_fast,
            json,
        ),
        Commands::Analyze {
            file,
            dialect,
            progress,
            fail_fast,
            json,
        } => analyze::run(file, dialect, progress, fail_fast, json),
        Commands::Merge {
            input_dir,
            output,
            dialect,
            tables,
            exclude,
            transaction,
            no_header,
            progress,
            dry_run,
            json,
        } => merge::run(
            input_dir,
            output,
            dialect,
            tables,
            exclude,
            transaction,
            no_header,
            progress,
            dry_run,
            json,
        ),
        Commands::Sample {
            file,
            output,
            dialect,
            percent,
            rows,
            preserve_relations,
            tables,
            exclude,
            root_tables,
            include_global,
            seed,
            config,
            max_total_rows,
            no_limit,
            strict_fk,
            no_schema,
            progress,
            dry_run,
            json,
        } => {
            // --no-limit or --max-total-rows 0 => no cap at all.
            let effective_limit = if no_limit || max_total_rows == Some(0) {
                None
            } else {
                max_total_rows
            };
            sample::run(
                file,
                output,
                dialect,
                percent,
                rows,
                preserve_relations,
                tables,
                exclude,
                root_tables,
                include_global,
                seed,
                config,
                effective_limit,
                strict_fk,
                no_schema,
                progress,
                dry_run,
                json,
            )
        }
        Commands::Shard {
            file,
            output,
            dialect,
            tenant_column,
            tenant_value,
            tenant_values,
            root_tables,
            include_global,
            config,
            max_selected_rows,
            no_limit,
            strict_fk,
            no_schema,
            progress,
            dry_run,
            json,
        } => {
            // Same limit normalization as Sample, for --max-selected-rows.
            let effective_limit = if no_limit || max_selected_rows == Some(0) {
                None
            } else {
                max_selected_rows
            };
            shard::run(
                file,
                output,
                dialect,
                tenant_column,
                tenant_value,
                tenant_values,
                root_tables,
                include_global,
                config,
                effective_limit,
                strict_fk,
                no_schema,
                progress,
                dry_run,
                json,
            )
        }
        Commands::Convert {
            file,
            output,
            from,
            to,
            strict,
            progress,
            dry_run,
            fail_fast,
            json,
        } => convert::run(
            file, output, from, to, strict, progress, dry_run, fail_fast, json,
        ),
        Commands::Validate {
            file,
            dialect,
            progress,
            strict,
            json,
            max_rows_per_table,
            no_limit,
            no_fk_checks,
            fail_fast,
        } => {
            // max_rows_per_table is a plain usize (default 1_000_000), so
            // "unlimited" is expressed as usize::MAX rather than None.
            let effective_limit = if no_limit || max_rows_per_table == 0 {
                usize::MAX
            } else {
                max_rows_per_table
            };
            validate::run(
                file,
                dialect,
                progress,
                strict,
                json,
                effective_limit,
                no_fk_checks,
                fail_fast,
            )
        }
        Commands::Diff {
            old_file,
            new_file,
            output,
            tables,
            exclude,
            schema_only,
            data_only,
            format,
            dialect,
            verbose,
            progress,
            max_pk_entries,
            allow_no_pk,
            ignore_order,
            primary_key,
            ignore_columns,
        } => diff::run(
            old_file,
            new_file,
            output,
            tables,
            exclude,
            schema_only,
            data_only,
            format,
            dialect,
            verbose,
            progress,
            max_pk_entries,
            allow_no_pk,
            ignore_order,
            primary_key,
            ignore_columns,
        ),
        Commands::Redact {
            file,
            output,
            dialect,
            config,
            generate_config,
            null,
            hash,
            fake,
            mask,
            constant,
            seed,
            locale,
            tables,
            exclude,
            strict,
            progress,
            dry_run,
            json,
            validate,
        } => redact::run(
            file,
            output,
            dialect,
            config,
            generate_config,
            null,
            hash,
            fake,
            mask,
            constant,
            seed,
            locale,
            tables,
            exclude,
            strict,
            progress,
            dry_run,
            json,
            validate,
        ),
        Commands::Graph {
            file,
            output,
            format,
            dialect,
            layout,
            cycles_only,
            table,
            transitive,
            reverse,
            tables,
            exclude,
            max_depth,
            render,
            progress,
            json,
        } => graph::run(
            file,
            output,
            format,
            dialect,
            layout,
            cycles_only,
            table,
            transitive,
            reverse,
            tables,
            exclude,
            max_depth,
            render,
            progress,
            json,
        ),
        Commands::Order {
            file,
            output,
            dialect,
            check,
            dry_run,
            reverse,
        } => order::run(file, output, dialect, check, dry_run, reverse),
        // Query owns its arg struct, so it forwards wholesale.
        Commands::Query(args) => query::run(args),
        Commands::Completions { shell } => {
            // Emit the completion script for `shell` to stdout; the command
            // definition is rebuilt from the derive via Cli::command().
            generate(
                shell,
                &mut Cli::command(),
                "sql-splitter",
                &mut io::stdout(),
            );
            Ok(())
        }
    }
}