pub(crate) mod analyze;
pub(crate) mod convert;
mod diff;
mod glob_util;
pub(crate) mod graph;
pub(crate) mod merge;
mod order;
mod query;
pub(crate) mod redact;
pub(crate) mod sample;
pub(crate) mod shard;
pub(crate) mod split;
pub(crate) mod validate;

use clap::{CommandFactory, Parser, Subcommand, ValueHint};
use clap_complete::{generate, Shell};
use std::io;
use std::path::PathBuf;

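// Rendered at the bottom of `sql-splitter --help`; the \x1b[1m / \x1b[0m pairs
// are the ANSI escapes for bold-on / reset, so section titles stand out in terminals.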
const AFTER_HELP: &str = "\x1b[1mCommon workflows:\x1b[0m
  Split a dump into per-table files:
    sql-splitter split dump.sql -o tables/

  Create a 10% sample for development:
    sql-splitter sample dump.sql -o dev.sql --percent 10 --preserve-relations

  Convert MySQL to PostgreSQL:
    sql-splitter convert mysql.sql --to postgres -o pg.sql

  Compare two dumps for changes:
    sql-splitter diff old.sql new.sql --format sql -o migration.sql

\x1b[1mMore info:\x1b[0m
  Run 'sql-splitter <command> --help' for command-specific options.
  Documentation: https://github.com/helgesverre/sql-splitter
  Enable completions: sql-splitter completions <shell>";

#[derive(Parser)]
#[command(name = "sql-splitter")]
#[command(author = "Helge Sverre <helge.sverre@gmail.com>")]
#[command(version)]
#[command(
    about = "High-performance CLI for splitting, merging, converting, and analyzing SQL dump files"
)]
#[command(after_help = AFTER_HELP)]
#[command(arg_required_else_help = true)]
#[command(max_term_width = 100)]
pub struct Cli {
    #[command(subcommand)]
    pub command: Commands,
}

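// Labels passed to `help_heading` so related flags are grouped under a shared
// section in each subcommand's --help output.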
const INPUT_OUTPUT: &str = "Input/Output";
const FILTERING: &str = "Filtering";
const MODE: &str = "Mode";
const BEHAVIOR: &str = "Behavior";
const LIMITS: &str = "Limits";
const OUTPUT_FORMAT: &str = "Output";

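// One variant per subcommand; `visible_alias` gives the short form (e.g. `sp`
// for `split`) that clap lists alongside the full command name.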
#[derive(Subcommand)]
pub enum Commands {
    #[command(visible_alias = "sp")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter split dump.sql -o tables/
  sql-splitter split dump.sql.gz -o tables/ --tables users,orders
  sql-splitter split dump.sql -o schema/ --schema-only
  sql-splitter split \"backups/*.sql\" -o out/ --fail-fast")]
    Split {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, default_value = "output", value_hint = ValueHint::DirPath, help_heading = INPUT_OUTPUT)]
        output: PathBuf,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,

        #[arg(long, conflicts_with = "data_only", help_heading = FILTERING)]
        schema_only: bool,

        #[arg(long, conflicts_with = "schema_only", help_heading = FILTERING)]
        data_only: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        verbose: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        fail_fast: bool,
    },

    #[command(visible_alias = "an")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter analyze dump.sql
  sql-splitter analyze dump.sql.gz --progress
  sql-splitter analyze \"dumps/*.sql\" --json")]
    Analyze {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        fail_fast: bool,
    },

    #[command(visible_alias = "mg")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter merge tables/ -o restored.sql
  sql-splitter merge tables/ -o restored.sql --transaction
  sql-splitter merge tables/ -o partial.sql --tables users,orders
  sql-splitter merge tables/ -o clean.sql --exclude logs,cache")]
    Merge {
        #[arg(value_hint = ValueHint::DirPath, help_heading = INPUT_OUTPUT)]
        input_dir: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, default_value = "mysql", help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        exclude: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        transaction: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        no_header: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,
    },

    #[command(visible_alias = "sa")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter sample dump.sql -o dev.sql --percent 10
  sql-splitter sample dump.sql -o dev.sql --rows 1000 --preserve-relations
  sql-splitter sample dump.sql -o dev.sql --percent 5 --seed 42
  sql-splitter sample dump.sql -o dev.sql --tables users,orders --percent 20")]
    Sample {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        config: Option<PathBuf>,

        #[arg(long, conflicts_with = "rows", help_heading = MODE)]
        percent: Option<u32>,

        #[arg(long, conflicts_with = "percent", help_heading = MODE)]
        rows: Option<usize>,

        #[arg(long, help_heading = MODE)]
        seed: Option<u64>,

        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        exclude: Option<String>,

        #[arg(long, help_heading = FILTERING)]
        root_tables: Option<String>,

        #[arg(long, default_value = "lookups", help_heading = FILTERING)]
        include_global: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        preserve_relations: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        strict_fk: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        no_schema: bool,

        #[arg(long, help_heading = LIMITS)]
        max_total_rows: Option<usize>,

        #[arg(long, help_heading = LIMITS)]
        no_limit: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,
    },

    #[command(visible_alias = "sh")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter shard dump.sql -o tenant.sql --tenant-value 123
  sql-splitter shard dump.sql -o tenant.sql --tenant-column company_id --tenant-value 42
  sql-splitter shard dump.sql -o shards/ --tenant-values \"1,2,3\"")]
    Shard {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        config: Option<PathBuf>,

        #[arg(long, help_heading = MODE)]
        tenant_column: Option<String>,

        #[arg(long, conflicts_with = "tenant_values", help_heading = MODE)]
        tenant_value: Option<String>,

        #[arg(long, conflicts_with = "tenant_value", help_heading = MODE)]
        tenant_values: Option<String>,

        #[arg(long, help_heading = FILTERING)]
        root_tables: Option<String>,

        #[arg(long, default_value = "lookups", help_heading = FILTERING)]
        include_global: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        strict_fk: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        no_schema: bool,

        #[arg(long, help_heading = LIMITS)]
        max_selected_rows: Option<usize>,

        #[arg(long, help_heading = LIMITS)]
        no_limit: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,
    },

    #[command(visible_alias = "cv")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter convert mysql.sql --to postgres -o pg.sql
  sql-splitter convert pg_dump.sql --to mysql -o mysql.sql
  sql-splitter convert dump.sql --from mysql --to sqlite -o sqlite.sql
  sql-splitter convert mysql.sql --to postgres | psql mydb")]
    Convert {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(long, help_heading = MODE)]
        from: Option<String>,

        #[arg(long, help_heading = MODE)]
        to: String,

        #[arg(long, help_heading = BEHAVIOR)]
        strict: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        fail_fast: bool,
    },

    #[command(visible_alias = "val")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter validate dump.sql
  sql-splitter validate dump.sql --strict
  sql-splitter validate \"dumps/*.sql\" --json --fail-fast
  sql-splitter validate dump.sql --no-fk-checks")]
    Validate {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        strict: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        no_fk_checks: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        fail_fast: bool,

        #[arg(long, default_value = "1000000", help_heading = LIMITS)]
        max_rows_per_table: usize,

        #[arg(long, help_heading = LIMITS)]
        no_limit: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,
    },

    #[command(visible_alias = "df")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter diff old.sql new.sql
  sql-splitter diff old.sql new.sql --schema-only
  sql-splitter diff old.sql new.sql --format sql -o migration.sql
  sql-splitter diff old.sql new.sql --verbose --ignore-columns \"*.updated_at\"
  sql-splitter diff old.sql new.sql --primary-key logs:timestamp+message")]
    Diff {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        old_file: PathBuf,

        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        new_file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        exclude: Option<String>,

        #[arg(long, help_heading = FILTERING)]
        ignore_columns: Option<String>,

        #[arg(long, conflicts_with = "data_only", help_heading = MODE)]
        schema_only: bool,

        #[arg(long, conflicts_with = "schema_only", help_heading = MODE)]
        data_only: bool,

        #[arg(long, help_heading = MODE)]
        primary_key: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        allow_no_pk: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        ignore_order: bool,

        #[arg(long, default_value = "10000000", help_heading = LIMITS)]
        max_pk_entries: usize,

        #[arg(short, long, default_value = "text", help_heading = OUTPUT_FORMAT)]
        format: Option<String>,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        verbose: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,
    },

    #[command(visible_alias = "rd")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter redact dump.sql -o safe.sql --config redact.yaml
  sql-splitter redact dump.sql -o safe.sql --null \"*.ssn\" --hash \"*.email\"
  sql-splitter redact dump.sql --generate-config -o redact.yaml
  sql-splitter redact dump.sql -o safe.sql --config redact.yaml --seed 42")]
    Redact {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        config: Option<PathBuf>,

        #[arg(long, help_heading = MODE)]
        generate_config: bool,

        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        null: Vec<String>,

        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        hash: Vec<String>,

        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        fake: Vec<String>,

        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        mask: Vec<String>,

        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        constant: Vec<String>,

        #[arg(long, help_heading = MODE)]
        seed: Option<u64>,

        #[arg(long, default_value = "en", help_heading = MODE)]
        locale: String,

        #[arg(short, long, value_delimiter = ',', help_heading = FILTERING)]
        tables: Vec<String>,

        #[arg(short = 'x', long, value_delimiter = ',', help_heading = FILTERING)]
        exclude: Vec<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        strict: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        validate: bool,
    },

    #[command(visible_alias = "gr")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter graph dump.sql -o schema.html
  sql-splitter graph dump.sql -o schema.mmd --format mermaid
  sql-splitter graph dump.sql -o schema.png --render
  sql-splitter graph dump.sql --cycles-only
  sql-splitter graph dump.sql --table users --transitive")]
    Graph {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        format: Option<String>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(long, default_value = "lr", help_heading = OUTPUT_FORMAT)]
        layout: Option<String>,

        #[arg(long, help_heading = FILTERING)]
        cycles_only: bool,

        #[arg(long, help_heading = FILTERING)]
        table: Option<String>,

        #[arg(long, help_heading = FILTERING)]
        transitive: bool,

        #[arg(long, help_heading = FILTERING)]
        reverse: bool,

        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,

        #[arg(short, long, help_heading = FILTERING)]
        exclude: Option<String>,

        #[arg(long, help_heading = FILTERING)]
        max_depth: Option<usize>,

        #[arg(long, help_heading = BEHAVIOR)]
        render: bool,

        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,

        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,
    },

    #[command(visible_alias = "ord")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter order dump.sql -o ordered.sql
  sql-splitter order dump.sql --check
  sql-splitter order dump.sql --dry-run
  sql-splitter order dump.sql --reverse")]
    Order {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,

        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,

        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,

        #[arg(long, help_heading = BEHAVIOR)]
        check: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,

        #[arg(long, help_heading = BEHAVIOR)]
        reverse: bool,
    },

    #[command(visible_alias = "qy")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
  sql-splitter query dump.sql \"SELECT COUNT(*) FROM users\"
  sql-splitter query dump.sql \"SELECT * FROM orders WHERE total > 100\" -f json
  sql-splitter query dump.sql \"SELECT * FROM users LIMIT 10\" -o results.csv -f csv
  sql-splitter query dump.sql --interactive
  sql-splitter query huge.sql \"SELECT ...\" --disk
  sql-splitter query dump.sql \"SELECT ...\" --cache
  sql-splitter query --list-cache")]
    Query(query::QueryArgs),

    #[command(hide = true)]
    Schema {
        #[arg(short, long, default_value = "schemas", value_hint = ValueHint::DirPath)]
        output: PathBuf,

        #[arg(short, long)]
        command: Option<String>,

        #[arg(long)]
        stdout: bool,

        #[arg(long)]
        list: bool,
    },

    #[command(after_help = "\x1b[1mInstallation:\x1b[0m
  Bash:
    sql-splitter completions bash > /etc/bash_completion.d/sql-splitter
    # or: sql-splitter completions bash >> ~/.bashrc

  Zsh:
    sql-splitter completions zsh > \"${fpath[1]}/_sql-splitter\"
    # or for oh-my-zsh: sql-splitter completions zsh > ~/.oh-my-zsh/completions/_sql-splitter

  Fish:
    sql-splitter completions fish > ~/.config/fish/completions/sql-splitter.fish

  PowerShell:
    sql-splitter completions powershell >> $PROFILE")]
    Completions {
        #[arg(value_enum)]
        shell: Shell,
    },
}

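// Dispatches the parsed CLI to the matching subcommand module: each arm
// destructures its variant and forwards the fields to that module's `run`.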
pub fn run(cli: Cli) -> anyhow::Result<()> {
    match cli.command {
        Commands::Split {
            file,
            output,
            dialect,
            verbose,
            dry_run,
            progress,
            tables,
            schema_only,
            data_only,
            fail_fast,
            json,
        } => split::run(
            file,
            output,
            dialect,
            verbose,
            dry_run,
            progress,
            tables,
            schema_only,
            data_only,
            fail_fast,
            json,
        ),
        Commands::Analyze {
            file,
            dialect,
            progress,
            fail_fast,
            json,
        } => analyze::run(file, dialect, progress, fail_fast, json),
        Commands::Merge {
            input_dir,
            output,
            dialect,
            tables,
            exclude,
            transaction,
            no_header,
            progress,
            dry_run,
            json,
        } => merge::run(
            input_dir,
            output,
            dialect,
            tables,
            exclude,
            transaction,
            no_header,
            progress,
            dry_run,
            json,
        ),
        Commands::Sample {
            file,
            output,
            dialect,
            percent,
            rows,
            preserve_relations,
            tables,
            exclude,
            root_tables,
            include_global,
            seed,
            config,
            max_total_rows,
            no_limit,
            strict_fk,
            no_schema,
            progress,
            dry_run,
            json,
        } => {
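            // `--no-limit` (or an explicit 0) lifts the row cap entirely;
            // otherwise `--max-total-rows` passes through unchanged.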
            let effective_limit = if no_limit || max_total_rows == Some(0) {
                None
            } else {
                max_total_rows
            };
            sample::run(
                file,
                output,
                dialect,
                percent,
                rows,
                preserve_relations,
                tables,
                exclude,
                root_tables,
                include_global,
                seed,
                config,
                effective_limit,
                strict_fk,
                no_schema,
                progress,
                dry_run,
                json,
            )
        }
        Commands::Shard {
            file,
            output,
            dialect,
            tenant_column,
            tenant_value,
            tenant_values,
            root_tables,
            include_global,
            config,
            max_selected_rows,
            no_limit,
            strict_fk,
            no_schema,
            progress,
            dry_run,
            json,
        } => {
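            // Same convention as `sample`: `--no-limit` or 0 disables the cap
            // on selected rows.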
            let effective_limit = if no_limit || max_selected_rows == Some(0) {
                None
            } else {
                max_selected_rows
            };
            shard::run(
                file,
                output,
                dialect,
                tenant_column,
                tenant_value,
                tenant_values,
                root_tables,
                include_global,
                config,
                effective_limit,
                strict_fk,
                no_schema,
                progress,
                dry_run,
                json,
            )
        }
        Commands::Convert {
            file,
            output,
            from,
            to,
            strict,
            progress,
            dry_run,
            fail_fast,
            json,
        } => convert::run(
            file, output, from, to, strict, progress, dry_run, fail_fast, json,
        ),
        Commands::Validate {
            file,
            dialect,
            progress,
            strict,
            json,
            max_rows_per_table,
            no_limit,
            no_fk_checks,
            fail_fast,
        } => {
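            // Here the limit is a plain usize rather than an Option, so
            // "unlimited" is expressed as usize::MAX instead of None.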
            let effective_limit = if no_limit || max_rows_per_table == 0 {
                usize::MAX
            } else {
                max_rows_per_table
            };
            validate::run(
                file,
                dialect,
                progress,
                strict,
                json,
                effective_limit,
                no_fk_checks,
                fail_fast,
            )
        }
        Commands::Diff {
            old_file,
            new_file,
            output,
            tables,
            exclude,
            schema_only,
            data_only,
            format,
            dialect,
            verbose,
            progress,
            max_pk_entries,
            allow_no_pk,
            ignore_order,
            primary_key,
            ignore_columns,
        } => diff::run(
            old_file,
            new_file,
            output,
            tables,
            exclude,
            schema_only,
            data_only,
            format,
            dialect,
            verbose,
            progress,
            max_pk_entries,
            allow_no_pk,
            ignore_order,
            primary_key,
            ignore_columns,
        ),
        Commands::Redact {
            file,
            output,
            dialect,
            config,
            generate_config,
            null,
            hash,
            fake,
            mask,
            constant,
            seed,
            locale,
            tables,
            exclude,
            strict,
            progress,
            dry_run,
            json,
            validate,
        } => redact::run(
            file,
            output,
            dialect,
            config,
            generate_config,
            null,
            hash,
            fake,
            mask,
            constant,
            seed,
            locale,
            tables,
            exclude,
            strict,
            progress,
            dry_run,
            json,
            validate,
        ),
        Commands::Graph {
            file,
            output,
            format,
            dialect,
            layout,
            cycles_only,
            table,
            transitive,
            reverse,
            tables,
            exclude,
            max_depth,
            render,
            progress,
            json,
        } => graph::run(
            file,
            output,
            format,
            dialect,
            layout,
            cycles_only,
            table,
            transitive,
            reverse,
            tables,
            exclude,
            max_depth,
            render,
            progress,
            json,
        ),
        Commands::Order {
            file,
            output,
            dialect,
            check,
            dry_run,
            reverse,
        } => order::run(file, output, dialect, check, dry_run, reverse),
        Commands::Query(args) => query::run(args),
        Commands::Schema {
            output,
            command,
            stdout,
            list,
        } => run_schema(output, command, stdout, list),
        Commands::Completions { shell } => {
            generate(
                shell,
                &mut Cli::command(),
                "sql-splitter",
                &mut io::stdout(),
            );
            Ok(())
        }
    }
}

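// Emits the JSON Schemas registered in `json_schema::all_schemas()`, either to
// stdout or as one `<name>.schema.json` file per schema under `output_dir`.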
fn run_schema(
    output_dir: PathBuf,
    command: Option<String>,
    to_stdout: bool,
    list: bool,
) -> anyhow::Result<()> {
    use crate::json_schema;
    use std::fs;

    let schemas = json_schema::all_schemas();

    if list {
        for name in schemas.keys() {
            println!("{}", name);
        }
        return Ok(());
    }

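    // A specific command writes a single schema; otherwise every schema is
    // emitted, concatenated on stdout or written as individual files.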
    if let Some(cmd) = command {
        let schema = schemas.get(cmd.as_str()).ok_or_else(|| {
            anyhow::anyhow!(
                "Unknown command: {}. Use --list to see available schemas.",
                cmd
            )
        })?;

        let json = serde_json::to_string_pretty(schema)?;

        if to_stdout {
            println!("{}", json);
        } else {
            fs::create_dir_all(&output_dir)?;
            let path = output_dir.join(format!("{}.schema.json", cmd));
            fs::write(&path, json)?;
            eprintln!("Wrote: {}", path.display());
        }
    } else if to_stdout {
        for (name, schema) in &schemas {
            let json = serde_json::to_string_pretty(schema)?;
            println!("// {}.schema.json\n{}\n", name, json);
        }
    } else {
        fs::create_dir_all(&output_dir)?;
        for (name, schema) in &schemas {
            let json = serde_json::to_string_pretty(schema)?;
            let path = output_dir.join(format!("{}.schema.json", name));
            fs::write(&path, json)?;
            eprintln!("Wrote: {}", path.display());
        }
    }

    Ok(())
}