pub(crate) mod analyze;
pub(crate) mod convert;
mod diff;
mod glob_util;
pub(crate) mod graph;
pub(crate) mod merge;
mod order;
mod query;
pub(crate) mod redact;
pub(crate) mod sample;
pub(crate) mod shard;
pub(crate) mod split;
pub(crate) mod validate;
use clap::{CommandFactory, Parser, Subcommand, ValueHint};
use clap_complete::{generate, Shell};
use std::io;
use std::path::PathBuf;
/// Extended help text appended after the auto-generated option list of the
/// top-level `sql-splitter --help` output (wired in via `#[command(after_help)]`).
/// The `\x1b[1m` / `\x1b[0m` escapes render the section titles in ANSI bold.
/// NOTE: this string is user-visible output — keep its content byte-exact.
const AFTER_HELP: &str = "\x1b[1mCommon workflows:\x1b[0m
Split a dump into per-table files:
sql-splitter split dump.sql -o tables/
Create a 10% sample for development:
sql-splitter sample dump.sql -o dev.sql --percent 10 --preserve-relations
Convert MySQL to PostgreSQL:
sql-splitter convert mysql.sql --to postgres -o pg.sql
Compare two dumps for changes:
sql-splitter diff old.sql new.sql --format sql -o migration.sql
\x1b[1mMore info:\x1b[0m
Run 'sql-splitter <command> --help' for command-specific options.
Documentation: https://github.com/helgesverre/sql-splitter
Enable completions: sql-splitter completions <shell>";
// Top-level CLI parser for the `sql-splitter` binary.
//
// Deliberately commented with `//` rather than `///`: clap's derive macro turns
// doc comments into help text (a `///` on the struct would become `long_about`,
// and one on the field would become its help string), which would change the
// rendered `--help` output. `about` is set explicitly in the attribute below.
#[derive(Parser)]
#[command(name = "sql-splitter")]
#[command(author = "Helge Sverre <helge.sverre@gmail.com>")]
#[command(version)]
#[command(
    about = "High-performance CLI for splitting, merging, converting, and analyzing SQL dump files"
)]
#[command(after_help = AFTER_HELP)]
#[command(arg_required_else_help = true)]
#[command(max_term_width = 100)]
pub struct Cli {
    // The selected subcommand. `arg_required_else_help = true` above guarantees
    // clap prints help (instead of erroring) when no subcommand is given.
    #[command(subcommand)]
    pub command: Commands,
}
// Shared `help_heading` labels used to group flags in each subcommand's
// `--help` output. Centralized so the group names stay consistent across
// all subcommands.
const INPUT_OUTPUT: &str = "Input/Output";
const FILTERING: &str = "Filtering";
const MODE: &str = "Mode";
const BEHAVIOR: &str = "Behavior";
const LIMITS: &str = "Limits";
const OUTPUT_FORMAT: &str = "Output";
// All subcommands understood by the CLI. Each variant carries its parsed flags
// and is dispatched in `run` below to the matching module's `run` function.
//
// Commented with `//` on purpose: clap's derive turns `///` doc comments on
// variants and fields into `about`/help text, which would change the rendered
// `--help` output. The embedded `after_help` strings are user-visible and must
// stay byte-exact.
#[derive(Subcommand)]
pub enum Commands {
    // Split a dump into one file per table.
    #[command(visible_alias = "sp")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
sql-splitter split dump.sql -o tables/
sql-splitter split dump.sql.gz -o tables/ --tables users,orders
sql-splitter split dump.sql -o schema/ --schema-only
sql-splitter split \"backups/*.sql\" -o out/ --fail-fast")]
    Split {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,
        #[arg(short, long, default_value = "output", value_hint = ValueHint::DirPath, help_heading = INPUT_OUTPUT)]
        output: PathBuf,
        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,
        // Comma-separated table allow-list (parsed downstream in split::run).
        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,
        // schema_only / data_only are mutually exclusive; clap enforces it.
        #[arg(long, conflicts_with = "data_only", help_heading = FILTERING)]
        schema_only: bool,
        #[arg(long, conflicts_with = "schema_only", help_heading = FILTERING)]
        data_only: bool,
        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        verbose: bool,
        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,
        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        fail_fast: bool,
    },
    // Report tables, row counts, and sizes without writing output.
    #[command(visible_alias = "an")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
sql-splitter analyze dump.sql
sql-splitter analyze dump.sql.gz --progress
sql-splitter analyze \"dumps/*.sql\" --json")]
    Analyze {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,
        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,
        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,
        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        fail_fast: bool,
    },
    // Recombine previously split per-table files into a single dump.
    #[command(visible_alias = "mg")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
sql-splitter merge tables/ -o restored.sql
sql-splitter merge tables/ -o restored.sql --transaction
sql-splitter merge tables/ -o partial.sql --tables users,orders
sql-splitter merge tables/ -o clean.sql --exclude logs,cache")]
    Merge {
        #[arg(value_hint = ValueHint::DirPath, help_heading = INPUT_OUTPUT)]
        input_dir: PathBuf,
        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,
        // NOTE(review): Option<String> with a default_value means this is
        // always Some("mysql") after parsing; the Option matches merge::run's
        // signature rather than expressing optionality.
        #[arg(short, long, default_value = "mysql", help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,
        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,
        #[arg(short, long, help_heading = FILTERING)]
        exclude: Option<String>,
        #[arg(long, help_heading = BEHAVIOR)]
        transaction: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        no_header: bool,
        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,
        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,
    },
    // Produce a smaller, optionally relation-preserving sample of a dump.
    #[command(visible_alias = "sa")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
sql-splitter sample dump.sql -o dev.sql --percent 10
sql-splitter sample dump.sql -o dev.sql --rows 1000 --preserve-relations
sql-splitter sample dump.sql -o dev.sql --percent 5 --seed 42
sql-splitter sample dump.sql -o dev.sql --tables users,orders --percent 20")]
    Sample {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,
        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,
        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,
        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        config: Option<PathBuf>,
        // Exactly one sampling mode: percentage of rows or absolute row count.
        #[arg(long, conflicts_with = "rows", help_heading = MODE)]
        percent: Option<u32>,
        #[arg(long, conflicts_with = "percent", help_heading = MODE)]
        rows: Option<usize>,
        // RNG seed for reproducible samples.
        #[arg(long, help_heading = MODE)]
        seed: Option<u64>,
        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,
        #[arg(short, long, help_heading = FILTERING)]
        exclude: Option<String>,
        #[arg(long, help_heading = FILTERING)]
        root_tables: Option<String>,
        #[arg(long, default_value = "lookups", help_heading = FILTERING)]
        include_global: Option<String>,
        #[arg(long, help_heading = BEHAVIOR)]
        preserve_relations: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        strict_fk: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        no_schema: bool,
        // Row cap; see run() below — --no-limit or a value of 0 disables it.
        #[arg(long, help_heading = LIMITS)]
        max_total_rows: Option<usize>,
        #[arg(long, help_heading = LIMITS)]
        no_limit: bool,
        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,
        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,
    },
    // Extract the rows belonging to one or more tenants from a multi-tenant dump.
    #[command(visible_alias = "sh")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
sql-splitter shard dump.sql -o tenant.sql --tenant-value 123
sql-splitter shard dump.sql -o tenant.sql --tenant-column company_id --tenant-value 42
sql-splitter shard dump.sql -o shards/ --tenant-values \"1,2,3\"")]
    Shard {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,
        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,
        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,
        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        config: Option<PathBuf>,
        #[arg(long, help_heading = MODE)]
        tenant_column: Option<String>,
        // Single tenant vs. comma-separated list — mutually exclusive.
        #[arg(long, conflicts_with = "tenant_values", help_heading = MODE)]
        tenant_value: Option<String>,
        #[arg(long, conflicts_with = "tenant_value", help_heading = MODE)]
        tenant_values: Option<String>,
        #[arg(long, help_heading = FILTERING)]
        root_tables: Option<String>,
        #[arg(long, default_value = "lookups", help_heading = FILTERING)]
        include_global: Option<String>,
        #[arg(long, help_heading = BEHAVIOR)]
        strict_fk: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        no_schema: bool,
        // Row cap; see run() below — --no-limit or a value of 0 disables it.
        #[arg(long, help_heading = LIMITS)]
        max_selected_rows: Option<usize>,
        #[arg(long, help_heading = LIMITS)]
        no_limit: bool,
        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,
        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,
    },
    // Translate a dump between SQL dialects (--from is auto-detected if omitted).
    #[command(visible_alias = "cv")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
sql-splitter convert mysql.sql --to postgres -o pg.sql
sql-splitter convert pg_dump.sql --to mysql -o mysql.sql
sql-splitter convert dump.sql --from mysql --to sqlite -o sqlite.sql
sql-splitter convert mysql.sql --to postgres | psql mydb")]
    Convert {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,
        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,
        #[arg(long, help_heading = MODE)]
        from: Option<String>,
        // Target dialect is the only required flag of this subcommand.
        #[arg(long, help_heading = MODE)]
        to: String,
        #[arg(long, help_heading = BEHAVIOR)]
        strict: bool,
        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,
        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        fail_fast: bool,
    },
    // Check a dump for syntax/consistency problems (optionally FK integrity).
    #[command(visible_alias = "val")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
sql-splitter validate dump.sql
sql-splitter validate dump.sql --strict
sql-splitter validate \"dumps/*.sql\" --json --fail-fast
sql-splitter validate dump.sql --no-fk-checks")]
    Validate {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,
        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,
        #[arg(long, help_heading = BEHAVIOR)]
        strict: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        no_fk_checks: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        fail_fast: bool,
        // Non-optional with a default; run() maps 0 / --no-limit to usize::MAX.
        #[arg(long, default_value = "1000000", help_heading = LIMITS)]
        max_rows_per_table: usize,
        #[arg(long, help_heading = LIMITS)]
        no_limit: bool,
        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,
        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,
    },
    // Compare two dumps; can emit a SQL migration via --format sql.
    #[command(visible_alias = "df")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
sql-splitter diff old.sql new.sql
sql-splitter diff old.sql new.sql --schema-only
sql-splitter diff old.sql new.sql --format sql -o migration.sql
sql-splitter diff old.sql new.sql --verbose --ignore-columns \"*.updated_at\"
sql-splitter diff old.sql new.sql --primary-key logs:timestamp+message")]
    Diff {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        old_file: PathBuf,
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        new_file: PathBuf,
        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,
        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,
        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,
        #[arg(short, long, help_heading = FILTERING)]
        exclude: Option<String>,
        // Glob-style "table.column" patterns to skip when comparing rows.
        #[arg(long, help_heading = FILTERING)]
        ignore_columns: Option<String>,
        #[arg(long, conflicts_with = "data_only", help_heading = MODE)]
        schema_only: bool,
        #[arg(long, conflicts_with = "schema_only", help_heading = MODE)]
        data_only: bool,
        // Override PK per table, e.g. "logs:timestamp+message".
        #[arg(long, help_heading = MODE)]
        primary_key: Option<String>,
        #[arg(long, help_heading = BEHAVIOR)]
        allow_no_pk: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        ignore_order: bool,
        #[arg(long, default_value = "10000000", help_heading = LIMITS)]
        max_pk_entries: usize,
        #[arg(short, long, default_value = "text", help_heading = OUTPUT_FORMAT)]
        format: Option<String>,
        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        verbose: bool,
        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,
    },
    // Anonymize sensitive columns via a config file or inline strategy flags.
    #[command(visible_alias = "rd")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
sql-splitter redact dump.sql -o safe.sql --config redact.yaml
sql-splitter redact dump.sql -o safe.sql --null \"*.ssn\" --hash \"*.email\"
sql-splitter redact dump.sql --generate-config -o redact.yaml
sql-splitter redact dump.sql -o safe.sql --config redact.yaml --seed 42")]
    Redact {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,
        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,
        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,
        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        config: Option<PathBuf>,
        #[arg(long, help_heading = MODE)]
        generate_config: bool,
        // One flag per redaction strategy; each takes comma-separated
        // "table.column" glob patterns.
        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        null: Vec<String>,
        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        hash: Vec<String>,
        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        fake: Vec<String>,
        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        mask: Vec<String>,
        #[arg(long, value_delimiter = ',', help_heading = "Inline Strategies")]
        constant: Vec<String>,
        // Seed for deterministic fake/hash output.
        #[arg(long, help_heading = MODE)]
        seed: Option<u64>,
        #[arg(long, default_value = "en", help_heading = MODE)]
        locale: String,
        #[arg(short, long, value_delimiter = ',', help_heading = FILTERING)]
        tables: Vec<String>,
        // -x, since -e would collide with other short flags' conventions here.
        #[arg(short = 'x', long, value_delimiter = ',', help_heading = FILTERING)]
        exclude: Vec<String>,
        #[arg(long, help_heading = BEHAVIOR)]
        strict: bool,
        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,
        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        validate: bool,
    },
    // Visualize the foreign-key graph (dot/mermaid/html, optional rendering).
    #[command(visible_alias = "gr")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
sql-splitter graph dump.sql -o schema.html
sql-splitter graph dump.sql -o schema.mmd --format mermaid
sql-splitter graph dump.sql -o schema.png --render
sql-splitter graph dump.sql --cycles-only
sql-splitter graph dump.sql --table users --transitive")]
    Graph {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,
        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,
        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        format: Option<String>,
        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,
        #[arg(long, default_value = "lr", help_heading = OUTPUT_FORMAT)]
        layout: Option<String>,
        #[arg(long, help_heading = FILTERING)]
        cycles_only: bool,
        // Focus on a single table's neighborhood; modified by
        // --transitive / --reverse / --max-depth.
        #[arg(long, help_heading = FILTERING)]
        table: Option<String>,
        #[arg(long, help_heading = FILTERING)]
        transitive: bool,
        #[arg(long, help_heading = FILTERING)]
        reverse: bool,
        #[arg(short, long, help_heading = FILTERING)]
        tables: Option<String>,
        #[arg(short, long, help_heading = FILTERING)]
        exclude: Option<String>,
        #[arg(long, help_heading = FILTERING)]
        max_depth: Option<usize>,
        #[arg(long, help_heading = BEHAVIOR)]
        render: bool,
        #[arg(short, long, help_heading = OUTPUT_FORMAT)]
        progress: bool,
        #[arg(long, help_heading = OUTPUT_FORMAT)]
        json: bool,
    },
    // Topologically sort statements so FK dependencies come first.
    #[command(visible_alias = "ord")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
sql-splitter order dump.sql -o ordered.sql
sql-splitter order dump.sql --check
sql-splitter order dump.sql --dry-run
sql-splitter order dump.sql --reverse")]
    Order {
        #[arg(value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        file: PathBuf,
        #[arg(short, long, value_hint = ValueHint::FilePath, help_heading = INPUT_OUTPUT)]
        output: Option<PathBuf>,
        #[arg(short, long, help_heading = INPUT_OUTPUT)]
        dialect: Option<String>,
        #[arg(long, help_heading = BEHAVIOR)]
        check: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        dry_run: bool,
        #[arg(long, help_heading = BEHAVIOR)]
        reverse: bool,
    },
    // Run SQL against a dump; arguments live in query::QueryArgs (tuple variant).
    #[command(visible_alias = "qy")]
    #[command(after_help = "\x1b[1mExamples:\x1b[0m
sql-splitter query dump.sql \"SELECT COUNT(*) FROM users\"
sql-splitter query dump.sql \"SELECT * FROM orders WHERE total > 100\" -f json
sql-splitter query dump.sql \"SELECT * FROM users LIMIT 10\" -o results.csv -f csv
sql-splitter query dump.sql --interactive
sql-splitter query huge.sql \"SELECT ...\" --disk
sql-splitter query dump.sql \"SELECT ...\" --cache")]
    Query(query::QueryArgs),
    // Internal/undocumented: dump JSON Schemas for the tools' --json output.
    #[command(hide = true)]
    Schema {
        #[arg(short, long, default_value = "schemas", value_hint = ValueHint::DirPath)]
        output: PathBuf,
        #[arg(short, long)]
        command: Option<String>,
        #[arg(long)]
        stdout: bool,
        #[arg(long)]
        list: bool,
    },
    // Generate shell completion scripts via clap_complete.
    #[command(after_help = "\x1b[1mInstallation:\x1b[0m
Bash:
sql-splitter completions bash > /etc/bash_completion.d/sql-splitter
# or: sql-splitter completions bash >> ~/.bashrc
Zsh:
sql-splitter completions zsh > \"${fpath[1]}/_sql-splitter\"
# or for oh-my-zsh: sql-splitter completions zsh > ~/.oh-my-zsh/completions/_sql-splitter
Fish:
sql-splitter completions fish > ~/.config/fish/completions/sql-splitter.fish
PowerShell:
sql-splitter completions powershell >> $PROFILE")]
    Completions {
        #[arg(value_enum)]
        shell: Shell,
    },
}
/// Dispatch the parsed CLI to the matching subcommand module's `run` function.
///
/// Each arm destructures the variant's fields and forwards them positionally.
/// A few arms first normalize a row-limit flag pair (`--no-limit` or an
/// explicit `0` disables the cap) before delegating.
///
/// CAUTION: the sub-`run` functions take long positional argument lists with
/// several adjacent `bool`s — keep the destructuring order and the call-site
/// order in sync when editing.
pub fn run(cli: Cli) -> anyhow::Result<()> {
    match cli.command {
        Commands::Split {
            file,
            output,
            dialect,
            verbose,
            dry_run,
            progress,
            tables,
            schema_only,
            data_only,
            fail_fast,
            json,
        } => split::run(
            file,
            output,
            dialect,
            verbose,
            dry_run,
            progress,
            tables,
            schema_only,
            data_only,
            fail_fast,
            json,
        ),
        Commands::Analyze {
            file,
            dialect,
            progress,
            fail_fast,
            json,
        } => analyze::run(file, dialect, progress, fail_fast, json),
        Commands::Merge {
            input_dir,
            output,
            dialect,
            tables,
            exclude,
            transaction,
            no_header,
            progress,
            dry_run,
            json,
        } => merge::run(
            input_dir,
            output,
            dialect,
            tables,
            exclude,
            transaction,
            no_header,
            progress,
            dry_run,
            json,
        ),
        Commands::Sample {
            file,
            output,
            dialect,
            percent,
            rows,
            preserve_relations,
            tables,
            exclude,
            root_tables,
            include_global,
            seed,
            config,
            max_total_rows,
            no_limit,
            strict_fk,
            no_schema,
            progress,
            dry_run,
            json,
        } => {
            // --no-limit (or an explicit cap of 0) means "no row cap at all".
            let effective_limit = if no_limit || max_total_rows == Some(0) {
                None
            } else {
                max_total_rows
            };
            sample::run(
                file,
                output,
                dialect,
                percent,
                rows,
                preserve_relations,
                tables,
                exclude,
                root_tables,
                include_global,
                seed,
                config,
                effective_limit,
                strict_fk,
                no_schema,
                progress,
                dry_run,
                json,
            )
        }
        Commands::Shard {
            file,
            output,
            dialect,
            tenant_column,
            tenant_value,
            tenant_values,
            root_tables,
            include_global,
            config,
            max_selected_rows,
            no_limit,
            strict_fk,
            no_schema,
            progress,
            dry_run,
            json,
        } => {
            // Same limit normalization as Sample, for --max-selected-rows.
            let effective_limit = if no_limit || max_selected_rows == Some(0) {
                None
            } else {
                max_selected_rows
            };
            shard::run(
                file,
                output,
                dialect,
                tenant_column,
                tenant_value,
                tenant_values,
                root_tables,
                include_global,
                config,
                effective_limit,
                strict_fk,
                no_schema,
                progress,
                dry_run,
                json,
            )
        }
        Commands::Convert {
            file,
            output,
            from,
            to,
            strict,
            progress,
            dry_run,
            fail_fast,
            json,
        } => convert::run(
            file, output, from, to, strict, progress, dry_run, fail_fast, json,
        ),
        Commands::Validate {
            file,
            dialect,
            progress,
            strict,
            json,
            max_rows_per_table,
            no_limit,
            no_fk_checks,
            fail_fast,
        } => {
            // max_rows_per_table is non-optional here, so "unlimited" is
            // expressed as usize::MAX rather than None.
            let effective_limit = if no_limit || max_rows_per_table == 0 {
                usize::MAX
            } else {
                max_rows_per_table
            };
            validate::run(
                file,
                dialect,
                progress,
                strict,
                json,
                effective_limit,
                no_fk_checks,
                fail_fast,
            )
        }
        Commands::Diff {
            old_file,
            new_file,
            output,
            tables,
            exclude,
            schema_only,
            data_only,
            format,
            dialect,
            verbose,
            progress,
            max_pk_entries,
            allow_no_pk,
            ignore_order,
            primary_key,
            ignore_columns,
        } => diff::run(
            old_file,
            new_file,
            output,
            tables,
            exclude,
            schema_only,
            data_only,
            format,
            dialect,
            verbose,
            progress,
            max_pk_entries,
            allow_no_pk,
            ignore_order,
            primary_key,
            ignore_columns,
        ),
        Commands::Redact {
            file,
            output,
            dialect,
            config,
            generate_config,
            null,
            hash,
            fake,
            mask,
            constant,
            seed,
            locale,
            tables,
            exclude,
            strict,
            progress,
            dry_run,
            json,
            validate,
        } => redact::run(
            file,
            output,
            dialect,
            config,
            generate_config,
            null,
            hash,
            fake,
            mask,
            constant,
            seed,
            locale,
            tables,
            exclude,
            strict,
            progress,
            dry_run,
            json,
            validate,
        ),
        Commands::Graph {
            file,
            output,
            format,
            dialect,
            layout,
            cycles_only,
            table,
            transitive,
            reverse,
            tables,
            exclude,
            max_depth,
            render,
            progress,
            json,
        } => graph::run(
            file,
            output,
            format,
            dialect,
            layout,
            cycles_only,
            table,
            transitive,
            reverse,
            tables,
            exclude,
            max_depth,
            render,
            progress,
            json,
        ),
        Commands::Order {
            file,
            output,
            dialect,
            check,
            dry_run,
            reverse,
        } => order::run(file, output, dialect, check, dry_run, reverse),
        // Query bundles its flags in a struct, so no destructuring is needed.
        Commands::Query(args) => query::run(args),
        Commands::Schema {
            output,
            command,
            stdout,
            list,
        } => run_schema(output, command, stdout, list),
        Commands::Completions { shell } => {
            // clap_complete writes the completion script straight to stdout.
            generate(
                shell,
                &mut Cli::command(),
                "sql-splitter",
                &mut io::stdout(),
            );
            Ok(())
        }
    }
}
/// Back end for the hidden `schema` subcommand: emit JSON Schemas describing
/// the tools' `--json` output.
///
/// Modes (checked in order):
/// - `list`: print the available schema names, one per line, and return.
/// - `command = Some(cmd)`: emit only that command's schema, to stdout or to
///   `<output_dir>/<cmd>.schema.json`.
/// - otherwise: emit every known schema, to stdout or one file each.
///
/// Returns an error for an unknown command name or on any I/O failure.
fn run_schema(
    output_dir: PathBuf,
    command: Option<String>,
    to_stdout: bool,
    list: bool,
) -> anyhow::Result<()> {
    use crate::json_schema;
    use std::fs;

    if list {
        for name in json_schema::all_schemas().keys() {
            println!("{}", name);
        }
        return Ok(());
    }

    match command {
        Some(cmd) => {
            // Single schema: resolve the name first so typos fail loudly.
            let schema = json_schema::get_schema(&cmd).ok_or_else(|| {
                anyhow::anyhow!(
                    "Unknown command: {}. Use --list to see available schemas.",
                    cmd
                )
            })?;
            let json = serde_json::to_string_pretty(&schema)?;
            if to_stdout {
                println!("{}", json);
            } else {
                fs::create_dir_all(&output_dir)?;
                let path = output_dir.join(format!("{}.schema.json", cmd));
                fs::write(&path, json)?;
                // Status goes to stderr so stdout stays clean for piping.
                eprintln!("Wrote: {}", path.display());
            }
        }
        None if to_stdout => {
            // All schemas to stdout, each prefixed with its would-be filename.
            let all = json_schema::all_schemas();
            for (name, schema) in &all {
                let json = serde_json::to_string_pretty(schema)?;
                println!("// {}.schema.json\n{}\n", name, json);
            }
        }
        None => {
            // All schemas, one file per schema under output_dir.
            let all = json_schema::all_schemas();
            fs::create_dir_all(&output_dir)?;
            for (name, schema) in &all {
                let json = serde_json::to_string_pretty(schema)?;
                let path = output_dir.join(format!("{}.schema.json", name));
                fs::write(&path, json)?;
                eprintln!("Wrote: {}", path.display());
            }
        }
    }
    Ok(())
}