use std::io::Write;
use std::path::Path;
use crate::core::inventory;
use crate::sql::Pool;
use super::error::MigrateError;
use super::file::{self, DataOp, Migration, Operation};
use super::make::{make_migrations, make_migrations_for_app};
use super::runner;
use super::snapshot::SchemaSnapshot;
/// Entry point for the manage CLI: dispatch `args`, printing to stdout.
///
/// Thin convenience wrapper over [`run_with_writer`] that supplies the
/// process stdout as the output sink.
pub async fn run(
    pool: &Pool,
    dir: &Path,
    args: impl IntoIterator<Item = String>,
) -> Result<(), MigrateError> {
    let mut out = std::io::stdout();
    run_with_writer(pool, dir, args, &mut out).await
}
/// Dispatch one manage-CLI invocation, writing all output to `writer`.
///
/// The first element of `args` selects the subcommand; the remaining
/// elements are forwarded to that subcommand's own parser. An empty arg
/// list, or any help flag, prints the top-level usage. Unknown
/// subcommands return `MigrateError::Validation`.
pub async fn run_with_writer<W: Write + Send>(
    pool: &Pool,
    dir: &Path,
    args: impl IntoIterator<Item = String>,
    writer: &mut W,
) -> Result<(), MigrateError> {
    let args: Vec<String> = args.into_iter().collect();
    // No subcommand at all behaves like --help (cmd == "").
    let cmd = args.first().map_or("", String::as_str);
    match cmd {
        "" | "--help" | "-h" | "help" => {
            print_help(writer)?;
            Ok(())
        }
        // Migration lifecycle.
        "makemigrations" => makemigrations(dir, &args[1..], writer),
        "migrate" => migrate(pool, dir, &args[1..], writer).await,
        "downgrade" => downgrade(pool, dir, &args[1..], writer).await,
        "showmigrations" | "status" => showmigrations(pool, dir, writer).await,
        "forget-pending" => forget_pending_cmd(pool, dir, &args[1..], writer).await,
        // Scaffolding / code generation.
        "startapp" => startapp(&args[1..], writer),
        "add-data-op" => add_data_op_cmd(dir, &args[1..], writer),
        "make:viewset" => make_viewset_cmd(&args[1..], writer),
        "make:api_routes" => make_api_routes_cmd(&args[1..], writer),
        "make:serializer" => make_serializer_cmd(&args[1..], writer),
        "make:form" => make_form_cmd(&args[1..], writer),
        "make:job" => make_job_cmd(&args[1..], writer),
        "make:notification" => make_notification_cmd(&args[1..], writer),
        "make:middleware" => make_middleware_cmd(&args[1..], writer),
        "make:test" => make_test_cmd(&args[1..], writer),
        // Diagnostics and database utilities.
        "about" => about_cmd(pool, writer).await,
        "check" => check_cmd(pool, dir, &args[1..], writer).await,
        "docs" => docs_cmd(writer),
        "version" | "--version" => version_cmd(writer),
        "db:dump" => db_dump_cmd(&args[1..], writer),
        "db:restore" => db_restore_cmd(&args[1..], writer),
        "db:info" => db_info_cmd(writer),
        "inspectdb" => super::inspectdb::inspectdb_cmd(pool, &args[1..], writer).await,
        other => Err(MigrateError::Validation(format!(
            "unknown subcommand: `{other}` (run with --help for usage)"
        ))),
    }
}
/// Print the full top-level usage text for the manage CLI.
///
/// Pure formatting: every line is written to `w` verbatim; the trailing
/// `\n` inside some strings produces the blank separator lines.
fn print_help<W: Write>(w: &mut W) -> std::io::Result<()> {
    // Header.
    writeln!(w, "rustango::manage — Django-style migration runner\n")?;
    writeln!(w, "USAGE:")?;
    writeln!(w, " manage <COMMAND> [args]\n")?;
    writeln!(w, "COMMANDS:")?;
    // Migration authoring.
    writeln!(w, " makemigrations [name]")?;
    writeln!(w, " Diff the inventory registry against the latest snapshot")?;
    writeln!(w, " and write the next migration file. `name` overrides the")?;
    writeln!(w, " auto-derived suffix.\n")?;
    writeln!(w, " makemigrations --empty <name>")?;
    writeln!(w, " Write an empty migration scaffold (`forward: []`) for")?;
    writeln!(w, " hand-authored data migrations. Edit the JSON to add")?;
    writeln!(w, " `data` ops with sql + reverse_sql.\n")?;
    // Applying and rolling back.
    writeln!(w, " migrate")?;
    writeln!(w, " Apply every pending migration in lex order.\n")?;
    writeln!(w, " migrate <target>")?;
    writeln!(w, " Forward or back to <target>. `zero` unapplies every")?;
    writeln!(w, " applied migration.\n")?;
    writeln!(w, " migrate --dry-run")?;
    writeln!(w, " Print the SQL each pending migration would run; never")?;
    writeln!(w, " writes. Reads the ledger so the preview is accurate.\n")?;
    writeln!(w, " migrate --squash")?;
    writeln!(w, " Delete every pending (un-applied) migration JSON and")?;
    writeln!(w, " regenerate a single fresh diff via makemigrations. Dev-")?;
    writeln!(w, " iteration escape hatch when an evolving model produces a")?;
    writeln!(w, " pending migration the validator rejects (e.g. AddColumn")?;
    writeln!(w, " NOT NULL with no default). Refuses to touch applied rows.\n")?;
    writeln!(w, " downgrade [N]")?;
    writeln!(w, " Step back N applied migrations (default 1).\n")?;
    writeln!(w, " showmigrations | status")?;
    writeln!(w, " List migrations with [X]/[ ] applied marker.\n")?;
    writeln!(w, " forget-pending <name>")?;
    writeln!(w, " Delete a migration JSON that has NOT been applied yet,")?;
    writeln!(w, " so the next `makemigrations` regenerates the diff.")?;
    writeln!(w, " Refuses if the migration is recorded in the ledger.\n")?;
    // Data migrations.
    writeln!(w, " add-data-op --sql <SQL> [--reverse-sql <SQL>] [--name <name>] [--to <migration>]")?;
    writeln!(w, " Add a data transformation op (up + optional down).")?;
    writeln!(w, " --sql Forward SQL to run (required).")?;
    writeln!(w, " --reverse-sql Rollback SQL. Omit for irreversible ops.")?;
    writeln!(w, " --name Name suffix for the new migration file.")?;
    writeln!(w, " --to Append to an existing migration instead of creating one.\n")?;
    // Project diagnostics.
    writeln!(w, " about")?;
    writeln!(w, " Print framework version, registered models/apps,")?;
    writeln!(w, " and detected backend configuration.\n")?;
    writeln!(w, " check [--deploy]")?;
    writeln!(w, " Run system audits — pending migrations, missing models, common")?;
    writeln!(w, " misconfigurations. With --deploy: production hardening checks.")?;
    writeln!(w, " Exits non-zero on any error-level finding.\n")?;
    writeln!(w, " docs")?;
    writeln!(w, " Open docs.rs/rustango in the default browser.\n")?;
    writeln!(w, " version | --version")?;
    writeln!(w, " Print the rustango framework version.\n")?;
    writeln!(w, " (To bootstrap a new project from scratch, install + run")?;
    writeln!(w, " `cargo install cargo-rustango` then `cargo rustango new <name>`.)\n")?;
    // Code generators.
    writeln!(w, " make:viewset <Name> [--model <Model>] [--tenant | --no-tenant]")?;
    writeln!(w, " --tenant emits a `ViewSet::for_model(...).tenant_router(...)`")?;
    writeln!(w, " shape that resolves a per-request connection via `Tenant`")?;
    writeln!(w, " instead of baking a pool at mount time (required for tenancy).")?;
    writeln!(w, " Auto-detected from Cargo.toml when the rustango dep enables")?;
    writeln!(w, " `tenancy` — pass `--no-tenant` to override the auto-detection.")?;
    writeln!(w, " make:api_routes <app> [--tenant]")?;
    writeln!(w, " Scaffold src/<app>/api_routes.rs — the per-app composer that")?;
    writeln!(w, " merges every viewset's router into a single Router<()>.")?;
    writeln!(w, " make:serializer <Name> [--model <Model>]")?;
    writeln!(w, " make:form <Name>")?;
    writeln!(w, " make:job <Name>")?;
    writeln!(w, " make:notification <Name>")?;
    writeln!(w, " make:middleware <Name>")?;
    writeln!(w, " make:test <Name>")?;
    writeln!(w, " Scaffold a single source file with the chosen shape.")?;
    writeln!(w, " Writes to src/<snake_name>.rs (skips if exists).\n")?;
    // Database utilities.
    writeln!(w, " db:dump [--out <file>] [--data-only|--schema-only] [--no-owner]")?;
    writeln!(w, " Run pg_dump against $DATABASE_URL. Default: prints SQL to")?;
    writeln!(w, " stdout (omit --out to pipe). --data-only / --schema-only")?;
    writeln!(w, " mirror pg_dump's flags. --no-owner skips OWNER lines.\n")?;
    writeln!(w, " db:restore <file> [--clean]")?;
    writeln!(w, " Run psql against $DATABASE_URL with `\\i <file>`. With")?;
    writeln!(w, " --clean, prepend a `DROP SCHEMA public CASCADE; CREATE SCHEMA public;`")?;
    writeln!(w, " so the restore lands on a clean database.\n")?;
    writeln!(w, " db:info")?;
    writeln!(w, " Print the resolved DB URL (password redacted), detected")?;
    writeln!(w, " backend, and which `postgres`/`mysql` Cargo features are")?;
    writeln!(w, " compiled in. Read-only — does not connect.\n")?;
    writeln!(w, " inspectdb [--schema <name>] [--table <name>]")?;
    writeln!(w, " Connect to DATABASE_URL and emit `#[derive(Model)]`")?;
    writeln!(w, " source for every base table in `--schema` (default `public`).")?;
    writeln!(w, " Pipe to a file the user reviews + edits. Mirrors Django's")?;
    writeln!(w, " `inspectdb` shape — adopts rustango against an existing DB")?;
    writeln!(w, " without rewriting it.\n")?;
    // App scaffolding.
    writeln!(w, " startapp <name> [--with-manage-bin]")?;
    writeln!(w, " Scaffold a Django-shape app module under src/<name>/")?;
    writeln!(w, " (models.rs + views.rs + urls.rs + mod.rs). Idempotent;")?;
    writeln!(w, " existing files are left untouched. With --with-manage-bin,")?;
    writeln!(w, " also writes src/bin/manage.rs.")?;
    Ok(())
}
/// `makemigrations` subcommand: diff the model registry against the
/// latest snapshot and write the next migration file(s).
///
/// Flags:
/// * `--empty <name>` — write an empty scaffold for hand-written data ops
/// * `--app <app>`    — diff one app into `<project_root>/<app>/migrations/`
/// * `--scope <s>`    — force a single `registry`/`tenant` scoped file
/// * `[name]`         — override the auto-derived file-name suffix
///
/// In tenancy projects (any registry-scoped model registered), a flagless
/// run splits the diff into one file per scope.
fn makemigrations<W: Write>(dir: &Path, args: &[String], w: &mut W) -> Result<(), MigrateError> {
    let mut empty = false;
    let mut name: Option<String> = None;
    let mut app: Option<String> = None;
    let mut scope_override: Option<crate::core::ModelScope> = None;
    let mut iter = args.iter();
    while let Some(arg) = iter.next() {
        match arg.as_str() {
            "--empty" => empty = true,
            "--app" => {
                app = Some(iter.next().cloned().ok_or_else(|| {
                    MigrateError::Validation("--app requires an app name".into())
                })?);
            }
            "--scope" => {
                let raw = iter.next().cloned().ok_or_else(|| {
                    MigrateError::Validation("--scope requires \"registry\" or \"tenant\"".into())
                })?;
                scope_override =
                    Some(crate::core::ModelScope::from_str(&raw).ok_or_else(|| {
                        MigrateError::Validation(format!(
                            "--scope must be \"registry\" or \"tenant\", got {raw:?}"
                        ))
                    })?);
            }
            "--help" | "-h" => {
                writeln!(
                    w,
                    "makemigrations [name] diff registry, write next file in <dir>\n\
                    makemigrations --app <app> [name] diff one app, write to <project_root>/<app>/migrations/\n\
                    makemigrations --scope <s> [name] <s> = registry|tenant; one file with that MigrationScope\n\
                    makemigrations --empty <name> empty scaffold for data ops\n\
                    \n\
                    In tenancy projects (any registered model with scope = \"registry\"),\n\
                    a flagless makemigrations splits the diff into TWO files — one for\n\
                    registry-scoped models, one for tenant-scoped — so framework tables\n\
                    don't bleed across scopes when migrate-tenants fans out."
                )?;
                return Ok(());
            }
            other if other.starts_with('-') => {
                return Err(MigrateError::Validation(format!("unknown flag: {other}")));
            }
            other => {
                // Exactly one positional [name] is accepted.
                if name.is_some() {
                    return Err(MigrateError::Validation(format!(
                        "unexpected positional argument: {other}"
                    )));
                }
                name = Some(other.to_owned());
            }
        }
    }
    // --empty: write a scaffold with no ops; a name is mandatory.
    if empty {
        let Some(n) = name else {
            return Err(MigrateError::Validation(
                "makemigrations --empty requires a name".into(),
            ));
        };
        let mig = make_empty(dir, &n)?;
        writeln!(
            w,
            "wrote {} (empty scaffold — fill in `forward` with data ops)",
            file_path(dir, &mig.name).display()
        )?;
        return Ok(());
    }
    // --app: diff a single app into <project_root>/<app>/migrations/.
    if let Some(app_name) = app {
        let project_root = dir.parent().unwrap_or(dir);
        match make_migrations_for_app(project_root, &app_name, name.as_deref())? {
            Some(mig) => {
                let app_dir = project_root.join(&app_name).join("migrations");
                writeln!(w, "wrote {}", file_path(&app_dir, &mig.name).display())?;
                for op in &mig.forward {
                    writeln!(w, " + {}", describe_op(op))?;
                }
            }
            None => writeln!(
                w,
                "app `{app_name}`: no changes — models match latest snapshot (or no models with this app_label)"
            )?,
        }
        return Ok(());
    }
    // --scope: force a single scoped migration file.
    if let Some(scope) = scope_override {
        return write_scoped_migration(dir, scope, name.as_deref(), w);
    }
    // Tenancy projects split the flagless diff into two scoped files.
    let has_registry_scoped = inventory::iter::<crate::core::ModelEntry>
        .into_iter()
        .any(|e| e.schema.scope == crate::core::ModelScope::Registry);
    if has_registry_scoped {
        let mut wrote_any = false;
        for scope in [
            crate::core::ModelScope::Registry,
            crate::core::ModelScope::Tenant,
        ] {
            let mig = crate::migrate::make::make_migrations_for_scope(dir, scope, name.as_deref())?;
            match mig {
                Some(m) => {
                    writeln!(
                        w,
                        "wrote {} ({} scope)",
                        file_path(dir, &m.name).display(),
                        scope.as_str(),
                    )?;
                    for op in &m.forward {
                        writeln!(w, " + {}", describe_op(op))?;
                    }
                    wrote_any = true;
                }
                None => writeln!(w, "no changes for {} scope", scope.as_str(),)?,
            }
        }
        if !wrote_any {
            // BUG FIX: this branch used to be an empty `{}` block, so the
            // overall summary it was clearly meant to print never appeared.
            writeln!(w, "no changes — registry matches latest snapshot")?;
        }
        return Ok(());
    }
    // Single-scope project: one diff against the latest snapshot.
    match make_migrations(dir, name.as_deref())? {
        Some(mig) => {
            writeln!(w, "wrote {}", file_path(dir, &mig.name).display())?;
            for op in &mig.forward {
                writeln!(w, " + {}", describe_op(op))?;
            }
        }
        None => writeln!(w, "no changes — registry matches latest snapshot")?,
    }
    Ok(())
}
/// Write one migration limited to `scope`, reporting the result on `w`.
///
/// Prints the generated file's path and each forward op, or a "no
/// changes" note when the scoped models already match the snapshot.
fn write_scoped_migration<W: Write>(
    dir: &Path,
    scope: crate::core::ModelScope,
    name: Option<&str>,
    w: &mut W,
) -> Result<(), MigrateError> {
    let generated = crate::migrate::make::make_migrations_for_scope(dir, scope, name)?;
    if let Some(mig) = generated {
        writeln!(
            w,
            "wrote {} ({} scope)",
            file_path(dir, &mig.name).display(),
            scope.as_str()
        )?;
        for op in &mig.forward {
            writeln!(w, " + {}", describe_op(op))?;
        }
    } else {
        writeln!(
            w,
            "no changes — {} models match latest snapshot",
            scope.as_str()
        )?;
    }
    Ok(())
}
/// `migrate` subcommand: apply pending migrations, preview them
/// (`--dry-run`), move to a named target, or squash the pending set
/// (`--squash`). The three modes are mutually exclusive.
async fn migrate<W: Write>(
    pool: &Pool,
    dir: &Path,
    args: &[String],
    w: &mut W,
) -> Result<(), MigrateError> {
    let mut dry_run = false;
    let mut squash = false;
    let mut positional: Option<&str> = None;
    for arg in args {
        match arg.as_str() {
            "--dry-run" => dry_run = true,
            "--squash" => squash = true,
            "--help" | "-h" => {
                writeln!(
                    w,
                    "migrate apply pending migrations\n\
                    migrate <target> forward or back to <target> (`zero` wipes)\n\
                    migrate --dry-run preview the SQL without writing\n\
                    migrate --squash delete every pending (un-applied) migration JSON\n\
                    and regenerate a single fresh diff. Dev-iteration\n\
                    escape hatch — refuses to touch applied rows."
                )?;
                return Ok(());
            }
            other if other.starts_with('-') => {
                return Err(MigrateError::Validation(format!("unknown flag: {other}")));
            }
            other => {
                // At most one positional <target> is accepted.
                if positional.is_some() {
                    return Err(MigrateError::Validation(format!(
                        "unexpected positional argument: {other}"
                    )));
                }
                positional = Some(other);
            }
        }
    }
    // --squash deletes files, so it refuses to combine with a preview or
    // a target.
    if squash {
        if dry_run || positional.is_some() {
            return Err(MigrateError::Validation(
                "`migrate --squash` does not combine with `--dry-run` or a positional target"
                    .into(),
            ));
        }
        return migrate_squash(pool, dir, w).await;
    }
    if dry_run {
        if positional.is_some() {
            return Err(MigrateError::Validation(
                "`migrate <target> --dry-run` is not supported in v0.4 — use plain `--dry-run` to preview pending forward migrations".into(),
            ));
        }
        // Preview path: reads the ledger, prints SQL, executes nothing.
        let preview = runner::migrate_dry_run_pool(pool, dir).await?;
        if preview.is_empty() {
            writeln!(w, "nothing to migrate (already up to date)")?;
        } else {
            writeln!(
                w,
                "-- DRY RUN: {} pending migration(s); no SQL will be executed",
                preview.len()
            )?;
            for p in &preview {
                writeln!(w)?;
                writeln!(
                    w,
                    "-- {} ({})",
                    p.name,
                    if p.atomic { "atomic" } else { "non-atomic" }
                )?;
                for stmt in &p.statements {
                    writeln!(w, "{stmt};")?;
                }
            }
        }
        return Ok(());
    }
    // Targeted migrate: forward or backward to the named migration.
    if let Some(target) = positional {
        let touched = runner::migrate_to_pool(pool, dir, target).await?;
        if touched.is_empty() {
            writeln!(w, "already at {target}")?;
        } else {
            for m in &touched {
                writeln!(w, " touched {}", m.name)?;
            }
        }
        return Ok(());
    }
    // Default: apply every pending migration in order.
    let applied = runner::migrate_pool(pool, dir).await?;
    if applied.is_empty() {
        writeln!(w, "nothing to migrate (already up to date)")?;
    } else {
        for m in &applied {
            writeln!(w, " applied {}", m.name)?;
        }
    }
    Ok(())
}
/// `migrate --squash`: delete every pending (un-applied) migration JSON
/// and regenerate a single fresh diff via `makemigrations`.
///
/// Applied migrations are never touched: `pending` is computed as "on
/// disk but absent from the ledger", so a ledger row always protects its
/// file. A single pending migration is redirected to `forget-pending`.
async fn migrate_squash<W: Write>(pool: &Pool, dir: &Path, w: &mut W) -> Result<(), MigrateError> {
    runner::ensure_ledger_pool(pool).await?;
    let all = file::list_dir(dir)?;
    let applied = runner::applied_set_pool(pool).await?;
    let pending: Vec<&Migration> = all.iter().filter(|m| !applied.contains(&m.name)).collect();
    if pending.is_empty() {
        writeln!(
            w,
            "no pending migrations to squash (every JSON is in the ledger)"
        )?;
        return Ok(());
    }
    if pending.len() == 1 {
        return Err(MigrateError::Validation(format!(
            "only one pending migration (`{}`) — use `forget-pending {}` instead of `--squash`",
            pending[0].name, pending[0].name,
        )));
    }
    // FIX: a loop re-checking `applied.contains(&m.name)` over `pending`
    // used to live here, but `pending` is filtered on the negation of that
    // exact predicate from the same snapshot, so the guard could never
    // fire — removed as dead code.
    writeln!(
        w,
        "squashing {} pending migration(s) into a fresh diff:",
        pending.len()
    )?;
    for m in &pending {
        let path = file_path(dir, &m.name);
        std::fs::remove_file(&path).map_err(|e| {
            MigrateError::Validation(format!("migrate --squash: rm {}: {e}", path.display()))
        })?;
        writeln!(w, " removed {}", path.display())?;
    }
    writeln!(w, "regenerating diff against current model registry...")?;
    makemigrations(dir, &[], w)
}
/// `downgrade [N]` subcommand: roll back the last N applied migrations
/// (default 1). The optional first argument must parse as `usize`.
async fn downgrade<W: Write>(
    pool: &Pool,
    dir: &Path,
    args: &[String],
    w: &mut W,
) -> Result<(), MigrateError> {
    // No argument means a single step back.
    let steps: usize = match args.first() {
        None => 1,
        Some(arg) => arg.parse().map_err(|_| {
            MigrateError::Validation(format!(
                "invalid step count: {arg} (expected a non-negative integer)"
            ))
        })?,
    };
    let rolled_back = runner::downgrade_pool(pool, dir, steps).await?;
    if rolled_back.is_empty() {
        writeln!(w, "nothing to downgrade")?;
        return Ok(());
    }
    for mig in &rolled_back {
        writeln!(w, " rolled back {}", mig.name)?;
    }
    Ok(())
}
/// `showmigrations`/`status` subcommand: list every migration on disk
/// with a `[X]` (applied) or `[ ]` (pending) marker from the ledger.
async fn showmigrations<W: Write>(pool: &Pool, dir: &Path, w: &mut W) -> Result<(), MigrateError> {
    runner::ensure_ledger_pool(pool).await?;
    let on_disk = file::list_dir(dir)?;
    let ledger = runner::applied_set_pool(pool).await?;
    if on_disk.is_empty() {
        writeln!(w, "(no migrations in {})", dir.display())?;
        return Ok(());
    }
    writeln!(w, "Migrations in {}:", dir.display())?;
    for mig in &on_disk {
        let mark = if ledger.contains(&mig.name) { "[X]" } else { "[ ]" };
        writeln!(w, " {mark} {}", mig.name)?;
    }
    Ok(())
}
/// `forget-pending <name>`: delete a migration JSON that is NOT yet in
/// the applied ledger, so the next `makemigrations` regenerates it.
///
/// `<name>` may be the full migration name or a unique substring; a
/// single exact match wins outright, otherwise substring matches must be
/// unambiguous. Applied migrations are refused (deleting one would
/// orphan its ledger row).
async fn forget_pending_cmd<W: Write>(
    pool: &Pool,
    dir: &Path,
    args: &[String],
    w: &mut W,
) -> Result<(), MigrateError> {
    let target = match args.first().map(String::as_str) {
        // No positional (or a help flag): print usage and bail out early.
        Some("--help") | Some("-h") | None => {
            writeln!(w, "forget-pending <name>")?;
            writeln!(
                w,
                " Delete a migration JSON that has NOT been applied yet, so the"
            )?;
            writeln!(
                w,
                " next `makemigrations` regenerates the diff against current"
            )?;
            writeln!(
                w,
                " models. Refuses to delete an already-applied migration (would"
            )?;
            writeln!(
                w,
                " orphan the `{}` ledger row and break later runs).",
                runner::LEDGER_TABLE
            )?;
            writeln!(w)?;
            writeln!(
                w,
                " <name> can be the full migration name (e.g. `0003_auto`)"
            )?;
            writeln!(w, " or a unique substring; ambiguous matches error.")?;
            return Ok(());
        }
        Some(s) if s.starts_with('-') => {
            return Err(MigrateError::Validation(format!(
                "forget-pending: expected positional <name> first, got flag `{s}`"
            )));
        }
        Some(s) => s,
    };
    runner::ensure_ledger_pool(pool).await?;
    let all = file::list_dir(dir)?;
    let applied = runner::applied_set_pool(pool).await?;
    // Exact-name match takes priority; fall back to substring matching
    // only when there is not exactly one exact hit.
    let exact: Vec<&Migration> = all.iter().filter(|m| m.name == target).collect();
    let candidates: Vec<&Migration> = if exact.len() == 1 {
        exact
    } else {
        all.iter().filter(|m| m.name.contains(target)).collect()
    };
    let migration = match candidates.len() {
        0 => {
            return Err(MigrateError::Validation(format!(
                "forget-pending: no migration matches `{target}` in {}",
                dir.display()
            )));
        }
        1 => candidates[0],
        n => {
            let names: Vec<&str> = candidates.iter().map(|m| m.name.as_str()).collect();
            return Err(MigrateError::Validation(format!(
                "forget-pending: `{target}` is ambiguous ({n} matches: {}); pass the full migration name",
                names.join(", ")
            )));
        }
    };
    // Applied migrations are protected: their ledger row must stay valid.
    if applied.contains(&migration.name) {
        return Err(MigrateError::Validation(format!(
            "forget-pending: migration `{}` is already applied (recorded in `{}`). \
            Use `migrate <prev>` or `downgrade` to unapply it first, then \
            `forget-pending` to drop the JSON.",
            migration.name,
            runner::LEDGER_TABLE,
        )));
    }
    let path = file_path(dir, &migration.name);
    std::fs::remove_file(&path).map_err(|e| {
        MigrateError::Validation(format!("forget-pending: rm {}: {e}", path.display()))
    })?;
    writeln!(w, "deleted {}", path.display())?;
    writeln!(w, " next: `cargo run -- makemigrations` will regenerate")?;
    writeln!(w, " the diff against the current model registry.")?;
    Ok(())
}
/// Write an empty migration (`forward: []`, no schema diff) named
/// `<next_index>_<name>`, chained to the latest migration on disk.
///
/// The previous snapshot is carried forward unchanged so later diffs
/// still compare against the real schema state. Creates `dir` if needed
/// and returns the migration that was written.
pub fn make_empty(dir: &Path, name: &str) -> Result<Migration, MigrateError> {
    let prior = file::list_dir(dir)?;
    // First migration ever: start from a completely empty snapshot.
    let prev_snapshot = prior.last().map_or_else(
        || SchemaSnapshot {
            tables: vec![],
            m2m_tables: vec![],
            indexes: vec![],
            checks: vec![],
        },
        |m| m.snapshot.clone(),
    );
    let prev_name = prior.last().map(|m| m.name.clone());
    // Next zero-padded index: one past the newest parsable index, else 1.
    let next_index = prior
        .last()
        .and_then(|m| file::extract_index(&m.name))
        .map_or(1, |n| n + 1);
    let mig = Migration {
        // Fix: the name was previously built into a local and `.clone()`d
        // even though the local was never used again — move it directly.
        name: format!("{next_index:04}_{name}"),
        created_at: chrono::Utc::now().to_rfc3339(),
        prev: prev_name,
        atomic: true,
        scope: super::MigrationScope::default(),
        snapshot: prev_snapshot,
        forward: vec![],
    };
    if !dir.exists() {
        std::fs::create_dir_all(dir)?;
    }
    file::write(&file_path(dir, &mig.name), &mig)?;
    Ok(mig)
}
/// Path of the on-disk JSON file for migration `name` inside `dir`.
fn file_path(dir: &Path, name: &str) -> std::path::PathBuf {
    let mut path = dir.to_path_buf();
    path.push(format!("{name}.json"));
    path
}
/// Create a brand-new migration containing a single raw-SQL data op.
///
/// The schema snapshot is carried forward from the latest migration on
/// disk (or an empty snapshot for the first one): a data migration
/// changes rows, not schema. `reverse_sql = None` marks the op
/// irreversible. Creates `dir` if needed and returns the written
/// migration.
pub fn make_data_migration(
    dir: &Path,
    name: &str,
    sql: &str,
    reverse_sql: Option<&str>,
) -> Result<Migration, MigrateError> {
    let prior = file::list_dir(dir)?;
    // Fix: redundant closure — pass the `Default` constructor directly.
    let prev_snapshot = prior
        .last()
        .map_or_else(SchemaSnapshot::default, |m| m.snapshot.clone());
    let prev_name = prior.last().map(|m| m.name.clone());
    // Next zero-padded index: one past the newest parsable index, else 1.
    let next_index = prior
        .last()
        .and_then(|m| file::extract_index(&m.name))
        .map_or(1, |n| n + 1);
    let op = Operation::Data(DataOp {
        sql: sql.to_owned(),
        reverse_sql: reverse_sql.map(str::to_owned),
        // An op is reversible exactly when rollback SQL was provided.
        reversible: reverse_sql.is_some(),
    });
    let mig = Migration {
        // Fix: the name was previously built into a local and `.clone()`d
        // even though the local was never used again — move it directly.
        name: format!("{next_index:04}_{name}"),
        created_at: chrono::Utc::now().to_rfc3339(),
        prev: prev_name,
        atomic: true,
        scope: super::MigrationScope::default(),
        snapshot: prev_snapshot,
        forward: vec![op],
    };
    if !dir.exists() {
        std::fs::create_dir_all(dir)?;
    }
    file::write(&file_path(dir, &mig.name), &mig)?;
    Ok(mig)
}
/// Append one raw-SQL data op to an existing migration's `forward` list
/// and rewrite its JSON file in place.
///
/// Fails with a validation error when no migration file with that name
/// can be loaded from `dir`.
pub fn append_data_op(
    dir: &Path,
    migration_name: &str,
    sql: &str,
    reverse_sql: Option<&str>,
) -> Result<(), MigrateError> {
    let path = file_path(dir, migration_name);
    let mut mig = match file::load(&path) {
        Ok(loaded) => loaded,
        Err(_) => {
            return Err(MigrateError::Validation(format!(
                "migration `{migration_name}` not found at {}",
                path.display()
            )));
        }
    };
    let op = DataOp {
        sql: sql.to_owned(),
        reverse_sql: reverse_sql.map(str::to_owned),
        // Reversible exactly when rollback SQL was supplied.
        reversible: reverse_sql.is_some(),
    };
    mig.forward.push(Operation::Data(op));
    file::write(&path, &mig)?;
    Ok(())
}
/// `add-data-op` subcommand: attach a raw-SQL data op either to a fresh
/// migration (default) or, with `--to`, to an existing one.
///
/// `--sql` is mandatory; `--reverse-sql` makes the op reversible;
/// `--name` overrides the default `data_op` file suffix.
fn add_data_op_cmd<W: Write>(dir: &Path, args: &[String], w: &mut W) -> Result<(), MigrateError> {
    let mut sql: Option<String> = None;
    let mut reverse_sql: Option<String> = None;
    let mut name: Option<String> = None;
    let mut to: Option<String> = None;
    let mut iter = args.iter();
    while let Some(arg) = iter.next() {
        match arg.as_str() {
            "--sql" => {
                sql =
                    Some(iter.next().cloned().ok_or_else(|| {
                        MigrateError::Validation("--sql requires a value".into())
                    })?);
            }
            "--reverse-sql" => {
                reverse_sql = Some(iter.next().cloned().ok_or_else(|| {
                    MigrateError::Validation("--reverse-sql requires a value".into())
                })?);
            }
            "--name" => {
                name =
                    Some(iter.next().cloned().ok_or_else(|| {
                        MigrateError::Validation("--name requires a value".into())
                    })?);
            }
            "--to" => {
                to = Some(iter.next().cloned().ok_or_else(|| {
                    MigrateError::Validation("--to requires a migration name".into())
                })?);
            }
            "--help" | "-h" => {
                writeln!(
                    w,
                    "add-data-op --sql <SQL> [--reverse-sql <SQL>] [--name <name>] [--to <migration>]"
                )?;
                return Ok(());
            }
            other if other.starts_with('-') => {
                return Err(MigrateError::Validation(format!("unknown flag: {other}")));
            }
            other => {
                // No positional arguments are accepted at all.
                return Err(MigrateError::Validation(format!(
                    "unexpected argument: `{other}` — use --sql, --reverse-sql, --name, --to"
                )));
            }
        }
    }
    // --sql is the only mandatory flag.
    let sql = sql.ok_or_else(|| MigrateError::Validation("--sql is required".into()))?;
    if let Some(migration_name) = to {
        // Append to an existing migration file.
        append_data_op(dir, &migration_name, &sql, reverse_sql.as_deref())?;
        writeln!(w, "appended data op to {migration_name}.json")?;
    } else {
        // Create a fresh migration; default suffix is "data_op".
        let name = name.unwrap_or_else(|| "data_op".to_owned());
        let mig = make_data_migration(dir, &name, &sql, reverse_sql.as_deref())?;
        let rev_note = if reverse_sql.is_some() {
            " (reversible)"
        } else {
            " (irreversible)"
        };
        writeln!(w, "wrote {}{rev_note}", file_path(dir, &mig.name).display())?;
    }
    Ok(())
}
/// One-line human summary of a migration operation: `Debug` form for
/// schema changes, a 60-char (char-safe) SQL prefix for data ops.
fn describe_op(op: &Operation) -> String {
    match op {
        Operation::Schema(change) => format!("{change:?}"),
        Operation::Data(data) => {
            // Truncate on char boundaries; append an ellipsis only when
            // something was actually cut off.
            let mut head: String = data.sql.chars().take(60).collect();
            if data.sql.chars().count() > 60 {
                head.push('…');
            }
            format!("data: {head}")
        }
    }
}
/// `startapp` subcommand: scaffold a Django-shape app module.
///
/// Positional: the app name (mandatory; its absence errors with the
/// usage text). Flags: `--with-manage-bin` also writes
/// `src/bin/manage.rs`; `--into <dir>` redirects the base directory.
/// NOTE(review): `--into` is accepted here but not listed in `usage()` —
/// confirm whether that omission is intentional.
fn startapp<W: Write>(args: &[String], w: &mut W) -> Result<(), MigrateError> {
    let mut iter = args.iter();
    // The app name must come first.
    let app_name = iter
        .next()
        .cloned()
        .ok_or_else(|| MigrateError::Validation(usage()))?;
    let mut with_manage_bin = false;
    let mut into: Option<String> = None;
    while let Some(arg) = iter.next() {
        match arg.as_str() {
            "--with-manage-bin" => with_manage_bin = true,
            "--into" => {
                into = Some(iter.next().cloned().ok_or_else(|| {
                    MigrateError::Validation("--into requires a directory argument".into())
                })?);
            }
            "--help" | "-h" => {
                writeln!(w, "{}", usage())?;
                return Ok(());
            }
            other => {
                return Err(MigrateError::Validation(format!(
                    "startapp: unknown argument `{other}` (run --help for usage)"
                )));
            }
        }
    }
    // Label used only in the user-facing hints of the final report.
    let base_label = into.clone().unwrap_or_else(|| "src".into());
    let opts = super::scaffold::StartAppOptions {
        app_name: app_name.clone(),
        manage_bin: with_manage_bin.then_some(super::scaffold::SINGLE_TENANT_MANAGE_BIN),
        base_dir: into.map(std::path::PathBuf::from),
    };
    let cwd = std::env::current_dir()?;
    let report = super::scaffold::startapp(&cwd, &opts)?;
    write_startapp_report(w, &app_name, &base_label, &report)
}
/// Render a scaffolding report: written/skipped/patched files, manual
/// follow-up hints, and (when anything was written) the `mod` wiring
/// reminder.
fn write_startapp_report<W: Write>(
    w: &mut W,
    app_name: &str,
    base_label: &str,
    report: &super::scaffold::StartAppReport,
) -> Result<(), MigrateError> {
    let nothing_to_report = report.written.is_empty() && report.skipped.is_empty();
    if nothing_to_report {
        writeln!(w, "startapp: nothing to do")?;
        return Ok(());
    }
    writeln!(w, "startapp `{app_name}`")?;
    for written in &report.written {
        writeln!(w, " + wrote {written}")?;
    }
    for skipped in &report.skipped {
        writeln!(w, " · {skipped} already exists — left untouched")?;
    }
    for patched in &report.patched {
        writeln!(w, " ~ patched {patched} (auto-mounted new app)")?;
    }
    for step in &report.manual_steps {
        writeln!(w, " ! manual: {step}")?;
    }
    // Only a run that actually created files needs the wiring reminder.
    if report.written.is_empty() {
        return Ok(());
    }
    writeln!(w, "next:")?;
    writeln!(
        w,
        " add `mod {app_name};` to {base_label}/main.rs (or {base_label}/lib.rs)"
    )?;
    writeln!(
        w,
        " so the derive macros' `inventory` registrations are pulled in."
    )?;
    Ok(())
}
/// Usage/help text for `startapp` — returned as the error message when
/// the app name is missing and printed for `startapp --help`.
///
/// NOTE(review): `--into <dir>` is accepted by `startapp` but not listed
/// here — confirm whether it is intentionally undocumented.
fn usage() -> String {
    "startapp <name> [--with-manage-bin]\n \
    Scaffold a Django-shape app module under src/<name>/ (mod.rs +\n \
    models.rs + views.rs + urls.rs). Idempotent: existing files\n \
    are left untouched. <name> must be a valid Rust identifier.\n\n \
    --with-manage-bin\n \
    Also write src/bin/manage.rs with the single-tenant dispatcher\n \
    boilerplate. Skipped if the file already exists."
        .to_owned()
}
/// `about` subcommand: print version, registered model/app counts,
/// selected environment variables (DB password redacted), and whether
/// the database answers `SELECT 1`.
async fn about_cmd<W: Write>(pool: &Pool, w: &mut W) -> Result<(), MigrateError> {
    let registered_models = crate::core::inventory::iter::<crate::core::ModelEntry>
        .into_iter()
        .count();
    // BTreeSet dedupes app labels and yields them in stable sorted order.
    let mut apps: std::collections::BTreeSet<&'static str> = std::collections::BTreeSet::new();
    for entry in crate::core::inventory::iter::<crate::core::ModelEntry> {
        if let Some(app) = entry.resolved_app_label() {
            apps.insert(app);
        }
    }
    writeln!(w, "rustango")?;
    writeln!(w, " version: {}", env!("CARGO_PKG_VERSION"))?;
    writeln!(w, " models: {registered_models} registered")?;
    writeln!(
        w,
        " apps: {} ({})",
        apps.len(),
        if apps.is_empty() {
            "none".to_owned()
        } else {
            apps.iter().copied().collect::<Vec<_>>().join(", ")
        }
    )?;
    let env_label = std::env::var("RUSTANGO_ENV").unwrap_or_else(|_| "(unset)".into());
    writeln!(w, " RUSTANGO_ENV: {env_label}")?;
    // Credential redaction: replace everything between "scheme://" and
    // the last '@' (user + password) with "***". URLs lacking either an
    // '@' or a "://" scheme separator are printed unchanged.
    let db_url = std::env::var("DATABASE_URL").map_or("(unset)".into(), |s| {
        if let Some(at) = s.rfind('@') {
            if let Some(scheme_end) = s.find("://") {
                let prefix = &s[..scheme_end + 3];
                let rest = &s[at..];
                return format!("{prefix}***{rest}");
            }
        }
        s
    });
    writeln!(w, " DATABASE_URL: {db_url}")?;
    write!(w, " db_connect: ")?;
    // Cheapest possible liveness probe; any Ok counts as reachable.
    let ok = crate::sql::raw_execute_pool(pool, "SELECT 1", Vec::new())
        .await
        .is_ok();
    writeln!(w, "{}", if ok { "ok" } else { "FAILED" })?;
    Ok(())
}
/// `check` subcommand: run system audits and report findings grouped as
/// info / warning / error. Returns a validation error (non-zero exit
/// upstream) when any error-level finding exists. `--deploy` adds the
/// production-hardening audits.
async fn check_cmd<W: Write>(
    pool: &Pool,
    dir: &Path,
    args: &[String],
    w: &mut W,
) -> Result<(), MigrateError> {
    let deploy = args.iter().any(|a| a == "--deploy");
    let mut errors: Vec<String> = Vec::new();
    let mut warnings: Vec<String> = Vec::new();
    let mut info: Vec<String> = Vec::new();
    writeln!(
        w,
        "running rustango system check{}...",
        if deploy { " (deploy mode)" } else { "" }
    )?;
    // Audit 1: at least one model must be registered via inventory.
    let model_count = crate::core::inventory::iter::<crate::core::ModelEntry>
        .into_iter()
        .count();
    if model_count == 0 {
        errors.push("no models registered — every #[derive(Model)] struct must be `pub use`d through the binary's crate root".into());
    } else {
        info.push(format!("{model_count} models registered via inventory"));
    }
    // Audit 2: database reachability via a trivial probe query.
    if crate::sql::raw_execute_pool(pool, "SELECT 1", Vec::new())
        .await
        .is_err()
    {
        errors.push("cannot connect to database — verify DATABASE_URL is reachable".into());
    } else {
        info.push("database reachable".into());
    }
    // Audit 3: models with no migrations directory content is suspicious.
    if dir.exists() {
        let prior = file::list_dir(dir)?;
        if prior.is_empty() && model_count > 0 {
            warnings.push(
                "models registered but no migrations on disk — run `manage makemigrations`".into(),
            );
        } else {
            info.push(format!("{} migration(s) on disk", prior.len()));
        }
    }
    // Optional deploy-mode audits (env + settings when compiled in).
    if deploy {
        let mut audit = DeployAuditFindings::default();
        run_deploy_audit(&deploy_audit_env(), &mut audit);
        #[cfg(feature = "config")]
        run_settings_audit(&mut audit);
        info.extend(audit.info);
        warnings.extend(audit.warnings);
        errors.extend(audit.errors);
    }
    for msg in &info {
        writeln!(w, " [info] {msg}")?;
    }
    for msg in &warnings {
        writeln!(w, " [warning] {msg}")?;
    }
    for msg in &errors {
        writeln!(w, " [error] {msg}")?;
    }
    if !errors.is_empty() {
        return Err(MigrateError::Validation(format!(
            "{} system check(s) failed",
            errors.len()
        )));
    }
    // NOTE(review): `errors.is_empty()` is always true here — the early
    // return above fires whenever errors exist — so this condition
    // reduces to `warnings.is_empty()`.
    if warnings.is_empty() && errors.is_empty() {
        writeln!(w, "all checks passed")?;
    }
    Ok(())
}
/// `docs` subcommand: print the docs URL and best-effort open it in the
/// platform's default browser.
///
/// The URL is always written to `w`, so the command stays useful when no
/// opener exists; spawn failures are deliberately ignored.
fn docs_cmd<W: Write>(w: &mut W) -> Result<(), MigrateError> {
    let url = "https://docs.rs/rustango";
    writeln!(w, "{url}")?;
    // BUG FIX: the old code built the tuple ("cmd", "/C start") on
    // Windows but then spawned only the first element with `.arg(url)`,
    // i.e. `cmd <url>` — the `/C start` part was dropped and the browser
    // never opened. Build the complete argv per platform instead.
    let mut opener = if cfg!(target_os = "macos") {
        let mut c = std::process::Command::new("open");
        c.arg(url);
        Some(c)
    } else if cfg!(target_os = "linux") {
        let mut c = std::process::Command::new("xdg-open");
        c.arg(url);
        Some(c)
    } else if cfg!(target_os = "windows") {
        let mut c = std::process::Command::new("cmd");
        // The empty "" is the window-title argument `start` consumes
        // before the URL.
        c.args(["/C", "start", "", url]);
        Some(c)
    } else {
        None
    };
    if let Some(cmd) = opener.as_mut() {
        // Best-effort: ignore spawn failures (opener not installed, etc).
        let _ = cmd.spawn();
    }
    Ok(())
}
/// `version` subcommand: print the crate version baked in at build time.
fn version_cmd<W: Write>(w: &mut W) -> Result<(), MigrateError> {
    let version = env!("CARGO_PKG_VERSION");
    writeln!(w, "rustango {version}")?;
    Ok(())
}
/// Shared argument parser for the `make:*` generators: one mandatory
/// PascalCase positional name plus an optional `--model <Model>` flag.
///
/// Rejects unknown flags, a second positional, a missing name, and a
/// name that fails [`is_valid_type_name`].
fn parse_name_and_model(args: &[String]) -> Result<(String, Option<String>), MigrateError> {
    let mut positional: Option<String> = None;
    let mut model: Option<String> = None;
    let mut rest = args.iter();
    while let Some(arg) = rest.next() {
        if arg == "--model" {
            let value = rest
                .next()
                .cloned()
                .ok_or_else(|| MigrateError::Validation("--model requires a value".into()))?;
            model = Some(value);
        } else if arg.starts_with('-') {
            return Err(MigrateError::Validation(format!("unknown flag `{arg}`")));
        } else if positional.is_some() {
            return Err(MigrateError::Validation(format!(
                "unexpected positional `{arg}`"
            )));
        } else {
            positional = Some(arg.clone());
        }
    }
    let name = positional.ok_or_else(|| {
        MigrateError::Validation("name is required (e.g. `manage make:viewset PostViewSet`)".into())
    })?;
    if !is_valid_type_name(&name) {
        return Err(MigrateError::Validation(format!(
            "`{name}` is not a valid Rust type name (PascalCase, alphanumeric + underscore)"
        )));
    }
    Ok((name, model))
}
/// True when `name` is usable as a scaffolded Rust type identifier:
/// non-empty, leading ASCII uppercase letter, then only ASCII
/// alphanumerics or underscores. (An ASCII uppercase first char is by
/// definition alphanumeric, so checking only the tail is equivalent to
/// the original whole-string scan.)
fn is_valid_type_name(name: &str) -> bool {
    let mut chars = name.chars();
    let leading_ok = chars.next().map_or(false, |c| c.is_ascii_uppercase());
    leading_ok && chars.all(|c| c.is_ascii_alphanumeric() || c == '_')
}
/// Convert `PascalCase` to `snake_case` by inserting `_` before every
/// ASCII-uppercase letter except a leading one, then lowercasing.
/// Runs of capitals split letter by letter (`HTTPServer` →
/// `h_t_t_p_server`), matching the original behavior.
fn pascal_to_snake(s: &str) -> String {
    s.chars()
        .enumerate()
        .flat_map(|(i, c)| {
            let sep = if i > 0 && c.is_ascii_uppercase() {
                Some('_')
            } else {
                None
            };
            sep.into_iter()
                .chain(std::iter::once(c.to_ascii_lowercase()))
        })
        .collect()
}
/// Write a generated source file to `src/<file_name>` and print the
/// follow-up `mod` wiring hint.
///
/// Strictly create-only: refuses with a validation error when the target
/// already exists, so hand-edited files are never clobbered. Parent
/// directories are created as needed.
fn write_generated<W: Write>(
    w: &mut W,
    file_name: &str,
    contents: String,
) -> Result<(), MigrateError> {
    let target = std::path::PathBuf::from("src").join(file_name);
    if target.exists() {
        return Err(MigrateError::Validation(format!(
            "{} already exists — refusing to overwrite",
            target.display()
        )));
    }
    if let Some(parent) = target.parent() {
        std::fs::create_dir_all(parent)?;
    }
    std::fs::write(&target, contents)?;
    writeln!(w, "wrote {}", target.display())?;
    let module = file_name.trim_end_matches(".rs");
    writeln!(
        w,
        " add `mod {module};` to src/main.rs (or `pub mod ...;` to src/lib.rs)"
    )?;
    Ok(())
}
/// `make:viewset` subcommand: scaffold a viewset source file.
///
/// Tenant-mode resolution: explicit `--tenant`/`--tenant-aware` or
/// `--no-tenant` win; otherwise `project_uses_tenancy()` (defined
/// elsewhere in this module — presumably reads Cargo.toml, per the help
/// text) decides, and a note is echoed when that auto-detection was
/// used.
fn make_viewset_cmd<W: Write>(args: &[String], w: &mut W) -> Result<(), MigrateError> {
    let mut explicit_tenant = false;
    let mut explicit_no_tenant = false;
    // Everything that is not a tenancy flag is forwarded to the common
    // name/--model parser.
    let mut filtered: Vec<String> = Vec::with_capacity(args.len());
    for a in args {
        if a == "--tenant" || a == "--tenant-aware" {
            explicit_tenant = true;
        } else if a == "--no-tenant" {
            explicit_no_tenant = true;
        } else {
            filtered.push(a.clone());
        }
    }
    let (tenant_aware, echoed_auto_detect) =
        resolve_viewset_tenant_mode(explicit_tenant, explicit_no_tenant, project_uses_tenancy());
    if echoed_auto_detect {
        writeln!(
            w,
            "make:viewset: auto-detected tenancy mode from Cargo.toml (pass `--no-tenant` to override)"
        )?;
    }
    let (name, model) = parse_name_and_model(&filtered)?;
    let snake = pascal_to_snake(&name);
    // Default model when --model is omitted; the user edits it afterwards.
    let model = model.unwrap_or_else(|| "Post".into());
    let body = if tenant_aware {
        viewset_template_tenant(&name, &model, &snake)
    } else {
        viewset_template_pool(&name, &model, &snake)
    };
    write_generated(w, &format!("{snake}.rs"), body)
}
/// Renders the single-tenant (pool-backed) viewset scaffold: a
/// `#[derive(ViewSet)]` struct plus a mount hint.
///
/// `name` is the PascalCase type, `model` the model it serves, and `snake`
/// the snake_case form used in the suggested mount path.
fn viewset_template_pool(name: &str, model: &str, snake: &str) -> String {
    format!(
        r#"//! Auto-scaffolded by `manage make:viewset {name}`.
use rustango::ViewSet;
#[derive(ViewSet)]
#[viewset(
model = {model},
fields = "id, ",
filter_fields = "",
search_fields = "",
page_size = 20,
)]
pub struct {name};
// Mount in your urls.rs:
//
// .merge({name}::router("/api/{snake}", pool.clone()))
"#
    )
}
/// Renders the tenant-aware viewset scaffold: a runtime
/// `ViewSet::for_model(...)` builder ending in `.tenant_router(...)`,
/// with every builder knob shown as a commented hint (the tenant-mode
/// tests assert each hint is present).
fn viewset_template_tenant(name: &str, model: &str, snake: &str) -> String {
    format!(
        r#"//! Auto-scaffolded by `manage make:viewset {name} --tenant`.
//!
//! Tenant-aware viewset: each request resolves the connection via
//! `rustango::extractors::Tenant`, so the same `router()` serves
//! every tenant under their own subdomain / schema / database.
//!
//! Since v0.30 (#80), `tenant_router` carries the full static-router
//! builder chain — filter_fields / search_fields / ordering /
//! page_size / permissions_for_model all work in tenant mode too.
use axum::Router;
use rustango::core::Model as _;
use rustango::viewset::ViewSet;
use crate::models::{model};
pub fn router() -> Router<()> {{
ViewSet::for_model({model}::SCHEMA)
// .fields(&["id", "name", "created_at"]) // restrict response shape
// .filter_fields(&["status", "owner_id"]) // ?status=draft&owner_id=42
// .search_fields(&["name", "description"]) // ?search=foo (ILIKE)
// .ordering(&[("created_at", true)]) // default ORDER BY
// .ordering_fields(&["name", "created_at"]) // ?ordering=-name allowlist
// .page_size(20)
// .permissions_for_model::<{model}>() // CRUD codenames
// .read_only() // GET only
.tenant_router("/api/{snake}")
}}
// Mount in your urls.rs:
//
// .merge(crate::viewsets::{snake}::router())
"#
    )
}
/// Decides the tenancy mode for `make:viewset`.
///
/// Returns `(tenant_aware, echoed_auto_detect)`:
/// - `--no-tenant` always wins and is never echoed,
/// - an explicit `--tenant` wins next, also without an echo,
/// - otherwise the auto-detected project setting is used, and the echo flag
///   mirrors it so the caller prints the notice only when detection fired.
fn resolve_viewset_tenant_mode(
    explicit_tenant: bool,
    explicit_no_tenant: bool,
    project_tenancy: bool,
) -> (bool, bool) {
    match (explicit_no_tenant, explicit_tenant) {
        (true, _) => (false, false),
        (false, true) => (true, false),
        (false, false) => (project_tenancy, project_tenancy),
    }
}
/// Best-effort detection of the `tenancy` cargo feature by scanning the
/// current directory's Cargo.toml as plain text (no TOML parser).
///
/// Matches either the inline dependency form (a single line containing both
/// `rustango` and `"tenancy"`) or the table form (`[dependencies.rustango]`
/// header plus a `"tenancy"` string anywhere in the file).
///
/// NOTE(review): the table-form branch looks for `"tenancy"` anywhere in the
/// file, so another dependency's feature list could trigger a false
/// positive — presumably acceptable for a scaffold hint (the user can pass
/// `--no-tenant`); verify before reusing this for anything stricter.
fn project_uses_tenancy() -> bool {
    // Missing / unreadable Cargo.toml → default to single-tenant scaffolds.
    let Ok(s) = std::fs::read_to_string("Cargo.toml") else {
        return false;
    };
    // Case-insensitive scan to tolerate hand-edited manifests.
    let lower = s.to_ascii_lowercase();
    let has_inline = lower.contains("rustango")
        && lower
            .lines()
            .any(|line| line.contains("rustango") && line.contains("\"tenancy\""));
    let has_table_block =
        lower.contains("[dependencies.rustango]") && lower.contains("\"tenancy\"");
    has_inline || has_table_block
}
/// Handles `manage make:api_routes <app> [--tenant]`: scaffolds
/// `src/<app>/api_routes.rs`, the per-app router composer.
///
/// `--help`/`-h` prints usage and returns early without writing anything.
/// Exactly one positional app name is required, it must be snake_case, and
/// the app directory must already exist (created by `startapp`).
fn make_api_routes_cmd<W: Write>(args: &[String], w: &mut W) -> Result<(), MigrateError> {
    let mut tenant_aware = false;
    // Positional args left after flag processing (expected: one app name).
    let mut filtered: Vec<String> = Vec::with_capacity(args.len());
    for a in args {
        if a == "--tenant" || a == "--tenant-aware" {
            tenant_aware = true;
        } else if a == "--help" || a == "-h" {
            writeln!(w, "make:api_routes <app> [--tenant]")?;
            writeln!(
                w,
                " Scaffold src/<app>/api_routes.rs — the per-app router composer."
            )?;
            writeln!(
                w,
                " Use --tenant for tenancy projects (no PgPool argument; each"
            )?;
            writeln!(
                w,
                " viewset resolves its own per-request connection via the Tenant"
            )?;
            writeln!(w, " extractor).")?;
            return Ok(());
        } else if a.starts_with('-') {
            return Err(MigrateError::Validation(format!(
                "make:api_routes: unrecognized flag `{a}`"
            )));
        } else {
            filtered.push(a.clone());
        }
    }
    let app = filtered.first().ok_or_else(|| {
        MigrateError::Validation(
            "app name is required (e.g. `manage make:api_routes regions`)".into(),
        )
    })?;
    if filtered.len() > 1 {
        return Err(MigrateError::Validation(format!(
            "make:api_routes: expected one app name, got {} ({:?})",
            filtered.len(),
            filtered
        )));
    }
    if !is_valid_app_name(app) {
        return Err(MigrateError::Validation(format!(
            "make:api_routes: app name `{app}` must match `[a-z_][a-z0-9_]*`"
        )));
    }
    // The app module directory must already exist.
    let app_dir = std::path::PathBuf::from("src").join(app);
    if !app_dir.exists() {
        return Err(MigrateError::Validation(format!(
            "make:api_routes: src/{app}/ does not exist. Run `manage startapp {app}` first."
        )));
    }
    let path = app_dir.join("api_routes.rs");
    // Create-only: never overwrite an existing composer.
    if path.exists() {
        return Err(MigrateError::Validation(format!(
            "{} already exists — refusing to overwrite",
            path.display()
        )));
    }
    let body = if tenant_aware {
        api_routes_template_tenant(app)
    } else {
        api_routes_template_pool(app)
    };
    std::fs::write(&path, body)?;
    writeln!(w, "wrote {}", path.display())?;
    // Registration + mount hints for the user.
    writeln!(
        w,
        " add `pub mod api_routes;` to src/{app}/mod.rs (or `mod ...;`),"
    )?;
    writeln!(
        w,
        " then `.merge({app}::api_routes::api())` from your top-level urls.rs."
    )?;
    Ok(())
}
/// Validates an app/module name against `[a-z_][a-z0-9_]*` (non-empty,
/// ASCII snake_case — valid as both a directory name and a Rust module).
fn is_valid_app_name(s: &str) -> bool {
    let mut it = s.chars();
    // First char: lowercase letter or underscore (empty string fails here).
    let head_ok = it
        .next()
        .map_or(false, |c| c.is_ascii_lowercase() || c == '_');
    // Remaining chars may additionally be digits.
    head_ok && it.all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '_')
}
/// Renders the tenant-mode `api_routes.rs` template: a no-argument
/// `pub fn api() -> Router<()>` (no PgPool — each viewset resolves its own
/// per-request connection) with a commented `tenant_router` merge hint.
fn api_routes_template_tenant(app: &str) -> String {
    format!(
        r#"//! Auto-scaffolded by `manage make:api_routes {app} --tenant`.
//!
//! API routing for the `{app}` app. Per-model viewsets live under
//! `viewsets/` and each exposes a `pub fn viewset() -> ViewSet`;
//! this file composes them into a single `Router<()>`.
//!
//! Adding a resource:
//! 1. Drop a new file under `viewsets/` exposing
//! `pub fn viewset() -> ViewSet`.
//! 2. Declare it in `viewsets/mod.rs`.
//! 3. Add one `.merge(...)` line below.
use axum::Router;
pub fn api() -> Router<()> {{
Router::new()
// .merge(super::viewsets::<model>::viewset().tenant_router("/api/<model>"))
}}
"#
    )
}
/// Renders the pool-mode `api_routes.rs` template: `pub fn api(pool:
/// PgPool)` so viewsets can capture the pool at mount time. The `_pool`
/// binding keeps the scaffold warning-free until a merge line uses it.
fn api_routes_template_pool(app: &str) -> String {
    format!(
        r#"//! Auto-scaffolded by `manage make:api_routes {app}`.
//!
//! API routing for the `{app}` app. Composes per-model viewsets
//! into a single `Router<()>`. Each viewset captures the supplied
//! `PgPool` at mount time.
//!
//! Adding a resource:
//! 1. Run `manage make:viewset <Name> --model <Model>`.
//! 2. Add one `.merge(...)` line below.
use axum::Router;
use rustango::sql::sqlx::PgPool;
pub fn api(pool: PgPool) -> Router<()> {{
let _pool = pool;
Router::new()
// .merge(super::viewsets::<snake>::router("/api/<snake>", _pool.clone()))
}}
"#
    )
}
/// Handles `manage make:serializer <Name> [--model M]`: writes a derive-based
/// serializer skeleton to `src/<snake>.rs` (model defaults to `Post`).
fn make_serializer_cmd<W: Write>(args: &[String], w: &mut W) -> Result<(), MigrateError> {
    let (name, model) = parse_name_and_model(args)?;
    let snake = pascal_to_snake(&name);
    let model = model.unwrap_or_else(|| "Post".into());
    let body = format!(
        r#"//! Auto-scaffolded by `manage make:serializer {name}`.
use rustango::Serializer;
#[derive(Serializer, serde::Deserialize, Default)]
#[serializer(model = {model})]
pub struct {name} {{
pub id: i64,
// pub title: String,
// #[serializer(read_only)]
// pub created_at: chrono::DateTime<chrono::Utc>,
}}
"#
    );
    write_generated(w, &format!("{snake}.rs"), body)
}
/// Handles `manage make:form <Name>`: writes a derive-based form skeleton
/// with example validation attributes to `src/<snake>.rs`.
/// (`--model` is accepted but unused by this generator.)
fn make_form_cmd<W: Write>(args: &[String], w: &mut W) -> Result<(), MigrateError> {
    let (name, _) = parse_name_and_model(args)?;
    let snake = pascal_to_snake(&name);
    let body = format!(
        r#"//! Auto-scaffolded by `manage make:form {name}`.
use rustango::forms::Form;
use rustango::Form as DeriveForm;
#[derive(DeriveForm)]
pub struct {name} {{
#[form(min_length = 1, max_length = 200)]
pub title: String,
pub body: Option<String>,
}}
"#
    );
    write_generated(w, &format!("{snake}.rs"), body)
}
/// Handles `manage make:job <Name>`: writes a background-job skeleton
/// (pool-holding struct with an async `run`) to `src/<snake>.rs`, plus
/// commented scheduler wiring hints.
fn make_job_cmd<W: Write>(args: &[String], w: &mut W) -> Result<(), MigrateError> {
    let (name, _) = parse_name_and_model(args)?;
    let snake = pascal_to_snake(&name);
    let body = format!(
        r#"//! Auto-scaffolded by `manage make:job {name}`.
//!
//! Background job — run async work outside the request lifecycle.
//! Pair with `rustango::scheduler::Scheduler` (cron-shape) or your queue layer.
use std::sync::Arc;
use rustango::sql::sqlx::PgPool;
pub struct {name} {{
pub pool: PgPool,
}}
impl {name} {{
pub async fn run(self: Arc<Self>) {{
// TODO: implement
let _ = self.pool.acquire().await;
}}
}}
// Wire up in main.rs:
//
// let job = Arc::new({name} {{ pool: pool.clone() }});
// scheduler.every("{snake}", Duration::from_secs(60), move || {{
// let job = job.clone();
// async move {{ job.run().await }}
// }});
"#
    );
    write_generated(w, &format!("{snake}.rs"), body)
}
/// Handles `manage make:notification <Name>`: writes an email-building
/// notification skeleton to `src/<snake>.rs`.
fn make_notification_cmd<W: Write>(args: &[String], w: &mut W) -> Result<(), MigrateError> {
    let (name, _) = parse_name_and_model(args)?;
    let snake = pascal_to_snake(&name);
    let body = format!(
        r#"//! Auto-scaffolded by `manage make:notification {name}`.
//!
//! User-facing notification. For now this just builds an Email; once the
//! `rustango::notifications` layer ships you'll add `via()` for multi-channel.
use rustango::email::Email;
pub struct {name} {{
pub user_email: String,
pub subject: String,
}}
impl {name} {{
pub fn build_email(&self) -> Email {{
Email::new()
.to(&self.user_email)
.from("noreply@example.com")
.subject(&self.subject)
.body("Hello — this notification was generated by {name}.")
}}
}}
"#
    );
    write_generated(w, &format!("{snake}.rs"), body)
}
/// Handles `manage make:middleware <Name>`: writes an axum `from_fn`-style
/// middleware skeleton to `src/<snake>.rs`. Note the generated function is
/// snake_case, unlike the type-producing generators.
fn make_middleware_cmd<W: Write>(args: &[String], w: &mut W) -> Result<(), MigrateError> {
    let (name, _) = parse_name_and_model(args)?;
    let snake = pascal_to_snake(&name);
    let body = format!(
        r#"//! Auto-scaffolded by `manage make:middleware {name}`.
use axum::body::Body;
use axum::http::{{Request, Response}};
use axum::middleware::Next;
pub async fn {snake}(req: Request<Body>, next: Next) -> Response<Body> {{
// TODO: pre-handler logic
let response = next.run(req).await;
// TODO: post-handler logic
response
}}
// Apply with:
// router.layer(axum::middleware::from_fn({snake}))
"#
    );
    write_generated(w, &format!("{snake}.rs"), body)
}
/// Handles `manage make:test <Name>`: scaffolds an integration test at
/// `tests/<snake>.rs`. Unlike the other generators this targets `tests/`
/// rather than `src/`, so it inlines the no-overwrite + write logic instead
/// of calling `write_generated` (which is hard-wired to `src/`).
fn make_test_cmd<W: Write>(args: &[String], w: &mut W) -> Result<(), MigrateError> {
    let (name, _) = parse_name_and_model(args)?;
    let snake = pascal_to_snake(&name);
    let body = format!(
        r#"//! Auto-scaffolded by `manage make:test {name}`.
//!
//! Integration test. Run with `cargo test --test {snake}`.
use rustango::test_client::TestClient;
use axum::Router;
use axum::routing::get;
fn app() -> Router {{
Router::new().route("/hello", get(|| async {{ "hi" }}))
}}
#[tokio::test]
async fn {snake}_smoke() {{
let client = TestClient::new(app());
let r = client.get("/hello").send().await;
assert_eq!(r.status, 200);
assert_eq!(r.text(), "hi");
}}
"#
    );
    let path = std::path::PathBuf::from("tests").join(format!("{snake}.rs"));
    // Create-only: never clobber an existing test file.
    if path.exists() {
        return Err(MigrateError::Validation(format!(
            "{} already exists — refusing to overwrite",
            path.display()
        )));
    }
    if let Some(parent) = path.parent() {
        std::fs::create_dir_all(parent)?;
    }
    std::fs::write(&path, body)?;
    writeln!(w, "wrote {}", path.display())?;
    Ok(())
}
/// Parsed flags for `manage db:dump` (forwarded to `pg_dump`).
#[derive(Debug, PartialEq)]
struct DbDumpArgs {
    /// `--out`/`-o`: output file; `None` lets pg_dump write to stdout.
    out: Option<String>,
    /// `--data-only`: dump rows without DDL.
    data_only: bool,
    /// `--schema-only`: dump DDL without rows (exclusive with data_only).
    schema_only: bool,
    /// `--no-owner`: omit ownership commands from the dump.
    no_owner: bool,
}
/// Parses `db:dump` CLI flags into `DbDumpArgs`, rejecting unknown
/// arguments and the mutually exclusive `--data-only`/`--schema-only`
/// combination.
fn parse_db_dump_args(args: &[String]) -> Result<DbDumpArgs, MigrateError> {
    let mut parsed = DbDumpArgs {
        out: None,
        data_only: false,
        schema_only: false,
        no_owner: false,
    };
    let mut iter = args.iter();
    while let Some(arg) = iter.next() {
        match arg.as_str() {
            "--out" | "-o" => {
                // The next token is the output path.
                let value = iter
                    .next()
                    .ok_or_else(|| MigrateError::Validation("--out requires a path".into()))?;
                parsed.out = Some(value.clone());
            }
            "--data-only" => parsed.data_only = true,
            "--schema-only" => parsed.schema_only = true,
            "--no-owner" => parsed.no_owner = true,
            other => {
                return Err(MigrateError::Validation(format!("unknown flag: {other}")));
            }
        }
    }
    if parsed.data_only && parsed.schema_only {
        return Err(MigrateError::Validation(
            "--data-only and --schema-only are mutually exclusive".into(),
        ));
    }
    Ok(parsed)
}
/// Builds the pg_dump argv: connection URL first, then the boolean flags,
/// then `--file <out>` when an output path was requested.
fn build_pg_dump_argv(parsed: &DbDumpArgs, database_url: &str) -> Vec<String> {
    let mut argv: Vec<String> = Vec::with_capacity(6);
    argv.push(database_url.to_owned());
    // Order matters for the echoed command line: keep flags in declaration order.
    for (enabled, flag) in [
        (parsed.data_only, "--data-only"),
        (parsed.schema_only, "--schema-only"),
        (parsed.no_owner, "--no-owner"),
    ] {
        if enabled {
            argv.push(flag.to_owned());
        }
    }
    if let Some(out) = &parsed.out {
        argv.extend(["--file".into(), out.clone()]);
    }
    argv
}
/// Handles `manage db:dump`: shells out to `pg_dump` against
/// `DATABASE_URL` with the parsed flags, echoing the command with the
/// password redacted. Fails if pg_dump is not on PATH or exits non-zero.
fn db_dump_cmd<W: Write>(args: &[String], w: &mut W) -> Result<(), MigrateError> {
    let parsed = parse_db_dump_args(args)?;
    let url = std::env::var("DATABASE_URL").map_err(|_| {
        MigrateError::Validation(
            "DATABASE_URL must be set for db:dump (e.g. \
postgres://user:pass@host:5432/db)"
                .into(),
        )
    })?;
    let argv = build_pg_dump_argv(&parsed, &url);
    // Redact credentials before echoing the command line.
    writeln!(w, "running: pg_dump {}", redact(&argv).join(" "))?;
    let status = std::process::Command::new("pg_dump")
        .args(&argv)
        .status()
        .map_err(|e| {
            MigrateError::Validation(format!("could not run pg_dump (is it on PATH?): {e}"))
        })?;
    if !status.success() {
        return Err(MigrateError::Validation(format!(
            "pg_dump exited with status {status}"
        )));
    }
    Ok(())
}
/// Parsed arguments for `manage db:restore` (forwarded to `psql`).
#[derive(Debug, PartialEq)]
struct DbRestoreArgs {
    /// Positional: path to the SQL dump file to replay.
    file: String,
    /// `--clean`: drop + recreate the `public` schema before restoring.
    clean: bool,
}
/// Parses `db:restore <file> [--clean]`: exactly one positional dump-file
/// path; any other dash-prefixed token is rejected as an unknown flag.
fn parse_db_restore_args(args: &[String]) -> Result<DbRestoreArgs, MigrateError> {
    let mut file: Option<String> = None;
    let mut clean = false;
    for arg in args {
        if arg == "--clean" {
            clean = true;
        } else if arg.starts_with('-') {
            return Err(MigrateError::Validation(format!("unknown flag: {arg}")));
        } else if file.is_some() {
            // A second positional is an error — only one dump file makes sense.
            return Err(MigrateError::Validation(format!(
                "unexpected argument: {arg}"
            )));
        } else {
            file = Some(arg.clone());
        }
    }
    match file {
        Some(file) => Ok(DbRestoreArgs { file, clean }),
        None => Err(MigrateError::Validation(
            "db:restore <file> requires a dump file path".into(),
        )),
    }
}
/// Builds the psql argv: connection URL, fail-fast `ON_ERROR_STOP`, an
/// optional drop-and-recreate of the `public` schema (`--clean`), then
/// `-f <file>` to replay the dump.
fn build_psql_argv(parsed: &DbRestoreArgs, database_url: &str) -> Vec<String> {
    let mut argv: Vec<String> = vec![
        database_url.to_owned(),
        "-v".into(),
        "ON_ERROR_STOP=1".into(),
    ];
    if parsed.clean {
        // Wipe the public schema first so the restore starts from a blank slate.
        argv.extend([
            "-c".into(),
            "DROP SCHEMA IF EXISTS public CASCADE; CREATE SCHEMA public;".into(),
        ]);
    }
    argv.extend(["-f".into(), parsed.file.clone()]);
    argv
}
/// Handles `manage db:restore`: shells out to `psql` against
/// `DATABASE_URL` to replay a dump file, echoing the command with the
/// password redacted. Fails if psql is not on PATH or exits non-zero.
fn db_restore_cmd<W: Write>(args: &[String], w: &mut W) -> Result<(), MigrateError> {
    let parsed = parse_db_restore_args(args)?;
    let url = std::env::var("DATABASE_URL").map_err(|_| {
        MigrateError::Validation(
            "DATABASE_URL must be set for db:restore (e.g. \
postgres://user:pass@host:5432/db)"
                .into(),
        )
    })?;
    let argv = build_psql_argv(&parsed, &url);
    // Redact credentials before echoing the command line.
    writeln!(w, "running: psql {}", redact(&argv).join(" "))?;
    let status = std::process::Command::new("psql")
        .args(&argv)
        .status()
        .map_err(|e| {
            MigrateError::Validation(format!("could not run psql (is it on PATH?): {e}"))
        })?;
    if !status.success() {
        return Err(MigrateError::Validation(format!(
            "psql exited with status {status}"
        )));
    }
    Ok(())
}
/// Handles `manage db:info`: prints framework version, compiled-in backend
/// features, and the (password-redacted) URL resolved from the
/// environment, warning on feature/URL mismatches.
fn db_info_cmd<W: Write>(w: &mut W) -> Result<(), MigrateError> {
    writeln!(w, "rustango db:info")?;
    writeln!(w, " framework version: {}", env!("CARGO_PKG_VERSION"))?;
    // Which SQL backends were compiled into this binary.
    let pg_enabled = cfg!(feature = "postgres");
    let mysql_enabled = cfg!(feature = "mysql");
    writeln!(
        w,
        " postgres feature: {}",
        if pg_enabled { "enabled" } else { "disabled" }
    )?;
    writeln!(
        w,
        " mysql feature: {} (impl lands in v0.23.0-batch2)",
        if mysql_enabled { "enabled" } else { "disabled" }
    )?;
    match crate::env::database_url_from_env() {
        Ok(url) => {
            // The URL scheme ("postgres", "mysql", ...) picks the backend.
            let scheme = url.split("://").next().unwrap_or("(unknown)");
            writeln!(w, " resolved URL: {}", redact_url(&url))?;
            writeln!(w, " detected backend: {scheme}")?;
            // Cross-check the resolved scheme against compiled features.
            match scheme {
                "postgres" | "postgresql" if !pg_enabled => {
                    writeln!(
                        w,
                        " ! warning: URL is postgres but the `postgres` feature is disabled — \
add `features = [\"postgres\"]` to rustango"
                    )?;
                }
                "mysql" if !mysql_enabled => {
                    writeln!(
                        w,
                        " ! warning: URL is mysql but the `mysql` feature is disabled — \
add `features = [\"mysql\"]` to rustango"
                    )?;
                }
                "mysql" if mysql_enabled => {
                    writeln!(
                        w,
                        " ! note: MySql connections will fail in v0.23.0-batch1 \
(MySqlDialect lands in batch2)"
                    )?;
                }
                _ => {}
            }
        }
        Err(e) => {
            writeln!(w, " resolved URL: (none — {e})")?;
            writeln!(
                w,
                " hint: set DATABASE_URL or DB_USER+DB_NAME (+optional \
DB_HOST/DB_PORT/DB_PASSWORD/DB_DRIVER/DB_PARAMS)"
            )?;
        }
    }
    Ok(())
}
/// Applies `redact_url` to every argv element so connection-string
/// passwords never reach the console.
fn redact(argv: &[String]) -> Vec<String> {
    let mut out = Vec::with_capacity(argv.len());
    for arg in argv {
        out.push(redact_url(arg));
    }
    out
}
/// Masks the password in a `scheme://user:pass@host/...` URL, replacing it
/// with `***`. Inputs without a `scheme://`, without an `@`, or without a
/// `user:pass` pair are returned unchanged (nothing secret to hide).
fn redact_url(s: &str) -> String {
    let Some((scheme, rest)) = s.split_once("://") else {
        return s.to_owned();
    };
    // First '@' separates credentials from the host part.
    let Some(at) = rest.find('@') else {
        return s.to_owned();
    };
    // Credentials must be `user:pass` to contain anything worth masking.
    let Some(colon) = rest[..at].find(':') else {
        return s.to_owned();
    };
    let user = &rest[..colon];
    format!("{scheme}://{user}:***{}", &rest[at..])
}
/// Snapshot of the environment variables the deploy audit inspects,
/// captured as plain `Option<String>`s so the audit itself stays pure and
/// testable.
#[derive(Debug, Default, Clone)]
pub(crate) struct DeployAuditEnv {
    /// `RUSTANGO_ENV`
    pub rustango_env: Option<String>,
    /// `RUSTANGO_SESSION_SECRET`
    pub session_secret: Option<String>,
    /// `DATABASE_URL`
    pub database_url: Option<String>,
    /// `RUSTANGO_APEX_DOMAIN`
    pub apex_domain: Option<String>,
    /// `RUSTANGO_BIND`
    pub bind: Option<String>,
}
/// Captures the deploy-relevant environment variables into a
/// `DeployAuditEnv` (missing or non-UTF-8 vars become `None`).
fn deploy_audit_env() -> DeployAuditEnv {
    DeployAuditEnv {
        rustango_env: std::env::var("RUSTANGO_ENV").ok(),
        session_secret: std::env::var("RUSTANGO_SESSION_SECRET").ok(),
        database_url: std::env::var("DATABASE_URL").ok(),
        apex_domain: std::env::var("RUSTANGO_APEX_DOMAIN").ok(),
        bind: std::env::var("RUSTANGO_BIND").ok(),
    }
}
/// Findings accumulated by the deploy audit, bucketed by severity.
#[derive(Debug, Default)]
pub(crate) struct DeployAuditFindings {
    /// Informational notes (configuration looks fine / deliberate choices).
    pub info: Vec<String>,
    /// Suspicious but possibly intentional settings.
    pub warnings: Vec<String>,
    /// Settings that will break or endanger a production deployment.
    pub errors: Vec<String>,
}
/// Pure audit over the captured env vars: appends findings to `out` in
/// three severities. Kept free of env/filesystem access so tests can drive
/// it with a synthetic `DeployAuditEnv`.
pub(crate) fn run_deploy_audit(env: &DeployAuditEnv, out: &mut DeployAuditFindings) {
    // RUSTANGO_ENV: must name a production tier in deployed environments.
    match env.rustango_env.as_deref() {
        Some("prod" | "production") => {
            out.info
                .push("RUSTANGO_ENV is set to a production value".into());
        }
        Some(other) => {
            out.warnings.push(format!(
                "RUSTANGO_ENV is `{other}` — set to `prod` (or `production`) in deployed env"
            ));
        }
        None => {
            out.warnings.push(
                "RUSTANGO_ENV is unset — set to `prod` so config loaders pick the right tier"
                    .into(),
            );
        }
    }
    // Session secret: unset, too short, or still the scaffolder placeholder
    // are all hard errors. Note the length guard runs before the placeholder
    // guard, so a short placeholder reports the length error.
    match env.session_secret.as_deref() {
        None => {
            out.errors.push(
                "RUSTANGO_SESSION_SECRET is unset — operator + tenant cookies + JWTs would use \
an ephemeral random secret that's regenerated on every restart, signing every \
user out. Set via `openssl rand -base64 32`."
                    .into(),
            );
        }
        Some(s) if s.len() < 32 => {
            out.errors.push(format!(
                "RUSTANGO_SESSION_SECRET is only {} bytes — need ≥ 32 for HMAC key strength. \
Regenerate with `openssl rand -base64 32`.",
                s.len()
            ));
        }
        Some(s) if s.contains("change-me") || s.contains("placeholder") => {
            out.errors.push(
                "RUSTANGO_SESSION_SECRET still contains the scaffolder placeholder \
(`change-me-...`) — replace with a real secret via `openssl rand -base64 32`."
                    .into(),
            );
        }
        Some(_) => {
            out.info.push("RUSTANGO_SESSION_SECRET length OK".into());
        }
    }
    // DATABASE_URL: required; localhost in prod is suspicious but not fatal.
    match env.database_url.as_deref() {
        None => out
            .errors
            .push("DATABASE_URL is unset — required in production".into()),
        Some(url) if url.contains("localhost") || url.contains("127.0.0.1") => {
            out.warnings.push(
                "DATABASE_URL points at localhost / 127.0.0.1 — verify this is intended in \
production (typically a managed service hostname)"
                    .into(),
            );
        }
        Some(_) => out.info.push("DATABASE_URL set".into()),
    }
    // Apex domain: tenancy deployments need a real public hostname.
    match env.apex_domain.as_deref() {
        None | Some("localhost") => {
            out.warnings.push(
                "RUSTANGO_APEX_DOMAIN is unset / `localhost` — tenancy projects need this set to \
the public-facing apex (e.g. `app.example.com`) so subdomain resolution + \
cookie scoping work in production. Single-tenant projects can ignore."
                    .into(),
            );
        }
        Some(_) => out
            .info
            .push("RUSTANGO_APEX_DOMAIN set to a non-localhost value".into()),
    }
    // Bind address: loopback-only binds rarely make sense in production.
    match env.bind.as_deref() {
        Some(b) if b.starts_with("127.0.0.1") => {
            out.warnings.push(format!(
                "RUSTANGO_BIND={b} only listens on loopback — production usually wants \
`0.0.0.0:<port>` to accept external traffic"
            ));
        }
        Some(_) | None => {}
    }
}
/// Loads the tiered settings file and, if it parses, runs the
/// settings-level audit. A missing file (`Io` error) is silently skipped so
/// projects without a config file still get the env-var audit; any other
/// load failure is surfaced as a warning.
#[cfg(feature = "config")]
fn run_settings_audit(out: &mut DeployAuditFindings) {
    use crate::config::Settings;
    let env_tier = Settings::current_env_tier();
    out.info
        .push(format!("config tier resolved to `{env_tier}`"));
    let settings = match Settings::load_from_env() {
        Ok(s) => s,
        Err(crate::config::ConfigError::Io { .. }) => {
            // No settings file — nothing further to audit.
            return;
        }
        Err(e) => {
            out.warnings.push(format!(
                "config: failed to load settings for audit: {e} — fix the file shape \
to enable the rest of the deploy audit"
            ));
            return;
        }
    };
    settings_audit_check(&env_tier, &settings, out);
}
/// Audits the loaded `Settings` against production expectations. All checks
/// are gated on `env_tier` being a prod tier — other tiers are expected to
/// run relaxed values and return immediately.
#[cfg(feature = "config")]
pub(crate) fn settings_audit_check(
    env_tier: &str,
    settings: &crate::config::Settings,
    out: &mut DeployAuditFindings,
) {
    let in_prod = matches!(env_tier, "prod" | "production");
    if !in_prod {
        return;
    }
    // [security] headers preset: dev/none skip HSTS and friends.
    if let Some(preset) = settings.security.headers_preset.as_deref() {
        if preset == "dev" || preset == "none" {
            out.warnings.push(format!(
                "[security] headers_preset = `{preset}` in prod tier — promote to `strict` \
so HSTS / X-Frame-Options / X-Content-Type-Options / Referrer-Policy are emitted"
            ));
        }
    }
    // [security] HSTS max-age of zero disables HSTS entirely.
    if matches!(settings.security.hsts_max_age_secs, Some(0)) {
        out.warnings.push(
            "[security] hsts_max_age_secs = 0 in prod tier — disables HSTS, leaving TLS-strip \
attacks viable on first request"
                .into(),
        );
    }
    // [auth] password-hashing memory cost floor.
    if let Some(kib) = settings.auth.argon2_memory_kib {
        if kib < 19_456 {
            out.warnings.push(format!(
                "[auth] argon2_memory_kib = {kib} in prod tier — OWASP 2024 recommends ≥ 19456 \
for password hashing brute-force resistance"
            ));
        }
    }
    // [auth.jwt] long-lived access tokens widen the leak blast radius.
    if let Some(ttl) = settings.auth.jwt.access_ttl_secs {
        if ttl > 3600 {
            out.warnings.push(format!(
                "[auth.jwt] access_ttl_secs = {ttl} in prod tier — access tokens > 1h widen \
the leaked-token blast radius. The refresh flow rotates them; keep this short."
            ));
        }
    }
    // [audit] unbounded log retention is only informational.
    if settings.audit.retention_days.is_none() {
        out.info.push(
            "[audit] retention_days unset — log grows forever; consider setting + scheduling \
`manage audit-cleanup --days <N>`"
                .into(),
        );
    }
    // [routes] legacy URL preset note.
    if matches!(settings.routes.legacy_preset, Some(true)) {
        out.info.push(
            "[routes] legacy_preset = true — using the pre-v0.29 `__`-prefixed URLs \
(`/__login`, `/__admin`, …). Switch to the friendly defaults when bookmarks allow."
                .into(),
        );
    }
    // [server] loopback-only bind in prod.
    if let Some(bind) = settings.server.bind.as_deref() {
        if bind.starts_with("127.0.0.1") || bind.starts_with("localhost") {
            out.warnings.push(format!(
                "[server] bind = `{bind}` in prod tier — only listens on loopback. \
Production usually wants `0.0.0.0:<port>` to accept external traffic."
            ));
        }
    }
    // [database] schema-mode tenancy requires Postgres.
    if let Some(backend) = settings.database.resolved_backend() {
        let tenancy_on = crate::config::Settings::detected_features()
            .iter()
            .any(|f| *f == "tenancy");
        if tenancy_on && backend != "postgres" {
            out.warnings.push(format!(
                "[database] backend = `{backend}` with `tenancy` feature on — \
schema-mode multi-tenancy is Postgres-only by language semantics \
(it relies on `SET search_path`). Either switch to a Postgres URL \
in prod, or use `crate::tenancy::DatabasePools<DB>` for the \
one-database-per-tenant model that works on sqlite/mysql."
            ));
        }
    }
    // [admin] chrome / cookie settings.
    let admin = &settings.admin;
    if matches!(admin.csrf_cookie_secure, Some(false)) {
        out.warnings.push(
            "[admin] csrf_cookie_secure = false in prod tier — admin CSRF cookie will be \
sent over plain HTTP, which strips its tamper resistance. Set true (or remove \
the override) so the framework default Secure flag applies."
                .into(),
        );
    }
    if let Some(hex) = admin.primary_color.as_deref() {
        // Accept #RGB / #RRGGBB / #RRGGBBAA with a mandatory leading '#'.
        let stripped = hex.trim_start_matches('#');
        let valid_len = matches!(stripped.len(), 3 | 6 | 8);
        let all_hex = stripped.chars().all(|c| c.is_ascii_hexdigit());
        if !hex.starts_with('#') || !valid_len || !all_hex {
            out.warnings.push(format!(
                "[admin] primary_color = `{hex}` does not parse as a hex color (expected \
`#RRGGBB`, `#RGB`, or `#RRGGBBAA`) — the theme will fall back to the default \
accent. Check for a missing leading `#` or non-hex characters."
            ));
        }
    }
    if let Some(mode) = admin.theme_mode.as_deref() {
        if !matches!(mode, "auto" | "light" | "dark") {
            out.warnings.push(format!(
                "[admin] theme_mode = `{mode}` is not one of `auto` / `light` / `dark` — \
the chrome will ignore it and fall back to `auto`."
            ));
        }
    }
    if matches!(admin.session_timeout_minutes, Some(0)) {
        out.info.push(
            "[admin] session_timeout_minutes = 0 in prod tier — admin sessions never idle-expire. \
Confirm this is deliberate (kiosk / single-user setup); otherwise pick a non-zero \
value so abandoned sessions can't be hijacked."
                .into(),
        );
    }
    if let Some(prefix) = admin.url_prefix.as_deref() {
        if prefix.ends_with('/') && prefix.len() > 1 {
            out.warnings.push(format!(
                "[admin] url_prefix = `{prefix}` ends with a trailing slash — Builder will \
strip it, but config files should write the canonical form (no trailing slash) \
so reviewers can grep across deployments."
            ));
        }
    }
}
#[cfg(test)]
mod gen_tests {
use super::*;
// --- unit tests: name conversion + CLI name/model parsing ---
#[test]
fn pascal_to_snake_cases() {
    assert_eq!(pascal_to_snake("Post"), "post");
    assert_eq!(pascal_to_snake("PostViewSet"), "post_view_set");
    // Consecutive capitals split individually — documented behavior.
    assert_eq!(pascal_to_snake("API"), "a_p_i");
    assert_eq!(pascal_to_snake("UserNotification"), "user_notification");
}
#[test]
fn is_valid_type_name_accepts_pascal() {
    assert!(is_valid_type_name("Post"));
    assert!(is_valid_type_name("PostViewSet"));
    assert!(is_valid_type_name("Foo_Bar"));
}
#[test]
fn is_valid_type_name_rejects_invalid() {
    assert!(!is_valid_type_name(""));
    // Must start uppercase; punctuation is rejected anywhere.
    assert!(!is_valid_type_name("post"));
    assert!(!is_valid_type_name("123Foo"));
    assert!(!is_valid_type_name("Post!"));
}
#[test]
fn parse_name_and_model_basic() {
    let (n, m) = parse_name_and_model(&["PostViewSet".into()]).unwrap();
    assert_eq!(n, "PostViewSet");
    assert_eq!(m, None);
}
#[test]
fn parse_name_and_model_with_model_flag() {
    let args: Vec<String> = vec!["PostViewSet".into(), "--model".into(), "Post".into()];
    let (n, m) = parse_name_and_model(&args).unwrap();
    assert_eq!(n, "PostViewSet");
    assert_eq!(m, Some("Post".into()));
}
#[test]
fn parse_name_and_model_rejects_missing_name() {
    let r = parse_name_and_model(&[]);
    assert!(r.is_err());
}
#[test]
fn parse_name_and_model_rejects_lowercase_name() {
    let r = parse_name_and_model(&["postviewset".into()]);
    assert!(r.is_err());
}
// --- viewset template content tests ---
// Pool template: derive-macro based, no tenancy plumbing.
#[test]
fn viewset_template_pool_emits_derive_macro() {
    let body = viewset_template_pool("PostViewSet", "Post", "post_view_set");
    assert!(
        body.contains("#[derive(ViewSet)]"),
        "expected derive macro, got: {body}"
    );
    assert!(
        body.contains("PostViewSet::router"),
        "expected `Name::router(...)` mount hint, got: {body}"
    );
    assert!(
        !body.contains("tenant_router"),
        "pool template must NOT reference tenant_router, got: {body}"
    );
}
// Tenant template: runtime builder + tenant_router, full knob parity.
#[test]
fn viewset_template_tenant_uses_tenant_router() {
    let body = viewset_template_tenant("PostViewSet", "Post", "post_view_set");
    assert!(
        body.contains("ViewSet::for_model"),
        "expected runtime ViewSet::for_model builder, got: {body}"
    );
    assert!(
        body.contains(".tenant_router("),
        "expected `.tenant_router(...)` call, got: {body}"
    );
    assert!(
        !body.contains("#[derive(ViewSet)]"),
        "tenant template must NOT use the derive macro (pool-coupled), got: {body}"
    );
    assert!(
        body.contains("/api/post_view_set"),
        "expected snake-cased path, got: {body}"
    );
    assert!(
        body.contains("pub fn router()"),
        "expected `pub fn router()` so api_routes.rs can `.merge(...)`, got: {body}"
    );
    assert!(
        !body.contains("v1 scope"),
        "v0.30 unification removed the v1 scope caveat — \
template must reflect full feature parity, got: {body}"
    );
    // Every builder knob must appear (as a commented hint) in the template.
    for knob in [
        ".filter_fields(",
        ".search_fields(",
        ".ordering(",
        ".page_size(",
        ".permissions_for_model::",
    ] {
        assert!(
            body.contains(knob),
            "expected `{knob}` in tenant template (commented hint), got: {body}"
        );
    }
}
/// Process-wide mutex serializing tests that swap the current directory
/// (`std::env::set_current_dir` mutates global process state).
fn cwd_lock() -> &'static std::sync::Mutex<()> {
    use std::sync::{Mutex, OnceLock};
    static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
    LOCK.get_or_init(Mutex::default)
}
// --- tenancy auto-detection + mode resolution tests ---
// Inline `features = [...]` form is detected. Uses a temp dir + cwd swap,
// serialized by `cwd_lock` because the cwd is process-global.
#[test]
fn project_uses_tenancy_detects_inline_features_array() {
    let _guard = cwd_lock().lock().unwrap_or_else(|p| p.into_inner());
    let dir = tempfile::tempdir().unwrap();
    let cargo = dir.path().join("Cargo.toml");
    std::fs::write(
        &cargo,
        r#"[package]
name = "demo"
version = "0.1.0"
edition = "2021"
[dependencies]
rustango = { version = "0.30", features = ["tenancy", "manage"] }
"#,
    )
    .unwrap();
    let prev = std::env::current_dir().unwrap();
    std::env::set_current_dir(dir.path()).unwrap();
    let detected = project_uses_tenancy();
    // Restore the cwd before asserting so a failure doesn't poison other tests.
    let _ = std::env::set_current_dir(&prev);
    assert!(
        detected,
        "inline-table dep with `tenancy` in features should auto-detect"
    );
}
// No `tenancy` feature anywhere → detection must stay off.
#[test]
fn project_uses_tenancy_false_when_feature_absent() {
    let _guard = cwd_lock().lock().unwrap_or_else(|p| p.into_inner());
    let dir = tempfile::tempdir().unwrap();
    let cargo = dir.path().join("Cargo.toml");
    std::fs::write(
        &cargo,
        r#"[package]
name = "demo"
version = "0.1.0"
edition = "2021"
[dependencies]
rustango = { version = "0.30", features = ["postgres", "manage"] }
"#,
    )
    .unwrap();
    let prev = std::env::current_dir().unwrap();
    std::env::set_current_dir(dir.path()).unwrap();
    let detected = project_uses_tenancy();
    let _ = std::env::set_current_dir(&prev);
    assert!(
        !detected,
        "no tenancy feature → must default to single-tenant scaffold"
    );
}
// Missing Cargo.toml must not error — detection just returns false.
#[test]
fn project_uses_tenancy_false_when_cargo_toml_missing() {
    let _guard = cwd_lock().lock().unwrap_or_else(|p| p.into_inner());
    let dir = tempfile::tempdir().unwrap();
    let prev = std::env::current_dir().unwrap();
    std::env::set_current_dir(dir.path()).unwrap();
    let detected = project_uses_tenancy();
    let _ = std::env::set_current_dir(&prev);
    assert!(!detected);
}
// Exhaustive precedence table:
// (explicit_tenant, explicit_no_tenant, project_tenancy) -> (tenant, echo).
#[test]
fn resolve_viewset_tenant_mode_decision_table() {
    let cases: &[(bool, bool, bool, bool, bool)] = &[
        (false, false, false, false, false),
        (false, false, true, true, true),
        (true, false, false, true, false),
        (true, false, true, true, false),
        (false, true, false, false, false),
        (false, true, true, false, false),
        (true, true, true, false, false),
    ];
    for &(et, ent, pt, want_tenant, want_echo) in cases {
        let (tenant, echo) = resolve_viewset_tenant_mode(et, ent, pt);
        assert_eq!(
            (tenant, echo),
            (want_tenant, want_echo),
            "case (explicit_tenant={et}, explicit_no_tenant={ent}, project_tenancy={pt})"
        );
    }
}
// --- api_routes template + app-name validation tests ---
// Tenant template exposes a no-arg api() and never threads a PgPool.
#[test]
fn api_routes_template_tenant_emits_no_arg_fn() {
    let body = api_routes_template_tenant("regions");
    assert!(
        body.contains("pub fn api() -> Router<()>"),
        "expected no-arg `pub fn api()`, got: {body}"
    );
    assert!(
        body.contains("tenant_router("),
        "expected tenant_router hint comment, got: {body}"
    );
    assert!(
        !body.contains("PgPool"),
        "tenant template must NOT thread PgPool through api(), got: {body}"
    );
}
// Pool template takes the pool as an argument and imports it.
#[test]
fn api_routes_template_pool_threads_pgpool() {
    let body = api_routes_template_pool("blog");
    assert!(
        body.contains("pub fn api(pool: PgPool) -> Router<()>"),
        "expected pool-arg api fn, got: {body}"
    );
    assert!(
        body.contains("use rustango::sql::sqlx::PgPool;"),
        "expected PgPool import, got: {body}"
    );
}
#[test]
fn is_valid_app_name_snake_case_only() {
    assert!(is_valid_app_name("regions"));
    assert!(is_valid_app_name("blog_posts"));
    assert!(is_valid_app_name("_internal"));
    assert!(!is_valid_app_name(""));
    assert!(!is_valid_app_name("Regions"));
    assert!(!is_valid_app_name("region-app"));
    assert!(!is_valid_app_name("9_apps"));
}
// Fixture: a fully-configured production env expected to produce zero findings.
fn good_prod_env() -> DeployAuditEnv {
    DeployAuditEnv {
        rustango_env: Some("prod".into()),
        // 48 bytes — comfortably over the 32-byte minimum.
        session_secret: Some("a".repeat(48)),
        database_url: Some("postgres://app:s3cr3t@db.example.com/app_prod".into()),
        apex_domain: Some("app.example.com".into()),
        bind: Some("0.0.0.0:8080".into()),
    }
}
// Helper: run the deploy audit over `env` and collect the findings.
fn run(env: &DeployAuditEnv) -> DeployAuditFindings {
    let mut out = DeployAuditFindings::default();
    run_deploy_audit(env, &mut out);
    out
}
#[test]
fn deploy_audit_clean_prod_env_has_no_warnings_or_errors() {
let r = run(&good_prod_env());
assert!(
r.errors.is_empty(),
"expected no errors in clean prod env, got: {:?}",
r.errors
);
assert!(
r.warnings.is_empty(),
"expected no warnings in clean prod env, got: {:?}",
r.warnings
);
}
#[test]
fn deploy_audit_unset_session_secret_errors() {
let env = DeployAuditEnv {
session_secret: None,
..good_prod_env()
};
let r = run(&env);
assert!(
r.errors
.iter()
.any(|e| e.contains("RUSTANGO_SESSION_SECRET")),
"expected error for unset RUSTANGO_SESSION_SECRET, got: {:?}",
r.errors
);
}
#[test]
fn deploy_audit_short_session_secret_errors() {
    let r = run(&DeployAuditEnv {
        session_secret: Some("too-short".into()),
        ..good_prod_env()
    });
    let hit = r
        .errors
        .iter()
        .any(|msg| msg.contains("only") && msg.contains("bytes"));
    assert!(
        hit,
        "expected length error for short secret, got: {:?}",
        r.errors
    );
}
#[test]
fn deploy_audit_placeholder_session_secret_errors() {
    let r = run(&DeployAuditEnv {
        session_secret: Some("change-me-base64-encoded-32-bytes-or-more".into()),
        ..good_prod_env()
    });
    let hit = r
        .errors
        .iter()
        .any(|msg| msg.contains("placeholder") || msg.contains("change-me"));
    assert!(
        hit,
        "expected error for unchanged placeholder secret, got: {:?}",
        r.errors
    );
}
#[test]
fn deploy_audit_unset_rustango_env_warns() {
    let r = run(&DeployAuditEnv {
        rustango_env: None,
        ..good_prod_env()
    });
    let hit = r.warnings.iter().any(|msg| msg.contains("RUSTANGO_ENV"));
    assert!(
        hit,
        "expected warning for unset RUSTANGO_ENV, got: {:?}",
        r.warnings
    );
}
#[test]
fn deploy_audit_dev_rustango_env_warns() {
    let r = run(&DeployAuditEnv {
        rustango_env: Some("dev".into()),
        ..good_prod_env()
    });
    let hit = r.warnings.iter().any(|msg| msg.contains("`dev`"));
    assert!(
        hit,
        "expected warning for non-prod RUSTANGO_ENV, got: {:?}",
        r.warnings
    );
}
#[test]
fn deploy_audit_unset_database_url_errors() {
    let r = run(&DeployAuditEnv {
        database_url: None,
        ..good_prod_env()
    });
    let hit = r.errors.iter().any(|msg| msg.contains("DATABASE_URL"));
    assert!(
        hit,
        "expected error for unset DATABASE_URL, got: {:?}",
        r.errors
    );
}
#[test]
fn deploy_audit_localhost_database_url_warns() {
    let r = run(&DeployAuditEnv {
        database_url: Some("postgres://app:p@localhost/db".into()),
        ..good_prod_env()
    });
    let hit = r
        .warnings
        .iter()
        .any(|msg| msg.contains("DATABASE_URL") && msg.contains("localhost"));
    assert!(
        hit,
        "expected warning for localhost DATABASE_URL, got: {:?}",
        r.warnings
    );
}
#[test]
fn deploy_audit_localhost_apex_warns_for_tenancy() {
    let r = run(&DeployAuditEnv {
        apex_domain: Some("localhost".into()),
        ..good_prod_env()
    });
    let hit = r
        .warnings
        .iter()
        .any(|msg| msg.contains("RUSTANGO_APEX_DOMAIN"));
    assert!(
        hit,
        "expected warning for localhost apex, got: {:?}",
        r.warnings
    );
}
#[test]
fn deploy_audit_loopback_bind_warns() {
    let r = run(&DeployAuditEnv {
        bind: Some("127.0.0.1:8080".into()),
        ..good_prod_env()
    });
    let hit = r
        .warnings
        .iter()
        .any(|msg| msg.contains("RUSTANGO_BIND") && msg.contains("loopback"));
    assert!(
        hit,
        "expected warning for loopback bind, got: {:?}",
        r.warnings
    );
}
/// Run the settings-level audit for the given env tier and return findings.
#[cfg(feature = "config")]
fn settings_run(env_tier: &str, s: &crate::config::Settings) -> DeployAuditFindings {
    let mut findings = DeployAuditFindings::default();
    settings_audit_check(env_tier, s, &mut findings);
    findings
}
#[cfg(feature = "config")]
#[test]
fn settings_audit_dev_tier_is_quiet() {
    // Settings that each trigger a finding in prod must stay silent in dev.
    let mut s = crate::config::Settings::default();
    s.security.headers_preset = Some("dev".into());
    s.security.hsts_max_age_secs = Some(0);
    // Fix: the argon2 assignment and the audit call were crammed onto one
    // line; split into one statement per line per the file's convention.
    s.auth.argon2_memory_kib = Some(1024);
    let r = settings_run("dev", &s);
    assert!(
        r.warnings.is_empty(),
        "dev tier should be quiet, got: {:?}",
        r.warnings
    );
    assert!(
        r.info.is_empty(),
        "dev tier should be quiet, got: {:?}",
        r.info
    );
}
#[cfg(feature = "config")]
#[test]
fn settings_audit_prod_with_dev_headers_preset_warns() {
    let mut cfg = crate::config::Settings::default();
    cfg.security.headers_preset = Some("dev".into());
    let findings = settings_run("prod", &cfg);
    let hit = findings
        .warnings
        .iter()
        .any(|w| w.contains("headers_preset") && w.contains("dev"));
    assert!(
        hit,
        "expected warning for dev headers in prod, got: {:?}",
        findings.warnings
    );
}
#[cfg(feature = "config")]
#[test]
fn settings_audit_prod_with_zero_hsts_warns() {
    let mut cfg = crate::config::Settings::default();
    cfg.security.hsts_max_age_secs = Some(0);
    let findings = settings_run("prod", &cfg);
    let hit = findings
        .warnings
        .iter()
        .any(|w| w.contains("hsts_max_age_secs"));
    assert!(
        hit,
        "expected HSTS warning, got: {:?}",
        findings.warnings
    );
}
#[cfg(feature = "config")]
#[test]
fn settings_audit_prod_low_argon2_warns_but_unset_is_quiet() {
    // An explicitly configured argon2 memory below the audited 19456 KiB
    // floor must warn in prod.
    // Fix: the assignment and the audit call were crammed onto one line;
    // split into one statement per line per the file's convention.
    let mut s = crate::config::Settings::default();
    s.auth.argon2_memory_kib = Some(4096);
    let r = settings_run("prod", &s);
    assert!(
        r.warnings
            .iter()
            .any(|w| w.contains("argon2_memory_kib") && w.contains("19456")),
        "expected argon2 floor warning, got: {:?}",
        r.warnings
    );
    // Leaving the value unset falls back to the framework default and
    // should produce no argon2 finding at all.
    let s_default = crate::config::Settings::default();
    let r = settings_run("prod", &s_default);
    assert!(
        !r.warnings.iter().any(|w| w.contains("argon2")),
        "default argon2 should be quiet, got: {:?}",
        r.warnings
    );
}
#[cfg(feature = "config")]
#[test]
fn settings_audit_prod_long_jwt_access_ttl_warns() {
    // A 24h JWT access TTL should be flagged in prod.
    // Fix: the assignment and the audit call were crammed onto one line;
    // split into one statement per line per the file's convention.
    let mut s = crate::config::Settings::default();
    s.auth.jwt.access_ttl_secs = Some(86400);
    let r = settings_run("prod", &s);
    assert!(
        r.warnings
            .iter()
            .any(|w| w.contains("access_ttl_secs") && w.contains("86400")),
        "expected access TTL warning, got: {:?}",
        r.warnings
    );
}
#[cfg(feature = "config")]
#[test]
fn settings_audit_prod_loopback_bind_warns() {
    let mut cfg = crate::config::Settings::default();
    cfg.server.bind = Some("127.0.0.1:8080".into());
    let findings = settings_run("prod", &cfg);
    let hit = findings
        .warnings
        .iter()
        .any(|w| w.contains("[server] bind") && w.contains("loopback"));
    assert!(
        hit,
        "expected loopback warning, got: {:?}",
        findings.warnings
    );
}
#[cfg(feature = "config")]
#[test]
fn settings_audit_legacy_preset_and_unset_retention_are_info() {
    let mut cfg = crate::config::Settings::default();
    cfg.routes.legacy_preset = Some(true);
    let findings = settings_run("prod", &cfg);
    // Both conditions surface as informational notes, never as warnings.
    let legacy_noted = findings.info.iter().any(|i| i.contains("legacy_preset"));
    assert!(
        legacy_noted,
        "expected legacy_preset info, got: {:?}",
        findings.info
    );
    let retention_noted = findings.info.iter().any(|i| i.contains("retention_days"));
    assert!(
        retention_noted,
        "expected retention_days info, got: {:?}",
        findings.info
    );
    assert!(
        findings.warnings.is_empty(),
        "neither should be warnings, got: {:?}",
        findings.warnings
    );
}
#[cfg(feature = "config")]
#[test]
fn settings_audit_admin_csrf_insecure_warns_in_prod() {
    let mut cfg = crate::config::Settings::default();
    cfg.admin.csrf_cookie_secure = Some(false);
    let findings = settings_run("prod", &cfg);
    let hit = findings
        .warnings
        .iter()
        .any(|w| w.contains("[admin]") && w.contains("csrf_cookie_secure"));
    assert!(
        hit,
        "expected CSRF secure warning, got: {:?}",
        findings.warnings
    );
}
#[cfg(feature = "config")]
#[test]
fn settings_audit_admin_bad_primary_color_warns() {
    let mut cfg = crate::config::Settings::default();
    cfg.admin.primary_color = Some("not-a-color".into());
    let findings = settings_run("prod", &cfg);
    let hit = findings
        .warnings
        .iter()
        .any(|w| w.contains("[admin]") && w.contains("primary_color"));
    assert!(
        hit,
        "expected primary_color warning, got: {:?}",
        findings.warnings
    );
}
#[cfg(feature = "config")]
#[test]
fn settings_audit_admin_valid_hex_color_is_quiet() {
    // 3-, 6-, and 8-digit hex forms are all accepted.
    let cases = ["#abc", "#2c6fb0", "#2c6fb0ff"];
    for hex in cases {
        let mut cfg = crate::config::Settings::default();
        cfg.admin.primary_color = Some(hex.into());
        let findings = settings_run("prod", &cfg);
        let flagged = findings
            .warnings
            .iter()
            .any(|w| w.contains("[admin]") && w.contains("primary_color"));
        assert!(
            !flagged,
            "expected `{hex}` to be quiet, got: {:?}",
            findings.warnings
        );
    }
}
#[cfg(feature = "config")]
#[test]
fn settings_audit_admin_unknown_theme_mode_warns() {
    let mut cfg = crate::config::Settings::default();
    cfg.admin.theme_mode = Some("midnight".into());
    let findings = settings_run("prod", &cfg);
    let hit = findings
        .warnings
        .iter()
        .any(|w| w.contains("[admin]") && w.contains("theme_mode"));
    assert!(
        hit,
        "expected theme_mode warning, got: {:?}",
        findings.warnings
    );
}
#[cfg(feature = "config")]
#[test]
fn settings_audit_admin_zero_session_timeout_is_info() {
    let mut cfg = crate::config::Settings::default();
    cfg.admin.session_timeout_minutes = Some(0);
    let findings = settings_run("prod", &cfg);
    let hit = findings
        .info
        .iter()
        .any(|i| i.contains("[admin]") && i.contains("session_timeout_minutes"));
    assert!(
        hit,
        "expected session_timeout_minutes info, got: {:?}",
        findings.info
    );
}
#[cfg(feature = "config")]
#[test]
fn settings_audit_admin_trailing_slash_url_prefix_warns() {
    let mut cfg = crate::config::Settings::default();
    cfg.admin.url_prefix = Some("/admin/".into());
    let findings = settings_run("prod", &cfg);
    let hit = findings
        .warnings
        .iter()
        .any(|w| w.contains("[admin]") && w.contains("url_prefix"));
    assert!(
        hit,
        "expected url_prefix warning, got: {:?}",
        findings.warnings
    );
}
#[cfg(feature = "config")]
#[test]
fn settings_audit_admin_empty_url_prefix_is_quiet() {
    let mut cfg = crate::config::Settings::default();
    cfg.admin.url_prefix = Some("".into());
    let findings = settings_run("prod", &cfg);
    let flagged = findings
        .warnings
        .iter()
        .any(|w| w.contains("[admin]") && w.contains("url_prefix"));
    assert!(
        !flagged,
        "empty url_prefix should be quiet, got: {:?}",
        findings.warnings
    );
}
#[cfg(all(
    feature = "config",
    feature = "tenancy",
    feature = "postgres",
    any(feature = "sqlite", feature = "mysql"),
))]
#[test]
fn settings_audit_sqlite_backend_with_tenancy_warns_in_prod() {
    let mut cfg = crate::config::Settings::default();
    cfg.database.backend = Some("sqlite".into());
    let findings = settings_run("prod", &cfg);
    let hit = findings
        .warnings
        .iter()
        .any(|w| w.contains("[database]") && w.contains("tenancy"));
    assert!(
        hit,
        "expected backend × tenancy warning, got: {:?}",
        findings.warnings
    );
}
}
#[cfg(test)]
mod db_cmd_tests {
    use super::*;

    /// Build owned CLI arguments from a slice of string literals.
    fn args(s: &[&str]) -> Vec<String> {
        s.iter().map(|&x| x.to_owned()).collect()
    }

    #[test]
    fn dump_no_flags_defaults() {
        let parsed = parse_db_dump_args(&[]).unwrap();
        assert!(parsed.out.is_none());
        assert!(!parsed.data_only);
        assert!(!parsed.schema_only);
        assert!(!parsed.no_owner);
    }

    #[test]
    fn dump_out_flag_with_value() {
        let parsed = parse_db_dump_args(&args(&["--out", "/tmp/db.sql"])).unwrap();
        assert_eq!(parsed.out.as_deref(), Some("/tmp/db.sql"));
    }

    #[test]
    fn dump_short_o_flag() {
        let parsed = parse_db_dump_args(&args(&["-o", "/tmp/db.sql"])).unwrap();
        assert_eq!(parsed.out.as_deref(), Some("/tmp/db.sql"));
    }

    #[test]
    fn dump_data_only_flag() {
        let parsed = parse_db_dump_args(&args(&["--data-only"])).unwrap();
        assert!(parsed.data_only);
        assert!(!parsed.schema_only);
    }

    #[test]
    fn dump_schema_only_flag() {
        let parsed = parse_db_dump_args(&args(&["--schema-only"])).unwrap();
        assert!(parsed.schema_only);
        assert!(!parsed.data_only);
    }

    #[test]
    fn dump_no_owner_flag() {
        let parsed = parse_db_dump_args(&args(&["--no-owner"])).unwrap();
        assert!(parsed.no_owner);
    }

    #[test]
    fn dump_out_without_value_errors() {
        // `--out` with no following value must be rejected.
        assert!(parse_db_dump_args(&args(&["--out"])).is_err());
    }

    #[test]
    fn dump_data_and_schema_only_conflict() {
        // The two *-only modes are mutually exclusive.
        assert!(parse_db_dump_args(&args(&["--data-only", "--schema-only"])).is_err());
    }

    #[test]
    fn dump_unknown_flag_errors() {
        assert!(parse_db_dump_args(&args(&["--bogus"])).is_err());
    }

    #[test]
    fn dump_argv_contains_url_first() {
        let parsed = DbDumpArgs {
            out: None,
            data_only: false,
            schema_only: false,
            no_owner: false,
        };
        let argv = build_pg_dump_argv(&parsed, "postgres://u:p@h/db");
        // The connection URL must be the first pg_dump argument.
        assert_eq!(argv[0], "postgres://u:p@h/db");
    }

    #[test]
    fn dump_argv_includes_chosen_flags() {
        let parsed = DbDumpArgs {
            out: Some("/tmp/x.sql".into()),
            data_only: true,
            schema_only: false,
            no_owner: true,
        };
        let argv = build_pg_dump_argv(&parsed, "postgres://u:p@h/db");
        let has = |flag: &str| argv.contains(&flag.to_owned());
        assert!(has("--data-only"));
        assert!(has("--no-owner"));
        assert!(has("--file"));
        assert!(has("/tmp/x.sql"));
        assert!(!has("--schema-only"));
    }

    #[test]
    fn restore_requires_file() {
        assert!(parse_db_restore_args(&[]).is_err());
    }

    #[test]
    fn restore_positional_file() {
        let parsed = parse_db_restore_args(&args(&["/tmp/db.sql"])).unwrap();
        assert_eq!(parsed.file, "/tmp/db.sql");
        assert!(!parsed.clean);
    }

    #[test]
    fn restore_with_clean_flag() {
        let parsed = parse_db_restore_args(&args(&["--clean", "/tmp/db.sql"])).unwrap();
        assert!(parsed.clean);
        assert_eq!(parsed.file, "/tmp/db.sql");
    }

    #[test]
    fn restore_clean_after_file() {
        // Flag order is not significant.
        let parsed = parse_db_restore_args(&args(&["/tmp/db.sql", "--clean"])).unwrap();
        assert!(parsed.clean);
    }

    #[test]
    fn restore_two_files_errors() {
        assert!(parse_db_restore_args(&args(&["a.sql", "b.sql"])).is_err());
    }

    #[test]
    fn restore_argv_includes_on_error_stop() {
        let parsed = DbRestoreArgs {
            file: "/tmp/x.sql".into(),
            clean: false,
        };
        let argv = build_psql_argv(&parsed, "postgres://u:p@h/db");
        assert!(argv.contains(&"ON_ERROR_STOP=1".to_owned()));
        assert!(argv.contains(&"-f".to_owned()));
        assert!(argv.contains(&"/tmp/x.sql".to_owned()));
        // Without --clean, no schema teardown may be emitted.
        assert!(!argv.iter().any(|a| a.contains("DROP SCHEMA")));
    }

    #[test]
    fn restore_argv_with_clean_drops_schema() {
        let parsed = DbRestoreArgs {
            file: "/tmp/x.sql".into(),
            clean: true,
        };
        let argv = build_psql_argv(&parsed, "postgres://u:p@h/db");
        assert!(argv.iter().any(|a| a.contains("DROP SCHEMA")));
        assert!(argv.iter().any(|a| a.contains("CREATE SCHEMA")));
    }

    #[test]
    fn redact_masks_password_in_postgres_url() {
        assert_eq!(
            redact_url("postgres://alice:supersecret@localhost:5432/mydb"),
            "postgres://alice:***@localhost:5432/mydb"
        );
    }

    #[test]
    fn redact_passes_through_url_without_credentials() {
        assert_eq!(
            redact_url("postgres://localhost:5432/mydb"),
            "postgres://localhost:5432/mydb"
        );
    }

    #[test]
    fn redact_passes_through_non_urls() {
        assert_eq!(redact_url("--data-only"), "--data-only");
        assert_eq!(redact_url("/tmp/db.sql"), "/tmp/db.sql");
    }

    #[test]
    fn redact_handles_url_with_only_user() {
        // A user without a password has nothing to mask.
        assert_eq!(
            redact_url("postgres://alice@localhost/db"),
            "postgres://alice@localhost/db"
        );
    }
}