use std::path::Path;
use super::diff::{detect_changes, detect_unsupported_field_changes, SchemaChange};
use super::error::MigrateError;
use super::file::{self, extract_index, Migration, Operation};
use super::snapshot::SchemaSnapshot;
/// Generate the next migration for the entire model registry.
///
/// Diffs the current registry schema against the newest migration in `dir`
/// and writes a new migration file when anything changed.
///
/// Returns `Ok(None)` when the schema is already up to date, or an error on
/// unsupported field changes / I/O failure.
pub fn make_migrations(
    dir: &Path,
    name_override: Option<&str>,
) -> Result<Option<Migration>, MigrateError> {
    let current = SchemaSnapshot::from_registry();
    // Fixed mojibake: `¤t` (garbled `&curren;` HTML entity) restored to `&current`.
    make_migrations_from(dir, &current, name_override)
}
/// Generate the next migration for models belonging to `scope` only.
///
/// Maps the model scope onto the matching migration scope so the emitted
/// file is tagged correctly, then delegates to [`make_migrations_scoped`].
///
/// Returns `Ok(None)` when nothing changed for this scope.
pub fn make_migrations_for_scope(
    dir: &Path,
    scope: crate::core::ModelScope,
    name_override: Option<&str>,
) -> Result<Option<Migration>, MigrateError> {
    let current = SchemaSnapshot::from_registry_for_scope(scope);
    let migration_scope = match scope {
        crate::core::ModelScope::Registry => super::MigrationScope::Registry,
        crate::core::ModelScope::Tenant => super::MigrationScope::Tenant,
    };
    // Fixed mojibake: `¤t` restored to `&current`.
    make_migrations_scoped(dir, &current, scope, migration_scope, name_override)
}
/// Generate the next migration for a single app, writing into
/// `<project_root>/<app>/migrations` (created on demand).
///
/// Returns `Ok(None)` when the app's schema matches its latest migration.
pub fn make_migrations_for_app(
    project_root: &Path,
    app: &str,
    name_override: Option<&str>,
) -> Result<Option<Migration>, MigrateError> {
    let app_dir = project_root.join(app).join("migrations");
    if !app_dir.exists() {
        std::fs::create_dir_all(&app_dir)?;
    }
    let current = SchemaSnapshot::from_registry_for_app(app);
    // Fixed mojibake: `¤t` restored to `&current`.
    make_migrations_from(&app_dir, &current, name_override)
}
/// Compute and persist the next migration for a specific scope.
///
/// Only prior migrations whose `scope` matches `migration_scope` contribute
/// to the baseline snapshot, but numbering (`next_index`) walks the whole
/// directory so files of different scopes never collide on index.
///
/// Returns `Ok(None)` when `current` matches the reconstructed baseline;
/// errors on field changes that v0.3 cannot express, or on I/O failure.
pub fn make_migrations_scoped(
    dir: &Path,
    current: &SchemaSnapshot,
    model_scope: crate::core::ModelScope,
    migration_scope: super::MigrationScope,
    name_override: Option<&str>,
) -> Result<Option<Migration>, MigrateError> {
    let prior = file::list_dir(dir)?;
    // The baseline schema is reconstructed from this scope's migrations only.
    let prior_scoped: Vec<&Migration> = prior
        .iter()
        .filter(|m| m.scope == migration_scope)
        .collect();
    let mut prev_snapshot = prior_scoped
        .last()
        .map_or_else(empty_snapshot, |m| m.snapshot.clone());
    // Scoped migrations that are NOT reachable from the newest one via
    // `prev` links are side branches; fold their objects into the baseline
    // so they are not re-created by the diff.
    let in_chain = chain_membership(&prior_scoped);
    for m in &prior_scoped {
        if !in_chain.contains(m.name.as_str()) {
            fold_in_missing_tables(&mut prev_snapshot, &m.snapshot);
        }
    }
    // Framework (`rustango_`-prefixed) objects may have been bootstrapped by
    // a migration in another scope; treat them as already present.
    fold_in_framework_tables(&mut prev_snapshot, current);
    let prev_snapshot = prev_snapshot.filtered_to_scope(model_scope);
    let prev_name = prior_scoped.last().map(|m| m.name.clone());
    // Index from the WHOLE directory (not just this scope) so a new file
    // never collides with an existing file of another scope.
    let next_index = prior
        .last()
        .and_then(|m| extract_index(&m.name))
        .map_or(1, |n| n + 1);
    let unsupported = detect_unsupported_field_changes(&prev_snapshot, current);
    if !unsupported.is_empty() {
        return Err(MigrateError::Validation(format!(
            "field metadata changed but v0.3 has no AlterField operation \
            (deferred to v0.4); the following changes need explicit migration \
            authoring:\n - {}",
            unsupported.join("\n - "),
        )));
    }
    let changes = detect_changes(&prev_snapshot, current);
    if changes.is_empty() {
        return Ok(None);
    }
    // Explicit name wins; otherwise derive one from the change set.
    let suffix = name_override.map_or_else(
        || auto_name(&changes, prior_scoped.is_empty()),
        str::to_owned,
    );
    let name = format!("{next_index:04}_{suffix}");
    let created_at = chrono::Utc::now().to_rfc3339();
    let mig = Migration {
        name: name.clone(),
        created_at,
        prev: prev_name,
        atomic: true,
        scope: migration_scope,
        snapshot: current.clone(),
        forward: changes.into_iter().map(Operation::Schema).collect(),
    };
    if !dir.exists() {
        std::fs::create_dir_all(dir)?;
    }
    let path = dir.join(format!("{name}.json"));
    file::write(&path, &mig)?;
    Ok(Some(mig))
}
/// Compute and persist the next migration in `dir` for `current`, using the
/// default migration scope.
///
/// The baseline is the snapshot stored in the newest migration on disk (an
/// empty schema when `dir` has none). Returns `Ok(None)` when there is
/// nothing to migrate; errors on field changes that v0.3 cannot express, or
/// on I/O failure.
pub fn make_migrations_from(
    dir: &Path,
    current: &SchemaSnapshot,
    name_override: Option<&str>,
) -> Result<Option<Migration>, MigrateError> {
    let existing = file::list_dir(dir)?;
    let newest = existing.last();
    // Baseline schema: snapshot of the newest migration, or empty on first run.
    let baseline = newest.map_or_else(empty_snapshot, |m| m.snapshot.clone());
    let parent = newest.map(|m| m.name.clone());
    // Numbering continues from the newest file's numeric prefix.
    let next_index = newest
        .and_then(|m| extract_index(&m.name))
        .map_or(1, |n| n + 1);
    let unsupported = detect_unsupported_field_changes(&baseline, current);
    if !unsupported.is_empty() {
        return Err(MigrateError::Validation(format!(
            "field metadata changed but v0.3 has no AlterField operation \
            (deferred to v0.4); the following changes need explicit migration \
            authoring:\n - {}",
            unsupported.join("\n - "),
        )));
    }
    let changes = detect_changes(&baseline, current);
    if changes.is_empty() {
        return Ok(None);
    }
    // Explicit name wins; otherwise derive one from the change set.
    let suffix = match name_override {
        Some(given) => given.to_owned(),
        None => auto_name(&changes, existing.is_empty()),
    };
    let name = format!("{next_index:04}_{suffix}");
    let mig = Migration {
        name: name.clone(),
        created_at: chrono::Utc::now().to_rfc3339(),
        prev: parent,
        atomic: true,
        scope: super::MigrationScope::default(),
        snapshot: current.clone(),
        forward: changes.into_iter().map(Operation::Schema).collect(),
    };
    if !dir.exists() {
        std::fs::create_dir_all(dir)?;
    }
    file::write(&dir.join(format!("{name}.json")), &mig)?;
    Ok(Some(mig))
}
/// Collect the names forming the ancestry chain of the newest migration in
/// `prior_scoped`, by walking `prev` pointers backwards.
///
/// A repeated name terminates the walk, so malformed cyclic chains cannot
/// loop forever. Returns the empty set when there are no migrations.
fn chain_membership(prior_scoped: &[&Migration]) -> std::collections::HashSet<String> {
    let mut visited = std::collections::HashSet::new();
    if let Some(newest) = prior_scoped.last() {
        let mut cursor = Some(newest.name.as_str());
        while let Some(node) = cursor {
            if !visited.insert(node.to_owned()) {
                // Already seen: cycle guard.
                break;
            }
            cursor = prior_scoped
                .iter()
                .find(|m| m.name == node)
                .and_then(|m| m.prev.as_deref());
        }
    }
    visited
}
fn fold_in_framework_tables(into: &mut SchemaSnapshot, current: &SchemaSnapshot) {
for t in ¤t.tables {
if t.name.starts_with("rustango_") && !into.tables.iter().any(|x| x.name == t.name) {
into.tables.push(t.clone());
}
}
for m2m in ¤t.m2m_tables {
if m2m.through.starts_with("rustango_")
&& !into.m2m_tables.iter().any(|x| x.through == m2m.through)
{
into.m2m_tables.push(m2m.clone());
}
}
for idx in ¤t.indexes {
if idx.table.starts_with("rustango_") && !into.indexes.iter().any(|x| x.name == idx.name) {
into.indexes.push(idx.clone());
}
}
for c in ¤t.checks {
if c.table.starts_with("rustango_") && !into.checks.iter().any(|x| x.name == c.name) {
into.checks.push(c.clone());
}
}
}
/// Merge every schema object from `from` into `into`, skipping entries that
/// `into` already holds (tables/indexes/checks matched by name, M2M tables
/// by their `through` table).
fn fold_in_missing_tables(into: &mut SchemaSnapshot, from: &SchemaSnapshot) {
    for table in &from.tables {
        let known = into.tables.iter().any(|existing| existing.name == table.name);
        if !known {
            into.tables.push(table.clone());
        }
    }
    for join in &from.m2m_tables {
        let known = into
            .m2m_tables
            .iter()
            .any(|existing| existing.through == join.through);
        if !known {
            into.m2m_tables.push(join.clone());
        }
    }
    for index in &from.indexes {
        let known = into.indexes.iter().any(|existing| existing.name == index.name);
        if !known {
            into.indexes.push(index.clone());
        }
    }
    for check in &from.checks {
        let known = into.checks.iter().any(|existing| existing.name == check.name);
        if !known {
            into.checks.push(check.clone());
        }
    }
}
/// A schema snapshot with no tables, M2M join tables, indexes, or check
/// constraints — the baseline used for a very first migration.
fn empty_snapshot() -> SchemaSnapshot {
    SchemaSnapshot {
        tables: Vec::new(),
        m2m_tables: Vec::new(),
        indexes: Vec::new(),
        checks: Vec::new(),
    }
}
/// Derive a snake_case name suffix describing `changes`.
///
/// A single change gets a precise name (e.g. `add_slug_to_article`); a first
/// migration consisting solely of `CreateTable`s is named `initial`; a set
/// of only create-type changes is named after up to three created tables;
/// anything else falls back to `auto`. `is_first` indicates no prior
/// migrations exist for this scope.
fn auto_name(changes: &[SchemaChange], is_first: bool) -> String {
    match changes {
        // --- single-change cases: name after the one change ---
        [SchemaChange::CreateTable(t)] => {
            if is_first {
                "initial".into()
            } else {
                format!("create_{t}")
            }
        }
        [SchemaChange::DropTable(t)] => format!("drop_{t}"),
        [SchemaChange::AddColumn { table, column }] => format!("add_{column}_to_{table}"),
        [SchemaChange::DropColumn { table, column }] => format!("drop_{column}_from_{table}"),
        [SchemaChange::AlterColumnType {
            table,
            column,
            from,
            to,
        }] => format!("alter_{column}_on_{table}_{from}_to_{to}"),
        [SchemaChange::AlterColumnNullable {
            table,
            column,
            nullable,
        }] => {
            if *nullable {
                format!("make_{column}_on_{table}_nullable")
            } else {
                format!("make_{column}_on_{table}_not_null")
            }
        }
        [SchemaChange::AlterColumnDefault { table, column, .. }] => {
            format!("alter_default_of_{column}_on_{table}")
        }
        [SchemaChange::AlterColumnMaxLength { table, column, .. }] => {
            format!("alter_max_length_of_{column}_on_{table}")
        }
        [SchemaChange::RenameTable { old_name, new_name }] => {
            format!("rename_{old_name}_to_{new_name}")
        }
        [SchemaChange::RenameColumn {
            table,
            old_column,
            new_column,
        }] => format!("rename_{old_column}_to_{new_column}_on_{table}"),
        [SchemaChange::CreateIndex { name, .. }] => format!("create_index_{name}"),
        [SchemaChange::DropIndex { name }] => format!("drop_index_{name}"),
        [SchemaChange::AddCheckConstraint { name, .. }] => format!("add_check_{name}"),
        [SchemaChange::DropCheckConstraint { name, .. }] => format!("drop_check_{name}"),
        [SchemaChange::CreateM2MTable { through, .. }] => format!("create_m2m_{through}"),
        [SchemaChange::DropM2MTable { through }] => format!("drop_m2m_{through}"),
        // First migration made only of CreateTables: canonical "initial".
        // (Must precede the next arm, which also matches this pattern.)
        many if is_first
            && many
                .iter()
                .all(|c| matches!(c, SchemaChange::CreateTable(_))) =>
        {
            "initial".into()
        }
        // Only create-type changes: name after the created tables.
        many if many.iter().all(|c| {
            matches!(
                c,
                SchemaChange::CreateTable(_)
                    | SchemaChange::CreateIndex { .. }
                    | SchemaChange::CreateM2MTable { .. }
            )
        }) =>
        {
            let mut tables: Vec<&str> = many
                .iter()
                .filter_map(|c| match c {
                    SchemaChange::CreateTable(t) => Some(t.as_str()),
                    _ => None,
                })
                .collect();
            tables.sort_unstable();
            tables.dedup();
            if tables.is_empty() {
                // Only indexes/M2M tables were created; no table to name after.
                "auto".into()
            } else if tables.len() <= 3 {
                format!("create_{}", tables.join("_and_"))
            } else {
                // Cap the name length: first three tables plus "_etc".
                format!("create_{}_etc", tables[..3].join("_and_"))
            }
        }
        // Mixed change sets get a generic name.
        _ => "auto".into(),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::core::ModelScope;
    use crate::migrate::snapshot::{FieldSnapshot, SchemaSnapshot, TableSnapshot};
    use crate::migrate::MigrationScope;

    #[test]
    fn auto_name_initial_for_first_migration_with_create_tables() {
        let changes = vec![
            SchemaChange::CreateTable("a".into()),
            SchemaChange::CreateTable("b".into()),
        ];
        assert_eq!(auto_name(&changes, true), "initial");
    }

    #[test]
    fn auto_name_single_create_table_after_initial() {
        let changes = vec![SchemaChange::CreateTable("foo".into())];
        assert_eq!(auto_name(&changes, false), "create_foo");
    }

    #[test]
    fn auto_name_single_drop_table() {
        let changes = vec![SchemaChange::DropTable("ghost".into())];
        assert_eq!(auto_name(&changes, false), "drop_ghost");
    }

    #[test]
    fn auto_name_add_column() {
        let changes = vec![SchemaChange::AddColumn {
            table: "article".into(),
            column: "slug".into(),
        }];
        assert_eq!(auto_name(&changes, false), "add_slug_to_article");
    }

    #[test]
    fn auto_name_drop_column() {
        let changes = vec![SchemaChange::DropColumn {
            table: "article".into(),
            column: "deprecated".into(),
        }];
        assert_eq!(auto_name(&changes, false), "drop_deprecated_from_article");
    }

    #[test]
    fn auto_name_mixed_falls_back_to_auto() {
        let changes = vec![
            SchemaChange::CreateTable("foo".into()),
            SchemaChange::AddColumn {
                table: "bar".into(),
                column: "baz".into(),
            },
        ];
        assert_eq!(auto_name(&changes, false), "auto");
    }

    /// Build a snapshot containing only `tables`.
    fn snap_with(tables: Vec<TableSnapshot>) -> SchemaSnapshot {
        SchemaSnapshot {
            tables,
            m2m_tables: vec![],
            indexes: vec![],
            checks: vec![],
        }
    }

    /// A minimal table with a single auto-increment `id` primary key.
    fn t(name: &str) -> TableSnapshot {
        TableSnapshot {
            name: name.into(),
            model: name.into(),
            fields: vec![FieldSnapshot {
                name: "id".into(),
                column: "id".into(),
                ty: "i64".into(),
                nullable: false,
                primary_key: true,
                max_length: None,
                min: None,
                max: None,
                default: None,
                auto: true,
                unique: false,
                fk: None,
            }],
            composite_fks: vec![],
        }
    }

    #[test]
    fn make_migrations_scoped_with_no_changes_returns_none() {
        let dir = tempdir();
        let snap = snap_with(vec![t("rustango_users")]);
        let prior = Migration {
            name: "0001_initial".into(),
            created_at: "2026-01-01T00:00:00Z".into(),
            prev: None,
            atomic: true,
            scope: MigrationScope::Tenant,
            snapshot: snap.clone(),
            forward: vec![],
        };
        std::fs::write(
            dir.join("0001_initial.json"),
            serde_json::to_string(&prior).unwrap(),
        )
        .unwrap();
        let r = make_migrations_scoped(
            &dir,
            &snap,
            ModelScope::Tenant,
            MigrationScope::Tenant,
            None,
        )
        .unwrap();
        assert!(r.is_none(), "no changes should yield no file");
        let _ = std::fs::remove_dir_all(&dir);
    }

    #[test]
    fn make_migrations_scoped_emits_with_correct_migration_scope() {
        let dir = tempdir();
        let snap = snap_with(vec![t("posts")]);
        let mig = make_migrations_scoped(
            &dir,
            &snap,
            ModelScope::Tenant,
            MigrationScope::Tenant,
            None,
        )
        .unwrap()
        .expect("expected a migration file");
        assert_eq!(mig.scope, MigrationScope::Tenant);
        assert!(mig.name.starts_with("0001_"), "got: {}", mig.name);
        let _ = std::fs::remove_dir_all(&dir);
    }

    #[test]
    fn make_migrations_scoped_filters_prev_to_scope_for_old_bootstrap_layout() {
        let dir = tempdir();
        let prev = Migration {
            name: "0001_initial".into(),
            created_at: "2026-01-01T00:00:00Z".into(),
            prev: None,
            atomic: true,
            scope: MigrationScope::Tenant,
            snapshot: snap_with(vec![t("rustango_users")]),
            forward: vec![],
        };
        let path = dir.join("0001_initial.json");
        std::fs::write(&path, serde_json::to_string(&prev).unwrap()).unwrap();
        let current = snap_with(vec![t("posts"), t("rustango_users")]);
        // Fixed mojibake: `¤t` restored to `&current`.
        let mig = make_migrations_scoped(
            &dir,
            &current,
            ModelScope::Tenant,
            MigrationScope::Tenant,
            None,
        )
        .unwrap()
        .expect("expected a migration");
        assert_eq!(mig.scope, MigrationScope::Tenant);
        assert_eq!(mig.forward.len(), 1, "got: {:?}", mig.forward);
        let _ = std::fs::remove_dir_all(&dir);
    }

    #[test]
    fn make_migrations_scoped_indexes_walk_full_dir_not_just_scope() {
        let dir = tempdir();
        let r = Migration {
            name: "0001_registry_initial".into(),
            created_at: "2026-01-01T00:00:00Z".into(),
            prev: None,
            atomic: true,
            scope: MigrationScope::Registry,
            snapshot: snap_with(vec![t("rustango_orgs")]),
            forward: vec![],
        };
        let t1 = Migration {
            name: "0002_initial".into(),
            created_at: "2026-01-02T00:00:00Z".into(),
            prev: None,
            atomic: true,
            scope: MigrationScope::Tenant,
            snapshot: snap_with(vec![t("rustango_users")]),
            forward: vec![],
        };
        std::fs::write(
            dir.join("0001_registry_initial.json"),
            serde_json::to_string(&r).unwrap(),
        )
        .unwrap();
        std::fs::write(
            dir.join("0002_initial.json"),
            serde_json::to_string(&t1).unwrap(),
        )
        .unwrap();
        let current = snap_with(vec![t("posts"), t("rustango_users")]);
        // Fixed mojibake: `¤t` restored to `&current`.
        let mig = make_migrations_scoped(
            &dir,
            &current,
            ModelScope::Tenant,
            MigrationScope::Tenant,
            None,
        )
        .unwrap()
        .expect("expected a migration");
        assert!(
            mig.name.starts_with("0003_"),
            "next migration must be 0003 to avoid collision with 0002, got: {}",
            mig.name
        );
        let _ = std::fs::remove_dir_all(&dir);
    }

    /// Create a unique, empty temp directory per call (pid + atomic counter
    /// keep parallel test runs from colliding).
    fn tempdir() -> std::path::PathBuf {
        use std::sync::atomic::{AtomicU64, Ordering};
        static N: AtomicU64 = AtomicU64::new(0);
        let n = N.fetch_add(1, Ordering::SeqCst);
        let pid = std::process::id();
        let mut p = std::env::temp_dir();
        p.push(format!("rustango_make_scope_test_{pid}_{n}"));
        let _ = std::fs::remove_dir_all(&p);
        std::fs::create_dir_all(&p).unwrap();
        p
    }
}