#![cfg(feature = "failpoints")]
mod helpers;
use fail::FailScenario;
use futures::FutureExt;
use omnigraph::db::Omnigraph;
use omnigraph::failpoints::ScopedFailPoint;
use helpers::recovery::{
FollowUpMutation, RecoveryExpectation, TableExpectation, assert_post_recovery_invariants,
branch_head_commit_id, single_sidecar_operation_id,
};
use helpers::{MUTATION_QUERIES, mixed_params, mutate_main, version_main};
// Minimal single-type schema used as the starting point for the schema-apply tests.
const SCHEMA_V1: &str = "node Person { name: String @key }\n";
// SCHEMA_V1 plus a second node type (Company); applying it on top of SCHEMA_V1
// exercises the "table added by schema change" recovery paths below.
const SCHEMA_V2_ADDED_TYPE: &str =
    "node Person { name: String @key }\nnode Company { name: String @key }\n";
/// Computes the on-disk URI of a node table by hashing the type name with
/// 64-bit FNV-1a, mirroring the repository's table-path layout
/// (`<root>/nodes/<16-hex-digit hash>`). Trailing slashes on `root` are ignored.
fn node_table_uri(root: &str, type_name: &str) -> String {
    // Standard FNV-1a 64-bit parameters.
    const FNV_OFFSET_BASIS: u64 = 0xcbf2_9ce4_8422_2325;
    const FNV_PRIME: u64 = 0x100_0000_01b3;
    let digest = type_name
        .bytes()
        .fold(FNV_OFFSET_BASIS, |acc, byte| {
            (acc ^ u64::from(byte)).wrapping_mul(FNV_PRIME)
        });
    format!("{}/nodes/{digest:016x}", root.trim_end_matches('/'))
}
// Smoke test: the `branch_create.after_manifest_branch_create` failpoint fires and
// surfaces as an error whose message names the triggered failpoint.
#[tokio::test]
async fn branch_create_failpoint_triggers() {
    let _fail_scenario = FailScenario::setup();
    let tmp = tempfile::tempdir().unwrap();
    let repo_uri = tmp.path().to_str().unwrap();
    let graph = Omnigraph::init(repo_uri, helpers::TEST_SCHEMA).await.unwrap();
    // The guard arms the failpoint for the remainder of this scope.
    let _guard = ScopedFailPoint::new("branch_create.after_manifest_branch_create", "return");
    let failure = graph.branch_create("feature").await.unwrap_err();
    let message = failure.to_string();
    assert!(
        message.contains("injected failpoint triggered: branch_create.after_manifest_branch_create")
    );
}
// Smoke test: the `graph_publish.before_commit_append` failpoint fires during a
// main-branch mutation and the resulting error names the triggered failpoint.
#[tokio::test(flavor = "multi_thread")]
async fn graph_publish_failpoint_triggers_before_commit_append() {
    let _fail_scenario = FailScenario::setup();
    let tmp = tempfile::tempdir().unwrap();
    let mut graph = Omnigraph::init(tmp.path().to_str().unwrap(), helpers::TEST_SCHEMA)
        .await
        .unwrap();
    // Arm the failpoint before issuing the mutation.
    let _guard = ScopedFailPoint::new("graph_publish.before_commit_append", "return");
    let insert_params = mixed_params(&[("$name", "Eve")], &[("$age", 22)]);
    let failure = mutate_main(&mut graph, MUTATION_QUERIES, "insert_person", &insert_params)
        .await
        .unwrap_err();
    let message = failure.to_string();
    assert!(
        message.contains("injected failpoint triggered: graph_publish.before_commit_append")
    );
}
// A crash injected after the schema staging files are written (but before the apply
// completes) must be rolled FORWARD on the next open: the new schema becomes live
// and no *.staging files remain.
#[tokio::test]
async fn schema_apply_pre_commit_crash_rolls_forward_via_sidecar() {
    let _scenario = FailScenario::setup();
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap().to_string();
    // Inner scope: the db handle (and failpoint guard) drop before recovery runs.
    {
        let db = Omnigraph::init(&uri, SCHEMA_V1).await.unwrap();
        let _failpoint = ScopedFailPoint::new("schema_apply.after_staging_write", "return");
        let err = db.apply_schema(SCHEMA_V2_ADDED_TYPE).await.unwrap_err();
        assert!(
            err.to_string()
                .contains("injected failpoint triggered: schema_apply.after_staging_write"),
            "got: {}",
            err
        );
    }
    // Re-opening the repo triggers recovery.
    let db = Omnigraph::open(&uri).await.unwrap();
    assert_eq!(
        db.schema_source().as_str(),
        SCHEMA_V2_ADDED_TYPE,
        "live schema must reflect the rolled-forward apply (Company added)"
    );
    assert_no_staging_files(dir.path());
    // count_rows itself asserts the Company table is reachable; zero rows expected.
    let company_rows = helpers::count_rows(&db, "node:Company").await;
    assert_eq!(
        company_rows, 0,
        "node:Company must have a manifest entry post-recovery"
    );
}
// A crash injected AFTER the manifest commit of a schema apply must still leave the
// new schema live after reopen, with staging files cleaned up.
#[tokio::test]
async fn schema_apply_recovers_post_commit_crash() {
    let _scenario = FailScenario::setup();
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap().to_string();
    // Inner scope: crash mid-apply, then drop the handle.
    {
        let db = Omnigraph::init(&uri, SCHEMA_V1).await.unwrap();
        let _failpoint = ScopedFailPoint::new("schema_apply.after_manifest_commit", "return");
        let err = db.apply_schema(SCHEMA_V2_ADDED_TYPE).await.unwrap_err();
        assert!(
            err.to_string()
                .contains("injected failpoint triggered: schema_apply.after_manifest_commit"),
            "got: {}",
            err
        );
    }
    // Reopen: recovery must complete the apply.
    let db = Omnigraph::open(&uri).await.unwrap();
    assert_eq!(db.schema_source().as_str(), SCHEMA_V2_ADDED_TYPE);
    assert_no_staging_files(dir.path());
}
// Simulates a crash mid-way through the staging→live rename step: after a successful
// apply we manually re-create leftover *.staging copies. Reopen must tolerate the
// stale staging files and clean them up without disturbing the live schema.
#[tokio::test]
async fn schema_apply_recovers_partial_rename() {
    let _scenario = FailScenario::setup();
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap().to_string();
    {
        let db = Omnigraph::init(&uri, SCHEMA_V1).await.unwrap();
        db.apply_schema(SCHEMA_V2_ADDED_TYPE).await.unwrap();
    }
    // Fabricate leftover staging files by copying the live files back.
    std::fs::copy(
        dir.path().join("_schema.ir.json"),
        dir.path().join("_schema.ir.json.staging"),
    )
    .unwrap();
    std::fs::copy(
        dir.path().join("__schema_state.json"),
        dir.path().join("__schema_state.json.staging"),
    )
    .unwrap();
    let db = Omnigraph::open(&uri).await.unwrap();
    assert_eq!(db.schema_source().as_str(), SCHEMA_V2_ADDED_TYPE);
    assert_no_staging_files(dir.path());
}
// A mutation that crashes between finalize and publisher must leave exactly one
// recovery sidecar; the next open rolls the mutation FORWARD (Eve becomes visible),
// and a follow-up mutation afterwards lands normally.
#[tokio::test]
async fn recovery_rolls_forward_after_finalize_publisher_failure() {
    let _scenario = FailScenario::setup();
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap().to_string();
    let operation_id;
    // Inner scope: fail the mutation, capture the sidecar id, drop the handle.
    {
        let mut db = Omnigraph::init(&uri, helpers::TEST_SCHEMA).await.unwrap();
        let _failpoint = ScopedFailPoint::new("mutation.post_finalize_pre_publisher", "return");
        let err = mutate_main(
            &mut db,
            MUTATION_QUERIES,
            "insert_person",
            &mixed_params(&[("$name", "Eve")], &[("$age", 22)]),
        )
        .await
        .unwrap_err();
        assert!(
            err.to_string()
                .contains("injected failpoint triggered: mutation.post_finalize_pre_publisher"),
            "unexpected error: {err}"
        );
        // The crash point is past finalize, so a sidecar must have been persisted.
        let recovery_dir = dir.path().join("__recovery");
        let sidecars: Vec<_> = std::fs::read_dir(&recovery_dir)
            .unwrap()
            .filter_map(|e| e.ok())
            .collect();
        assert_eq!(
            sidecars.len(),
            1,
            "exactly one sidecar should persist after the finalize failure"
        );
        operation_id = single_sidecar_operation_id(dir.path());
    }
    // Reopen: the open path runs recovery and rolls the insert forward.
    let db = Omnigraph::open(&uri).await.unwrap();
    let person_count = helpers::count_rows(&db, "node:Person").await;
    assert_eq!(
        person_count, 1,
        "exactly one person (Eve) must be visible after roll-forward"
    );
    drop(db);
    // Shared invariant checks, including that a follow-up insert (Frank) succeeds.
    assert_post_recovery_invariants(
        dir.path(),
        &operation_id,
        RecoveryExpectation::RolledForward {
            tables: vec![TableExpectation::main("node:Person").follow_up_mutation(
                FollowUpMutation::new(
                    "main",
                    MUTATION_QUERIES,
                    "insert_person",
                    mixed_params(&[("$name", "Frank")], &[("$age", 33)]),
                ),
            )],
        },
    )
    .await
    .unwrap();
    let db = Omnigraph::open(&uri).await.unwrap();
    let person_count = helpers::count_rows(&db, "node:Person").await;
    assert_eq!(
        person_count, 2,
        "Frank's insert must land normally after recovery"
    );
}
// Race a delete against a concurrent update: the delete is paused at a failpoint
// while another writer advances node:Person; once resumed, the delete must be
// rejected as stale — but only AFTER its primary inline delete advanced the table,
// so recovery must restore the net row counts.
#[tokio::test]
async fn inline_delete_conflict_writes_sidecar_before_rejecting() {
    let _scenario = FailScenario::setup();
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap().to_string();
    let db = helpers::init_and_load(&dir).await;
    // Pin the pre-race table version so we can later show the delete advanced it.
    let pre_snapshot = db
        .snapshot_of(omnigraph::db::ReadTarget::branch("main"))
        .await
        .unwrap();
    let pre_person_pin = pre_snapshot.entry("node:Person").unwrap().table_version;
    let person_uri = node_table_uri(&uri, "Person");
    {
        // "pause" blocks the delete mid-flight instead of erroring.
        let _pause_delete = ScopedFailPoint::new("mutation.delete_node_pre_primary_delete", "pause");
        let delete_params = helpers::params(&[("$name", "Alice")]);
        let delete = db.mutate(
            "main",
            MUTATION_QUERIES,
            "remove_person",
            &delete_params,
        );
        tokio::pin!(delete);
        let mut concurrent_update_succeeded = false;
        // Poll the paused delete (now_or_never) while retrying a concurrent update
        // from a separate read-only handle until it lands.
        for _ in 0..50 {
            if delete.as_mut().now_or_never().is_some() {
                panic!("delete mutation completed before primary-delete failpoint was released");
            }
            let mut concurrent = Omnigraph::open_read_only(&uri).await.unwrap();
            if mutate_main(
                &mut concurrent,
                MUTATION_QUERIES,
                "set_age",
                &mixed_params(&[("$name", "Bob")], &[("$age", 26)]),
            )
            .await
            .is_ok()
            {
                concurrent_update_succeeded = true;
                break;
            }
            tokio::time::sleep(std::time::Duration::from_millis(20)).await;
        }
        assert!(concurrent_update_succeeded, "concurrent update must land while delete is paused");
        // Release the pause; the delete resumes and must now hit the version conflict.
        fail::remove("mutation.delete_node_pre_primary_delete");
        let err = delete.await.unwrap_err();
        // Error surface differs by layer; accept any of the conflict phrasings.
        assert!(
            err.to_string().contains("stale view of 'node:Person'")
                || err.to_string().contains("ExpectedVersionMismatch")
                || err.to_string().contains("expected version mismatch"),
            "unexpected error: {err}",
        );
    }
    // The primary inline delete committed to the table before the rejection,
    // so the raw dataset head must have moved past the pre-race pin.
    let person_head = lance::Dataset::open(&person_uri)
        .await
        .unwrap()
        .version()
        .version;
    assert!(
        person_head > pre_person_pin,
        "primary inline delete must have advanced node:Person before rejecting"
    );
    // After recovery, net row counts are unchanged by the rejected delete.
    let db = Omnigraph::open(&uri).await.unwrap();
    assert_eq!(
        helpers::count_rows(&db, "node:Person").await,
        4,
        "manifest-conflicted delete must not remove net Person rows after recovery"
    );
    assert_eq!(
        helpers::count_rows(&db, "edge:Knows").await,
        3,
        "manifest-conflicted delete must not remove net Knows rows after recovery"
    );
}
// A JSONL load onto a feature branch that crashes post-finalize must roll forward
// on the feature branch only: main's row count stays untouched, and the recovery
// sidecar records the main-manifest pin and the feature branch's parent commit.
#[tokio::test]
async fn recovery_rolls_forward_load_on_feature_branch() {
    use omnigraph::loader::LoadMode;
    let _scenario = FailScenario::setup();
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap().to_string();
    let operation_id;
    let main_person_pin;
    let feature_parent_commit_id;
    // Inner scope: set up the branch, record pins, then crash the load.
    {
        let db = Omnigraph::init(&uri, helpers::TEST_SCHEMA).await.unwrap();
        db.branch_create("feature").await.unwrap();
        db.mutate(
            "feature",
            MUTATION_QUERIES,
            "insert_person",
            &mixed_params(&[("$name", "BeforeLoad")], &[("$age", 40)]),
        )
        .await
        .unwrap();
        // Pin main's Person table version — it must not move during recovery.
        main_person_pin = db
            .snapshot_of(omnigraph::db::ReadTarget::branch("main"))
            .await
            .unwrap()
            .entry("node:Person")
            .expect("main must have Person")
            .table_version;
        feature_parent_commit_id = branch_head_commit_id(dir.path(), "feature").await.unwrap();
        let _failpoint = ScopedFailPoint::new("mutation.post_finalize_pre_publisher", "return");
        let err = db
            .load(
                "feature",
                r#"{"type":"Person","data":{"name":"FeatureLoad","age":41}}
"#,
                LoadMode::Append,
            )
            .await
            .unwrap_err();
        assert!(
            err.to_string()
                .contains("injected failpoint triggered: mutation.post_finalize_pre_publisher"),
            "unexpected error: {err}"
        );
        operation_id = single_sidecar_operation_id(dir.path());
    }
    // Reopen runs recovery: the loaded row appears on feature, not on main.
    let db = Omnigraph::open(&uri).await.unwrap();
    assert_eq!(
        helpers::count_rows_branch(&db, "feature", "node:Person").await,
        2,
        "feature branch load row must be visible after recovery"
    );
    assert_eq!(
        helpers::count_rows(&db, "node:Person").await,
        0,
        "feature branch load recovery must not publish the row to main"
    );
    drop(db);
    assert_post_recovery_invariants(
        dir.path(),
        &operation_id,
        RecoveryExpectation::RolledForward {
            tables: vec![
                TableExpectation::branch("node:Person", "feature")
                    .expected_main_manifest_pin(main_person_pin)
                    .expected_recovery_parent_commit_id(feature_parent_commit_id)
                    .follow_up_mutation(FollowUpMutation::new(
                        "feature",
                        MUTATION_QUERIES,
                        "insert_person",
                        mixed_params(&[("$name", "AfterLoad")], &[("$age", 42)]),
                    )),
            ],
        },
    )
    .await
    .unwrap();
    // The follow-up insert (AfterLoad) issued by the invariant checker must be
    // visible on feature and must still not move main.
    let db = Omnigraph::open(&uri).await.unwrap();
    assert_eq!(
        helpers::count_rows_branch(&db, "feature", "node:Person").await,
        3,
        "follow-up feature mutation must succeed after load recovery"
    );
    assert_eq!(
        helpers::count_rows(&db, "node:Person").await,
        0,
        "follow-up feature mutation must not move main"
    );
}
// ensure_indices on a feature branch that crashes between phase B and the manifest
// commit must roll forward on reopen, leaving feature rows intact and main untouched.
// Setup deliberately drops an index and publishes that table head via a test-only
// hook so ensure_indices actually has rebuild work to do.
#[tokio::test]
async fn recovery_rolls_forward_ensure_indices_on_feature_branch() {
    use lance_index::DatasetIndexExt;
    use omnigraph::loader::{LoadMode, load_jsonl};
    use omnigraph::table_store::TableStore;
    let _scenario = FailScenario::setup();
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap().to_string();
    let operation_id;
    let feature_parent_commit_id;
    let main_person_pin;
    let mut db = Omnigraph::init(&uri, helpers::TEST_SCHEMA).await.unwrap();
    load_jsonl(
        &mut db,
        r#"{"type":"Person","data":{"name":"alice","age":30}}
"#,
        LoadMode::Append,
    )
    .await
    .unwrap();
    db.branch_create("feature").await.unwrap();
    db.mutate(
        "feature",
        MUTATION_QUERIES,
        "insert_person",
        &mixed_params(&[("$name", "BeforeEnsure")], &[("$age", 42)]),
    )
    .await
    .unwrap();
    // Pin main's Person version — branch recovery must not move it.
    main_person_pin = db
        .snapshot_of(omnigraph::db::ReadTarget::branch("main"))
        .await
        .unwrap()
        .entry("node:Person")
        .expect("main must have Person")
        .table_version;
    let person_uri = node_table_uri(&uri, "Person");
    let store = TableStore::new(&uri);
    let mut ds = store
        .open_dataset_head(&person_uri, Some("feature"))
        .await
        .unwrap();
    // Drop the id index so ensure_indices has something to rebuild.
    ds.drop_index("id_idx").await.unwrap();
    let dropped_index_head = ds.version().version;
    // Test-only hook: publish the dropped-index head without rebuilding indices.
    db.failpoint_publish_table_head_without_index_rebuild_for_test(
        "feature",
        "node:Person",
        Some("feature"),
    )
    .await
    .unwrap();
    // Sanity-check the setup before exercising the failpoint.
    let feature_snapshot = db
        .snapshot_of(omnigraph::db::ReadTarget::branch("feature"))
        .await
        .unwrap();
    assert_eq!(
        feature_snapshot
            .entry("node:Person")
            .expect("feature must have Person")
            .table_version,
        dropped_index_head,
        "test setup must publish the dropped-index table head before ensure_indices runs",
    );
    feature_parent_commit_id = branch_head_commit_id(dir.path(), "feature").await.unwrap();
    {
        let _failpoint =
            ScopedFailPoint::new("ensure_indices.post_phase_b_pre_manifest_commit", "return");
        let err = db.ensure_indices_on("feature").await.unwrap_err();
        assert!(
            err.to_string().contains(
                "injected failpoint triggered: ensure_indices.post_phase_b_pre_manifest_commit"
            ),
            "unexpected error: {err}"
        );
        operation_id = single_sidecar_operation_id(dir.path());
    }
    drop(db);
    // Reopen runs recovery; feature keeps inherited + local rows, main is unchanged.
    let db = Omnigraph::open(&uri).await.unwrap();
    assert_eq!(
        helpers::count_rows_branch(&db, "feature", "node:Person").await,
        2,
        "feature should see inherited alice plus recovered branch-local row"
    );
    assert_eq!(
        helpers::count_rows(&db, "node:Person").await,
        1,
        "ensure_indices branch recovery must not move main"
    );
    drop(db);
    assert_post_recovery_invariants(
        dir.path(),
        &operation_id,
        RecoveryExpectation::RolledForward {
            tables: vec![
                TableExpectation::branch("node:Person", "feature")
                    .expected_main_manifest_pin(main_person_pin)
                    .expected_recovery_parent_commit_id(feature_parent_commit_id)
                    .follow_up_mutation(FollowUpMutation::new(
                        "feature",
                        MUTATION_QUERIES,
                        "insert_person",
                        mixed_params(&[("$name", "AfterEnsure")], &[("$age", 44)]),
                    )),
            ],
        },
    )
    .await
    .unwrap();
    let db = Omnigraph::open(&uri).await.unwrap();
    assert_eq!(
        helpers::count_rows_branch(&db, "feature", "node:Person").await,
        3,
        "follow-up feature mutation must succeed after ensure_indices recovery"
    );
    assert_eq!(
        helpers::count_rows(&db, "node:Person").await,
        1,
        "follow-up feature mutation must not move main"
    );
}
// refresh() on a live handle must perform roll-forward recovery in-process (no
// reopen needed): the sidecar is consumed, the failed insert becomes visible, and
// subsequent writes succeed on the same handle.
#[tokio::test]
async fn refresh_runs_roll_forward_recovery_in_process() {
    let _scenario = FailScenario::setup();
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap().to_string();
    let mut db = Omnigraph::init(&uri, helpers::TEST_SCHEMA).await.unwrap();
    // Scope the failpoint so it is disarmed before refresh() runs.
    {
        let _failpoint = ScopedFailPoint::new("mutation.post_finalize_pre_publisher", "return");
        let err = mutate_main(
            &mut db,
            MUTATION_QUERIES,
            "insert_person",
            &mixed_params(&[("$name", "Eve")], &[("$age", 22)]),
        )
        .await
        .unwrap_err();
        assert!(
            err.to_string()
                .contains("injected failpoint triggered: mutation.post_finalize_pre_publisher"),
            "unexpected error: {err}"
        );
        let recovery_dir = dir.path().join("__recovery");
        assert_eq!(
            std::fs::read_dir(&recovery_dir).unwrap().count(),
            1,
            "exactly one sidecar must persist after the finalize failure"
        );
    }
    // In-process recovery via refresh — same handle, no reopen.
    db.refresh().await.expect("refresh must succeed");
    let recovery_dir = dir.path().join("__recovery");
    if recovery_dir.exists() {
        let remaining: Vec<_> = std::fs::read_dir(&recovery_dir)
            .unwrap()
            .filter_map(|e| e.ok())
            .collect();
        assert!(
            remaining.is_empty(),
            "sidecar must be deleted by refresh-time roll-forward; remaining: {:?}",
            remaining,
        );
    }
    let person_count = helpers::count_rows(&db, "node:Person").await;
    assert_eq!(
        person_count, 1,
        "Eve must be visible after refresh-time roll-forward"
    );
    // A second insert on the same handle must now succeed.
    mutate_main(
        &mut db,
        MUTATION_QUERIES,
        "insert_person",
        &mixed_params(&[("$name", "Frank")], &[("$age", 33)]),
    )
    .await
    .expect("Person insert must succeed after refresh-time recovery");
    assert_eq!(helpers::count_rows(&db, "node:Person").await, 2);
}
// refresh() must NOT perform Dataset::restore-style rollbacks: a hand-crafted
// sidecar whose pins make it rollback-eligible is deferred by refresh (dataset head
// unchanged, sidecar left in place) and only processed by the full sweep at the
// next ReadWrite open (head then advances via restore).
#[tokio::test]
async fn refresh_defers_rollback_eligible_sidecar_to_next_open() {
    use omnigraph::loader::{LoadMode, load_jsonl};
    use omnigraph::table_store::TableStore;
    let _scenario = FailScenario::setup();
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap().to_string();
    let mut db = Omnigraph::init(&uri, helpers::TEST_SCHEMA).await.unwrap();
    load_jsonl(
        &mut db,
        r#"{"type":"Person","data":{"name":"alice","age":30}}
"#,
        LoadMode::Append,
    )
    .await
    .unwrap();
    let snapshot = db
        .snapshot_of(omnigraph::db::ReadTarget::branch("main"))
        .await
        .unwrap();
    let entry = snapshot.entry("node:Person").unwrap();
    let person_uri = format!("{}/{}", uri.trim_end_matches('/'), entry.table_path);
    let manifest_pin = entry.table_version;
    let store = TableStore::new(&uri);
    let mut ds = lance::Dataset::open(&person_uri).await.unwrap();
    // Advance the dataset head past the manifest pin with a no-op delete (1 = 2
    // matches nothing) so the table appears drifted.
    store
        .delete_where(&person_uri, &mut ds, "1 = 2")
        .await
        .unwrap();
    let head_after_drift = ds.version().version;
    assert_eq!(head_after_drift, manifest_pin + 1);
    // Pin expected_version BELOW the manifest pin so recovery classifies this
    // sidecar as rollback-eligible rather than roll-forward.
    let bogus_expected = manifest_pin - 1;
    let bogus_post = head_after_drift;
    // Hand-write a sidecar file in the on-disk JSON layout recovery expects.
    let sidecar_json = format!(
        r#"{{
"schema_version": 1,
"operation_id": "01H0000000000000000000RBCK",
"started_at": "0",
"branch": null,
"actor_id": "act-rollback",
"writer_kind": "Mutation",
"tables": [
{{
"table_key":"node:Person",
"table_path":"{}",
"expected_version":{},
"post_commit_pin":{}
}}
]
}}"#,
        person_uri, bogus_expected, bogus_post,
    );
    let recovery_dir = dir.path().join("__recovery");
    std::fs::create_dir_all(&recovery_dir).unwrap();
    std::fs::write(
        recovery_dir.join("01H0000000000000000000RBCK.json"),
        &sidecar_json,
    )
    .unwrap();
    let pre_head = lance::Dataset::open(&person_uri)
        .await
        .unwrap()
        .version()
        .version;
    // refresh must see the sidecar but defer it (no restore in-process).
    db.refresh()
        .await
        .expect("refresh must succeed (deferring rollback)");
    assert_eq!(
        std::fs::read_dir(&recovery_dir).unwrap().count(),
        1,
        "rollback-eligible sidecar must be deferred to next ReadWrite open",
    );
    let post_head = lance::Dataset::open(&person_uri)
        .await
        .unwrap()
        .version()
        .version;
    assert_eq!(
        pre_head, post_head,
        "refresh-time recovery must NOT call Dataset::restore on Person; \
pre_head={pre_head}, post_head={post_head}",
    );
    drop(db);
    // The full sweep at open must consume the deferred sidecar...
    let _db = Omnigraph::open(&uri).await.unwrap();
    let remaining = if recovery_dir.exists() {
        std::fs::read_dir(&recovery_dir).unwrap().count()
    } else {
        0
    };
    assert_eq!(
        remaining, 0,
        "full sweep at next open must process the deferred sidecar",
    );
    // ...and the restore commits a new version, so the head advances.
    let final_head = lance::Dataset::open(&person_uri)
        .await
        .unwrap()
        .version()
        .version;
    assert!(
        final_head > post_head,
        "full sweep must run Dataset::restore (head advances); \
post_head={post_head}, final_head={final_head}",
    );
}
// A failed Person mutation (post-finalize crash) must not mark unrelated tables as
// drifted: a subsequent Company load on the same handle has to succeed.
#[tokio::test]
async fn finalize_publisher_residual_does_not_drift_untouched_tables() {
    let _scenario = FailScenario::setup();
    let dir = tempfile::tempdir().unwrap();
    let mut db = Omnigraph::init(dir.path().to_str().unwrap(), helpers::TEST_SCHEMA)
        .await
        .unwrap();
    // Scope the failpoint to the failing Person insert only.
    {
        let _failpoint = ScopedFailPoint::new("mutation.post_finalize_pre_publisher", "return");
        let _ = mutate_main(
            &mut db,
            MUTATION_QUERIES,
            "insert_person",
            &mixed_params(&[("$name", "Eve")], &[("$age", 22)]),
        )
        .await
        .expect_err("synthetic failpoint must fire");
    }
    use omnigraph::loader::{LoadMode, load_jsonl};
    // Company was untouched by the failed Person write; this load must succeed.
    load_jsonl(
        &mut db,
        r#"{"type": "Company", "data": {"name": "Acme"}}"#,
        LoadMode::Append,
    )
    .await
    .expect("Company write on a non-drifted table should succeed");
}
// A schema apply that fails inside ensure_indices (while staging a btree index for
// a newly added table) must leave the pre-existing tables writable.
#[tokio::test]
async fn ensure_indices_stage_btree_failure_leaves_existing_tables_writable() {
    let _scenario = FailScenario::setup();
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap().to_string();
    let mut db = Omnigraph::init(&uri, helpers::TEST_SCHEMA).await.unwrap();
    // Extend the schema with a new node type so the apply triggers index staging.
    let extended_schema = format!(
        "{}\nnode Project {{ name: String @key }}\n",
        helpers::TEST_SCHEMA
    );
    {
        let _failpoint =
            ScopedFailPoint::new("ensure_indices.post_stage_pre_commit_btree", "return");
        let err = db.apply_schema(&extended_schema).await.unwrap_err();
        assert!(
            err.to_string()
                .contains("ensure_indices.post_stage_pre_commit_btree"),
            "schema apply should fail with the synthetic failpoint error, got: {err}"
        );
    }
    // Person predates the failed apply; it must not have been drifted by it.
    mutate_main(
        &mut db,
        helpers::MUTATION_QUERIES,
        "insert_person",
        &mixed_params(&[("$name", "Eve")], &[("$age", 22)]),
    )
    .await
    .expect("Person mutation must succeed after the failed schema apply — existing tables are not drifted");
}
/// Panics if any schema staging file (`*.staging`) is still present under `repo` —
/// recovery is expected to have promoted or removed all of them.
fn assert_no_staging_files(repo: &std::path::Path) {
    let staging_names = [
        "_schema.pg.staging",
        "_schema.ir.json.staging",
        "__schema_state.json.staging",
    ];
    for name in staging_names {
        let candidate = repo.join(name);
        assert!(
            !candidate.exists(),
            "staging file {} still exists after recovery",
            candidate.display()
        );
    }
}
// A schema apply that crashes BEFORE any staging file is written must be rolled
// BACK on the next open: the manifest version, the old schema file, and the old
// data all remain exactly as they were before the failed apply.
#[tokio::test]
async fn schema_apply_without_schema_staging_rolls_back_on_next_open() {
    use omnigraph::loader::{LoadMode, load_jsonl};
    let _scenario = FailScenario::setup();
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap().to_string();
    let operation_id;
    // Seed one Person row under the original schema.
    {
        let mut db = Omnigraph::init(&uri, helpers::TEST_SCHEMA).await.unwrap();
        load_jsonl(
            &mut db,
            r#"{"type":"Person","data":{"name":"alice","age":30}}
"#,
            LoadMode::Append,
        )
        .await
        .unwrap();
    }
    // Record the manifest version to compare against after rollback.
    let pre_failure_version = {
        let db = Omnigraph::open(&uri).await.unwrap();
        version_main(&db).await.unwrap()
    };
    {
        let db = Omnigraph::open(&uri).await.unwrap();
        // Crash before staging: nothing of the new schema may survive.
        let _failpoint = ScopedFailPoint::new("schema_apply.before_staging_write", "return");
        let v2_schema = r#"node Person {
name: String @key
age: I32?
city: String?
}
node Company {
name: String @key
}
node Tag {
label: String @key
}
edge Knows: Person -> Person {
since: Date?
}
edge WorksAt: Person -> Company
"#;
        let err = db.apply_schema(v2_schema).await.unwrap_err();
        assert!(
            err.to_string()
                .contains("injected failpoint triggered: schema_apply.before_staging_write"),
            "unexpected error: {err}"
        );
        operation_id = single_sidecar_operation_id(dir.path());
    }
    let db = Omnigraph::open(&uri).await.unwrap();
    assert_eq!(
        version_main(&db).await.unwrap(),
        pre_failure_version,
        "manifest must remain on the old schema when no schema staging files existed"
    );
    assert_eq!(
        helpers::count_rows(&db, "node:Person").await,
        1,
        "old-schema data must remain readable after rollback"
    );
    drop(db);
    assert_post_recovery_invariants(
        dir.path(),
        &operation_id,
        RecoveryExpectation::RolledBack {
            tables: vec![TableExpectation::main("node:Person")],
        },
    )
    .await
    .unwrap();
    // The live schema file must not mention anything from the v2 schema.
    let live_schema = std::fs::read_to_string(dir.path().join("_schema.pg")).unwrap();
    assert!(
        !live_schema.contains("city: String?"),
        "_schema.pg must keep the OLD schema when staging files never existed; got:\n{live_schema}",
    );
    assert!(
        !live_schema.contains("node Tag"),
        "_schema.pg must keep the OLD schema when staging files never existed; got:\n{live_schema}",
    );
}
// A schema apply that crashes AFTER the staging files were written must be rolled
// FORWARD on the next open: the manifest advances, the new schema becomes live,
// and the newly added Tag table gets a manifest entry.
#[tokio::test]
async fn schema_apply_phase_b_failure_recovered_on_next_open() {
    use omnigraph::loader::{LoadMode, load_jsonl};
    let _scenario = FailScenario::setup();
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap().to_string();
    let operation_id;
    // Seed one Person row under the original schema.
    {
        let mut db = Omnigraph::init(&uri, helpers::TEST_SCHEMA).await.unwrap();
        load_jsonl(
            &mut db,
            r#"{"type":"Person","data":{"name":"alice","age":30}}
"#,
            LoadMode::Append,
        )
        .await
        .unwrap();
    }
    let pre_failure_version = {
        let db = Omnigraph::open(&uri).await.unwrap();
        version_main(&db).await.unwrap()
    };
    {
        let db = Omnigraph::open(&uri).await.unwrap();
        // Crash after staging: recovery must complete the apply.
        let _failpoint = ScopedFailPoint::new("schema_apply.after_staging_write", "return");
        let v2_schema = r#"node Person {
name: String @key
age: I32?
city: String?
}
node Company {
name: String @key
}
node Tag {
label: String @key
}
edge Knows: Person -> Person {
since: Date?
}
edge WorksAt: Person -> Company
"#;
        let err = db.apply_schema(v2_schema).await.unwrap_err();
        assert!(
            err.to_string()
                .contains("injected failpoint triggered: schema_apply.after_staging_write"),
            "unexpected error: {err}"
        );
        let recovery_dir = dir.path().join("__recovery");
        let sidecars: Vec<_> = std::fs::read_dir(&recovery_dir)
            .unwrap()
            .filter_map(|e| e.ok())
            .collect();
        assert_eq!(
            sidecars.len(),
            1,
            "exactly one sidecar must persist after schema_apply failure"
        );
        operation_id = single_sidecar_operation_id(dir.path());
    }
    // Reopen: roll-forward must have advanced the manifest.
    let db = Omnigraph::open(&uri).await.unwrap();
    let post_recovery_version = version_main(&db).await.unwrap();
    assert!(
        post_recovery_version > pre_failure_version,
        "manifest version must advance post-recovery; pre={pre_failure_version}, \
post={post_recovery_version}",
    );
    drop(db);
    assert_post_recovery_invariants(
        dir.path(),
        &operation_id,
        RecoveryExpectation::RolledForward {
            tables: vec![TableExpectation::main("node:Person")],
        },
    )
    .await
    .unwrap();
    // The live schema file must now contain the v2 additions.
    let live_schema = std::fs::read_to_string(dir.path().join("_schema.pg")).unwrap();
    assert!(
        live_schema.contains("city: String?"),
        "_schema.pg must reflect the NEW schema (city column added); got:\n{live_schema}",
    );
    assert!(
        live_schema.contains("node Tag"),
        "_schema.pg must reflect the NEW schema (Tag type added); got:\n{live_schema}",
    );
    let db = Omnigraph::open(&uri).await.unwrap();
    // count_rows panics if node:Tag has no manifest entry; zero rows expected.
    let tag_rows = helpers::count_rows(&db, "node:Tag").await;
    assert_eq!(
        tag_rows, 0,
        "node:Tag must have a manifest entry (with 0 rows) post-recovery; \
a panic here means recovery failed to register the added table"
    );
}
// A branch merge into main that crashes between phase B and the manifest commit
// must be recovered on the next open: sidecar consumed, recovery audit table
// written, manifest advanced, and the recovered merge commit tagged with
// `merged_parent_commit_id` so later merges can detect already-merged state.
#[tokio::test]
async fn branch_merge_phase_b_failure_recovered_on_next_open() {
    use omnigraph::loader::{LoadMode, load_jsonl};
    let _scenario = FailScenario::setup();
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap().to_string();
    // Seed divergent writes: Bob on feature, Carol on main.
    {
        let mut db = Omnigraph::init(&uri, helpers::TEST_SCHEMA).await.unwrap();
        load_jsonl(
            &mut db,
            r#"{"type":"Person","data":{"name":"alice","age":30}}
"#,
            LoadMode::Append,
        )
        .await
        .unwrap();
        db.branch_create("feature").await.unwrap();
        db.mutate(
            "feature",
            MUTATION_QUERIES,
            "insert_person",
            &mixed_params(&[("$name", "Bob")], &[("$age", 40)]),
        )
        .await
        .unwrap();
        mutate_main(
            &mut db,
            MUTATION_QUERIES,
            "insert_person",
            &mixed_params(&[("$name", "Carol")], &[("$age", 50)]),
        )
        .await
        .unwrap();
    }
    let pre_failure_version = {
        let db = Omnigraph::open(&uri).await.unwrap();
        version_main(&db).await.unwrap()
    };
    {
        let db = Omnigraph::open(&uri).await.unwrap();
        let _failpoint =
            ScopedFailPoint::new("branch_merge.post_phase_b_pre_manifest_commit", "return");
        let err = db.branch_merge("feature", "main").await.unwrap_err();
        assert!(
            err.to_string().contains(
                "injected failpoint triggered: branch_merge.post_phase_b_pre_manifest_commit"
            ),
            "unexpected error: {err}"
        );
        let recovery_dir = dir.path().join("__recovery");
        let sidecars: Vec<_> = std::fs::read_dir(&recovery_dir)
            .unwrap()
            .filter_map(|e| e.ok())
            .collect();
        assert_eq!(
            sidecars.len(),
            1,
            "exactly one sidecar must persist after branch_merge failure"
        );
    }
    // Reopen runs recovery; the sidecar must be gone afterwards.
    let db = Omnigraph::open(&uri).await.unwrap();
    let recovery_dir = dir.path().join("__recovery");
    if recovery_dir.exists() {
        let remaining: Vec<_> = std::fs::read_dir(&recovery_dir)
            .unwrap()
            .filter_map(|e| e.ok())
            .collect();
        assert!(
            remaining.is_empty(),
            "sidecar must be deleted; remaining: {:?}",
            remaining,
        );
    }
    // Recovery must leave an audit trail table.
    let audit_dir = dir.path().join("_graph_commit_recoveries.lance");
    assert!(
        audit_dir.exists(),
        "_graph_commit_recoveries.lance must exist after branch_merge recovery"
    );
    let post_recovery_version = version_main(&db).await.unwrap();
    assert!(
        post_recovery_version > pre_failure_version,
        "manifest version must advance post-recovery; pre={pre_failure_version}, \
post={post_recovery_version}",
    );
    // Scan the commit log for a commit carrying a non-null merged_parent_commit_id,
    // which marks the recovered merge.
    {
        use arrow_array::{Array, StringArray};
        use futures::TryStreamExt;
        let commits_dir = dir.path().join("_graph_commits.lance");
        let ds = lance::Dataset::open(commits_dir.to_str().unwrap())
            .await
            .unwrap();
        let batches: Vec<arrow_array::RecordBatch> = ds
            .scan()
            .try_into_stream()
            .await
            .unwrap()
            .try_collect()
            .await
            .unwrap();
        let mut found_recovery_merge = false;
        for batch in batches {
            let merged = batch
                .column_by_name("merged_parent_commit_id")
                .expect("merged_parent_commit_id column present")
                .as_any()
                .downcast_ref::<StringArray>()
                .expect("merged_parent_commit_id is Utf8");
            for i in 0..merged.len() {
                if !merged.is_null(i) {
                    found_recovery_merge = true;
                    break;
                }
            }
        }
        assert!(
            found_recovery_merge,
            "recovered branch_merge must record `merged_parent_commit_id` so future \
merges detect already-up-to-date — no merge-parent-tagged commit found",
        );
    }
    drop(db);
}
// Same phase-B merge crash as above, but merging into a non-main target branch:
// recovery must roll forward onto `target_branch`, with the sidecar recording the
// main-manifest pin and the target branch's parent commit.
#[tokio::test]
async fn branch_merge_phase_b_failure_recovered_on_non_main_target() {
    use omnigraph::loader::{LoadMode, load_jsonl};
    let _scenario = FailScenario::setup();
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap().to_string();
    let operation_id;
    let target_parent_commit_id;
    // Seed: alice on main, Bob on target_branch, Carol on source_branch.
    {
        let mut db = Omnigraph::init(&uri, helpers::TEST_SCHEMA).await.unwrap();
        load_jsonl(
            &mut db,
            r#"{"type":"Person","data":{"name":"alice","age":30}}
"#,
            LoadMode::Append,
        )
        .await
        .unwrap();
        db.branch_create("target_branch").await.unwrap();
        db.mutate(
            "target_branch",
            MUTATION_QUERIES,
            "insert_person",
            &mixed_params(&[("$name", "Bob")], &[("$age", 40)]),
        )
        .await
        .unwrap();
        db.branch_create("source_branch").await.unwrap();
        db.mutate(
            "source_branch",
            MUTATION_QUERIES,
            "insert_person",
            &mixed_params(&[("$name", "Carol")], &[("$age", 50)]),
        )
        .await
        .unwrap();
    }
    // Pin main's Person version — the non-main merge recovery must not move main.
    let main_person_pin = {
        let db = Omnigraph::open(&uri).await.unwrap();
        db.snapshot_of(omnigraph::db::ReadTarget::branch("main"))
            .await
            .unwrap()
            .entry("node:Person")
            .expect("main must have Person")
            .table_version
    };
    target_parent_commit_id = branch_head_commit_id(dir.path(), "target_branch")
        .await
        .unwrap();
    {
        let db = Omnigraph::open(&uri).await.unwrap();
        let _failpoint =
            ScopedFailPoint::new("branch_merge.post_phase_b_pre_manifest_commit", "return");
        let err = db
            .branch_merge("source_branch", "target_branch")
            .await
            .unwrap_err();
        assert!(
            err.to_string().contains(
                "injected failpoint triggered: branch_merge.post_phase_b_pre_manifest_commit"
            ),
            "unexpected error: {err}"
        );
        let recovery_dir = dir.path().join("__recovery");
        let sidecar_count = std::fs::read_dir(&recovery_dir).unwrap().count();
        assert_eq!(
            sidecar_count, 1,
            "exactly one sidecar must persist after non-main branch_merge failure"
        );
        operation_id = single_sidecar_operation_id(dir.path());
    }
    // Open (and immediately drop) to run recovery, then verify invariants.
    let db = Omnigraph::open(&uri).await.unwrap();
    drop(db);
    assert_post_recovery_invariants(
        dir.path(),
        &operation_id,
        RecoveryExpectation::RolledForward {
            tables: vec![
                TableExpectation::branch("node:Person", "target_branch")
                    .expected_main_manifest_pin(main_person_pin)
                    .expected_recovery_parent_commit_id(target_parent_commit_id),
            ],
        },
    )
    .await
    .unwrap();
}
// Inspects the raw sidecar JSON written by a failed merge into a non-main target:
// every pinned table must record `table_branch` as the merge TARGET (where the
// rewritten-merge commits actually land), not the branch from the target snapshot.
#[tokio::test]
async fn branch_merge_sidecar_pins_table_branch_to_active_branch() {
    use omnigraph::loader::{LoadMode, load_jsonl};
    let _scenario = FailScenario::setup();
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap().to_string();
    // Seed: alice on main, Bob on target_branch, Carol on source_branch — both
    // branches mutate Person, forcing a RewriteMerged pin for it.
    {
        let mut db = Omnigraph::init(&uri, helpers::TEST_SCHEMA).await.unwrap();
        load_jsonl(
            &mut db,
            r#"{"type":"Person","data":{"name":"alice","age":30}}
"#,
            LoadMode::Append,
        )
        .await
        .unwrap();
        db.branch_create("target_branch").await.unwrap();
        db.mutate(
            "target_branch",
            MUTATION_QUERIES,
            "insert_person",
            &mixed_params(&[("$name", "Bob")], &[("$age", 40)]),
        )
        .await
        .unwrap();
        db.branch_create("source_branch").await.unwrap();
        db.mutate(
            "source_branch",
            MUTATION_QUERIES,
            "insert_person",
            &mixed_params(&[("$name", "Carol")], &[("$age", 50)]),
        )
        .await
        .unwrap();
    }
    {
        let db = Omnigraph::open(&uri).await.unwrap();
        let _failpoint =
            ScopedFailPoint::new("branch_merge.post_phase_b_pre_manifest_commit", "return");
        let _ = db
            .branch_merge("source_branch", "target_branch")
            .await
            .expect_err("failpoint must fire");
    }
    // Read the sidecar file directly and check its `tables` pins.
    let operation_id = single_sidecar_operation_id(dir.path());
    let sidecar_path = dir
        .path()
        .join("__recovery")
        .join(format!("{operation_id}.json"));
    let sidecar_json = std::fs::read_to_string(&sidecar_path).unwrap();
    let sidecar: serde_json::Value = serde_json::from_str(&sidecar_json).unwrap();
    let tables = sidecar["tables"]
        .as_array()
        .expect("sidecar tables must be an array");
    assert!(
        !tables.is_empty(),
        "sidecar must pin at least one RewriteMerged table — both branches mutated Person"
    );
    for pin in tables {
        let table_branch = pin
            .get("table_branch")
            .and_then(|v| v.as_str())
            .unwrap_or_else(|| {
                panic!(
                    "sidecar pin must record table_branch as the merge target (active_branch); \
got pin {pin:?}"
                )
            });
        assert_eq!(
            table_branch, "target_branch",
            "sidecar pin must record `table_branch` as the merge target branch (where \
commits actually land via publish_rewritten_merge_table → open_for_mutation), \
NOT entry.table_branch from the target snapshot. See merge.rs filter_map and \
the rationale comment at table_ops.rs:115-120. Got pin: {pin:?}"
        );
    }
}
// When ensure_indices has no rebuild work to do, a phase-B failpoint must not leave
// a sidecar behind, and no recovery audit table may be created on the next open.
#[tokio::test]
async fn ensure_indices_phase_b_failure_does_not_leak_sidecar_when_no_work_needed() {
    use omnigraph::loader::{LoadMode, load_jsonl};
    let _scenario = FailScenario::setup();
    let dir = tempfile::tempdir().unwrap();
    let uri = dir.path().to_str().unwrap().to_string();
    // Seed two Person rows; indices are already in their steady state.
    {
        let mut db = Omnigraph::init(&uri, helpers::TEST_SCHEMA).await.unwrap();
        load_jsonl(
            &mut db,
            r#"{"type":"Person","data":{"name":"alice","age":30}}
{"type":"Person","data":{"name":"bob","age":25}}
"#,
            LoadMode::Append,
        )
        .await
        .unwrap();
    }
    {
        let db = Omnigraph::open(&uri).await.unwrap();
        let _failpoint =
            ScopedFailPoint::new("ensure_indices.post_phase_b_pre_manifest_commit", "return");
        let err = db.ensure_indices().await.unwrap_err();
        assert!(
            err.to_string().contains(
                "injected failpoint triggered: ensure_indices.post_phase_b_pre_manifest_commit"
            ),
            "unexpected error: {err}"
        );
        // No work was staged, so no sidecar may have been written.
        let recovery_dir = dir.path().join("__recovery");
        let sidecars: Vec<_> = if recovery_dir.exists() {
            std::fs::read_dir(&recovery_dir)
                .unwrap()
                .filter_map(|e| e.ok())
                .collect()
        } else {
            Vec::new()
        };
        assert!(
            sidecars.is_empty(),
            "steady-state ensure_indices must not leave a sidecar; got {:?}",
            sidecars,
        );
    }
    // Reopen: still no sidecars, and no audit table since nothing was recovered.
    let _db = Omnigraph::open(&uri).await.unwrap();
    let recovery_dir = dir.path().join("__recovery");
    if recovery_dir.exists() {
        let remaining: Vec<_> = std::fs::read_dir(&recovery_dir)
            .unwrap()
            .filter_map(|e| e.ok())
            .collect();
        assert!(
            remaining.is_empty(),
            "sidecar must remain deleted; remaining: {:?}",
            remaining,
        );
    }
    let audit_dir = dir.path().join("_graph_commit_recoveries.lance");
    assert!(
        !audit_dir.exists(),
        "_graph_commit_recoveries.lance must NOT exist when no sidecar was processed"
    );
}