use crate::db::{
codec::{
finalize_hash_sha256, new_hash_sha256_prefixed, write_hash_str_u32, write_hash_tag_u8,
write_hash_u32,
},
data::{CanonicalSlotReader, StorageKey},
index::{IndexEntry, IndexKey, IndexState, IndexStore, RawIndexEntry, RawIndexKey},
schema::{
FieldId, PersistedFieldKind, PersistedFieldSnapshot, PersistedIndexExpressionOp,
PersistedIndexFieldPathSnapshot, PersistedIndexKeyItemSnapshot, PersistedIndexKeySnapshot,
PersistedIndexSnapshot, PersistedSchemaSnapshot, SchemaFieldSlot,
encode_persisted_schema_snapshot,
},
};
use crate::error::InternalError;
use crate::types::EntityTag;
use sha2::Digest;
use std::collections::BTreeMap;
#[allow(
    dead_code,
    reason = "used by mutation fingerprint tests until audit identity is surfaced in diagnostics"
)]
// Domain-separation prefix mixed into the SHA-256 hasher by `MutationPlan::fingerprint`
// so plan fingerprints can never collide with other hash profiles in the codec.
const SCHEMA_MUTATION_FINGERPRINT_PROFILE_TAG: &[u8] = b"icydb:schema-mutation-plan:v1";
#[allow(
    dead_code,
    reason = "0.153 stages runtime epoch identity before physical runners publish snapshots"
)]
// Domain-separation prefix reserved for runtime-epoch hashing; not referenced in this
// chunk yet — presumably consumed once physical runners publish snapshots (see reason).
const SCHEMA_MUTATION_RUNTIME_EPOCH_PROFILE_TAG: &[u8] = b"icydb:schema-mutation-runtime-epoch:v1";
#[allow(
    dead_code,
    reason = "0.152 defines the first mutation vocabulary before every operation is executable"
)]
#[derive(Clone, Debug, Eq, PartialEq)]
/// One concrete schema change carried by a `MutationPlan`.
pub(in crate::db::schema) enum SchemaMutation {
    /// Appended field without a default value (see `MutationPlan::append_only_fields`,
    /// which picks this variant when `field.default()` is `None`).
    AddNullableField {
        field_id: FieldId,
        name: String,
        slot: SchemaFieldSlot,
    },
    /// Appended field that carries a default value.
    AddDefaultedField {
        field_id: FieldId,
        name: String,
        slot: SchemaFieldSlot,
    },
    /// Add a non-unique field-path index; requires an index rebuild.
    AddNonUniqueFieldPathIndex {
        target: SchemaFieldPathIndexRebuildTarget,
    },
    /// Add an expression index; requires an index rebuild.
    AddExpressionIndex {
        target: SchemaExpressionIndexRebuildTarget,
    },
    /// Drop a non-required secondary index; lowered to a cleanup/rebuild action.
    DropNonRequiredSecondaryIndex {
        target: SchemaSecondaryIndexDropCleanupTarget,
    },
    /// Change a field's nullability; lowered as unsupported pre-1.0
    /// (see `MutationPlan::nullability_alteration`).
    AlterNullability {
        field_id: FieldId,
    },
}
#[allow(
    dead_code,
    reason = "0.152 stages the internal mutation request API before every request has a live caller"
)]
#[derive(Clone, Debug, Eq, PartialEq)]
/// A requested schema change before it is lowered into a `MutationPlan`
/// (see `lower_to_plan`). Borrows appended field snapshots from the caller.
pub(in crate::db::schema) enum SchemaMutationRequest<'a> {
    /// Actual and expected snapshots are identical; nothing to do.
    ExactMatch,
    /// Expected snapshot only appends these fields to the actual one.
    AppendOnlyFields(&'a [PersistedFieldSnapshot]),
    /// Built via `from_accepted_non_unique_field_path_index`.
    AddNonUniqueFieldPathIndex {
        target: SchemaFieldPathIndexRebuildTarget,
    },
    /// Built via `from_accepted_expression_index`.
    AddExpressionIndex {
        target: SchemaExpressionIndexRebuildTarget,
    },
    /// Built via `from_accepted_non_unique_secondary_index_drop`.
    DropNonRequiredSecondaryIndex {
        target: SchemaSecondaryIndexDropCleanupTarget,
    },
    /// Nullability change request; lowers to an unsupported plan.
    AlterNullability {
        field_id: FieldId,
    },
    /// Snapshots differ in a way this vocabulary cannot express.
    Incompatible,
}
#[allow(
    dead_code,
    reason = "0.152 stages fail-closed mutation lowering before DDL diagnostics expose it"
)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
/// Fail-closed rejections raised while lowering an accepted index snapshot
/// into a `SchemaMutationRequest` (see the `from_accepted_*` constructors).
pub(in crate::db::schema) enum AcceptedSchemaMutationError {
    /// Unique indexes are rejected by every `from_accepted_*` constructor here.
    UniqueIndexRequiresDedicatedValidation,
    /// The index key snapshot has a shape the constructor does not handle.
    UnsupportedIndexKeyShape,
    /// The index key has no entries.
    EmptyIndexKey,
    /// An expression-index request whose key contains no expression item.
    ExpressionIndexRequiresExpressionKey,
}
#[allow(
    dead_code,
    reason = "0.152 stages rebuild target contracts before a physical runner consumes them"
)]
#[derive(Clone, Debug, Eq, PartialEq)]
/// Everything a physical runner needs to build a field-path index.
pub(in crate::db) struct SchemaFieldPathIndexRebuildTarget {
    /// Index ordinal, copied from `PersistedIndexSnapshot::ordinal`.
    ordinal: u16,
    /// Index name.
    name: String,
    /// Name of the backing index store.
    store: String,
    /// Uniqueness flag; `false` for targets built through
    /// `from_accepted_non_unique_field_path_index`, which rejects unique indexes.
    unique: bool,
    /// Optional partial-index predicate, carried as SQL text.
    predicate_sql: Option<String>,
    /// One entry per field path in the index key.
    key_paths: Vec<SchemaFieldPathIndexRebuildKey>,
}
#[allow(
    dead_code,
    reason = "0.152 stages rebuild target contracts before a physical runner consumes them"
)]
impl SchemaFieldPathIndexRebuildTarget {
    /// Index name.
    #[must_use]
    pub(in crate::db) const fn name(&self) -> &str {
        self.name.as_str()
    }
    /// Name of the backing index store.
    #[must_use]
    pub(in crate::db) const fn store(&self) -> &str {
        self.store.as_str()
    }
    /// Index ordinal in the persisted schema.
    #[must_use]
    pub(in crate::db) const fn ordinal(&self) -> u16 {
        self.ordinal
    }
    /// Whether the index is unique.
    #[must_use]
    pub(in crate::db) const fn unique(&self) -> bool {
        self.unique
    }
    /// Field-path entries making up the index key.
    #[must_use]
    pub(in crate::db) const fn key_paths(&self) -> &[SchemaFieldPathIndexRebuildKey] {
        self.key_paths.as_slice()
    }
    /// Partial-index predicate SQL, if any.
    #[must_use]
    pub(in crate::db) const fn predicate_sql(&self) -> Option<&str> {
        if let Some(sql) = &self.predicate_sql {
            Some(sql.as_str())
        } else {
            None
        }
    }
}
#[allow(
    dead_code,
    reason = "0.152 stages rebuild target contracts before a physical runner consumes them"
)]
#[derive(Clone, Debug, Eq, PartialEq)]
/// One field-path entry of an index key being rebuilt.
pub(in crate::db) struct SchemaFieldPathIndexRebuildKey {
    /// Identifier of the root field.
    field_id: FieldId,
    /// Storage slot of the root field.
    slot: SchemaFieldSlot,
    /// Path segments; the first segment is treated as the field name
    /// (see `field_name`).
    path: Vec<String>,
    /// Persisted kind of the addressed value.
    kind: PersistedFieldKind,
    /// Whether the addressed value may be null.
    nullable: bool,
}
#[allow(
    dead_code,
    reason = "0.152 stages rebuild target contracts before a physical runner consumes them"
)]
impl SchemaFieldPathIndexRebuildKey {
    /// Identifier of the root field.
    #[must_use]
    pub(in crate::db) const fn field_id(&self) -> FieldId {
        self.field_id
    }
    /// Storage slot of the root field.
    #[must_use]
    pub(in crate::db) const fn slot(&self) -> SchemaFieldSlot {
        self.slot
    }
    /// Full path segments for the key entry.
    #[must_use]
    pub(in crate::db) const fn path(&self) -> &[String] {
        self.path.as_slice()
    }
    /// Persisted kind of the addressed value.
    #[must_use]
    pub(in crate::db) const fn kind(&self) -> &PersistedFieldKind {
        &self.kind
    }
    /// Whether the addressed value may be null.
    #[must_use]
    pub(in crate::db) const fn nullable(&self) -> bool {
        self.nullable
    }
    /// First path segment, or `""` when the path is empty.
    #[must_use]
    pub(in crate::db) fn field_name(&self) -> &str {
        match self.path.first() {
            Some(head) => head.as_str(),
            None => "",
        }
    }
}
#[allow(
    dead_code,
    reason = "0.152 stages rebuild target contracts before a physical runner consumes them"
)]
#[derive(Clone, Debug, Eq, PartialEq)]
/// Everything a physical runner needs to build an expression index.
pub(in crate::db::schema) struct SchemaExpressionIndexRebuildTarget {
    /// Index ordinal, copied from `PersistedIndexSnapshot::ordinal`.
    ordinal: u16,
    /// Index name.
    name: String,
    /// Name of the backing index store.
    store: String,
    /// Uniqueness flag; `false` for targets built through
    /// `from_accepted_expression_index`, which rejects unique indexes.
    unique: bool,
    /// Optional partial-index predicate, carried as SQL text.
    predicate_sql: Option<String>,
    /// Key items: field paths and/or expressions; at least one expression item
    /// is required by `from_accepted_expression_index`.
    key_items: Vec<SchemaExpressionIndexRebuildKey>,
}
#[allow(
    dead_code,
    reason = "0.152 stages rebuild target contracts before a physical runner consumes them"
)]
impl SchemaExpressionIndexRebuildTarget {
    /// Index name.
    #[must_use]
    pub(in crate::db::schema) const fn name(&self) -> &str {
        self.name.as_str()
    }
    /// Name of the backing index store.
    #[must_use]
    pub(in crate::db::schema) const fn store(&self) -> &str {
        self.store.as_str()
    }
    /// Index ordinal in the persisted schema.
    #[must_use]
    pub(in crate::db::schema) const fn ordinal(&self) -> u16 {
        self.ordinal
    }
    /// Whether the index is unique.
    #[must_use]
    pub(in crate::db::schema) const fn unique(&self) -> bool {
        self.unique
    }
    /// Key items (field paths and expressions) making up the index key.
    #[must_use]
    pub(in crate::db::schema) const fn key_items(&self) -> &[SchemaExpressionIndexRebuildKey] {
        self.key_items.as_slice()
    }
    /// Partial-index predicate SQL, if any.
    #[must_use]
    pub(in crate::db::schema) const fn predicate_sql(&self) -> Option<&str> {
        if let Some(sql) = &self.predicate_sql {
            Some(sql.as_str())
        } else {
            None
        }
    }
}
#[allow(
    dead_code,
    reason = "0.152 stages rebuild target contracts before a physical runner consumes them"
)]
#[derive(Clone, Debug, Eq, PartialEq)]
/// One key item of an expression index: either a plain field path or a
/// computed expression (boxed to keep the enum small).
pub(in crate::db::schema) enum SchemaExpressionIndexRebuildKey {
    FieldPath(SchemaFieldPathIndexRebuildKey),
    Expression(Box<SchemaExpressionIndexRebuildExpression>),
}
#[allow(
    dead_code,
    reason = "0.152 stages rebuild target contracts before a physical runner consumes them"
)]
#[derive(Clone, Debug, Eq, PartialEq)]
/// A computed key item in an expression index, copied out of a
/// `PersistedIndexKeyItemSnapshot::Expression` during lowering.
pub(in crate::db::schema) struct SchemaExpressionIndexRebuildExpression {
    /// Operation applied to the source value.
    op: PersistedIndexExpressionOp,
    /// Field path the expression reads from.
    source: SchemaFieldPathIndexRebuildKey,
    /// Kind of the expression input value.
    input_kind: PersistedFieldKind,
    /// Kind of the expression output value.
    output_kind: PersistedFieldKind,
    /// Canonical textual form of the expression.
    canonical_text: String,
}
#[allow(
    dead_code,
    reason = "0.152 stages rebuild target contracts before a physical runner consumes them"
)]
impl SchemaExpressionIndexRebuildExpression {
    /// Canonical textual form of the expression.
    #[must_use]
    pub(in crate::db::schema) const fn canonical_text(&self) -> &str {
        self.canonical_text.as_str()
    }
    /// Operation applied to the source value.
    #[must_use]
    pub(in crate::db::schema) const fn op(&self) -> PersistedIndexExpressionOp {
        self.op
    }
    /// Field path the expression reads from.
    #[must_use]
    pub(in crate::db::schema) const fn source(&self) -> &SchemaFieldPathIndexRebuildKey {
        &self.source
    }
    /// Kind of the expression input value.
    #[must_use]
    pub(in crate::db::schema) const fn input_kind(&self) -> &PersistedFieldKind {
        &self.input_kind
    }
    /// Kind of the expression output value.
    #[must_use]
    pub(in crate::db::schema) const fn output_kind(&self) -> &PersistedFieldKind {
        &self.output_kind
    }
}
#[allow(
    dead_code,
    reason = "0.152 stages cleanup target contracts before a physical runner consumes them"
)]
#[derive(Clone, Debug, Eq, PartialEq)]
/// Identity of a secondary index scheduled for drop cleanup; carries no key
/// shape because the runner only needs to locate and remove the store.
pub(in crate::db::schema) struct SchemaSecondaryIndexDropCleanupTarget {
    /// Index ordinal, copied from `PersistedIndexSnapshot::ordinal`.
    ordinal: u16,
    /// Index name.
    name: String,
    /// Name of the backing index store.
    store: String,
    /// Uniqueness flag; `false` for targets built through
    /// `from_accepted_non_unique_secondary_index_drop`, which rejects unique indexes.
    unique: bool,
    /// Optional partial-index predicate, carried as SQL text.
    predicate_sql: Option<String>,
}
#[allow(
    dead_code,
    reason = "0.152 stages cleanup target contracts before a physical runner consumes them"
)]
impl SchemaSecondaryIndexDropCleanupTarget {
    /// Index name.
    #[must_use]
    pub(in crate::db::schema) const fn name(&self) -> &str {
        self.name.as_str()
    }
    /// Name of the backing index store.
    #[must_use]
    pub(in crate::db::schema) const fn store(&self) -> &str {
        self.store.as_str()
    }
    /// Index ordinal in the persisted schema.
    #[must_use]
    pub(in crate::db::schema) const fn ordinal(&self) -> u16 {
        self.ordinal
    }
    /// Whether the index is unique.
    #[must_use]
    pub(in crate::db::schema) const fn unique(&self) -> bool {
        self.unique
    }
    /// Partial-index predicate SQL, if any.
    #[must_use]
    pub(in crate::db::schema) const fn predicate_sql(&self) -> Option<&str> {
        if let Some(sql) = &self.predicate_sql {
            Some(sql.as_str())
        } else {
            None
        }
    }
}
#[allow(
    dead_code,
    reason = "0.152 stages rebuild and unsupported buckets before every bucket has a live caller"
)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
/// How a mutation plan relates to the live data: only `MetadataOnlySafe`
/// plans are publishable (see `MutationPlan::publication_status`).
pub(in crate::db::schema) enum MutationCompatibility {
    /// Safe to apply by updating metadata alone.
    MetadataOnlySafe,
    /// Needs physical index work before publication.
    RequiresRebuild,
    /// Valid mutation, but not executable before 1.0 (e.g. nullability change).
    UnsupportedPreOne,
    /// Snapshots diverge beyond what any plan can express.
    Incompatible,
}
#[allow(
    dead_code,
    reason = "0.152 exposes future rebuild buckets before orchestration consumes them"
)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
/// Physical work bucket a plan falls into; anything except
/// `NoRebuildRequired` blocks publication (see `SchemaRebuildPlan`).
pub(in crate::db::schema) enum RebuildRequirement {
    NoRebuildRequired,
    /// Secondary index must be (re)built or dropped.
    IndexRebuildRequired,
    /// Every row would need rewriting.
    FullDataRewriteRequired,
    /// No execution path exists for this mutation yet.
    Unsupported,
}
#[allow(
    dead_code,
    reason = "0.152 stages rebuild orchestration contracts before execution consumes them"
)]
#[derive(Clone, Debug, Eq, PartialEq)]
/// A single unit of physical work derived from a plan's mutations
/// (see `MutationPlan::rebuild_plan`).
pub(in crate::db::schema) enum SchemaRebuildAction {
    /// Build the given field-path index from scratch.
    BuildFieldPathIndex {
        target: SchemaFieldPathIndexRebuildTarget,
    },
    /// Build the given expression index from scratch.
    BuildExpressionIndex {
        target: SchemaExpressionIndexRebuildTarget,
    },
    /// Remove the given secondary index store.
    DropSecondaryIndex {
        target: SchemaSecondaryIndexDropCleanupTarget,
    },
    /// Placeholder action for full-rewrite plans with no specific target.
    RewriteAllRows,
    /// Action that cannot be executed; `reason` explains why.
    Unsupported {
        reason: &'static str,
    },
}
#[allow(
    dead_code,
    reason = "0.152 stages rebuild orchestration contracts before execution consumes them"
)]
#[derive(Clone, Debug, Eq, PartialEq)]
/// Rebuild requirement plus the concrete actions that would satisfy it;
/// produced by `MutationPlan::rebuild_plan` and lowered further by
/// `SchemaMutationExecutionPlan::from_rebuild_plan`.
pub(in crate::db::schema) struct SchemaRebuildPlan {
    requirement: RebuildRequirement,
    actions: Vec<SchemaRebuildAction>,
}
#[allow(
    dead_code,
    reason = "0.152 stages rebuild orchestration contracts before execution consumes them"
)]
impl SchemaRebuildPlan {
    /// Empty plan: nothing to rebuild, no actions.
    const fn no_rebuild() -> Self {
        Self {
            requirement: RebuildRequirement::NoRebuildRequired,
            actions: Vec::new(),
        }
    }
    /// Plan carrying the given requirement and derived actions.
    const fn new(requirement: RebuildRequirement, actions: Vec<SchemaRebuildAction>) -> Self {
        Self {
            requirement,
            actions,
        }
    }
    /// Rebuild bucket this plan falls into.
    #[must_use]
    pub(in crate::db::schema) const fn requirement(&self) -> RebuildRequirement {
        self.requirement
    }
    /// Concrete actions that would satisfy the requirement.
    #[must_use]
    pub(in crate::db::schema) const fn actions(&self) -> &[SchemaRebuildAction] {
        self.actions.as_slice()
    }
    /// `true` for every requirement except `NoRebuildRequired`.
    #[must_use]
    pub(in crate::db::schema) const fn requires_physical_work(&self) -> bool {
        match self.requirement {
            RebuildRequirement::NoRebuildRequired => false,
            RebuildRequirement::IndexRebuildRequired
            | RebuildRequirement::FullDataRewriteRequired
            | RebuildRequirement::Unsupported => true,
        }
    }
    /// Publication blocker carried by this plan, if physical work is pending.
    #[must_use]
    const fn publication_blocker(&self) -> Option<MutationPublicationBlocker> {
        if self.requires_physical_work() {
            Some(MutationPublicationBlocker::RebuildRequired(self.requirement))
        } else {
            None
        }
    }
}
mod runner;
pub(in crate::db::schema) use self::runner::*;
mod field_path;
pub(in crate::db::schema) use self::field_path::*;
#[allow(
    dead_code,
    reason = "0.152 stages runner preflight publication checks before physical runners consume them"
)]
#[derive(Clone, Debug, Eq, PartialEq)]
/// Combined verdict of runner preflight and publication status
/// (see `MutationPlan::publication_preflight`).
pub(in crate::db::schema) enum MutationPublicationPreflight {
    /// No physical work and no blocker: safe to publish immediately.
    PublishableNow,
    /// Runner has every required capability; physical work can start.
    PhysicalWorkReady {
        step_count: usize,
        required: Vec<SchemaMutationRunnerCapability>,
    },
    /// Runner lacks these capabilities; physical work cannot start.
    MissingRunnerCapabilities {
        missing: Vec<SchemaMutationRunnerCapability>,
    },
    /// Execution plan rejected the mutation outright.
    Rejected {
        requirement: RebuildRequirement,
    },
    /// No physical work, but publication is blocked for this reason.
    Blocked(MutationPublicationBlocker),
}
#[allow(
    dead_code,
    reason = "0.152 stages runner preflight contracts before physical runners consume them"
)]
#[derive(Clone, Debug, Eq, PartialEq)]
/// The set of capabilities a physical runner offers; checked against an
/// execution plan's required capabilities during preflight.
pub(in crate::db::schema) struct SchemaMutationRunnerContract {
    /// Deduplicated capability list (see `new`).
    capabilities: Vec<SchemaMutationRunnerCapability>,
}
#[allow(
    dead_code,
    reason = "0.152 stages runner preflight contracts before physical runners consume them"
)]
impl SchemaMutationRunnerContract {
    /// Builds a contract from `capabilities`, dropping duplicates while
    /// keeping first-seen order.
    #[must_use]
    pub(in crate::db::schema) fn new(capabilities: &[SchemaMutationRunnerCapability]) -> Self {
        let mut unique = Vec::new();
        for &capability in capabilities {
            push_runner_capability_once(&mut unique, capability);
        }
        Self {
            capabilities: unique,
        }
    }
    /// Capabilities this runner offers, deduplicated.
    #[must_use]
    pub(in crate::db::schema) const fn capabilities(&self) -> &[SchemaMutationRunnerCapability] {
        self.capabilities.as_slice()
    }
    /// Maps the execution plan's admission verdict into a runner preflight verdict.
    #[must_use]
    pub(in crate::db::schema) fn preflight(
        &self,
        execution_plan: &SchemaMutationExecutionPlan,
    ) -> SchemaMutationRunnerPreflight {
        let admission = execution_plan.admit_runner_capabilities(self.capabilities());
        match admission {
            SchemaMutationExecutionAdmission::PublishableNow => {
                SchemaMutationRunnerPreflight::NoPhysicalWork
            }
            SchemaMutationExecutionAdmission::RunnerReady { required } => {
                // Step count is reported alongside the capabilities the plan needs.
                SchemaMutationRunnerPreflight::Ready {
                    step_count: execution_plan.steps().len(),
                    required,
                }
            }
            SchemaMutationExecutionAdmission::MissingRunnerCapabilities { missing } => {
                SchemaMutationRunnerPreflight::MissingCapabilities { missing }
            }
            SchemaMutationExecutionAdmission::Rejected { requirement } => {
                SchemaMutationRunnerPreflight::Rejected { requirement }
            }
        }
    }
    /// Folds the preflight verdict into a runner outcome with its report.
    #[must_use]
    pub(in crate::db::schema) fn outcome(
        &self,
        execution_plan: &SchemaMutationExecutionPlan,
    ) -> SchemaMutationRunnerOutcome {
        let verdict = self.preflight(execution_plan);
        match verdict {
            SchemaMutationRunnerPreflight::NoPhysicalWork => {
                // Zero steps, no required capabilities, no store visibility change.
                SchemaMutationRunnerOutcome::NoPhysicalWork(
                    SchemaMutationRunnerReport::preflight_ready(0, Vec::new(), None),
                )
            }
            SchemaMutationRunnerPreflight::Ready {
                step_count,
                required,
            } => SchemaMutationRunnerOutcome::ReadyForPhysicalWork(
                SchemaMutationRunnerReport::preflight_ready(
                    step_count,
                    required,
                    Some(SchemaMutationStoreVisibility::StagedOnly),
                ),
            ),
            SchemaMutationRunnerPreflight::MissingCapabilities { missing } => {
                SchemaMutationRunnerOutcome::Rejected(
                    SchemaMutationRunnerRejection::missing_runner_capabilities(
                        execution_plan.physical_requirement(),
                        missing,
                    ),
                )
            }
            SchemaMutationRunnerPreflight::Rejected { requirement } => {
                SchemaMutationRunnerOutcome::Rejected(
                    SchemaMutationRunnerRejection::unsupported_requirement(requirement),
                )
            }
        }
    }
}
#[allow(
    dead_code,
    reason = "0.152 stages execution-boundary contracts before physical runners consume them"
)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
/// Gate derived from an execution plan's readiness
/// (see `SchemaMutationExecutionPlan::execution_gate`).
pub(in crate::db::schema) enum SchemaMutationExecutionGate {
    /// No physical work stands between the plan and publication.
    ReadyToPublish,
    /// A physical runner must execute `step_count` steps first.
    AwaitingPhysicalWork {
        requirement: RebuildRequirement,
        step_count: usize,
    },
    /// The plan cannot be executed at all.
    Rejected {
        requirement: RebuildRequirement,
    },
}
#[allow(
    dead_code,
    reason = "0.152 stages execution-boundary contracts before physical runners consume them"
)]
#[derive(Clone, Debug, Eq, PartialEq)]
/// Readiness verdict plus ordered execution steps, lowered from a
/// `SchemaRebuildPlan` (see `from_rebuild_plan`).
pub(in crate::db::schema) struct SchemaMutationExecutionPlan {
    readiness: SchemaMutationExecutionReadiness,
    steps: Vec<SchemaMutationExecutionStep>,
}
#[allow(
    dead_code,
    reason = "0.152 stages execution-boundary contracts before physical runners consume them"
)]
impl SchemaMutationExecutionPlan {
    /// Plan with no steps: the mutation is publishable without physical work.
    const fn publishable_now() -> Self {
        Self {
            readiness: SchemaMutationExecutionReadiness::PublishableNow,
            steps: Vec::new(),
        }
    }
    /// Lowers a rebuild plan: derives readiness from the rebuild requirement,
    /// maps each rebuild action to an execution step, and appends validation
    /// and runtime-invalidation steps when a physical runner is required.
    fn from_rebuild_plan(rebuild_plan: SchemaRebuildPlan) -> Self {
        // No physical work: publishable immediately with zero steps.
        if !rebuild_plan.requires_physical_work() {
            return Self::publishable_now();
        }
        let readiness = match rebuild_plan.requirement() {
            // Unreachable in practice — handled by the early return above —
            // but kept so the match stays exhaustive without a catch-all.
            RebuildRequirement::NoRebuildRequired => {
                SchemaMutationExecutionReadiness::PublishableNow
            }
            RebuildRequirement::IndexRebuildRequired => {
                SchemaMutationExecutionReadiness::RequiresPhysicalRunner(
                    RebuildRequirement::IndexRebuildRequired,
                )
            }
            // Full rewrites have no execution path yet: surface as unsupported.
            RebuildRequirement::FullDataRewriteRequired | RebuildRequirement::Unsupported => {
                SchemaMutationExecutionReadiness::Unsupported(rebuild_plan.requirement())
            }
        };
        // One execution step per rebuild action, cloning the owned targets.
        let mut steps = rebuild_plan
            .actions()
            .iter()
            .map(|action| match action {
                SchemaRebuildAction::BuildFieldPathIndex { target } => {
                    SchemaMutationExecutionStep::BuildFieldPathIndex {
                        target: target.clone(),
                    }
                }
                SchemaRebuildAction::BuildExpressionIndex { target } => {
                    SchemaMutationExecutionStep::BuildExpressionIndex {
                        target: target.clone(),
                    }
                }
                SchemaRebuildAction::DropSecondaryIndex { target } => {
                    SchemaMutationExecutionStep::DropSecondaryIndex {
                        target: target.clone(),
                    }
                }
                SchemaRebuildAction::RewriteAllRows => SchemaMutationExecutionStep::RewriteAllRows,
                SchemaRebuildAction::Unsupported { reason } => {
                    SchemaMutationExecutionStep::Unsupported { reason }
                }
            })
            .collect::<Vec<_>>();
        // Runner-executed plans always end with validation followed by
        // runtime-state invalidation; unsupported plans get neither.
        if matches!(
            readiness,
            SchemaMutationExecutionReadiness::RequiresPhysicalRunner(_)
        ) {
            steps.push(SchemaMutationExecutionStep::ValidatePhysicalWork);
            steps.push(SchemaMutationExecutionStep::InvalidateRuntimeState);
        }
        Self { readiness, steps }
    }
    /// Readiness verdict for this plan.
    #[must_use]
    pub(in crate::db::schema) const fn readiness(&self) -> SchemaMutationExecutionReadiness {
        self.readiness
    }
    /// Ordered execution steps.
    #[must_use]
    pub(in crate::db::schema) const fn steps(&self) -> &[SchemaMutationExecutionStep] {
        self.steps.as_slice()
    }
    /// Maps readiness into the coarser publish/await/reject gate.
    #[must_use]
    pub(in crate::db::schema) const fn execution_gate(&self) -> SchemaMutationExecutionGate {
        match self.readiness {
            SchemaMutationExecutionReadiness::PublishableNow => {
                SchemaMutationExecutionGate::ReadyToPublish
            }
            SchemaMutationExecutionReadiness::RequiresPhysicalRunner(requirement) => {
                SchemaMutationExecutionGate::AwaitingPhysicalWork {
                    requirement,
                    step_count: self.steps.len(),
                }
            }
            SchemaMutationExecutionReadiness::Unsupported(requirement) => {
                SchemaMutationExecutionGate::Rejected { requirement }
            }
        }
    }
    /// Requirement behind any pending or rejected physical work;
    /// `None` when the plan is ready to publish.
    #[must_use]
    const fn physical_requirement(&self) -> Option<RebuildRequirement> {
        match self.execution_gate() {
            SchemaMutationExecutionGate::ReadyToPublish => None,
            SchemaMutationExecutionGate::AwaitingPhysicalWork { requirement, .. }
            | SchemaMutationExecutionGate::Rejected { requirement } => Some(requirement),
        }
    }
    /// Deduplicated runner capabilities required by this plan's steps, in
    /// first-use order. `Unsupported` steps require no capability.
    #[must_use]
    pub(in crate::db::schema) fn runner_capabilities(&self) -> Vec<SchemaMutationRunnerCapability> {
        let mut capabilities = Vec::new();
        for step in &self.steps {
            let capability = match step {
                SchemaMutationExecutionStep::BuildFieldPathIndex { .. } => {
                    Some(SchemaMutationRunnerCapability::BuildFieldPathIndex)
                }
                SchemaMutationExecutionStep::BuildExpressionIndex { .. } => {
                    Some(SchemaMutationRunnerCapability::BuildExpressionIndex)
                }
                SchemaMutationExecutionStep::DropSecondaryIndex { .. } => {
                    Some(SchemaMutationRunnerCapability::DropSecondaryIndex)
                }
                SchemaMutationExecutionStep::ValidatePhysicalWork => {
                    Some(SchemaMutationRunnerCapability::ValidatePhysicalWork)
                }
                SchemaMutationExecutionStep::InvalidateRuntimeState => {
                    Some(SchemaMutationRunnerCapability::InvalidateRuntimeState)
                }
                SchemaMutationExecutionStep::RewriteAllRows => {
                    Some(SchemaMutationRunnerCapability::RewriteAllRows)
                }
                SchemaMutationExecutionStep::Unsupported { .. } => None,
            };
            if let Some(capability) = capability {
                push_runner_capability_once(&mut capabilities, capability);
            }
        }
        capabilities
    }
    /// Checks `available` runner capabilities against this plan: publishable
    /// plans are admitted immediately, rejected plans stay rejected, and
    /// pending plans are admitted only when nothing required is missing.
    #[must_use]
    pub(in crate::db::schema) fn admit_runner_capabilities(
        &self,
        available: &[SchemaMutationRunnerCapability],
    ) -> SchemaMutationExecutionAdmission {
        match self.execution_gate() {
            SchemaMutationExecutionGate::ReadyToPublish => {
                SchemaMutationExecutionAdmission::PublishableNow
            }
            SchemaMutationExecutionGate::Rejected { requirement } => {
                SchemaMutationExecutionAdmission::Rejected { requirement }
            }
            SchemaMutationExecutionGate::AwaitingPhysicalWork { .. } => {
                let required = self.runner_capabilities();
                // Everything required but not offered by the runner.
                let missing = required
                    .iter()
                    .copied()
                    .filter(|capability| !available.contains(capability))
                    .collect::<Vec<_>>();
                if missing.is_empty() {
                    SchemaMutationExecutionAdmission::RunnerReady { required }
                } else {
                    SchemaMutationExecutionAdmission::MissingRunnerCapabilities { missing }
                }
            }
        }
    }
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
/// Why a plan cannot be published yet (see `MutationPlan::publication_status`).
pub(in crate::db::schema) enum MutationPublicationBlocker {
    /// Compatibility bucket is anything other than `MetadataOnlySafe`.
    NotMetadataSafe(MutationCompatibility),
    /// Rebuild plan still carries physical work.
    RebuildRequired(RebuildRequirement),
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
/// Publication verdict for a `MutationPlan`.
pub(in crate::db::schema) enum MutationPublicationStatus {
    Publishable,
    Blocked(MutationPublicationBlocker),
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
/// Classification of expected vs. actual schema snapshots
/// (see `classify_schema_mutation_delta`).
pub(in crate::db::schema) enum SchemaMutationDelta<'a> {
    /// Expected snapshot only appends these fields.
    AppendOnlyFields(&'a [PersistedFieldSnapshot]),
    /// Snapshots are identical.
    ExactMatch,
    /// Divergence is not expressible as an append-only delta.
    Incompatible,
}
/// Classifies how `expected` diverges from `actual`: identical snapshots are
/// an exact match, an append-only field addition yields the appended fields,
/// and anything else is incompatible.
pub(in crate::db::schema) fn classify_schema_mutation_delta<'a>(
    actual: &PersistedSchemaSnapshot,
    expected: &'a PersistedSchemaSnapshot,
) -> SchemaMutationDelta<'a> {
    if actual == expected {
        return SchemaMutationDelta::ExactMatch;
    }
    match append_only_additive_fields(actual, expected) {
        Some(fields) => SchemaMutationDelta::AppendOnlyFields(fields),
        None => SchemaMutationDelta::Incompatible,
    }
}
/// Classifies the snapshot delta and converts it straight into a
/// `SchemaMutationRequest` via the `From<SchemaMutationDelta>` impl.
pub(in crate::db::schema) fn schema_mutation_request_for_snapshots<'a>(
    actual: &PersistedSchemaSnapshot,
    expected: &'a PersistedSchemaSnapshot,
) -> SchemaMutationRequest<'a> {
    classify_schema_mutation_delta(actual, expected).into()
}
#[derive(Clone, Debug, Eq, PartialEq)]
/// A lowered mutation plan: the mutations themselves plus the compatibility
/// and rebuild buckets that gate publication and execution.
pub(in crate::db::schema) struct MutationPlan {
    mutations: Vec<SchemaMutation>,
    compatibility: MutationCompatibility,
    rebuild: RebuildRequirement,
}
impl MutationPlan {
    /// Empty plan for identical snapshots: metadata-safe, nothing to rebuild.
    pub(in crate::db::schema) const fn exact_match() -> Self {
        Self {
            mutations: Vec::new(),
            compatibility: MutationCompatibility::MetadataOnlySafe,
            rebuild: RebuildRequirement::NoRebuildRequired,
        }
    }
    /// Plan for an append-only field delta: one add-field mutation per
    /// appended field, nullable vs. defaulted chosen by `field.default()`.
    /// Append-only additions are metadata-safe and need no rebuild.
    pub(in crate::db::schema) fn append_only_fields(fields: &[PersistedFieldSnapshot]) -> Self {
        let mutations = fields
            .iter()
            .map(|field| {
                if field.default().is_none() {
                    SchemaMutation::AddNullableField {
                        field_id: field.id(),
                        name: field.name().to_string(),
                        slot: field.slot(),
                    }
                } else {
                    SchemaMutation::AddDefaultedField {
                        field_id: field.id(),
                        name: field.name().to_string(),
                        slot: field.slot(),
                    }
                }
            })
            .collect();
        Self {
            mutations,
            compatibility: MutationCompatibility::MetadataOnlySafe,
            rebuild: RebuildRequirement::NoRebuildRequired,
        }
    }
    /// Plan adding one non-unique field-path index; requires an index rebuild.
    fn non_unique_field_path_index_addition(target: SchemaFieldPathIndexRebuildTarget) -> Self {
        Self {
            mutations: vec![SchemaMutation::AddNonUniqueFieldPathIndex { target }],
            compatibility: MutationCompatibility::RequiresRebuild,
            rebuild: RebuildRequirement::IndexRebuildRequired,
        }
    }
    /// Plan adding one expression index; requires an index rebuild.
    fn expression_index_addition(target: SchemaExpressionIndexRebuildTarget) -> Self {
        Self {
            mutations: vec![SchemaMutation::AddExpressionIndex { target }],
            compatibility: MutationCompatibility::RequiresRebuild,
            rebuild: RebuildRequirement::IndexRebuildRequired,
        }
    }
    /// Plan dropping one secondary index; the cleanup is bucketed as an
    /// index rebuild like the additions above.
    fn secondary_index_drop(target: SchemaSecondaryIndexDropCleanupTarget) -> Self {
        Self {
            mutations: vec![SchemaMutation::DropNonRequiredSecondaryIndex { target }],
            compatibility: MutationCompatibility::RequiresRebuild,
            rebuild: RebuildRequirement::IndexRebuildRequired,
        }
    }
    /// Plan for a nullability change: recorded, but unsupported pre-1.0.
    fn nullability_alteration(field_id: FieldId) -> Self {
        Self {
            mutations: vec![SchemaMutation::AlterNullability { field_id }],
            compatibility: MutationCompatibility::UnsupportedPreOne,
            rebuild: RebuildRequirement::Unsupported,
        }
    }
    /// Plan for an inexpressible delta: no mutations, full rewrite bucket.
    const fn incompatible() -> Self {
        Self {
            mutations: Vec::new(),
            compatibility: MutationCompatibility::Incompatible,
            rebuild: RebuildRequirement::FullDataRewriteRequired,
        }
    }
    /// Mutations carried by this plan.
    #[allow(
        dead_code,
        reason = "mutation diagnostics and DDL lowering will consume this in the next 0.152 slice"
    )]
    #[must_use]
    pub(in crate::db::schema) const fn mutations(&self) -> &[SchemaMutation] {
        self.mutations.as_slice()
    }
    /// Compatibility bucket of this plan.
    #[allow(
        dead_code,
        reason = "mutation diagnostics and DDL lowering will consume this in the next 0.152 slice"
    )]
    #[must_use]
    pub(in crate::db::schema) const fn compatibility(&self) -> MutationCompatibility {
        self.compatibility
    }
    /// Rebuild bucket of this plan.
    #[allow(
        dead_code,
        reason = "mutation diagnostics and DDL lowering will consume this in the next 0.152 slice"
    )]
    #[must_use]
    pub(in crate::db::schema) const fn rebuild_requirement(&self) -> RebuildRequirement {
        self.rebuild
    }
    /// Publication verdict: blocked unless the plan is metadata-only safe
    /// AND its rebuild plan carries no physical work.
    #[must_use]
    pub(in crate::db::schema) fn publication_status(&self) -> MutationPublicationStatus {
        if !matches!(self.compatibility, MutationCompatibility::MetadataOnlySafe) {
            return MutationPublicationStatus::Blocked(
                MutationPublicationBlocker::NotMetadataSafe(self.compatibility),
            );
        }
        if let Some(blocker) = self.rebuild_plan().publication_blocker() {
            return MutationPublicationStatus::Blocked(blocker);
        }
        MutationPublicationStatus::Publishable
    }
    /// Runs the runner's preflight against this plan's execution plan and
    /// folds in `publication_status` for the no-physical-work case.
    #[allow(
        dead_code,
        reason = "0.152 stages runner preflight publication checks before physical runners consume them"
    )]
    #[must_use]
    pub(in crate::db::schema) fn publication_preflight(
        &self,
        runner: &SchemaMutationRunnerContract,
    ) -> MutationPublicationPreflight {
        match runner.preflight(&self.execution_plan()) {
            // No physical work: the publication blocker check still applies.
            SchemaMutationRunnerPreflight::NoPhysicalWork => match self.publication_status() {
                MutationPublicationStatus::Publishable => {
                    MutationPublicationPreflight::PublishableNow
                }
                MutationPublicationStatus::Blocked(blocker) => {
                    MutationPublicationPreflight::Blocked(blocker)
                }
            },
            SchemaMutationRunnerPreflight::Ready {
                step_count,
                required,
            } => MutationPublicationPreflight::PhysicalWorkReady {
                step_count,
                required,
            },
            SchemaMutationRunnerPreflight::MissingCapabilities { missing } => {
                MutationPublicationPreflight::MissingRunnerCapabilities { missing }
            }
            SchemaMutationRunnerPreflight::Rejected { requirement } => {
                MutationPublicationPreflight::Rejected { requirement }
            }
        }
    }
    /// Derives the rebuild plan: one action per rebuild-relevant mutation,
    /// preserving mutation order. Plans that need work but produced no
    /// targeted action get a single fallback action for their bucket.
    #[must_use]
    pub(in crate::db::schema) fn rebuild_plan(&self) -> SchemaRebuildPlan {
        if matches!(self.rebuild, RebuildRequirement::NoRebuildRequired) {
            return SchemaRebuildPlan::no_rebuild();
        }
        let mut actions = Vec::new();
        for mutation in &self.mutations {
            match mutation {
                // Field additions are metadata-only: no physical action.
                SchemaMutation::AddNullableField { .. }
                | SchemaMutation::AddDefaultedField { .. } => {}
                SchemaMutation::AddNonUniqueFieldPathIndex { target } => {
                    actions.push(SchemaRebuildAction::BuildFieldPathIndex {
                        target: target.clone(),
                    });
                }
                SchemaMutation::AddExpressionIndex { target } => {
                    actions.push(SchemaRebuildAction::BuildExpressionIndex {
                        target: target.clone(),
                    });
                }
                SchemaMutation::DropNonRequiredSecondaryIndex { target } => {
                    actions.push(SchemaRebuildAction::DropSecondaryIndex {
                        target: target.clone(),
                    });
                }
                SchemaMutation::AlterNullability { .. } => {
                    actions.push(SchemaRebuildAction::Unsupported {
                        reason: "alter nullability requires data proof or rewrite",
                    });
                }
            }
        }
        // Fallback action keyed on the rebuild bucket when no mutation
        // contributed one (e.g. `incompatible()` plans have no mutations).
        if actions.is_empty() {
            actions.push(match self.rebuild {
                RebuildRequirement::FullDataRewriteRequired => SchemaRebuildAction::RewriteAllRows,
                RebuildRequirement::Unsupported => SchemaRebuildAction::Unsupported {
                    reason: "unsupported schema mutation",
                },
                RebuildRequirement::IndexRebuildRequired => SchemaRebuildAction::Unsupported {
                    reason: "index rebuild mutation lacks an index target",
                },
                RebuildRequirement::NoRebuildRequired => {
                    unreachable!("no-rebuild plans returned before rebuild action derivation",)
                }
            });
        }
        SchemaRebuildPlan::new(self.rebuild, actions)
    }
    /// Lowers the rebuild plan into an execution plan.
    #[allow(
        dead_code,
        reason = "0.152 stages execution-boundary contracts before physical runners consume them"
    )]
    #[must_use]
    pub(in crate::db::schema) fn execution_plan(&self) -> SchemaMutationExecutionPlan {
        SchemaMutationExecutionPlan::from_rebuild_plan(self.rebuild_plan())
    }
    /// Test helper: number of add-field mutations in this plan.
    #[cfg(test)]
    pub(in crate::db::schema) fn added_field_count(&self) -> usize {
        self.mutations
            .iter()
            .filter(|mutation| {
                matches!(
                    mutation,
                    SchemaMutation::AddNullableField { .. }
                        | SchemaMutation::AddDefaultedField { .. }
                )
            })
            .count()
    }
    /// Audit identity of this plan: SHA-256 over the profile tag, both bucket
    /// tags, the mutation count, and every mutation in order, truncated to
    /// its first 16 bytes. Hash input order is part of the identity contract.
    #[allow(
        dead_code,
        reason = "0.152 stages mutation audit identity before diagnostics expose it"
    )]
    pub(in crate::db::schema) fn fingerprint(&self) -> [u8; 16] {
        let mut hasher = new_hash_sha256_prefixed(SCHEMA_MUTATION_FINGERPRINT_PROFILE_TAG);
        write_hash_tag_u8(&mut hasher, self.compatibility.tag());
        write_hash_tag_u8(&mut hasher, self.rebuild.tag());
        // Length is hashed before elements so concatenations cannot collide.
        write_hash_u32(
            &mut hasher,
            u32::try_from(self.mutations.len()).unwrap_or(u32::MAX),
        );
        for mutation in &self.mutations {
            mutation.hash_into(&mut hasher);
        }
        let digest = finalize_hash_sha256(hasher);
        let mut fingerprint = [0u8; 16];
        fingerprint.copy_from_slice(&digest[..16]);
        fingerprint
    }
}
impl SchemaMutationRequest<'_> {
    /// Lowers an accepted non-unique field-path index snapshot into an
    /// `AddNonUniqueFieldPathIndex` request. Fails closed on unique indexes,
    /// non-field-path key shapes, and empty keys.
    #[allow(
        dead_code,
        reason = "0.152 stages accepted index mutation lowering before DDL/rebuild callers use it"
    )]
    pub(in crate::db::schema) fn from_accepted_non_unique_field_path_index(
        index: &PersistedIndexSnapshot,
    ) -> Result<Self, AcceptedSchemaMutationError> {
        // Unique indexes need their own duplicate validation path.
        if index.unique() {
            return Err(AcceptedSchemaMutationError::UniqueIndexRequiresDedicatedValidation);
        }
        let PersistedIndexKeySnapshot::FieldPath(paths) = index.key() else {
            return Err(AcceptedSchemaMutationError::UnsupportedIndexKeyShape);
        };
        if paths.is_empty() {
            return Err(AcceptedSchemaMutationError::EmptyIndexKey);
        }
        let mut key_paths = Vec::with_capacity(paths.len());
        for path in paths.iter() {
            key_paths.push(field_path_rebuild_key(path));
        }
        let target = SchemaFieldPathIndexRebuildTarget {
            ordinal: index.ordinal(),
            name: index.name().to_string(),
            store: index.store().to_string(),
            unique: index.unique(),
            predicate_sql: index.predicate_sql().map(str::to_string),
            key_paths,
        };
        Ok(Self::AddNonUniqueFieldPathIndex { target })
    }
    /// Lowers an accepted expression index snapshot into an
    /// `AddExpressionIndex` request. Fails closed on unique indexes, non-item
    /// key shapes, empty keys, and keys without a single expression item.
    #[allow(
        dead_code,
        reason = "0.152 stages accepted expression-index mutation lowering before DDL/rebuild callers use it"
    )]
    pub(in crate::db::schema) fn from_accepted_expression_index(
        index: &PersistedIndexSnapshot,
    ) -> Result<Self, AcceptedSchemaMutationError> {
        if index.unique() {
            return Err(AcceptedSchemaMutationError::UniqueIndexRequiresDedicatedValidation);
        }
        let PersistedIndexKeySnapshot::Items(items) = index.key() else {
            return Err(AcceptedSchemaMutationError::UnsupportedIndexKeyShape);
        };
        if items.is_empty() {
            return Err(AcceptedSchemaMutationError::EmptyIndexKey);
        }
        let mut has_expression = false;
        let mut key_items = Vec::with_capacity(items.len());
        for item in items.iter() {
            match item {
                PersistedIndexKeyItemSnapshot::FieldPath(path) => {
                    key_items.push(SchemaExpressionIndexRebuildKey::FieldPath(
                        field_path_rebuild_key(path),
                    ));
                }
                PersistedIndexKeyItemSnapshot::Expression(expression) => {
                    has_expression = true;
                    key_items.push(SchemaExpressionIndexRebuildKey::Expression(Box::new(
                        SchemaExpressionIndexRebuildExpression {
                            op: expression.op(),
                            source: field_path_rebuild_key(expression.source()),
                            input_kind: expression.input_kind().clone(),
                            output_kind: expression.output_kind().clone(),
                            canonical_text: expression.canonical_text().to_string(),
                        },
                    )));
                }
            }
        }
        // An "expression index" whose key is all field paths is rejected.
        if !has_expression {
            return Err(AcceptedSchemaMutationError::ExpressionIndexRequiresExpressionKey);
        }
        let target = SchemaExpressionIndexRebuildTarget {
            ordinal: index.ordinal(),
            name: index.name().to_string(),
            store: index.store().to_string(),
            unique: index.unique(),
            predicate_sql: index.predicate_sql().map(str::to_string),
            key_items,
        };
        Ok(Self::AddExpressionIndex { target })
    }
    /// Lowers an accepted secondary-index drop into a
    /// `DropNonRequiredSecondaryIndex` request; unique indexes are rejected.
    #[allow(
        dead_code,
        reason = "0.152 stages accepted index cleanup lowering before DDL/rebuild callers use it"
    )]
    pub(in crate::db::schema) fn from_accepted_non_unique_secondary_index_drop(
        index: &PersistedIndexSnapshot,
    ) -> Result<Self, AcceptedSchemaMutationError> {
        if index.unique() {
            return Err(AcceptedSchemaMutationError::UniqueIndexRequiresDedicatedValidation);
        }
        let target = SchemaSecondaryIndexDropCleanupTarget {
            ordinal: index.ordinal(),
            name: index.name().to_string(),
            store: index.store().to_string(),
            unique: index.unique(),
            predicate_sql: index.predicate_sql().map(str::to_string),
        };
        Ok(Self::DropNonRequiredSecondaryIndex { target })
    }
    /// Lowers this request into its `MutationPlan`, moving any owned target.
    #[must_use]
    pub(in crate::db::schema) fn lower_to_plan(self) -> MutationPlan {
        match self {
            Self::ExactMatch => MutationPlan::exact_match(),
            Self::Incompatible => MutationPlan::incompatible(),
            Self::AppendOnlyFields(fields) => MutationPlan::append_only_fields(fields),
            Self::AddNonUniqueFieldPathIndex { target } => {
                MutationPlan::non_unique_field_path_index_addition(target)
            }
            Self::AddExpressionIndex { target } => MutationPlan::expression_index_addition(target),
            Self::DropNonRequiredSecondaryIndex { target } => {
                MutationPlan::secondary_index_drop(target)
            }
            Self::AlterNullability { field_id } => MutationPlan::nullability_alteration(field_id),
        }
    }
}
/// A snapshot delta maps one-to-one onto the corresponding request variant.
impl<'a> From<SchemaMutationDelta<'a>> for SchemaMutationRequest<'a> {
    fn from(delta: SchemaMutationDelta<'a>) -> Self {
        match delta {
            SchemaMutationDelta::ExactMatch => Self::ExactMatch,
            SchemaMutationDelta::Incompatible => Self::Incompatible,
            SchemaMutationDelta::AppendOnlyFields(fields) => Self::AppendOnlyFields(fields),
        }
    }
}
impl SchemaMutation {
    /// Feeds this mutation into the fingerprint hasher: a fixed per-variant
    /// tag (1-6) followed by the variant's payload. The tag values and the
    /// write order are part of the fingerprint contract — do not reorder.
    #[allow(
        dead_code,
        reason = "used by mutation fingerprint tests until audit identity is surfaced in diagnostics"
    )]
    fn hash_into(&self, hasher: &mut sha2::Sha256) {
        match self {
            Self::AddNullableField {
                field_id,
                name,
                slot,
            } => {
                write_hash_tag_u8(hasher, 1);
                hash_field_identity(hasher, *field_id, name, *slot);
            }
            Self::AddDefaultedField {
                field_id,
                name,
                slot,
            } => {
                write_hash_tag_u8(hasher, 2);
                hash_field_identity(hasher, *field_id, name, *slot);
            }
            Self::AddNonUniqueFieldPathIndex { target } => {
                write_hash_tag_u8(hasher, 3);
                target.hash_into(hasher);
            }
            Self::AddExpressionIndex { target } => {
                write_hash_tag_u8(hasher, 4);
                target.hash_into(hasher);
            }
            Self::DropNonRequiredSecondaryIndex { target } => {
                write_hash_tag_u8(hasher, 5);
                target.hash_into(hasher);
            }
            Self::AlterNullability { field_id } => {
                write_hash_tag_u8(hasher, 6);
                write_hash_u32(hasher, field_id.get());
            }
        }
    }
}
impl MutationCompatibility {
    /// Stable one-byte discriminant used when hashing compatibility outcomes.
    #[allow(
        dead_code,
        reason = "used by mutation fingerprint tests until audit identity is surfaced in diagnostics"
    )]
    const fn tag(self) -> u8 {
        // Values feed the fingerprint format; never renumber them.
        match self {
            Self::Incompatible => 4,
            Self::UnsupportedPreOne => 3,
            Self::RequiresRebuild => 2,
            Self::MetadataOnlySafe => 1,
        }
    }
}
impl RebuildRequirement {
    /// Stable one-byte discriminant used when hashing rebuild requirements.
    #[allow(
        dead_code,
        reason = "used by mutation fingerprint tests until audit identity is surfaced in diagnostics"
    )]
    const fn tag(self) -> u8 {
        // Values feed the fingerprint format; never renumber them.
        match self {
            Self::Unsupported => 4,
            Self::FullDataRewriteRequired => 3,
            Self::IndexRebuildRequired => 2,
            Self::NoRebuildRequired => 1,
        }
    }
}
impl SchemaFieldPathIndexRebuildTarget {
    /// Folds this rebuild target's identity into `hasher`.
    ///
    /// The write order (ordinal, name, store, uniqueness, predicate, key
    /// paths) is part of the fingerprint format and must stay fixed.
    #[allow(
        dead_code,
        reason = "used by mutation fingerprint tests until audit identity is surfaced in diagnostics"
    )]
    fn hash_into(&self, hasher: &mut sha2::Sha256) {
        write_hash_u32(hasher, u32::from(self.ordinal));
        write_hash_str_u32(hasher, &self.name);
        write_hash_str_u32(hasher, &self.store);
        write_hash_bool(hasher, self.unique);
        // Presence tag (1/0) keeps a `Some("")` predicate distinct from `None`.
        if let Some(predicate_sql) = self.predicate_sql.as_deref() {
            write_hash_tag_u8(hasher, 1);
            write_hash_str_u32(hasher, predicate_sql);
        } else {
            write_hash_tag_u8(hasher, 0);
        }
        // Length prefix, then every key path in declaration order.
        let key_path_count = u32::try_from(self.key_paths.len()).unwrap_or(u32::MAX);
        write_hash_u32(hasher, key_path_count);
        for key_path in &self.key_paths {
            key_path.hash_into(hasher);
        }
    }
}
impl SchemaFieldPathIndexRebuildKey {
    /// Folds one field-path key's identity into `hasher`:
    /// field id, slot, path segments (length-prefixed), kind, nullability.
    #[allow(
        dead_code,
        reason = "used by mutation fingerprint tests until audit identity is surfaced in diagnostics"
    )]
    fn hash_into(&self, hasher: &mut sha2::Sha256) {
        write_hash_u32(hasher, self.field_id.get());
        write_hash_u32(hasher, u32::from(self.slot.get()));
        // Length prefix, then each path segment in order.
        let segment_count = u32::try_from(self.path.len()).unwrap_or(u32::MAX);
        write_hash_u32(hasher, segment_count);
        for segment in &self.path {
            write_hash_str_u32(hasher, segment);
        }
        // NOTE(review): the kind is hashed via its `Debug` rendering, so this
        // fingerprint is only stable while that rendering is unchanged —
        // confirm that is acceptable for cross-version identity.
        write_hash_str_u32(hasher, &format!("{:?}", self.kind));
        write_hash_bool(hasher, self.nullable);
    }
}
impl SchemaExpressionIndexRebuildTarget {
    /// Folds this expression-index rebuild target's identity into `hasher`.
    ///
    /// The write order (ordinal, name, store, uniqueness, predicate, key
    /// items) is part of the fingerprint format and must stay fixed.
    #[allow(
        dead_code,
        reason = "used by mutation fingerprint tests until audit identity is surfaced in diagnostics"
    )]
    fn hash_into(&self, hasher: &mut sha2::Sha256) {
        write_hash_u32(hasher, u32::from(self.ordinal));
        write_hash_str_u32(hasher, &self.name);
        write_hash_str_u32(hasher, &self.store);
        write_hash_bool(hasher, self.unique);
        // Presence tag (1/0) keeps a `Some("")` predicate distinct from `None`.
        match self.predicate_sql.as_deref() {
            Some(predicate_sql) => {
                write_hash_tag_u8(hasher, 1);
                write_hash_str_u32(hasher, predicate_sql);
            }
            None => write_hash_tag_u8(hasher, 0),
        }
        // Length prefix, then every key item in declaration order.
        let key_item_count = u32::try_from(self.key_items.len()).unwrap_or(u32::MAX);
        write_hash_u32(hasher, key_item_count);
        self.key_items
            .iter()
            .for_each(|key_item| key_item.hash_into(hasher));
    }
}
impl SchemaExpressionIndexRebuildKey {
    /// Folds one key item into `hasher`, domain-separated by a one-byte tag
    /// (1 = field path, 2 = expression) so the two shapes never collide.
    #[allow(
        dead_code,
        reason = "used by mutation fingerprint tests until audit identity is surfaced in diagnostics"
    )]
    fn hash_into(&self, hasher: &mut sha2::Sha256) {
        match self {
            Self::Expression(expression) => {
                write_hash_tag_u8(hasher, 2);
                expression.hash_into(hasher);
            }
            Self::FieldPath(path) => {
                write_hash_tag_u8(hasher, 1);
                path.hash_into(hasher);
            }
        }
    }
}
impl SchemaExpressionIndexRebuildExpression {
    /// Folds this derived-expression key's identity into `hasher`:
    /// operation discriminant, source key, input/output kinds, canonical text.
    /// The write order is part of the fingerprint format and must stay fixed.
    #[allow(
        dead_code,
        reason = "used by mutation fingerprint tests until audit identity is surfaced in diagnostics"
    )]
    fn hash_into(&self, hasher: &mut sha2::Sha256) {
        // Operation discriminant first, then the source the expression applies to.
        write_hash_u32(hasher, self.op as u32);
        self.source.hash_into(hasher);
        // NOTE(review): kinds are hashed via their `Debug` rendering, so this
        // fingerprint is only stable while that rendering is unchanged —
        // confirm that is acceptable for cross-version identity.
        write_hash_str_u32(hasher, &format!("{:?}", self.input_kind));
        write_hash_str_u32(hasher, &format!("{:?}", self.output_kind));
        write_hash_str_u32(hasher, &self.canonical_text);
    }
}
impl SchemaSecondaryIndexDropCleanupTarget {
    /// Folds this drop-cleanup target's identity into `hasher`.
    ///
    /// The write order (ordinal, name, store, uniqueness, predicate) is part
    /// of the fingerprint format and must stay fixed.
    #[allow(
        dead_code,
        reason = "used by mutation fingerprint tests until audit identity is surfaced in diagnostics"
    )]
    fn hash_into(&self, hasher: &mut sha2::Sha256) {
        write_hash_u32(hasher, u32::from(self.ordinal));
        write_hash_str_u32(hasher, &self.name);
        write_hash_str_u32(hasher, &self.store);
        write_hash_bool(hasher, self.unique);
        // Presence tag (1/0) keeps a `Some("")` predicate distinct from `None`.
        if let Some(predicate_sql) = self.predicate_sql.as_deref() {
            write_hash_tag_u8(hasher, 1);
            write_hash_str_u32(hasher, predicate_sql);
        } else {
            write_hash_tag_u8(hasher, 0);
        }
    }
}
#[allow(
    dead_code,
    reason = "used by mutation fingerprint tests until audit identity is surfaced in diagnostics"
)]
/// Hashes the identity triple shared by the field-addition mutations:
/// field id, then name, then row-layout slot. The write order is part of
/// the fingerprint format and must stay fixed.
fn hash_field_identity(
    hasher: &mut sha2::Sha256,
    field_id: FieldId,
    name: &str,
    slot: SchemaFieldSlot,
) {
    write_hash_u32(hasher, field_id.get());
    write_hash_str_u32(hasher, name);
    write_hash_u32(hasher, u32::from(slot.get()));
}
#[allow(
    dead_code,
    reason = "used by mutation fingerprint tests until audit identity is surfaced in diagnostics"
)]
/// Hashes a boolean through the one-byte tag channel: `true` → 1, `false` → 0.
fn write_hash_bool(hasher: &mut sha2::Sha256, value: bool) {
    write_hash_tag_u8(hasher, if value { 1 } else { 0 });
}
/// Projects a persisted field-path snapshot into its rebuild-key form,
/// taking owned copies of the path segments and kind.
fn field_path_rebuild_key(
    path: &PersistedIndexFieldPathSnapshot,
) -> SchemaFieldPathIndexRebuildKey {
    SchemaFieldPathIndexRebuildKey {
        nullable: path.nullable(),
        kind: path.kind().clone(),
        path: path.path().to_vec(),
        slot: path.slot(),
        field_id: path.field_id(),
    }
}
#[allow(
    dead_code,
    reason = "0.152 stages runner capability contracts before physical runners consume them"
)]
/// Appends `capability` to `capabilities` unless an equal entry is already
/// present, preserving insertion order.
fn push_runner_capability_once(
    capabilities: &mut Vec<SchemaMutationRunnerCapability>,
    capability: SchemaMutationRunnerCapability,
) {
    // Linear scan; capability lists are expected to stay small.
    let already_present = capabilities.iter().any(|existing| *existing == capability);
    if !already_present {
        capabilities.push(capability);
    }
}
#[allow(
    dead_code,
    reason = "0.153 stages runtime epoch identity before physical runners publish snapshots"
)]
/// Derives a 16-byte runtime-epoch fingerprint for a schema snapshot.
///
/// The digest runs under the epoch profile tag and covers the entity path,
/// the schema version, and the full encoded snapshot (length-prefixed);
/// the SHA-256 output is truncated to its first 16 bytes.
///
/// # Errors
/// Propagates any failure from `encode_persisted_schema_snapshot`.
fn runtime_epoch_fingerprint(
    snapshot: &PersistedSchemaSnapshot,
) -> Result<[u8; 16], InternalError> {
    let encoded_snapshot = encode_persisted_schema_snapshot(snapshot)?;
    let encoded_len = u32::try_from(encoded_snapshot.len()).unwrap_or(u32::MAX);
    let mut hasher = new_hash_sha256_prefixed(SCHEMA_MUTATION_RUNTIME_EPOCH_PROFILE_TAG);
    write_hash_str_u32(&mut hasher, snapshot.entity_path());
    write_hash_u32(&mut hasher, snapshot.version().get());
    write_hash_u32(&mut hasher, encoded_len);
    hasher.update(encoded_snapshot);
    // Truncate the 32-byte digest to a compact 16-byte epoch identity.
    let digest = finalize_hash_sha256(hasher);
    let mut fingerprint = [0u8; 16];
    fingerprint.copy_from_slice(&digest[..16]);
    Ok(fingerprint)
}
/// Returns the trailing fields `expected` adds on top of `actual`, or `None`
/// when the change is not purely additive.
///
/// Additive means: both the field list and the row-layout field→slot pairs
/// have strictly grown, and every existing entry of `actual` equals the
/// corresponding entry of `expected`.
///
/// NOTE(review): the field→slot comparison pairs entries in iteration order —
/// assumes appended fields always iterate after existing ones; confirm for
/// the concrete `field_to_slot` container.
fn append_only_additive_fields<'a>(
    actual: &PersistedSchemaSnapshot,
    expected: &'a PersistedSchemaSnapshot,
) -> Option<&'a [PersistedFieldSnapshot]> {
    let known_field_count = actual.fields().len();
    // Both the field list and the slot map must have strictly grown.
    if known_field_count >= expected.fields().len()
        || actual.row_layout().field_to_slot().len()
            >= expected.row_layout().field_to_slot().len()
    {
        return None;
    }
    // Existing fields must be unchanged and in the same positions.
    let fields_are_prefix = actual
        .fields()
        .iter()
        .zip(expected.fields())
        .all(|(ours, theirs)| ours == theirs);
    // Existing slot assignments must likewise be unchanged.
    let layout_is_prefix = actual
        .row_layout()
        .field_to_slot()
        .iter()
        .zip(expected.row_layout().field_to_slot())
        .all(|(ours, theirs)| ours == theirs);
    if fields_are_prefix && layout_is_prefix {
        Some(&expected.fields()[known_field_count..])
    } else {
        None
    }
}
#[cfg(test)]
mod tests;