use crate::engine::Context;
use crate::parsing::ast::{
self as ast, DateTimeValue, LemmaFact, LemmaRule, LemmaSpec, MetaValue, ParentType, Span,
TypeDef, Value,
};
use crate::parsing::source::Source;
use crate::planning::semantics::{
conversion_target_to_semantic, primitive_boolean, primitive_date, primitive_duration,
primitive_number, primitive_ratio, primitive_text, primitive_time, type_spec_for_primitive,
value_to_semantic, ArithmeticComputation, ComparisonComputation, Expression, ExpressionKind,
FactData, FactPath, LemmaType, LiteralValue, PathSegment, RulePath, SemanticConversionTarget,
TypeDefiningSpec, TypeExtends, TypeSpecification, ValueKind,
};
use crate::planning::types::{PerSliceTypeResolver, ResolvedSpecTypes};
use crate::planning::validation::{
validate_spec_interfaces, validate_type_specifications, RuleEntryForBindingCheck,
};
use crate::Error;
use ast::FactValue as ParsedFactValue;
use indexmap::IndexMap;
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque};
use std::sync::Arc;
/// Binding key (enclosing fact-path segments + the binding's own segments +
/// fact name) mapped to the bound value and the source location it came from.
type FactBindings = HashMap<Vec<String>, (ParsedFactValue, Source)>;
/// Fully-built dependency graph for one plan: resolved facts, compiled rules,
/// and a topologically sorted execution order, rooted at `main_spec`.
#[derive(Debug)]
pub(crate) struct Graph {
    // Spec the graph was built from; also attached to emitted errors.
    main_spec: Arc<LemmaSpec>,
    // Resolved fact data keyed by path; insertion order is preserved.
    facts: IndexMap<FactPath, FactData>,
    // Compiled rule nodes keyed by path (BTreeMap for deterministic order).
    rules: BTreeMap<RulePath, RuleNode>,
    // Rules in dependency order; filled in by `validate` on success.
    execution_order: Vec<RulePath>,
}
impl Graph {
    /// Read-only view of the resolved facts, in insertion order.
    pub(crate) fn facts(&self) -> &IndexMap<FactPath, FactData> {
        &self.facts
    }

    /// Read-only view of the compiled rule nodes, ordered by path.
    pub(crate) fn rules(&self) -> &BTreeMap<RulePath, RuleNode> {
        &self.rules
    }

    /// Mutable view of the rule nodes.
    pub(crate) fn rules_mut(&mut self) -> &mut BTreeMap<RulePath, RuleNode> {
        &mut self.rules
    }

    /// Rules in dependency (topological) order.
    pub(crate) fn execution_order(&self) -> &[RulePath] {
        &self.execution_order
    }

    /// The spec this graph was built from.
    pub(crate) fn main_spec(&self) -> &Arc<LemmaSpec> {
        &self.main_spec
    }

    /// Rebuilds the fact map: materializes schema defaults for type-only
    /// declarations (when the type has a default) and coerces every literal
    /// to its schema type. Spec references pass through unchanged.
    pub(crate) fn build_facts(&self) -> IndexMap<FactPath, FactData> {
        // Split the current facts into schema types, literal values, and
        // nested spec references.
        let mut schema: HashMap<FactPath, LemmaType> = HashMap::new();
        let mut values: HashMap<FactPath, LiteralValue> = HashMap::new();
        let mut spec_arcs: HashMap<FactPath, Arc<LemmaSpec>> = HashMap::new();
        for (path, rfv) in self.facts.iter() {
            match rfv {
                FactData::Value { value, .. } => {
                    values.insert(path.clone(), value.clone());
                    schema.insert(path.clone(), value.lemma_type.clone());
                }
                FactData::TypeDeclaration { resolved_type, .. } => {
                    schema.insert(path.clone(), resolved_type.clone());
                }
                FactData::SpecRef { spec: spec_arc, .. } => {
                    spec_arcs.insert(path.clone(), Arc::clone(spec_arc));
                }
            }
        }
        // Type-only facts gain their schema default, when one exists; track
        // which paths were defaulted so `is_default` can be set below.
        let mut default_paths: HashSet<FactPath> = HashSet::new();
        for (path, schema_type) in &schema {
            if values.contains_key(path) {
                continue;
            }
            if let Some(default_value) = schema_type.create_default_value() {
                values.insert(path.clone(), default_value);
                default_paths.insert(path.clone());
            }
        }
        // Coerce every literal to its schema type. Incompatibilities were
        // rejected during validation, hence the unreachable! on failure.
        for (path, value) in values.iter_mut() {
            let Some(schema_type) = schema.get(path).cloned() else {
                continue;
            };
            match Self::coerce_literal_to_schema_type(value, &schema_type) {
                Ok(coerced) => *value = coerced,
                Err(msg) => unreachable!("Fact {} incompatible: {}", path, msg),
            }
        }
        // Reassemble the map in the original insertion order.
        let mut facts = IndexMap::new();
        for (path, rfv) in &self.facts {
            let source = rfv.source().clone();
            if let Some(spec_arc) = spec_arcs.remove(path) {
                let resolved_plan_hash = match rfv {
                    FactData::SpecRef {
                        resolved_plan_hash, ..
                    } => resolved_plan_hash.clone(),
                    _ => unreachable!("spec_arcs only populated from SpecRef variants"),
                };
                facts.insert(
                    path.clone(),
                    FactData::SpecRef {
                        spec: spec_arc,
                        source,
                        resolved_plan_hash,
                    },
                );
            } else if let Some(value) = values.remove(path) {
                let is_default = default_paths.contains(path);
                facts.insert(
                    path.clone(),
                    FactData::Value {
                        value,
                        source,
                        is_default,
                    },
                );
            } else {
                // Type-only fact whose type produced no default value.
                let resolved_type = schema
                    .get(path)
                    .cloned()
                    .expect("non-spec-ref fact has schema (value or type-only)");
                facts.insert(
                    path.clone(),
                    FactData::TypeDeclaration {
                        resolved_type,
                        source,
                    },
                );
            }
        }
        facts
    }

    /// Coerces `lit` so it carries `schema_type` when the value kind is
    /// compatible; a bare number may additionally be lifted into a ratio.
    /// Returns an error message when the kinds are incompatible.
    fn coerce_literal_to_schema_type(
        lit: &LiteralValue,
        schema_type: &LemmaType,
    ) -> Result<LiteralValue, String> {
        // Identical specifications: just adopt the (possibly renamed) type.
        if lit.lemma_type.specifications == schema_type.specifications {
            let mut out = lit.clone();
            out.lemma_type = schema_type.clone();
            return Ok(out);
        }
        match (&schema_type.specifications, &lit.value) {
            // Matching value kind: adopt the schema type as-is.
            (TypeSpecification::Number { .. }, ValueKind::Number(_))
            | (TypeSpecification::Text { .. }, ValueKind::Text(_))
            | (TypeSpecification::Boolean { .. }, ValueKind::Boolean(_))
            | (TypeSpecification::Date { .. }, ValueKind::Date(_))
            | (TypeSpecification::Time { .. }, ValueKind::Time(_))
            | (TypeSpecification::Duration { .. }, ValueKind::Duration(_, _))
            | (TypeSpecification::Ratio { .. }, ValueKind::Ratio(_, _))
            | (TypeSpecification::Scale { .. }, ValueKind::Scale(_, _)) => {
                let mut out = lit.clone();
                out.lemma_type = schema_type.clone();
                Ok(out)
            }
            // A plain number can be promoted to a ratio of the schema type.
            (TypeSpecification::Ratio { .. }, ValueKind::Number(n)) => {
                Ok(LiteralValue::ratio_with_type(*n, None, schema_type.clone()))
            }
            _ => Err(format!(
                "value {} cannot be used as type {}",
                lit,
                schema_type.name()
            )),
        }
    }

    /// Kahn's algorithm over the rule dependency graph. Returns the rules in
    /// execution order, or one error per rule caught in a dependency cycle.
    fn topological_sort(&self) -> Result<Vec<RulePath>, Vec<Error>> {
        let mut in_degree: BTreeMap<RulePath, usize> = BTreeMap::new();
        let mut dependents: BTreeMap<RulePath, Vec<RulePath>> = BTreeMap::new();
        let mut queue = VecDeque::new();
        let mut result = Vec::new();
        for rule_path in self.rules.keys() {
            in_degree.insert(rule_path.clone(), 0);
            dependents.insert(rule_path.clone(), Vec::new());
        }
        for (rule_path, rule_node) in &self.rules {
            for dependency in &rule_node.depends_on_rules {
                // Dependencies not present in the graph are skipped here;
                // they are reported by the reference-existence check instead.
                if self.rules.contains_key(dependency) {
                    if let Some(degree) = in_degree.get_mut(rule_path) {
                        *degree += 1;
                    }
                    if let Some(deps) = dependents.get_mut(dependency) {
                        deps.push(rule_path.clone());
                    }
                }
            }
        }
        for (rule_path, degree) in &in_degree {
            if *degree == 0 {
                queue.push_back(rule_path.clone());
            }
        }
        while let Some(rule_path) = queue.pop_front() {
            result.push(rule_path.clone());
            if let Some(dependent_rules) = dependents.get(&rule_path) {
                for dependent in dependent_rules {
                    if let Some(degree) = in_degree.get_mut(dependent) {
                        *degree -= 1;
                        if *degree == 0 {
                            queue.push_back(dependent.clone());
                        }
                    }
                }
            }
        }
        // Any rule never emitted is part of (or downstream of) a cycle.
        if result.len() != self.rules.len() {
            // Set lookup instead of the previous O(n^2) Vec::contains scan.
            // RulePath is Ord (it keys BTreeMaps), so BTreeSet is safe here.
            let completed: BTreeSet<&RulePath> = result.iter().collect();
            let missing: Vec<RulePath> = self
                .rules
                .keys()
                .filter(|rule| !completed.contains(rule))
                .cloned()
                .collect();
            let cycle: Vec<Source> = missing
                .iter()
                .filter_map(|rule| self.rules.get(rule).map(|n| n.source.clone()))
                .collect();
            if cycle.is_empty() {
                unreachable!(
                    "BUG: circular dependency detected but no sources could be collected ({} missing rules)",
                    missing.len()
                );
            }
            let rules_involved: String = missing
                .iter()
                .map(|rp| rp.rule.as_str())
                .collect::<Vec<_>>()
                .join(", ");
            let message = format!("Circular dependency (rules: {})", rules_involved);
            let errors: Vec<Error> = cycle
                .into_iter()
                .map(|source| {
                    Error::validation_with_context(
                        message.clone(),
                        Some(source),
                        None::<String>,
                        Some(Arc::clone(&self.main_spec)),
                        None,
                    )
                })
                .collect();
            return Err(errors);
        }
        Ok(result)
    }
}
/// One compiled rule: its expression branches plus the metadata needed for
/// ordering, typing, and error reporting.
#[derive(Debug)]
pub(crate) struct RuleNode {
    // (condition, result) pairs. The first branch is the unconditional main
    // expression (condition = None); the rest come from `unless` clauses.
    pub branches: Vec<(Option<Expression>, Expression)>,
    // Where the rule was written, for diagnostics.
    pub source: Source,
    // Rules referenced by this rule's expressions.
    pub depends_on_rules: BTreeSet<RulePath>,
    // Result type; starts as the veto type and is replaced once rule types
    // are inferred (see `Graph::validate` / `apply_inferred_types`).
    pub rule_type: LemmaType,
    // Name of the spec the rule was defined in.
    pub spec_name: String,
}
/// Per-spec resolved type tables, keyed by the spec instance.
type ResolvedTypesMap = HashMap<Arc<LemmaSpec>, ResolvedSpecTypes>;
/// Mutable state threaded through graph construction. Diagnostics are
/// accumulated in `errors` rather than aborting the build early.
struct GraphBuilder<'a> {
    facts: IndexMap<FactPath, FactData>,
    rules: BTreeMap<RulePath, RuleNode>,
    // Keyed by attribute name; only key presence is checked here (see
    // `spec_source`) — values are not read by this builder.
    sources: HashMap<String, String>,
    context: &'a Context,
    // Resolved type tables per spec; extended as inline types resolve.
    local_types: ResolvedTypesMap,
    errors: Vec<Error>,
    // Optional "as of" timestamp used when resolving unpinned spec refs.
    resolve_at: Option<DateTimeValue>,
    main_spec: Arc<LemmaSpec>,
    plan_hashes: &'a super::PlanHashRegistry,
}
impl Graph {
    /// Builds the graph for `main_spec`: resolves named types for the spec
    /// and its dependencies, recursively expands spec references into facts
    /// and rules, then validates the result. Returns the graph together with
    /// the per-spec resolved type tables, or every accumulated error.
    pub(crate) fn build(
        main_spec: &Arc<LemmaSpec>,
        context: &Context,
        sources: HashMap<String, String>,
        resolve_at: Option<DateTimeValue>,
        plan_hashes: &super::PlanHashRegistry,
    ) -> Result<(Graph, ResolvedTypesMap), Vec<Error>> {
        // Resolve named types first; errors are collected, not fatal yet.
        let mut type_resolver = PerSliceTypeResolver::new(context, resolve_at.clone(), plan_hashes);
        let mut type_errors: Vec<Error> = Vec::new();
        type_errors.extend(type_resolver.register_all(main_spec));
        type_errors.extend(type_resolver.register_dependency_specs(main_spec));
        let (resolved_types, resolve_errors) = type_resolver.resolve_all_registered_specs();
        type_errors.extend(resolve_errors);
        // Expand the spec tree; the builder accumulates its own errors.
        let (facts, rules, graph_errors, local_types) = {
            let mut builder = GraphBuilder {
                facts: IndexMap::new(),
                rules: BTreeMap::new(),
                sources,
                context,
                local_types: resolved_types,
                errors: Vec::new(),
                resolve_at,
                main_spec: Arc::clone(main_spec),
                plan_hashes,
            };
            builder.build_spec(main_spec, Vec::new(), HashMap::new(), &mut type_resolver)?;
            (
                builder.facts,
                builder.rules,
                builder.errors,
                builder.local_types,
            )
        };
        let mut graph = Graph {
            facts,
            rules,
            execution_order: Vec::new(),
            main_spec: Arc::clone(main_spec),
        };
        // Validation also fills in `execution_order` on success.
        let validation_errors = match graph.validate(&local_types) {
            Ok(()) => Vec::new(),
            Err(errors) => errors,
        };
        // Report type, build, and validation errors together.
        let mut all_errors = type_errors;
        all_errors.extend(graph_errors);
        all_errors.extend(validation_errors);
        if all_errors.is_empty() {
            Ok((graph, local_types))
        } else {
            Err(all_errors)
        }
    }

    /// Runs structural checks, cycle detection, rule-type inference and
    /// checking, and spec-interface validation. On success, applies the
    /// inferred rule types and stores the topological execution order.
    fn validate(&mut self, resolved_types: &ResolvedTypesMap) -> Result<(), Vec<Error>> {
        let mut errors = Vec::new();
        if let Err(structural_errors) = check_all_rule_references_exist(self) {
            errors.extend(structural_errors);
        }
        if let Err(collision_errors) = check_fact_and_rule_name_collisions(self) {
            errors.extend(collision_errors);
        }
        // A cycle makes the remaining checks meaningless: bail out early.
        let execution_order = match self.topological_sort() {
            Ok(order) => order,
            Err(circular_errors) => {
                errors.extend(circular_errors);
                return Err(errors);
            }
        };
        let inferred_types = infer_rule_types(self, &execution_order, resolved_types);
        if let Err(type_errors) =
            check_rule_types(self, &execution_order, &inferred_types, resolved_types)
        {
            errors.extend(type_errors);
        }
        let referenced_rules = compute_referenced_rules_by_path(self);
        // Collect every fact that references another spec, with its source.
        let spec_ref_facts: Vec<(FactPath, Arc<LemmaSpec>, Source)> = self
            .facts()
            .iter()
            .filter_map(|(path, fact)| {
                fact.spec_arc()
                    .map(|arc| (path.clone(), Arc::clone(arc), fact.source().clone()))
            })
            .collect();
        // Snapshot each rule with its inferred type (falling back to the
        // node's current type) for the interface/binding checks.
        let rule_entries: IndexMap<RulePath, RuleEntryForBindingCheck> = self
            .rules()
            .iter()
            .map(|(path, node)| {
                let rule_type = inferred_types
                    .get(path)
                    .cloned()
                    .unwrap_or_else(|| node.rule_type.clone());
                (
                    path.clone(),
                    RuleEntryForBindingCheck {
                        rule_type,
                        depends_on_rules: node.depends_on_rules.clone(),
                        branches: node.branches.clone(),
                    },
                )
            })
            .collect();
        if let Err(interface_errors) = validate_spec_interfaces(
            &referenced_rules,
            &spec_ref_facts,
            self.facts(),
            &rule_entries,
            &self.main_spec,
        ) {
            errors.extend(interface_errors);
        }
        if !errors.is_empty() {
            return Err(errors);
        }
        // Only mutate self once validation has fully succeeded.
        apply_inferred_types(self, inferred_types);
        self.execution_order = execution_order;
        Ok(())
    }
}
impl<'a> GraphBuilder<'a> {
fn engine_error(&self, message: impl Into<String>, source: &Source) -> Error {
Error::validation_with_context(
message.into(),
Some(source.clone()),
None::<String>,
Some(Arc::clone(&self.main_spec)),
None,
)
}
/// Returns a synthetic `Source` for `spec` (zero-width span at its start
/// line) when its attribute is known in `self.sources`; otherwise `None`.
fn spec_source(&self, spec: &LemmaSpec) -> Option<Source> {
    let attribute = spec.attribute.as_deref()?;
    if !self.sources.contains_key(attribute) {
        return None;
    }
    let span = Span {
        start: 0,
        end: 0,
        line: spec.start_line,
        col: 0,
    };
    Some(Source::new(attribute, span))
}
/// Validates a spec's meta fields: `title` must be a text literal, and no
/// key may appear twice. Problems are recorded on `self.errors`.
fn process_meta_fields(&mut self, spec: &LemmaSpec) {
    // Keys already encountered, so duplicates can be flagged.
    let mut seen_keys = HashSet::new();
    for field in &spec.meta_fields {
        let is_text_literal = matches!(field.value, MetaValue::Literal(Value::Text(_)));
        if field.key == "title" && !is_text_literal {
            let err = self.engine_error(
                "Meta 'title' must be a text literal",
                &field.source_location,
            );
            self.errors.push(err);
        }
        let first_occurrence = seen_keys.insert(field.key.clone());
        if !first_occurrence {
            let err = self.engine_error(
                format!("Duplicate meta key '{}'", field.key),
                &field.source_location,
            );
            self.errors.push(err);
        }
    }
}
/// Resolves a spec reference to a concrete spec. Pinned references go
/// through the plan-hash registry (and its effective-date check); unpinned
/// ones resolve by the reference's `effective` date, then the builder-wide
/// `resolve_at`, then the first candidate for the name. Failures are
/// recorded on `self.errors` and yield `None`.
fn resolve_spec_ref(
    &mut self,
    spec_ref: &ast::SpecRef,
    error_source: &Source,
) -> Option<Arc<LemmaSpec>> {
    if let Some(pin) = &spec_ref.hash_pin {
        let Some(arc) = self.plan_hashes.get_by_pin(&spec_ref.name, pin) else {
            self.errors.push(self.engine_error(
                format!("No spec '{}' found with plan hash {}", spec_ref.name, pin),
                error_source,
            ));
            return None;
        };
        // The pinned spec must still pass the effective-date check.
        return match super::validate_effective_for_pin(spec_ref, arc, self.context) {
            Some(err) => {
                self.errors.push(err);
                None
            }
            None => Some(Arc::clone(arc)),
        };
    }
    let effective = spec_ref.effective.as_ref().or(self.resolve_at.as_ref());
    if let Some(dt) = effective {
        self.context.get_spec(&spec_ref.name, dt)
    } else {
        self.context
            .specs_for_name(&spec_ref.name)
            .into_iter()
            .next()
    }
}
/// Looks up the resolved plan hash for `dep_spec` by its
/// (name, effective_from) slice identity.
fn lookup_dependency_hash(&self, dep_spec: &Arc<LemmaSpec>) -> Option<String> {
    let hash = self
        .plan_hashes
        .get_by_slice(&dep_spec.name, &dep_spec.effective_from)?;
    Some(hash.to_string())
}
/// Resolves a parsed `TypeDeclaration` (base type, optional constraints,
/// optional `from <spec>` import) into a concrete `LemmaType`.
///
/// Panics if called with any other `ParsedFactValue` variant.
fn resolve_type_declaration(
    &mut self,
    type_decl: &ParsedFactValue,
    decl_source: &Source,
    context_spec: &Arc<LemmaSpec>,
) -> Result<LemmaType, Vec<Error>> {
    let ParsedFactValue::TypeDeclaration {
        base,
        constraints,
        from,
    } = type_decl
    else {
        unreachable!(
            "BUG: resolve_type_declaration called with non-TypeDeclaration ParsedFactValue"
        );
    };
    if let ParentType::Custom { ref name } = base {
        if name.is_empty() {
            return Err(vec![
                self.engine_error("TypeDeclaration base cannot be empty", decl_source)
            ]);
        }
    }
    // `from` names another spec to import the base type from; otherwise the
    // base type is looked up in the current spec.
    let source_spec_owned: Option<Arc<LemmaSpec>> = from
        .as_ref()
        .and_then(|r| self.resolve_spec_ref(r, decl_source));
    let source_spec_arc: &Arc<LemmaSpec> = source_spec_owned.as_ref().unwrap_or(context_spec);
    if from.is_some() && source_spec_owned.is_none() {
        return Err(vec![self.engine_error(
            format!(
                "Spec '{}' not found for type import",
                from.as_ref().map(|r| r.name.as_str()).unwrap_or("")
            ),
            decl_source,
        )]);
    }
    let base_name = format!("{}", base);
    let (base_lemma_type, extends) = if let ParentType::Primitive { primitive: kind } = base {
        (
            LemmaType::primitive(type_spec_for_primitive(*kind)),
            TypeExtends::Primitive,
        )
    } else {
        // Custom base: resolve it through the source spec's named types.
        let parent_name = match base {
            ParentType::Custom { ref name } => name.as_str(),
            ParentType::Primitive { .. } => unreachable!("already handled above"),
        };
        let resolved_types = self.local_types.get(source_spec_arc).ok_or_else(|| {
            vec![self.engine_error(
                format!(
                    "Resolved types not found for spec '{}'",
                    source_spec_arc.name
                ),
                decl_source,
            )]
        })?;
        let base_type = resolved_types
            .named_types
            .get(parent_name)
            .ok_or_else(|| {
                vec![self.engine_error(
                    format!("Unknown type: '{}'. Type must be defined before use.", base),
                    decl_source,
                )]
            })?
            .clone();
        // Scale types carry a family name; other types fall back to using
        // the parent's own name as the family.
        let family = base_type
            .scale_family_name()
            .map(String::from)
            .unwrap_or_else(|| parent_name.to_string());
        // Imported types record the defining spec and its plan hash.
        let defining_spec = if from.is_some() {
            let hash = match self.lookup_dependency_hash(source_spec_arc) {
                Some(h) => h,
                None => {
                    return Err(vec![self.engine_error(
                        format!(
                            "Cannot import types from spec '{}': no plan hash (that spec may have failed validation)",
                            source_spec_arc.name
                        ),
                        decl_source,
                    )]);
                }
            };
            TypeDefiningSpec::Import {
                spec: Arc::clone(source_spec_arc),
                resolved_plan_hash: hash,
            }
        } else {
            TypeDefiningSpec::Local
        };
        let extends = TypeExtends::Custom {
            parent: parent_name.to_string(),
            family,
            defining_spec,
        };
        (base_type, extends)
    };
    // Apply each constraint on top of the base type's specifications, then
    // validate the combined result. Constraint errors accumulate.
    let mut errors = Vec::new();
    let mut specs = base_lemma_type.specifications;
    if let Some(ref constraints_vec) = constraints {
        for (command, args) in constraints_vec {
            match specs.clone().apply_constraint(*command, args) {
                Ok(updated) => specs = updated,
                Err(e) => errors.push(self.engine_error(
                    format!("Invalid command '{}' for type '{}': {}", command, base, e),
                    decl_source,
                )),
            }
        }
        errors.extend(validate_type_specifications(
            &specs,
            &base_name,
            decl_source,
            Some(Arc::clone(context_spec)),
        ));
    }
    if !errors.is_empty() {
        return Err(errors);
    }
    Ok(LemmaType::new(base_name, specs, extends))
}
/// Resolves one dotted fact binding by walking its segments through spec
/// references, and returns the flattened binding key (enclosing path +
/// segments + fact name) together with the bound value and its source.
fn resolve_fact_binding(
    &mut self,
    fact: &LemmaFact,
    current_segment_names: &[String],
    effective_spec_refs: &HashMap<String, Arc<LemmaSpec>>,
) -> Result<(Vec<String>, ParsedFactValue, Source), Vec<Error>> {
    let fact_source = &fact.source_location;
    let binding_path_display = format!(
        "{}.{}",
        fact.reference.segments.join("."),
        fact.reference.name
    );
    let mut current_spec_arc: Option<Arc<LemmaSpec>> = None;
    for (index, segment) in fact.reference.segments.iter().enumerate() {
        let resolved_arc = if index == 0 {
            // The first segment must name an already-resolved spec
            // reference of the current spec.
            match effective_spec_refs.get(segment) {
                Some(arc) => Arc::clone(arc),
                None => {
                    return Err(vec![self.engine_error(
                        format!(
                            "Invalid fact binding path '{}': '{}' is not a spec reference",
                            binding_path_display, segment
                        ),
                        fact_source,
                    )]);
                }
            }
        } else {
            // Later segments are looked up as top-level facts of the
            // previous spec and must themselves be spec references.
            let prev_spec = current_spec_arc.as_ref().unwrap_or_else(|| {
                unreachable!(
                    "BUG: current_spec_arc should be set after first segment in resolve_fact_binding"
                )
            });
            let seg_fact = prev_spec
                .facts
                .iter()
                .find(|f| f.reference.segments.is_empty() && f.reference.name == *segment);
            match seg_fact {
                Some(f) => match &f.value {
                    ParsedFactValue::SpecReference(spec_ref) => {
                        match self.resolve_spec_ref(spec_ref, fact_source) {
                            Some(arc) => arc,
                            None => {
                                return Err(vec![self.engine_error(
                                    format!(
                                        "Invalid fact binding path '{}': spec '{}' not found",
                                        binding_path_display, spec_ref
                                    ),
                                    fact_source,
                                )]);
                            }
                        }
                    }
                    _ => {
                        return Err(vec![self.engine_error(
                            format!(
                                "Invalid fact binding path '{}': '{}' in spec '{}' is not a spec reference",
                                binding_path_display, segment, prev_spec.name
                            ),
                            fact_source,
                        )]);
                    }
                },
                None => {
                    return Err(vec![self.engine_error(
                        format!(
                            "Invalid fact binding path '{}': fact '{}' not found in spec '{}'",
                            binding_path_display, segment, prev_spec.name
                        ),
                        fact_source,
                    )]);
                }
            }
        };
        current_spec_arc = Some(resolved_arc);
    }
    // Binding key = enclosing path + the binding's segments + fact name.
    let mut binding_key: Vec<String> = current_segment_names.to_vec();
    binding_key.extend(fact.reference.segments.iter().cloned());
    binding_key.push(fact.reference.name.clone());
    Ok((
        binding_key,
        fact.value.clone(),
        fact.source_location.clone(),
    ))
}
/// Collects the dotted fact bindings declared in `spec` into a map keyed by
/// flattened binding path. Rejects over-long names, type declarations used
/// as bindings, and duplicate bindings; returns all errors together.
fn build_fact_bindings(
    &mut self,
    spec: &LemmaSpec,
    current_segment_names: &[String],
    effective_spec_refs: &HashMap<String, Arc<LemmaSpec>>,
) -> Result<FactBindings, Vec<Error>> {
    let mut bindings: FactBindings = HashMap::new();
    let mut errors: Vec<Error> = Vec::new();
    for fact in &spec.facts {
        // Facts without segments are plain facts, not bindings.
        if fact.reference.segments.is_empty() {
            continue;
        }
        // Length limits apply to every path component and the fact name.
        let mut name_too_long = false;
        for segment in &fact.reference.segments {
            if let Err(e) = crate::limits::check_max_length(
                segment,
                crate::limits::MAX_FACT_NAME_LENGTH,
                "fact",
                Some(fact.source_location.clone()),
            ) {
                errors.push(e);
                name_too_long = true;
            }
        }
        if let Err(e) = crate::limits::check_max_length(
            &fact.reference.name,
            crate::limits::MAX_FACT_NAME_LENGTH,
            "fact",
            Some(fact.source_location.clone()),
        ) {
            errors.push(e);
            name_too_long = true;
        }
        if name_too_long {
            continue;
        }
        // Bindings may carry literals or spec references, never types.
        if matches!(&fact.value, ParsedFactValue::TypeDeclaration { .. }) {
            let binding_path_display = format!(
                "{}.{}",
                fact.reference.segments.join("."),
                fact.reference.name
            );
            errors.push(self.engine_error(
                format!(
                    "Fact binding '{}' must provide a literal value or spec reference, not a type declaration",
                    binding_path_display
                ),
                &fact.source_location,
            ));
            continue;
        }
        match self.resolve_fact_binding(fact, current_segment_names, effective_spec_refs) {
            Ok((binding_key, fact_value, source)) => {
                // First binding for a key wins; later ones are duplicates.
                if let Some((_, existing_source)) = bindings.get(&binding_key) {
                    errors.push(self.engine_error(
                        format!(
                            "Duplicate fact binding for '{}' (previously bound at {}:{})",
                            binding_key.join("."),
                            existing_source.attribute,
                            existing_source.span.line
                        ),
                        &fact.source_location,
                    ));
                } else {
                    bindings.insert(binding_key, (fact_value, source));
                }
            }
            Err(mut resolve_errors) => {
                errors.append(&mut resolve_errors);
            }
        }
    }
    if !errors.is_empty() {
        return Err(errors);
    }
    Ok(bindings)
}
/// Resolves one plain (segment-free) fact of the current spec and inserts
/// it into `self.facts`, preferring a matching external binding over the
/// fact's own declared value. Failures are recorded on `self.errors`.
#[allow(clippy::too_many_arguments)]
fn add_fact(
    &mut self,
    fact: &LemmaFact,
    current_segments: &[PathSegment],
    fact_bindings: &FactBindings,
    effective_spec_refs: &HashMap<String, Arc<LemmaSpec>>,
    current_spec_arc: &Arc<LemmaSpec>,
    used_binding_keys: &mut HashSet<Vec<String>>,
) {
    // Name-length limits apply to the fact name and every path segment.
    if let Err(e) = crate::limits::check_max_length(
        &fact.reference.name,
        crate::limits::MAX_FACT_NAME_LENGTH,
        "fact",
        Some(fact.source_location.clone()),
    ) {
        self.errors.push(e);
        return;
    }
    for segment in &fact.reference.segments {
        if let Err(e) = crate::limits::check_max_length(
            segment,
            crate::limits::MAX_FACT_NAME_LENGTH,
            "fact",
            Some(fact.source_location.clone()),
        ) {
            self.errors.push(e);
            return;
        }
    }
    let fact_path = FactPath {
        segments: current_segments.to_vec(),
        fact: fact.reference.name.clone(),
    };
    if self.facts.contains_key(&fact_path) {
        self.errors.push(self.engine_error(
            format!("Duplicate fact '{}'", fact_path.fact),
            &fact.source_location,
        ));
        return;
    }
    // A binding addressed to this exact path overrides the declared value;
    // consumed bindings are tracked so unused ones can be reported later.
    let binding_key: Vec<String> = current_segments
        .iter()
        .map(|s| s.fact.clone())
        .chain(std::iter::once(fact.reference.name.clone()))
        .collect();
    let (effective_value, effective_source) = match fact_bindings.get(&binding_key) {
        Some((bound_value, bound_source)) => {
            used_binding_keys.insert(binding_key);
            (bound_value.clone(), bound_source.clone())
        }
        None => (fact.value.clone(), fact.source_location.clone()),
    };
    // If the fact was declared as a type, resolve that declared type first;
    // it becomes the schema for whatever value ends up bound in.
    let original_schema_type = if matches!(&fact.value, ParsedFactValue::TypeDeclaration { .. })
    {
        match self.resolve_type_declaration(
            &fact.value,
            &fact.source_location,
            current_spec_arc,
        ) {
            Ok(t) => Some(t),
            Err(errs) => {
                self.errors.extend(errs);
                return;
            }
        }
    } else {
        None
    };
    match &effective_value {
        ParsedFactValue::Literal(value) => {
            let semantic_value = match value_to_semantic(value) {
                Ok(s) => s,
                Err(e) => {
                    self.errors.push(self.engine_error(e, &effective_source));
                    return;
                }
            };
            // Infer a type from the literal; only used when there is no
            // declared schema type for this fact.
            let inferred_type = match value {
                Value::Text(_) => primitive_text().clone(),
                Value::Number(_) => primitive_number().clone(),
                Value::Scale(_, unit) => {
                    // Scale literals resolve through the spec's unit index.
                    match self
                        .local_types
                        .get(current_spec_arc)
                        .and_then(|dt| dt.unit_index.get(unit))
                    {
                        Some((lt, _)) => lt.clone(),
                        None => {
                            self.errors.push(self.engine_error(
                                format!(
                                    "Scale literal uses unknown unit '{}' for this spec",
                                    unit
                                ),
                                &effective_source,
                            ));
                            return;
                        }
                    }
                }
                Value::Boolean(_) => primitive_boolean().clone(),
                Value::Date(_) => primitive_date().clone(),
                Value::Time(_) => primitive_time().clone(),
                Value::Duration(_, _) => primitive_duration().clone(),
                Value::Ratio(_, _) => primitive_ratio().clone(),
            };
            let schema_type = original_schema_type.unwrap_or(inferred_type);
            let literal_value = LiteralValue {
                value: semantic_value,
                lemma_type: schema_type,
            };
            self.facts.insert(
                fact_path,
                FactData::Value {
                    value: literal_value,
                    source: effective_source,
                    is_default: false,
                },
            );
        }
        ParsedFactValue::TypeDeclaration { .. } => {
            // A TypeDeclaration effective value means the declared value was
            // not overridden by a binding, so the type resolved above must
            // exist; the fallback re-resolution cannot fail here.
            let resolved_type = original_schema_type.unwrap_or_else(|| {
                match self.resolve_type_declaration(
                    &effective_value,
                    &effective_source,
                    current_spec_arc,
                ) {
                    Ok(t) => t,
                    Err(_) => {
                        unreachable!(
                            "BUG: TypeDeclaration effective value without original_schema_type"
                        )
                    }
                }
            });
            self.facts.insert(
                fact_path,
                FactData::TypeDeclaration {
                    resolved_type,
                    source: effective_source,
                },
            );
        }
        ParsedFactValue::SpecReference(spec_ref) => {
            // Prefer the already-resolved reference for this fact name.
            let effective_spec_arc =
                if let Some(arc) = effective_spec_refs.get(&fact.reference.name).cloned() {
                    arc
                } else {
                    match self.resolve_spec_ref(spec_ref, &effective_source) {
                        Some(arc) => arc,
                        None => {
                            self.errors.push(self.engine_error(
                                format!("Spec '{}' not found", spec_ref),
                                &effective_source,
                            ));
                            return;
                        }
                    }
                };
            let resolved_hash = match self.lookup_dependency_hash(&effective_spec_arc) {
                Some(h) => h,
                None => {
                    self.errors.push(self.engine_error(
                        format!(
                            "Cannot bind spec reference '{}': no plan hash for that spec (it may have failed validation)",
                            effective_spec_arc.name
                        ),
                        &effective_source,
                    ));
                    return;
                }
            };
            // An explicit hash pin must match the resolved plan hash
            // (compared case-insensitively, ignoring surrounding whitespace).
            if let Some(pin) = &spec_ref.hash_pin {
                if !pin.trim().eq_ignore_ascii_case(resolved_hash.trim()) {
                    self.errors.push(self.engine_error(
                        format!(
                            "Spec '{}' plan hash mismatch: expected {}, got {}",
                            spec_ref.name, pin, resolved_hash
                        ),
                        &effective_source,
                    ));
                    return;
                }
            }
            self.facts.insert(
                fact_path,
                FactData::SpecRef {
                    spec: Arc::clone(&effective_spec_arc),
                    source: effective_source,
                    resolved_plan_hash: resolved_hash,
                },
            );
        }
    }
}
/// Walks `segments` starting from `current_facts_map`, requiring each
/// segment to be a spec-reference fact, and accumulates `PathSegment`s.
/// Returns the full path plus the spec the final segment resolves to, or
/// `None` after recording an error. Must be called with >= 1 segment.
fn resolve_path_segments(
    &mut self,
    segments: &[String],
    reference_source: &Source,
    mut current_facts_map: HashMap<String, LemmaFact>,
    mut path_segments: Vec<PathSegment>,
    effective_spec_refs: &HashMap<String, Arc<LemmaSpec>>,
) -> Option<(Vec<PathSegment>, Arc<LemmaSpec>)> {
    let mut last_arc: Option<Arc<LemmaSpec>> = None;
    for (index, segment) in segments.iter().enumerate() {
        let fact_ref =
            match current_facts_map.get(segment) {
                Some(f) => f,
                None => {
                    self.errors.push(self.engine_error(
                        format!("Fact '{}' not found", segment),
                        reference_source,
                    ));
                    return None;
                }
            };
        if let ParsedFactValue::SpecReference(original_spec_ref) = &fact_ref.value {
            // For the first segment, an effective (possibly rebound) spec
            // reference takes precedence over the declared one.
            let resolved = if index == 0 {
                effective_spec_refs
                    .get(segment)
                    .cloned()
                    .or_else(|| self.resolve_spec_ref(original_spec_ref, reference_source))
            } else {
                self.resolve_spec_ref(original_spec_ref, reference_source)
            };
            let arc = match resolved {
                Some(a) => a,
                None => {
                    self.errors.push(self.engine_error(
                        format!("Spec '{}' not found", original_spec_ref),
                        reference_source,
                    ));
                    return None;
                }
            };
            path_segments.push(PathSegment {
                fact: segment.clone(),
                spec: arc.name.clone(),
            });
            // Descend: the next segment resolves against this spec's facts.
            current_facts_map = arc
                .facts
                .iter()
                .map(|f| (f.reference.name.clone(), f.clone()))
                .collect();
            last_arc = Some(arc);
        } else {
            self.errors.push(self.engine_error(
                format!("Fact '{}' is not a spec reference", segment),
                reference_source,
            ));
            return None;
        }
    }
    let final_arc = last_arc.unwrap_or_else(|| {
        unreachable!(
            "BUG: resolve_path_segments called with empty segments should not reach here"
        )
    });
    Some((path_segments, final_arc))
}
/// Recursively expands `spec_arc` at path `current_segments` into facts and
/// rules: resolves effective spec references, applies inherited fact
/// bindings, registers/resolves inline type definitions, and recurses into
/// referenced specs. All diagnostics accumulate on `self.errors`.
fn build_spec(
    &mut self,
    spec_arc: &Arc<LemmaSpec>,
    current_segments: Vec<PathSegment>,
    fact_bindings: FactBindings,
    type_resolver: &mut PerSliceTypeResolver<'a>,
) -> Result<(), Vec<Error>> {
    let spec = spec_arc.as_ref();
    if let Err(e) = crate::limits::check_max_length(
        &spec.name,
        crate::limits::MAX_SPEC_NAME_LENGTH,
        "spec",
        self.spec_source(spec),
    ) {
        self.errors.push(e);
    }
    // Meta fields are only validated on the root spec.
    if current_segments.is_empty() {
        self.process_meta_fields(spec);
    }
    // A spec may not reference a spec with its own base name (fact refs).
    for fact in spec.facts.iter() {
        if let ParsedFactValue::SpecReference(spec_ref) = &fact.value {
            if spec_ref.name == spec.name {
                self.errors.push(self.engine_error(
                    format!(
                        "spec '{}' cannot reference '{}' (same base name)",
                        spec.name, spec_ref
                    ),
                    &fact.source_location,
                ));
            }
        }
    }
    // Same restriction for type imports.
    for type_def in &spec.types {
        if let ast::TypeDef::Import {
            from,
            source_location,
            ..
        } = type_def
        {
            if from.name == spec.name {
                self.errors.push(self.engine_error(
                    format!(
                        "spec '{}' cannot reference '{}' (same base name)",
                        spec.name, from
                    ),
                    source_location,
                ));
            }
        }
    }
    // Resolve each top-level spec-reference fact once up front.
    let mut effective_spec_refs: HashMap<String, Arc<LemmaSpec>> = HashMap::new();
    for fact in spec.facts.iter() {
        if fact.reference.segments.is_empty() {
            if let ParsedFactValue::SpecReference(spec_ref) = &fact.value {
                if spec_ref.name == spec.name {
                    continue;
                }
                if let Some(arc) = self.resolve_spec_ref(spec_ref, &fact.source_location) {
                    effective_spec_refs.insert(fact.reference.name.clone(), arc);
                }
            }
        }
    }
    let current_segment_names: Vec<String> =
        current_segments.iter().map(|s| s.fact.clone()).collect();
    // Bindings inherited from the parent may rebind a spec-reference fact;
    // those override the declaration resolved above.
    for fact in spec.facts.iter() {
        if fact.reference.segments.is_empty() {
            if let ParsedFactValue::SpecReference(_) = &fact.value {
                let mut binding_key = current_segment_names.clone();
                binding_key.push(fact.reference.name.clone());
                if let Some((ParsedFactValue::SpecReference(bound_spec_ref), _)) =
                    fact_bindings.get(&binding_key)
                {
                    if bound_spec_ref.name == spec.name {
                        continue;
                    }
                    if let Some(arc) =
                        self.resolve_spec_ref(bound_spec_ref, &fact.source_location)
                    {
                        effective_spec_refs.insert(fact.reference.name.clone(), arc);
                    }
                }
            }
        }
    }
    // Bindings declared inside this spec, addressed to nested specs. On
    // failure, continue with an empty set so later checks still run.
    let this_spec_bindings =
        match self.build_fact_bindings(spec, &current_segment_names, &effective_spec_refs) {
            Ok(bindings) => bindings,
            Err(errors) => {
                self.errors.extend(errors);
                HashMap::new()
            }
        };
    let facts_map: HashMap<String, &LemmaFact> = spec
        .facts
        .iter()
        .map(|fact| (fact.reference.name.clone(), fact))
        .collect();
    // Register inline type definitions (a type declaration carrying
    // constraints or a `from` import) — root spec only.
    if current_segments.is_empty() {
        for fact in &spec.facts {
            if !fact.reference.segments.is_empty() {
                continue;
            }
            if let ParsedFactValue::TypeDeclaration {
                base,
                constraints: inline_constraints,
                from,
            } = &fact.value
            {
                if let ParentType::Custom { ref name } = base {
                    if name.is_empty() {
                        self.errors.push(self.engine_error(
                            "TypeDeclaration base cannot be empty",
                            &fact.source_location,
                        ));
                        continue;
                    }
                }
                let is_inline_type_definition = from.is_some() || inline_constraints.is_some();
                if is_inline_type_definition {
                    let source_location = fact.source_location.clone();
                    let inline_type_def = TypeDef::Inline {
                        source_location,
                        parent: base.clone(),
                        constraints: inline_constraints.clone(),
                        fact_ref: fact.reference.clone(),
                        from: from.clone(),
                    };
                    if let Err(e) = type_resolver.register_type(spec_arc, inline_type_def) {
                        self.errors.push(e);
                    }
                }
            }
        }
    }
    // Resolve the inline types just registered, validate each result, and
    // store the updated table back into local_types.
    if let Some(existing_types) = self.local_types.get(spec_arc).cloned() {
        match type_resolver.resolve_inline_types(spec_arc, existing_types) {
            Ok(resolved_types) => {
                for (fact_ref, lemma_type) in &resolved_types.inline_type_definitions {
                    let type_name = format!("{} (inline)", fact_ref.name);
                    let fact = spec
                        .facts
                        .iter()
                        .find(|f| &f.reference == fact_ref)
                        .unwrap_or_else(|| {
                            unreachable!(
                                "BUG: inline type definition for '{}' has no corresponding fact in spec '{}'",
                                fact_ref.name, spec.name
                            )
                        });
                    let source = &fact.source_location;
                    let mut spec_errors = validate_type_specifications(
                        &lemma_type.specifications,
                        &type_name,
                        source,
                        Some(Arc::clone(spec_arc)),
                    );
                    self.errors.append(&mut spec_errors);
                }
                self.local_types
                    .insert(Arc::clone(spec_arc), resolved_types);
            }
            Err(es) => {
                self.errors.extend(es);
            }
        }
    }
    // Add this spec's own top-level facts (self-references are skipped).
    let mut used_binding_keys: HashSet<Vec<String>> = HashSet::new();
    for fact in &spec.facts {
        if !fact.reference.segments.is_empty() {
            continue;
        }
        if let ParsedFactValue::SpecReference(spec_ref) = &fact.value {
            if spec_ref.name == spec.name {
                continue;
            }
        }
        self.add_fact(
            fact,
            &current_segments,
            &fact_bindings,
            &effective_spec_refs,
            spec_arc,
            &mut used_binding_keys,
        );
    }
    // Pick up spec references that only became known via add_fact (e.g.
    // bound-in values), restricted to facts at exactly this path.
    for (path, rfv) in &self.facts {
        if path.segments.len() != current_segments.len() {
            continue;
        }
        if !path
            .segments
            .iter()
            .zip(current_segments.iter())
            .all(|(a, b)| a.fact == b.fact && a.spec == b.spec)
        {
            continue;
        }
        if let FactData::SpecRef { spec, .. } = rfv {
            effective_spec_refs.insert(path.fact.clone(), Arc::clone(spec));
        }
    }
    // Recurse into each referenced spec, forwarding applicable bindings.
    for fact in &spec.facts {
        if !fact.reference.segments.is_empty() {
            continue;
        }
        if let ParsedFactValue::SpecReference(_) = &fact.value {
            let nested_arc = match effective_spec_refs.get(&fact.reference.name) {
                Some(arc) => Arc::clone(arc),
                None => continue,
            };
            let mut nested_segments = current_segments.clone();
            nested_segments.push(PathSegment {
                fact: fact.reference.name.clone(),
                spec: nested_arc.name.clone(),
            });
            let nested_segment_names: Vec<String> =
                nested_segments.iter().map(|s| s.fact.clone()).collect();
            // This spec's own bindings win over inherited ones on conflict;
            // only inherited keys addressed below the nested path are kept.
            let mut combined_bindings = this_spec_bindings.clone();
            for (key, value_and_source) in &fact_bindings {
                if key.len() > nested_segment_names.len()
                    && key[..nested_segment_names.len()] == nested_segment_names[..]
                    && !combined_bindings.contains_key(key)
                {
                    combined_bindings.insert(key.clone(), value_and_source.clone());
                }
            }
            if let Err(errs) = self.build_spec(
                &nested_arc,
                nested_segments,
                combined_bindings,
                type_resolver,
            ) {
                self.errors.extend(errs);
            }
        }
    }
    // Any binding addressed to this level that no fact consumed is an error.
    let expected_key_len = current_segments.len() + 1;
    for (binding_key, (_, binding_source)) in &fact_bindings {
        if binding_key.len() == expected_key_len
            && binding_key[..current_segments.len()]
                .iter()
                .zip(current_segments.iter())
                .all(|(a, b)| a == &b.fact)
            && !used_binding_keys.contains(binding_key)
        {
            self.errors.push(self.engine_error(
                format!(
                    "Fact binding targets a fact that does not exist in the referenced spec: '{}'",
                    binding_key.join(".")
                ),
                binding_source,
            ));
        }
    }
    // Finally, compile this spec's rules.
    let rule_names: HashSet<&str> = spec.rules.iter().map(|r| r.name.as_str()).collect();
    for rule in &spec.rules {
        self.add_rule(
            rule,
            spec_arc,
            &facts_map,
            &current_segments,
            &effective_spec_refs,
            &rule_names,
        );
    }
    Ok(())
}
/// Compiles one rule into a `RuleNode`: converts the main expression plus
/// every `unless` clause into semantic expressions, collecting the rules
/// each branch depends on. Returns early (after recording errors) if any
/// expression fails to convert.
fn add_rule(
    &mut self,
    rule: &LemmaRule,
    current_spec_arc: &Arc<LemmaSpec>,
    facts_map: &HashMap<String, &LemmaFact>,
    current_segments: &[PathSegment],
    effective_spec_refs: &HashMap<String, Arc<LemmaSpec>>,
    rule_names: &HashSet<&str>,
) {
    if let Err(e) = crate::limits::check_max_length(
        &rule.name,
        crate::limits::MAX_RULE_NAME_LENGTH,
        "rule",
        Some(rule.source_location.clone()),
    ) {
        self.errors.push(e);
        return;
    }
    let rule_path = RulePath {
        segments: current_segments.to_vec(),
        rule: rule.name.clone(),
    };
    if self.rules.contains_key(&rule_path) {
        let rule_source = &rule.source_location;
        self.errors.push(
            self.engine_error(format!("Duplicate rule '{}'", rule_path.rule), rule_source),
        );
        return;
    }
    let mut branches = Vec::new();
    let mut depends_on_rules = BTreeSet::new();
    // The main expression becomes the unconditional first branch.
    let converted_expression = match self.convert_expression_and_extract_dependencies(
        &rule.expression,
        current_spec_arc,
        facts_map,
        current_segments,
        &mut depends_on_rules,
        effective_spec_refs,
        rule_names,
    ) {
        Some(expr) => expr,
        None => return,
    };
    branches.push((None, converted_expression));
    // Each `unless` clause contributes a (condition, result) branch.
    for unless_clause in &rule.unless_clauses {
        let converted_condition = match self.convert_expression_and_extract_dependencies(
            &unless_clause.condition,
            current_spec_arc,
            facts_map,
            current_segments,
            &mut depends_on_rules,
            effective_spec_refs,
            rule_names,
        ) {
            Some(expr) => expr,
            None => return,
        };
        let converted_result = match self.convert_expression_and_extract_dependencies(
            &unless_clause.result,
            current_spec_arc,
            facts_map,
            current_segments,
            &mut depends_on_rules,
            effective_spec_refs,
            rule_names,
        ) {
            Some(expr) => expr,
            None => return,
        };
        branches.push((Some(converted_condition), converted_result));
    }
    let rule_node = RuleNode {
        branches,
        source: rule.source_location.clone(),
        depends_on_rules,
        // Placeholder type; replaced once rule types are inferred in
        // `Graph::validate`.
        rule_type: LemmaType::veto_type(),
        spec_name: current_spec_arc.name.clone(),
    };
    self.rules.insert(rule_path, rule_node);
}
#[allow(clippy::too_many_arguments)]
/// Lowers the two operands of a binary expression, threading rule
/// dependencies through `depends_on_rules`. Returns `None` as soon as either
/// side fails to convert (the failure has already been reported).
fn convert_binary_operands(
    &mut self,
    left: &ast::Expression,
    right: &ast::Expression,
    current_spec_arc: &Arc<LemmaSpec>,
    facts_map: &HashMap<String, &LemmaFact>,
    current_segments: &[PathSegment],
    depends_on_rules: &mut BTreeSet<RulePath>,
    effective_spec_refs: &HashMap<String, Arc<LemmaSpec>>,
    rule_names: &HashSet<&str>,
) -> Option<(Expression, Expression)> {
    // Convert both sides in order; `?` aborts the pair on the first failure.
    let mut converted = Vec::with_capacity(2);
    for side in [left, right].iter().copied() {
        converted.push(self.convert_expression_and_extract_dependencies(
            side,
            current_spec_arc,
            facts_map,
            current_segments,
            depends_on_rules,
            effective_spec_refs,
            rule_names,
        )?);
    }
    let rhs = converted.pop()?;
    let lhs = converted.pop()?;
    Some((lhs, rhs))
}
/// Lowers a parsed AST expression into a semantic `Expression`, recording
/// every rule the expression references into `depends_on_rules`.
///
/// Returns `None` when lowering fails; in that case one or more diagnostics
/// have already been pushed onto `self.errors`.
#[allow(clippy::too_many_arguments)]
fn convert_expression_and_extract_dependencies(
&mut self,
expr: &ast::Expression,
current_spec_arc: &Arc<LemmaSpec>,
facts_map: &HashMap<String, &LemmaFact>,
current_segments: &[PathSegment],
depends_on_rules: &mut BTreeSet<RulePath>,
effective_spec_refs: &HashMap<String, Arc<LemmaSpec>>,
rule_names: &HashSet<&str>,
) -> Option<Expression> {
// Every AST expression is expected to carry a source span by this phase.
let expr_src = expr
.source_location
.as_ref()
.expect("BUG: AST expression missing source location");
match &expr.kind {
// Bare or dotted name: resolve it to either a fact path or a rule path.
ast::ExpressionKind::Reference(r) => {
let expr_source = expr_src;
// No segments means a local reference within the current spec;
// otherwise the dotted prefix is resolved against spec references.
let (segments, target_arc_opt) = if r.segments.is_empty() {
(current_segments.to_vec(), None)
} else {
// NOTE(review): this deep-clones the entire facts map for every
// dotted reference because resolve_path_segments takes owned
// facts; worth a borrowing signature if this shows up in profiles.
let facts_map_owned: HashMap<String, LemmaFact> = facts_map
.iter()
.map(|(k, v)| (k.clone(), (*v).clone()))
.collect();
let (segs, arc) = self.resolve_path_segments(
&r.segments,
expr_source,
facts_map_owned,
current_segments.to_vec(),
effective_spec_refs,
)?;
(segs, Some(arc))
};
// Classify the final name as fact and/or rule, either locally or in
// the resolved target spec.
let (is_fact, is_rule, target_spec_name_opt) = match &target_arc_opt {
None => {
let is_fact = facts_map.contains_key(&r.name);
let is_rule = rule_names.contains(r.name.as_str());
(is_fact, is_rule, None)
}
Some(target_arc) => {
let target_spec = target_arc.as_ref();
// Only locally-declared facts of the target spec are addressable.
let target_fact_names: HashSet<&str> = target_spec
.facts
.iter()
.filter(|f| f.reference.is_local())
.map(|f| f.reference.name.as_str())
.collect();
let target_rule_names: HashSet<&str> =
target_spec.rules.iter().map(|r| r.name.as_str()).collect();
let is_fact = target_fact_names.contains(r.name.as_str());
let is_rule = target_rule_names.contains(r.name.as_str());
(is_fact, is_rule, Some(target_spec.name.as_str()))
}
};
// Ambiguous names are rejected rather than silently preferring one kind.
if is_fact && is_rule {
self.errors.push(self.engine_error(
format!("'{}' is both a fact and a rule", r.name),
expr_source,
));
return None;
}
if is_fact {
let fact_path = FactPath {
segments,
fact: r.name.clone(),
};
return Some(Expression {
kind: ExpressionKind::FactPath(fact_path),
source_location: expr.source_location.clone(),
});
}
if is_rule {
let rule_path = RulePath {
segments,
rule: r.name.clone(),
};
// Record the dependency so execution order can honor it later.
depends_on_rules.insert(rule_path.clone());
return Some(Expression {
kind: ExpressionKind::RulePath(rule_path),
source_location: expr.source_location.clone(),
});
}
// Neither fact nor rule: report, naming the target spec when known.
let msg = match target_spec_name_opt {
Some(s) => format!("Reference '{}' not found in spec '{}'", r.name, s),
None => format!("Reference '{}' not found", r.name),
};
self.errors.push(self.engine_error(msg, expr_source));
None
}
// Binary operators: lower both sides, then rebuild the node.
ast::ExpressionKind::LogicalAnd(left, right) => {
let (l, r) = self.convert_binary_operands(
left,
right,
current_spec_arc,
facts_map,
current_segments,
depends_on_rules,
effective_spec_refs,
rule_names,
)?;
Some(Expression {
kind: ExpressionKind::LogicalAnd(Arc::new(l), Arc::new(r)),
source_location: expr.source_location.clone(),
})
}
ast::ExpressionKind::Arithmetic(left, op, right) => {
let (l, r) = self.convert_binary_operands(
left,
right,
current_spec_arc,
facts_map,
current_segments,
depends_on_rules,
effective_spec_refs,
rule_names,
)?;
Some(Expression {
kind: ExpressionKind::Arithmetic(Arc::new(l), op.clone(), Arc::new(r)),
source_location: expr.source_location.clone(),
})
}
ast::ExpressionKind::Comparison(left, op, right) => {
let (l, r) = self.convert_binary_operands(
left,
right,
current_spec_arc,
facts_map,
current_segments,
depends_on_rules,
effective_spec_refs,
rule_names,
)?;
Some(Expression {
kind: ExpressionKind::Comparison(Arc::new(l), op.clone(), Arc::new(r)),
source_location: expr.source_location.clone(),
})
}
// `x in <unit>`: lower the value and resolve the target unit against
// this spec's unit index.
ast::ExpressionKind::UnitConversion(value, target) => {
let converted_value = self.convert_expression_and_extract_dependencies(
value,
current_spec_arc,
facts_map,
current_segments,
depends_on_rules,
effective_spec_refs,
rule_names,
)?;
let resolved_spec_types = self.local_types.get(current_spec_arc);
let unit_index = resolved_spec_types.map(|dt| &dt.unit_index);
let semantic_target = match conversion_target_to_semantic(target, unit_index) {
Ok(t) => t,
Err(msg) => {
// Augment the message with the known unit names when the
// index is available.
let full_msg = unit_index
.map(|idx| {
let valid: Vec<&str> = idx.keys().map(String::as_str).collect();
format!("{} Valid units: {}", msg, valid.join(", "))
})
.unwrap_or(msg);
self.errors.push(Error::validation_with_context(
full_msg,
expr.source_location.clone(),
None::<String>,
Some(Arc::clone(&self.main_spec)),
None,
));
return None;
}
};
Some(Expression {
kind: ExpressionKind::UnitConversion(
Arc::new(converted_value),
semantic_target,
),
source_location: expr.source_location.clone(),
})
}
ast::ExpressionKind::LogicalNegation(operand, neg_type) => {
let converted_operand = self.convert_expression_and_extract_dependencies(
operand,
current_spec_arc,
facts_map,
current_segments,
depends_on_rules,
effective_spec_refs,
rule_names,
)?;
Some(Expression {
kind: ExpressionKind::LogicalNegation(
Arc::new(converted_operand),
neg_type.clone(),
),
source_location: expr.source_location.clone(),
})
}
ast::ExpressionKind::MathematicalComputation(op, operand) => {
let converted_operand = self.convert_expression_and_extract_dependencies(
operand,
current_spec_arc,
facts_map,
current_segments,
depends_on_rules,
effective_spec_refs,
rule_names,
)?;
Some(Expression {
kind: ExpressionKind::MathematicalComputation(
op.clone(),
Arc::new(converted_operand),
),
source_location: expr.source_location.clone(),
})
}
// Literals: translate the parsed value and attach its primitive type.
ast::ExpressionKind::Literal(value) => {
let semantic_value = match value_to_semantic(value) {
Ok(v) => v,
Err(e) => {
self.errors.push(self.engine_error(e, expr_src));
return None;
}
};
let lemma_type = match value {
Value::Text(_) => primitive_text().clone(),
Value::Number(_) => primitive_number().clone(),
// Scale literals take their type from the spec's unit index.
Value::Scale(_, unit) => {
match self
.local_types
.get(current_spec_arc)
.and_then(|dt| dt.unit_index.get(unit))
{
Some((lt, _)) => lt.clone(),
None => {
self.errors.push(self.engine_error(
format!(
"Scale literal uses unknown unit '{}' for this spec",
unit
),
expr_src,
));
return None;
}
}
}
Value::Boolean(_) => primitive_boolean().clone(),
Value::Date(_) => primitive_date().clone(),
Value::Time(_) => primitive_time().clone(),
Value::Duration(_, _) => primitive_duration().clone(),
Value::Ratio(_, _) => primitive_ratio().clone(),
};
let literal_value = LiteralValue {
value: semantic_value,
lemma_type,
};
Some(Expression {
kind: ExpressionKind::Literal(Box::new(literal_value)),
source_location: expr.source_location.clone(),
})
}
// Veto expressions pass through unchanged.
ast::ExpressionKind::Veto(veto_expression) => Some(Expression {
kind: ExpressionKind::Veto(veto_expression.clone()),
source_location: expr.source_location.clone(),
}),
// A numeric literal whose unit could not be classified at parse time:
// resolve it now against the unit index, or fail.
ast::ExpressionKind::UnresolvedUnitLiteral(value, unit) => {
if let Some((lt, _)) = self
.local_types
.get(current_spec_arc)
.and_then(|dt| dt.unit_index.get(unit))
{
let semantic_value = ValueKind::Scale(*value, unit.clone());
let literal_value = LiteralValue {
value: semantic_value,
lemma_type: lt.clone(),
};
Some(Expression {
kind: ExpressionKind::Literal(Box::new(literal_value)),
source_location: expr.source_location.clone(),
})
} else {
self.errors
.push(self.engine_error(format!("Unknown unit '{}'", unit), expr_src));
None
}
}
ast::ExpressionKind::Now => Some(Expression {
kind: ExpressionKind::Now,
source_location: expr.source_location.clone(),
}),
// Date sugar ("in past/future [within <tolerance>]").
ast::ExpressionKind::DateRelative(kind, date_expr, tolerance) => {
let converted_date = self.convert_expression_and_extract_dependencies(
date_expr,
current_spec_arc,
facts_map,
current_segments,
depends_on_rules,
effective_spec_refs,
rule_names,
)?;
let converted_tolerance = match tolerance {
Some(tol) => Some(Arc::new(self.convert_expression_and_extract_dependencies(
tol,
current_spec_arc,
facts_map,
current_segments,
depends_on_rules,
effective_spec_refs,
rule_names,
)?)),
None => None,
};
Some(Expression {
kind: ExpressionKind::DateRelative(
*kind,
Arc::new(converted_date),
converted_tolerance,
),
source_location: expr.source_location.clone(),
})
}
// Calendar sugar (e.g. same day/month/year checks on a date).
ast::ExpressionKind::DateCalendar(kind, unit, date_expr) => {
let converted_date = self.convert_expression_and_extract_dependencies(
date_expr,
current_spec_arc,
facts_map,
current_segments,
depends_on_rules,
effective_spec_refs,
rule_names,
)?;
Some(Expression {
kind: ExpressionKind::DateCalendar(*kind, *unit, Arc::new(converted_date)),
source_location: expr.source_location.clone(),
})
}
}
}
}
/// Looks up the resolved types for the spec named `name`.
///
/// The map is keyed by spec `Arc`, so a lookup by name requires a linear
/// scan over the entries.
fn find_types_by_name<'b>(
    types: &'b ResolvedTypesMap,
    name: &str,
) -> Option<&'b ResolvedSpecTypes> {
    for (spec, resolved) in types.iter() {
        if spec.name == name {
            return Some(resolved);
        }
    }
    None
}
/// Determines the result type of an arithmetic operation between two typed
/// operands. Delegates to the recursive helper with `swapped = false` so the
/// commutative fallback (one retry with operands reversed) is available.
fn compute_arithmetic_result_type(left_type: LemmaType, right_type: LemmaType) -> LemmaType {
compute_arithmetic_result_type_recursive(left_type, right_type, false)
}
/// Recursive core of arithmetic result-type inference.
///
/// Arm order is significant: veto/undetermined and the temporal pairs must be
/// tried before the `left == right` catch-all (a date combined with a date
/// yields a duration, not a date). When no left-oriented arm matches, the
/// operands are retried once in swapped order; `swapped` prevents infinite
/// recursion, and a second miss yields the undetermined type. A right-side
/// `Undetermined` is caught by that swapped retry.
fn compute_arithmetic_result_type_recursive(
left_type: LemmaType,
right_type: LemmaType,
swapped: bool,
) -> LemmaType {
match (&left_type.specifications, &right_type.specifications) {
// Veto on either side dominates everything else.
(TypeSpecification::Veto { .. }, _) | (_, TypeSpecification::Veto { .. }) => {
LemmaType::veto_type()
}
// Undetermined left operand makes the whole result undetermined.
(TypeSpecification::Undetermined, _) => LemmaType::undetermined_type(),
// Temporal ⊕ temporal yields a duration (checked before the equality
// catch-all on purpose).
(TypeSpecification::Date { .. }, TypeSpecification::Date { .. }) => {
primitive_duration().clone()
}
(TypeSpecification::Date { .. }, TypeSpecification::Time { .. }) => {
primitive_duration().clone()
}
(TypeSpecification::Time { .. }, TypeSpecification::Time { .. }) => {
primitive_duration().clone()
}
// Identical types are closed under arithmetic (scales, ratios, ...).
_ if left_type == right_type => left_type,
// Shifting a temporal value by a duration keeps the temporal type.
(TypeSpecification::Date { .. }, TypeSpecification::Duration { .. }) => left_type,
(TypeSpecification::Time { .. }, TypeSpecification::Duration { .. }) => left_type,
// Scaling a scale by a ratio or plain number keeps the scale type.
(TypeSpecification::Scale { .. }, TypeSpecification::Ratio { .. }) => left_type,
(TypeSpecification::Scale { .. }, TypeSpecification::Number { .. }) => left_type,
(TypeSpecification::Scale { .. }, TypeSpecification::Duration { .. }) => {
primitive_number().clone()
}
(TypeSpecification::Scale { .. }, TypeSpecification::Scale { .. }) => left_type,
// Durations combined with numbers/ratios stay durations.
(TypeSpecification::Duration { .. }, TypeSpecification::Number { .. }) => left_type,
(TypeSpecification::Duration { .. }, TypeSpecification::Ratio { .. }) => left_type,
(TypeSpecification::Duration { .. }, TypeSpecification::Duration { .. }) => {
primitive_duration().clone()
}
// Numbers absorb ratios; number ⊕ number is a number.
(TypeSpecification::Number { .. }, TypeSpecification::Ratio { .. }) => {
primitive_number().clone()
}
(TypeSpecification::Number { .. }, TypeSpecification::Number { .. }) => {
primitive_number().clone()
}
(TypeSpecification::Ratio { .. }, TypeSpecification::Ratio { .. }) => left_type,
// Nothing matched: retry once with operands swapped, then give up.
_ => {
if swapped {
LemmaType::undetermined_type()
} else {
compute_arithmetic_result_type_recursive(right_type, left_type, true)
}
}
}
}
/// Infers the static `LemmaType` an expression evaluates to.
///
/// Rule references resolve through `computed_rule_types` (populated in
/// execution order); unknown rules fall back to the undetermined type. Veto
/// propagates through every operator, and undetermined operands propagate
/// through boolean/logical operators.
///
/// The `LogicalAnd`/`Comparison` arms and the `ScaleUnit`/`RatioUnit`
/// conversion arms had byte-identical bodies; they are merged with
/// or-patterns so the shared logic is stated once.
fn infer_expression_type(
    expression: &Expression,
    graph: &Graph,
    computed_rule_types: &HashMap<RulePath, LemmaType>,
    resolved_types: &ResolvedTypesMap,
    spec_name: &str,
) -> LemmaType {
    match &expression.kind {
        ExpressionKind::Literal(literal_value) => literal_value.as_ref().get_type().clone(),
        ExpressionKind::FactPath(fact_path) => infer_fact_type(fact_path, graph),
        ExpressionKind::RulePath(rule_path) => computed_rule_types
            .get(rule_path)
            .cloned()
            .unwrap_or_else(LemmaType::undetermined_type),
        // Both operators produce a boolean with identical veto/undetermined
        // propagation, so the arms are merged.
        ExpressionKind::LogicalAnd(left, right)
        | ExpressionKind::Comparison(left, _, right) => {
            let left_type =
                infer_expression_type(left, graph, computed_rule_types, resolved_types, spec_name);
            let right_type =
                infer_expression_type(right, graph, computed_rule_types, resolved_types, spec_name);
            if left_type.vetoed() || right_type.vetoed() {
                return LemmaType::veto_type();
            }
            if left_type.is_undetermined() || right_type.is_undetermined() {
                return LemmaType::undetermined_type();
            }
            primitive_boolean().clone()
        }
        ExpressionKind::LogicalNegation(operand, _) => {
            let operand_type = infer_expression_type(
                operand,
                graph,
                computed_rule_types,
                resolved_types,
                spec_name,
            );
            if operand_type.vetoed() {
                return LemmaType::veto_type();
            }
            if operand_type.is_undetermined() {
                return LemmaType::undetermined_type();
            }
            primitive_boolean().clone()
        }
        // Arithmetic result types come from the dedicated table.
        ExpressionKind::Arithmetic(left, _operator, right) => {
            let left_type =
                infer_expression_type(left, graph, computed_rule_types, resolved_types, spec_name);
            let right_type =
                infer_expression_type(right, graph, computed_rule_types, resolved_types, spec_name);
            compute_arithmetic_result_type(left_type, right_type)
        }
        ExpressionKind::UnitConversion(source_expression, target) => {
            let source_type = infer_expression_type(
                source_expression,
                graph,
                computed_rule_types,
                resolved_types,
                spec_name,
            );
            if source_type.vetoed() {
                return LemmaType::veto_type();
            }
            if source_type.is_undetermined() {
                return LemmaType::undetermined_type();
            }
            match target {
                SemanticConversionTarget::Duration(_) => primitive_duration().clone(),
                // Scale and ratio targets resolve identically: a bare number
                // adopts the unit's declared type (looked up in this spec's
                // unit index); any other source keeps its own type.
                SemanticConversionTarget::ScaleUnit(unit_name)
                | SemanticConversionTarget::RatioUnit(unit_name) => {
                    if source_type.is_number() {
                        find_types_by_name(resolved_types, spec_name)
                            .and_then(|dt| dt.unit_index.get(unit_name))
                            .map(|(lt, _)| lt.clone())
                            .unwrap_or_else(LemmaType::undetermined_type)
                    } else {
                        source_type
                    }
                }
            }
        }
        ExpressionKind::MathematicalComputation(_, operand) => {
            let operand_type = infer_expression_type(
                operand,
                graph,
                computed_rule_types,
                resolved_types,
                spec_name,
            );
            if operand_type.vetoed() {
                return LemmaType::veto_type();
            }
            if operand_type.is_undetermined() {
                return LemmaType::undetermined_type();
            }
            primitive_number().clone()
        }
        ExpressionKind::Veto(_) => LemmaType::veto_type(),
        ExpressionKind::Now => primitive_date().clone(),
        // Date sugar expressions are predicates and always yield a boolean.
        ExpressionKind::DateRelative(..) | ExpressionKind::DateCalendar(..) => {
            primitive_boolean().clone()
        }
    }
}
/// Returns the type of the fact at `fact_path`, or the undetermined type when
/// the fact is missing or is a spec reference (which has no value type).
fn infer_fact_type(fact_path: &FactPath, graph: &Graph) -> LemmaType {
    graph
        .facts()
        .get(fact_path)
        .map(|entry| match entry {
            FactData::Value { value, .. } => value.lemma_type.clone(),
            FactData::TypeDeclaration { resolved_type, .. } => resolved_type.clone(),
            FactData::SpecRef { .. } => LemmaType::undetermined_type(),
        })
        .unwrap_or_else(LemmaType::undetermined_type)
}
fn engine_error_at_graph(graph: &Graph, source: &Source, message: impl Into<String>) -> Error {
Error::validation_with_context(
message.into(),
Some(source.clone()),
None::<String>,
Some(Arc::clone(&graph.main_spec)),
None,
)
}
fn check_logical_operands(
graph: &Graph,
left_type: &LemmaType,
right_type: &LemmaType,
source: &Source,
) -> Result<(), Vec<Error>> {
if left_type.vetoed() || right_type.vetoed() {
return Ok(());
}
let mut errors = Vec::new();
if !left_type.is_boolean() {
errors.push(engine_error_at_graph(
graph,
source,
format!(
"Logical operation requires boolean operands, got {:?} for left operand",
left_type
),
));
}
if !right_type.is_boolean() {
errors.push(engine_error_at_graph(
graph,
source,
format!(
"Logical operation requires boolean operands, got {:?} for right operand",
right_type
),
));
}
if errors.is_empty() {
Ok(())
} else {
Err(errors)
}
}
/// Validates the single operand of a logical negation.
///
/// Vetoed operands pass (veto propagates); otherwise only booleans are legal.
fn check_logical_operand(
    graph: &Graph,
    operand_type: &LemmaType,
    source: &Source,
) -> Result<(), Vec<Error>> {
    if operand_type.vetoed() || operand_type.is_boolean() {
        return Ok(());
    }
    Err(vec![engine_error_at_graph(
        graph,
        source,
        format!(
            "Logical negation requires boolean operand, got {:?}",
            operand_type
        ),
    )])
}
fn check_comparison_types(
graph: &Graph,
left_type: &LemmaType,
op: &ComparisonComputation,
right_type: &LemmaType,
source: &Source,
) -> Result<(), Vec<Error>> {
if left_type.vetoed() || right_type.vetoed() {
return Ok(());
}
let is_equality_only = matches!(op, ComparisonComputation::Is | ComparisonComputation::IsNot);
if left_type.is_boolean() && right_type.is_boolean() {
if !is_equality_only {
return Err(vec![engine_error_at_graph(
graph,
source,
format!("Can only use 'is' and 'is not' with booleans (got {})", op),
)]);
}
return Ok(());
}
if left_type.is_text() && right_type.is_text() {
if !is_equality_only {
return Err(vec![engine_error_at_graph(
graph,
source,
format!("Can only use 'is' and 'is not' with text (got {})", op),
)]);
}
return Ok(());
}
if left_type.is_number() && right_type.is_number() {
return Ok(());
}
if left_type.is_ratio() && right_type.is_ratio() {
return Ok(());
}
if left_type.is_date() && right_type.is_date() {
return Ok(());
}
if left_type.is_time() && right_type.is_time() {
return Ok(());
}
if left_type.is_scale() && right_type.is_scale() {
if !left_type.same_scale_family(right_type) {
return Err(vec![engine_error_at_graph(
graph,
source,
format!(
"Cannot compare different scale types: {} and {}",
left_type.name(),
right_type.name()
),
)]);
}
return Ok(());
}
if left_type.is_duration() && right_type.is_duration() {
return Ok(());
}
if left_type.is_duration() && right_type.is_number() {
return Ok(());
}
if left_type.is_number() && right_type.is_duration() {
return Ok(());
}
Err(vec![engine_error_at_graph(
graph,
source,
format!("Cannot compare {:?} with {:?}", left_type, right_type),
)])
}
/// Validates operand types for an arithmetic expression.
///
/// Vetoed operands pass (veto propagates instead of erroring). Temporal
/// operands combine only via +/- with another temporal value or a duration;
/// scales must share a scale family; remaining numeric-like combinations are
/// whitelisted per operator in the `allowed` table below.
fn check_arithmetic_types(
graph: &Graph,
left_type: &LemmaType,
right_type: &LemmaType,
operator: &ArithmeticComputation,
source: &Source,
) -> Result<(), Vec<Error>> {
if left_type.vetoed() || right_type.vetoed() {
return Ok(());
}
// Temporal operands: only add/subtract with another temporal or a duration.
if left_type.is_date() || left_type.is_time() || right_type.is_date() || right_type.is_time() {
let both_temporal = (left_type.is_date() || left_type.is_time())
&& (right_type.is_date() || right_type.is_time());
let one_is_duration = left_type.is_duration() || right_type.is_duration();
let valid = matches!(
operator,
ArithmeticComputation::Add | ArithmeticComputation::Subtract
) && (both_temporal || one_is_duration);
if !valid {
return Err(vec![engine_error_at_graph(
graph,
source,
format!(
"Cannot apply '{}' to {} and {}.",
operator,
left_type.name(),
right_type.name()
),
)]);
}
return Ok(());
}
// Mixed scale families would produce ambiguous result units — reject.
if left_type.is_scale() && right_type.is_scale() && !left_type.same_scale_family(right_type) {
return Err(vec![engine_error_at_graph(
graph,
source,
format!(
"Cannot {} different scale types: {} and {}. Operations between different scale types produce ambiguous result units.",
match operator {
ArithmeticComputation::Add => "add",
ArithmeticComputation::Subtract => "subtract",
ArithmeticComputation::Multiply => "multiply",
ArithmeticComputation::Divide => "divide",
ArithmeticComputation::Modulo => "modulo",
ArithmeticComputation::Power => "power",
},
left_type.name(),
right_type.name()
),
)]);
}
// Only numeric-like operands (scale/number/duration/ratio) may participate.
let left_valid = left_type.is_scale()
|| left_type.is_number()
|| left_type.is_duration()
|| left_type.is_ratio();
let right_valid = right_type.is_scale()
|| right_type.is_number()
|| right_type.is_duration()
|| right_type.is_ratio();
if !left_valid || !right_valid {
return Err(vec![engine_error_at_graph(
graph,
source,
format!(
"Cannot apply '{}' to {} and {}.",
operator,
left_type.name(),
right_type.name()
),
)]);
}
// Same base type is always fine.
if left_type.has_same_base_type(right_type) {
return Ok(());
}
// `pair(a, b)` is true when the operands match (a, b) in either order.
let pair = |a: fn(&LemmaType) -> bool, b: fn(&LemmaType) -> bool| {
(a(left_type) && b(right_type)) || (b(left_type) && a(right_type))
};
// Per-operator whitelist of cross-type combinations. Note that Divide
// (unlike Multiply) requires the duration on the LEFT for duration/number
// and duration/ratio — a number divided by a duration is not allowed.
let allowed = match operator {
ArithmeticComputation::Multiply => {
pair(LemmaType::is_scale, LemmaType::is_number)
|| pair(LemmaType::is_scale, LemmaType::is_ratio)
|| pair(LemmaType::is_scale, LemmaType::is_duration)
|| pair(LemmaType::is_duration, LemmaType::is_number)
|| pair(LemmaType::is_duration, LemmaType::is_ratio)
|| pair(LemmaType::is_number, LemmaType::is_ratio)
}
ArithmeticComputation::Divide => {
pair(LemmaType::is_scale, LemmaType::is_number)
|| pair(LemmaType::is_scale, LemmaType::is_ratio)
|| pair(LemmaType::is_scale, LemmaType::is_duration)
|| (left_type.is_duration() && right_type.is_number())
|| (left_type.is_duration() && right_type.is_ratio())
|| pair(LemmaType::is_number, LemmaType::is_ratio)
}
ArithmeticComputation::Add | ArithmeticComputation::Subtract => {
pair(LemmaType::is_scale, LemmaType::is_number)
|| pair(LemmaType::is_scale, LemmaType::is_ratio)
|| pair(LemmaType::is_duration, LemmaType::is_number)
|| pair(LemmaType::is_duration, LemmaType::is_ratio)
|| pair(LemmaType::is_number, LemmaType::is_ratio)
}
ArithmeticComputation::Power => {
(left_type.is_number()
|| left_type.is_scale()
|| left_type.is_ratio()
|| left_type.is_duration())
&& (right_type.is_number() || right_type.is_ratio())
}
// Modulo only constrains the right-hand side (any numeric-like left
// operand passed the earlier left_valid check).
ArithmeticComputation::Modulo => right_type.is_number() || right_type.is_ratio(),
};
if !allowed {
return Err(vec![engine_error_at_graph(
graph,
source,
format!(
"Cannot apply '{}' to {} and {}.",
operator,
left_type.name(),
right_type.name(),
),
)]);
}
Ok(())
}
/// Validates a unit conversion (`x in <unit>` / duration target) against the
/// source expression's type.
///
/// For scale/ratio targets: when the source is itself a scale/ratio of the
/// matching kind, the target unit must be one of that type's declared units
/// (unit names compare case-insensitively). A bare number may convert to any
/// unit known to the spec's unit index. Anything else is an error. Vetoed
/// sources pass unchecked (veto propagates).
fn check_unit_conversion_types(
graph: &Graph,
source_type: &LemmaType,
target: &SemanticConversionTarget,
resolved_types: &ResolvedTypesMap,
source: &Source,
spec_name: &str,
) -> Result<(), Vec<Error>> {
if source_type.vetoed() {
return Ok(());
}
match target {
SemanticConversionTarget::ScaleUnit(unit_name)
| SemanticConversionTarget::RatioUnit(unit_name) => {
// Some((found, valid_units)) when the source type kind matches the
// target kind; None when it does not (handled further below).
let unit_check: Option<(bool, Vec<&str>)> = match (&source_type.specifications, target)
{
(
TypeSpecification::Scale { units, .. },
SemanticConversionTarget::ScaleUnit(_),
) => {
let valid: Vec<&str> = units.iter().map(|u| u.name.as_str()).collect();
let found = units.iter().any(|u| u.name.eq_ignore_ascii_case(unit_name));
Some((found, valid))
}
(
TypeSpecification::Ratio { units, .. },
SemanticConversionTarget::RatioUnit(_),
) => {
let valid: Vec<&str> = units.iter().map(|u| u.name.as_str()).collect();
let found = units.iter().any(|u| u.name.eq_ignore_ascii_case(unit_name));
Some((found, valid))
}
_ => None,
};
match unit_check {
// Target unit belongs to the source type: conversion is valid.
Some((true, _)) => Ok(()),
// Matching kind but unknown unit: list the valid names.
Some((false, valid)) => Err(vec![engine_error_at_graph(
graph,
source,
format!(
"Unknown unit '{}' for type {}. Valid units: {}",
unit_name,
source_type.name(),
valid.join(", ")
),
)]),
// Plain number: the unit just has to exist in this spec's index.
None if source_type.is_number() => {
if find_types_by_name(resolved_types, spec_name)
.and_then(|dt| dt.unit_index.get(unit_name))
.is_none()
{
Err(vec![engine_error_at_graph(
graph,
source,
format!("Unknown unit '{}' in spec '{}'.", unit_name, spec_name),
)])
} else {
Ok(())
}
}
// Non-numeric, non-matching source type: never convertible.
None => Err(vec![engine_error_at_graph(
graph,
source,
format!(
"Cannot convert {} to unit '{}'.",
source_type.name(),
unit_name
),
)]),
}
}
// Duration targets accept durations and numeric sources only.
SemanticConversionTarget::Duration(_) => {
if !source_type.is_duration() && !source_type.is_numeric() {
Err(vec![engine_error_at_graph(
graph,
source,
format!("Cannot convert {} to duration.", source_type.name()),
)])
} else {
Ok(())
}
}
}
}
/// Validates the operand of a mathematical function (sqrt, abs, ...).
///
/// Vetoed operands pass (veto propagates); otherwise only numbers are legal.
fn check_mathematical_operand(
    graph: &Graph,
    operand_type: &LemmaType,
    source: &Source,
) -> Result<(), Vec<Error>> {
    if operand_type.vetoed() || operand_type.is_number() {
        return Ok(());
    }
    Err(vec![engine_error_at_graph(
        graph,
        source,
        format!(
            "Mathematical function requires number operand, got {:?}",
            operand_type
        ),
    )])
}
/// Verifies that every rule dependency recorded during expression lowering
/// refers to a rule that exists in the graph.
///
/// All dangling references are collected and reported together rather than
/// stopping at the first one.
fn check_all_rule_references_exist(graph: &Graph) -> Result<(), Vec<Error>> {
    let mut errors = Vec::new();
    for (rule_path, rule_node) in graph.rules() {
        for dependency in &rule_node.depends_on_rules {
            // The rules map answers membership directly; the previous version
            // materialized a redundant HashSet of its own keys first.
            if !graph.rules().contains_key(dependency) {
                errors.push(engine_error_at_graph(
                    graph,
                    &rule_node.source,
                    format!(
                        "Rule '{}' references non-existent rule '{}'",
                        rule_path.rule, dependency.rule
                    ),
                ));
            }
        }
    }
    if errors.is_empty() {
        Ok(())
    } else {
        Err(errors)
    }
}
/// Reports every path that is defined both as a fact and as a rule.
///
/// Iterates the rules map as (path, node) pairs directly, so the node is
/// already in hand; the previous version iterated `keys()` and re-looked-up
/// each node with `get().unwrap_or_else(|| unreachable!())`, paying an extra
/// lookup and carrying an impossible-panic branch.
fn check_fact_and_rule_name_collisions(graph: &Graph) -> Result<(), Vec<Error>> {
    let mut errors = Vec::new();
    for (rule_path, rule_node) in graph.rules() {
        // A rule collides when a fact exists at the same segments + name.
        let fact_path = FactPath::new(rule_path.segments.clone(), rule_path.rule.clone());
        if graph.facts().contains_key(&fact_path) {
            errors.push(engine_error_at_graph(
                graph,
                &rule_node.source,
                format!(
                    "Name collision: '{}' is defined as both a fact and a rule",
                    fact_path
                ),
            ));
        }
    }
    if errors.is_empty() {
        Ok(())
    } else {
        Err(errors)
    }
}
/// Validates that `fact_path` names a fact usable in an expression: it must
/// exist and must not be a spec reference (which carries no value type).
fn check_fact_reference(
    fact_path: &FactPath,
    graph: &Graph,
    fact_source: &Source,
) -> Result<(), Vec<Error>> {
    match graph.facts().get(fact_path) {
        // Missing entirely: report at the referencing expression's source.
        None => Err(vec![engine_error_at_graph(
            graph,
            fact_source,
            format!("Unknown fact reference '{}'", fact_path),
        )]),
        // Spec references have no value type: report at the fact's own source.
        Some(entry @ FactData::SpecRef { .. }) => Err(vec![engine_error_at_graph(
            graph,
            entry.source(),
            format!(
                "Cannot compute type for spec reference fact '{}'",
                fact_path
            ),
        )]),
        Some(FactData::Value { .. }) | Some(FactData::TypeDeclaration { .. }) => Ok(()),
    }
}
/// Recursively type-checks an expression tree, accumulating all diagnostics
/// instead of stopping at the first.
///
/// Each operator arm first recurses into its children, then infers the
/// children's types and applies the operator-specific check. `inferred_types`
/// supplies the types of already-processed rules for rule-path references.
fn check_expression(
expression: &Expression,
graph: &Graph,
inferred_types: &HashMap<RulePath, LemmaType>,
resolved_types: &ResolvedTypesMap,
spec_name: &str,
) -> Result<(), Vec<Error>> {
let mut errors = Vec::new();
// Folds a sub-check's failures into the shared error list.
let collect = |result: Result<(), Vec<Error>>, errors: &mut Vec<Error>| {
if let Err(errs) = result {
errors.extend(errs);
}
};
match &expression.kind {
// Literals are always well-typed by construction.
ExpressionKind::Literal(_) => {}
// Fact references must exist and must not be spec references.
ExpressionKind::FactPath(fact_path) => {
let fact_source = expression
.source_location
.as_ref()
.expect("BUG: expression missing source in check_expression");
collect(
check_fact_reference(fact_path, graph, fact_source),
&mut errors,
);
}
// Rule existence is validated separately (check_all_rule_references_exist).
ExpressionKind::RulePath(_) => {}
ExpressionKind::LogicalAnd(left, right) => {
collect(
check_expression(left, graph, inferred_types, resolved_types, spec_name),
&mut errors,
);
collect(
check_expression(right, graph, inferred_types, resolved_types, spec_name),
&mut errors,
);
let left_type =
infer_expression_type(left, graph, inferred_types, resolved_types, spec_name);
let right_type =
infer_expression_type(right, graph, inferred_types, resolved_types, spec_name);
let expr_source = expression
.source_location
.as_ref()
.expect("BUG: expression missing source in check_expression");
collect(
check_logical_operands(graph, &left_type, &right_type, expr_source),
&mut errors,
);
}
ExpressionKind::LogicalNegation(operand, _) => {
collect(
check_expression(operand, graph, inferred_types, resolved_types, spec_name),
&mut errors,
);
let operand_type =
infer_expression_type(operand, graph, inferred_types, resolved_types, spec_name);
let expr_source = expression
.source_location
.as_ref()
.expect("BUG: expression missing source in check_expression");
collect(
check_logical_operand(graph, &operand_type, expr_source),
&mut errors,
);
}
ExpressionKind::Comparison(left, op, right) => {
collect(
check_expression(left, graph, inferred_types, resolved_types, spec_name),
&mut errors,
);
collect(
check_expression(right, graph, inferred_types, resolved_types, spec_name),
&mut errors,
);
let left_type =
infer_expression_type(left, graph, inferred_types, resolved_types, spec_name);
let right_type =
infer_expression_type(right, graph, inferred_types, resolved_types, spec_name);
let expr_source = expression
.source_location
.as_ref()
.expect("BUG: expression missing source in check_expression");
collect(
check_comparison_types(graph, &left_type, op, &right_type, expr_source),
&mut errors,
);
}
ExpressionKind::Arithmetic(left, operator, right) => {
collect(
check_expression(left, graph, inferred_types, resolved_types, spec_name),
&mut errors,
);
collect(
check_expression(right, graph, inferred_types, resolved_types, spec_name),
&mut errors,
);
let left_type =
infer_expression_type(left, graph, inferred_types, resolved_types, spec_name);
let right_type =
infer_expression_type(right, graph, inferred_types, resolved_types, spec_name);
let expr_source = expression
.source_location
.as_ref()
.expect("BUG: expression missing source in check_expression");
collect(
check_arithmetic_types(graph, &left_type, &right_type, operator, expr_source),
&mut errors,
);
}
ExpressionKind::UnitConversion(source_expression, target) => {
collect(
check_expression(
source_expression,
graph,
inferred_types,
resolved_types,
spec_name,
),
&mut errors,
);
let source_type = infer_expression_type(
source_expression,
graph,
inferred_types,
resolved_types,
spec_name,
);
let expr_source = expression
.source_location
.as_ref()
.expect("BUG: expression missing source in check_expression");
collect(
check_unit_conversion_types(
graph,
&source_type,
target,
resolved_types,
expr_source,
spec_name,
),
&mut errors,
);
// NOTE(review): for a numeric source with an unknown unit, this check
// appears to overlap check_unit_conversion_types' number branch, so a
// single unknown unit may produce two diagnostics — confirm whether
// that duplication is intended.
if source_type.is_number() {
match target {
SemanticConversionTarget::ScaleUnit(unit_name)
| SemanticConversionTarget::RatioUnit(unit_name) => {
if find_types_by_name(resolved_types, spec_name)
.and_then(|dt| dt.unit_index.get(unit_name))
.is_none()
{
errors.push(engine_error_at_graph(
graph,
expr_source,
format!(
"Cannot resolve unit '{}' for spec '{}' (types may not have been resolved)",
unit_name,
spec_name
),
));
}
}
SemanticConversionTarget::Duration(_) => {}
}
}
}
ExpressionKind::MathematicalComputation(_, operand) => {
collect(
check_expression(operand, graph, inferred_types, resolved_types, spec_name),
&mut errors,
);
let operand_type =
infer_expression_type(operand, graph, inferred_types, resolved_types, spec_name);
let expr_source = expression
.source_location
.as_ref()
.expect("BUG: expression missing source in check_expression");
collect(
check_mathematical_operand(graph, &operand_type, expr_source),
&mut errors,
);
}
// Veto and Now are always valid in any context.
ExpressionKind::Veto(_) => {}
ExpressionKind::Now => {}
// Date sugar: the subject must be a date; optional tolerance a duration.
ExpressionKind::DateRelative(_, date_expr, tolerance) => {
collect(
check_expression(date_expr, graph, inferred_types, resolved_types, spec_name),
&mut errors,
);
let date_type =
infer_expression_type(date_expr, graph, inferred_types, resolved_types, spec_name);
if !date_type.is_date() {
let expr_source = expression
.source_location
.as_ref()
.expect("BUG: expression missing source in check_expression");
errors.push(engine_error_at_graph(
graph,
expr_source,
format!(
"Date sugar 'in past/future' requires a date expression, got type '{}'",
date_type
),
));
}
if let Some(tol) = tolerance {
collect(
check_expression(tol, graph, inferred_types, resolved_types, spec_name),
&mut errors,
);
let tol_type =
infer_expression_type(tol, graph, inferred_types, resolved_types, spec_name);
if !tol_type.is_duration() {
let expr_source = expression
.source_location
.as_ref()
.expect("BUG: expression missing source in check_expression");
errors.push(engine_error_at_graph(
graph,
expr_source,
format!(
"Tolerance in date sugar must be a duration, got type '{}'",
tol_type
),
));
}
}
}
// Calendar sugar: the subject must be a date.
ExpressionKind::DateCalendar(_, _, date_expr) => {
collect(
check_expression(date_expr, graph, inferred_types, resolved_types, spec_name),
&mut errors,
);
let date_type =
infer_expression_type(date_expr, graph, inferred_types, resolved_types, spec_name);
if !date_type.is_date() {
let expr_source = expression
.source_location
.as_ref()
.expect("BUG: expression missing source in check_expression");
errors.push(engine_error_at_graph(
graph,
expr_source,
format!(
"Calendar sugar requires a date expression, got type '{}'",
date_type
),
));
}
}
}
if errors.is_empty() {
Ok(())
} else {
Err(errors)
}
}
fn check_rule_types(
graph: &Graph,
execution_order: &[RulePath],
inferred_types: &HashMap<RulePath, LemmaType>,
resolved_types: &ResolvedTypesMap,
) -> Result<(), Vec<Error>> {
let mut errors = Vec::new();
let collect = |result: Result<(), Vec<Error>>, errors: &mut Vec<Error>| {
if let Err(errs) = result {
errors.extend(errs);
}
};
for rule_path in execution_order {
let rule_node = match graph.rules().get(rule_path) {
Some(node) => node,
None => continue,
};
let branches = &rule_node.branches;
let spec_name = rule_node.spec_name.as_str();
if branches.is_empty() {
continue;
}
let (_, default_result) = &branches[0];
collect(
check_expression(
default_result,
graph,
inferred_types,
resolved_types,
spec_name,
),
&mut errors,
);
let default_type = infer_expression_type(
default_result,
graph,
inferred_types,
resolved_types,
spec_name,
);
let mut non_veto_type: Option<LemmaType> = None;
if !default_type.vetoed() && !default_type.is_undetermined() {
non_veto_type = Some(default_type.clone());
}
for (branch_index, (condition, result)) in branches.iter().enumerate().skip(1) {
if let Some(condition_expression) = condition {
collect(
check_expression(
condition_expression,
graph,
inferred_types,
resolved_types,
spec_name,
),
&mut errors,
);
let condition_type = infer_expression_type(
condition_expression,
graph,
inferred_types,
resolved_types,
spec_name,
);
if !condition_type.is_boolean() && !condition_type.is_undetermined() {
let condition_source = condition_expression
.source_location
.as_ref()
.expect("BUG: condition expression missing source in check_rule_types");
errors.push(engine_error_at_graph(
graph,
condition_source,
format!(
"Unless clause condition in rule '{}' must be boolean, got {:?}",
rule_path.rule, condition_type
),
));
}
}
collect(
check_expression(result, graph, inferred_types, resolved_types, spec_name),
&mut errors,
);
let result_type =
infer_expression_type(result, graph, inferred_types, resolved_types, spec_name);
if !result_type.vetoed() && !result_type.is_undetermined() {
if non_veto_type.is_none() {
non_veto_type = Some(result_type.clone());
} else if let Some(ref existing_type) = non_veto_type {
if !existing_type.has_same_base_type(&result_type) {
let Some(rule_node) = graph.rules().get(rule_path) else {
unreachable!(
"BUG: rule type validation referenced missing rule '{}'",
rule_path.rule
);
};
let rule_source = &rule_node.source;
let default_expr = &branches[0].1;
let mut location_parts = vec![format!(
"{}:{}:{}",
rule_source.attribute, rule_source.span.line, rule_source.span.col
)];
if let Some(loc) = &default_expr.source_location {
location_parts.push(format!(
"default branch at {}:{}:{}",
loc.attribute, loc.span.line, loc.span.col
));
}
if let Some(loc) = &result.source_location {
location_parts.push(format!(
"unless clause {} at {}:{}:{}",
branch_index, loc.attribute, loc.span.line, loc.span.col
));
}
errors.push(Error::validation_with_context(
format!("Type mismatch in rule '{}' in spec '{}' ({}): default branch returns {}, but unless clause {} returns {}. All branches must return the same primitive type.",
rule_path.rule,
spec_name,
location_parts.join(", "),
existing_type.name(),
branch_index,
result_type.name()),
Some(rule_source.clone()),
None::<String>,
Some(Arc::clone(&graph.main_spec)),
None,
));
}
}
}
}
}
if errors.is_empty() {
Ok(())
} else {
Err(errors)
}
}
/// Writes each inferred rule type back onto its rule node in the graph.
/// Paths with no matching node are silently ignored.
fn apply_inferred_types(graph: &mut Graph, inferred_types: HashMap<RulePath, LemmaType>) {
    let rules = graph.rules_mut();
    for (rule_path, rule_type) in inferred_types.into_iter() {
        match rules.get_mut(&rule_path) {
            Some(rule_node) => rule_node.rule_type = rule_type,
            None => {}
        }
    }
}
/// Computes the result type of every rule, walking rules in execution order
/// so that each rule's dependencies have been typed before it is visited.
///
/// A rule's type is the first non-veto, determined branch type encountered
/// (default branch first, then each unless clause in order); a rule whose
/// branches are all vetoed or undetermined gets the veto type.
fn infer_rule_types(
    graph: &Graph,
    execution_order: &[RulePath],
    resolved_types: &ResolvedTypesMap,
) -> HashMap<RulePath, LemmaType> {
    let mut computed_types: HashMap<RulePath, LemmaType> = HashMap::new();
    for rule_path in execution_order {
        let rule_node = match graph.rules().get(rule_path) {
            Some(node) => node,
            None => continue,
        };
        let branches = &rule_node.branches;
        let spec_name = rule_node.spec_name.as_str();
        if branches.is_empty() {
            continue;
        }
        // Branch 0 is the rule's default result expression.
        let (_, default_result) = &branches[0];
        let default_type = infer_expression_type(
            default_result,
            graph,
            &computed_types,
            resolved_types,
            spec_name,
        );
        let mut non_veto_type: Option<LemmaType> = None;
        if !default_type.vetoed() && !default_type.is_undetermined() {
            non_veto_type = Some(default_type.clone());
        }
        // The previous `.enumerate()` bound an unused index; iterate the
        // unless clauses directly.
        for (condition, result) in branches.iter().skip(1) {
            if let Some(condition_expression) = condition {
                // Condition types never affect the rule's result type. The
                // inference call is kept so this pass exercises expressions
                // the same way check_rule_types does — NOTE(review): if
                // infer_expression_type is pure this is dead work; confirm
                // before removing.
                let _condition_type = infer_expression_type(
                    condition_expression,
                    graph,
                    &computed_types,
                    resolved_types,
                    spec_name,
                );
            }
            let result_type =
                infer_expression_type(result, graph, &computed_types, resolved_types, spec_name);
            if !result_type.vetoed() && !result_type.is_undetermined() && non_veto_type.is_none() {
                non_veto_type = Some(result_type.clone());
            }
        }
        // No concrete branch type means the rule can only veto.
        let rule_type = non_veto_type.unwrap_or_else(LemmaType::veto_type);
        computed_types.insert(rule_path.clone(), rule_type);
    }
    computed_types
}
/// Collects, for every non-empty dependency path, the set of rule names
/// referenced through that path by any rule in the graph.
///
/// The key is the sequence of fact names along the dependency's segments;
/// dependencies with no segments (local references) are skipped.
fn compute_referenced_rules_by_path(graph: &Graph) -> HashMap<Vec<String>, HashSet<String>> {
    let mut referenced_rules: HashMap<Vec<String>, HashSet<String>> = HashMap::new();
    let cross_spec_deps = graph
        .rules()
        .values()
        .flat_map(|node| node.depends_on_rules.iter())
        .filter(|dep| !dep.segments.is_empty());
    for dep in cross_spec_deps {
        let path_key: Vec<String> = dep.segments.iter().map(|seg| seg.fact.clone()).collect();
        referenced_rules
            .entry(path_key)
            .or_default()
            .insert(dep.rule.clone());
    }
    referenced_rules
}
#[cfg(test)]
mod tests {
    //! Unit tests for graph construction: fact/rule registration, duplicate
    //! detection, reference resolution, type registration, and multi-error
    //! collection.
    use super::*;
    use crate::parsing::ast::{BooleanValue, Reference, Span, Value};

    /// Dummy source location at the start of "test.lemma".
    fn test_source() -> Source {
        Source::new(
            "test.lemma",
            Span {
                start: 0,
                end: 0,
                line: 1,
                col: 0,
            },
        )
    }

    /// Minimal filename -> file-contents map matching `test_source`.
    fn test_sources() -> HashMap<String, String> {
        let mut sources = HashMap::new();
        sources.insert("test.lemma".to_string(), "spec test\n".to_string());
        sources
    }

    /// Builds a `Graph` for `main_spec` after registering all of `all_specs`
    /// in a fresh `Context`. Dependency specs are built first so their plan
    /// hashes are registered; dependency build failures are ignored here (the
    /// main build below surfaces real errors).
    fn build_graph(
        main_spec: &LemmaSpec,
        all_specs: &[LemmaSpec],
        sources: HashMap<String, String>,
    ) -> Result<Graph, Vec<Error>> {
        let mut ctx = Context::new();
        for spec in all_specs {
            ctx.insert_spec(Arc::new(spec.clone()), spec.from_registry)
                .expect("test specs must have valid timespans");
        }
        let main_spec_arc = ctx
            .get_spec_effective_from(main_spec.name.as_str(), main_spec.effective_from())
            .expect("main_spec must be in all_specs");
        let mut plan_hashes = crate::planning::PlanHashRegistry::default();
        // Pre-build execution plans for every dependency spec so that the
        // main build can look up their plan hashes.
        for spec in all_specs {
            if spec.name == main_spec.name {
                continue;
            }
            let dep_arc = ctx
                .get_spec_effective_from(&spec.name, spec.effective_from())
                .expect("dep spec must be in context");
            if let Ok((dep_graph, dep_types)) = Graph::build(
                &dep_arc,
                &ctx,
                sources.clone(),
                dep_arc.effective_from().cloned(),
                &plan_hashes,
            ) {
                let dep_plan = crate::planning::execution_plan::build_execution_plan(
                    &dep_graph,
                    &dep_types,
                    dep_arc.effective_from().cloned(),
                    None,
                );
                plan_hashes.insert(
                    &dep_arc,
                    dep_arc.effective_from().cloned(),
                    dep_plan.plan_hash(),
                );
            }
        }
        match Graph::build(
            &main_spec_arc,
            &ctx,
            sources,
            main_spec_arc.effective_from().cloned(),
            &plan_hashes,
        ) {
            Ok((graph, _types)) => Ok(graph),
            Err(errors) => Err(errors),
        }
    }

    /// Creates an empty spec with the given name.
    fn create_test_spec(name: &str) -> LemmaSpec {
        LemmaSpec::new(name.to_string())
    }

    /// Creates a top-level fact holding a literal value.
    fn create_literal_fact(name: &str, value: Value) -> LemmaFact {
        LemmaFact {
            reference: Reference {
                segments: Vec::new(),
                name: name.to_string(),
            },
            value: ParsedFactValue::Literal(value),
            source_location: test_source(),
        }
    }

    /// Wraps a literal value in an AST expression with a test source span.
    fn create_literal_expr(value: Value) -> ast::Expression {
        ast::Expression {
            kind: ast::ExpressionKind::Literal(value),
            source_location: Some(test_source()),
        }
    }

    // A spec with only literal facts produces a graph with facts and no rules.
    #[test]
    fn test_build_simple_graph() {
        let mut spec = create_test_spec("test");
        spec = spec.add_fact(create_literal_fact(
            "age",
            Value::Number(rust_decimal::Decimal::from(25)),
        ));
        spec = spec.add_fact(create_literal_fact("name", Value::Text("John".to_string())));
        let result = build_graph(&spec, &[spec.clone()], test_sources());
        assert!(result.is_ok(), "Should build graph successfully");
        let graph = result.unwrap();
        assert_eq!(graph.facts().len(), 2);
        assert_eq!(graph.rules().len(), 0);
    }

    // A rule referencing an existing fact builds successfully.
    #[test]
    fn test_build_graph_with_rule() {
        let mut spec = create_test_spec("test");
        spec = spec.add_fact(create_literal_fact(
            "age",
            Value::Number(rust_decimal::Decimal::from(25)),
        ));
        let age_expr = ast::Expression {
            kind: ast::ExpressionKind::Reference(Reference {
                segments: Vec::new(),
                name: "age".to_string(),
            }),
            source_location: Some(test_source()),
        };
        let rule = LemmaRule {
            name: "is_adult".to_string(),
            expression: age_expr,
            unless_clauses: Vec::new(),
            source_location: test_source(),
        };
        spec = spec.add_rule(rule);
        let result = build_graph(&spec, &[spec.clone()], test_sources());
        assert!(result.is_ok(), "Should build graph successfully");
        let graph = result.unwrap();
        assert_eq!(graph.facts().len(), 1);
        assert_eq!(graph.rules().len(), 1);
    }

    // A dotted fact binding (x.y) is only valid when x refers to a spec.
    #[test]
    fn should_reject_fact_binding_into_non_spec_fact() {
        let mut spec = create_test_spec("test");
        spec = spec.add_fact(create_literal_fact("x", Value::Number(1.into())));
        spec = spec.add_fact(LemmaFact {
            reference: Reference::from_path(vec!["x".to_string(), "y".to_string()]),
            value: ParsedFactValue::Literal(Value::Number(2.into())),
            source_location: test_source(),
        });
        let result = build_graph(&spec, &[spec.clone()], test_sources());
        assert!(
            result.is_err(),
            "Overriding x.y must fail when x is not a spec reference"
        );
    }

    // A fact and a rule may not share the same name.
    #[test]
    fn should_reject_fact_and_rule_name_collision() {
        let mut spec = create_test_spec("test");
        spec = spec.add_fact(create_literal_fact("x", Value::Number(1.into())));
        spec = spec.add_rule(LemmaRule {
            name: "x".to_string(),
            expression: create_literal_expr(Value::Number(2.into())),
            unless_clauses: Vec::new(),
            source_location: test_source(),
        });
        let result = build_graph(&spec, &[spec.clone()], test_sources());
        assert!(
            result.is_err(),
            "Fact and rule name collisions should be rejected"
        );
    }

    // Two facts with the same name are reported as duplicates.
    #[test]
    fn test_duplicate_fact() {
        let mut spec = create_test_spec("test");
        spec = spec.add_fact(create_literal_fact(
            "age",
            Value::Number(rust_decimal::Decimal::from(25)),
        ));
        spec = spec.add_fact(create_literal_fact(
            "age",
            Value::Number(rust_decimal::Decimal::from(30)),
        ));
        let result = build_graph(&spec, &[spec.clone()], test_sources());
        assert!(result.is_err(), "Should detect duplicate fact");
        let errors = result.unwrap_err();
        assert!(errors
            .iter()
            .any(|e| e.to_string().contains("Duplicate fact") && e.to_string().contains("age")));
    }

    // Two rules with the same name are reported as duplicates.
    #[test]
    fn test_duplicate_rule() {
        let mut spec = create_test_spec("test");
        let rule1 = LemmaRule {
            name: "test_rule".to_string(),
            expression: create_literal_expr(Value::Boolean(BooleanValue::True)),
            unless_clauses: Vec::new(),
            source_location: test_source(),
        };
        let rule2 = LemmaRule {
            name: "test_rule".to_string(),
            expression: create_literal_expr(Value::Boolean(BooleanValue::False)),
            unless_clauses: Vec::new(),
            source_location: test_source(),
        };
        spec = spec.add_rule(rule1);
        spec = spec.add_rule(rule2);
        let result = build_graph(&spec, &[spec.clone()], test_sources());
        assert!(result.is_err(), "Should detect duplicate rule");
        let errors = result.unwrap_err();
        assert!(errors.iter().any(
            |e| e.to_string().contains("Duplicate rule") && e.to_string().contains("test_rule")
        ));
    }

    // A rule referencing an undefined name produces a "not found" error.
    #[test]
    fn test_missing_fact_reference() {
        let mut spec = create_test_spec("test");
        let missing_fact_expr = ast::Expression {
            kind: ast::ExpressionKind::Reference(Reference {
                segments: Vec::new(),
                name: "nonexistent".to_string(),
            }),
            source_location: Some(test_source()),
        };
        let rule = LemmaRule {
            name: "test_rule".to_string(),
            expression: missing_fact_expr,
            unless_clauses: Vec::new(),
            source_location: test_source(),
        };
        spec = spec.add_rule(rule);
        let result = build_graph(&spec, &[spec.clone()], test_sources());
        assert!(result.is_err(), "Should detect missing fact");
        let errors = result.unwrap_err();
        assert!(errors
            .iter()
            .any(|e| e.to_string().contains("Reference 'nonexistent' not found")));
    }

    // A fact bound to a spec reference fails when that spec is absent.
    #[test]
    fn test_missing_spec_reference() {
        let mut spec = create_test_spec("test");
        let fact = LemmaFact {
            reference: Reference {
                segments: Vec::new(),
                name: "contract".to_string(),
            },
            value: ParsedFactValue::SpecReference(crate::parsing::ast::SpecRef::local(
                "nonexistent",
            )),
            source_location: test_source(),
        };
        spec = spec.add_fact(fact);
        let result = build_graph(&spec, &[spec.clone()], test_sources());
        assert!(result.is_err(), "Should detect missing spec");
        let errors = result.unwrap_err();
        assert!(errors
            .iter()
            .any(|e| e.to_string().contains("Spec 'nonexistent' not found")));
    }

    // A name reference to a fact is rewritten to a FactPath expression.
    #[test]
    fn test_fact_reference_conversion() {
        let mut spec = create_test_spec("test");
        spec = spec.add_fact(create_literal_fact(
            "age",
            Value::Number(rust_decimal::Decimal::from(25)),
        ));
        let age_expr = ast::Expression {
            kind: ast::ExpressionKind::Reference(Reference {
                segments: Vec::new(),
                name: "age".to_string(),
            }),
            source_location: Some(test_source()),
        };
        let rule = LemmaRule {
            name: "test_rule".to_string(),
            expression: age_expr,
            unless_clauses: Vec::new(),
            source_location: test_source(),
        };
        spec = spec.add_rule(rule);
        let result = build_graph(&spec, &[spec.clone()], test_sources());
        assert!(result.is_ok(), "Should build graph successfully");
        let graph = result.unwrap();
        let rule_node = graph.rules().values().next().unwrap();
        assert!(matches!(
            rule_node.branches[0].1.kind,
            ExpressionKind::FactPath(_)
        ));
    }

    // A name reference to another rule is rewritten to a RulePath expression
    // and recorded as a rule dependency.
    #[test]
    fn test_rule_reference_conversion() {
        let mut spec = create_test_spec("test");
        let rule1_expr = ast::Expression {
            kind: ast::ExpressionKind::Reference(Reference {
                segments: Vec::new(),
                name: "age".to_string(),
            }),
            source_location: Some(test_source()),
        };
        let rule1 = LemmaRule {
            name: "rule1".to_string(),
            expression: rule1_expr,
            unless_clauses: Vec::new(),
            source_location: test_source(),
        };
        spec = spec.add_rule(rule1);
        let rule2_expr = ast::Expression {
            kind: ast::ExpressionKind::Reference(Reference {
                segments: Vec::new(),
                name: "rule1".to_string(),
            }),
            source_location: Some(test_source()),
        };
        let rule2 = LemmaRule {
            name: "rule2".to_string(),
            expression: rule2_expr,
            unless_clauses: Vec::new(),
            source_location: test_source(),
        };
        spec = spec.add_rule(rule2);
        spec = spec.add_fact(create_literal_fact(
            "age",
            Value::Number(rust_decimal::Decimal::from(25)),
        ));
        let result = build_graph(&spec, &[spec.clone()], test_sources());
        assert!(result.is_ok(), "Should build graph successfully");
        let graph = result.unwrap();
        let rule2_node = graph
            .rules()
            .get(&RulePath {
                segments: Vec::new(),
                rule: "rule2".to_string(),
            })
            .unwrap();
        assert_eq!(rule2_node.depends_on_rules.len(), 1);
        assert!(matches!(
            rule2_node.branches[0].1.kind,
            ExpressionKind::RulePath(_)
        ));
    }

    // Graph construction accumulates all errors instead of stopping at the
    // first one.
    #[test]
    fn test_collect_multiple_errors() {
        let mut spec = create_test_spec("test");
        spec = spec.add_fact(create_literal_fact(
            "age",
            Value::Number(rust_decimal::Decimal::from(25)),
        ));
        spec = spec.add_fact(create_literal_fact(
            "age",
            Value::Number(rust_decimal::Decimal::from(30)),
        ));
        let missing_fact_expr = ast::Expression {
            kind: ast::ExpressionKind::Reference(Reference {
                segments: Vec::new(),
                name: "nonexistent".to_string(),
            }),
            source_location: Some(test_source()),
        };
        let rule = LemmaRule {
            name: "test_rule".to_string(),
            expression: missing_fact_expr,
            unless_clauses: Vec::new(),
            source_location: test_source(),
        };
        spec = spec.add_rule(rule);
        let result = build_graph(&spec, &[spec.clone()], test_sources());
        assert!(result.is_err(), "Should collect multiple errors");
        let errors = result.unwrap_err();
        assert!(errors.len() >= 2, "Should have at least 2 errors");
        assert!(errors
            .iter()
            .any(|e| e.to_string().contains("Duplicate fact")));
        assert!(errors
            .iter()
            .any(|e| e.to_string().contains("Reference 'nonexistent' not found")));
    }

    // Duplicate type definitions are reported per spec, and errors from the
    // dependency spec surface alongside the main spec's errors.
    #[test]
    fn test_type_registration_collects_multiple_errors() {
        use crate::parsing::ast::{FactValue, ParentType, PrimitiveKind, SpecRef, TypeDef};
        let type_source = Source::new(
            "a.lemma",
            Span {
                start: 0,
                end: 0,
                line: 1,
                col: 0,
            },
        );
        // spec_a: depends on spec_b and defines 'money' twice.
        let spec_a = create_test_spec("spec_a")
            .with_attribute("a.lemma".to_string())
            .add_fact(LemmaFact {
                reference: Reference::local("dep".to_string()),
                value: FactValue::SpecReference(SpecRef::local("spec_b")),
                source_location: type_source.clone(),
            })
            .add_type(TypeDef::Regular {
                source_location: type_source.clone(),
                name: "money".to_string(),
                parent: ParentType::Primitive {
                    primitive: PrimitiveKind::Number,
                },
                constraints: None,
            })
            .add_type(TypeDef::Regular {
                source_location: type_source,
                name: "money".to_string(),
                parent: ParentType::Primitive {
                    primitive: PrimitiveKind::Number,
                },
                constraints: None,
            });
        let type_source_b = Source::new(
            "b.lemma",
            Span {
                start: 0,
                end: 0,
                line: 1,
                col: 0,
            },
        );
        // spec_b: defines 'length' twice.
        let spec_b = create_test_spec("spec_b")
            .with_attribute("b.lemma".to_string())
            .add_type(TypeDef::Regular {
                source_location: type_source_b.clone(),
                name: "length".to_string(),
                parent: ParentType::Primitive {
                    primitive: PrimitiveKind::Number,
                },
                constraints: None,
            })
            .add_type(TypeDef::Regular {
                source_location: type_source_b,
                name: "length".to_string(),
                parent: ParentType::Primitive {
                    primitive: PrimitiveKind::Number,
                },
                constraints: None,
            });
        let mut sources = HashMap::new();
        sources.insert(
            "a.lemma".to_string(),
            "spec spec_a\nfact dep: spec spec_b\ntype money: number\ntype money: number"
                .to_string(),
        );
        sources.insert(
            "b.lemma".to_string(),
            "spec spec_b\ntype length: number\ntype length: number".to_string(),
        );
        let result = build_graph(&spec_a, &[spec_a.clone(), spec_b.clone()], sources);
        assert!(result.is_err(), "Should fail with duplicate type errors");
        let errors = result.unwrap_err();
        assert!(
            errors.len() >= 2,
            "Should collect duplicate type error from each spec, got {}",
            errors.len()
        );
        assert!(
            errors
                .iter()
                .any(|e| e.to_string().contains("Type 'money' is already defined")),
            "Should report duplicate 'money' in spec_a: {:?}",
            errors.iter().map(|e| e.to_string()).collect::<Vec<_>>()
        );
        assert!(
            errors
                .iter()
                .any(|e| e.to_string().contains("Type 'length' is already defined")),
            "Should report duplicate 'length' in spec_b: {:?}",
            errors.iter().map(|e| e.to_string()).collect::<Vec<_>>()
        );
    }

    // `spec myspec` resolves against a spec parsed in the same file.
    #[test]
    fn spec_ref_resolves_to_matching_spec() {
        let code = r#"spec myspec
fact x: 10
spec consumer
fact m: spec myspec
rule result: m.x"#;
        let specs = crate::parse(code, "test.lemma", &crate::ResourceLimits::default())
            .unwrap()
            .specs;
        let consumer = specs.iter().find(|d| d.name == "consumer").unwrap();
        let mut sources = HashMap::new();
        sources.insert("test.lemma".to_string(), code.to_string());
        let result = build_graph(consumer, &specs, sources);
        assert!(
            result.is_ok(),
            "spec ref should resolve: {:?}",
            result.err()
        );
    }

    // The resolved fact path carries the referenced spec's name in its
    // segment.
    #[test]
    fn spec_ref_resolves_to_single_spec_by_name() {
        let code = r#"spec myspec
fact x: 10
spec consumer
fact m: spec myspec
rule result: m.x"#;
        let specs = crate::parse(code, "test.lemma", &crate::ResourceLimits::default())
            .unwrap()
            .specs;
        let consumer = specs.iter().find(|d| d.name == "consumer").unwrap();
        let mut sources = HashMap::new();
        sources.insert("test.lemma".to_string(), code.to_string());
        let graph = build_graph(consumer, &specs, sources).unwrap();
        let fact_path = FactPath {
            segments: vec![PathSegment {
                fact: "m".to_string(),
                spec: "myspec".to_string(),
            }],
            fact: "x".to_string(),
        };
        assert!(
            graph.facts.contains_key(&fact_path),
            "Ref should resolve to myspec. Facts: {:?}",
            graph.facts.keys().collect::<Vec<_>>()
        );
    }

    // Referencing a spec that was never parsed is an error.
    #[test]
    fn spec_ref_to_nonexistent_spec_is_error() {
        let code = r#"spec myspec
fact x: 10
spec consumer
fact m: spec nonexistent
rule result: m.x"#;
        let specs = crate::parse(code, "test.lemma", &crate::ResourceLimits::default())
            .unwrap()
            .specs;
        let consumer = specs.iter().find(|d| d.name == "consumer").unwrap();
        let mut sources = HashMap::new();
        sources.insert("test.lemma".to_string(), code.to_string());
        let result = build_graph(consumer, &specs, sources);
        assert!(result.is_err(), "Should fail for non-existent spec");
    }

    // An unversioned reference resolves when exactly one unversioned spec
    // with that name exists.
    #[test]
    fn unversioned_ref_resolves_to_only_unversioned_spec() {
        let code = r#"spec myspec
fact x: 10
spec consumer
fact m: spec myspec
rule result: m.x"#;
        let specs = crate::parse(code, "test.lemma", &crate::ResourceLimits::default())
            .unwrap()
            .specs;
        let consumer = specs.iter().find(|d| d.name == "consumer").unwrap();
        let mut sources = HashMap::new();
        sources.insert("test.lemma".to_string(), code.to_string());
        let result = build_graph(consumer, &specs, sources);
        assert!(
            result.is_ok(),
            "Should resolve to unversioned myspec: {:?}",
            result.err()
        );
    }

    // A spec may not reference itself (same base name).
    #[test]
    fn self_reference_is_error() {
        let code = "spec myspec\nfact m: spec myspec";
        let specs = crate::parse(code, "test.lemma", &crate::ResourceLimits::default())
            .unwrap()
            .specs;
        let mut sources = HashMap::new();
        sources.insert("test.lemma".to_string(), code.to_string());
        let result = build_graph(&specs[0], &specs, sources);
        assert!(result.is_err(), "Self-reference should be an error");
        let errors = result.unwrap_err();
        assert!(
            errors
                .iter()
                .any(|e| e.to_string().contains("same base name")),
            "Error should mention same base name: {:?}",
            errors.iter().map(|e| e.to_string()).collect::<Vec<_>>()
        );
    }

    // Referencing a differently-named spec is allowed.
    #[test]
    fn reference_to_different_spec_is_allowed() {
        let code = r#"spec myspec
fact x: 10
spec other_spec
fact m: spec myspec
rule result: m.x"#;
        let specs = crate::parse(code, "test.lemma", &crate::ResourceLimits::default())
            .unwrap()
            .specs;
        let other_spec = specs.iter().find(|d| d.name == "other_spec").unwrap();
        let mut sources = HashMap::new();
        sources.insert("test.lemma".to_string(), code.to_string());
        let result = build_graph(other_spec, &specs, sources);
        assert!(
            result.is_ok(),
            "Reference to different spec should be allowed: {:?}",
            result.err()
        );
    }
}