use crate::{
function_target::{FunctionData, FunctionTarget},
function_target_pipeline::{
FunctionTargetProcessor, FunctionTargetsHolder, FunctionVariant, VerificationFlavor,
},
stackless_bytecode::{BorrowNode, Bytecode, Operation, PropKind},
usage_analysis,
verification_analysis::{is_invariant_suspendable, InvariantAnalysisData},
};
use move_binary_format::file_format::CodeOffset;
use move_model::{
ast::ConditionKind,
model::{FunId, FunctionEnv, GlobalEnv, GlobalId, QualifiedId, QualifiedInstId, StructId},
ty::{Type, TypeDisplayContext, TypeInstantiationDerivation, TypeUnificationAdapter, Variance},
};
use std::{
collections::{BTreeMap, BTreeSet},
fmt,
};
/// Records how a set of global invariants is relevant at one particular program point
/// (a single bytecode, or the function's entry/exit point).
#[derive(Default, Clone)]
pub struct PerBytecodeRelevance {
    /// Maps an instantiation of the function's type parameters (the key, derived during
    /// `PerFunctionRelevance::analyze`) to the set of invariant type-parameter
    /// instantiations (the value) that become applicable under that key.
    pub insts: BTreeMap<Vec<Type>, BTreeSet<Vec<Type>>>,
}
impl PerBytecodeRelevance {
    /// Folds all entries of `other` into `self`: for every function-instantiation key,
    /// the associated invariant-instantiation sets are unioned together.
    fn merge(&mut self, other: PerBytecodeRelevance) {
        for (fun_inst, inv_insts) in other.insts.into_iter() {
            let slot = self.insts.entry(fun_inst).or_insert_with(BTreeSet::new);
            slot.extend(inv_insts);
        }
    }
}
/// Analysis result for one verified function: where which global invariants need to be
/// assumed or asserted. Produced by `PerFunctionRelevance::analyze` and stored as a
/// function-data annotation.
#[derive(Clone)]
pub struct PerFunctionRelevance {
    /// Invariants to assume at the function entry point, keyed by invariant id.
    pub entrypoint_assumptions: BTreeMap<GlobalId, PerBytecodeRelevance>,
    /// Invariants to assert right after a bytecode, keyed by code offset, then by
    /// invariant id.
    pub per_bytecode_assertions: BTreeMap<CodeOffset, BTreeMap<GlobalId, PerBytecodeRelevance>>,
    /// Invariants whose check is deferred to the function exit point (suspended
    /// invariants), keyed by invariant id.
    pub exitpoint_assertions: BTreeMap<GlobalId, PerBytecodeRelevance>,
    /// Number of "ghost" type parameters invented during the analysis for invariant
    /// type parameters that could not be derived from the function's own parameters.
    pub ghost_type_param_count: usize,
}
/// Retrieves the result of the global invariant analysis previously attached to `target`.
///
/// # Panics
/// Panics if the `GlobalInvariantAnalysisProcessor` has not been run on this target.
pub fn get_info<'env>(target: &FunctionTarget<'env>) -> &'env PerFunctionRelevance {
    let annotations = target.get_annotations();
    annotations
        .get::<PerFunctionRelevance>()
        .expect("Global invariant analysis not performed")
}
/// Pipeline processor which runs the global invariant analysis on verified function
/// targets and stores the result as an annotation (see the `FunctionTargetProcessor`
/// implementation below).
pub struct GlobalInvariantAnalysisProcessor {}

impl GlobalInvariantAnalysisProcessor {
    /// Creates a new, boxed processor instance suitable for insertion into the
    /// function target pipeline.
    pub fn new() -> Box<Self> {
        Box::new(GlobalInvariantAnalysisProcessor {})
    }
}
impl FunctionTargetProcessor for GlobalInvariantAnalysisProcessor {
    /// Runs the global invariant analysis on `fun_env` and stashes the resulting
    /// `PerFunctionRelevance` into the function data's annotations. Functions without
    /// bytecode (natives/intrinsics) and non-verification variants are passed through
    /// untouched.
    fn process(
        &self,
        targets: &mut FunctionTargetsHolder,
        fun_env: &FunctionEnv<'_>,
        mut data: FunctionData,
    ) -> FunctionData {
        if fun_env.is_native() || fun_env.is_intrinsic() {
            // Nothing to analyze: these functions have no bytecode body.
            return data;
        }
        if !data.variant.is_verified() {
            // Only variants selected for verification receive invariant instrumentation.
            return data;
        }

        let target = FunctionTarget::new(fun_env, &data);
        let analysis_result = PerFunctionRelevance::analyze(&target, targets);
        data.annotations.set(analysis_result);
        data
    }

    fn name(&self) -> String {
        "global_invariant_analysis".to_string()
    }

    /// Dumps a human-readable summary of the analysis results for every function
    /// verified with the regular flavor, followed by a listing of all global
    /// invariants by id with their source text.
    fn dump_result(
        &self,
        f: &mut fmt::Formatter<'_>,
        env: &GlobalEnv,
        targets: &FunctionTargetsHolder,
    ) -> fmt::Result {
        let type_display_ctxt = TypeDisplayContext::WithEnv {
            env,
            type_param_names: None,
        };
        // Renders a type-argument list as `<T1, T2, ...>`.
        let display_type_slice = |tys: &[Type]| -> String {
            let content = tys
                .iter()
                .map(|t| t.display(&type_display_ctxt).to_string())
                .collect::<Vec<_>>()
                .join(", ");
            format!("<{}>", content)
        };
        let make_indent = |indent: usize| " ".repeat(indent);
        // Pretty-prints one relevance map under `header`, prefixing each invariant with
        // either "assume" or "assert". The `indent` counter tracks nesting depth.
        let display_inv_relevance = |f: &mut fmt::Formatter,
                                     invs: &BTreeMap<GlobalId, PerBytecodeRelevance>,
                                     header: &str,
                                     assert_or_assume: &str|
         -> fmt::Result {
            let mut indent = 1;
            if invs.is_empty() {
                // Empty relevance map prints as a one-liner.
                return writeln!(f, "{}{} {{}}", make_indent(indent), header);
            }
            writeln!(f, "{}{} {{", make_indent(indent), header)?;
            indent += 1;
            for (inv_id, inv_rel) in invs {
                writeln!(
                    f,
                    "{}{} {} = [",
                    make_indent(indent),
                    assert_or_assume,
                    inv_id
                )?;
                indent += 1;
                // Each function-side instantiation maps to the invariant-side
                // instantiations applicable under it.
                for (rel_inst, inv_insts) in &inv_rel.insts {
                    writeln!(
                        f,
                        "{}{} -> [",
                        make_indent(indent),
                        display_type_slice(rel_inst)
                    )?;
                    indent += 1;
                    for inv_inst in inv_insts {
                        writeln!(f, "{}{}", make_indent(indent), display_type_slice(inv_inst))?;
                    }
                    indent -= 1;
                    writeln!(f, "{}]", make_indent(indent))?;
                }
                indent -= 1;
                writeln!(f, "{}]", make_indent(indent))?;
            }
            indent -= 1;
            writeln!(f, "{}}}", make_indent(indent))
        };

        writeln!(
            f,
            "\n********* Result of global invariant instrumentation *********\n"
        )?;
        for (fun_id, fun_variant) in targets.get_funs_and_variants() {
            if !matches!(
                fun_variant,
                FunctionVariant::Verification(VerificationFlavor::Regular)
            ) {
                // Only regular verification variants carry the annotation (see `process`).
                continue;
            }
            let fenv = env.get_function(fun_id);
            let target = targets.get_target(&fenv, &fun_variant);
            let result = target
                .get_annotations()
                .get::<PerFunctionRelevance>()
                .expect("Analysis not performed");

            writeln!(f, "{}: [", fenv.get_full_name_str())?;
            display_inv_relevance(f, &result.entrypoint_assumptions, "entrypoint", "assume")?;
            for (code_offset, code_invs) in &result.per_bytecode_assertions {
                let bc = target.data.code.get(*code_offset as usize).unwrap();
                let header = format!("{}: {}", code_offset, bc.display(&target, &BTreeMap::new()));
                display_inv_relevance(f, code_invs, &header, "assert")?;
            }
            display_inv_relevance(f, &result.exitpoint_assertions, "exitpoint", "assert")?;
            writeln!(f, "]")?;
        }

        writeln!(f, "\n********* Global invariants by ID *********\n")?;
        // Collect every global invariant across all modules so the dump above can be
        // cross-referenced by id.
        let mut all_invs = BTreeSet::new();
        for menv in env.get_modules() {
            all_invs.extend(env.get_global_invariants_by_module(menv.get_id()));
        }
        for inv_id in all_invs {
            let inv = env.get_global_invariant(inv_id).unwrap();
            let inv_src = env.get_source(&inv.loc).unwrap_or("<unknown invariant>");
            writeln!(f, "{} => {}", inv_id, inv_src)?;
        }
        writeln!(f)
    }
}
impl PerFunctionRelevance {
    /// Computes, for every bytecode in `target`, which global invariants (and which
    /// instantiations thereof) need to be assumed at entry, asserted inline, or
    /// asserted at exit, and packages the results into a `PerFunctionRelevance`.
    ///
    /// Relies on the `InvariantAnalysisData` extension produced by the earlier
    /// verification analysis pass; panics if that pass has not run.
    fn analyze(target: &FunctionTarget, targets: &FunctionTargetsHolder) -> Self {
        use BorrowNode::*;
        use Bytecode::*;
        use Operation::*;

        let fid = target.func_env.get_qualified_id();
        let env = target.global_env();
        let inv_analysis = env
            .get_extension::<InvariantAnalysisData>()
            .expect("Verification analysis not performed");

        // Whether invariants modified by this function are to be (re-)checked at the
        // exit point rather than at each modification site.
        let check_suspendable_inv_on_return =
            inv_analysis.fun_set_with_inv_check_on_exit.contains(&fid);
        let inv_applicability = inv_analysis
            .fun_to_inv_map
            .get(&fid)
            .expect("Invariant applicability not available");

        let fun_type_params_arity = target.get_type_parameter_count();

        // Invariants over memory this function merely reads.
        let inv_ro = &inv_applicability.accessed;
        // Split the directly-modified invariants: when checks are deferred to the exit
        // point, suspendable invariants go into the "return" bucket; all others are
        // checked inline right after the modifying instruction.
        let (inv_rw_return, inv_rw_normal): (BTreeSet<_>, BTreeSet<_>) =
            if check_suspendable_inv_on_return {
                inv_applicability
                    .direct_modified
                    .iter()
                    .cloned()
                    .partition(|inv_id| is_invariant_suspendable(env, *inv_id))
            } else {
                (BTreeSet::new(), inv_applicability.direct_modified.clone())
            };

        let mut entrypoint_assumptions = BTreeMap::new();
        let mut per_bytecode_assertions = BTreeMap::new();
        let mut exitpoint_assertions = BTreeMap::new();
        // Running count of ghost type parameters invented below (shared across all
        // calls to `calculate_invariant_relevance` so indices never collide).
        let mut ghost_type_param_count = 0;

        for (code_offset, bc) in target.data.code.iter().enumerate() {
            let code_offset = code_offset as CodeOffset;

            // Classify the global memories touched by this instruction into read-only
            // (`mem_ro`) and read-write (`mem_rw`); instructions that touch no global
            // memory are skipped.
            let (mem_ro, mem_rw) = match bc {
                Call(_, _, oper, _, _) => match oper {
                    Function(mid, fid, inst) => {
                        let callee_fid = mid.qualified(*fid);
                        get_callee_memory_usage_for_invariant_instrumentation(
                            env, targets, callee_fid, inst,
                        )
                    }
                    // Entry half of an opaque call: only the read-only side is used.
                    OpaqueCallBegin(mid, fid, inst) => {
                        let callee_fid = mid.qualified(*fid);
                        let (mem_ro, _) = get_callee_memory_usage_for_invariant_instrumentation(
                            env, targets, callee_fid, inst,
                        );
                        (mem_ro, BTreeSet::new())
                    }
                    // Exit half of an opaque call: only the modified side is used.
                    OpaqueCallEnd(mid, fid, inst) => {
                        let callee_fid = mid.qualified(*fid);
                        let (_, mem_rw) = get_callee_memory_usage_for_invariant_instrumentation(
                            env, targets, callee_fid, inst,
                        );
                        (BTreeSet::new(), mem_rw)
                    }
                    // Direct writes to global memory.
                    MoveTo(mid, sid, inst) | MoveFrom(mid, sid, inst) => {
                        let mem = mid.qualified_inst(*sid, inst.to_owned());
                        (BTreeSet::new(), std::iter::once(mem).collect())
                    }
                    WriteBack(GlobalRoot(mem), _) => {
                        (BTreeSet::new(), std::iter::once(mem.clone()).collect())
                    }
                    // Direct reads of global memory.
                    Exists(mid, sid, inst) | GetGlobal(mid, sid, inst) => {
                        let mem = mid.qualified_inst(*sid, inst.to_owned());
                        (std::iter::once(mem).collect(), BTreeSet::new())
                    }
                    _ => continue,
                },
                // Specification expressions only read memory.
                Prop(_, PropKind::Assert, exp) | Prop(_, PropKind::Assume, exp) => (
                    exp.used_memory(env)
                        .into_iter()
                        .map(|(usage, _)| usage)
                        .collect(),
                    BTreeSet::new(),
                ),
                _ => continue,
            };

            // Relate read memories to accessed invariants (assumption material only;
            // invariants that cannot be fully instantiated are ignored here).
            let relevance_ro = Self::calculate_invariant_relevance(
                env,
                mem_ro.iter(),
                inv_ro,
                fun_type_params_arity,
                &mut ghost_type_param_count,
                true,
            );
            // Relate written memories to invariants checked inline at this bytecode.
            let relevance_rw_normal = Self::calculate_invariant_relevance(
                env,
                mem_rw.iter(),
                &inv_rw_normal,
                fun_type_params_arity,
                &mut ghost_type_param_count,
                false,
            );
            // Relate written memories to invariants whose check is deferred to exit.
            let relevance_rw_return = Self::calculate_invariant_relevance(
                env,
                mem_rw.iter(),
                &inv_rw_return,
                fun_type_params_arity,
                &mut ghost_type_param_count,
                false,
            );

            // Every relevant invariant also contributes an entry-point assumption,
            // except update invariants, which are skipped here.
            for (inv_id, inv_rel) in relevance_ro
                .iter()
                .chain(relevance_rw_normal.iter())
                .chain(relevance_rw_return.iter())
            {
                let inv = env.get_global_invariant(*inv_id).unwrap();
                if matches!(inv.kind, ConditionKind::GlobalInvariantUpdate(..)) {
                    continue;
                }
                entrypoint_assumptions
                    .entry(*inv_id)
                    .or_insert_with(PerBytecodeRelevance::default)
                    .merge(inv_rel.clone());
            }

            per_bytecode_assertions.insert(code_offset, relevance_rw_normal);

            for (inv_id, inv_rel) in relevance_rw_return {
                exitpoint_assertions
                    .entry(inv_id)
                    .or_insert_with(PerBytecodeRelevance::default)
                    .merge(inv_rel);
            }
        }

        Self {
            entrypoint_assumptions,
            per_bytecode_assertions,
            exitpoint_assertions,
            ghost_type_param_count,
        }
    }

    /// Given the global memories touched at a program point (`mem_related`) and a
    /// candidate set of invariants (`inv_related`), computes which instantiations of
    /// which invariants unify with the touched memories.
    ///
    /// `fun_type_params_ghost_count` is bumped for each invariant type parameter that
    /// cannot be derived from the memory match (see below); the caller threads one
    /// counter through all calls. When `ignore_uninstantiated_invariant` is true, such
    /// underivable instantiations are dropped instead of being patched with ghost
    /// type parameters.
    fn calculate_invariant_relevance<'a>(
        env: &GlobalEnv,
        mem_related: impl Iterator<Item = &'a QualifiedInstId<StructId>>,
        inv_related: &BTreeSet<GlobalId>,
        fun_type_params_arity: usize,
        fun_type_params_ghost_count: &mut usize,
        ignore_uninstantiated_invariant: bool,
    ) -> BTreeMap<GlobalId, PerBytecodeRelevance> {
        let mut result = BTreeMap::new();

        for rel_mem in mem_related {
            let rel_ty = rel_mem.to_type();
            for inv_id in inv_related {
                let inv = env.get_global_invariant(*inv_id).unwrap();
                let inv_type_params = match &inv.kind {
                    ConditionKind::GlobalInvariant(params) => params,
                    ConditionKind::GlobalInvariantUpdate(params) => params,
                    _ => unreachable!(
                        "A global invariant must have a condition kind of either \
                         `GlobalInvariant` or `GlobalInvariantUpdate`"
                    ),
                };
                let inv_type_params_arity = inv_type_params.len();

                for inv_mem in &inv.mem_usage {
                    let inv_ty = inv_mem.to_type();

                    // Quick pre-check: skip invariant memories that cannot unify with
                    // the touched memory at all.
                    let adapter = TypeUnificationAdapter::new_pair(&rel_ty, &inv_ty, true, true);
                    if adapter.unify(Variance::Allow, false).is_none() {
                        continue;
                    }

                    // Derive instantiations of the function-side type parameters under
                    // which the touched memory matches the invariant memory.
                    // NOTE(review): the boolean flags follow `progressive_instantiation`'s
                    // parameter order — confirm against its definition when changing.
                    let rel_insts = TypeInstantiationDerivation::progressive_instantiation(
                        std::iter::once(&rel_ty),
                        std::iter::once(&inv_ty),
                        true,
                        true,
                        true,
                        false,
                        fun_type_params_arity,
                        true,
                        false,
                    );
                    for rel_inst in rel_insts {
                        let inst_rel_ty = rel_ty.instantiate(&rel_inst);
                        // For each function-side instantiation, derive the matching
                        // invariant-side type parameter instantiations.
                        let inv_insts = TypeInstantiationDerivation::progressive_instantiation(
                            std::iter::once(&inst_rel_ty),
                            std::iter::once(&inv_ty),
                            false,
                            true,
                            false,
                            true,
                            inv_type_params_arity,
                            false,
                            true,
                        );
                        let mut wellformed_inv_inst = vec![];
                        for inv_inst in inv_insts {
                            // A `Type::Error` slot marks an invariant type parameter
                            // that could not be derived from the memory match.
                            if inv_inst.iter().any(|t| matches!(t, Type::Error)) {
                                if ignore_uninstantiated_invariant {
                                    continue;
                                }
                                // Patch each underivable slot with a fresh "ghost" type
                                // parameter indexed after the function's own parameters.
                                let adapted_inv_inst = inv_inst
                                    .into_iter()
                                    .map(|t| {
                                        if matches!(t, Type::Error) {
                                            let ghost_idx = fun_type_params_arity
                                                + *fun_type_params_ghost_count;
                                            *fun_type_params_ghost_count += 1;
                                            Type::TypeParameter(ghost_idx as u16)
                                        } else {
                                            t
                                        }
                                    })
                                    .collect();
                                wellformed_inv_inst.push(adapted_inv_inst);
                            } else {
                                wellformed_inv_inst.push(inv_inst);
                            }
                        }
                        result
                            .entry(*inv_id)
                            .or_insert_with(PerBytecodeRelevance::default)
                            .insts
                            .entry(rel_inst)
                            .or_insert_with(BTreeSet::new)
                            .extend(wellformed_inv_inst);
                    }
                }
            }
        }

        result
    }
}
/// Collects the global memories used by a callee, split into the pair
/// `(read-only, read-write)`: the first set feeds entry-point assumptions, the second
/// feeds inline or exit-point assertions in the caller.
///
/// When the callee is exempt from invariant checking (it is in
/// `fun_set_with_no_inv_check`), its modified memories must be asserted on the caller
/// side, so they land in the read-write set; otherwise everything the callee accesses
/// only contributes assumptions.
fn get_callee_memory_usage_for_invariant_instrumentation(
    env: &GlobalEnv,
    targets: &FunctionTargetsHolder,
    callee_fid: QualifiedId<FunId>,
    callee_inst: &[Type],
) -> (
    BTreeSet<QualifiedInstId<StructId>>, // memory constitute to entry-point assumptions
    BTreeSet<QualifiedInstId<StructId>>, // memory constitute to in-line or exit-point assertions
) {
    let inv_analysis = env
        .get_extension::<InvariantAnalysisData>()
        .expect("Verification analysis not performed");

    let callee_env = env.get_function(callee_fid);
    let callee_target = targets.get_target(&callee_env, &FunctionVariant::Baseline);
    let callee_usage = usage_analysis::get_memory_usage(&callee_target);
    let all_accessed = callee_usage.accessed.get_all_inst(callee_inst);

    // Callee checks its own invariants: everything it touches is assumption-only.
    if !inv_analysis.fun_set_with_no_inv_check.contains(&callee_fid) {
        return (all_accessed, BTreeSet::new());
    }

    // Callee skips invariant checks: its writes become caller-side assertion material,
    // and the remaining (accessed-but-not-modified) memories stay assumption-only.
    let mem_rw = callee_usage.modified.get_all_inst(callee_inst);
    let mem_ro = all_accessed.difference(&mem_rw).cloned().collect();
    (mem_ro, mem_rw)
}