use crate::{BinderInfo, Expr, Name};
use std::collections::HashMap;
/// Result of attempting eta-normalization on one expression.
#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EtaOutcome {
    /// The expression was already in eta-normal form.
    AlreadyNormal,
    /// At least one eta-contraction was performed.
    Reduced,
    /// Eta-reduction did not apply to the expression.
    NotApplicable,
    /// Traversal stopped at the depth limit before finishing.
    DepthLimitExceeded,
}
#[allow(dead_code)]
impl EtaOutcome {
    /// True for outcomes that leave the expression in normal form.
    pub fn is_success(self) -> bool {
        self == EtaOutcome::Reduced || self == EtaOutcome::AlreadyNormal
    }
    /// Stable machine-readable tag, suitable for logs and metrics.
    pub fn label(self) -> &'static str {
        use EtaOutcome::*;
        match self {
            AlreadyNormal => "already_normal",
            Reduced => "reduced",
            NotApplicable => "not_applicable",
            DepthLimitExceeded => "depth_limit",
        }
    }
}
/// Coarse structural classification of an expression for the eta pass.
#[allow(dead_code)]
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum EtaStructure {
    Atomic,
    Lambda(usize),
    App(usize),
    EtaExpandable,
}
#[allow(dead_code)]
impl EtaStructure {
    /// Binder/argument count carried by `Lambda`/`App`; zero for the rest.
    pub fn arity(&self) -> usize {
        match self {
            EtaStructure::Lambda(count) | EtaStructure::App(count) => *count,
            EtaStructure::Atomic | EtaStructure::EtaExpandable => 0,
        }
    }
}
/// Cursor over fixed-size windows of a borrowed slice.
#[allow(dead_code)]
pub struct WindowIterator<'a, T> {
    pub(super) data: &'a [T],
    pub(super) pos: usize,
    pub(super) window: usize,
}
#[allow(dead_code)]
impl<'a, T> WindowIterator<'a, T> {
    /// Creates a cursor positioned at the start of `data` with the given
    /// window width.
    pub fn new(data: &'a [T], window: usize) -> Self {
        let pos = 0;
        Self { data, pos, window }
    }
}
/// Recycles `String` allocations to avoid repeated heap churn.
#[allow(dead_code)]
pub struct StringPool {
    free: Vec<String>,
}
#[allow(dead_code)]
impl StringPool {
    /// Creates an empty pool.
    pub fn new() -> Self {
        StringPool { free: vec![] }
    }
    /// Hands out a pooled buffer, or a fresh empty `String` when none is
    /// available.
    pub fn take(&mut self) -> String {
        match self.free.pop() {
            Some(buf) => buf,
            None => String::new(),
        }
    }
    /// Returns a buffer to the pool; contents are cleared but capacity is
    /// retained for reuse.
    pub fn give(&mut self, mut s: String) {
        s.clear();
        self.free.push(s);
    }
    /// Number of buffers currently held by the pool.
    pub fn free_count(&self) -> usize {
        self.free.len()
    }
}
/// A named, exact-match rewrite from `lhs` to `rhs`.
#[derive(Clone, Debug)]
pub struct RewriteRule {
    pub lhs: Expr,
    pub rhs: Expr,
    pub name: String,
}
impl RewriteRule {
    /// Builds a rule from a name and the two expression sides.
    pub fn new(name: impl Into<String>, lhs: Expr, rhs: Expr) -> Self {
        let name = name.into();
        Self { lhs, rhs, name }
    }
    /// Returns a clone of `rhs` when `expr` is syntactically equal to `lhs`.
    /// Only the whole expression is matched; no subterm rewriting happens.
    pub fn apply_top(&self, expr: &Expr) -> Option<Expr> {
        (expr == &self.lhs).then(|| self.rhs.clone())
    }
}
/// Online accumulator of count/sum/min/max over `f64` samples.
#[allow(dead_code)]
pub struct StatSummary {
    count: u64,
    sum: f64,
    min: f64,
    max: f64,
}
#[allow(dead_code)]
impl StatSummary {
    /// Creates an empty summary; min/max start at the opposing infinities so
    /// the first sample always replaces them.
    pub fn new() -> Self {
        StatSummary {
            count: 0,
            sum: 0.0,
            min: f64::INFINITY,
            max: f64::NEG_INFINITY,
        }
    }
    /// Folds one sample into the summary.
    pub fn record(&mut self, val: f64) {
        self.count += 1;
        self.sum += val;
        self.min = self.min.min(val);
        self.max = self.max.max(val);
    }
    /// Arithmetic mean, or `None` before any sample.
    pub fn mean(&self) -> Option<f64> {
        match self.count {
            0 => None,
            n => Some(self.sum / n as f64),
        }
    }
    /// Smallest sample seen, or `None` before any sample.
    pub fn min(&self) -> Option<f64> {
        match self.count {
            0 => None,
            _ => Some(self.min),
        }
    }
    /// Largest sample seen, or `None` before any sample.
    pub fn max(&self) -> Option<f64> {
        match self.count {
            0 => None,
            _ => Some(self.max),
        }
    }
    /// Number of samples recorded.
    pub fn count(&self) -> u64 {
        self.count
    }
}
/// Caching front-end for eta-normality queries, with outcome logging.
#[allow(dead_code)]
pub struct EtaChecker {
    cache: EtaNormalCache,
    log: EtaLog,
}
#[allow(dead_code)]
impl EtaChecker {
    /// Creates a checker with an empty cache and an empty log.
    pub fn new() -> Self {
        Self {
            cache: EtaNormalCache::new(),
            log: EtaLog::new(),
        }
    }
    /// Resolves `hash` against the cache. On a miss the hash is recorded as
    /// not-normal and `NotApplicable` is reported. Every call is logged.
    pub fn is_eta_normal(&mut self, hash: u64) -> EtaOutcome {
        let outcome = match self.cache.query(hash) {
            Some(true) => EtaOutcome::AlreadyNormal,
            Some(false) => EtaOutcome::NotApplicable,
            None => {
                self.cache.insert(hash, false);
                EtaOutcome::NotApplicable
            }
        };
        self.log.record(outcome);
        outcome
    }
    /// Fraction of logged calls whose outcome counts as success.
    pub fn success_rate(&self) -> f64 {
        self.log.success_rate()
    }
}
/// An append-only value with access to every previous version.
#[allow(dead_code)]
pub struct VersionedRecord<T: Clone> {
    history: Vec<T>,
}
#[allow(dead_code)]
impl<T: Clone> VersionedRecord<T> {
    /// Starts the history at version 0 holding `initial`.
    pub fn new(initial: T) -> Self {
        let history = vec![initial];
        Self { history }
    }
    /// Appends a new version.
    pub fn update(&mut self, val: T) {
        self.history.push(val);
    }
    /// The most recent value.
    pub fn current(&self) -> &T {
        let last = self.history.last();
        last.expect("VersionedRecord history is always non-empty after construction")
    }
    /// The value stored at version `n`, if that version exists.
    pub fn at_version(&self, n: usize) -> Option<&T> {
        self.history.get(n)
    }
    /// The current version number (0-based).
    pub fn version(&self) -> usize {
        self.history.len() - 1
    }
    /// True once at least one update has been applied.
    pub fn has_history(&self) -> bool {
        self.history.len() > 1
    }
}
/// A unit of eta-processing work: an id, an expression hash, and a priority.
#[allow(dead_code)]
#[allow(missing_docs)]
pub struct EtaJob {
    pub id: u64,
    pub hash: u64,
    pub prio: u32,
}
#[allow(dead_code)]
impl EtaJob {
    /// Bundles the three fields into a job.
    pub fn new(id: u64, hash: u64, prio: u32) -> Self {
        EtaJob { id, hash, prio }
    }
}
/// A textual eta-rewrite pair from `lhs` to `rhs`.
#[allow(dead_code)]
#[allow(missing_docs)]
pub struct EtaRewriteRule {
    pub lhs: String,
    pub rhs: String,
}
#[allow(dead_code)]
impl EtaRewriteRule {
    /// Builds a rule from any pair of string-convertible values.
    pub fn new(lhs: impl Into<String>, rhs: impl Into<String>) -> Self {
        let (lhs, rhs) = (lhs.into(), rhs.into());
        EtaRewriteRule { lhs, rhs }
    }
}
/// Node counts and maximum nesting depth for an expression tree.
#[derive(Debug, Default, Clone)]
pub struct LambdaStats {
    pub lambda_count: usize,
    pub pi_count: usize,
    pub app_count: usize,
    pub let_count: usize,
    pub max_depth: usize,
}
impl LambdaStats {
    /// Walks `expr` once and returns the collected statistics.
    pub fn compute(expr: &Expr) -> Self {
        let mut acc = LambdaStats::default();
        Self::walk(expr, &mut acc, 0);
        acc
    }
    /// Recursive traversal; `depth` is the distance from the root node.
    fn walk(expr: &Expr, stats: &mut Self, depth: usize) {
        stats.max_depth = stats.max_depth.max(depth);
        let child_depth = depth + 1;
        match expr {
            Expr::Lam(_, _, ty, body) => {
                stats.lambda_count += 1;
                Self::walk(ty, stats, child_depth);
                Self::walk(body, stats, child_depth);
            }
            Expr::Pi(_, _, ty, body) => {
                stats.pi_count += 1;
                Self::walk(ty, stats, child_depth);
                Self::walk(body, stats, child_depth);
            }
            Expr::App(f, a) => {
                stats.app_count += 1;
                Self::walk(f, stats, child_depth);
                Self::walk(a, stats, child_depth);
            }
            Expr::Let(_, ty, val, body) => {
                stats.let_count += 1;
                Self::walk(ty, stats, child_depth);
                Self::walk(val, stats, child_depth);
                Self::walk(body, stats, child_depth);
            }
            // Leaves (variables, constants, sorts, ...) contribute only depth.
            _ => {}
        }
    }
    /// Lambdas + pis + lets: every construct that introduces a binder.
    pub fn total_binders(&self) -> usize {
        self.lambda_count + self.pi_count + self.let_count
    }
}
/// Adjacency-list digraph answering reachability queries by traversal.
#[allow(dead_code)]
pub struct TransitiveClosure {
    adj: Vec<Vec<usize>>,
    n: usize,
}
#[allow(dead_code)]
impl TransitiveClosure {
    /// Creates a graph with `n` nodes and no edges.
    pub fn new(n: usize) -> Self {
        TransitiveClosure {
            adj: vec![Vec::new(); n],
            n,
        }
    }
    /// Adds a directed edge; silently ignored when `from` is out of range.
    /// (`to` is range-checked lazily during traversal instead.)
    pub fn add_edge(&mut self, from: usize, to: usize) {
        if let Some(out) = self.adj.get_mut(from) {
            out.push(to);
        }
    }
    /// All nodes reachable from `start` (including `start` when in range),
    /// returned in ascending order.
    pub fn reachable_from(&self, start: usize) -> Vec<usize> {
        let mut seen = vec![false; self.n];
        let mut pending = vec![start];
        while let Some(node) = pending.pop() {
            if node >= self.n || seen[node] {
                continue;
            }
            seen[node] = true;
            pending.extend_from_slice(&self.adj[node]);
        }
        seen.iter()
            .enumerate()
            .filter(|&(_, &hit)| hit)
            .map(|(i, _)| i)
            .collect()
    }
    /// True when `to` is reachable from `from`.
    pub fn can_reach(&self, from: usize, to: usize) -> bool {
        // reachable_from returns a sorted vector, so binary search applies.
        self.reachable_from(from).binary_search(&to).is_ok()
    }
}
/// Insertion-ordered set of unique string labels.
#[allow(dead_code)]
pub struct LabelSet {
    labels: Vec<String>,
}
#[allow(dead_code)]
impl LabelSet {
    /// Creates an empty set.
    pub fn new() -> Self {
        LabelSet { labels: vec![] }
    }
    /// Adds `label` unless an equal label is already present.
    pub fn add(&mut self, label: impl Into<String>) {
        let candidate = label.into();
        if !self.has(&candidate) {
            self.labels.push(candidate);
        }
    }
    /// True when `label` is in the set.
    pub fn has(&self, label: &str) -> bool {
        self.labels.iter().any(|existing| existing == label)
    }
    /// Number of distinct labels stored.
    pub fn count(&self) -> usize {
        self.labels.len()
    }
    /// All labels, in first-insertion order.
    pub fn all(&self) -> &[String] {
        self.labels.as_slice()
    }
}
/// Memoizes eta-normality verdicts keyed by expression hash.
#[allow(dead_code)]
pub struct EtaNormalCache {
    cache: HashMap<u64, bool>,
}
#[allow(dead_code)]
impl EtaNormalCache {
    /// Creates an empty cache.
    pub fn new() -> Self {
        Self {
            cache: HashMap::new(),
        }
    }
    /// Stores (or overwrites) the verdict for `hash`.
    pub fn insert(&mut self, hash: u64, is_normal: bool) {
        self.cache.insert(hash, is_normal);
    }
    /// The cached verdict for `hash`, if any.
    pub fn query(&self, hash: u64) -> Option<bool> {
        self.cache.get(&hash).copied()
    }
    /// Drops all cached verdicts.
    pub fn clear(&mut self) {
        self.cache.clear();
    }
    /// Number of cached entries.
    pub fn len(&self) -> usize {
        self.cache.len()
    }
    /// True when nothing is cached.
    pub fn is_empty(&self) -> bool {
        self.cache.is_empty()
    }
}
/// Paired before/after summaries for measuring the effect of a transform.
#[allow(dead_code)]
pub struct TransformStat {
    before: StatSummary,
    after: StatSummary,
}
#[allow(dead_code)]
impl TransformStat {
    /// Creates empty before/after summaries.
    pub fn new() -> Self {
        TransformStat {
            before: StatSummary::new(),
            after: StatSummary::new(),
        }
    }
    /// Records a pre-transform measurement.
    pub fn record_before(&mut self, v: f64) {
        self.before.record(v);
    }
    /// Records a post-transform measurement.
    pub fn record_after(&mut self, v: f64) {
        self.after.record(v);
    }
    /// mean(after) / mean(before); `None` when either side is empty or the
    /// before-mean is too close to zero to divide safely.
    pub fn mean_ratio(&self) -> Option<f64> {
        let before = self.before.mean()?;
        let after = self.after.mean()?;
        (before.abs() >= f64::EPSILON).then(|| after / before)
    }
}
/// A token-bucket rate limiter refilled by wall-clock time.
#[allow(dead_code)]
pub struct TokenBucket {
    /// Maximum number of tokens the bucket can hold.
    capacity: u64,
    /// Tokens currently available.
    tokens: u64,
    /// Tokens credited per elapsed millisecond.
    refill_per_ms: u64,
    /// Timestamp of the last refill accounting.
    last_refill: std::time::Instant,
}
#[allow(dead_code)]
impl TokenBucket {
    /// Creates a bucket that starts full.
    pub fn new(capacity: u64, refill_per_ms: u64) -> Self {
        Self {
            capacity,
            tokens: capacity,
            refill_per_ms,
            last_refill: std::time::Instant::now(),
        }
    }
    /// Attempts to take `n` tokens after refilling; returns `false` (and
    /// takes nothing) when fewer than `n` are available.
    pub fn try_consume(&mut self, n: u64) -> bool {
        self.refill();
        if self.tokens >= n {
            self.tokens -= n;
            true
        } else {
            false
        }
    }
    /// Credits tokens for the time elapsed since the last refill, capped at
    /// `capacity`.
    fn refill(&mut self) {
        let now = std::time::Instant::now();
        let elapsed_ms = now.duration_since(self.last_refill).as_millis() as u64;
        if elapsed_ms > 0 {
            // Saturating arithmetic: a long idle period or a large refill rate
            // could overflow `u64` here, which panics in debug builds and
            // silently wraps (corrupting the token count) in release builds.
            // Saturation is harmless because the result is clamped to
            // `capacity` anyway.
            let new_tokens = elapsed_ms.saturating_mul(self.refill_per_ms);
            self.tokens = self.tokens.saturating_add(new_tokens).min(self.capacity);
            self.last_refill = now;
        }
    }
    /// Tokens currently available (does not refill first).
    pub fn available(&self) -> u64 {
        self.tokens
    }
    /// Maximum tokens the bucket can hold.
    pub fn capacity(&self) -> u64 {
        self.capacity
    }
}
/// A vector guaranteed to hold at least one element.
#[allow(dead_code)]
pub struct NonEmptyVec<T> {
    head: T,
    tail: Vec<T>,
}
#[allow(dead_code)]
impl<T> NonEmptyVec<T> {
    /// Creates a one-element vector.
    pub fn singleton(val: T) -> Self {
        NonEmptyVec {
            head: val,
            tail: Vec::new(),
        }
    }
    /// Appends an element at the end.
    pub fn push(&mut self, val: T) {
        self.tail.push(val);
    }
    /// The first element (always present).
    pub fn first(&self) -> &T {
        &self.head
    }
    /// The most recently pushed element, or the head if none were pushed.
    pub fn last(&self) -> &T {
        match self.tail.last() {
            Some(item) => item,
            None => &self.head,
        }
    }
    /// Total element count; always at least 1.
    pub fn len(&self) -> usize {
        self.tail.len() + 1
    }
    /// Always `false` by construction; kept for collection-API symmetry.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Borrowed view of all elements in order.
    pub fn to_vec(&self) -> Vec<&T> {
        std::iter::once(&self.head).chain(self.tail.iter()).collect()
    }
}
/// A type-erased function recorded as a raw address plus metadata.
#[allow(dead_code)]
pub struct RawFnPtr {
    ptr: usize,
    pub(super) arity: usize,
    name: String,
}
#[allow(dead_code)]
impl RawFnPtr {
    /// Records address `ptr` together with its arity and display name.
    pub fn new(ptr: usize, arity: usize, name: impl Into<String>) -> Self {
        let name = name.into();
        RawFnPtr { ptr, arity, name }
    }
    /// Number of arguments the function expects.
    pub fn arity(&self) -> usize {
        self.arity
    }
    /// Human-readable name.
    pub fn name(&self) -> &str {
        self.name.as_str()
    }
    /// The raw address value.
    pub fn raw(&self) -> usize {
        self.ptr
    }
}
/// LIFO stack tracking the currently focused item (top of stack).
#[allow(dead_code)]
pub struct FocusStack<T> {
    items: Vec<T>,
}
#[allow(dead_code)]
impl<T> FocusStack<T> {
    /// Creates an empty stack.
    pub fn new() -> Self {
        FocusStack { items: vec![] }
    }
    /// Pushes `item`, making it the current focus.
    pub fn focus(&mut self, item: T) {
        self.items.push(item);
    }
    /// Pops the current focus, restoring the previous one.
    pub fn blur(&mut self) -> Option<T> {
        self.items.pop()
    }
    /// The focused item, if any.
    pub fn current(&self) -> Option<&T> {
        self.items.last()
    }
    /// Number of stacked items.
    pub fn depth(&self) -> usize {
        self.items.len()
    }
    /// True when nothing has focus.
    pub fn is_empty(&self) -> bool {
        self.depth() == 0
    }
}
/// Outcome of eta-normalizing one expression.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct EtaNormInfo {
    /// The resulting expression.
    pub normalized: Expr,
    /// How many eta-contractions were applied.
    pub contractions: usize,
    /// True when the input required no work.
    pub already_normal: bool,
}
impl EtaNormInfo {
    /// Wraps an expression that needed no contractions.
    pub fn already_normal(expr: Expr) -> Self {
        EtaNormInfo {
            normalized: expr,
            contractions: 0,
            already_normal: true,
        }
    }
    /// Wraps the result of applying `n` contractions.
    pub fn contracted(expr: Expr, n: usize) -> Self {
        EtaNormInfo {
            normalized: expr,
            contractions: n,
            already_normal: false,
        }
    }
}
/// Directed graph over node indices with reachability and Kahn topological
/// ordering.
#[allow(dead_code)]
pub struct SimpleDag {
    edges: Vec<Vec<usize>>,
}
#[allow(dead_code)]
impl SimpleDag {
    /// Creates a graph with `n` nodes and no edges.
    pub fn new(n: usize) -> Self {
        SimpleDag {
            edges: vec![Vec::new(); n],
        }
    }
    /// Adds `from -> to`; out-of-range sources are ignored and out-of-range
    /// targets are range-checked during traversal.
    pub fn add_edge(&mut self, from: usize, to: usize) {
        if let Some(out) = self.edges.get_mut(from) {
            out.push(to);
        }
    }
    /// Direct successors of `node` (empty for out-of-range nodes).
    pub fn successors(&self, node: usize) -> &[usize] {
        match self.edges.get(node) {
            Some(next) => next,
            None => &[],
        }
    }
    /// True when `to` is reachable from `from`; every node reaches itself.
    pub fn can_reach(&self, from: usize, to: usize) -> bool {
        let mut seen = vec![false; self.edges.len()];
        self.dfs(from, to, &mut seen)
    }
    /// Depth-first search; `visited` cuts cycles and repeated work.
    fn dfs(&self, cur: usize, target: usize, visited: &mut Vec<bool>) -> bool {
        if cur == target {
            return true;
        }
        match visited.get(cur).copied() {
            None | Some(true) => false,
            Some(false) => {
                visited[cur] = true;
                self.successors(cur)
                    .iter()
                    .any(|&next| self.dfs(next, target, visited))
            }
        }
    }
    /// Kahn's algorithm: the FIFO topological order, or `None` when the graph
    /// contains a cycle.
    pub fn topological_sort(&self) -> Option<Vec<usize>> {
        let node_count = self.edges.len();
        let mut indegree = vec![0usize; node_count];
        for targets in &self.edges {
            for &t in targets {
                if t < node_count {
                    indegree[t] += 1;
                }
            }
        }
        let mut ready: std::collections::VecDeque<usize> =
            (0..node_count).filter(|&i| indegree[i] == 0).collect();
        let mut order = Vec::with_capacity(node_count);
        while let Some(node) = ready.pop_front() {
            order.push(node);
            for &t in self.successors(node) {
                if t < node_count {
                    indegree[t] -= 1;
                    if indegree[t] == 0 {
                        ready.push_back(t);
                    }
                }
            }
        }
        (order.len() == node_count).then(|| order)
    }
    /// Number of nodes the graph was created with.
    pub fn num_nodes(&self) -> usize {
        self.edges.len()
    }
}
/// Fixed-capacity circular buffer maintaining a running sum of the most
/// recent `capacity` samples.
#[allow(dead_code)]
pub struct SlidingSum {
    window: Vec<f64>,
    capacity: usize,
    // Next slot to overwrite.
    pos: usize,
    // Sum of the `count` live samples (slots beyond `count` are still 0.0).
    sum: f64,
    // Live samples; saturates at `capacity`.
    count: usize,
}
#[allow(dead_code)]
impl SlidingSum {
    /// Creates an empty window holding up to `capacity` samples.
    pub fn new(capacity: usize) -> Self {
        Self {
            window: vec![0.0; capacity],
            capacity,
            pos: 0,
            sum: 0.0,
            count: 0,
        }
    }
    /// Pushes `val`, evicting the oldest sample once the window is full.
    ///
    /// A zero-capacity window ignores pushes. (Previously this case indexed
    /// an empty buffer and computed `pos % 0`, panicking on the first push.)
    pub fn push(&mut self, val: f64) {
        if self.capacity == 0 {
            return;
        }
        let oldest = self.window[self.pos];
        self.sum -= oldest;
        self.sum += val;
        self.window[self.pos] = val;
        self.pos = (self.pos + 1) % self.capacity;
        if self.count < self.capacity {
            self.count += 1;
        }
    }
    /// Sum of the samples currently in the window.
    pub fn sum(&self) -> f64 {
        self.sum
    }
    /// Mean of the samples in the window, or `None` when empty.
    pub fn mean(&self) -> Option<f64> {
        if self.count == 0 {
            None
        } else {
            Some(self.sum / self.count as f64)
        }
    }
    /// Number of live samples (at most `capacity`).
    pub fn count(&self) -> usize {
        self.count
    }
}
/// Aggregate counters for an eta-reduction pass.
#[allow(dead_code)]
#[allow(missing_docs)]
pub struct EtaReductionStats {
    pub reductions: u64,
    pub examined: u64,
    pub expansions: u64,
    pub max_depth: usize,
}
#[allow(dead_code)]
impl EtaReductionStats {
    /// All counters start at zero.
    pub fn new() -> Self {
        EtaReductionStats {
            reductions: 0,
            examined: 0,
            expansions: 0,
            max_depth: 0,
        }
    }
    /// Fraction of examined terms that were reduced; 0.0 before any work.
    pub fn ratio(&self) -> f64 {
        match self.examined {
            0 => 0.0,
            n => self.reductions as f64 / n as f64,
        }
    }
}
/// Lightweight `/`-joined component path (a local type, not `std::path::PathBuf`).
#[allow(dead_code)]
pub struct PathBuf {
    components: Vec<String>,
}
#[allow(dead_code)]
impl PathBuf {
    /// Creates an empty path.
    pub fn new() -> Self {
        PathBuf {
            components: vec![],
        }
    }
    /// Appends one component.
    pub fn push(&mut self, comp: impl Into<String>) {
        let part = comp.into();
        self.components.push(part);
    }
    /// Drops the last component; no-op when the path is empty.
    pub fn pop(&mut self) {
        let _ = self.components.pop();
    }
    /// Renders the path with `/` separators.
    pub fn as_str(&self) -> String {
        self.components.join("/")
    }
    /// Number of components.
    pub fn depth(&self) -> usize {
        self.components.len()
    }
    /// Removes every component.
    pub fn clear(&mut self) {
        self.components.clear();
    }
}
/// Priority queue of [`EtaJob`]s: higher `prio` dequeues first, FIFO among
/// equal priorities.
#[allow(dead_code)]
pub struct EtaJobQueue {
    jobs: Vec<EtaJob>,
}
#[allow(dead_code)]
impl EtaJobQueue {
    /// Creates an empty queue.
    pub fn new() -> Self {
        EtaJobQueue { jobs: vec![] }
    }
    /// Inserts `job` after every job with priority >= its own, keeping the
    /// vector sorted by descending priority.
    pub fn enqueue(&mut self, job: EtaJob) {
        let idx = self.jobs.partition_point(|queued| queued.prio >= job.prio);
        self.jobs.insert(idx, job);
    }
    /// Removes and returns the highest-priority job, if any.
    pub fn dequeue(&mut self) -> Option<EtaJob> {
        (!self.jobs.is_empty()).then(|| self.jobs.remove(0))
    }
    /// Number of queued jobs.
    pub fn len(&self) -> usize {
        self.jobs.len()
    }
    /// True when no jobs are queued.
    pub fn is_empty(&self) -> bool {
        self.jobs.is_empty()
    }
}
/// A node in a dotted-path configuration tree: either a leaf holding a value
/// or a section holding children.
#[allow(dead_code)]
pub struct ConfigNode {
    key: String,
    value: Option<String>,
    children: Vec<ConfigNode>,
}
#[allow(dead_code)]
impl ConfigNode {
    /// A terminal node carrying a value.
    pub fn leaf(key: impl Into<String>, value: impl Into<String>) -> Self {
        Self {
            key: key.into(),
            value: Some(value.into()),
            children: Vec::new(),
        }
    }
    /// An interior node with no value of its own.
    pub fn section(key: impl Into<String>) -> Self {
        Self {
            key: key.into(),
            value: None,
            children: Vec::new(),
        }
    }
    /// Adds `child` under this node.
    pub fn add_child(&mut self, child: ConfigNode) {
        self.children.push(child);
    }
    /// This node's key segment.
    pub fn key(&self) -> &str {
        &self.key
    }
    /// This node's value, if it is a leaf with one.
    pub fn value(&self) -> Option<&str> {
        self.value.as_deref()
    }
    /// Number of direct children.
    pub fn num_children(&self) -> usize {
        self.children.len()
    }
    /// Resolves a dotted path (e.g. `"app.db.port"`) whose first segment must
    /// match this node's key. Returns the value only when the path ends on a
    /// node that has one.
    ///
    /// Previously this duplicated `lookup_relative` verbatim; it now
    /// delegates to the single implementation.
    pub fn lookup(&self, path: &str) -> Option<&str> {
        self.lookup_relative(path)
    }
    /// Matches `path`'s head segment against this node's key, then recurses
    /// into children on the remainder; a path ending here yields this value.
    fn lookup_relative(&self, path: &str) -> Option<&str> {
        let mut parts = path.splitn(2, '.');
        let head = parts.next()?;
        let tail = parts.next();
        if head != self.key {
            return None;
        }
        match tail {
            None => self.value.as_deref(),
            Some(rest) => self.children.iter().find_map(|c| c.lookup_relative(rest)),
        }
    }
}
/// Timestamped log of eta outcomes.
#[allow(dead_code)]
pub struct EtaLog {
    entries: Vec<(std::time::Instant, EtaOutcome)>,
}
#[allow(dead_code)]
impl EtaLog {
    /// Creates an empty log.
    pub fn new() -> Self {
        EtaLog { entries: vec![] }
    }
    /// Appends `outcome` stamped with the current time.
    pub fn record(&mut self, outcome: EtaOutcome) {
        let now = std::time::Instant::now();
        self.entries.push((now, outcome));
    }
    /// Number of recorded entries.
    pub fn len(&self) -> usize {
        self.entries.len()
    }
    /// True when nothing has been recorded.
    pub fn is_empty(&self) -> bool {
        self.entries.len() == 0
    }
    /// Number of entries equal to `target`.
    pub fn count(&self, target: EtaOutcome) -> usize {
        self.entries
            .iter()
            .fold(0, |acc, (_, o)| if *o == target { acc + 1 } else { acc })
    }
    /// Fraction of entries whose outcome counts as success; 0.0 when empty.
    pub fn success_rate(&self) -> f64 {
        let total = self.entries.len();
        if total == 0 {
            return 0.0;
        }
        let ok = self.entries.iter().filter(|(_, o)| o.is_success()).count();
        ok as f64 / total as f64
    }
}
/// Sorted-vector map with binary-search lookups; suited to small key sets.
#[allow(dead_code)]
pub struct SmallMap<K: Ord + Clone, V: Clone> {
    entries: Vec<(K, V)>,
}
#[allow(dead_code)]
impl<K: Ord + Clone, V: Clone> SmallMap<K, V> {
    /// Creates an empty map.
    pub fn new() -> Self {
        SmallMap { entries: vec![] }
    }
    /// Inserts or overwrites the value for `key`, keeping entries sorted.
    pub fn insert(&mut self, key: K, val: V) {
        match self.entries.binary_search_by(|(k, _)| k.cmp(&key)) {
            Ok(i) => self.entries[i].1 = val,
            Err(i) => self.entries.insert(i, (key, val)),
        }
    }
    /// Looks up `key` by binary search.
    pub fn get(&self, key: &K) -> Option<&V> {
        let i = self.entries.binary_search_by(|(k, _)| k.cmp(key)).ok()?;
        Some(&self.entries[i].1)
    }
    /// Number of entries.
    pub fn len(&self) -> usize {
        self.entries.len()
    }
    /// True when no entries are stored.
    pub fn is_empty(&self) -> bool {
        self.entries.is_empty()
    }
    /// All keys, in ascending order.
    pub fn keys(&self) -> Vec<&K> {
        self.entries.iter().map(|(k, _)| k).collect()
    }
    /// All values, in key order.
    pub fn values(&self) -> Vec<&V> {
        self.entries.iter().map(|(_, v)| v).collect()
    }
}
/// Integer analogue of `StatSummary`: count/sum/min/max over `i64` samples.
#[allow(dead_code)]
pub struct EtaStatCounter {
    count: u64,
    sum: i64,
    min: i64,
    max: i64,
}
#[allow(dead_code)]
impl EtaStatCounter {
    /// Creates an empty counter; min/max start at the extreme sentinels so
    /// the first sample always replaces them.
    pub fn new() -> Self {
        EtaStatCounter {
            count: 0,
            sum: 0,
            min: i64::MAX,
            max: i64::MIN,
        }
    }
    /// Folds one sample in.
    pub fn record(&mut self, v: i64) {
        self.count += 1;
        self.sum += v;
        self.min = self.min.min(v);
        self.max = self.max.max(v);
    }
    /// Arithmetic mean, or `None` before any sample.
    pub fn mean(&self) -> Option<f64> {
        match self.count {
            0 => None,
            n => Some(self.sum as f64 / n as f64),
        }
    }
    /// Number of samples recorded.
    pub fn count(&self) -> u64 {
        self.count
    }
}
/// Per-operation occurrence counter keyed by operation name.
#[allow(dead_code)]
pub struct EtaOpCounter {
    counts: HashMap<String, u64>,
}
#[allow(dead_code)]
impl EtaOpCounter {
    /// Creates an empty counter.
    pub fn new() -> Self {
        Self {
            counts: HashMap::new(),
        }
    }
    /// Increments the count for `op`, starting it at zero if unseen.
    pub fn inc(&mut self, op: &str) {
        let slot = self.counts.entry(op.to_string()).or_default();
        *slot += 1;
    }
    /// Count recorded for `op`; zero when never incremented.
    pub fn get(&self, op: &str) -> u64 {
        self.counts.get(op).map_or(0, |c| *c)
    }
    /// Sum of all per-operation counts.
    pub fn total(&self) -> u64 {
        self.counts.values().copied().sum()
    }
}