1use crate::lcnf::{LcnfArg, LcnfExpr, LcnfFunDecl, LcnfLetValue};
6use std::collections::{HashMap, HashSet};
7
8use std::collections::VecDeque;
9
#[allow(dead_code)]
/// Per-basic-block liveness sets for the OEX2 pipeline, indexed by block id.
#[derive(Debug, Clone, Default)]
pub struct OEX2Liveness {
    /// Variables live on entry to each block.
    pub live_in: Vec<Vec<usize>>,
    /// Variables live on exit from each block.
    pub live_out: Vec<Vec<usize>>,
    /// Variables defined in each block (deduplicated by `add_def`).
    pub defs: Vec<Vec<usize>>,
    /// Variables used in each block (deduplicated by `add_use`).
    pub uses: Vec<Vec<usize>>,
}
19impl OEX2Liveness {
20 #[allow(dead_code)]
21 pub fn new(n: usize) -> Self {
22 Self {
23 live_in: vec![Vec::new(); n],
24 live_out: vec![Vec::new(); n],
25 defs: vec![Vec::new(); n],
26 uses: vec![Vec::new(); n],
27 }
28 }
29 #[allow(dead_code)]
30 pub fn live_in(&self, b: usize, v: usize) -> bool {
31 self.live_in.get(b).map(|s| s.contains(&v)).unwrap_or(false)
32 }
33 #[allow(dead_code)]
34 pub fn live_out(&self, b: usize, v: usize) -> bool {
35 self.live_out
36 .get(b)
37 .map(|s| s.contains(&v))
38 .unwrap_or(false)
39 }
40 #[allow(dead_code)]
41 pub fn add_def(&mut self, b: usize, v: usize) {
42 if let Some(s) = self.defs.get_mut(b) {
43 if !s.contains(&v) {
44 s.push(v);
45 }
46 }
47 }
48 #[allow(dead_code)]
49 pub fn add_use(&mut self, b: usize, v: usize) {
50 if let Some(s) = self.uses.get_mut(b) {
51 if !s.contains(&v) {
52 s.push(v);
53 }
54 }
55 }
56 #[allow(dead_code)]
57 pub fn var_is_used_in_block(&self, b: usize, v: usize) -> bool {
58 self.uses.get(b).map(|s| s.contains(&v)).unwrap_or(false)
59 }
60 #[allow(dead_code)]
61 pub fn var_is_def_in_block(&self, b: usize, v: usize) -> bool {
62 self.defs.get(b).map(|s| s.contains(&v)).unwrap_or(false)
63 }
64}
#[allow(dead_code)]
/// Directed dependency graph over a fixed set of `n` nodes, kept as forward
/// (`adj`) and reverse (`rev`) adjacency lists plus a distinct-edge counter.
#[derive(Debug, Clone)]
pub struct OEExtDepGraph {
    /// Number of nodes (fixed at construction).
    pub(super) n: usize,
    /// Forward adjacency: `adj[u]` holds successors of `u`.
    pub(super) adj: Vec<Vec<usize>>,
    /// Reverse adjacency: `rev[v]` holds predecessors of `v`.
    pub(super) rev: Vec<Vec<usize>>,
    /// Count of distinct edges added via `add_edge`.
    pub(super) edge_count: usize,
}
impl OEExtDepGraph {
    #[allow(dead_code)]
    /// Creates a graph with `n` nodes and no edges.
    pub fn new(n: usize) -> Self {
        Self {
            n,
            adj: vec![Vec::new(); n],
            rev: vec![Vec::new(); n],
            edge_count: 0,
        }
    }
    #[allow(dead_code)]
    /// Adds a directed edge `from -> to`. Out-of-range endpoints and
    /// duplicate edges are silently ignored.
    pub fn add_edge(&mut self, from: usize, to: usize) {
        if from < self.n && to < self.n {
            if !self.adj[from].contains(&to) {
                self.adj[from].push(to);
                self.rev[to].push(from);
                self.edge_count += 1;
            }
        }
    }
    #[allow(dead_code)]
    /// Successors of `n` (empty slice when `n` is out of range).
    pub fn succs(&self, n: usize) -> &[usize] {
        self.adj.get(n).map(|v| v.as_slice()).unwrap_or(&[])
    }
    #[allow(dead_code)]
    /// Predecessors of `n` (empty slice when `n` is out of range).
    pub fn preds(&self, n: usize) -> &[usize] {
        self.rev.get(n).map(|v| v.as_slice()).unwrap_or(&[])
    }
    #[allow(dead_code)]
    /// Topological order via Kahn's algorithm; `None` when the graph is cyclic.
    pub fn topo_sort(&self) -> Option<Vec<usize>> {
        // In-degree of each node, read off the reverse adjacency lists.
        let mut deg: Vec<usize> = (0..self.n).map(|i| self.rev[i].len()).collect();
        let mut q: std::collections::VecDeque<usize> =
            (0..self.n).filter(|&i| deg[i] == 0).collect();
        let mut out = Vec::with_capacity(self.n);
        while let Some(u) = q.pop_front() {
            out.push(u);
            for &v in &self.adj[u] {
                deg[v] -= 1;
                if deg[v] == 0 {
                    q.push_back(v);
                }
            }
        }
        // Any node left unprocessed sits on a cycle.
        if out.len() == self.n {
            Some(out)
        } else {
            None
        }
    }
    #[allow(dead_code)]
    /// True when the graph contains at least one cycle.
    pub fn has_cycle(&self) -> bool {
        self.topo_sort().is_none()
    }
    #[allow(dead_code)]
    /// Nodes reachable from `start` (including `start`), via iterative DFS.
    /// An out-of-range `start` yields an empty vector.
    pub fn reachable(&self, start: usize) -> Vec<usize> {
        let mut vis = vec![false; self.n];
        let mut stk = vec![start];
        let mut out = Vec::new();
        while let Some(u) = stk.pop() {
            if u < self.n && !vis[u] {
                vis[u] = true;
                out.push(u);
                for &v in &self.adj[u] {
                    if !vis[v] {
                        stk.push(v);
                    }
                }
            }
        }
        out
    }
    #[allow(dead_code)]
    /// Strongly connected components via iterative Kosaraju: a DFS post-order
    /// pass over `adj`, then a flood-fill sweep over `rev` in reverse
    /// post-order. Component order and in-component node order depend on
    /// traversal order — treat both as unspecified.
    pub fn scc(&self) -> Vec<Vec<usize>> {
        let mut visited = vec![false; self.n];
        let mut order = Vec::new();
        for i in 0..self.n {
            if !visited[i] {
                // Explicit (node, next-child-index) stack emulates recursive
                // DFS; a node joins `order` once all children are expanded.
                let mut stk = vec![(i, 0usize)];
                while let Some((u, idx)) = stk.last_mut() {
                    if !visited[*u] {
                        visited[*u] = true;
                    }
                    if *idx < self.adj[*u].len() {
                        let v = self.adj[*u][*idx];
                        *idx += 1;
                        if !visited[v] {
                            stk.push((v, 0));
                        }
                    } else {
                        order.push(*u);
                        stk.pop();
                    }
                }
            }
        }
        // Second pass on the reversed graph, seeded in reverse post-order;
        // each fill discovers exactly one component.
        let mut comp = vec![usize::MAX; self.n];
        let mut components: Vec<Vec<usize>> = Vec::new();
        for &start in order.iter().rev() {
            if comp[start] == usize::MAX {
                let cid = components.len();
                let mut component = Vec::new();
                let mut stk = vec![start];
                while let Some(u) = stk.pop() {
                    if comp[u] == usize::MAX {
                        comp[u] = cid;
                        component.push(u);
                        for &v in &self.rev[u] {
                            if comp[v] == usize::MAX {
                                stk.push(v);
                            }
                        }
                    }
                }
                components.push(component);
            }
        }
        components
    }
    #[allow(dead_code)]
    /// Number of nodes the graph was created with.
    pub fn node_count(&self) -> usize {
        self.n
    }
    #[allow(dead_code)]
    /// Number of distinct edges added so far.
    pub fn edge_count(&self) -> usize {
        self.edge_count
    }
}
#[derive(Debug, Clone, Default)]
/// Aggregate counters summarizing escape-analysis classification results.
pub struct EscapeReport {
    /// Total allocation sites considered.
    pub total_allocations: usize,
    /// Sites classified as stack-allocatable.
    pub stack_allocated: usize,
    /// Sites forced onto the heap.
    pub heap_allocated: usize,
    /// Sites whose classification could not be determined.
    pub unknown: usize,
}
213impl EscapeReport {
214 pub fn summary(&self) -> String {
216 format!(
217 "EscapeReport: total={} stack={} heap={} unknown={} (estimated savings: {} bytes)",
218 self.total_allocations,
219 self.stack_allocated,
220 self.heap_allocated,
221 self.unknown,
222 self.stack_savings_estimate(),
223 )
224 }
225 pub fn stack_savings_estimate(&self) -> u64 {
228 self.stack_allocated as u64 * 32
229 }
230}
#[allow(dead_code)]
/// Directed dependency graph over a fixed set of `n` nodes (OEX2 variant),
/// kept as forward (`adj`) and reverse (`rev`) adjacency lists plus a
/// distinct-edge counter. Mirrors `OEExtDepGraph`.
#[derive(Debug, Clone)]
pub struct OEX2DepGraph {
    /// Number of nodes (fixed at construction).
    pub(super) n: usize,
    /// Forward adjacency: `adj[u]` holds successors of `u`.
    pub(super) adj: Vec<Vec<usize>>,
    /// Reverse adjacency: `rev[v]` holds predecessors of `v`.
    pub(super) rev: Vec<Vec<usize>>,
    /// Count of distinct edges added via `add_edge`.
    pub(super) edge_count: usize,
}
impl OEX2DepGraph {
    #[allow(dead_code)]
    /// Creates a graph with `n` nodes and no edges.
    pub fn new(n: usize) -> Self {
        Self {
            n,
            adj: vec![Vec::new(); n],
            rev: vec![Vec::new(); n],
            edge_count: 0,
        }
    }
    #[allow(dead_code)]
    /// Adds a directed edge `from -> to`. Out-of-range endpoints and
    /// duplicate edges are silently ignored.
    pub fn add_edge(&mut self, from: usize, to: usize) {
        if from < self.n && to < self.n {
            if !self.adj[from].contains(&to) {
                self.adj[from].push(to);
                self.rev[to].push(from);
                self.edge_count += 1;
            }
        }
    }
    #[allow(dead_code)]
    /// Successors of `n` (empty slice when `n` is out of range).
    pub fn succs(&self, n: usize) -> &[usize] {
        self.adj.get(n).map(|v| v.as_slice()).unwrap_or(&[])
    }
    #[allow(dead_code)]
    /// Predecessors of `n` (empty slice when `n` is out of range).
    pub fn preds(&self, n: usize) -> &[usize] {
        self.rev.get(n).map(|v| v.as_slice()).unwrap_or(&[])
    }
    #[allow(dead_code)]
    /// Topological order via Kahn's algorithm; `None` when the graph is cyclic.
    pub fn topo_sort(&self) -> Option<Vec<usize>> {
        // In-degree of each node, read off the reverse adjacency lists.
        let mut deg: Vec<usize> = (0..self.n).map(|i| self.rev[i].len()).collect();
        let mut q: std::collections::VecDeque<usize> =
            (0..self.n).filter(|&i| deg[i] == 0).collect();
        let mut out = Vec::with_capacity(self.n);
        while let Some(u) = q.pop_front() {
            out.push(u);
            for &v in &self.adj[u] {
                deg[v] -= 1;
                if deg[v] == 0 {
                    q.push_back(v);
                }
            }
        }
        // Any node left unprocessed sits on a cycle.
        if out.len() == self.n {
            Some(out)
        } else {
            None
        }
    }
    #[allow(dead_code)]
    /// True when the graph contains at least one cycle.
    pub fn has_cycle(&self) -> bool {
        self.topo_sort().is_none()
    }
    #[allow(dead_code)]
    /// Nodes reachable from `start` (including `start`), via iterative DFS.
    /// An out-of-range `start` yields an empty vector.
    pub fn reachable(&self, start: usize) -> Vec<usize> {
        let mut vis = vec![false; self.n];
        let mut stk = vec![start];
        let mut out = Vec::new();
        while let Some(u) = stk.pop() {
            if u < self.n && !vis[u] {
                vis[u] = true;
                out.push(u);
                for &v in &self.adj[u] {
                    if !vis[v] {
                        stk.push(v);
                    }
                }
            }
        }
        out
    }
    #[allow(dead_code)]
    /// Strongly connected components via iterative Kosaraju: a DFS post-order
    /// pass over `adj`, then a flood-fill sweep over `rev` in reverse
    /// post-order. Component order and in-component node order depend on
    /// traversal order — treat both as unspecified.
    pub fn scc(&self) -> Vec<Vec<usize>> {
        let mut visited = vec![false; self.n];
        let mut order = Vec::new();
        for i in 0..self.n {
            if !visited[i] {
                // Explicit (node, next-child-index) stack emulates recursive
                // DFS; a node joins `order` once all children are expanded.
                let mut stk = vec![(i, 0usize)];
                while let Some((u, idx)) = stk.last_mut() {
                    if !visited[*u] {
                        visited[*u] = true;
                    }
                    if *idx < self.adj[*u].len() {
                        let v = self.adj[*u][*idx];
                        *idx += 1;
                        if !visited[v] {
                            stk.push((v, 0));
                        }
                    } else {
                        order.push(*u);
                        stk.pop();
                    }
                }
            }
        }
        // Second pass on the reversed graph, seeded in reverse post-order;
        // each fill discovers exactly one component.
        let mut comp = vec![usize::MAX; self.n];
        let mut components: Vec<Vec<usize>> = Vec::new();
        for &start in order.iter().rev() {
            if comp[start] == usize::MAX {
                let cid = components.len();
                let mut component = Vec::new();
                let mut stk = vec![start];
                while let Some(u) = stk.pop() {
                    if comp[u] == usize::MAX {
                        comp[u] = cid;
                        component.push(u);
                        for &v in &self.rev[u] {
                            if comp[v] == usize::MAX {
                                stk.push(v);
                            }
                        }
                    }
                }
                components.push(component);
            }
        }
        components
    }
    #[allow(dead_code)]
    /// Number of nodes the graph was created with.
    pub fn node_count(&self) -> usize {
        self.n
    }
    #[allow(dead_code)]
    /// Number of distinct edges added so far.
    pub fn edge_count(&self) -> usize {
        self.edge_count
    }
}
#[allow(dead_code)]
/// Registry of pass configurations with a parallel per-pass stats slot
/// (`configs[i]` pairs with `stats[i]`).
#[derive(Debug, Default)]
pub struct OEExtPassRegistry {
    /// Registered pass configurations, in registration order.
    pub(super) configs: Vec<OEExtPassConfig>,
    /// Stats for each registered pass, index-aligned with `configs`.
    pub(super) stats: Vec<OEExtPassStats>,
}
374impl OEExtPassRegistry {
375 #[allow(dead_code)]
376 pub fn new() -> Self {
377 Self::default()
378 }
379 #[allow(dead_code)]
380 pub fn register(&mut self, c: OEExtPassConfig) {
381 self.stats.push(OEExtPassStats::new());
382 self.configs.push(c);
383 }
384 #[allow(dead_code)]
385 pub fn len(&self) -> usize {
386 self.configs.len()
387 }
388 #[allow(dead_code)]
389 pub fn is_empty(&self) -> bool {
390 self.configs.is_empty()
391 }
392 #[allow(dead_code)]
393 pub fn get(&self, i: usize) -> Option<&OEExtPassConfig> {
394 self.configs.get(i)
395 }
396 #[allow(dead_code)]
397 pub fn get_stats(&self, i: usize) -> Option<&OEExtPassStats> {
398 self.stats.get(i)
399 }
400 #[allow(dead_code)]
401 pub fn enabled_passes(&self) -> Vec<&OEExtPassConfig> {
402 self.configs.iter().filter(|c| c.enabled).collect()
403 }
404 #[allow(dead_code)]
405 pub fn passes_in_phase(&self, ph: &OEExtPassPhase) -> Vec<&OEExtPassConfig> {
406 self.configs
407 .iter()
408 .filter(|c| c.enabled && &c.phase == ph)
409 .collect()
410 }
411 #[allow(dead_code)]
412 pub fn total_nodes_visited(&self) -> usize {
413 self.stats.iter().map(|s| s.nodes_visited).sum()
414 }
415 #[allow(dead_code)]
416 pub fn any_changed(&self) -> bool {
417 self.stats.iter().any(|s| s.changed)
418 }
419}
#[allow(dead_code)]
/// Configuration for one optimizer pass (ext variant).
#[derive(Debug, Clone)]
pub struct OEExtPassConfig {
    /// Human-readable pass name.
    pub name: String,
    /// Pipeline phase the pass runs in.
    pub phase: OEExtPassPhase,
    /// Whether the pass participates in runs.
    pub enabled: bool,
    /// Upper bound on fixpoint iterations.
    pub max_iterations: usize,
    /// Debug verbosity level; 0 means off.
    pub debug: u32,
    /// Optional per-run timeout in milliseconds.
    pub timeout_ms: Option<u64>,
}
431impl OEExtPassConfig {
432 #[allow(dead_code)]
433 pub fn new(name: impl Into<String>) -> Self {
434 Self {
435 name: name.into(),
436 phase: OEExtPassPhase::Middle,
437 enabled: true,
438 max_iterations: 100,
439 debug: 0,
440 timeout_ms: None,
441 }
442 }
443 #[allow(dead_code)]
444 pub fn with_phase(mut self, phase: OEExtPassPhase) -> Self {
445 self.phase = phase;
446 self
447 }
448 #[allow(dead_code)]
449 pub fn with_max_iter(mut self, n: usize) -> Self {
450 self.max_iterations = n;
451 self
452 }
453 #[allow(dead_code)]
454 pub fn with_debug(mut self, d: u32) -> Self {
455 self.debug = d;
456 self
457 }
458 #[allow(dead_code)]
459 pub fn disabled(mut self) -> Self {
460 self.enabled = false;
461 self
462 }
463 #[allow(dead_code)]
464 pub fn with_timeout(mut self, ms: u64) -> Self {
465 self.timeout_ms = Some(ms);
466 self
467 }
468 #[allow(dead_code)]
469 pub fn is_debug_enabled(&self) -> bool {
470 self.debug > 0
471 }
472}
#[derive(Debug, Default)]
/// Escape analysis driver holding one result per analyzed function,
/// keyed by function name.
pub struct EscapeAnalyzer {
    /// Analysis results, keyed by `LcnfFunDecl::name`.
    pub results: HashMap<String, EscapeAnalysisResult>,
}
impl EscapeAnalyzer {
    /// Creates an analyzer with no recorded results.
    pub fn new() -> Self {
        EscapeAnalyzer {
            results: HashMap::new(),
        }
    }
    /// Analyzes every declaration, storing one result per function name.
    /// A repeated name overwrites the earlier result.
    pub fn analyze(&mut self, decls: &[LcnfFunDecl]) {
        for decl in decls {
            let result = self.analyze_decl(decl);
            self.results.insert(decl.name.clone(), result);
        }
    }
    /// Analyzes a single declaration: walks the body collecting allocation
    /// sites and escape statuses, then reconciles sites with the statuses.
    pub fn analyze_decl(&mut self, decl: &LcnfFunDecl) -> EscapeAnalysisResult {
        let mut result = EscapeAnalysisResult::new(&decl.name);
        self.analyze_expr(&decl.body, &mut result, true);
        self.propagate_escapes(&mut result);
        result
    }
    /// Recursively walks `expr`, recording escape statuses in `result`.
    ///
    /// Variables are keyed by the synthetic name `_x<id>` built from the
    /// LCNF variable id. `in_tail` is threaded through the recursion but
    /// currently unused (see `let _ = in_tail` below) — presumably reserved
    /// for tail-position-specific handling; confirm before removing.
    pub fn analyze_expr(&self, expr: &LcnfExpr, result: &mut EscapeAnalysisResult, in_tail: bool) {
        match expr {
            LcnfExpr::Let {
                name, value, body, ..
            } => {
                self.analyze_let_value(name, value, result);
                self.analyze_expr(body, result, in_tail);
            }
            LcnfExpr::Case { alts, default, .. } => {
                // Each alternative (and the default, if present) inherits the
                // case's own tail position.
                for alt in alts {
                    self.analyze_expr(&alt.body, result, in_tail);
                }
                if let Some(def) = default {
                    self.analyze_expr(def, result, in_tail);
                }
            }
            LcnfExpr::Return(arg) => {
                // A returned variable escapes via the return value: overwrite
                // its status and upgrade any matching allocation sites.
                if let LcnfArg::Var(vid) = arg {
                    let var_name = format!("_x{}", vid.0);
                    result
                        .escape_sets
                        .insert(var_name.clone(), EscapeStatus::ReturnEscape);
                    for site in &mut result.allocations {
                        if site.var == var_name {
                            site.set_status(EscapeStatus::ReturnEscape);
                        }
                    }
                }
                let _ = in_tail;
            }
            LcnfExpr::TailCall(func, args) => {
                // Callee escapes at argument position 0, arguments at 1..;
                // `insert` overwrites any earlier status for these variables.
                if let LcnfArg::Var(vid) = func {
                    let var_name = format!("_x{}", vid.0);
                    result
                        .escape_sets
                        .insert(var_name, EscapeStatus::ArgumentEscape(0));
                }
                for (idx, arg) in args.iter().enumerate() {
                    if let LcnfArg::Var(vid) = arg {
                        let var_name = format!("_x{}", vid.0);
                        result
                            .escape_sets
                            .insert(var_name, EscapeStatus::ArgumentEscape(idx + 1));
                    }
                }
            }
            LcnfExpr::Unreachable => {}
        }
    }
    /// Records the effect of one `let` binding on `result`.
    pub(super) fn analyze_let_value(
        &self,
        name: &str,
        value: &LcnfLetValue,
        result: &mut EscapeAnalysisResult,
    ) {
        match value {
            LcnfLetValue::Ctor(_, _, args) | LcnfLetValue::Reuse(_, _, _, args) => {
                // Constructor (or reuse) allocates: record a site sized at
                // 8 bytes per variable field, optimistically NoEscape until a
                // later use proves otherwise.
                let mut site = AllocationSite::new(name, &result.func_name);
                let obj_fields = args.iter().filter(|a| matches!(a, LcnfArg::Var(_))).count();
                site.size_estimate = (obj_fields as u64) * 8;
                site.set_status(EscapeStatus::NoEscape);
                result.allocations.push(site);
                result
                    .escape_sets
                    .insert(name.to_owned(), EscapeStatus::NoEscape);
                // Stored fields default to HeapEscape, but an existing status
                // is kept (`or_insert` does not overwrite).
                for (idx, arg) in args.iter().enumerate() {
                    if let LcnfArg::Var(vid) = arg {
                        let arg_name = format!("_x{}", vid.0);
                        result
                            .escape_sets
                            .entry(arg_name)
                            .or_insert(EscapeStatus::HeapEscape);
                        let _ = idx;
                    }
                }
            }
            LcnfLetValue::App(func, args) => {
                // Non-tail application: callee escapes at position 0 only if
                // not already classified; arguments overwrite at 1...
                if let LcnfArg::Var(vid) = func {
                    let fn_name = format!("_x{}", vid.0);
                    result
                        .escape_sets
                        .entry(fn_name)
                        .or_insert(EscapeStatus::ArgumentEscape(0));
                }
                for (idx, arg) in args.iter().enumerate() {
                    if let LcnfArg::Var(vid) = arg {
                        let arg_name = format!("_x{}", vid.0);
                        result
                            .escape_sets
                            .insert(arg_name, EscapeStatus::ArgumentEscape(idx + 1));
                    }
                }
            }
            LcnfLetValue::Proj(_, _, _src_vid) => {
                // Projection yields a local view of part of an object.
                result
                    .escape_sets
                    .insert(name.to_owned(), EscapeStatus::LocalEscape);
            }
            LcnfLetValue::FVar(vid) => {
                // Variable alias: both source and destination become local
                // escapes; the source keeps any stronger pre-existing status.
                let src = format!("_x{}", vid.0);
                result
                    .escape_sets
                    .entry(src)
                    .or_insert(EscapeStatus::LocalEscape);
                result
                    .escape_sets
                    .insert(name.to_owned(), EscapeStatus::LocalEscape);
            }
            LcnfLetValue::Reset(_) => {
                result
                    .escape_sets
                    .insert(name.to_owned(), EscapeStatus::LocalEscape);
            }
            LcnfLetValue::Lit(_) | LcnfLetValue::Erased => {
                // Literals and erased values never allocate.
                result
                    .escape_sets
                    .insert(name.to_owned(), EscapeStatus::NoEscape);
            }
        }
    }
    /// Copies heap-forcing statuses from the escape sets back onto the
    /// allocation sites; stack-eligible statuses never downgrade a site.
    pub fn propagate_escapes(&self, result: &mut EscapeAnalysisResult) {
        for site in &mut result.allocations {
            if let Some(status) = result.escape_sets.get(&site.var) {
                if status.is_heap_allocated() {
                    site.set_status(status.clone());
                }
            }
        }
    }
}
#[allow(dead_code)]
/// Escape flags for one field of one struct type.
#[derive(Debug, Clone)]
pub struct FieldEscapeInfo {
    /// Name of the struct type owning the field.
    pub struct_type: String,
    /// Name of the field.
    pub field_name: String,
    /// Field value escapes via a return.
    pub escapes_via_return: bool,
    /// Field value escapes via a store.
    pub escapes_via_store: bool,
    /// Field value escapes via a call argument.
    pub escapes_via_call: bool,
}
646impl FieldEscapeInfo {
647 #[allow(dead_code)]
648 pub fn new(struct_type: impl Into<String>, field: impl Into<String>) -> Self {
649 FieldEscapeInfo {
650 struct_type: struct_type.into(),
651 field_name: field.into(),
652 escapes_via_return: false,
653 escapes_via_store: false,
654 escapes_via_call: false,
655 }
656 }
657 #[allow(dead_code)]
658 pub fn escapes(&self) -> bool {
659 self.escapes_via_return || self.escapes_via_store || self.escapes_via_call
660 }
661}
#[allow(dead_code)]
/// Dominator tree over `u32` node ids: immediate dominators, per-node
/// children lists, and depths.
#[derive(Debug, Clone)]
pub struct OEDominatorTree {
    /// Immediate dominator of each node (`None` for roots/unset nodes).
    pub idom: Vec<Option<u32>>,
    /// Children of each node in the dominator tree.
    pub dom_children: Vec<Vec<u32>>,
    /// Depth of each node (roots at 0).
    pub dom_depth: Vec<u32>,
}
669impl OEDominatorTree {
670 #[allow(dead_code)]
671 pub fn new(size: usize) -> Self {
672 OEDominatorTree {
673 idom: vec![None; size],
674 dom_children: vec![Vec::new(); size],
675 dom_depth: vec![0; size],
676 }
677 }
678 #[allow(dead_code)]
679 pub fn set_idom(&mut self, node: usize, idom: u32) {
680 self.idom[node] = Some(idom);
681 }
682 #[allow(dead_code)]
683 pub fn dominates(&self, a: usize, b: usize) -> bool {
684 if a == b {
685 return true;
686 }
687 let mut cur = b;
688 loop {
689 match self.idom[cur] {
690 Some(parent) if parent as usize == a => return true,
691 Some(parent) if parent as usize == cur => return false,
692 Some(parent) => cur = parent as usize,
693 None => return false,
694 }
695 }
696 }
697 #[allow(dead_code)]
698 pub fn depth(&self, node: usize) -> u32 {
699 self.dom_depth.get(node).copied().unwrap_or(0)
700 }
701}
#[allow(dead_code)]
/// Bounded keyed cache of analysis artifacts with hit/miss accounting.
#[derive(Debug, Clone)]
pub struct OEAnalysisCache {
    /// Cached entries keyed by analysis key.
    pub(super) entries: std::collections::HashMap<String, OECacheEntry>,
    /// Capacity; inserting beyond it evicts an arbitrary entry.
    pub(super) max_size: usize,
    /// Lookup hits recorded by `get`.
    pub(super) hits: u64,
    /// Lookup misses recorded by `get`.
    pub(super) misses: u64,
}
710impl OEAnalysisCache {
711 #[allow(dead_code)]
712 pub fn new(max_size: usize) -> Self {
713 OEAnalysisCache {
714 entries: std::collections::HashMap::new(),
715 max_size,
716 hits: 0,
717 misses: 0,
718 }
719 }
720 #[allow(dead_code)]
721 pub fn get(&mut self, key: &str) -> Option<&OECacheEntry> {
722 if self.entries.contains_key(key) {
723 self.hits += 1;
724 self.entries.get(key)
725 } else {
726 self.misses += 1;
727 None
728 }
729 }
730 #[allow(dead_code)]
731 pub fn insert(&mut self, key: String, data: Vec<u8>) {
732 if self.entries.len() >= self.max_size {
733 if let Some(oldest) = self.entries.keys().next().cloned() {
734 self.entries.remove(&oldest);
735 }
736 }
737 self.entries.insert(
738 key.clone(),
739 OECacheEntry {
740 key,
741 data,
742 timestamp: 0,
743 valid: true,
744 },
745 );
746 }
747 #[allow(dead_code)]
748 pub fn invalidate(&mut self, key: &str) {
749 if let Some(entry) = self.entries.get_mut(key) {
750 entry.valid = false;
751 }
752 }
753 #[allow(dead_code)]
754 pub fn clear(&mut self) {
755 self.entries.clear();
756 }
757 #[allow(dead_code)]
758 pub fn hit_rate(&self) -> f64 {
759 let total = self.hits + self.misses;
760 if total == 0 {
761 return 0.0;
762 }
763 self.hits as f64 / total as f64
764 }
765 #[allow(dead_code)]
766 pub fn size(&self) -> usize {
767 self.entries.len()
768 }
769}
#[allow(dead_code)]
/// Per-pass execution counters (OEX2 variant).
#[derive(Debug, Clone, Default)]
pub struct OEX2PassStats {
    /// Fixpoint iterations performed.
    pub iterations: usize,
    /// Whether the pass changed anything.
    pub changed: bool,
    /// Nodes visited.
    pub nodes_visited: usize,
    /// Nodes modified.
    pub nodes_modified: usize,
    /// Accumulated wall time in milliseconds.
    pub time_ms: u64,
    /// Peak memory in bytes (max-merged, not summed).
    pub memory_bytes: usize,
    /// Errors encountered.
    pub errors: usize,
}
782impl OEX2PassStats {
783 #[allow(dead_code)]
784 pub fn new() -> Self {
785 Self::default()
786 }
787 #[allow(dead_code)]
788 pub fn visit(&mut self) {
789 self.nodes_visited += 1;
790 }
791 #[allow(dead_code)]
792 pub fn modify(&mut self) {
793 self.nodes_modified += 1;
794 self.changed = true;
795 }
796 #[allow(dead_code)]
797 pub fn iterate(&mut self) {
798 self.iterations += 1;
799 }
800 #[allow(dead_code)]
801 pub fn error(&mut self) {
802 self.errors += 1;
803 }
804 #[allow(dead_code)]
805 pub fn efficiency(&self) -> f64 {
806 if self.nodes_visited == 0 {
807 0.0
808 } else {
809 self.nodes_modified as f64 / self.nodes_visited as f64
810 }
811 }
812 #[allow(dead_code)]
813 pub fn merge(&mut self, o: &OEX2PassStats) {
814 self.iterations += o.iterations;
815 self.changed |= o.changed;
816 self.nodes_visited += o.nodes_visited;
817 self.nodes_modified += o.nodes_modified;
818 self.time_ms += o.time_ms;
819 self.memory_bytes = self.memory_bytes.max(o.memory_bytes);
820 self.errors += o.errors;
821 }
822}
#[allow(dead_code)]
/// Escape-flow graph: nodes are `u32` ids, edges carry an escape kind, and
/// selected nodes map to an allocation target.
pub struct EscapeFlowGraph2 {
    /// Distinct node ids, in insertion order.
    pub nodes: Vec<u32>,
    /// Directed edges; parallel edges are allowed.
    pub edges: Vec<EscapeFlowEdge>,
    /// Allocation target registered per node (at most one each).
    pub allocation_sites: std::collections::HashMap<u32, PointsToTarget>,
}
829impl EscapeFlowGraph2 {
830 #[allow(dead_code)]
831 pub fn new() -> Self {
832 EscapeFlowGraph2 {
833 nodes: Vec::new(),
834 edges: Vec::new(),
835 allocation_sites: std::collections::HashMap::new(),
836 }
837 }
838 #[allow(dead_code)]
839 pub fn add_node(&mut self, id: u32) {
840 if !self.nodes.contains(&id) {
841 self.nodes.push(id);
842 }
843 }
844 #[allow(dead_code)]
845 pub fn add_edge(&mut self, from: u32, to: u32, kind: EscapeEdgeKind) {
846 self.add_node(from);
847 self.add_node(to);
848 self.edges.push(EscapeFlowEdge {
849 from,
850 to,
851 edge_kind: kind,
852 });
853 }
854 #[allow(dead_code)]
855 pub fn register_allocation(&mut self, node: u32, target: PointsToTarget) {
856 self.allocation_sites.insert(node, target);
857 }
858 #[allow(dead_code)]
859 pub fn successors(&self, node: u32) -> Vec<u32> {
860 self.edges
861 .iter()
862 .filter(|e| e.from == node)
863 .map(|e| e.to)
864 .collect()
865 }
866 #[allow(dead_code)]
867 pub fn predecessors(&self, node: u32) -> Vec<u32> {
868 self.edges
869 .iter()
870 .filter(|e| e.to == node)
871 .map(|e| e.from)
872 .collect()
873 }
874 #[allow(dead_code)]
875 pub fn node_count(&self) -> usize {
876 self.nodes.len()
877 }
878 #[allow(dead_code)]
879 pub fn edge_count(&self) -> usize {
880 self.edges.len()
881 }
882}
#[derive(Debug, Clone)]
/// Escape-analysis result for a single function.
pub struct EscapeAnalysisResult {
    /// Name of the analyzed function.
    pub func_name: String,
    /// Allocation sites discovered in the function body.
    pub allocations: Vec<AllocationSite>,
    /// Escape status per variable (keys are synthetic `_x<id>` names or let names).
    pub escape_sets: HashMap<String, EscapeStatus>,
}
893impl EscapeAnalysisResult {
894 pub fn new(func: impl Into<String>) -> Self {
896 EscapeAnalysisResult {
897 func_name: func.into(),
898 allocations: Vec::new(),
899 escape_sets: HashMap::new(),
900 }
901 }
902 pub fn num_escaped(&self) -> usize {
904 self.allocations
905 .iter()
906 .filter(|a| a.status.is_heap_allocated())
907 .count()
908 }
909 pub fn num_stack_eligible(&self) -> usize {
911 self.allocations
912 .iter()
913 .filter(|a| a.status.can_stack_allocate())
914 .count()
915 }
916 pub fn stack_allocation_candidates(&self) -> Vec<&AllocationSite> {
918 self.allocations
919 .iter()
920 .filter(|a| a.status.can_stack_allocate())
921 .collect()
922 }
923}
#[allow(dead_code)]
/// Per-pass execution counters (ext variant).
#[derive(Debug, Clone, Default)]
pub struct OEExtPassStats {
    /// Fixpoint iterations performed.
    pub iterations: usize,
    /// Whether the pass changed anything.
    pub changed: bool,
    /// Nodes visited.
    pub nodes_visited: usize,
    /// Nodes modified.
    pub nodes_modified: usize,
    /// Accumulated wall time in milliseconds.
    pub time_ms: u64,
    /// Peak memory in bytes (max-merged, not summed).
    pub memory_bytes: usize,
    /// Errors encountered.
    pub errors: usize,
}
936impl OEExtPassStats {
937 #[allow(dead_code)]
938 pub fn new() -> Self {
939 Self::default()
940 }
941 #[allow(dead_code)]
942 pub fn visit(&mut self) {
943 self.nodes_visited += 1;
944 }
945 #[allow(dead_code)]
946 pub fn modify(&mut self) {
947 self.nodes_modified += 1;
948 self.changed = true;
949 }
950 #[allow(dead_code)]
951 pub fn iterate(&mut self) {
952 self.iterations += 1;
953 }
954 #[allow(dead_code)]
955 pub fn error(&mut self) {
956 self.errors += 1;
957 }
958 #[allow(dead_code)]
959 pub fn efficiency(&self) -> f64 {
960 if self.nodes_visited == 0 {
961 0.0
962 } else {
963 self.nodes_modified as f64 / self.nodes_visited as f64
964 }
965 }
966 #[allow(dead_code)]
967 pub fn merge(&mut self, o: &OEExtPassStats) {
968 self.iterations += o.iterations;
969 self.changed |= o.changed;
970 self.nodes_visited += o.nodes_visited;
971 self.nodes_modified += o.nodes_modified;
972 self.time_ms += o.time_ms;
973 self.memory_bytes = self.memory_bytes.max(o.memory_bytes);
974 self.errors += o.errors;
975 }
976}
#[allow(dead_code)]
/// Whole-program escape information: one `EscapeSummary` per function name.
pub struct InterproceduralEscapeAnalysis {
    /// Registered summaries, keyed by function name.
    pub function_summaries: std::collections::HashMap<String, EscapeSummary>,
}
981impl InterproceduralEscapeAnalysis {
982 #[allow(dead_code)]
983 pub fn new() -> Self {
984 InterproceduralEscapeAnalysis {
985 function_summaries: std::collections::HashMap::new(),
986 }
987 }
988 #[allow(dead_code)]
989 pub fn register_summary(&mut self, func: impl Into<String>, summary: EscapeSummary) {
990 self.function_summaries.insert(func.into(), summary);
991 }
992 #[allow(dead_code)]
993 pub fn get_summary(&self, func: &str) -> Option<&EscapeSummary> {
994 self.function_summaries.get(func)
995 }
996 #[allow(dead_code)]
997 pub fn param_escapes(&self, func: &str, param_idx: u32) -> bool {
998 self.function_summaries
999 .get(func)
1000 .map(|s| s.escaping_params.contains(¶m_idx))
1001 .unwrap_or(true)
1002 }
1003 #[allow(dead_code)]
1004 pub fn function_count(&self) -> usize {
1005 self.function_summaries.len()
1006 }
1007}
#[allow(dead_code)]
/// Run-level statistics for one optimizer pass.
#[derive(Debug, Clone, Default)]
pub struct OEPassStats {
    /// Total runs recorded.
    pub total_runs: u32,
    /// Runs counted as successful.
    pub successful_runs: u32,
    /// Changes accumulated across all runs.
    pub total_changes: u64,
    /// Accumulated wall time in milliseconds.
    pub time_ms: u64,
    /// Iterations used by the most recent run (not accumulated).
    pub iterations_used: u32,
}
1017impl OEPassStats {
1018 #[allow(dead_code)]
1019 pub fn new() -> Self {
1020 Self::default()
1021 }
1022 #[allow(dead_code)]
1023 pub fn record_run(&mut self, changes: u64, time_ms: u64, iterations: u32) {
1024 self.total_runs += 1;
1025 self.successful_runs += 1;
1026 self.total_changes += changes;
1027 self.time_ms += time_ms;
1028 self.iterations_used = iterations;
1029 }
1030 #[allow(dead_code)]
1031 pub fn average_changes_per_run(&self) -> f64 {
1032 if self.total_runs == 0 {
1033 return 0.0;
1034 }
1035 self.total_changes as f64 / self.total_runs as f64
1036 }
1037 #[allow(dead_code)]
1038 pub fn success_rate(&self) -> f64 {
1039 if self.total_runs == 0 {
1040 return 0.0;
1041 }
1042 self.successful_runs as f64 / self.total_runs as f64
1043 }
1044 #[allow(dead_code)]
1045 pub fn format_summary(&self) -> String {
1046 format!(
1047 "Runs: {}/{}, Changes: {}, Time: {}ms",
1048 self.successful_runs, self.total_runs, self.total_changes, self.time_ms
1049 )
1050 }
1051}
#[allow(dead_code)]
/// Where an abstract pointer may point.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum PointsToTarget {
    /// A heap object, identified by id (presumably an allocation-site/node
    /// id issued by the flow graph — confirm against the producer).
    HeapObject(u32),
    /// A stack slot, identified by id.
    StackSlot(u32),
    /// A named global variable.
    GlobalVar(String),
    /// Target could not be determined.
    Unknown,
}
#[allow(dead_code)]
/// Collects per-field escape records to decide scalar replacement of structs.
pub struct StructFieldEscapeAnalyzer {
    /// All recorded field escape entries (possibly many struct types mixed).
    pub field_info: Vec<FieldEscapeInfo>,
}
1064impl StructFieldEscapeAnalyzer {
1065 #[allow(dead_code)]
1066 pub fn new() -> Self {
1067 StructFieldEscapeAnalyzer {
1068 field_info: Vec::new(),
1069 }
1070 }
1071 #[allow(dead_code)]
1072 pub fn add_field(&mut self, info: FieldEscapeInfo) {
1073 self.field_info.push(info);
1074 }
1075 #[allow(dead_code)]
1076 pub fn escaping_fields(&self) -> Vec<&FieldEscapeInfo> {
1077 self.field_info.iter().filter(|f| f.escapes()).collect()
1078 }
1079 #[allow(dead_code)]
1080 pub fn non_escaping_fields(&self) -> Vec<&FieldEscapeInfo> {
1081 self.field_info.iter().filter(|f| !f.escapes()).collect()
1082 }
1083 #[allow(dead_code)]
1084 pub fn can_scalar_replace_struct(&self, struct_type: &str) -> bool {
1085 let fields: Vec<_> = self
1086 .field_info
1087 .iter()
1088 .filter(|f| f.struct_type == struct_type)
1089 .collect();
1090 !fields.is_empty() && fields.iter().all(|f| !f.escapes())
1091 }
1092}
#[allow(dead_code)]
/// Dominator tree over `usize` node ids: immediate dominators, children
/// lists, and depths, all kept in sync by `set_idom`.
#[derive(Debug, Clone)]
pub struct OEExtDomTree {
    /// Immediate dominator of each node (`None` for roots/unset nodes).
    pub(super) idom: Vec<Option<usize>>,
    /// Children of each node in the dominator tree.
    pub(super) children: Vec<Vec<usize>>,
    /// Depth of each node (roots at 0).
    pub(super) depth: Vec<usize>,
}
impl OEExtDomTree {
    #[allow(dead_code)]
    /// Creates a tree for `n` nodes with no idoms assigned.
    pub fn new(n: usize) -> Self {
        Self {
            idom: vec![None; n],
            children: vec![Vec::new(); n],
            depth: vec![0; n],
        }
    }
    #[allow(dead_code)]
    /// Records `dom` as the immediate dominator of `node`, keeping the child
    /// list and depth table in sync. Out-of-range `node` is a no-op; calling
    /// twice for the same node leaves a stale child link behind (no cleanup).
    pub fn set_idom(&mut self, node: usize, dom: usize) {
        if node < self.idom.len() {
            self.idom[node] = Some(dom);
            if dom < self.children.len() {
                self.children[dom].push(node);
            }
            self.depth[node] = if dom < self.depth.len() {
                self.depth[dom] + 1
            } else {
                1
            };
        }
    }
    #[allow(dead_code)]
    /// True when `a` dominates `b` (reflexively). Walks the idom chain
    /// upward from `b`; the walk is bounded by the node count and a
    /// self-idom terminates it, so malformed chains cannot loop forever.
    pub fn dominates(&self, a: usize, mut b: usize) -> bool {
        if a == b {
            return true;
        }
        let n = self.idom.len();
        for _ in 0..n {
            match self.idom.get(b).copied().flatten() {
                None => return false,
                Some(p) if p == a => return true,
                Some(p) if p == b => return false,
                Some(p) => b = p,
            }
        }
        false
    }
    #[allow(dead_code)]
    /// Children of `n` in the dominator tree (empty when out of range).
    pub fn children_of(&self, n: usize) -> &[usize] {
        self.children.get(n).map(|v| v.as_slice()).unwrap_or(&[])
    }
    #[allow(dead_code)]
    /// Depth of `n` (0 for roots and out-of-range nodes).
    pub fn depth_of(&self, n: usize) -> usize {
        self.depth.get(n).copied().unwrap_or(0)
    }
    #[allow(dead_code)]
    /// Lowest common ancestor of `a` and `b`: repeatedly lifts the deeper
    /// node (ties lift `b`) via its idom. Bounded to `2 * n` steps, falling
    /// back to node 0 when the walks never meet (e.g. a partially built
    /// tree where a missing idom pins a node in place).
    pub fn lca(&self, mut a: usize, mut b: usize) -> usize {
        let n = self.idom.len();
        for _ in 0..(2 * n) {
            if a == b {
                return a;
            }
            if self.depth_of(a) > self.depth_of(b) {
                a = self.idom.get(a).and_then(|x| *x).unwrap_or(a);
            } else {
                b = self.idom.get(b).and_then(|x| *x).unwrap_or(b);
            }
        }
        0
    }
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
/// Escape classification of a variable or allocation site.
pub enum EscapeStatus {
    /// Provably never leaves its defining scope.
    NoEscape,
    /// Used locally (alias/projection) but stays within the function frame.
    LocalEscape,
    /// Stored into a heap object.
    HeapEscape,
    /// Escapes via the function's return value.
    ReturnEscape,
    /// Escapes as a call argument; the payload is the argument position
    /// (0 is used for the callee itself in this module).
    ArgumentEscape(usize),
    /// Classification unknown; treated conservatively as heap-allocated.
    Unknown,
}
1180impl EscapeStatus {
1181 pub fn is_heap_allocated(&self) -> bool {
1183 matches!(
1184 self,
1185 EscapeStatus::HeapEscape
1186 | EscapeStatus::ReturnEscape
1187 | EscapeStatus::ArgumentEscape(_)
1188 | EscapeStatus::Unknown
1189 )
1190 }
1191 pub fn can_stack_allocate(&self) -> bool {
1193 matches!(self, EscapeStatus::NoEscape | EscapeStatus::LocalEscape)
1194 }
1195}
#[allow(dead_code)]
/// Per-basic-block liveness sets (ext variant), indexed by block id.
#[derive(Debug, Clone, Default)]
pub struct OEExtLiveness {
    /// Variables live on entry to each block.
    pub live_in: Vec<Vec<usize>>,
    /// Variables live on exit from each block.
    pub live_out: Vec<Vec<usize>>,
    /// Variables defined in each block (deduplicated by `add_def`).
    pub defs: Vec<Vec<usize>>,
    /// Variables used in each block (deduplicated by `add_use`).
    pub uses: Vec<Vec<usize>>,
}
1205impl OEExtLiveness {
1206 #[allow(dead_code)]
1207 pub fn new(n: usize) -> Self {
1208 Self {
1209 live_in: vec![Vec::new(); n],
1210 live_out: vec![Vec::new(); n],
1211 defs: vec![Vec::new(); n],
1212 uses: vec![Vec::new(); n],
1213 }
1214 }
1215 #[allow(dead_code)]
1216 pub fn live_in(&self, b: usize, v: usize) -> bool {
1217 self.live_in.get(b).map(|s| s.contains(&v)).unwrap_or(false)
1218 }
1219 #[allow(dead_code)]
1220 pub fn live_out(&self, b: usize, v: usize) -> bool {
1221 self.live_out
1222 .get(b)
1223 .map(|s| s.contains(&v))
1224 .unwrap_or(false)
1225 }
1226 #[allow(dead_code)]
1227 pub fn add_def(&mut self, b: usize, v: usize) {
1228 if let Some(s) = self.defs.get_mut(b) {
1229 if !s.contains(&v) {
1230 s.push(v);
1231 }
1232 }
1233 }
1234 #[allow(dead_code)]
1235 pub fn add_use(&mut self, b: usize, v: usize) {
1236 if let Some(s) = self.uses.get_mut(b) {
1237 if !s.contains(&v) {
1238 s.push(v);
1239 }
1240 }
1241 }
1242 #[allow(dead_code)]
1243 pub fn var_is_used_in_block(&self, b: usize, v: usize) -> bool {
1244 self.uses.get(b).map(|s| s.contains(&v)).unwrap_or(false)
1245 }
1246 #[allow(dead_code)]
1247 pub fn var_is_def_in_block(&self, b: usize, v: usize) -> bool {
1248 self.defs.get(b).map(|s| s.contains(&v)).unwrap_or(false)
1249 }
1250}
#[allow(dead_code)]
/// Caller-visible escape behavior of a function, used interprocedurally.
#[derive(Debug, Clone, Default)]
pub struct EscapeSummary {
    /// Zero-based indices of parameters that escape.
    pub escaping_params: Vec<u32>,
    /// Whether the return value escapes.
    pub return_escapes: bool,
    /// Whether the function modifies global state.
    pub modifies_global: bool,
}
#[allow(dead_code)]
/// Configuration for one optimizer pass.
#[derive(Debug, Clone)]
pub struct OEPassConfig {
    /// Pipeline phase the pass runs in.
    pub phase: OEPassPhase,
    /// Whether the pass participates in runs.
    pub enabled: bool,
    /// Upper bound on fixpoint iterations.
    pub max_iterations: u32,
    /// Whether the pass emits debug output.
    pub debug_output: bool,
    /// Human-readable pass name.
    pub pass_name: String,
}
1267impl OEPassConfig {
1268 #[allow(dead_code)]
1269 pub fn new(name: impl Into<String>, phase: OEPassPhase) -> Self {
1270 OEPassConfig {
1271 phase,
1272 enabled: true,
1273 max_iterations: 10,
1274 debug_output: false,
1275 pass_name: name.into(),
1276 }
1277 }
1278 #[allow(dead_code)]
1279 pub fn disabled(mut self) -> Self {
1280 self.enabled = false;
1281 self
1282 }
1283 #[allow(dead_code)]
1284 pub fn with_debug(mut self) -> Self {
1285 self.debug_output = true;
1286 self
1287 }
1288 #[allow(dead_code)]
1289 pub fn max_iter(mut self, n: u32) -> Self {
1290 self.max_iterations = n;
1291 self
1292 }
1293}
/// Dependency graph over `u32` node ids, stored as an explicit node list
/// plus a flat `(dependency, dependent)` edge list.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct OEDepGraph {
    // Insertion-ordered node ids, deduplicated by `add_node`.
    pub(super) nodes: Vec<u32>,
    // Edges as `(dep, dependent)` pairs; duplicates are not filtered.
    pub(super) edges: Vec<(u32, u32)>,
}
1300impl OEDepGraph {
1301 #[allow(dead_code)]
1302 pub fn new() -> Self {
1303 OEDepGraph {
1304 nodes: Vec::new(),
1305 edges: Vec::new(),
1306 }
1307 }
1308 #[allow(dead_code)]
1309 pub fn add_node(&mut self, id: u32) {
1310 if !self.nodes.contains(&id) {
1311 self.nodes.push(id);
1312 }
1313 }
1314 #[allow(dead_code)]
1315 pub fn add_dep(&mut self, dep: u32, dependent: u32) {
1316 self.add_node(dep);
1317 self.add_node(dependent);
1318 self.edges.push((dep, dependent));
1319 }
1320 #[allow(dead_code)]
1321 pub fn dependents_of(&self, node: u32) -> Vec<u32> {
1322 self.edges
1323 .iter()
1324 .filter(|(d, _)| *d == node)
1325 .map(|(_, dep)| *dep)
1326 .collect()
1327 }
1328 #[allow(dead_code)]
1329 pub fn dependencies_of(&self, node: u32) -> Vec<u32> {
1330 self.edges
1331 .iter()
1332 .filter(|(_, dep)| *dep == node)
1333 .map(|(d, _)| *d)
1334 .collect()
1335 }
1336 #[allow(dead_code)]
1337 pub fn topological_sort(&self) -> Vec<u32> {
1338 let mut in_degree: std::collections::HashMap<u32, u32> = std::collections::HashMap::new();
1339 for &n in &self.nodes {
1340 in_degree.insert(n, 0);
1341 }
1342 for (_, dep) in &self.edges {
1343 *in_degree.entry(*dep).or_insert(0) += 1;
1344 }
1345 let mut queue: std::collections::VecDeque<u32> = self
1346 .nodes
1347 .iter()
1348 .filter(|&&n| in_degree[&n] == 0)
1349 .copied()
1350 .collect();
1351 let mut result = Vec::new();
1352 while let Some(node) = queue.pop_front() {
1353 result.push(node);
1354 for dep in self.dependents_of(node) {
1355 let cnt = in_degree.entry(dep).or_insert(0);
1356 *cnt = cnt.saturating_sub(1);
1357 if *cnt == 0 {
1358 queue.push_back(dep);
1359 }
1360 }
1361 }
1362 result
1363 }
1364 #[allow(dead_code)]
1365 pub fn has_cycle(&self) -> bool {
1366 self.topological_sort().len() < self.nodes.len()
1367 }
1368}
#[allow(dead_code)]
/// Scheduling phase for extended optimizer passes, ordered
/// Early < Middle < Late < Finalize (see `order`).
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum OEExtPassPhase {
    Early,
    Middle,
    Late,
    Finalize,
}
impl OEExtPassPhase {
    /// True for `Early`.
    #[allow(dead_code)]
    pub fn is_early(&self) -> bool {
        *self == Self::Early
    }
    /// True for `Middle`.
    #[allow(dead_code)]
    pub fn is_middle(&self) -> bool {
        *self == Self::Middle
    }
    /// True for `Late`.
    #[allow(dead_code)]
    pub fn is_late(&self) -> bool {
        *self == Self::Late
    }
    /// True for `Finalize`.
    #[allow(dead_code)]
    pub fn is_finalize(&self) -> bool {
        *self == Self::Finalize
    }
    /// Numeric schedule position: Early = 0 ... Finalize = 3.
    #[allow(dead_code)]
    pub fn order(&self) -> u32 {
        match self {
            Self::Early => 0,
            Self::Middle => 1,
            Self::Late => 2,
            Self::Finalize => 3,
        }
    }
    /// Inverse of `order`; `None` for values outside `0..=3`.
    #[allow(dead_code)]
    pub fn from_order(n: u32) -> Option<Self> {
        const ALL: [OEExtPassPhase; 4] = [
            OEExtPassPhase::Early,
            OEExtPassPhase::Middle,
            OEExtPassPhase::Late,
            OEExtPassPhase::Finalize,
        ];
        ALL.get(n as usize).cloned()
    }
}
/// One heap-allocation site discovered during escape analysis.
#[derive(Debug, Clone)]
pub struct AllocationSite {
    // Let-bound variable holding the allocation (see `new`).
    pub var: String,
    // Name of the enclosing function.
    pub func: String,
    // Estimated size in bytes (0 = unknown); compared against
    // `EscapeOptConfig::max_stack_size_bytes` by `StackAllocationOpt`.
    pub size_estimate: u64,
    // Current classification; `new` starts it at `EscapeStatus::Unknown`.
    pub status: EscapeStatus,
}
1427impl AllocationSite {
1428 pub fn new(var: impl Into<String>, func: impl Into<String>) -> Self {
1430 AllocationSite {
1431 var: var.into(),
1432 func: func.into(),
1433 size_estimate: 0,
1434 status: EscapeStatus::Unknown,
1435 }
1436 }
1437 pub fn set_status(&mut self, s: EscapeStatus) {
1439 self.status = s;
1440 }
1441}
/// A single cached analysis artifact keyed by string.
/// NOTE(review): nothing in this chunk reads or writes these entries —
/// confirm the owning cache before relying on field semantics.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct OECacheEntry {
    // Lookup key.
    pub key: String,
    // Opaque serialized payload.
    pub data: Vec<u8>,
    // Creation/update time; units not established here.
    pub timestamp: u64,
    // False once the entry has been invalidated.
    pub valid: bool,
}
/// Directed value-flow graph between named values, used to compute which
/// values are transitively reachable from a given source.
#[derive(Debug, Clone, Default)]
pub struct EscapeGraph {
    // Adjacency list: `from -> [to, ...]`; parallel edges are kept
    // (`add_edge` never deduplicates).
    pub(super) edges: HashMap<String, Vec<String>>,
}
1458impl EscapeGraph {
1459 pub fn new() -> Self {
1461 EscapeGraph {
1462 edges: HashMap::new(),
1463 }
1464 }
1465 pub fn add_edge(&mut self, from: &str, to: &str) {
1467 self.edges
1468 .entry(from.to_owned())
1469 .or_default()
1470 .push(to.to_owned());
1471 }
1472 pub fn reachable_from(&self, src: &str) -> HashSet<String> {
1474 let mut visited = HashSet::new();
1475 let mut worklist = vec![src.to_owned()];
1476 while let Some(node) = worklist.pop() {
1477 if visited.insert(node.clone()) {
1478 if let Some(neighbors) = self.edges.get(&node) {
1479 for n in neighbors {
1480 if !visited.contains(n) {
1481 worklist.push(n.clone());
1482 }
1483 }
1484 }
1485 }
1486 }
1487 visited
1488 }
1489}
/// FIFO worklist over dense indices `0..capacity` with O(1) duplicate
/// suppression via a presence bitmap.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct OEX2Worklist {
    // Pending items in queue order.
    pub(super) items: std::collections::VecDeque<usize>,
    // `present[id]` is true while `id` is queued; ids >= capacity are
    // silently rejected by `push`/`push_front`.
    pub(super) present: Vec<bool>,
}
1497impl OEX2Worklist {
1498 #[allow(dead_code)]
1499 pub fn new(capacity: usize) -> Self {
1500 Self {
1501 items: std::collections::VecDeque::new(),
1502 present: vec![false; capacity],
1503 }
1504 }
1505 #[allow(dead_code)]
1506 pub fn push(&mut self, id: usize) {
1507 if id < self.present.len() && !self.present[id] {
1508 self.present[id] = true;
1509 self.items.push_back(id);
1510 }
1511 }
1512 #[allow(dead_code)]
1513 pub fn push_front(&mut self, id: usize) {
1514 if id < self.present.len() && !self.present[id] {
1515 self.present[id] = true;
1516 self.items.push_front(id);
1517 }
1518 }
1519 #[allow(dead_code)]
1520 pub fn pop(&mut self) -> Option<usize> {
1521 let id = self.items.pop_front()?;
1522 if id < self.present.len() {
1523 self.present[id] = false;
1524 }
1525 Some(id)
1526 }
1527 #[allow(dead_code)]
1528 pub fn is_empty(&self) -> bool {
1529 self.items.is_empty()
1530 }
1531 #[allow(dead_code)]
1532 pub fn len(&self) -> usize {
1533 self.items.len()
1534 }
1535 #[allow(dead_code)]
1536 pub fn contains(&self, id: usize) -> bool {
1537 id < self.present.len() && self.present[id]
1538 }
1539 #[allow(dead_code)]
1540 pub fn drain_all(&mut self) -> Vec<usize> {
1541 let v: Vec<usize> = self.items.drain(..).collect();
1542 for &id in &v {
1543 if id < self.present.len() {
1544 self.present[id] = false;
1545 }
1546 }
1547 v
1548 }
1549}
/// Collects per-allocation optimization recommendations and filters them
/// by a confidence threshold.
#[allow(dead_code)]
pub struct EscapeOptimizationPass {
    // All recorded recommendations, in insertion order.
    pub results: Vec<EscapeOptimizationResult>,
    // Minimum confidence for `stack_promotable` (`new` defaults to 0.8).
    pub min_confidence: f64,
}
1555impl EscapeOptimizationPass {
1556 #[allow(dead_code)]
1557 pub fn new() -> Self {
1558 EscapeOptimizationPass {
1559 results: Vec::new(),
1560 min_confidence: 0.8,
1561 }
1562 }
1563 #[allow(dead_code)]
1564 pub fn add_result(&mut self, result: EscapeOptimizationResult) {
1565 self.results.push(result);
1566 }
1567 #[allow(dead_code)]
1568 pub fn stack_promotable(&self) -> Vec<&EscapeOptimizationResult> {
1569 self.results
1570 .iter()
1571 .filter(|r| {
1572 r.recommended_sink.is_stack_eligible() && r.confidence >= self.min_confidence
1573 })
1574 .collect()
1575 }
1576 #[allow(dead_code)]
1577 pub fn total_optimizations(&self) -> usize {
1578 self.results.len()
1579 }
1580 #[allow(dead_code)]
1581 pub fn emit_report(&self) -> String {
1582 let mut out = format!(
1583 "Escape Optimization Report: {} results\n",
1584 self.results.len()
1585 );
1586 let promotable = self.stack_promotable();
1587 out.push_str(&format!(
1588 " Stack-promotable allocations: {}\n",
1589 promotable.len()
1590 ));
1591 for r in &promotable {
1592 out.push_str(&format!(
1593 " Alloc #{}: {} (confidence: {:.0}%)\n",
1594 r.allocation_id,
1595 r.reason,
1596 r.confidence * 100.0
1597 ));
1598 }
1599 out
1600 }
1601}
/// Node of a `ConnectionGraph`: one value or allocation together with its
/// current escape state.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct ConnectionNode {
    // Node id, unique within the owning graph's map.
    pub id: u32,
    // Escape state; updated by `ConnectionGraph::propagate_escape`.
    pub escape_state: EscapeStatus,
    // Free-form node kind tag (set by `ConnectionGraph::add_node`).
    pub kind: String,
}
/// Directed escape-flow edge between two connection-graph node ids.
/// NOTE(review): not referenced by code in this chunk (`ConnectionGraph`
/// stores bare id pairs) — confirm the consumer before relying on it.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct EscapeFlowEdge {
    // Source node id.
    pub from: u32,
    // Destination node id.
    pub to: u32,
    // How the value flows (assignment, field write, call, ...).
    pub edge_kind: EscapeEdgeKind,
}
/// Driver that runs escape analysis over a set of functions and marks
/// allocations that can live on the stack.
#[derive(Debug, Default)]
pub struct StackAllocationOpt {
    // Underlying escape analyzer (declared elsewhere in the file).
    pub analyzer: EscapeAnalyzer,
    // Tunables: stack-alloc enable flag and per-site size budget.
    pub config: EscapeOptConfig,
}
1625impl StackAllocationOpt {
1626 pub fn new() -> Self {
1628 StackAllocationOpt {
1629 analyzer: EscapeAnalyzer::new(),
1630 config: EscapeOptConfig::default(),
1631 }
1632 }
1633 pub fn with_config(config: EscapeOptConfig) -> Self {
1635 StackAllocationOpt {
1636 analyzer: EscapeAnalyzer::new(),
1637 config,
1638 }
1639 }
1640 pub fn run(&mut self, decls: &mut [LcnfFunDecl]) {
1643 self.analyzer.analyze(decls);
1644 let names: Vec<String> = decls.iter().map(|d| d.name.clone()).collect();
1645 for decl in decls.iter_mut() {
1646 if let Some(analysis) = names
1647 .iter()
1648 .find(|n| *n == &decl.name)
1649 .and_then(|n| self.analyzer.results.get(n))
1650 {
1651 self.optimize_decl(decl, analysis);
1652 }
1653 }
1654 }
1655 pub fn optimize_decl(&self, decl: &mut LcnfFunDecl, analysis: &EscapeAnalysisResult) {
1657 if !self.config.enable_stack_alloc {
1658 return;
1659 }
1660 let candidates: HashSet<String> = analysis
1661 .stack_allocation_candidates()
1662 .into_iter()
1663 .filter(|site| site.size_estimate <= self.config.max_stack_size_bytes)
1664 .map(|site| site.var.clone())
1665 .collect();
1666 if !candidates.is_empty() {
1667 Self::mark_stack_allocated(&mut decl.body, &candidates);
1668 }
1669 }
1670 pub fn mark_stack_allocated(expr: &mut LcnfExpr, candidates: &HashSet<String>) {
1674 match expr {
1675 LcnfExpr::Let { name, body, .. } => {
1676 let _is_stack = candidates.contains(name.as_str());
1677 Self::mark_stack_allocated(body, candidates);
1678 }
1679 LcnfExpr::Case { alts, default, .. } => {
1680 for alt in alts.iter_mut() {
1681 Self::mark_stack_allocated(&mut alt.body, candidates);
1682 }
1683 if let Some(def) = default {
1684 Self::mark_stack_allocated(def, candidates);
1685 }
1686 }
1687 LcnfExpr::Return(_) | LcnfExpr::TailCall(_, _) | LcnfExpr::Unreachable => {}
1688 }
1689 }
1690 pub fn report(&self) -> EscapeReport {
1692 let mut rep = EscapeReport::default();
1693 for analysis in self.analyzer.results.values() {
1694 rep.total_allocations += analysis.allocations.len();
1695 for site in &analysis.allocations {
1696 match &site.status {
1697 EscapeStatus::NoEscape | EscapeStatus::LocalEscape => {
1698 rep.stack_allocated += 1;
1699 }
1700 EscapeStatus::HeapEscape
1701 | EscapeStatus::ReturnEscape
1702 | EscapeStatus::ArgumentEscape(_) => {
1703 rep.heap_allocated += 1;
1704 }
1705 EscapeStatus::Unknown => {
1706 rep.unknown += 1;
1707 }
1708 }
1709 }
1710 }
1711 rep
1712 }
1713}
/// Stores per-node escape annotations as an append-only `(node, annotation)`
/// list; `get_annotation` returns the earliest entry for a node.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct EscapeAnnotationPass {
    // Recorded annotations in insertion order; duplicates are allowed.
    pub annotated_nodes: Vec<(u32, EscapeAnnotation)>,
}
1719impl EscapeAnnotationPass {
1720 #[allow(dead_code)]
1721 pub fn new() -> Self {
1722 EscapeAnnotationPass {
1723 annotated_nodes: Vec::new(),
1724 }
1725 }
1726 #[allow(dead_code)]
1727 pub fn annotate(&mut self, node: u32, annotation: EscapeAnnotation) {
1728 self.annotated_nodes.push((node, annotation));
1729 }
1730 #[allow(dead_code)]
1731 pub fn get_annotation(&self, node: u32) -> Option<&EscapeAnnotation> {
1732 self.annotated_nodes
1733 .iter()
1734 .find(|(id, _)| *id == node)
1735 .map(|(_, a)| a)
1736 }
1737 #[allow(dead_code)]
1738 pub fn stack_promote_candidates(&self) -> Vec<u32> {
1739 self.annotated_nodes
1740 .iter()
1741 .filter(|(_, a)| matches!(a, EscapeAnnotation::StackAlloc))
1742 .map(|(id, _)| *id)
1743 .collect()
1744 }
1745}
/// One recommendation produced by escape-driven allocation optimization.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct EscapeOptimizationResult {
    // Id of the allocation this recommendation applies to.
    pub allocation_id: u32,
    // Original allocation kind (`new` sets "heap").
    pub original_kind: String,
    // Where the allocation should be placed instead.
    pub recommended_sink: AllocationSinkKind,
    // Confidence score; `new` starts at 1.0 (see `is_high_confidence`).
    pub confidence: f64,
    // Human-readable justification for the recommendation.
    pub reason: String,
}
1755impl EscapeOptimizationResult {
1756 #[allow(dead_code)]
1757 pub fn new(allocation_id: u32, sink: AllocationSinkKind, reason: impl Into<String>) -> Self {
1758 EscapeOptimizationResult {
1759 allocation_id,
1760 original_kind: "heap".to_string(),
1761 recommended_sink: sink,
1762 confidence: 1.0,
1763 reason: reason.into(),
1764 }
1765 }
1766 #[allow(dead_code)]
1767 pub fn with_confidence(mut self, c: f64) -> Self {
1768 self.confidence = c;
1769 self
1770 }
1771 #[allow(dead_code)]
1772 pub fn is_high_confidence(&self) -> bool {
1773 self.confidence >= 0.9
1774 }
1775}
#[allow(dead_code)]
/// Scheduling phase for OEX2 passes, ordered
/// Early < Middle < Late < Finalize (see `order`).
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum OEX2PassPhase {
    Early,
    Middle,
    Late,
    Finalize,
}
impl OEX2PassPhase {
    /// True for `Early`.
    #[allow(dead_code)]
    pub fn is_early(&self) -> bool {
        *self == Self::Early
    }
    /// True for `Middle`.
    #[allow(dead_code)]
    pub fn is_middle(&self) -> bool {
        *self == Self::Middle
    }
    /// True for `Late`.
    #[allow(dead_code)]
    pub fn is_late(&self) -> bool {
        *self == Self::Late
    }
    /// True for `Finalize`.
    #[allow(dead_code)]
    pub fn is_finalize(&self) -> bool {
        *self == Self::Finalize
    }
    /// Numeric schedule position: Early = 0 ... Finalize = 3.
    #[allow(dead_code)]
    pub fn order(&self) -> u32 {
        match self {
            Self::Early => 0,
            Self::Middle => 1,
            Self::Late => 2,
            Self::Finalize => 3,
        }
    }
    /// Inverse of `order`; `None` for values outside `0..=3`.
    #[allow(dead_code)]
    pub fn from_order(n: u32) -> Option<Self> {
        const ALL: [OEX2PassPhase; 4] = [
            OEX2PassPhase::Early,
            OEX2PassPhase::Middle,
            OEX2PassPhase::Late,
            OEX2PassPhase::Finalize,
        ];
        ALL.get(n as usize).cloned()
    }
}
/// Dominator tree over dense block indices, stored as immediate-dominator
/// links plus derived children lists and depths.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct OEX2DomTree {
    // `idom[b]` = immediate dominator of `b`; `None` for the root/unset.
    pub(super) idom: Vec<Option<usize>>,
    // Dominator-tree children of each node, in `set_idom` call order.
    pub(super) children: Vec<Vec<usize>>,
    // Depth in the tree; 0 for nodes with no idom set.
    pub(super) depth: Vec<usize>,
}
impl OEX2DomTree {
    /// Creates a tree for `n` nodes with no dominator links set.
    #[allow(dead_code)]
    pub fn new(n: usize) -> Self {
        Self {
            idom: vec![None; n],
            children: vec![Vec::new(); n],
            depth: vec![0; n],
        }
    }
    /// Records `dom` as the immediate dominator of `node`, appending to the
    /// children list and deriving `node`'s depth. Out-of-range `node` is
    /// silently ignored.
    /// NOTE(review): calling this twice for the same `node` appends a
    /// duplicate child entry — presumably each idom is set exactly once.
    #[allow(dead_code)]
    pub fn set_idom(&mut self, node: usize, dom: usize) {
        if node < self.idom.len() {
            self.idom[node] = Some(dom);
            if dom < self.children.len() {
                self.children[dom].push(node);
            }
            // Depth is parent depth + 1; an out-of-range parent gets depth 1.
            self.depth[node] = if dom < self.depth.len() {
                self.depth[dom] + 1
            } else {
                1
            };
        }
    }
    /// True when `a` dominates `b` (reflexively). Walks `b`'s idom chain,
    /// bounded by `idom.len()` steps so malformed (cyclic) link data cannot
    /// loop forever; a direct self-loop (`idom[b] == b`) returns false.
    #[allow(dead_code)]
    pub fn dominates(&self, a: usize, mut b: usize) -> bool {
        if a == b {
            return true;
        }
        let n = self.idom.len();
        for _ in 0..n {
            match self.idom.get(b).copied().flatten() {
                None => return false,
                Some(p) if p == a => return true,
                Some(p) if p == b => return false,
                Some(p) => b = p,
            }
        }
        false
    }
    /// Dominator-tree children of `n` (empty slice when out of range).
    #[allow(dead_code)]
    pub fn children_of(&self, n: usize) -> &[usize] {
        self.children.get(n).map(|v| v.as_slice()).unwrap_or(&[])
    }
    /// Depth of `n`, or 0 when out of range.
    #[allow(dead_code)]
    pub fn depth_of(&self, n: usize) -> usize {
        self.depth.get(n).copied().unwrap_or(0)
    }
    /// Lowest common ancestor of `a` and `b`: repeatedly lifts the deeper
    /// node to its idom (staying put when no idom is set). The walk is
    /// bounded by `2 * idom.len()` steps and falls back to 0 — presumably
    /// the root index — if it does not converge.
    #[allow(dead_code)]
    pub fn lca(&self, mut a: usize, mut b: usize) -> usize {
        let n = self.idom.len();
        for _ in 0..(2 * n) {
            if a == b {
                return a;
            }
            if self.depth_of(a) > self.depth_of(b) {
                a = self.idom.get(a).and_then(|x| *x).unwrap_or(a);
            } else {
                b = self.idom.get(b).and_then(|x| *x).unwrap_or(b);
            }
        }
        0
    }
}
1893#[allow(dead_code)]
1894pub struct EscapeAnalysisSummaryPrinter;
1895impl EscapeAnalysisSummaryPrinter {
1896 #[allow(dead_code)]
1897 pub fn print_result(result: &EscapeAnalysisResult) -> String {
1898 let mut out = String::from("=== Escape Analysis Result ===\n");
1899 out.push_str(&format!("Allocations: {}\n", result.allocations.len()));
1900 out.push_str(&format!("Escape sets: {}\n", result.escape_sets.len()));
1901 out.push_str(&format!("Function: {}\n", result.func_name));
1902 out
1903 }
1904 #[allow(dead_code)]
1905 pub fn print_report(report: &EscapeReport) -> String {
1906 let mut out = String::from("=== Escape Report ===\n");
1907 out.push_str(&format!(
1908 "Total allocations: {}\n",
1909 report.total_allocations
1910 ));
1911 out.push_str(&format!("Stack-allocated: {}\n", report.stack_allocated));
1912 out.push_str(&format!("Heap-allocated: {}\n", report.heap_allocated));
1913 out
1914 }
1915}
/// Per-node allocation decision attached by `EscapeAnnotationPass`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum EscapeAnnotation {
    // Allocation proven non-escaping; eligible for stack placement
    // (see `EscapeAnnotationPass::stack_promote_candidates`).
    StackAlloc,
    // Allocation escapes; must stay on the heap.
    HeapAlloc,
    // Analysis was inconclusive.
    Unknown,
}
#[allow(dead_code)]
/// Classic per-block liveness sets (live-in / live-out / def / use) over
/// `u32` variable ids, indexed by dense basic-block number.
#[derive(Debug, Clone)]
pub struct OELivenessInfo {
    pub live_in: Vec<std::collections::HashSet<u32>>,
    pub live_out: Vec<std::collections::HashSet<u32>>,
    pub defs: Vec<std::collections::HashSet<u32>>,
    pub uses: Vec<std::collections::HashSet<u32>>,
}
impl OELivenessInfo {
    /// Creates empty sets for `block_count` blocks.
    #[allow(dead_code)]
    pub fn new(block_count: usize) -> Self {
        let fresh = || vec![std::collections::HashSet::new(); block_count];
        Self {
            live_in: fresh(),
            live_out: fresh(),
            defs: fresh(),
            uses: fresh(),
        }
    }
    /// Records that `block` defines `var`; out-of-range blocks are ignored.
    #[allow(dead_code)]
    pub fn add_def(&mut self, block: usize, var: u32) {
        if let Some(set) = self.defs.get_mut(block) {
            set.insert(var);
        }
    }
    /// Records that `block` uses `var`; out-of-range blocks are ignored.
    #[allow(dead_code)]
    pub fn add_use(&mut self, block: usize, var: u32) {
        if let Some(set) = self.uses.get_mut(block) {
            set.insert(var);
        }
    }
    /// True when `var` is live on entry to `block` (false when out of range).
    #[allow(dead_code)]
    pub fn is_live_in(&self, block: usize, var: u32) -> bool {
        matches!(self.live_in.get(block), Some(s) if s.contains(&var))
    }
    /// True when `var` is live on exit from `block` (false when out of range).
    #[allow(dead_code)]
    pub fn is_live_out(&self, block: usize, var: u32) -> bool {
        matches!(self.live_out.get(block), Some(s) if s.contains(&var))
    }
}
/// Connection graph for escape propagation: nodes keyed by id plus deferred
/// edges along which escape states flow (see `propagate_escape`).
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct ConnectionGraph {
    // All nodes, keyed by id; `add_node` replaces on duplicate ids.
    pub nodes: std::collections::HashMap<u32, ConnectionNode>,
    // Deferred `(from, to)` edges; duplicates allowed.
    pub edges: Vec<(u32, u32)>,
}
1977impl ConnectionGraph {
1978 #[allow(dead_code)]
1979 pub fn new() -> Self {
1980 ConnectionGraph {
1981 nodes: std::collections::HashMap::new(),
1982 edges: Vec::new(),
1983 }
1984 }
1985 #[allow(dead_code)]
1986 pub fn add_node(&mut self, id: u32, kind: impl Into<String>) {
1987 self.nodes.insert(
1988 id,
1989 ConnectionNode {
1990 id,
1991 escape_state: EscapeStatus::NoEscape,
1992 kind: kind.into(),
1993 },
1994 );
1995 }
1996 #[allow(dead_code)]
1997 pub fn add_deferred_edge(&mut self, from: u32, to: u32) {
1998 self.edges.push((from, to));
1999 }
2000 #[allow(dead_code)]
2001 pub fn propagate_escape(&mut self) {
2002 let mut changed = true;
2003 while changed {
2004 changed = false;
2005 let edges_copy = self.edges.clone();
2006 for (from, to) in edges_copy {
2007 let from_state = self.nodes.get(&from).map(|n| n.escape_state.clone());
2008 if let Some(state) = from_state {
2009 if let Some(to_node) = self.nodes.get_mut(&to) {
2010 match (&state, &to_node.escape_state) {
2011 (EscapeStatus::HeapEscape, EscapeStatus::NoEscape)
2012 | (EscapeStatus::ReturnEscape, EscapeStatus::NoEscape) => {
2013 to_node.escape_state = state;
2014 changed = true;
2015 }
2016 _ => {}
2017 }
2018 }
2019 }
2020 }
2021 }
2022 }
2023 #[allow(dead_code)]
2024 pub fn non_escaping_allocations(&self) -> Vec<u32> {
2025 self.nodes
2026 .iter()
2027 .filter(|(_, n)| matches!(n.escape_state, EscapeStatus::NoEscape))
2028 .map(|(id, _)| *id)
2029 .collect()
2030 }
2031}
/// FIFO worklist over `u32` ids with set-based duplicate suppression
/// (sparse counterpart of the dense `OEX2Worklist`).
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct OEWorklist {
    // Pending items in queue order.
    pub(super) items: std::collections::VecDeque<u32>,
    // Membership set mirroring `items`.
    pub(super) in_worklist: std::collections::HashSet<u32>,
}
2038impl OEWorklist {
2039 #[allow(dead_code)]
2040 pub fn new() -> Self {
2041 OEWorklist {
2042 items: std::collections::VecDeque::new(),
2043 in_worklist: std::collections::HashSet::new(),
2044 }
2045 }
2046 #[allow(dead_code)]
2047 pub fn push(&mut self, item: u32) -> bool {
2048 if self.in_worklist.insert(item) {
2049 self.items.push_back(item);
2050 true
2051 } else {
2052 false
2053 }
2054 }
2055 #[allow(dead_code)]
2056 pub fn pop(&mut self) -> Option<u32> {
2057 let item = self.items.pop_front()?;
2058 self.in_worklist.remove(&item);
2059 Some(item)
2060 }
2061 #[allow(dead_code)]
2062 pub fn is_empty(&self) -> bool {
2063 self.items.is_empty()
2064 }
2065 #[allow(dead_code)]
2066 pub fn len(&self) -> usize {
2067 self.items.len()
2068 }
2069 #[allow(dead_code)]
2070 pub fn contains(&self, item: u32) -> bool {
2071 self.in_worklist.contains(&item)
2072 }
2073}
/// Constant folder for `i64` arithmetic that tallies successful folds and
/// failed attempts.
#[allow(dead_code)]
#[derive(Debug, Clone, Default)]
pub struct OEExtConstFolder {
    // Count of attempts recorded as successful folds.
    pub(super) folds: usize,
    // Count of attempts recorded as failures.
    pub(super) failures: usize,
    // On/off switch. NOTE(review): the fold methods in this chunk never
    // consult it — confirm that callers gate on `is_enabled`.
    pub(super) enabled: bool,
}
2082impl OEExtConstFolder {
2083 #[allow(dead_code)]
2084 pub fn new() -> Self {
2085 Self {
2086 folds: 0,
2087 failures: 0,
2088 enabled: true,
2089 }
2090 }
2091 #[allow(dead_code)]
2092 pub fn add_i64(&mut self, a: i64, b: i64) -> Option<i64> {
2093 self.folds += 1;
2094 a.checked_add(b)
2095 }
2096 #[allow(dead_code)]
2097 pub fn sub_i64(&mut self, a: i64, b: i64) -> Option<i64> {
2098 self.folds += 1;
2099 a.checked_sub(b)
2100 }
2101 #[allow(dead_code)]
2102 pub fn mul_i64(&mut self, a: i64, b: i64) -> Option<i64> {
2103 self.folds += 1;
2104 a.checked_mul(b)
2105 }
2106 #[allow(dead_code)]
2107 pub fn div_i64(&mut self, a: i64, b: i64) -> Option<i64> {
2108 if b == 0 {
2109 self.failures += 1;
2110 None
2111 } else {
2112 self.folds += 1;
2113 a.checked_div(b)
2114 }
2115 }
2116 #[allow(dead_code)]
2117 pub fn rem_i64(&mut self, a: i64, b: i64) -> Option<i64> {
2118 if b == 0 {
2119 self.failures += 1;
2120 None
2121 } else {
2122 self.folds += 1;
2123 a.checked_rem(b)
2124 }
2125 }
2126 #[allow(dead_code)]
2127 pub fn neg_i64(&mut self, a: i64) -> Option<i64> {
2128 self.folds += 1;
2129 a.checked_neg()
2130 }
2131 #[allow(dead_code)]
2132 pub fn shl_i64(&mut self, a: i64, s: u32) -> Option<i64> {
2133 if s >= 64 {
2134 self.failures += 1;
2135 None
2136 } else {
2137 self.folds += 1;
2138 a.checked_shl(s)
2139 }
2140 }
2141 #[allow(dead_code)]
2142 pub fn shr_i64(&mut self, a: i64, s: u32) -> Option<i64> {
2143 if s >= 64 {
2144 self.failures += 1;
2145 None
2146 } else {
2147 self.folds += 1;
2148 a.checked_shr(s)
2149 }
2150 }
2151 #[allow(dead_code)]
2152 pub fn and_i64(&mut self, a: i64, b: i64) -> i64 {
2153 self.folds += 1;
2154 a & b
2155 }
2156 #[allow(dead_code)]
2157 pub fn or_i64(&mut self, a: i64, b: i64) -> i64 {
2158 self.folds += 1;
2159 a | b
2160 }
2161 #[allow(dead_code)]
2162 pub fn xor_i64(&mut self, a: i64, b: i64) -> i64 {
2163 self.folds += 1;
2164 a ^ b
2165 }
2166 #[allow(dead_code)]
2167 pub fn not_i64(&mut self, a: i64) -> i64 {
2168 self.folds += 1;
2169 !a
2170 }
2171 #[allow(dead_code)]
2172 pub fn fold_count(&self) -> usize {
2173 self.folds
2174 }
2175 #[allow(dead_code)]
2176 pub fn failure_count(&self) -> usize {
2177 self.failures
2178 }
2179 #[allow(dead_code)]
2180 pub fn enable(&mut self) {
2181 self.enabled = true;
2182 }
2183 #[allow(dead_code)]
2184 pub fn disable(&mut self) {
2185 self.enabled = false;
2186 }
2187 #[allow(dead_code)]
2188 pub fn is_enabled(&self) -> bool {
2189 self.enabled
2190 }
2191}
#[allow(dead_code)]
/// Where an allocation is recommended to live after escape analysis.
#[derive(Debug, Clone, PartialEq)]
pub enum AllocationSinkKind {
    Stack,
    ThreadLocal,
    ArenaAllocated,
    HeapLongLived,
    HeapShortLived,
}
impl AllocationSinkKind {
    /// True for sinks that avoid the general heap (stack or arena).
    #[allow(dead_code)]
    pub fn is_stack_eligible(&self) -> bool {
        match self {
            Self::Stack | Self::ArenaAllocated => true,
            Self::ThreadLocal | Self::HeapLongLived | Self::HeapShortLived => false,
        }
    }
    /// Short human-readable label for reports.
    #[allow(dead_code)]
    pub fn description(&self) -> &str {
        match self {
            Self::Stack => "stack allocation",
            Self::ThreadLocal => "thread-local storage",
            Self::ArenaAllocated => "arena allocation",
            Self::HeapLongLived => "long-lived heap",
            Self::HeapShortLived => "short-lived heap",
        }
    }
}
#[allow(dead_code)]
/// Book-keeping for refcount operations removed thanks to escape analysis.
#[derive(Debug, Clone)]
pub struct EscapeBasedRefCountOpt {
    pub eliminated_increments: u32,
    pub eliminated_decrements: u32,
    pub replaced_with_stack: Vec<u32>,
}
impl EscapeBasedRefCountOpt {
    /// Creates zeroed statistics.
    #[allow(dead_code)]
    pub fn new() -> Self {
        Self {
            replaced_with_stack: Vec::new(),
            eliminated_decrements: 0,
            eliminated_increments: 0,
        }
    }
    /// Records removal of one matched retain/release pair (bumps both the
    /// increment and decrement counters).
    #[allow(dead_code)]
    pub fn record_elimination(&mut self) {
        self.eliminated_increments += 1;
        self.eliminated_decrements += 1;
    }
    /// Records that allocation `alloc_id` was promoted to a stack slot.
    #[allow(dead_code)]
    pub fn record_stack_replace(&mut self, alloc_id: u32) {
        self.replaced_with_stack.push(alloc_id);
    }
    /// Total refcount operations removed (increments plus decrements).
    #[allow(dead_code)]
    pub fn total_eliminated(&self) -> u32 {
        self.eliminated_increments + self.eliminated_decrements
    }
    /// One-line human-readable summary of the savings.
    #[allow(dead_code)]
    pub fn savings_report(&self) -> String {
        format!(
            "Eliminated {} retain/release pairs, {} stack promotions",
            self.eliminated_increments,
            self.replaced_with_stack.len()
        )
    }
}
#[allow(dead_code)]
/// Tiny invalidation-aware byte cache; entries are
/// `(key, payload, valid, per-entry hit count)` tuples scanned linearly.
#[derive(Debug)]
pub struct OEX2Cache {
    // Entries in insertion order; duplicate keys are possible (`put` never
    // checks) and `get` returns the first valid match.
    pub(super) entries: Vec<(u64, Vec<u8>, bool, u32)>,
    // Soft capacity enforced by `put`'s eviction.
    pub(super) cap: usize,
    // Lifetime hit counter (survives invalidation).
    pub(super) total_hits: u64,
    // Lifetime miss counter.
    pub(super) total_misses: u64,
}
2267impl OEX2Cache {
2268 #[allow(dead_code)]
2269 pub fn new(cap: usize) -> Self {
2270 Self {
2271 entries: Vec::new(),
2272 cap,
2273 total_hits: 0,
2274 total_misses: 0,
2275 }
2276 }
2277 #[allow(dead_code)]
2278 pub fn get(&mut self, key: u64) -> Option<&[u8]> {
2279 for e in self.entries.iter_mut() {
2280 if e.0 == key && e.2 {
2281 e.3 += 1;
2282 self.total_hits += 1;
2283 return Some(&e.1);
2284 }
2285 }
2286 self.total_misses += 1;
2287 None
2288 }
2289 #[allow(dead_code)]
2290 pub fn put(&mut self, key: u64, data: Vec<u8>) {
2291 if self.entries.len() >= self.cap {
2292 self.entries.retain(|e| e.2);
2293 if self.entries.len() >= self.cap {
2294 self.entries.remove(0);
2295 }
2296 }
2297 self.entries.push((key, data, true, 0));
2298 }
2299 #[allow(dead_code)]
2300 pub fn invalidate(&mut self) {
2301 for e in self.entries.iter_mut() {
2302 e.2 = false;
2303 }
2304 }
2305 #[allow(dead_code)]
2306 pub fn hit_rate(&self) -> f64 {
2307 let t = self.total_hits + self.total_misses;
2308 if t == 0 {
2309 0.0
2310 } else {
2311 self.total_hits as f64 / t as f64
2312 }
2313 }
2314 #[allow(dead_code)]
2315 pub fn live_count(&self) -> usize {
2316 self.entries.iter().filter(|e| e.2).count()
2317 }
2318}
#[allow(dead_code)]
/// Lifecycle phase of a classic optimizer pass.
#[derive(Debug, Clone, PartialEq)]
pub enum OEPassPhase {
    Analysis,
    Transformation,
    Verification,
    Cleanup,
}
impl OEPassPhase {
    /// Lower-case phase label for logs and reports.
    #[allow(dead_code)]
    pub fn name(&self) -> &str {
        match self {
            Self::Analysis => "analysis",
            Self::Transformation => "transformation",
            Self::Verification => "verification",
            Self::Cleanup => "cleanup",
        }
    }
    /// True for phases that may mutate the IR (transformation and cleanup).
    #[allow(dead_code)]
    pub fn is_modifying(&self) -> bool {
        match self {
            Self::Transformation | Self::Cleanup => true,
            Self::Analysis | Self::Verification => false,
        }
    }
}
/// Tunables for `StackAllocationOpt`.
/// NOTE(review): `Default` is used (`EscapeOptConfig::default()` in
/// `StackAllocationOpt::new`) but not derived here — presumably implemented
/// manually elsewhere in the file; confirm before adding a derive.
#[derive(Debug, Clone)]
pub struct EscapeOptConfig {
    // Master switch for the stack-allocation rewrite.
    pub enable_stack_alloc: bool,
    // Largest allocation (bytes) eligible for stack placement.
    pub max_stack_size_bytes: u64,
    // Aggressive-mode flag; not consulted by code in this chunk.
    pub aggressive_mode: bool,
}
/// A points-to set: the abstract targets a pointer value may reference.
#[allow(dead_code)]
#[derive(Debug, Clone, Default)]
pub struct PointsToSet2 {
    // Distinct possible targets (`PointsToTarget` declared elsewhere).
    pub targets: std::collections::HashSet<PointsToTarget>,
}
2357impl PointsToSet2 {
2358 #[allow(dead_code)]
2359 pub fn new() -> Self {
2360 Self::default()
2361 }
2362 #[allow(dead_code)]
2363 pub fn add(&mut self, target: PointsToTarget) -> bool {
2364 self.targets.insert(target)
2365 }
2366 #[allow(dead_code)]
2367 pub fn may_alias(&self, other: &PointsToSet2) -> bool {
2368 self.targets.iter().any(|t| other.targets.contains(t))
2369 }
2370 #[allow(dead_code)]
2371 pub fn is_empty(&self) -> bool {
2372 self.targets.is_empty()
2373 }
2374 #[allow(dead_code)]
2375 pub fn len(&self) -> usize {
2376 self.targets.len()
2377 }
2378 #[allow(dead_code)]
2379 pub fn union(&mut self, other: &PointsToSet2) -> bool {
2380 let before = self.targets.len();
2381 self.targets.extend(other.targets.iter().cloned());
2382 self.targets.len() > before
2383 }
2384}
#[allow(dead_code)]
/// Stateless constant-folding helpers (statistics-free counterpart of
/// `OEExtConstFolder`). `None` means the fold must be rejected: overflow,
/// division by zero, or an oversized shift.
pub struct OEConstantFoldingHelper;
impl OEConstantFoldingHelper {
    /// Overflow-checked `a + b`.
    #[allow(dead_code)]
    pub fn fold_add_i64(a: i64, b: i64) -> Option<i64> {
        a.checked_add(b)
    }
    /// Overflow-checked `a - b`.
    #[allow(dead_code)]
    pub fn fold_sub_i64(a: i64, b: i64) -> Option<i64> {
        a.checked_sub(b)
    }
    /// Overflow-checked `a * b`.
    #[allow(dead_code)]
    pub fn fold_mul_i64(a: i64, b: i64) -> Option<i64> {
        a.checked_mul(b)
    }
    /// Checked `a / b`; `None` for `b == 0` and `i64::MIN / -1`
    /// (both handled by `checked_div`, so no separate zero test is needed).
    #[allow(dead_code)]
    pub fn fold_div_i64(a: i64, b: i64) -> Option<i64> {
        a.checked_div(b)
    }
    /// IEEE-754 `a + b` (never fails; NaN/inf propagate).
    #[allow(dead_code)]
    pub fn fold_add_f64(a: f64, b: f64) -> f64 {
        a + b
    }
    /// IEEE-754 `a * b` (never fails; NaN/inf propagate).
    #[allow(dead_code)]
    pub fn fold_mul_f64(a: f64, b: f64) -> f64 {
        a * b
    }
    /// Overflow-checked negation (`None` only for `i64::MIN`).
    #[allow(dead_code)]
    pub fn fold_neg_i64(a: i64) -> Option<i64> {
        a.checked_neg()
    }
    /// Logical NOT.
    #[allow(dead_code)]
    pub fn fold_not_bool(a: bool) -> bool {
        !a
    }
    /// Logical AND.
    #[allow(dead_code)]
    pub fn fold_and_bool(a: bool, b: bool) -> bool {
        a && b
    }
    /// Logical OR.
    #[allow(dead_code)]
    pub fn fold_or_bool(a: bool, b: bool) -> bool {
        a || b
    }
    /// Checked left shift; `None` for `b >= 64`.
    #[allow(dead_code)]
    pub fn fold_shl_i64(a: i64, b: u32) -> Option<i64> {
        a.checked_shl(b)
    }
    /// Checked arithmetic right shift; `None` for `b >= 64`.
    #[allow(dead_code)]
    pub fn fold_shr_i64(a: i64, b: u32) -> Option<i64> {
        a.checked_shr(b)
    }
    /// Checked `a % b`; `None` for `b == 0` and `i64::MIN % -1`.
    ///
    /// Fix: the previous `Some(a % b)` overflowed for `i64::MIN % -1`
    /// (panic in debug builds, wrap in release); `checked_rem` returns
    /// `None` there, matching `fold_div_i64`'s checked style.
    #[allow(dead_code)]
    pub fn fold_rem_i64(a: i64, b: i64) -> Option<i64> {
        a.checked_rem(b)
    }
    /// Bitwise AND (infallible).
    #[allow(dead_code)]
    pub fn fold_bitand_i64(a: i64, b: i64) -> i64 {
        a & b
    }
    /// Bitwise OR (infallible).
    #[allow(dead_code)]
    pub fn fold_bitor_i64(a: i64, b: i64) -> i64 {
        a | b
    }
    /// Bitwise XOR (infallible).
    #[allow(dead_code)]
    pub fn fold_bitxor_i64(a: i64, b: i64) -> i64 {
        a ^ b
    }
    /// Bitwise NOT (infallible).
    #[allow(dead_code)]
    pub fn fold_bitnot_i64(a: i64) -> i64 {
        !a
    }
}
/// Per-pass configuration for the OEX2 pass pipeline.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct OEX2PassConfig {
    // Pass name.
    pub name: String,
    // Scheduling phase (`new` defaults to `Middle`).
    pub phase: OEX2PassPhase,
    // Intended to gate execution (see `OEX2PassRegistry::enabled_passes`).
    pub enabled: bool,
    // Fixpoint iteration cap (`new` defaults to 100).
    pub max_iterations: usize,
    // Debug verbosity; 0 means off (see `is_debug_enabled`).
    pub debug: u32,
    // Optional per-pass time budget in milliseconds.
    pub timeout_ms: Option<u64>,
}
2476impl OEX2PassConfig {
2477 #[allow(dead_code)]
2478 pub fn new(name: impl Into<String>) -> Self {
2479 Self {
2480 name: name.into(),
2481 phase: OEX2PassPhase::Middle,
2482 enabled: true,
2483 max_iterations: 100,
2484 debug: 0,
2485 timeout_ms: None,
2486 }
2487 }
2488 #[allow(dead_code)]
2489 pub fn with_phase(mut self, phase: OEX2PassPhase) -> Self {
2490 self.phase = phase;
2491 self
2492 }
2493 #[allow(dead_code)]
2494 pub fn with_max_iter(mut self, n: usize) -> Self {
2495 self.max_iterations = n;
2496 self
2497 }
2498 #[allow(dead_code)]
2499 pub fn with_debug(mut self, d: u32) -> Self {
2500 self.debug = d;
2501 self
2502 }
2503 #[allow(dead_code)]
2504 pub fn disabled(mut self) -> Self {
2505 self.enabled = false;
2506 self
2507 }
2508 #[allow(dead_code)]
2509 pub fn with_timeout(mut self, ms: u64) -> Self {
2510 self.timeout_ms = Some(ms);
2511 self
2512 }
2513 #[allow(dead_code)]
2514 pub fn is_debug_enabled(&self) -> bool {
2515 self.debug > 0
2516 }
2517}
/// Registry of OEX2 pass configs with index-aligned per-pass statistics.
#[allow(dead_code)]
#[derive(Debug, Default)]
pub struct OEX2PassRegistry {
    // Registered configs in registration order.
    pub(super) configs: Vec<OEX2PassConfig>,
    // Stats slot `i` belongs to config `i` (`register` keeps them aligned);
    // `OEX2PassStats` is declared elsewhere in the file.
    pub(super) stats: Vec<OEX2PassStats>,
}
2525impl OEX2PassRegistry {
2526 #[allow(dead_code)]
2527 pub fn new() -> Self {
2528 Self::default()
2529 }
2530 #[allow(dead_code)]
2531 pub fn register(&mut self, c: OEX2PassConfig) {
2532 self.stats.push(OEX2PassStats::new());
2533 self.configs.push(c);
2534 }
2535 #[allow(dead_code)]
2536 pub fn len(&self) -> usize {
2537 self.configs.len()
2538 }
2539 #[allow(dead_code)]
2540 pub fn is_empty(&self) -> bool {
2541 self.configs.is_empty()
2542 }
2543 #[allow(dead_code)]
2544 pub fn get(&self, i: usize) -> Option<&OEX2PassConfig> {
2545 self.configs.get(i)
2546 }
2547 #[allow(dead_code)]
2548 pub fn get_stats(&self, i: usize) -> Option<&OEX2PassStats> {
2549 self.stats.get(i)
2550 }
2551 #[allow(dead_code)]
2552 pub fn enabled_passes(&self) -> Vec<&OEX2PassConfig> {
2553 self.configs.iter().filter(|c| c.enabled).collect()
2554 }
2555 #[allow(dead_code)]
2556 pub fn passes_in_phase(&self, ph: &OEX2PassPhase) -> Vec<&OEX2PassConfig> {
2557 self.configs
2558 .iter()
2559 .filter(|c| c.enabled && &c.phase == ph)
2560 .collect()
2561 }
2562 #[allow(dead_code)]
2563 pub fn total_nodes_visited(&self) -> usize {
2564 self.stats.iter().map(|s| s.nodes_visited).sum()
2565 }
2566 #[allow(dead_code)]
2567 pub fn any_changed(&self) -> bool {
2568 self.stats.iter().any(|s| s.changed)
2569 }
2570}
#[allow(dead_code)]
/// Classifies how a value can escape its defining scope; used as the edge
/// label in escape analysis.
///
/// NOTE(review): per-variant semantics below are inferred from the variant
/// names only — confirm against the pass that constructs these edges.
#[derive(Debug, Clone)]
pub enum EscapeEdgeKind {
    /// Plain assignment from one binding to another.
    DirectAssign,
    /// Write into the named field of an object/struct.
    FieldWrite { field: String },
    /// Read from the named field of an object/struct.
    FieldRead { field: String },
    /// Store into an array element.
    ArrayWrite,
    /// Load from an array element.
    ArrayRead,
    /// Value escapes by being returned from the function.
    Return,
    /// Value passed as the `arg_index`-th argument of a call.
    CallArg { arg_index: u32 },
    /// Value produced as a call's return value.
    CallRet,
    /// Value captured by a lambda/closure.
    CapturedByLambda,
    /// Value written to a global.
    GlobalWrite,
}
2585#[allow(dead_code)]
2587#[derive(Debug)]
2588pub struct OEExtCache {
2589 pub(super) entries: Vec<(u64, Vec<u8>, bool, u32)>,
2590 pub(super) cap: usize,
2591 pub(super) total_hits: u64,
2592 pub(super) total_misses: u64,
2593}
2594impl OEExtCache {
2595 #[allow(dead_code)]
2596 pub fn new(cap: usize) -> Self {
2597 Self {
2598 entries: Vec::new(),
2599 cap,
2600 total_hits: 0,
2601 total_misses: 0,
2602 }
2603 }
2604 #[allow(dead_code)]
2605 pub fn get(&mut self, key: u64) -> Option<&[u8]> {
2606 for e in self.entries.iter_mut() {
2607 if e.0 == key && e.2 {
2608 e.3 += 1;
2609 self.total_hits += 1;
2610 return Some(&e.1);
2611 }
2612 }
2613 self.total_misses += 1;
2614 None
2615 }
2616 #[allow(dead_code)]
2617 pub fn put(&mut self, key: u64, data: Vec<u8>) {
2618 if self.entries.len() >= self.cap {
2619 self.entries.retain(|e| e.2);
2620 if self.entries.len() >= self.cap {
2621 self.entries.remove(0);
2622 }
2623 }
2624 self.entries.push((key, data, true, 0));
2625 }
2626 #[allow(dead_code)]
2627 pub fn invalidate(&mut self) {
2628 for e in self.entries.iter_mut() {
2629 e.2 = false;
2630 }
2631 }
2632 #[allow(dead_code)]
2633 pub fn hit_rate(&self) -> f64 {
2634 let t = self.total_hits + self.total_misses;
2635 if t == 0 {
2636 0.0
2637 } else {
2638 self.total_hits as f64 / t as f64
2639 }
2640 }
2641 #[allow(dead_code)]
2642 pub fn live_count(&self) -> usize {
2643 self.entries.iter().filter(|e| e.2).count()
2644 }
2645}
2646#[derive(Debug, Clone, Default)]
2648pub struct EscapeSet {
2649 pub(super) escapees: HashSet<String>,
2650}
2651impl EscapeSet {
2652 pub fn new() -> Self {
2654 EscapeSet {
2655 escapees: HashSet::new(),
2656 }
2657 }
2658 pub fn insert(&mut self, var: &str) {
2660 self.escapees.insert(var.to_owned());
2661 }
2662 pub fn contains(&self, var: &str) -> bool {
2664 self.escapees.contains(var)
2665 }
2666 pub fn len(&self) -> usize {
2668 self.escapees.len()
2669 }
2670 pub fn is_empty(&self) -> bool {
2672 self.escapees.is_empty()
2673 }
2674}
2675#[allow(dead_code)]
2676pub struct OEPassRegistry {
2677 pub(super) configs: Vec<OEPassConfig>,
2678 pub(super) stats: std::collections::HashMap<String, OEPassStats>,
2679}
2680impl OEPassRegistry {
2681 #[allow(dead_code)]
2682 pub fn new() -> Self {
2683 OEPassRegistry {
2684 configs: Vec::new(),
2685 stats: std::collections::HashMap::new(),
2686 }
2687 }
2688 #[allow(dead_code)]
2689 pub fn register(&mut self, config: OEPassConfig) {
2690 self.stats
2691 .insert(config.pass_name.clone(), OEPassStats::new());
2692 self.configs.push(config);
2693 }
2694 #[allow(dead_code)]
2695 pub fn enabled_passes(&self) -> Vec<&OEPassConfig> {
2696 self.configs.iter().filter(|c| c.enabled).collect()
2697 }
2698 #[allow(dead_code)]
2699 pub fn get_stats(&self, name: &str) -> Option<&OEPassStats> {
2700 self.stats.get(name)
2701 }
2702 #[allow(dead_code)]
2703 pub fn total_passes(&self) -> usize {
2704 self.configs.len()
2705 }
2706 #[allow(dead_code)]
2707 pub fn enabled_count(&self) -> usize {
2708 self.enabled_passes().len()
2709 }
2710 #[allow(dead_code)]
2711 pub fn update_stats(&mut self, name: &str, changes: u64, time_ms: u64, iter: u32) {
2712 if let Some(stats) = self.stats.get_mut(name) {
2713 stats.record_run(changes, time_ms, iter);
2714 }
2715 }
2716}
2717#[allow(dead_code)]
2719#[derive(Debug, Clone, Default)]
2720pub struct OEX2ConstFolder {
2721 pub(super) folds: usize,
2722 pub(super) failures: usize,
2723 pub(super) enabled: bool,
2724}
2725impl OEX2ConstFolder {
2726 #[allow(dead_code)]
2727 pub fn new() -> Self {
2728 Self {
2729 folds: 0,
2730 failures: 0,
2731 enabled: true,
2732 }
2733 }
2734 #[allow(dead_code)]
2735 pub fn add_i64(&mut self, a: i64, b: i64) -> Option<i64> {
2736 self.folds += 1;
2737 a.checked_add(b)
2738 }
2739 #[allow(dead_code)]
2740 pub fn sub_i64(&mut self, a: i64, b: i64) -> Option<i64> {
2741 self.folds += 1;
2742 a.checked_sub(b)
2743 }
2744 #[allow(dead_code)]
2745 pub fn mul_i64(&mut self, a: i64, b: i64) -> Option<i64> {
2746 self.folds += 1;
2747 a.checked_mul(b)
2748 }
2749 #[allow(dead_code)]
2750 pub fn div_i64(&mut self, a: i64, b: i64) -> Option<i64> {
2751 if b == 0 {
2752 self.failures += 1;
2753 None
2754 } else {
2755 self.folds += 1;
2756 a.checked_div(b)
2757 }
2758 }
2759 #[allow(dead_code)]
2760 pub fn rem_i64(&mut self, a: i64, b: i64) -> Option<i64> {
2761 if b == 0 {
2762 self.failures += 1;
2763 None
2764 } else {
2765 self.folds += 1;
2766 a.checked_rem(b)
2767 }
2768 }
2769 #[allow(dead_code)]
2770 pub fn neg_i64(&mut self, a: i64) -> Option<i64> {
2771 self.folds += 1;
2772 a.checked_neg()
2773 }
2774 #[allow(dead_code)]
2775 pub fn shl_i64(&mut self, a: i64, s: u32) -> Option<i64> {
2776 if s >= 64 {
2777 self.failures += 1;
2778 None
2779 } else {
2780 self.folds += 1;
2781 a.checked_shl(s)
2782 }
2783 }
2784 #[allow(dead_code)]
2785 pub fn shr_i64(&mut self, a: i64, s: u32) -> Option<i64> {
2786 if s >= 64 {
2787 self.failures += 1;
2788 None
2789 } else {
2790 self.folds += 1;
2791 a.checked_shr(s)
2792 }
2793 }
2794 #[allow(dead_code)]
2795 pub fn and_i64(&mut self, a: i64, b: i64) -> i64 {
2796 self.folds += 1;
2797 a & b
2798 }
2799 #[allow(dead_code)]
2800 pub fn or_i64(&mut self, a: i64, b: i64) -> i64 {
2801 self.folds += 1;
2802 a | b
2803 }
2804 #[allow(dead_code)]
2805 pub fn xor_i64(&mut self, a: i64, b: i64) -> i64 {
2806 self.folds += 1;
2807 a ^ b
2808 }
2809 #[allow(dead_code)]
2810 pub fn not_i64(&mut self, a: i64) -> i64 {
2811 self.folds += 1;
2812 !a
2813 }
2814 #[allow(dead_code)]
2815 pub fn fold_count(&self) -> usize {
2816 self.folds
2817 }
2818 #[allow(dead_code)]
2819 pub fn failure_count(&self) -> usize {
2820 self.failures
2821 }
2822 #[allow(dead_code)]
2823 pub fn enable(&mut self) {
2824 self.enabled = true;
2825 }
2826 #[allow(dead_code)]
2827 pub fn disable(&mut self) {
2828 self.enabled = false;
2829 }
2830 #[allow(dead_code)]
2831 pub fn is_enabled(&self) -> bool {
2832 self.enabled
2833 }
2834}
2835#[allow(dead_code)]
2837#[derive(Debug, Clone)]
2838pub struct OEExtWorklist {
2839 pub(super) items: std::collections::VecDeque<usize>,
2840 pub(super) present: Vec<bool>,
2841}
2842impl OEExtWorklist {
2843 #[allow(dead_code)]
2844 pub fn new(capacity: usize) -> Self {
2845 Self {
2846 items: std::collections::VecDeque::new(),
2847 present: vec![false; capacity],
2848 }
2849 }
2850 #[allow(dead_code)]
2851 pub fn push(&mut self, id: usize) {
2852 if id < self.present.len() && !self.present[id] {
2853 self.present[id] = true;
2854 self.items.push_back(id);
2855 }
2856 }
2857 #[allow(dead_code)]
2858 pub fn push_front(&mut self, id: usize) {
2859 if id < self.present.len() && !self.present[id] {
2860 self.present[id] = true;
2861 self.items.push_front(id);
2862 }
2863 }
2864 #[allow(dead_code)]
2865 pub fn pop(&mut self) -> Option<usize> {
2866 let id = self.items.pop_front()?;
2867 if id < self.present.len() {
2868 self.present[id] = false;
2869 }
2870 Some(id)
2871 }
2872 #[allow(dead_code)]
2873 pub fn is_empty(&self) -> bool {
2874 self.items.is_empty()
2875 }
2876 #[allow(dead_code)]
2877 pub fn len(&self) -> usize {
2878 self.items.len()
2879 }
2880 #[allow(dead_code)]
2881 pub fn contains(&self, id: usize) -> bool {
2882 id < self.present.len() && self.present[id]
2883 }
2884 #[allow(dead_code)]
2885 pub fn drain_all(&mut self) -> Vec<usize> {
2886 let v: Vec<usize> = self.items.drain(..).collect();
2887 for &id in &v {
2888 if id < self.present.len() {
2889 self.present[id] = false;
2890 }
2891 }
2892 v
2893 }
2894}