1pub mod codegen;
2pub mod optimizer;
3pub mod profiler;
4pub mod trace;
5use crate::bytecode::Value;
6use crate::VM;
7pub use codegen::JitCompiler;
8pub use optimizer::TraceOptimizer;
9pub use profiler::{HotSpot, Profiler};
10use std::collections::HashMap;
11pub use trace::{Trace, TraceOp, TraceRecorder};
#[cfg(debug_assertions)]
#[inline]
/// Debug-build logger: evaluates the lazily-built message and prints it.
/// The closure form keeps message construction zero-cost in release
/// builds, where the `cfg(not(debug_assertions))` twin is a no-op.
pub(crate) fn log<F>(message: F)
where
    F: FnOnce() -> String,
{
    let text = message();
    println!("{}", text);
}
20
#[cfg(not(debug_assertions))]
#[inline]
/// Release-build logger: a no-op twin of the debug `log`.
/// Takes the message closure by value but never invokes it, so the
/// `String` is never built and the whole call optimizes away.
pub(crate) fn log<F>(_message: F)
where
    F: FnOnce() -> String,
{
}
28
/// Back-edge count at which a loop is considered hot and trace recording starts
/// (compared against `Profiler::record_backedge` in `JitState::check_hot`).
pub const HOT_THRESHOLD: u32 = 10;
/// Upper bound on recorded trace length; presumably the recorder aborts past
/// this — TODO confirm against `trace::TraceRecorder`.
pub const MAX_TRACE_LENGTH: usize = 200;
/// Guard-failure count at which a side trace is presumably compiled for the
/// failing exit (`Guard::fail_count`) — confirm against the codegen path.
pub const SIDE_EXIT_THRESHOLD: u32 = 10;
/// Loop-unrolling factor used by the optimizer/codegen — TODO confirm usage.
pub const UNROLL_FACTOR: usize = 32;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
/// Opaque identifier for a compiled trace, allocated sequentially by
/// `JitState::alloc_trace_id` and used as the key in `JitState::traces`.
pub struct TraceId(pub usize);
/// A trace that has been compiled to native code, together with the
/// metadata needed to enter it, bail out of it, and link side traces.
pub struct CompiledTrace {
    // Identifier under which this trace is stored in `JitState::traces`.
    pub id: TraceId,
    // Native entry point: takes the register file, the VM, and the current
    // function; the i32 return presumably encodes an exit/guard index —
    // TODO confirm against the codegen that emits it.
    pub entry: extern "C" fn(*mut Value, *mut VM, *const crate::bytecode::Function) -> i32,
    // The recorded trace this code was compiled from.
    pub trace: Trace,
    // Guards embedded in the compiled code; indexed when a guard fails.
    pub guards: Vec<Guard>,
    // Parent trace when this is a side trace; `None` for root traces.
    pub parent: Option<TraceId>,
    // Side traces compiled for this trace's failing guards.
    pub side_traces: Vec<TraceId>,
    // Raw pointers to constants; the name suggests they were leaked so the
    // native code can reference them by address — confirm ownership/lifetime
    // against the compiler that fills this in.
    pub leaked_constants: Vec<*const Value>,
    // (register, value) pairs presumably materialized before trace entry
    // after the optimizer hoisted them — TODO confirm against TraceOptimizer.
    pub hoisted_constants: Vec<(u8, Value)>,
}
45
#[derive(Debug, Clone)]
/// A runtime check compiled into a trace; when it fails, execution leaves
/// the trace and resumes in the interpreter at `bailout_ip`.
pub struct Guard {
    // Position of this guard within the owning trace's `guards` vector.
    pub index: usize,
    // Bytecode instruction pointer to resume interpretation at on failure.
    pub bailout_ip: usize,
    // What condition the guard checks (see `GuardKind`).
    pub kind: GuardKind,
    // How many times this guard has failed; presumably compared against
    // `SIDE_EXIT_THRESHOLD` to trigger side-trace compilation — confirm.
    pub fail_count: u32,
    // Side trace compiled for this exit, once hot enough.
    pub side_trace: Option<TraceId>,
}
54
#[derive(Debug, Clone)]
/// The condition a compiled `Guard` verifies at runtime. Registers are
/// identified by their `u8` index in the VM register file.
pub enum GuardKind {
    // Value in `register` must be an integer.
    IntType {
        register: u8,
    },
    // Value in `register` must be a float.
    FloatType {
        register: u8,
    },
    // Value in `register` must be truthy (trace was recorded on the taken branch).
    Truthy {
        register: u8,
    },
    // Value in `register` must be falsy.
    Falsy {
        register: u8,
    },
    // Index in `index_register` must be in bounds for the array in
    // `array_register`.
    ArrayBoundsCheck {
        array_register: u8,
        index_register: u8,
    },
    // Presumably guards against entering a nested loop at `loop_start_ip`
    // in `function_idx` that the trace did not record — TODO confirm
    // against the recorder.
    NestedLoop {
        function_idx: usize,
        loop_start_ip: usize,
    },
    // Value in `register` must be the same native function the trace was
    // specialized for (`expected` is its code address).
    NativeFunction {
        register: u8,
        expected: *const (),
    },
}
82
/// Top-level JIT bookkeeping: hotness profiling plus storage for all
/// compiled traces and the root-trace index.
pub struct JitState {
    // Counts loop back-edges to detect hot spots.
    pub profiler: Profiler,
    // All compiled traces (roots and side traces), keyed by id.
    pub traces: HashMap<TraceId, CompiledTrace>,
    // Root-trace lookup keyed by (function index, loop-header ip).
    pub root_traces: HashMap<(usize, usize), TraceId>,
    // Next value handed out by `alloc_trace_id`; monotonically increasing.
    next_trace_id: usize,
    // Master switch; set from the target arch at construction (x86_64 only).
    pub enabled: bool,
}
90
91impl JitState {
92 pub fn new() -> Self {
93 let enabled = cfg!(target_arch = "x86_64");
94 Self {
95 profiler: Profiler::new(),
96 traces: HashMap::new(),
97 root_traces: HashMap::new(),
98 next_trace_id: 0,
99 enabled,
100 }
101 }
102
103 pub fn alloc_trace_id(&mut self) -> TraceId {
104 let id = TraceId(self.next_trace_id);
105 self.next_trace_id += 1;
106 id
107 }
108
109 pub fn check_hot(&mut self, func_idx: usize, ip: usize) -> bool {
110 if !self.enabled {
111 return false;
112 }
113
114 self.profiler.record_backedge(func_idx, ip) >= HOT_THRESHOLD
115 }
116
117 pub fn get_root_trace(&self, func_idx: usize, ip: usize) -> Option<&CompiledTrace> {
118 self.root_traces
119 .get(&(func_idx, ip))
120 .and_then(|id| self.traces.get(id))
121 }
122
123 pub fn get_trace(&self, id: TraceId) -> Option<&CompiledTrace> {
124 self.traces.get(&id)
125 }
126
127 pub fn get_trace_mut(&mut self, id: TraceId) -> Option<&mut CompiledTrace> {
128 self.traces.get_mut(&id)
129 }
130
131 pub fn store_root_trace(&mut self, func_idx: usize, ip: usize, trace: CompiledTrace) {
132 let id = trace.id;
133 self.root_traces.insert((func_idx, ip), id);
134 self.traces.insert(id, trace);
135 }
136
137 pub fn store_side_trace(&mut self, trace: CompiledTrace) {
138 let id = trace.id;
139 self.traces.insert(id, trace);
140 }
141}
142
impl Default for JitState {
    // Delegates to `new()` so `JitState::default()` matches the canonical
    // constructor (arch-gated `enabled`, empty tables).
    fn default() -> Self {
        Self::new()
    }
}