1#[cfg(feature = "std")]
2pub mod codegen;
3pub mod optimizer;
4pub mod profiler;
5pub mod trace;
6use crate::bytecode::Value;
7use crate::VM;
8#[cfg(feature = "std")]
9pub use codegen::JitCompiler;
/// Stub stand-in for `codegen::JitCompiler` on `no_std` builds; its
/// `compile_trace` (in the `impl` further down) always returns an error.
#[cfg(not(feature = "std"))]
pub struct JitCompiler;
12pub use optimizer::TraceOptimizer;
13pub use profiler::{HotSpot, Profiler};
14use alloc::{string::String, vec::Vec};
15use hashbrown::HashMap;
16pub use trace::{Trace, TraceOp, TraceRecorder};
17#[cfg(not(feature = "std"))]
18impl JitCompiler {
19 pub fn new() -> Self {
20 Self
21 }
22
23 pub fn compile_trace(
24 &mut self,
25 _trace: &Trace,
26 _trace_id: TraceId,
27 _parent: Option<TraceId>,
28 _hoisted_constants: Vec<(u8, Value)>,
29 ) -> crate::Result<CompiledTrace> {
30 Err(crate::LustError::RuntimeError {
31 message: "JIT is unavailable without the `std` feature".into(),
32 })
33 }
34}
/// Prints a lazily-built debug message to stdout. Only compiled into debug
/// builds with `std`; the no-op twin below replaces it everywhere else, so
/// callers never pay for formatting in release builds.
#[cfg(all(debug_assertions, feature = "std"))]
#[inline]
pub(crate) fn log<F>(message: F)
where
    F: FnOnce() -> String,
{
    let text = message();
    println!("{}", text);
}
43
/// No-op replacement for `log` on release or `no_std` builds: the closure
/// is never invoked, so building the message costs nothing here.
#[cfg(not(all(debug_assertions, feature = "std")))]
#[inline]
pub(crate) fn log<F>(_message: F)
where
    F: FnOnce() -> String,
{
}
51
/// Backedge count at which a loop counts as hot and is worth tracing
/// (compared against in `JitState::check_hot`).
pub const HOT_THRESHOLD: u32 = 10;
/// Upper bound on a recorded trace's length — presumably recording aborts
/// beyond this; confirm against `TraceRecorder`.
pub const MAX_TRACE_LENGTH: usize = 200;
/// Guard failure count that triggers compiling a side trace for the failing
/// exit — assumed from the name; verify at the guard bailout path.
pub const SIDE_EXIT_THRESHOLD: u32 = 10;
/// Loop unroll factor for the optimizer/codegen — assumed from the name;
/// not referenced in this module.
pub const UNROLL_FACTOR: usize = 32;
/// Opaque identifier of a compiled trace; used as the key into
/// `JitState::traces`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct TraceId(pub usize);
/// A trace compiled to native code, plus the metadata needed to run it,
/// bail out of it, and link side traces onto its guard exits.
pub struct CompiledTrace {
    /// Id of this trace within `JitState::traces`.
    pub id: TraceId,
    /// Entry point of the generated machine code, called with the register
    /// file, the VM, and the current function. NOTE(review): the `i32`
    /// return presumably encodes the exit/guard taken — confirm in `codegen`.
    pub entry: extern "C" fn(*mut Value, *mut VM, *const crate::bytecode::Function) -> i32,
    /// The recorded trace this code was compiled from.
    pub trace: Trace,
    /// Runtime checks embedded in the code; see `Guard`.
    pub guards: Vec<Guard>,
    /// Trace this side trace branched from, or `None` for a root trace.
    pub parent: Option<TraceId>,
    /// Side traces compiled off this trace's guard exits.
    pub side_traces: Vec<TraceId>,
    /// Pointers to `Value`s leaked so the generated code can reference them
    /// by address — presumably reclaimed when the trace is dropped; confirm
    /// in `codegen`.
    pub leaked_constants: Vec<*const Value>,
    /// `(register, value)` pairs for constants hoisted out of the loop.
    pub hoisted_constants: Vec<(u8, Value)>,
}
68
/// A runtime check compiled into a trace; when it fails, execution leaves
/// the native code and resumes in the interpreter.
#[derive(Debug, Clone)]
pub struct Guard {
    /// Position of this guard within the owning trace's `guards` list.
    pub index: usize,
    /// Bytecode ip the interpreter resumes at when this guard fails.
    pub bailout_ip: usize,
    /// The condition this guard checks.
    pub kind: GuardKind,
    /// Times this guard has failed — presumably compared against
    /// `SIDE_EXIT_THRESHOLD` to decide when to compile a side trace;
    /// confirm at the bailout site.
    pub fail_count: u32,
    /// Side trace to continue in instead of bailing out, once one exists.
    pub side_trace: Option<TraceId>,
}
77
/// The condition a `Guard` verifies at runtime; each variant names the
/// registers (and other data) the check reads.
#[derive(Debug, Clone)]
pub enum GuardKind {
    /// `register` must hold an integer value.
    IntType {
        register: u8,
    },
    /// `register` must hold a float value.
    FloatType {
        register: u8,
    },
    /// `register` must be truthy.
    Truthy {
        register: u8,
    },
    /// `register` must be falsy.
    Falsy {
        register: u8,
    },
    /// `index_register` must be in bounds for the array in `array_register`.
    ArrayBoundsCheck {
        array_register: u8,
        index_register: u8,
    },
    /// Execution must still be in the nested loop identified by
    /// `(function_idx, loop_start_ip)` — assumed from the names; confirm
    /// against the recorder/codegen.
    NestedLoop {
        function_idx: usize,
        loop_start_ip: usize,
    },
    /// `register` must hold the native function whose address is `expected`.
    NativeFunction {
        register: u8,
        expected: *const (),
    },
}
105
/// Per-VM JIT bookkeeping: profiling counters, compiled traces, and the
/// mapping from loop headers to their root traces.
pub struct JitState {
    /// Backedge profiler that decides when a loop is hot.
    pub profiler: Profiler,
    /// All compiled traces (roots and side traces), keyed by id.
    pub traces: HashMap<TraceId, CompiledTrace>,
    /// Root trace id per loop header, keyed by `(function index, loop ip)`.
    pub root_traces: HashMap<(usize, usize), TraceId>,
    /// Next id handed out by `alloc_trace_id`.
    next_trace_id: usize,
    /// Master switch; set in `new` from the target/feature configuration.
    pub enabled: bool,
}
113
114impl JitState {
115 pub fn new() -> Self {
116 let enabled = cfg!(all(feature = "std", target_arch = "x86_64"));
117 Self {
118 profiler: Profiler::new(),
119 traces: HashMap::new(),
120 root_traces: HashMap::new(),
121 next_trace_id: 0,
122 enabled,
123 }
124 }
125
126 pub fn alloc_trace_id(&mut self) -> TraceId {
127 let id = TraceId(self.next_trace_id);
128 self.next_trace_id += 1;
129 id
130 }
131
132 pub fn check_hot(&mut self, func_idx: usize, ip: usize) -> bool {
133 if !self.enabled {
134 return false;
135 }
136
137 self.profiler.record_backedge(func_idx, ip) >= HOT_THRESHOLD
138 }
139
140 pub fn get_root_trace(&self, func_idx: usize, ip: usize) -> Option<&CompiledTrace> {
141 self.root_traces
142 .get(&(func_idx, ip))
143 .and_then(|id| self.traces.get(id))
144 }
145
146 pub fn get_trace(&self, id: TraceId) -> Option<&CompiledTrace> {
147 self.traces.get(&id)
148 }
149
150 pub fn get_trace_mut(&mut self, id: TraceId) -> Option<&mut CompiledTrace> {
151 self.traces.get_mut(&id)
152 }
153
154 pub fn store_root_trace(&mut self, func_idx: usize, ip: usize, trace: CompiledTrace) {
155 let id = trace.id;
156 self.root_traces.insert((func_idx, ip), id);
157 self.traces.insert(id, trace);
158 }
159
160 pub fn store_side_trace(&mut self, trace: CompiledTrace) {
161 let id = trace.id;
162 self.traces.insert(id, trace);
163 }
164}
165
166impl Default for JitState {
167 fn default() -> Self {
168 Self::new()
169 }
170}