//! wain_exec/stack.rs — byte-packed value stack for the interpreter.

1use crate::value::{LittleEndian, Value};
2use std::cmp::Ordering;
3use std::convert::{TryFrom, TryInto};
4use std::fmt;
5use std::mem;
6use std::mem::size_of;
7use wain_ast::{AsValType, ValType};
8
9// Vec<Value> consumes too much space since its element size is always 16bytes.
10// To use space more efficiently, here use u8 for storing values as bytes.
11
12#[derive(Default)]
13pub struct Stack {
14    bytes: Vec<u8>,      // Bytes buffer for actual values
15    types: Vec<ValType>, // this stack is necessary to pop arbitrary value
16    frame: CallFrame,
17}
18
19pub trait StackAccess: Sized {
20    fn pop(stack: &mut Stack) -> Self {
21        let v = Self::top(stack);
22        stack.erase_top(size_of::<Self>());
23        v
24    }
25    fn push(stack: &mut Stack, v: Self);
26    fn top(stack: &Stack) -> Self;
27}
28
29impl StackAccess for i32 {
30    fn push(stack: &mut Stack, v: Self) {
31        stack.push_bytes(&v.to_le_bytes(), ValType::I32);
32    }
33    fn top(stack: &Stack) -> Self {
34        assert_eq!(stack.top_type(), ValType::I32);
35        i32::from_le_bytes(stack.top_bytes())
36    }
37}
38
39impl StackAccess for i64 {
40    fn push(stack: &mut Stack, v: Self) {
41        stack.push_bytes(&v.to_le_bytes(), ValType::I64);
42    }
43    fn top(stack: &Stack) -> Self {
44        assert_eq!(stack.top_type(), ValType::I64);
45        i64::from_le_bytes(stack.top_bytes())
46    }
47}
48
49impl StackAccess for f32 {
50    fn push(stack: &mut Stack, v: Self) {
51        stack.push_bytes(&v.to_le_bytes(), ValType::F32);
52    }
53    fn top(stack: &Stack) -> Self {
54        assert_eq!(stack.top_type(), ValType::F32);
55        f32::from_le_bytes(stack.top_bytes())
56    }
57}
58
59impl StackAccess for f64 {
60    fn push(stack: &mut Stack, v: Self) {
61        stack.push_bytes(&v.to_le_bytes(), ValType::F64);
62    }
63    fn top(stack: &Stack) -> Self {
64        assert_eq!(stack.top_type(), ValType::F64);
65        f64::from_le_bytes(stack.top_bytes())
66    }
67}
68
69impl StackAccess for Value {
70    fn pop(stack: &mut Stack) -> Self {
71        match stack.types[stack.len() - 1] {
72            ValType::I32 => Value::I32(StackAccess::pop(stack)),
73            ValType::I64 => Value::I64(StackAccess::pop(stack)),
74            ValType::F32 => Value::F32(StackAccess::pop(stack)),
75            ValType::F64 => Value::F64(StackAccess::pop(stack)),
76        }
77    }
78    fn push(stack: &mut Stack, v: Self) {
79        match v {
80            Value::I32(i) => StackAccess::push(stack, i),
81            Value::I64(i) => StackAccess::push(stack, i),
82            Value::F32(f) => StackAccess::push(stack, f),
83            Value::F64(f) => StackAccess::push(stack, f),
84        }
85    }
86    fn top(stack: &Stack) -> Self {
87        match stack.types[stack.len() - 1] {
88            ValType::I32 => Value::I32(StackAccess::top(stack)),
89            ValType::I64 => Value::I64(StackAccess::top(stack)),
90            ValType::F32 => Value::F32(StackAccess::top(stack)),
91            ValType::F64 => Value::F64(StackAccess::top(stack)),
92        }
93    }
94}
95
96impl Stack {
97    // Note: Here I don't use std::slice::from_raw since its unsafe
98
99    fn top_type(&self) -> ValType {
100        self.types[self.len() - 1]
101    }
102
103    fn push_bytes(&mut self, bytes: &[u8], ty: ValType) {
104        self.types.push(ty);
105        self.bytes.extend_from_slice(bytes);
106    }
107
108    pub fn push<V: StackAccess>(&mut self, v: V) {
109        StackAccess::push(self, v);
110    }
111
112    fn top_bytes<'a, T>(&'a self) -> T
113    where
114        T: TryFrom<&'a [u8]>,
115        T::Error: fmt::Debug,
116    {
117        let len = self.top_addr() - size_of::<T>();
118        self.bytes[len..].try_into().expect("top bytes")
119    }
120
121    fn erase_top(&mut self, len: usize) {
122        self.types.pop();
123        self.bytes.truncate(self.top_addr() - len);
124    }
125
126    pub fn pop<V: StackAccess>(&mut self) -> V {
127        StackAccess::pop(self)
128    }
129
130    pub fn top<V: StackAccess>(&self) -> V {
131        StackAccess::top(self)
132    }
133
134    pub fn write_top_bytes<V: LittleEndian>(&mut self, v: V) {
135        let addr = self.top_addr() - size_of::<V>();
136        LittleEndian::write(&mut self.bytes, addr, v);
137    }
138
139    pub fn write_top_type(&mut self, t: ValType) {
140        let idx = self.len() - 1;
141        self.types[idx] = t;
142    }
143
144    pub fn write_top<T: StackAccess, V: LittleEndian + AsValType>(&mut self, v: V) {
145        // Expect optimizations by compiler since conditions of these if statements can be
146        // calculated at compile time
147        //
148        // Note: The same value as size_of::<T>() can be obtained by self.top_type().bytes(), but
149        // it's runtime value preventing compiler from optimization.
150        match size_of::<T>().cmp(&size_of::<V>()) {
151            Ordering::Equal => {}
152            Ordering::Greater => {
153                let len = self.top_addr() - (size_of::<T>() - size_of::<V>());
154                self.bytes.truncate(len);
155            }
156            Ordering::Less => {
157                let len = self.top_addr() + (size_of::<V>() - size_of::<T>());
158                self.bytes.resize(len, 0);
159            }
160        }
161        self.write_top_bytes(v);
162        self.write_top_type(V::VAL_TYPE);
163    }
164
165    fn write<V: LittleEndian>(&mut self, addr: usize, v: V) {
166        LittleEndian::write(&mut self.bytes, addr, v);
167    }
168
169    fn read<V: LittleEndian>(&self, addr: usize) -> V {
170        LittleEndian::read(&self.bytes, addr)
171    }
172
173    pub fn write_local(&mut self, localidx: u32) {
174        let addr = self.local_addr(localidx);
175        match self.local_type(localidx) {
176            ValType::I32 => self.write(addr, self.top::<i32>()),
177            ValType::I64 => self.write(addr, self.top::<i64>()),
178            ValType::F32 => self.write(addr, self.top::<f32>()),
179            ValType::F64 => self.write(addr, self.top::<f64>()),
180        }
181    }
182
183    pub fn read_local(&mut self, localidx: u32) {
184        let addr = self.local_addr(localidx);
185        match self.local_type(localidx) {
186            ValType::I32 => self.push(self.read::<i32>(addr)),
187            ValType::I64 => self.push(self.read::<i64>(addr)),
188            ValType::F32 => self.push(self.read::<f32>(addr)),
189            ValType::F64 => self.push(self.read::<f64>(addr)),
190        }
191    }
192
193    fn top_addr(&self) -> usize {
194        self.bytes.len()
195    }
196
197    // len() returns number of values pushed on this stack
198    fn len(&self) -> usize {
199        self.types.len()
200    }
201
202    fn restore(&mut self, addr: usize, type_idx: usize, has_result: bool) {
203        if has_result {
204            let v: Value = self.pop();
205            self.bytes.truncate(addr);
206            self.types.truncate(type_idx);
207            self.push(v);
208        } else {
209            self.bytes.truncate(addr);
210            self.types.truncate(type_idx);
211        }
212    }
213
214    pub fn push_label(&self) -> Label {
215        Label {
216            addr: self.top_addr(),
217            type_idx: self.len(),
218        }
219    }
220
221    pub fn pop_label(&mut self, label: &Label, has_result: bool) {
222        // Part of 'br' instruction: https://webassembly.github.io/spec/core/exec/instructions.html#exec-br
223        self.restore(label.addr, label.type_idx, has_result)
224    }
225
226    pub fn push_frame(&mut self, params: &[ValType], locals: &[ValType]) -> CallFrame {
227        let mut local_addrs = Vec::with_capacity(params.len() + locals.len());
228
229        // Note: Params were already pushed to stack
230        let params_bytes = params.iter().fold(0, |acc, p| acc + p.bytes());
231        let base_addr = self.top_addr() - params_bytes;
232        let base_idx = self.len() - params.len();
233
234        self.types.extend_from_slice(locals);
235
236        let mut addr = base_addr;
237        for p in &self.types[base_idx..] {
238            local_addrs.push(addr);
239            addr += p.bytes();
240        }
241
242        self.bytes.resize(addr, 0);
243
244        mem::replace(
245            &mut self.frame,
246            CallFrame {
247                base_addr,
248                base_idx,
249                local_addrs,
250            },
251        )
252    }
253
254    pub fn pop_frame(&mut self, prev_frame: CallFrame, has_result: bool) {
255        self.restore(self.frame.base_addr, self.frame.base_idx, has_result);
256        self.frame = prev_frame;
257    }
258
259    fn local_addr(&self, localidx: u32) -> usize {
260        self.frame.local_addrs[localidx as usize]
261    }
262
263    fn local_type(&self, localidx: u32) -> ValType {
264        let idx = localidx as usize;
265        self.types[self.frame.base_idx + idx]
266    }
267}
268
269// Activations of function frames
270// This class is outside Machine because it has shorter lifetime. It only lives while the current
271// function is being invoked
272#[derive(Default)]
273pub struct CallFrame {
274    base_addr: usize,
275    base_idx: usize,
276    local_addrs: Vec<usize>, // Calculate local addresses in advance for random access
277}
278
279pub struct Label {
280    addr: usize,
281    type_idx: usize,
282}
283
284#[cfg(test)]
285mod tests {
286    use super::*;
287
288    #[test]
289    fn i32_value() {
290        let mut s = Stack::default();
291        s.push(0i32);
292        assert_eq!(s.top::<i32>(), 0);
293        s.push(1i32);
294        s.push(-1i32);
295        s.push(i32::MAX);
296        s.push(i32::MIN);
297
298        assert_eq!(s.pop::<i32>(), i32::MIN);
299        assert_eq!(s.pop::<i32>(), i32::MAX);
300        assert_eq!(s.pop::<i32>(), -1);
301        assert_eq!(s.pop::<i32>(), 1);
302        assert_eq!(s.pop::<i32>(), 0);
303    }
304
305    #[test]
306    fn i64_value() {
307        let mut s = Stack::default();
308        s.push(0i64);
309        s.push(1i64);
310        s.push(-1i64);
311        s.push(i32::MAX as i64);
312        s.push(i32::MIN as i64);
313        s.push(i64::MAX);
314        s.push(i64::MIN);
315
316        assert_eq!(s.pop::<i64>(), i64::MIN);
317        assert_eq!(s.pop::<i64>(), i64::MAX);
318        assert_eq!(s.pop::<i64>(), i32::MIN as i64);
319        assert_eq!(s.pop::<i64>(), i32::MAX as i64);
320        assert_eq!(s.pop::<i64>(), -1);
321        assert_eq!(s.pop::<i64>(), 1);
322        assert_eq!(s.pop::<i64>(), 0);
323    }
324
325    #[test]
326    fn f32_value() {
327        let mut s = Stack::default();
328        s.push(0.0f32);
329        assert_eq!(s.top::<f32>(), 0.0);
330        s.push(3.14f32);
331        s.push(-1.0f32);
332        s.push(f32::INFINITY);
333        s.push(f32::NEG_INFINITY);
334        s.push(f32::NAN);
335
336        assert!(s.pop::<f32>().is_nan());
337        assert_eq!(s.pop::<f32>(), f32::NEG_INFINITY);
338        assert_eq!(s.pop::<f32>(), f32::INFINITY);
339        assert_eq!(s.pop::<f32>(), -1.0);
340        assert_eq!(s.pop::<f32>(), 3.14);
341        assert_eq!(s.pop::<f32>(), 0.0);
342    }
343
344    #[test]
345    fn f64_value() {
346        let mut s = Stack::default();
347        s.push(0.0f64);
348        assert_eq!(s.top::<f64>(), 0.0f64);
349        s.push(3.14f64);
350        s.push(-1.0f64);
351        s.push(f64::INFINITY);
352        s.push(f64::NEG_INFINITY);
353        s.push(f64::NAN);
354
355        assert!(s.pop::<f64>().is_nan());
356        assert_eq!(s.pop::<f64>(), f64::NEG_INFINITY);
357        assert_eq!(s.pop::<f64>(), f64::INFINITY);
358        assert_eq!(s.pop::<f64>(), -1.0);
359        assert_eq!(s.pop::<f64>(), 3.14);
360        assert_eq!(s.pop::<f64>(), 0.0);
361    }
362
363    #[test]
364    fn any_value() {
365        let i32_s = [0, 1, -1, i32::MAX, i32::MIN];
366        let i64_s = [0, 1, -1, i32::MAX as i64, i32::MIN as i64, i64::MAX, i64::MIN];
367        let f32_s = [0.0, 3.14, -1.0, f32::INFINITY, f32::NEG_INFINITY, f32::NAN];
368        let f64_s = [0.0, 3.14, -1.0, f64::INFINITY, f64::NEG_INFINITY, f64::NAN];
369
370        let mut s = Stack::default();
371        for (((i32v, i64v), f32v), f64v) in i32_s
372            .iter()
373            .cycle()
374            .zip(i64_s.iter().cycle())
375            .zip(f32_s.iter().cycle())
376            .zip(f64_s.iter().cycle())
377            .take(100)
378        {
379            s.push(*i32v);
380            s.push(*i64v);
381            s.push(*f32v);
382            s.push(*f64v);
383        }
384
385        for (((i32v, i64v), f32v), f64v) in i32_s
386            .iter()
387            .cycle()
388            .zip(i64_s.iter().cycle())
389            .zip(f32_s.iter().cycle())
390            .zip(f64_s.iter().cycle())
391            .take(100)
392            .collect::<Vec<_>>()
393            .into_iter()
394            .rev()
395        {
396            if f64v.is_nan() {
397                match s.pop() {
398                    Value::F64(v) => assert!(v.is_nan()),
399                    v => panic!("not match: {:?}", v),
400                }
401            } else {
402                assert_eq!(s.top::<Value>(), Value::F64(*f64v));
403                assert_eq!(s.pop::<Value>(), Value::F64(*f64v));
404            }
405            if f32v.is_nan() {
406                match s.pop() {
407                    Value::F32(v) => assert!(v.is_nan()),
408                    v => panic!("not match: {:?}", v),
409                }
410            } else {
411                assert_eq!(s.top::<Value>(), Value::F32(*f32v));
412                assert_eq!(s.pop::<Value>(), Value::F32(*f32v));
413            }
414            assert_eq!(s.top::<Value>(), Value::I64(*i64v));
415            assert_eq!(s.pop::<Value>(), Value::I64(*i64v));
416            assert_eq!(s.top::<Value>(), Value::I32(*i32v));
417            assert_eq!(s.pop::<Value>(), Value::I32(*i32v));
418        }
419    }
420}