wasm_core/
int_ops.rs

1use prelude::intrinsics;
2use value::Value;
3use opcode::Memarg;
4use executor::Memory;
5use executor::{ExecuteResult, ExecuteError};
6
7#[inline]
8pub fn i32_clz(v: i32) -> Value {
9    Value::I32(unsafe {
10        intrinsics::ctlz(v)
11    })
12}
13
14#[inline]
15pub fn i32_ctz(v: i32) -> Value {
16    Value::I32(unsafe {
17        intrinsics::cttz(v)
18    })
19}
20
21#[inline]
22pub fn i32_popcnt(v: i32) -> Value {
23    Value::I32(unsafe {
24        intrinsics::ctpop(v)
25    })
26}
27
28#[inline]
29pub fn i32_add(a: i32, b: i32) -> Value {
30    Value::I32(a.wrapping_add(b))
31}
32
33#[inline]
34pub fn i32_sub(a: i32, b: i32) -> Value {
35    Value::I32(a.wrapping_sub(b))
36}
37
38#[inline]
39pub fn i32_mul(a: i32, b: i32) -> Value {
40    Value::I32(a.wrapping_mul(b))
41}
42
43#[inline]
44pub fn i32_div_u(a: i32, b: i32) -> Value {
45    Value::I32((a as u32).wrapping_div(b as u32) as i32)
46}
47
48#[inline]
49pub fn i32_div_s(a: i32, b: i32) -> Value {
50    Value::I32(a.wrapping_div(b))
51}
52
53#[inline]
54pub fn i32_rem_u(a: i32, b: i32) -> Value {
55    Value::I32((a as u32).wrapping_rem(b as u32) as i32)
56}
57
58#[inline]
59pub fn i32_rem_s(a: i32, b: i32) -> Value {
60    Value::I32(a.wrapping_rem(b))
61}
62
63#[inline]
64pub fn i32_and(a: i32, b: i32) -> Value {
65    Value::I32(a & b)
66}
67
68#[inline]
69pub fn i32_or(a: i32, b: i32) -> Value {
70    Value::I32(a | b)
71}
72
73#[inline]
74pub fn i32_xor(a: i32, b: i32) -> Value {
75    Value::I32(a ^ b)
76}
77
78#[inline]
79pub fn i32_shl(a: i32, b: i32) -> Value {
80    Value::I32(a.wrapping_shl((b as u32) & 31))
81}
82
83#[inline]
84pub fn i32_shr_u(a: i32, b: i32) -> Value {
85    Value::I32(((a as u32).wrapping_shr((b as u32) & 31)) as i32)
86}
87
88#[inline]
89pub fn i32_shr_s(a: i32, b: i32) -> Value {
90    Value::I32(a.wrapping_shr((b as u32) & 31))
91}
92
93#[inline]
94pub fn i32_rotl(a: i32, b: i32) -> Value {
95    Value::I32(a.rotate_left(b as u32))
96}
97
98#[inline]
99pub fn i32_rotr(a: i32, b: i32) -> Value {
100    Value::I32(a.rotate_right(b as u32))
101}
102
103#[inline]
104pub fn i32_eqz(v: i32) -> Value {
105    if v == 0 {
106        Value::I32(1)
107    } else {
108        Value::I32(0)
109    }
110}
111
112#[inline]
113pub fn i32_eq(a: i32, b: i32) -> Value {
114    if a == b {
115        Value::I32(1)
116    } else {
117        Value::I32(0)
118    }
119}
120
121#[inline]
122pub fn i32_ne(a: i32, b: i32) -> Value {
123    if a == b {
124        Value::I32(0)
125    } else {
126        Value::I32(1)
127    }
128}
129
130#[inline]
131pub fn i32_lt_u(a: i32, b: i32) -> Value {
132    if (a as u32) < (b as u32) {
133        Value::I32(1)
134    } else {
135        Value::I32(0)
136    }
137}
138
139#[inline]
140pub fn i32_lt_s(a: i32, b: i32) -> Value {
141    if a < b {
142        Value::I32(1)
143    } else {
144        Value::I32(0)
145    }
146}
147
148#[inline]
149pub fn i32_le_u(a: i32, b: i32) -> Value {
150    if (a as u32) <= (b as u32) {
151        Value::I32(1)
152    } else {
153        Value::I32(0)
154    }
155}
156
157#[inline]
158pub fn i32_le_s(a: i32, b: i32) -> Value {
159    if a <= b {
160        Value::I32(1)
161    } else {
162        Value::I32(0)
163    }
164}
165
166#[inline]
167pub fn i32_gt_u(a: i32, b: i32) -> Value {
168    if (a as u32) > (b as u32) {
169        Value::I32(1)
170    } else {
171        Value::I32(0)
172    }
173}
174
175#[inline]
176pub fn i32_gt_s(a: i32, b: i32) -> Value {
177    if a > b {
178        Value::I32(1)
179    } else {
180        Value::I32(0)
181    }
182}
183
184#[inline]
185pub fn i32_ge_u(a: i32, b: i32) -> Value {
186    if (a as u32) >= (b as u32) {
187        Value::I32(1)
188    } else {
189        Value::I32(0)
190    }
191}
192
193#[inline]
194pub fn i32_ge_s(a: i32, b: i32) -> Value {
195    if a >= b {
196        Value::I32(1)
197    } else {
198        Value::I32(0)
199    }
200}
201
202#[inline]
203pub fn i32_wrap_i64(a: i64) -> Value {
204    Value::I32(a as i32)
205}
206
/// Marker for plain-old-data integer types that may be copied byte-wise
/// to and from linear memory by `load_from_mem` / `store_to_mem`.
///
/// # Safety
/// Implementors must be valid for every bit pattern — in particular
/// all-zero bytes — because `load_from_mem` materializes values via
/// `mem::zeroed()` followed by a raw byte copy.
unsafe trait LoadStore: Copy + Sized {}
unsafe impl LoadStore for i32 {}
unsafe impl LoadStore for i64 {}
210
211#[inline]
212fn load_from_mem<T: LoadStore>(index: u32, m: &Memarg, storage: &mut Memory, n: u32) -> ExecuteResult<T> {
213    let n = n as usize;
214
215    let t_size = ::prelude::mem::size_of::<T>();
216    if n > t_size {
217        return Err(ExecuteError::InvalidMemoryOperation);
218    }
219
220    let data: &[u8] = storage.data.as_slice();
221
222    let ea = (index + m.offset) as usize;
223    if ea + n > data.len() {
224        return Err(ExecuteError::AddrOutOfBound(ea as u32, n as u32));
225    }
226
227    // n <= sizeof(T) holds here so we can copy safely.
228    unsafe {
229        let mut result: T = ::prelude::mem::zeroed();
230        ::prelude::ptr::copy(
231            &data[ea] as *const u8,
232            &mut result as *mut T as *mut u8,
233            n
234        );
235
236        Ok(result)
237    }
238}
239
240#[inline]
241fn store_to_mem<T: LoadStore>(index: u32, val: T, m: &Memarg, storage: &mut Memory, n: u32) -> ExecuteResult<()> {
242    let n = n as usize;
243
244    let t_size = ::prelude::mem::size_of::<T>();
245    if n > t_size {
246        return Err(ExecuteError::InvalidMemoryOperation);
247    }
248
249    let data: &mut [u8] = storage.data.as_mut_slice();
250
251    let ea = (index + m.offset) as usize;
252
253    // this will not overflow because all of index, m.offset
254    // and n is in the range of u32.
255    if ea + n > data.len() {
256        return Err(ExecuteError::AddrOutOfBound(ea as u32, n as u32));
257    }
258
259    // ea + n <= data.len() && n <= sizeof(T) holds here so we can copy safely.
260    unsafe {
261        ::prelude::ptr::copy(
262            &val as *const T as *const u8,
263            &mut data[ea] as *mut u8,
264            n
265        );
266    }
267
268    Ok(())
269}
270
/// Sign-extends a value that was zero-extended from an `n`-byte load:
/// truncate to the low `n` bytes, reinterpret as signed, widen to i32.
/// Widths other than 1 and 2 pass through unchanged.
#[inline]
fn unsigned_loaded_i32_to_signed(v: i32, n: u32) -> i32 {
    if n == 1 {
        v as u8 as i8 as i32
    } else if n == 2 {
        v as u16 as i16 as i32
    } else {
        v
    }
}
279
/// Sign-extends a value that was zero-extended from an `n`-byte load:
/// truncate to the low `n` bytes, reinterpret as signed, widen to i64.
/// Widths other than 1, 2 and 4 pass through unchanged.
#[inline]
fn unsigned_loaded_i64_to_signed(v: i64, n: u32) -> i64 {
    if n == 1 {
        v as u8 as i8 as i64
    } else if n == 2 {
        v as u16 as i16 as i64
    } else if n == 4 {
        v as u32 as i32 as i64
    } else {
        v
    }
}
289
#[cfg(test)]
#[test]
fn test_unsigned_loaded_to_signed() {
    // 1-byte loads: sign bit is bit 7.
    assert_eq!(unsigned_loaded_i32_to_signed(0b11111011, 1), -5);
    assert_eq!(unsigned_loaded_i64_to_signed(0b11111011, 1), -5);
    // 2-byte loads: sign bit is bit 15 (previously untested).
    assert_eq!(unsigned_loaded_i32_to_signed(0xFFFF, 2), -1);
    assert_eq!(unsigned_loaded_i64_to_signed(0x8000, 2), -32768);
    // 4-byte loads: i64 sign-extends from bit 31; i32 passes through.
    assert_eq!(unsigned_loaded_i64_to_signed(0xFFFF_FFFF, 4), -1);
    assert_eq!(unsigned_loaded_i32_to_signed(-7, 4), -7);
    // Full-width values are returned unchanged.
    assert_eq!(unsigned_loaded_i64_to_signed(-7, 8), -7);
}
296
297#[inline]
298pub fn i32_load_s(index: u32, m: &Memarg, storage: &mut Memory, n: u32) -> ExecuteResult<Value> {
299    let v: i32 = load_from_mem(index, m, storage, n)?;
300    Ok(Value::I32(unsigned_loaded_i32_to_signed(v, n)))
301}
302
303#[inline]
304pub fn i32_load_u(index: u32, m: &Memarg, storage: &mut Memory, n: u32) -> ExecuteResult<Value> {
305    Ok(Value::I32(load_from_mem(index, m, storage, n)?))
306}
307
308#[inline]
309pub fn i32_store(index: u32, val: Value, m: &Memarg, storage: &mut Memory, n: u32) -> ExecuteResult<()> {
310    store_to_mem(index, val.get_i32()?, m, storage, n)
311}
312
313#[inline]
314pub fn i64_clz(v: i64) -> Value {
315    Value::I64(unsafe {
316        intrinsics::ctlz(v)
317    })
318}
319
320#[inline]
321pub fn i64_ctz(v: i64) -> Value {
322    Value::I64(unsafe {
323        intrinsics::cttz(v)
324    })
325}
326
327#[inline]
328pub fn i64_popcnt(v: i64) -> Value {
329    Value::I64(unsafe {
330        intrinsics::ctpop(v)
331    })
332}
333
334#[inline]
335pub fn i64_add(a: i64, b: i64) -> Value {
336    Value::I64(a.wrapping_add(b))
337}
338
339#[inline]
340pub fn i64_sub(a: i64, b: i64) -> Value {
341    Value::I64(a.wrapping_sub(b))
342}
343
344#[inline]
345pub fn i64_mul(a: i64, b: i64) -> Value {
346    Value::I64(a.wrapping_mul(b))
347}
348
349#[inline]
350pub fn i64_div_u(a: i64, b: i64) -> Value {
351    Value::I64((a as u64).wrapping_div(b as u64) as i64)
352}
353
354#[inline]
355pub fn i64_div_s(a: i64, b: i64) -> Value {
356    Value::I64(a.wrapping_div(b))
357}
358
359#[inline]
360pub fn i64_rem_u(a: i64, b: i64) -> Value {
361    Value::I64((a as u64).wrapping_rem(b as u64) as i64)
362}
363
364#[inline]
365pub fn i64_rem_s(a: i64, b: i64) -> Value {
366    Value::I64(a.wrapping_rem(b))
367}
368
369#[inline]
370pub fn i64_and(a: i64, b: i64) -> Value {
371    Value::I64(a & b)
372}
373
374#[inline]
375pub fn i64_or(a: i64, b: i64) -> Value {
376    Value::I64(a | b)
377}
378
379#[inline]
380pub fn i64_xor(a: i64, b: i64) -> Value {
381    Value::I64(a ^ b)
382}
383
384#[inline]
385pub fn i64_shl(a: i64, b: i64) -> Value {
386    Value::I64(a.wrapping_shl(b as u32))
387}
388
389#[inline]
390pub fn i64_shr_u(a: i64, b: i64) -> Value {
391    Value::I64(((a as u64).wrapping_shr(b as u32)) as i64)
392}
393
394#[inline]
395pub fn i64_shr_s(a: i64, b: i64) -> Value {
396    Value::I64(a.wrapping_shr(b as u32))
397}
398
399#[inline]
400pub fn i64_rotl(a: i64, b: i64) -> Value {
401    Value::I64(a.rotate_left(b as u32))
402}
403
404#[inline]
405pub fn i64_rotr(a: i64, b: i64) -> Value {
406    Value::I64(a.rotate_right(b as u32))
407}
408
409#[inline]
410pub fn i64_eqz(v: i64) -> Value {
411    if v == 0 {
412        Value::I32(1)
413    } else {
414        Value::I32(0)
415    }
416}
417
418#[inline]
419pub fn i64_eq(a: i64, b: i64) -> Value {
420    if a == b {
421        Value::I32(1)
422    } else {
423        Value::I32(0)
424    }
425}
426
427#[inline]
428pub fn i64_ne(a: i64, b: i64) -> Value {
429    if a == b {
430        Value::I32(0)
431    } else {
432        Value::I32(1)
433    }
434}
435
436#[inline]
437pub fn i64_lt_u(a: i64, b: i64) -> Value {
438    if (a as u64) < (b as u64) {
439        Value::I32(1)
440    } else {
441        Value::I32(0)
442    }
443}
444
445#[inline]
446pub fn i64_lt_s(a: i64, b: i64) -> Value {
447    if a < b {
448        Value::I32(1)
449    } else {
450        Value::I32(0)
451    }
452}
453
454#[inline]
455pub fn i64_le_u(a: i64, b: i64) -> Value {
456    if (a as u64) <= (b as u64) {
457        Value::I32(1)
458    } else {
459        Value::I32(0)
460    }
461}
462
463#[inline]
464pub fn i64_le_s(a: i64, b: i64) -> Value {
465    if a <= b {
466        Value::I32(1)
467    } else {
468        Value::I32(0)
469    }
470}
471
472#[inline]
473pub fn i64_gt_u(a: i64, b: i64) -> Value {
474    if (a as u64) > (b as u64) {
475        Value::I32(1)
476    } else {
477        Value::I32(0)
478    }
479}
480
481#[inline]
482pub fn i64_gt_s(a: i64, b: i64) -> Value {
483    if a > b {
484        Value::I32(1)
485    } else {
486        Value::I32(0)
487    }
488}
489
490#[inline]
491pub fn i64_ge_u(a: i64, b: i64) -> Value {
492    if (a as u64) >= (b as u64) {
493        Value::I32(1)
494    } else {
495        Value::I32(0)
496    }
497}
498
499#[inline]
500pub fn i64_ge_s(a: i64, b: i64) -> Value {
501    if a >= b {
502        Value::I32(1)
503    } else {
504        Value::I32(0)
505    }
506}
507
508#[inline]
509pub fn i64_extend_i32_u(v: i32) -> Value {
510    // FIXME: Is this correct?
511    Value::I64((v as i64) & 0x00000000ffffffffi64)
512}
513
514#[inline]
515pub fn i64_extend_i32_s(v: i32) -> Value {
516    Value::I64(v as i64)
517}
518
519#[inline]
520pub fn i64_load_s(index: u32, m: &Memarg, storage: &mut Memory, n: u32) -> ExecuteResult<Value> {
521    let v: i64 = load_from_mem(index, m, storage, n)?;
522    Ok(Value::I64(unsigned_loaded_i64_to_signed(v, n)))
523}
524
525#[inline]
526pub fn i64_load_u(index: u32, m: &Memarg, storage: &mut Memory, n: u32) -> ExecuteResult<Value> {
527    Ok(Value::I64(load_from_mem(index, m, storage, n)?))
528}
529
530#[inline]
531pub fn i64_store(index: u32, val: Value, m: &Memarg, storage: &mut Memory, n: u32) -> ExecuteResult<()> {
532    store_to_mem(index, val.get_i64()?, m, storage, n)
533}