1extern crate blake2b_simd;
2
3use self::blake2b_simd::{Hash, Params, blake2b};
4use super::common::{mulh, randomx_reciprocal, smulh, u64_from_i32_imm};
5use super::hash::{fill_aes_1rx4_u64, gen_program_aes_4rx4, hash_aes_1rx4};
6use super::m128::{m128d, m128i};
7use super::memory::{CACHE_LINE_SIZE, VmMemory};
8use super::program::{Instr, MAX_FLOAT_REG, MAX_REG, Mode, Program, Store};
9use std::convert::TryInto;
10use std::sync::Arc;
11
// libc binding used to switch the FPU/SSE rounding mode for float
// instructions (FADD/FSUB/FMUL/FDIV/FSQRT honor the dynamic mode).
unsafe extern "C" {
    // Returns 0 on success (see fenv.h); the result is ignored here.
    fn fesetround(round: i32) -> i32;
}
15
16fn set_rounding_mode_env(mode: u32) {
19 unsafe {
20 let fe_mode: i32 = match mode {
21 0 => 0, 1 => 0x400, 2 => 0x800, 3 => 0xC00, _ => 0,
26 };
27 fesetround(fe_mode);
28 }
29}
30
// Byte-offset masks for the three scratchpad levels (8-byte aligned).
pub const SCRATCHPAD_L1_MASK: u64 = 0x3ff8;
pub const SCRATCHPAD_L2_MASK: u64 = 0x3fff8;
pub const SCRATCHPAD_L3_MASK: u64 = 0x1ffff8;
// L3 mask aligned to 64 bytes; used for the per-iteration spAddr0/spAddr1.
const SCRATCHPAD_L3_MASK_U32: u32 = 0x1fffc0;

// Scratchpad length in u64 words (262144 * 8 bytes = 2 MiB).
const SCRATCHPAD_SIZE: usize = 262144;
// Default SSE control/status word: exceptions masked, round-to-nearest.
const MXCSR_DEFAULT: u32 = 0x9FC0;
// CBRANCH: an 8-bit condition window above `shift` must be zero to jump.
const CONDITION_OFFSET: u64 = 8;
const CONDITION_MASK: u64 = (1 << CONDITION_OFFSET) - 1;

// RandomX chain parameters (program count/size/iterations, dataset layout).
const RANDOMX_PROGRAM_COUNT: usize = 8;
const RANDOMX_PROGRAM_SIZE: i32 = 256;
const RANDOMX_PROGRAM_ITERATIONS: usize = 2048;
const RANDOMX_DATASET_BASE_SIZE: usize = 2147483648;
const RANDOMX_DATASET_ITEM_SIZE: usize = 64;
const RANDOMX_DATASET_EXTRA_SIZE: usize = 33554368;
const RANDOMX_HASH_SIZE: usize = 32;

// Number of 64-byte dataset items in the extra area beyond the 2 GiB base.
const DATASET_EXTRA_ITEMS: usize = RANDOMX_DATASET_EXTRA_SIZE / RANDOMX_DATASET_ITEM_SIZE;

// IEEE-754 binary64 layout constants used by the float-mask helpers.
const MANTISSA_SIZE: u64 = 52;
const MANTISSA_MASK: u64 = (1 << MANTISSA_SIZE) - 1;
const EXPONENT_SIZE: u64 = 11;
const EXPONENT_BIAS: u64 = 1023;
const EXPONENT_MASK: u64 = (1 << EXPONENT_SIZE) - 1;
// Fixed exponent bits OR-ed into every E-register mask.
const EXPONENT_BITS: u64 = 0x300;
const DYNAMIC_EXPONENT_BITS: u64 = 4;
const STATIC_EXPONENT_BITS: u64 = 4;
// Mask keeping the mantissa plus the dynamic exponent bits of a loaded value.
const DYNAMIC_MANTISSA_MASK: u64 = (1 << (MANTISSA_SIZE + DYNAMIC_EXPONENT_BITS)) - 1;

// Keeps a dataset address inside the base size, aligned to a 64-byte item.
const CACHE_LINE_ALIGN_MASK: u64 =
    ((RANDOMX_DATASET_BASE_SIZE - 1) & !(RANDOMX_DATASET_ITEM_SIZE - 1)) as u64;
63
/// Dataset address pair: `ma` is the address read in the current iteration,
/// `mx` accumulates the next one; the two are swapped after every read.
pub struct MemoryRegister {
    pub mx: usize,
    pub ma: usize,
}
68
/// RandomX register file: 8 integer registers `r`, plus the float groups
/// `f` (written by FADD/FSUB), `e` (written by FMUL/FDIV/FSQRT) and `a`
/// (constants set once per program from entropy and only read afterwards).
pub struct Register {
    pub r: [u64; MAX_REG as usize],
    pub f: [m128d; MAX_FLOAT_REG as usize],
    pub e: [m128d; MAX_FLOAT_REG as usize],
    pub a: [m128d; MAX_FLOAT_REG as usize],
}
75
76pub fn new_register() -> Register {
77 Register {
78 r: [0; MAX_REG as usize],
79 f: [m128d::zero(); MAX_FLOAT_REG as usize],
80 e: [m128d::zero(); MAX_FLOAT_REG as usize],
81 a: [m128d::zero(); MAX_FLOAT_REG as usize],
82 }
83}
84
85impl Register {
86 pub fn to_bytes(&self) -> [u8; 256] {
87 let mut bytes = [0; 256];
88 let mut offset = 0;
89 for i in 0..MAX_REG {
90 Register::copy_into_le(&mut bytes, offset, self.r[i]);
91 offset += 1;
92 }
93
94 for i in 0..MAX_FLOAT_REG {
95 let (h, l) = self.f[i].as_u64();
96 Register::copy_into_le(&mut bytes, offset, l);
97 offset += 1;
98 Register::copy_into_le(&mut bytes, offset, h);
99 offset += 1;
100 }
101
102 for i in 0..MAX_FLOAT_REG {
103 let (h, l) = self.e[i].as_u64();
104 Register::copy_into_le(&mut bytes, offset, l);
105 offset += 1;
106 Register::copy_into_le(&mut bytes, offset, h);
107 offset += 1;
108 }
109
110 for i in 0..MAX_FLOAT_REG {
111 let (h, l) = self.a[i].as_u64();
112 Register::copy_into_le(&mut bytes, offset, l);
113 offset += 1;
114 Register::copy_into_le(&mut bytes, offset, h);
115 offset += 1;
116 }
117
118 bytes
119 }
120
121 fn copy_into_le(bytes: &mut [u8; 256], offset: usize, u: u64) {
122 let reg_bytes = u.to_le_bytes();
123 for k in 0..8 {
124 bytes[offset * 8 + k] = reg_bytes[k];
125 }
126 }
127}
128
/// Per-program VM configuration decoded from the program entropy words.
pub struct VmConfig {
    /// Exponent/mantissa masks applied when loading E-group registers.
    pub e_mask: [u64; 2],
    /// Indices of the four integer registers used for address mixing
    /// (one register picked from each pair r0/r1, r2/r3, r4/r5, r6/r7).
    pub read_reg: [usize; 4],
}
133
/// Interpreter state for one RandomX virtual machine.
pub struct Vm {
    /// Dataset address registers (swapped after each dataset read).
    pub mem_reg: MemoryRegister,
    /// Integer and floating-point register file.
    pub reg: Register,
    /// Scratchpad stored as u64 words (SCRATCHPAD_SIZE entries).
    pub scratchpad: Vec<u64>,
    /// Program counter into the current 256-instruction program.
    pub pc: i32,
    /// Per-program configuration derived from entropy.
    pub config: VmConfig,
    /// Shared dataset/cache backing memory.
    pub mem: Arc<VmMemory>,
    /// Item-aligned offset into the dataset extra area.
    pub dataset_offset: u64,
    // Shadow copy of the SSE rounding-control bits (bits 13-14).
    mxcsr: u32,
}
144
145impl Vm {
146 pub fn init_vm(&mut self, prog: &Program) {
147 self.reg.a[0] = m128d::from_u64(
148 small_positive_float_bit(prog.entropy[1]),
149 small_positive_float_bit(prog.entropy[0]),
150 );
151 self.reg.a[1] = m128d::from_u64(
152 small_positive_float_bit(prog.entropy[3]),
153 small_positive_float_bit(prog.entropy[2]),
154 );
155 self.reg.a[2] = m128d::from_u64(
156 small_positive_float_bit(prog.entropy[5]),
157 small_positive_float_bit(prog.entropy[4]),
158 );
159 self.reg.a[3] = m128d::from_u64(
160 small_positive_float_bit(prog.entropy[7]),
161 small_positive_float_bit(prog.entropy[6]),
162 );
163
164 self.mem_reg.ma = ((prog.entropy[8] & CACHE_LINE_ALIGN_MASK) as u32) as usize;
165 self.mem_reg.mx = (prog.entropy[10] as u32) as usize;
166
167 let mut address_reg = prog.entropy[12] as usize;
168 self.config.read_reg[0] = address_reg & 1;
169 address_reg >>= 1;
170 self.config.read_reg[1] = 2 + (address_reg & 1);
171 address_reg >>= 1;
172 self.config.read_reg[2] = 4 + (address_reg & 1);
173 address_reg >>= 1;
174 self.config.read_reg[3] = 6 + (address_reg & 1);
175
176 self.dataset_offset =
177 (prog.entropy[13] % (DATASET_EXTRA_ITEMS as u64 + 1)) * CACHE_LINE_SIZE;
178
179 self.config.e_mask[0] = float_mask(prog.entropy[14]);
180 self.config.e_mask[1] = float_mask(prog.entropy[15]);
181
182 for i in 0..MAX_REG {
183 self.reg.r[i] = 0;
184 }
185 }
186
187 pub fn init_scratchpad(&mut self, seed: &[m128i; 4]) -> [m128i; 4] {
188 fill_aes_1rx4_u64(seed, &mut self.scratchpad)
189 }
190
191 pub fn calculate_hash(&mut self, input: &[u8]) -> Hash {
192 let hash = blake2b(input);
193 let seed = hash_to_m128i_array(&hash);
194
195 let mut tmp_hash = self.init_scratchpad(&seed);
196 self.reset_rounding_mode();
197
198 for _ in 0..(RANDOMX_PROGRAM_COUNT - 1) {
199 self.run(&tmp_hash);
200 let blake_result = blake2b(&self.reg.to_bytes());
201 tmp_hash = hash_to_m128i_array(&blake_result);
202 }
203
204 self.run(&tmp_hash);
205 let final_hash = hash_aes_1rx4(&self.scratchpad);
206 self.reg.a[0] = final_hash[0].as_m128d();
207 self.reg.a[1] = final_hash[1].as_m128d();
208 self.reg.a[2] = final_hash[2].as_m128d();
209 self.reg.a[3] = final_hash[3].as_m128d();
210
211 let mut params = Params::new();
212 params.hash_length(RANDOMX_HASH_SIZE);
213 params.hash(&self.reg.to_bytes())
214 }
215
216 pub fn run(&mut self, seed: &[m128i; 4]) {
218 let prog = Program::from_bytes(gen_program_aes_4rx4(seed, 136));
219
220 self.init_vm(&prog);
221
222 let mut sp_addr_0: u32 = self.mem_reg.mx as u32;
223 let mut sp_addr_1: u32 = self.mem_reg.ma as u32;
224
225 for _ in 0..RANDOMX_PROGRAM_ITERATIONS {
226 let sp_mix = self.reg.r[self.config.read_reg[0]] ^ self.reg.r[self.config.read_reg[1]];
227
228 sp_addr_0 ^= sp_mix as u32;
229 sp_addr_0 &= SCRATCHPAD_L3_MASK_U32;
230 sp_addr_0 /= 8;
231 sp_addr_1 ^= (sp_mix >> 32) as u32;
232 sp_addr_1 &= SCRATCHPAD_L3_MASK_U32;
233 sp_addr_1 /= 8;
234
235 for i in 0..MAX_REG {
236 self.reg.r[i] ^= self.scratchpad[sp_addr_0 as usize + i];
237 }
238 for i in 0..MAX_FLOAT_REG {
239 self.reg.f[i] =
240 m128i::from_u64(0, self.scratchpad[sp_addr_1 as usize + i]).lower_to_m128d();
241 }
242 for i in 0..MAX_FLOAT_REG {
243 self.reg.e[i] = self.mask_register_exponent_mantissa(
244 m128i::from_u64(0, self.scratchpad[sp_addr_1 as usize + i + MAX_FLOAT_REG])
245 .lower_to_m128d(),
246 );
247 }
248
249 self.pc = 0;
250 while self.pc < RANDOMX_PROGRAM_SIZE {
251 let instr = &prog.program[self.pc as usize];
252 instr.execute(self);
253 self.pc += 1;
254 }
255
256 self.mem_reg.mx ^= (self.reg.r[self.config.read_reg[2]]
257 ^ self.reg.r[self.config.read_reg[3]]) as usize;
258 self.mem_reg.mx &= CACHE_LINE_ALIGN_MASK as usize;
259 self.mem.dataset_read(
260 self.dataset_offset + self.mem_reg.ma as u64,
261 &mut self.reg.r,
262 );
263
264 std::mem::swap(&mut self.mem_reg.mx, &mut self.mem_reg.ma);
265
266 for i in 0..MAX_REG {
267 self.scratchpad[sp_addr_1 as usize + i] = self.reg.r[i];
268 }
269 for i in 0..MAX_FLOAT_REG {
270 self.reg.f[i] = self.reg.f[i] ^ self.reg.e[i];
271 }
272
273 for i in 0..MAX_FLOAT_REG {
274 let (u1, u0) = self.reg.f[i].as_u64();
275 let ix = sp_addr_0 as usize + 2 * i;
276 self.scratchpad[ix] = u0;
277 self.scratchpad[ix + 1] = u1;
278 }
279 sp_addr_0 = 0;
280 sp_addr_1 = 0;
281 }
282 }
283
284 pub fn reset_rounding_mode(&mut self) {
285 self.mxcsr = MXCSR_DEFAULT;
286 set_rounding_mode_env(0);
287 }
288
289 pub fn set_rounding_mode(&mut self, mode: u32) {
290 self.mxcsr = MXCSR_DEFAULT | (mode << 13);
291 set_rounding_mode_env(mode);
292 }
293
294 pub fn get_rounding_mode(&self) -> u32 {
295 (self.mxcsr >> 13) & 3
296 }
297
298 pub fn exec_fswap_r(&mut self, instr: &Instr) {
301 let v_dst = self.read_float_reg(&instr.dst);
302 self.write_float_reg(&instr.dst, v_dst.shuffle_1(&v_dst));
303 }
304
305 pub fn exec_fadd_r(&mut self, instr: &Instr) {
306 let v_src = self.read_a(&instr.src);
307 let v_dst = self.read_f(&instr.dst);
308 self.write_f(&instr.dst, v_src + v_dst);
309 }
310
311 pub fn exec_fadd_m(&mut self, instr: &Instr) {
312 let v = self.scratchpad[self.scratchpad_src_ix(instr)];
313 let v_src = m128i::from_u64(0, v).lower_to_m128d();
314 let v_dst = self.read_f(&instr.dst);
315 self.write_f(&instr.dst, v_dst + v_src);
316 }
317
318 pub fn exec_fsub_r(&mut self, instr: &Instr) {
319 let v_src = self.read_a(&instr.src);
320 let v_dst = self.read_f(&instr.dst);
321 self.write_f(&instr.dst, v_dst - v_src);
322 }
323
324 pub fn exec_fsub_m(&mut self, instr: &Instr) {
325 let v = self.scratchpad[self.scratchpad_src_ix(instr)];
326 let v_src = m128i::from_u64(0, v).lower_to_m128d();
327 let v_dst = self.read_f(&instr.dst);
328 self.write_f(&instr.dst, v_dst - v_src);
329 }
330
331 pub fn exec_fscal_r(&mut self, instr: &Instr) {
332 let v_dst = self.read_f(&instr.dst);
333 let mask = m128d::from_u64(0x80F0000000000000, 0x80F0000000000000);
334 self.write_f(&instr.dst, v_dst ^ mask);
335 }
336
337 pub fn exec_fmul_r(&mut self, instr: &Instr) {
338 let v_src = self.read_a(&instr.src);
339 let v_dst = self.read_e(&instr.dst);
340 self.write_e(&instr.dst, v_src * v_dst);
341 }
342
343 pub fn exec_fsqrt_r(&mut self, instr: &Instr) {
344 let v_dst = self.read_e(&instr.dst);
345 self.write_e(&instr.dst, v_dst.sqrt());
346 }
347
348 pub fn exec_fdiv_m(&mut self, instr: &Instr) {
349 let v = self.scratchpad[self.scratchpad_src_ix(instr)];
350 let v_src = self.mask_register_exponent_mantissa(m128i::from_u64(0, v).lower_to_m128d());
351 let v_dst = self.read_e(&instr.dst);
352 self.write_e(&instr.dst, v_dst / v_src);
353 }
354
355 pub fn exec_iadd_m(&mut self, instr: &Instr) {
358 let ix = self.scratchpad_src_ix(instr);
359 self.write_r(
360 &instr.dst,
361 self.read_r(&instr.dst).wrapping_add(self.scratchpad[ix]),
362 );
363 }
364
365 pub fn exec_isub_m(&mut self, instr: &Instr) {
366 let ix = self.scratchpad_src_ix(instr);
367 self.write_r(
368 &instr.dst,
369 self.read_r(&instr.dst).wrapping_sub(self.scratchpad[ix]),
370 );
371 }
372
373 pub fn exec_imul_m(&mut self, instr: &Instr) {
374 let ix = self.scratchpad_src_ix(instr);
375 self.write_r(
376 &instr.dst,
377 self.read_r(&instr.dst).wrapping_mul(self.scratchpad[ix]),
378 );
379 }
380 pub fn exec_iadd_rs(&mut self, instr: &Instr) {
381 let mut v = self.read_r(&instr.src) << shift_mode(instr);
382 if let Some(imm) = instr.imm {
383 v = v.wrapping_add(u64_from_i32_imm(imm));
384 }
385 self.write_r(&instr.dst, self.read_r(&instr.dst).wrapping_add(v));
386 }
387 pub fn exec_isub_r(&mut self, instr: &Instr) {
388 let v = self.imm_or_r(instr);
389 self.write_r(&instr.dst, self.read_r(&instr.dst).wrapping_sub(v));
390 }
391
392 pub fn exec_imul_r(&mut self, instr: &Instr) {
393 let v = self.imm_or_r(instr);
394 self.write_r(&instr.dst, self.read_r(&instr.dst).wrapping_mul(v));
395 }
396
397 pub fn exec_imul_rcp(&mut self, instr: &Instr) {
398 if !is_zero_or_power_of_2(instr.imm.unwrap() as u64) {
399 let v = randomx_reciprocal((instr.imm.unwrap() as u64) & 0x00000000FFFFFFFF);
400 self.write_r(&instr.dst, self.read_r(&instr.dst).wrapping_mul(v));
401 } }
403
404 pub fn exec_imulh_r(&mut self, instr: &Instr) {
405 let v_src = self.read_r(&instr.src);
406 let v_dst = self.read_r(&instr.dst);
407 self.write_r(&instr.dst, mulh(v_src, v_dst));
408 }
409
410 pub fn exec_imulh_m(&mut self, instr: &Instr) {
411 let v_dst = self.read_r(&instr.dst);
412 let v_src = self.scratchpad[self.scratchpad_src_ix(instr)];
413 self.write_r(&instr.dst, mulh(v_src, v_dst));
414 }
415
416 pub fn exec_ismulh_r(&mut self, instr: &Instr) {
417 let v_src = self.read_r(&instr.src);
418 let v_dst = self.read_r(&instr.dst);
419 self.write_r(&instr.dst, smulh(v_src, v_dst));
420 }
421
422 pub fn exec_ismulh_m(&mut self, instr: &Instr) {
423 let v_src = self.scratchpad[self.scratchpad_src_ix(instr)];
424 let v_dst = self.read_r(&instr.dst);
425 self.write_r(&instr.dst, smulh(v_src, v_dst));
426 }
427
428 pub fn exec_ineg_r(&mut self, instr: &Instr) {
429 let v_dst = self.read_r(&instr.dst);
430 self.write_r(&instr.dst, (!v_dst).wrapping_add(1));
431 }
432
433 pub fn exec_ixor_r(&mut self, instr: &Instr) {
434 let v_src = self.imm_or_r(instr);
435 let v_dst = self.read_r(&instr.dst);
436 self.write_r(&instr.dst, v_dst ^ v_src);
437 }
438
439 pub fn exec_ixor_m(&mut self, instr: &Instr) {
440 let v_src = self.scratchpad[self.scratchpad_src_ix(instr)];
441 let v_dst = self.read_r(&instr.dst);
442 self.write_r(&instr.dst, v_dst ^ v_src);
443 }
444
445 pub fn exec_iror_r(&mut self, instr: &Instr) {
446 let v_src = (self.imm_or_r(instr) & 0xFFFFFF) as u32;
447 let v_dst = self.read_r(&instr.dst);
448 self.write_r(&instr.dst, v_dst.rotate_right(v_src));
449 }
450
451 pub fn exec_irol_r(&mut self, instr: &Instr) {
452 let v_src = (self.imm_or_r(instr) & 0xFFFFFF) as u32;
453 let v_dst = self.read_r(&instr.dst);
454 self.write_r(&instr.dst, v_dst.rotate_left(v_src));
455 }
456
457 pub fn exec_iswap_r(&mut self, instr: &Instr) {
458 let v_src = self.read_r(&instr.src);
459 let v_dst = self.read_r(&instr.dst);
460 self.write_r(&instr.dst, v_src);
461 self.write_r(&instr.src, v_dst);
462 }
463
464 pub fn exec_istore(&mut self, instr: &Instr) {
465 let ix = self.scratchpad_dst_ix(instr);
466 self.scratchpad[ix] = self.read_r(&instr.src);
467 }
468
469 pub fn exec_cfround(&mut self, instr: &Instr) {
472 let v_src = self.read_r(&instr.src);
473 let mode = (v_src.rotate_right(instr.imm.unwrap() as u32) % 4) as u32;
474 self.set_rounding_mode(mode);
475 }
476
477 pub fn exec_cbranch(&mut self, instr: &Instr) {
478 let shift = cond_mode(instr) as u64 + CONDITION_OFFSET;
479 let mut imm = u64_from_i32_imm(instr.imm.unwrap()) | 1 << shift;
480 if CONDITION_OFFSET > 0 || shift > 0 {
481 imm &= !(1 << (shift - 1));
482 }
483 let v_dst = self.read_r(&instr.dst).wrapping_add(imm);
484 self.write_r(&instr.dst, v_dst);
485 if v_dst & (CONDITION_MASK << shift) == 0 {
486 self.pc = instr.target.unwrap();
487 }
488 }
489
490 fn imm_or_r(&self, instr: &Instr) -> u64 {
493 if instr.src == Store::NONE {
494 return instr.imm.unwrap() as u64;
495 }
496 self.read_r(&instr.src)
497 }
498
499 fn read_float_reg(&self, store: &Store) -> m128d {
500 match store {
501 Store::A(i) => self.reg.a[*i],
502 Store::E(i) => self.reg.e[*i],
503 Store::F(i) => self.reg.f[*i],
504 _ => panic!("illegal read from float register"),
505 }
506 }
507
508 fn write_float_reg(&mut self, store: &Store, v: m128d) {
509 match store {
510 Store::A(i) => self.reg.a[*i] = v,
511 Store::E(i) => self.reg.e[*i] = v,
512 Store::F(i) => self.reg.f[*i] = v,
513 _ => panic!("illegal write to float register"),
514 }
515 }
516
517 fn read_r(&self, store: &Store) -> u64 {
518 match store {
519 Store::R(i) => self.reg.r[*i],
520 _ => panic!("illegal read from register r"),
521 }
522 }
523 fn write_r(&mut self, store: &Store, v: u64) {
524 match store {
525 Store::R(i) => self.reg.r[*i] = v,
526 _ => panic!("illegal store to register r"),
527 }
528 }
529 fn read_f(&self, store: &Store) -> m128d {
530 match store {
531 Store::F(i) => self.reg.f[*i],
532 _ => panic!("illegal read from register f"),
533 }
534 }
535 fn write_f(&mut self, store: &Store, v: m128d) {
536 match store {
537 Store::F(i) => self.reg.f[*i] = v,
538 _ => panic!("illegal store to register f"),
539 }
540 }
541
542 fn read_a(&self, store: &Store) -> m128d {
543 match store {
544 Store::A(i) => self.reg.a[*i],
545 _ => panic!("illegal read from register a"),
546 }
547 }
548
549 fn read_e(&self, store: &Store) -> m128d {
550 match store {
551 Store::E(i) => self.reg.e[*i],
552 _ => panic!("illegal read from register e"),
553 }
554 }
555
556 fn write_e(&mut self, store: &Store, v: m128d) {
557 match store {
558 Store::E(i) => self.reg.e[*i] = v,
559 _ => panic!("illegal store to register e"),
560 }
561 }
562 fn scratchpad_src_ix(&self, instr: &Instr) -> usize {
563 let imm = u64_from_i32_imm(instr.imm.unwrap());
564 let addr: usize = match &instr.src {
565 Store::L1(d) => (self.read_r(d).wrapping_add(imm)) & SCRATCHPAD_L1_MASK,
566 Store::L2(d) => (self.read_r(d).wrapping_add(imm)) & SCRATCHPAD_L2_MASK,
567 Store::L3(_) => imm & SCRATCHPAD_L3_MASK,
568 _ => panic!("illegal read from scratchpad"),
569 }
570 .try_into()
571 .unwrap();
572 addr / 8
573 }
574
575 fn scratchpad_dst_ix(&self, instr: &Instr) -> usize {
576 let imm = u64_from_i32_imm(instr.imm.unwrap());
577 let addr: usize = match &instr.dst {
578 Store::L1(d) => (self.read_r(d).wrapping_add(imm)) & SCRATCHPAD_L1_MASK,
579 Store::L2(d) => (self.read_r(d).wrapping_add(imm)) & SCRATCHPAD_L2_MASK,
580 Store::L3(d) => (self.read_r(d).wrapping_add(imm)) & SCRATCHPAD_L3_MASK,
581 _ => panic!("illegal read from scratchpad"),
582 }
583 .try_into()
584 .unwrap();
585 addr / 8
586 }
587
588 fn mask_register_exponent_mantissa(&self, v: m128d) -> m128d {
589 let mantissa_mask = m128d::from_u64(DYNAMIC_MANTISSA_MASK, DYNAMIC_MANTISSA_MASK);
590 let exponent_mask = m128d::from_u64(self.config.e_mask[1], self.config.e_mask[0]);
591 (v & mantissa_mask) | exponent_mask
592 }
593}
594
595pub fn hash_to_m128i_array(hash: &Hash) -> [m128i; 4] {
596 let bytes = hash.as_bytes();
597 let i1 = m128i::from_u8(&bytes[0..16]);
598 let i2 = m128i::from_u8(&bytes[16..32]);
599 let i3 = m128i::from_u8(&bytes[32..48]);
600 let i4 = m128i::from_u8(&bytes[48..64]);
601 [i1, i2, i3, i4]
602}
603
604fn shift_mode(instr: &Instr) -> u8 {
605 match instr.mode {
606 Mode::Shft(x) => x,
607 _ => panic!("illegal shift mode {}", instr.mode),
608 }
609}
610
611fn cond_mode(instr: &Instr) -> u8 {
612 match instr.mode {
613 Mode::Cond(x) => x,
614 _ => panic!("illegal cond mode {}", instr.mode),
615 }
616}
617
/// True when `imm` is zero or an exact power of two (i.e. at most one bit set).
pub fn is_zero_or_power_of_2(imm: u64) -> bool {
    imm == 0 || imm.is_power_of_two()
}
621
622fn small_positive_float_bit(entropy: u64) -> u64 {
623 let mut exponent = entropy >> 59; let mantissa = entropy & MANTISSA_MASK;
625 exponent += EXPONENT_BIAS;
626 exponent &= EXPONENT_MASK;
627 exponent <<= MANTISSA_SIZE;
628 exponent | mantissa
629}
630
631fn float_mask(entropy: u64) -> u64 {
632 let mask22bit = (1 << 22) - 1;
633 entropy & mask22bit | static_exponent(entropy)
634}
635
636fn static_exponent(entropy: u64) -> u64 {
637 let mut exponent = EXPONENT_BITS;
638 exponent |= (entropy >> (64 - STATIC_EXPONENT_BITS)) << DYNAMIC_EXPONENT_BITS;
639 exponent << MANTISSA_SIZE
640}
641
642pub fn new_vm(mem: Arc<VmMemory>) -> Vm {
643 Vm {
644 mem_reg: MemoryRegister { mx: 0, ma: 0 },
645 reg: new_register(),
646 scratchpad: vec![0; SCRATCHPAD_SIZE],
647 pc: 0,
648 config: VmConfig {
649 e_mask: [0; 2],
650 read_reg: [0; 4],
651 },
652 mem,
653 dataset_offset: 0,
654 mxcsr: MXCSR_DEFAULT,
655 }
656}