//! Shared ISLE prelude implementation for optimization (mid-end) and
//! lowering (backend) ISLE environments.
4/// Helper macro to define methods in `prelude.isle` within `impl Context for
5/// ...` for each backend. These methods are shared amongst all backends.
6#[macro_export]
7#[doc(hidden)]
8macro_rules! isle_common_prelude_methods {
9    () => {
10        isle_numerics_methods!();
11
        /// We don't have a way of making a `()` value in isle directly.
        #[inline]
        fn unit(&mut self) -> Unit {
            ()
        }

        /// Add `a` and `b`, returning `None` if the sum overflows 64 bits or
        /// does not fit in `ty`'s bit width.
        #[inline]
        fn checked_add_with_type(&mut self, ty: Type, a: u64, b: u64) -> Option<u64> {
            let c = a.checked_add(b)?;
            let ty_mask = self.ty_mask(ty);
            // Any bit set above `ty`'s width means the sum overflowed `ty`.
            if (c & !ty_mask) == 0 { Some(c) } else { None }
        }

        /// Does `a + b` overflow when performed at `ty`'s bit width?
        #[inline]
        fn add_overflows_with_type(&mut self, ty: Type, a: u64, b: u64) -> bool {
            self.checked_add_with_type(ty, a, b).is_none()
        }
29
        /// Count leading zeros of `a` within `ty`'s bit width.
        ///
        /// NOTE(review): assumes `a` has no bits set above `ty.bits()`
        /// (otherwise the subtraction below underflows) — the immediate is
        /// presumably already masked to `ty`; confirm at call sites.
        #[inline]
        fn imm64_clz(&mut self, ty: Type, a: Imm64) -> Imm64 {
            let bits = ty.bits();
            assert!(bits <= 64);
            // `leading_zeros` counts in 64 bits; discount the bits above `ty`.
            let clz_offset = 64 - bits;
            let a_v: u64 = a.bits().cast_unsigned();
            let lz = a_v.leading_zeros() - clz_offset;
            Imm64::new(i64::from(lz))
        }

        /// Count trailing zeros of `a` within `ty`'s bit width.
        #[inline]
        fn imm64_ctz(&mut self, ty: Type, a: Imm64) -> Imm64 {
            let bits = ty.bits();
            assert!(bits <= 64);
            let a_v: u64 = a.bits().cast_unsigned();
            if a_v == 0 {
                // ctz(0) is defined to be the number of bits in the type.
                Imm64::new(i64::from(bits))
            } else {
                // Nonzero value: the 64-bit count equals the `ty`-width count.
                let lz = a_v.trailing_zeros();
                Imm64::new(i64::from(lz))
            }
        }
53
        /// Constant-fold a signed division `x / y` at `ty`'s width.
        ///
        /// Returns `None` for the two trapping cases: division by zero and
        /// `ty::MIN / -1` (overflow).
        #[inline]
        fn imm64_sdiv(&mut self, ty: Type, x: Imm64, y: Imm64) -> Option<Imm64> {
            // Sign extend `x` and `y`.
            let type_width = ty.bits();
            assert!(type_width <= 64);
            let x = x.sign_extend_from_width(type_width).bits();
            let y = y.sign_extend_from_width(type_width).bits();
            let shift = 64 - type_width;

            // NB: We can't rely on `checked_div` to detect `ty::MIN / -1`
            // (which overflows and should trap) because we are working with
            // `i64` values here, and `i32::MIN != i64::MIN`, for
            // example. Therefore, we have to explicitly check for this case
            // ourselves.
            let min = ((self.ty_smin(ty) as i64) << shift) >> shift;
            if x == min && y == -1 {
                return None;
            }

            // `checked_div` still catches division by zero.
            let result = x.checked_div(y)?;
            Some(Imm64::new(result).mask_to_width(type_width))
        }

        /// Constant-fold a signed remainder `x % y` at `ty`'s width.
        /// Returns `None` on division by zero.
        #[inline]
        fn imm64_srem(&mut self, ty: Type, x: Imm64, y: Imm64) -> Option<Imm64> {
            // Sign extend `x` and `y`.
            let type_width = ty.bits();
            assert!(type_width <= 64);
            let x = x.sign_extend_from_width(type_width).bits();
            let y = y.sign_extend_from_width(type_width).bits();

            // iN::min % -1 is defined as 0 in wasm so no need
            // to check for it
            let result = x.checked_rem(y)?;
            Some(Imm64::new(result).mask_to_width(type_width))
        }
91
        /// Constant-fold a left shift `x << y` at `ty`'s width; the shift
        /// amount wraps modulo `ty.bits()` (CLIF shift semantics).
        #[inline]
        fn imm64_shl(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
            // Mask off any excess shift bits.
            let shift_mask = (ty.bits() - 1) as u64;
            let y = (y.bits() as u64) & shift_mask;

            // Mask the result to `ty` bits.
            let ty_mask = self.ty_mask(ty) as i64;
            Imm64::new((x.bits() << y) & ty_mask)
        }

        /// Constant-fold a logical (zero-filling) right shift `x >> y` at
        /// `ty`'s width; the shift amount wraps modulo `ty.bits()`.
        #[inline]
        fn imm64_ushr(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
            // Zero-extend `x` from `ty` so the fill bits are zero.
            let ty_mask = self.ty_mask(ty);
            let x = (x.bits() as u64) & ty_mask;

            // Mask off any excess shift bits.
            let shift_mask = (ty.bits() - 1) as u64;
            let y = (y.bits() as u64) & shift_mask;

            // NB: No need to mask off high bits because they are already zero.
            Imm64::new((x >> y) as i64)
        }

        /// Constant-fold an arithmetic (sign-filling) right shift `x >> y` at
        /// `ty`'s width; the shift amount wraps modulo `ty.bits()`.
        #[inline]
        fn imm64_sshr(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
            // Sign extend `x` from `ty.bits()`-width to the full 64 bits.
            let shift = u32::checked_sub(64, ty.bits()).unwrap_or(0);
            let x = (x.bits() << shift) >> shift;

            // Mask off any excess shift bits.
            let shift_mask = (ty.bits() - 1) as i64;
            let y = y.bits() & shift_mask;

            // Mask off sign bits that aren't part of `ty`.
            let ty_mask = self.ty_mask(ty) as i64;
            Imm64::new((x >> y) & ty_mask)
        }
130
131        #[inline]
132        fn i64_sextend_u64(&mut self, ty: Type, x: u64) -> i64 {
133            let shift_amt = core::cmp::max(0, 64 - ty.bits());
134            ((x as i64) << shift_amt) >> shift_amt
135        }
136
        /// Sign-extend the low `ty.bits()` bits of `x` to a full `i64`.
        #[inline]
        fn i64_sextend_imm64(&mut self, ty: Type, x: Imm64) -> i64 {
            x.sign_extend_from_width(ty.bits()).bits()
        }

        /// Zero-extend `x` from `ty`'s width: keep only the low `ty.bits()` bits.
        #[inline]
        fn u64_uextend_imm64(&mut self, ty: Type, x: Imm64) -> u64 {
            (x.bits() as u64) & self.ty_mask(ty)
        }
146
        /// Constant-fold an integer comparison at `ty`'s width, returning
        /// `Imm64(1)` if the condition holds and `Imm64(0)` otherwise.
        #[inline]
        fn imm64_icmp(&mut self, ty: Type, cc: &IntCC, x: Imm64, y: Imm64) -> Imm64 {
            // Compute both extensions up front; the condition code selects
            // whether the unsigned or signed view is compared.
            let ux = self.u64_uextend_imm64(ty, x);
            let uy = self.u64_uextend_imm64(ty, y);
            let sx = self.i64_sextend_imm64(ty, x);
            let sy = self.i64_sextend_imm64(ty, y);
            let result = match cc {
                IntCC::Equal => ux == uy,
                IntCC::NotEqual => ux != uy,
                IntCC::UnsignedGreaterThanOrEqual => ux >= uy,
                IntCC::UnsignedGreaterThan => ux > uy,
                IntCC::UnsignedLessThanOrEqual => ux <= uy,
                IntCC::UnsignedLessThan => ux < uy,
                IntCC::SignedGreaterThanOrEqual => sx >= sy,
                IntCC::SignedGreaterThan => sx > sy,
                IntCC::SignedLessThanOrEqual => sx <= sy,
                IntCC::SignedLessThan => sx < sy,
            };
            Imm64::new(result.into())
        }
167
        /// `ty`'s width in bits as a `u8`; panics if it doesn't fit (> 255 bits).
        #[inline]
        fn ty_bits(&mut self, ty: Type) -> u8 {
            use core::convert::TryInto;
            ty.bits().try_into().unwrap()
        }

        /// `ty`'s width in bits as a `u16`.
        #[inline]
        fn ty_bits_u16(&mut self, ty: Type) -> u16 {
            ty.bits() as u16
        }

        /// `ty`'s width in bits as a `u64`.
        #[inline]
        fn ty_bits_u64(&mut self, ty: Type) -> u64 {
            ty.bits() as u64
        }

        /// `ty`'s size in bytes as a `u16`; panics if it doesn't fit.
        #[inline]
        fn ty_bytes(&mut self, ty: Type) -> u16 {
            u16::try_from(ty.bytes()).unwrap()
        }
188
189        #[inline]
190        fn ty_mask(&mut self, ty: Type) -> u64 {
191            let ty_bits = ty.bits();
192            debug_assert_ne!(ty_bits, 0);
193            let shift = 64_u64
194                .checked_sub(ty_bits.into())
195                .expect("unimplemented for > 64 bits");
196            u64::MAX >> shift
197        }
198
199        #[inline]
200        fn ty_lane_mask(&mut self, ty: Type) -> u64 {
201            let ty_lane_count = ty.lane_count();
202            debug_assert_ne!(ty_lane_count, 0);
203            let shift = 64_u64
204                .checked_sub(ty_lane_count.into())
205                .expect("unimplemented for > 64 bits");
206            u64::MAX >> shift
207        }
208
        /// Number of lanes in `ty` (1 for scalars) as a `u64`.
        #[inline]
        fn ty_lane_count(&mut self, ty: Type) -> u64 {
            ty.lane_count() as u64
        }

        /// Minimum unsigned value of any integer type: always zero.
        #[inline]
        fn ty_umin(&mut self, _ty: Type) -> u64 {
            0
        }

        /// Maximum unsigned value of `ty`: all `ty.bits()` bits set.
        #[inline]
        fn ty_umax(&mut self, ty: Type) -> u64 {
            self.ty_mask(ty)
        }

        /// Minimum signed value of `ty`, as an unsigned bit pattern
        /// (only the sign bit of `ty` set). Panics for > 64-bit types.
        #[inline]
        fn ty_smin(&mut self, ty: Type) -> u64 {
            let ty_bits = ty.bits();
            debug_assert_ne!(ty_bits, 0);
            let shift = 64_u64
                .checked_sub(ty_bits.into())
                .expect("unimplemented for > 64 bits");
            // Shift i64::MIN's pattern (only bit 63 set) down into position.
            (i64::MIN as u64) >> shift
        }

        /// Maximum signed value of `ty`, as an unsigned bit pattern
        /// (all bits below `ty`'s sign bit set). Panics for > 64-bit types.
        #[inline]
        fn ty_smax(&mut self, ty: Type) -> u64 {
            let ty_bits = ty.bits();
            debug_assert_ne!(ty_bits, 0);
            let shift = 64_u64
                .checked_sub(ty_bits.into())
                .expect("unimplemented for > 64 bits");
            (i64::MAX as u64) >> shift
        }
243
244        fn fits_in_16(&mut self, ty: Type) -> Option<Type> {
245            if ty.bits() <= 16 && !ty.is_dynamic_vector() {
246                Some(ty)
247            } else {
248                None
249            }
250        }
251
252        #[inline]
253        fn fits_in_32(&mut self, ty: Type) -> Option<Type> {
254            if ty.bits() <= 32 && !ty.is_dynamic_vector() {
255                Some(ty)
256            } else {
257                None
258            }
259        }
260
261        #[inline]
262        fn lane_fits_in_32(&mut self, ty: Type) -> Option<Type> {
263            if !ty.is_vector() && !ty.is_dynamic_vector() {
264                None
265            } else if ty.lane_type().bits() <= 32 {
266                Some(ty)
267            } else {
268                None
269            }
270        }
271
272        #[inline]
273        fn fits_in_64(&mut self, ty: Type) -> Option<Type> {
274            if ty.bits() <= 64 && !ty.is_dynamic_vector() {
275                Some(ty)
276            } else {
277                None
278            }
279        }
280
        /// Match scalar integer (or reference-like) types up to 64 bits wide:
        /// excludes floats and all vector types.
        #[inline]
        fn ty_int_ref_scalar_64(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() <= 64 && !ty.is_float() && !ty.is_vector() && !ty.is_dynamic_vector() {
                Some(ty)
            } else {
                None
            }
        }

        /// Extractor form of `ty_int_ref_scalar_64` (same predicate).
        #[inline]
        fn ty_int_ref_scalar_64_extract(&mut self, ty: Type) -> Option<Type> {
            self.ty_int_ref_scalar_64(ty)
        }
294
295        #[inline]
296        fn ty_16(&mut self, ty: Type) -> Option<Type> {
297            if ty.bits() == 16 { Some(ty) } else { None }
298        }
299
300        #[inline]
301        fn ty_32(&mut self, ty: Type) -> Option<Type> {
302            if ty.bits() == 32 { Some(ty) } else { None }
303        }
304
305        #[inline]
306        fn ty_64(&mut self, ty: Type) -> Option<Type> {
307            if ty.bits() == 64 { Some(ty) } else { None }
308        }
309
310        #[inline]
311        fn ty_128(&mut self, ty: Type) -> Option<Type> {
312            if ty.bits() == 128 { Some(ty) } else { None }
313        }
314
315        #[inline]
316        fn ty_32_or_64(&mut self, ty: Type) -> Option<Type> {
317            if ty.bits() == 32 || ty.bits() == 64 {
318                Some(ty)
319            } else {
320                None
321            }
322        }
323
324        #[inline]
325        fn ty_8_or_16(&mut self, ty: Type) -> Option<Type> {
326            if ty.bits() == 8 || ty.bits() == 16 {
327                Some(ty)
328            } else {
329                None
330            }
331        }
332
333        #[inline]
334        fn ty_16_or_32(&mut self, ty: Type) -> Option<Type> {
335            if ty.bits() == 16 || ty.bits() == 32 {
336                Some(ty)
337            } else {
338                None
339            }
340        }
341
342        #[inline]
343        fn int_fits_in_32(&mut self, ty: Type) -> Option<Type> {
344            match ty {
345                I8 | I16 | I32 => Some(ty),
346                _ => None,
347            }
348        }
349
350        #[inline]
351        fn ty_int_ref_64(&mut self, ty: Type) -> Option<Type> {
352            match ty {
353                I64 => Some(ty),
354                _ => None,
355            }
356        }
357
358        #[inline]
359        fn ty_int_ref_16_to_64(&mut self, ty: Type) -> Option<Type> {
360            match ty {
361                I16 | I32 | I64 => Some(ty),
362                _ => None,
363            }
364        }
365
366        #[inline]
367        fn ty_int(&mut self, ty: Type) -> Option<Type> {
368            ty.is_int().then(|| ty)
369        }
370
371        #[inline]
372        fn ty_scalar(&mut self, ty: Type) -> Option<Type> {
373            if ty.lane_count() == 1 { Some(ty) } else { None }
374        }
375
376        #[inline]
377        fn ty_scalar_float(&mut self, ty: Type) -> Option<Type> {
378            if ty.is_float() { Some(ty) } else { None }
379        }
380
381        #[inline]
382        fn ty_float_or_vec(&mut self, ty: Type) -> Option<Type> {
383            if ty.is_float() || ty.is_vector() {
384                Some(ty)
385            } else {
386                None
387            }
388        }
389
390        fn ty_vector_float(&mut self, ty: Type) -> Option<Type> {
391            if ty.is_vector() && ty.lane_type().is_float() {
392                Some(ty)
393            } else {
394                None
395            }
396        }
397
398        #[inline]
399        fn ty_vector_not_float(&mut self, ty: Type) -> Option<Type> {
400            if ty.is_vector() && !ty.lane_type().is_float() {
401                Some(ty)
402            } else {
403                None
404            }
405        }
406
407        #[inline]
408        fn ty_vec64_ctor(&mut self, ty: Type) -> Option<Type> {
409            if ty.is_vector() && ty.bits() == 64 {
410                Some(ty)
411            } else {
412                None
413            }
414        }
415
416        #[inline]
417        fn ty_vec64(&mut self, ty: Type) -> Option<Type> {
418            if ty.is_vector() && ty.bits() == 64 {
419                Some(ty)
420            } else {
421                None
422            }
423        }
424
425        #[inline]
426        fn ty_vec128(&mut self, ty: Type) -> Option<Type> {
427            if ty.is_vector() && ty.bits() == 128 {
428                Some(ty)
429            } else {
430                None
431            }
432        }
433
434        #[inline]
435        fn ty_dyn_vec64(&mut self, ty: Type) -> Option<Type> {
436            if ty.is_dynamic_vector() && dynamic_to_fixed(ty).bits() == 64 {
437                Some(ty)
438            } else {
439                None
440            }
441        }
442
443        #[inline]
444        fn ty_dyn_vec128(&mut self, ty: Type) -> Option<Type> {
445            if ty.is_dynamic_vector() && dynamic_to_fixed(ty).bits() == 128 {
446                Some(ty)
447            } else {
448                None
449            }
450        }
451
452        #[inline]
453        fn ty_vec64_int(&mut self, ty: Type) -> Option<Type> {
454            if ty.is_vector() && ty.bits() == 64 && ty.lane_type().is_int() {
455                Some(ty)
456            } else {
457                None
458            }
459        }
460
461        #[inline]
462        fn ty_vec128_int(&mut self, ty: Type) -> Option<Type> {
463            if ty.is_vector() && ty.bits() == 128 && ty.lane_type().is_int() {
464                Some(ty)
465            } else {
466                None
467            }
468        }
469
        /// Match the 64-bit address type (I64 on 64-bit targets).
        #[inline]
        fn ty_addr64(&mut self, ty: Type) -> Option<Type> {
            match ty {
                I64 => Some(ty),
                _ => None,
            }
        }

        /// Reinterpret an `Imm64`'s bits as an unsigned 64-bit value.
        #[inline]
        fn u64_from_imm64(&mut self, imm: Imm64) -> u64 {
            imm.bits() as u64
        }

        /// If `x` is a non-negative power of two, return its log2 (the bit
        /// position). Negative values and non-powers-of-two return `None`.
        #[inline]
        fn imm64_power_of_two(&mut self, x: Imm64) -> Option<u64> {
            let x = i64::from(x);
            // Rejects negative values: they can't convert to u64.
            let x = u64::try_from(x).ok()?;
            if x.is_power_of_two() {
                Some(x.trailing_zeros().into())
            } else {
                None
            }
        }
493
        /// Convert a bool into an all-ones (`u64::MAX`) or all-zeros mask,
        /// matching CLIF's comparison-result convention for masks.
        #[inline]
        fn u64_from_bool(&mut self, b: bool) -> u64 {
            if b { u64::MAX } else { 0 }
        }

        /// Destructure a multi-lane type into `(lane_bits, lane_count)`;
        /// `None` for scalars.
        #[inline]
        fn multi_lane(&mut self, ty: Type) -> Option<(u32, u32)> {
            if ty.lane_count() > 1 {
                Some((ty.lane_bits(), ty.lane_count()))
            } else {
                None
            }
        }

        /// Destructure a dynamic vector into `(lane_bits, min_lane_count)`.
        #[inline]
        fn dynamic_lane(&mut self, ty: Type) -> Option<(u32, u32)> {
            if ty.is_dynamic_vector() {
                Some((ty.lane_bits(), ty.min_lane_count()))
            } else {
                None
            }
        }

        /// Match dynamic vectors with integer lanes and a 64-bit minimum size.
        #[inline]
        fn ty_dyn64_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && ty.min_bits() == 64 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }

        /// Match dynamic vectors with integer lanes and a 128-bit minimum size.
        #[inline]
        fn ty_dyn128_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && ty.min_bits() == 128 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }
534
        /// Raw bit pattern of an `Ieee16`.
        fn u16_from_ieee16(&mut self, val: Ieee16) -> u16 {
            val.bits()
        }

        /// Raw bit pattern of an `Ieee32`.
        fn u32_from_ieee32(&mut self, val: Ieee32) -> u32 {
            val.bits()
        }

        /// Raw bit pattern of an `Ieee64`.
        fn u64_from_ieee64(&mut self, val: Ieee64) -> u64 {
            val.bits()
        }

        /// `Uimm8` is already a plain `u8`; identity conversion.
        fn u8_from_uimm8(&mut self, val: Uimm8) -> u8 {
            val
        }

        /// Negative matcher: succeeds for every type EXCEPT a 32x2 lane shape.
        fn not_vec32x2(&mut self, ty: Type) -> Option<Type> {
            if ty.lane_bits() == 32 && ty.lane_count() == 2 {
                None
            } else {
                Some(ty)
            }
        }

        /// Negative matcher: succeeds for every type except `I64X2`.
        fn not_i64x2(&mut self, ty: Type) -> Option<()> {
            if ty == I64X2 { None } else { Some(()) }
        }

        /// Trap code for integer division by zero.
        fn trap_code_division_by_zero(&mut self) -> TrapCode {
            TrapCode::INTEGER_DIVISION_BY_ZERO
        }

        /// Trap code for integer overflow (e.g. `iN::MIN / -1`).
        fn trap_code_integer_overflow(&mut self) -> TrapCode {
            TrapCode::INTEGER_OVERFLOW
        }

        /// Trap code for a float-to-int conversion that doesn't fit.
        fn trap_code_bad_conversion_to_integer(&mut self) -> TrapCode {
            TrapCode::BAD_CONVERSION_TO_INTEGER
        }

        /// Reinterpret `val`'s bits as `u64`, matching only nonzero values.
        fn nonzero_u64_from_imm64(&mut self, val: Imm64) -> Option<u64> {
            match val.bits() {
                0 => None,
                n => Some(n as u64),
            }
        }
581
        /// Match `x` only when its bits, read as an `i32`, are non-negative
        /// (i.e. the sign bit is clear).
        #[inline]
        fn u32_nonnegative(&mut self, x: u32) -> Option<u32> {
            if (x as i32) >= 0 { Some(x) } else { None }
        }

        /// Construct an `Imm64` from raw `u64` bits (reinterpreting cast).
        #[inline]
        fn imm64(&mut self, x: u64) -> Imm64 {
            Imm64::new(x as i64)
        }

        /// Construct an `Imm64` from `x` masked down to `ty`'s width.
        #[inline]
        fn imm64_masked(&mut self, ty: Type, x: u64) -> Imm64 {
            Imm64::new((x & self.ty_mask(ty)) as i64)
        }

        /// Extract the raw `i32` from an `Offset32`.
        #[inline]
        fn offset32(&mut self, x: Offset32) -> i32 {
            x.into()
        }
601
        /// The per-lane type of `ty` (`ty` itself for scalars).
        #[inline]
        fn lane_type(&mut self, ty: Type) -> Type {
            ty.lane_type()
        }

        /// Same lane type with half as many lanes; `None` for scalars or when
        /// no such type exists.
        #[inline]
        fn ty_half_lanes(&mut self, ty: Type) -> Option<Type> {
            if ty.lane_count() == 1 {
                None
            } else {
                ty.lane_type().by(ty.lane_count() / 2)
            }
        }

        /// Type with lanes half as wide, if one exists.
        #[inline]
        fn ty_half_width(&mut self, ty: Type) -> Option<Type> {
            ty.half_width()
        }

        /// Type equality test exposed to ISLE.
        #[inline]
        fn ty_equal(&mut self, lhs: Type, rhs: Type) -> bool {
            lhs == rhs
        }

        /// `Offset32` -> `i32` conversion.
        #[inline]
        fn offset32_to_i32(&mut self, offset: Offset32) -> i32 {
            offset.into()
        }

        /// `i32` -> `Offset32` conversion.
        #[inline]
        fn i32_to_offset32(&mut self, offset: i32) -> Offset32 {
            Offset32::new(offset)
        }
635
        /// Memory flags for accesses known not to trap and not to alias
        /// anything the optimizer cares about.
        #[inline]
        fn mem_flags_trusted(&mut self) -> MemFlags {
            MemFlags::trusted()
        }

        /// Match flags whose endianness is little or unspecified (native);
        /// explicit big-endian accesses are rejected.
        #[inline]
        fn little_or_native_endian(&mut self, flags: MemFlags) -> Option<MemFlags> {
            match flags.explicit_endianness() {
                Some(crate::ir::Endianness::Little) | None => Some(flags),
                Some(crate::ir::Endianness::Big) => None,
            }
        }
648
        /// Map a condition code to its unsigned counterpart.
        #[inline]
        fn intcc_unsigned(&mut self, x: &IntCC) -> IntCC {
            x.unsigned()
        }

        /// Match only the signed comparison condition codes.
        #[inline]
        fn signed_cond_code(&mut self, cc: &IntCC) -> Option<IntCC> {
            match cc {
                IntCC::Equal
                | IntCC::UnsignedGreaterThanOrEqual
                | IntCC::UnsignedGreaterThan
                | IntCC::UnsignedLessThanOrEqual
                | IntCC::UnsignedLessThan
                | IntCC::NotEqual => None,
                IntCC::SignedGreaterThanOrEqual
                | IntCC::SignedGreaterThan
                | IntCC::SignedLessThanOrEqual
                | IntCC::SignedLessThan => Some(*cc),
            }
        }

        /// Condition equivalent to swapping the comparison's operands.
        #[inline]
        fn intcc_swap_args(&mut self, cc: &IntCC) -> IntCC {
            cc.swap_args()
        }

        /// Logical negation of the condition.
        #[inline]
        fn intcc_complement(&mut self, cc: &IntCC) -> IntCC {
            cc.complement()
        }

        /// Strict version of the condition (drops the "or equal" part).
        #[inline]
        fn intcc_without_eq(&mut self, x: &IntCC) -> IntCC {
            x.without_equal()
        }

        /// Float condition equivalent to swapping the comparison's operands.
        #[inline]
        fn floatcc_swap_args(&mut self, cc: &FloatCC) -> FloatCC {
            cc.swap_args()
        }

        /// Logical negation of the float condition.
        #[inline]
        fn floatcc_complement(&mut self, cc: &FloatCC) -> FloatCC {
            cc.complement()
        }

        /// Does this float condition hold when an operand is NaN (unordered)?
        fn floatcc_unordered(&mut self, cc: &FloatCC) -> bool {
            match *cc {
                FloatCC::Unordered
                | FloatCC::UnorderedOrEqual
                | FloatCC::UnorderedOrLessThan
                | FloatCC::UnorderedOrLessThanOrEqual
                | FloatCC::UnorderedOrGreaterThan
                | FloatCC::UnorderedOrGreaterThanOrEqual => true,
                _ => false,
            }
        }
706
        /// Split a 2-element value array into a tuple.
        #[inline]
        fn unpack_value_array_2(&mut self, arr: &ValueArray2) -> (Value, Value) {
            let [a, b] = *arr;
            (a, b)
        }

        /// Build a 2-element value array from its parts.
        #[inline]
        fn pack_value_array_2(&mut self, a: Value, b: Value) -> ValueArray2 {
            [a, b]
        }

        /// Split a 3-element value array into a tuple.
        #[inline]
        fn unpack_value_array_3(&mut self, arr: &ValueArray3) -> (Value, Value, Value) {
            let [a, b, c] = *arr;
            (a, b, c)
        }

        /// Build a 3-element value array from its parts.
        #[inline]
        fn pack_value_array_3(&mut self, a: Value, b: Value, c: Value) -> ValueArray3 {
            [a, b, c]
        }

        /// Split a 2-element block-call array into a tuple.
        #[inline]
        fn unpack_block_array_2(&mut self, arr: &BlockArray2) -> (BlockCall, BlockCall) {
            let [a, b] = *arr;
            (a, b)
        }

        /// Build a 2-element block-call array from its parts.
        #[inline]
        fn pack_block_array_2(&mut self, a: BlockCall, b: BlockCall) -> BlockArray2 {
            [a, b]
        }
739
740        fn u128_replicated_u64(&mut self, val: u128) -> Option<u64> {
741            let low64 = val as u64 as u128;
742            if (low64 | (low64 << 64)) == val {
743                Some(low64 as u64)
744            } else {
745                None
746            }
747        }
748
749        fn u64_replicated_u32(&mut self, val: u64) -> Option<u64> {
750            let low32 = val as u32 as u64;
751            if (low32 | (low32 << 32)) == val {
752                Some(low32)
753            } else {
754                None
755            }
756        }
757
758        fn u32_replicated_u16(&mut self, val: u64) -> Option<u64> {
759            let val = val as u32;
760            let low16 = val as u16 as u32;
761            if (low16 | (low16 << 16)) == val {
762                Some(low16.into())
763            } else {
764                None
765            }
766        }
767
768        fn u16_replicated_u8(&mut self, val: u64) -> Option<u8> {
769            let val = val as u16;
770            let low8 = val as u8 as u16;
771            if (low8 | (low8 << 8)) == val {
772                Some(low8 as u8)
773            } else {
774                None
775            }
776        }
777
        /// The low 64 bits of a 128-bit value.
        fn u128_low_bits(&mut self, val: u128) -> u64 {
            val as u64
        }

        /// The high 64 bits of a 128-bit value.
        fn u128_high_bits(&mut self, val: u128) -> u64 {
            (val >> 64) as u64
        }
785
        // Constant-folding helpers for floating-point ops. The fallible ones
        // return `None` when the result is NaN (via `non_nan()`), declining to
        // fold so the NaN-producing instruction is preserved.

        /// Fold `fmin` for f16; `None` if the result is NaN.
        fn f16_min(&mut self, a: Ieee16, b: Ieee16) -> Option<Ieee16> {
            a.minimum(b).non_nan()
        }

        /// Fold `fmax` for f16; `None` if the result is NaN.
        fn f16_max(&mut self, a: Ieee16, b: Ieee16) -> Option<Ieee16> {
            a.maximum(b).non_nan()
        }

        /// Fold `fneg` for f16 (pure sign-bit flip; always foldable).
        fn f16_neg(&mut self, n: Ieee16) -> Ieee16 {
            -n
        }

        /// Fold `fabs` for f16.
        fn f16_abs(&mut self, n: Ieee16) -> Ieee16 {
            n.abs()
        }

        /// Fold `fcopysign` for f16.
        fn f16_copysign(&mut self, a: Ieee16, b: Ieee16) -> Ieee16 {
            a.copysign(b)
        }

        /// Fold `fadd` for f32; `None` if the result is NaN.
        fn f32_add(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
            (lhs + rhs).non_nan()
        }

        /// Fold `fsub` for f32; `None` if the result is NaN.
        fn f32_sub(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
            (lhs - rhs).non_nan()
        }

        /// Fold `fmul` for f32; `None` if the result is NaN.
        fn f32_mul(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
            (lhs * rhs).non_nan()
        }

        /// Fold `fdiv` for f32; `None` if the result is NaN.
        fn f32_div(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
            (lhs / rhs).non_nan()
        }

        /// Fold `sqrt` for f32; `None` if the result is NaN.
        fn f32_sqrt(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.sqrt().non_nan()
        }

        /// Fold `ceil` for f32; `None` if the result is NaN.
        fn f32_ceil(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.ceil().non_nan()
        }

        /// Fold `floor` for f32; `None` if the result is NaN.
        fn f32_floor(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.floor().non_nan()
        }

        /// Fold `trunc` for f32; `None` if the result is NaN.
        fn f32_trunc(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.trunc().non_nan()
        }

        /// Fold `nearest` (round half to even) for f32; `None` if NaN.
        fn f32_nearest(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.round_ties_even().non_nan()
        }

        /// Fold `fmin` for f32; `None` if the result is NaN.
        fn f32_min(&mut self, a: Ieee32, b: Ieee32) -> Option<Ieee32> {
            a.minimum(b).non_nan()
        }

        /// Fold `fmax` for f32; `None` if the result is NaN.
        fn f32_max(&mut self, a: Ieee32, b: Ieee32) -> Option<Ieee32> {
            a.maximum(b).non_nan()
        }

        /// Fold `fneg` for f32 (pure sign-bit flip; always foldable).
        fn f32_neg(&mut self, n: Ieee32) -> Ieee32 {
            -n
        }

        /// Fold `fabs` for f32.
        fn f32_abs(&mut self, n: Ieee32) -> Ieee32 {
            n.abs()
        }

        /// Fold `fcopysign` for f32.
        fn f32_copysign(&mut self, a: Ieee32, b: Ieee32) -> Ieee32 {
            a.copysign(b)
        }

        /// Fold `fadd` for f64; `None` if the result is NaN.
        fn f64_add(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
            (lhs + rhs).non_nan()
        }

        /// Fold `fsub` for f64; `None` if the result is NaN.
        fn f64_sub(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
            (lhs - rhs).non_nan()
        }

        /// Fold `fmul` for f64; `None` if the result is NaN.
        fn f64_mul(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
            (lhs * rhs).non_nan()
        }

        /// Fold `fdiv` for f64; `None` if the result is NaN.
        fn f64_div(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
            (lhs / rhs).non_nan()
        }

        /// Fold `sqrt` for f64; `None` if the result is NaN.
        fn f64_sqrt(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.sqrt().non_nan()
        }

        /// Fold `ceil` for f64; `None` if the result is NaN.
        fn f64_ceil(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.ceil().non_nan()
        }

        /// Fold `floor` for f64; `None` if the result is NaN.
        fn f64_floor(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.floor().non_nan()
        }

        /// Fold `trunc` for f64; `None` if the result is NaN.
        fn f64_trunc(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.trunc().non_nan()
        }

        /// Fold `nearest` (round half to even) for f64; `None` if NaN.
        fn f64_nearest(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.round_ties_even().non_nan()
        }

        /// Fold `fmin` for f64; `None` if the result is NaN.
        fn f64_min(&mut self, a: Ieee64, b: Ieee64) -> Option<Ieee64> {
            a.minimum(b).non_nan()
        }

        /// Fold `fmax` for f64; `None` if the result is NaN.
        fn f64_max(&mut self, a: Ieee64, b: Ieee64) -> Option<Ieee64> {
            a.maximum(b).non_nan()
        }

        /// Fold `fneg` for f64 (pure sign-bit flip; always foldable).
        fn f64_neg(&mut self, n: Ieee64) -> Ieee64 {
            -n
        }

        /// Fold `fabs` for f64.
        fn f64_abs(&mut self, n: Ieee64) -> Ieee64 {
            n.abs()
        }

        /// Fold `fcopysign` for f64.
        fn f64_copysign(&mut self, a: Ieee64, b: Ieee64) -> Ieee64 {
            a.copysign(b)
        }

        /// Fold `fmin` for f128; `None` if the result is NaN.
        fn f128_min(&mut self, a: Ieee128, b: Ieee128) -> Option<Ieee128> {
            a.minimum(b).non_nan()
        }

        /// Fold `fmax` for f128; `None` if the result is NaN.
        fn f128_max(&mut self, a: Ieee128, b: Ieee128) -> Option<Ieee128> {
            a.maximum(b).non_nan()
        }

        /// Fold `fneg` for f128 (pure sign-bit flip; always foldable).
        fn f128_neg(&mut self, n: Ieee128) -> Ieee128 {
            -n
        }

        /// Fold `fabs` for f128.
        fn f128_abs(&mut self, n: Ieee128) -> Ieee128 {
            n.abs()
        }

        /// Fold `fcopysign` for f128.
        fn f128_copysign(&mut self, a: Ieee128, b: Ieee128) -> Ieee128 {
            a.copysign(b)
        }
937
        /// The instruction that defines `val`, if it is an instruction result
        /// (as opposed to, e.g., a block parameter).
        #[inline]
        fn def_inst(&mut self, val: Value) -> Option<Inst> {
            self.dfg().value_def(val).inst()
        }
942    };
943}