// wit_bindgen_core/abi.rs
1use std::fmt;
2use std::iter;
3
4use wit_parser::Param;
5pub use wit_parser::abi::{AbiVariant, FlatTypes, WasmSignature, WasmType};
6use wit_parser::{
7 Alignment, ArchitectureSize, ElementInfo, Enum, Flags, FlagsRepr, Function, Handle, Int,
8 Record, Resolve, Result_, SizeAlign, Tuple, Type, TypeDefKind, TypeId, Variant, align_to_arch,
9};
10
// Helper macro for defining instructions without having to have tons of
// exhaustive `match` statements to update.
//
// Each instruction is declared as
//
//     Name { field: Ty, .. } : [num_popped] => [num_pushed],
//
// where `num_popped`/`num_pushed` are expressions (which may reference the
// declared fields) for how many operands the instruction consumes from and
// produces onto the value stack. The macro expands to the enum definition
// itself plus `operands_len`/`results_len` accessors that evaluate those
// expressions per variant.
macro_rules! def_instruction {
    (
        $( #[$enum_attr:meta] )*
        pub enum $name:ident<'a> {
            $(
                $( #[$attr:meta] )*
                $variant:ident $( {
                    $($field:ident : $field_ty:ty $(,)* )*
                } )?
                :
                [$num_popped:expr] => [$num_pushed:expr],
            )*
        }
    ) => {
        $( #[$enum_attr] )*
        pub enum $name<'a> {
            $(
                $( #[$attr] )*
                $variant $( {
                    $(
                        $field : $field_ty,
                    )*
                } )? ,
            )*
        }

        impl $name<'_> {
            /// How many operands does this instruction pop from the stack?
            #[allow(unused_variables, reason = "match arms bind fields for exhaustiveness, not usage")]
            pub fn operands_len(&self) -> usize {
                match self {
                    $(
                        // Fields are bound (not `..`-elided) so that adding a
                        // field to a variant forces this match to be revisited.
                        Self::$variant $( {
                            $(
                                $field,
                            )*
                        } )? => $num_popped,
                    )*
                }
            }

            /// How many results does this instruction push onto the stack?
            #[allow(unused_variables, reason = "match arms bind fields for exhaustiveness, not usage")]
            pub fn results_len(&self) -> usize {
                match self {
                    $(
                        Self::$variant $( {
                            $(
                                $field,
                            )*
                        } )? => $num_pushed,
                    )*
                }
            }
        }
    };
}
70
71def_instruction! {
72 #[derive(Debug)]
73 pub enum Instruction<'a> {
74 /// Acquires the specified parameter and places it on the stack.
75 /// Depending on the context this may refer to wasm parameters or
76 /// interface types parameters.
77 GetArg { nth: usize } : [0] => [1],
78
79 // Integer const/manipulation instructions
80
81 /// Pushes the constant `val` onto the stack.
82 I32Const { val: i32 } : [0] => [1],
83 /// Casts the top N items on the stack using the `Bitcast` enum
84 /// provided. Consumes the same number of operands that this produces.
85 Bitcasts { casts: &'a [Bitcast] } : [casts.len()] => [casts.len()],
86 /// Pushes a number of constant zeros for each wasm type on the stack.
87 ConstZero { tys: &'a [WasmType] } : [0] => [tys.len()],
88
89 // Memory load/store instructions
90
91 /// Pops a pointer from the stack and loads a little-endian `i32` from
92 /// it, using the specified constant offset.
93 I32Load { offset: ArchitectureSize } : [1] => [1],
94 /// Pops a pointer from the stack and loads a little-endian `i8` from
95 /// it, using the specified constant offset. The value loaded is the
96 /// zero-extended to 32-bits
97 I32Load8U { offset: ArchitectureSize } : [1] => [1],
98 /// Pops a pointer from the stack and loads a little-endian `i8` from
99 /// it, using the specified constant offset. The value loaded is the
100 /// sign-extended to 32-bits
101 I32Load8S { offset: ArchitectureSize } : [1] => [1],
102 /// Pops a pointer from the stack and loads a little-endian `i16` from
103 /// it, using the specified constant offset. The value loaded is the
104 /// zero-extended to 32-bits
105 I32Load16U { offset: ArchitectureSize } : [1] => [1],
106 /// Pops a pointer from the stack and loads a little-endian `i16` from
107 /// it, using the specified constant offset. The value loaded is the
108 /// sign-extended to 32-bits
109 I32Load16S { offset: ArchitectureSize } : [1] => [1],
110 /// Pops a pointer from the stack and loads a little-endian `i64` from
111 /// it, using the specified constant offset.
112 I64Load { offset: ArchitectureSize } : [1] => [1],
113 /// Pops a pointer from the stack and loads a little-endian `f32` from
114 /// it, using the specified constant offset.
115 F32Load { offset: ArchitectureSize } : [1] => [1],
116 /// Pops a pointer from the stack and loads a little-endian `f64` from
117 /// it, using the specified constant offset.
118 F64Load { offset: ArchitectureSize } : [1] => [1],
119
120 /// Like `I32Load` or `I64Load`, but for loading pointer values.
121 PointerLoad { offset: ArchitectureSize } : [1] => [1],
122 /// Like `I32Load` or `I64Load`, but for loading array length values.
123 LengthLoad { offset: ArchitectureSize } : [1] => [1],
124
125 /// Pops a pointer from the stack and then an `i32` value.
126 /// Stores the value in little-endian at the pointer specified plus the
127 /// constant `offset`.
128 I32Store { offset: ArchitectureSize } : [2] => [0],
129 /// Pops a pointer from the stack and then an `i32` value.
130 /// Stores the low 8 bits of the value in little-endian at the pointer
131 /// specified plus the constant `offset`.
132 I32Store8 { offset: ArchitectureSize } : [2] => [0],
133 /// Pops a pointer from the stack and then an `i32` value.
134 /// Stores the low 16 bits of the value in little-endian at the pointer
135 /// specified plus the constant `offset`.
136 I32Store16 { offset: ArchitectureSize } : [2] => [0],
137 /// Pops a pointer from the stack and then an `i64` value.
138 /// Stores the value in little-endian at the pointer specified plus the
139 /// constant `offset`.
140 I64Store { offset: ArchitectureSize } : [2] => [0],
141 /// Pops a pointer from the stack and then an `f32` value.
142 /// Stores the value in little-endian at the pointer specified plus the
143 /// constant `offset`.
144 F32Store { offset: ArchitectureSize } : [2] => [0],
145 /// Pops a pointer from the stack and then an `f64` value.
146 /// Stores the value in little-endian at the pointer specified plus the
147 /// constant `offset`.
148 F64Store { offset: ArchitectureSize } : [2] => [0],
149
150 /// Like `I32Store` or `I64Store`, but for storing pointer values.
151 PointerStore { offset: ArchitectureSize } : [2] => [0],
152 /// Like `I32Store` or `I64Store`, but for storing array length values.
153 LengthStore { offset: ArchitectureSize } : [2] => [0],
154
155 // Scalar lifting/lowering
156
157 /// Converts an interface type `char` value to a 32-bit integer
158 /// representing the unicode scalar value.
159 I32FromChar : [1] => [1],
160 /// Converts an interface type `u64` value to a wasm `i64`.
161 I64FromU64 : [1] => [1],
162 /// Converts an interface type `s64` value to a wasm `i64`.
163 I64FromS64 : [1] => [1],
164 /// Converts an interface type `u32` value to a wasm `i32`.
165 I32FromU32 : [1] => [1],
166 /// Converts an interface type `s32` value to a wasm `i32`.
167 I32FromS32 : [1] => [1],
168 /// Converts an interface type `u16` value to a wasm `i32`.
169 I32FromU16 : [1] => [1],
170 /// Converts an interface type `s16` value to a wasm `i32`.
171 I32FromS16 : [1] => [1],
172 /// Converts an interface type `u8` value to a wasm `i32`.
173 I32FromU8 : [1] => [1],
174 /// Converts an interface type `s8` value to a wasm `i32`.
175 I32FromS8 : [1] => [1],
176 /// Conversion an interface type `f32` value to a wasm `f32`.
177 ///
178 /// This may be a noop for some implementations, but it's here in case the
179 /// native language representation of `f32` is different than the wasm
180 /// representation of `f32`.
181 CoreF32FromF32 : [1] => [1],
182 /// Conversion an interface type `f64` value to a wasm `f64`.
183 ///
184 /// This may be a noop for some implementations, but it's here in case the
185 /// native language representation of `f64` is different than the wasm
186 /// representation of `f64`.
187 CoreF64FromF64 : [1] => [1],
188
189 /// Converts a native wasm `i32` to an interface type `s8`.
190 ///
191 /// This will truncate the upper bits of the `i32`.
192 S8FromI32 : [1] => [1],
193 /// Converts a native wasm `i32` to an interface type `u8`.
194 ///
195 /// This will truncate the upper bits of the `i32`.
196 U8FromI32 : [1] => [1],
197 /// Converts a native wasm `i32` to an interface type `s16`.
198 ///
199 /// This will truncate the upper bits of the `i32`.
200 S16FromI32 : [1] => [1],
201 /// Converts a native wasm `i32` to an interface type `u16`.
202 ///
203 /// This will truncate the upper bits of the `i32`.
204 U16FromI32 : [1] => [1],
205 /// Converts a native wasm `i32` to an interface type `s32`.
206 S32FromI32 : [1] => [1],
207 /// Converts a native wasm `i32` to an interface type `u32`.
208 U32FromI32 : [1] => [1],
209 /// Converts a native wasm `i64` to an interface type `s64`.
210 S64FromI64 : [1] => [1],
211 /// Converts a native wasm `i64` to an interface type `u64`.
212 U64FromI64 : [1] => [1],
213 /// Converts a native wasm `i32` to an interface type `char`.
214 ///
215 /// It's safe to assume that the `i32` is indeed a valid unicode code point.
216 CharFromI32 : [1] => [1],
217 /// Converts a native wasm `f32` to an interface type `f32`.
218 F32FromCoreF32 : [1] => [1],
219 /// Converts a native wasm `f64` to an interface type `f64`.
220 F64FromCoreF64 : [1] => [1],
221
222 /// Creates a `bool` from an `i32` input, trapping if the `i32` isn't
223 /// zero or one.
224 BoolFromI32 : [1] => [1],
225 /// Creates an `i32` from a `bool` input, must return 0 or 1.
226 I32FromBool : [1] => [1],
227
228 // lists
229
230 /// Lowers a list where the element's layout in the native language is
231 /// expected to match the canonical ABI definition of interface types.
232 ///
233 /// Pops a list value from the stack and pushes the pointer/length onto
234 /// the stack. If `realloc` is set to `Some` then this is expected to
235 /// *consume* the list which means that the data needs to be copied. An
236 /// allocation/copy is expected when:
237 ///
238 /// * A host is calling a wasm export with a list (it needs to copy the
239 /// list in to the callee's module, allocating space with `realloc`)
240 /// * A wasm export is returning a list (it's expected to use `realloc`
241 /// to give ownership of the list to the caller.
242 /// * A host is returning a list in a import definition, meaning that
243 /// space needs to be allocated in the caller with `realloc`).
244 ///
245 /// A copy does not happen (e.g. `realloc` is `None`) when:
246 ///
247 /// * A wasm module calls an import with the list. In this situation
248 /// it's expected the caller will know how to access this module's
249 /// memory (e.g. the host has raw access or wasm-to-wasm communication
250 /// would copy the list).
251 ///
252 /// If `realloc` is `Some` then the adapter is not responsible for
253 /// cleaning up this list because the other end is receiving the
254 /// allocation. If `realloc` is `None` then the adapter is responsible
255 /// for cleaning up any temporary allocation it created, if any.
256 ListCanonLower {
257 element: &'a Type,
258 realloc: Option<&'a str>,
259 } : [1] => [2],
260
261 /// Same as `ListCanonLower`, but used for strings
262 StringLower {
263 realloc: Option<&'a str>,
264 } : [1] => [2],
265
266 /// Lowers a list where the element's layout in the native language is
267 /// not expected to match the canonical ABI definition of interface
268 /// types.
269 ///
270 /// Pops a list value from the stack and pushes the pointer/length onto
271 /// the stack. This operation also pops a block from the block stack
272 /// which is used as the iteration body of writing each element of the
273 /// list consumed.
274 ///
275 /// The `realloc` field here behaves the same way as `ListCanonLower`.
276 /// It's only set to `None` when a wasm module calls a declared import.
277 /// Otherwise lowering in other contexts requires allocating memory for
278 /// the receiver to own.
279 ListLower {
280 element: &'a Type,
281 realloc: Option<&'a str>,
282 } : [1] => [2],
283
284 /// Lifts a list which has a canonical representation into an interface
285 /// types value.
286 ///
287 /// The term "canonical" representation here means that the
288 /// representation of the interface types value in the native language
289 /// exactly matches the canonical ABI definition of the type.
290 ///
291 /// This will consume two `i32` values from the stack, a pointer and a
292 /// length, and then produces an interface value list.
293 ListCanonLift {
294 element: &'a Type,
295 ty: TypeId,
296 } : [2] => [1],
297
298 /// Same as `ListCanonLift`, but used for strings
299 StringLift : [2] => [1],
300
301 /// Lifts a list which into an interface types value.
302 ///
303 /// This will consume two `i32` values from the stack, a pointer and a
304 /// length, and then produces an interface value list.
305 ///
306 /// This will also pop a block from the block stack which is how to
307 /// read each individual element from the list.
308 ListLift {
309 element: &'a Type,
310 ty: TypeId,
311 } : [2] => [1],
312
313 /// Lowers a map into a canonical pointer/length pair.
314 ///
315 /// This operation pops a map value from the stack and pushes pointer
316 /// and length. A block is popped from the block stack to lower one
317 /// key/value entry into linear memory.
318 MapLower {
319 key: &'a Type,
320 value: &'a Type,
321 realloc: Option<&'a str>,
322 } : [1] => [2],
323
324 /// Lifts a canonical pointer/length pair into a map.
325 ///
326 /// This operation consumes pointer and length from the stack. A block
327 /// is popped from the block stack and must produce key/value for one
328 /// map entry.
329 MapLift {
330 key: &'a Type,
331 value: &'a Type,
332 ty: TypeId,
333 } : [2] => [1],
334
335 /// Pops all fields for a fixed list off the stack and then composes them
336 /// into an array.
337 FixedLengthListLift {
338 element: &'a Type,
339 size: u32,
340 id: TypeId,
341 } : [*size as usize] => [1],
342
343 /// Pops an array off the stack, decomposes the elements and then pushes them onto the stack.
344 FixedLengthListLower {
345 element: &'a Type,
346 size: u32,
347 id: TypeId,
348 } : [1] => [*size as usize],
349
350 /// Pops an array and an address off the stack, passes each element to a block storing it
351 FixedLengthListLowerToMemory {
352 element: &'a Type,
353 size: u32,
354 id: TypeId,
355 } : [2] => [0],
356
357 /// Pops base address, pushes an array
358 ///
359 /// This will also pop a block from the block stack which is how to
360 /// read each individual element from the list.
361 FixedLengthListLiftFromMemory {
362 element: &'a Type,
363 size: u32,
364 id: TypeId,
365 } : [1] => [1],
366
367
368 /// Pushes an operand onto the stack representing the list item from
369 /// each iteration of the list.
370 ///
371 /// This is only used inside of blocks related to lowering lists.
372 IterElem { element: &'a Type } : [0] => [1],
373
374 /// Pushes an operand onto the stack representing the current map key
375 /// for each map iteration.
376 IterMapKey { key: &'a Type } : [0] => [1],
377
378 /// Pushes an operand onto the stack representing the current map value
379 /// for each map iteration.
380 IterMapValue { value: &'a Type } : [0] => [1],
381
382 /// Pushes an operand onto the stack representing the base pointer of
383 /// the next element in a list.
384 ///
385 /// This is used for both lifting and lowering lists.
386 IterBasePointer : [0] => [1],
387
388 // records and tuples
389
390 /// Pops a record value off the stack, decomposes the record to all of
391 /// its fields, and then pushes the fields onto the stack.
392 RecordLower {
393 record: &'a Record,
394 name: &'a str,
395 ty: TypeId,
396 } : [1] => [record.fields.len()],
397
398 /// Pops all fields for a record off the stack and then composes them
399 /// into a record.
400 RecordLift {
401 record: &'a Record,
402 name: &'a str,
403 ty: TypeId,
404 } : [record.fields.len()] => [1],
405
406 /// Create an `i32` from a handle.
407 HandleLower {
408 handle: &'a Handle,
409 name: &'a str,
410 ty: TypeId,
411 } : [1] => [1],
412
413 /// Create a handle from an `i32`.
414 HandleLift {
415 handle: &'a Handle,
416 name: &'a str,
417 ty: TypeId,
418 } : [1] => [1],
419
420 /// Create an `i32` from a future.
421 FutureLower {
422 payload: &'a Option<Type>,
423 ty: TypeId,
424 } : [1] => [1],
425
426 /// Create a future from an `i32`.
427 FutureLift {
428 payload: &'a Option<Type>,
429 ty: TypeId,
430 } : [1] => [1],
431
432 /// Create an `i32` from a stream.
433 StreamLower {
434 payload: &'a Option<Type>,
435 ty: TypeId,
436 } : [1] => [1],
437
438 /// Create a stream from an `i32`.
439 StreamLift {
440 payload: &'a Option<Type>,
441 ty: TypeId,
442 } : [1] => [1],
443
444 /// Create an `i32` from an error-context.
445 ErrorContextLower : [1] => [1],
446
447 /// Create a error-context from an `i32`.
448 ErrorContextLift : [1] => [1],
449
450 /// Pops a tuple value off the stack, decomposes the tuple to all of
451 /// its fields, and then pushes the fields onto the stack.
452 TupleLower {
453 tuple: &'a Tuple,
454 ty: TypeId,
455 } : [1] => [tuple.types.len()],
456
457 /// Pops all fields for a tuple off the stack and then composes them
458 /// into a tuple.
459 TupleLift {
460 tuple: &'a Tuple,
461 ty: TypeId,
462 } : [tuple.types.len()] => [1],
463
464 /// Converts a language-specific record-of-bools to a list of `i32`.
465 FlagsLower {
466 flags: &'a Flags,
467 name: &'a str,
468 ty: TypeId,
469 } : [1] => [flags.repr().count()],
470 /// Converts a list of native wasm `i32` to a language-specific
471 /// record-of-bools.
472 FlagsLift {
473 flags: &'a Flags,
474 name: &'a str,
475 ty: TypeId,
476 } : [flags.repr().count()] => [1],
477
478 // variants
479
480 /// This is a special instruction used for `VariantLower`
481 /// instruction to determine the name of the payload, if present, to use
482 /// within each block.
483 ///
484 /// Each sub-block will have this be the first instruction, and if it
485 /// lowers a payload it will expect something bound to this name.
486 VariantPayloadName : [0] => [1],
487
488 /// Pops a variant off the stack as well as `ty.cases.len()` blocks
489 /// from the code generator. Uses each of those blocks and the value
490 /// from the stack to produce `nresults` of items.
491 VariantLower {
492 variant: &'a Variant,
493 name: &'a str,
494 ty: TypeId,
495 results: &'a [WasmType],
496 } : [1] => [results.len()],
497
498 /// Pops an `i32` off the stack as well as `ty.cases.len()` blocks
499 /// from the code generator. Uses each of those blocks and the value
500 /// from the stack to produce a final variant.
501 VariantLift {
502 variant: &'a Variant,
503 name: &'a str,
504 ty: TypeId,
505 } : [1] => [1],
506
507 /// Pops an enum off the stack and pushes the `i32` representation.
508 EnumLower {
509 enum_: &'a Enum,
510 name: &'a str,
511 ty: TypeId,
512 } : [1] => [1],
513
514 /// Pops an `i32` off the stack and lifts it into the `enum` specified.
515 EnumLift {
516 enum_: &'a Enum,
517 name: &'a str,
518 ty: TypeId,
519 } : [1] => [1],
520
521 /// Specialization of `VariantLower` for specifically `option<T>` types,
522 /// otherwise behaves the same as `VariantLower` (e.g. two blocks for
523 /// the two cases.
524 OptionLower {
525 payload: &'a Type,
526 ty: TypeId,
527 results: &'a [WasmType],
528 } : [1] => [results.len()],
529
530 /// Specialization of `VariantLift` for specifically the `option<T>`
531 /// type. Otherwise behaves the same as the `VariantLift` instruction
532 /// with two blocks for the lift.
533 OptionLift {
534 payload: &'a Type,
535 ty: TypeId,
536 } : [1] => [1],
537
538 /// Specialization of `VariantLower` for specifically `result<T, E>`
539 /// types, otherwise behaves the same as `VariantLower` (e.g. two blocks
540 /// for the two cases.
541 ResultLower {
542 result: &'a Result_
543 ty: TypeId,
544 results: &'a [WasmType],
545 } : [1] => [results.len()],
546
547 /// Specialization of `VariantLift` for specifically the `result<T,
548 /// E>` type. Otherwise behaves the same as the `VariantLift`
549 /// instruction with two blocks for the lift.
550 ResultLift {
551 result: &'a Result_,
552 ty: TypeId,
553 } : [1] => [1],
554
555 // calling/control flow
556
557 /// Represents a call to a raw WebAssembly API. The module/name are
558 /// provided inline as well as the types if necessary.
559 CallWasm {
560 name: &'a str,
561 sig: &'a WasmSignature,
562 } : [sig.params.len()] => [sig.results.len()],
563
564 /// Same as `CallWasm`, except the dual where an interface is being
565 /// called rather than a raw wasm function.
566 ///
567 /// Note that this will be used for async functions, and `async_`
568 /// indicates whether the function should be invoked in an async
569 /// fashion.
570 CallInterface {
571 func: &'a Function,
572 async_: bool,
573 } : [func.params.len()] => [usize::from(func.result.is_some())],
574
575 /// Returns `amt` values on the stack. This is always the last
576 /// instruction.
577 Return { amt: usize, func: &'a Function } : [*amt] => [0],
578
579 /// Calls the `realloc` function specified in a malloc-like fashion
580 /// allocating `size` bytes with alignment `align`.
581 ///
582 /// Pushes the returned pointer onto the stack.
583 Malloc {
584 realloc: &'static str,
585 size: ArchitectureSize,
586 align: Alignment,
587 } : [0] => [1],
588
589 /// Used exclusively for guest-code generation this indicates that
590 /// the standard memory deallocation function needs to be invoked with
591 /// the specified parameters.
592 ///
593 /// This will pop a pointer from the stack and push nothing.
594 GuestDeallocate {
595 size: ArchitectureSize,
596 align: Alignment,
597 } : [1] => [0],
598
599 /// Used exclusively for guest-code generation this indicates that
600 /// a string is being deallocated. The ptr/length are on the stack and
601 /// are poppped off and used to deallocate the string.
602 GuestDeallocateString : [2] => [0],
603
604 /// Used exclusively for guest-code generation this indicates that
605 /// a list is being deallocated. The ptr/length are on the stack and
606 /// are poppped off and used to deallocate the list.
607 ///
608 /// This variant also pops a block off the block stack to be used as the
609 /// body of the deallocation loop.
610 GuestDeallocateList {
611 element: &'a Type,
612 } : [2] => [0],
613
614 /// Used exclusively for guest-code generation this indicates that a
615 /// map is being deallocated. The ptr/length are on the stack and are
616 /// popped off and used to deallocate the map entry buffer.
617 ///
618 /// This variant also pops a block off the block stack to be used as
619 /// the body of the deallocation loop over map entries.
620 GuestDeallocateMap {
621 key: &'a Type,
622 value: &'a Type,
623 } : [2] => [0],
624
625 /// Used exclusively for guest-code generation this indicates that
626 /// a variant is being deallocated. The integer discriminant is popped
627 /// off the stack as well as `blocks` number of blocks popped from the
628 /// blocks stack. The variant is used to select, at runtime, which of
629 /// the blocks is executed to deallocate the variant.
630 GuestDeallocateVariant {
631 blocks: usize,
632 } : [1] => [0],
633
634 /// Deallocates the language-specific handle representation on the top
635 /// of the stack. Used for async imports.
636 DropHandle { ty: &'a Type } : [1] => [0],
637
638 /// Call `task.return` for an async-lifted export.
639 ///
640 /// This will call core wasm import `name` which will be mapped to
641 /// `task.return` later on. The function given has `params` as its
642 /// parameters and it will return no results. This is used to pass the
643 /// lowered representation of a function's results to `task.return`.
644 AsyncTaskReturn { name: &'a str, params: &'a [WasmType] } : [params.len()] => [0],
645
646 /// Force the evaluation of the specified number of expressions and push
647 /// the results to the stack.
648 ///
649 /// This is useful prior to disposing of temporary variables and/or
650 /// allocations which are referenced by one or more not-yet-evaluated
651 /// expressions.
652 Flush { amt: usize } : [*amt] => [*amt],
653 }
654}
655
/// The individual conversions performed by the `Bitcasts` instruction when
/// moving values between the flattened core wasm representations of a type.
#[derive(Debug, PartialEq)]
pub enum Bitcast {
    // Upcasts
    F32ToI32,
    F64ToI64,
    I32ToI64,
    F32ToI64,

    // Downcasts
    I32ToF32,
    I64ToF64,
    I64ToI32,
    I64ToF32,

    // PointerOrI64 conversions. These preserve provenance when the source
    // or destination is a pointer value.
    //
    // These are used when pointer values are being stored in
    // (ToP64) and loaded out of (P64To) PointerOrI64 values, so they
    // always have to preserve provenance when the value being loaded or
    // stored is a pointer.
    P64ToI64,
    I64ToP64,
    P64ToP,
    PToP64,

    // Pointer<->number conversions. These do not preserve provenance.
    //
    // These are used when integer or floating-point values are being stored in
    // (I32ToP/etc.) and loaded out of (PToI32/etc.) pointer values, so they
    // never have any provenance to preserve.
    I32ToP,
    PToI32,
    PToL,
    LToP,

    // Number<->Number conversions.
    I32ToL,
    LToI32,
    I64ToL,
    LToI64,

    // Multiple conversions in sequence: the first cast is applied, then the
    // second is applied to its result.
    Sequence(Box<[Bitcast; 2]>),

    // No conversion necessary; the value is passed through unchanged.
    None,
}
703
/// Whether the glue code surrounding a call is lifting arguments and lowering
/// results or vice versa.
///
/// The direction is determined by which side of the call boundary the
/// generated glue code sits on.
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum LiftLower {
    /// When the glue code lifts arguments and lowers results.
    ///
    /// ```text
    /// Wasm --lift-args--> SourceLanguage; call; SourceLanguage --lower-results--> Wasm
    /// ```
    LiftArgsLowerResults,
    /// When the glue code lowers arguments and lifts results.
    ///
    /// ```text
    /// SourceLanguage --lower-args--> Wasm; call; Wasm --lift-results--> SourceLanguage
    /// ```
    LowerArgsLiftResults,
}
721
/// Trait for language implementors to use to generate glue code between native
/// WebAssembly signatures and interface types signatures.
///
/// This is used as an implementation detail in interpreting the ABI between
/// interface types and wasm types. Eventually this will be driven by interface
/// types adapters themselves, but for now the ABI of a function dictates what
/// instructions are fed in.
///
/// Types implementing `Bindgen` are incrementally fed `Instruction` values to
/// generate code for. Instructions operate like a stack machine where each
/// instruction has a list of inputs and a list of outputs (provided by the
/// `emit` function).
pub trait Bindgen {
    /// The intermediate type for fragments of code for this type.
    ///
    /// For most languages `String` is a suitable intermediate type.
    type Operand: Clone + fmt::Debug;

    /// Emit code to implement the given instruction.
    ///
    /// Each operand is given in `operands` and can be popped off if ownership
    /// is required. It's guaranteed that `operands` has the appropriate length
    /// for the `inst` given, as specified with [`Instruction`].
    ///
    /// Each result variable should be pushed onto `results`. This function must
    /// push the appropriate number of results or binding generation will panic.
    fn emit(
        &mut self,
        resolve: &Resolve,
        inst: &Instruction<'_>,
        operands: &mut Vec<Self::Operand>,
        results: &mut Vec<Self::Operand>,
    );

    /// Gets an operand reference to the return pointer area.
    ///
    /// The provided size and alignment is for the function's return type.
    fn return_pointer(&mut self, size: ArchitectureSize, align: Alignment) -> Self::Operand;

    /// Enters a new block of code to generate code for.
    ///
    /// This is currently exclusively used for constructing variants. When a
    /// variant is constructed a block here will be pushed for each case of a
    /// variant, generating the code necessary to translate a variant case.
    ///
    /// Blocks are completed with `finish_block` below. It's expected that `emit`
    /// will always push code (if necessary) into the "current block", which is
    /// updated by calling this method and `finish_block` below.
    fn push_block(&mut self);

    /// Indicates to the code generator that a block is completed, and the
    /// `operand` specified was the resulting value of the block.
    ///
    /// This method will be used to compute the value of each arm of lifting a
    /// variant. The `operand` will be `None` if the variant case didn't
    /// actually have any type associated with it. Otherwise it will be `Some`
    /// as the last value remaining on the stack representing the value
    /// associated with a variant's `case`.
    ///
    /// It's expected that this will resume code generation in the previous
    /// block before `push_block` was called. This must also save the results
    /// of the current block internally for instructions like `ResultLift` to
    /// use later.
    fn finish_block(&mut self, operand: &mut Vec<Self::Operand>);

    /// Returns size information that was previously calculated for all types.
    fn sizes(&self) -> &SizeAlign;

    /// Returns whether or not the specified element type is represented in a
    /// "canonical" form for lists. This dictates whether the `ListCanonLower`
    /// and `ListCanonLift` instructions are used or not.
    fn is_list_canonical(&self, resolve: &Resolve, element: &Type) -> bool;
}
795
796/// Generates an abstract sequence of instructions which represents this
797/// function being adapted as an imported function.
798///
799/// The instructions here, when executed, will emulate a language with
800/// interface types calling the concrete wasm implementation. The parameters
801/// for the returned instruction sequence are the language's own
802/// interface-types parameters. One instruction in the instruction stream
803/// will be a `Call` which represents calling the actual raw wasm function
804/// signature.
805///
806/// This function is useful, for example, if you're building a language
807/// generator for WASI bindings. This will document how to translate
808/// language-specific values into the wasm types to call a WASI function,
809/// and it will also automatically convert the results of the WASI function
810/// back to a language-specific value.
811pub fn call(
812 resolve: &Resolve,
813 variant: AbiVariant,
814 lift_lower: LiftLower,
815 func: &Function,
816 bindgen: &mut impl Bindgen,
817 async_: bool,
818) {
819 Generator::new(resolve, bindgen).call(func, variant, lift_lower, async_);
820}
821
822pub fn lower_to_memory<B: Bindgen>(
823 resolve: &Resolve,
824 bindgen: &mut B,
825 address: B::Operand,
826 value: B::Operand,
827 ty: &Type,
828) {
829 let mut generator = Generator::new(resolve, bindgen);
830 // TODO: make this configurable? Right now this function is only called for
831 // future/stream callbacks so it's appropriate to skip realloc here as it's
832 // all "lower for wasm import", but this might get reused for something else
833 // in the future.
834 generator.realloc = Some(Realloc::Export("cabi_realloc"));
835 generator.stack.push(value);
836 generator.write_to_memory(ty, address, Default::default());
837}
838
839pub fn lower_flat<B: Bindgen>(
840 resolve: &Resolve,
841 bindgen: &mut B,
842 value: B::Operand,
843 ty: &Type,
844) -> Vec<B::Operand> {
845 let mut generator = Generator::new(resolve, bindgen);
846 generator.stack.push(value);
847 generator.realloc = Some(Realloc::Export("cabi_realloc"));
848 generator.lower(ty);
849 generator.stack
850}
851
852pub fn lift_from_memory<B: Bindgen>(
853 resolve: &Resolve,
854 bindgen: &mut B,
855 address: B::Operand,
856 ty: &Type,
857) -> B::Operand {
858 let mut generator = Generator::new(resolve, bindgen);
859 generator.read_from_memory(ty, address, Default::default());
860 generator.stack.pop().unwrap()
861}
862
863/// Used in a similar manner as the `Interface::call` function except is
864/// used to generate the `post-return` callback for `func`.
865///
866/// This is only intended to be used in guest generators for exported
867/// functions and will primarily generate `GuestDeallocate*` instructions,
868/// plus others used as input to those instructions.
869pub fn post_return(resolve: &Resolve, func: &Function, bindgen: &mut impl Bindgen) {
870 Generator::new(resolve, bindgen).post_return(func);
871}
872
873/// Returns whether the `Function` specified needs a post-return function to
874/// be generated in guest code.
875///
876/// This is used when the return value contains a memory allocation such as
877/// a list or a string primarily.
878pub fn guest_export_needs_post_return(resolve: &Resolve, func: &Function) -> bool {
879 func.result
880 .map(|t| needs_deallocate(resolve, &t, Deallocate::Lists))
881 .unwrap_or(false)
882}
883
884pub fn guest_export_params_have_allocations(resolve: &Resolve, func: &Function) -> bool {
885 func.params
886 .iter()
887 .any(|param| needs_deallocate(resolve, ¶m.ty, Deallocate::Lists))
888}
889
/// Returns whether a value of type `ty` requires deallocation of the kind
/// described by `what` after use.
///
/// Strings, lists, and maps always own memory; aggregates require
/// deallocation if any constituent type does; handles and futures/streams
/// only count when `what` includes owned handles.
fn needs_deallocate(resolve: &Resolve, ty: &Type, what: Deallocate) -> bool {
    match ty {
        Type::String => true,
        Type::ErrorContext => true,
        Type::Id(id) => match &resolve.types[*id].kind {
            TypeDefKind::List(_) => true,
            // Type aliases delegate to the aliased type.
            TypeDefKind::Type(t) => needs_deallocate(resolve, t, what),
            // Owned handles are only deallocated when `what.handles()`.
            TypeDefKind::Handle(Handle::Own(_)) => what.handles(),
            TypeDefKind::Handle(Handle::Borrow(_)) => false,
            TypeDefKind::Resource => false,
            TypeDefKind::Record(r) => r
                .fields
                .iter()
                .any(|f| needs_deallocate(resolve, &f.ty, what)),
            TypeDefKind::Tuple(t) => t.types.iter().any(|t| needs_deallocate(resolve, t, what)),
            // A variant needs deallocation if any payload-carrying case does.
            TypeDefKind::Variant(t) => t
                .cases
                .iter()
                .filter_map(|t| t.ty.as_ref())
                .any(|t| needs_deallocate(resolve, t, what)),
            TypeDefKind::Option(t) => needs_deallocate(resolve, t, what),
            TypeDefKind::Result(t) => [&t.ok, &t.err]
                .iter()
                .filter_map(|t| t.as_ref())
                .any(|t| needs_deallocate(resolve, t, what)),
            TypeDefKind::Flags(_) | TypeDefKind::Enum(_) => false,
            // Futures and streams are handle-like: only deallocated when owned
            // handles are in scope for `what`.
            TypeDefKind::Future(_) | TypeDefKind::Stream(_) => what.handles(),
            TypeDefKind::Unknown => unreachable!(),
            TypeDefKind::FixedLengthList(t, _) => needs_deallocate(resolve, t, what),
            // Maps are backed by an allocated entry list.
            TypeDefKind::Map(_, _) => true,
        },

        // Scalar primitives never own memory.
        Type::Bool
        | Type::U8
        | Type::S8
        | Type::U16
        | Type::S16
        | Type::U32
        | Type::S32
        | Type::U64
        | Type::S64
        | Type::F32
        | Type::F64
        | Type::Char => false,
    }
}
936
/// Generate instructions in `bindgen` to deallocate all lists found in the
/// given `operands`, which represent a sequence of `types`.
///
/// When `indirect` is set, `operands` is a single pointer to the values
/// stored in linear memory; otherwise `operands` is the flat representation
/// of `types`.
pub fn deallocate_lists_in_types<B: Bindgen>(
    resolve: &Resolve,
    types: &[Type],
    operands: &[B::Operand],
    indirect: bool,
    bindgen: &mut B,
) {
    Generator::new(resolve, bindgen).deallocate_in_types(
        types,
        operands,
        indirect,
        Deallocate::Lists,
    );
}
953
/// Generate instructions in `bindgen` to deallocate all lists *and owned
/// resources* (e.g. `own<T>` handles, futures, streams) found in the given
/// `operands`, which represent a sequence of `types`.
///
/// Like [`deallocate_lists_in_types`] but additionally drops owned handles;
/// see [`Deallocate::ListsAndOwn`].
pub fn deallocate_lists_and_own_in_types<B: Bindgen>(
    resolve: &Resolve,
    types: &[Type],
    operands: &[B::Operand],
    indirect: bool,
    bindgen: &mut B,
) {
    Generator::new(resolve, bindgen).deallocate_in_types(
        types,
        operands,
        indirect,
        Deallocate::ListsAndOwn,
    );
}
970
/// How memory should be allocated while lowering values.
#[derive(Copy, Clone)]
pub enum Realloc {
    /// No allocator is available; lowering must not need to allocate.
    None,
    /// Allocate by calling the named exported realloc function
    /// (conventionally `"cabi_realloc"`).
    Export(&'static str),
}
976
/// What to deallocate in various `deallocate_*` methods.
#[derive(Copy, Clone)]
enum Deallocate {
    /// Only deallocate lists (and other memory-owning types such as
    /// strings/maps); leave handles alone.
    Lists,
    /// Deallocate lists and owned resources such as `own<T>` and
    /// futures/streams.
    ListsAndOwn,
}
986
987impl Deallocate {
988 fn handles(&self) -> bool {
989 match self {
990 Deallocate::Lists => false,
991 Deallocate::ListsAndOwn => true,
992 }
993 }
994}
995
/// Stack-machine driver that walks types and emits [`Instruction`]s into a
/// [`Bindgen`] backend.
struct Generator<'a, B: Bindgen> {
    // Backend that receives emitted instructions.
    bindgen: &'a mut B,
    // Type information for the component being generated.
    resolve: &'a Resolve,
    // Scratch buffer of operands popped for the current instruction.
    operands: Vec<B::Operand>,
    // Scratch buffer the backend fills with the instruction's results.
    results: Vec<B::Operand>,
    // The virtual operand stack instructions pop from / push to.
    stack: Vec<B::Operand>,
    // Saved return-area pointer for retptr-style guest imports.
    return_pointer: Option<B::Operand>,
    // Allocation strategy for the current lower/lift sequence; `None` when
    // no lowering is in progress.
    realloc: Option<Realloc>,
}
1005
// Maximum number of flattened core-wasm parameters before arguments are
// passed indirectly through memory (canonical ABI limit).
const MAX_FLAT_PARAMS: usize = 16;
// Same limit but for async guest imports, which use a smaller budget.
const MAX_FLAT_ASYNC_PARAMS: usize = 4;
1008
1009impl<'a, B: Bindgen> Generator<'a, B> {
1010 fn new(resolve: &'a Resolve, bindgen: &'a mut B) -> Generator<'a, B> {
1011 Generator {
1012 resolve,
1013 bindgen,
1014 operands: Vec::new(),
1015 results: Vec::new(),
1016 stack: Vec::new(),
1017 return_pointer: None,
1018 realloc: None,
1019 }
1020 }
1021
1022 fn call(&mut self, func: &Function, variant: AbiVariant, lift_lower: LiftLower, async_: bool) {
1023 let sig = self.resolve.wasm_signature(variant, func);
1024
1025 // Lowering parameters calling a wasm import _or_ returning a result
1026 // from an async-lifted wasm export means we don't need to pass
1027 // ownership, but we pass ownership in all other cases.
1028 let realloc = match (variant, lift_lower, async_) {
1029 (AbiVariant::GuestImport, LiftLower::LowerArgsLiftResults, _)
1030 | (
1031 AbiVariant::GuestExport
1032 | AbiVariant::GuestExportAsync
1033 | AbiVariant::GuestExportAsyncStackful,
1034 LiftLower::LiftArgsLowerResults,
1035 true,
1036 ) => Realloc::None,
1037 _ => Realloc::Export("cabi_realloc"),
1038 };
1039 assert!(self.realloc.is_none());
1040
1041 match lift_lower {
1042 LiftLower::LowerArgsLiftResults => {
1043 self.realloc = Some(realloc);
1044
1045 // Create a function that performs individual lowering of operands
1046 let lower_to_memory = |self_: &mut Self, ptr: B::Operand| {
1047 let mut offset = ArchitectureSize::default();
1048 for (nth, Param { ty, .. }) in func.params.iter().enumerate() {
1049 self_.emit(&Instruction::GetArg { nth });
1050 offset = align_to_arch(offset, self_.bindgen.sizes().align(ty));
1051 self_.write_to_memory(ty, ptr.clone(), offset);
1052 offset += self_.bindgen.sizes().size(ty);
1053 }
1054
1055 self_.stack.push(ptr);
1056 };
1057
1058 // Lower parameters
1059 if sig.indirect_params {
1060 // If parameters are indirect space is
1061 // allocated for them and each argument is lowered
1062 // individually into memory.
1063 let ElementInfo { size, align } = self
1064 .bindgen
1065 .sizes()
1066 .record(func.params.iter().map(|param| ¶m.ty));
1067
1068 // Resolve the pointer to the indirectly stored parameters
1069 let ptr = match variant {
1070 // When a wasm module calls an import it will provide
1071 // space that isn't explicitly deallocated.
1072 AbiVariant::GuestImport => self.bindgen.return_pointer(size, align),
1073
1074 AbiVariant::GuestImportAsync => {
1075 todo!("direct param lowering for async guest import not implemented")
1076 }
1077
1078 // When calling a wasm module from the outside, though,
1079 // malloc needs to be called.
1080 AbiVariant::GuestExport => {
1081 self.emit(&Instruction::Malloc {
1082 realloc: "cabi_realloc",
1083 size,
1084 align,
1085 });
1086 self.stack.pop().unwrap()
1087 }
1088
1089 AbiVariant::GuestExportAsync | AbiVariant::GuestExportAsyncStackful => {
1090 todo!("direct param lowering for async not implemented")
1091 }
1092 };
1093
1094 // Lower the parameters to memory
1095 lower_to_memory(self, ptr);
1096 } else {
1097 // ... otherwise arguments are direct,
1098 // (there aren't too many) then we simply do a normal lower
1099 // operation for them all.
1100 for (nth, Param { ty, .. }) in func.params.iter().enumerate() {
1101 self.emit(&Instruction::GetArg { nth });
1102 self.lower(ty);
1103 }
1104 }
1105 self.realloc = None;
1106
1107 // If necessary we may need to prepare a return pointer for this ABI.
1108 if variant == AbiVariant::GuestImport && sig.retptr {
1109 let info = self.bindgen.sizes().params(&func.result);
1110 let ptr = self.bindgen.return_pointer(info.size, info.align);
1111 self.return_pointer = Some(ptr.clone());
1112 self.stack.push(ptr);
1113 }
1114
1115 // Call the Wasm function
1116 assert_eq!(self.stack.len(), sig.params.len());
1117 self.emit(&Instruction::CallWasm {
1118 name: &func.name,
1119 sig: &sig,
1120 });
1121
1122 // Handle the result
1123 if sig.retptr {
1124 // If there is a return pointer, we must get the pointer to where results
1125 // should be stored, and store the results there?
1126
1127 let ptr = match variant {
1128 // imports into guests means it's a wasm module
1129 // calling an imported function. We supplied the
1130 // return pointer as the last argument (saved in
1131 // `self.return_pointer`) so we use that to read
1132 // the result of the function from memory.
1133 AbiVariant::GuestImport => {
1134 assert!(sig.results.is_empty());
1135 self.return_pointer.take().unwrap()
1136 }
1137
1138 // guest exports means that this is a host
1139 // calling wasm so wasm returned a pointer to where
1140 // the result is stored
1141 AbiVariant::GuestExport => self.stack.pop().unwrap(),
1142
1143 AbiVariant::GuestImportAsync
1144 | AbiVariant::GuestExportAsync
1145 | AbiVariant::GuestExportAsyncStackful => {
1146 unreachable!()
1147 }
1148 };
1149
1150 if let (AbiVariant::GuestExport, true) = (variant, async_) {
1151 // If we're dealing with an async function, the result should not be read from memory
1152 // immediately, as it's the async call result
1153 //
1154 // We can leave the result of the call (the indication of what to do as an async call)
1155 // on the stack as a return
1156 self.stack.push(ptr);
1157 } else {
1158 // If we're not dealing with an async call, the result must be in memory at this point and can be read out
1159 self.read_results_from_memory(
1160 &func.result,
1161 ptr.clone(),
1162 ArchitectureSize::default(),
1163 );
1164 self.emit(&Instruction::Flush {
1165 amt: usize::from(func.result.is_some()),
1166 });
1167 }
1168 } else {
1169 // With no return pointer in use we can simply lift the
1170 // result(s) of the function from the result of the core
1171 // wasm function.
1172 if let Some(ty) = &func.result {
1173 self.lift(ty)
1174 }
1175 }
1176
1177 // Emit the function return
1178 if async_ {
1179 self.emit(&Instruction::AsyncTaskReturn {
1180 name: &func.name,
1181 params: if func.result.is_some() {
1182 &[WasmType::Pointer]
1183 } else {
1184 &[]
1185 },
1186 });
1187 } else {
1188 self.emit(&Instruction::Return {
1189 func,
1190 amt: usize::from(func.result.is_some()),
1191 });
1192 }
1193 }
1194
1195 LiftLower::LiftArgsLowerResults => {
1196 let max_flat_params = match (variant, async_) {
1197 (AbiVariant::GuestImportAsync, _is_async @ true) => MAX_FLAT_ASYNC_PARAMS,
1198 _ => MAX_FLAT_PARAMS,
1199 };
1200
1201 // Read parameters from memory
1202 let read_from_memory = |self_: &mut Self| {
1203 let mut offset = ArchitectureSize::default();
1204 let ptr = self_
1205 .stack
1206 .pop()
1207 .expect("empty stack during read param from memory");
1208 for Param { ty, .. } in func.params.iter() {
1209 offset = align_to_arch(offset, self_.bindgen.sizes().align(ty));
1210 self_.read_from_memory(ty, ptr.clone(), offset);
1211 offset += self_.bindgen.sizes().size(ty);
1212 }
1213 };
1214
1215 // Resolve parameters
1216 if sig.indirect_params {
1217 // If parameters were passed indirectly, arguments must be
1218 // read in succession from memory, with the pointer to the arguments
1219 // being the first argument to the function.
1220 self.emit(&Instruction::GetArg { nth: 0 });
1221 read_from_memory(self);
1222 } else {
1223 // ... otherwise, if parameters were passed directly then we lift each
1224 // argument in succession from the component wasm types that
1225 // make-up the type.
1226 let mut offset = 0;
1227 for Param {
1228 name: param_name,
1229 ty,
1230 ..
1231 } in func.params.iter()
1232 {
1233 let Some(types) = flat_types(self.resolve, ty, Some(max_flat_params))
1234 else {
1235 panic!(
1236 "failed to flatten types during direct parameter lifting ('{param_name}' in func '{}')",
1237 func.name
1238 );
1239 };
1240 for _ in 0..types.len() {
1241 self.emit(&Instruction::GetArg { nth: offset });
1242 offset += 1;
1243 }
1244 self.lift(ty);
1245 }
1246 }
1247
1248 // ... and that allows us to call the interface types function
1249 self.emit(&Instruction::CallInterface { func, async_ });
1250
1251 // The return value of an async function is *not* the result of the function
1252 // itself or a pointer but rather a status code.
1253 //
1254 // Asynchronous functions will call `task.return` after the
1255 // interface function completes, so lowering is conditional
1256 // based on slightly different logic for the `task.return`
1257 // intrinsic.
1258 //
1259 // Note that in the async import case teh code below deals with the CM function being lowered,
1260 // not the core function that is underneath that (i.e. func.result may be empty,
1261 // where the associated core function underneath must have a i32 status code result)
1262 let (lower_to_memory, async_flat_results) = match (async_, &func.result) {
1263 // All async cases pass along the function results and flatten where necesary
1264 (_is_async @ true, func_result) => {
1265 let results = match &func_result {
1266 Some(ty) => flat_types(self.resolve, ty, Some(max_flat_params)),
1267 None => Some(Vec::new()),
1268 };
1269 (results.is_none(), Some(results))
1270 }
1271 // All other non-async cases
1272 (_is_async @ false, _) => (sig.retptr, None),
1273 };
1274
1275 // This was dynamically allocated by the caller (or async start
1276 // function) so after it's been read by the guest we need to
1277 // deallocate it.
1278 if let AbiVariant::GuestExport
1279 | AbiVariant::GuestExportAsync
1280 | AbiVariant::GuestExportAsyncStackful = variant
1281 {
1282 if sig.indirect_params && !async_ {
1283 let ElementInfo { size, align } = self
1284 .bindgen
1285 .sizes()
1286 .record(func.params.iter().map(|param| ¶m.ty));
1287 self.emit(&Instruction::GetArg { nth: 0 });
1288 self.emit(&Instruction::GuestDeallocate { size, align });
1289 }
1290 }
1291
1292 self.realloc = Some(realloc);
1293
1294 // Perform memory lowing of relevant results, including out pointers as well as traditional results
1295 match (lower_to_memory, sig.retptr, variant) {
1296 // For sync calls, if no lowering to memory is required and there *is* a return pointer in use
1297 // then we need to lower then simply lower the result(s) and return that directly from the function.
1298 (_lower_to_memory @ false, _, _) => {
1299 if let Some(ty) = &func.result {
1300 self.lower(ty);
1301 }
1302 }
1303
1304 // Lowering to memory for a guest import
1305 //
1306 // When a function is imported to a guest this means
1307 // it's a host providing the implementation of the
1308 // import. The result is stored in the pointer
1309 // specified in the last argument, so we get the
1310 // pointer here and then write the return value into
1311 // it.
1312 (
1313 _lower_to_memory @ true,
1314 _has_ret_ptr @ true,
1315 AbiVariant::GuestImport | AbiVariant::GuestImportAsync,
1316 ) => {
1317 self.emit(&Instruction::GetArg {
1318 nth: sig.params.len() - 1,
1319 });
1320 let ptr = self
1321 .stack
1322 .pop()
1323 .expect("empty stack during result lower to memory");
1324 self.write_params_to_memory(&func.result, ptr, Default::default());
1325 }
1326
1327 // Lowering to memory for a guest export
1328 //
1329 // For a guest import this is a function defined in
1330 // wasm, so we're returning a pointer where the
1331 // value was stored at. Allocate some space here
1332 // (statically) and then write the result into that
1333 // memory, returning the pointer at the end.
1334 (_lower_to_memory @ true, _, variant) => match variant {
1335 AbiVariant::GuestExport | AbiVariant::GuestExportAsync => {
1336 let ElementInfo { size, align } =
1337 self.bindgen.sizes().params(&func.result);
1338 let ptr = self.bindgen.return_pointer(size, align);
1339 self.write_params_to_memory(
1340 &func.result,
1341 ptr.clone(),
1342 Default::default(),
1343 );
1344 self.stack.push(ptr);
1345 }
1346 AbiVariant::GuestImport | AbiVariant::GuestImportAsync => {
1347 unreachable!(
1348 "lowering to memory cannot be performed without a return pointer ({async_note} func [{func_name}], variant {variant:#?})",
1349 async_note = async_.then_some("async").unwrap_or("sync"),
1350 func_name = func.name,
1351 )
1352 }
1353 AbiVariant::GuestExportAsyncStackful => {
1354 todo!("stackful exports are not yet supported")
1355 }
1356 },
1357 }
1358
1359 // Build and emit the appropriate return
1360 match (variant, async_flat_results) {
1361 // Async guest imports always return a i32 status code
1362 (AbiVariant::GuestImport | AbiVariant::GuestImportAsync, None) if async_ => {
1363 unreachable!("async guest imports must have a return")
1364 }
1365
1366 // Async guest imports with results return the status code, not a pointer to any results
1367 (AbiVariant::GuestImport | AbiVariant::GuestImportAsync, Some(results))
1368 if async_ =>
1369 {
1370 let name = &format!("[task-return]{}", func.name);
1371 let params = results.as_deref().unwrap_or_default();
1372 self.emit(&Instruction::AsyncTaskReturn { name, params });
1373 }
1374
1375 // All async/non-async cases with results that need to be returned
1376 //
1377 // In practice, async imports should not end up here, as the returned result of an
1378 // async import is *not* a pointer but instead a status code.
1379 (_, Some(results)) => {
1380 let name = &format!("[task-return]{}", func.name);
1381 let params = results.as_deref().unwrap_or(&[WasmType::Pointer]);
1382 self.emit(&Instruction::AsyncTaskReturn { name, params });
1383 }
1384
1385 // All async/non-async cases with no results
1386 (_, None) => {
1387 if async_ {
1388 let name = &format!("[task-return]{}", func.name);
1389 self.emit(&Instruction::AsyncTaskReturn {
1390 name: name,
1391 params: if sig.results.len() > MAX_FLAT_ASYNC_PARAMS {
1392 &[WasmType::Pointer]
1393 } else {
1394 &sig.results
1395 },
1396 });
1397 } else {
1398 self.emit(&Instruction::Return {
1399 func,
1400 amt: sig.results.len(),
1401 });
1402 }
1403 }
1404 }
1405
1406 self.realloc = None;
1407 }
1408 }
1409
1410 assert!(self.realloc.is_none());
1411
1412 assert!(
1413 self.stack.is_empty(),
1414 "stack has {} items remaining: {:?}",
1415 self.stack.len(),
1416 self.stack,
1417 );
1418 }
1419
    /// Emits the body of the `post-return` callback for exported `func`:
    /// reads the return-area pointer (argument 0) and deallocates any lists
    /// in the stored result.
    fn post_return(&mut self, func: &Function) {
        let sig = self.resolve.wasm_signature(AbiVariant::GuestExport, func);

        // Currently post-return is only used for lists and lists are always
        // returned indirectly through memory due to their flat representation
        // having more than one type. Assert that a return pointer is used,
        // though, in case this ever changes.
        assert!(sig.retptr);

        // The sole core argument is the pointer to the stored results.
        self.emit(&Instruction::GetArg { nth: 0 });
        let addr = self.stack.pop().unwrap();

        // `func.result` is an `Option`; extend yields zero or one types.
        let mut types = Vec::new();
        types.extend(func.result);
        self.deallocate_in_types(&types, &[addr], true, Deallocate::Lists);

        // post-return itself returns nothing.
        self.emit(&Instruction::Return { func, amt: 0 });
    }
1438
    /// Emits deallocation instructions for each of `types` represented by
    /// `operands`.
    ///
    /// When `indirect` is set, `operands` must be a single pointer to the
    /// values laid out in memory; otherwise `operands` is the concatenated
    /// flat representation of all `types`, consumed left-to-right.
    fn deallocate_in_types(
        &mut self,
        types: &[Type],
        operands: &[B::Operand],
        indirect: bool,
        what: Deallocate,
    ) {
        if indirect {
            assert_eq!(operands.len(), 1);
            // Walk each field at its computed offset from the base pointer.
            for (offset, ty) in self.bindgen.sizes().field_offsets(types) {
                self.deallocate_indirect(ty, operands[0].clone(), offset, what);
            }
            // Deallocation must consume everything it pushed.
            assert!(
                self.stack.is_empty(),
                "stack has {} items remaining",
                self.stack.len()
            );
        } else {
            let mut operands = operands;
            let mut operands_for_ty;
            for ty in types {
                // Each type consumes as many operands as its flat
                // representation is wide.
                let types = flat_types(self.resolve, ty, None).unwrap();
                (operands_for_ty, operands) = operands.split_at(types.len());
                self.stack.extend_from_slice(operands_for_ty);
                self.deallocate(ty, what);
                assert!(
                    self.stack.is_empty(),
                    "stack has {} items remaining",
                    self.stack.len()
                );
            }
            // All provided operands must have been consumed exactly.
            assert!(operands.is_empty());
        }
    }
1473
    /// Pops `inst.operands_len()` operands off the virtual stack, hands the
    /// instruction to the backend, and pushes the backend's results back on.
    ///
    /// Panics if the stack is too shallow or the backend produces the wrong
    /// number of results.
    fn emit(&mut self, inst: &Instruction<'_>) {
        // Reuse the scratch buffers across calls to avoid reallocation.
        self.operands.clear();
        self.results.clear();

        let operands_len = inst.operands_len();
        assert!(
            self.stack.len() >= operands_len,
            "not enough operands on stack for {:?}: have {} need {operands_len}",
            inst,
            self.stack.len(),
        );
        // Move the top `operands_len` stack entries into `self.operands`,
        // preserving their order.
        self.operands
            .extend(self.stack.drain((self.stack.len() - operands_len)..));
        self.results.reserve(inst.results_len());

        self.bindgen
            .emit(self.resolve, inst, &mut self.operands, &mut self.results);

        // The backend must produce exactly the declared number of results.
        assert_eq!(
            self.results.len(),
            inst.results_len(),
            "{:?} expected {} results, got {}",
            inst,
            inst.results_len(),
            self.results.len()
        );
        self.stack.append(&mut self.results);
    }
1502
    /// Asks the backend to open a nested code block (e.g. a variant arm or a
    /// per-element list body); paired with `finish_block`.
    fn push_block(&mut self) {
        self.bindgen.push_block();
    }
1506
    /// Closes the current backend block, handing it the top `size` stack
    /// entries as the block's results.
    fn finish_block(&mut self, size: usize) {
        self.operands.clear();
        assert!(
            size <= self.stack.len(),
            "not enough operands on stack for finishing block",
        );
        // Move the top `size` entries off the stack, preserving order.
        self.operands
            .extend(self.stack.drain((self.stack.len() - size)..));
        self.bindgen.finish_block(&mut self.operands);
    }
1517
    /// Lowers the interface-typed value on top of the stack into its flat
    /// core-wasm representation, pushing the resulting operands.
    ///
    /// This is the inverse of `lift` below; the two must stay symmetric.
    fn lower(&mut self, ty: &Type) {
        use Instruction::*;

        match *ty {
            // Scalars lower to a single core value each.
            Type::Bool => self.emit(&I32FromBool),
            Type::S8 => self.emit(&I32FromS8),
            Type::U8 => self.emit(&I32FromU8),
            Type::S16 => self.emit(&I32FromS16),
            Type::U16 => self.emit(&I32FromU16),
            Type::S32 => self.emit(&I32FromS32),
            Type::U32 => self.emit(&I32FromU32),
            Type::S64 => self.emit(&I64FromS64),
            Type::U64 => self.emit(&I64FromU64),
            Type::Char => self.emit(&I32FromChar),
            Type::F32 => self.emit(&CoreF32FromF32),
            Type::F64 => self.emit(&CoreF64FromF64),
            Type::String => {
                let realloc = self.list_realloc();
                self.emit(&StringLower { realloc });
            }
            Type::ErrorContext => self.emit(&ErrorContextLower),
            Type::Id(id) => match &self.resolve.types[id].kind {
                // Aliases delegate to the underlying type.
                TypeDefKind::Type(t) => self.lower(t),
                TypeDefKind::List(element) => {
                    let realloc = self.list_realloc();
                    if self.bindgen.is_list_canonical(self.resolve, element) {
                        // Canonical (memcpy-able) element representation.
                        self.emit(&ListCanonLower { element, realloc });
                    } else {
                        // Otherwise emit a per-element block that writes each
                        // element into the list's allocation.
                        self.push_block();
                        self.emit(&IterElem { element });
                        self.emit(&IterBasePointer);
                        let addr = self.stack.pop().unwrap();
                        self.write_to_memory(element, addr, Default::default());
                        self.finish_block(0);
                        self.emit(&ListLower { element, realloc });
                    }
                }
                TypeDefKind::Handle(handle) => {
                    let (Handle::Own(ty) | Handle::Borrow(ty)) = handle;
                    self.emit(&HandleLower {
                        handle,
                        ty: id,
                        name: self.resolve.types[*ty].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Resource => {
                    todo!();
                }
                TypeDefKind::Record(record) => {
                    // Destructure the record, then lower each field in order.
                    self.emit(&RecordLower {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                    let values = self
                        .stack
                        .drain(self.stack.len() - record.fields.len()..)
                        .collect::<Vec<_>>();
                    for (field, value) in record.fields.iter().zip(values) {
                        self.stack.push(value);
                        self.lower(&field.ty);
                    }
                }
                TypeDefKind::Tuple(tuple) => {
                    self.emit(&TupleLower { tuple, ty: id });
                    let values = self
                        .stack
                        .drain(self.stack.len() - tuple.types.len()..)
                        .collect::<Vec<_>>();
                    for (ty, value) in tuple.types.iter().zip(values) {
                        self.stack.push(value);
                        self.lower(ty);
                    }
                }

                TypeDefKind::Flags(flags) => {
                    self.emit(&FlagsLower {
                        flags,
                        ty: id,
                        name: self.resolve.types[id].name.as_ref().unwrap(),
                    });
                }

                // Variant-like types lower each arm in its own block; see
                // `lower_variant_arms` for padding/bitcast handling.
                TypeDefKind::Variant(v) => {
                    let results =
                        self.lower_variant_arms(ty, v.cases.iter().map(|c| c.ty.as_ref()));
                    self.emit(&VariantLower {
                        variant: v,
                        ty: id,
                        results: &results,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Enum(enum_) => {
                    self.emit(&EnumLower {
                        enum_,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Option(t) => {
                    let results = self.lower_variant_arms(ty, [None, Some(t)]);
                    self.emit(&OptionLower {
                        payload: t,
                        ty: id,
                        results: &results,
                    });
                }
                TypeDefKind::Result(r) => {
                    let results = self.lower_variant_arms(ty, [r.ok.as_ref(), r.err.as_ref()]);
                    self.emit(&ResultLower {
                        result: r,
                        ty: id,
                        results: &results,
                    });
                }
                TypeDefKind::Future(ty) => {
                    self.emit(&FutureLower {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Stream(ty) => {
                    self.emit(&StreamLower {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedLengthList(ty, size) => {
                    // Destructure into `size` elements, then lower each.
                    self.emit(&FixedLengthListLower {
                        element: ty,
                        size: *size,
                        id,
                    });
                    let mut values = self
                        .stack
                        .drain(self.stack.len() - (*size as usize)..)
                        .collect::<Vec<_>>();
                    for value in values.drain(..) {
                        self.stack.push(value);
                        self.lower(ty);
                    }
                }
                TypeDefKind::Map(key, value) => {
                    let realloc = self.list_realloc();
                    // Offset of the value within a (key, value) entry.
                    let value_offset = self.bindgen.sizes().field_offsets([key, value])[1].0;
                    // Per-entry block: write key then value into the entry.
                    self.push_block();
                    self.emit(&IterMapKey { key });
                    self.emit(&IterBasePointer);
                    let key_addr = self.stack.pop().unwrap();
                    self.write_to_memory(key, key_addr, Default::default());
                    self.emit(&IterMapValue { value });
                    self.emit(&IterBasePointer);
                    let value_addr = self.stack.pop().unwrap();
                    self.write_to_memory(value, value_addr, value_offset);
                    self.finish_block(0);
                    self.emit(&MapLower {
                        key,
                        value,
                        realloc,
                    });
                }
            },
        }
    }
1684
1685 fn lower_variant_arms<'b>(
1686 &mut self,
1687 ty: &Type,
1688 cases: impl IntoIterator<Item = Option<&'b Type>>,
1689 ) -> Vec<WasmType> {
1690 use Instruction::*;
1691 let results = flat_types(self.resolve, ty, None).unwrap();
1692 let mut casts = Vec::new();
1693 for (i, ty) in cases.into_iter().enumerate() {
1694 self.push_block();
1695 self.emit(&VariantPayloadName);
1696 let payload_name = self.stack.pop().unwrap();
1697 self.emit(&I32Const { val: i as i32 });
1698 let mut pushed = 1;
1699 if let Some(ty) = ty {
1700 // Using the payload of this block we lower the type to
1701 // raw wasm values.
1702 self.stack.push(payload_name);
1703 self.lower(ty);
1704
1705 // Determine the types of all the wasm values we just
1706 // pushed, and record how many. If we pushed too few
1707 // then we'll need to push some zeros after this.
1708 let temp = flat_types(self.resolve, ty, None).unwrap();
1709 pushed += temp.len();
1710
1711 // For all the types pushed we may need to insert some
1712 // bitcasts. This will go through and cast everything
1713 // to the right type to ensure all blocks produce the
1714 // same set of results.
1715 casts.truncate(0);
1716 for (actual, expected) in temp.iter().zip(&results[1..]) {
1717 casts.push(cast(*actual, *expected));
1718 }
1719 if casts.iter().any(|c| *c != Bitcast::None) {
1720 self.emit(&Bitcasts { casts: &casts });
1721 }
1722 }
1723
1724 // If we haven't pushed enough items in this block to match
1725 // what other variants are pushing then we need to push
1726 // some zeros.
1727 if pushed < results.len() {
1728 self.emit(&ConstZero {
1729 tys: &results[pushed..],
1730 });
1731 }
1732 self.finish_block(results.len());
1733 }
1734 results
1735 }
1736
1737 fn list_realloc(&self) -> Option<&'static str> {
1738 match self.realloc.expect("realloc should be configured") {
1739 Realloc::None => None,
1740 Realloc::Export(s) => Some(s),
1741 }
1742 }
1743
1744 /// Note that in general everything in this function is the opposite of the
1745 /// `lower` function above. This is intentional and should be kept this way!
1746 fn lift(&mut self, ty: &Type) {
1747 use Instruction::*;
1748
1749 match *ty {
1750 Type::Bool => self.emit(&BoolFromI32),
1751 Type::S8 => self.emit(&S8FromI32),
1752 Type::U8 => self.emit(&U8FromI32),
1753 Type::S16 => self.emit(&S16FromI32),
1754 Type::U16 => self.emit(&U16FromI32),
1755 Type::S32 => self.emit(&S32FromI32),
1756 Type::U32 => self.emit(&U32FromI32),
1757 Type::S64 => self.emit(&S64FromI64),
1758 Type::U64 => self.emit(&U64FromI64),
1759 Type::Char => self.emit(&CharFromI32),
1760 Type::F32 => self.emit(&F32FromCoreF32),
1761 Type::F64 => self.emit(&F64FromCoreF64),
1762 Type::String => self.emit(&StringLift),
1763 Type::ErrorContext => self.emit(&ErrorContextLift),
1764 Type::Id(id) => match &self.resolve.types[id].kind {
1765 TypeDefKind::Type(t) => self.lift(t),
1766 TypeDefKind::List(element) => {
1767 if self.bindgen.is_list_canonical(self.resolve, element) {
1768 self.emit(&ListCanonLift { element, ty: id });
1769 } else {
1770 self.push_block();
1771 self.emit(&IterBasePointer);
1772 let addr = self.stack.pop().unwrap();
1773 self.read_from_memory(element, addr, Default::default());
1774 self.finish_block(1);
1775 self.emit(&ListLift { element, ty: id });
1776 }
1777 }
1778 TypeDefKind::Handle(handle) => {
1779 let (Handle::Own(ty) | Handle::Borrow(ty)) = handle;
1780 self.emit(&HandleLift {
1781 handle,
1782 ty: id,
1783 name: self.resolve.types[*ty].name.as_deref().unwrap(),
1784 });
1785 }
1786 TypeDefKind::Resource => {
1787 todo!();
1788 }
1789 TypeDefKind::Record(record) => {
1790 self.flat_for_each_record_type(
1791 ty,
1792 record.fields.iter().map(|f| &f.ty),
1793 Self::lift,
1794 );
1795 self.emit(&RecordLift {
1796 record,
1797 ty: id,
1798 name: self.resolve.types[id].name.as_deref().unwrap(),
1799 });
1800 }
1801 TypeDefKind::Tuple(tuple) => {
1802 self.flat_for_each_record_type(ty, tuple.types.iter(), Self::lift);
1803 self.emit(&TupleLift { tuple, ty: id });
1804 }
1805 TypeDefKind::Flags(flags) => {
1806 self.emit(&FlagsLift {
1807 flags,
1808 ty: id,
1809 name: self.resolve.types[id].name.as_ref().unwrap(),
1810 });
1811 }
1812
1813 TypeDefKind::Variant(v) => {
1814 self.flat_for_each_variant_arm(
1815 ty,
1816 true,
1817 v.cases.iter().map(|c| c.ty.as_ref()),
1818 Self::lift,
1819 );
1820 self.emit(&VariantLift {
1821 variant: v,
1822 ty: id,
1823 name: self.resolve.types[id].name.as_deref().unwrap(),
1824 });
1825 }
1826
1827 TypeDefKind::Enum(enum_) => {
1828 self.emit(&EnumLift {
1829 enum_,
1830 ty: id,
1831 name: self.resolve.types[id].name.as_deref().unwrap(),
1832 });
1833 }
1834
1835 TypeDefKind::Option(t) => {
1836 self.flat_for_each_variant_arm(ty, true, [None, Some(t)], Self::lift);
1837 self.emit(&OptionLift { payload: t, ty: id });
1838 }
1839
1840 TypeDefKind::Result(r) => {
1841 self.flat_for_each_variant_arm(
1842 ty,
1843 true,
1844 [r.ok.as_ref(), r.err.as_ref()],
1845 Self::lift,
1846 );
1847 self.emit(&ResultLift { result: r, ty: id });
1848 }
1849
1850 TypeDefKind::Future(ty) => {
1851 self.emit(&FutureLift {
1852 payload: ty,
1853 ty: id,
1854 });
1855 }
1856 TypeDefKind::Stream(ty) => {
1857 self.emit(&StreamLift {
1858 payload: ty,
1859 ty: id,
1860 });
1861 }
1862 TypeDefKind::Unknown => unreachable!(),
1863 TypeDefKind::FixedLengthList(ty, size) => {
1864 let temp = flat_types(self.resolve, ty, None).unwrap();
1865 let flat_per_elem = temp.to_vec().len();
1866 let flatsize = flat_per_elem * (*size as usize);
1867 let mut lowered_args = self
1868 .stack
1869 .drain(self.stack.len() - flatsize..)
1870 .collect::<Vec<_>>();
1871 for _ in 0..*size {
1872 self.stack.extend(lowered_args.drain(..flat_per_elem));
1873 self.lift(ty);
1874 }
1875 self.emit(&FixedLengthListLift {
1876 element: ty,
1877 size: *size,
1878 id,
1879 });
1880 }
1881 TypeDefKind::Map(key, value) => {
1882 let value_offset = self.bindgen.sizes().field_offsets([key, value])[1].0;
1883 self.push_block();
1884 self.emit(&IterBasePointer);
1885 let entry_addr = self.stack.pop().unwrap();
1886 self.read_from_memory(key, entry_addr.clone(), Default::default());
1887 self.read_from_memory(value, entry_addr, value_offset);
1888 self.finish_block(2);
1889 self.emit(&MapLift { key, value, ty: id });
1890 }
1891 },
1892 }
1893 }
1894
    /// Splits the flat operands of `container` (a record/tuple) off the
    /// stack and invokes `iter` once per member type with that member's
    /// operands pushed back on.
    fn flat_for_each_record_type<'b>(
        &mut self,
        container: &Type,
        types: impl Iterator<Item = &'b Type>,
        mut iter: impl FnMut(&mut Self, &Type),
    ) {
        // Total flat width of the whole container.
        let temp = flat_types(self.resolve, container, None).unwrap();
        let mut args = self
            .stack
            .drain(self.stack.len() - temp.len()..)
            .collect::<Vec<_>>();
        for ty in types {
            // Hand each member exactly its own slice of flat operands.
            let temp = flat_types(self.resolve, ty, None).unwrap();
            self.stack.extend(args.drain(..temp.len()));
            iter(self, ty);
        }
    }
1912
1913 fn flat_for_each_variant_arm<'b>(
1914 &mut self,
1915 ty: &Type,
1916 blocks_with_type_have_result: bool,
1917 cases: impl IntoIterator<Item = Option<&'b Type>>,
1918 mut iter: impl FnMut(&mut Self, &Type),
1919 ) {
1920 let params = flat_types(self.resolve, ty, None).unwrap();
1921 let mut casts = Vec::new();
1922 let block_inputs = self
1923 .stack
1924 .drain(self.stack.len() + 1 - params.len()..)
1925 .collect::<Vec<_>>();
1926 for ty in cases {
1927 self.push_block();
1928 if let Some(ty) = ty {
1929 // Push only the values we need for this variant onto
1930 // the stack.
1931 let temp = flat_types(self.resolve, ty, None).unwrap();
1932 self.stack
1933 .extend(block_inputs[..temp.len()].iter().cloned());
1934
1935 // Cast all the types we have on the stack to the actual
1936 // types needed for this variant, if necessary.
1937 casts.truncate(0);
1938 for (actual, expected) in temp.iter().zip(¶ms[1..]) {
1939 casts.push(cast(*expected, *actual));
1940 }
1941 if casts.iter().any(|c| *c != Bitcast::None) {
1942 self.emit(&Instruction::Bitcasts { casts: &casts });
1943 }
1944
1945 // Then recursively lift this variant's payload.
1946 iter(self, ty);
1947 }
1948 self.finish_block(if blocks_with_type_have_result {
1949 ty.is_some() as usize
1950 } else {
1951 0
1952 });
1953 }
1954 }
1955
    /// Lowers a value of type `ty` (popping its operands from `self.stack`)
    /// and stores the lowered representation into linear memory at
    /// `addr + offset`.
    fn write_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        use Instruction::*;

        match *ty {
            // Builtin types need different flavors of storage instructions
            // depending on the size of the value written.
            Type::Bool | Type::U8 | Type::S8 => {
                self.lower_and_emit(ty, addr, &I32Store8 { offset })
            }
            Type::U16 | Type::S16 => self.lower_and_emit(ty, addr, &I32Store16 { offset }),
            Type::U32 | Type::S32 | Type::Char => {
                self.lower_and_emit(ty, addr, &I32Store { offset })
            }
            Type::U64 | Type::S64 => self.lower_and_emit(ty, addr, &I64Store { offset }),
            Type::F32 => self.lower_and_emit(ty, addr, &F32Store { offset }),
            Type::F64 => self.lower_and_emit(ty, addr, &F64Store { offset }),
            // Strings share the pointer/length layout of lists.
            Type::String => self.write_list_to_memory(ty, addr, offset),
            Type::ErrorContext => self.lower_and_emit(ty, addr, &I32Store { offset }),

            Type::Id(id) => match &self.resolve.types[id].kind {
                // Type aliases write through to the underlying type.
                TypeDefKind::Type(t) => self.write_to_memory(t, addr, offset),
                TypeDefKind::List(_) => self.write_list_to_memory(ty, addr, offset),
                // Maps have the same linear memory layout as list<tuple<K, V>>.
                TypeDefKind::Map(_, _) => self.write_list_to_memory(ty, addr, offset),

                // Handle-like types lower to a single i32 value.
                TypeDefKind::Future(_) | TypeDefKind::Stream(_) | TypeDefKind::Handle(_) => {
                    self.lower_and_emit(ty, addr, &I32Store { offset })
                }

                // Decompose the record into its components and then write all
                // the components into memory one-by-one.
                TypeDefKind::Record(record) => {
                    self.emit(&RecordLower {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                    self.write_fields_to_memory(record.fields.iter().map(|f| &f.ty), addr, offset);
                }
                TypeDefKind::Resource => {
                    todo!()
                }
                // Tuples are handled like records: lower, then write each
                // element at its field offset.
                TypeDefKind::Tuple(tuple) => {
                    self.emit(&TupleLower { tuple, ty: id });
                    self.write_fields_to_memory(tuple.types.iter(), addr, offset);
                }

                TypeDefKind::Flags(f) => {
                    self.lower(ty);
                    match f.repr() {
                        FlagsRepr::U8 => {
                            self.stack.push(addr);
                            self.store_intrepr(offset, Int::U8);
                        }
                        FlagsRepr::U16 => {
                            self.stack.push(addr);
                            self.store_intrepr(offset, Int::U16);
                        }
                        FlagsRepr::U32(n) => {
                            // The lowered flags leave `n` i32 values on the
                            // stack in word order; stores pop the topmost
                            // value, so iterate the word indices in reverse.
                            for i in (0..n).rev() {
                                self.stack.push(addr.clone());
                                self.emit(&I32Store {
                                    offset: offset.add_bytes(i * 4),
                                });
                            }
                        }
                    }
                }

                // Each case will get its own block, and the first item in each
                // case is writing the discriminant. After that if we have a
                // payload we write the payload after the discriminant, aligned up
                // to the type's alignment.
                TypeDefKind::Variant(v) => {
                    self.write_variant_arms_to_memory(
                        offset,
                        addr,
                        v.tag(),
                        v.cases.iter().map(|c| c.ty.as_ref()),
                    );
                    self.emit(&VariantLower {
                        variant: v,
                        ty: id,
                        results: &[],
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                // `option<T>` is a two-case variant: none, then some(T).
                TypeDefKind::Option(t) => {
                    self.write_variant_arms_to_memory(offset, addr, Int::U8, [None, Some(t)]);
                    self.emit(&OptionLower {
                        payload: t,
                        ty: id,
                        results: &[],
                    });
                }

                // `result<T, E>` is a two-case variant: ok(T), then err(E).
                TypeDefKind::Result(r) => {
                    self.write_variant_arms_to_memory(
                        offset,
                        addr,
                        Int::U8,
                        [r.ok.as_ref(), r.err.as_ref()],
                    );
                    self.emit(&ResultLower {
                        result: r,
                        ty: id,
                        results: &[],
                    });
                }

                // Enums store only their discriminant, at the tag's width.
                TypeDefKind::Enum(e) => {
                    self.lower(ty);
                    self.stack.push(addr);
                    self.store_intrepr(offset, e.tag());
                }

                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedLengthList(element, size) => {
                    // resembles write_list_to_memory: one block writes a
                    // single element through the iteration base pointer, and
                    // the lowering instruction applies it per element.
                    self.push_block();
                    self.emit(&IterElem { element });
                    self.emit(&IterBasePointer);
                    let elem_addr = self.stack.pop().unwrap();
                    self.write_to_memory(element, elem_addr, offset);
                    self.finish_block(0);
                    self.stack.push(addr);
                    self.emit(&FixedLengthListLowerToMemory {
                        element,
                        size: *size,
                        id,
                    });
                }
            },
        }
    }
2092
    /// Writes a list of parameters to memory starting at `addr + offset`,
    /// laying them out exactly like the fields of a record.
    fn write_params_to_memory<'b>(
        &mut self,
        params: impl IntoIterator<Item = &'b Type, IntoIter: ExactSizeIterator>,
        addr: B::Operand,
        offset: ArchitectureSize,
    ) {
        self.write_fields_to_memory(params, addr, offset);
    }
2101
    /// Writes a variant-like value (variant/option/result) to memory at
    /// `addr + offset`.
    ///
    /// One block is generated per case, in case order: each block stores the
    /// case index as the discriminant (at `tag`'s width) and, if the case
    /// carries a payload, writes that payload at the shared payload offset.
    fn write_variant_arms_to_memory<'b>(
        &mut self,
        offset: ArchitectureSize,
        addr: B::Operand,
        tag: Int,
        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
    ) {
        // Every payload is written at the same offset past the discriminant.
        let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
        for (i, ty) in cases.into_iter().enumerate() {
            self.push_block();
            // Obtain the name the payload operand is bound to inside this
            // case's block.
            self.emit(&Instruction::VariantPayloadName);
            let payload_name = self.stack.pop().unwrap();
            // Store the case index as the discriminant.
            self.emit(&Instruction::I32Const { val: i as i32 });
            self.stack.push(addr.clone());
            self.store_intrepr(offset, tag);
            if let Some(ty) = ty {
                self.stack.push(payload_name.clone());
                self.write_to_memory(ty, addr.clone(), payload_offset);
            }
            self.finish_block(0);
        }
    }
2124
2125 fn write_list_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
2126 // After lowering the list there's two i32 values on the stack
2127 // which we write into memory, writing the pointer into the low address
2128 // and the length into the high address.
2129 self.lower(ty);
2130 self.stack.push(addr.clone());
2131 self.emit(&Instruction::LengthStore {
2132 offset: offset + self.bindgen.sizes().align(ty).into(),
2133 });
2134 self.stack.push(addr);
2135 self.emit(&Instruction::PointerStore { offset });
2136 }
2137
2138 fn write_fields_to_memory<'b>(
2139 &mut self,
2140 tys: impl IntoIterator<Item = &'b Type, IntoIter: ExactSizeIterator>,
2141 addr: B::Operand,
2142 offset: ArchitectureSize,
2143 ) {
2144 let tys = tys.into_iter();
2145 let fields = self
2146 .stack
2147 .drain(self.stack.len() - tys.len()..)
2148 .collect::<Vec<_>>();
2149 for ((field_offset, ty), op) in self
2150 .bindgen
2151 .sizes()
2152 .field_offsets(tys)
2153 .into_iter()
2154 .zip(fields)
2155 {
2156 self.stack.push(op);
2157 self.write_to_memory(ty, addr.clone(), offset + (field_offset));
2158 }
2159 }
2160
    /// Lowers `ty` (consuming its operands from the stack), pushes `addr`,
    /// then emits the store `instr` — the shared "lower then store" shape
    /// used for primitive types.
    fn lower_and_emit(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
        self.lower(ty);
        self.stack.push(addr);
        self.emit(instr);
    }
2166
    /// Reads a value of type `ty` from linear memory at `addr + offset` and
    /// lifts it, leaving the lifted representation on `self.stack`.
    fn read_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        use Instruction::*;

        match *ty {
            // Builtin types use the load matching their width; sub-32-bit
            // integers extend to i32 with their own signedness.
            Type::Bool => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
            Type::U8 => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
            Type::S8 => self.emit_and_lift(ty, addr, &I32Load8S { offset }),
            Type::U16 => self.emit_and_lift(ty, addr, &I32Load16U { offset }),
            Type::S16 => self.emit_and_lift(ty, addr, &I32Load16S { offset }),
            Type::U32 | Type::S32 | Type::Char => self.emit_and_lift(ty, addr, &I32Load { offset }),
            Type::U64 | Type::S64 => self.emit_and_lift(ty, addr, &I64Load { offset }),
            Type::F32 => self.emit_and_lift(ty, addr, &F32Load { offset }),
            Type::F64 => self.emit_and_lift(ty, addr, &F64Load { offset }),
            // Strings share the pointer/length layout of lists.
            Type::String => self.read_list_from_memory(ty, addr, offset),
            Type::ErrorContext => self.emit_and_lift(ty, addr, &I32Load { offset }),

            Type::Id(id) => match &self.resolve.types[id].kind {
                // Type aliases read through to the underlying type.
                TypeDefKind::Type(t) => self.read_from_memory(t, addr, offset),

                TypeDefKind::List(_) => self.read_list_from_memory(ty, addr, offset),
                // Maps have the same linear memory layout as list<tuple<K, V>>.
                TypeDefKind::Map(_, _) => self.read_list_from_memory(ty, addr, offset),

                // Handle-like types are stored as a single i32 value.
                TypeDefKind::Future(_) | TypeDefKind::Stream(_) | TypeDefKind::Handle(_) => {
                    self.emit_and_lift(ty, addr, &I32Load { offset })
                }

                TypeDefKind::Resource => {
                    todo!();
                }

                // Read and lift each field individually, adjusting the offset
                // as we go along, then aggregate all the fields into the
                // record.
                TypeDefKind::Record(record) => {
                    self.read_fields_from_memory(record.fields.iter().map(|f| &f.ty), addr, offset);
                    self.emit(&RecordLift {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                // Tuples are handled like records: read each element, then
                // aggregate them via the lift instruction.
                TypeDefKind::Tuple(tuple) => {
                    self.read_fields_from_memory(&tuple.types, addr, offset);
                    self.emit(&TupleLift { tuple, ty: id });
                }

                TypeDefKind::Flags(f) => {
                    match f.repr() {
                        FlagsRepr::U8 => {
                            self.stack.push(addr);
                            self.load_intrepr(offset, Int::U8);
                        }
                        FlagsRepr::U16 => {
                            self.stack.push(addr);
                            self.load_intrepr(offset, Int::U16);
                        }
                        FlagsRepr::U32(n) => {
                            // Load the `n` i32 words in ascending index order
                            // so they land on the stack in word order —
                            // mirror of the reversed order used when storing.
                            for i in 0..n {
                                self.stack.push(addr.clone());
                                self.emit(&I32Load {
                                    offset: offset.add_bytes(i * 4),
                                });
                            }
                        }
                    }
                    self.lift(ty);
                }

                // Each case will get its own block, and we'll dispatch to the
                // right block based on the `i32.load` we initially perform. Each
                // individual block is pretty simple and just reads the payload type
                // from the corresponding offset if one is available.
                TypeDefKind::Variant(variant) => {
                    self.read_variant_arms_from_memory(
                        offset,
                        addr,
                        variant.tag(),
                        variant.cases.iter().map(|c| c.ty.as_ref()),
                    );
                    self.emit(&VariantLift {
                        variant,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                // `option<T>` is a two-case variant: none, then some(T).
                TypeDefKind::Option(t) => {
                    self.read_variant_arms_from_memory(offset, addr, Int::U8, [None, Some(t)]);
                    self.emit(&OptionLift { payload: t, ty: id });
                }

                // `result<T, E>` is a two-case variant: ok(T), then err(E).
                TypeDefKind::Result(r) => {
                    self.read_variant_arms_from_memory(
                        offset,
                        addr,
                        Int::U8,
                        [r.ok.as_ref(), r.err.as_ref()],
                    );
                    self.emit(&ResultLift { result: r, ty: id });
                }

                // Enums load only their discriminant, then lift it.
                TypeDefKind::Enum(e) => {
                    self.stack.push(addr.clone());
                    self.load_intrepr(offset, e.tag());
                    self.lift(ty);
                }

                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedLengthList(ty, size) => {
                    // One block reads a single element through the iteration
                    // base pointer; the lift instruction applies it per
                    // element, each block yielding one lifted value.
                    self.push_block();
                    self.emit(&IterBasePointer);
                    let elemaddr = self.stack.pop().unwrap();
                    self.read_from_memory(ty, elemaddr, offset);
                    self.finish_block(1);
                    self.stack.push(addr.clone());
                    self.emit(&FixedLengthListLiftFromMemory {
                        element: ty,
                        size: *size,
                        id,
                    });
                }
            },
        }
    }
2293
    /// Reads a function's (optional) result from memory at `addr + offset`,
    /// treating it as a record with zero or one field.
    fn read_results_from_memory(
        &mut self,
        result: &Option<Type>,
        addr: B::Operand,
        offset: ArchitectureSize,
    ) {
        self.read_fields_from_memory(result, addr, offset)
    }
2302
    /// Reads a variant-like value (variant/option/result) from memory at
    /// `addr + offset`.
    ///
    /// Loads the discriminant first, then generates one block per case, in
    /// case order; each block reads that case's payload (if any) from the
    /// shared payload offset.
    fn read_variant_arms_from_memory<'b>(
        &mut self,
        offset: ArchitectureSize,
        addr: B::Operand,
        tag: Int,
        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
    ) {
        self.stack.push(addr.clone());
        self.load_intrepr(offset, tag);
        // Every payload lives at the same offset past the discriminant.
        let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
        for ty in cases {
            self.push_block();
            if let Some(ty) = ty {
                self.read_from_memory(ty, addr.clone(), payload_offset);
            }
            // Payload-carrying blocks yield one lifted value; empty cases
            // yield none.
            self.finish_block(ty.is_some() as usize);
        }
    }
2321
2322 fn read_list_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
2323 // Read the pointer/len and then perform the standard lifting
2324 // proceses.
2325 self.stack.push(addr.clone());
2326 self.emit(&Instruction::PointerLoad { offset });
2327 self.stack.push(addr);
2328 self.emit(&Instruction::LengthLoad {
2329 offset: offset + self.bindgen.sizes().align(ty).into(),
2330 });
2331 self.lift(ty);
2332 }
2333
2334 fn read_fields_from_memory<'b>(
2335 &mut self,
2336 tys: impl IntoIterator<Item = &'b Type>,
2337 addr: B::Operand,
2338 offset: ArchitectureSize,
2339 ) {
2340 for (field_offset, ty) in self.bindgen.sizes().field_offsets(tys).iter() {
2341 self.read_from_memory(ty, addr.clone(), offset + (*field_offset));
2342 }
2343 }
2344
    /// Pushes `addr`, emits the load `instr`, then lifts `ty` — the shared
    /// "load then lift" shape used for primitive types.
    fn emit_and_lift(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
        self.stack.push(addr);
        self.emit(instr);
        self.lift(ty);
    }
2350
2351 fn load_intrepr(&mut self, offset: ArchitectureSize, repr: Int) {
2352 self.emit(&match repr {
2353 Int::U64 => Instruction::I64Load { offset },
2354 Int::U32 => Instruction::I32Load { offset },
2355 Int::U16 => Instruction::I32Load16U { offset },
2356 Int::U8 => Instruction::I32Load8U { offset },
2357 });
2358 }
2359
2360 fn store_intrepr(&mut self, offset: ArchitectureSize, repr: Int) {
2361 self.emit(&match repr {
2362 Int::U64 => Instruction::I64Store { offset },
2363 Int::U32 => Instruction::I32Store { offset },
2364 Int::U16 => Instruction::I32Store16 { offset },
2365 Int::U8 => Instruction::I32Store8 { offset },
2366 });
2367 }
2368
    /// Runs the deallocation of `ty` for the operands currently on
    /// `self.stack`.
    ///
    /// This will pop the ABI items of `ty` from `self.stack`.
    fn deallocate(&mut self, ty: &Type, what: Deallocate) {
        use Instruction::*;

        match *ty {
            // Strings always own a host-allocated buffer to free.
            Type::String => {
                self.emit(&Instruction::GuestDeallocateString);
            }

            Type::Bool
            | Type::U8
            | Type::S8
            | Type::U16
            | Type::S16
            | Type::U32
            | Type::S32
            | Type::Char
            | Type::U64
            | Type::S64
            | Type::F32
            | Type::F64
            | Type::ErrorContext => {
                // No deallocation necessary, just discard the operand on the
                // stack.
                self.stack.pop().unwrap();
            }

            Type::Id(id) => match &self.resolve.types[id].kind {
                // Type aliases deallocate through to the underlying type.
                TypeDefKind::Type(t) => self.deallocate(t, what),

                TypeDefKind::List(element) => {
                    // One block deallocates a single element through the
                    // iteration base pointer; the list instruction then
                    // covers the elements and the backing allocation.
                    self.push_block();
                    self.emit(&IterBasePointer);
                    let elemaddr = self.stack.pop().unwrap();
                    self.deallocate_indirect(element, elemaddr, Default::default(), what);
                    self.finish_block(0);

                    self.emit(&Instruction::GuestDeallocateList { element });
                }

                TypeDefKind::Map(key, value) => {
                    // Entries are laid out like tuple<K, V>: key at offset 0,
                    // value at its field offset within the entry.
                    let value_offset = self.bindgen.sizes().field_offsets([key, value])[1].0;
                    self.push_block();
                    self.emit(&IterBasePointer);
                    let entry_addr = self.stack.pop().unwrap();
                    self.deallocate_indirect(key, entry_addr.clone(), Default::default(), what);
                    self.deallocate_indirect(value, entry_addr, value_offset, what);
                    self.finish_block(0);

                    self.emit(&Instruction::GuestDeallocateMap { key, value });
                }

                // Owned handles and future/stream handles are lifted and
                // dropped, but only when `what.handles()` requests handle
                // cleanup; otherwise they fall through to the discard arm
                // below.
                TypeDefKind::Handle(Handle::Own(_))
                | TypeDefKind::Future(_)
                | TypeDefKind::Stream(_)
                    if what.handles() =>
                {
                    self.lift(ty);
                    self.emit(&DropHandle { ty });
                }

                // Aggregates recurse field-by-field over their flattened
                // operands.
                TypeDefKind::Record(record) => {
                    self.flat_for_each_record_type(
                        ty,
                        record.fields.iter().map(|f| &f.ty),
                        |me, ty| me.deallocate(ty, what),
                    );
                }

                TypeDefKind::Tuple(tuple) => {
                    self.flat_for_each_record_type(ty, tuple.types.iter(), |me, ty| {
                        me.deallocate(ty, what)
                    });
                }

                // Variant-like types get one block per case plus a dispatch
                // instruction over those blocks.
                TypeDefKind::Variant(variant) => {
                    self.flat_for_each_variant_arm(
                        ty,
                        false,
                        variant.cases.iter().map(|c| c.ty.as_ref()),
                        |me, ty| me.deallocate(ty, what),
                    );
                    self.emit(&GuestDeallocateVariant {
                        blocks: variant.cases.len(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.flat_for_each_variant_arm(ty, false, [None, Some(t)], |me, ty| {
                        me.deallocate(ty, what)
                    });
                    self.emit(&GuestDeallocateVariant { blocks: 2 });
                }

                TypeDefKind::Result(e) => {
                    self.flat_for_each_variant_arm(
                        ty,
                        false,
                        [e.ok.as_ref(), e.err.as_ref()],
                        |me, ty| me.deallocate(ty, what),
                    );
                    self.emit(&GuestDeallocateVariant { blocks: 2 });
                }

                // discard the operand on the stack, otherwise nothing to free.
                // (Future/Stream/Own handles reach here only when the guard
                // arm above did not match, i.e. `!what.handles()`.)
                TypeDefKind::Flags(_)
                | TypeDefKind::Enum(_)
                | TypeDefKind::Future(_)
                | TypeDefKind::Stream(_)
                | TypeDefKind::Handle(Handle::Own(_))
                | TypeDefKind::Handle(Handle::Borrow(_)) => {
                    self.stack.pop().unwrap();
                }

                TypeDefKind::Resource => unreachable!(),
                TypeDefKind::Unknown => unreachable!(),

                TypeDefKind::FixedLengthList(..) => todo!(),
            },
        }
    }
2493
    /// Deallocates a value of type `ty` stored in linear memory at
    /// `addr + offset`, as opposed to one whose operands are on the stack.
    ///
    /// Loads whatever the value needs from memory (e.g. list pointer and
    /// length) and then defers to `deallocate` for the actual freeing.
    fn deallocate_indirect(
        &mut self,
        ty: &Type,
        addr: B::Operand,
        offset: ArchitectureSize,
        what: Deallocate,
    ) {
        use Instruction::*;

        // No need to execute any instructions if this type itself doesn't
        // require any form of post-return.
        if !needs_deallocate(self.resolve, ty, what) {
            return;
        }

        match *ty {
            // Strings: load pointer and length, then free via `deallocate`.
            Type::String => {
                self.stack.push(addr.clone());
                self.emit(&Instruction::PointerLoad { offset });
                self.stack.push(addr);
                self.emit(&Instruction::LengthLoad {
                    offset: offset + self.bindgen.sizes().align(ty).into(),
                });
                self.deallocate(ty, what);
            }

            // Scalars never own anything in memory.
            Type::Bool
            | Type::U8
            | Type::S8
            | Type::U16
            | Type::S16
            | Type::U32
            | Type::S32
            | Type::Char
            | Type::U64
            | Type::S64
            | Type::F32
            | Type::F64
            | Type::ErrorContext => {}

            Type::Id(id) => match &self.resolve.types[id].kind {
                // Type aliases deallocate through to the underlying type.
                TypeDefKind::Type(t) => self.deallocate_indirect(t, addr, offset, what),

                // Lists: load pointer and length, then `deallocate` frees the
                // elements and the backing allocation.
                TypeDefKind::List(_) => {
                    self.stack.push(addr.clone());
                    self.emit(&Instruction::PointerLoad { offset });
                    self.stack.push(addr);
                    self.emit(&Instruction::LengthLoad {
                        offset: offset + self.bindgen.sizes().align(ty).into(),
                    });

                    self.deallocate(ty, what);
                }

                // Maps share the list layout: pointer/length to the entries.
                TypeDefKind::Map(_, _) => {
                    self.stack.push(addr.clone());
                    self.emit(&Instruction::PointerLoad { offset });
                    self.stack.push(addr);
                    self.emit(&Instruction::LengthLoad {
                        offset: offset + self.bindgen.sizes().align(ty).into(),
                    });

                    self.deallocate(ty, what);
                }

                // Handles stored in memory are read (lifted) and dropped when
                // handle cleanup is requested.
                TypeDefKind::Handle(Handle::Own(_))
                | TypeDefKind::Future(_)
                | TypeDefKind::Stream(_)
                    if what.handles() =>
                {
                    self.read_from_memory(ty, addr, offset);
                    self.emit(&DropHandle { ty });
                }

                // NOTE(review): these arms appear unreachable because
                // `needs_deallocate` is presumed to return false for handles
                // when `!what.handles()` — confirm against its definition.
                TypeDefKind::Handle(Handle::Own(_)) => unreachable!(),
                TypeDefKind::Handle(Handle::Borrow(_)) => unreachable!(),
                TypeDefKind::Resource => unreachable!(),

                // Aggregates recurse into each field at its offset.
                TypeDefKind::Record(record) => {
                    self.deallocate_indirect_fields(
                        &record.fields.iter().map(|f| f.ty).collect::<Vec<_>>(),
                        addr,
                        offset,
                        what,
                    );
                }

                TypeDefKind::Tuple(tuple) => {
                    self.deallocate_indirect_fields(&tuple.types, addr, offset, what);
                }

                // Flags are plain integers in memory; nothing owned.
                TypeDefKind::Flags(_) => {}

                // Variant-like types: one block per case plus a dispatch
                // instruction over those blocks.
                TypeDefKind::Variant(variant) => {
                    self.deallocate_indirect_variant(
                        offset,
                        addr,
                        variant.tag(),
                        variant.cases.iter().map(|c| c.ty.as_ref()),
                        what,
                    );
                    self.emit(&GuestDeallocateVariant {
                        blocks: variant.cases.len(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.deallocate_indirect_variant(offset, addr, Int::U8, [None, Some(t)], what);
                    self.emit(&GuestDeallocateVariant { blocks: 2 });
                }

                TypeDefKind::Result(e) => {
                    self.deallocate_indirect_variant(
                        offset,
                        addr,
                        Int::U8,
                        [e.ok.as_ref(), e.err.as_ref()],
                        what,
                    );
                    self.emit(&GuestDeallocateVariant { blocks: 2 });
                }

                // Enums are plain discriminants in memory; nothing owned.
                TypeDefKind::Enum(_) => {}

                // Covered by the `what.handles()` guard arm above; see the
                // review note there.
                TypeDefKind::Future(_) => unreachable!(),
                TypeDefKind::Stream(_) => unreachable!(),
                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedLengthList(_, _) => {}
            },
        }
    }
2625
    /// Deallocates a variant-like value (variant/option/result) stored in
    /// memory at `addr + offset`.
    ///
    /// Loads the discriminant, then generates one block per case, in case
    /// order; each block deallocates that case's payload (if any) at the
    /// shared payload offset.
    fn deallocate_indirect_variant<'b>(
        &mut self,
        offset: ArchitectureSize,
        addr: B::Operand,
        tag: Int,
        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
        what: Deallocate,
    ) {
        self.stack.push(addr.clone());
        self.load_intrepr(offset, tag);
        // Every payload lives at the same offset past the discriminant.
        let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
        for ty in cases {
            self.push_block();
            if let Some(ty) = ty {
                self.deallocate_indirect(ty, addr.clone(), payload_offset, what);
            }
            self.finish_block(0);
        }
    }
2645
2646 fn deallocate_indirect_fields(
2647 &mut self,
2648 tys: &[Type],
2649 addr: B::Operand,
2650 offset: ArchitectureSize,
2651 what: Deallocate,
2652 ) {
2653 for (field_offset, ty) in self.bindgen.sizes().field_offsets(tys) {
2654 self.deallocate_indirect(ty, addr.clone(), offset + (field_offset), what);
2655 }
2656 }
2657}
2658
/// Computes the `Bitcast` needed to reinterpret a wasm value of type `from`
/// as a value of type `to`.
///
/// NOTE: arm order matters. The specific `(I64 | Pointer, PointerOrI64)` and
/// `(PointerOrI64, I64 | Pointer)` arms must precede the wildcard arms that
/// follow them, which route every other `PointerOrI64` conversion through an
/// intermediate `I64` cast pair.
fn cast(from: WasmType, to: WasmType) -> Bitcast {
    use WasmType::*;

    match (from, to) {
        // Identity: no cast required.
        (I32, I32)
        | (I64, I64)
        | (F32, F32)
        | (F64, F64)
        | (Pointer, Pointer)
        | (PointerOrI64, PointerOrI64)
        | (Length, Length) => Bitcast::None,

        // Integer widening and float<->integer reinterpretation.
        (I32, I64) => Bitcast::I32ToI64,
        (F32, I32) => Bitcast::F32ToI32,
        (F64, I64) => Bitcast::F64ToI64,

        (I64, I32) => Bitcast::I64ToI32,
        (I32, F32) => Bitcast::I32ToF32,
        (I64, F64) => Bitcast::I64ToF64,

        (F32, I64) => Bitcast::F32ToI64,
        (I64, F32) => Bitcast::I64ToF32,

        // Into `PointerOrI64`: direct forms first, everything else goes
        // through I64 (wildcard arm — keep it after the direct forms).
        (I64, PointerOrI64) => Bitcast::I64ToP64,
        (Pointer, PointerOrI64) => Bitcast::PToP64,
        (_, PointerOrI64) => {
            Bitcast::Sequence(Box::new([cast(from, I64), cast(I64, PointerOrI64)]))
        }

        // Out of `PointerOrI64`: same structure as above.
        (PointerOrI64, I64) => Bitcast::P64ToI64,
        (PointerOrI64, Pointer) => Bitcast::P64ToP,
        (PointerOrI64, _) => Bitcast::Sequence(Box::new([cast(PointerOrI64, I64), cast(I64, to)])),

        // Pointer/length conversions to and from plain integers.
        (I32, Pointer) => Bitcast::I32ToP,
        (Pointer, I32) => Bitcast::PToI32,
        (I32, Length) => Bitcast::I32ToL,
        (Length, I32) => Bitcast::LToI32,
        (I64, Length) => Bitcast::I64ToL,
        (Length, I64) => Bitcast::LToI64,
        (Pointer, Length) => Bitcast::PToL,
        (Length, Pointer) => Bitcast::LToP,

        // f32 <-> pointer/length goes through an intermediate I32.
        (F32, Pointer | Length) => Bitcast::Sequence(Box::new([cast(F32, I32), cast(I32, to)])),
        (Pointer | Length, F32) => Bitcast::Sequence(Box::new([cast(from, I32), cast(I32, F32)])),

        // Conversions with no defined bitcast.
        (F32, F64)
        | (F64, F32)
        | (F64, I32)
        | (I32, F64)
        | (Pointer | Length, I64 | F64)
        | (I64 | F64, Pointer | Length) => {
            unreachable!("Don't know how to bitcast from {:?} to {:?}", from, to);
        }
    }
}
2714
2715/// Flatten types in a given type
2716///
2717/// It is sometimes necessary to restrict the number of max parameters dynamically,
2718/// for example during an async guest import call (flat params are limited to 4)
2719fn flat_types(resolve: &Resolve, ty: &Type, max_params: Option<usize>) -> Option<Vec<WasmType>> {
2720 let max_params = max_params.unwrap_or(MAX_FLAT_PARAMS);
2721 let mut storage = iter::repeat_n(WasmType::I32, max_params).collect::<Vec<_>>();
2722 let mut flat = FlatTypes::new(storage.as_mut_slice());
2723 resolve.push_flat(ty, &mut flat).then_some(flat.to_vec())
2724}