// wit_bindgen_core/abi.rs

1use std::fmt;
2use std::iter;
3
4pub use wit_parser::abi::{AbiVariant, FlatTypes, WasmSignature, WasmType};
5use wit_parser::{
6    align_to_arch, Alignment, ArchitectureSize, ElementInfo, Enum, Flags, FlagsRepr, Function,
7    Handle, Int, Record, Resolve, Result_, SizeAlign, Tuple, Type, TypeDefKind, TypeId, Variant,
8};
9
10// Helper macro for defining instructions without having to have tons of
11// exhaustive `match` statements to update
/// Declares an instruction enum where each variant is written as
///
/// ```text
/// Variant { field: Ty, ... } : [operands_popped] => [results_pushed],
/// ```
///
/// and expands to the plain `enum` definition plus `operands_len` /
/// `results_len` accessors that evaluate the bracketed expressions. Those
/// expressions may reference the variant's own fields (e.g. `casts.len()`),
/// which is why the generated `match` arms bind every field.
macro_rules! def_instruction {
    (
        $( #[$enum_attr:meta] )*
        pub enum $name:ident<'a> {
            $(
                $( #[$attr:meta] )*
                $variant:ident $( {
                    $($field:ident : $field_ty:ty $(,)* )*
                } )?
                    :
                [$num_popped:expr] => [$num_pushed:expr],
            )*
        }
    ) => {
        $( #[$enum_attr] )*
        pub enum $name<'a> {
            $(
                $( #[$attr] )*
                $variant $( {
                    $(
                        $field : $field_ty,
                    )*
                } )? ,
            )*
        }

        impl $name<'_> {
            /// How many operands does this instruction pop from the stack?
            #[allow(unused_variables)]
            pub fn operands_len(&self) -> usize {
                match self {
                    $(
                        // Bind all fields so `$num_popped` can refer to them.
                        Self::$variant $( {
                            $(
                                $field,
                            )*
                        } )? => $num_popped,
                    )*
                }
            }

            /// How many results does this instruction push onto the stack?
            #[allow(unused_variables)]
            pub fn results_len(&self) -> usize {
                match self {
                    $(
                        // Bind all fields so `$num_pushed` can refer to them.
                        Self::$variant $( {
                            $(
                                $field,
                            )*
                        } )? => $num_pushed,
                    )*
                }
            }
        }
    };
}
69
70def_instruction! {
71    #[derive(Debug)]
72    pub enum Instruction<'a> {
73        /// Acquires the specified parameter and places it on the stack.
74        /// Depending on the context this may refer to wasm parameters or
75        /// interface types parameters.
76        GetArg { nth: usize } : [0] => [1],
77
78        // Integer const/manipulation instructions
79
80        /// Pushes the constant `val` onto the stack.
81        I32Const { val: i32 } : [0] => [1],
82        /// Casts the top N items on the stack using the `Bitcast` enum
83        /// provided. Consumes the same number of operands that this produces.
84        Bitcasts { casts: &'a [Bitcast] } : [casts.len()] => [casts.len()],
85        /// Pushes a number of constant zeros for each wasm type on the stack.
86        ConstZero { tys: &'a [WasmType] } : [0] => [tys.len()],
87
88        // Memory load/store instructions
89
90        /// Pops a pointer from the stack and loads a little-endian `i32` from
91        /// it, using the specified constant offset.
92        I32Load { offset: ArchitectureSize } : [1] => [1],
93        /// Pops a pointer from the stack and loads a little-endian `i8` from
94        /// it, using the specified constant offset. The value loaded is the
95        /// zero-extended to 32-bits
96        I32Load8U { offset: ArchitectureSize } : [1] => [1],
97        /// Pops a pointer from the stack and loads a little-endian `i8` from
98        /// it, using the specified constant offset. The value loaded is the
99        /// sign-extended to 32-bits
100        I32Load8S { offset: ArchitectureSize } : [1] => [1],
101        /// Pops a pointer from the stack and loads a little-endian `i16` from
102        /// it, using the specified constant offset. The value loaded is the
103        /// zero-extended to 32-bits
104        I32Load16U { offset: ArchitectureSize } : [1] => [1],
105        /// Pops a pointer from the stack and loads a little-endian `i16` from
106        /// it, using the specified constant offset. The value loaded is the
107        /// sign-extended to 32-bits
108        I32Load16S { offset: ArchitectureSize } : [1] => [1],
109        /// Pops a pointer from the stack and loads a little-endian `i64` from
110        /// it, using the specified constant offset.
111        I64Load { offset: ArchitectureSize } : [1] => [1],
112        /// Pops a pointer from the stack and loads a little-endian `f32` from
113        /// it, using the specified constant offset.
114        F32Load { offset: ArchitectureSize } : [1] => [1],
115        /// Pops a pointer from the stack and loads a little-endian `f64` from
116        /// it, using the specified constant offset.
117        F64Load { offset: ArchitectureSize } : [1] => [1],
118
119        /// Like `I32Load` or `I64Load`, but for loading pointer values.
120        PointerLoad { offset: ArchitectureSize } : [1] => [1],
121        /// Like `I32Load` or `I64Load`, but for loading array length values.
122        LengthLoad { offset: ArchitectureSize } : [1] => [1],
123
124        /// Pops a pointer from the stack and then an `i32` value.
125        /// Stores the value in little-endian at the pointer specified plus the
126        /// constant `offset`.
127        I32Store { offset: ArchitectureSize } : [2] => [0],
128        /// Pops a pointer from the stack and then an `i32` value.
129        /// Stores the low 8 bits of the value in little-endian at the pointer
130        /// specified plus the constant `offset`.
131        I32Store8 { offset: ArchitectureSize } : [2] => [0],
132        /// Pops a pointer from the stack and then an `i32` value.
133        /// Stores the low 16 bits of the value in little-endian at the pointer
134        /// specified plus the constant `offset`.
135        I32Store16 { offset: ArchitectureSize } : [2] => [0],
136        /// Pops a pointer from the stack and then an `i64` value.
137        /// Stores the value in little-endian at the pointer specified plus the
138        /// constant `offset`.
139        I64Store { offset: ArchitectureSize } : [2] => [0],
140        /// Pops a pointer from the stack and then an `f32` value.
141        /// Stores the value in little-endian at the pointer specified plus the
142        /// constant `offset`.
143        F32Store { offset: ArchitectureSize } : [2] => [0],
144        /// Pops a pointer from the stack and then an `f64` value.
145        /// Stores the value in little-endian at the pointer specified plus the
146        /// constant `offset`.
147        F64Store { offset: ArchitectureSize } : [2] => [0],
148
149        /// Like `I32Store` or `I64Store`, but for storing pointer values.
150        PointerStore { offset: ArchitectureSize } : [2] => [0],
151        /// Like `I32Store` or `I64Store`, but for storing array length values.
152        LengthStore { offset: ArchitectureSize } : [2] => [0],
153
154        // Scalar lifting/lowering
155
156        /// Converts an interface type `char` value to a 32-bit integer
157        /// representing the unicode scalar value.
158        I32FromChar : [1] => [1],
159        /// Converts an interface type `u64` value to a wasm `i64`.
160        I64FromU64 : [1] => [1],
161        /// Converts an interface type `s64` value to a wasm `i64`.
162        I64FromS64 : [1] => [1],
163        /// Converts an interface type `u32` value to a wasm `i32`.
164        I32FromU32 : [1] => [1],
165        /// Converts an interface type `s32` value to a wasm `i32`.
166        I32FromS32 : [1] => [1],
167        /// Converts an interface type `u16` value to a wasm `i32`.
168        I32FromU16 : [1] => [1],
169        /// Converts an interface type `s16` value to a wasm `i32`.
170        I32FromS16 : [1] => [1],
171        /// Converts an interface type `u8` value to a wasm `i32`.
172        I32FromU8 : [1] => [1],
173        /// Converts an interface type `s8` value to a wasm `i32`.
174        I32FromS8 : [1] => [1],
175        /// Conversion an interface type `f32` value to a wasm `f32`.
176        ///
177        /// This may be a noop for some implementations, but it's here in case the
178        /// native language representation of `f32` is different than the wasm
179        /// representation of `f32`.
180        CoreF32FromF32 : [1] => [1],
181        /// Conversion an interface type `f64` value to a wasm `f64`.
182        ///
183        /// This may be a noop for some implementations, but it's here in case the
184        /// native language representation of `f64` is different than the wasm
185        /// representation of `f64`.
186        CoreF64FromF64 : [1] => [1],
187
188        /// Converts a native wasm `i32` to an interface type `s8`.
189        ///
190        /// This will truncate the upper bits of the `i32`.
191        S8FromI32 : [1] => [1],
192        /// Converts a native wasm `i32` to an interface type `u8`.
193        ///
194        /// This will truncate the upper bits of the `i32`.
195        U8FromI32 : [1] => [1],
196        /// Converts a native wasm `i32` to an interface type `s16`.
197        ///
198        /// This will truncate the upper bits of the `i32`.
199        S16FromI32 : [1] => [1],
200        /// Converts a native wasm `i32` to an interface type `u16`.
201        ///
202        /// This will truncate the upper bits of the `i32`.
203        U16FromI32 : [1] => [1],
204        /// Converts a native wasm `i32` to an interface type `s32`.
205        S32FromI32 : [1] => [1],
206        /// Converts a native wasm `i32` to an interface type `u32`.
207        U32FromI32 : [1] => [1],
208        /// Converts a native wasm `i64` to an interface type `s64`.
209        S64FromI64 : [1] => [1],
210        /// Converts a native wasm `i64` to an interface type `u64`.
211        U64FromI64 : [1] => [1],
212        /// Converts a native wasm `i32` to an interface type `char`.
213        ///
214        /// It's safe to assume that the `i32` is indeed a valid unicode code point.
215        CharFromI32 : [1] => [1],
216        /// Converts a native wasm `f32` to an interface type `f32`.
217        F32FromCoreF32 : [1] => [1],
218        /// Converts a native wasm `f64` to an interface type `f64`.
219        F64FromCoreF64 : [1] => [1],
220
221        /// Creates a `bool` from an `i32` input, trapping if the `i32` isn't
222        /// zero or one.
223        BoolFromI32 : [1] => [1],
224        /// Creates an `i32` from a `bool` input, must return 0 or 1.
225        I32FromBool : [1] => [1],
226
227        // lists
228
229        /// Lowers a list where the element's layout in the native language is
230        /// expected to match the canonical ABI definition of interface types.
231        ///
232        /// Pops a list value from the stack and pushes the pointer/length onto
233        /// the stack. If `realloc` is set to `Some` then this is expected to
234        /// *consume* the list which means that the data needs to be copied. An
235        /// allocation/copy is expected when:
236        ///
237        /// * A host is calling a wasm export with a list (it needs to copy the
238        ///   list in to the callee's module, allocating space with `realloc`)
239        /// * A wasm export is returning a list (it's expected to use `realloc`
240        ///   to give ownership of the list to the caller.
241        /// * A host is returning a list in a import definition, meaning that
242        ///   space needs to be allocated in the caller with `realloc`).
243        ///
244        /// A copy does not happen (e.g. `realloc` is `None`) when:
245        ///
246        /// * A wasm module calls an import with the list. In this situation
247        ///   it's expected the caller will know how to access this module's
248        ///   memory (e.g. the host has raw access or wasm-to-wasm communication
249        ///   would copy the list).
250        ///
251        /// If `realloc` is `Some` then the adapter is not responsible for
252        /// cleaning up this list because the other end is receiving the
253        /// allocation. If `realloc` is `None` then the adapter is responsible
254        /// for cleaning up any temporary allocation it created, if any.
255        ListCanonLower {
256            element: &'a Type,
257            realloc: Option<&'a str>,
258        } : [1] => [2],
259
260        /// Same as `ListCanonLower`, but used for strings
261        StringLower {
262            realloc: Option<&'a str>,
263        } : [1] => [2],
264
265        /// Lowers a list where the element's layout in the native language is
266        /// not expected to match the canonical ABI definition of interface
267        /// types.
268        ///
269        /// Pops a list value from the stack and pushes the pointer/length onto
270        /// the stack. This operation also pops a block from the block stack
271        /// which is used as the iteration body of writing each element of the
272        /// list consumed.
273        ///
274        /// The `realloc` field here behaves the same way as `ListCanonLower`.
275        /// It's only set to `None` when a wasm module calls a declared import.
276        /// Otherwise lowering in other contexts requires allocating memory for
277        /// the receiver to own.
278        ListLower {
279            element: &'a Type,
280            realloc: Option<&'a str>,
281        } : [1] => [2],
282
283        /// Lifts a list which has a canonical representation into an interface
284        /// types value.
285        ///
286        /// The term "canonical" representation here means that the
287        /// representation of the interface types value in the native language
288        /// exactly matches the canonical ABI definition of the type.
289        ///
290        /// This will consume two `i32` values from the stack, a pointer and a
291        /// length, and then produces an interface value list.
292        ListCanonLift {
293            element: &'a Type,
294            ty: TypeId,
295        } : [2] => [1],
296
297        /// Same as `ListCanonLift`, but used for strings
298        StringLift : [2] => [1],
299
300        /// Lifts a list which into an interface types value.
301        ///
302        /// This will consume two `i32` values from the stack, a pointer and a
303        /// length, and then produces an interface value list.
304        ///
305        /// This will also pop a block from the block stack which is how to
306        /// read each individual element from the list.
307        ListLift {
308            element: &'a Type,
309            ty: TypeId,
310        } : [2] => [1],
311
312        /// Pushes an operand onto the stack representing the list item from
313        /// each iteration of the list.
314        ///
315        /// This is only used inside of blocks related to lowering lists.
316        IterElem { element: &'a Type } : [0] => [1],
317
318        /// Pushes an operand onto the stack representing the base pointer of
319        /// the next element in a list.
320        ///
321        /// This is used for both lifting and lowering lists.
322        IterBasePointer : [0] => [1],
323
324        // records and tuples
325
326        /// Pops a record value off the stack, decomposes the record to all of
327        /// its fields, and then pushes the fields onto the stack.
328        RecordLower {
329            record: &'a Record,
330            name: &'a str,
331            ty: TypeId,
332        } : [1] => [record.fields.len()],
333
334        /// Pops all fields for a record off the stack and then composes them
335        /// into a record.
336        RecordLift {
337            record: &'a Record,
338            name: &'a str,
339            ty: TypeId,
340        } : [record.fields.len()] => [1],
341
342        /// Create an `i32` from a handle.
343        HandleLower {
344            handle: &'a Handle,
345            name: &'a str,
346            ty: TypeId,
347        } : [1] => [1],
348
349        /// Create a handle from an `i32`.
350        HandleLift {
351            handle: &'a Handle,
352            name: &'a str,
353            ty: TypeId,
354        } : [1] => [1],
355
356        /// Create an `i32` from a future.
357        FutureLower {
358            payload: &'a Option<Type>,
359            ty: TypeId,
360        } : [1] => [1],
361
362        /// Create a future from an `i32`.
363        FutureLift {
364            payload: &'a Option<Type>,
365            ty: TypeId,
366        } : [1] => [1],
367
368        /// Create an `i32` from a stream.
369        StreamLower {
370            payload: &'a Option<Type>,
371            ty: TypeId,
372        } : [1] => [1],
373
374        /// Create a stream from an `i32`.
375        StreamLift {
376            payload: &'a Option<Type>,
377            ty: TypeId,
378        } : [1] => [1],
379
380        /// Create an `i32` from an error-context.
381        ErrorContextLower : [1] => [1],
382
383        /// Create a error-context from an `i32`.
384        ErrorContextLift : [1] => [1],
385
386        /// Pops a tuple value off the stack, decomposes the tuple to all of
387        /// its fields, and then pushes the fields onto the stack.
388        TupleLower {
389            tuple: &'a Tuple,
390            ty: TypeId,
391        } : [1] => [tuple.types.len()],
392
393        /// Pops all fields for a tuple off the stack and then composes them
394        /// into a tuple.
395        TupleLift {
396            tuple: &'a Tuple,
397            ty: TypeId,
398        } : [tuple.types.len()] => [1],
399
400        /// Converts a language-specific record-of-bools to a list of `i32`.
401        FlagsLower {
402            flags: &'a Flags,
403            name: &'a str,
404            ty: TypeId,
405        } : [1] => [flags.repr().count()],
406        /// Converts a list of native wasm `i32` to a language-specific
407        /// record-of-bools.
408        FlagsLift {
409            flags: &'a Flags,
410            name: &'a str,
411            ty: TypeId,
412        } : [flags.repr().count()] => [1],
413
414        // variants
415
416        /// This is a special instruction used for `VariantLower`
417        /// instruction to determine the name of the payload, if present, to use
418        /// within each block.
419        ///
420        /// Each sub-block will have this be the first instruction, and if it
421        /// lowers a payload it will expect something bound to this name.
422        VariantPayloadName : [0] => [1],
423
424        /// Pops a variant off the stack as well as `ty.cases.len()` blocks
425        /// from the code generator. Uses each of those blocks and the value
426        /// from the stack to produce `nresults` of items.
427        VariantLower {
428            variant: &'a Variant,
429            name: &'a str,
430            ty: TypeId,
431            results: &'a [WasmType],
432        } : [1] => [results.len()],
433
434        /// Pops an `i32` off the stack as well as `ty.cases.len()` blocks
435        /// from the code generator. Uses each of those blocks and the value
436        /// from the stack to produce a final variant.
437        VariantLift {
438            variant: &'a Variant,
439            name: &'a str,
440            ty: TypeId,
441        } : [1] => [1],
442
443        /// Pops an enum off the stack and pushes the `i32` representation.
444        EnumLower {
445            enum_: &'a Enum,
446            name: &'a str,
447            ty: TypeId,
448        } : [1] => [1],
449
450        /// Pops an `i32` off the stack and lifts it into the `enum` specified.
451        EnumLift {
452            enum_: &'a Enum,
453            name: &'a str,
454            ty: TypeId,
455        } : [1] => [1],
456
457        /// Specialization of `VariantLower` for specifically `option<T>` types,
458        /// otherwise behaves the same as `VariantLower` (e.g. two blocks for
459        /// the two cases.
460        OptionLower {
461            payload: &'a Type,
462            ty: TypeId,
463            results: &'a [WasmType],
464        } : [1] => [results.len()],
465
466        /// Specialization of `VariantLift` for specifically the `option<T>`
467        /// type. Otherwise behaves the same as the `VariantLift` instruction
468        /// with two blocks for the lift.
469        OptionLift {
470            payload: &'a Type,
471            ty: TypeId,
472        } : [1] => [1],
473
474        /// Specialization of `VariantLower` for specifically `result<T, E>`
475        /// types, otherwise behaves the same as `VariantLower` (e.g. two blocks
476        /// for the two cases.
477        ResultLower {
478            result: &'a Result_
479            ty: TypeId,
480            results: &'a [WasmType],
481        } : [1] => [results.len()],
482
483        /// Specialization of `VariantLift` for specifically the `result<T,
484        /// E>` type. Otherwise behaves the same as the `VariantLift`
485        /// instruction with two blocks for the lift.
486        ResultLift {
487            result: &'a Result_,
488            ty: TypeId,
489        } : [1] => [1],
490
491        // calling/control flow
492
493        /// Represents a call to a raw WebAssembly API. The module/name are
494        /// provided inline as well as the types if necessary.
495        CallWasm {
496            name: &'a str,
497            sig: &'a WasmSignature,
498        } : [sig.params.len()] => [sig.results.len()],
499
500        /// Same as `CallWasm`, except the dual where an interface is being
501        /// called rather than a raw wasm function.
502        ///
503        /// Note that this will be used for async functions, and `async_`
504        /// indicates whether the function should be invoked in an async
505        /// fashion.
506        CallInterface {
507            func: &'a Function,
508            async_: bool,
509        } : [func.params.len()] => [usize::from(func.result.is_some())],
510
511        /// Returns `amt` values on the stack. This is always the last
512        /// instruction.
513        Return { amt: usize, func: &'a Function } : [*amt] => [0],
514
515        /// Calls the `realloc` function specified in a malloc-like fashion
516        /// allocating `size` bytes with alignment `align`.
517        ///
518        /// Pushes the returned pointer onto the stack.
519        Malloc {
520            realloc: &'static str,
521            size: ArchitectureSize,
522            align: Alignment,
523        } : [0] => [1],
524
525        /// Used exclusively for guest-code generation this indicates that
526        /// the standard memory deallocation function needs to be invoked with
527        /// the specified parameters.
528        ///
529        /// This will pop a pointer from the stack and push nothing.
530        GuestDeallocate {
531            size: ArchitectureSize,
532            align: Alignment,
533        } : [1] => [0],
534
535        /// Used exclusively for guest-code generation this indicates that
536        /// a string is being deallocated. The ptr/length are on the stack and
537        /// are poppped off and used to deallocate the string.
538        GuestDeallocateString : [2] => [0],
539
540        /// Used exclusively for guest-code generation this indicates that
541        /// a list is being deallocated. The ptr/length are on the stack and
542        /// are poppped off and used to deallocate the list.
543        ///
544        /// This variant also pops a block off the block stack to be used as the
545        /// body of the deallocation loop.
546        GuestDeallocateList {
547            element: &'a Type,
548        } : [2] => [0],
549
550        /// Used exclusively for guest-code generation this indicates that
551        /// a variant is being deallocated. The integer discriminant is popped
552        /// off the stack as well as `blocks` number of blocks popped from the
553        /// blocks stack. The variant is used to select, at runtime, which of
554        /// the blocks is executed to deallocate the variant.
555        GuestDeallocateVariant {
556            blocks: usize,
557        } : [1] => [0],
558
559        /// Deallocates the language-specific handle representation on the top
560        /// of the stack. Used for async imports.
561        DropHandle { ty: &'a Type } : [1] => [0],
562
563        /// Call `task.return` for an async-lifted export.
564        ///
565        /// This will call core wasm import `name` which will be mapped to
566        /// `task.return` later on. The function given has `params` as its
567        /// parameters and it will return no results. This is used to pass the
568        /// lowered representation of a function's results to `task.return`.
569        AsyncTaskReturn { name: &'a str, params: &'a [WasmType] } : [params.len()] => [0],
570
571        /// Force the evaluation of the specified number of expressions and push
572        /// the results to the stack.
573        ///
574        /// This is useful prior to disposing of temporary variables and/or
575        /// allocations which are referenced by one or more not-yet-evaluated
576        /// expressions.
577        Flush { amt: usize } : [*amt] => [*amt],
578    }
579}
580
/// A conversion between two core wasm value representations, consumed by the
/// `Bitcasts` instruction (one cast per stack operand).
#[derive(Debug, PartialEq)]
pub enum Bitcast {
    // Upcasts
    F32ToI32,
    F64ToI64,
    I32ToI64,
    F32ToI64,

    // Downcasts
    I32ToF32,
    I64ToF64,
    I64ToI32,
    I64ToF32,

    // PointerOrI64 conversions. These preserve provenance when the source
    // or destination is a pointer value.
    //
    // These are used when pointer values are being stored in
    // (ToP64) and loaded out of (P64To) PointerOrI64 values, so they
    // always have to preserve provenance when the value being loaded or
    // stored is a pointer.
    P64ToI64,
    I64ToP64,
    P64ToP,
    PToP64,

    // Pointer<->number conversions. These do not preserve provenance.
    //
    // These are used when integer or floating-point values are being stored in
    // (I32ToP/etc.) and loaded out of (PToI32/etc.) pointer values, so they
    // never have any provenance to preserve.
    I32ToP,
    PToI32,
    PToL,
    LToP,

    // Number<->Number conversions.
    // (`L` here appears to denote the array-length type — see `LengthLoad`/
    // `LengthStore` above; TODO confirm.)
    I32ToL,
    LToI32,
    I64ToL,
    LToI64,

    // Multiple conversions in sequence: the two casts are applied in order.
    Sequence(Box<[Bitcast; 2]>),

    // No conversion necessary; the value passes through unchanged.
    None,
}
628
629/// Whether the glue code surrounding a call is lifting arguments and lowering
630/// results or vice versa.
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum LiftLower {
    /// When the glue code lifts arguments and lowers results.
    ///
    /// ```text
    /// Wasm --lift-args--> SourceLanguage; call; SourceLanguage --lower-results--> Wasm
    /// ```
    ///
    /// NOTE(review): this direction appears to correspond to adapting an
    /// exported function (wasm invokes the source language) — confirm
    /// against `Generator::call`.
    LiftArgsLowerResults,
    /// When the glue code lowers arguments and lifts results.
    ///
    /// ```text
    /// SourceLanguage --lower-args--> Wasm; call; Wasm --lift-results--> SourceLanguage
    /// ```
    ///
    /// NOTE(review): this direction appears to correspond to adapting an
    /// imported function (the source language invokes wasm) — confirm
    /// against `Generator::call`.
    LowerArgsLiftResults,
}
646
647/// Trait for language implementors to use to generate glue code between native
648/// WebAssembly signatures and interface types signatures.
649///
650/// This is used as an implementation detail in interpreting the ABI between
651/// interface types and wasm types. Eventually this will be driven by interface
652/// types adapters themselves, but for now the ABI of a function dictates what
653/// instructions are fed in.
654///
655/// Types implementing `Bindgen` are incrementally fed `Instruction` values to
656/// generate code for. Instructions operate like a stack machine where each
657/// instruction has a list of inputs and a list of outputs (provided by the
658/// `emit` function).
pub trait Bindgen {
    /// The intermediate type for fragments of code for this type.
    ///
    /// For most languages `String` is a suitable intermediate type.
    type Operand: Clone + fmt::Debug;

    /// Emit code to implement the given instruction.
    ///
    /// Each operand is given in `operands` and can be popped off if ownership
    /// is required. It's guaranteed that `operands` has the appropriate length
    /// for the `inst` given, as specified with [`Instruction`].
    ///
    /// Each result variable should be pushed onto `results`. This function must
    /// push the appropriate number of results or binding generation will panic.
    fn emit(
        &mut self,
        resolve: &Resolve,
        inst: &Instruction<'_>,
        operands: &mut Vec<Self::Operand>,
        results: &mut Vec<Self::Operand>,
    );

    /// Gets an operand reference to the return pointer area.
    ///
    /// The provided size and alignment is for the function's return type.
    fn return_pointer(&mut self, size: ArchitectureSize, align: Alignment) -> Self::Operand;

    /// Enters a new block of code to generate code for.
    ///
    /// This is currently exclusively used for constructing variants. When a
    /// variant is constructed a block here will be pushed for each case of a
    /// variant, generating the code necessary to translate a variant case.
    ///
    /// Blocks are completed with `finish_block` below. It's expected that `emit`
    /// will always push code (if necessary) into the "current block", which is
    /// updated by calling this method and `finish_block` below.
    fn push_block(&mut self);

    /// Indicates to the code generator that a block is completed, and the
    /// `operand` specified was the resulting value of the block.
    ///
    /// This method will be used to compute the value of each arm of lifting a
    /// variant. The `operand` will be `None` if the variant case didn't
    /// actually have any type associated with it. Otherwise it will be `Some`
    /// as the last value remaining on the stack representing the value
    /// associated with a variant's `case`.
    ///
    /// NOTE(review): despite the doc's `None`/`Some` phrasing, the parameter
    /// is a `Vec` of operands (possibly empty) — confirm intended semantics
    /// with callers.
    ///
    /// It's expected that this will resume code generation in the previous
    /// block before `push_block` was called. This must also save the results
    /// of the current block internally for instructions like `ResultLift` to
    /// use later.
    fn finish_block(&mut self, operand: &mut Vec<Self::Operand>);

    /// Returns size information that was previously calculated for all types.
    fn sizes(&self) -> &SizeAlign;

    /// Returns whether or not the specified element type is represented in a
    /// "canonical" form for lists. This dictates whether the `ListCanonLower`
    /// and `ListCanonLift` instructions are used or not.
    fn is_list_canonical(&self, resolve: &Resolve, element: &Type) -> bool;
}
720
721/// Generates an abstract sequence of instructions which represents this
722/// function being adapted as an imported function.
723///
724/// The instructions here, when executed, will emulate a language with
725/// interface types calling the concrete wasm implementation. The parameters
726/// for the returned instruction sequence are the language's own
727/// interface-types parameters. One instruction in the instruction stream
728/// will be a `Call` which represents calling the actual raw wasm function
729/// signature.
730///
731/// This function is useful, for example, if you're building a language
732/// generator for WASI bindings. This will document how to translate
733/// language-specific values into the wasm types to call a WASI function,
734/// and it will also automatically convert the results of the WASI function
735/// back to a language-specific value.
736pub fn call(
737    resolve: &Resolve,
738    variant: AbiVariant,
739    lift_lower: LiftLower,
740    func: &Function,
741    bindgen: &mut impl Bindgen,
742    async_: bool,
743) {
744    Generator::new(resolve, bindgen).call(func, variant, lift_lower, async_);
745}
746
747pub fn lower_to_memory<B: Bindgen>(
748    resolve: &Resolve,
749    bindgen: &mut B,
750    address: B::Operand,
751    value: B::Operand,
752    ty: &Type,
753) {
754    let mut generator = Generator::new(resolve, bindgen);
755    // TODO: make this configurable? Right now this function is only called for
756    // future/stream callbacks so it's appropriate to skip realloc here as it's
757    // all "lower for wasm import", but this might get reused for something else
758    // in the future.
759    generator.realloc = Some(Realloc::Export("cabi_realloc"));
760    generator.stack.push(value);
761    generator.write_to_memory(ty, address, Default::default());
762}
763
764pub fn lower_flat<B: Bindgen>(
765    resolve: &Resolve,
766    bindgen: &mut B,
767    value: B::Operand,
768    ty: &Type,
769) -> Vec<B::Operand> {
770    let mut generator = Generator::new(resolve, bindgen);
771    generator.stack.push(value);
772    generator.realloc = Some(Realloc::Export("cabi_realloc"));
773    generator.lower(ty);
774    generator.stack
775}
776
777pub fn lift_from_memory<B: Bindgen>(
778    resolve: &Resolve,
779    bindgen: &mut B,
780    address: B::Operand,
781    ty: &Type,
782) -> B::Operand {
783    let mut generator = Generator::new(resolve, bindgen);
784    generator.read_from_memory(ty, address, Default::default());
785    generator.stack.pop().unwrap()
786}
787
788/// Used in a similar manner as the `Interface::call` function except is
789/// used to generate the `post-return` callback for `func`.
790///
791/// This is only intended to be used in guest generators for exported
792/// functions and will primarily generate `GuestDeallocate*` instructions,
793/// plus others used as input to those instructions.
794pub fn post_return(resolve: &Resolve, func: &Function, bindgen: &mut impl Bindgen) {
795    Generator::new(resolve, bindgen).post_return(func);
796}
797
798/// Returns whether the `Function` specified needs a post-return function to
799/// be generated in guest code.
800///
801/// This is used when the return value contains a memory allocation such as
802/// a list or a string primarily.
803pub fn guest_export_needs_post_return(resolve: &Resolve, func: &Function) -> bool {
804    func.result
805        .map(|t| needs_deallocate(resolve, &t, Deallocate::Lists))
806        .unwrap_or(false)
807}
808
809fn needs_deallocate(resolve: &Resolve, ty: &Type, what: Deallocate) -> bool {
810    match ty {
811        Type::String => true,
812        Type::ErrorContext => true,
813        Type::Id(id) => match &resolve.types[*id].kind {
814            TypeDefKind::List(_) => true,
815            TypeDefKind::Type(t) => needs_deallocate(resolve, t, what),
816            TypeDefKind::Handle(Handle::Own(_)) => what.handles(),
817            TypeDefKind::Handle(Handle::Borrow(_)) => false,
818            TypeDefKind::Resource => false,
819            TypeDefKind::Record(r) => r
820                .fields
821                .iter()
822                .any(|f| needs_deallocate(resolve, &f.ty, what)),
823            TypeDefKind::Tuple(t) => t.types.iter().any(|t| needs_deallocate(resolve, t, what)),
824            TypeDefKind::Variant(t) => t
825                .cases
826                .iter()
827                .filter_map(|t| t.ty.as_ref())
828                .any(|t| needs_deallocate(resolve, t, what)),
829            TypeDefKind::Option(t) => needs_deallocate(resolve, t, what),
830            TypeDefKind::Result(t) => [&t.ok, &t.err]
831                .iter()
832                .filter_map(|t| t.as_ref())
833                .any(|t| needs_deallocate(resolve, t, what)),
834            TypeDefKind::Flags(_) | TypeDefKind::Enum(_) => false,
835            TypeDefKind::Future(_) | TypeDefKind::Stream(_) => what.handles(),
836            TypeDefKind::Unknown => unreachable!(),
837            TypeDefKind::FixedSizeList(..) => todo!(),
838        },
839
840        Type::Bool
841        | Type::U8
842        | Type::S8
843        | Type::U16
844        | Type::S16
845        | Type::U32
846        | Type::S32
847        | Type::U64
848        | Type::S64
849        | Type::F32
850        | Type::F64
851        | Type::Char => false,
852    }
853}
854
/// Generate instructions in `bindgen` to deallocate all lists contained in
/// values of `types`.
///
/// When `indirect` is set, `operands` must be a single pointer into linear
/// memory where values of `types` are stored in sequence; otherwise
/// `operands` holds the flattened core values of each type in turn.
pub fn deallocate_lists_in_types<B: Bindgen>(
    resolve: &Resolve,
    types: &[Type],
    operands: &[B::Operand],
    indirect: bool,
    bindgen: &mut B,
) {
    Generator::new(resolve, bindgen).deallocate_in_types(
        types,
        operands,
        indirect,
        Deallocate::Lists,
    );
}
871
/// Generate instructions in `bindgen` to deallocate all lists *and* owned
/// resources (`own<T>` handles and futures/streams) contained in values of
/// `types`.
///
/// Operand conventions are the same as `deallocate_lists_in_types`: with
/// `indirect` set, `operands` is a single pointer into linear memory;
/// otherwise it holds the flattened core values of each type in turn.
pub fn deallocate_lists_and_own_in_types<B: Bindgen>(
    resolve: &Resolve,
    types: &[Type],
    operands: &[B::Operand],
    indirect: bool,
    bindgen: &mut B,
) {
    Generator::new(resolve, bindgen).deallocate_in_types(
        types,
        operands,
        indirect,
        Deallocate::ListsAndOwn,
    );
}
888
/// Strategy for allocating guest memory while lowering values that need
/// allocations (e.g. lists and strings).
#[derive(Copy, Clone)]
pub enum Realloc {
    /// No allocator is used; selected when lowered values don't transfer
    /// ownership (see the realloc selection at the top of `Generator::call`).
    None,
    /// Allocate by calling the named realloc export (e.g. `"cabi_realloc"`).
    Export(&'static str),
}
894
/// What to deallocate in various `deallocate_*` methods.
#[derive(Copy, Clone)]
enum Deallocate {
    /// Only deallocate memory allocations, e.g. lists and strings.
    Lists,
    /// Deallocate lists and owned resources such as `own<T>` and
    /// futures/streams.
    ListsAndOwn,
}
904
905impl Deallocate {
906    fn handles(&self) -> bool {
907        match self {
908            Deallocate::Lists => false,
909            Deallocate::ListsAndOwn => true,
910        }
911    }
912}
913
/// Stack-machine state used while emitting the instruction stream for one
/// function adaptation.
struct Generator<'a, B: Bindgen> {
    // Backend that receives each emitted instruction.
    bindgen: &'a mut B,
    resolve: &'a Resolve,
    // Scratch buffer of operands handed to `Bindgen::emit` for the current
    // instruction.
    operands: Vec<B::Operand>,
    // Scratch buffer the backend fills with the current instruction's results.
    results: Vec<B::Operand>,
    // Abstract value stack: instruction operands are popped from here and
    // results pushed back on.
    stack: Vec<B::Operand>,
    // Saved return pointer for guest-import calls with `sig.retptr`, used to
    // read the function's results back out of memory after `CallWasm`.
    return_pointer: Option<B::Operand>,
    // Allocation strategy active for the current lower operation; `None`
    // outside of lowering (asserted in `call`).
    realloc: Option<Realloc>,
}
923
// Flattening limits used when lifting directly-passed parameters (see the
// `max_flat_params` selection in `Generator::call`); these mirror the
// component model canonical ABI's flattening limits.
const MAX_FLAT_PARAMS: usize = 16;
// Async guest imports use a smaller flattening limit.
const MAX_FLAT_ASYNC_PARAMS: usize = 4;
926
927impl<'a, B: Bindgen> Generator<'a, B> {
928    fn new(resolve: &'a Resolve, bindgen: &'a mut B) -> Generator<'a, B> {
929        Generator {
930            resolve,
931            bindgen,
932            operands: Vec::new(),
933            results: Vec::new(),
934            stack: Vec::new(),
935            return_pointer: None,
936            realloc: None,
937        }
938    }
939
    /// Emits the full instruction sequence adapting `func` between the
    /// interface-types level and the core wasm ABI described by `variant`,
    /// in the direction selected by `lift_lower`.
    fn call(&mut self, func: &Function, variant: AbiVariant, lift_lower: LiftLower, async_: bool) {
        let sig = self.resolve.wasm_signature(variant, func);

        // Lowering parameters calling a wasm import _or_ returning a result
        // from an async-lifted wasm export means we don't need to pass
        // ownership, but we pass ownership in all other cases.
        let realloc = match (variant, lift_lower, async_) {
            (AbiVariant::GuestImport, LiftLower::LowerArgsLiftResults, _)
            | (
                AbiVariant::GuestExport
                | AbiVariant::GuestExportAsync
                | AbiVariant::GuestExportAsyncStackful,
                LiftLower::LiftArgsLowerResults,
                true,
            ) => Realloc::None,
            _ => Realloc::Export("cabi_realloc"),
        };
        assert!(self.realloc.is_none());

        match lift_lower {
            LiftLower::LowerArgsLiftResults => {
                self.realloc = Some(realloc);

                // Create a function that performs individual lowering of operands
                let lower_to_memory = |self_: &mut Self, ptr: B::Operand| {
                    let mut offset = ArchitectureSize::default();
                    for (nth, (_, ty)) in func.params.iter().enumerate() {
                        self_.emit(&Instruction::GetArg { nth });
                        offset = align_to_arch(offset, self_.bindgen.sizes().align(ty));
                        self_.write_to_memory(ty, ptr.clone(), offset);
                        offset += self_.bindgen.sizes().size(ty);
                    }

                    self_.stack.push(ptr);
                };

                // Lower parameters
                if sig.indirect_params {
                    // If parameters are indirect, space is allocated for them
                    // and each argument is lowered individually into memory.
                    let ElementInfo { size, align } = self
                        .bindgen
                        .sizes()
                        .record(func.params.iter().map(|t| &t.1));

                    // Resolve the pointer to the indirectly stored parameters
                    let ptr = match variant {
                        // When a wasm module calls an import it will provide
                        // space that isn't explicitly deallocated.
                        AbiVariant::GuestImport => self.bindgen.return_pointer(size, align),

                        AbiVariant::GuestImportAsync => {
                            todo!("direct param lowering for async guest import not implemented")
                        }

                        // When calling a wasm module from the outside, though,
                        // malloc needs to be called.
                        AbiVariant::GuestExport => {
                            self.emit(&Instruction::Malloc {
                                realloc: "cabi_realloc",
                                size,
                                align,
                            });
                            self.stack.pop().unwrap()
                        }

                        AbiVariant::GuestExportAsync | AbiVariant::GuestExportAsyncStackful => {
                            todo!("direct param lowering for async not implemented")
                        }
                    };

                    // Lower the parameters to memory
                    lower_to_memory(self, ptr);
                } else {
                    // ... otherwise arguments are direct
                    // (there aren't too many), so we simply do a normal lower
                    // operation for them all.
                    for (nth, (_, ty)) in func.params.iter().enumerate() {
                        self.emit(&Instruction::GetArg { nth });
                        self.lower(ty);
                    }
                }
                self.realloc = None;

                // If necessary we may need to prepare a return pointer for this ABI.
                if variant == AbiVariant::GuestImport && sig.retptr {
                    let info = self.bindgen.sizes().params(&func.result);
                    let ptr = self.bindgen.return_pointer(info.size, info.align);
                    self.return_pointer = Some(ptr.clone());
                    self.stack.push(ptr);
                }

                // Call the Wasm function
                assert_eq!(self.stack.len(), sig.params.len());
                self.emit(&Instruction::CallWasm {
                    name: &func.name,
                    sig: &sig,
                });

                // Handle the result
                if sig.retptr {
                    // If there is a return pointer we must find the pointer
                    // where the results were stored and read them back out.

                    let ptr = match variant {
                        // imports into guests means it's a wasm module
                        // calling an imported function. We supplied the
                        // return pointer as the last argument (saved in
                        // `self.return_pointer`) so we use that to read
                        // the result of the function from memory.
                        AbiVariant::GuestImport => {
                            assert!(sig.results.is_empty());
                            self.return_pointer.take().unwrap()
                        }

                        // guest exports means that this is a host
                        // calling wasm so wasm returned a pointer to where
                        // the result is stored
                        AbiVariant::GuestExport => self.stack.pop().unwrap(),

                        AbiVariant::GuestImportAsync
                        | AbiVariant::GuestExportAsync
                        | AbiVariant::GuestExportAsyncStackful => {
                            unreachable!()
                        }
                    };

                    if let (AbiVariant::GuestExport, true) = (variant, async_) {
                        // If we're dealing with an async function, the result should not be read from memory
                        // immediately, as it's the async call result
                        //
                        // We can leave the result of the call (the indication of what to do as an async call)
                        // on the stack as a return
                        self.stack.push(ptr);
                    } else {
                        // If we're not dealing with an async call, the result must be in memory at this point and can be read out
                        self.read_results_from_memory(
                            &func.result,
                            ptr.clone(),
                            ArchitectureSize::default(),
                        );
                        self.emit(&Instruction::Flush {
                            amt: usize::from(func.result.is_some()),
                        });
                    }
                } else {
                    // With no return pointer in use we can simply lift the
                    // result(s) of the function from the result of the core
                    // wasm function.
                    if let Some(ty) = &func.result {
                        self.lift(ty)
                    }
                }

                // Emit the function return
                self.emit(&Instruction::Return {
                    func,
                    amt: usize::from(func.result.is_some()),
                });
            }

            LiftLower::LiftArgsLowerResults => {
                let max_flat_params = match (variant, async_) {
                    (AbiVariant::GuestImportAsync, _is_async @ true) => MAX_FLAT_ASYNC_PARAMS,
                    _ => MAX_FLAT_PARAMS,
                };

                // Read parameters from memory
                let read_from_memory = |self_: &mut Self| {
                    let mut offset = ArchitectureSize::default();
                    let ptr = self_
                        .stack
                        .pop()
                        .expect("empty stack during read param from memory");
                    for (_, ty) in func.params.iter() {
                        offset = align_to_arch(offset, self_.bindgen.sizes().align(ty));
                        self_.read_from_memory(ty, ptr.clone(), offset);
                        offset += self_.bindgen.sizes().size(ty);
                    }
                };

                // Resolve parameters
                if sig.indirect_params {
                    // If parameters were passed indirectly, arguments must be
                    // read in succession from memory, with the pointer to the arguments
                    // being the first argument to the function.
                    self.emit(&Instruction::GetArg { nth: 0 });
                    read_from_memory(self);
                } else {
                    // ... otherwise, if parameters were passed directly then we lift each
                    // argument in succession from the component wasm types that
                    // make-up the type.
                    let mut offset = 0;
                    for (param_name, ty) in func.params.iter() {
                        let Some(types) = flat_types(self.resolve, ty, Some(max_flat_params))
                        else {
                            panic!("failed to flatten types during direct parameter lifting ('{param_name}' in func '{}')", func.name);
                        };
                        for _ in 0..types.len() {
                            self.emit(&Instruction::GetArg { nth: offset });
                            offset += 1;
                        }
                        self.lift(ty);
                    }
                }

                // ... and that allows us to call the interface types function
                self.emit(&Instruction::CallInterface { func, async_ });

                // The return value of an async function is *not* the result of the function
                // itself or a pointer but rather a status code.
                //
                // Asynchronous functions will call `task.return` after the
                // interface function completes, so lowering is conditional
                // based on slightly different logic for the `task.return`
                // intrinsic.
                //
                // Note that in the async import case the code below deals with the CM function being lowered,
                // not the core function that is underneath that (i.e. func.result may be empty,
                // where the associated core function underneath must have a i32 status code result)
                let (lower_to_memory, async_flat_results) = match (async_, &func.result) {
                    // All async cases pass along the function results and flatten where necessary
                    (_is_async @ true, func_result) => {
                        let results = match &func_result {
                            Some(ty) => flat_types(self.resolve, ty, Some(max_flat_params)),
                            None => Some(Vec::new()),
                        };
                        (results.is_none(), Some(results))
                    }
                    // All other non-async cases
                    (_is_async @ false, _) => (sig.retptr, None),
                };

                // This was dynamically allocated by the caller (or async start
                // function) so after it's been read by the guest we need to
                // deallocate it.
                if let AbiVariant::GuestExport
                | AbiVariant::GuestExportAsync
                | AbiVariant::GuestExportAsyncStackful = variant
                {
                    if sig.indirect_params && !async_ {
                        let ElementInfo { size, align } = self
                            .bindgen
                            .sizes()
                            .record(func.params.iter().map(|t| &t.1));
                        self.emit(&Instruction::GetArg { nth: 0 });
                        self.emit(&Instruction::GuestDeallocate { size, align });
                    }
                }

                self.realloc = Some(realloc);

                // Perform memory lowering of relevant results, including out pointers as well as traditional results
                match (lower_to_memory, sig.retptr, variant) {
                    // When no lowering to memory is required we simply lower
                    // the result(s) and return them directly from the function.
                    (_lower_to_memory @ false, _, _) => {
                        if let Some(ty) = &func.result {
                            self.lower(ty);
                        }
                    }

                    // Lowering to memory for a guest import
                    //
                    // When a function is imported to a guest this means
                    // it's a host providing the implementation of the
                    // import. The result is stored in the pointer
                    // specified in the last argument, so we get the
                    // pointer here and then write the return value into
                    // it.
                    (
                        _lower_to_memory @ true,
                        _has_ret_ptr @ true,
                        AbiVariant::GuestImport | AbiVariant::GuestImportAsync,
                    ) => {
                        self.emit(&Instruction::GetArg {
                            nth: sig.params.len() - 1,
                        });
                        let ptr = self
                            .stack
                            .pop()
                            .expect("empty stack during result lower to memory");
                        self.write_params_to_memory(&func.result, ptr, Default::default());
                    }

                    // Lowering to memory for a guest export
                    //
                    // For a guest export this is a function defined in
                    // wasm, so we're returning a pointer where the
                    // value was stored at. Allocate some space here
                    // (statically) and then write the result into that
                    // memory, returning the pointer at the end.
                    (_lower_to_memory @ true, _, variant) => match variant {
                        AbiVariant::GuestExport | AbiVariant::GuestExportAsync => {
                            let ElementInfo { size, align } =
                                self.bindgen.sizes().params(&func.result);
                            let ptr = self.bindgen.return_pointer(size, align);
                            self.write_params_to_memory(
                                &func.result,
                                ptr.clone(),
                                Default::default(),
                            );
                            self.stack.push(ptr);
                        }
                        AbiVariant::GuestImport | AbiVariant::GuestImportAsync => {
                            unreachable!(
                                "lowering to memory cannot be performed without a return pointer ({async_note} func [{func_name}], variant {variant:#?})",
                                async_note = async_.then_some("async").unwrap_or("sync"),
                                func_name = func.name,
                            )
                        }
                        AbiVariant::GuestExportAsyncStackful => {
                            todo!("stackful exports are not yet supported")
                        }
                    },
                }

                // Build and emit the appropriate return
                match (variant, async_flat_results) {
                    // Async guest imports always return a i32 status code
                    (AbiVariant::GuestImport | AbiVariant::GuestImportAsync, None) if async_ => {
                        unreachable!("async guest imports must have a return")
                    }

                    // Async guest imports with results return the status code, not a pointer to any results
                    (AbiVariant::GuestImport | AbiVariant::GuestImportAsync, Some(results))
                        if async_ =>
                    {
                        let name = &format!("[task-return]{}", func.name);
                        let params = results.as_deref().unwrap_or_default();
                        self.emit(&Instruction::AsyncTaskReturn { name, params });
                    }

                    // All async/non-async cases with results that need to be returned are present here
                    //
                    // In practice, async imports should not end up here, as the returned result of an
                    // async import is *not* a pointer but instead a status code.
                    (_, Some(results)) => {
                        let name = &format!("[task-return]{}", func.name);
                        let params = results.as_deref().unwrap_or(&[WasmType::Pointer]);
                        self.emit(&Instruction::AsyncTaskReturn { name, params });
                    }

                    // All async/non-async cases with no results simply return
                    //
                    // In practice, an async import will never get here (it always has a result, the error code)
                    (_, None) => {
                        self.emit(&Instruction::Return {
                            func,
                            amt: sig.results.len(),
                        });
                    }
                }

                self.realloc = None;
            }
        }

        assert!(self.realloc.is_none());

        assert!(
            self.stack.is_empty(),
            "stack has {} items remaining: {:?}",
            self.stack.len(),
            self.stack,
        );
    }
1308
1309    fn post_return(&mut self, func: &Function) {
1310        let sig = self.resolve.wasm_signature(AbiVariant::GuestExport, func);
1311
1312        // Currently post-return is only used for lists and lists are always
1313        // returned indirectly through memory due to their flat representation
1314        // having more than one type. Assert that a return pointer is used,
1315        // though, in case this ever changes.
1316        assert!(sig.retptr);
1317
1318        self.emit(&Instruction::GetArg { nth: 0 });
1319        let addr = self.stack.pop().unwrap();
1320
1321        let mut types = Vec::new();
1322        types.extend(func.result);
1323        self.deallocate_in_types(&types, &[addr], true, Deallocate::Lists);
1324
1325        self.emit(&Instruction::Return { func, amt: 0 });
1326    }
1327
1328    fn deallocate_in_types(
1329        &mut self,
1330        types: &[Type],
1331        operands: &[B::Operand],
1332        indirect: bool,
1333        what: Deallocate,
1334    ) {
1335        if indirect {
1336            assert_eq!(operands.len(), 1);
1337            for (offset, ty) in self.bindgen.sizes().field_offsets(types) {
1338                self.deallocate_indirect(ty, operands[0].clone(), offset, what);
1339            }
1340            assert!(
1341                self.stack.is_empty(),
1342                "stack has {} items remaining",
1343                self.stack.len()
1344            );
1345        } else {
1346            let mut operands = operands;
1347            let mut operands_for_ty;
1348            for ty in types {
1349                let types = flat_types(self.resolve, ty, None).unwrap();
1350                (operands_for_ty, operands) = operands.split_at(types.len());
1351                self.stack.extend_from_slice(operands_for_ty);
1352                self.deallocate(ty, what);
1353                assert!(
1354                    self.stack.is_empty(),
1355                    "stack has {} items remaining",
1356                    self.stack.len()
1357                );
1358            }
1359            assert!(operands.is_empty());
1360        }
1361    }
1362
1363    fn emit(&mut self, inst: &Instruction<'_>) {
1364        self.operands.clear();
1365        self.results.clear();
1366
1367        let operands_len = inst.operands_len();
1368        assert!(
1369            self.stack.len() >= operands_len,
1370            "not enough operands on stack for {:?}: have {} need {operands_len}",
1371            inst,
1372            self.stack.len(),
1373        );
1374        self.operands
1375            .extend(self.stack.drain((self.stack.len() - operands_len)..));
1376        self.results.reserve(inst.results_len());
1377
1378        self.bindgen
1379            .emit(self.resolve, inst, &mut self.operands, &mut self.results);
1380
1381        assert_eq!(
1382            self.results.len(),
1383            inst.results_len(),
1384            "{:?} expected {} results, got {}",
1385            inst,
1386            inst.results_len(),
1387            self.results.len()
1388        );
1389        self.stack.append(&mut self.results);
1390    }
1391
    /// Notifies the backend that a new nested code block begins here (used
    /// e.g. around per-element lowering of non-canonical lists in `lower`).
    fn push_block(&mut self) {
        self.bindgen.push_block();
    }
1395
1396    fn finish_block(&mut self, size: usize) {
1397        self.operands.clear();
1398        assert!(
1399            size <= self.stack.len(),
1400            "not enough operands on stack for finishing block",
1401        );
1402        self.operands
1403            .extend(self.stack.drain((self.stack.len() - size)..));
1404        self.bindgen.finish_block(&mut self.operands);
1405    }
1406
    /// Lowers a single component-model value of type `ty` into its flattened
    /// core-wasm representation.
    ///
    /// Expects one bindgen operand for the value on `self.stack`; emits the
    /// instructions that perform the conversion and leaves the resulting
    /// core-wasm operands on the stack.
    fn lower(&mut self, ty: &Type) {
        use Instruction::*;

        match *ty {
            // Scalars each lower to exactly one core value via a dedicated
            // conversion instruction.
            Type::Bool => self.emit(&I32FromBool),
            Type::S8 => self.emit(&I32FromS8),
            Type::U8 => self.emit(&I32FromU8),
            Type::S16 => self.emit(&I32FromS16),
            Type::U16 => self.emit(&I32FromU16),
            Type::S32 => self.emit(&I32FromS32),
            Type::U32 => self.emit(&I32FromU32),
            Type::S64 => self.emit(&I64FromS64),
            Type::U64 => self.emit(&I64FromU64),
            Type::Char => self.emit(&I32FromChar),
            Type::F32 => self.emit(&CoreF32FromF32),
            Type::F64 => self.emit(&CoreF64FromF64),
            Type::String => {
                let realloc = self.list_realloc();
                self.emit(&StringLower { realloc });
            }
            Type::ErrorContext => self.emit(&ErrorContextLower),
            Type::Id(id) => match &self.resolve.types[id].kind {
                // Type aliases are transparent: lower the underlying type.
                TypeDefKind::Type(t) => self.lower(t),
                TypeDefKind::List(element) => {
                    let realloc = self.list_realloc();
                    if self.bindgen.is_list_canonical(self.resolve, element) {
                        // Canonical (memcpy-able) element representation.
                        self.emit(&ListCanonLower { element, realloc });
                    } else {
                        // Otherwise each element is written into memory via a
                        // per-element block that `ListLower` will iterate.
                        self.push_block();
                        self.emit(&IterElem { element });
                        self.emit(&IterBasePointer);
                        let addr = self.stack.pop().unwrap();
                        self.write_to_memory(element, addr, Default::default());
                        self.finish_block(0);
                        self.emit(&ListLower { element, realloc });
                    }
                }
                TypeDefKind::Handle(handle) => {
                    let (Handle::Own(ty) | Handle::Borrow(ty)) = handle;
                    self.emit(&HandleLower {
                        handle,
                        ty: id,
                        name: self.resolve.types[*ty].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Resource => {
                    todo!();
                }
                // Decompose the record into per-field operands, then lower
                // each field in declaration order.
                TypeDefKind::Record(record) => {
                    self.emit(&RecordLower {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                    let values = self
                        .stack
                        .drain(self.stack.len() - record.fields.len()..)
                        .collect::<Vec<_>>();
                    for (field, value) in record.fields.iter().zip(values) {
                        self.stack.push(value);
                        self.lower(&field.ty);
                    }
                }
                // Same shape as records, but keyed by position.
                TypeDefKind::Tuple(tuple) => {
                    self.emit(&TupleLower { tuple, ty: id });
                    let values = self
                        .stack
                        .drain(self.stack.len() - tuple.types.len()..)
                        .collect::<Vec<_>>();
                    for (ty, value) in tuple.types.iter().zip(values) {
                        self.stack.push(value);
                        self.lower(ty);
                    }
                }

                TypeDefKind::Flags(flags) => {
                    self.emit(&FlagsLower {
                        flags,
                        ty: id,
                        name: self.resolve.types[id].name.as_ref().unwrap(),
                    });
                }

                // Variant-like types lower each case in its own block (see
                // `lower_variant_arms`) and then emit the selecting
                // instruction with the unified flat result types.
                TypeDefKind::Variant(v) => {
                    let results =
                        self.lower_variant_arms(ty, v.cases.iter().map(|c| c.ty.as_ref()));
                    self.emit(&VariantLower {
                        variant: v,
                        ty: id,
                        results: &results,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Enum(enum_) => {
                    self.emit(&EnumLower {
                        enum_,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Option(t) => {
                    let results = self.lower_variant_arms(ty, [None, Some(t)]);
                    self.emit(&OptionLower {
                        payload: t,
                        ty: id,
                        results: &results,
                    });
                }
                TypeDefKind::Result(r) => {
                    let results = self.lower_variant_arms(ty, [r.ok.as_ref(), r.err.as_ref()]);
                    self.emit(&ResultLower {
                        result: r,
                        ty: id,
                        results: &results,
                    });
                }
                TypeDefKind::Future(ty) => {
                    self.emit(&FutureLower {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Stream(ty) => {
                    self.emit(&StreamLower {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedSizeList(..) => todo!(),
            },
        }
    }
1540
    /// Emits one block per variant case that lowers that case's payload (if
    /// any) to the variant's unified flat representation.
    ///
    /// Every block produces the same result types: the discriminant first
    /// (`results[0]`), then the joined payload types. Payloads narrower than
    /// the unified shape are padded with zeros, and mismatched core types are
    /// bitcast so all blocks agree. Returns the unified flat types for the
    /// caller to attach to its `*Lower` instruction.
    fn lower_variant_arms<'b>(
        &mut self,
        ty: &Type,
        cases: impl IntoIterator<Item = Option<&'b Type>>,
    ) -> Vec<WasmType> {
        use Instruction::*;
        // Flat types of the whole variant: discriminant + joined payload.
        let results = flat_types(self.resolve, ty, None).unwrap();
        let mut casts = Vec::new();
        for (i, ty) in cases.into_iter().enumerate() {
            self.push_block();
            self.emit(&VariantPayloadName);
            let payload_name = self.stack.pop().unwrap();
            // The case index is the discriminant value.
            self.emit(&I32Const { val: i as i32 });
            let mut pushed = 1;
            if let Some(ty) = ty {
                // Using the payload of this block we lower the type to
                // raw wasm values.
                self.stack.push(payload_name);
                self.lower(ty);

                // Determine the types of all the wasm values we just
                // pushed, and record how many. If we pushed too few
                // then we'll need to push some zeros after this.
                let temp = flat_types(self.resolve, ty, None).unwrap();
                pushed += temp.len();

                // For all the types pushed we may need to insert some
                // bitcasts. This will go through and cast everything
                // to the right type to ensure all blocks produce the
                // same set of results.
                casts.truncate(0);
                for (actual, expected) in temp.iter().zip(&results[1..]) {
                    casts.push(cast(*actual, *expected));
                }
                if casts.iter().any(|c| *c != Bitcast::None) {
                    self.emit(&Bitcasts { casts: &casts });
                }
            }

            // If we haven't pushed enough items in this block to match
            // what other variants are pushing then we need to push
            // some zeros.
            if pushed < results.len() {
                self.emit(&ConstZero {
                    tys: &results[pushed..],
                });
            }
            self.finish_block(results.len());
        }
        results
    }
1592
1593    fn list_realloc(&self) -> Option<&'static str> {
1594        match self.realloc.expect("realloc should be configured") {
1595            Realloc::None => None,
1596            Realloc::Export(s) => Some(s),
1597        }
1598    }
1599
    /// Lifts flattened core-wasm operands on `self.stack` back into a single
    /// component-model value of type `ty`, leaving one bindgen operand on the
    /// stack.
    ///
    /// Note that in general everything in this function is the opposite of the
    /// `lower` function above. This is intentional and should be kept this way!
    fn lift(&mut self, ty: &Type) {
        use Instruction::*;

        match *ty {
            // Scalars lift from exactly one core value via a dedicated
            // conversion instruction.
            Type::Bool => self.emit(&BoolFromI32),
            Type::S8 => self.emit(&S8FromI32),
            Type::U8 => self.emit(&U8FromI32),
            Type::S16 => self.emit(&S16FromI32),
            Type::U16 => self.emit(&U16FromI32),
            Type::S32 => self.emit(&S32FromI32),
            Type::U32 => self.emit(&U32FromI32),
            Type::S64 => self.emit(&S64FromI64),
            Type::U64 => self.emit(&U64FromI64),
            Type::Char => self.emit(&CharFromI32),
            Type::F32 => self.emit(&F32FromCoreF32),
            Type::F64 => self.emit(&F64FromCoreF64),
            Type::String => self.emit(&StringLift),
            Type::ErrorContext => self.emit(&ErrorContextLift),
            Type::Id(id) => match &self.resolve.types[id].kind {
                // Type aliases are transparent: lift the underlying type.
                TypeDefKind::Type(t) => self.lift(t),
                TypeDefKind::List(element) => {
                    if self.bindgen.is_list_canonical(self.resolve, element) {
                        // Canonical (memcpy-able) element representation.
                        self.emit(&ListCanonLift { element, ty: id });
                    } else {
                        // Otherwise read each element out of memory via a
                        // per-element block that `ListLift` will iterate.
                        self.push_block();
                        self.emit(&IterBasePointer);
                        let addr = self.stack.pop().unwrap();
                        self.read_from_memory(element, addr, Default::default());
                        self.finish_block(1);
                        self.emit(&ListLift { element, ty: id });
                    }
                }
                TypeDefKind::Handle(handle) => {
                    let (Handle::Own(ty) | Handle::Borrow(ty)) = handle;
                    self.emit(&HandleLift {
                        handle,
                        ty: id,
                        name: self.resolve.types[*ty].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Resource => {
                    todo!();
                }
                // Lift each field from its slice of the flat operands, then
                // aggregate the fields into the record.
                TypeDefKind::Record(record) => {
                    self.flat_for_each_record_type(
                        ty,
                        record.fields.iter().map(|f| &f.ty),
                        Self::lift,
                    );
                    self.emit(&RecordLift {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Tuple(tuple) => {
                    self.flat_for_each_record_type(ty, tuple.types.iter(), Self::lift);
                    self.emit(&TupleLift { tuple, ty: id });
                }
                TypeDefKind::Flags(flags) => {
                    self.emit(&FlagsLift {
                        flags,
                        ty: id,
                        name: self.resolve.types[id].name.as_ref().unwrap(),
                    });
                }

                // Variant-like types lift each case's payload in its own
                // block, then emit the selecting instruction.
                TypeDefKind::Variant(v) => {
                    self.flat_for_each_variant_arm(
                        ty,
                        true,
                        v.cases.iter().map(|c| c.ty.as_ref()),
                        Self::lift,
                    );
                    self.emit(&VariantLift {
                        variant: v,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Enum(enum_) => {
                    self.emit(&EnumLift {
                        enum_,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.flat_for_each_variant_arm(ty, true, [None, Some(t)], Self::lift);
                    self.emit(&OptionLift { payload: t, ty: id });
                }

                TypeDefKind::Result(r) => {
                    self.flat_for_each_variant_arm(
                        ty,
                        true,
                        [r.ok.as_ref(), r.err.as_ref()],
                        Self::lift,
                    );
                    self.emit(&ResultLift { result: r, ty: id });
                }

                TypeDefKind::Future(ty) => {
                    self.emit(&FutureLift {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Stream(ty) => {
                    self.emit(&StreamLift {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedSizeList(..) => todo!(),
            },
        }
    }
1723
1724    fn flat_for_each_record_type<'b>(
1725        &mut self,
1726        container: &Type,
1727        types: impl Iterator<Item = &'b Type>,
1728        mut iter: impl FnMut(&mut Self, &Type),
1729    ) {
1730        let temp = flat_types(self.resolve, container, None).unwrap();
1731        let mut args = self
1732            .stack
1733            .drain(self.stack.len() - temp.len()..)
1734            .collect::<Vec<_>>();
1735        for ty in types {
1736            let temp = flat_types(self.resolve, ty, None).unwrap();
1737            self.stack.extend(args.drain(..temp.len()));
1738            iter(self, ty);
1739        }
1740    }
1741
    /// Runs `iter` once per variant case inside its own block, feeding each
    /// block the variant's flattened payload operands.
    ///
    /// Pops only the payload operands from `self.stack` (`params.len() - 1`
    /// of them) — the discriminant, which is `params[0]`, is deliberately
    /// left on the stack for the `*Lift` instruction the caller emits next.
    /// Payload operands are bitcast from the joined representation back to
    /// each case's actual types as needed. When
    /// `blocks_with_type_have_result` is set, blocks for cases with a payload
    /// produce one result; otherwise all blocks produce none.
    fn flat_for_each_variant_arm<'b>(
        &mut self,
        ty: &Type,
        blocks_with_type_have_result: bool,
        cases: impl IntoIterator<Item = Option<&'b Type>>,
        mut iter: impl FnMut(&mut Self, &Type),
    ) {
        let params = flat_types(self.resolve, ty, None).unwrap();
        let mut casts = Vec::new();
        // `+ 1` skips the discriminant: only the payload operands are
        // drained here.
        let block_inputs = self
            .stack
            .drain(self.stack.len() + 1 - params.len()..)
            .collect::<Vec<_>>();
        for ty in cases {
            self.push_block();
            if let Some(ty) = ty {
                // Push only the values we need for this variant onto
                // the stack.
                let temp = flat_types(self.resolve, ty, None).unwrap();
                self.stack
                    .extend(block_inputs[..temp.len()].iter().cloned());

                // Cast all the types we have on the stack to the actual
                // types needed for this variant, if necessary.
                casts.truncate(0);
                for (actual, expected) in temp.iter().zip(&params[1..]) {
                    casts.push(cast(*expected, *actual));
                }
                if casts.iter().any(|c| *c != Bitcast::None) {
                    self.emit(&Instruction::Bitcasts { casts: &casts });
                }

                // Then recursively lift this variant's payload.
                iter(self, ty);
            }
            self.finish_block(if blocks_with_type_have_result {
                ty.is_some() as usize
            } else {
                0
            });
        }
    }
1784
    /// Pops one operand of type `ty` from `self.stack` and emits the
    /// instructions that store it into linear memory at `addr + offset`,
    /// following the canonical ABI memory layout for `ty`.
    fn write_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        use Instruction::*;

        match *ty {
            // Builtin types need different flavors of storage instructions
            // depending on the size of the value written.
            Type::Bool | Type::U8 | Type::S8 => {
                self.lower_and_emit(ty, addr, &I32Store8 { offset })
            }
            Type::U16 | Type::S16 => self.lower_and_emit(ty, addr, &I32Store16 { offset }),
            Type::U32 | Type::S32 | Type::Char => {
                self.lower_and_emit(ty, addr, &I32Store { offset })
            }
            Type::U64 | Type::S64 => self.lower_and_emit(ty, addr, &I64Store { offset }),
            Type::F32 => self.lower_and_emit(ty, addr, &F32Store { offset }),
            Type::F64 => self.lower_and_emit(ty, addr, &F64Store { offset }),
            Type::String => self.write_list_to_memory(ty, addr, offset),
            Type::ErrorContext => self.lower_and_emit(ty, addr, &I32Store { offset }),

            Type::Id(id) => match &self.resolve.types[id].kind {
                // Type aliases are transparent.
                TypeDefKind::Type(t) => self.write_to_memory(t, addr, offset),
                TypeDefKind::List(_) => self.write_list_to_memory(ty, addr, offset),

                // Handles, futures, and streams lower to a single i32 index.
                TypeDefKind::Future(_) | TypeDefKind::Stream(_) | TypeDefKind::Handle(_) => {
                    self.lower_and_emit(ty, addr, &I32Store { offset })
                }

                // Decompose the record into its components and then write all
                // the components into memory one-by-one.
                TypeDefKind::Record(record) => {
                    self.emit(&RecordLower {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                    self.write_fields_to_memory(record.fields.iter().map(|f| &f.ty), addr, offset);
                }
                TypeDefKind::Resource => {
                    todo!()
                }
                TypeDefKind::Tuple(tuple) => {
                    self.emit(&TupleLower { tuple, ty: id });
                    self.write_fields_to_memory(tuple.types.iter(), addr, offset);
                }

                // Flags are stored with the smallest integer that fits; large
                // flag sets are stored as consecutive u32 words.
                TypeDefKind::Flags(f) => {
                    self.lower(ty);
                    match f.repr() {
                        FlagsRepr::U8 => {
                            self.stack.push(addr);
                            self.store_intrepr(offset, Int::U8);
                        }
                        FlagsRepr::U16 => {
                            self.stack.push(addr);
                            self.store_intrepr(offset, Int::U16);
                        }
                        FlagsRepr::U32(n) => {
                            // Reversed so the top of stack (the last lowered
                            // word) lands in the highest-addressed slot.
                            for i in (0..n).rev() {
                                self.stack.push(addr.clone());
                                self.emit(&I32Store {
                                    offset: offset.add_bytes(i * 4),
                                });
                            }
                        }
                    }
                }

                // Each case will get its own block, and the first item in each
                // case is writing the discriminant. After that if we have a
                // payload we write the payload after the discriminant, aligned up
                // to the type's alignment.
                TypeDefKind::Variant(v) => {
                    self.write_variant_arms_to_memory(
                        offset,
                        addr,
                        v.tag(),
                        v.cases.iter().map(|c| c.ty.as_ref()),
                    );
                    self.emit(&VariantLower {
                        variant: v,
                        ty: id,
                        results: &[],
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.write_variant_arms_to_memory(offset, addr, Int::U8, [None, Some(t)]);
                    self.emit(&OptionLower {
                        payload: t,
                        ty: id,
                        results: &[],
                    });
                }

                TypeDefKind::Result(r) => {
                    self.write_variant_arms_to_memory(
                        offset,
                        addr,
                        Int::U8,
                        [r.ok.as_ref(), r.err.as_ref()],
                    );
                    self.emit(&ResultLower {
                        result: r,
                        ty: id,
                        results: &[],
                    });
                }

                // Enums store just their discriminant, sized by the tag repr.
                TypeDefKind::Enum(e) => {
                    self.lower(ty);
                    self.stack.push(addr);
                    self.store_intrepr(offset, e.tag());
                }

                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedSizeList(..) => todo!(),
            },
        }
    }
1905
1906    fn write_params_to_memory<'b>(
1907        &mut self,
1908        params: impl IntoIterator<Item = &'b Type, IntoIter: ExactSizeIterator>,
1909        addr: B::Operand,
1910        offset: ArchitectureSize,
1911    ) {
1912        self.write_fields_to_memory(params, addr, offset);
1913    }
1914
1915    fn write_variant_arms_to_memory<'b>(
1916        &mut self,
1917        offset: ArchitectureSize,
1918        addr: B::Operand,
1919        tag: Int,
1920        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
1921    ) {
1922        let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
1923        for (i, ty) in cases.into_iter().enumerate() {
1924            self.push_block();
1925            self.emit(&Instruction::VariantPayloadName);
1926            let payload_name = self.stack.pop().unwrap();
1927            self.emit(&Instruction::I32Const { val: i as i32 });
1928            self.stack.push(addr.clone());
1929            self.store_intrepr(offset, tag);
1930            if let Some(ty) = ty {
1931                self.stack.push(payload_name.clone());
1932                self.write_to_memory(ty, addr.clone(), payload_offset);
1933            }
1934            self.finish_block(0);
1935        }
1936    }
1937
1938    fn write_list_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
1939        // After lowering the list there's two i32 values on the stack
1940        // which we write into memory, writing the pointer into the low address
1941        // and the length into the high address.
1942        self.lower(ty);
1943        self.stack.push(addr.clone());
1944        self.emit(&Instruction::LengthStore {
1945            offset: offset + self.bindgen.sizes().align(ty).into(),
1946        });
1947        self.stack.push(addr);
1948        self.emit(&Instruction::PointerStore { offset });
1949    }
1950
1951    fn write_fields_to_memory<'b>(
1952        &mut self,
1953        tys: impl IntoIterator<Item = &'b Type, IntoIter: ExactSizeIterator>,
1954        addr: B::Operand,
1955        offset: ArchitectureSize,
1956    ) {
1957        let tys = tys.into_iter();
1958        let fields = self
1959            .stack
1960            .drain(self.stack.len() - tys.len()..)
1961            .collect::<Vec<_>>();
1962        for ((field_offset, ty), op) in self
1963            .bindgen
1964            .sizes()
1965            .field_offsets(tys)
1966            .into_iter()
1967            .zip(fields)
1968        {
1969            self.stack.push(op);
1970            self.write_to_memory(ty, addr.clone(), offset + (field_offset));
1971        }
1972    }
1973
1974    fn lower_and_emit(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
1975        self.lower(ty);
1976        self.stack.push(addr);
1977        self.emit(instr);
1978    }
1979
    /// Emits the instructions that load a value of type `ty` from linear
    /// memory at `addr + offset` and lift it, leaving one bindgen operand on
    /// `self.stack`. The inverse of `write_to_memory`.
    fn read_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        use Instruction::*;

        match *ty {
            // Scalars use a load instruction matching their size and
            // signedness, then a single lifting conversion.
            Type::Bool => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
            Type::U8 => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
            Type::S8 => self.emit_and_lift(ty, addr, &I32Load8S { offset }),
            Type::U16 => self.emit_and_lift(ty, addr, &I32Load16U { offset }),
            Type::S16 => self.emit_and_lift(ty, addr, &I32Load16S { offset }),
            Type::U32 | Type::S32 | Type::Char => self.emit_and_lift(ty, addr, &I32Load { offset }),
            Type::U64 | Type::S64 => self.emit_and_lift(ty, addr, &I64Load { offset }),
            Type::F32 => self.emit_and_lift(ty, addr, &F32Load { offset }),
            Type::F64 => self.emit_and_lift(ty, addr, &F64Load { offset }),
            Type::String => self.read_list_from_memory(ty, addr, offset),
            Type::ErrorContext => self.emit_and_lift(ty, addr, &I32Load { offset }),

            Type::Id(id) => match &self.resolve.types[id].kind {
                // Type aliases are transparent.
                TypeDefKind::Type(t) => self.read_from_memory(t, addr, offset),

                TypeDefKind::List(_) => self.read_list_from_memory(ty, addr, offset),

                // Handles, futures, and streams are a single i32 index.
                TypeDefKind::Future(_) | TypeDefKind::Stream(_) | TypeDefKind::Handle(_) => {
                    self.emit_and_lift(ty, addr, &I32Load { offset })
                }

                TypeDefKind::Resource => {
                    todo!();
                }

                // Read and lift each field individually, adjusting the offset
                // as we go along, then aggregate all the fields into the
                // record.
                TypeDefKind::Record(record) => {
                    self.read_fields_from_memory(record.fields.iter().map(|f| &f.ty), addr, offset);
                    self.emit(&RecordLift {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Tuple(tuple) => {
                    self.read_fields_from_memory(&tuple.types, addr, offset);
                    self.emit(&TupleLift { tuple, ty: id });
                }

                // Flags were stored with the smallest integer that fits;
                // large flag sets are consecutive u32 words.
                TypeDefKind::Flags(f) => {
                    match f.repr() {
                        FlagsRepr::U8 => {
                            self.stack.push(addr);
                            self.load_intrepr(offset, Int::U8);
                        }
                        FlagsRepr::U16 => {
                            self.stack.push(addr);
                            self.load_intrepr(offset, Int::U16);
                        }
                        FlagsRepr::U32(n) => {
                            for i in 0..n {
                                self.stack.push(addr.clone());
                                self.emit(&I32Load {
                                    offset: offset.add_bytes(i * 4),
                                });
                            }
                        }
                    }
                    self.lift(ty);
                }

                // Each case will get its own block, and we'll dispatch to the
                // right block based on the `i32.load` we initially perform. Each
                // individual block is pretty simple and just reads the payload type
                // from the corresponding offset if one is available.
                TypeDefKind::Variant(variant) => {
                    self.read_variant_arms_from_memory(
                        offset,
                        addr,
                        variant.tag(),
                        variant.cases.iter().map(|c| c.ty.as_ref()),
                    );
                    self.emit(&VariantLift {
                        variant,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.read_variant_arms_from_memory(offset, addr, Int::U8, [None, Some(t)]);
                    self.emit(&OptionLift { payload: t, ty: id });
                }

                TypeDefKind::Result(r) => {
                    self.read_variant_arms_from_memory(
                        offset,
                        addr,
                        Int::U8,
                        [r.ok.as_ref(), r.err.as_ref()],
                    );
                    self.emit(&ResultLift { result: r, ty: id });
                }

                // Enums are just their stored discriminant.
                TypeDefKind::Enum(e) => {
                    self.stack.push(addr.clone());
                    self.load_intrepr(offset, e.tag());
                    self.lift(ty);
                }

                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedSizeList(..) => todo!(),
            },
        }
    }
2092
2093    fn read_results_from_memory(
2094        &mut self,
2095        result: &Option<Type>,
2096        addr: B::Operand,
2097        offset: ArchitectureSize,
2098    ) {
2099        self.read_fields_from_memory(result, addr, offset)
2100    }
2101
2102    fn read_variant_arms_from_memory<'b>(
2103        &mut self,
2104        offset: ArchitectureSize,
2105        addr: B::Operand,
2106        tag: Int,
2107        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
2108    ) {
2109        self.stack.push(addr.clone());
2110        self.load_intrepr(offset, tag);
2111        let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
2112        for ty in cases {
2113            self.push_block();
2114            if let Some(ty) = ty {
2115                self.read_from_memory(ty, addr.clone(), payload_offset);
2116            }
2117            self.finish_block(ty.is_some() as usize);
2118        }
2119    }
2120
2121    fn read_list_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
2122        // Read the pointer/len and then perform the standard lifting
2123        // proceses.
2124        self.stack.push(addr.clone());
2125        self.emit(&Instruction::PointerLoad { offset });
2126        self.stack.push(addr);
2127        self.emit(&Instruction::LengthLoad {
2128            offset: offset + self.bindgen.sizes().align(ty).into(),
2129        });
2130        self.lift(ty);
2131    }
2132
2133    fn read_fields_from_memory<'b>(
2134        &mut self,
2135        tys: impl IntoIterator<Item = &'b Type>,
2136        addr: B::Operand,
2137        offset: ArchitectureSize,
2138    ) {
2139        for (field_offset, ty) in self.bindgen.sizes().field_offsets(tys).iter() {
2140            self.read_from_memory(ty, addr.clone(), offset + (*field_offset));
2141        }
2142    }
2143
2144    fn emit_and_lift(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
2145        self.stack.push(addr);
2146        self.emit(instr);
2147        self.lift(ty);
2148    }
2149
2150    fn load_intrepr(&mut self, offset: ArchitectureSize, repr: Int) {
2151        self.emit(&match repr {
2152            Int::U64 => Instruction::I64Load { offset },
2153            Int::U32 => Instruction::I32Load { offset },
2154            Int::U16 => Instruction::I32Load16U { offset },
2155            Int::U8 => Instruction::I32Load8U { offset },
2156        });
2157    }
2158
2159    fn store_intrepr(&mut self, offset: ArchitectureSize, repr: Int) {
2160        self.emit(&match repr {
2161            Int::U64 => Instruction::I64Store { offset },
2162            Int::U32 => Instruction::I32Store { offset },
2163            Int::U16 => Instruction::I32Store16 { offset },
2164            Int::U8 => Instruction::I32Store8 { offset },
2165        });
2166    }
2167
    /// Runs the deallocation of `ty` for the operands currently on
    /// `self.stack`.
    ///
    /// This will pop the ABI items of `ty` from `self.stack`.
    ///
    /// `what` selects which categories of values get cleaned up (e.g.
    /// whether owned handles are dropped in addition to heap allocations).
    fn deallocate(&mut self, ty: &Type, what: Deallocate) {
        use Instruction::*;

        match *ty {
            // A string's ABI representation is a pointer/length pair; the
            // deallocation instruction consumes both operands.
            Type::String => {
                self.emit(&Instruction::GuestDeallocateString);
            }

            Type::Bool
            | Type::U8
            | Type::S8
            | Type::U16
            | Type::S16
            | Type::U32
            | Type::S32
            | Type::Char
            | Type::U64
            | Type::S64
            | Type::F32
            | Type::F64
            | Type::ErrorContext => {
                // No deallocation necessary, just discard the operand on the
                // stack.
                self.stack.pop().unwrap();
            }

            Type::Id(id) => match &self.resolve.types[id].kind {
                // Type aliases delegate to the underlying type.
                TypeDefKind::Type(t) => self.deallocate(t, what),

                TypeDefKind::List(element) => {
                    // Emit a block that deallocates a single element rooted at
                    // the base pointer produced by `IterBasePointer`; the
                    // trailing `GuestDeallocateList` consumes the list's
                    // pointer/length operands. NOTE(review): presumably the
                    // backend runs the block once per element -- confirm
                    // against the code generators.
                    self.push_block();
                    self.emit(&IterBasePointer);
                    let elemaddr = self.stack.pop().unwrap();
                    self.deallocate_indirect(element, elemaddr, Default::default(), what);
                    self.finish_block(0);

                    self.emit(&Instruction::GuestDeallocateList { element });
                }

                // Owned handles and future/stream handles get dropped, but
                // only when `what` requests handle cleanup; otherwise they
                // fall through to the discard arm below.
                TypeDefKind::Handle(Handle::Own(_))
                | TypeDefKind::Future(_)
                | TypeDefKind::Stream(_)
                    if what.handles() =>
                {
                    self.lift(ty);
                    self.emit(&DropHandle { ty });
                }

                // Aggregates recurse into the flat operands of each field.
                TypeDefKind::Record(record) => {
                    self.flat_for_each_record_type(
                        ty,
                        record.fields.iter().map(|f| &f.ty),
                        |me, ty| me.deallocate(ty, what),
                    );
                }

                TypeDefKind::Tuple(tuple) => {
                    self.flat_for_each_record_type(ty, tuple.types.iter(), |me, ty| {
                        me.deallocate(ty, what)
                    });
                }

                // Variant-like types emit one block per case and a final
                // instruction that dispatches on the tag; `blocks` must match
                // the number of blocks pushed.
                TypeDefKind::Variant(variant) => {
                    self.flat_for_each_variant_arm(
                        ty,
                        false,
                        variant.cases.iter().map(|c| c.ty.as_ref()),
                        |me, ty| me.deallocate(ty, what),
                    );
                    self.emit(&GuestDeallocateVariant {
                        blocks: variant.cases.len(),
                    });
                }

                // Option is a two-case variant: none (no payload) / some.
                TypeDefKind::Option(t) => {
                    self.flat_for_each_variant_arm(ty, false, [None, Some(t)], |me, ty| {
                        me.deallocate(ty, what)
                    });
                    self.emit(&GuestDeallocateVariant { blocks: 2 });
                }

                // Result is a two-case variant: ok / err, either payload
                // optional.
                TypeDefKind::Result(e) => {
                    self.flat_for_each_variant_arm(
                        ty,
                        false,
                        [e.ok.as_ref(), e.err.as_ref()],
                        |me, ty| me.deallocate(ty, what),
                    );
                    self.emit(&GuestDeallocateVariant { blocks: 2 });
                }

                // discard the operand on the stack, otherwise nothing to free.
                // (Handle/future/stream types land here when `what` excludes
                // handle cleanup, since the guarded arm above didn't match.)
                TypeDefKind::Flags(_)
                | TypeDefKind::Enum(_)
                | TypeDefKind::Future(_)
                | TypeDefKind::Stream(_)
                | TypeDefKind::Handle(Handle::Own(_))
                | TypeDefKind::Handle(Handle::Borrow(_)) => {
                    self.stack.pop().unwrap();
                }

                TypeDefKind::Resource => unreachable!(),
                TypeDefKind::Unknown => unreachable!(),

                TypeDefKind::FixedSizeList(..) => todo!(),
            },
        }
    }
2280
    /// Deallocates a value of type `ty` that lives in linear memory at
    /// `addr + offset`, as opposed to `deallocate` which consumes operands
    /// from `self.stack`.
    fn deallocate_indirect(
        &mut self,
        ty: &Type,
        addr: B::Operand,
        offset: ArchitectureSize,
        what: Deallocate,
    ) {
        use Instruction::*;

        // No need to execute any instructions if this type itself doesn't
        // require any form of post-return.
        if !needs_deallocate(self.resolve, ty, what) {
            return;
        }

        match *ty {
            // A string in memory is a pointer followed by a length (the
            // length field's offset equals the string's alignment); load both
            // onto the stack and defer to the stack-based path.
            Type::String => {
                self.stack.push(addr.clone());
                self.emit(&Instruction::PointerLoad { offset });
                self.stack.push(addr);
                self.emit(&Instruction::LengthLoad {
                    offset: offset + self.bindgen.sizes().align(ty).into(),
                });
                self.deallocate(ty, what);
            }

            // Scalars own no allocations; nothing to do.
            Type::Bool
            | Type::U8
            | Type::S8
            | Type::U16
            | Type::S16
            | Type::U32
            | Type::S32
            | Type::Char
            | Type::U64
            | Type::S64
            | Type::F32
            | Type::F64
            | Type::ErrorContext => {}

            Type::Id(id) => match &self.resolve.types[id].kind {
                // Type aliases delegate to the underlying type.
                TypeDefKind::Type(t) => self.deallocate_indirect(t, addr, offset, what),

                // Lists, like strings, are a pointer/length pair in memory.
                TypeDefKind::List(_) => {
                    self.stack.push(addr.clone());
                    self.emit(&Instruction::PointerLoad { offset });
                    self.stack.push(addr);
                    self.emit(&Instruction::LengthLoad {
                        offset: offset + self.bindgen.sizes().align(ty).into(),
                    });

                    self.deallocate(ty, what);
                }

                // Handle-like types are read out of memory and dropped, but
                // only when `what` requests handle cleanup.
                TypeDefKind::Handle(Handle::Own(_))
                | TypeDefKind::Future(_)
                | TypeDefKind::Stream(_)
                    if what.handles() =>
                {
                    self.read_from_memory(ty, addr, offset);
                    self.emit(&DropHandle { ty });
                }

                // NOTE(review): these arms assume `needs_deallocate` above
                // returns false for handle types whenever `what` excludes
                // handles -- confirm the two functions stay in sync.
                TypeDefKind::Handle(Handle::Own(_)) => unreachable!(),
                TypeDefKind::Handle(Handle::Borrow(_)) => unreachable!(),
                TypeDefKind::Resource => unreachable!(),

                // Aggregates recurse into each field at its computed offset.
                TypeDefKind::Record(record) => {
                    self.deallocate_indirect_fields(
                        &record.fields.iter().map(|f| f.ty).collect::<Vec<_>>(),
                        addr,
                        offset,
                        what,
                    );
                }

                TypeDefKind::Tuple(tuple) => {
                    self.deallocate_indirect_fields(&tuple.types, addr, offset, what);
                }

                // Flags are plain bits; nothing to free.
                TypeDefKind::Flags(_) => {}

                // Variant-like types load the tag, emit one block per case,
                // and let the backend select the matching block at runtime.
                TypeDefKind::Variant(variant) => {
                    self.deallocate_indirect_variant(
                        offset,
                        addr,
                        variant.tag(),
                        variant.cases.iter().map(|c| c.ty.as_ref()),
                        what,
                    );
                    self.emit(&GuestDeallocateVariant {
                        blocks: variant.cases.len(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.deallocate_indirect_variant(offset, addr, Int::U8, [None, Some(t)], what);
                    self.emit(&GuestDeallocateVariant { blocks: 2 });
                }

                TypeDefKind::Result(e) => {
                    self.deallocate_indirect_variant(
                        offset,
                        addr,
                        Int::U8,
                        [e.ok.as_ref(), e.err.as_ref()],
                        what,
                    );
                    self.emit(&GuestDeallocateVariant { blocks: 2 });
                }

                // Enums are a bare tag; nothing to free.
                TypeDefKind::Enum(_) => {}

                // Handled by the guarded arm above or filtered out by
                // `needs_deallocate`.
                TypeDefKind::Future(_) => unreachable!(),
                TypeDefKind::Stream(_) => unreachable!(),
                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedSizeList(..) => todo!(),
            },
        }
    }
2401
2402    fn deallocate_indirect_variant<'b>(
2403        &mut self,
2404        offset: ArchitectureSize,
2405        addr: B::Operand,
2406        tag: Int,
2407        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
2408        what: Deallocate,
2409    ) {
2410        self.stack.push(addr.clone());
2411        self.load_intrepr(offset, tag);
2412        let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
2413        for ty in cases {
2414            self.push_block();
2415            if let Some(ty) = ty {
2416                self.deallocate_indirect(ty, addr.clone(), payload_offset, what);
2417            }
2418            self.finish_block(0);
2419        }
2420    }
2421
2422    fn deallocate_indirect_fields(
2423        &mut self,
2424        tys: &[Type],
2425        addr: B::Operand,
2426        offset: ArchitectureSize,
2427        what: Deallocate,
2428    ) {
2429        for (field_offset, ty) in self.bindgen.sizes().field_offsets(tys) {
2430            self.deallocate_indirect(ty, addr.clone(), offset + (field_offset), what);
2431        }
2432    }
2433}
2434
/// Computes the `Bitcast` required to reinterpret a wasm value of type
/// `from` as a value of type `to`.
///
/// Conversions with no direct single-step cast are expressed as a
/// `Bitcast::Sequence` routed through `I64` or `I32`. Pairs that should
/// never be requested panic via `unreachable!`.
fn cast(from: WasmType, to: WasmType) -> Bitcast {
    use WasmType::*;

    // NB: arm order matters below -- the `(_, PointerOrI64)` and
    // `(PointerOrI64, _)` catch-alls must stay after the more specific
    // `PointerOrI64` arms that precede them.
    match (from, to) {
        // Identity: no conversion needed.
        (I32, I32)
        | (I64, I64)
        | (F32, F32)
        | (F64, F64)
        | (Pointer, Pointer)
        | (PointerOrI64, PointerOrI64)
        | (Length, Length) => Bitcast::None,

        // Direct single-step scalar conversions.
        (I32, I64) => Bitcast::I32ToI64,
        (F32, I32) => Bitcast::F32ToI32,
        (F64, I64) => Bitcast::F64ToI64,

        (I64, I32) => Bitcast::I64ToI32,
        (I32, F32) => Bitcast::I32ToF32,
        (I64, F64) => Bitcast::I64ToF64,

        (F32, I64) => Bitcast::F32ToI64,
        (I64, F32) => Bitcast::I64ToF32,

        // Into the pointer-or-i64 union: direct for I64/Pointer, everything
        // else goes through I64 first.
        (I64, PointerOrI64) => Bitcast::I64ToP64,
        (Pointer, PointerOrI64) => Bitcast::PToP64,
        (_, PointerOrI64) => {
            Bitcast::Sequence(Box::new([cast(from, I64), cast(I64, PointerOrI64)]))
        }

        // Out of the pointer-or-i64 union, mirroring the arms above.
        (PointerOrI64, I64) => Bitcast::P64ToI64,
        (PointerOrI64, Pointer) => Bitcast::P64ToP,
        (PointerOrI64, _) => Bitcast::Sequence(Box::new([cast(PointerOrI64, I64), cast(I64, to)])),

        // Pointer/length conversions to and from plain integers.
        (I32, Pointer) => Bitcast::I32ToP,
        (Pointer, I32) => Bitcast::PToI32,
        (I32, Length) => Bitcast::I32ToL,
        (Length, I32) => Bitcast::LToI32,
        (I64, Length) => Bitcast::I64ToL,
        (Length, I64) => Bitcast::LToI64,
        (Pointer, Length) => Bitcast::PToL,
        (Length, Pointer) => Bitcast::LToP,

        // F32 <-> pointer/length is routed through I32.
        (F32, Pointer | Length) => Bitcast::Sequence(Box::new([cast(F32, I32), cast(I32, to)])),
        (Pointer | Length, F32) => Bitcast::Sequence(Box::new([cast(from, I32), cast(I32, F32)])),

        // No meaningful conversion exists for these pairs.
        (F32, F64)
        | (F64, F32)
        | (F64, I32)
        | (I32, F64)
        | (Pointer | Length, I64 | F64)
        | (I64 | F64, Pointer | Length) => {
            unreachable!("Don't know how to bitcast from {:?} to {:?}", from, to);
        }
    }
}
2490
2491/// Flatten types in a given type
2492///
2493/// It is sometimes necessary to restrict the number of max parameters dynamically,
2494/// for example during an async guest import call (flat params are limited to 4)
2495fn flat_types(resolve: &Resolve, ty: &Type, max_params: Option<usize>) -> Option<Vec<WasmType>> {
2496    let max_params = max_params.unwrap_or(MAX_FLAT_PARAMS);
2497    let mut storage = iter::repeat_n(WasmType::I32, max_params).collect::<Vec<_>>();
2498    let mut flat = FlatTypes::new(storage.as_mut_slice());
2499    resolve.push_flat(ty, &mut flat).then_some(flat.to_vec())
2500}