wit_bindgen_core/abi.rs
pub use wit_parser::abi::{AbiVariant, WasmSignature, WasmType};
use wit_parser::{
    align_to_arch, Alignment, ArchitectureSize, ElementInfo, Enum, Flags, FlagsRepr, Function,
    Handle, Int, Record, Resolve, Result_, SizeAlign, Tuple, Type, TypeDefKind, TypeId, Variant,
};

// Helper macro for defining instructions without having to have tons of
// exhaustive `match` statements to update
macro_rules! def_instruction {
    (
        $( #[$enum_attr:meta] )*
        pub enum $name:ident<'a> {
            $(
                $( #[$attr:meta] )*
                $variant:ident $( {
                    $($field:ident : $field_ty:ty $(,)* )*
                } )?
                :
                [$num_popped:expr] => [$num_pushed:expr],
            )*
        }
    ) => {
        $( #[$enum_attr] )*
        pub enum $name<'a> {
            $(
                $( #[$attr] )*
                $variant $( {
                    $(
                        $field : $field_ty,
                    )*
                } )? ,
            )*
        }

        impl $name<'_> {
            /// How many operands does this instruction pop from the stack?
            #[allow(unused_variables)]
            pub fn operands_len(&self) -> usize {
                match self {
                    $(
                        Self::$variant $( {
                            $(
                                $field,
                            )*
                        } )? => $num_popped,
                    )*
                }
            }

            /// How many results does this instruction push onto the stack?
            #[allow(unused_variables)]
            pub fn results_len(&self) -> usize {
                match self {
                    $(
                        Self::$variant $( {
                            $(
                                $field,
                            )*
                        } )? => $num_pushed,
                    )*
                }
            }
        }
    };
}
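
// For example, a declaration such as
//
//     GetArg { nth: usize } : [0] => [1],
//
// inside the `def_instruction!` invocation below defines a `GetArg` variant
// whose `operands_len()` is 0 and whose `results_len()` is 1, i.e. an
// instruction that pops nothing from the stack and pushes one value.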
66
67def_instruction! {
68 #[derive(Debug)]
69 pub enum Instruction<'a> {
70 /// Acquires the specified parameter and places it on the stack.
71 /// Depending on the context this may refer to wasm parameters or
72 /// interface types parameters.
73 GetArg { nth: usize } : [0] => [1],
74
75 // Integer const/manipulation instructions
76
77 /// Pushes the constant `val` onto the stack.
78 I32Const { val: i32 } : [0] => [1],
79 /// Casts the top N items on the stack using the `Bitcast` enum
80 /// provided. Consumes the same number of operands that this produces.
81 Bitcasts { casts: &'a [Bitcast] } : [casts.len()] => [casts.len()],
82 /// Pushes a number of constant zeros for each wasm type on the stack.
83 ConstZero { tys: &'a [WasmType] } : [0] => [tys.len()],
84
85 // Memory load/store instructions

        /// Pops a pointer from the stack and loads a little-endian `i32` from
        /// it, using the specified constant offset.
        I32Load { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i8` from
        /// it, using the specified constant offset. The value loaded is then
        /// zero-extended to 32-bits.
        I32Load8U { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i8` from
        /// it, using the specified constant offset. The value loaded is then
        /// sign-extended to 32-bits.
        I32Load8S { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i16` from
        /// it, using the specified constant offset. The value loaded is then
        /// zero-extended to 32-bits.
        I32Load16U { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i16` from
        /// it, using the specified constant offset. The value loaded is then
        /// sign-extended to 32-bits.
        I32Load16S { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i64` from
        /// it, using the specified constant offset.
        I64Load { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `f32` from
        /// it, using the specified constant offset.
        F32Load { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `f64` from
        /// it, using the specified constant offset.
        F64Load { offset: ArchitectureSize } : [1] => [1],

        /// Like `I32Load` or `I64Load`, but for loading pointer values.
        PointerLoad { offset: ArchitectureSize } : [1] => [1],
        /// Like `I32Load` or `I64Load`, but for loading array length values.
        LengthLoad { offset: ArchitectureSize } : [1] => [1],

        /// Pops a pointer from the stack and then an `i32` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        I32Store { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `i32` value.
        /// Stores the low 8 bits of the value in little-endian at the pointer
        /// specified plus the constant `offset`.
        I32Store8 { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `i32` value.
        /// Stores the low 16 bits of the value in little-endian at the pointer
        /// specified plus the constant `offset`.
        I32Store16 { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `i64` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        I64Store { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `f32` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        F32Store { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `f64` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        F64Store { offset: ArchitectureSize } : [2] => [0],

        /// Like `I32Store` or `I64Store`, but for storing pointer values.
        PointerStore { offset: ArchitectureSize } : [2] => [0],
        /// Like `I32Store` or `I64Store`, but for storing array length values.
        LengthStore { offset: ArchitectureSize } : [2] => [0],

        // Scalar lifting/lowering

        /// Converts an interface type `char` value to a 32-bit integer
        /// representing the unicode scalar value.
        I32FromChar : [1] => [1],
        /// Converts an interface type `u64` value to a wasm `i64`.
        I64FromU64 : [1] => [1],
        /// Converts an interface type `s64` value to a wasm `i64`.
        I64FromS64 : [1] => [1],
        /// Converts an interface type `u32` value to a wasm `i32`.
        I32FromU32 : [1] => [1],
        /// Converts an interface type `s32` value to a wasm `i32`.
        I32FromS32 : [1] => [1],
        /// Converts an interface type `u16` value to a wasm `i32`.
        I32FromU16 : [1] => [1],
        /// Converts an interface type `s16` value to a wasm `i32`.
        I32FromS16 : [1] => [1],
        /// Converts an interface type `u8` value to a wasm `i32`.
        I32FromU8 : [1] => [1],
        /// Converts an interface type `s8` value to a wasm `i32`.
        I32FromS8 : [1] => [1],
        /// Converts an interface type `f32` value to a wasm `f32`.
        ///
        /// This may be a noop for some implementations, but it's here in case the
        /// native language representation of `f32` is different than the wasm
        /// representation of `f32`.
        CoreF32FromF32 : [1] => [1],
        /// Converts an interface type `f64` value to a wasm `f64`.
        ///
        /// This may be a noop for some implementations, but it's here in case the
        /// native language representation of `f64` is different than the wasm
        /// representation of `f64`.
        CoreF64FromF64 : [1] => [1],

        /// Converts a native wasm `i32` to an interface type `s8`.
        ///
        /// This will truncate the upper bits of the `i32`.
        S8FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u8`.
        ///
        /// This will truncate the upper bits of the `i32`.
        U8FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `s16`.
        ///
        /// This will truncate the upper bits of the `i32`.
        S16FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u16`.
        ///
        /// This will truncate the upper bits of the `i32`.
        U16FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `s32`.
        S32FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u32`.
        U32FromI32 : [1] => [1],
        /// Converts a native wasm `i64` to an interface type `s64`.
        S64FromI64 : [1] => [1],
        /// Converts a native wasm `i64` to an interface type `u64`.
        U64FromI64 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `char`.
        ///
        /// It's safe to assume that the `i32` is indeed a valid unicode code point.
        CharFromI32 : [1] => [1],
        /// Converts a native wasm `f32` to an interface type `f32`.
        F32FromCoreF32 : [1] => [1],
        /// Converts a native wasm `f64` to an interface type `f64`.
        F64FromCoreF64 : [1] => [1],

        /// Creates a `bool` from an `i32` input, trapping if the `i32` isn't
        /// zero or one.
        BoolFromI32 : [1] => [1],
        /// Creates an `i32` from a `bool` input, must return 0 or 1.
        I32FromBool : [1] => [1],

        // lists

        /// Lowers a list where the element's layout in the native language is
        /// expected to match the canonical ABI definition of interface types.
        ///
        /// Pops a list value from the stack and pushes the pointer/length onto
        /// the stack. If `realloc` is set to `Some` then this is expected to
        /// *consume* the list which means that the data needs to be copied. An
        /// allocation/copy is expected when:
        ///
        /// * A host is calling a wasm export with a list (it needs to copy the
        ///   list into the callee's module, allocating space with `realloc`)
        /// * A wasm export is returning a list (it's expected to use `realloc`
        ///   to give ownership of the list to the caller)
        /// * A host is returning a list in an import definition, meaning that
        ///   space needs to be allocated in the caller with `realloc`
        ///
        /// A copy does not happen (e.g. `realloc` is `None`) when:
        ///
        /// * A wasm module calls an import with the list. In this situation
        ///   it's expected the caller will know how to access this module's
        ///   memory (e.g. the host has raw access or wasm-to-wasm communication
        ///   would copy the list).
        ///
        /// If `realloc` is `Some` then the adapter is not responsible for
        /// cleaning up this list because the other end is receiving the
        /// allocation. If `realloc` is `None` then the adapter is responsible
        /// for cleaning up any temporary allocation it created, if any.
        ListCanonLower {
            element: &'a Type,
            realloc: Option<&'a str>,
        } : [1] => [2],

        /// Same as `ListCanonLower`, but used for strings
        StringLower {
            realloc: Option<&'a str>,
        } : [1] => [2],

        /// Lowers a list where the element's layout in the native language is
        /// not expected to match the canonical ABI definition of interface
        /// types.
        ///
        /// Pops a list value from the stack and pushes the pointer/length onto
        /// the stack. This operation also pops a block from the block stack
        /// which is used as the iteration body of writing each element of the
        /// list consumed.
        ///
        /// The `realloc` field here behaves the same way as `ListCanonLower`.
        /// It's only set to `None` when a wasm module calls a declared import.
        /// Otherwise lowering in other contexts requires allocating memory for
        /// the receiver to own.
        ListLower {
            element: &'a Type,
            realloc: Option<&'a str>,
        } : [1] => [2],

        /// Lifts a list which has a canonical representation into an interface
        /// types value.
        ///
        /// The term "canonical" representation here means that the
        /// representation of the interface types value in the native language
        /// exactly matches the canonical ABI definition of the type.
        ///
        /// This will consume two `i32` values from the stack, a pointer and a
        /// length, and then produces an interface value list.
        ListCanonLift {
            element: &'a Type,
            ty: TypeId,
        } : [2] => [1],

        /// Same as `ListCanonLift`, but used for strings
        StringLift : [2] => [1],

        /// Lifts a list into an interface types value.
        ///
        /// This will consume two `i32` values from the stack, a pointer and a
        /// length, and then produces an interface value list.
        ///
        /// This will also pop a block from the block stack which is how to
        /// read each individual element from the list.
        ListLift {
            element: &'a Type,
            ty: TypeId,
        } : [2] => [1],

        /// Pushes an operand onto the stack representing the list item from
        /// each iteration of the list.
        ///
        /// This is only used inside of blocks related to lowering lists.
        IterElem { element: &'a Type } : [0] => [1],

        /// Pushes an operand onto the stack representing the base pointer of
        /// the next element in a list.
        ///
        /// This is used for both lifting and lowering lists.
        IterBasePointer : [0] => [1],

        // records and tuples

        /// Pops a record value off the stack, decomposes the record to all of
        /// its fields, and then pushes the fields onto the stack.
        RecordLower {
            record: &'a Record,
            name: &'a str,
            ty: TypeId,
        } : [1] => [record.fields.len()],

        /// Pops all fields for a record off the stack and then composes them
        /// into a record.
        RecordLift {
            record: &'a Record,
            name: &'a str,
            ty: TypeId,
        } : [record.fields.len()] => [1],

        /// Create an `i32` from a handle.
        HandleLower {
            handle: &'a Handle,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Create a handle from an `i32`.
        HandleLift {
            handle: &'a Handle,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Create an `i32` from a future.
        FutureLower {
            payload: &'a Option<Type>,
            ty: TypeId,
        } : [1] => [1],

        /// Create a future from an `i32`.
        FutureLift {
            payload: &'a Option<Type>,
            ty: TypeId,
        } : [1] => [1],

        /// Create an `i32` from a stream.
        StreamLower {
            payload: &'a Option<Type>,
            ty: TypeId,
        } : [1] => [1],

        /// Create a stream from an `i32`.
        StreamLift {
            payload: &'a Option<Type>,
            ty: TypeId,
        } : [1] => [1],

        /// Create an `i32` from an error-context.
        ErrorContextLower : [1] => [1],

        /// Create an error-context from an `i32`.
        ErrorContextLift : [1] => [1],

        /// Pops a tuple value off the stack, decomposes the tuple to all of
        /// its fields, and then pushes the fields onto the stack.
        TupleLower {
            tuple: &'a Tuple,
            ty: TypeId,
        } : [1] => [tuple.types.len()],

        /// Pops all fields for a tuple off the stack and then composes them
        /// into a tuple.
        TupleLift {
            tuple: &'a Tuple,
            ty: TypeId,
        } : [tuple.types.len()] => [1],

        /// Converts a language-specific record-of-bools to a list of `i32`.
        FlagsLower {
            flags: &'a Flags,
            name: &'a str,
            ty: TypeId,
        } : [1] => [flags.repr().count()],
        /// Converts a list of native wasm `i32` to a language-specific
        /// record-of-bools.
        FlagsLift {
            flags: &'a Flags,
            name: &'a str,
            ty: TypeId,
        } : [flags.repr().count()] => [1],

        // variants

        /// This is a special instruction used by the `VariantLower`
        /// instruction to determine the name of the payload, if present, to
        /// use within each block.
        ///
        /// Each sub-block will have this be the first instruction, and if it
        /// lowers a payload it will expect something bound to this name.
        VariantPayloadName : [0] => [1],

        /// Pops a variant off the stack as well as `ty.cases.len()` blocks
        /// from the code generator. Uses each of those blocks and the value
        /// from the stack to produce `results.len()` items.
        VariantLower {
            variant: &'a Variant,
            name: &'a str,
            ty: TypeId,
            results: &'a [WasmType],
        } : [1] => [results.len()],

        /// Pops an `i32` off the stack as well as `ty.cases.len()` blocks
        /// from the code generator. Uses each of those blocks and the value
        /// from the stack to produce a final variant.
        VariantLift {
            variant: &'a Variant,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Pops an enum off the stack and pushes the `i32` representation.
        EnumLower {
            enum_: &'a Enum,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Pops an `i32` off the stack and lifts it into the `enum` specified.
        EnumLift {
            enum_: &'a Enum,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Specialization of `VariantLower` for specifically `option<T>` types,
        /// otherwise behaves the same as `VariantLower` (e.g. two blocks for
        /// the two cases).
        OptionLower {
            payload: &'a Type,
            ty: TypeId,
            results: &'a [WasmType],
        } : [1] => [results.len()],

        /// Specialization of `VariantLift` for specifically the `option<T>`
        /// type. Otherwise behaves the same as the `VariantLift` instruction
        /// with two blocks for the lift.
        OptionLift {
            payload: &'a Type,
            ty: TypeId,
        } : [1] => [1],

        /// Specialization of `VariantLower` for specifically `result<T, E>`
        /// types, otherwise behaves the same as `VariantLower` (e.g. two blocks
        /// for the two cases).
        ResultLower {
            result: &'a Result_,
            ty: TypeId,
            results: &'a [WasmType],
        } : [1] => [results.len()],

        /// Specialization of `VariantLift` for specifically the `result<T,
        /// E>` type. Otherwise behaves the same as the `VariantLift`
        /// instruction with two blocks for the lift.
        ResultLift {
            result: &'a Result_,
            ty: TypeId,
        } : [1] => [1],

        // calling/control flow

        /// Represents a call to a raw WebAssembly API. The module/name are
        /// provided inline as well as the types if necessary.
        CallWasm {
            name: &'a str,
            sig: &'a WasmSignature,
        } : [sig.params.len()] => [sig.results.len()],

        /// Same as `CallWasm`, except the dual where an interface is being
        /// called rather than a raw wasm function.
        ///
        /// Note that this will be used for async functions.
        CallInterface {
            func: &'a Function,
            async_: bool,
        } : [func.params.len()] => [if *async_ { 1 } else { usize::from(func.result.is_some()) }],

        /// Returns `amt` values on the stack. This is always the last
        /// instruction.
        Return { amt: usize, func: &'a Function } : [*amt] => [0],

        /// Calls the `realloc` function specified in a malloc-like fashion
        /// allocating `size` bytes with alignment `align`.
        ///
        /// Pushes the returned pointer onto the stack.
        Malloc {
            realloc: &'static str,
            size: ArchitectureSize,
            align: Alignment,
        } : [0] => [1],

        /// Used exclusively for guest-code generation, this indicates that
        /// the standard memory deallocation function needs to be invoked with
        /// the specified parameters.
        ///
        /// This will pop a pointer from the stack and push nothing.
        GuestDeallocate {
            size: ArchitectureSize,
            align: Alignment,
        } : [1] => [0],

        /// Used exclusively for guest-code generation, this indicates that
        /// a string is being deallocated. The ptr/length are on the stack and
        /// are popped off and used to deallocate the string.
        GuestDeallocateString : [2] => [0],

        /// Used exclusively for guest-code generation, this indicates that
        /// a list is being deallocated. The ptr/length are on the stack and
        /// are popped off and used to deallocate the list.
        ///
        /// This variant also pops a block off the block stack to be used as the
        /// body of the deallocation loop.
        GuestDeallocateList {
            element: &'a Type,
        } : [2] => [0],

        /// Used exclusively for guest-code generation, this indicates that
        /// a variant is being deallocated. The integer discriminant is popped
        /// off the stack as well as `blocks` number of blocks popped from the
        /// blocks stack. The variant is used to select, at runtime, which of
        /// the blocks is executed to deallocate the variant.
        GuestDeallocateVariant {
            blocks: usize,
        } : [1] => [0],

        /// Call an async-lowered import.
        AsyncCallWasm { name: &'a str } : [2] => [0],

        /// Generate code to run after `CallInterface` for an async-lifted export.
        ///
        /// For example, this might include task management for the
        /// future/promise/task returned by the call made for `CallInterface`.
        AsyncPostCallInterface { func: &'a Function } : [1] => [usize::from(func.result.is_some()) + 1],

        /// Call `task.return` for an async-lifted export once the task returned
        /// by `CallInterface` and managed by `AsyncPostCallInterface`
        /// yields a value.
        AsyncCallReturn { name: &'a str, params: &'a [WasmType] } : [params.len()] => [0],

        /// Force the evaluation of the specified number of expressions and push
        /// the results to the stack.
        ///
        /// This is useful prior to disposing of temporary variables and/or
        /// allocations which are referenced by one or more not-yet-evaluated
        /// expressions.
        Flush { amt: usize } : [*amt] => [*amt],
    }
}

#[derive(Debug, PartialEq)]
pub enum Bitcast {
    // Upcasts
    F32ToI32,
    F64ToI64,
    I32ToI64,
    F32ToI64,

    // Downcasts
    I32ToF32,
    I64ToF64,
    I64ToI32,
    I64ToF32,

    // PointerOrI64 conversions. These preserve provenance when the source
    // or destination is a pointer value.
    //
    // These are used when pointer values are being stored in
    // (ToP64) and loaded out of (P64To) PointerOrI64 values, so they
    // always have to preserve provenance when the value being loaded or
    // stored is a pointer.
    P64ToI64,
    I64ToP64,
    P64ToP,
    PToP64,

    // Pointer<->number conversions. These do not preserve provenance.
    //
    // These are used when integer or floating-point values are being stored in
    // (I32ToP/etc.) and loaded out of (PToI32/etc.) pointer values, so they
    // never have any provenance to preserve.
    I32ToP,
    PToI32,
    PToL,
    LToP,

    // Number<->Number conversions.
    I32ToL,
    LToI32,
    I64ToL,
    LToI64,

    // Multiple conversions in sequence.
    Sequence(Box<[Bitcast; 2]>),

    None,
}
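
// As an illustration (a hypothetical composition; the exact sequences emitted
// are decided by the `cast` helper in this file), moving an `f32` into an
// `i64` slot by way of an `i32` could be expressed as:
//
//     Bitcast::Sequence(Box::new([Bitcast::F32ToI32, Bitcast::I32ToI64]))
//
// while positions that need no conversion at all use `Bitcast::None`.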

/// Whether the glue code surrounding a call is lifting arguments and lowering
/// results or vice versa.
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum LiftLower {
    /// When the glue code lifts arguments and lowers results.
    ///
    /// ```text
    /// Wasm --lift-args--> SourceLanguage; call; SourceLanguage --lower-results--> Wasm
    /// ```
    LiftArgsLowerResults,
    /// When the glue code lowers arguments and lifts results.
    ///
    /// ```text
    /// SourceLanguage --lower-args--> Wasm; call; Wasm --lift-results--> SourceLanguage
    /// ```
    LowerArgsLiftResults,
}

/// Trait for language implementors to use to generate glue code between native
/// WebAssembly signatures and interface types signatures.
///
/// This is used as an implementation detail in interpreting the ABI between
/// interface types and wasm types. Eventually this will be driven by interface
/// types adapters themselves, but for now the ABI of a function dictates what
/// instructions are fed in.
///
/// Types implementing `Bindgen` are incrementally fed `Instruction` values to
/// generate code for. Instructions operate like a stack machine where each
/// instruction has a list of inputs and a list of outputs (provided by the
/// `emit` function).
pub trait Bindgen {
    /// The intermediate type for fragments of code for this type.
    ///
    /// For most languages `String` is a suitable intermediate type.
    type Operand: Clone;

    /// Emit code to implement the given instruction.
    ///
    /// Each operand is given in `operands` and can be popped off if ownership
    /// is required. It's guaranteed that `operands` has the appropriate length
    /// for the `inst` given, as specified with [`Instruction`].
    ///
    /// Each result variable should be pushed onto `results`. This function must
    /// push the appropriate number of results or binding generation will panic.
    fn emit(
        &mut self,
        resolve: &Resolve,
        inst: &Instruction<'_>,
        operands: &mut Vec<Self::Operand>,
        results: &mut Vec<Self::Operand>,
    );

    /// Gets an operand reference to the return pointer area.
    ///
    /// The provided size and alignment are for the function's return type.
    fn return_pointer(&mut self, size: ArchitectureSize, align: Alignment) -> Self::Operand;

    /// Enters a new block of code to generate code for.
    ///
    /// This is currently exclusively used for constructing variants. When a
    /// variant is constructed a block here will be pushed for each case of a
    /// variant, generating the code necessary to translate a variant case.
    ///
    /// Blocks are completed with `finish_block` below. It's expected that `emit`
    /// will always push code (if necessary) into the "current block", which is
    /// updated by calling this method and `finish_block` below.
    fn push_block(&mut self);

    /// Indicates to the code generator that a block is completed, and the
    /// `operand` specified was the resulting value of the block.
    ///
    /// This method will be used to compute the value of each arm of lifting a
    /// variant. The `operand` will be `None` if the variant case didn't
    /// actually have any type associated with it. Otherwise it will be `Some`
    /// as the last value remaining on the stack representing the value
    /// associated with a variant's `case`.
    ///
    /// It's expected that this will resume code generation in the previous
    /// block before `push_block` was called. This must also save the results
    /// of the current block internally for instructions like `ResultLift` to
    /// use later.
    fn finish_block(&mut self, operand: &mut Vec<Self::Operand>);

    /// Returns size information that was previously calculated for all types.
    fn sizes(&self) -> &SizeAlign;

    /// Returns whether or not the specified element type is represented in a
    /// "canonical" form for lists. This dictates whether the `ListCanonLower`
    /// and `ListCanonLift` instructions are used or not.
    fn is_list_canonical(&self, resolve: &Resolve, element: &Type) -> bool;
}
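
// A minimal sketch of a `Bindgen` implementor, kept as a test-only example.
// The names here are hypothetical and not part of the upstream file: a real
// generator matches on `inst` and emits language-specific code, tracks
// blocks, and fills in a real `SizeAlign`. This sketch only demonstrates the
// stack-machine contract: consume `operands`, push exactly `results_len()`
// results.
#[cfg(test)]
mod bindgen_example {
    use super::*;

    #[allow(dead_code)]
    struct PseudoCode {
        next_var: usize,
        sizes: SizeAlign,
    }

    impl Bindgen for PseudoCode {
        type Operand = String;

        fn emit(
            &mut self,
            _resolve: &Resolve,
            inst: &Instruction<'_>,
            operands: &mut Vec<String>,
            results: &mut Vec<String>,
        ) {
            // Render the inputs (a real generator would generate code here),
            // then mint one fresh variable per result. `Generator::emit`
            // asserts that exactly `results_len()` results were pushed.
            let _inputs = operands.join(", ");
            for _ in 0..inst.results_len() {
                results.push(format!("v{}", self.next_var));
                self.next_var += 1;
            }
        }

        fn return_pointer(&mut self, _size: ArchitectureSize, _align: Alignment) -> String {
            "ret_area".to_string()
        }

        fn push_block(&mut self) {}

        fn finish_block(&mut self, _operand: &mut Vec<String>) {}

        fn sizes(&self) -> &SizeAlign {
            &self.sizes
        }

        fn is_list_canonical(&self, _resolve: &Resolve, _element: &Type) -> bool {
            true
        }
    }
}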

/// Generates an abstract sequence of instructions which represents this
/// function being adapted as an imported function.
///
/// The instructions here, when executed, will emulate a language with
/// interface types calling the concrete wasm implementation. The parameters
/// for the returned instruction sequence are the language's own
/// interface-types parameters. One instruction in the instruction stream
/// will be a `CallWasm` which represents calling the actual raw wasm function
/// signature.
///
/// This function is useful, for example, if you're building a language
/// generator for WASI bindings. This will document how to translate
/// language-specific values into the wasm types to call a WASI function,
/// and it will also automatically convert the results of the WASI function
/// back to a language-specific value.
pub fn call(
    resolve: &Resolve,
    variant: AbiVariant,
    lift_lower: LiftLower,
    func: &Function,
    bindgen: &mut impl Bindgen,
    async_: bool,
) {
    Generator::new(resolve, variant, lift_lower, bindgen, async_).call(func);
}
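
// For instance (with hypothetical `resolve`, `func`, and `gen` values, where
// `gen` implements `Bindgen`), generating guest-side glue for a synchronous
// imported function would look like:
//
//     call(&resolve, AbiVariant::GuestImport, LiftLower::LowerArgsLiftResults,
//          &func, &mut gen, false);
//
// after which `gen.emit` has been fed the full instruction stream, including
// the single call instruction for the raw wasm function itself.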

pub fn lower_to_memory<B: Bindgen>(
    resolve: &Resolve,
    bindgen: &mut B,
    address: B::Operand,
    value: B::Operand,
    ty: &Type,
) {
    // TODO: refactor so we don't need to pass in a bunch of unused dummy parameters:
    let mut generator = Generator::new(
        resolve,
        AbiVariant::GuestImport,
        LiftLower::LowerArgsLiftResults,
        bindgen,
        true,
    );
    generator.stack.push(value);
    generator.write_to_memory(ty, address, Default::default());
}

pub fn lift_from_memory<B: Bindgen>(
    resolve: &Resolve,
    bindgen: &mut B,
    address: B::Operand,
    ty: &Type,
) -> B::Operand {
    // TODO: refactor so we don't need to pass in a bunch of unused dummy parameters:
    let mut generator = Generator::new(
        resolve,
        AbiVariant::GuestImport,
        LiftLower::LowerArgsLiftResults,
        bindgen,
        true,
    );
    generator.read_from_memory(ty, address, Default::default());
    generator.stack.pop().unwrap()
}

/// Used in a similar manner as the `Interface::call` function except it is
/// used to generate the `post-return` callback for `func`.
///
/// This is only intended to be used in guest generators for exported
/// functions and will primarily generate `GuestDeallocate*` instructions,
/// plus others used as input to those instructions.
pub fn post_return(resolve: &Resolve, func: &Function, bindgen: &mut impl Bindgen, async_: bool) {
    Generator::new(
        resolve,
        AbiVariant::GuestExport,
        LiftLower::LiftArgsLowerResults,
        bindgen,
        async_,
    )
    .post_return(func);
}

/// Returns whether the `Function` specified needs a post-return function to
/// be generated in guest code.
///
/// This is used primarily when the return value contains a memory allocation
/// such as a list or a string.
pub fn guest_export_needs_post_return(resolve: &Resolve, func: &Function) -> bool {
    func.result
        .map(|t| needs_post_return(resolve, &t))
        .unwrap_or(false)
}

fn needs_post_return(resolve: &Resolve, ty: &Type) -> bool {
    match ty {
        Type::String => true,
        Type::ErrorContext => true,
        Type::Id(id) => match &resolve.types[*id].kind {
            TypeDefKind::List(_) => true,
            TypeDefKind::Type(t) => needs_post_return(resolve, t),
            TypeDefKind::Handle(_) => false,
            TypeDefKind::Resource => false,
            TypeDefKind::Record(r) => r.fields.iter().any(|f| needs_post_return(resolve, &f.ty)),
            TypeDefKind::Tuple(t) => t.types.iter().any(|t| needs_post_return(resolve, t)),
            TypeDefKind::Variant(t) => t
                .cases
                .iter()
                .filter_map(|t| t.ty.as_ref())
                .any(|t| needs_post_return(resolve, t)),
            TypeDefKind::Option(t) => needs_post_return(resolve, t),
            TypeDefKind::Result(t) => [&t.ok, &t.err]
                .iter()
                .filter_map(|t| t.as_ref())
                .any(|t| needs_post_return(resolve, t)),
            TypeDefKind::Flags(_) | TypeDefKind::Enum(_) => false,
            TypeDefKind::Future(_) | TypeDefKind::Stream(_) => false,
            TypeDefKind::Unknown => unreachable!(),
        },

        Type::Bool
        | Type::U8
        | Type::S8
        | Type::U16
        | Type::S16
        | Type::U32
        | Type::S32
        | Type::U64
        | Type::S64
        | Type::F32
        | Type::F64
        | Type::Char => false,
    }
}
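
// For example, an export returning `string` or `list<u32>` (or any record,
// tuple, variant, option, or result transitively containing one) reports
// `true` here and gets a post-return function to free the returned
// allocation, while purely scalar returns such as `u32`, `bool`, or an enum
// report `false` and need no cleanup.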

struct Generator<'a, B: Bindgen> {
    variant: AbiVariant,
    lift_lower: LiftLower,
    bindgen: &'a mut B,
    async_: bool,
    resolve: &'a Resolve,
    operands: Vec<B::Operand>,
    results: Vec<B::Operand>,
    stack: Vec<B::Operand>,
    return_pointer: Option<B::Operand>,
}

impl<'a, B: Bindgen> Generator<'a, B> {
    fn new(
        resolve: &'a Resolve,
        variant: AbiVariant,
        lift_lower: LiftLower,
        bindgen: &'a mut B,
        async_: bool,
    ) -> Generator<'a, B> {
        Generator {
            resolve,
            variant,
            lift_lower,
            bindgen,
            async_,
            operands: Vec::new(),
            results: Vec::new(),
            stack: Vec::new(),
            return_pointer: None,
        }
    }

    fn call(&mut self, func: &Function) {
        const MAX_FLAT_PARAMS: usize = 16;

        let sig = self.resolve.wasm_signature(self.variant, func);

        match self.lift_lower {
            LiftLower::LowerArgsLiftResults => {
                if let (AbiVariant::GuestExport, true) = (self.variant, self.async_) {
                    unimplemented!("host-side code generation for async lift/lower not supported");
                }

                let lower_to_memory = |self_: &mut Self, ptr: B::Operand| {
                    let mut offset = ArchitectureSize::default();
                    for (nth, (_, ty)) in func.params.iter().enumerate() {
                        self_.emit(&Instruction::GetArg { nth });
                        offset = align_to_arch(offset, self_.bindgen.sizes().align(ty));
                        self_.write_to_memory(ty, ptr.clone(), offset);
                        offset += self_.bindgen.sizes().size(ty);
                    }

                    self_.stack.push(ptr);
                };

                if self.async_ {
                    let ElementInfo { size, align } = self
                        .bindgen
                        .sizes()
                        .record(func.params.iter().map(|(_, ty)| ty));
                    let ptr = self.bindgen.return_pointer(size, align);
                    lower_to_memory(self, ptr);
                } else {
                    if !sig.indirect_params {
                        // If the parameters for this function aren't indirect
                        // (there aren't too many) then we simply do a normal lower
                        // operation for them all.
                        for (nth, (_, ty)) in func.params.iter().enumerate() {
                            self.emit(&Instruction::GetArg { nth });
                            self.lower(ty);
                        }
                    } else {
                        // ... otherwise if parameters are indirect space is
                        // allocated for them and each argument is lowered
                        // individually into memory.
                        let ElementInfo { size, align } = self
                            .bindgen
                            .sizes()
                            .record(func.params.iter().map(|t| &t.1));
                        let ptr = match self.variant {
                            // When a wasm module calls an import it will provide
                            // space that isn't explicitly deallocated.
                            AbiVariant::GuestImport => self.bindgen.return_pointer(size, align),
                            // When calling a wasm module from the outside, though,
                            // malloc needs to be called.
                            AbiVariant::GuestExport => {
                                self.emit(&Instruction::Malloc {
                                    realloc: "cabi_realloc",
                                    size,
                                    align,
                                });
                                self.stack.pop().unwrap()
                            }
                            AbiVariant::GuestImportAsync
                            | AbiVariant::GuestExportAsync
                            | AbiVariant::GuestExportAsyncStackful => {
                                unreachable!()
                            }
                        };
                        lower_to_memory(self, ptr);
                    }
                }

                if self.async_ {
                    let ElementInfo { size, align } =
                        self.bindgen.sizes().record(func.result.iter());
                    let ptr = self.bindgen.return_pointer(size, align);
                    self.return_pointer = Some(ptr.clone());
                    self.stack.push(ptr);

                    assert_eq!(self.stack.len(), 2);
                    self.emit(&Instruction::AsyncCallWasm {
                        name: &format!("[async-lower]{}", func.name),
                    });
                } else {
                    // If necessary we may need to prepare a return pointer for
                    // this ABI.
                    if self.variant == AbiVariant::GuestImport && sig.retptr {
                        let info = self.bindgen.sizes().params(&func.result);
                        let ptr = self.bindgen.return_pointer(info.size, info.align);
                        self.return_pointer = Some(ptr.clone());
                        self.stack.push(ptr);
                    }

                    assert_eq!(self.stack.len(), sig.params.len());
                    self.emit(&Instruction::CallWasm {
                        name: &func.name,
                        sig: &sig,
                    });
                }

                if !(sig.retptr || self.async_) {
                    // With no return pointer in use we can simply lift the
                    // result(s) of the function from the result of the core
                    // wasm function.
                    if let Some(ty) = &func.result {
                        self.lift(ty)
                    }
                } else {
                    let ptr = match self.variant {
                        // imports into guests means it's a wasm module
                        // calling an imported function. We supplied the
                        // return pointer as the last argument (saved in
                        // `self.return_pointer`) so we use that to read
                        // the result of the function from memory.
                        AbiVariant::GuestImport => {
                            assert!(sig.results.is_empty() || self.async_);
                            self.return_pointer.take().unwrap()
                        }

                        // guest exports means that this is a host
                        // calling wasm so wasm returned a pointer to where
                        // the result is stored
                        AbiVariant::GuestExport => self.stack.pop().unwrap(),

                        AbiVariant::GuestImportAsync
                        | AbiVariant::GuestExportAsync
                        | AbiVariant::GuestExportAsyncStackful => {
                            unreachable!()
                        }
                    };

                    self.read_results_from_memory(
                        &func.result,
                        ptr.clone(),
                        ArchitectureSize::default(),
                    );
                    self.emit(&Instruction::Flush {
                        amt: usize::from(func.result.is_some()),
                    });
                }

                self.emit(&Instruction::Return {
                    func,
                    amt: usize::from(func.result.is_some()),
                });
            }
            LiftLower::LiftArgsLowerResults => {
                if let (AbiVariant::GuestImport, true) = (self.variant, self.async_) {
                    todo!("implement host-side support for async lift/lower");
                }

                let read_from_memory = |self_: &mut Self| {
                    let mut offset = ArchitectureSize::default();
                    let ptr = self_.stack.pop().unwrap();
                    for (_, ty) in func.params.iter() {
                        offset = align_to_arch(offset, self_.bindgen.sizes().align(ty));
                        self_.read_from_memory(ty, ptr.clone(), offset);
                        offset += self_.bindgen.sizes().size(ty);
                    }
                };

                if !sig.indirect_params {
                    // If parameters are not passed indirectly then we lift each
                    // argument in succession from the component wasm types that
                    // make up the type.
                    let mut offset = 0;
                    let mut temp = Vec::new();
                    for (_, ty) in func.params.iter() {
                        temp.truncate(0);
                        self.resolve.push_flat(ty, &mut temp);
                        for _ in 0..temp.len() {
                            self.emit(&Instruction::GetArg { nth: offset });
                            offset += 1;
                        }
                        self.lift(ty);
                    }
                } else {
                    // ... otherwise arguments are read in succession from
                    // memory, where the pointer to the arguments is the first
                    // argument to the function.
                    self.emit(&Instruction::GetArg { nth: 0 });
                    read_from_memory(self);
                }

                // ... and that allows us to call the interface types function
                self.emit(&Instruction::CallInterface {
                    func,
                    async_: self.async_,
                });

                let (lower_to_memory, async_results) = if self.async_ {
                    self.emit(&Instruction::AsyncPostCallInterface { func });

                    let mut results = Vec::new();
                    if let Some(ty) = &func.result {
                        self.resolve.push_flat(ty, &mut results);
                    }
                    (results.len() > MAX_FLAT_PARAMS, Some(results))
                } else {
                    (sig.retptr, None)
                };

                // This was dynamically allocated by the caller (or async start
                // function) so after it's been read by the guest we need to
                // deallocate it.
                if let AbiVariant::GuestExport = self.variant {
                    if sig.indirect_params && !self.async_ {
                        let ElementInfo { size, align } = self
                            .bindgen
                            .sizes()
                            .record(func.params.iter().map(|t| &t.1));
                        self.emit(&Instruction::GetArg { nth: 0 });
                        self.emit(&Instruction::GuestDeallocate { size, align });
                    }
                }

                if !lower_to_memory {
                    // With no return pointer in use we simply lower the
                    // result(s) and return that directly from the function.
                    if let Some(ty) = &func.result {
                        self.lower(ty);
                    }
                } else {
                    match self.variant {
                        // When a function is imported to a guest this means
                        // it's a host providing the implementation of the
                        // import. The result is stored in the pointer
                        // specified in the last argument, so we get the
                        // pointer here and then write the return value into
                        // it.
                        AbiVariant::GuestImport => {
                            self.emit(&Instruction::GetArg {
                                nth: sig.params.len() - 1,
                            });
                            let ptr = self.stack.pop().unwrap();
                            self.write_params_to_memory(&func.result, ptr, Default::default());
                        }

                        // For a guest export this is a function defined in
                        // wasm, so we're returning a pointer where the
                        // value was stored at. Allocate some space here
                        // (statically) and then write the result into that
                        // memory, returning the pointer at the end.
                        AbiVariant::GuestExport => {
                            let ElementInfo { size, align } =
                                self.bindgen.sizes().params(&func.result);
                            let ptr = self.bindgen.return_pointer(size, align);
                            self.write_params_to_memory(
                                &func.result,
                                ptr.clone(),
                                Default::default(),
                            );
                            self.stack.push(ptr);
                        }

                        AbiVariant::GuestImportAsync
                        | AbiVariant::GuestExportAsync
                        | AbiVariant::GuestExportAsyncStackful => {
                            unreachable!()
                        }
                    }
                }

                if let Some(results) = async_results {
                    let name = &format!("[task-return]{}", func.name);

                    self.emit(&Instruction::AsyncCallReturn {
                        name,
                        params: &if results.len() > MAX_FLAT_PARAMS {
                            vec![WasmType::Pointer]
                        } else {
                            results
                        },
                    });
                    self.emit(&Instruction::Return { func, amt: 1 });
                } else {
                    self.emit(&Instruction::Return {
                        func,
                        amt: sig.results.len(),
                    });
                }
            }
        }

        assert!(
            self.stack.is_empty(),
            "stack has {} items remaining",
            self.stack.len()
        );
    }

    fn post_return(&mut self, func: &Function) {
        let sig = self.resolve.wasm_signature(self.variant, func);

        // Currently post-return is only used for lists and lists are always
        // returned indirectly through memory due to their flat representation
        // having more than one type. Assert that a return pointer is used,
        // though, in case this ever changes.
        assert!(sig.retptr);

        self.emit(&Instruction::GetArg { nth: 0 });
        let addr = self.stack.pop().unwrap();
        for (offset, ty) in self.bindgen.sizes().field_offsets(&func.result) {
            self.deallocate(ty, addr.clone(), offset);
        }
        self.emit(&Instruction::Return { func, amt: 0 });

        assert!(
            self.stack.is_empty(),
            "stack has {} items remaining",
            self.stack.len()
        );
    }

    fn emit(&mut self, inst: &Instruction<'_>) {
        self.operands.clear();
        self.results.clear();

        let operands_len = inst.operands_len();
        assert!(
            self.stack.len() >= operands_len,
            "not enough operands on stack for {:?}",
            inst
        );
        self.operands
            .extend(self.stack.drain((self.stack.len() - operands_len)..));
        self.results.reserve(inst.results_len());

        self.bindgen
            .emit(self.resolve, inst, &mut self.operands, &mut self.results);

        assert_eq!(
            self.results.len(),
            inst.results_len(),
            "{:?} expected {} results, got {}",
            inst,
            inst.results_len(),
            self.results.len()
        );
        self.stack.append(&mut self.results);
    }

    fn push_block(&mut self) {
        self.bindgen.push_block();
    }

    fn finish_block(&mut self, size: usize) {
        self.operands.clear();
        assert!(
            size <= self.stack.len(),
            "not enough operands on stack for finishing block",
        );
        self.operands
            .extend(self.stack.drain((self.stack.len() - size)..));
        self.bindgen.finish_block(&mut self.operands);
    }

    fn lower(&mut self, ty: &Type) {
        use Instruction::*;

        match *ty {
            Type::Bool => self.emit(&I32FromBool),
            Type::S8 => self.emit(&I32FromS8),
            Type::U8 => self.emit(&I32FromU8),
            Type::S16 => self.emit(&I32FromS16),
            Type::U16 => self.emit(&I32FromU16),
            Type::S32 => self.emit(&I32FromS32),
            Type::U32 => self.emit(&I32FromU32),
            Type::S64 => self.emit(&I64FromS64),
            Type::U64 => self.emit(&I64FromU64),
            Type::Char => self.emit(&I32FromChar),
            Type::F32 => self.emit(&CoreF32FromF32),
            Type::F64 => self.emit(&CoreF64FromF64),
            Type::String => {
                let realloc = self.list_realloc();
                self.emit(&StringLower { realloc });
            }
            Type::ErrorContext => self.emit(&ErrorContextLower),
            Type::Id(id) => match &self.resolve.types[id].kind {
                TypeDefKind::Type(t) => self.lower(t),
                TypeDefKind::List(element) => {
                    let realloc = self.list_realloc();
                    if self.bindgen.is_list_canonical(self.resolve, element) {
                        self.emit(&ListCanonLower { element, realloc });
                    } else {
                        self.push_block();
                        self.emit(&IterElem { element });
                        self.emit(&IterBasePointer);
                        let addr = self.stack.pop().unwrap();
                        self.write_to_memory(element, addr, Default::default());
                        self.finish_block(0);
                        self.emit(&ListLower { element, realloc });
                    }
                }
                TypeDefKind::Handle(handle) => {
                    let (Handle::Own(ty) | Handle::Borrow(ty)) = handle;
                    self.emit(&HandleLower {
                        handle,
                        ty: id,
                        name: self.resolve.types[*ty].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Resource => {
                    todo!();
                }
                TypeDefKind::Record(record) => {
                    self.emit(&RecordLower {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                    let values = self
                        .stack
                        .drain(self.stack.len() - record.fields.len()..)
                        .collect::<Vec<_>>();
                    for (field, value) in record.fields.iter().zip(values) {
                        self.stack.push(value);
                        self.lower(&field.ty);
                    }
                }
                TypeDefKind::Tuple(tuple) => {
                    self.emit(&TupleLower { tuple, ty: id });
                    let values = self
                        .stack
                        .drain(self.stack.len() - tuple.types.len()..)
                        .collect::<Vec<_>>();
                    for (ty, value) in tuple.types.iter().zip(values) {
                        self.stack.push(value);
                        self.lower(ty);
                    }
                }

                TypeDefKind::Flags(flags) => {
                    self.emit(&FlagsLower {
                        flags,
                        ty: id,
                        name: self.resolve.types[id].name.as_ref().unwrap(),
                    });
                }

                TypeDefKind::Variant(v) => {
                    let results =
                        self.lower_variant_arms(ty, v.cases.iter().map(|c| c.ty.as_ref()));
                    self.emit(&VariantLower {
                        variant: v,
                        ty: id,
                        results: &results,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Enum(enum_) => {
                    self.emit(&EnumLower {
                        enum_,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Option(t) => {
                    let results = self.lower_variant_arms(ty, [None, Some(t)]);
                    self.emit(&OptionLower {
                        payload: t,
                        ty: id,
                        results: &results,
                    });
                }
                TypeDefKind::Result(r) => {
                    let results = self.lower_variant_arms(ty, [r.ok.as_ref(), r.err.as_ref()]);
                    self.emit(&ResultLower {
                        result: r,
                        ty: id,
                        results: &results,
                    });
                }
                TypeDefKind::Future(ty) => {
                    self.emit(&FutureLower {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Stream(ty) => {
                    self.emit(&StreamLower {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Unknown => unreachable!(),
            },
        }
    }

    fn lower_variant_arms<'b>(
        &mut self,
        ty: &Type,
        cases: impl IntoIterator<Item = Option<&'b Type>>,
    ) -> Vec<WasmType> {
        use Instruction::*;
        let mut results = Vec::new();
        let mut temp = Vec::new();
        let mut casts = Vec::new();
        self.resolve.push_flat(ty, &mut results);
        for (i, ty) in cases.into_iter().enumerate() {
            self.push_block();
            self.emit(&VariantPayloadName);
            let payload_name = self.stack.pop().unwrap();
            self.emit(&I32Const { val: i as i32 });
            let mut pushed = 1;
            if let Some(ty) = ty {
                // Using the payload of this block we lower the type to
                // raw wasm values.
                self.stack.push(payload_name);
                self.lower(ty);

                // Determine the types of all the wasm values we just
                // pushed, and record how many. If we pushed too few
                // then we'll need to push some zeros after this.
                temp.truncate(0);
                self.resolve.push_flat(ty, &mut temp);
                pushed += temp.len();

                // For all the types pushed we may need to insert some
                // bitcasts. This will go through and cast everything
                // to the right type to ensure all blocks produce the
                // same set of results.
                casts.truncate(0);
                for (actual, expected) in temp.iter().zip(&results[1..]) {
                    casts.push(cast(*actual, *expected));
                }
                if casts.iter().any(|c| *c != Bitcast::None) {
                    self.emit(&Bitcasts { casts: &casts });
                }
            }

            // If we haven't pushed enough items in this block to match
            // what other variants are pushing then we need to push
            // some zeros.
            if pushed < results.len() {
                self.emit(&ConstZero {
                    tys: &results[pushed..],
                });
            }
            self.finish_block(results.len());
        }
        results
    }
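
    // Worked example: lowering `result<f32, u64>`. `push_flat` yields
    // `[I32, I64]` (the discriminant plus the two payload types joined into
    // one `i64` slot). The `ok` arm pushes `i32 0`, lowers its `f32`, and
    // bitcasts it with `F32ToI64` so both arms leave the same `[i32, i64]`
    // shape; the `err` arm pushes `i32 1` and its `u64` payload needs no cast.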

    fn list_realloc(&self) -> Option<&'static str> {
        // Lowering parameters calling a wasm import _or_ returning a result
        // from an async-lifted wasm export means we don't need to pass
        // ownership, but we pass ownership in all other cases.
        match (self.variant, self.lift_lower, self.async_) {
            (AbiVariant::GuestImport, LiftLower::LowerArgsLiftResults, _)
            | (AbiVariant::GuestExport, LiftLower::LiftArgsLowerResults, true) => None,
            _ => Some("cabi_realloc"),
        }
    }

    /// Note that in general everything in this function is the opposite of the
    /// `lower` function above. This is intentional and should be kept this way!
    fn lift(&mut self, ty: &Type) {
        use Instruction::*;

        match *ty {
            Type::Bool => self.emit(&BoolFromI32),
            Type::S8 => self.emit(&S8FromI32),
            Type::U8 => self.emit(&U8FromI32),
            Type::S16 => self.emit(&S16FromI32),
            Type::U16 => self.emit(&U16FromI32),
            Type::S32 => self.emit(&S32FromI32),
            Type::U32 => self.emit(&U32FromI32),
            Type::S64 => self.emit(&S64FromI64),
            Type::U64 => self.emit(&U64FromI64),
            Type::Char => self.emit(&CharFromI32),
            Type::F32 => self.emit(&F32FromCoreF32),
            Type::F64 => self.emit(&F64FromCoreF64),
            Type::String => self.emit(&StringLift),
            Type::ErrorContext => self.emit(&ErrorContextLift),
            Type::Id(id) => match &self.resolve.types[id].kind {
                TypeDefKind::Type(t) => self.lift(t),
                TypeDefKind::List(element) => {
                    if self.bindgen.is_list_canonical(self.resolve, element) {
                        self.emit(&ListCanonLift { element, ty: id });
                    } else {
                        self.push_block();
                        self.emit(&IterBasePointer);
                        let addr = self.stack.pop().unwrap();
                        self.read_from_memory(element, addr, Default::default());
                        self.finish_block(1);
                        self.emit(&ListLift { element, ty: id });
                    }
                }
                TypeDefKind::Handle(handle) => {
                    let (Handle::Own(ty) | Handle::Borrow(ty)) = handle;
                    self.emit(&HandleLift {
                        handle,
                        ty: id,
                        name: self.resolve.types[*ty].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Resource => {
                    todo!();
                }
                TypeDefKind::Record(record) => {
                    let mut temp = Vec::new();
                    self.resolve.push_flat(ty, &mut temp);
                    let mut args = self
                        .stack
                        .drain(self.stack.len() - temp.len()..)
                        .collect::<Vec<_>>();
                    for field in record.fields.iter() {
                        temp.truncate(0);
                        self.resolve.push_flat(&field.ty, &mut temp);
                        self.stack.extend(args.drain(..temp.len()));
                        self.lift(&field.ty);
                    }
                    self.emit(&RecordLift {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Tuple(tuple) => {
                    let mut temp = Vec::new();
                    self.resolve.push_flat(ty, &mut temp);
                    let mut args = self
                        .stack
                        .drain(self.stack.len() - temp.len()..)
                        .collect::<Vec<_>>();
                    for ty in tuple.types.iter() {
                        temp.truncate(0);
                        self.resolve.push_flat(ty, &mut temp);
                        self.stack.extend(args.drain(..temp.len()));
                        self.lift(ty);
                    }
                    self.emit(&TupleLift { tuple, ty: id });
                }
                TypeDefKind::Flags(flags) => {
                    self.emit(&FlagsLift {
                        flags,
                        ty: id,
                        name: self.resolve.types[id].name.as_ref().unwrap(),
                    });
                }

                TypeDefKind::Variant(v) => {
                    self.lift_variant_arms(ty, v.cases.iter().map(|c| c.ty.as_ref()));
                    self.emit(&VariantLift {
                        variant: v,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Enum(enum_) => {
                    self.emit(&EnumLift {
                        enum_,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.lift_variant_arms(ty, [None, Some(t)]);
                    self.emit(&OptionLift { payload: t, ty: id });
                }

                TypeDefKind::Result(r) => {
                    self.lift_variant_arms(ty, [r.ok.as_ref(), r.err.as_ref()]);
                    self.emit(&ResultLift { result: r, ty: id });
                }

                TypeDefKind::Future(ty) => {
                    self.emit(&FutureLift {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Stream(ty) => {
                    self.emit(&StreamLift {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Unknown => unreachable!(),
            },
        }
    }

    fn lift_variant_arms<'b>(
        &mut self,
        ty: &Type,
        cases: impl IntoIterator<Item = Option<&'b Type>>,
    ) {
        let mut params = Vec::new();
        let mut temp = Vec::new();
        let mut casts = Vec::new();
        self.resolve.push_flat(ty, &mut params);
        let block_inputs = self
            .stack
            .drain(self.stack.len() + 1 - params.len()..)
            .collect::<Vec<_>>();
        for ty in cases {
            self.push_block();
            if let Some(ty) = ty {
                // Push only the values we need for this variant onto
                // the stack.
                temp.truncate(0);
                self.resolve.push_flat(ty, &mut temp);
                self.stack
                    .extend(block_inputs[..temp.len()].iter().cloned());

                // Cast all the types we have on the stack to the actual
                // types needed for this variant, if necessary.
                casts.truncate(0);
                for (actual, expected) in temp.iter().zip(&params[1..]) {
                    casts.push(cast(*expected, *actual));
                }
                if casts.iter().any(|c| *c != Bitcast::None) {
                    self.emit(&Instruction::Bitcasts { casts: &casts });
                }

                // Then recursively lift this variant's payload.
                self.lift(ty);
            }
            self.finish_block(ty.is_some() as usize);
        }
    }

    fn write_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        use Instruction::*;

        match *ty {
            // Builtin types need different flavors of storage instructions
            // depending on the size of the value written.
            Type::Bool | Type::U8 | Type::S8 => {
                self.lower_and_emit(ty, addr, &I32Store8 { offset })
            }
            Type::U16 | Type::S16 => self.lower_and_emit(ty, addr, &I32Store16 { offset }),
            Type::U32 | Type::S32 | Type::Char => {
                self.lower_and_emit(ty, addr, &I32Store { offset })
            }
            Type::U64 | Type::S64 => self.lower_and_emit(ty, addr, &I64Store { offset }),
            Type::F32 => self.lower_and_emit(ty, addr, &F32Store { offset }),
            Type::F64 => self.lower_and_emit(ty, addr, &F64Store { offset }),
            Type::String => self.write_list_to_memory(ty, addr, offset),
            Type::ErrorContext => self.lower_and_emit(ty, addr, &I32Store { offset }),

            Type::Id(id) => match &self.resolve.types[id].kind {
                TypeDefKind::Type(t) => self.write_to_memory(t, addr, offset),
                TypeDefKind::List(_) => self.write_list_to_memory(ty, addr, offset),

                TypeDefKind::Future(_) | TypeDefKind::Stream(_) | TypeDefKind::Handle(_) => {
                    self.lower_and_emit(ty, addr, &I32Store { offset })
                }

                // Decompose the record into its components and then write all
                // the components into memory one-by-one.
                TypeDefKind::Record(record) => {
                    self.emit(&RecordLower {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                    self.write_fields_to_memory(record.fields.iter().map(|f| &f.ty), addr, offset);
                }
                TypeDefKind::Resource => {
                    todo!()
                }
                TypeDefKind::Tuple(tuple) => {
                    self.emit(&TupleLower { tuple, ty: id });
                    self.write_fields_to_memory(tuple.types.iter(), addr, offset);
                }

                TypeDefKind::Flags(f) => {
                    self.lower(ty);
                    match f.repr() {
                        FlagsRepr::U8 => {
                            self.stack.push(addr);
                            self.store_intrepr(offset, Int::U8);
                        }
                        FlagsRepr::U16 => {
                            self.stack.push(addr);
                            self.store_intrepr(offset, Int::U16);
                        }
                        FlagsRepr::U32(n) => {
                            for i in (0..n).rev() {
                                self.stack.push(addr.clone());
                                self.emit(&I32Store {
                                    offset: offset.add_bytes(i * 4),
                                });
                            }
                        }
                    }
                }

                // Each case will get its own block, and the first item in each
                // case is writing the discriminant. After that if we have a
                // payload we write the payload after the discriminant, aligned up
                // to the type's alignment.
                TypeDefKind::Variant(v) => {
                    self.write_variant_arms_to_memory(
                        offset,
                        addr,
                        v.tag(),
                        v.cases.iter().map(|c| c.ty.as_ref()),
                    );
                    self.emit(&VariantLower {
                        variant: v,
                        ty: id,
                        results: &[],
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.write_variant_arms_to_memory(offset, addr, Int::U8, [None, Some(t)]);
                    self.emit(&OptionLower {
                        payload: t,
                        ty: id,
                        results: &[],
                    });
                }

                TypeDefKind::Result(r) => {
                    self.write_variant_arms_to_memory(
                        offset,
                        addr,
                        Int::U8,
                        [r.ok.as_ref(), r.err.as_ref()],
                    );
                    self.emit(&ResultLower {
                        result: r,
                        ty: id,
                        results: &[],
                    });
                }

                TypeDefKind::Enum(e) => {
                    self.lower(ty);
                    self.stack.push(addr);
                    self.store_intrepr(offset, e.tag());
                }

                TypeDefKind::Unknown => unreachable!(),
            },
        }
    }
1727
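    // For illustration, `write_to_memory` above would emit roughly the
    // following for a hypothetical `tuple<u32, u64>` (offsets come from
    // `SizeAlign::field_offsets`, which aligns the `u64` field up to 8
    // bytes):
    //
    //     TupleLower { .. }       // split the tuple into two operands
    //     I32Store { offset: +0 } // first field
    //     I64Store { offset: +8 } // second field, 8-byte aligned
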
    fn write_params_to_memory<'b>(
        &mut self,
        params: impl IntoIterator<Item = &'b Type, IntoIter: ExactSizeIterator>,
        addr: B::Operand,
        offset: ArchitectureSize,
    ) {
        self.write_fields_to_memory(params, addr, offset);
    }

    fn write_variant_arms_to_memory<'b>(
        &mut self,
        offset: ArchitectureSize,
        addr: B::Operand,
        tag: Int,
        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
    ) {
        let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
        for (i, ty) in cases.into_iter().enumerate() {
            self.push_block();
            self.emit(&Instruction::VariantPayloadName);
            let payload_name = self.stack.pop().unwrap();
            self.emit(&Instruction::I32Const { val: i as i32 });
            self.stack.push(addr.clone());
            self.store_intrepr(offset, tag);
            if let Some(ty) = ty {
                self.stack.push(payload_name.clone());
                self.write_to_memory(ty, addr.clone(), payload_offset);
            }
            self.finish_block(0);
        }
    }

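    // Layout produced by `write_variant_arms_to_memory`: the tag is stored
    // at `offset` and every payload at the same `offset + payload_offset`,
    // which is the tag size rounded up to the largest payload alignment.
    // For a hypothetical `variant { a(u8), b(u64) }` the `u8` tag sits at
    // +0 and either payload would be written at +8, whichever case is
    // active.
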
    fn write_list_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        // After lowering the list there are two i32 values on the stack,
        // which we write into memory: the pointer into the low address and
        // the length into the high address.
        self.lower(ty);
        self.stack.push(addr.clone());
        self.emit(&Instruction::LengthStore {
            offset: offset + self.bindgen.sizes().align(ty).into(),
        });
        self.stack.push(addr);
        self.emit(&Instruction::PointerStore { offset });
    }

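    // Lists and strings are stored as a (pointer, length) pair. On wasm32,
    // with 4-byte pointers, the pointer lands at `offset` and the length at
    // `offset + 4`; `sizes().align(ty)` is the pointer alignment, which is
    // why it doubles as the length field's offset above.
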
    fn write_fields_to_memory<'b>(
        &mut self,
        tys: impl IntoIterator<Item = &'b Type, IntoIter: ExactSizeIterator>,
        addr: B::Operand,
        offset: ArchitectureSize,
    ) {
        let tys = tys.into_iter();
        let fields = self
            .stack
            .drain(self.stack.len() - tys.len()..)
            .collect::<Vec<_>>();
        for ((field_offset, ty), op) in self
            .bindgen
            .sizes()
            .field_offsets(tys)
            .into_iter()
            .zip(fields)
        {
            self.stack.push(op);
            self.write_to_memory(ty, addr.clone(), offset + (field_offset));
        }
    }

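    // For reference, `field_offsets` lays fields out in declaration order
    // with per-field alignment and no reordering: a hypothetical
    // `record { a: u8, b: u32, c: u8 }` yields offsets +0, +4 and +8, since
    // `b` must be aligned up to 4 bytes.
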
    fn lower_and_emit(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
        self.lower(ty);
        self.stack.push(addr);
        self.emit(instr);
    }

    fn read_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        use Instruction::*;

        match *ty {
            Type::Bool => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
            Type::U8 => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
            Type::S8 => self.emit_and_lift(ty, addr, &I32Load8S { offset }),
            Type::U16 => self.emit_and_lift(ty, addr, &I32Load16U { offset }),
            Type::S16 => self.emit_and_lift(ty, addr, &I32Load16S { offset }),
            Type::U32 | Type::S32 | Type::Char => self.emit_and_lift(ty, addr, &I32Load { offset }),
            Type::U64 | Type::S64 => self.emit_and_lift(ty, addr, &I64Load { offset }),
            Type::F32 => self.emit_and_lift(ty, addr, &F32Load { offset }),
            Type::F64 => self.emit_and_lift(ty, addr, &F64Load { offset }),
            Type::String => self.read_list_from_memory(ty, addr, offset),
            Type::ErrorContext => self.emit_and_lift(ty, addr, &I32Load { offset }),

            Type::Id(id) => match &self.resolve.types[id].kind {
                TypeDefKind::Type(t) => self.read_from_memory(t, addr, offset),

                TypeDefKind::List(_) => self.read_list_from_memory(ty, addr, offset),

                TypeDefKind::Future(_) | TypeDefKind::Stream(_) | TypeDefKind::Handle(_) => {
                    self.emit_and_lift(ty, addr, &I32Load { offset })
                }

                TypeDefKind::Resource => {
                    todo!();
                }

                // Read and lift each field individually, adjusting the offset
                // as we go along, then aggregate all the fields into the
                // record.
                TypeDefKind::Record(record) => {
                    self.read_fields_from_memory(record.fields.iter().map(|f| &f.ty), addr, offset);
                    self.emit(&RecordLift {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Tuple(tuple) => {
                    self.read_fields_from_memory(&tuple.types, addr, offset);
                    self.emit(&TupleLift { tuple, ty: id });
                }

                TypeDefKind::Flags(f) => {
                    match f.repr() {
                        FlagsRepr::U8 => {
                            self.stack.push(addr);
                            self.load_intrepr(offset, Int::U8);
                        }
                        FlagsRepr::U16 => {
                            self.stack.push(addr);
                            self.load_intrepr(offset, Int::U16);
                        }
                        FlagsRepr::U32(n) => {
                            for i in 0..n {
                                self.stack.push(addr.clone());
                                self.emit(&I32Load {
                                    offset: offset.add_bytes(i * 4),
                                });
                            }
                        }
                    }
                    self.lift(ty);
                }

                // Each case will get its own block, and we'll dispatch to the
                // right block based on the `i32.load` we initially perform. Each
                // individual block is pretty simple and just reads the payload type
                // from the corresponding offset if one is available.
                TypeDefKind::Variant(variant) => {
                    self.read_variant_arms_from_memory(
                        offset,
                        addr,
                        variant.tag(),
                        variant.cases.iter().map(|c| c.ty.as_ref()),
                    );
                    self.emit(&VariantLift {
                        variant,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.read_variant_arms_from_memory(offset, addr, Int::U8, [None, Some(t)]);
                    self.emit(&OptionLift { payload: t, ty: id });
                }

                TypeDefKind::Result(r) => {
                    self.read_variant_arms_from_memory(
                        offset,
                        addr,
                        Int::U8,
                        [r.ok.as_ref(), r.err.as_ref()],
                    );
                    self.emit(&ResultLift { result: r, ty: id });
                }

                TypeDefKind::Enum(e) => {
                    self.stack.push(addr.clone());
                    self.load_intrepr(offset, e.tag());
                    self.lift(ty);
                }

                TypeDefKind::Unknown => unreachable!(),
            },
        }
    }

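    // Reading mirrors writing. For a hypothetical `option<u32>`,
    // `read_from_memory` loads the `u8` discriminant via `I32Load8U`, then
    // emits one block per case (the `None` block reads nothing; the `Some`
    // block performs an `I32Load` at the payload offset) before lifting the
    // whole value with `OptionLift`.
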
    fn read_results_from_memory(
        &mut self,
        result: &Option<Type>,
        addr: B::Operand,
        offset: ArchitectureSize,
    ) {
        self.read_fields_from_memory(result, addr, offset)
    }

    fn read_variant_arms_from_memory<'b>(
        &mut self,
        offset: ArchitectureSize,
        addr: B::Operand,
        tag: Int,
        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
    ) {
        self.stack.push(addr.clone());
        self.load_intrepr(offset, tag);
        let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
        for ty in cases {
            self.push_block();
            if let Some(ty) = ty {
                self.read_from_memory(ty, addr.clone(), payload_offset);
            }
            self.finish_block(ty.is_some() as usize);
        }
    }

    fn read_list_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        // Read the pointer/length and then perform the standard lifting
        // process.
        self.stack.push(addr.clone());
        self.emit(&Instruction::PointerLoad { offset });
        self.stack.push(addr);
        self.emit(&Instruction::LengthLoad {
            offset: offset + self.bindgen.sizes().align(ty).into(),
        });
        self.lift(ty);
    }

    fn read_fields_from_memory<'b>(
        &mut self,
        tys: impl IntoIterator<Item = &'b Type>,
        addr: B::Operand,
        offset: ArchitectureSize,
    ) {
        for (field_offset, ty) in self.bindgen.sizes().field_offsets(tys).iter() {
            self.read_from_memory(ty, addr.clone(), offset + (*field_offset));
        }
    }

    fn emit_and_lift(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
        self.stack.push(addr);
        self.emit(instr);
        self.lift(ty);
    }

    fn load_intrepr(&mut self, offset: ArchitectureSize, repr: Int) {
        self.emit(&match repr {
            Int::U64 => Instruction::I64Load { offset },
            Int::U32 => Instruction::I32Load { offset },
            Int::U16 => Instruction::I32Load16U { offset },
            Int::U8 => Instruction::I32Load8U { offset },
        });
    }

    fn store_intrepr(&mut self, offset: ArchitectureSize, repr: Int) {
        self.emit(&match repr {
            Int::U64 => Instruction::I64Store { offset },
            Int::U32 => Instruction::I32Store { offset },
            Int::U16 => Instruction::I32Store16 { offset },
            Int::U8 => Instruction::I32Store8 { offset },
        });
    }

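    // Discriminants narrower than 32 bits still travel as `i32` on the
    // wasm stack, so the two helpers above select the matching widths:
    //
    //     Int::U8  -> I32Load8U  / I32Store8
    //     Int::U16 -> I32Load16U / I32Store16
    //     Int::U32 -> I32Load    / I32Store
    //     Int::U64 -> I64Load    / I64Store
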
    fn deallocate(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        use Instruction::*;

        // No need to execute any instructions if this type itself doesn't
        // require any form of post-return.
        if !needs_post_return(self.resolve, ty) {
            return;
        }

        match *ty {
            Type::String => {
                self.stack.push(addr.clone());
                self.emit(&Instruction::PointerLoad { offset });
                self.stack.push(addr);
                self.emit(&Instruction::LengthLoad {
                    offset: offset + self.bindgen.sizes().align(ty).into(),
                });
                self.emit(&Instruction::GuestDeallocateString);
            }

            Type::Bool
            | Type::U8
            | Type::S8
            | Type::U16
            | Type::S16
            | Type::U32
            | Type::S32
            | Type::Char
            | Type::U64
            | Type::S64
            | Type::F32
            | Type::F64
            | Type::ErrorContext => {}

            Type::Id(id) => match &self.resolve.types[id].kind {
                TypeDefKind::Type(t) => self.deallocate(t, addr, offset),

                TypeDefKind::List(element) => {
                    self.stack.push(addr.clone());
                    self.emit(&Instruction::PointerLoad { offset });
                    self.stack.push(addr);
                    self.emit(&Instruction::LengthLoad {
                        offset: offset + self.bindgen.sizes().align(ty).into(),
                    });

                    self.push_block();
                    self.emit(&IterBasePointer);
                    let elemaddr = self.stack.pop().unwrap();
                    self.deallocate(element, elemaddr, Default::default());
                    self.finish_block(0);

                    self.emit(&Instruction::GuestDeallocateList { element });
                }

                TypeDefKind::Handle(_) => {
                    todo!()
                }

                TypeDefKind::Resource => {
                    todo!()
                }

                TypeDefKind::Record(record) => {
                    self.deallocate_fields(
                        &record.fields.iter().map(|f| f.ty).collect::<Vec<_>>(),
                        addr,
                        offset,
                    );
                }

                TypeDefKind::Tuple(tuple) => {
                    self.deallocate_fields(&tuple.types, addr, offset);
                }

                TypeDefKind::Flags(_) => {}

                TypeDefKind::Variant(variant) => {
                    self.deallocate_variant(
                        offset,
                        addr,
                        variant.tag(),
                        variant.cases.iter().map(|c| c.ty.as_ref()),
                    );
                    self.emit(&GuestDeallocateVariant {
                        blocks: variant.cases.len(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.deallocate_variant(offset, addr, Int::U8, [None, Some(t)]);
                    self.emit(&GuestDeallocateVariant { blocks: 2 });
                }

                TypeDefKind::Result(e) => {
                    self.deallocate_variant(offset, addr, Int::U8, [e.ok.as_ref(), e.err.as_ref()]);
                    self.emit(&GuestDeallocateVariant { blocks: 2 });
                }

                TypeDefKind::Enum(_) => {}

                TypeDefKind::Future(_) => todo!("deallocate future in memory"),
                TypeDefKind::Stream(_) => todo!("deallocate stream in memory"),
                TypeDefKind::Unknown => unreachable!(),
            },
        }
    }

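    // `deallocate` backs the post-return cleanup pass and only walks types
    // for which `needs_post_return` holds. For a hypothetical
    // `list<string>` it loads the outer pointer/length, emits a per-element
    // block (via `IterBasePointer`) that frees each inner string, and then
    // emits `GuestDeallocateList` for the outer buffer itself.
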
    fn deallocate_variant<'b>(
        &mut self,
        offset: ArchitectureSize,
        addr: B::Operand,
        tag: Int,
        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
    ) {
        self.stack.push(addr.clone());
        self.load_intrepr(offset, tag);
        let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
        for ty in cases {
            self.push_block();
            if let Some(ty) = ty {
                self.deallocate(ty, addr.clone(), payload_offset);
            }
            self.finish_block(0);
        }
    }

    fn deallocate_fields(&mut self, tys: &[Type], addr: B::Operand, offset: ArchitectureSize) {
        for (field_offset, ty) in self.bindgen.sizes().field_offsets(tys) {
            self.deallocate(ty, addr.clone(), offset + (field_offset));
        }
    }
}

fn cast(from: WasmType, to: WasmType) -> Bitcast {
    use WasmType::*;

    match (from, to) {
        (I32, I32)
        | (I64, I64)
        | (F32, F32)
        | (F64, F64)
        | (Pointer, Pointer)
        | (PointerOrI64, PointerOrI64)
        | (Length, Length) => Bitcast::None,

        (I32, I64) => Bitcast::I32ToI64,
        (F32, I32) => Bitcast::F32ToI32,
        (F64, I64) => Bitcast::F64ToI64,

        (I64, I32) => Bitcast::I64ToI32,
        (I32, F32) => Bitcast::I32ToF32,
        (I64, F64) => Bitcast::I64ToF64,

        (F32, I64) => Bitcast::F32ToI64,
        (I64, F32) => Bitcast::I64ToF32,

        (I64, PointerOrI64) => Bitcast::I64ToP64,
        (Pointer, PointerOrI64) => Bitcast::PToP64,
        (_, PointerOrI64) => {
            Bitcast::Sequence(Box::new([cast(from, I64), cast(I64, PointerOrI64)]))
        }

        (PointerOrI64, I64) => Bitcast::P64ToI64,
        (PointerOrI64, Pointer) => Bitcast::P64ToP,
        (PointerOrI64, _) => Bitcast::Sequence(Box::new([cast(PointerOrI64, I64), cast(I64, to)])),

        (I32, Pointer) => Bitcast::I32ToP,
        (Pointer, I32) => Bitcast::PToI32,
        (I32, Length) => Bitcast::I32ToL,
        (Length, I32) => Bitcast::LToI32,
        (I64, Length) => Bitcast::I64ToL,
        (Length, I64) => Bitcast::LToI64,
        (Pointer, Length) => Bitcast::PToL,
        (Length, Pointer) => Bitcast::LToP,

        (F32, Pointer | Length) => Bitcast::Sequence(Box::new([cast(F32, I32), cast(I32, to)])),
        (Pointer | Length, F32) => Bitcast::Sequence(Box::new([cast(from, I32), cast(I32, F32)])),

        (F32, F64)
        | (F64, F32)
        | (F64, I32)
        | (I32, F64)
        | (Pointer | Length, I64 | F64)
        | (I64 | F64, Pointer | Length) => {
            unreachable!("Don't know how to bitcast from {:?} to {:?}", from, to);
        }
    }
}
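
// A minimal illustrative sketch exercising `cast`'s two-step composition
// through an `i64` intermediate; it assumes `Bitcast` implements `Debug`
// and `PartialEq` (the `!= Bitcast::None` comparison earlier in this file
// already relies on the latter).
#[cfg(test)]
mod cast_sketch {
    use super::*;

    #[test]
    fn direct_and_composed_casts() {
        // Identical types require no conversion.
        assert_eq!(cast(WasmType::I32, WasmType::I32), Bitcast::None);

        // Widening i32 into an i64 slot is a single-step cast.
        assert_eq!(cast(WasmType::I32, WasmType::I64), Bitcast::I32ToI64);

        // An f32 headed into a `PointerOrI64` slot has no direct variant,
        // so `cast` routes it through i64: f32 -> i64, then i64 -> p64.
        assert_eq!(
            cast(WasmType::F32, WasmType::PointerOrI64),
            Bitcast::Sequence(Box::new([Bitcast::F32ToI64, Bitcast::I64ToP64]))
        );
    }
}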