// wit_bindgen_core/abi.rs

use std::fmt;
pub use wit_parser::abi::{AbiVariant, FlatTypes, WasmSignature, WasmType};
use wit_parser::{
    align_to_arch, Alignment, ArchitectureSize, ElementInfo, Enum, Flags, FlagsRepr, Function,
    Handle, Int, Record, Resolve, Result_, SizeAlign, Tuple, Type, TypeDefKind, TypeId, Variant,
};

// Helper macro for defining instructions without having to have tons of
// exhaustive `match` statements to update
macro_rules! def_instruction {
    (
        $( #[$enum_attr:meta] )*
        pub enum $name:ident<'a> {
            $(
                $( #[$attr:meta] )*
                $variant:ident $( {
                    $($field:ident : $field_ty:ty $(,)* )*
                } )?
                :
                [$num_popped:expr] => [$num_pushed:expr],
            )*
        }
    ) => {
        $( #[$enum_attr] )*
        pub enum $name<'a> {
            $(
                $( #[$attr] )*
                $variant $( {
                    $(
                        $field : $field_ty,
                    )*
                } )? ,
            )*
        }

        impl $name<'_> {
            /// How many operands does this instruction pop from the stack?
            #[allow(unused_variables)]
            pub fn operands_len(&self) -> usize {
                match self {
                    $(
                        Self::$variant $( {
                            $(
                                $field,
                            )*
                        } )? => $num_popped,
                    )*
                }
            }

            /// How many results does this instruction push onto the stack?
            #[allow(unused_variables)]
            pub fn results_len(&self) -> usize {
                match self {
                    $(
                        Self::$variant $( {
                            $(
                                $field,
                            )*
                        } )? => $num_pushed,
                    )*
                }
            }
        }
    };
}

def_instruction! {
    #[derive(Debug)]
    pub enum Instruction<'a> {
        /// Acquires the specified parameter and places it on the stack.
        /// Depending on the context this may refer to wasm parameters or
        /// interface types parameters.
        GetArg { nth: usize } : [0] => [1],

        // Integer const/manipulation instructions

        /// Pushes the constant `val` onto the stack.
        I32Const { val: i32 } : [0] => [1],
        /// Casts the top N items on the stack using the `Bitcast` enum
        /// provided. Consumes the same number of operands that it produces.
        Bitcasts { casts: &'a [Bitcast] } : [casts.len()] => [casts.len()],
        /// Pushes one constant zero onto the stack for each wasm type given.
        ConstZero { tys: &'a [WasmType] } : [0] => [tys.len()],

        // Memory load/store instructions

        /// Pops a pointer from the stack and loads a little-endian `i32` from
        /// it, using the specified constant offset.
        I32Load { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i8` from
        /// it, using the specified constant offset. The value loaded is then
        /// zero-extended to 32 bits.
        I32Load8U { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i8` from
        /// it, using the specified constant offset. The value loaded is then
        /// sign-extended to 32 bits.
        I32Load8S { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i16` from
        /// it, using the specified constant offset. The value loaded is then
        /// zero-extended to 32 bits.
        I32Load16U { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i16` from
        /// it, using the specified constant offset. The value loaded is then
        /// sign-extended to 32 bits.
        I32Load16S { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i64` from
        /// it, using the specified constant offset.
        I64Load { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `f32` from
        /// it, using the specified constant offset.
        F32Load { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `f64` from
        /// it, using the specified constant offset.
        F64Load { offset: ArchitectureSize } : [1] => [1],

        /// Like `I32Load` or `I64Load`, but for loading pointer values.
        PointerLoad { offset: ArchitectureSize } : [1] => [1],
        /// Like `I32Load` or `I64Load`, but for loading array length values.
        LengthLoad { offset: ArchitectureSize } : [1] => [1],

        /// Pops a pointer from the stack and then an `i32` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        I32Store { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `i32` value.
        /// Stores the low 8 bits of the value in little-endian at the pointer
        /// specified plus the constant `offset`.
        I32Store8 { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `i32` value.
        /// Stores the low 16 bits of the value in little-endian at the pointer
        /// specified plus the constant `offset`.
        I32Store16 { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `i64` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        I64Store { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `f32` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        F32Store { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `f64` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        F64Store { offset: ArchitectureSize } : [2] => [0],

        /// Like `I32Store` or `I64Store`, but for storing pointer values.
        PointerStore { offset: ArchitectureSize } : [2] => [0],
        /// Like `I32Store` or `I64Store`, but for storing array length values.
        LengthStore { offset: ArchitectureSize } : [2] => [0],

        // Scalar lifting/lowering

        /// Converts an interface type `char` value to a 32-bit integer
        /// representing the Unicode scalar value.
        I32FromChar : [1] => [1],
        /// Converts an interface type `u64` value to a wasm `i64`.
        I64FromU64 : [1] => [1],
        /// Converts an interface type `s64` value to a wasm `i64`.
        I64FromS64 : [1] => [1],
        /// Converts an interface type `u32` value to a wasm `i32`.
        I32FromU32 : [1] => [1],
        /// Converts an interface type `s32` value to a wasm `i32`.
        I32FromS32 : [1] => [1],
        /// Converts an interface type `u16` value to a wasm `i32`.
        I32FromU16 : [1] => [1],
        /// Converts an interface type `s16` value to a wasm `i32`.
        I32FromS16 : [1] => [1],
        /// Converts an interface type `u8` value to a wasm `i32`.
        I32FromU8 : [1] => [1],
        /// Converts an interface type `s8` value to a wasm `i32`.
        I32FromS8 : [1] => [1],
        /// Converts an interface type `f32` value to a wasm `f32`.
        ///
        /// This may be a noop for some implementations, but it's here in case
        /// the native language representation of `f32` is different from the
        /// wasm representation of `f32`.
        CoreF32FromF32 : [1] => [1],
        /// Converts an interface type `f64` value to a wasm `f64`.
        ///
        /// This may be a noop for some implementations, but it's here in case
        /// the native language representation of `f64` is different from the
        /// wasm representation of `f64`.
        CoreF64FromF64 : [1] => [1],

        /// Converts a native wasm `i32` to an interface type `s8`.
        ///
        /// This will truncate the upper bits of the `i32`.
        S8FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u8`.
        ///
        /// This will truncate the upper bits of the `i32`.
        U8FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `s16`.
        ///
        /// This will truncate the upper bits of the `i32`.
        S16FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u16`.
        ///
        /// This will truncate the upper bits of the `i32`.
        U16FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `s32`.
        S32FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u32`.
        U32FromI32 : [1] => [1],
        /// Converts a native wasm `i64` to an interface type `s64`.
        S64FromI64 : [1] => [1],
        /// Converts a native wasm `i64` to an interface type `u64`.
        U64FromI64 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `char`.
        ///
        /// It's safe to assume that the `i32` is indeed a valid Unicode code
        /// point.
        CharFromI32 : [1] => [1],
        /// Converts a native wasm `f32` to an interface type `f32`.
        F32FromCoreF32 : [1] => [1],
        /// Converts a native wasm `f64` to an interface type `f64`.
        F64FromCoreF64 : [1] => [1],

        /// Creates a `bool` from an `i32` input, trapping if the `i32` isn't
        /// zero or one.
        BoolFromI32 : [1] => [1],
        /// Creates an `i32` from a `bool` input; the result must be 0 or 1.
        I32FromBool : [1] => [1],

        // lists

        /// Lowers a list where the element's layout in the native language is
        /// expected to match the canonical ABI definition of interface types.
        ///
        /// Pops a list value from the stack and pushes the pointer/length onto
        /// the stack. If `realloc` is set to `Some` then this is expected to
        /// *consume* the list, which means that the data needs to be copied.
        /// An allocation/copy is expected when:
        ///
        /// * A host is calling a wasm export with a list (it needs to copy the
        ///   list into the callee's module, allocating space with `realloc`).
        /// * A wasm export is returning a list (it's expected to use `realloc`
        ///   to give ownership of the list to the caller).
        /// * A host is returning a list in an import definition, meaning that
        ///   space needs to be allocated in the caller with `realloc`.
        ///
        /// A copy does not happen (e.g. `realloc` is `None`) when:
        ///
        /// * A wasm module calls an import with the list. In this situation
        ///   it's expected the caller will know how to access this module's
        ///   memory (e.g. the host has raw access, or wasm-to-wasm
        ///   communication would copy the list).
        ///
        /// If `realloc` is `Some` then the adapter is not responsible for
        /// cleaning up this list because the other end is receiving the
        /// allocation. If `realloc` is `None` then the adapter is responsible
        /// for cleaning up any temporary allocation it created, if any.
        ListCanonLower {
            element: &'a Type,
            realloc: Option<&'a str>,
        } : [1] => [2],

        /// Same as `ListCanonLower`, but used for strings.
        StringLower {
            realloc: Option<&'a str>,
        } : [1] => [2],

        /// Lowers a list where the element's layout in the native language is
        /// not expected to match the canonical ABI definition of interface
        /// types.
        ///
        /// Pops a list value from the stack and pushes the pointer/length onto
        /// the stack. This operation also pops a block from the block stack
        /// which is used as the iteration body for writing each element of the
        /// list consumed.
        ///
        /// The `realloc` field here behaves the same way as in
        /// `ListCanonLower`. It's only set to `None` when a wasm module calls
        /// a declared import. Otherwise lowering in other contexts requires
        /// allocating memory for the receiver to own.
        ListLower {
            element: &'a Type,
            realloc: Option<&'a str>,
        } : [1] => [2],

        /// Lifts a list which has a canonical representation into an interface
        /// types value.
        ///
        /// The term "canonical" representation here means that the
        /// representation of the interface types value in the native language
        /// exactly matches the canonical ABI definition of the type.
        ///
        /// This will consume two `i32` values from the stack, a pointer and a
        /// length, and then produce an interface value list.
        ListCanonLift {
            element: &'a Type,
            ty: TypeId,
        } : [2] => [1],

        /// Same as `ListCanonLift`, but used for strings.
        StringLift : [2] => [1],

        /// Lifts a list into an interface types value.
        ///
        /// This will consume two `i32` values from the stack, a pointer and a
        /// length, and then produce an interface value list.
        ///
        /// This will also pop a block from the block stack which describes how
        /// to read each individual element from the list.
        ListLift {
            element: &'a Type,
            ty: TypeId,
        } : [2] => [1],

        /// Pushes an operand onto the stack representing the list item from
        /// each iteration of the list.
        ///
        /// This is only used inside of blocks related to lowering lists.
        IterElem { element: &'a Type } : [0] => [1],

        /// Pushes an operand onto the stack representing the base pointer of
        /// the next element in a list.
        ///
        /// This is used for both lifting and lowering lists.
        IterBasePointer : [0] => [1],

        // records and tuples

        /// Pops a record value off the stack, decomposes the record into all
        /// of its fields, and then pushes the fields onto the stack.
        RecordLower {
            record: &'a Record,
            name: &'a str,
            ty: TypeId,
        } : [1] => [record.fields.len()],

        /// Pops all fields for a record off the stack and then composes them
        /// into a record.
        RecordLift {
            record: &'a Record,
            name: &'a str,
            ty: TypeId,
        } : [record.fields.len()] => [1],

        /// Creates an `i32` from a handle.
        HandleLower {
            handle: &'a Handle,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Creates a handle from an `i32`.
        HandleLift {
            handle: &'a Handle,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Creates an `i32` from a future.
        FutureLower {
            payload: &'a Option<Type>,
            ty: TypeId,
        } : [1] => [1],

        /// Creates a future from an `i32`.
        FutureLift {
            payload: &'a Option<Type>,
            ty: TypeId,
        } : [1] => [1],

        /// Creates an `i32` from a stream.
        StreamLower {
            payload: &'a Option<Type>,
            ty: TypeId,
        } : [1] => [1],

        /// Creates a stream from an `i32`.
        StreamLift {
            payload: &'a Option<Type>,
            ty: TypeId,
        } : [1] => [1],

        /// Creates an `i32` from an error-context.
        ErrorContextLower : [1] => [1],

        /// Creates an error-context from an `i32`.
        ErrorContextLift : [1] => [1],

        /// Pops a tuple value off the stack, decomposes the tuple into all of
        /// its fields, and then pushes the fields onto the stack.
        TupleLower {
            tuple: &'a Tuple,
            ty: TypeId,
        } : [1] => [tuple.types.len()],

        /// Pops all fields for a tuple off the stack and then composes them
        /// into a tuple.
        TupleLift {
            tuple: &'a Tuple,
            ty: TypeId,
        } : [tuple.types.len()] => [1],

        /// Converts a language-specific record-of-bools to a list of `i32`.
        FlagsLower {
            flags: &'a Flags,
            name: &'a str,
            ty: TypeId,
        } : [1] => [flags.repr().count()],
        /// Converts a list of native wasm `i32` to a language-specific
        /// record-of-bools.
        FlagsLift {
            flags: &'a Flags,
            name: &'a str,
            ty: TypeId,
        } : [flags.repr().count()] => [1],

        // variants

        /// This is a special instruction used by the `VariantLower`
        /// instruction to determine the name of the payload, if present, to
        /// use within each block.
        ///
        /// Each sub-block will have this be the first instruction, and if it
        /// lowers a payload it will expect something bound to this name.
        VariantPayloadName : [0] => [1],

        /// Pops a variant off the stack as well as `ty.cases.len()` blocks
        /// from the code generator. Uses each of those blocks and the value
        /// from the stack to produce `nresults` of items.
        VariantLower {
            variant: &'a Variant,
            name: &'a str,
            ty: TypeId,
            results: &'a [WasmType],
        } : [1] => [results.len()],

        /// Pops an `i32` off the stack as well as `ty.cases.len()` blocks
        /// from the code generator. Uses each of those blocks and the value
        /// from the stack to produce a final variant.
        VariantLift {
            variant: &'a Variant,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Pops an enum off the stack and pushes the `i32` representation.
        EnumLower {
            enum_: &'a Enum,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Pops an `i32` off the stack and lifts it into the `enum` specified.
        EnumLift {
            enum_: &'a Enum,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Specialization of `VariantLower` for specifically `option<T>`
        /// types; otherwise behaves the same as `VariantLower` (e.g. two
        /// blocks for the two cases).
        OptionLower {
            payload: &'a Type,
            ty: TypeId,
            results: &'a [WasmType],
        } : [1] => [results.len()],

        /// Specialization of `VariantLift` for specifically the `option<T>`
        /// type. Otherwise behaves the same as the `VariantLift` instruction
        /// with two blocks for the lift.
        OptionLift {
            payload: &'a Type,
            ty: TypeId,
        } : [1] => [1],

        /// Specialization of `VariantLower` for specifically `result<T, E>`
        /// types; otherwise behaves the same as `VariantLower` (e.g. two
        /// blocks for the two cases).
        ResultLower {
            result: &'a Result_,
            ty: TypeId,
            results: &'a [WasmType],
        } : [1] => [results.len()],

        /// Specialization of `VariantLift` for specifically the `result<T,
        /// E>` type. Otherwise behaves the same as the `VariantLift`
        /// instruction with two blocks for the lift.
        ResultLift {
            result: &'a Result_,
            ty: TypeId,
        } : [1] => [1],

        // calling/control flow

        /// Represents a call to a raw WebAssembly API. The module/name are
        /// provided inline as well as the types if necessary.
        CallWasm {
            name: &'a str,
            sig: &'a WasmSignature,
        } : [sig.params.len()] => [sig.results.len()],

        /// Same as `CallWasm`, except the dual where an interface is being
        /// called rather than a raw wasm function.
        ///
        /// Note that this will be used for async functions, and `async_`
        /// indicates whether the function should be invoked in an async
        /// fashion.
        CallInterface {
            func: &'a Function,
            async_: bool,
        } : [func.params.len()] => [usize::from(func.result.is_some())],

        /// Returns `amt` values on the stack. This is always the last
        /// instruction.
        Return { amt: usize, func: &'a Function } : [*amt] => [0],

        /// Calls the `realloc` function specified in a malloc-like fashion,
        /// allocating `size` bytes with alignment `align`.
        ///
        /// Pushes the returned pointer onto the stack.
        Malloc {
            realloc: &'static str,
            size: ArchitectureSize,
            align: Alignment,
        } : [0] => [1],

        /// Used exclusively for guest-code generation, this indicates that
        /// the standard memory deallocation function needs to be invoked with
        /// the specified parameters.
        ///
        /// This will pop a pointer from the stack and push nothing.
        GuestDeallocate {
            size: ArchitectureSize,
            align: Alignment,
        } : [1] => [0],

        /// Used exclusively for guest-code generation, this indicates that
        /// a string is being deallocated. The ptr/length are popped off the
        /// stack and used to deallocate the string.
        GuestDeallocateString : [2] => [0],

        /// Used exclusively for guest-code generation, this indicates that
        /// a list is being deallocated. The ptr/length are popped off the
        /// stack and used to deallocate the list.
        ///
        /// This variant also pops a block off the block stack to be used as
        /// the body of the deallocation loop.
        GuestDeallocateList {
            element: &'a Type,
        } : [2] => [0],

        /// Used exclusively for guest-code generation, this indicates that
        /// a variant is being deallocated. The integer discriminant is popped
        /// off the stack, as well as `blocks` number of blocks popped from the
        /// block stack. The discriminant is used to select, at runtime, which
        /// of the blocks is executed to deallocate the variant.
        GuestDeallocateVariant {
            blocks: usize,
        } : [1] => [0],

        /// Deallocates the language-specific handle representation on the top
        /// of the stack. Used for async imports.
        DropHandle { ty: &'a Type } : [1] => [0],

        /// Call `task.return` for an async-lifted export.
        ///
        /// This will call the core wasm import `name` which will be mapped to
        /// `task.return` later on. The function given has `params` as its
        /// parameters and it will return no results. This is used to pass the
        /// lowered representation of a function's results to `task.return`.
        AsyncTaskReturn { name: &'a str, params: &'a [WasmType] } : [params.len()] => [0],

        /// Forces the evaluation of the specified number of expressions and
        /// pushes the results onto the stack.
        ///
        /// This is useful prior to disposing of temporary variables and/or
        /// allocations which are referenced by one or more not-yet-evaluated
        /// expressions.
        Flush { amt: usize } : [*amt] => [*amt],
    }
}

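// The stack effects declared above can be checked directly through the
// macro-generated accessors. The test module below is an illustrative sketch
// (not part of the upstream test suite); the module and test names are made
// up for this example.
#[cfg(test)]
mod instruction_stack_effect_tests {
    use super::*;

    #[test]
    fn stack_effects_match_declarations() {
        // `GetArg` pops nothing and pushes the fetched argument.
        let get_arg = Instruction::GetArg { nth: 0 };
        assert_eq!(get_arg.operands_len(), 0);
        assert_eq!(get_arg.results_len(), 1);

        // `Bitcasts` consumes and produces one value per cast in its list.
        let casts = [Bitcast::I32ToI64, Bitcast::F32ToI32];
        let bitcasts = Instruction::Bitcasts { casts: &casts };
        assert_eq!(bitcasts.operands_len(), 2);
        assert_eq!(bitcasts.results_len(), 2);
    }
}
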
#[derive(Debug, PartialEq)]
pub enum Bitcast {
    // Upcasts
    F32ToI32,
    F64ToI64,
    I32ToI64,
    F32ToI64,

    // Downcasts
    I32ToF32,
    I64ToF64,
    I64ToI32,
    I64ToF32,

    // PointerOrI64 conversions. These preserve provenance when the source
    // or destination is a pointer value.
    //
    // These are used when pointer values are being stored in
    // (ToP64) and loaded out of (P64To) PointerOrI64 values, so they
    // always have to preserve provenance when the value being loaded or
    // stored is a pointer.
    P64ToI64,
    I64ToP64,
    P64ToP,
    PToP64,

    // Pointer<->number conversions. These do not preserve provenance.
    //
    // These are used when integer or floating-point values are being stored in
    // (I32ToP/etc.) and loaded out of (PToI32/etc.) pointer values, so they
    // never have any provenance to preserve.
    I32ToP,
    PToI32,
    PToL,
    LToP,

    // Number<->Number conversions.
    I32ToL,
    LToI32,
    I64ToL,
    LToI64,

    // Multiple conversions in sequence.
    Sequence(Box<[Bitcast; 2]>),

    None,
}

/// Whether the glue code surrounding a call is lifting arguments and lowering
/// results or vice versa.
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum LiftLower {
    /// When the glue code lifts arguments and lowers results.
    ///
    /// ```text
    /// Wasm --lift-args--> SourceLanguage; call; SourceLanguage --lower-results--> Wasm
    /// ```
    LiftArgsLowerResults,
    /// When the glue code lowers arguments and lifts results.
    ///
    /// ```text
    /// SourceLanguage --lower-args--> Wasm; call; Wasm --lift-results--> SourceLanguage
    /// ```
    LowerArgsLiftResults,
}

/// Trait for language implementors to use to generate glue code between native
/// WebAssembly signatures and interface types signatures.
///
/// This is used as an implementation detail in interpreting the ABI between
/// interface types and wasm types. Eventually this will be driven by interface
/// types adapters themselves, but for now the ABI of a function dictates what
/// instructions are fed in.
///
/// Types implementing `Bindgen` are incrementally fed `Instruction` values to
/// generate code for. Instructions operate like a stack machine where each
/// instruction has a list of inputs and a list of outputs (provided by the
/// `emit` function).
pub trait Bindgen {
    /// The intermediate type for fragments of code for this type.
    ///
    /// For most languages `String` is a suitable intermediate type.
    type Operand: Clone + fmt::Debug;

    /// Emits code to implement the given instruction.
    ///
    /// Each operand is given in `operands` and can be popped off if ownership
    /// is required. It's guaranteed that `operands` has the appropriate length
    /// for the `inst` given, as specified with [`Instruction`].
    ///
    /// Each result variable should be pushed onto `results`. This function
    /// must push the appropriate number of results or binding generation will
    /// panic.
    fn emit(
        &mut self,
        resolve: &Resolve,
        inst: &Instruction<'_>,
        operands: &mut Vec<Self::Operand>,
        results: &mut Vec<Self::Operand>,
    );

    /// Gets an operand reference to the return pointer area.
    ///
    /// The provided size and alignment are for the function's return type.
    fn return_pointer(&mut self, size: ArchitectureSize, align: Alignment) -> Self::Operand;

    /// Enters a new block of code to generate code for.
    ///
    /// This is currently exclusively used for constructing variants. When a
    /// variant is constructed a block here will be pushed for each case of a
    /// variant, generating the code necessary to translate a variant case.
    ///
    /// Blocks are completed with `finish_block` below. It's expected that
    /// `emit` will always push code (if necessary) into the "current block",
    /// which is updated by calling this method and `finish_block` below.
    fn push_block(&mut self);

    /// Indicates to the code generator that a block is completed, and the
    /// `operand` specified was the resulting value of the block.
    ///
    /// This method will be used to compute the value of each arm of lifting a
    /// variant. The `operand` will be `None` if the variant case didn't
    /// actually have any type associated with it. Otherwise it will be `Some`
    /// as the last value remaining on the stack representing the value
    /// associated with a variant's `case`.
    ///
    /// It's expected that this will resume code generation in the previous
    /// block before `push_block` was called. This must also save the results
    /// of the current block internally for instructions like `ResultLift` to
    /// use later.
    fn finish_block(&mut self, operand: &mut Vec<Self::Operand>);

    /// Returns size information that was previously calculated for all types.
    fn sizes(&self) -> &SizeAlign;

    /// Returns whether or not the specified element type is represented in a
    /// "canonical" form for lists. This dictates whether the `ListCanonLower`
    /// and `ListCanonLift` instructions are used or not.
    fn is_list_canonical(&self, resolve: &Resolve, element: &Type) -> bool;
}

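// A minimal sketch of a `Bindgen` implementation, kept in a test module so it
// stays illustrative rather than normative: operands are plain strings of
// pseudo-code and `emit` records one assignment per result. The names here
// (`PseudoCodeBindgen`, the temporary-variable scheme) are hypothetical; a
// real backend would generate language-specific code for each instruction.
#[cfg(test)]
mod bindgen_sketch {
    use super::*;

    struct PseudoCodeBindgen {
        src: String,
        blocks: Vec<String>,
        tmp: usize,
        sizes: SizeAlign,
    }

    impl Bindgen for PseudoCodeBindgen {
        type Operand = String;

        fn emit(
            &mut self,
            _resolve: &Resolve,
            inst: &Instruction<'_>,
            operands: &mut Vec<String>,
            results: &mut Vec<String>,
        ) {
            // Name a fresh temporary for every result the instruction is
            // declared to push, recording a pseudo-assignment for each.
            for _ in 0..inst.results_len() {
                let var = format!("t{}", self.tmp);
                self.tmp += 1;
                self.src
                    .push_str(&format!("{var} = {inst:?}{operands:?};\n"));
                results.push(var);
            }
        }

        fn return_pointer(&mut self, _size: ArchitectureSize, _align: Alignment) -> String {
            "ret_area".to_string()
        }

        fn push_block(&mut self) {
            // Save the current block and start a fresh one.
            self.blocks.push(std::mem::take(&mut self.src));
        }

        fn finish_block(&mut self, _operands: &mut Vec<String>) {
            // Resume the parent block; a real backend would also stash the
            // finished block's code and result operands for later use.
            self.src = self.blocks.pop().unwrap();
        }

        fn sizes(&self) -> &SizeAlign {
            &self.sizes
        }

        fn is_list_canonical(&self, _resolve: &Resolve, _element: &Type) -> bool {
            false
        }
    }

    #[test]
    fn emit_pushes_declared_number_of_results() {
        let resolve = Resolve::default();
        let mut gen = PseudoCodeBindgen {
            src: String::new(),
            blocks: Vec::new(),
            tmp: 0,
            sizes: SizeAlign::default(),
        };
        let inst = Instruction::GetArg { nth: 0 };
        let (mut operands, mut results) = (Vec::new(), Vec::new());
        gen.emit(&resolve, &inst, &mut operands, &mut results);
        assert_eq!(results.len(), inst.results_len());
    }
}
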
/// Generates an abstract sequence of instructions which represents this
/// function being adapted as an imported function.
///
/// The instructions here, when executed, will emulate a language with
/// interface types calling the concrete wasm implementation. The parameters
/// for the returned instruction sequence are the language's own
/// interface-types parameters. One instruction in the instruction stream
/// will be a `CallWasm` which represents calling the actual raw wasm function
/// signature.
///
/// This function is useful, for example, if you're building a language
/// generator for WASI bindings. This will document how to translate
/// language-specific values into the wasm types to call a WASI function,
/// and it will also automatically convert the results of the WASI function
/// back to a language-specific value.
pub fn call(
    resolve: &Resolve,
    variant: AbiVariant,
    lift_lower: LiftLower,
    func: &Function,
    bindgen: &mut impl Bindgen,
    async_: bool,
) {
    Generator::new(resolve, bindgen).call(func, variant, lift_lower, async_);
}

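// Hedged usage sketch (`MyBindgen` and `generate_import` are hypothetical):
// a guest-side generator lowers arguments and lifts results when binding an
// imported function, so it would drive `call` roughly like this:
//
//     fn generate_import(resolve: &Resolve, func: &Function, gen: &mut MyBindgen) {
//         call(
//             resolve,
//             AbiVariant::GuestImport,
//             LiftLower::LowerArgsLiftResults,
//             func,
//             gen,
//             false, // synchronous import
//         );
//     }
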
pub fn lower_to_memory<B: Bindgen>(
    resolve: &Resolve,
    bindgen: &mut B,
    address: B::Operand,
    value: B::Operand,
    ty: &Type,
) {
    let mut generator = Generator::new(resolve, bindgen);
    // TODO: make this configurable? Right now this function is only called
    // for future/stream callbacks, so it's appropriate to skip realloc here
    // as it's all "lower for wasm import", but this might get reused for
    // something else in the future.
    generator.realloc = Some(Realloc::Export("cabi_realloc"));
    generator.stack.push(value);
    generator.write_to_memory(ty, address, Default::default());
}

pub fn lift_from_memory<B: Bindgen>(
    resolve: &Resolve,
    bindgen: &mut B,
    address: B::Operand,
    ty: &Type,
) -> B::Operand {
    let mut generator = Generator::new(resolve, bindgen);
    generator.read_from_memory(ty, address, Default::default());
    generator.stack.pop().unwrap()
}

/// Used in a similar manner as the `call` function above, except this is
/// used to generate the `post-return` callback for `func`.
///
/// This is only intended to be used in guest generators for exported
/// functions and will primarily generate `GuestDeallocate*` instructions,
/// plus others used as input to those instructions.
pub fn post_return(resolve: &Resolve, func: &Function, bindgen: &mut impl Bindgen) {
    Generator::new(resolve, bindgen).post_return(func);
}

/// Returns whether the `Function` specified needs a post-return function to
/// be generated in guest code.
///
/// This is used when the return value contains a memory allocation, primarily
/// a list or a string.
pub fn guest_export_needs_post_return(resolve: &Resolve, func: &Function) -> bool {
    func.result
        .map(|t| needs_deallocate(resolve, &t, Deallocate::Lists))
        .unwrap_or(false)
}

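// Illustrative examples (not exhaustive): an export returning `string` or
// `list<u32>` hands the caller a guest allocation through the return pointer,
// so it needs a post-return function to free it; an export returning only
// scalars such as `u32` or an `enum` does not.
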
fn needs_deallocate(resolve: &Resolve, ty: &Type, what: Deallocate) -> bool {
    match ty {
        Type::String => true,
        Type::ErrorContext => true,
        Type::Id(id) => match &resolve.types[*id].kind {
            TypeDefKind::List(_) => true,
            TypeDefKind::Type(t) => needs_deallocate(resolve, t, what),
            TypeDefKind::Handle(Handle::Own(_)) => what.handles(),
            TypeDefKind::Handle(Handle::Borrow(_)) => false,
            TypeDefKind::Resource => false,
            TypeDefKind::Record(r) => r
                .fields
                .iter()
                .any(|f| needs_deallocate(resolve, &f.ty, what)),
            TypeDefKind::Tuple(t) => t.types.iter().any(|t| needs_deallocate(resolve, t, what)),
            TypeDefKind::Variant(t) => t
                .cases
                .iter()
                .filter_map(|t| t.ty.as_ref())
                .any(|t| needs_deallocate(resolve, t, what)),
            TypeDefKind::Option(t) => needs_deallocate(resolve, t, what),
            TypeDefKind::Result(t) => [&t.ok, &t.err]
                .iter()
                .filter_map(|t| t.as_ref())
                .any(|t| needs_deallocate(resolve, t, what)),
            TypeDefKind::Flags(_) | TypeDefKind::Enum(_) => false,
            TypeDefKind::Future(_) | TypeDefKind::Stream(_) => what.handles(),
            TypeDefKind::Unknown => unreachable!(),
            TypeDefKind::FixedSizeList(..) => todo!(),
        },

        Type::Bool
        | Type::U8
        | Type::S8
        | Type::U16
        | Type::S16
        | Type::U32
        | Type::S32
        | Type::U64
        | Type::S64
        | Type::F32
        | Type::F64
        | Type::Char => false,
    }
}

/// Generates instructions in `bindgen` to deallocate all lists in `ptr`,
/// where `ptr` points to a sequence of `types` stored in linear memory.
pub fn deallocate_lists_in_types<B: Bindgen>(
    resolve: &Resolve,
    types: &[Type],
    ptr: B::Operand,
    bindgen: &mut B,
) {
    Generator::new(resolve, bindgen).deallocate_in_types(types, ptr, Deallocate::Lists);
}

/// Generates instructions in `bindgen` to deallocate all lists and owned
/// resources in `ptr`, where `ptr` points to a sequence of `types` stored in
/// linear memory.
pub fn deallocate_lists_and_own_in_types<B: Bindgen>(
    resolve: &Resolve,
    types: &[Type],
    ptr: B::Operand,
    bindgen: &mut B,
) {
    Generator::new(resolve, bindgen).deallocate_in_types(types, ptr, Deallocate::ListsAndOwn);
}

#[derive(Copy, Clone)]
pub enum Realloc {
    None,
    Export(&'static str),
}

/// What to deallocate in various `deallocate_*` methods.
#[derive(Copy, Clone)]
enum Deallocate {
    /// Only deallocate lists.
    Lists,
    /// Deallocate lists and owned resources such as `own<T>` and
    /// futures/streams.
    ListsAndOwn,
}

impl Deallocate {
    fn handles(&self) -> bool {
        match self {
            Deallocate::Lists => false,
            Deallocate::ListsAndOwn => true,
        }
    }
}

struct Generator<'a, B: Bindgen> {
    bindgen: &'a mut B,
    resolve: &'a Resolve,
    operands: Vec<B::Operand>,
    results: Vec<B::Operand>,
    stack: Vec<B::Operand>,
    return_pointer: Option<B::Operand>,
    realloc: Option<Realloc>,
}

const MAX_FLAT_PARAMS: usize = 16;

impl<'a, B: Bindgen> Generator<'a, B> {
    fn new(resolve: &'a Resolve, bindgen: &'a mut B) -> Generator<'a, B> {
        Generator {
            resolve,
            bindgen,
            operands: Vec::new(),
            results: Vec::new(),
            stack: Vec::new(),
            return_pointer: None,
            realloc: None,
        }
    }

    fn call(&mut self, func: &Function, variant: AbiVariant, lift_lower: LiftLower, async_: bool) {
        let sig = self.resolve.wasm_signature(variant, func);

        // Lowering parameters when calling a wasm import _or_ returning a
        // result from an async-lifted wasm export means we don't need to pass
        // ownership, but we pass ownership in all other cases.
        let realloc = match (variant, lift_lower, async_) {
            (AbiVariant::GuestImport, LiftLower::LowerArgsLiftResults, _)
            | (AbiVariant::GuestExport, LiftLower::LiftArgsLowerResults, true) => Realloc::None,
            _ => Realloc::Export("cabi_realloc"),
        };
        assert!(self.realloc.is_none());

        match lift_lower {
            LiftLower::LowerArgsLiftResults => {
                assert!(!async_, "generators should not be using this for async");

                self.realloc = Some(realloc);
                if let (AbiVariant::GuestExport, true) = (variant, async_) {
                    unimplemented!("host-side code generation for async lift/lower not supported");
                }

                let lower_to_memory = |self_: &mut Self, ptr: B::Operand| {
                    let mut offset = ArchitectureSize::default();
                    for (nth, (_, ty)) in func.params.iter().enumerate() {
                        self_.emit(&Instruction::GetArg { nth });
                        offset = align_to_arch(offset, self_.bindgen.sizes().align(ty));
                        self_.write_to_memory(ty, ptr.clone(), offset);
                        offset += self_.bindgen.sizes().size(ty);
                    }

                    self_.stack.push(ptr);
                };

                if !sig.indirect_params {
                    // If the parameters for this function aren't indirect
                    // (there aren't too many) then we simply do a normal lower
                    // operation for them all.
                    for (nth, (_, ty)) in func.params.iter().enumerate() {
                        self.emit(&Instruction::GetArg { nth });
                        self.lower(ty);
                    }
                } else {
                    // ... otherwise if parameters are indirect, space is
                    // allocated for them and each argument is lowered
                    // individually into memory.
                    let ElementInfo { size, align } = self
                        .bindgen
                        .sizes()
                        .record(func.params.iter().map(|t| &t.1));
                    let ptr = match variant {
                        // When a wasm module calls an import it will provide
                        // space that isn't explicitly deallocated.
                        AbiVariant::GuestImport => self.bindgen.return_pointer(size, align),
                        // When calling a wasm module from the outside, though,
                        // malloc needs to be called.
                        AbiVariant::GuestExport => {
                            self.emit(&Instruction::Malloc {
                                realloc: "cabi_realloc",
                                size,
                                align,
                            });
                            self.stack.pop().unwrap()
                        }
                        AbiVariant::GuestImportAsync
                        | AbiVariant::GuestExportAsync
                        | AbiVariant::GuestExportAsyncStackful => {
                            unreachable!()
                        }
                    };
                    lower_to_memory(self, ptr);
                }
                self.realloc = None;

                // If necessary we may need to prepare a return pointer for
                // this ABI.
                if variant == AbiVariant::GuestImport && sig.retptr {
                    let info = self.bindgen.sizes().params(&func.result);
                    let ptr = self.bindgen.return_pointer(info.size, info.align);
                    self.return_pointer = Some(ptr.clone());
                    self.stack.push(ptr);
                }

                assert_eq!(self.stack.len(), sig.params.len());
                self.emit(&Instruction::CallWasm {
                    name: &func.name,
                    sig: &sig,
                });

                if !sig.retptr {
                    // With no return pointer in use we can simply lift the
                    // result(s) of the function from the result of the core
                    // wasm function.
                    if let Some(ty) = &func.result {
                        self.lift(ty)
                    }
                } else {
                    let ptr = match variant {
                        // Imports into guests mean it's a wasm module
                        // calling an imported function. We supplied the
                        // return pointer as the last argument (saved in
                        // `self.return_pointer`) so we use that to read
                        // the result of the function from memory.
                        AbiVariant::GuestImport => {
                            assert!(sig.results.is_empty());
                            self.return_pointer.take().unwrap()
                        }

                        // Guest exports mean that this is a host
                        // calling wasm, so wasm returned a pointer to where
                        // the result is stored.
                        AbiVariant::GuestExport => self.stack.pop().unwrap(),

                        AbiVariant::GuestImportAsync
                        | AbiVariant::GuestExportAsync
                        | AbiVariant::GuestExportAsyncStackful => {
                            unreachable!()
                        }
                    };

                    self.read_results_from_memory(
                        &func.result,
                        ptr.clone(),
                        ArchitectureSize::default(),
                    );
                    self.emit(&Instruction::Flush {
                        amt: usize::from(func.result.is_some()),
                    });
                }

                self.emit(&Instruction::Return {
                    func,
                    amt: usize::from(func.result.is_some()),
                });
            }
            LiftLower::LiftArgsLowerResults => {
                if let (AbiVariant::GuestImport, true) = (variant, async_) {
                    todo!("implement host-side support for async lift/lower");
                }

                let read_from_memory = |self_: &mut Self| {
                    let mut offset = ArchitectureSize::default();
                    let ptr = self_.stack.pop().unwrap();
                    for (_, ty) in func.params.iter() {
                        offset = align_to_arch(offset, self_.bindgen.sizes().align(ty));
                        self_.read_from_memory(ty, ptr.clone(), offset);
                        offset += self_.bindgen.sizes().size(ty);
                    }
                };

                if !sig.indirect_params {
                    // If parameters are not passed indirectly then we lift
                    // each argument in succession from the component wasm
                    // types that make up the type.
                    let mut offset = 0;
                    for (_, ty) in func.params.iter() {
                        let types = flat_types(self.resolve, ty).unwrap();
                        for _ in 0..types.len() {
                            self.emit(&Instruction::GetArg { nth: offset });
                            offset += 1;
                        }
                        self.lift(ty);
                    }
                } else {
                    // ... otherwise arguments are read in succession from
                    // memory, where the pointer to the arguments is the first
                    // argument to the function.
                    self.emit(&Instruction::GetArg { nth: 0 });
                    read_from_memory(self);
                }

                // ... and that allows us to call the interface types function.
                self.emit(&Instruction::CallInterface { func, async_ });

                // Asynchronous functions will call `task.return` after the
                // interface function completes, so lowering is conditional
                // based on slightly different logic for the `task.return`
                // intrinsic.
                let (lower_to_memory, async_flat_results) = if async_ {
                    let results = match &func.result {
                        Some(ty) => flat_types(self.resolve, ty),
                        None => Some(Vec::new()),
                    };
                    (results.is_none(), Some(results))
                } else {
                    (sig.retptr, None)
                };

                // This was dynamically allocated by the caller (or async start
                // function) so after it's been read by the guest we need to
                // deallocate it.
                if let AbiVariant::GuestExport = variant {
                    if sig.indirect_params && !async_ {
                        let ElementInfo { size, align } = self
                            .bindgen
                            .sizes()
                            .record(func.params.iter().map(|t| &t.1));
                        self.emit(&Instruction::GetArg { nth: 0 });
                        self.emit(&Instruction::GuestDeallocate { size, align });
                    }
                }

                self.realloc = Some(realloc);

                if !lower_to_memory {
                    // With no return pointer in use we simply lower the
                    // result(s) and return that directly from the function.
                    if let Some(ty) = &func.result {
                        self.lower(ty);
                    }
                } else {
                    match variant {
                        // When a function is imported to a guest this means
                        // it's a host providing the implementation of the
                        // import. The result is stored in the pointer
                        // specified in the last argument, so we get the
                        // pointer here and then write the return value into
                        // it.
                        AbiVariant::GuestImport => {
                            self.emit(&Instruction::GetArg {
                                nth: sig.params.len() - 1,
                            });
                            let ptr = self.stack.pop().unwrap();
                            self.write_params_to_memory(&func.result, ptr, Default::default());
                        }

                        // For a guest export this is a function defined in
                        // wasm, so we're returning a pointer to where the
                        // value was stored. Allocate some space here
                        // (statically) and then write the result into that
                        // memory, returning the pointer at the end.
                        AbiVariant::GuestExport | AbiVariant::GuestExportAsync => {
                            let ElementInfo { size, align } =
                                self.bindgen.sizes().params(&func.result);
                            let ptr = self.bindgen.return_pointer(size, align);
                            self.write_params_to_memory(
                                &func.result,
                                ptr.clone(),
                                Default::default(),
                            );
                            self.stack.push(ptr);
                        }

                        AbiVariant::GuestImportAsync | AbiVariant::GuestExportAsyncStackful => {
                            unreachable!()
                        }
                    }
                }

                if let Some(results) = async_flat_results {
                    let name = &format!("[task-return]{}", func.name);
                    let params = results.as_deref().unwrap_or(&[WasmType::Pointer]);

                    self.emit(&Instruction::AsyncTaskReturn { name, params });
                } else {
                    self.emit(&Instruction::Return {
                        func,
                        amt: sig.results.len(),
                    });
                }
                self.realloc = None;
            }
        }

        assert!(self.realloc.is_none());

        assert!(
            self.stack.is_empty(),
            "stack has {} items remaining: {:?}",
            self.stack.len(),
            self.stack,
        );
    }

    fn post_return(&mut self, func: &Function) {
        let sig = self.resolve.wasm_signature(AbiVariant::GuestExport, func);

        // Currently post-return is only used for lists, and lists are always
        // returned indirectly through memory due to their flat representation
        // having more than one type. Assert that a return pointer is used,
        // though, in case this ever changes.
        assert!(sig.retptr);

        self.emit(&Instruction::GetArg { nth: 0 });
        let addr = self.stack.pop().unwrap();

        let mut types = Vec::new();
        types.extend(func.result);
        self.deallocate_in_types(&types, addr, Deallocate::Lists);

        self.emit(&Instruction::Return { func, amt: 0 });
    }

    fn deallocate_in_types(&mut self, types: &[Type], addr: B::Operand, what: Deallocate) {
        for (offset, ty) in self.bindgen.sizes().field_offsets(types) {
            self.deallocate(ty, addr.clone(), offset, what);
        }

        assert!(
            self.stack.is_empty(),
            "stack has {} items remaining",
            self.stack.len()
        );
    }

    fn emit(&mut self, inst: &Instruction<'_>) {
        self.operands.clear();
        self.results.clear();

        let operands_len = inst.operands_len();
        assert!(
            self.stack.len() >= operands_len,
            "not enough operands on stack for {:?}",
            inst
        );
        self.operands
            .extend(self.stack.drain((self.stack.len() - operands_len)..));
        self.results.reserve(inst.results_len());

        self.bindgen
            .emit(self.resolve, inst, &mut self.operands, &mut self.results);

        assert_eq!(
            self.results.len(),
            inst.results_len(),
            "{:?} expected {} results, got {}",
            inst,
            inst.results_len(),
            self.results.len()
        );
        self.stack.append(&mut self.results);
    }

    fn push_block(&mut self) {
        self.bindgen.push_block();
    }

    fn finish_block(&mut self, size: usize) {
        self.operands.clear();
        assert!(
            size <= self.stack.len(),
            "not enough operands on stack for finishing block",
        );
        self.operands
            .extend(self.stack.drain((self.stack.len() - size)..));
        self.bindgen.finish_block(&mut self.operands);
    }

    fn lower(&mut self, ty: &Type) {
        use Instruction::*;

        match *ty {
            Type::Bool => self.emit(&I32FromBool),
            Type::S8 => self.emit(&I32FromS8),
            Type::U8 => self.emit(&I32FromU8),
            Type::S16 => self.emit(&I32FromS16),
            Type::U16 => self.emit(&I32FromU16),
            Type::S32 => self.emit(&I32FromS32),
            Type::U32 => self.emit(&I32FromU32),
            Type::S64 => self.emit(&I64FromS64),
            Type::U64 => self.emit(&I64FromU64),
            Type::Char => self.emit(&I32FromChar),
            Type::F32 => self.emit(&CoreF32FromF32),
            Type::F64 => self.emit(&CoreF64FromF64),
            Type::String => {
                let realloc = self.list_realloc();
                self.emit(&StringLower { realloc });
            }
            Type::ErrorContext => self.emit(&ErrorContextLower),
            Type::Id(id) => match &self.resolve.types[id].kind {
                TypeDefKind::Type(t) => self.lower(t),
                TypeDefKind::List(element) => {
                    let realloc = self.list_realloc();
                    if self.bindgen.is_list_canonical(self.resolve, element) {
                        self.emit(&ListCanonLower { element, realloc });
                    } else {
                        self.push_block();
                        self.emit(&IterElem { element });
                        self.emit(&IterBasePointer);
                        let addr = self.stack.pop().unwrap();
                        self.write_to_memory(element, addr, Default::default());
                        self.finish_block(0);
                        self.emit(&ListLower { element, realloc });
                    }
                }
                TypeDefKind::Handle(handle) => {
                    let (Handle::Own(ty) | Handle::Borrow(ty)) = handle;
                    self.emit(&HandleLower {
                        handle,
                        ty: id,
                        name: self.resolve.types[*ty].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Resource => {
                    todo!();
                }
                TypeDefKind::Record(record) => {
                    self.emit(&RecordLower {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                    let values = self
                        .stack
                        .drain(self.stack.len() - record.fields.len()..)
                        .collect::<Vec<_>>();
                    for (field, value) in record.fields.iter().zip(values) {
                        self.stack.push(value);
                        self.lower(&field.ty);
                    }
                }
                TypeDefKind::Tuple(tuple) => {
                    self.emit(&TupleLower { tuple, ty: id });
                    let values = self
                        .stack
                        .drain(self.stack.len() - tuple.types.len()..)
                        .collect::<Vec<_>>();
                    for (ty, value) in tuple.types.iter().zip(values) {
                        self.stack.push(value);
                        self.lower(ty);
                    }
                }

                TypeDefKind::Flags(flags) => {
                    self.emit(&FlagsLower {
                        flags,
                        ty: id,
                        name: self.resolve.types[id].name.as_ref().unwrap(),
                    });
                }

                TypeDefKind::Variant(v) => {
                    let results =
                        self.lower_variant_arms(ty, v.cases.iter().map(|c| c.ty.as_ref()));
                    self.emit(&VariantLower {
                        variant: v,
                        ty: id,
                        results: &results,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Enum(enum_) => {
                    self.emit(&EnumLower {
                        enum_,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Option(t) => {
                    let results = self.lower_variant_arms(ty, [None, Some(t)]);
                    self.emit(&OptionLower {
                        payload: t,
                        ty: id,
                        results: &results,
                    });
                }
                TypeDefKind::Result(r) => {
                    let results = self.lower_variant_arms(ty, [r.ok.as_ref(), r.err.as_ref()]);
                    self.emit(&ResultLower {
                        result: r,
                        ty: id,
                        results: &results,
                    });
                }
                TypeDefKind::Future(ty) => {
                    self.emit(&FutureLower {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Stream(ty) => {
                    self.emit(&StreamLower {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedSizeList(..) => todo!(),
            },
        }
    }

    fn lower_variant_arms<'b>(
        &mut self,
        ty: &Type,
        cases: impl IntoIterator<Item = Option<&'b Type>>,
    ) -> Vec<WasmType> {
        use Instruction::*;
        let results = flat_types(self.resolve, ty).unwrap();
        let mut casts = Vec::new();
        for (i, ty) in cases.into_iter().enumerate() {
            self.push_block();
            self.emit(&VariantPayloadName);
            let payload_name = self.stack.pop().unwrap();
            self.emit(&I32Const { val: i as i32 });
            let mut pushed = 1;
            if let Some(ty) = ty {
                // Using the payload of this block we lower the type to
                // raw wasm values.
                self.stack.push(payload_name);
                self.lower(ty);

                // Determine the types of all the wasm values we just
                // pushed, and record how many. If we pushed too few
                // then we'll need to push some zeros after this.
                let temp = flat_types(self.resolve, ty).unwrap();
                pushed += temp.len();

                // For all the types pushed we may need to insert some
                // bitcasts. This will go through and cast everything
                // to the right type to ensure all blocks produce the
                // same set of results.
                casts.truncate(0);
                for (actual, expected) in temp.iter().zip(&results[1..]) {
                    casts.push(cast(*actual, *expected));
                }
                if casts.iter().any(|c| *c != Bitcast::None) {
                    self.emit(&Bitcasts { casts: &casts });
                }
            }

            // If we haven't pushed enough items in this block to match
            // what other variants are pushing then we need to push
            // some zeros.
            if pushed < results.len() {
                self.emit(&ConstZero {
                    tys: &results[pushed..],
                });
            }
            self.finish_block(results.len());
        }
        results
    }

    fn list_realloc(&self) -> Option<&'static str> {
        match self.realloc.expect("realloc should be configured") {
            Realloc::None => None,
            Realloc::Export(s) => Some(s),
        }
    }

    /// Note that in general everything in this function is the opposite of
    /// the `lower` function above. This is intentional and should be kept
    /// this way!
    fn lift(&mut self, ty: &Type) {
        use Instruction::*;

        match *ty {
            Type::Bool => self.emit(&BoolFromI32),
            Type::S8 => self.emit(&S8FromI32),
            Type::U8 => self.emit(&U8FromI32),
            Type::S16 => self.emit(&S16FromI32),
            Type::U16 => self.emit(&U16FromI32),
            Type::S32 => self.emit(&S32FromI32),
            Type::U32 => self.emit(&U32FromI32),
            Type::S64 => self.emit(&S64FromI64),
            Type::U64 => self.emit(&U64FromI64),
            Type::Char => self.emit(&CharFromI32),
            Type::F32 => self.emit(&F32FromCoreF32),
            Type::F64 => self.emit(&F64FromCoreF64),
            Type::String => self.emit(&StringLift),
            Type::ErrorContext => self.emit(&ErrorContextLift),
            Type::Id(id) => match &self.resolve.types[id].kind {
                TypeDefKind::Type(t) => self.lift(t),
                TypeDefKind::List(element) => {
                    if self.bindgen.is_list_canonical(self.resolve, element) {
                        self.emit(&ListCanonLift { element, ty: id });
                    } else {
                        self.push_block();
                        self.emit(&IterBasePointer);
                        let addr = self.stack.pop().unwrap();
                        self.read_from_memory(element, addr, Default::default());
                        self.finish_block(1);
                        self.emit(&ListLift { element, ty: id });
                    }
                }
                TypeDefKind::Handle(handle) => {
                    let (Handle::Own(ty) | Handle::Borrow(ty)) = handle;
                    self.emit(&HandleLift {
                        handle,
                        ty: id,
                        name: self.resolve.types[*ty].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Resource => {
                    todo!();
                }
                TypeDefKind::Record(record) => {
                    let temp = flat_types(self.resolve, ty).unwrap();
                    let mut args = self
                        .stack
                        .drain(self.stack.len() - temp.len()..)
                        .collect::<Vec<_>>();
                    for field in record.fields.iter() {
                        let temp = flat_types(self.resolve, &field.ty).unwrap();
                        self.stack.extend(args.drain(..temp.len()));
                        self.lift(&field.ty);
                    }
                    self.emit(&RecordLift {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Tuple(tuple) => {
                    let temp = flat_types(self.resolve, ty).unwrap();
                    let mut args = self
                        .stack
                        .drain(self.stack.len() - temp.len()..)
                        .collect::<Vec<_>>();
                    for ty in tuple.types.iter() {
                        let temp = flat_types(self.resolve, ty).unwrap();
                        self.stack.extend(args.drain(..temp.len()));
                        self.lift(ty);
                    }
                    self.emit(&TupleLift { tuple, ty: id });
                }
                TypeDefKind::Flags(flags) => {
                    self.emit(&FlagsLift {
                        flags,
                        ty: id,
                        name: self.resolve.types[id].name.as_ref().unwrap(),
                    });
                }

                TypeDefKind::Variant(v) => {
                    self.lift_variant_arms(ty, v.cases.iter().map(|c| c.ty.as_ref()));
                    self.emit(&VariantLift {
                        variant: v,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Enum(enum_) => {
                    self.emit(&EnumLift {
                        enum_,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.lift_variant_arms(ty, [None, Some(t)]);
                    self.emit(&OptionLift { payload: t, ty: id });
                }

                TypeDefKind::Result(r) => {
                    self.lift_variant_arms(ty, [r.ok.as_ref(), r.err.as_ref()]);
                    self.emit(&ResultLift { result: r, ty: id });
                }

                TypeDefKind::Future(ty) => {
                    self.emit(&FutureLift {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Stream(ty) => {
                    self.emit(&StreamLift {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedSizeList(..) => todo!(),
            },
        }
    }

    fn lift_variant_arms<'b>(
        &mut self,
        ty: &Type,
        cases: impl IntoIterator<Item = Option<&'b Type>>,
    ) {
        let params = flat_types(self.resolve, ty).unwrap();
        let mut casts = Vec::new();
        let block_inputs = self
            .stack
            .drain(self.stack.len() + 1 - params.len()..)
            .collect::<Vec<_>>();
        for ty in cases {
            self.push_block();
            if let Some(ty) = ty {
                // Push only the values we need for this variant onto
                // the stack.
                let temp = flat_types(self.resolve, ty).unwrap();
                self.stack
                    .extend(block_inputs[..temp.len()].iter().cloned());

                // Cast all the types we have on the stack to the actual
                // types needed for this variant, if necessary.
                casts.truncate(0);
                for (actual, expected) in temp.iter().zip(&params[1..]) {
                    casts.push(cast(*expected, *actual));
                }
                if casts.iter().any(|c| *c != Bitcast::None) {
                    self.emit(&Instruction::Bitcasts { casts: &casts });
                }

                // Then recursively lift this variant's payload.
                self.lift(ty);
            }
            self.finish_block(ty.is_some() as usize);
        }
    }

1618 fn write_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
1619 use Instruction::*;
1620
1621 match *ty {
1622 // Builtin types need different flavors of storage instructions
1623 // depending on the size of the value written.
1624 Type::Bool | Type::U8 | Type::S8 => {
1625 self.lower_and_emit(ty, addr, &I32Store8 { offset })
1626 }
1627 Type::U16 | Type::S16 => self.lower_and_emit(ty, addr, &I32Store16 { offset }),
1628 Type::U32 | Type::S32 | Type::Char => {
1629 self.lower_and_emit(ty, addr, &I32Store { offset })
1630 }
1631 Type::U64 | Type::S64 => self.lower_and_emit(ty, addr, &I64Store { offset }),
1632 Type::F32 => self.lower_and_emit(ty, addr, &F32Store { offset }),
1633 Type::F64 => self.lower_and_emit(ty, addr, &F64Store { offset }),
1634 Type::String => self.write_list_to_memory(ty, addr, offset),
1635 Type::ErrorContext => self.lower_and_emit(ty, addr, &I32Store { offset }),
1636
1637 Type::Id(id) => match &self.resolve.types[id].kind {
1638 TypeDefKind::Type(t) => self.write_to_memory(t, addr, offset),
1639 TypeDefKind::List(_) => self.write_list_to_memory(ty, addr, offset),
1640
1641 TypeDefKind::Future(_) | TypeDefKind::Stream(_) | TypeDefKind::Handle(_) => {
1642 self.lower_and_emit(ty, addr, &I32Store { offset })
1643 }
1644
1645 // Decompose the record into its components and then write all
1646 // the components into memory one-by-one.
1647 TypeDefKind::Record(record) => {
1648 self.emit(&RecordLower {
1649 record,
1650 ty: id,
1651 name: self.resolve.types[id].name.as_deref().unwrap(),
1652 });
1653 self.write_fields_to_memory(record.fields.iter().map(|f| &f.ty), addr, offset);
1654 }
1655 TypeDefKind::Resource => {
1656 todo!()
1657 }
1658 TypeDefKind::Tuple(tuple) => {
1659 self.emit(&TupleLower { tuple, ty: id });
1660 self.write_fields_to_memory(tuple.types.iter(), addr, offset);
1661 }
1662
1663 TypeDefKind::Flags(f) => {
1664 self.lower(ty);
1665 match f.repr() {
1666 FlagsRepr::U8 => {
1667 self.stack.push(addr);
1668 self.store_intrepr(offset, Int::U8);
1669 }
1670 FlagsRepr::U16 => {
1671 self.stack.push(addr);
1672 self.store_intrepr(offset, Int::U16);
1673 }
1674 FlagsRepr::U32(n) => {
1675 for i in (0..n).rev() {
1676 self.stack.push(addr.clone());
1677 self.emit(&I32Store {
1678 offset: offset.add_bytes(i * 4),
1679 });
1680 }
1681 }
1682 }
1683 }
1684
1685 // Each case will get its own block, and the first item in each
1686 // case is writing the discriminant. After that if we have a
1687 // payload we write the payload after the discriminant, aligned up
1688 // to the type's alignment.
1689 TypeDefKind::Variant(v) => {
1690 self.write_variant_arms_to_memory(
1691 offset,
1692 addr,
1693 v.tag(),
1694 v.cases.iter().map(|c| c.ty.as_ref()),
1695 );
1696 self.emit(&VariantLower {
1697 variant: v,
1698 ty: id,
1699 results: &[],
1700 name: self.resolve.types[id].name.as_deref().unwrap(),
1701 });
1702 }
1703
1704 TypeDefKind::Option(t) => {
1705 self.write_variant_arms_to_memory(offset, addr, Int::U8, [None, Some(t)]);
1706 self.emit(&OptionLower {
1707 payload: t,
1708 ty: id,
1709 results: &[],
1710 });
1711 }
1712
1713 TypeDefKind::Result(r) => {
1714 self.write_variant_arms_to_memory(
1715 offset,
1716 addr,
1717 Int::U8,
1718 [r.ok.as_ref(), r.err.as_ref()],
1719 );
1720 self.emit(&ResultLower {
1721 result: r,
1722 ty: id,
1723 results: &[],
1724 });
1725 }
1726
1727 TypeDefKind::Enum(e) => {
1728 self.lower(ty);
1729 self.stack.push(addr);
1730 self.store_intrepr(offset, e.tag());
1731 }
1732
1733 TypeDefKind::Unknown => unreachable!(),
1734 TypeDefKind::FixedSizeList(..) => todo!(),
1735 },
1736 }
1737 }
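
    // For instance, writing a `u16` lowers it to a core `i32` and emits
    // `I32Store16` (only the low 16 bits reach memory), whereas a `u64`
    // lowers to an `i64` and emits a full `I64Store`.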

    fn write_params_to_memory<'b>(
        &mut self,
        params: impl IntoIterator<Item = &'b Type, IntoIter: ExactSizeIterator>,
        addr: B::Operand,
        offset: ArchitectureSize,
    ) {
        self.write_fields_to_memory(params, addr, offset);
    }

    fn write_variant_arms_to_memory<'b>(
        &mut self,
        offset: ArchitectureSize,
        addr: B::Operand,
        tag: Int,
        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
    ) {
        let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
        for (i, ty) in cases.into_iter().enumerate() {
            self.push_block();
            self.emit(&Instruction::VariantPayloadName);
            let payload_name = self.stack.pop().unwrap();
            self.emit(&Instruction::I32Const { val: i as i32 });
            self.stack.push(addr.clone());
            self.store_intrepr(offset, tag);
            if let Some(ty) = ty {
                self.stack.push(payload_name.clone());
                self.write_to_memory(ty, addr.clone(), payload_offset);
            }
            self.finish_block(0);
        }
    }
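
    // Layout sketch: for a `result<u32, f32>` the `u8` tag is stored at
    // `offset`, and `payload_offset` rounds the 1-byte tag up to the payload
    // alignment of 4, so either payload is written at `offset + 4`.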

    fn write_list_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        // After lowering the list there are two values on the stack, which
        // we write into memory: the pointer into the low address and the
        // length into the high address.
        self.lower(ty);
        self.stack.push(addr.clone());
        self.emit(&Instruction::LengthStore {
            offset: offset + self.bindgen.sizes().align(ty).into(),
        });
        self.stack.push(addr);
        self.emit(&Instruction::PointerStore { offset });
    }
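
    // For example, on a 32-bit guest this stores the pointer at `offset` and
    // the length at `offset + 4`: `align(ty)` is the pointer alignment, so it
    // doubles as the length field's offset within the pointer/length pair.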

    fn write_fields_to_memory<'b>(
        &mut self,
        tys: impl IntoIterator<Item = &'b Type, IntoIter: ExactSizeIterator>,
        addr: B::Operand,
        offset: ArchitectureSize,
    ) {
        let tys = tys.into_iter();
        let fields = self
            .stack
            .drain(self.stack.len() - tys.len()..)
            .collect::<Vec<_>>();
        for ((field_offset, ty), op) in self
            .bindgen
            .sizes()
            .field_offsets(tys)
            .into_iter()
            .zip(fields)
        {
            self.stack.push(op);
            self.write_to_memory(ty, addr.clone(), offset + (field_offset));
        }
    }

    fn lower_and_emit(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
        self.lower(ty);
        self.stack.push(addr);
        self.emit(instr);
    }

    fn read_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        use Instruction::*;

        match *ty {
            Type::Bool => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
            Type::U8 => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
            Type::S8 => self.emit_and_lift(ty, addr, &I32Load8S { offset }),
            Type::U16 => self.emit_and_lift(ty, addr, &I32Load16U { offset }),
            Type::S16 => self.emit_and_lift(ty, addr, &I32Load16S { offset }),
            Type::U32 | Type::S32 | Type::Char => self.emit_and_lift(ty, addr, &I32Load { offset }),
            Type::U64 | Type::S64 => self.emit_and_lift(ty, addr, &I64Load { offset }),
            Type::F32 => self.emit_and_lift(ty, addr, &F32Load { offset }),
            Type::F64 => self.emit_and_lift(ty, addr, &F64Load { offset }),
            Type::String => self.read_list_from_memory(ty, addr, offset),
            Type::ErrorContext => self.emit_and_lift(ty, addr, &I32Load { offset }),

            Type::Id(id) => match &self.resolve.types[id].kind {
                TypeDefKind::Type(t) => self.read_from_memory(t, addr, offset),

                TypeDefKind::List(_) => self.read_list_from_memory(ty, addr, offset),

                TypeDefKind::Future(_) | TypeDefKind::Stream(_) | TypeDefKind::Handle(_) => {
                    self.emit_and_lift(ty, addr, &I32Load { offset })
                }

                TypeDefKind::Resource => {
                    todo!();
                }

                // Read and lift each field individually, adjusting the offset
                // as we go along, then aggregate all the fields into the
                // record.
                TypeDefKind::Record(record) => {
                    self.read_fields_from_memory(record.fields.iter().map(|f| &f.ty), addr, offset);
                    self.emit(&RecordLift {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Tuple(tuple) => {
                    self.read_fields_from_memory(&tuple.types, addr, offset);
                    self.emit(&TupleLift { tuple, ty: id });
                }

                TypeDefKind::Flags(f) => {
                    match f.repr() {
                        FlagsRepr::U8 => {
                            self.stack.push(addr);
                            self.load_intrepr(offset, Int::U8);
                        }
                        FlagsRepr::U16 => {
                            self.stack.push(addr);
                            self.load_intrepr(offset, Int::U16);
                        }
                        FlagsRepr::U32(n) => {
                            for i in 0..n {
                                self.stack.push(addr.clone());
                                self.emit(&I32Load {
                                    offset: offset.add_bytes(i * 4),
                                });
                            }
                        }
                    }
                    self.lift(ty);
                }

                // Each case will get its own block, and we'll dispatch to the
                // right block based on the discriminant we initially load.
                // Each individual block is pretty simple and just reads the
                // payload type from the corresponding offset if one is
                // available.
                TypeDefKind::Variant(variant) => {
                    self.read_variant_arms_from_memory(
                        offset,
                        addr,
                        variant.tag(),
                        variant.cases.iter().map(|c| c.ty.as_ref()),
                    );
                    self.emit(&VariantLift {
                        variant,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.read_variant_arms_from_memory(offset, addr, Int::U8, [None, Some(t)]);
                    self.emit(&OptionLift { payload: t, ty: id });
                }

                TypeDefKind::Result(r) => {
                    self.read_variant_arms_from_memory(
                        offset,
                        addr,
                        Int::U8,
                        [r.ok.as_ref(), r.err.as_ref()],
                    );
                    self.emit(&ResultLift { result: r, ty: id });
                }

                TypeDefKind::Enum(e) => {
                    self.stack.push(addr.clone());
                    self.load_intrepr(offset, e.tag());
                    self.lift(ty);
                }

                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedSizeList(..) => todo!(),
            },
        }
    }
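
    // For example, reading an `s8` emits `I32Load8S`, so the byte is
    // sign-extended before `lift` interprets it, while `bool` and `u8` use
    // `I32Load8U` and zero-extend instead.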

    fn read_results_from_memory(
        &mut self,
        result: &Option<Type>,
        addr: B::Operand,
        offset: ArchitectureSize,
    ) {
        self.read_fields_from_memory(result, addr, offset)
    }

    fn read_variant_arms_from_memory<'b>(
        &mut self,
        offset: ArchitectureSize,
        addr: B::Operand,
        tag: Int,
        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
    ) {
        self.stack.push(addr.clone());
        self.load_intrepr(offset, tag);
        let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
        for ty in cases {
            self.push_block();
            if let Some(ty) = ty {
                self.read_from_memory(ty, addr.clone(), payload_offset);
            }
            self.finish_block(ty.is_some() as usize);
        }
    }
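
    // E.g. for an `option<u32>` this loads the `u8` tag at `offset`, then
    // the `Some` block reads an `i32` payload at `offset + 4` (the tag size
    // rounded up to the payload's 4-byte alignment).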

    fn read_list_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        // Read the pointer/length and then perform the standard lifting
        // process.
        self.stack.push(addr.clone());
        self.emit(&Instruction::PointerLoad { offset });
        self.stack.push(addr);
        self.emit(&Instruction::LengthLoad {
            offset: offset + self.bindgen.sizes().align(ty).into(),
        });
        self.lift(ty);
    }

    fn read_fields_from_memory<'b>(
        &mut self,
        tys: impl IntoIterator<Item = &'b Type>,
        addr: B::Operand,
        offset: ArchitectureSize,
    ) {
        for (field_offset, ty) in self.bindgen.sizes().field_offsets(tys).iter() {
            self.read_from_memory(ty, addr.clone(), offset + (*field_offset));
        }
    }

    fn emit_and_lift(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
        self.stack.push(addr);
        self.emit(instr);
        self.lift(ty);
    }

    fn load_intrepr(&mut self, offset: ArchitectureSize, repr: Int) {
        self.emit(&match repr {
            Int::U64 => Instruction::I64Load { offset },
            Int::U32 => Instruction::I32Load { offset },
            Int::U16 => Instruction::I32Load16U { offset },
            Int::U8 => Instruction::I32Load8U { offset },
        });
    }

    fn store_intrepr(&mut self, offset: ArchitectureSize, repr: Int) {
        self.emit(&match repr {
            Int::U64 => Instruction::I64Store { offset },
            Int::U32 => Instruction::I32Store { offset },
            Int::U16 => Instruction::I32Store16 { offset },
            Int::U8 => Instruction::I32Store8 { offset },
        });
    }
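
    // E.g. a small enum (one with at most 256 cases) has `tag() == Int::U8`,
    // so its discriminant round-trips through `I32Store8`/`I32Load8U` above.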

    fn deallocate(
        &mut self,
        ty: &Type,
        addr: B::Operand,
        offset: ArchitectureSize,
        what: Deallocate,
    ) {
        use Instruction::*;

        // No need to execute any instructions if this type itself doesn't
        // require any form of post-return.
        if !needs_deallocate(self.resolve, ty, what) {
            return;
        }

        match *ty {
            Type::String => {
                self.stack.push(addr.clone());
                self.emit(&Instruction::PointerLoad { offset });
                self.stack.push(addr);
                self.emit(&Instruction::LengthLoad {
                    offset: offset + self.bindgen.sizes().align(ty).into(),
                });
                self.emit(&Instruction::GuestDeallocateString);
            }

            Type::Bool
            | Type::U8
            | Type::S8
            | Type::U16
            | Type::S16
            | Type::U32
            | Type::S32
            | Type::Char
            | Type::U64
            | Type::S64
            | Type::F32
            | Type::F64
            | Type::ErrorContext => {}

            Type::Id(id) => match &self.resolve.types[id].kind {
                TypeDefKind::Type(t) => self.deallocate(t, addr, offset, what),

                TypeDefKind::List(element) => {
                    self.stack.push(addr.clone());
                    self.emit(&Instruction::PointerLoad { offset });
                    self.stack.push(addr);
                    self.emit(&Instruction::LengthLoad {
                        offset: offset + self.bindgen.sizes().align(ty).into(),
                    });

                    self.push_block();
                    self.emit(&IterBasePointer);
                    let elemaddr = self.stack.pop().unwrap();
                    self.deallocate(element, elemaddr, Default::default(), what);
                    self.finish_block(0);

                    self.emit(&Instruction::GuestDeallocateList { element });
                }

                TypeDefKind::Handle(Handle::Own(_))
                | TypeDefKind::Future(_)
                | TypeDefKind::Stream(_)
                    if what.handles() =>
                {
                    self.read_from_memory(ty, addr, offset);
                    self.emit(&DropHandle { ty });
                }

                TypeDefKind::Handle(Handle::Own(_)) => unreachable!(),
                TypeDefKind::Handle(Handle::Borrow(_)) => unreachable!(),
                TypeDefKind::Resource => unreachable!(),

                TypeDefKind::Record(record) => {
                    self.deallocate_fields(
                        &record.fields.iter().map(|f| f.ty).collect::<Vec<_>>(),
                        addr,
                        offset,
                        what,
                    );
                }

                TypeDefKind::Tuple(tuple) => {
                    self.deallocate_fields(&tuple.types, addr, offset, what);
                }

                TypeDefKind::Flags(_) => {}

                TypeDefKind::Variant(variant) => {
                    self.deallocate_variant(
                        offset,
                        addr,
                        variant.tag(),
                        variant.cases.iter().map(|c| c.ty.as_ref()),
                        what,
                    );
                    self.emit(&GuestDeallocateVariant {
                        blocks: variant.cases.len(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.deallocate_variant(offset, addr, Int::U8, [None, Some(t)], what);
                    self.emit(&GuestDeallocateVariant { blocks: 2 });
                }

                TypeDefKind::Result(e) => {
                    self.deallocate_variant(
                        offset,
                        addr,
                        Int::U8,
                        [e.ok.as_ref(), e.err.as_ref()],
                        what,
                    );
                    self.emit(&GuestDeallocateVariant { blocks: 2 });
                }

                TypeDefKind::Enum(_) => {}

                TypeDefKind::Future(_) => unreachable!(),
                TypeDefKind::Stream(_) => unreachable!(),
                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedSizeList(..) => todo!(),
            },
        }
    }
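
    // E.g. deallocating a `list<string>` reads the outer pointer/length,
    // runs the per-element block to free each inner string via
    // `GuestDeallocateString`, and finally emits `GuestDeallocateList` to
    // free the outer buffer itself.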

    fn deallocate_variant<'b>(
        &mut self,
        offset: ArchitectureSize,
        addr: B::Operand,
        tag: Int,
        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
        what: Deallocate,
    ) {
        self.stack.push(addr.clone());
        self.load_intrepr(offset, tag);
        let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
        for ty in cases {
            self.push_block();
            if let Some(ty) = ty {
                self.deallocate(ty, addr.clone(), payload_offset, what);
            }
            self.finish_block(0);
        }
    }

    fn deallocate_fields(
        &mut self,
        tys: &[Type],
        addr: B::Operand,
        offset: ArchitectureSize,
        what: Deallocate,
    ) {
        for (field_offset, ty) in self.bindgen.sizes().field_offsets(tys) {
            self.deallocate(ty, addr.clone(), offset + (field_offset), what);
        }
    }
}

fn cast(from: WasmType, to: WasmType) -> Bitcast {
    use WasmType::*;

    match (from, to) {
        (I32, I32)
        | (I64, I64)
        | (F32, F32)
        | (F64, F64)
        | (Pointer, Pointer)
        | (PointerOrI64, PointerOrI64)
        | (Length, Length) => Bitcast::None,

        (I32, I64) => Bitcast::I32ToI64,
        (F32, I32) => Bitcast::F32ToI32,
        (F64, I64) => Bitcast::F64ToI64,

        (I64, I32) => Bitcast::I64ToI32,
        (I32, F32) => Bitcast::I32ToF32,
        (I64, F64) => Bitcast::I64ToF64,

        (F32, I64) => Bitcast::F32ToI64,
        (I64, F32) => Bitcast::I64ToF32,

        (I64, PointerOrI64) => Bitcast::I64ToP64,
        (Pointer, PointerOrI64) => Bitcast::PToP64,
        (_, PointerOrI64) => {
            Bitcast::Sequence(Box::new([cast(from, I64), cast(I64, PointerOrI64)]))
        }

        (PointerOrI64, I64) => Bitcast::P64ToI64,
        (PointerOrI64, Pointer) => Bitcast::P64ToP,
        (PointerOrI64, _) => Bitcast::Sequence(Box::new([cast(PointerOrI64, I64), cast(I64, to)])),

        (I32, Pointer) => Bitcast::I32ToP,
        (Pointer, I32) => Bitcast::PToI32,
        (I32, Length) => Bitcast::I32ToL,
        (Length, I32) => Bitcast::LToI32,
        (I64, Length) => Bitcast::I64ToL,
        (Length, I64) => Bitcast::LToI64,
        (Pointer, Length) => Bitcast::PToL,
        (Length, Pointer) => Bitcast::LToP,

        (F32, Pointer | Length) => Bitcast::Sequence(Box::new([cast(F32, I32), cast(I32, to)])),
        (Pointer | Length, F32) => Bitcast::Sequence(Box::new([cast(from, I32), cast(I32, F32)])),

        (F32, F64)
        | (F64, F32)
        | (F64, I32)
        | (I32, F64)
        | (Pointer | Length, I64 | F64)
        | (I64 | F64, Pointer | Length) => {
            unreachable!("Don't know how to bitcast from {:?} to {:?}", from, to);
        }
    }
}
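
// E.g. `cast(F32, PointerOrI64)` has no direct arm, so it composes
// `Bitcast::Sequence([F32ToI64, I64ToP64])`, routing the value through
// `i64` as an intermediate.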

fn flat_types(resolve: &Resolve, ty: &Type) -> Option<Vec<WasmType>> {
    let mut storage = [WasmType::I32; MAX_FLAT_PARAMS];
    let mut flat = FlatTypes::new(&mut storage);
    if resolve.push_flat(ty, &mut flat) {
        Some(flat.to_vec())
    } else {
        None
    }
}
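
// E.g. a `record { a: u32, b: u32 }` flattens to `Some(vec![I32, I32])`,
// while a type whose flattened form would not fit in `MAX_FLAT_PARAMS`
// slots yields `None`.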