wit_bindgen_core/abi.rs
use std::fmt;
use std::iter;

use wit_parser::Param;
pub use wit_parser::abi::{AbiVariant, FlatTypes, WasmSignature, WasmType};
use wit_parser::{
    Alignment, ArchitectureSize, ElementInfo, Enum, Flags, FlagsRepr, Function, Handle, Int,
    Record, Resolve, Result_, SizeAlign, Tuple, Type, TypeDefKind, TypeId, Variant, align_to_arch,
};

// Helper macro for defining instructions without having to have tons of
// exhaustive `match` statements to update
macro_rules! def_instruction {
    (
        $( #[$enum_attr:meta] )*
        pub enum $name:ident<'a> {
            $(
                $( #[$attr:meta] )*
                $variant:ident $( {
                    $($field:ident : $field_ty:ty $(,)* )*
                } )?
                :
                [$num_popped:expr] => [$num_pushed:expr],
            )*
        }
    ) => {
        $( #[$enum_attr] )*
        pub enum $name<'a> {
            $(
                $( #[$attr] )*
                $variant $( {
                    $(
                        $field : $field_ty,
                    )*
                } )? ,
            )*
        }

        impl $name<'_> {
            /// How many operands does this instruction pop from the stack?
            #[allow(unused_variables, reason = "match arms bind fields for exhaustiveness, not usage")]
            pub fn operands_len(&self) -> usize {
                match self {
                    $(
                        Self::$variant $( {
                            $(
                                $field,
                            )*
                        } )? => $num_popped,
                    )*
                }
            }

            /// How many results does this instruction push onto the stack?
            #[allow(unused_variables, reason = "match arms bind fields for exhaustiveness, not usage")]
            pub fn results_len(&self) -> usize {
                match self {
                    $(
                        Self::$variant $( {
                            $(
                                $field,
                            )*
                        } )? => $num_pushed,
                    )*
                }
            }
        }
    };
}

def_instruction! {
    #[derive(Debug)]
    pub enum Instruction<'a> {
        /// Acquires the specified parameter and places it on the stack.
        /// Depending on the context this may refer to wasm parameters or
        /// interface types parameters.
        GetArg { nth: usize } : [0] => [1],

        // Integer const/manipulation instructions

        /// Pushes the constant `val` onto the stack.
        I32Const { val: i32 } : [0] => [1],
        /// Casts the top N items on the stack using the `Bitcast` enum
        /// provided. Consumes the same number of operands that this produces.
        Bitcasts { casts: &'a [Bitcast] } : [casts.len()] => [casts.len()],
        /// Pushes a number of constant zeros for each wasm type on the stack.
        ConstZero { tys: &'a [WasmType] } : [0] => [tys.len()],

        // Memory load/store instructions

        /// Pops a pointer from the stack and loads a little-endian `i32` from
        /// it, using the specified constant offset.
        I32Load { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i8` from
        /// it, using the specified constant offset. The value loaded is then
        /// zero-extended to 32-bits.
        I32Load8U { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i8` from
        /// it, using the specified constant offset. The value loaded is then
        /// sign-extended to 32-bits.
        I32Load8S { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i16` from
        /// it, using the specified constant offset. The value loaded is then
        /// zero-extended to 32-bits.
        I32Load16U { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i16` from
        /// it, using the specified constant offset. The value loaded is then
        /// sign-extended to 32-bits.
        I32Load16S { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i64` from
        /// it, using the specified constant offset.
        I64Load { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `f32` from
        /// it, using the specified constant offset.
        F32Load { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `f64` from
        /// it, using the specified constant offset.
        F64Load { offset: ArchitectureSize } : [1] => [1],

        /// Like `I32Load` or `I64Load`, but for loading pointer values.
        PointerLoad { offset: ArchitectureSize } : [1] => [1],
        /// Like `I32Load` or `I64Load`, but for loading array length values.
        LengthLoad { offset: ArchitectureSize } : [1] => [1],

        /// Pops a pointer from the stack and then an `i32` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        I32Store { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `i32` value.
        /// Stores the low 8 bits of the value in little-endian at the pointer
        /// specified plus the constant `offset`.
        I32Store8 { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `i32` value.
        /// Stores the low 16 bits of the value in little-endian at the pointer
        /// specified plus the constant `offset`.
        I32Store16 { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `i64` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        I64Store { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `f32` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        F32Store { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `f64` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        F64Store { offset: ArchitectureSize } : [2] => [0],

        /// Like `I32Store` or `I64Store`, but for storing pointer values.
        PointerStore { offset: ArchitectureSize } : [2] => [0],
        /// Like `I32Store` or `I64Store`, but for storing array length values.
        LengthStore { offset: ArchitectureSize } : [2] => [0],

        // Scalar lifting/lowering

        /// Converts an interface type `char` value to a 32-bit integer
        /// representing the unicode scalar value.
        I32FromChar : [1] => [1],
        /// Converts an interface type `u64` value to a wasm `i64`.
        I64FromU64 : [1] => [1],
        /// Converts an interface type `s64` value to a wasm `i64`.
        I64FromS64 : [1] => [1],
        /// Converts an interface type `u32` value to a wasm `i32`.
        I32FromU32 : [1] => [1],
        /// Converts an interface type `s32` value to a wasm `i32`.
        I32FromS32 : [1] => [1],
        /// Converts an interface type `u16` value to a wasm `i32`.
        I32FromU16 : [1] => [1],
        /// Converts an interface type `s16` value to a wasm `i32`.
        I32FromS16 : [1] => [1],
        /// Converts an interface type `u8` value to a wasm `i32`.
        I32FromU8 : [1] => [1],
        /// Converts an interface type `s8` value to a wasm `i32`.
        I32FromS8 : [1] => [1],
        /// Converts an interface type `f32` value to a wasm `f32`.
        ///
        /// This may be a noop for some implementations, but it's here in case the
        /// native language representation of `f32` is different than the wasm
        /// representation of `f32`.
        CoreF32FromF32 : [1] => [1],
        /// Converts an interface type `f64` value to a wasm `f64`.
        ///
        /// This may be a noop for some implementations, but it's here in case the
        /// native language representation of `f64` is different than the wasm
        /// representation of `f64`.
        CoreF64FromF64 : [1] => [1],

        /// Converts a native wasm `i32` to an interface type `s8`.
        ///
        /// This will truncate the upper bits of the `i32`.
        S8FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u8`.
        ///
        /// This will truncate the upper bits of the `i32`.
        U8FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `s16`.
        ///
        /// This will truncate the upper bits of the `i32`.
        S16FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u16`.
        ///
        /// This will truncate the upper bits of the `i32`.
        U16FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `s32`.
        S32FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u32`.
        U32FromI32 : [1] => [1],
        /// Converts a native wasm `i64` to an interface type `s64`.
        S64FromI64 : [1] => [1],
        /// Converts a native wasm `i64` to an interface type `u64`.
        U64FromI64 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `char`.
        ///
        /// It's safe to assume that the `i32` is indeed a valid unicode code point.
        CharFromI32 : [1] => [1],
        /// Converts a native wasm `f32` to an interface type `f32`.
        F32FromCoreF32 : [1] => [1],
        /// Converts a native wasm `f64` to an interface type `f64`.
        F64FromCoreF64 : [1] => [1],

        /// Creates a `bool` from an `i32` input, trapping if the `i32` isn't
        /// zero or one.
        BoolFromI32 : [1] => [1],
        /// Creates an `i32` from a `bool` input; the result must be 0 or 1.
        I32FromBool : [1] => [1],

        // lists

        /// Lowers a list where the element's layout in the native language is
        /// expected to match the canonical ABI definition of interface types.
        ///
        /// Pops a list value from the stack and pushes the pointer/length onto
        /// the stack. If `realloc` is set to `Some` then this is expected to
        /// *consume* the list which means that the data needs to be copied. An
        /// allocation/copy is expected when:
        ///
        /// * A host is calling a wasm export with a list (it needs to copy the
        ///   list into the callee's module, allocating space with `realloc`)
        /// * A wasm export is returning a list (it's expected to use `realloc`
        ///   to give ownership of the list to the caller)
        /// * A host is returning a list in an import definition, meaning that
        ///   space needs to be allocated in the caller with `realloc`
        ///
        /// A copy does not happen (e.g. `realloc` is `None`) when:
        ///
        /// * A wasm module calls an import with the list. In this situation
        ///   it's expected the caller will know how to access this module's
        ///   memory (e.g. the host has raw access or wasm-to-wasm communication
        ///   would copy the list).
        ///
        /// If `realloc` is `Some` then the adapter is not responsible for
        /// cleaning up this list because the other end is receiving the
        /// allocation. If `realloc` is `None` then the adapter is responsible
        /// for cleaning up any temporary allocation it created, if any.
        ListCanonLower {
            element: &'a Type,
            realloc: Option<&'a str>,
        } : [1] => [2],

        /// Same as `ListCanonLower`, but used for strings
        StringLower {
            realloc: Option<&'a str>,
        } : [1] => [2],

        /// Lowers a list where the element's layout in the native language is
        /// not expected to match the canonical ABI definition of interface
        /// types.
        ///
        /// Pops a list value from the stack and pushes the pointer/length onto
        /// the stack. This operation also pops a block from the block stack
        /// which is used as the iteration body of writing each element of the
        /// list consumed.
        ///
        /// The `realloc` field here behaves the same way as `ListCanonLower`.
        /// It's only set to `None` when a wasm module calls a declared import.
        /// Otherwise lowering in other contexts requires allocating memory for
        /// the receiver to own.
        ListLower {
            element: &'a Type,
            realloc: Option<&'a str>,
        } : [1] => [2],

        /// Lifts a list which has a canonical representation into an interface
        /// types value.
        ///
        /// The term "canonical" representation here means that the
        /// representation of the interface types value in the native language
        /// exactly matches the canonical ABI definition of the type.
        ///
        /// This will consume two `i32` values from the stack, a pointer and a
        /// length, and then produces an interface value list.
        ListCanonLift {
            element: &'a Type,
            ty: TypeId,
        } : [2] => [1],

        /// Same as `ListCanonLift`, but used for strings
        StringLift : [2] => [1],

        /// Lifts a list into an interface types value.
        ///
        /// This will consume two `i32` values from the stack, a pointer and a
        /// length, and then produces an interface value list.
        ///
        /// This will also pop a block from the block stack which is how to
        /// read each individual element from the list.
        ListLift {
            element: &'a Type,
            ty: TypeId,
        } : [2] => [1],

        /// Pops all fields for a fixed list off the stack and then composes them
        /// into an array.
        FixedLengthListLift {
            element: &'a Type,
            size: u32,
            id: TypeId,
        } : [*size as usize] => [1],

        /// Pops an array off the stack, decomposes the elements and then pushes them onto the stack.
        FixedLengthListLower {
            element: &'a Type,
            size: u32,
            id: TypeId,
        } : [1] => [*size as usize],

        /// Pops an array and an address off the stack, passes each element to a block storing it
        FixedLengthListLowerToMemory {
            element: &'a Type,
            size: u32,
            id: TypeId,
        } : [2] => [0],

        /// Pops a base address, pushes an array.
        ///
        /// This will also pop a block from the block stack which is how to
        /// read each individual element from the list.
        FixedLengthListLiftFromMemory {
            element: &'a Type,
            size: u32,
            id: TypeId,
        } : [1] => [1],

        /// Pushes an operand onto the stack representing the list item from
        /// each iteration of the list.
        ///
        /// This is only used inside of blocks related to lowering lists.
        IterElem { element: &'a Type } : [0] => [1],

        /// Pushes an operand onto the stack representing the base pointer of
        /// the next element in a list.
        ///
        /// This is used for both lifting and lowering lists.
        IterBasePointer : [0] => [1],

        // records and tuples

        /// Pops a record value off the stack, decomposes the record to all of
        /// its fields, and then pushes the fields onto the stack.
        RecordLower {
            record: &'a Record,
            name: &'a str,
            ty: TypeId,
        } : [1] => [record.fields.len()],

        /// Pops all fields for a record off the stack and then composes them
        /// into a record.
        RecordLift {
            record: &'a Record,
            name: &'a str,
            ty: TypeId,
        } : [record.fields.len()] => [1],

        /// Create an `i32` from a handle.
        HandleLower {
            handle: &'a Handle,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Create a handle from an `i32`.
        HandleLift {
            handle: &'a Handle,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Create an `i32` from a future.
        FutureLower {
            payload: &'a Option<Type>,
            ty: TypeId,
        } : [1] => [1],

        /// Create a future from an `i32`.
        FutureLift {
            payload: &'a Option<Type>,
            ty: TypeId,
        } : [1] => [1],

        /// Create an `i32` from a stream.
        StreamLower {
            payload: &'a Option<Type>,
            ty: TypeId,
        } : [1] => [1],

        /// Create a stream from an `i32`.
        StreamLift {
            payload: &'a Option<Type>,
            ty: TypeId,
        } : [1] => [1],

        /// Create an `i32` from an error-context.
        ErrorContextLower : [1] => [1],

        /// Create an error-context from an `i32`.
        ErrorContextLift : [1] => [1],

        /// Pops a tuple value off the stack, decomposes the tuple to all of
        /// its fields, and then pushes the fields onto the stack.
        TupleLower {
            tuple: &'a Tuple,
            ty: TypeId,
        } : [1] => [tuple.types.len()],

        /// Pops all fields for a tuple off the stack and then composes them
        /// into a tuple.
        TupleLift {
            tuple: &'a Tuple,
            ty: TypeId,
        } : [tuple.types.len()] => [1],

        /// Converts a language-specific record-of-bools to a list of `i32`.
        FlagsLower {
            flags: &'a Flags,
            name: &'a str,
            ty: TypeId,
        } : [1] => [flags.repr().count()],
        /// Converts a list of native wasm `i32` to a language-specific
        /// record-of-bools.
        FlagsLift {
            flags: &'a Flags,
            name: &'a str,
            ty: TypeId,
        } : [flags.repr().count()] => [1],

        // variants

        /// This is a special instruction used by the `VariantLower`
        /// instruction to determine the name of the payload, if present, to use
        /// within each block.
        ///
        /// Each sub-block will have this be the first instruction, and if it
        /// lowers a payload it will expect something bound to this name.
        VariantPayloadName : [0] => [1],

        /// Pops a variant off the stack as well as `ty.cases.len()` blocks
        /// from the code generator. Uses each of those blocks and the value
        /// from the stack to produce `nresults` of items.
        VariantLower {
            variant: &'a Variant,
            name: &'a str,
            ty: TypeId,
            results: &'a [WasmType],
        } : [1] => [results.len()],

        /// Pops an `i32` off the stack as well as `ty.cases.len()` blocks
        /// from the code generator. Uses each of those blocks and the value
        /// from the stack to produce a final variant.
        VariantLift {
            variant: &'a Variant,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Pops an enum off the stack and pushes the `i32` representation.
        EnumLower {
            enum_: &'a Enum,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Pops an `i32` off the stack and lifts it into the `enum` specified.
        EnumLift {
            enum_: &'a Enum,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Specialization of `VariantLower` for specifically `option<T>` types,
        /// otherwise behaves the same as `VariantLower` (e.g. two blocks for
        /// the two cases).
        OptionLower {
            payload: &'a Type,
            ty: TypeId,
            results: &'a [WasmType],
        } : [1] => [results.len()],

        /// Specialization of `VariantLift` for specifically the `option<T>`
        /// type. Otherwise behaves the same as the `VariantLift` instruction
        /// with two blocks for the lift.
        OptionLift {
            payload: &'a Type,
            ty: TypeId,
        } : [1] => [1],

        /// Specialization of `VariantLower` for specifically `result<T, E>`
        /// types, otherwise behaves the same as `VariantLower` (e.g. two blocks
        /// for the two cases).
        ResultLower {
            result: &'a Result_,
            ty: TypeId,
            results: &'a [WasmType],
        } : [1] => [results.len()],

        /// Specialization of `VariantLift` for specifically the `result<T,
        /// E>` type. Otherwise behaves the same as the `VariantLift`
        /// instruction with two blocks for the lift.
        ResultLift {
            result: &'a Result_,
            ty: TypeId,
        } : [1] => [1],

        // calling/control flow

        /// Represents a call to a raw WebAssembly API. The module/name are
        /// provided inline as well as the types if necessary.
        CallWasm {
            name: &'a str,
            sig: &'a WasmSignature,
        } : [sig.params.len()] => [sig.results.len()],

        /// Same as `CallWasm`, except the dual where an interface is being
        /// called rather than a raw wasm function.
        ///
        /// Note that this will be used for async functions, and `async_`
        /// indicates whether the function should be invoked in an async
        /// fashion.
        CallInterface {
            func: &'a Function,
            async_: bool,
        } : [func.params.len()] => [usize::from(func.result.is_some())],

        /// Returns `amt` values on the stack. This is always the last
        /// instruction.
        Return { amt: usize, func: &'a Function } : [*amt] => [0],

        /// Calls the `realloc` function specified in a malloc-like fashion
        /// allocating `size` bytes with alignment `align`.
        ///
        /// Pushes the returned pointer onto the stack.
        Malloc {
            realloc: &'static str,
            size: ArchitectureSize,
            align: Alignment,
        } : [0] => [1],

        /// Used exclusively for guest-code generation, this indicates that
        /// the standard memory deallocation function needs to be invoked with
        /// the specified parameters.
        ///
        /// This will pop a pointer from the stack and push nothing.
        GuestDeallocate {
            size: ArchitectureSize,
            align: Alignment,
        } : [1] => [0],

        /// Used exclusively for guest-code generation, this indicates that
        /// a string is being deallocated. The ptr/length are on the stack and
        /// are popped off and used to deallocate the string.
        GuestDeallocateString : [2] => [0],

        /// Used exclusively for guest-code generation, this indicates that
        /// a list is being deallocated. The ptr/length are on the stack and
        /// are popped off and used to deallocate the list.
        ///
        /// This variant also pops a block off the block stack to be used as the
        /// body of the deallocation loop.
        GuestDeallocateList {
            element: &'a Type,
        } : [2] => [0],

        /// Used exclusively for guest-code generation, this indicates that
        /// a variant is being deallocated. The integer discriminant is popped
        /// off the stack as well as `blocks` number of blocks popped from the
        /// blocks stack. The variant is used to select, at runtime, which of
        /// the blocks is executed to deallocate the variant.
        GuestDeallocateVariant {
            blocks: usize,
        } : [1] => [0],

        /// Deallocates the language-specific handle representation on the top
        /// of the stack. Used for async imports.
        DropHandle { ty: &'a Type } : [1] => [0],

        /// Call `task.return` for an async-lifted export.
        ///
        /// This will call the core wasm import `name` which will be mapped to
        /// `task.return` later on. The function called takes `params` as its
        /// parameters and returns no results. This is used to pass the
        /// lowered representation of a function's results to `task.return`.
        AsyncTaskReturn { name: &'a str, params: &'a [WasmType] } : [params.len()] => [0],

        /// Force the evaluation of the specified number of expressions and push
        /// the results to the stack.
        ///
        /// This is useful prior to disposing of temporary variables and/or
        /// allocations which are referenced by one or more not-yet-evaluated
        /// expressions.
        Flush { amt: usize } : [*amt] => [*amt],
    }
}
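
// A hedged sanity check of the arity methods generated by
// `def_instruction!` above; this test module is illustrative and not
// required by the generator itself.
#[cfg(test)]
mod instruction_arity_tests {
    use super::*;

    #[test]
    fn arity_matches_declarations() {
        // `I32Const` pops nothing and pushes one value.
        let inst = Instruction::I32Const { val: 7 };
        assert_eq!(inst.operands_len(), 0);
        assert_eq!(inst.results_len(), 1);

        // Field-dependent arity: `Bitcasts` consumes and produces one
        // value per cast in its list.
        let casts = [Bitcast::I32ToI64, Bitcast::None];
        let inst = Instruction::Bitcasts { casts: &casts };
        assert_eq!(inst.operands_len(), 2);
        assert_eq!(inst.results_len(), 2);
    }
}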

#[derive(Debug, PartialEq)]
pub enum Bitcast {
    // Upcasts
    F32ToI32,
    F64ToI64,
    I32ToI64,
    F32ToI64,

    // Downcasts
    I32ToF32,
    I64ToF64,
    I64ToI32,
    I64ToF32,

    // PointerOrI64 conversions. These preserve provenance when the source
    // or destination is a pointer value.
    //
    // These are used when pointer values are being stored in
    // (ToP64) and loaded out of (P64To) PointerOrI64 values, so they
    // always have to preserve provenance when the value being loaded or
    // stored is a pointer.
    P64ToI64,
    I64ToP64,
    P64ToP,
    PToP64,

    // Pointer<->number conversions. These do not preserve provenance.
    //
    // These are used when integer or floating-point values are being stored in
    // (I32ToP/etc.) and loaded out of (PToI32/etc.) pointer values, so they
    // never have any provenance to preserve.
    I32ToP,
    PToI32,
    PToL,
    LToP,

    // Number<->Number conversions.
    I32ToL,
    LToI32,
    I64ToL,
    LToI64,

    // Multiple conversions in sequence.
    Sequence(Box<[Bitcast; 2]>),

    None,
}

/// Whether the glue code surrounding a call is lifting arguments and lowering
/// results or vice versa.
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum LiftLower {
    /// When the glue code lifts arguments and lowers results.
    ///
    /// ```text
    /// Wasm --lift-args--> SourceLanguage; call; SourceLanguage --lower-results--> Wasm
    /// ```
    LiftArgsLowerResults,
    /// When the glue code lowers arguments and lifts results.
    ///
    /// ```text
    /// SourceLanguage --lower-args--> Wasm; call; Wasm --lift-results--> SourceLanguage
    /// ```
    LowerArgsLiftResults,
}

/// Trait for language implementors to use to generate glue code between native
/// WebAssembly signatures and interface types signatures.
///
/// This is used as an implementation detail in interpreting the ABI between
/// interface types and wasm types. Eventually this will be driven by interface
/// types adapters themselves, but for now the ABI of a function dictates what
/// instructions are fed in.
///
/// Types implementing `Bindgen` are incrementally fed `Instruction` values to
/// generate code for. Instructions operate like a stack machine where each
/// instruction has a list of inputs and a list of outputs (provided by the
/// `emit` function).
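///
/// A minimal sketch of an implementation for a string-based generator
/// follows; the struct and its method bodies are illustrative assumptions,
/// not an API provided by this crate:
///
/// ```ignore
/// struct MyBindgen {
///     sizes: SizeAlign,
///     blocks: Vec<Vec<String>>,
/// }
///
/// impl Bindgen for MyBindgen {
///     type Operand = String;
///
///     fn emit(
///         &mut self,
///         _resolve: &Resolve,
///         inst: &Instruction<'_>,
///         operands: &mut Vec<String>,
///         results: &mut Vec<String>,
///     ) {
///         match inst {
///             // Each arm consumes `operands` and pushes exactly
///             // `inst.results_len()` entries onto `results`.
///             Instruction::I32Const { val } => results.push(val.to_string()),
///             Instruction::I32FromBool => {
///                 results.push(format!("({} ? 1 : 0)", operands[0]))
///             }
///             _ => unimplemented!(),
///         }
///     }
///
///     fn return_pointer(&mut self, _size: ArchitectureSize, _align: Alignment) -> String {
///         "ret_area".to_string()
///     }
///
///     fn push_block(&mut self) {
///         self.blocks.push(Vec::new());
///     }
///
///     fn finish_block(&mut self, _operands: &mut Vec<String>) {
///         self.blocks.pop();
///     }
///
///     fn sizes(&self) -> &SizeAlign {
///         &self.sizes
///     }
///
///     fn is_list_canonical(&self, _resolve: &Resolve, _element: &Type) -> bool {
///         false
///     }
/// }
/// ```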
pub trait Bindgen {
    /// The intermediate type for fragments of code for this type.
    ///
    /// For most languages `String` is a suitable intermediate type.
    type Operand: Clone + fmt::Debug;

    /// Emit code to implement the given instruction.
    ///
    /// Each operand is given in `operands` and can be popped off if ownership
    /// is required. It's guaranteed that `operands` has the appropriate length
    /// for the `inst` given, as specified with [`Instruction`].
    ///
    /// Each result variable should be pushed onto `results`. This function must
    /// push the appropriate number of results or binding generation will panic.
    fn emit(
        &mut self,
        resolve: &Resolve,
        inst: &Instruction<'_>,
        operands: &mut Vec<Self::Operand>,
        results: &mut Vec<Self::Operand>,
    );

    /// Gets an operand reference to the return pointer area.
    ///
    /// The provided size and alignment is for the function's return type.
    fn return_pointer(&mut self, size: ArchitectureSize, align: Alignment) -> Self::Operand;

    /// Enters a new block of code to generate code for.
    ///
    /// This is currently exclusively used for constructing variants. When a
    /// variant is constructed a block here will be pushed for each case of a
    /// variant, generating the code necessary to translate a variant case.
    ///
    /// Blocks are completed with `finish_block` below. It's expected that `emit`
    /// will always push code (if necessary) into the "current block", which is
    /// updated by calling this method and `finish_block` below.
    fn push_block(&mut self);

    /// Indicates to the code generator that a block is completed, and the
    /// `operand` values specified are the resulting values of the block.
    ///
    /// This method will be used to compute the value of each arm of lifting a
    /// variant. The `operand` vector will be empty if the variant case didn't
    /// actually have any type associated with it. Otherwise the last value
    /// remaining on the stack represents the value associated with a
    /// variant's `case`.
    ///
    /// It's expected that this will resume code generation in the previous
    /// block before `push_block` was called. This must also save the results
    /// of the current block internally for instructions like `ResultLift` to
    /// use later.
    fn finish_block(&mut self, operand: &mut Vec<Self::Operand>);

    /// Returns size information that was previously calculated for all types.
    fn sizes(&self) -> &SizeAlign;

    /// Returns whether or not the specified element type is represented in a
    /// "canonical" form for lists. This dictates whether the `ListCanonLower`
    /// and `ListCanonLift` instructions are used or not.
    fn is_list_canonical(&self, resolve: &Resolve, element: &Type) -> bool;
}

/// Generates an abstract sequence of instructions which represents this
/// function being adapted as an imported function.
///
/// The instructions here, when executed, will emulate a language with
/// interface types calling the concrete wasm implementation. The parameters
/// for the returned instruction sequence are the language's own
/// interface-types parameters. One instruction in the instruction stream
/// will be a `CallWasm` which represents calling the actual raw wasm function
/// signature.
///
/// This function is useful, for example, if you're building a language
/// generator for WASI bindings. This will document how to translate
/// language-specific values into the wasm types to call a WASI function,
/// and it will also automatically convert the results of the WASI function
/// back to a language-specific value.
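///
/// A hedged usage sketch, assuming `resolve`, `func`, and `bindgen` are set
/// up elsewhere:
///
/// ```ignore
/// // Generate glue that lowers arguments, calls the raw wasm import, and
/// // lifts the results back into the source language.
/// call(
///     &resolve,
///     AbiVariant::GuestImport,
///     LiftLower::LowerArgsLiftResults,
///     &func,
///     &mut bindgen,
///     false, // async_
/// );
/// ```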
pub fn call(
    resolve: &Resolve,
    variant: AbiVariant,
    lift_lower: LiftLower,
    func: &Function,
    bindgen: &mut impl Bindgen,
    async_: bool,
) {
    Generator::new(resolve, bindgen).call(func, variant, lift_lower, async_);
}

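/// Lowers a single `value` of type `ty` into linear memory at `address`,
/// generating instructions via `bindgen`.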
pub fn lower_to_memory<B: Bindgen>(
    resolve: &Resolve,
    bindgen: &mut B,
    address: B::Operand,
    value: B::Operand,
    ty: &Type,
) {
    let mut generator = Generator::new(resolve, bindgen);
    // TODO: make this configurable? Right now this function is only called for
    // future/stream callbacks so it's appropriate to skip realloc here as it's
    // all "lower for wasm import", but this might get reused for something else
    // in the future.
    generator.realloc = Some(Realloc::Export("cabi_realloc"));
    generator.stack.push(value);
    generator.write_to_memory(ty, address, Default::default());
}

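/// Lowers a single `value` of type `ty` into its flattened core wasm
/// representation, returning the lowered operands.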
pub fn lower_flat<B: Bindgen>(
    resolve: &Resolve,
    bindgen: &mut B,
    value: B::Operand,
    ty: &Type,
) -> Vec<B::Operand> {
    let mut generator = Generator::new(resolve, bindgen);
    generator.stack.push(value);
    generator.realloc = Some(Realloc::Export("cabi_realloc"));
    generator.lower(ty);
    generator.stack
}

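/// Lifts a single value of type `ty` out of linear memory at `address`.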
pub fn lift_from_memory<B: Bindgen>(
    resolve: &Resolve,
    bindgen: &mut B,
    address: B::Operand,
    ty: &Type,
) -> B::Operand {
    let mut generator = Generator::new(resolve, bindgen);
    generator.read_from_memory(ty, address, Default::default());
    generator.stack.pop().unwrap()
}

/// Used in a similar manner as the [`call`] function, except it is used to
/// generate the `post-return` callback for `func`.
///
/// This is only intended to be used in guest generators for exported
/// functions and will primarily generate `GuestDeallocate*` instructions,
/// plus others used as input to those instructions.
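///
/// A hedged usage sketch (setup assumed):
///
/// ```ignore
/// if guest_export_needs_post_return(&resolve, &func) {
///     post_return(&resolve, &func, &mut bindgen);
/// }
/// ```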
pub fn post_return(resolve: &Resolve, func: &Function, bindgen: &mut impl Bindgen) {
    Generator::new(resolve, bindgen).post_return(func);
}

/// Returns whether the `Function` specified needs a post-return function to
/// be generated in guest code.
///
/// This is used when the return value contains a memory allocation such as
/// a list or a string primarily.
pub fn guest_export_needs_post_return(resolve: &Resolve, func: &Function) -> bool {
    func.result
        .map(|t| needs_deallocate(resolve, &t, Deallocate::Lists))
        .unwrap_or(false)
}

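/// Returns whether any of `func`'s parameters contain an allocation (such as
/// a list or string) that guest code receives ownership of.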
pub fn guest_export_params_have_allocations(resolve: &Resolve, func: &Function) -> bool {
    func.params
        .iter()
        .any(|param| needs_deallocate(resolve, &param.ty, Deallocate::Lists))
}

fn needs_deallocate(resolve: &Resolve, ty: &Type, what: Deallocate) -> bool {
    match ty {
        Type::String => true,
        Type::ErrorContext => true,
        Type::Id(id) => match &resolve.types[*id].kind {
            TypeDefKind::List(_) => true,
            TypeDefKind::Type(t) => needs_deallocate(resolve, t, what),
            TypeDefKind::Handle(Handle::Own(_)) => what.handles(),
            TypeDefKind::Handle(Handle::Borrow(_)) => false,
            TypeDefKind::Resource => false,
            TypeDefKind::Record(r) => r
                .fields
                .iter()
                .any(|f| needs_deallocate(resolve, &f.ty, what)),
            TypeDefKind::Tuple(t) => t.types.iter().any(|t| needs_deallocate(resolve, t, what)),
            TypeDefKind::Variant(t) => t
                .cases
                .iter()
                .filter_map(|t| t.ty.as_ref())
                .any(|t| needs_deallocate(resolve, t, what)),
            TypeDefKind::Option(t) => needs_deallocate(resolve, t, what),
            TypeDefKind::Result(t) => [&t.ok, &t.err]
                .iter()
                .filter_map(|t| t.as_ref())
                .any(|t| needs_deallocate(resolve, t, what)),
            TypeDefKind::Flags(_) | TypeDefKind::Enum(_) => false,
            TypeDefKind::Future(_) | TypeDefKind::Stream(_) => what.handles(),
            TypeDefKind::Unknown => unreachable!(),
            TypeDefKind::FixedLengthList(t, _) => needs_deallocate(resolve, t, what),
            TypeDefKind::Map(..) => todo!(),
        },

        Type::Bool
        | Type::U8
        | Type::S8
        | Type::U16
        | Type::S16
        | Type::U32
        | Type::S32
        | Type::U64
        | Type::S64
        | Type::F32
        | Type::F64
        | Type::Char => false,
    }
}

/// Generate instructions in `bindgen` to deallocate all lists in the given
/// `operands`, which represent values of `types` either flattened directly
/// or, when `indirect` is set, through a single pointer into linear memory.
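///
/// A hedged usage sketch (the operand values are illustrative):
///
/// ```ignore
/// // `ptr` points at the function's results laid out in linear memory.
/// deallocate_lists_in_types(&resolve, &types, &[ptr], true, &mut bindgen);
/// ```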
pub fn deallocate_lists_in_types<B: Bindgen>(
    resolve: &Resolve,
    types: &[Type],
    operands: &[B::Operand],
    indirect: bool,
    bindgen: &mut B,
) {
    Generator::new(resolve, bindgen).deallocate_in_types(
        types,
        operands,
        indirect,
        Deallocate::Lists,
    );
}

/// Generate instructions in `bindgen` to deallocate all lists and owned
/// resources (such as `own<T>` handles, futures, and streams) in the given
/// `operands`, which represent values of `types` either flattened directly
/// or, when `indirect` is set, through a single pointer into linear memory.
pub fn deallocate_lists_and_own_in_types<B: Bindgen>(
    resolve: &Resolve,
    types: &[Type],
    operands: &[B::Operand],
    indirect: bool,
    bindgen: &mut B,
) {
    Generator::new(resolve, bindgen).deallocate_in_types(
        types,
        operands,
        indirect,
        Deallocate::ListsAndOwn,
    );
}

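/// How memory is allocated when lowering requires linear-memory storage
/// (lists, strings, and indirectly-passed parameters or results).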
#[derive(Copy, Clone)]
pub enum Realloc {
    None,
    Export(&'static str),
}

/// What to deallocate in various `deallocate_*` methods.
#[derive(Copy, Clone)]
enum Deallocate {
    /// Only deallocate lists.
    Lists,
    /// Deallocate lists and owned resources such as `own<T>` and
    /// futures/streams.
    ListsAndOwn,
}

impl Deallocate {
    fn handles(&self) -> bool {
        match self {
            Deallocate::Lists => false,
            Deallocate::ListsAndOwn => true,
        }
    }
}

struct Generator<'a, B: Bindgen> {
    bindgen: &'a mut B,
    resolve: &'a Resolve,
    operands: Vec<B::Operand>,
    results: Vec<B::Operand>,
    stack: Vec<B::Operand>,
    return_pointer: Option<B::Operand>,
    realloc: Option<Realloc>,
}

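// Functions with more flattened parameters than these limits pass their
// parameters indirectly through linear memory instead; async lowered
// imports use the smaller limit (see `max_flat_params` below).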
const MAX_FLAT_PARAMS: usize = 16;
const MAX_FLAT_ASYNC_PARAMS: usize = 4;

impl<'a, B: Bindgen> Generator<'a, B> {
    fn new(resolve: &'a Resolve, bindgen: &'a mut B) -> Generator<'a, B> {
        Generator {
            resolve,
            bindgen,
            operands: Vec::new(),
            results: Vec::new(),
            stack: Vec::new(),
            return_pointer: None,
            realloc: None,
        }
    }

    fn call(&mut self, func: &Function, variant: AbiVariant, lift_lower: LiftLower, async_: bool) {
        let sig = self.resolve.wasm_signature(variant, func);

        // Lowering parameters calling a wasm import _or_ returning a result
        // from an async-lifted wasm export means we don't need to pass
        // ownership, but we pass ownership in all other cases.
        let realloc = match (variant, lift_lower, async_) {
            (AbiVariant::GuestImport, LiftLower::LowerArgsLiftResults, _)
            | (
                AbiVariant::GuestExport
                | AbiVariant::GuestExportAsync
                | AbiVariant::GuestExportAsyncStackful,
                LiftLower::LiftArgsLowerResults,
                true,
            ) => Realloc::None,
            _ => Realloc::Export("cabi_realloc"),
        };
        assert!(self.realloc.is_none());

        match lift_lower {
            LiftLower::LowerArgsLiftResults => {
                self.realloc = Some(realloc);

                // Create a closure that performs individual lowering of
                // operands into memory.
                let lower_to_memory = |self_: &mut Self, ptr: B::Operand| {
                    let mut offset = ArchitectureSize::default();
                    for (nth, Param { ty, .. }) in func.params.iter().enumerate() {
                        self_.emit(&Instruction::GetArg { nth });
                        offset = align_to_arch(offset, self_.bindgen.sizes().align(ty));
                        self_.write_to_memory(ty, ptr.clone(), offset);
                        offset += self_.bindgen.sizes().size(ty);
                    }

                    self_.stack.push(ptr);
                };

                // Lower parameters
                if sig.indirect_params {
                    // If parameters are indirect, space is allocated for them
                    // and each argument is lowered individually into memory.
                    let ElementInfo { size, align } = self
                        .bindgen
                        .sizes()
                        .record(func.params.iter().map(|param| &param.ty));

                    // Resolve the pointer to the indirectly stored parameters
                    let ptr = match variant {
                        // When a wasm module calls an import it will provide
                        // space that isn't explicitly deallocated.
                        AbiVariant::GuestImport => self.bindgen.return_pointer(size, align),

                        AbiVariant::GuestImportAsync => {
                            todo!("indirect param lowering for async guest imports is not implemented")
                        }

                        // When calling a wasm module from the outside, though,
                        // malloc needs to be called.
                        AbiVariant::GuestExport => {
                            self.emit(&Instruction::Malloc {
                                realloc: "cabi_realloc",
                                size,
                                align,
                            });
                            self.stack.pop().unwrap()
                        }

                        AbiVariant::GuestExportAsync | AbiVariant::GuestExportAsyncStackful => {
                            todo!("indirect param lowering for async exports is not implemented")
                        }
                    };

                    // Lower the parameters to memory
                    lower_to_memory(self, ptr);
                } else {
                    // ... otherwise arguments are direct (there aren't too
                    // many), so we simply do a normal lower operation for
                    // them all.
                    for (nth, Param { ty, .. }) in func.params.iter().enumerate() {
                        self.emit(&Instruction::GetArg { nth });
                        self.lower(ty);
                    }
                }
                self.realloc = None;

                // If necessary we may need to prepare a return pointer for
                // this ABI.
                if variant == AbiVariant::GuestImport && sig.retptr {
                    let info = self.bindgen.sizes().params(&func.result);
                    let ptr = self.bindgen.return_pointer(info.size, info.align);
                    self.return_pointer = Some(ptr.clone());
                    self.stack.push(ptr);
                }

                // Call the wasm function
                assert_eq!(self.stack.len(), sig.params.len());
                self.emit(&Instruction::CallWasm {
                    name: &func.name,
                    sig: &sig,
                });

                // Handle the result
                if sig.retptr {
                    // If there is a return pointer, find where the results
                    // were stored and read them back out of memory.
                    let ptr = match variant {
                        // imports into guests means it's a wasm module
                        // calling an imported function. We supplied the
                        // return pointer as the last argument (saved in
                        // `self.return_pointer`) so we use that to read
                        // the result of the function from memory.
                        AbiVariant::GuestImport => {
                            assert!(sig.results.is_empty());
                            self.return_pointer.take().unwrap()
                        }

                        // guest exports means that this is a host
                        // calling wasm so wasm returned a pointer to where
                        // the result is stored
                        AbiVariant::GuestExport => self.stack.pop().unwrap(),

                        AbiVariant::GuestImportAsync
                        | AbiVariant::GuestExportAsync
                        | AbiVariant::GuestExportAsyncStackful => {
                            unreachable!()
                        }
                    };

                    if let (AbiVariant::GuestExport, true) = (variant, async_) {
                        // If we're dealing with an async function, the result
                        // should not be read from memory immediately, as it's
                        // the async call result.
                        //
                        // We can leave the result of the call (the indication
                        // of what to do as an async call) on the stack as a
                        // return.
                        self.stack.push(ptr);
                    } else {
                        // If we're not dealing with an async call, the result
                        // must be in memory at this point and can be read out.
                        self.read_results_from_memory(
                            &func.result,
                            ptr.clone(),
                            ArchitectureSize::default(),
                        );
                        self.emit(&Instruction::Flush {
                            amt: usize::from(func.result.is_some()),
                        });
                    }
                } else {
                    // With no return pointer in use we can simply lift the
                    // result(s) of the function from the result of the core
                    // wasm function.
                    if let Some(ty) = &func.result {
                        self.lift(ty)
                    }
                }

                // Emit the function return
                if async_ {
                    self.emit(&Instruction::AsyncTaskReturn {
                        name: &func.name,
                        params: if func.result.is_some() {
                            &[WasmType::Pointer]
                        } else {
                            &[]
                        },
                    });
                } else {
                    self.emit(&Instruction::Return {
                        func,
                        amt: usize::from(func.result.is_some()),
                    });
                }
            }

            LiftLower::LiftArgsLowerResults => {
                let max_flat_params = match (variant, async_) {
                    (AbiVariant::GuestImportAsync, _is_async @ true) => MAX_FLAT_ASYNC_PARAMS,
                    _ => MAX_FLAT_PARAMS,
                };

                // Read parameters from memory
                let read_from_memory = |self_: &mut Self| {
                    let mut offset = ArchitectureSize::default();
                    let ptr = self_
                        .stack
                        .pop()
                        .expect("empty stack during read param from memory");
                    for Param { ty, .. } in func.params.iter() {
                        offset = align_to_arch(offset, self_.bindgen.sizes().align(ty));
                        self_.read_from_memory(ty, ptr.clone(), offset);
                        offset += self_.bindgen.sizes().size(ty);
                    }
                };

                // Resolve parameters
                if sig.indirect_params {
                    // If parameters were passed indirectly, arguments must be
                    // read in succession from memory, with the pointer to the
                    // arguments being the first argument to the function.
                    self.emit(&Instruction::GetArg { nth: 0 });
                    read_from_memory(self);
                } else {
                    // ... otherwise, if parameters were passed directly then
                    // we lift each argument in succession from the core wasm
                    // types that make up the type.
                    let mut offset = 0;
                    for Param {
                        name: param_name,
                        ty,
                        ..
                    } in func.params.iter()
                    {
                        let Some(types) = flat_types(self.resolve, ty, Some(max_flat_params))
                        else {
                            panic!(
                                "failed to flatten types during direct parameter lifting ('{param_name}' in func '{}')",
                                func.name
                            );
                        };
                        for _ in 0..types.len() {
                            self.emit(&Instruction::GetArg { nth: offset });
                            offset += 1;
                        }
                        self.lift(ty);
                    }
                }

                // ... and that allows us to call the interface types function
                self.emit(&Instruction::CallInterface { func, async_ });

                // The return value of an async function is *not* the result
                // of the function itself or a pointer but rather a status
                // code.
                //
                // Asynchronous functions will call `task.return` after the
                // interface function completes, so lowering is conditional
                // based on slightly different logic for the `task.return`
                // intrinsic.
                //
                // Note that in the async import case the code below deals
                // with the component-model function being lowered, not the
                // core function underneath it (i.e. `func.result` may be
                // empty, while the associated core function underneath must
                // have an `i32` status code result).
                let (lower_to_memory, async_flat_results) = match (async_, &func.result) {
                    // All async cases pass along the function results and
                    // flatten where necessary
                    (_is_async @ true, func_result) => {
                        let results = match &func_result {
                            Some(ty) => flat_types(self.resolve, ty, Some(max_flat_params)),
                            None => Some(Vec::new()),
                        };
                        (results.is_none(), Some(results))
                    }
                    // All other non-async cases
                    (_is_async @ false, _) => (sig.retptr, None),
                };

                // This was dynamically allocated by the caller (or async
                // start function) so after it's been read by the guest we
                // need to deallocate it.
                if let AbiVariant::GuestExport
                | AbiVariant::GuestExportAsync
                | AbiVariant::GuestExportAsyncStackful = variant
                {
                    if sig.indirect_params && !async_ {
                        let ElementInfo { size, align } = self
                            .bindgen
                            .sizes()
                            .record(func.params.iter().map(|param| &param.ty));
                        self.emit(&Instruction::GetArg { nth: 0 });
                        self.emit(&Instruction::GuestDeallocate { size, align });
                    }
                }

                self.realloc = Some(realloc);

                // Perform memory lowering of relevant results, including out
                // pointers as well as traditional results
                match (lower_to_memory, sig.retptr, variant) {
                    // If no lowering to memory is required then we simply
                    // lower the result(s) and return them directly from the
                    // function.
                    (_lower_to_memory @ false, _, _) => {
                        if let Some(ty) = &func.result {
                            self.lower(ty);
                        }
                    }

                    // Lowering to memory for a guest import
                    //
                    // When a function is imported to a guest this means
                    // it's a host providing the implementation of the
                    // import. The result is stored in the pointer
                    // specified in the last argument, so we get the
                    // pointer here and then write the return value into
                    // it.
                    (
                        _lower_to_memory @ true,
                        _has_ret_ptr @ true,
                        AbiVariant::GuestImport | AbiVariant::GuestImportAsync,
                    ) => {
                        self.emit(&Instruction::GetArg {
                            nth: sig.params.len() - 1,
                        });
                        let ptr = self
                            .stack
                            .pop()
                            .expect("empty stack during result lower to memory");
                        self.write_params_to_memory(&func.result, ptr, Default::default());
                    }

                    // Lowering to memory for a guest export
                    //
                    // For a guest export this is a function defined in
                    // wasm, so we're returning a pointer where the
                    // value was stored at. Allocate some space here
                    // (statically) and then write the result into that
                    // memory, returning the pointer at the end.
                    (_lower_to_memory @ true, _, variant) => match variant {
                        AbiVariant::GuestExport | AbiVariant::GuestExportAsync => {
                            let ElementInfo { size, align } =
                                self.bindgen.sizes().params(&func.result);
                            let ptr = self.bindgen.return_pointer(size, align);
                            self.write_params_to_memory(
                                &func.result,
                                ptr.clone(),
                                Default::default(),
                            );
                            self.stack.push(ptr);
                        }
                        AbiVariant::GuestImport | AbiVariant::GuestImportAsync => {
                            unreachable!(
                                "lowering to memory cannot be performed without a return pointer ({async_note} func [{func_name}], variant {variant:#?})",
                                async_note = async_.then_some("async").unwrap_or("sync"),
                                func_name = func.name,
                            )
                        }
                        AbiVariant::GuestExportAsyncStackful => {
                            todo!("stackful exports are not yet supported")
                        }
                    },
                }

                // Build and emit the appropriate return
                match (variant, async_flat_results) {
                    // Async guest imports always return an i32 status code
                    (AbiVariant::GuestImport | AbiVariant::GuestImportAsync, None) if async_ => {
                        unreachable!("async guest imports must have a return")
                    }

                    // Async guest imports with results return the status
                    // code, not a pointer to any results
                    (AbiVariant::GuestImport | AbiVariant::GuestImportAsync, Some(results))
                        if async_ =>
                    {
                        let name = &format!("[task-return]{}", func.name);
                        let params = results.as_deref().unwrap_or_default();
                        self.emit(&Instruction::AsyncTaskReturn { name, params });
                    }

                    // All async/non-async cases with results that need to be
                    // returned
                    //
                    // In practice, async imports should not end up here, as
                    // the returned result of an async import is *not* a
                    // pointer but instead a status code.
                    (_, Some(results)) => {
                        let name = &format!("[task-return]{}", func.name);
                        let params = results.as_deref().unwrap_or(&[WasmType::Pointer]);
                        self.emit(&Instruction::AsyncTaskReturn { name, params });
                    }

                    // All async/non-async cases with no results
                    (_, None) => {
                        if async_ {
                            let name = &format!("[task-return]{}", func.name);
                            self.emit(&Instruction::AsyncTaskReturn {
                                name,
                                params: if sig.results.len() > MAX_FLAT_ASYNC_PARAMS {
                                    &[WasmType::Pointer]
                                } else {
                                    &sig.results
                                },
                            });
                        } else {
                            self.emit(&Instruction::Return {
                                func,
                                amt: sig.results.len(),
                            });
                        }
                    }
                }

                self.realloc = None;
            }
        }

        assert!(self.realloc.is_none());

        assert!(
            self.stack.is_empty(),
            "stack has {} items remaining: {:?}",
            self.stack.len(),
            self.stack,
        );
    }

    fn post_return(&mut self, func: &Function) {
        let sig = self.resolve.wasm_signature(AbiVariant::GuestExport, func);

        // Currently post-return is only used for lists and lists are always
        // returned indirectly through memory due to their flat representation
        // having more than one type. Assert that a return pointer is used,
        // though, in case this ever changes.
        assert!(sig.retptr);

        self.emit(&Instruction::GetArg { nth: 0 });
        let addr = self.stack.pop().unwrap();

        let mut types = Vec::new();
        types.extend(func.result);
        self.deallocate_in_types(&types, &[addr], true, Deallocate::Lists);

        self.emit(&Instruction::Return { func, amt: 0 });
    }

    fn deallocate_in_types(
        &mut self,
        types: &[Type],
        operands: &[B::Operand],
        indirect: bool,
        what: Deallocate,
    ) {
        if indirect {
            assert_eq!(operands.len(), 1);
            for (offset, ty) in self.bindgen.sizes().field_offsets(types) {
                self.deallocate_indirect(ty, operands[0].clone(), offset, what);
            }
            assert!(
                self.stack.is_empty(),
                "stack has {} items remaining",
                self.stack.len()
            );
        } else {
            let mut operands = operands;
            let mut operands_for_ty;
            for ty in types {
                let types = flat_types(self.resolve, ty, None).unwrap();
                (operands_for_ty, operands) = operands.split_at(types.len());
                self.stack.extend_from_slice(operands_for_ty);
                self.deallocate(ty, what);
                assert!(
                    self.stack.is_empty(),
                    "stack has {} items remaining",
                    self.stack.len()
                );
            }
            assert!(operands.is_empty());
        }
    }

    fn emit(&mut self, inst: &Instruction<'_>) {
        self.operands.clear();
        self.results.clear();

        let operands_len = inst.operands_len();
        assert!(
            self.stack.len() >= operands_len,
            "not enough operands on stack for {:?}: have {} need {operands_len}",
            inst,
            self.stack.len(),
        );
        self.operands
            .extend(self.stack.drain((self.stack.len() - operands_len)..));
        self.results.reserve(inst.results_len());

        self.bindgen
            .emit(self.resolve, inst, &mut self.operands, &mut self.results);

        assert_eq!(
            self.results.len(),
            inst.results_len(),
            "{:?} expected {} results, got {}",
            inst,
            inst.results_len(),
            self.results.len()
        );
        self.stack.append(&mut self.results);
    }

    fn push_block(&mut self) {
        self.bindgen.push_block();
    }

    fn finish_block(&mut self, size: usize) {
        self.operands.clear();
        assert!(
            size <= self.stack.len(),
            "not enough operands on stack for finishing block",
        );
        self.operands
            .extend(self.stack.drain((self.stack.len() - size)..));
        self.bindgen.finish_block(&mut self.operands);
    }

1477 fn lower(&mut self, ty: &Type) {
1478 use Instruction::*;
1479
1480 match *ty {
1481 Type::Bool => self.emit(&I32FromBool),
1482 Type::S8 => self.emit(&I32FromS8),
1483 Type::U8 => self.emit(&I32FromU8),
1484 Type::S16 => self.emit(&I32FromS16),
1485 Type::U16 => self.emit(&I32FromU16),
1486 Type::S32 => self.emit(&I32FromS32),
1487 Type::U32 => self.emit(&I32FromU32),
1488 Type::S64 => self.emit(&I64FromS64),
1489 Type::U64 => self.emit(&I64FromU64),
1490 Type::Char => self.emit(&I32FromChar),
1491 Type::F32 => self.emit(&CoreF32FromF32),
1492 Type::F64 => self.emit(&CoreF64FromF64),
1493 Type::String => {
1494 let realloc = self.list_realloc();
1495 self.emit(&StringLower { realloc });
1496 }
1497 Type::ErrorContext => self.emit(&ErrorContextLower),
1498 Type::Id(id) => match &self.resolve.types[id].kind {
1499 TypeDefKind::Type(t) => self.lower(t),
1500 TypeDefKind::List(element) => {
1501 let realloc = self.list_realloc();
1502 if self.bindgen.is_list_canonical(self.resolve, element) {
1503 self.emit(&ListCanonLower { element, realloc });
1504 } else {
1505 self.push_block();
1506 self.emit(&IterElem { element });
1507 self.emit(&IterBasePointer);
1508 let addr = self.stack.pop().unwrap();
1509 self.write_to_memory(element, addr, Default::default());
1510 self.finish_block(0);
1511 self.emit(&ListLower { element, realloc });
1512 }
1513 }
1514 TypeDefKind::Handle(handle) => {
1515 let (Handle::Own(ty) | Handle::Borrow(ty)) = handle;
1516 self.emit(&HandleLower {
1517 handle,
1518 ty: id,
1519 name: self.resolve.types[*ty].name.as_deref().unwrap(),
1520 });
1521 }
1522 TypeDefKind::Resource => {
1523 todo!();
1524 }
1525 TypeDefKind::Record(record) => {
1526 self.emit(&RecordLower {
1527 record,
1528 ty: id,
1529 name: self.resolve.types[id].name.as_deref().unwrap(),
1530 });
1531 let values = self
1532 .stack
1533 .drain(self.stack.len() - record.fields.len()..)
1534 .collect::<Vec<_>>();
1535 for (field, value) in record.fields.iter().zip(values) {
1536 self.stack.push(value);
1537 self.lower(&field.ty);
1538 }
1539 }
1540 TypeDefKind::Tuple(tuple) => {
1541 self.emit(&TupleLower { tuple, ty: id });
1542 let values = self
1543 .stack
1544 .drain(self.stack.len() - tuple.types.len()..)
1545 .collect::<Vec<_>>();
1546 for (ty, value) in tuple.types.iter().zip(values) {
1547 self.stack.push(value);
1548 self.lower(ty);
1549 }
1550 }
1551
1552 TypeDefKind::Flags(flags) => {
1553 self.emit(&FlagsLower {
1554 flags,
1555 ty: id,
1556 name: self.resolve.types[id].name.as_ref().unwrap(),
1557 });
1558 }
1559
1560 TypeDefKind::Variant(v) => {
1561 let results =
1562 self.lower_variant_arms(ty, v.cases.iter().map(|c| c.ty.as_ref()));
1563 self.emit(&VariantLower {
1564 variant: v,
1565 ty: id,
1566 results: &results,
1567 name: self.resolve.types[id].name.as_deref().unwrap(),
1568 });
1569 }
1570 TypeDefKind::Enum(enum_) => {
1571 self.emit(&EnumLower {
1572 enum_,
1573 ty: id,
1574 name: self.resolve.types[id].name.as_deref().unwrap(),
1575 });
1576 }
1577 TypeDefKind::Option(t) => {
1578 let results = self.lower_variant_arms(ty, [None, Some(t)]);
1579 self.emit(&OptionLower {
1580 payload: t,
1581 ty: id,
1582 results: &results,
1583 });
1584 }
1585 TypeDefKind::Result(r) => {
1586 let results = self.lower_variant_arms(ty, [r.ok.as_ref(), r.err.as_ref()]);
1587 self.emit(&ResultLower {
1588 result: r,
1589 ty: id,
1590 results: &results,
1591 });
1592 }
1593 TypeDefKind::Future(ty) => {
1594 self.emit(&FutureLower {
1595 payload: ty,
1596 ty: id,
1597 });
1598 }
1599 TypeDefKind::Stream(ty) => {
1600 self.emit(&StreamLower {
1601 payload: ty,
1602 ty: id,
1603 });
1604 }
1605 TypeDefKind::Unknown => unreachable!(),
1606 TypeDefKind::FixedLengthList(ty, size) => {
1607 self.emit(&FixedLengthListLower {
1608 element: ty,
1609 size: *size,
1610 id,
1611 });
1612 let mut values = self
1613 .stack
1614 .drain(self.stack.len() - (*size as usize)..)
1615 .collect::<Vec<_>>();
1616 for value in values.drain(..) {
1617 self.stack.push(value);
1618 self.lower(ty);
1619 }
1620 }
1621 TypeDefKind::Map(..) => todo!(),
1622 },
1623 }
1624 }
1625
1626 fn lower_variant_arms<'b>(
1627 &mut self,
1628 ty: &Type,
1629 cases: impl IntoIterator<Item = Option<&'b Type>>,
1630 ) -> Vec<WasmType> {
1631 use Instruction::*;
1632 let results = flat_types(self.resolve, ty, None).unwrap();
1633 let mut casts = Vec::new();
1634 for (i, ty) in cases.into_iter().enumerate() {
1635 self.push_block();
1636 self.emit(&VariantPayloadName);
1637 let payload_name = self.stack.pop().unwrap();
1638 self.emit(&I32Const { val: i as i32 });
1639 let mut pushed = 1;
1640 if let Some(ty) = ty {
1641 // Using the payload of this block we lower the type to
1642 // raw wasm values.
1643 self.stack.push(payload_name);
1644 self.lower(ty);
1645
1646 // Determine the types of all the wasm values we just
1647 // pushed, and record how many. If we pushed too few
1648 // then we'll need to push some zeros after this.
1649 let temp = flat_types(self.resolve, ty, None).unwrap();
1650 pushed += temp.len();
1651
1652 // For all the types pushed we may need to insert some
1653 // bitcasts. This will go through and cast everything
1654 // to the right type to ensure all blocks produce the
1655 // same set of results.
1656 casts.truncate(0);
1657 for (actual, expected) in temp.iter().zip(&results[1..]) {
1658 casts.push(cast(*actual, *expected));
1659 }
1660 if casts.iter().any(|c| *c != Bitcast::None) {
1661 self.emit(&Bitcasts { casts: &casts });
1662 }
1663 }
1664
1665 // If we haven't pushed enough items in this block to match
1666 // what other variants are pushing then we need to push
1667 // some zeros.
1668 if pushed < results.len() {
1669 self.emit(&ConstZero {
1670 tys: &results[pushed..],
1671 });
1672 }
1673 self.finish_block(results.len());
1674 }
1675 results
1676 }
1677
1678 fn list_realloc(&self) -> Option<&'static str> {
1679 match self.realloc.expect("realloc should be configured") {
1680 Realloc::None => None,
1681 Realloc::Export(s) => Some(s),
1682 }
1683 }
1684
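    /// Lifts the flat core wasm value(s) for `ty` on `self.stack` back into
    /// an interface-level value. A few illustrative mappings, read straight
    /// from the match below:
    ///
    /// ```text
    /// lift(u8)       => U8FromI32
    /// lift(string)   => StringLift
    /// lift(list<T>)  => ListCanonLift, or a per-element block + ListLift
    /// ```
    ///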
1685 /// Note that in general everything in this function is the opposite of the
1686 /// `lower` function above. This is intentional and should be kept this way!
1687 fn lift(&mut self, ty: &Type) {
1688 use Instruction::*;
1689
1690 match *ty {
1691 Type::Bool => self.emit(&BoolFromI32),
1692 Type::S8 => self.emit(&S8FromI32),
1693 Type::U8 => self.emit(&U8FromI32),
1694 Type::S16 => self.emit(&S16FromI32),
1695 Type::U16 => self.emit(&U16FromI32),
1696 Type::S32 => self.emit(&S32FromI32),
1697 Type::U32 => self.emit(&U32FromI32),
1698 Type::S64 => self.emit(&S64FromI64),
1699 Type::U64 => self.emit(&U64FromI64),
1700 Type::Char => self.emit(&CharFromI32),
1701 Type::F32 => self.emit(&F32FromCoreF32),
1702 Type::F64 => self.emit(&F64FromCoreF64),
1703 Type::String => self.emit(&StringLift),
1704 Type::ErrorContext => self.emit(&ErrorContextLift),
1705 Type::Id(id) => match &self.resolve.types[id].kind {
1706 TypeDefKind::Type(t) => self.lift(t),
1707 TypeDefKind::List(element) => {
1708 if self.bindgen.is_list_canonical(self.resolve, element) {
1709 self.emit(&ListCanonLift { element, ty: id });
1710 } else {
1711 self.push_block();
1712 self.emit(&IterBasePointer);
1713 let addr = self.stack.pop().unwrap();
1714 self.read_from_memory(element, addr, Default::default());
1715 self.finish_block(1);
1716 self.emit(&ListLift { element, ty: id });
1717 }
1718 }
1719 TypeDefKind::Handle(handle) => {
1720 let (Handle::Own(ty) | Handle::Borrow(ty)) = handle;
1721 self.emit(&HandleLift {
1722 handle,
1723 ty: id,
1724 name: self.resolve.types[*ty].name.as_deref().unwrap(),
1725 });
1726 }
1727 TypeDefKind::Resource => {
1728 todo!();
1729 }
1730 TypeDefKind::Record(record) => {
1731 self.flat_for_each_record_type(
1732 ty,
1733 record.fields.iter().map(|f| &f.ty),
1734 Self::lift,
1735 );
1736 self.emit(&RecordLift {
1737 record,
1738 ty: id,
1739 name: self.resolve.types[id].name.as_deref().unwrap(),
1740 });
1741 }
1742 TypeDefKind::Tuple(tuple) => {
1743 self.flat_for_each_record_type(ty, tuple.types.iter(), Self::lift);
1744 self.emit(&TupleLift { tuple, ty: id });
1745 }
1746 TypeDefKind::Flags(flags) => {
1747 self.emit(&FlagsLift {
1748 flags,
1749 ty: id,
1750 name: self.resolve.types[id].name.as_ref().unwrap(),
1751 });
1752 }
1753
1754 TypeDefKind::Variant(v) => {
1755 self.flat_for_each_variant_arm(
1756 ty,
1757 true,
1758 v.cases.iter().map(|c| c.ty.as_ref()),
1759 Self::lift,
1760 );
1761 self.emit(&VariantLift {
1762 variant: v,
1763 ty: id,
1764 name: self.resolve.types[id].name.as_deref().unwrap(),
1765 });
1766 }
1767
1768 TypeDefKind::Enum(enum_) => {
1769 self.emit(&EnumLift {
1770 enum_,
1771 ty: id,
1772 name: self.resolve.types[id].name.as_deref().unwrap(),
1773 });
1774 }
1775
1776 TypeDefKind::Option(t) => {
1777 self.flat_for_each_variant_arm(ty, true, [None, Some(t)], Self::lift);
1778 self.emit(&OptionLift { payload: t, ty: id });
1779 }
1780
1781 TypeDefKind::Result(r) => {
1782 self.flat_for_each_variant_arm(
1783 ty,
1784 true,
1785 [r.ok.as_ref(), r.err.as_ref()],
1786 Self::lift,
1787 );
1788 self.emit(&ResultLift { result: r, ty: id });
1789 }
1790
1791 TypeDefKind::Future(ty) => {
1792 self.emit(&FutureLift {
1793 payload: ty,
1794 ty: id,
1795 });
1796 }
1797 TypeDefKind::Stream(ty) => {
1798 self.emit(&StreamLift {
1799 payload: ty,
1800 ty: id,
1801 });
1802 }
1803 TypeDefKind::Unknown => unreachable!(),
1804 TypeDefKind::FixedLengthList(ty, size) => {
                    // Each element was lowered to `flat_per_elem` core wasm
                    // values, so the stack currently holds
                    // `flat_per_elem * size` operands.
                    let temp = flat_types(self.resolve, ty, None).unwrap();
                    let flat_per_elem = temp.len();
                    let flatsize = flat_per_elem * (*size as usize);
1808 let mut lowered_args = self
1809 .stack
1810 .drain(self.stack.len() - flatsize..)
1811 .collect::<Vec<_>>();
1812 for _ in 0..*size {
1813 self.stack.extend(lowered_args.drain(..flat_per_elem));
1814 self.lift(ty);
1815 }
1816 self.emit(&FixedLengthListLift {
1817 element: ty,
1818 size: *size,
1819 id,
1820 });
1821 }
1822 TypeDefKind::Map(..) => todo!(),
1823 },
1824 }
1825 }
1826
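    /// Drains the flattened operands of `container` from the stack and
    /// replays them field by field, invoking `iter` once per field type.
    ///
    /// A hedged sketch of the partitioning (hypothetical record): for
    /// `record { a: u32, b: string }` the flat operands are
    /// `[I32, Pointer, Length]`; field `a` consumes `args[0..1]` and field
    /// `b` consumes `args[1..3]`.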
1827 fn flat_for_each_record_type<'b>(
1828 &mut self,
1829 container: &Type,
1830 types: impl Iterator<Item = &'b Type>,
1831 mut iter: impl FnMut(&mut Self, &Type),
1832 ) {
1833 let temp = flat_types(self.resolve, container, None).unwrap();
1834 let mut args = self
1835 .stack
1836 .drain(self.stack.len() - temp.len()..)
1837 .collect::<Vec<_>>();
1838 for ty in types {
1839 let temp = flat_types(self.resolve, ty, None).unwrap();
1840 self.stack.extend(args.drain(..temp.len()));
1841 iter(self, ty);
1842 }
1843 }
1844
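    /// Runs `iter` once per case of a variant-like type, feeding each case's
    /// block the shared payload operands (everything but the discriminant).
    ///
    /// Continuing the illustrative `variant { a(f32), b(u64) }` example: the
    /// stack holds one shared `I64` payload operand; the `a` block re-pushes
    /// it and emits `Bitcasts [I64ToF32]` before lifting, while the `b`
    /// block uses it unchanged.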
1845 fn flat_for_each_variant_arm<'b>(
1846 &mut self,
1847 ty: &Type,
1848 blocks_with_type_have_result: bool,
1849 cases: impl IntoIterator<Item = Option<&'b Type>>,
1850 mut iter: impl FnMut(&mut Self, &Type),
1851 ) {
1852 let params = flat_types(self.resolve, ty, None).unwrap();
1853 let mut casts = Vec::new();
1854 let block_inputs = self
1855 .stack
1856 .drain(self.stack.len() + 1 - params.len()..)
1857 .collect::<Vec<_>>();
1858 for ty in cases {
1859 self.push_block();
1860 if let Some(ty) = ty {
1861 // Push only the values we need for this variant onto
1862 // the stack.
1863 let temp = flat_types(self.resolve, ty, None).unwrap();
1864 self.stack
1865 .extend(block_inputs[..temp.len()].iter().cloned());
1866
1867 // Cast all the types we have on the stack to the actual
1868 // types needed for this variant, if necessary.
1869 casts.truncate(0);
                for (actual, expected) in temp.iter().zip(&params[1..]) {
1871 casts.push(cast(*expected, *actual));
1872 }
1873 if casts.iter().any(|c| *c != Bitcast::None) {
1874 self.emit(&Instruction::Bitcasts { casts: &casts });
1875 }
1876
1877 // Then recursively lift this variant's payload.
1878 iter(self, ty);
1879 }
1880 self.finish_block(if blocks_with_type_have_result {
1881 ty.is_some() as usize
1882 } else {
1883 0
1884 });
1885 }
1886 }
1887
1888 fn write_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
1889 use Instruction::*;
1890
1891 match *ty {
1892 // Builtin types need different flavors of storage instructions
1893 // depending on the size of the value written.
1894 Type::Bool | Type::U8 | Type::S8 => {
1895 self.lower_and_emit(ty, addr, &I32Store8 { offset })
1896 }
1897 Type::U16 | Type::S16 => self.lower_and_emit(ty, addr, &I32Store16 { offset }),
1898 Type::U32 | Type::S32 | Type::Char => {
1899 self.lower_and_emit(ty, addr, &I32Store { offset })
1900 }
1901 Type::U64 | Type::S64 => self.lower_and_emit(ty, addr, &I64Store { offset }),
1902 Type::F32 => self.lower_and_emit(ty, addr, &F32Store { offset }),
1903 Type::F64 => self.lower_and_emit(ty, addr, &F64Store { offset }),
1904 Type::String => self.write_list_to_memory(ty, addr, offset),
1905 Type::ErrorContext => self.lower_and_emit(ty, addr, &I32Store { offset }),
1906
1907 Type::Id(id) => match &self.resolve.types[id].kind {
1908 TypeDefKind::Type(t) => self.write_to_memory(t, addr, offset),
1909 TypeDefKind::List(_) => self.write_list_to_memory(ty, addr, offset),
1910
1911 TypeDefKind::Future(_) | TypeDefKind::Stream(_) | TypeDefKind::Handle(_) => {
1912 self.lower_and_emit(ty, addr, &I32Store { offset })
1913 }
1914
1915 // Decompose the record into its components and then write all
1916 // the components into memory one-by-one.
1917 TypeDefKind::Record(record) => {
1918 self.emit(&RecordLower {
1919 record,
1920 ty: id,
1921 name: self.resolve.types[id].name.as_deref().unwrap(),
1922 });
1923 self.write_fields_to_memory(record.fields.iter().map(|f| &f.ty), addr, offset);
1924 }
1925 TypeDefKind::Resource => {
1926 todo!()
1927 }
1928 TypeDefKind::Tuple(tuple) => {
1929 self.emit(&TupleLower { tuple, ty: id });
1930 self.write_fields_to_memory(tuple.types.iter(), addr, offset);
1931 }
1932
1933 TypeDefKind::Flags(f) => {
1934 self.lower(ty);
1935 match f.repr() {
1936 FlagsRepr::U8 => {
1937 self.stack.push(addr);
1938 self.store_intrepr(offset, Int::U8);
1939 }
1940 FlagsRepr::U16 => {
1941 self.stack.push(addr);
1942 self.store_intrepr(offset, Int::U16);
1943 }
1944 FlagsRepr::U32(n) => {
1945 for i in (0..n).rev() {
1946 self.stack.push(addr.clone());
1947 self.emit(&I32Store {
1948 offset: offset.add_bytes(i * 4),
1949 });
1950 }
1951 }
1952 }
1953 }
1954
1955 // Each case will get its own block, and the first item in each
1956 // case is writing the discriminant. After that if we have a
1957 // payload we write the payload after the discriminant, aligned up
1958 // to the type's alignment.
1959 TypeDefKind::Variant(v) => {
1960 self.write_variant_arms_to_memory(
1961 offset,
1962 addr,
1963 v.tag(),
1964 v.cases.iter().map(|c| c.ty.as_ref()),
1965 );
1966 self.emit(&VariantLower {
1967 variant: v,
1968 ty: id,
1969 results: &[],
1970 name: self.resolve.types[id].name.as_deref().unwrap(),
1971 });
1972 }
1973
1974 TypeDefKind::Option(t) => {
1975 self.write_variant_arms_to_memory(offset, addr, Int::U8, [None, Some(t)]);
1976 self.emit(&OptionLower {
1977 payload: t,
1978 ty: id,
1979 results: &[],
1980 });
1981 }
1982
1983 TypeDefKind::Result(r) => {
1984 self.write_variant_arms_to_memory(
1985 offset,
1986 addr,
1987 Int::U8,
1988 [r.ok.as_ref(), r.err.as_ref()],
1989 );
1990 self.emit(&ResultLower {
1991 result: r,
1992 ty: id,
1993 results: &[],
1994 });
1995 }
1996
1997 TypeDefKind::Enum(e) => {
1998 self.lower(ty);
1999 self.stack.push(addr);
2000 self.store_intrepr(offset, e.tag());
2001 }
2002
2003 TypeDefKind::Unknown => unreachable!(),
2004 TypeDefKind::FixedLengthList(element, size) => {
                    // Resembles `write_list_to_memory`: the block below
                    // writes each element, then the list itself is lowered
                    // directly into memory.
2006 self.push_block();
2007 self.emit(&IterElem { element });
2008 self.emit(&IterBasePointer);
2009 let elem_addr = self.stack.pop().unwrap();
2010 self.write_to_memory(element, elem_addr, offset);
2011 self.finish_block(0);
2012 self.stack.push(addr);
2013 self.emit(&FixedLengthListLowerToMemory {
2014 element,
2015 size: *size,
2016 id,
2017 });
2018 }
2019 TypeDefKind::Map(..) => todo!(),
2020 },
2021 }
2022 }
2023
2024 fn write_params_to_memory<'b>(
2025 &mut self,
2026 params: impl IntoIterator<Item = &'b Type, IntoIter: ExactSizeIterator>,
2027 addr: B::Operand,
2028 offset: ArchitectureSize,
2029 ) {
2030 self.write_fields_to_memory(params, addr, offset);
2031 }
2032
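    /// Stores the discriminant at `offset` and, per case, the payload at the
    /// shared payload offset (the tag size rounded up to the payloads'
    /// maximum alignment).
    ///
    /// Layout sketch for a hypothetical `result<u8, u32>` under the
    /// canonical ABI:
    ///
    /// ```text
    /// offset + 0 : tag (u8)
    /// offset + 4 : payload (u8 ok value or u32 err value)
    /// ```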
2033 fn write_variant_arms_to_memory<'b>(
2034 &mut self,
2035 offset: ArchitectureSize,
2036 addr: B::Operand,
2037 tag: Int,
2038 cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
2039 ) {
2040 let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
2041 for (i, ty) in cases.into_iter().enumerate() {
2042 self.push_block();
2043 self.emit(&Instruction::VariantPayloadName);
2044 let payload_name = self.stack.pop().unwrap();
2045 self.emit(&Instruction::I32Const { val: i as i32 });
2046 self.stack.push(addr.clone());
2047 self.store_intrepr(offset, tag);
2048 if let Some(ty) = ty {
2049 self.stack.push(payload_name.clone());
2050 self.write_to_memory(ty, addr.clone(), payload_offset);
2051 }
2052 self.finish_block(0);
2053 }
2054 }
2055
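    /// Writes the lowered `(pointer, length)` pair of a list or string into
    /// memory. Sketch of the resulting layout (pointer-sized fields, so 4
    /// bytes apart on wasm32):
    ///
    /// ```text
    /// offset + 0     : pointer to the contents
    /// offset + align : length
    /// ```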
2056 fn write_list_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        // After lowering the list there are two values on the stack, the
        // pointer and the length, which we write into memory: the pointer
        // goes at the low address and the length at the high address.
2060 self.lower(ty);
2061 self.stack.push(addr.clone());
2062 self.emit(&Instruction::LengthStore {
2063 offset: offset + self.bindgen.sizes().align(ty).into(),
2064 });
2065 self.stack.push(addr);
2066 self.emit(&Instruction::PointerStore { offset });
2067 }
2068
2069 fn write_fields_to_memory<'b>(
2070 &mut self,
2071 tys: impl IntoIterator<Item = &'b Type, IntoIter: ExactSizeIterator>,
2072 addr: B::Operand,
2073 offset: ArchitectureSize,
2074 ) {
2075 let tys = tys.into_iter();
2076 let fields = self
2077 .stack
2078 .drain(self.stack.len() - tys.len()..)
2079 .collect::<Vec<_>>();
2080 for ((field_offset, ty), op) in self
2081 .bindgen
2082 .sizes()
2083 .field_offsets(tys)
2084 .into_iter()
2085 .zip(fields)
2086 {
2087 self.stack.push(op);
2088 self.write_to_memory(ty, addr.clone(), offset + (field_offset));
2089 }
2090 }
2091
2092 fn lower_and_emit(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
2093 self.lower(ty);
2094 self.stack.push(addr);
2095 self.emit(instr);
2096 }
2097
2098 fn read_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
2099 use Instruction::*;
2100
2101 match *ty {
2102 Type::Bool => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
2103 Type::U8 => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
2104 Type::S8 => self.emit_and_lift(ty, addr, &I32Load8S { offset }),
2105 Type::U16 => self.emit_and_lift(ty, addr, &I32Load16U { offset }),
2106 Type::S16 => self.emit_and_lift(ty, addr, &I32Load16S { offset }),
2107 Type::U32 | Type::S32 | Type::Char => self.emit_and_lift(ty, addr, &I32Load { offset }),
2108 Type::U64 | Type::S64 => self.emit_and_lift(ty, addr, &I64Load { offset }),
2109 Type::F32 => self.emit_and_lift(ty, addr, &F32Load { offset }),
2110 Type::F64 => self.emit_and_lift(ty, addr, &F64Load { offset }),
2111 Type::String => self.read_list_from_memory(ty, addr, offset),
2112 Type::ErrorContext => self.emit_and_lift(ty, addr, &I32Load { offset }),
2113
2114 Type::Id(id) => match &self.resolve.types[id].kind {
2115 TypeDefKind::Type(t) => self.read_from_memory(t, addr, offset),
2116
2117 TypeDefKind::List(_) => self.read_list_from_memory(ty, addr, offset),
2118
2119 TypeDefKind::Future(_) | TypeDefKind::Stream(_) | TypeDefKind::Handle(_) => {
2120 self.emit_and_lift(ty, addr, &I32Load { offset })
2121 }
2122
2123 TypeDefKind::Resource => {
2124 todo!();
2125 }
2126
2127 // Read and lift each field individually, adjusting the offset
2128 // as we go along, then aggregate all the fields into the
2129 // record.
2130 TypeDefKind::Record(record) => {
2131 self.read_fields_from_memory(record.fields.iter().map(|f| &f.ty), addr, offset);
2132 self.emit(&RecordLift {
2133 record,
2134 ty: id,
2135 name: self.resolve.types[id].name.as_deref().unwrap(),
2136 });
2137 }
2138
2139 TypeDefKind::Tuple(tuple) => {
2140 self.read_fields_from_memory(&tuple.types, addr, offset);
2141 self.emit(&TupleLift { tuple, ty: id });
2142 }
2143
2144 TypeDefKind::Flags(f) => {
2145 match f.repr() {
2146 FlagsRepr::U8 => {
2147 self.stack.push(addr);
2148 self.load_intrepr(offset, Int::U8);
2149 }
2150 FlagsRepr::U16 => {
2151 self.stack.push(addr);
2152 self.load_intrepr(offset, Int::U16);
2153 }
2154 FlagsRepr::U32(n) => {
2155 for i in 0..n {
2156 self.stack.push(addr.clone());
2157 self.emit(&I32Load {
2158 offset: offset.add_bytes(i * 4),
2159 });
2160 }
2161 }
2162 }
2163 self.lift(ty);
2164 }
2165
2166 // Each case will get its own block, and we'll dispatch to the
2167 // right block based on the `i32.load` we initially perform. Each
2168 // individual block is pretty simple and just reads the payload type
2169 // from the corresponding offset if one is available.
2170 TypeDefKind::Variant(variant) => {
2171 self.read_variant_arms_from_memory(
2172 offset,
2173 addr,
2174 variant.tag(),
2175 variant.cases.iter().map(|c| c.ty.as_ref()),
2176 );
2177 self.emit(&VariantLift {
2178 variant,
2179 ty: id,
2180 name: self.resolve.types[id].name.as_deref().unwrap(),
2181 });
2182 }
2183
2184 TypeDefKind::Option(t) => {
2185 self.read_variant_arms_from_memory(offset, addr, Int::U8, [None, Some(t)]);
2186 self.emit(&OptionLift { payload: t, ty: id });
2187 }
2188
2189 TypeDefKind::Result(r) => {
2190 self.read_variant_arms_from_memory(
2191 offset,
2192 addr,
2193 Int::U8,
2194 [r.ok.as_ref(), r.err.as_ref()],
2195 );
2196 self.emit(&ResultLift { result: r, ty: id });
2197 }
2198
2199 TypeDefKind::Enum(e) => {
2200 self.stack.push(addr.clone());
2201 self.load_intrepr(offset, e.tag());
2202 self.lift(ty);
2203 }
2204
2205 TypeDefKind::Unknown => unreachable!(),
2206 TypeDefKind::FixedLengthList(ty, size) => {
2207 self.push_block();
2208 self.emit(&IterBasePointer);
2209 let elemaddr = self.stack.pop().unwrap();
2210 self.read_from_memory(ty, elemaddr, offset);
2211 self.finish_block(1);
2212 self.stack.push(addr.clone());
2213 self.emit(&FixedLengthListLiftFromMemory {
2214 element: ty,
2215 size: *size,
2216 id,
2217 });
2218 }
2219 TypeDefKind::Map(..) => todo!(),
2220 },
2221 }
2222 }
2223
2224 fn read_results_from_memory(
2225 &mut self,
2226 result: &Option<Type>,
2227 addr: B::Operand,
2228 offset: ArchitectureSize,
2229 ) {
2230 self.read_fields_from_memory(result, addr, offset)
2231 }
2232
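    /// Mirror of `write_variant_arms_to_memory`: loads the tag from
    /// `offset`, then emits one block per case that reads the payload (if
    /// any) from the shared payload offset.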
2233 fn read_variant_arms_from_memory<'b>(
2234 &mut self,
2235 offset: ArchitectureSize,
2236 addr: B::Operand,
2237 tag: Int,
2238 cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
2239 ) {
2240 self.stack.push(addr.clone());
2241 self.load_intrepr(offset, tag);
2242 let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
2243 for ty in cases {
2244 self.push_block();
2245 if let Some(ty) = ty {
2246 self.read_from_memory(ty, addr.clone(), payload_offset);
2247 }
2248 self.finish_block(ty.is_some() as usize);
2249 }
2250 }
2251
2252 fn read_list_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        // Read the pointer/length and then perform the standard lifting
        // process.
2255 self.stack.push(addr.clone());
2256 self.emit(&Instruction::PointerLoad { offset });
2257 self.stack.push(addr);
2258 self.emit(&Instruction::LengthLoad {
2259 offset: offset + self.bindgen.sizes().align(ty).into(),
2260 });
2261 self.lift(ty);
2262 }
2263
2264 fn read_fields_from_memory<'b>(
2265 &mut self,
2266 tys: impl IntoIterator<Item = &'b Type>,
2267 addr: B::Operand,
2268 offset: ArchitectureSize,
2269 ) {
2270 for (field_offset, ty) in self.bindgen.sizes().field_offsets(tys).iter() {
2271 self.read_from_memory(ty, addr.clone(), offset + (*field_offset));
2272 }
2273 }
2274
2275 fn emit_and_lift(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
2276 self.stack.push(addr);
2277 self.emit(instr);
2278 self.lift(ty);
2279 }
2280
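    /// Emits the load instruction matching a discriminant representation,
    /// e.g. `Int::U8` becomes `I32Load8U`, zero-extending the tag to an
    /// `i32` on the stack.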
2281 fn load_intrepr(&mut self, offset: ArchitectureSize, repr: Int) {
2282 self.emit(&match repr {
2283 Int::U64 => Instruction::I64Load { offset },
2284 Int::U32 => Instruction::I32Load { offset },
2285 Int::U16 => Instruction::I32Load16U { offset },
2286 Int::U8 => Instruction::I32Load8U { offset },
2287 });
2288 }
2289
2290 fn store_intrepr(&mut self, offset: ArchitectureSize, repr: Int) {
2291 self.emit(&match repr {
2292 Int::U64 => Instruction::I64Store { offset },
2293 Int::U32 => Instruction::I32Store { offset },
2294 Int::U16 => Instruction::I32Store16 { offset },
2295 Int::U8 => Instruction::I32Store8 { offset },
2296 });
2297 }
2298
2299 /// Runs the deallocation of `ty` for the operands currently on
2300 /// `self.stack`.
2301 ///
2302 /// This will pop the ABI items of `ty` from `self.stack`.
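    ///
    /// For example (illustrative): for a `list<string>` this emits a block
    /// that frees each element string's backing storage, followed by
    /// `GuestDeallocateList` for the list's own allocation.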
2303 fn deallocate(&mut self, ty: &Type, what: Deallocate) {
2304 use Instruction::*;
2305
2306 match *ty {
2307 Type::String => {
2308 self.emit(&Instruction::GuestDeallocateString);
2309 }
2310
2311 Type::Bool
2312 | Type::U8
2313 | Type::S8
2314 | Type::U16
2315 | Type::S16
2316 | Type::U32
2317 | Type::S32
2318 | Type::Char
2319 | Type::U64
2320 | Type::S64
2321 | Type::F32
2322 | Type::F64
2323 | Type::ErrorContext => {
2324 // No deallocation necessary, just discard the operand on the
2325 // stack.
2326 self.stack.pop().unwrap();
2327 }
2328
2329 Type::Id(id) => match &self.resolve.types[id].kind {
2330 TypeDefKind::Type(t) => self.deallocate(t, what),
2331
2332 TypeDefKind::List(element) => {
2333 self.push_block();
2334 self.emit(&IterBasePointer);
2335 let elemaddr = self.stack.pop().unwrap();
2336 self.deallocate_indirect(element, elemaddr, Default::default(), what);
2337 self.finish_block(0);
2338
2339 self.emit(&Instruction::GuestDeallocateList { element });
2340 }
2341
2342 TypeDefKind::Handle(Handle::Own(_))
2343 | TypeDefKind::Future(_)
2344 | TypeDefKind::Stream(_)
2345 if what.handles() =>
2346 {
2347 self.lift(ty);
2348 self.emit(&DropHandle { ty });
2349 }
2350
2351 TypeDefKind::Record(record) => {
2352 self.flat_for_each_record_type(
2353 ty,
2354 record.fields.iter().map(|f| &f.ty),
2355 |me, ty| me.deallocate(ty, what),
2356 );
2357 }
2358
2359 TypeDefKind::Tuple(tuple) => {
2360 self.flat_for_each_record_type(ty, tuple.types.iter(), |me, ty| {
2361 me.deallocate(ty, what)
2362 });
2363 }
2364
2365 TypeDefKind::Variant(variant) => {
2366 self.flat_for_each_variant_arm(
2367 ty,
2368 false,
2369 variant.cases.iter().map(|c| c.ty.as_ref()),
2370 |me, ty| me.deallocate(ty, what),
2371 );
2372 self.emit(&GuestDeallocateVariant {
2373 blocks: variant.cases.len(),
2374 });
2375 }
2376
2377 TypeDefKind::Option(t) => {
2378 self.flat_for_each_variant_arm(ty, false, [None, Some(t)], |me, ty| {
2379 me.deallocate(ty, what)
2380 });
2381 self.emit(&GuestDeallocateVariant { blocks: 2 });
2382 }
2383
2384 TypeDefKind::Result(e) => {
2385 self.flat_for_each_variant_arm(
2386 ty,
2387 false,
2388 [e.ok.as_ref(), e.err.as_ref()],
2389 |me, ty| me.deallocate(ty, what),
2390 );
2391 self.emit(&GuestDeallocateVariant { blocks: 2 });
2392 }
2393
                // Nothing to deallocate for these; just discard the operand
                // on the stack.
2395 TypeDefKind::Flags(_)
2396 | TypeDefKind::Enum(_)
2397 | TypeDefKind::Future(_)
2398 | TypeDefKind::Stream(_)
2399 | TypeDefKind::Handle(Handle::Own(_))
2400 | TypeDefKind::Handle(Handle::Borrow(_)) => {
2401 self.stack.pop().unwrap();
2402 }
2403
2404 TypeDefKind::Resource => unreachable!(),
2405 TypeDefKind::Unknown => unreachable!(),
2406
2407 TypeDefKind::FixedLengthList(..) => todo!(),
2408 TypeDefKind::Map(..) => todo!(),
2409 },
2410 }
2411 }
2412
2413 fn deallocate_indirect(
2414 &mut self,
2415 ty: &Type,
2416 addr: B::Operand,
2417 offset: ArchitectureSize,
2418 what: Deallocate,
2419 ) {
2420 use Instruction::*;
2421
2422 // No need to execute any instructions if this type itself doesn't
2423 // require any form of post-return.
2424 if !needs_deallocate(self.resolve, ty, what) {
2425 return;
2426 }
2427
2428 match *ty {
2429 Type::String => {
2430 self.stack.push(addr.clone());
2431 self.emit(&Instruction::PointerLoad { offset });
2432 self.stack.push(addr);
2433 self.emit(&Instruction::LengthLoad {
2434 offset: offset + self.bindgen.sizes().align(ty).into(),
2435 });
2436 self.deallocate(ty, what);
2437 }
2438
2439 Type::Bool
2440 | Type::U8
2441 | Type::S8
2442 | Type::U16
2443 | Type::S16
2444 | Type::U32
2445 | Type::S32
2446 | Type::Char
2447 | Type::U64
2448 | Type::S64
2449 | Type::F32
2450 | Type::F64
2451 | Type::ErrorContext => {}
2452
2453 Type::Id(id) => match &self.resolve.types[id].kind {
2454 TypeDefKind::Type(t) => self.deallocate_indirect(t, addr, offset, what),
2455
2456 TypeDefKind::List(_) => {
2457 self.stack.push(addr.clone());
2458 self.emit(&Instruction::PointerLoad { offset });
2459 self.stack.push(addr);
2460 self.emit(&Instruction::LengthLoad {
2461 offset: offset + self.bindgen.sizes().align(ty).into(),
2462 });
2463
2464 self.deallocate(ty, what);
2465 }
2466
2467 TypeDefKind::Handle(Handle::Own(_))
2468 | TypeDefKind::Future(_)
2469 | TypeDefKind::Stream(_)
2470 if what.handles() =>
2471 {
2472 self.read_from_memory(ty, addr, offset);
2473 self.emit(&DropHandle { ty });
2474 }
2475
2476 TypeDefKind::Handle(Handle::Own(_)) => unreachable!(),
2477 TypeDefKind::Handle(Handle::Borrow(_)) => unreachable!(),
2478 TypeDefKind::Resource => unreachable!(),
2479
2480 TypeDefKind::Record(record) => {
2481 self.deallocate_indirect_fields(
2482 &record.fields.iter().map(|f| f.ty).collect::<Vec<_>>(),
2483 addr,
2484 offset,
2485 what,
2486 );
2487 }
2488
2489 TypeDefKind::Tuple(tuple) => {
2490 self.deallocate_indirect_fields(&tuple.types, addr, offset, what);
2491 }
2492
2493 TypeDefKind::Flags(_) => {}
2494
2495 TypeDefKind::Variant(variant) => {
2496 self.deallocate_indirect_variant(
2497 offset,
2498 addr,
2499 variant.tag(),
2500 variant.cases.iter().map(|c| c.ty.as_ref()),
2501 what,
2502 );
2503 self.emit(&GuestDeallocateVariant {
2504 blocks: variant.cases.len(),
2505 });
2506 }
2507
2508 TypeDefKind::Option(t) => {
2509 self.deallocate_indirect_variant(offset, addr, Int::U8, [None, Some(t)], what);
2510 self.emit(&GuestDeallocateVariant { blocks: 2 });
2511 }
2512
2513 TypeDefKind::Result(e) => {
2514 self.deallocate_indirect_variant(
2515 offset,
2516 addr,
2517 Int::U8,
2518 [e.ok.as_ref(), e.err.as_ref()],
2519 what,
2520 );
2521 self.emit(&GuestDeallocateVariant { blocks: 2 });
2522 }
2523
2524 TypeDefKind::Enum(_) => {}
2525
2526 TypeDefKind::Future(_) => unreachable!(),
2527 TypeDefKind::Stream(_) => unreachable!(),
2528 TypeDefKind::Unknown => unreachable!(),
2529 TypeDefKind::FixedLengthList(_, _) => {}
2530 TypeDefKind::Map(..) => todo!(),
2531 },
2532 }
2533 }
2534
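    /// Loads the tag at `offset` and emits one block per case, deallocating
    /// that case's payload (if any) at the shared payload offset; mirrors
    /// `read_variant_arms_from_memory`.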
2535 fn deallocate_indirect_variant<'b>(
2536 &mut self,
2537 offset: ArchitectureSize,
2538 addr: B::Operand,
2539 tag: Int,
2540 cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
2541 what: Deallocate,
2542 ) {
2543 self.stack.push(addr.clone());
2544 self.load_intrepr(offset, tag);
2545 let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
2546 for ty in cases {
2547 self.push_block();
2548 if let Some(ty) = ty {
2549 self.deallocate_indirect(ty, addr.clone(), payload_offset, what);
2550 }
2551 self.finish_block(0);
2552 }
2553 }
2554
2555 fn deallocate_indirect_fields(
2556 &mut self,
2557 tys: &[Type],
2558 addr: B::Operand,
2559 offset: ArchitectureSize,
2560 what: Deallocate,
2561 ) {
2562 for (field_offset, ty) in self.bindgen.sizes().field_offsets(tys) {
2563 self.deallocate_indirect(ty, addr.clone(), offset + (field_offset), what);
2564 }
2565 }
2566}
2567
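/// Computes the `Bitcast` required to reinterpret a core wasm value of type
/// `from` as type `to` when unifying variant arms.
///
/// A few results derived from the table below:
///
/// ```text
/// cast(I32, I64)          => Bitcast::I32ToI64
/// cast(F32, PointerOrI64) => Sequence([F32ToI64, I64ToP64])
/// cast(Length, F32)       => Sequence([LToI32, I32ToF32])
/// ```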
2568fn cast(from: WasmType, to: WasmType) -> Bitcast {
2569 use WasmType::*;
2570
2571 match (from, to) {
2572 (I32, I32)
2573 | (I64, I64)
2574 | (F32, F32)
2575 | (F64, F64)
2576 | (Pointer, Pointer)
2577 | (PointerOrI64, PointerOrI64)
2578 | (Length, Length) => Bitcast::None,
2579
2580 (I32, I64) => Bitcast::I32ToI64,
2581 (F32, I32) => Bitcast::F32ToI32,
2582 (F64, I64) => Bitcast::F64ToI64,
2583
2584 (I64, I32) => Bitcast::I64ToI32,
2585 (I32, F32) => Bitcast::I32ToF32,
2586 (I64, F64) => Bitcast::I64ToF64,
2587
2588 (F32, I64) => Bitcast::F32ToI64,
2589 (I64, F32) => Bitcast::I64ToF32,
2590
2591 (I64, PointerOrI64) => Bitcast::I64ToP64,
2592 (Pointer, PointerOrI64) => Bitcast::PToP64,
2593 (_, PointerOrI64) => {
2594 Bitcast::Sequence(Box::new([cast(from, I64), cast(I64, PointerOrI64)]))
2595 }
2596
2597 (PointerOrI64, I64) => Bitcast::P64ToI64,
2598 (PointerOrI64, Pointer) => Bitcast::P64ToP,
2599 (PointerOrI64, _) => Bitcast::Sequence(Box::new([cast(PointerOrI64, I64), cast(I64, to)])),
2600
2601 (I32, Pointer) => Bitcast::I32ToP,
2602 (Pointer, I32) => Bitcast::PToI32,
2603 (I32, Length) => Bitcast::I32ToL,
2604 (Length, I32) => Bitcast::LToI32,
2605 (I64, Length) => Bitcast::I64ToL,
2606 (Length, I64) => Bitcast::LToI64,
2607 (Pointer, Length) => Bitcast::PToL,
2608 (Length, Pointer) => Bitcast::LToP,
2609
2610 (F32, Pointer | Length) => Bitcast::Sequence(Box::new([cast(F32, I32), cast(I32, to)])),
2611 (Pointer | Length, F32) => Bitcast::Sequence(Box::new([cast(from, I32), cast(I32, F32)])),
2612
2613 (F32, F64)
2614 | (F64, F32)
2615 | (F64, I32)
2616 | (I32, F64)
2617 | (Pointer | Length, I64 | F64)
2618 | (I64 | F64, Pointer | Length) => {
2619 unreachable!("Don't know how to bitcast from {:?} to {:?}", from, to);
2620 }
2621 }
2622}
2623
/// Flattens a type into its sequence of core wasm types.
///
/// It is sometimes necessary to restrict the maximum number of flat
/// parameters dynamically, for example during an async guest import call
/// (flat params are limited to 4). Returns `None` if `ty` does not fit
/// within `max_params` flat values.
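///
/// Illustrative results (`string_ty` is a stand-in for a resolved `string`
/// type):
///
/// ```text
/// flat_types(resolve, &Type::U64, None)     => Some(vec![I64])
/// flat_types(resolve, &string_ty, Some(1))  => None   // string needs 2 slots
/// ```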
2628fn flat_types(resolve: &Resolve, ty: &Type, max_params: Option<usize>) -> Option<Vec<WasmType>> {
2629 let max_params = max_params.unwrap_or(MAX_FLAT_PARAMS);
2630 let mut storage = iter::repeat_n(WasmType::I32, max_params).collect::<Vec<_>>();
2631 let mut flat = FlatTypes::new(storage.as_mut_slice());
2632 resolve.push_flat(ty, &mut flat).then_some(flat.to_vec())
2633}