wit_bindgen_core/abi.rs
1use std::fmt;
2use std::iter;
3
4pub use wit_parser::abi::{AbiVariant, FlatTypes, WasmSignature, WasmType};
5use wit_parser::{
6 Alignment, ArchitectureSize, ElementInfo, Enum, Flags, FlagsRepr, Function, Handle, Int,
7 Record, Resolve, Result_, SizeAlign, Tuple, Type, TypeDefKind, TypeId, Variant, align_to_arch,
8};
9
10// Helper macro for defining instructions without having to have tons of
11// exhaustive `match` statements to update
12macro_rules! def_instruction {
13 (
14 $( #[$enum_attr:meta] )*
15 pub enum $name:ident<'a> {
16 $(
17 $( #[$attr:meta] )*
18 $variant:ident $( {
19 $($field:ident : $field_ty:ty $(,)* )*
20 } )?
21 :
22 [$num_popped:expr] => [$num_pushed:expr],
23 )*
24 }
25 ) => {
26 $( #[$enum_attr] )*
27 pub enum $name<'a> {
28 $(
29 $( #[$attr] )*
30 $variant $( {
31 $(
32 $field : $field_ty,
33 )*
34 } )? ,
35 )*
36 }
37
38 impl $name<'_> {
39 /// How many operands does this instruction pop from the stack?
40 #[allow(unused_variables, reason = "match arms bind fields for exhaustiveness, not usage")]
41 pub fn operands_len(&self) -> usize {
42 match self {
43 $(
44 Self::$variant $( {
45 $(
46 $field,
47 )*
48 } )? => $num_popped,
49 )*
50 }
51 }
52
53 /// How many results does this instruction push onto the stack?
54 #[allow(unused_variables, reason = "match arms bind fields for exhaustiveness, not usage")]
55 pub fn results_len(&self) -> usize {
56 match self {
57 $(
58 Self::$variant $( {
59 $(
60 $field,
61 )*
62 } )? => $num_pushed,
63 )*
64 }
65 }
66 }
67 };
68}
69
70def_instruction! {
71 #[derive(Debug)]
72 pub enum Instruction<'a> {
73 /// Acquires the specified parameter and places it on the stack.
74 /// Depending on the context this may refer to wasm parameters or
75 /// interface types parameters.
76 GetArg { nth: usize } : [0] => [1],
77
78 // Integer const/manipulation instructions
79
80 /// Pushes the constant `val` onto the stack.
81 I32Const { val: i32 } : [0] => [1],
82 /// Casts the top N items on the stack using the `Bitcast` enum
83 /// provided. Consumes the same number of operands that this produces.
84 Bitcasts { casts: &'a [Bitcast] } : [casts.len()] => [casts.len()],
85 /// Pushes a number of constant zeros for each wasm type on the stack.
86 ConstZero { tys: &'a [WasmType] } : [0] => [tys.len()],
87
88 // Memory load/store instructions
89
90 /// Pops a pointer from the stack and loads a little-endian `i32` from
91 /// it, using the specified constant offset.
92 I32Load { offset: ArchitectureSize } : [1] => [1],
93 /// Pops a pointer from the stack and loads a little-endian `i8` from
94 /// it, using the specified constant offset. The value loaded is the
95 /// zero-extended to 32-bits
96 I32Load8U { offset: ArchitectureSize } : [1] => [1],
97 /// Pops a pointer from the stack and loads a little-endian `i8` from
98 /// it, using the specified constant offset. The value loaded is the
99 /// sign-extended to 32-bits
100 I32Load8S { offset: ArchitectureSize } : [1] => [1],
101 /// Pops a pointer from the stack and loads a little-endian `i16` from
102 /// it, using the specified constant offset. The value loaded is the
103 /// zero-extended to 32-bits
104 I32Load16U { offset: ArchitectureSize } : [1] => [1],
105 /// Pops a pointer from the stack and loads a little-endian `i16` from
106 /// it, using the specified constant offset. The value loaded is the
107 /// sign-extended to 32-bits
108 I32Load16S { offset: ArchitectureSize } : [1] => [1],
109 /// Pops a pointer from the stack and loads a little-endian `i64` from
110 /// it, using the specified constant offset.
111 I64Load { offset: ArchitectureSize } : [1] => [1],
112 /// Pops a pointer from the stack and loads a little-endian `f32` from
113 /// it, using the specified constant offset.
114 F32Load { offset: ArchitectureSize } : [1] => [1],
115 /// Pops a pointer from the stack and loads a little-endian `f64` from
116 /// it, using the specified constant offset.
117 F64Load { offset: ArchitectureSize } : [1] => [1],
118
119 /// Like `I32Load` or `I64Load`, but for loading pointer values.
120 PointerLoad { offset: ArchitectureSize } : [1] => [1],
121 /// Like `I32Load` or `I64Load`, but for loading array length values.
122 LengthLoad { offset: ArchitectureSize } : [1] => [1],
123
124 /// Pops a pointer from the stack and then an `i32` value.
125 /// Stores the value in little-endian at the pointer specified plus the
126 /// constant `offset`.
127 I32Store { offset: ArchitectureSize } : [2] => [0],
128 /// Pops a pointer from the stack and then an `i32` value.
129 /// Stores the low 8 bits of the value in little-endian at the pointer
130 /// specified plus the constant `offset`.
131 I32Store8 { offset: ArchitectureSize } : [2] => [0],
132 /// Pops a pointer from the stack and then an `i32` value.
133 /// Stores the low 16 bits of the value in little-endian at the pointer
134 /// specified plus the constant `offset`.
135 I32Store16 { offset: ArchitectureSize } : [2] => [0],
136 /// Pops a pointer from the stack and then an `i64` value.
137 /// Stores the value in little-endian at the pointer specified plus the
138 /// constant `offset`.
139 I64Store { offset: ArchitectureSize } : [2] => [0],
140 /// Pops a pointer from the stack and then an `f32` value.
141 /// Stores the value in little-endian at the pointer specified plus the
142 /// constant `offset`.
143 F32Store { offset: ArchitectureSize } : [2] => [0],
144 /// Pops a pointer from the stack and then an `f64` value.
145 /// Stores the value in little-endian at the pointer specified plus the
146 /// constant `offset`.
147 F64Store { offset: ArchitectureSize } : [2] => [0],
148
149 /// Like `I32Store` or `I64Store`, but for storing pointer values.
150 PointerStore { offset: ArchitectureSize } : [2] => [0],
151 /// Like `I32Store` or `I64Store`, but for storing array length values.
152 LengthStore { offset: ArchitectureSize } : [2] => [0],
153
154 // Scalar lifting/lowering
155
156 /// Converts an interface type `char` value to a 32-bit integer
157 /// representing the unicode scalar value.
158 I32FromChar : [1] => [1],
159 /// Converts an interface type `u64` value to a wasm `i64`.
160 I64FromU64 : [1] => [1],
161 /// Converts an interface type `s64` value to a wasm `i64`.
162 I64FromS64 : [1] => [1],
163 /// Converts an interface type `u32` value to a wasm `i32`.
164 I32FromU32 : [1] => [1],
165 /// Converts an interface type `s32` value to a wasm `i32`.
166 I32FromS32 : [1] => [1],
167 /// Converts an interface type `u16` value to a wasm `i32`.
168 I32FromU16 : [1] => [1],
169 /// Converts an interface type `s16` value to a wasm `i32`.
170 I32FromS16 : [1] => [1],
171 /// Converts an interface type `u8` value to a wasm `i32`.
172 I32FromU8 : [1] => [1],
173 /// Converts an interface type `s8` value to a wasm `i32`.
174 I32FromS8 : [1] => [1],
175 /// Conversion an interface type `f32` value to a wasm `f32`.
176 ///
177 /// This may be a noop for some implementations, but it's here in case the
178 /// native language representation of `f32` is different than the wasm
179 /// representation of `f32`.
180 CoreF32FromF32 : [1] => [1],
181 /// Conversion an interface type `f64` value to a wasm `f64`.
182 ///
183 /// This may be a noop for some implementations, but it's here in case the
184 /// native language representation of `f64` is different than the wasm
185 /// representation of `f64`.
186 CoreF64FromF64 : [1] => [1],
187
188 /// Converts a native wasm `i32` to an interface type `s8`.
189 ///
190 /// This will truncate the upper bits of the `i32`.
191 S8FromI32 : [1] => [1],
192 /// Converts a native wasm `i32` to an interface type `u8`.
193 ///
194 /// This will truncate the upper bits of the `i32`.
195 U8FromI32 : [1] => [1],
196 /// Converts a native wasm `i32` to an interface type `s16`.
197 ///
198 /// This will truncate the upper bits of the `i32`.
199 S16FromI32 : [1] => [1],
200 /// Converts a native wasm `i32` to an interface type `u16`.
201 ///
202 /// This will truncate the upper bits of the `i32`.
203 U16FromI32 : [1] => [1],
204 /// Converts a native wasm `i32` to an interface type `s32`.
205 S32FromI32 : [1] => [1],
206 /// Converts a native wasm `i32` to an interface type `u32`.
207 U32FromI32 : [1] => [1],
208 /// Converts a native wasm `i64` to an interface type `s64`.
209 S64FromI64 : [1] => [1],
210 /// Converts a native wasm `i64` to an interface type `u64`.
211 U64FromI64 : [1] => [1],
212 /// Converts a native wasm `i32` to an interface type `char`.
213 ///
214 /// It's safe to assume that the `i32` is indeed a valid unicode code point.
215 CharFromI32 : [1] => [1],
216 /// Converts a native wasm `f32` to an interface type `f32`.
217 F32FromCoreF32 : [1] => [1],
218 /// Converts a native wasm `f64` to an interface type `f64`.
219 F64FromCoreF64 : [1] => [1],
220
221 /// Creates a `bool` from an `i32` input, trapping if the `i32` isn't
222 /// zero or one.
223 BoolFromI32 : [1] => [1],
224 /// Creates an `i32` from a `bool` input, must return 0 or 1.
225 I32FromBool : [1] => [1],
226
227 // lists
228
229 /// Lowers a list where the element's layout in the native language is
230 /// expected to match the canonical ABI definition of interface types.
231 ///
232 /// Pops a list value from the stack and pushes the pointer/length onto
233 /// the stack. If `realloc` is set to `Some` then this is expected to
234 /// *consume* the list which means that the data needs to be copied. An
235 /// allocation/copy is expected when:
236 ///
237 /// * A host is calling a wasm export with a list (it needs to copy the
238 /// list in to the callee's module, allocating space with `realloc`)
239 /// * A wasm export is returning a list (it's expected to use `realloc`
240 /// to give ownership of the list to the caller.
241 /// * A host is returning a list in a import definition, meaning that
242 /// space needs to be allocated in the caller with `realloc`).
243 ///
244 /// A copy does not happen (e.g. `realloc` is `None`) when:
245 ///
246 /// * A wasm module calls an import with the list. In this situation
247 /// it's expected the caller will know how to access this module's
248 /// memory (e.g. the host has raw access or wasm-to-wasm communication
249 /// would copy the list).
250 ///
251 /// If `realloc` is `Some` then the adapter is not responsible for
252 /// cleaning up this list because the other end is receiving the
253 /// allocation. If `realloc` is `None` then the adapter is responsible
254 /// for cleaning up any temporary allocation it created, if any.
255 ListCanonLower {
256 element: &'a Type,
257 realloc: Option<&'a str>,
258 } : [1] => [2],
259
260 /// Same as `ListCanonLower`, but used for strings
261 StringLower {
262 realloc: Option<&'a str>,
263 } : [1] => [2],
264
265 /// Lowers a list where the element's layout in the native language is
266 /// not expected to match the canonical ABI definition of interface
267 /// types.
268 ///
269 /// Pops a list value from the stack and pushes the pointer/length onto
270 /// the stack. This operation also pops a block from the block stack
271 /// which is used as the iteration body of writing each element of the
272 /// list consumed.
273 ///
274 /// The `realloc` field here behaves the same way as `ListCanonLower`.
275 /// It's only set to `None` when a wasm module calls a declared import.
276 /// Otherwise lowering in other contexts requires allocating memory for
277 /// the receiver to own.
278 ListLower {
279 element: &'a Type,
280 realloc: Option<&'a str>,
281 } : [1] => [2],
282
283 /// Lifts a list which has a canonical representation into an interface
284 /// types value.
285 ///
286 /// The term "canonical" representation here means that the
287 /// representation of the interface types value in the native language
288 /// exactly matches the canonical ABI definition of the type.
289 ///
290 /// This will consume two `i32` values from the stack, a pointer and a
291 /// length, and then produces an interface value list.
292 ListCanonLift {
293 element: &'a Type,
294 ty: TypeId,
295 } : [2] => [1],
296
297 /// Same as `ListCanonLift`, but used for strings
298 StringLift : [2] => [1],
299
300 /// Lifts a list which into an interface types value.
301 ///
302 /// This will consume two `i32` values from the stack, a pointer and a
303 /// length, and then produces an interface value list.
304 ///
305 /// This will also pop a block from the block stack which is how to
306 /// read each individual element from the list.
307 ListLift {
308 element: &'a Type,
309 ty: TypeId,
310 } : [2] => [1],
311
312 /// Pops all fields for a fixed list off the stack and then composes them
313 /// into an array.
314 FixedLengthListLift {
315 element: &'a Type,
316 size: u32,
317 id: TypeId,
318 } : [*size as usize] => [1],
319
320 /// Pops an array off the stack, decomposes the elements and then pushes them onto the stack.
321 FixedLengthListLower {
322 element: &'a Type,
323 size: u32,
324 id: TypeId,
325 } : [1] => [*size as usize],
326
327 /// Pops an array and an address off the stack, passes each element to a block storing it
328 FixedLengthListLowerToMemory {
329 element: &'a Type,
330 size: u32,
331 id: TypeId,
332 } : [2] => [0],
333
334 /// Pops base address, pushes an array
335 ///
336 /// This will also pop a block from the block stack which is how to
337 /// read each individual element from the list.
338 FixedLengthListLiftFromMemory {
339 element: &'a Type,
340 size: u32,
341 id: TypeId,
342 } : [1] => [1],
343
344
345 /// Pushes an operand onto the stack representing the list item from
346 /// each iteration of the list.
347 ///
348 /// This is only used inside of blocks related to lowering lists.
349 IterElem { element: &'a Type } : [0] => [1],
350
351 /// Pushes an operand onto the stack representing the base pointer of
352 /// the next element in a list.
353 ///
354 /// This is used for both lifting and lowering lists.
355 IterBasePointer : [0] => [1],
356
357 // records and tuples
358
359 /// Pops a record value off the stack, decomposes the record to all of
360 /// its fields, and then pushes the fields onto the stack.
361 RecordLower {
362 record: &'a Record,
363 name: &'a str,
364 ty: TypeId,
365 } : [1] => [record.fields.len()],
366
367 /// Pops all fields for a record off the stack and then composes them
368 /// into a record.
369 RecordLift {
370 record: &'a Record,
371 name: &'a str,
372 ty: TypeId,
373 } : [record.fields.len()] => [1],
374
375 /// Create an `i32` from a handle.
376 HandleLower {
377 handle: &'a Handle,
378 name: &'a str,
379 ty: TypeId,
380 } : [1] => [1],
381
382 /// Create a handle from an `i32`.
383 HandleLift {
384 handle: &'a Handle,
385 name: &'a str,
386 ty: TypeId,
387 } : [1] => [1],
388
389 /// Create an `i32` from a future.
390 FutureLower {
391 payload: &'a Option<Type>,
392 ty: TypeId,
393 } : [1] => [1],
394
395 /// Create a future from an `i32`.
396 FutureLift {
397 payload: &'a Option<Type>,
398 ty: TypeId,
399 } : [1] => [1],
400
401 /// Create an `i32` from a stream.
402 StreamLower {
403 payload: &'a Option<Type>,
404 ty: TypeId,
405 } : [1] => [1],
406
407 /// Create a stream from an `i32`.
408 StreamLift {
409 payload: &'a Option<Type>,
410 ty: TypeId,
411 } : [1] => [1],
412
413 /// Create an `i32` from an error-context.
414 ErrorContextLower : [1] => [1],
415
416 /// Create a error-context from an `i32`.
417 ErrorContextLift : [1] => [1],
418
419 /// Pops a tuple value off the stack, decomposes the tuple to all of
420 /// its fields, and then pushes the fields onto the stack.
421 TupleLower {
422 tuple: &'a Tuple,
423 ty: TypeId,
424 } : [1] => [tuple.types.len()],
425
426 /// Pops all fields for a tuple off the stack and then composes them
427 /// into a tuple.
428 TupleLift {
429 tuple: &'a Tuple,
430 ty: TypeId,
431 } : [tuple.types.len()] => [1],
432
433 /// Converts a language-specific record-of-bools to a list of `i32`.
434 FlagsLower {
435 flags: &'a Flags,
436 name: &'a str,
437 ty: TypeId,
438 } : [1] => [flags.repr().count()],
439 /// Converts a list of native wasm `i32` to a language-specific
440 /// record-of-bools.
441 FlagsLift {
442 flags: &'a Flags,
443 name: &'a str,
444 ty: TypeId,
445 } : [flags.repr().count()] => [1],
446
447 // variants
448
449 /// This is a special instruction used for `VariantLower`
450 /// instruction to determine the name of the payload, if present, to use
451 /// within each block.
452 ///
453 /// Each sub-block will have this be the first instruction, and if it
454 /// lowers a payload it will expect something bound to this name.
455 VariantPayloadName : [0] => [1],
456
457 /// Pops a variant off the stack as well as `ty.cases.len()` blocks
458 /// from the code generator. Uses each of those blocks and the value
459 /// from the stack to produce `nresults` of items.
460 VariantLower {
461 variant: &'a Variant,
462 name: &'a str,
463 ty: TypeId,
464 results: &'a [WasmType],
465 } : [1] => [results.len()],
466
467 /// Pops an `i32` off the stack as well as `ty.cases.len()` blocks
468 /// from the code generator. Uses each of those blocks and the value
469 /// from the stack to produce a final variant.
470 VariantLift {
471 variant: &'a Variant,
472 name: &'a str,
473 ty: TypeId,
474 } : [1] => [1],
475
476 /// Pops an enum off the stack and pushes the `i32` representation.
477 EnumLower {
478 enum_: &'a Enum,
479 name: &'a str,
480 ty: TypeId,
481 } : [1] => [1],
482
483 /// Pops an `i32` off the stack and lifts it into the `enum` specified.
484 EnumLift {
485 enum_: &'a Enum,
486 name: &'a str,
487 ty: TypeId,
488 } : [1] => [1],
489
490 /// Specialization of `VariantLower` for specifically `option<T>` types,
491 /// otherwise behaves the same as `VariantLower` (e.g. two blocks for
492 /// the two cases.
493 OptionLower {
494 payload: &'a Type,
495 ty: TypeId,
496 results: &'a [WasmType],
497 } : [1] => [results.len()],
498
499 /// Specialization of `VariantLift` for specifically the `option<T>`
500 /// type. Otherwise behaves the same as the `VariantLift` instruction
501 /// with two blocks for the lift.
502 OptionLift {
503 payload: &'a Type,
504 ty: TypeId,
505 } : [1] => [1],
506
507 /// Specialization of `VariantLower` for specifically `result<T, E>`
508 /// types, otherwise behaves the same as `VariantLower` (e.g. two blocks
509 /// for the two cases.
510 ResultLower {
511 result: &'a Result_
512 ty: TypeId,
513 results: &'a [WasmType],
514 } : [1] => [results.len()],
515
516 /// Specialization of `VariantLift` for specifically the `result<T,
517 /// E>` type. Otherwise behaves the same as the `VariantLift`
518 /// instruction with two blocks for the lift.
519 ResultLift {
520 result: &'a Result_,
521 ty: TypeId,
522 } : [1] => [1],
523
524 // calling/control flow
525
526 /// Represents a call to a raw WebAssembly API. The module/name are
527 /// provided inline as well as the types if necessary.
528 CallWasm {
529 name: &'a str,
530 sig: &'a WasmSignature,
531 } : [sig.params.len()] => [sig.results.len()],
532
533 /// Same as `CallWasm`, except the dual where an interface is being
534 /// called rather than a raw wasm function.
535 ///
536 /// Note that this will be used for async functions, and `async_`
537 /// indicates whether the function should be invoked in an async
538 /// fashion.
539 CallInterface {
540 func: &'a Function,
541 async_: bool,
542 } : [func.params.len()] => [usize::from(func.result.is_some())],
543
544 /// Returns `amt` values on the stack. This is always the last
545 /// instruction.
546 Return { amt: usize, func: &'a Function } : [*amt] => [0],
547
548 /// Calls the `realloc` function specified in a malloc-like fashion
549 /// allocating `size` bytes with alignment `align`.
550 ///
551 /// Pushes the returned pointer onto the stack.
552 Malloc {
553 realloc: &'static str,
554 size: ArchitectureSize,
555 align: Alignment,
556 } : [0] => [1],
557
558 /// Used exclusively for guest-code generation this indicates that
559 /// the standard memory deallocation function needs to be invoked with
560 /// the specified parameters.
561 ///
562 /// This will pop a pointer from the stack and push nothing.
563 GuestDeallocate {
564 size: ArchitectureSize,
565 align: Alignment,
566 } : [1] => [0],
567
568 /// Used exclusively for guest-code generation this indicates that
569 /// a string is being deallocated. The ptr/length are on the stack and
570 /// are poppped off and used to deallocate the string.
571 GuestDeallocateString : [2] => [0],
572
573 /// Used exclusively for guest-code generation this indicates that
574 /// a list is being deallocated. The ptr/length are on the stack and
575 /// are poppped off and used to deallocate the list.
576 ///
577 /// This variant also pops a block off the block stack to be used as the
578 /// body of the deallocation loop.
579 GuestDeallocateList {
580 element: &'a Type,
581 } : [2] => [0],
582
583 /// Used exclusively for guest-code generation this indicates that
584 /// a variant is being deallocated. The integer discriminant is popped
585 /// off the stack as well as `blocks` number of blocks popped from the
586 /// blocks stack. The variant is used to select, at runtime, which of
587 /// the blocks is executed to deallocate the variant.
588 GuestDeallocateVariant {
589 blocks: usize,
590 } : [1] => [0],
591
592 /// Deallocates the language-specific handle representation on the top
593 /// of the stack. Used for async imports.
594 DropHandle { ty: &'a Type } : [1] => [0],
595
596 /// Call `task.return` for an async-lifted export.
597 ///
598 /// This will call core wasm import `name` which will be mapped to
599 /// `task.return` later on. The function given has `params` as its
600 /// parameters and it will return no results. This is used to pass the
601 /// lowered representation of a function's results to `task.return`.
602 AsyncTaskReturn { name: &'a str, params: &'a [WasmType] } : [params.len()] => [0],
603
604 /// Force the evaluation of the specified number of expressions and push
605 /// the results to the stack.
606 ///
607 /// This is useful prior to disposing of temporary variables and/or
608 /// allocations which are referenced by one or more not-yet-evaluated
609 /// expressions.
610 Flush { amt: usize } : [*amt] => [*amt],
611 }
612}
613
614#[derive(Debug, PartialEq)]
615pub enum Bitcast {
616 // Upcasts
617 F32ToI32,
618 F64ToI64,
619 I32ToI64,
620 F32ToI64,
621
622 // Downcasts
623 I32ToF32,
624 I64ToF64,
625 I64ToI32,
626 I64ToF32,
627
628 // PointerOrI64 conversions. These preserve provenance when the source
629 // or destination is a pointer value.
630 //
631 // These are used when pointer values are being stored in
632 // (ToP64) and loaded out of (P64To) PointerOrI64 values, so they
633 // always have to preserve provenance when the value being loaded or
634 // stored is a pointer.
635 P64ToI64,
636 I64ToP64,
637 P64ToP,
638 PToP64,
639
640 // Pointer<->number conversions. These do not preserve provenance.
641 //
642 // These are used when integer or floating-point values are being stored in
643 // (I32ToP/etc.) and loaded out of (PToI32/etc.) pointer values, so they
644 // never have any provenance to preserve.
645 I32ToP,
646 PToI32,
647 PToL,
648 LToP,
649
650 // Number<->Number conversions.
651 I32ToL,
652 LToI32,
653 I64ToL,
654 LToI64,
655
656 // Multiple conversions in sequence.
657 Sequence(Box<[Bitcast; 2]>),
658
659 None,
660}
661
662/// Whether the glue code surrounding a call is lifting arguments and lowering
663/// results or vice versa.
664#[derive(Clone, Copy, PartialEq, Eq)]
665pub enum LiftLower {
666 /// When the glue code lifts arguments and lowers results.
667 ///
668 /// ```text
669 /// Wasm --lift-args--> SourceLanguage; call; SourceLanguage --lower-results--> Wasm
670 /// ```
671 LiftArgsLowerResults,
672 /// When the glue code lowers arguments and lifts results.
673 ///
674 /// ```text
675 /// SourceLanguage --lower-args--> Wasm; call; Wasm --lift-results--> SourceLanguage
676 /// ```
677 LowerArgsLiftResults,
678}
679
680/// Trait for language implementors to use to generate glue code between native
681/// WebAssembly signatures and interface types signatures.
682///
683/// This is used as an implementation detail in interpreting the ABI between
684/// interface types and wasm types. Eventually this will be driven by interface
685/// types adapters themselves, but for now the ABI of a function dictates what
686/// instructions are fed in.
687///
688/// Types implementing `Bindgen` are incrementally fed `Instruction` values to
689/// generate code for. Instructions operate like a stack machine where each
690/// instruction has a list of inputs and a list of outputs (provided by the
691/// `emit` function).
692pub trait Bindgen {
693 /// The intermediate type for fragments of code for this type.
694 ///
695 /// For most languages `String` is a suitable intermediate type.
696 type Operand: Clone + fmt::Debug;
697
698 /// Emit code to implement the given instruction.
699 ///
700 /// Each operand is given in `operands` and can be popped off if ownership
701 /// is required. It's guaranteed that `operands` has the appropriate length
702 /// for the `inst` given, as specified with [`Instruction`].
703 ///
704 /// Each result variable should be pushed onto `results`. This function must
705 /// push the appropriate number of results or binding generation will panic.
706 fn emit(
707 &mut self,
708 resolve: &Resolve,
709 inst: &Instruction<'_>,
710 operands: &mut Vec<Self::Operand>,
711 results: &mut Vec<Self::Operand>,
712 );
713
714 /// Gets a operand reference to the return pointer area.
715 ///
716 /// The provided size and alignment is for the function's return type.
717 fn return_pointer(&mut self, size: ArchitectureSize, align: Alignment) -> Self::Operand;
718
719 /// Enters a new block of code to generate code for.
720 ///
721 /// This is currently exclusively used for constructing variants. When a
722 /// variant is constructed a block here will be pushed for each case of a
723 /// variant, generating the code necessary to translate a variant case.
724 ///
725 /// Blocks are completed with `finish_block` below. It's expected that `emit`
726 /// will always push code (if necessary) into the "current block", which is
727 /// updated by calling this method and `finish_block` below.
728 fn push_block(&mut self);
729
730 /// Indicates to the code generator that a block is completed, and the
731 /// `operand` specified was the resulting value of the block.
732 ///
733 /// This method will be used to compute the value of each arm of lifting a
734 /// variant. The `operand` will be `None` if the variant case didn't
735 /// actually have any type associated with it. Otherwise it will be `Some`
736 /// as the last value remaining on the stack representing the value
737 /// associated with a variant's `case`.
738 ///
739 /// It's expected that this will resume code generation in the previous
740 /// block before `push_block` was called. This must also save the results
741 /// of the current block internally for instructions like `ResultLift` to
742 /// use later.
743 fn finish_block(&mut self, operand: &mut Vec<Self::Operand>);
744
745 /// Returns size information that was previously calculated for all types.
746 fn sizes(&self) -> &SizeAlign;
747
748 /// Returns whether or not the specified element type is represented in a
749 /// "canonical" form for lists. This dictates whether the `ListCanonLower`
750 /// and `ListCanonLift` instructions are used or not.
751 fn is_list_canonical(&self, resolve: &Resolve, element: &Type) -> bool;
752}
753
754/// Generates an abstract sequence of instructions which represents this
755/// function being adapted as an imported function.
756///
757/// The instructions here, when executed, will emulate a language with
758/// interface types calling the concrete wasm implementation. The parameters
759/// for the returned instruction sequence are the language's own
760/// interface-types parameters. One instruction in the instruction stream
761/// will be a `Call` which represents calling the actual raw wasm function
762/// signature.
763///
764/// This function is useful, for example, if you're building a language
765/// generator for WASI bindings. This will document how to translate
766/// language-specific values into the wasm types to call a WASI function,
767/// and it will also automatically convert the results of the WASI function
768/// back to a language-specific value.
769pub fn call(
770 resolve: &Resolve,
771 variant: AbiVariant,
772 lift_lower: LiftLower,
773 func: &Function,
774 bindgen: &mut impl Bindgen,
775 async_: bool,
776) {
777 Generator::new(resolve, bindgen).call(func, variant, lift_lower, async_);
778}
779
780pub fn lower_to_memory<B: Bindgen>(
781 resolve: &Resolve,
782 bindgen: &mut B,
783 address: B::Operand,
784 value: B::Operand,
785 ty: &Type,
786) {
787 let mut generator = Generator::new(resolve, bindgen);
788 // TODO: make this configurable? Right now this function is only called for
789 // future/stream callbacks so it's appropriate to skip realloc here as it's
790 // all "lower for wasm import", but this might get reused for something else
791 // in the future.
792 generator.realloc = Some(Realloc::Export("cabi_realloc"));
793 generator.stack.push(value);
794 generator.write_to_memory(ty, address, Default::default());
795}
796
797pub fn lower_flat<B: Bindgen>(
798 resolve: &Resolve,
799 bindgen: &mut B,
800 value: B::Operand,
801 ty: &Type,
802) -> Vec<B::Operand> {
803 let mut generator = Generator::new(resolve, bindgen);
804 generator.stack.push(value);
805 generator.realloc = Some(Realloc::Export("cabi_realloc"));
806 generator.lower(ty);
807 generator.stack
808}
809
810pub fn lift_from_memory<B: Bindgen>(
811 resolve: &Resolve,
812 bindgen: &mut B,
813 address: B::Operand,
814 ty: &Type,
815) -> B::Operand {
816 let mut generator = Generator::new(resolve, bindgen);
817 generator.read_from_memory(ty, address, Default::default());
818 generator.stack.pop().unwrap()
819}
820
821/// Used in a similar manner as the `Interface::call` function except is
822/// used to generate the `post-return` callback for `func`.
823///
824/// This is only intended to be used in guest generators for exported
825/// functions and will primarily generate `GuestDeallocate*` instructions,
826/// plus others used as input to those instructions.
827pub fn post_return(resolve: &Resolve, func: &Function, bindgen: &mut impl Bindgen) {
828 Generator::new(resolve, bindgen).post_return(func);
829}
830
831/// Returns whether the `Function` specified needs a post-return function to
832/// be generated in guest code.
833///
834/// This is used when the return value contains a memory allocation such as
835/// a list or a string primarily.
836pub fn guest_export_needs_post_return(resolve: &Resolve, func: &Function) -> bool {
837 func.result
838 .map(|t| needs_deallocate(resolve, &t, Deallocate::Lists))
839 .unwrap_or(false)
840}
841
842pub fn guest_export_params_have_allocations(resolve: &Resolve, func: &Function) -> bool {
843 func.params
844 .iter()
845 .any(|(_, t)| needs_deallocate(resolve, &t, Deallocate::Lists))
846}
847
848fn needs_deallocate(resolve: &Resolve, ty: &Type, what: Deallocate) -> bool {
849 match ty {
850 Type::String => true,
851 Type::ErrorContext => true,
852 Type::Id(id) => match &resolve.types[*id].kind {
853 TypeDefKind::List(_) => true,
854 TypeDefKind::Type(t) => needs_deallocate(resolve, t, what),
855 TypeDefKind::Handle(Handle::Own(_)) => what.handles(),
856 TypeDefKind::Handle(Handle::Borrow(_)) => false,
857 TypeDefKind::Resource => false,
858 TypeDefKind::Record(r) => r
859 .fields
860 .iter()
861 .any(|f| needs_deallocate(resolve, &f.ty, what)),
862 TypeDefKind::Tuple(t) => t.types.iter().any(|t| needs_deallocate(resolve, t, what)),
863 TypeDefKind::Variant(t) => t
864 .cases
865 .iter()
866 .filter_map(|t| t.ty.as_ref())
867 .any(|t| needs_deallocate(resolve, t, what)),
868 TypeDefKind::Option(t) => needs_deallocate(resolve, t, what),
869 TypeDefKind::Result(t) => [&t.ok, &t.err]
870 .iter()
871 .filter_map(|t| t.as_ref())
872 .any(|t| needs_deallocate(resolve, t, what)),
873 TypeDefKind::Flags(_) | TypeDefKind::Enum(_) => false,
874 TypeDefKind::Future(_) | TypeDefKind::Stream(_) => what.handles(),
875 TypeDefKind::Unknown => unreachable!(),
876 TypeDefKind::FixedSizeList(t, _) => needs_deallocate(resolve, t, what),
877 TypeDefKind::Map(..) => todo!(),
878 },
879
880 Type::Bool
881 | Type::U8
882 | Type::S8
883 | Type::U16
884 | Type::S16
885 | Type::U32
886 | Type::S32
887 | Type::U64
888 | Type::S64
889 | Type::F32
890 | Type::F64
891 | Type::Char => false,
892 }
893}
894
895/// Generate instructions in `bindgen` to deallocate all lists in `ptr` where
896/// that's a pointer to a sequence of `types` stored in linear memory.
897pub fn deallocate_lists_in_types<B: Bindgen>(
898 resolve: &Resolve,
899 types: &[Type],
900 operands: &[B::Operand],
901 indirect: bool,
902 bindgen: &mut B,
903) {
904 Generator::new(resolve, bindgen).deallocate_in_types(
905 types,
906 operands,
907 indirect,
908 Deallocate::Lists,
909 );
910}
911
912/// Generate instructions in `bindgen` to deallocate all lists in `ptr` where
913/// that's a pointer to a sequence of `types` stored in linear memory.
914pub fn deallocate_lists_and_own_in_types<B: Bindgen>(
915 resolve: &Resolve,
916 types: &[Type],
917 operands: &[B::Operand],
918 indirect: bool,
919 bindgen: &mut B,
920) {
921 Generator::new(resolve, bindgen).deallocate_in_types(
922 types,
923 operands,
924 indirect,
925 Deallocate::ListsAndOwn,
926 );
927}
928
929#[derive(Copy, Clone)]
930pub enum Realloc {
931 None,
932 Export(&'static str),
933}
934
935/// What to deallocate in various `deallocate_*` methods.
936#[derive(Copy, Clone)]
937enum Deallocate {
938 /// Only deallocate lists.
939 Lists,
940 /// Deallocate lists and owned resources such as `own<T>` and
941 /// futures/streams.
942 ListsAndOwn,
943}
944
945impl Deallocate {
946 fn handles(&self) -> bool {
947 match self {
948 Deallocate::Lists => false,
949 Deallocate::ListsAndOwn => true,
950 }
951 }
952}
953
954struct Generator<'a, B: Bindgen> {
955 bindgen: &'a mut B,
956 resolve: &'a Resolve,
957 operands: Vec<B::Operand>,
958 results: Vec<B::Operand>,
959 stack: Vec<B::Operand>,
960 return_pointer: Option<B::Operand>,
961 realloc: Option<Realloc>,
962}
963
964const MAX_FLAT_PARAMS: usize = 16;
965const MAX_FLAT_ASYNC_PARAMS: usize = 4;
966
967impl<'a, B: Bindgen> Generator<'a, B> {
968 fn new(resolve: &'a Resolve, bindgen: &'a mut B) -> Generator<'a, B> {
969 Generator {
970 resolve,
971 bindgen,
972 operands: Vec::new(),
973 results: Vec::new(),
974 stack: Vec::new(),
975 return_pointer: None,
976 realloc: None,
977 }
978 }
979
980 fn call(&mut self, func: &Function, variant: AbiVariant, lift_lower: LiftLower, async_: bool) {
981 let sig = self.resolve.wasm_signature(variant, func);
982
983 // Lowering parameters calling a wasm import _or_ returning a result
984 // from an async-lifted wasm export means we don't need to pass
985 // ownership, but we pass ownership in all other cases.
986 let realloc = match (variant, lift_lower, async_) {
987 (AbiVariant::GuestImport, LiftLower::LowerArgsLiftResults, _)
988 | (
989 AbiVariant::GuestExport
990 | AbiVariant::GuestExportAsync
991 | AbiVariant::GuestExportAsyncStackful,
992 LiftLower::LiftArgsLowerResults,
993 true,
994 ) => Realloc::None,
995 _ => Realloc::Export("cabi_realloc"),
996 };
997 assert!(self.realloc.is_none());
998
999 match lift_lower {
1000 LiftLower::LowerArgsLiftResults => {
1001 self.realloc = Some(realloc);
1002
1003 // Create a function that performs individual lowering of operands
1004 let lower_to_memory = |self_: &mut Self, ptr: B::Operand| {
1005 let mut offset = ArchitectureSize::default();
1006 for (nth, (_, ty)) in func.params.iter().enumerate() {
1007 self_.emit(&Instruction::GetArg { nth });
1008 offset = align_to_arch(offset, self_.bindgen.sizes().align(ty));
1009 self_.write_to_memory(ty, ptr.clone(), offset);
1010 offset += self_.bindgen.sizes().size(ty);
1011 }
1012
1013 self_.stack.push(ptr);
1014 };
1015
1016 // Lower parameters
1017 if sig.indirect_params {
1018 // If parameters are indirect space is
1019 // allocated for them and each argument is lowered
1020 // individually into memory.
1021 let ElementInfo { size, align } = self
1022 .bindgen
1023 .sizes()
1024 .record(func.params.iter().map(|t| &t.1));
1025
1026 // Resolve the pointer to the indirectly stored parameters
1027 let ptr = match variant {
1028 // When a wasm module calls an import it will provide
1029 // space that isn't explicitly deallocated.
1030 AbiVariant::GuestImport => self.bindgen.return_pointer(size, align),
1031
1032 AbiVariant::GuestImportAsync => {
1033 todo!("direct param lowering for async guest import not implemented")
1034 }
1035
1036 // When calling a wasm module from the outside, though,
1037 // malloc needs to be called.
1038 AbiVariant::GuestExport => {
1039 self.emit(&Instruction::Malloc {
1040 realloc: "cabi_realloc",
1041 size,
1042 align,
1043 });
1044 self.stack.pop().unwrap()
1045 }
1046
1047 AbiVariant::GuestExportAsync | AbiVariant::GuestExportAsyncStackful => {
1048 todo!("direct param lowering for async not implemented")
1049 }
1050 };
1051
1052 // Lower the parameters to memory
1053 lower_to_memory(self, ptr);
1054 } else {
1055 // ... otherwise arguments are direct,
1056 // (there aren't too many) then we simply do a normal lower
1057 // operation for them all.
1058 for (nth, (_, ty)) in func.params.iter().enumerate() {
1059 self.emit(&Instruction::GetArg { nth });
1060 self.lower(ty);
1061 }
1062 }
1063 self.realloc = None;
1064
1065 // If necessary we may need to prepare a return pointer for this ABI.
1066 if variant == AbiVariant::GuestImport && sig.retptr {
1067 let info = self.bindgen.sizes().params(&func.result);
1068 let ptr = self.bindgen.return_pointer(info.size, info.align);
1069 self.return_pointer = Some(ptr.clone());
1070 self.stack.push(ptr);
1071 }
1072
1073 // Call the Wasm function
1074 assert_eq!(self.stack.len(), sig.params.len());
1075 self.emit(&Instruction::CallWasm {
1076 name: &func.name,
1077 sig: &sig,
1078 });
1079
1080 // Handle the result
1081 if sig.retptr {
1082 // If there is a return pointer, we must get the pointer to where results
1083 // should be stored, and store the results there?
1084
1085 let ptr = match variant {
1086 // imports into guests means it's a wasm module
1087 // calling an imported function. We supplied the
1088 // return pointer as the last argument (saved in
1089 // `self.return_pointer`) so we use that to read
1090 // the result of the function from memory.
1091 AbiVariant::GuestImport => {
1092 assert!(sig.results.is_empty());
1093 self.return_pointer.take().unwrap()
1094 }
1095
1096 // guest exports means that this is a host
1097 // calling wasm so wasm returned a pointer to where
1098 // the result is stored
1099 AbiVariant::GuestExport => self.stack.pop().unwrap(),
1100
1101 AbiVariant::GuestImportAsync
1102 | AbiVariant::GuestExportAsync
1103 | AbiVariant::GuestExportAsyncStackful => {
1104 unreachable!()
1105 }
1106 };
1107
1108 if let (AbiVariant::GuestExport, true) = (variant, async_) {
1109 // If we're dealing with an async function, the result should not be read from memory
1110 // immediately, as it's the async call result
1111 //
1112 // We can leave the result of the call (the indication of what to do as an async call)
1113 // on the stack as a return
1114 self.stack.push(ptr);
1115 } else {
1116 // If we're not dealing with an async call, the result must be in memory at this point and can be read out
1117 self.read_results_from_memory(
1118 &func.result,
1119 ptr.clone(),
1120 ArchitectureSize::default(),
1121 );
1122 self.emit(&Instruction::Flush {
1123 amt: usize::from(func.result.is_some()),
1124 });
1125 }
1126 } else {
1127 // With no return pointer in use we can simply lift the
1128 // result(s) of the function from the result of the core
1129 // wasm function.
1130 if let Some(ty) = &func.result {
1131 self.lift(ty)
1132 }
1133 }
1134
1135 // Emit the function return
1136 if async_ {
1137 self.emit(&Instruction::AsyncTaskReturn {
1138 name: &func.name,
1139 params: if func.result.is_some() {
1140 &[WasmType::Pointer]
1141 } else {
1142 &[]
1143 },
1144 });
1145 } else {
1146 self.emit(&Instruction::Return {
1147 func,
1148 amt: usize::from(func.result.is_some()),
1149 });
1150 }
1151 }
1152
1153 LiftLower::LiftArgsLowerResults => {
1154 let max_flat_params = match (variant, async_) {
1155 (AbiVariant::GuestImportAsync, _is_async @ true) => MAX_FLAT_ASYNC_PARAMS,
1156 _ => MAX_FLAT_PARAMS,
1157 };
1158
1159 // Read parameters from memory
1160 let read_from_memory = |self_: &mut Self| {
1161 let mut offset = ArchitectureSize::default();
1162 let ptr = self_
1163 .stack
1164 .pop()
1165 .expect("empty stack during read param from memory");
1166 for (_, ty) in func.params.iter() {
1167 offset = align_to_arch(offset, self_.bindgen.sizes().align(ty));
1168 self_.read_from_memory(ty, ptr.clone(), offset);
1169 offset += self_.bindgen.sizes().size(ty);
1170 }
1171 };
1172
1173 // Resolve parameters
1174 if sig.indirect_params {
1175 // If parameters were passed indirectly, arguments must be
1176 // read in succession from memory, with the pointer to the arguments
1177 // being the first argument to the function.
1178 self.emit(&Instruction::GetArg { nth: 0 });
1179 read_from_memory(self);
1180 } else {
1181 // ... otherwise, if parameters were passed directly then we lift each
1182 // argument in succession from the component wasm types that
1183 // make-up the type.
1184 let mut offset = 0;
1185 for (param_name, ty) in func.params.iter() {
1186 let Some(types) = flat_types(self.resolve, ty, Some(max_flat_params))
1187 else {
1188 panic!(
1189 "failed to flatten types during direct parameter lifting ('{param_name}' in func '{}')",
1190 func.name
1191 );
1192 };
1193 for _ in 0..types.len() {
1194 self.emit(&Instruction::GetArg { nth: offset });
1195 offset += 1;
1196 }
1197 self.lift(ty);
1198 }
1199 }
1200
1201 // ... and that allows us to call the interface types function
1202 self.emit(&Instruction::CallInterface { func, async_ });
1203
1204 // The return value of an async function is *not* the result of the function
1205 // itself or a pointer but rather a status code.
1206 //
1207 // Asynchronous functions will call `task.return` after the
1208 // interface function completes, so lowering is conditional
1209 // based on slightly different logic for the `task.return`
1210 // intrinsic.
1211 //
1212 // Note that in the async import case teh code below deals with the CM function being lowered,
1213 // not the core function that is underneath that (i.e. func.result may be empty,
1214 // where the associated core function underneath must have a i32 status code result)
1215 let (lower_to_memory, async_flat_results) = match (async_, &func.result) {
1216 // All async cases pass along the function results and flatten where necesary
1217 (_is_async @ true, func_result) => {
1218 let results = match &func_result {
1219 Some(ty) => flat_types(self.resolve, ty, Some(max_flat_params)),
1220 None => Some(Vec::new()),
1221 };
1222 (results.is_none(), Some(results))
1223 }
1224 // All other non-async cases
1225 (_is_async @ false, _) => (sig.retptr, None),
1226 };
1227
1228 // This was dynamically allocated by the caller (or async start
1229 // function) so after it's been read by the guest we need to
1230 // deallocate it.
1231 if let AbiVariant::GuestExport
1232 | AbiVariant::GuestExportAsync
1233 | AbiVariant::GuestExportAsyncStackful = variant
1234 {
1235 if sig.indirect_params && !async_ {
1236 let ElementInfo { size, align } = self
1237 .bindgen
1238 .sizes()
1239 .record(func.params.iter().map(|t| &t.1));
1240 self.emit(&Instruction::GetArg { nth: 0 });
1241 self.emit(&Instruction::GuestDeallocate { size, align });
1242 }
1243 }
1244
1245 self.realloc = Some(realloc);
1246
1247 // Perform memory lowing of relevant results, including out pointers as well as traditional results
1248 match (lower_to_memory, sig.retptr, variant) {
1249 // For sync calls, if no lowering to memory is required and there *is* a return pointer in use
1250 // then we need to lower then simply lower the result(s) and return that directly from the function.
1251 (_lower_to_memory @ false, _, _) => {
1252 if let Some(ty) = &func.result {
1253 self.lower(ty);
1254 }
1255 }
1256
1257 // Lowering to memory for a guest import
1258 //
1259 // When a function is imported to a guest this means
1260 // it's a host providing the implementation of the
1261 // import. The result is stored in the pointer
1262 // specified in the last argument, so we get the
1263 // pointer here and then write the return value into
1264 // it.
1265 (
1266 _lower_to_memory @ true,
1267 _has_ret_ptr @ true,
1268 AbiVariant::GuestImport | AbiVariant::GuestImportAsync,
1269 ) => {
1270 self.emit(&Instruction::GetArg {
1271 nth: sig.params.len() - 1,
1272 });
1273 let ptr = self
1274 .stack
1275 .pop()
1276 .expect("empty stack during result lower to memory");
1277 self.write_params_to_memory(&func.result, ptr, Default::default());
1278 }
1279
1280 // Lowering to memory for a guest export
1281 //
1282 // For a guest import this is a function defined in
1283 // wasm, so we're returning a pointer where the
1284 // value was stored at. Allocate some space here
1285 // (statically) and then write the result into that
1286 // memory, returning the pointer at the end.
1287 (_lower_to_memory @ true, _, variant) => match variant {
1288 AbiVariant::GuestExport | AbiVariant::GuestExportAsync => {
1289 let ElementInfo { size, align } =
1290 self.bindgen.sizes().params(&func.result);
1291 let ptr = self.bindgen.return_pointer(size, align);
1292 self.write_params_to_memory(
1293 &func.result,
1294 ptr.clone(),
1295 Default::default(),
1296 );
1297 self.stack.push(ptr);
1298 }
1299 AbiVariant::GuestImport | AbiVariant::GuestImportAsync => {
1300 unreachable!(
1301 "lowering to memory cannot be performed without a return pointer ({async_note} func [{func_name}], variant {variant:#?})",
1302 async_note = async_.then_some("async").unwrap_or("sync"),
1303 func_name = func.name,
1304 )
1305 }
1306 AbiVariant::GuestExportAsyncStackful => {
1307 todo!("stackful exports are not yet supported")
1308 }
1309 },
1310 }
1311
1312 // Build and emit the appropriate return
1313 match (variant, async_flat_results) {
1314 // Async guest imports always return a i32 status code
1315 (AbiVariant::GuestImport | AbiVariant::GuestImportAsync, None) if async_ => {
1316 unreachable!("async guest imports must have a return")
1317 }
1318
1319 // Async guest imports with results return the status code, not a pointer to any results
1320 (AbiVariant::GuestImport | AbiVariant::GuestImportAsync, Some(results))
1321 if async_ =>
1322 {
1323 let name = &format!("[task-return]{}", func.name);
1324 let params = results.as_deref().unwrap_or_default();
1325 self.emit(&Instruction::AsyncTaskReturn { name, params });
1326 }
1327
1328 // All async/non-async cases with results that need to be returned
1329 //
1330 // In practice, async imports should not end up here, as the returned result of an
1331 // async import is *not* a pointer but instead a status code.
1332 (_, Some(results)) => {
1333 let name = &format!("[task-return]{}", func.name);
1334 let params = results.as_deref().unwrap_or(&[WasmType::Pointer]);
1335 self.emit(&Instruction::AsyncTaskReturn { name, params });
1336 }
1337
1338 // All async/non-async cases with no results
1339 (_, None) => {
1340 if async_ {
1341 let name = &format!("[task-return]{}", func.name);
1342 self.emit(&Instruction::AsyncTaskReturn {
1343 name: name,
1344 params: if sig.results.len() > MAX_FLAT_ASYNC_PARAMS {
1345 &[WasmType::Pointer]
1346 } else {
1347 &sig.results
1348 },
1349 });
1350 } else {
1351 self.emit(&Instruction::Return {
1352 func,
1353 amt: sig.results.len(),
1354 });
1355 }
1356 }
1357 }
1358
1359 self.realloc = None;
1360 }
1361 }
1362
1363 assert!(self.realloc.is_none());
1364
1365 assert!(
1366 self.stack.is_empty(),
1367 "stack has {} items remaining: {:?}",
1368 self.stack.len(),
1369 self.stack,
1370 );
1371 }
1372
1373 fn post_return(&mut self, func: &Function) {
1374 let sig = self.resolve.wasm_signature(AbiVariant::GuestExport, func);
1375
1376 // Currently post-return is only used for lists and lists are always
1377 // returned indirectly through memory due to their flat representation
1378 // having more than one type. Assert that a return pointer is used,
1379 // though, in case this ever changes.
1380 assert!(sig.retptr);
1381
1382 self.emit(&Instruction::GetArg { nth: 0 });
1383 let addr = self.stack.pop().unwrap();
1384
1385 let mut types = Vec::new();
1386 types.extend(func.result);
1387 self.deallocate_in_types(&types, &[addr], true, Deallocate::Lists);
1388
1389 self.emit(&Instruction::Return { func, amt: 0 });
1390 }
1391
1392 fn deallocate_in_types(
1393 &mut self,
1394 types: &[Type],
1395 operands: &[B::Operand],
1396 indirect: bool,
1397 what: Deallocate,
1398 ) {
1399 if indirect {
1400 assert_eq!(operands.len(), 1);
1401 for (offset, ty) in self.bindgen.sizes().field_offsets(types) {
1402 self.deallocate_indirect(ty, operands[0].clone(), offset, what);
1403 }
1404 assert!(
1405 self.stack.is_empty(),
1406 "stack has {} items remaining",
1407 self.stack.len()
1408 );
1409 } else {
1410 let mut operands = operands;
1411 let mut operands_for_ty;
1412 for ty in types {
1413 let types = flat_types(self.resolve, ty, None).unwrap();
1414 (operands_for_ty, operands) = operands.split_at(types.len());
1415 self.stack.extend_from_slice(operands_for_ty);
1416 self.deallocate(ty, what);
1417 assert!(
1418 self.stack.is_empty(),
1419 "stack has {} items remaining",
1420 self.stack.len()
1421 );
1422 }
1423 assert!(operands.is_empty());
1424 }
1425 }
1426
1427 fn emit(&mut self, inst: &Instruction<'_>) {
1428 self.operands.clear();
1429 self.results.clear();
1430
1431 let operands_len = inst.operands_len();
1432 assert!(
1433 self.stack.len() >= operands_len,
1434 "not enough operands on stack for {:?}: have {} need {operands_len}",
1435 inst,
1436 self.stack.len(),
1437 );
1438 self.operands
1439 .extend(self.stack.drain((self.stack.len() - operands_len)..));
1440 self.results.reserve(inst.results_len());
1441
1442 self.bindgen
1443 .emit(self.resolve, inst, &mut self.operands, &mut self.results);
1444
1445 assert_eq!(
1446 self.results.len(),
1447 inst.results_len(),
1448 "{:?} expected {} results, got {}",
1449 inst,
1450 inst.results_len(),
1451 self.results.len()
1452 );
1453 self.stack.append(&mut self.results);
1454 }
1455
1456 fn push_block(&mut self) {
1457 self.bindgen.push_block();
1458 }
1459
1460 fn finish_block(&mut self, size: usize) {
1461 self.operands.clear();
1462 assert!(
1463 size <= self.stack.len(),
1464 "not enough operands on stack for finishing block",
1465 );
1466 self.operands
1467 .extend(self.stack.drain((self.stack.len() - size)..));
1468 self.bindgen.finish_block(&mut self.operands);
1469 }
1470
1471 fn lower(&mut self, ty: &Type) {
1472 use Instruction::*;
1473
1474 match *ty {
1475 Type::Bool => self.emit(&I32FromBool),
1476 Type::S8 => self.emit(&I32FromS8),
1477 Type::U8 => self.emit(&I32FromU8),
1478 Type::S16 => self.emit(&I32FromS16),
1479 Type::U16 => self.emit(&I32FromU16),
1480 Type::S32 => self.emit(&I32FromS32),
1481 Type::U32 => self.emit(&I32FromU32),
1482 Type::S64 => self.emit(&I64FromS64),
1483 Type::U64 => self.emit(&I64FromU64),
1484 Type::Char => self.emit(&I32FromChar),
1485 Type::F32 => self.emit(&CoreF32FromF32),
1486 Type::F64 => self.emit(&CoreF64FromF64),
1487 Type::String => {
1488 let realloc = self.list_realloc();
1489 self.emit(&StringLower { realloc });
1490 }
1491 Type::ErrorContext => self.emit(&ErrorContextLower),
1492 Type::Id(id) => match &self.resolve.types[id].kind {
1493 TypeDefKind::Type(t) => self.lower(t),
1494 TypeDefKind::List(element) => {
1495 let realloc = self.list_realloc();
1496 if self.bindgen.is_list_canonical(self.resolve, element) {
1497 self.emit(&ListCanonLower { element, realloc });
1498 } else {
1499 self.push_block();
1500 self.emit(&IterElem { element });
1501 self.emit(&IterBasePointer);
1502 let addr = self.stack.pop().unwrap();
1503 self.write_to_memory(element, addr, Default::default());
1504 self.finish_block(0);
1505 self.emit(&ListLower { element, realloc });
1506 }
1507 }
1508 TypeDefKind::Handle(handle) => {
1509 let (Handle::Own(ty) | Handle::Borrow(ty)) = handle;
1510 self.emit(&HandleLower {
1511 handle,
1512 ty: id,
1513 name: self.resolve.types[*ty].name.as_deref().unwrap(),
1514 });
1515 }
1516 TypeDefKind::Resource => {
1517 todo!();
1518 }
1519 TypeDefKind::Record(record) => {
1520 self.emit(&RecordLower {
1521 record,
1522 ty: id,
1523 name: self.resolve.types[id].name.as_deref().unwrap(),
1524 });
1525 let values = self
1526 .stack
1527 .drain(self.stack.len() - record.fields.len()..)
1528 .collect::<Vec<_>>();
1529 for (field, value) in record.fields.iter().zip(values) {
1530 self.stack.push(value);
1531 self.lower(&field.ty);
1532 }
1533 }
1534 TypeDefKind::Tuple(tuple) => {
1535 self.emit(&TupleLower { tuple, ty: id });
1536 let values = self
1537 .stack
1538 .drain(self.stack.len() - tuple.types.len()..)
1539 .collect::<Vec<_>>();
1540 for (ty, value) in tuple.types.iter().zip(values) {
1541 self.stack.push(value);
1542 self.lower(ty);
1543 }
1544 }
1545
1546 TypeDefKind::Flags(flags) => {
1547 self.emit(&FlagsLower {
1548 flags,
1549 ty: id,
1550 name: self.resolve.types[id].name.as_ref().unwrap(),
1551 });
1552 }
1553
1554 TypeDefKind::Variant(v) => {
1555 let results =
1556 self.lower_variant_arms(ty, v.cases.iter().map(|c| c.ty.as_ref()));
1557 self.emit(&VariantLower {
1558 variant: v,
1559 ty: id,
1560 results: &results,
1561 name: self.resolve.types[id].name.as_deref().unwrap(),
1562 });
1563 }
1564 TypeDefKind::Enum(enum_) => {
1565 self.emit(&EnumLower {
1566 enum_,
1567 ty: id,
1568 name: self.resolve.types[id].name.as_deref().unwrap(),
1569 });
1570 }
1571 TypeDefKind::Option(t) => {
1572 let results = self.lower_variant_arms(ty, [None, Some(t)]);
1573 self.emit(&OptionLower {
1574 payload: t,
1575 ty: id,
1576 results: &results,
1577 });
1578 }
1579 TypeDefKind::Result(r) => {
1580 let results = self.lower_variant_arms(ty, [r.ok.as_ref(), r.err.as_ref()]);
1581 self.emit(&ResultLower {
1582 result: r,
1583 ty: id,
1584 results: &results,
1585 });
1586 }
1587 TypeDefKind::Future(ty) => {
1588 self.emit(&FutureLower {
1589 payload: ty,
1590 ty: id,
1591 });
1592 }
1593 TypeDefKind::Stream(ty) => {
1594 self.emit(&StreamLower {
1595 payload: ty,
1596 ty: id,
1597 });
1598 }
1599 TypeDefKind::Unknown => unreachable!(),
1600 TypeDefKind::FixedSizeList(ty, size) => {
1601 self.emit(&FixedLengthListLower {
1602 element: ty,
1603 size: *size,
1604 id,
1605 });
1606 let mut values = self
1607 .stack
1608 .drain(self.stack.len() - (*size as usize)..)
1609 .collect::<Vec<_>>();
1610 for value in values.drain(..) {
1611 self.stack.push(value);
1612 self.lower(ty);
1613 }
1614 }
1615 TypeDefKind::Map(..) => todo!(),
1616 },
1617 }
1618 }
1619
1620 fn lower_variant_arms<'b>(
1621 &mut self,
1622 ty: &Type,
1623 cases: impl IntoIterator<Item = Option<&'b Type>>,
1624 ) -> Vec<WasmType> {
1625 use Instruction::*;
1626 let results = flat_types(self.resolve, ty, None).unwrap();
1627 let mut casts = Vec::new();
1628 for (i, ty) in cases.into_iter().enumerate() {
1629 self.push_block();
1630 self.emit(&VariantPayloadName);
1631 let payload_name = self.stack.pop().unwrap();
1632 self.emit(&I32Const { val: i as i32 });
1633 let mut pushed = 1;
1634 if let Some(ty) = ty {
1635 // Using the payload of this block we lower the type to
1636 // raw wasm values.
1637 self.stack.push(payload_name);
1638 self.lower(ty);
1639
1640 // Determine the types of all the wasm values we just
1641 // pushed, and record how many. If we pushed too few
1642 // then we'll need to push some zeros after this.
1643 let temp = flat_types(self.resolve, ty, None).unwrap();
1644 pushed += temp.len();
1645
1646 // For all the types pushed we may need to insert some
1647 // bitcasts. This will go through and cast everything
1648 // to the right type to ensure all blocks produce the
1649 // same set of results.
1650 casts.truncate(0);
1651 for (actual, expected) in temp.iter().zip(&results[1..]) {
1652 casts.push(cast(*actual, *expected));
1653 }
1654 if casts.iter().any(|c| *c != Bitcast::None) {
1655 self.emit(&Bitcasts { casts: &casts });
1656 }
1657 }
1658
1659 // If we haven't pushed enough items in this block to match
1660 // what other variants are pushing then we need to push
1661 // some zeros.
1662 if pushed < results.len() {
1663 self.emit(&ConstZero {
1664 tys: &results[pushed..],
1665 });
1666 }
1667 self.finish_block(results.len());
1668 }
1669 results
1670 }
1671
1672 fn list_realloc(&self) -> Option<&'static str> {
1673 match self.realloc.expect("realloc should be configured") {
1674 Realloc::None => None,
1675 Realloc::Export(s) => Some(s),
1676 }
1677 }
1678
1679 /// Note that in general everything in this function is the opposite of the
1680 /// `lower` function above. This is intentional and should be kept this way!
1681 fn lift(&mut self, ty: &Type) {
1682 use Instruction::*;
1683
1684 match *ty {
1685 Type::Bool => self.emit(&BoolFromI32),
1686 Type::S8 => self.emit(&S8FromI32),
1687 Type::U8 => self.emit(&U8FromI32),
1688 Type::S16 => self.emit(&S16FromI32),
1689 Type::U16 => self.emit(&U16FromI32),
1690 Type::S32 => self.emit(&S32FromI32),
1691 Type::U32 => self.emit(&U32FromI32),
1692 Type::S64 => self.emit(&S64FromI64),
1693 Type::U64 => self.emit(&U64FromI64),
1694 Type::Char => self.emit(&CharFromI32),
1695 Type::F32 => self.emit(&F32FromCoreF32),
1696 Type::F64 => self.emit(&F64FromCoreF64),
1697 Type::String => self.emit(&StringLift),
1698 Type::ErrorContext => self.emit(&ErrorContextLift),
1699 Type::Id(id) => match &self.resolve.types[id].kind {
1700 TypeDefKind::Type(t) => self.lift(t),
1701 TypeDefKind::List(element) => {
1702 if self.bindgen.is_list_canonical(self.resolve, element) {
1703 self.emit(&ListCanonLift { element, ty: id });
1704 } else {
1705 self.push_block();
1706 self.emit(&IterBasePointer);
1707 let addr = self.stack.pop().unwrap();
1708 self.read_from_memory(element, addr, Default::default());
1709 self.finish_block(1);
1710 self.emit(&ListLift { element, ty: id });
1711 }
1712 }
1713 TypeDefKind::Handle(handle) => {
1714 let (Handle::Own(ty) | Handle::Borrow(ty)) = handle;
1715 self.emit(&HandleLift {
1716 handle,
1717 ty: id,
1718 name: self.resolve.types[*ty].name.as_deref().unwrap(),
1719 });
1720 }
1721 TypeDefKind::Resource => {
1722 todo!();
1723 }
1724 TypeDefKind::Record(record) => {
1725 self.flat_for_each_record_type(
1726 ty,
1727 record.fields.iter().map(|f| &f.ty),
1728 Self::lift,
1729 );
1730 self.emit(&RecordLift {
1731 record,
1732 ty: id,
1733 name: self.resolve.types[id].name.as_deref().unwrap(),
1734 });
1735 }
1736 TypeDefKind::Tuple(tuple) => {
1737 self.flat_for_each_record_type(ty, tuple.types.iter(), Self::lift);
1738 self.emit(&TupleLift { tuple, ty: id });
1739 }
1740 TypeDefKind::Flags(flags) => {
1741 self.emit(&FlagsLift {
1742 flags,
1743 ty: id,
1744 name: self.resolve.types[id].name.as_ref().unwrap(),
1745 });
1746 }
1747
1748 TypeDefKind::Variant(v) => {
1749 self.flat_for_each_variant_arm(
1750 ty,
1751 true,
1752 v.cases.iter().map(|c| c.ty.as_ref()),
1753 Self::lift,
1754 );
1755 self.emit(&VariantLift {
1756 variant: v,
1757 ty: id,
1758 name: self.resolve.types[id].name.as_deref().unwrap(),
1759 });
1760 }
1761
1762 TypeDefKind::Enum(enum_) => {
1763 self.emit(&EnumLift {
1764 enum_,
1765 ty: id,
1766 name: self.resolve.types[id].name.as_deref().unwrap(),
1767 });
1768 }
1769
1770 TypeDefKind::Option(t) => {
1771 self.flat_for_each_variant_arm(ty, true, [None, Some(t)], Self::lift);
1772 self.emit(&OptionLift { payload: t, ty: id });
1773 }
1774
1775 TypeDefKind::Result(r) => {
1776 self.flat_for_each_variant_arm(
1777 ty,
1778 true,
1779 [r.ok.as_ref(), r.err.as_ref()],
1780 Self::lift,
1781 );
1782 self.emit(&ResultLift { result: r, ty: id });
1783 }
1784
1785 TypeDefKind::Future(ty) => {
1786 self.emit(&FutureLift {
1787 payload: ty,
1788 ty: id,
1789 });
1790 }
1791 TypeDefKind::Stream(ty) => {
1792 self.emit(&StreamLift {
1793 payload: ty,
1794 ty: id,
1795 });
1796 }
1797 TypeDefKind::Unknown => unreachable!(),
1798 TypeDefKind::FixedSizeList(ty, size) => {
1799 let temp = flat_types(self.resolve, ty, None).unwrap();
1800 let flat_per_elem = temp.to_vec().len();
1801 let flatsize = flat_per_elem * (*size as usize);
1802 let mut lowered_args = self
1803 .stack
1804 .drain(self.stack.len() - flatsize..)
1805 .collect::<Vec<_>>();
1806 for _ in 0..*size {
1807 self.stack.extend(lowered_args.drain(..flat_per_elem));
1808 self.lift(ty);
1809 }
1810 self.emit(&FixedLengthListLift {
1811 element: ty,
1812 size: *size,
1813 id,
1814 });
1815 }
1816 TypeDefKind::Map(..) => todo!(),
1817 },
1818 }
1819 }
1820
1821 fn flat_for_each_record_type<'b>(
1822 &mut self,
1823 container: &Type,
1824 types: impl Iterator<Item = &'b Type>,
1825 mut iter: impl FnMut(&mut Self, &Type),
1826 ) {
1827 let temp = flat_types(self.resolve, container, None).unwrap();
1828 let mut args = self
1829 .stack
1830 .drain(self.stack.len() - temp.len()..)
1831 .collect::<Vec<_>>();
1832 for ty in types {
1833 let temp = flat_types(self.resolve, ty, None).unwrap();
1834 self.stack.extend(args.drain(..temp.len()));
1835 iter(self, ty);
1836 }
1837 }
1838
1839 fn flat_for_each_variant_arm<'b>(
1840 &mut self,
1841 ty: &Type,
1842 blocks_with_type_have_result: bool,
1843 cases: impl IntoIterator<Item = Option<&'b Type>>,
1844 mut iter: impl FnMut(&mut Self, &Type),
1845 ) {
1846 let params = flat_types(self.resolve, ty, None).unwrap();
1847 let mut casts = Vec::new();
1848 let block_inputs = self
1849 .stack
1850 .drain(self.stack.len() + 1 - params.len()..)
1851 .collect::<Vec<_>>();
1852 for ty in cases {
1853 self.push_block();
1854 if let Some(ty) = ty {
1855 // Push only the values we need for this variant onto
1856 // the stack.
1857 let temp = flat_types(self.resolve, ty, None).unwrap();
1858 self.stack
1859 .extend(block_inputs[..temp.len()].iter().cloned());
1860
1861 // Cast all the types we have on the stack to the actual
1862 // types needed for this variant, if necessary.
1863 casts.truncate(0);
1864 for (actual, expected) in temp.iter().zip(¶ms[1..]) {
1865 casts.push(cast(*expected, *actual));
1866 }
1867 if casts.iter().any(|c| *c != Bitcast::None) {
1868 self.emit(&Instruction::Bitcasts { casts: &casts });
1869 }
1870
1871 // Then recursively lift this variant's payload.
1872 iter(self, ty);
1873 }
1874 self.finish_block(if blocks_with_type_have_result {
1875 ty.is_some() as usize
1876 } else {
1877 0
1878 });
1879 }
1880 }
1881
1882 fn write_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
1883 use Instruction::*;
1884
1885 match *ty {
1886 // Builtin types need different flavors of storage instructions
1887 // depending on the size of the value written.
1888 Type::Bool | Type::U8 | Type::S8 => {
1889 self.lower_and_emit(ty, addr, &I32Store8 { offset })
1890 }
1891 Type::U16 | Type::S16 => self.lower_and_emit(ty, addr, &I32Store16 { offset }),
1892 Type::U32 | Type::S32 | Type::Char => {
1893 self.lower_and_emit(ty, addr, &I32Store { offset })
1894 }
1895 Type::U64 | Type::S64 => self.lower_and_emit(ty, addr, &I64Store { offset }),
1896 Type::F32 => self.lower_and_emit(ty, addr, &F32Store { offset }),
1897 Type::F64 => self.lower_and_emit(ty, addr, &F64Store { offset }),
1898 Type::String => self.write_list_to_memory(ty, addr, offset),
1899 Type::ErrorContext => self.lower_and_emit(ty, addr, &I32Store { offset }),
1900
1901 Type::Id(id) => match &self.resolve.types[id].kind {
1902 TypeDefKind::Type(t) => self.write_to_memory(t, addr, offset),
1903 TypeDefKind::List(_) => self.write_list_to_memory(ty, addr, offset),
1904
1905 TypeDefKind::Future(_) | TypeDefKind::Stream(_) | TypeDefKind::Handle(_) => {
1906 self.lower_and_emit(ty, addr, &I32Store { offset })
1907 }
1908
1909 // Decompose the record into its components and then write all
1910 // the components into memory one-by-one.
1911 TypeDefKind::Record(record) => {
1912 self.emit(&RecordLower {
1913 record,
1914 ty: id,
1915 name: self.resolve.types[id].name.as_deref().unwrap(),
1916 });
1917 self.write_fields_to_memory(record.fields.iter().map(|f| &f.ty), addr, offset);
1918 }
1919 TypeDefKind::Resource => {
1920 todo!()
1921 }
1922 TypeDefKind::Tuple(tuple) => {
1923 self.emit(&TupleLower { tuple, ty: id });
1924 self.write_fields_to_memory(tuple.types.iter(), addr, offset);
1925 }
1926
1927 TypeDefKind::Flags(f) => {
1928 self.lower(ty);
1929 match f.repr() {
1930 FlagsRepr::U8 => {
1931 self.stack.push(addr);
1932 self.store_intrepr(offset, Int::U8);
1933 }
1934 FlagsRepr::U16 => {
1935 self.stack.push(addr);
1936 self.store_intrepr(offset, Int::U16);
1937 }
1938 FlagsRepr::U32(n) => {
1939 for i in (0..n).rev() {
1940 self.stack.push(addr.clone());
1941 self.emit(&I32Store {
1942 offset: offset.add_bytes(i * 4),
1943 });
1944 }
1945 }
1946 }
1947 }
1948
1949 // Each case will get its own block, and the first item in each
1950 // case is writing the discriminant. After that if we have a
1951 // payload we write the payload after the discriminant, aligned up
1952 // to the type's alignment.
1953 TypeDefKind::Variant(v) => {
1954 self.write_variant_arms_to_memory(
1955 offset,
1956 addr,
1957 v.tag(),
1958 v.cases.iter().map(|c| c.ty.as_ref()),
1959 );
1960 self.emit(&VariantLower {
1961 variant: v,
1962 ty: id,
1963 results: &[],
1964 name: self.resolve.types[id].name.as_deref().unwrap(),
1965 });
1966 }
1967
1968 TypeDefKind::Option(t) => {
1969 self.write_variant_arms_to_memory(offset, addr, Int::U8, [None, Some(t)]);
1970 self.emit(&OptionLower {
1971 payload: t,
1972 ty: id,
1973 results: &[],
1974 });
1975 }
1976
1977 TypeDefKind::Result(r) => {
1978 self.write_variant_arms_to_memory(
1979 offset,
1980 addr,
1981 Int::U8,
1982 [r.ok.as_ref(), r.err.as_ref()],
1983 );
1984 self.emit(&ResultLower {
1985 result: r,
1986 ty: id,
1987 results: &[],
1988 });
1989 }
1990
1991 TypeDefKind::Enum(e) => {
1992 self.lower(ty);
1993 self.stack.push(addr);
1994 self.store_intrepr(offset, e.tag());
1995 }
1996
1997 TypeDefKind::Unknown => unreachable!(),
1998 TypeDefKind::FixedSizeList(element, size) => {
1999 // resembles write_list_to_memory
2000 self.push_block();
2001 self.emit(&IterElem { element });
2002 self.emit(&IterBasePointer);
2003 let elem_addr = self.stack.pop().unwrap();
2004 self.write_to_memory(element, elem_addr, offset);
2005 self.finish_block(0);
2006 self.stack.push(addr);
2007 self.emit(&FixedLengthListLowerToMemory {
2008 element,
2009 size: *size,
2010 id,
2011 });
2012 }
2013 TypeDefKind::Map(..) => todo!(),
2014 },
2015 }
2016 }
2017
2018 fn write_params_to_memory<'b>(
2019 &mut self,
2020 params: impl IntoIterator<Item = &'b Type, IntoIter: ExactSizeIterator>,
2021 addr: B::Operand,
2022 offset: ArchitectureSize,
2023 ) {
2024 self.write_fields_to_memory(params, addr, offset);
2025 }
2026
2027 fn write_variant_arms_to_memory<'b>(
2028 &mut self,
2029 offset: ArchitectureSize,
2030 addr: B::Operand,
2031 tag: Int,
2032 cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
2033 ) {
2034 let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
2035 for (i, ty) in cases.into_iter().enumerate() {
2036 self.push_block();
2037 self.emit(&Instruction::VariantPayloadName);
2038 let payload_name = self.stack.pop().unwrap();
2039 self.emit(&Instruction::I32Const { val: i as i32 });
2040 self.stack.push(addr.clone());
2041 self.store_intrepr(offset, tag);
2042 if let Some(ty) = ty {
2043 self.stack.push(payload_name.clone());
2044 self.write_to_memory(ty, addr.clone(), payload_offset);
2045 }
2046 self.finish_block(0);
2047 }
2048 }
2049
2050 fn write_list_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
2051 // After lowering the list there's two i32 values on the stack
2052 // which we write into memory, writing the pointer into the low address
2053 // and the length into the high address.
2054 self.lower(ty);
2055 self.stack.push(addr.clone());
2056 self.emit(&Instruction::LengthStore {
2057 offset: offset + self.bindgen.sizes().align(ty).into(),
2058 });
2059 self.stack.push(addr);
2060 self.emit(&Instruction::PointerStore { offset });
2061 }
2062
2063 fn write_fields_to_memory<'b>(
2064 &mut self,
2065 tys: impl IntoIterator<Item = &'b Type, IntoIter: ExactSizeIterator>,
2066 addr: B::Operand,
2067 offset: ArchitectureSize,
2068 ) {
2069 let tys = tys.into_iter();
2070 let fields = self
2071 .stack
2072 .drain(self.stack.len() - tys.len()..)
2073 .collect::<Vec<_>>();
2074 for ((field_offset, ty), op) in self
2075 .bindgen
2076 .sizes()
2077 .field_offsets(tys)
2078 .into_iter()
2079 .zip(fields)
2080 {
2081 self.stack.push(op);
2082 self.write_to_memory(ty, addr.clone(), offset + (field_offset));
2083 }
2084 }
2085
2086 fn lower_and_emit(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
2087 self.lower(ty);
2088 self.stack.push(addr);
2089 self.emit(instr);
2090 }
2091
2092 fn read_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
2093 use Instruction::*;
2094
2095 match *ty {
2096 Type::Bool => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
2097 Type::U8 => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
2098 Type::S8 => self.emit_and_lift(ty, addr, &I32Load8S { offset }),
2099 Type::U16 => self.emit_and_lift(ty, addr, &I32Load16U { offset }),
2100 Type::S16 => self.emit_and_lift(ty, addr, &I32Load16S { offset }),
2101 Type::U32 | Type::S32 | Type::Char => self.emit_and_lift(ty, addr, &I32Load { offset }),
2102 Type::U64 | Type::S64 => self.emit_and_lift(ty, addr, &I64Load { offset }),
2103 Type::F32 => self.emit_and_lift(ty, addr, &F32Load { offset }),
2104 Type::F64 => self.emit_and_lift(ty, addr, &F64Load { offset }),
2105 Type::String => self.read_list_from_memory(ty, addr, offset),
2106 Type::ErrorContext => self.emit_and_lift(ty, addr, &I32Load { offset }),
2107
2108 Type::Id(id) => match &self.resolve.types[id].kind {
2109 TypeDefKind::Type(t) => self.read_from_memory(t, addr, offset),
2110
2111 TypeDefKind::List(_) => self.read_list_from_memory(ty, addr, offset),
2112
2113 TypeDefKind::Future(_) | TypeDefKind::Stream(_) | TypeDefKind::Handle(_) => {
2114 self.emit_and_lift(ty, addr, &I32Load { offset })
2115 }
2116
2117 TypeDefKind::Resource => {
2118 todo!();
2119 }
2120
2121 // Read and lift each field individually, adjusting the offset
2122 // as we go along, then aggregate all the fields into the
2123 // record.
2124 TypeDefKind::Record(record) => {
2125 self.read_fields_from_memory(record.fields.iter().map(|f| &f.ty), addr, offset);
2126 self.emit(&RecordLift {
2127 record,
2128 ty: id,
2129 name: self.resolve.types[id].name.as_deref().unwrap(),
2130 });
2131 }
2132
2133 TypeDefKind::Tuple(tuple) => {
2134 self.read_fields_from_memory(&tuple.types, addr, offset);
2135 self.emit(&TupleLift { tuple, ty: id });
2136 }
2137
2138 TypeDefKind::Flags(f) => {
2139 match f.repr() {
2140 FlagsRepr::U8 => {
2141 self.stack.push(addr);
2142 self.load_intrepr(offset, Int::U8);
2143 }
2144 FlagsRepr::U16 => {
2145 self.stack.push(addr);
2146 self.load_intrepr(offset, Int::U16);
2147 }
2148 FlagsRepr::U32(n) => {
2149 for i in 0..n {
2150 self.stack.push(addr.clone());
2151 self.emit(&I32Load {
2152 offset: offset.add_bytes(i * 4),
2153 });
2154 }
2155 }
2156 }
2157 self.lift(ty);
2158 }
2159
2160 // Each case will get its own block, and we'll dispatch to the
2161 // right block based on the `i32.load` we initially perform. Each
2162 // individual block is pretty simple and just reads the payload type
2163 // from the corresponding offset if one is available.
2164 TypeDefKind::Variant(variant) => {
2165 self.read_variant_arms_from_memory(
2166 offset,
2167 addr,
2168 variant.tag(),
2169 variant.cases.iter().map(|c| c.ty.as_ref()),
2170 );
2171 self.emit(&VariantLift {
2172 variant,
2173 ty: id,
2174 name: self.resolve.types[id].name.as_deref().unwrap(),
2175 });
2176 }
2177
2178 TypeDefKind::Option(t) => {
2179 self.read_variant_arms_from_memory(offset, addr, Int::U8, [None, Some(t)]);
2180 self.emit(&OptionLift { payload: t, ty: id });
2181 }
2182
2183 TypeDefKind::Result(r) => {
2184 self.read_variant_arms_from_memory(
2185 offset,
2186 addr,
2187 Int::U8,
2188 [r.ok.as_ref(), r.err.as_ref()],
2189 );
2190 self.emit(&ResultLift { result: r, ty: id });
2191 }
2192
2193 TypeDefKind::Enum(e) => {
2194 self.stack.push(addr.clone());
2195 self.load_intrepr(offset, e.tag());
2196 self.lift(ty);
2197 }
2198
2199 TypeDefKind::Unknown => unreachable!(),
2200 TypeDefKind::FixedSizeList(ty, size) => {
2201 self.push_block();
2202 self.emit(&IterBasePointer);
2203 let elemaddr = self.stack.pop().unwrap();
2204 self.read_from_memory(ty, elemaddr, offset);
2205 self.finish_block(1);
2206 self.stack.push(addr.clone());
2207 self.emit(&FixedLengthListLiftFromMemory {
2208 element: ty,
2209 size: *size,
2210 id,
2211 });
2212 }
2213 TypeDefKind::Map(..) => todo!(),
2214 },
2215 }
2216 }
2217
2218 fn read_results_from_memory(
2219 &mut self,
2220 result: &Option<Type>,
2221 addr: B::Operand,
2222 offset: ArchitectureSize,
2223 ) {
2224 self.read_fields_from_memory(result, addr, offset)
2225 }
2226
2227 fn read_variant_arms_from_memory<'b>(
2228 &mut self,
2229 offset: ArchitectureSize,
2230 addr: B::Operand,
2231 tag: Int,
2232 cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
2233 ) {
2234 self.stack.push(addr.clone());
2235 self.load_intrepr(offset, tag);
2236 let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
2237 for ty in cases {
2238 self.push_block();
2239 if let Some(ty) = ty {
2240 self.read_from_memory(ty, addr.clone(), payload_offset);
2241 }
2242 self.finish_block(ty.is_some() as usize);
2243 }
2244 }
2245
2246 fn read_list_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
2247 // Read the pointer/len and then perform the standard lifting
2248 // proceses.
2249 self.stack.push(addr.clone());
2250 self.emit(&Instruction::PointerLoad { offset });
2251 self.stack.push(addr);
2252 self.emit(&Instruction::LengthLoad {
2253 offset: offset + self.bindgen.sizes().align(ty).into(),
2254 });
2255 self.lift(ty);
2256 }
2257
2258 fn read_fields_from_memory<'b>(
2259 &mut self,
2260 tys: impl IntoIterator<Item = &'b Type>,
2261 addr: B::Operand,
2262 offset: ArchitectureSize,
2263 ) {
2264 for (field_offset, ty) in self.bindgen.sizes().field_offsets(tys).iter() {
2265 self.read_from_memory(ty, addr.clone(), offset + (*field_offset));
2266 }
2267 }
2268
2269 fn emit_and_lift(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
2270 self.stack.push(addr);
2271 self.emit(instr);
2272 self.lift(ty);
2273 }
2274
2275 fn load_intrepr(&mut self, offset: ArchitectureSize, repr: Int) {
2276 self.emit(&match repr {
2277 Int::U64 => Instruction::I64Load { offset },
2278 Int::U32 => Instruction::I32Load { offset },
2279 Int::U16 => Instruction::I32Load16U { offset },
2280 Int::U8 => Instruction::I32Load8U { offset },
2281 });
2282 }
2283
2284 fn store_intrepr(&mut self, offset: ArchitectureSize, repr: Int) {
2285 self.emit(&match repr {
2286 Int::U64 => Instruction::I64Store { offset },
2287 Int::U32 => Instruction::I32Store { offset },
2288 Int::U16 => Instruction::I32Store16 { offset },
2289 Int::U8 => Instruction::I32Store8 { offset },
2290 });
2291 }
2292
2293 /// Runs the deallocation of `ty` for the operands currently on
2294 /// `self.stack`.
2295 ///
2296 /// This will pop the ABI items of `ty` from `self.stack`.
2297 fn deallocate(&mut self, ty: &Type, what: Deallocate) {
2298 use Instruction::*;
2299
2300 match *ty {
2301 Type::String => {
2302 self.emit(&Instruction::GuestDeallocateString);
2303 }
2304
2305 Type::Bool
2306 | Type::U8
2307 | Type::S8
2308 | Type::U16
2309 | Type::S16
2310 | Type::U32
2311 | Type::S32
2312 | Type::Char
2313 | Type::U64
2314 | Type::S64
2315 | Type::F32
2316 | Type::F64
2317 | Type::ErrorContext => {
2318 // No deallocation necessary, just discard the operand on the
2319 // stack.
2320 self.stack.pop().unwrap();
2321 }
2322
2323 Type::Id(id) => match &self.resolve.types[id].kind {
2324 TypeDefKind::Type(t) => self.deallocate(t, what),
2325
2326 TypeDefKind::List(element) => {
2327 self.push_block();
2328 self.emit(&IterBasePointer);
2329 let elemaddr = self.stack.pop().unwrap();
2330 self.deallocate_indirect(element, elemaddr, Default::default(), what);
2331 self.finish_block(0);
2332
2333 self.emit(&Instruction::GuestDeallocateList { element });
2334 }
2335
2336 TypeDefKind::Handle(Handle::Own(_))
2337 | TypeDefKind::Future(_)
2338 | TypeDefKind::Stream(_)
2339 if what.handles() =>
2340 {
2341 self.lift(ty);
2342 self.emit(&DropHandle { ty });
2343 }
2344
2345 TypeDefKind::Record(record) => {
2346 self.flat_for_each_record_type(
2347 ty,
2348 record.fields.iter().map(|f| &f.ty),
2349 |me, ty| me.deallocate(ty, what),
2350 );
2351 }
2352
2353 TypeDefKind::Tuple(tuple) => {
2354 self.flat_for_each_record_type(ty, tuple.types.iter(), |me, ty| {
2355 me.deallocate(ty, what)
2356 });
2357 }
2358
2359 TypeDefKind::Variant(variant) => {
2360 self.flat_for_each_variant_arm(
2361 ty,
2362 false,
2363 variant.cases.iter().map(|c| c.ty.as_ref()),
2364 |me, ty| me.deallocate(ty, what),
2365 );
2366 self.emit(&GuestDeallocateVariant {
2367 blocks: variant.cases.len(),
2368 });
2369 }
2370
2371 TypeDefKind::Option(t) => {
2372 self.flat_for_each_variant_arm(ty, false, [None, Some(t)], |me, ty| {
2373 me.deallocate(ty, what)
2374 });
2375 self.emit(&GuestDeallocateVariant { blocks: 2 });
2376 }
2377
2378 TypeDefKind::Result(e) => {
2379 self.flat_for_each_variant_arm(
2380 ty,
2381 false,
2382 [e.ok.as_ref(), e.err.as_ref()],
2383 |me, ty| me.deallocate(ty, what),
2384 );
2385 self.emit(&GuestDeallocateVariant { blocks: 2 });
2386 }
2387
2388 // discard the operand on the stack, otherwise nothing to free.
2389 TypeDefKind::Flags(_)
2390 | TypeDefKind::Enum(_)
2391 | TypeDefKind::Future(_)
2392 | TypeDefKind::Stream(_)
2393 | TypeDefKind::Handle(Handle::Own(_))
2394 | TypeDefKind::Handle(Handle::Borrow(_)) => {
2395 self.stack.pop().unwrap();
2396 }
2397
2398 TypeDefKind::Resource => unreachable!(),
2399 TypeDefKind::Unknown => unreachable!(),
2400
2401 TypeDefKind::FixedSizeList(..) => todo!(),
2402 TypeDefKind::Map(..) => todo!(),
2403 },
2404 }
2405 }
2406
2407 fn deallocate_indirect(
2408 &mut self,
2409 ty: &Type,
2410 addr: B::Operand,
2411 offset: ArchitectureSize,
2412 what: Deallocate,
2413 ) {
2414 use Instruction::*;
2415
2416 // No need to execute any instructions if this type itself doesn't
2417 // require any form of post-return.
2418 if !needs_deallocate(self.resolve, ty, what) {
2419 return;
2420 }
2421
2422 match *ty {
2423 Type::String => {
2424 self.stack.push(addr.clone());
2425 self.emit(&Instruction::PointerLoad { offset });
2426 self.stack.push(addr);
2427 self.emit(&Instruction::LengthLoad {
2428 offset: offset + self.bindgen.sizes().align(ty).into(),
2429 });
2430 self.deallocate(ty, what);
2431 }
2432
2433 Type::Bool
2434 | Type::U8
2435 | Type::S8
2436 | Type::U16
2437 | Type::S16
2438 | Type::U32
2439 | Type::S32
2440 | Type::Char
2441 | Type::U64
2442 | Type::S64
2443 | Type::F32
2444 | Type::F64
2445 | Type::ErrorContext => {}
2446
2447 Type::Id(id) => match &self.resolve.types[id].kind {
2448 TypeDefKind::Type(t) => self.deallocate_indirect(t, addr, offset, what),
2449
2450 TypeDefKind::List(_) => {
2451 self.stack.push(addr.clone());
2452 self.emit(&Instruction::PointerLoad { offset });
2453 self.stack.push(addr);
2454 self.emit(&Instruction::LengthLoad {
2455 offset: offset + self.bindgen.sizes().align(ty).into(),
2456 });
2457
2458 self.deallocate(ty, what);
2459 }
2460
2461 TypeDefKind::Handle(Handle::Own(_))
2462 | TypeDefKind::Future(_)
2463 | TypeDefKind::Stream(_)
2464 if what.handles() =>
2465 {
2466 self.read_from_memory(ty, addr, offset);
2467 self.emit(&DropHandle { ty });
2468 }
2469
2470 TypeDefKind::Handle(Handle::Own(_)) => unreachable!(),
2471 TypeDefKind::Handle(Handle::Borrow(_)) => unreachable!(),
2472 TypeDefKind::Resource => unreachable!(),
2473
2474 TypeDefKind::Record(record) => {
2475 self.deallocate_indirect_fields(
2476 &record.fields.iter().map(|f| f.ty).collect::<Vec<_>>(),
2477 addr,
2478 offset,
2479 what,
2480 );
2481 }
2482
2483 TypeDefKind::Tuple(tuple) => {
2484 self.deallocate_indirect_fields(&tuple.types, addr, offset, what);
2485 }
2486
2487 TypeDefKind::Flags(_) => {}
2488
2489 TypeDefKind::Variant(variant) => {
2490 self.deallocate_indirect_variant(
2491 offset,
2492 addr,
2493 variant.tag(),
2494 variant.cases.iter().map(|c| c.ty.as_ref()),
2495 what,
2496 );
2497 self.emit(&GuestDeallocateVariant {
2498 blocks: variant.cases.len(),
2499 });
2500 }
2501
2502 TypeDefKind::Option(t) => {
2503 self.deallocate_indirect_variant(offset, addr, Int::U8, [None, Some(t)], what);
2504 self.emit(&GuestDeallocateVariant { blocks: 2 });
2505 }
2506
2507 TypeDefKind::Result(e) => {
2508 self.deallocate_indirect_variant(
2509 offset,
2510 addr,
2511 Int::U8,
2512 [e.ok.as_ref(), e.err.as_ref()],
2513 what,
2514 );
2515 self.emit(&GuestDeallocateVariant { blocks: 2 });
2516 }
2517
2518 TypeDefKind::Enum(_) => {}
2519
2520 TypeDefKind::Future(_) => unreachable!(),
2521 TypeDefKind::Stream(_) => unreachable!(),
2522 TypeDefKind::Unknown => unreachable!(),
2523 TypeDefKind::FixedSizeList(_, _) => {}
2524 TypeDefKind::Map(..) => todo!(),
2525 },
2526 }
2527 }
2528
2529 fn deallocate_indirect_variant<'b>(
2530 &mut self,
2531 offset: ArchitectureSize,
2532 addr: B::Operand,
2533 tag: Int,
2534 cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
2535 what: Deallocate,
2536 ) {
2537 self.stack.push(addr.clone());
2538 self.load_intrepr(offset, tag);
2539 let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
2540 for ty in cases {
2541 self.push_block();
2542 if let Some(ty) = ty {
2543 self.deallocate_indirect(ty, addr.clone(), payload_offset, what);
2544 }
2545 self.finish_block(0);
2546 }
2547 }
2548
2549 fn deallocate_indirect_fields(
2550 &mut self,
2551 tys: &[Type],
2552 addr: B::Operand,
2553 offset: ArchitectureSize,
2554 what: Deallocate,
2555 ) {
2556 for (field_offset, ty) in self.bindgen.sizes().field_offsets(tys) {
2557 self.deallocate_indirect(ty, addr.clone(), offset + (field_offset), what);
2558 }
2559 }
2560}
2561
2562fn cast(from: WasmType, to: WasmType) -> Bitcast {
2563 use WasmType::*;
2564
2565 match (from, to) {
2566 (I32, I32)
2567 | (I64, I64)
2568 | (F32, F32)
2569 | (F64, F64)
2570 | (Pointer, Pointer)
2571 | (PointerOrI64, PointerOrI64)
2572 | (Length, Length) => Bitcast::None,
2573
2574 (I32, I64) => Bitcast::I32ToI64,
2575 (F32, I32) => Bitcast::F32ToI32,
2576 (F64, I64) => Bitcast::F64ToI64,
2577
2578 (I64, I32) => Bitcast::I64ToI32,
2579 (I32, F32) => Bitcast::I32ToF32,
2580 (I64, F64) => Bitcast::I64ToF64,
2581
2582 (F32, I64) => Bitcast::F32ToI64,
2583 (I64, F32) => Bitcast::I64ToF32,
2584
2585 (I64, PointerOrI64) => Bitcast::I64ToP64,
2586 (Pointer, PointerOrI64) => Bitcast::PToP64,
2587 (_, PointerOrI64) => {
2588 Bitcast::Sequence(Box::new([cast(from, I64), cast(I64, PointerOrI64)]))
2589 }
2590
2591 (PointerOrI64, I64) => Bitcast::P64ToI64,
2592 (PointerOrI64, Pointer) => Bitcast::P64ToP,
2593 (PointerOrI64, _) => Bitcast::Sequence(Box::new([cast(PointerOrI64, I64), cast(I64, to)])),
2594
2595 (I32, Pointer) => Bitcast::I32ToP,
2596 (Pointer, I32) => Bitcast::PToI32,
2597 (I32, Length) => Bitcast::I32ToL,
2598 (Length, I32) => Bitcast::LToI32,
2599 (I64, Length) => Bitcast::I64ToL,
2600 (Length, I64) => Bitcast::LToI64,
2601 (Pointer, Length) => Bitcast::PToL,
2602 (Length, Pointer) => Bitcast::LToP,
2603
2604 (F32, Pointer | Length) => Bitcast::Sequence(Box::new([cast(F32, I32), cast(I32, to)])),
2605 (Pointer | Length, F32) => Bitcast::Sequence(Box::new([cast(from, I32), cast(I32, F32)])),
2606
2607 (F32, F64)
2608 | (F64, F32)
2609 | (F64, I32)
2610 | (I32, F64)
2611 | (Pointer | Length, I64 | F64)
2612 | (I64 | F64, Pointer | Length) => {
2613 unreachable!("Don't know how to bitcast from {:?} to {:?}", from, to);
2614 }
2615 }
2616}
2617
2618/// Flatten types in a given type
2619///
2620/// It is sometimes necessary to restrict the number of max parameters dynamically,
2621/// for example during an async guest import call (flat params are limited to 4)
2622fn flat_types(resolve: &Resolve, ty: &Type, max_params: Option<usize>) -> Option<Vec<WasmType>> {
2623 let max_params = max_params.unwrap_or(MAX_FLAT_PARAMS);
2624 let mut storage = iter::repeat_n(WasmType::I32, max_params).collect::<Vec<_>>();
2625 let mut flat = FlatTypes::new(storage.as_mut_slice());
2626 resolve.push_flat(ty, &mut flat).then_some(flat.to_vec())
2627}