wit_bindgen_core/abi.rs
use std::fmt;
use std::iter;

pub use wit_parser::abi::{AbiVariant, FlatTypes, WasmSignature, WasmType};
use wit_parser::{
    Alignment, ArchitectureSize, ElementInfo, Enum, Flags, FlagsRepr, Function, Handle, Int,
    Record, Resolve, Result_, SizeAlign, Tuple, Type, TypeDefKind, TypeId, Variant, align_to_arch,
};

// Helper macro for defining instructions without having to have tons of
// exhaustive `match` statements to update
macro_rules! def_instruction {
    (
        $( #[$enum_attr:meta] )*
        pub enum $name:ident<'a> {
            $(
                $( #[$attr:meta] )*
                $variant:ident $( {
                    $($field:ident : $field_ty:ty $(,)* )*
                } )?
                    :
                [$num_popped:expr] => [$num_pushed:expr],
            )*
        }
    ) => {
        $( #[$enum_attr] )*
        pub enum $name<'a> {
            $(
                $( #[$attr] )*
                $variant $( {
                    $(
                        $field : $field_ty,
                    )*
                } )? ,
            )*
        }

        impl $name<'_> {
            /// How many operands does this instruction pop from the stack?
            #[allow(unused_variables, reason = "match arms bind fields for exhaustiveness, not usage")]
            pub fn operands_len(&self) -> usize {
                match self {
                    $(
                        Self::$variant $( {
                            $(
                                $field,
                            )*
                        } )? => $num_popped,
                    )*
                }
            }

            /// How many results does this instruction push onto the stack?
            #[allow(unused_variables, reason = "match arms bind fields for exhaustiveness, not usage")]
            pub fn results_len(&self) -> usize {
                match self {
                    $(
                        Self::$variant $( {
                            $(
                                $field,
                            )*
                        } )? => $num_pushed,
                    )*
                }
            }
        }
    };
}

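// For illustration only, a rough sketch of the shape `def_instruction!`
// expands to for a single variant (hand-written here, not the literal
// macro output):
//
//     pub enum Instruction<'a> {
//         Bitcasts { casts: &'a [Bitcast] },
//         // ... one variant per instruction ...
//     }
//
//     impl Instruction<'_> {
//         pub fn operands_len(&self) -> usize {
//             match self {
//                 Self::Bitcasts { casts } => casts.len(),
//                 // ... the `[$num_popped]` expression for each variant ...
//             }
//         }
//     }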
def_instruction! {
    #[derive(Debug)]
    pub enum Instruction<'a> {
        /// Acquires the specified parameter and places it on the stack.
        /// Depending on the context this may refer to wasm parameters or
        /// interface types parameters.
        GetArg { nth: usize } : [0] => [1],

        // Integer const/manipulation instructions

        /// Pushes the constant `val` onto the stack.
        I32Const { val: i32 } : [0] => [1],
        /// Casts the top N items on the stack using the `Bitcast` enum
        /// provided. Consumes the same number of operands that this produces.
        Bitcasts { casts: &'a [Bitcast] } : [casts.len()] => [casts.len()],
        /// Pushes one constant zero onto the stack for each wasm type given.
        ConstZero { tys: &'a [WasmType] } : [0] => [tys.len()],

        // Memory load/store instructions

        /// Pops a pointer from the stack and loads a little-endian `i32` from
        /// it, using the specified constant offset.
        I32Load { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i8` from
        /// it, using the specified constant offset. The value loaded is
        /// zero-extended to 32 bits.
        I32Load8U { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i8` from
        /// it, using the specified constant offset. The value loaded is
        /// sign-extended to 32 bits.
        I32Load8S { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i16` from
        /// it, using the specified constant offset. The value loaded is
        /// zero-extended to 32 bits.
        I32Load16U { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i16` from
        /// it, using the specified constant offset. The value loaded is
        /// sign-extended to 32 bits.
        I32Load16S { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `i64` from
        /// it, using the specified constant offset.
        I64Load { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `f32` from
        /// it, using the specified constant offset.
        F32Load { offset: ArchitectureSize } : [1] => [1],
        /// Pops a pointer from the stack and loads a little-endian `f64` from
        /// it, using the specified constant offset.
        F64Load { offset: ArchitectureSize } : [1] => [1],

        /// Like `I32Load` or `I64Load`, but for loading pointer values.
        PointerLoad { offset: ArchitectureSize } : [1] => [1],
        /// Like `I32Load` or `I64Load`, but for loading array length values.
        LengthLoad { offset: ArchitectureSize } : [1] => [1],

        /// Pops a pointer from the stack and then an `i32` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        I32Store { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `i32` value.
        /// Stores the low 8 bits of the value in little-endian at the pointer
        /// specified plus the constant `offset`.
        I32Store8 { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `i32` value.
        /// Stores the low 16 bits of the value in little-endian at the pointer
        /// specified plus the constant `offset`.
        I32Store16 { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `i64` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        I64Store { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `f32` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        F32Store { offset: ArchitectureSize } : [2] => [0],
        /// Pops a pointer from the stack and then an `f64` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        F64Store { offset: ArchitectureSize } : [2] => [0],

        /// Like `I32Store` or `I64Store`, but for storing pointer values.
        PointerStore { offset: ArchitectureSize } : [2] => [0],
        /// Like `I32Store` or `I64Store`, but for storing array length values.
        LengthStore { offset: ArchitectureSize } : [2] => [0],

        // Scalar lifting/lowering

        /// Converts an interface type `char` value to a 32-bit integer
        /// representing the unicode scalar value.
        I32FromChar : [1] => [1],
        /// Converts an interface type `u64` value to a wasm `i64`.
        I64FromU64 : [1] => [1],
        /// Converts an interface type `s64` value to a wasm `i64`.
        I64FromS64 : [1] => [1],
        /// Converts an interface type `u32` value to a wasm `i32`.
        I32FromU32 : [1] => [1],
        /// Converts an interface type `s32` value to a wasm `i32`.
        I32FromS32 : [1] => [1],
        /// Converts an interface type `u16` value to a wasm `i32`.
        I32FromU16 : [1] => [1],
        /// Converts an interface type `s16` value to a wasm `i32`.
        I32FromS16 : [1] => [1],
        /// Converts an interface type `u8` value to a wasm `i32`.
        I32FromU8 : [1] => [1],
        /// Converts an interface type `s8` value to a wasm `i32`.
        I32FromS8 : [1] => [1],
        /// Converts an interface type `f32` value to a wasm `f32`.
        ///
        /// This may be a noop for some implementations, but it's here in case
        /// the native language representation of `f32` is different from the
        /// wasm representation of `f32`.
        CoreF32FromF32 : [1] => [1],
        /// Converts an interface type `f64` value to a wasm `f64`.
        ///
        /// This may be a noop for some implementations, but it's here in case
        /// the native language representation of `f64` is different from the
        /// wasm representation of `f64`.
        CoreF64FromF64 : [1] => [1],

        /// Converts a native wasm `i32` to an interface type `s8`.
        ///
        /// This will truncate the upper bits of the `i32`.
        S8FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u8`.
        ///
        /// This will truncate the upper bits of the `i32`.
        U8FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `s16`.
        ///
        /// This will truncate the upper bits of the `i32`.
        S16FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u16`.
        ///
        /// This will truncate the upper bits of the `i32`.
        U16FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `s32`.
        S32FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u32`.
        U32FromI32 : [1] => [1],
        /// Converts a native wasm `i64` to an interface type `s64`.
        S64FromI64 : [1] => [1],
        /// Converts a native wasm `i64` to an interface type `u64`.
        U64FromI64 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `char`.
        ///
        /// It's safe to assume that the `i32` is indeed a valid unicode code point.
        CharFromI32 : [1] => [1],
        /// Converts a native wasm `f32` to an interface type `f32`.
        F32FromCoreF32 : [1] => [1],
        /// Converts a native wasm `f64` to an interface type `f64`.
        F64FromCoreF64 : [1] => [1],

        /// Creates a `bool` from an `i32` input, trapping if the `i32` isn't
        /// zero or one.
        BoolFromI32 : [1] => [1],
        /// Creates an `i32` from a `bool` input, must return 0 or 1.
        I32FromBool : [1] => [1],

        // lists

        /// Lowers a list where the element's layout in the native language is
        /// expected to match the canonical ABI definition of interface types.
        ///
        /// Pops a list value from the stack and pushes the pointer/length onto
        /// the stack. If `realloc` is set to `Some` then this is expected to
        /// *consume* the list which means that the data needs to be copied. An
        /// allocation/copy is expected when:
        ///
        /// * A host is calling a wasm export with a list (it needs to copy the
        ///   list into the callee's module, allocating space with `realloc`)
        /// * A wasm export is returning a list (it's expected to use `realloc`
        ///   to give ownership of the list to the caller)
        /// * A host is returning a list in an import definition (meaning that
        ///   space needs to be allocated in the caller with `realloc`)
        ///
        /// A copy does not happen (e.g. `realloc` is `None`) when:
        ///
        /// * A wasm module calls an import with the list. In this situation
        ///   it's expected the caller will know how to access this module's
        ///   memory (e.g. the host has raw access or wasm-to-wasm communication
        ///   would copy the list).
        ///
        /// If `realloc` is `Some` then the adapter is not responsible for
        /// cleaning up this list because the other end is receiving the
        /// allocation. If `realloc` is `None` then the adapter is responsible
        /// for cleaning up any temporary allocation it created, if any.
        ListCanonLower {
            element: &'a Type,
            realloc: Option<&'a str>,
        } : [1] => [2],

        /// Same as `ListCanonLower`, but used for strings.
        StringLower {
            realloc: Option<&'a str>,
        } : [1] => [2],

        /// Lowers a list where the element's layout in the native language is
        /// not expected to match the canonical ABI definition of interface
        /// types.
        ///
        /// Pops a list value from the stack and pushes the pointer/length onto
        /// the stack. This operation also pops a block from the block stack
        /// which is used as the iteration body of writing each element of the
        /// list consumed.
        ///
        /// The `realloc` field here behaves the same way as `ListCanonLower`.
        /// It's only set to `None` when a wasm module calls a declared import.
        /// Otherwise lowering in other contexts requires allocating memory for
        /// the receiver to own.
        ListLower {
            element: &'a Type,
            realloc: Option<&'a str>,
        } : [1] => [2],

        /// Lifts a list which has a canonical representation into an interface
        /// types value.
        ///
        /// The term "canonical" representation here means that the
        /// representation of the interface types value in the native language
        /// exactly matches the canonical ABI definition of the type.
        ///
        /// This will consume two `i32` values from the stack, a pointer and a
        /// length, and then produces an interface value list.
        ListCanonLift {
            element: &'a Type,
            ty: TypeId,
        } : [2] => [1],

        /// Same as `ListCanonLift`, but used for strings.
        StringLift : [2] => [1],

        /// Lifts a list into an interface types value.
        ///
        /// This will consume two `i32` values from the stack, a pointer and a
        /// length, and then produces an interface value list.
        ///
        /// This will also pop a block from the block stack which is how to
        /// read each individual element from the list.
        ListLift {
            element: &'a Type,
            ty: TypeId,
        } : [2] => [1],

        /// Pushes an operand onto the stack representing the list item from
        /// each iteration of the list.
        ///
        /// This is only used inside of blocks related to lowering lists.
        IterElem { element: &'a Type } : [0] => [1],

        /// Pushes an operand onto the stack representing the base pointer of
        /// the next element in a list.
        ///
        /// This is used for both lifting and lowering lists.
        IterBasePointer : [0] => [1],

        // records and tuples

        /// Pops a record value off the stack, decomposes the record to all of
        /// its fields, and then pushes the fields onto the stack.
        RecordLower {
            record: &'a Record,
            name: &'a str,
            ty: TypeId,
        } : [1] => [record.fields.len()],

        /// Pops all fields for a record off the stack and then composes them
        /// into a record.
        RecordLift {
            record: &'a Record,
            name: &'a str,
            ty: TypeId,
        } : [record.fields.len()] => [1],

        /// Create an `i32` from a handle.
        HandleLower {
            handle: &'a Handle,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Create a handle from an `i32`.
        HandleLift {
            handle: &'a Handle,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Create an `i32` from a future.
        FutureLower {
            payload: &'a Option<Type>,
            ty: TypeId,
        } : [1] => [1],

        /// Create a future from an `i32`.
        FutureLift {
            payload: &'a Option<Type>,
            ty: TypeId,
        } : [1] => [1],

        /// Create an `i32` from a stream.
        StreamLower {
            payload: &'a Option<Type>,
            ty: TypeId,
        } : [1] => [1],

        /// Create a stream from an `i32`.
        StreamLift {
            payload: &'a Option<Type>,
            ty: TypeId,
        } : [1] => [1],

        /// Create an `i32` from an error-context.
        ErrorContextLower : [1] => [1],

        /// Create an error-context from an `i32`.
        ErrorContextLift : [1] => [1],

        /// Pops a tuple value off the stack, decomposes the tuple to all of
        /// its fields, and then pushes the fields onto the stack.
        TupleLower {
            tuple: &'a Tuple,
            ty: TypeId,
        } : [1] => [tuple.types.len()],

        /// Pops all fields for a tuple off the stack and then composes them
        /// into a tuple.
        TupleLift {
            tuple: &'a Tuple,
            ty: TypeId,
        } : [tuple.types.len()] => [1],

        /// Converts a language-specific record-of-bools to a list of `i32`.
        FlagsLower {
            flags: &'a Flags,
            name: &'a str,
            ty: TypeId,
        } : [1] => [flags.repr().count()],
        /// Converts a list of native wasm `i32` to a language-specific
        /// record-of-bools.
        FlagsLift {
            flags: &'a Flags,
            name: &'a str,
            ty: TypeId,
        } : [flags.repr().count()] => [1],

        // variants

        /// This is a special instruction used by the `VariantLower`
        /// instruction to determine the name of the payload, if present, to
        /// use within each block.
        ///
        /// Each sub-block will have this be the first instruction, and if it
        /// lowers a payload it will expect something bound to this name.
        VariantPayloadName : [0] => [1],

        /// Pops a variant off the stack as well as `ty.cases.len()` blocks
        /// from the code generator. Uses each of those blocks and the value
        /// from the stack to produce `results.len()` items.
        VariantLower {
            variant: &'a Variant,
            name: &'a str,
            ty: TypeId,
            results: &'a [WasmType],
        } : [1] => [results.len()],

        /// Pops an `i32` off the stack as well as `ty.cases.len()` blocks
        /// from the code generator. Uses each of those blocks and the value
        /// from the stack to produce a final variant.
        VariantLift {
            variant: &'a Variant,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Pops an enum off the stack and pushes the `i32` representation.
        EnumLower {
            enum_: &'a Enum,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Pops an `i32` off the stack and lifts it into the `enum` specified.
        EnumLift {
            enum_: &'a Enum,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        /// Specialization of `VariantLower` for specifically `option<T>` types,
        /// otherwise behaves the same as `VariantLower` (e.g. two blocks for
        /// the two cases).
        OptionLower {
            payload: &'a Type,
            ty: TypeId,
            results: &'a [WasmType],
        } : [1] => [results.len()],

        /// Specialization of `VariantLift` for specifically the `option<T>`
        /// type. Otherwise behaves the same as the `VariantLift` instruction
        /// with two blocks for the lift.
        OptionLift {
            payload: &'a Type,
            ty: TypeId,
        } : [1] => [1],

        /// Specialization of `VariantLower` for specifically `result<T, E>`
        /// types, otherwise behaves the same as `VariantLower` (e.g. two blocks
        /// for the two cases).
        ResultLower {
            result: &'a Result_,
            ty: TypeId,
            results: &'a [WasmType],
        } : [1] => [results.len()],

        /// Specialization of `VariantLift` for specifically the `result<T,
        /// E>` type. Otherwise behaves the same as the `VariantLift`
        /// instruction with two blocks for the lift.
        ResultLift {
            result: &'a Result_,
            ty: TypeId,
        } : [1] => [1],

        // calling/control flow

        /// Represents a call to a raw WebAssembly API. The module/name are
        /// provided inline as well as the types if necessary.
        CallWasm {
            name: &'a str,
            sig: &'a WasmSignature,
        } : [sig.params.len()] => [sig.results.len()],

        /// Same as `CallWasm`, except the dual where an interface is being
        /// called rather than a raw wasm function.
        ///
        /// Note that this will be used for async functions, and `async_`
        /// indicates whether the function should be invoked in an async
        /// fashion.
        CallInterface {
            func: &'a Function,
            async_: bool,
        } : [func.params.len()] => [usize::from(func.result.is_some())],

        /// Returns `amt` values on the stack. This is always the last
        /// instruction.
        Return { amt: usize, func: &'a Function } : [*amt] => [0],

        /// Calls the `realloc` function specified in a malloc-like fashion
        /// allocating `size` bytes with alignment `align`.
        ///
        /// Pushes the returned pointer onto the stack.
        Malloc {
            realloc: &'static str,
            size: ArchitectureSize,
            align: Alignment,
        } : [0] => [1],

        /// Used exclusively for guest-code generation, this indicates that
        /// the standard memory deallocation function needs to be invoked with
        /// the specified parameters.
        ///
        /// This will pop a pointer from the stack and push nothing.
        GuestDeallocate {
            size: ArchitectureSize,
            align: Alignment,
        } : [1] => [0],

        /// Used exclusively for guest-code generation, this indicates that
        /// a string is being deallocated. The ptr/length are on the stack and
        /// are popped off and used to deallocate the string.
        GuestDeallocateString : [2] => [0],

        /// Used exclusively for guest-code generation, this indicates that
        /// a list is being deallocated. The ptr/length are on the stack and
        /// are popped off and used to deallocate the list.
        ///
        /// This variant also pops a block off the block stack to be used as the
        /// body of the deallocation loop.
        GuestDeallocateList {
            element: &'a Type,
        } : [2] => [0],

        /// Used exclusively for guest-code generation, this indicates that
        /// a variant is being deallocated. The integer discriminant is popped
        /// off the stack as well as `blocks` number of blocks popped from the
        /// blocks stack. The variant is used to select, at runtime, which of
        /// the blocks is executed to deallocate the variant.
        GuestDeallocateVariant {
            blocks: usize,
        } : [1] => [0],

        /// Deallocates the language-specific handle representation on the top
        /// of the stack. Used for async imports.
        DropHandle { ty: &'a Type } : [1] => [0],

        /// Call `task.return` for an async-lifted export.
        ///
        /// This will call core wasm import `name` which will be mapped to
        /// `task.return` later on. The function given has `params` as its
        /// parameters and it will return no results. This is used to pass the
        /// lowered representation of a function's results to `task.return`.
        AsyncTaskReturn { name: &'a str, params: &'a [WasmType] } : [params.len()] => [0],

        /// Force the evaluation of the specified number of expressions and push
        /// the results to the stack.
        ///
        /// This is useful prior to disposing of temporary variables and/or
        /// allocations which are referenced by one or more not-yet-evaluated
        /// expressions.
        Flush { amt: usize } : [*amt] => [*amt],
    }
}

#[derive(Debug, PartialEq)]
pub enum Bitcast {
    // Upcasts
    F32ToI32,
    F64ToI64,
    I32ToI64,
    F32ToI64,

    // Downcasts
    I32ToF32,
    I64ToF64,
    I64ToI32,
    I64ToF32,

    // PointerOrI64 conversions. These preserve provenance when the source
    // or destination is a pointer value.
    //
    // These are used when pointer values are being stored in
    // (ToP64) and loaded out of (P64To) PointerOrI64 values, so they
    // always have to preserve provenance when the value being loaded or
    // stored is a pointer.
    P64ToI64,
    I64ToP64,
    P64ToP,
    PToP64,

    // Pointer<->number conversions. These do not preserve provenance.
    //
    // These are used when integer or floating-point values are being stored in
    // (I32ToP/etc.) and loaded out of (PToI32/etc.) pointer values, so they
    // never have any provenance to preserve.
    I32ToP,
    PToI32,
    PToL,
    LToP,

    // Number<->Number conversions.
    I32ToL,
    LToI32,
    I64ToL,
    LToI64,

    // Multiple conversions in sequence.
    Sequence(Box<[Bitcast; 2]>),

    None,
}
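
// As a hypothetical illustration of `Bitcast::Sequence`, a conversion that
// requires two hops (say, an `f32` carried through a `PointerOrI64` slot)
// could be expressed as:
//
//     Bitcast::Sequence(Box::new([Bitcast::F32ToI64, Bitcast::I64ToP64]))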

/// Whether the glue code surrounding a call is lifting arguments and lowering
/// results or vice versa.
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum LiftLower {
    /// When the glue code lifts arguments and lowers results.
    ///
    /// ```text
    /// Wasm --lift-args--> SourceLanguage; call; SourceLanguage --lower-results--> Wasm
    /// ```
    LiftArgsLowerResults,
    /// When the glue code lowers arguments and lifts results.
    ///
    /// ```text
    /// SourceLanguage --lower-args--> Wasm; call; Wasm --lift-results--> SourceLanguage
    /// ```
    LowerArgsLiftResults,
}

/// Trait for language implementors to use to generate glue code between native
/// WebAssembly signatures and interface types signatures.
///
/// This is used as an implementation detail in interpreting the ABI between
/// interface types and wasm types. Eventually this will be driven by interface
/// types adapters themselves, but for now the ABI of a function dictates what
/// instructions are fed in.
///
/// Types implementing `Bindgen` are incrementally fed `Instruction` values to
/// generate code for. Instructions operate like a stack machine where each
/// instruction has a list of inputs and a list of outputs (provided by the
/// `emit` function).
pub trait Bindgen {
    /// The intermediate type for fragments of code for this type.
    ///
    /// For most languages `String` is a suitable intermediate type.
    type Operand: Clone + fmt::Debug;

    /// Emit code to implement the given instruction.
    ///
    /// Each operand is given in `operands` and can be popped off if ownership
    /// is required. It's guaranteed that `operands` has the appropriate length
    /// for the `inst` given, as specified with [`Instruction`].
    ///
    /// Each result variable should be pushed onto `results`. This function must
    /// push the appropriate number of results or binding generation will panic.
    fn emit(
        &mut self,
        resolve: &Resolve,
        inst: &Instruction<'_>,
        operands: &mut Vec<Self::Operand>,
        results: &mut Vec<Self::Operand>,
    );

    /// Gets an operand reference to the return pointer area.
    ///
    /// The provided size and alignment are for the function's return type.
    fn return_pointer(&mut self, size: ArchitectureSize, align: Alignment) -> Self::Operand;

    /// Enters a new block of code to generate code for.
    ///
    /// This is currently exclusively used for constructing variants. When a
    /// variant is constructed a block here will be pushed for each case of a
    /// variant, generating the code necessary to translate a variant case.
    ///
    /// Blocks are completed with `finish_block` below. It's expected that `emit`
    /// will always push code (if necessary) into the "current block", which is
    /// updated by calling this method and `finish_block` below.
    fn push_block(&mut self);

    /// Indicates to the code generator that a block is completed, and the
    /// `operand` specified was the resulting value of the block.
    ///
    /// This method will be used to compute the value of each arm of lifting a
    /// variant. The `operand` will be `None` if the variant case didn't
    /// actually have any type associated with it. Otherwise it will be `Some`
    /// as the last value remaining on the stack representing the value
    /// associated with a variant's `case`.
    ///
    /// It's expected that this will resume code generation in the previous
    /// block before `push_block` was called. This must also save the results
    /// of the current block internally for instructions like `ResultLift` to
    /// use later.
    fn finish_block(&mut self, operand: &mut Vec<Self::Operand>);

    /// Returns size information that was previously calculated for all types.
    fn sizes(&self) -> &SizeAlign;

    /// Returns whether or not the specified element type is represented in a
    /// "canonical" form for lists. This dictates whether the `ListCanonLower`
    /// and `ListCanonLift` instructions are used or not.
    fn is_list_canonical(&self, resolve: &Resolve, element: &Type) -> bool;
}

/// Generates an abstract sequence of instructions which represents this
/// function being adapted as an imported function.
///
/// The instructions here, when executed, will emulate a language with
/// interface types calling the concrete wasm implementation. The parameters
/// for the returned instruction sequence are the language's own
/// interface-types parameters. One instruction in the instruction stream
/// will be a `CallWasm` which represents calling the actual raw wasm
/// function signature.
///
/// This function is useful, for example, if you're building a language
/// generator for WASI bindings. This will document how to translate
/// language-specific values into the wasm types to call a WASI function,
/// and it will also automatically convert the results of the WASI function
/// back to a language-specific value.
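///
/// A minimal sketch of driving this from a generator (assuming a hypothetical
/// `MyBindgen` type implementing [`Bindgen`]):
///
/// ```ignore
/// let mut bindgen = MyBindgen::default();
/// call(
///     &resolve,
///     AbiVariant::GuestImport,
///     LiftLower::LowerArgsLiftResults,
///     &func,
///     &mut bindgen,
///     false, // async_
/// );
/// ```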
pub fn call(
    resolve: &Resolve,
    variant: AbiVariant,
    lift_lower: LiftLower,
    func: &Function,
    bindgen: &mut impl Bindgen,
    async_: bool,
) {
    Generator::new(resolve, bindgen).call(func, variant, lift_lower, async_);
}

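/// Lowers `value`, which has type `ty`, into linear memory at `address`.
///
/// This is currently used for lowering the payloads of futures and streams.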
pub fn lower_to_memory<B: Bindgen>(
    resolve: &Resolve,
    bindgen: &mut B,
    address: B::Operand,
    value: B::Operand,
    ty: &Type,
) {
    let mut generator = Generator::new(resolve, bindgen);
    // TODO: make this configurable? Right now this function is only called for
    // future/stream callbacks so it's appropriate to skip realloc here as it's
    // all "lower for wasm import", but this might get reused for something else
    // in the future.
    generator.realloc = Some(Realloc::Export("cabi_realloc"));
    generator.stack.push(value);
    generator.write_to_memory(ty, address, Default::default());
}

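/// Lowers `value`, which has type `ty`, into its flat core wasm
/// representation, returning one operand per flattened core value.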
pub fn lower_flat<B: Bindgen>(
    resolve: &Resolve,
    bindgen: &mut B,
    value: B::Operand,
    ty: &Type,
) -> Vec<B::Operand> {
    let mut generator = Generator::new(resolve, bindgen);
    generator.stack.push(value);
    generator.realloc = Some(Realloc::Export("cabi_realloc"));
    generator.lower(ty);
    generator.stack
}

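/// Lifts a value of type `ty` from linear memory at `address`, returning the
/// single lifted operand.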
pub fn lift_from_memory<B: Bindgen>(
    resolve: &Resolve,
    bindgen: &mut B,
    address: B::Operand,
    ty: &Type,
) -> B::Operand {
    let mut generator = Generator::new(resolve, bindgen);
    generator.read_from_memory(ty, address, Default::default());
    generator.stack.pop().unwrap()
}

/// Used in a similar manner to the [`call`] function, except it is used to
/// generate the `post-return` callback for `func`.
///
/// This is only intended to be used in guest generators for exported
/// functions and will primarily generate `GuestDeallocate*` instructions,
/// plus others used as input to those instructions.
pub fn post_return(resolve: &Resolve, func: &Function, bindgen: &mut impl Bindgen) {
    Generator::new(resolve, bindgen).post_return(func);
}

/// Returns whether the `Function` specified needs a post-return function to
/// be generated in guest code.
///
/// This is primarily used when the return value contains a memory allocation
/// such as a list or a string.
pub fn guest_export_needs_post_return(resolve: &Resolve, func: &Function) -> bool {
    func.result
        .map(|t| needs_deallocate(resolve, &t, Deallocate::Lists))
        .unwrap_or(false)
}

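/// Returns whether any parameter of the `Function` specified contains an
/// allocation, such as a list or a string, which guest code must clean up
/// after lifting.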
pub fn guest_export_params_have_allocations(resolve: &Resolve, func: &Function) -> bool {
    func.params
        .iter()
        .any(|(_, t)| needs_deallocate(resolve, &t, Deallocate::Lists))
}

fn needs_deallocate(resolve: &Resolve, ty: &Type, what: Deallocate) -> bool {
    match ty {
        Type::String => true,
        Type::ErrorContext => true,
        Type::Id(id) => match &resolve.types[*id].kind {
            TypeDefKind::List(_) => true,
            TypeDefKind::Type(t) => needs_deallocate(resolve, t, what),
            TypeDefKind::Handle(Handle::Own(_)) => what.handles(),
            TypeDefKind::Handle(Handle::Borrow(_)) => false,
            TypeDefKind::Resource => false,
            TypeDefKind::Record(r) => r
                .fields
                .iter()
                .any(|f| needs_deallocate(resolve, &f.ty, what)),
            TypeDefKind::Tuple(t) => t.types.iter().any(|t| needs_deallocate(resolve, t, what)),
            TypeDefKind::Variant(t) => t
                .cases
                .iter()
                .filter_map(|t| t.ty.as_ref())
                .any(|t| needs_deallocate(resolve, t, what)),
            TypeDefKind::Option(t) => needs_deallocate(resolve, t, what),
            TypeDefKind::Result(t) => [&t.ok, &t.err]
                .iter()
                .filter_map(|t| t.as_ref())
                .any(|t| needs_deallocate(resolve, t, what)),
            TypeDefKind::Flags(_) | TypeDefKind::Enum(_) => false,
            TypeDefKind::Future(_) | TypeDefKind::Stream(_) => what.handles(),
            TypeDefKind::Unknown => unreachable!(),
            TypeDefKind::FixedSizeList(..) => todo!(),
            TypeDefKind::Map(..) => todo!(),
        },

        Type::Bool
        | Type::U8
        | Type::S8
        | Type::U16
        | Type::S16
        | Type::U32
        | Type::S32
        | Type::U64
        | Type::S64
        | Type::F32
        | Type::F64
        | Type::Char => false,
    }
}

/// Generate instructions in `bindgen` to deallocate all lists in `operands`,
/// where the operands either hold the values themselves or, if `indirect` is
/// set, a single pointer to a sequence of `types` stored in linear memory.
pub fn deallocate_lists_in_types<B: Bindgen>(
    resolve: &Resolve,
    types: &[Type],
    operands: &[B::Operand],
    indirect: bool,
    bindgen: &mut B,
) {
    Generator::new(resolve, bindgen).deallocate_in_types(
        types,
        operands,
        indirect,
        Deallocate::Lists,
    );
}

/// Like `deallocate_lists_in_types`, except that owned resources such as
/// `own<T>` handles and futures/streams are deallocated as well.
pub fn deallocate_lists_and_own_in_types<B: Bindgen>(
    resolve: &Resolve,
    types: &[Type],
    operands: &[B::Operand],
    indirect: bool,
    bindgen: &mut B,
) {
    Generator::new(resolve, bindgen).deallocate_in_types(
        types,
        operands,
        indirect,
        Deallocate::ListsAndOwn,
    );
}

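/// How lowered values which require memory allocation should allocate that
/// memory: either not at all, or through a named `realloc` export.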
#[derive(Copy, Clone)]
pub enum Realloc {
    None,
    Export(&'static str),
}

/// What to deallocate in various `deallocate_*` methods.
#[derive(Copy, Clone)]
enum Deallocate {
    /// Only deallocate lists.
    Lists,
    /// Deallocate lists and owned resources such as `own<T>` and
    /// futures/streams.
    ListsAndOwn,
}

impl Deallocate {
    fn handles(&self) -> bool {
        match self {
            Deallocate::Lists => false,
            Deallocate::ListsAndOwn => true,
        }
    }
}

/// Helper structure which drives the emission of instructions for a single
/// function, maintaining the operand stack between instructions.
struct Generator<'a, B: Bindgen> {
    bindgen: &'a mut B,
    resolve: &'a Resolve,
    operands: Vec<B::Operand>,
    results: Vec<B::Operand>,
    stack: Vec<B::Operand>,
    return_pointer: Option<B::Operand>,
    realloc: Option<Realloc>,
}

/// Maximum number of core wasm values a parameter list may flatten to before
/// parameters are passed indirectly through memory.
const MAX_FLAT_PARAMS: usize = 16;
/// Maximum number of flattened parameters for async-lowered imports.
const MAX_FLAT_ASYNC_PARAMS: usize = 4;
933
934impl<'a, B: Bindgen> Generator<'a, B> {
935 fn new(resolve: &'a Resolve, bindgen: &'a mut B) -> Generator<'a, B> {
936 Generator {
937 resolve,
938 bindgen,
939 operands: Vec::new(),
940 results: Vec::new(),
941 stack: Vec::new(),
942 return_pointer: None,
943 realloc: None,
944 }
945 }
946
    fn call(&mut self, func: &Function, variant: AbiVariant, lift_lower: LiftLower, async_: bool) {
        let sig = self.resolve.wasm_signature(variant, func);

        // Lowering parameters calling a wasm import _or_ returning a result
        // from an async-lifted wasm export means we don't need to pass
        // ownership, but we pass ownership in all other cases.
        let realloc = match (variant, lift_lower, async_) {
            (AbiVariant::GuestImport, LiftLower::LowerArgsLiftResults, _)
            | (
                AbiVariant::GuestExport
                | AbiVariant::GuestExportAsync
                | AbiVariant::GuestExportAsyncStackful,
                LiftLower::LiftArgsLowerResults,
                true,
            ) => Realloc::None,
            _ => Realloc::Export("cabi_realloc"),
        };
        assert!(self.realloc.is_none());

        match lift_lower {
            LiftLower::LowerArgsLiftResults => {
                self.realloc = Some(realloc);

                // Create a function that performs individual lowering of operands
                let lower_to_memory = |self_: &mut Self, ptr: B::Operand| {
                    let mut offset = ArchitectureSize::default();
                    for (nth, (_, ty)) in func.params.iter().enumerate() {
                        self_.emit(&Instruction::GetArg { nth });
                        offset = align_to_arch(offset, self_.bindgen.sizes().align(ty));
                        self_.write_to_memory(ty, ptr.clone(), offset);
                        offset += self_.bindgen.sizes().size(ty);
                    }

                    self_.stack.push(ptr);
                };

                // Lower parameters
                if sig.indirect_params {
                    // If parameters are indirect, space is allocated for them
                    // and each argument is lowered individually into memory.
                    let ElementInfo { size, align } = self
                        .bindgen
                        .sizes()
                        .record(func.params.iter().map(|t| &t.1));

                    // Resolve the pointer to the indirectly stored parameters
                    let ptr = match variant {
                        // When a wasm module calls an import it will provide
                        // space that isn't explicitly deallocated.
                        AbiVariant::GuestImport => self.bindgen.return_pointer(size, align),

                        AbiVariant::GuestImportAsync => {
                            todo!("indirect param lowering for async guest imports is not implemented")
                        }

                        // When calling a wasm module from the outside, though,
                        // malloc needs to be called.
                        AbiVariant::GuestExport => {
                            self.emit(&Instruction::Malloc {
                                realloc: "cabi_realloc",
                                size,
                                align,
                            });
                            self.stack.pop().unwrap()
                        }

                        AbiVariant::GuestExportAsync | AbiVariant::GuestExportAsyncStackful => {
                            todo!("indirect param lowering for async exports is not implemented")
                        }
                    };

                    // Lower the parameters to memory
                    lower_to_memory(self, ptr);
                } else {
                    // ... otherwise arguments are direct (there aren't too
                    // many), so we simply do a normal lower operation for them
                    // all.
                    for (nth, (_, ty)) in func.params.iter().enumerate() {
                        self.emit(&Instruction::GetArg { nth });
                        self.lower(ty);
                    }
                }
                self.realloc = None;

                // If necessary we may need to prepare a return pointer for this ABI.
                if variant == AbiVariant::GuestImport && sig.retptr {
                    let info = self.bindgen.sizes().params(&func.result);
                    let ptr = self.bindgen.return_pointer(info.size, info.align);
                    self.return_pointer = Some(ptr.clone());
                    self.stack.push(ptr);
                }

                // Call the Wasm function
                assert_eq!(self.stack.len(), sig.params.len());
                self.emit(&Instruction::CallWasm {
                    name: &func.name,
                    sig: &sig,
                });

                // Handle the result
                if sig.retptr {
                    // If there is a return pointer, find the pointer to where
                    // the results were stored so they can be read back out.
                    let ptr = match variant {
                        // imports into guests means it's a wasm module
                        // calling an imported function. We supplied the
                        // return pointer as the last argument (saved in
                        // `self.return_pointer`) so we use that to read
                        // the result of the function from memory.
                        AbiVariant::GuestImport => {
                            assert!(sig.results.is_empty());
                            self.return_pointer.take().unwrap()
                        }

                        // guest exports means that this is a host
                        // calling wasm so wasm returned a pointer to where
                        // the result is stored
                        AbiVariant::GuestExport => self.stack.pop().unwrap(),

                        AbiVariant::GuestImportAsync
                        | AbiVariant::GuestExportAsync
                        | AbiVariant::GuestExportAsyncStackful => {
                            unreachable!()
                        }
                    };

                    if let (AbiVariant::GuestExport, true) = (variant, async_) {
                        // If we're dealing with an async function, the result
                        // should not be read from memory immediately, as it's
                        // the async call result.
                        //
                        // We can leave the result of the call (the indication
                        // of what to do as an async call) on the stack as a
                        // return.
                        self.stack.push(ptr);
                    } else {
                        // If we're not dealing with an async call, the result
                        // must be in memory at this point and can be read out.
                        self.read_results_from_memory(
                            &func.result,
                            ptr.clone(),
                            ArchitectureSize::default(),
                        );
                        self.emit(&Instruction::Flush {
                            amt: usize::from(func.result.is_some()),
                        });
                    }
                } else {
                    // With no return pointer in use we can simply lift the
                    // result(s) of the function from the result of the core
                    // wasm function.
                    if let Some(ty) = &func.result {
                        self.lift(ty)
                    }
                }

                // Emit the function return
                self.emit(&Instruction::Return {
                    func,
                    amt: usize::from(func.result.is_some()),
                });
            }


            LiftLower::LiftArgsLowerResults => {
                let max_flat_params = match (variant, async_) {
                    (AbiVariant::GuestImportAsync, _is_async @ true) => MAX_FLAT_ASYNC_PARAMS,
                    _ => MAX_FLAT_PARAMS,
                };

                // Read parameters from memory
                let read_from_memory = |self_: &mut Self| {
                    let mut offset = ArchitectureSize::default();
                    let ptr = self_
                        .stack
                        .pop()
                        .expect("empty stack during read param from memory");
                    for (_, ty) in func.params.iter() {
                        offset = align_to_arch(offset, self_.bindgen.sizes().align(ty));
                        self_.read_from_memory(ty, ptr.clone(), offset);
                        offset += self_.bindgen.sizes().size(ty);
                    }
                };

                // Resolve parameters
                if sig.indirect_params {
                    // If parameters were passed indirectly, arguments must be
                    // read in succession from memory, with the pointer to the
                    // arguments being the first argument to the function.
                    self.emit(&Instruction::GetArg { nth: 0 });
                    read_from_memory(self);
                } else {
                    // ... otherwise, if parameters were passed directly then we
                    // lift each argument in succession from the component wasm
                    // types that make up the type.
                    let mut offset = 0;
                    for (param_name, ty) in func.params.iter() {
                        let Some(types) = flat_types(self.resolve, ty, Some(max_flat_params))
                        else {
                            panic!(
                                "failed to flatten types during direct parameter lifting ('{param_name}' in func '{}')",
                                func.name
                            );
                        };
                        for _ in 0..types.len() {
                            self.emit(&Instruction::GetArg { nth: offset });
                            offset += 1;
                        }
                        self.lift(ty);
                    }
                }

                // ... and that allows us to call the interface types function
                self.emit(&Instruction::CallInterface { func, async_ });

                // The return value of an async function is *not* the result of
                // the function itself or a pointer but rather a status code.
                //
                // Asynchronous functions will call `task.return` after the
                // interface function completes, so lowering is conditional
                // based on slightly different logic for the `task.return`
                // intrinsic.
                //
                // Note that in the async import case the code below deals with
                // the CM function being lowered, not the core function that is
                // underneath it (i.e. `func.result` may be empty, where the
                // associated core function underneath must have an `i32` status
                // code result).
                let (lower_to_memory, async_flat_results) = match (async_, &func.result) {
                    // All async cases pass along the function results and
                    // flatten where necessary
                    (_is_async @ true, func_result) => {
                        let results = match &func_result {
                            Some(ty) => flat_types(self.resolve, ty, Some(max_flat_params)),
                            None => Some(Vec::new()),
                        };
                        (results.is_none(), Some(results))
                    }
                    // All other non-async cases
                    (_is_async @ false, _) => (sig.retptr, None),
                };


                // This was dynamically allocated by the caller (or async start
                // function) so after it's been read by the guest we need to
                // deallocate it.
                if let AbiVariant::GuestExport
                | AbiVariant::GuestExportAsync
                | AbiVariant::GuestExportAsyncStackful = variant
                {
                    if sig.indirect_params && !async_ {
                        let ElementInfo { size, align } = self
                            .bindgen
                            .sizes()
                            .record(func.params.iter().map(|t| &t.1));
                        self.emit(&Instruction::GetArg { nth: 0 });
                        self.emit(&Instruction::GuestDeallocate { size, align });
                    }
                }

                self.realloc = Some(realloc);

                // Perform memory lowering of relevant results, including out
                // pointers as well as traditional results
                match (lower_to_memory, sig.retptr, variant) {
                    // If no lowering to memory is required, simply lower the
                    // result(s) and return them directly from the function.
                    (_lower_to_memory @ false, _, _) => {
                        if let Some(ty) = &func.result {
                            self.lower(ty);
                        }
                    }

                    // Lowering to memory for a guest import
                    //
                    // When a function is imported to a guest this means
                    // it's a host providing the implementation of the
                    // import. The result is stored in the pointer
                    // specified in the last argument, so we get the
                    // pointer here and then write the return value into
                    // it.
                    (
                        _lower_to_memory @ true,
                        _has_ret_ptr @ true,
                        AbiVariant::GuestImport | AbiVariant::GuestImportAsync,
                    ) => {
                        self.emit(&Instruction::GetArg {
                            nth: sig.params.len() - 1,
                        });
                        let ptr = self
                            .stack
                            .pop()
                            .expect("empty stack during result lower to memory");
                        self.write_params_to_memory(&func.result, ptr, Default::default());
                    }

                    // Lowering to memory for a guest export
                    //
                    // For a guest export this is a function defined in
                    // wasm, so we're returning a pointer where the
                    // value was stored at. Allocate some space here
                    // (statically) and then write the result into that
                    // memory, returning the pointer at the end.
                    (_lower_to_memory @ true, _, variant) => match variant {
                        AbiVariant::GuestExport | AbiVariant::GuestExportAsync => {
                            let ElementInfo { size, align } =
                                self.bindgen.sizes().params(&func.result);
                            let ptr = self.bindgen.return_pointer(size, align);
                            self.write_params_to_memory(
                                &func.result,
                                ptr.clone(),
                                Default::default(),
                            );
                            self.stack.push(ptr);
                        }
                        AbiVariant::GuestImport | AbiVariant::GuestImportAsync => {
                            unreachable!(
                                "lowering to memory cannot be performed without a return pointer ({async_note} func [{func_name}], variant {variant:#?})",
                                async_note = async_.then_some("async").unwrap_or("sync"),
                                func_name = func.name,
                            )
                        }
                        AbiVariant::GuestExportAsyncStackful => {
                            todo!("stackful exports are not yet supported")
                        }
                    },
                }


                // Build and emit the appropriate return
                match (variant, async_flat_results) {
                    // Async guest imports always return an `i32` status code
                    (AbiVariant::GuestImport | AbiVariant::GuestImportAsync, None) if async_ => {
                        unreachable!("async guest imports must have a return")
                    }

                    // Async guest imports with results return the status code,
                    // not a pointer to any results
                    (AbiVariant::GuestImport | AbiVariant::GuestImportAsync, Some(results))
                        if async_ =>
                    {
                        let name = &format!("[task-return]{}", func.name);
                        let params = results.as_deref().unwrap_or_default();
                        self.emit(&Instruction::AsyncTaskReturn { name, params });
                    }

                    // All async/non-async cases with results that need to be
                    // returned
                    //
                    // In practice, async imports should not end up here, as the
                    // returned result of an async import is *not* a pointer but
                    // instead a status code.
                    (_, Some(results)) => {
                        let name = &format!("[task-return]{}", func.name);
                        let params = results.as_deref().unwrap_or(&[WasmType::Pointer]);
                        self.emit(&Instruction::AsyncTaskReturn { name, params });
                    }

                    // All async/non-async cases with no results
                    (_, None) => {
                        if async_ {
                            let name = &format!("[task-return]{}", func.name);
                            self.emit(&Instruction::AsyncTaskReturn {
                                name,
                                params: if sig.results.len() > MAX_FLAT_ASYNC_PARAMS {
                                    &[WasmType::Pointer]
                                } else {
                                    &sig.results
                                },
                            });
                        } else {
                            self.emit(&Instruction::Return {
                                func,
                                amt: sig.results.len(),
                            });
                        }
                    }
                }

                self.realloc = None;
            }
        }

        assert!(self.realloc.is_none());

        assert!(
            self.stack.is_empty(),
            "stack has {} items remaining: {:?}",
            self.stack.len(),
            self.stack,
        );
    }

    fn post_return(&mut self, func: &Function) {
        let sig = self.resolve.wasm_signature(AbiVariant::GuestExport, func);

        // Currently post-return is only used for lists and lists are always
        // returned indirectly through memory due to their flat representation
        // having more than one type. Assert that a return pointer is used,
        // though, in case this ever changes.
        assert!(sig.retptr);

        self.emit(&Instruction::GetArg { nth: 0 });
        let addr = self.stack.pop().unwrap();

        let mut types = Vec::new();
        types.extend(func.result);
        self.deallocate_in_types(&types, &[addr], true, Deallocate::Lists);

        self.emit(&Instruction::Return { func, amt: 0 });
    }

    fn deallocate_in_types(
        &mut self,
        types: &[Type],
        operands: &[B::Operand],
        indirect: bool,
        what: Deallocate,
    ) {
        if indirect {
            assert_eq!(operands.len(), 1);
            for (offset, ty) in self.bindgen.sizes().field_offsets(types) {
                self.deallocate_indirect(ty, operands[0].clone(), offset, what);
            }
            assert!(
                self.stack.is_empty(),
                "stack has {} items remaining",
                self.stack.len()
            );
        } else {
            let mut operands = operands;
            let mut operands_for_ty;
            for ty in types {
                let types = flat_types(self.resolve, ty, None).unwrap();
                (operands_for_ty, operands) = operands.split_at(types.len());
                self.stack.extend_from_slice(operands_for_ty);
                self.deallocate(ty, what);
                assert!(
                    self.stack.is_empty(),
                    "stack has {} items remaining",
                    self.stack.len()
                );
            }
            assert!(operands.is_empty());
        }
    }

    fn emit(&mut self, inst: &Instruction<'_>) {
        self.operands.clear();
        self.results.clear();

        let operands_len = inst.operands_len();
        assert!(
            self.stack.len() >= operands_len,
            "not enough operands on stack for {:?}: have {} need {operands_len}",
            inst,
            self.stack.len(),
        );
        self.operands
            .extend(self.stack.drain((self.stack.len() - operands_len)..));
        self.results.reserve(inst.results_len());

        self.bindgen
            .emit(self.resolve, inst, &mut self.operands, &mut self.results);

        assert_eq!(
            self.results.len(),
            inst.results_len(),
            "{:?} expected {} results, got {}",
            inst,
            inst.results_len(),
            self.results.len()
        );
        self.stack.append(&mut self.results);
    }
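
    // For illustration only: lowering a single `string` argument for an
    // imported function emits an instruction sequence along these lines
    // (operand counts shown as pops => pushes):
    //
    //     GetArg { nth: 0 }      // [0] => [1]
    //     StringLower { .. }     // [1] => [2] (pointer and length)
    //     CallWasm { .. }        // [sig.params.len()] => [sig.results.len()]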

    fn push_block(&mut self) {
        self.bindgen.push_block();
    }

    fn finish_block(&mut self, size: usize) {
        self.operands.clear();
        assert!(
            size <= self.stack.len(),
            "not enough operands on stack for finishing block",
        );
        self.operands
            .extend(self.stack.drain((self.stack.len() - size)..));
        self.bindgen.finish_block(&mut self.operands);
    }

    fn lower(&mut self, ty: &Type) {
        use Instruction::*;

        match *ty {
            Type::Bool => self.emit(&I32FromBool),
            Type::S8 => self.emit(&I32FromS8),
            Type::U8 => self.emit(&I32FromU8),
            Type::S16 => self.emit(&I32FromS16),
            Type::U16 => self.emit(&I32FromU16),
            Type::S32 => self.emit(&I32FromS32),
            Type::U32 => self.emit(&I32FromU32),
            Type::S64 => self.emit(&I64FromS64),
            Type::U64 => self.emit(&I64FromU64),
            Type::Char => self.emit(&I32FromChar),
            Type::F32 => self.emit(&CoreF32FromF32),
            Type::F64 => self.emit(&CoreF64FromF64),
            Type::String => {
                let realloc = self.list_realloc();
                self.emit(&StringLower { realloc });
            }
            Type::ErrorContext => self.emit(&ErrorContextLower),
            Type::Id(id) => match &self.resolve.types[id].kind {
                TypeDefKind::Type(t) => self.lower(t),
                TypeDefKind::List(element) => {
                    let realloc = self.list_realloc();
                    if self.bindgen.is_list_canonical(self.resolve, element) {
                        self.emit(&ListCanonLower { element, realloc });
                    } else {
                        self.push_block();
                        self.emit(&IterElem { element });
                        self.emit(&IterBasePointer);
                        let addr = self.stack.pop().unwrap();
                        self.write_to_memory(element, addr, Default::default());
                        self.finish_block(0);
                        self.emit(&ListLower { element, realloc });
                    }
                }
                TypeDefKind::Handle(handle) => {
                    let (Handle::Own(ty) | Handle::Borrow(ty)) = handle;
                    self.emit(&HandleLower {
                        handle,
                        ty: id,
                        name: self.resolve.types[*ty].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Resource => {
                    todo!();
                }
                TypeDefKind::Record(record) => {
                    self.emit(&RecordLower {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                    let values = self
                        .stack
                        .drain(self.stack.len() - record.fields.len()..)
                        .collect::<Vec<_>>();
                    for (field, value) in record.fields.iter().zip(values) {
                        self.stack.push(value);
                        self.lower(&field.ty);
                    }
                }
                TypeDefKind::Tuple(tuple) => {
                    self.emit(&TupleLower { tuple, ty: id });
                    let values = self
                        .stack
                        .drain(self.stack.len() - tuple.types.len()..)
                        .collect::<Vec<_>>();
                    for (ty, value) in tuple.types.iter().zip(values) {
                        self.stack.push(value);
                        self.lower(ty);
                    }
                }

                TypeDefKind::Flags(flags) => {
                    self.emit(&FlagsLower {
                        flags,
                        ty: id,
                        name: self.resolve.types[id].name.as_ref().unwrap(),
                    });
                }

                TypeDefKind::Variant(v) => {
                    let results =
                        self.lower_variant_arms(ty, v.cases.iter().map(|c| c.ty.as_ref()));
                    self.emit(&VariantLower {
                        variant: v,
                        ty: id,
                        results: &results,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Enum(enum_) => {
                    self.emit(&EnumLower {
                        enum_,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Option(t) => {
                    let results = self.lower_variant_arms(ty, [None, Some(t)]);
                    self.emit(&OptionLower {
                        payload: t,
                        ty: id,
                        results: &results,
                    });
                }
                TypeDefKind::Result(r) => {
                    let results = self.lower_variant_arms(ty, [r.ok.as_ref(), r.err.as_ref()]);
                    self.emit(&ResultLower {
                        result: r,
                        ty: id,
                        results: &results,
                    });
                }
                TypeDefKind::Future(ty) => {
                    self.emit(&FutureLower {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Stream(ty) => {
                    self.emit(&StreamLower {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedSizeList(..) => todo!(),
                TypeDefKind::Map(..) => todo!(),
            },
        }
    }

    fn lower_variant_arms<'b>(
        &mut self,
        ty: &Type,
        cases: impl IntoIterator<Item = Option<&'b Type>>,
    ) -> Vec<WasmType> {
        use Instruction::*;
        let results = flat_types(self.resolve, ty, None).unwrap();
        let mut casts = Vec::new();
        for (i, ty) in cases.into_iter().enumerate() {
            self.push_block();
            self.emit(&VariantPayloadName);
            let payload_name = self.stack.pop().unwrap();
            self.emit(&I32Const { val: i as i32 });
            let mut pushed = 1;
            if let Some(ty) = ty {
                // Using the payload of this block we lower the type to
                // raw wasm values.
                self.stack.push(payload_name);
                self.lower(ty);

                // Determine the types of all the wasm values we just
                // pushed, and record how many. If we pushed too few
                // then we'll need to push some zeros after this.
                let temp = flat_types(self.resolve, ty, None).unwrap();
                pushed += temp.len();

                // For all the types pushed we may need to insert some
                // bitcasts. This will go through and cast everything
                // to the right type to ensure all blocks produce the
                // same set of results.
                casts.truncate(0);
                for (actual, expected) in temp.iter().zip(&results[1..]) {
                    casts.push(cast(*actual, *expected));
                }
                if casts.iter().any(|c| *c != Bitcast::None) {
                    self.emit(&Bitcasts { casts: &casts });
                }
            }

            // If we haven't pushed enough items in this block to match
            // what other variants are pushing then we need to push
            // some zeros.
            if pushed < results.len() {
                self.emit(&ConstZero {
                    tys: &results[pushed..],
                });
            }
            self.finish_block(results.len());
        }
        results
    }

    fn list_realloc(&self) -> Option<&'static str> {
        match self.realloc.expect("realloc should be configured") {
            Realloc::None => None,
            Realloc::Export(s) => Some(s),
        }
    }

    /// Note that in general everything in this function is the opposite of the
    /// `lower` function above. This is intentional and should be kept this way!
    fn lift(&mut self, ty: &Type) {
        use Instruction::*;

        match *ty {
            Type::Bool => self.emit(&BoolFromI32),
            Type::S8 => self.emit(&S8FromI32),
            Type::U8 => self.emit(&U8FromI32),
            Type::S16 => self.emit(&S16FromI32),
            Type::U16 => self.emit(&U16FromI32),
            Type::S32 => self.emit(&S32FromI32),
            Type::U32 => self.emit(&U32FromI32),
            Type::S64 => self.emit(&S64FromI64),
            Type::U64 => self.emit(&U64FromI64),
            Type::Char => self.emit(&CharFromI32),
            Type::F32 => self.emit(&F32FromCoreF32),
            Type::F64 => self.emit(&F64FromCoreF64),
            Type::String => self.emit(&StringLift),
            Type::ErrorContext => self.emit(&ErrorContextLift),
            Type::Id(id) => match &self.resolve.types[id].kind {
                TypeDefKind::Type(t) => self.lift(t),
                TypeDefKind::List(element) => {
                    if self.bindgen.is_list_canonical(self.resolve, element) {
                        self.emit(&ListCanonLift { element, ty: id });
                    } else {
                        self.push_block();
                        self.emit(&IterBasePointer);
                        let addr = self.stack.pop().unwrap();
                        self.read_from_memory(element, addr, Default::default());
                        self.finish_block(1);
                        self.emit(&ListLift { element, ty: id });
                    }
                }
                TypeDefKind::Handle(handle) => {
                    let (Handle::Own(ty) | Handle::Borrow(ty)) = handle;
                    self.emit(&HandleLift {
                        handle,
                        ty: id,
                        name: self.resolve.types[*ty].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Resource => {
                    todo!();
                }
                TypeDefKind::Record(record) => {
                    self.flat_for_each_record_type(
                        ty,
                        record.fields.iter().map(|f| &f.ty),
                        Self::lift,
                    );
                    self.emit(&RecordLift {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }
                TypeDefKind::Tuple(tuple) => {
                    self.flat_for_each_record_type(ty, tuple.types.iter(), Self::lift);
                    self.emit(&TupleLift { tuple, ty: id });
                }
                TypeDefKind::Flags(flags) => {
                    self.emit(&FlagsLift {
                        flags,
                        ty: id,
                        name: self.resolve.types[id].name.as_ref().unwrap(),
                    });
                }

                TypeDefKind::Variant(v) => {
                    self.flat_for_each_variant_arm(
                        ty,
                        true,
                        v.cases.iter().map(|c| c.ty.as_ref()),
                        Self::lift,
                    );
                    self.emit(&VariantLift {
                        variant: v,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Enum(enum_) => {
                    self.emit(&EnumLift {
                        enum_,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.flat_for_each_variant_arm(ty, true, [None, Some(t)], Self::lift);
                    self.emit(&OptionLift { payload: t, ty: id });
                }

                TypeDefKind::Result(r) => {
                    self.flat_for_each_variant_arm(
                        ty,
                        true,
                        [r.ok.as_ref(), r.err.as_ref()],
                        Self::lift,
                    );
                    self.emit(&ResultLift { result: r, ty: id });
                }

                TypeDefKind::Future(ty) => {
                    self.emit(&FutureLift {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Stream(ty) => {
                    self.emit(&StreamLift {
                        payload: ty,
                        ty: id,
                    });
                }
                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedSizeList(..) => todo!(),
                TypeDefKind::Map(..) => todo!(),
            },
        }
    }
1745
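    /// Drains the flat operands of `container` from the stack and re-pushes
    /// them in per-field groups, invoking `iter` once per field.
    ///
    /// Illustrative sketch (hypothetical type, not from this crate): a record
    /// with fields `u32` and `tuple<f32, f32>` flattens to three core values
    /// `[i32, f32, f32]`; the first field consumes one of them and the tuple
    /// consumes the remaining two.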
    fn flat_for_each_record_type<'b>(
        &mut self,
        container: &Type,
        types: impl Iterator<Item = &'b Type>,
        mut iter: impl FnMut(&mut Self, &Type),
    ) {
        let temp = flat_types(self.resolve, container, None).unwrap();
        let mut args = self
            .stack
            .drain(self.stack.len() - temp.len()..)
            .collect::<Vec<_>>();
        for ty in types {
            let temp = flat_types(self.resolve, ty, None).unwrap();
            self.stack.extend(args.drain(..temp.len()));
            iter(self, ty);
        }
    }

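    /// The lifting counterpart of `lower_variant_arms`: drains the joined
    /// flat operands (minus the discriminant) from the stack and, for each
    /// case, re-pushes just the values that case needs, casting them from the
    /// joined types back to the case's own flat types before invoking `iter`.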
    fn flat_for_each_variant_arm<'b>(
        &mut self,
        ty: &Type,
        blocks_with_type_have_result: bool,
        cases: impl IntoIterator<Item = Option<&'b Type>>,
        mut iter: impl FnMut(&mut Self, &Type),
    ) {
        let params = flat_types(self.resolve, ty, None).unwrap();
        let mut casts = Vec::new();
        let block_inputs = self
            .stack
            .drain(self.stack.len() + 1 - params.len()..)
            .collect::<Vec<_>>();
        for ty in cases {
            self.push_block();
            if let Some(ty) = ty {
                // Push only the values we need for this variant onto
                // the stack.
                let temp = flat_types(self.resolve, ty, None).unwrap();
                self.stack
                    .extend(block_inputs[..temp.len()].iter().cloned());

                // Cast all the types we have on the stack to the actual
                // types needed for this variant, if necessary.
                casts.truncate(0);
                for (actual, expected) in temp.iter().zip(&params[1..]) {
                    casts.push(cast(*expected, *actual));
                }
                if casts.iter().any(|c| *c != Bitcast::None) {
                    self.emit(&Instruction::Bitcasts { casts: &casts });
                }

                // Then recursively lift this variant's payload.
                iter(self, ty);
            }
            self.finish_block(if blocks_with_type_have_result {
                ty.is_some() as usize
            } else {
                0
            });
        }
    }

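    /// Lowers `ty` and stores its representation into linear memory at
    /// `addr + offset`, using the canonical ABI layout reported by
    /// `self.bindgen.sizes()`.
    ///
    /// Illustrative sketch (hypothetical type, not from this crate): for
    /// `record { a: u8, b: u32 }` the `a` field is stored at `offset + 0`
    /// and `b` at `offset + 4`, since `b` is aligned up to four bytes.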
    fn write_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        use Instruction::*;

        match *ty {
            // Builtin types need different flavors of storage instructions
            // depending on the size of the value written.
            Type::Bool | Type::U8 | Type::S8 => {
                self.lower_and_emit(ty, addr, &I32Store8 { offset })
            }
            Type::U16 | Type::S16 => self.lower_and_emit(ty, addr, &I32Store16 { offset }),
            Type::U32 | Type::S32 | Type::Char => {
                self.lower_and_emit(ty, addr, &I32Store { offset })
            }
            Type::U64 | Type::S64 => self.lower_and_emit(ty, addr, &I64Store { offset }),
            Type::F32 => self.lower_and_emit(ty, addr, &F32Store { offset }),
            Type::F64 => self.lower_and_emit(ty, addr, &F64Store { offset }),
            Type::String => self.write_list_to_memory(ty, addr, offset),
            Type::ErrorContext => self.lower_and_emit(ty, addr, &I32Store { offset }),

            Type::Id(id) => match &self.resolve.types[id].kind {
                TypeDefKind::Type(t) => self.write_to_memory(t, addr, offset),
                TypeDefKind::List(_) => self.write_list_to_memory(ty, addr, offset),

                TypeDefKind::Future(_) | TypeDefKind::Stream(_) | TypeDefKind::Handle(_) => {
                    self.lower_and_emit(ty, addr, &I32Store { offset })
                }

                // Decompose the record into its components and then write all
                // the components into memory one-by-one.
                TypeDefKind::Record(record) => {
                    self.emit(&RecordLower {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                    self.write_fields_to_memory(record.fields.iter().map(|f| &f.ty), addr, offset);
                }
                TypeDefKind::Resource => {
                    todo!()
                }
                TypeDefKind::Tuple(tuple) => {
                    self.emit(&TupleLower { tuple, ty: id });
                    self.write_fields_to_memory(tuple.types.iter(), addr, offset);
                }

                TypeDefKind::Flags(f) => {
                    self.lower(ty);
                    match f.repr() {
                        FlagsRepr::U8 => {
                            self.stack.push(addr);
                            self.store_intrepr(offset, Int::U8);
                        }
                        FlagsRepr::U16 => {
                            self.stack.push(addr);
                            self.store_intrepr(offset, Int::U16);
                        }
                        FlagsRepr::U32(n) => {
                            for i in (0..n).rev() {
                                self.stack.push(addr.clone());
                                self.emit(&I32Store {
                                    offset: offset.add_bytes(i * 4),
                                });
                            }
                        }
                    }
                }

                // Each case will get its own block, and the first item in each
                // case is writing the discriminant. After that if we have a
                // payload we write the payload after the discriminant, aligned up
                // to the type's alignment.
                TypeDefKind::Variant(v) => {
                    self.write_variant_arms_to_memory(
                        offset,
                        addr,
                        v.tag(),
                        v.cases.iter().map(|c| c.ty.as_ref()),
                    );
                    self.emit(&VariantLower {
                        variant: v,
                        ty: id,
                        results: &[],
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.write_variant_arms_to_memory(offset, addr, Int::U8, [None, Some(t)]);
                    self.emit(&OptionLower {
                        payload: t,
                        ty: id,
                        results: &[],
                    });
                }

                TypeDefKind::Result(r) => {
                    self.write_variant_arms_to_memory(
                        offset,
                        addr,
                        Int::U8,
                        [r.ok.as_ref(), r.err.as_ref()],
                    );
                    self.emit(&ResultLower {
                        result: r,
                        ty: id,
                        results: &[],
                    });
                }

                TypeDefKind::Enum(e) => {
                    self.lower(ty);
                    self.stack.push(addr);
                    self.store_intrepr(offset, e.tag());
                }

                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedSizeList(..) => todo!(),
                TypeDefKind::Map(..) => todo!(),
            },
        }
    }

    fn write_params_to_memory<'b>(
        &mut self,
        params: impl IntoIterator<Item = &'b Type, IntoIter: ExactSizeIterator>,
        addr: B::Operand,
        offset: ArchitectureSize,
    ) {
        self.write_fields_to_memory(params, addr, offset);
    }

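    /// Writes each case of a variant-like type to memory: the tag is stored
    /// at `offset` and the payload (if any) at the case-independent payload
    /// offset that follows it.
    ///
    /// Illustrative sketch (hypothetical type, not from this crate): for
    /// `option<u64>` with a `u8` tag the tag lands at `offset + 0` and the
    /// payload at `offset + 8`, since the payload offset is aligned to the
    /// maximum alignment over all payloads.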
    fn write_variant_arms_to_memory<'b>(
        &mut self,
        offset: ArchitectureSize,
        addr: B::Operand,
        tag: Int,
        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
    ) {
        let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
        for (i, ty) in cases.into_iter().enumerate() {
            self.push_block();
            self.emit(&Instruction::VariantPayloadName);
            let payload_name = self.stack.pop().unwrap();
            self.emit(&Instruction::I32Const { val: i as i32 });
            self.stack.push(addr.clone());
            self.store_intrepr(offset, tag);
            if let Some(ty) = ty {
                self.stack.push(payload_name.clone());
                self.write_to_memory(ty, addr.clone(), payload_offset);
            }
            self.finish_block(0);
        }
    }

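    /// Stores a lowered list or string as a `(pointer, length)` pair.
    ///
    /// Illustrative sketch: on a 32-bit guest the pointer is stored at
    /// `offset + 0` and the length at `offset + 4`, the pair's alignment
    /// being the pointer size.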
    fn write_list_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        // After lowering the list there are two values on the stack, which we
        // write into memory: the pointer goes at the low address and the
        // length at the high address.
        self.lower(ty);
        self.stack.push(addr.clone());
        self.emit(&Instruction::LengthStore {
            offset: offset + self.bindgen.sizes().align(ty).into(),
        });
        self.stack.push(addr);
        self.emit(&Instruction::PointerStore { offset });
    }

    fn write_fields_to_memory<'b>(
        &mut self,
        tys: impl IntoIterator<Item = &'b Type, IntoIter: ExactSizeIterator>,
        addr: B::Operand,
        offset: ArchitectureSize,
    ) {
        let tys = tys.into_iter();
        let fields = self
            .stack
            .drain(self.stack.len() - tys.len()..)
            .collect::<Vec<_>>();
        for ((field_offset, ty), op) in self
            .bindgen
            .sizes()
            .field_offsets(tys)
            .into_iter()
            .zip(fields)
        {
            self.stack.push(op);
            self.write_to_memory(ty, addr.clone(), offset + (field_offset));
        }
    }

    fn lower_and_emit(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
        self.lower(ty);
        self.stack.push(addr);
        self.emit(instr);
    }

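    /// The inverse of `write_to_memory`: loads the canonical-ABI
    /// representation of `ty` from `addr + offset` and lifts it back to an
    /// interface-types value, using loads that match each builtin's in-memory
    /// width and signedness.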
    fn read_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        use Instruction::*;

        match *ty {
            Type::Bool => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
            Type::U8 => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
            Type::S8 => self.emit_and_lift(ty, addr, &I32Load8S { offset }),
            Type::U16 => self.emit_and_lift(ty, addr, &I32Load16U { offset }),
            Type::S16 => self.emit_and_lift(ty, addr, &I32Load16S { offset }),
            Type::U32 | Type::S32 | Type::Char => self.emit_and_lift(ty, addr, &I32Load { offset }),
            Type::U64 | Type::S64 => self.emit_and_lift(ty, addr, &I64Load { offset }),
            Type::F32 => self.emit_and_lift(ty, addr, &F32Load { offset }),
            Type::F64 => self.emit_and_lift(ty, addr, &F64Load { offset }),
            Type::String => self.read_list_from_memory(ty, addr, offset),
            Type::ErrorContext => self.emit_and_lift(ty, addr, &I32Load { offset }),

            Type::Id(id) => match &self.resolve.types[id].kind {
                TypeDefKind::Type(t) => self.read_from_memory(t, addr, offset),

                TypeDefKind::List(_) => self.read_list_from_memory(ty, addr, offset),

                TypeDefKind::Future(_) | TypeDefKind::Stream(_) | TypeDefKind::Handle(_) => {
                    self.emit_and_lift(ty, addr, &I32Load { offset })
                }

                TypeDefKind::Resource => {
                    todo!();
                }

                // Read and lift each field individually, adjusting the offset
                // as we go along, then aggregate all the fields into the
                // record.
                TypeDefKind::Record(record) => {
                    self.read_fields_from_memory(record.fields.iter().map(|f| &f.ty), addr, offset);
                    self.emit(&RecordLift {
                        record,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Tuple(tuple) => {
                    self.read_fields_from_memory(&tuple.types, addr, offset);
                    self.emit(&TupleLift { tuple, ty: id });
                }

                TypeDefKind::Flags(f) => {
                    match f.repr() {
                        FlagsRepr::U8 => {
                            self.stack.push(addr);
                            self.load_intrepr(offset, Int::U8);
                        }
                        FlagsRepr::U16 => {
                            self.stack.push(addr);
                            self.load_intrepr(offset, Int::U16);
                        }
                        FlagsRepr::U32(n) => {
                            for i in 0..n {
                                self.stack.push(addr.clone());
                                self.emit(&I32Load {
                                    offset: offset.add_bytes(i * 4),
                                });
                            }
                        }
                    }
                    self.lift(ty);
                }

                // Each case will get its own block, and we'll dispatch to the
                // right block based on the `i32.load` we initially perform. Each
                // individual block is pretty simple and just reads the payload type
                // from the corresponding offset if one is available.
                TypeDefKind::Variant(variant) => {
                    self.read_variant_arms_from_memory(
                        offset,
                        addr,
                        variant.tag(),
                        variant.cases.iter().map(|c| c.ty.as_ref()),
                    );
                    self.emit(&VariantLift {
                        variant,
                        ty: id,
                        name: self.resolve.types[id].name.as_deref().unwrap(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.read_variant_arms_from_memory(offset, addr, Int::U8, [None, Some(t)]);
                    self.emit(&OptionLift { payload: t, ty: id });
                }

                TypeDefKind::Result(r) => {
                    self.read_variant_arms_from_memory(
                        offset,
                        addr,
                        Int::U8,
                        [r.ok.as_ref(), r.err.as_ref()],
                    );
                    self.emit(&ResultLift { result: r, ty: id });
                }

                TypeDefKind::Enum(e) => {
                    self.stack.push(addr.clone());
                    self.load_intrepr(offset, e.tag());
                    self.lift(ty);
                }

                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedSizeList(..) => todo!(),
                TypeDefKind::Map(..) => todo!(),
            },
        }
    }

    fn read_results_from_memory(
        &mut self,
        result: &Option<Type>,
        addr: B::Operand,
        offset: ArchitectureSize,
    ) {
        self.read_fields_from_memory(result, addr, offset)
    }

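    /// The inverse of `write_variant_arms_to_memory`: loads the tag from
    /// `offset`, then emits one block per case that reads that case's payload
    /// (if any) from the shared payload offset.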
    fn read_variant_arms_from_memory<'b>(
        &mut self,
        offset: ArchitectureSize,
        addr: B::Operand,
        tag: Int,
        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
    ) {
        self.stack.push(addr.clone());
        self.load_intrepr(offset, tag);
        let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
        for ty in cases {
            self.push_block();
            if let Some(ty) = ty {
                self.read_from_memory(ty, addr.clone(), payload_offset);
            }
            self.finish_block(ty.is_some() as usize);
        }
    }

    fn read_list_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: ArchitectureSize) {
        // Read the pointer/length and then perform the standard lifting
        // process.
        self.stack.push(addr.clone());
        self.emit(&Instruction::PointerLoad { offset });
        self.stack.push(addr);
        self.emit(&Instruction::LengthLoad {
            offset: offset + self.bindgen.sizes().align(ty).into(),
        });
        self.lift(ty);
    }

    fn read_fields_from_memory<'b>(
        &mut self,
        tys: impl IntoIterator<Item = &'b Type>,
        addr: B::Operand,
        offset: ArchitectureSize,
    ) {
        for (field_offset, ty) in self.bindgen.sizes().field_offsets(tys).iter() {
            self.read_from_memory(ty, addr.clone(), offset + (*field_offset));
        }
    }

    fn emit_and_lift(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
        self.stack.push(addr);
        self.emit(instr);
        self.lift(ty);
    }

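    /// Loads an integer of representation `repr` from memory, zero-extending
    /// anything narrower than 32 bits (all `Int` representations here are
    /// unsigned).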
    fn load_intrepr(&mut self, offset: ArchitectureSize, repr: Int) {
        self.emit(&match repr {
            Int::U64 => Instruction::I64Load { offset },
            Int::U32 => Instruction::I32Load { offset },
            Int::U16 => Instruction::I32Load16U { offset },
            Int::U8 => Instruction::I32Load8U { offset },
        });
    }

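    /// Stores an integer of representation `repr` to memory, truncating the
    /// operand to the representation's width.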
    fn store_intrepr(&mut self, offset: ArchitectureSize, repr: Int) {
        self.emit(&match repr {
            Int::U64 => Instruction::I64Store { offset },
            Int::U32 => Instruction::I32Store { offset },
            Int::U16 => Instruction::I32Store16 { offset },
            Int::U8 => Instruction::I32Store8 { offset },
        });
    }

    /// Runs the deallocation of `ty` for the operands currently on
    /// `self.stack`.
    ///
    /// This will pop the ABI items of `ty` from `self.stack`.
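    ///
    /// Illustrative sketch (hypothetical type, not from this crate): for a
    /// `list<string>` this first deallocates each string's backing storage in
    /// a per-element block and then deallocates the list's own allocation.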
    fn deallocate(&mut self, ty: &Type, what: Deallocate) {
        use Instruction::*;

        match *ty {
            Type::String => {
                self.emit(&Instruction::GuestDeallocateString);
            }

            Type::Bool
            | Type::U8
            | Type::S8
            | Type::U16
            | Type::S16
            | Type::U32
            | Type::S32
            | Type::Char
            | Type::U64
            | Type::S64
            | Type::F32
            | Type::F64
            | Type::ErrorContext => {
                // No deallocation necessary, just discard the operand on the
                // stack.
                self.stack.pop().unwrap();
            }

            Type::Id(id) => match &self.resolve.types[id].kind {
                TypeDefKind::Type(t) => self.deallocate(t, what),

                TypeDefKind::List(element) => {
                    self.push_block();
                    self.emit(&IterBasePointer);
                    let elemaddr = self.stack.pop().unwrap();
                    self.deallocate_indirect(element, elemaddr, Default::default(), what);
                    self.finish_block(0);

                    self.emit(&Instruction::GuestDeallocateList { element });
                }

                TypeDefKind::Handle(Handle::Own(_))
                | TypeDefKind::Future(_)
                | TypeDefKind::Stream(_)
                    if what.handles() =>
                {
                    self.lift(ty);
                    self.emit(&DropHandle { ty });
                }

                TypeDefKind::Record(record) => {
                    self.flat_for_each_record_type(
                        ty,
                        record.fields.iter().map(|f| &f.ty),
                        |me, ty| me.deallocate(ty, what),
                    );
                }

                TypeDefKind::Tuple(tuple) => {
                    self.flat_for_each_record_type(ty, tuple.types.iter(), |me, ty| {
                        me.deallocate(ty, what)
                    });
                }

                TypeDefKind::Variant(variant) => {
                    self.flat_for_each_variant_arm(
                        ty,
                        false,
                        variant.cases.iter().map(|c| c.ty.as_ref()),
                        |me, ty| me.deallocate(ty, what),
                    );
                    self.emit(&GuestDeallocateVariant {
                        blocks: variant.cases.len(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.flat_for_each_variant_arm(ty, false, [None, Some(t)], |me, ty| {
                        me.deallocate(ty, what)
                    });
                    self.emit(&GuestDeallocateVariant { blocks: 2 });
                }

                TypeDefKind::Result(e) => {
                    self.flat_for_each_variant_arm(
                        ty,
                        false,
                        [e.ok.as_ref(), e.err.as_ref()],
                        |me, ty| me.deallocate(ty, what),
                    );
                    self.emit(&GuestDeallocateVariant { blocks: 2 });
                }

                // Nothing to free; just discard the operand on the stack.
                TypeDefKind::Flags(_)
                | TypeDefKind::Enum(_)
                | TypeDefKind::Future(_)
                | TypeDefKind::Stream(_)
                | TypeDefKind::Handle(Handle::Own(_))
                | TypeDefKind::Handle(Handle::Borrow(_)) => {
                    self.stack.pop().unwrap();
                }

                TypeDefKind::Resource => unreachable!(),
                TypeDefKind::Unknown => unreachable!(),

                TypeDefKind::FixedSizeList(..) => todo!(),
                TypeDefKind::Map(..) => todo!(),
            },
        }
    }

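    /// Like `deallocate`, but for a value that lives in linear memory at
    /// `addr + offset` rather than on the operand stack, reading pointers,
    /// lengths, and tags out of memory as needed.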
    fn deallocate_indirect(
        &mut self,
        ty: &Type,
        addr: B::Operand,
        offset: ArchitectureSize,
        what: Deallocate,
    ) {
        use Instruction::*;

        // No need to execute any instructions if this type itself doesn't
        // require any form of post-return.
        if !needs_deallocate(self.resolve, ty, what) {
            return;
        }

        match *ty {
            Type::String => {
                self.stack.push(addr.clone());
                self.emit(&Instruction::PointerLoad { offset });
                self.stack.push(addr);
                self.emit(&Instruction::LengthLoad {
                    offset: offset + self.bindgen.sizes().align(ty).into(),
                });
                self.deallocate(ty, what);
            }

            Type::Bool
            | Type::U8
            | Type::S8
            | Type::U16
            | Type::S16
            | Type::U32
            | Type::S32
            | Type::Char
            | Type::U64
            | Type::S64
            | Type::F32
            | Type::F64
            | Type::ErrorContext => {}

            Type::Id(id) => match &self.resolve.types[id].kind {
                TypeDefKind::Type(t) => self.deallocate_indirect(t, addr, offset, what),

                TypeDefKind::List(_) => {
                    self.stack.push(addr.clone());
                    self.emit(&Instruction::PointerLoad { offset });
                    self.stack.push(addr);
                    self.emit(&Instruction::LengthLoad {
                        offset: offset + self.bindgen.sizes().align(ty).into(),
                    });

                    self.deallocate(ty, what);
                }

                TypeDefKind::Handle(Handle::Own(_))
                | TypeDefKind::Future(_)
                | TypeDefKind::Stream(_)
                    if what.handles() =>
                {
                    self.read_from_memory(ty, addr, offset);
                    self.emit(&DropHandle { ty });
                }

                TypeDefKind::Handle(Handle::Own(_)) => unreachable!(),
                TypeDefKind::Handle(Handle::Borrow(_)) => unreachable!(),
                TypeDefKind::Resource => unreachable!(),

                TypeDefKind::Record(record) => {
                    self.deallocate_indirect_fields(
                        &record.fields.iter().map(|f| f.ty).collect::<Vec<_>>(),
                        addr,
                        offset,
                        what,
                    );
                }

                TypeDefKind::Tuple(tuple) => {
                    self.deallocate_indirect_fields(&tuple.types, addr, offset, what);
                }

                TypeDefKind::Flags(_) => {}

                TypeDefKind::Variant(variant) => {
                    self.deallocate_indirect_variant(
                        offset,
                        addr,
                        variant.tag(),
                        variant.cases.iter().map(|c| c.ty.as_ref()),
                        what,
                    );
                    self.emit(&GuestDeallocateVariant {
                        blocks: variant.cases.len(),
                    });
                }

                TypeDefKind::Option(t) => {
                    self.deallocate_indirect_variant(offset, addr, Int::U8, [None, Some(t)], what);
                    self.emit(&GuestDeallocateVariant { blocks: 2 });
                }

                TypeDefKind::Result(e) => {
                    self.deallocate_indirect_variant(
                        offset,
                        addr,
                        Int::U8,
                        [e.ok.as_ref(), e.err.as_ref()],
                        what,
                    );
                    self.emit(&GuestDeallocateVariant { blocks: 2 });
                }

                TypeDefKind::Enum(_) => {}

                TypeDefKind::Future(_) => unreachable!(),
                TypeDefKind::Stream(_) => unreachable!(),
                TypeDefKind::Unknown => unreachable!(),
                TypeDefKind::FixedSizeList(..) => todo!(),
                TypeDefKind::Map(..) => todo!(),
            },
        }
    }

    fn deallocate_indirect_variant<'b>(
        &mut self,
        offset: ArchitectureSize,
        addr: B::Operand,
        tag: Int,
        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
        what: Deallocate,
    ) {
        self.stack.push(addr.clone());
        self.load_intrepr(offset, tag);
        let payload_offset = offset + (self.bindgen.sizes().payload_offset(tag, cases.clone()));
        for ty in cases {
            self.push_block();
            if let Some(ty) = ty {
                self.deallocate_indirect(ty, addr.clone(), payload_offset, what);
            }
            self.finish_block(0);
        }
    }

    fn deallocate_indirect_fields(
        &mut self,
        tys: &[Type],
        addr: B::Operand,
        offset: ArchitectureSize,
        what: Deallocate,
    ) {
        for (field_offset, ty) in self.bindgen.sizes().field_offsets(tys) {
            self.deallocate_indirect(ty, addr.clone(), offset + (field_offset), what);
        }
    }
}

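/// Computes the `Bitcast` needed to reinterpret a value of core type `from`
/// as core type `to`.
///
/// Casts between types of different widths go through `I64` as an
/// intermediate; for example, `cast(F32, PointerOrI64)` yields
/// `Bitcast::Sequence([F32ToI64, I64ToP64])`.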
fn cast(from: WasmType, to: WasmType) -> Bitcast {
    use WasmType::*;

    match (from, to) {
        (I32, I32)
        | (I64, I64)
        | (F32, F32)
        | (F64, F64)
        | (Pointer, Pointer)
        | (PointerOrI64, PointerOrI64)
        | (Length, Length) => Bitcast::None,

        (I32, I64) => Bitcast::I32ToI64,
        (F32, I32) => Bitcast::F32ToI32,
        (F64, I64) => Bitcast::F64ToI64,

        (I64, I32) => Bitcast::I64ToI32,
        (I32, F32) => Bitcast::I32ToF32,
        (I64, F64) => Bitcast::I64ToF64,

        (F32, I64) => Bitcast::F32ToI64,
        (I64, F32) => Bitcast::I64ToF32,

        (I64, PointerOrI64) => Bitcast::I64ToP64,
        (Pointer, PointerOrI64) => Bitcast::PToP64,
        (_, PointerOrI64) => {
            Bitcast::Sequence(Box::new([cast(from, I64), cast(I64, PointerOrI64)]))
        }

        (PointerOrI64, I64) => Bitcast::P64ToI64,
        (PointerOrI64, Pointer) => Bitcast::P64ToP,
        (PointerOrI64, _) => Bitcast::Sequence(Box::new([cast(PointerOrI64, I64), cast(I64, to)])),

        (I32, Pointer) => Bitcast::I32ToP,
        (Pointer, I32) => Bitcast::PToI32,
        (I32, Length) => Bitcast::I32ToL,
        (Length, I32) => Bitcast::LToI32,
        (I64, Length) => Bitcast::I64ToL,
        (Length, I64) => Bitcast::LToI64,
        (Pointer, Length) => Bitcast::PToL,
        (Length, Pointer) => Bitcast::LToP,

        (F32, Pointer | Length) => Bitcast::Sequence(Box::new([cast(F32, I32), cast(I32, to)])),
        (Pointer | Length, F32) => Bitcast::Sequence(Box::new([cast(from, I32), cast(I32, F32)])),

        (F32, F64)
        | (F64, F32)
        | (F64, I32)
        | (I32, F64)
        | (Pointer | Length, I64 | F64)
        | (I64 | F64, Pointer | Length) => {
            unreachable!("Don't know how to bitcast from {:?} to {:?}", from, to);
        }
    }
}

/// Flattens `ty` into its sequence of core wasm types.
///
/// It is sometimes necessary to restrict the maximum number of flat
/// parameters dynamically, for example during an async guest import call
/// (where flat params are limited to 4); `max_params` overrides the default
/// limit in that case.
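///
/// Returns `None` when the flattened form would not fit in `max_params`
/// core values; callers then pass the value indirectly through linear
/// memory instead.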
fn flat_types(resolve: &Resolve, ty: &Type, max_params: Option<usize>) -> Option<Vec<WasmType>> {
    let max_params = max_params.unwrap_or(MAX_FLAT_PARAMS);
    let mut storage = iter::repeat_n(WasmType::I32, max_params).collect::<Vec<_>>();
    let mut flat = FlatTypes::new(storage.as_mut_slice());
    resolve.push_flat(ty, &mut flat).then_some(flat.to_vec())
}