// cosmian_wit_parser/abi.rs
1use crate::{
2 Function, Int, Interface, Record, RecordKind, ResourceId, Type, TypeDefKind, TypeId, Variant,
3};
4use std::mem;
5
/// A raw WebAssembly signature with params and results.
#[derive(Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct WasmSignature {
    /// The WebAssembly parameters of this function.
    pub params: Vec<WasmType>,
    /// The WebAssembly results of this function.
    pub results: Vec<WasmType>,
    /// The raw types, if needed, returned through return pointer located in
    /// `params`.
    ///
    /// `None` presumably means no return pointer is used and everything is
    /// carried directly in `results` — confirm against the signature builder.
    pub retptr: Option<Vec<WasmType>>,
}
17
/// Enumerates wasm types used by interface types when lowering/lifting.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum WasmType {
    /// A 32-bit wasm integer.
    I32,
    /// A 64-bit wasm integer.
    I64,
    /// A 32-bit wasm float.
    F32,
    /// A 64-bit wasm float.
    F64,
    // NOTE: we don't lower interface types to any other Wasm type,
    // e.g. externref, so we don't need to define them here.
}
28
29fn unify(a: WasmType, b: WasmType) -> WasmType {
30 use WasmType::*;
31
32 match (a, b) {
33 (I64, _) | (_, I64) | (I32, F64) | (F64, I32) => I64,
34
35 (I32, I32) | (I32, F32) | (F32, I32) => I32,
36
37 (F32, F32) => F32,
38 (F64, F64) | (F32, F64) | (F64, F32) => F64,
39 }
40}
41
42impl From<Int> for WasmType {
43 fn from(i: Int) -> WasmType {
44 match i {
45 Int::U8 | Int::U16 | Int::U32 => WasmType::I32,
46 Int::U64 => WasmType::I64,
47 }
48 }
49}
50
/// Possible ABIs for interface functions to have.
///
/// Note that this is a stopgap until we have more of interface types. Interface
/// types functions do not have ABIs, they have APIs. For the meantime, however,
/// we mandate ABIs to ensure we can all talk to each other.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum Abi {
    /// Only stable ABI currently, and is the historical WASI ABI since it was
    /// first created.
    ///
    /// Note that this ABI is limited notably in its return values where it can
    /// only return 0 results or one `Result<T, enum>` lookalike.
    Preview1,

    /// In-progress "canonical ABI" as proposed for interface types.
    Canonical,
}
68
// Helper macro for defining instructions without having to have tons of
// exhaustive `match` statements to update.
//
// Each instruction is declared in the form:
//
//     VariantName { field: Ty, ... } : [num_popped] => [num_pushed],
//
// and the macro expands to (a) the enum variant itself and (b) matching arms
// in the `operands_len`/`results_len` accessors below. The pop/push counts
// are arbitrary expressions and may reference the variant's fields (e.g.
// `[casts.len()]`), which is why the match arms destructure every field.
macro_rules! def_instruction {
    (
        $( #[$enum_attr:meta] )*
        pub enum $name:ident<'a> {
            $(
                $( #[$attr:meta] )*
                $variant:ident $( {
                    $($field:ident : $field_ty:ty $(,)* )*
                } )?
                :
                [$num_popped:expr] => [$num_pushed:expr],
            )*
        }
    ) => {
        // The enum definition itself, with the `: [..] => [..]` annotations
        // stripped off.
        $( #[$enum_attr] )*
        pub enum $name<'a> {
            $(
                $( #[$attr] )*
                $variant $( {
                    $(
                        $field : $field_ty,
                    )*
                } )? ,
            )*
        }

        impl $name<'_> {
            /// How many operands does this instruction pop from the stack?
            #[allow(unused_variables)]
            pub fn operands_len(&self) -> usize {
                match self {
                    $(
                        // Fields are bound by name so `$num_popped` may
                        // reference them.
                        Self::$variant $( {
                            $(
                                $field,
                            )*
                        } )? => $num_popped,
                    )*
                }
            }

            /// How many results does this instruction push onto the stack?
            #[allow(unused_variables)]
            pub fn results_len(&self) -> usize {
                match self {
                    $(
                        Self::$variant $( {
                            $(
                                $field,
                            )*
                        } )? => $num_pushed,
                    )*
                }
            }
        }
    };
}
128
def_instruction! {
    #[derive(Debug)]
    pub enum Instruction<'a> {
        /// Acquires the specified parameter and places it on the stack.
        /// Depending on the context this may refer to wasm parameters or
        /// interface types parameters.
        GetArg { nth: usize } : [0] => [1],

        // Integer const/manipulation instructions

        /// Pushes the constant `val` onto the stack.
        I32Const { val: i32 } : [0] => [1],
        /// Casts the top N items on the stack using the `Bitcast` enum
        /// provided. Consumes the same number of operands that this produces.
        Bitcasts { casts: &'a [Bitcast] } : [casts.len()] => [casts.len()],
        /// Pushes a number of constant zeros for each wasm type on the stack.
        ConstZero { tys: &'a [WasmType] } : [0] => [tys.len()],

        // Memory load/store instructions

        /// Pops an `i32` from the stack and loads a little-endian `i32` from
        /// it, using the specified constant offset.
        I32Load { offset: i32 } : [1] => [1],
        /// Pops an `i32` from the stack and loads a little-endian `i8` from
        /// it, using the specified constant offset. The value loaded is then
        /// zero-extended to 32-bits.
        I32Load8U { offset: i32 } : [1] => [1],
        /// Pops an `i32` from the stack and loads a little-endian `i8` from
        /// it, using the specified constant offset. The value loaded is then
        /// sign-extended to 32-bits.
        I32Load8S { offset: i32 } : [1] => [1],
        /// Pops an `i32` from the stack and loads a little-endian `i16` from
        /// it, using the specified constant offset. The value loaded is then
        /// zero-extended to 32-bits.
        I32Load16U { offset: i32 } : [1] => [1],
        /// Pops an `i32` from the stack and loads a little-endian `i16` from
        /// it, using the specified constant offset. The value loaded is then
        /// sign-extended to 32-bits.
        I32Load16S { offset: i32 } : [1] => [1],
        /// Pops an `i32` from the stack and loads a little-endian `i64` from
        /// it, using the specified constant offset.
        I64Load { offset: i32 } : [1] => [1],
        /// Pops an `i32` from the stack and loads a little-endian `f32` from
        /// it, using the specified constant offset.
        F32Load { offset: i32 } : [1] => [1],
        /// Pops an `i32` from the stack and loads a little-endian `f64` from
        /// it, using the specified constant offset.
        F64Load { offset: i32 } : [1] => [1],

        /// Pops an `i32` address from the stack and then an `i32` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        I32Store { offset: i32 } : [2] => [0],
        /// Pops an `i32` address from the stack and then an `i32` value.
        /// Stores the low 8 bits of the value in little-endian at the pointer
        /// specified plus the constant `offset`.
        I32Store8 { offset: i32 } : [2] => [0],
        /// Pops an `i32` address from the stack and then an `i32` value.
        /// Stores the low 16 bits of the value in little-endian at the pointer
        /// specified plus the constant `offset`.
        I32Store16 { offset: i32 } : [2] => [0],
        /// Pops an `i32` address from the stack and then an `i64` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        I64Store { offset: i32 } : [2] => [0],
        /// Pops an `i32` address from the stack and then an `f32` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        F32Store { offset: i32 } : [2] => [0],
        /// Pops an `i32` address from the stack and then an `f64` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        F64Store { offset: i32 } : [2] => [0],

        // Scalar lifting/lowering

        /// Converts an interface type `char` value to a 32-bit integer
        /// representing the unicode scalar value.
        I32FromChar : [1] => [1],
        /// Converts an interface type `u64` value to a wasm `i64`.
        I64FromU64 : [1] => [1],
        /// Converts an interface type `s64` value to a wasm `i64`.
        I64FromS64 : [1] => [1],
        /// Converts an interface type `u32` value to a wasm `i32`.
        I32FromU32 : [1] => [1],
        /// Converts an interface type `s32` value to a wasm `i32`.
        I32FromS32 : [1] => [1],
        /// Converts an interface type `u16` value to a wasm `i32`.
        I32FromU16 : [1] => [1],
        /// Converts an interface type `s16` value to a wasm `i32`.
        I32FromS16 : [1] => [1],
        /// Converts an interface type `u8` value to a wasm `i32`.
        I32FromU8 : [1] => [1],
        /// Converts an interface type `s8` value to a wasm `i32`.
        I32FromS8 : [1] => [1],
        /// Converts a language-specific `usize` value to a wasm `i32`.
        I32FromUsize : [1] => [1],
        /// Converts a language-specific C `char` value to a wasm `i32`.
        I32FromChar8 : [1] => [1],
        /// Converts an interface type `f32` value to a wasm `f32`.
        ///
        /// This may be a noop for some implementations, but it's here in case the
        /// native language representation of `f32` is different than the wasm
        /// representation of `f32`.
        F32FromIf32 : [1] => [1],
        /// Converts an interface type `f64` value to a wasm `f64`.
        ///
        /// This may be a noop for some implementations, but it's here in case the
        /// native language representation of `f64` is different than the wasm
        /// representation of `f64`.
        F64FromIf64 : [1] => [1],

        /// Converts a native wasm `i32` to an interface type `s8`.
        ///
        /// This will truncate the upper bits of the `i32`.
        S8FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u8`.
        ///
        /// This will truncate the upper bits of the `i32`.
        U8FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `s16`.
        ///
        /// This will truncate the upper bits of the `i32`.
        S16FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u16`.
        ///
        /// This will truncate the upper bits of the `i32`.
        U16FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `s32`.
        S32FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u32`.
        U32FromI32 : [1] => [1],
        /// Converts a native wasm `i64` to an interface type `s64`.
        S64FromI64 : [1] => [1],
        /// Converts a native wasm `i64` to an interface type `u64`.
        U64FromI64 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `char`.
        ///
        /// It's safe to assume that the `i32` is indeed a valid unicode code point.
        CharFromI32 : [1] => [1],
        /// Converts a native wasm `f32` to an interface type `f32`.
        If32FromF32 : [1] => [1],
        /// Converts a native wasm `f64` to an interface type `f64`.
        If64FromF64 : [1] => [1],
        /// Converts a native wasm `i32` to a language-specific C `char`.
        ///
        /// This will truncate the upper bits of the `i32`.
        Char8FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to a language-specific `usize`.
        UsizeFromI32 : [1] => [1],

        // Handles

        /// Converts a "borrowed" handle into a wasm `i32` value.
        ///
        /// > **Note**: this documentation is outdated and does not reflect the
        /// > current implementation of the canonical ABI. This needs to be
        /// > updated.
        ///
        /// A "borrowed" handle in this case means one where ownership is not
        /// being relinquished. This is only used for lowering interface types
        /// parameters.
        ///
        /// Situations that this is used are:
        ///
        /// * A wasm exported function receives, as a parameter, handles defined
        ///   by the wasm module itself. This is effectively proof of ownership
        ///   by an external caller (be it host or wasm module) and the
        ///   ownership of the handle still lies with the caller. The wasm
        ///   module is only receiving a reference to the resource.
        ///
        /// * A wasm module is calling an import with a handle defined by the
        ///   import's module. Sort of the converse of the previous case this
        ///   means that the wasm module is handing out a reference to a
        ///   resource that it owns. The type in the wasm module, for example,
        ///   needs to reflect this.
        ///
        /// This instruction is not used for return values in either
        /// export/import positions.
        I32FromBorrowedHandle { ty: ResourceId } : [1] => [1],

        /// Converts an "owned" handle into a wasm `i32` value.
        ///
        /// > **Note**: this documentation is outdated and does not reflect the
        /// > current implementation of the canonical ABI. This needs to be
        /// > updated.
        ///
        /// This conversion is used for handle values which are crossing a
        /// module boundary for perhaps the first time. Some example cases of
        /// when this conversion is used are:
        ///
        /// * When a host defines a function to be imported, returned handles
        ///   use this instruction. Handles being returned to wasm are granting
        ///   a capability, which means that this new capability is typically
        ///   wrapped up in a new integer descriptor.
        ///
        /// * When a wasm module calls an imported function with a type defined
        ///   by itself, then it's granting a capability to the callee. This
        ///   means that the wasm module's type is being granted for the first
        ///   time, possibly, so it needs to be an owned value that's consumed.
        ///   Note that this doesn't actually happen with `*.witx` today due to
        ///   the lack of handle type imports.
        ///
        /// * When a wasm module export returns a handle defined within the
        ///   module, then it's similar to calling an imported function with
        ///   that handle. The capability is being granted to the caller of the
        ///   export, so the owned value is wrapped up in an `i32`.
        ///
        /// * When a host is calling a wasm module with a capability defined by
        ///   the host, it's similar to the host import returning a capability.
        ///   This would be granting the wasm module with the capability so an
        ///   owned version with a fresh handle is passed to the wasm module.
        ///   Note that this doesn't happen today with `*.witx` due to the lack
        ///   of handle type imports.
        ///
        /// Basically this instruction is used for handle->wasm conversions
        /// depending on the calling context and where the handle type in
        /// question was defined.
        I32FromOwnedHandle { ty: ResourceId } : [1] => [1],

        /// Converts a native wasm `i32` into an owned handle value.
        ///
        /// > **Note**: this documentation is outdated and does not reflect the
        /// > current implementation of the canonical ABI. This needs to be
        /// > updated.
        ///
        /// This is the converse of `I32FromOwnedHandle` and is used in similar
        /// situations:
        ///
        /// * A host definition of an import receives a handle defined in the
        ///   module itself.
        /// * A wasm module calling an import receives a handle defined by the
        ///   import.
        /// * A wasm module's export receives a handle defined by an external
        ///   module.
        /// * A host calling a wasm export receives a handle defined in the
        ///   module.
        ///
        /// Note that like `I32FromOwnedHandle` the first and third bullets
        /// above don't happen today because witx can't express type imports
        /// just yet.
        HandleOwnedFromI32 { ty: ResourceId } : [1] => [1],

        /// Converts a native wasm `i32` into a borrowed handle value.
        ///
        /// > **Note**: this documentation is outdated and does not reflect the
        /// > current implementation of the canonical ABI. This needs to be
        /// > updated.
        ///
        /// This is the converse of `I32FromBorrowedHandle` and is used in similar
        /// situations:
        ///
        /// * An exported wasm function receives, as a parameter, a handle that
        ///   is defined by the wasm module.
        /// * A host-defined imported function is receiving a handle, as a
        ///   parameter, that is defined by the host itself.
        HandleBorrowedFromI32 { ty: ResourceId } : [1] => [1],

        // lists

        /// Lowers a list where the element's layout in the native language is
        /// expected to match the canonical ABI definition of interface types.
        ///
        /// Pops a list value from the stack and pushes the pointer/length onto
        /// the stack. If `realloc` is set to `Some` then this is expected to
        /// *consume* the list which means that the data needs to be copied. An
        /// allocation/copy is expected when:
        ///
        /// * A host is calling a wasm export with a list (it needs to copy the
        ///   list in to the callee's module, allocating space with `realloc`)
        /// * A wasm export is returning a list (it's expected to use `realloc`
        ///   to give ownership of the list to the caller).
        /// * A host is returning a list in an import definition, meaning that
        ///   space needs to be allocated in the caller with `realloc`.
        ///
        /// A copy does not happen (e.g. `realloc` is `None`) when:
        ///
        /// * A wasm module calls an import with the list. In this situation
        ///   it's expected the caller will know how to access this module's
        ///   memory (e.g. the host has raw access or wasm-to-wasm communication
        ///   would copy the list).
        ///
        /// If `realloc` is `Some` then the adapter is not responsible for
        /// cleaning up this list because the other end is receiving the
        /// allocation. If `realloc` is `None` then the adapter is responsible
        /// for cleaning up any temporary allocation it created, if any.
        ListCanonLower {
            element: &'a Type,
            realloc: Option<&'a str>,
        } : [1] => [2],

        /// Lowers a list where the element's layout in the native language is
        /// not expected to match the canonical ABI definition of interface
        /// types.
        ///
        /// Pops a list value from the stack and pushes the pointer/length onto
        /// the stack. This operation also pops a block from the block stack
        /// which is used as the iteration body of writing each element of the
        /// list consumed.
        ///
        /// The `realloc` field here behaves the same way as `ListCanonLower`.
        /// It's only set to `None` when a wasm module calls a declared import.
        /// Otherwise lowering in other contexts requires allocating memory for
        /// the receiver to own.
        ListLower {
            element: &'a Type,
            realloc: Option<&'a str>,
        } : [1] => [2],

        /// Lifts a list which has a canonical representation into an interface
        /// types value.
        ///
        /// The term "canonical" representation here means that the
        /// representation of the interface types value in the native language
        /// exactly matches the canonical ABI definition of the type.
        ///
        /// This will consume two `i32` values from the stack, a pointer and a
        /// length, and then produces an interface value list. If the `free`
        /// field is set to `Some` then the pointer/length should be considered
        /// an owned allocation and need to be deallocated by the receiver. If
        /// it is set to `None` then a view is provided but it does not need to
        /// be deallocated.
        ///
        /// The `free` field is set to `Some` in similar situations as described
        /// by `ListCanonLower`. If `free` is `Some` then the memory must be
        /// deallocated after the lifted list is done being consumed. If it is
        /// `None` then the receiver of the lifted list does not own the memory
        /// and must leave the memory as-is.
        ListCanonLift {
            element: &'a Type,
            free: Option<&'a str>,
            ty: TypeId,
        } : [2] => [1],

        /// Lifts a list into an interface types value.
        ///
        /// This will consume two `i32` values from the stack, a pointer and a
        /// length, and then produces an interface value list. Note that the
        /// pointer/length popped are **owned** and need to be deallocated with
        /// the wasm `free` function when the list is no longer needed.
        ///
        /// This will also pop a block from the block stack which is how to
        /// read each individual element from the list.
        ListLift {
            element: &'a Type,
            free: Option<&'a str>,
            ty: TypeId,
        } : [2] => [1],

        /// Pushes an operand onto the stack representing the list item from
        /// each iteration of the list.
        ///
        /// This is only used inside of blocks related to lowering lists.
        IterElem { element: &'a Type } : [0] => [1],

        /// Pushes an operand onto the stack representing the base pointer of
        /// the next element in a list.
        ///
        /// This is used for both lifting and lowering lists.
        IterBasePointer : [0] => [1],

        // buffers

        /// Pops a buffer value, pushes the pointer/length of where it points
        /// to in memory.
        BufferLowerPtrLen { push: bool, ty: &'a Type } : [1] => [3],
        /// Pops a buffer value, pushes an integer handle for the buffer.
        BufferLowerHandle { push: bool, ty: &'a Type } : [1] => [1],
        /// Pops a ptr/len, pushes a buffer wrapping that ptr/len of the memory
        /// from the origin module.
        BufferLiftPtrLen { push: bool, ty: &'a Type } : [3] => [1],
        /// Pops an i32, pushes a buffer wrapping that i32 handle.
        BufferLiftHandle { push: bool, ty: &'a Type } : [1] => [1],

        // records

        /// Pops a record value off the stack, decomposes the record to all of
        /// its fields, and then pushes the fields onto the stack.
        RecordLower {
            record: &'a Record,
            name: Option<&'a str>,
            ty: TypeId,
        } : [1] => [record.fields.len()],

        /// Pops all fields for a record off the stack and then composes them
        /// into a record.
        RecordLift {
            record: &'a Record,
            name: Option<&'a str>,
            ty: TypeId,
        } : [record.fields.len()] => [1],

        /// Converts a language-specific record-of-bools to a list of `i32`.
        FlagsLower {
            record: &'a Record,
            name: &'a str,
            ty: TypeId,
        } : [1] => [record.num_i32s()],
        /// Converts a language-specific record-of-bools to a single `i64`.
        FlagsLower64 {
            record: &'a Record,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],
        /// Converts a list of native wasm `i32` to a language-specific
        /// record-of-bools.
        FlagsLift {
            record: &'a Record,
            name: &'a str,
            ty: TypeId,
        } : [record.num_i32s()] => [1],
        /// Converts a single native wasm `i64` to a language-specific
        /// record-of-bools.
        FlagsLift64 {
            record: &'a Record,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        // variants

        /// This is a special instruction used for `VariantLower`
        /// instruction to determine the name of the payload, if present, to use
        /// within each block.
        ///
        /// Each sub-block will have this be the first instruction, and if it
        /// lowers a payload it will expect something bound to this name.
        VariantPayloadName : [0] => [1],

        /// TODO
        BufferPayloadName : [0] => [1],

        /// Pops a variant off the stack as well as `ty.cases.len()` blocks
        /// from the code generator. Uses each of those blocks and the value
        /// from the stack to produce `nresults` of items.
        VariantLower {
            variant: &'a Variant,
            name: Option<&'a str>,
            ty: TypeId,
            results: &'a [WasmType],
        } : [1] => [results.len()],

        /// Pops an `i32` off the stack as well as `ty.cases.len()` blocks
        /// from the code generator. Uses each of those blocks and the value
        /// from the stack to produce a final variant.
        VariantLift {
            variant: &'a Variant,
            name: Option<&'a str>,
            ty: TypeId,
        } : [1] => [1],

        // calling/control flow

        /// Represents a call to a raw WebAssembly API. The module/name are
        /// provided inline as well as the types if necessary.
        ///
        /// Note that this instruction is not currently used for async
        /// functions, instead `CallWasmAsyncImport` and `CallWasmAsyncExport`
        /// are used.
        CallWasm {
            module: &'a str,
            name: &'a str,
            sig: &'a WasmSignature,
        } : [sig.params.len()] => [sig.results.len()],

        /// Represents a call to an asynchronous wasm import.
        ///
        /// This currently only happens when a compiled-to-wasm module calls an
        /// async import. This instruction is used to indicate that the
        /// specified import function should be called. The specified import
        /// function has `params` as its types, but the final two parameters
        /// must be synthesized by this instruction which are the
        /// callback/callback state. The actual imported function does not
        /// return anything but the callback will be called with the `i32` state
        /// as the first parameter and `results` as the rest of the parameters.
        /// The callback function should return nothing.
        ///
        /// It's up to the bindings generator to figure out how to make this
        /// look synchronous despite it being callback-based in the middle.
        CallWasmAsyncImport {
            module: &'a str,
            name: &'a str,
            params: &'a [WasmType],
            results: &'a [WasmType],
        } : [params.len() - 2] => [results.len()],

        /// Represents a call to an asynchronous wasm export.
        ///
        /// This currently only happens when a host module calls an async
        /// function on a wasm module. The specified function will take `params`
        /// as its argument plus one more argument of an `i32` state that the
        /// host needs to synthesize. The function being called doesn't actually
        /// return anything. Instead wasm will call an `async_export_done`
        /// intrinsic in the `canonical_abi` module. This intrinsic receives a
        /// context value and a pointer into linear memory. The context value
        /// lines up with the final `i32` parameter of this function call (which
        /// the bindings generator must synthesize) and the pointer into linear
        /// memory contains the `results`, stored at 8-byte offsets in the same
        /// manner that multiple results are transferred.
        ///
        /// It's up to the bindings generator to figure out how to make this
        /// look synchronous despite it being callback-based in the middle.
        CallWasmAsyncExport {
            module: &'a str,
            name: &'a str,
            params: &'a [WasmType],
            results: &'a [WasmType],
        } : [params.len() - 1] => [results.len()],

        /// Same as `CallWasm`, except the dual where an interface is being
        /// called rather than a raw wasm function.
        ///
        /// Note that this will be used for async functions.
        CallInterface {
            module: &'a str,
            func: &'a Function,
        } : [func.params.len()] => [func.results.len()],

        /// Returns `amt` values on the stack. This is always the last
        /// instruction.
        ///
        /// Note that this instruction is used for asynchronous functions where
        /// the results are *lifted*, not when they're *lowered*, though. For
        /// those modes the `ReturnAsyncExport` and `ReturnAsyncImport`
        /// functions are used.
        Return { amt: usize, func: &'a Function } : [*amt] => [0],

        /// "Returns" from an asynchronous export.
        ///
        /// This is only used for compiled-to-wasm modules at this time, and
        /// only for the exports of async functions in those modules. This
        /// instruction receives two parameters, the first of which is the
        /// original context from the start of the function which was provided
        /// when the export was first called (its last parameter). The second
        /// argument is a pointer into linear memory with the results of the
        /// asynchronous call already encoded. This instruction should then call
        /// the `async_export_done` intrinsic in the `canonical_abi` module.
        ReturnAsyncExport { func: &'a Function } : [2] => [0],

        /// "Returns" from an asynchronous import.
        ///
        /// This is only used for host modules at this time, and
        /// only for the import of async functions in those modules. This
        /// instruction receives the operands used to call the completion
        /// function in the wasm module. The first parameter to this instruction
        /// is the index into the function table of the function to call, and
        /// the remaining parameters are the parameters to invoke the function
        /// with.
        ReturnAsyncImport {
            func: &'a Function,
            params: usize,
        } : [*params + 2] => [0],


        // ...

        /// An instruction from an extended instruction set that's specific to
        /// `*.witx` and the "Preview1" ABI.
        Witx {
            instr: &'a WitxInstruction<'a>,
        } : [instr.operands_len()] => [instr.results_len()],
    }
}
689
/// Conversion kinds between the core wasm value types, consumed by the
/// `Bitcasts` instruction when a value must travel in a slot whose wasm type
/// differs from its natural representation.
///
/// This is a fieldless enum, so it derives the full set of cheap value
/// traits (`Copy`/`Clone`/`Eq`/`Hash`) like the sibling `WasmType` enum
/// does; deriving `PartialEq` without `Eq` tripped clippy's
/// `derive_partial_eq_without_eq` lint and forced callers to clone out of
/// `&[Bitcast]` slices.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum Bitcast {
    // Upcasts
    F32ToF64,
    F32ToI32,
    F64ToI64,
    I32ToI64,
    F32ToI64,

    // Downcasts
    F64ToF32,
    I32ToF32,
    I64ToF64,
    I64ToI32,
    I64ToF32,

    /// No conversion necessary; the value is already in the right type.
    None,
}
708
def_instruction! {
    #[derive(Debug)]
    pub enum WitxInstruction<'a> {
        /// Takes the value off the top of the stack and writes it into linear
        /// memory. Pushes the address in linear memory as an `i32`.
        AddrOf : [1] => [1],

        /// Converts a language-specific pointer value to a wasm `i32`.
        I32FromPointer : [1] => [1],
        /// Converts a language-specific const pointer value to a wasm `i32`.
        I32FromConstPointer : [1] => [1],
        /// Converts a native wasm `i32` to a language-specific pointer.
        PointerFromI32 { ty: &'a Type }: [1] => [1],
        /// Converts a native wasm `i32` to a language-specific const pointer.
        ConstPointerFromI32 { ty: &'a Type } : [1] => [1],

        /// This is a special instruction specifically for the original ABI of
        /// WASI. The raw return `i32` of a function is re-pushed onto the
        /// stack for reuse.
        ReuseReturn : [0] => [1],
    }
}
731
/// Whether the glue code surrounding a call is lifting arguments and lowering
/// results or vice versa.
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum LiftLower {
    /// When the glue code lifts arguments and lowers results.
    ///
    /// ```text
    /// Wasm --lift-args--> SourceLanguage; call; SourceLanguage --lower-results--> Wasm
    /// ```
    LiftArgsLowerResults,
    /// When the glue code lowers arguments and lifts results.
    ///
    /// ```text
    /// SourceLanguage --lower-args--> Wasm; call; Wasm --lift-results--> SourceLanguage
    /// ```
    LowerArgsLiftResults,
}
749
/// We use a different ABI for wasm importing functions exported by the host
/// than for wasm exporting functions imported by the host.
///
/// Note that this reflects the flavor of ABI we generate, and not necessarily
/// the way the resulting bindings will be used by end users. See the comments
/// on the `Direction` enum in gen-core for details.
///
/// The bindings ABI has a concept of a "guest" and a "host". Wasmlink can
/// generate glue to bridge between two "guests", but in that case each side
/// thinks of the glue as the "host". There are two variants of the ABI,
/// one specialized for the "guest" importing and calling a function defined
/// and exported in the "host", and the other specialized for the "host"
/// importing and calling a function defined and exported in the "guest".
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum AbiVariant {
    /// The guest is importing and calling the function.
    GuestImport,
    /// The guest is defining and exporting the function.
    GuestExport,
}
770
/// Trait for language implementors to use to generate glue code between native
/// WebAssembly signatures and interface types signatures.
///
/// This is used as an implementation detail in interpreting the ABI between
/// interface types and wasm types. Eventually this will be driven by interface
/// types adapters themselves, but for now the ABI of a function dictates what
/// instructions are fed in.
///
/// Types implementing `Bindgen` are incrementally fed `Instruction` values to
/// generate code for. Instructions operate like a stack machine where each
/// instruction has a list of inputs and a list of outputs (provided by the
/// `emit` function).
pub trait Bindgen {
    /// The intermediate type for fragments of code for this type.
    ///
    /// For most languages `String` is a suitable intermediate type.
    type Operand: Clone;

    /// Emit code to implement the given instruction.
    ///
    /// Each operand is given in `operands` and can be popped off if ownership
    /// is required. It's guaranteed that `operands` has the appropriate length
    /// for the `inst` given, as specified with [`Instruction`].
    ///
    /// Each result variable should be pushed onto `results`. This function must
    /// push the appropriate number of results or binding generation will panic.
    fn emit(
        &mut self,
        iface: &Interface,
        inst: &Instruction<'_>,
        operands: &mut Vec<Self::Operand>,
        results: &mut Vec<Self::Operand>,
    );

    /// Allocates temporary space in linear memory for the type `ty`.
    ///
    /// This is called when calling some wasm functions where a return pointer
    /// is needed. Only used for the `Abi::Preview1` ABI.
    ///
    /// Returns an `Operand` which has type `i32` and is the base of the typed
    /// allocation in memory.
    fn allocate_typed_space(&mut self, iface: &Interface, ty: TypeId) -> Self::Operand;

    /// Allocates temporary space in linear memory for a fixed number of `i64`
    /// values.
    ///
    /// This is only called in the `Abi::Canonical` ABI for when a function
    /// would otherwise have multiple results.
    ///
    /// Returns an `Operand` which has type `i32` and points to the base of the
    /// fixed-size-array allocation.
    fn i64_return_pointer_area(&mut self, amt: usize) -> Self::Operand;

    /// Enters a new block of code to generate code for.
    ///
    /// This is currently exclusively used for constructing variants. When a
    /// variant is constructed a block here will be pushed for each case of a
    /// variant, generating the code necessary to translate a variant case.
    ///
    /// Blocks are completed with `finish_block` below. It's expected that `emit`
    /// will always push code (if necessary) into the "current block", which is
    /// updated by calling this method and `finish_block` below.
    fn push_block(&mut self);

    /// Indicates to the code generator that a block is completed, and the
    /// `operand` specified was the resulting value of the block.
    ///
    /// This method will be used to compute the value of each arm of lifting a
    /// variant. The `operand` will be empty if the variant case didn't
    /// actually have any type associated with it. Otherwise it will contain
    /// the last value remaining on the stack representing the value
    /// associated with a variant's `case`.
    ///
    /// It's expected that this will resume code generation in the previous
    /// block before `push_block` was called. This must also save the results
    /// of the current block internally for instructions like `ResultLift` to
    /// use later.
    fn finish_block(&mut self, operand: &mut Vec<Self::Operand>);

    /// Returns size information that was previously calculated for all types.
    fn sizes(&self) -> &crate::sizealign::SizeAlign;

    /// Returns whether or not the specified element type is represented in a
    /// "canonical" form for lists. This dictates whether the `ListCanonLower`
    /// and `ListCanonLift` instructions are used or not.
    fn is_list_canonical(&self, iface: &Interface, element: &Type) -> bool;
}
858
impl Interface {
    /// Validates the parameters/results of a function are representable in its
    /// ABI.
    ///
    /// Returns an error string if they're not representable or returns `Ok` if
    /// they're indeed representable.
    pub fn validate_abi(&self, func: &Function) -> Result<(), String> {
        for (_, ty) in func.params.iter() {
            self.validate_abi_ty(func.abi, ty, true)?;
        }
        for (_, ty) in func.results.iter() {
            self.validate_abi_ty(func.abi, ty, false)?;
        }
        match func.abi {
            Abi::Preview1 => {
                // validated below...
            }
            Abi::Canonical => return Ok(()),
        }
        // Preview1 additionally restricts the shape of the return value.
        match func.results.len() {
            0 => Ok(()),
            1 => self.validate_preview1_return(&func.results[0].1),
            _ => Err("more than one result".to_string()),
        }
    }

    /// Checks that a single Preview1 return type is expressible: primitives
    /// pass through, flags records and pointers are accepted, and a variant
    /// must be `Result`-shaped with named payload types (the error payload
    /// additionally must be an enum).
    fn validate_preview1_return(&self, ty: &Type) -> Result<(), String> {
        let id = match ty {
            Type::Id(id) => *id,
            _ => return Ok(()),
        };
        match &self.types[id].kind {
            // Follow type aliases through to the underlying definition.
            TypeDefKind::Type(t) => self.validate_preview1_return(t),
            TypeDefKind::Variant(v) => {
                let (ok, err) = match v.as_expected() {
                    Some(pair) => pair,
                    None => return Err("invalid return type".to_string()),
                };
                if let Some(ty) = ok {
                    let id = match ty {
                        Type::Id(id) => *id,
                        _ => return Err("only named types are allowed in results".to_string()),
                    };
                    match &self.types[id].kind {
                        // A tuple payload is flattened: each field must be a
                        // named type.
                        TypeDefKind::Record(r) if r.is_tuple() => {
                            for field in r.fields.iter() {
                                self.validate_ty_named(&field.ty)?;
                            }
                        }
                        _ => {
                            self.validate_ty_named(ty)?;
                        }
                    }
                }

                if let Some(ty) = err {
                    let kind = self.validate_ty_named(ty)?;
                    if let TypeDefKind::Variant(v) = kind {
                        if v.is_enum() {
                            return Ok(());
                        }
                    }
                    return Err("invalid type in error payload of result".to_string());
                }
                Ok(())
            }
            TypeDefKind::Record(r) if r.is_flags() => Ok(()),
            TypeDefKind::Record(_)
            | TypeDefKind::List(_)
            | TypeDefKind::PushBuffer(_)
            | TypeDefKind::PullBuffer(_) => Err("invalid return type".to_string()),
            TypeDefKind::Pointer(_) | TypeDefKind::ConstPointer(_) => Ok(()),
        }
    }

    /// Resolves `ty` to its type definition, erroring unless it is a
    /// `Type::Id` pointing at a *named* type; returns the definition's kind.
    fn validate_ty_named(&self, ty: &Type) -> Result<&TypeDefKind, String> {
        let id = match ty {
            Type::Id(id) => *id,
            _ => return Err("only named types are allowed in results".to_string()),
        };
        let ty = &self.types[id];
        if ty.name.is_none() {
            return Err("only named types are allowed in results".to_string());
        }
        Ok(&ty.kind)
    }

    /// Recursively validates one type for use with `abi`; `param` indicates
    /// whether the type appears in parameter position (buffers are only legal
    /// there, and raw pointers are rejected entirely under `Canonical`).
    fn validate_abi_ty(&self, abi: Abi, ty: &Type, param: bool) -> Result<(), String> {
        let id = match ty {
            Type::Id(id) => *id,
            // Type::U8 { lang_c_char: true } => {
            //     if let Abi::Next = self {
            //         return Err("cannot use `(@witx char8)` in this ABI".to_string());
            //     }
            //     Ok(())
            // }
            // Type::U32 { lang_ptr_size: true } => {
            //     if let Abi::Next = self {
            //         return Err("cannot use `(@witx usize)` in this ABI".to_string());
            //     }
            //     Ok(())
            // }
            _ => return Ok(()),
        };
        match &self.types[id].kind {
            TypeDefKind::Type(t) => self.validate_abi_ty(abi, t, param),
            TypeDefKind::Record(r) => {
                for r in r.fields.iter() {
                    self.validate_abi_ty(abi, &r.ty, param)?;
                }
                Ok(())
            }
            TypeDefKind::Variant(v) => {
                for case in v.cases.iter() {
                    if let Some(ty) = &case.ty {
                        self.validate_abi_ty(abi, ty, param)?;
                    }
                }
                Ok(())
            }
            TypeDefKind::List(t) => self.validate_abi_ty(abi, t, param),
            TypeDefKind::Pointer(t) => {
                if let Abi::Canonical = abi {
                    return Err("cannot use `(@witx pointer)` in this ABI".to_string());
                }
                self.validate_abi_ty(abi, t, param)
            }
            TypeDefKind::ConstPointer(t) => {
                if let Abi::Canonical = abi {
                    return Err("cannot use `(@witx const_pointer)` in this ABI".to_string());
                }
                self.validate_abi_ty(abi, t, param)
            }
            TypeDefKind::PushBuffer(t) | TypeDefKind::PullBuffer(t) => {
                if !param {
                    return Err("cannot use buffers in the result position".to_string());
                }
                let param = match &self.types[id].kind {
                    TypeDefKind::PushBuffer(_) => false,
                    TypeDefKind::PullBuffer(_) => param,
                    _ => unreachable!(),
                };
                // If this is an output buffer then validate `t` as if it were a
                // result because the callee can't give us buffers back.
                self.validate_abi_ty(abi, t, param)
            }
        }
    }

    /// Get the WebAssembly type signature for this interface function
    ///
    /// The first entry returned is the list of parameters and the second entry
    /// is the list of results for the wasm function signature.
    pub fn wasm_signature(&self, variant: AbiVariant, func: &Function) -> WasmSignature {
        let mut params = Vec::new();
        let mut results = Vec::new();
        for (_, param) in func.params.iter() {
            if let (Abi::Preview1, Type::Id(id)) = (func.abi, param) {
                // Preview1 collapses variants and non-flags records to a
                // single i32 (they're passed by address — see `AddrOf` in
                // `lower`) rather than flattening them.
                match &self.types[*id].kind {
                    TypeDefKind::Variant(_) => {
                        params.push(WasmType::I32);
                        continue;
                    }
                    TypeDefKind::Record(r) if !r.is_flags() => {
                        params.push(WasmType::I32);
                        continue;
                    }
                    _ => {}
                }
            }
            self.push_wasm(func.abi, variant, param, &mut params);
        }

        for (_, result) in func.results.iter() {
            if let (Abi::Preview1, Type::Id(id)) = (func.abi, result) {
                if let TypeDefKind::Variant(v) = &self.types[*id].kind {
                    // Preview1 returns only the variant's tag directly.
                    results.push(v.tag.into());
                    if v.is_enum() {
                        continue;
                    }
                    // return pointer for payload, if any
                    if let Some(ty) = &v.cases[0].ty {
                        for _ in 0..self.preview1_num_types(ty) {
                            params.push(WasmType::I32);
                        }
                    }
                    continue;
                }
            }
            self.push_wasm(func.abi, variant, result, &mut results);
        }

        let mut retptr = None;
        if func.is_async {
            // Asynchronous functions never actually return anything since
            // they're all callback-based, meaning that we always put all the
            // results into a return pointer.
            //
            // Asynchronous exports take one extra parameter which is the
            // context used to pass to the `async_export_done` intrinsic, and
            // asynchronous imports take two extra parameters where the first is
            // a pointer into the function table and the second is a context
            // argument to pass to this function.
            match variant {
                AbiVariant::GuestExport => {
                    retptr = Some(mem::take(&mut results));
                    params.push(WasmType::I32);
                }
                AbiVariant::GuestImport => {
                    retptr = Some(mem::take(&mut results));
                    params.push(WasmType::I32);
                    params.push(WasmType::I32);
                }
            }
        } else {
            // Rust/C don't support multi-value well right now, so if a function
            // would have multiple results then instead truncate it. Imports take a
            // return pointer to write into and exports return a pointer they wrote
            // into.
            if results.len() > 1 {
                retptr = Some(mem::take(&mut results));
                match variant {
                    AbiVariant::GuestImport => {
                        params.push(WasmType::I32);
                    }
                    AbiVariant::GuestExport => {
                        results.push(WasmType::I32);
                    }
                }
            }
        }

        WasmSignature {
            params,
            results,
            retptr,
        }
    }

    /// Number of return-pointer slots the Preview1 ABI needs for `ty`: one
    /// per field for a tuple record, otherwise one.
    fn preview1_num_types(&self, ty: &Type) -> usize {
        match ty {
            Type::Id(id) => match &self.types[*id].kind {
                TypeDefKind::Record(r) if r.is_tuple() => r.fields.len(),
                _ => 1,
            },
            _ => 1,
        }
    }

    /// Appends the flattened core-wasm representation of `ty` onto `result`.
    fn push_wasm(&self, abi: Abi, variant: AbiVariant, ty: &Type, result: &mut Vec<WasmType>) {
        match ty {
            Type::S8
            | Type::U8
            | Type::S16
            | Type::U16
            | Type::S32
            | Type::U32
            | Type::Char
            | Type::Handle(_)
            | Type::CChar
            | Type::Usize => result.push(WasmType::I32),

            Type::U64 | Type::S64 => result.push(WasmType::I64),
            Type::F32 => result.push(WasmType::F32),
            Type::F64 => result.push(WasmType::F64),

            Type::Id(id) => match &self.types[*id].kind {
                TypeDefKind::Type(t) => self.push_wasm(abi, variant, t, result),

                TypeDefKind::Record(r) if r.is_flags() => match self.flags_repr(r) {
                    Some(int) => result.push(int.into()),
                    None => {
                        for _ in 0..r.num_i32s() {
                            result.push(WasmType::I32);
                        }
                    }
                },

                TypeDefKind::Record(r) => {
                    for field in r.fields.iter() {
                        self.push_wasm(abi, variant, &field.ty, result);
                    }
                }

                // Lists flatten to a pointer/length pair.
                TypeDefKind::List(_) => {
                    result.push(WasmType::I32);
                    result.push(WasmType::I32);
                }

                TypeDefKind::Pointer(_) | TypeDefKind::ConstPointer(_) => {
                    result.push(WasmType::I32);
                }

                TypeDefKind::PushBuffer(_) | TypeDefKind::PullBuffer(_) => {
                    result.push(WasmType::I32);
                    if variant == AbiVariant::GuestImport {
                        result.push(WasmType::I32);
                        result.push(WasmType::I32);
                    }
                }

                TypeDefKind::Variant(v) => {
                    result.push(v.tag.into());
                    let start = result.len();
                    let mut temp = Vec::new();

                    // Push each case's type onto a temporary vector, and then
                    // merge that vector into our final list starting at
                    // `start`. Note that this requires some degree of
                    // "unification" so we can handle things like `Result<i32,
                    // f32>` where that turns into `[i32 i32]` where the second
                    // `i32` might be the `f32` bitcasted.
                    for case in v.cases.iter() {
                        let ty = match &case.ty {
                            Some(ty) => ty,
                            None => continue,
                        };
                        self.push_wasm(abi, variant, ty, &mut temp);

                        for (i, ty) in temp.drain(..).enumerate() {
                            match result.get_mut(start + i) {
                                Some(prev) => *prev = unify(*prev, ty),
                                None => result.push(ty),
                            }
                        }
                    }
                }
            },
        }
    }

    /// Returns the smallest integer representation for a flags record, or
    /// `None` when it needs more than 64 bits.
    ///
    /// # Panics
    ///
    /// Panics if `record` is not a flags record.
    pub fn flags_repr(&self, record: &Record) -> Option<Int> {
        match record.kind {
            RecordKind::Flags(Some(hint)) => Some(hint),
            RecordKind::Flags(None) if record.fields.len() <= 8 => Some(Int::U8),
            RecordKind::Flags(None) if record.fields.len() <= 16 => Some(Int::U16),
            RecordKind::Flags(None) if record.fields.len() <= 32 => Some(Int::U32),
            RecordKind::Flags(None) if record.fields.len() <= 64 => Some(Int::U64),
            RecordKind::Flags(None) => None,
            _ => panic!("not a flags record"),
        }
    }

    /// Generates an abstract sequence of instructions which represents this
    /// function being adapted as an imported function.
    ///
    /// The instructions here, when executed, will emulate a language with
    /// interface types calling the concrete wasm implementation. The parameters
    /// for the returned instruction sequence are the language's own
    /// interface-types parameters. One instruction in the instruction stream
    /// will be a `Call` which represents calling the actual raw wasm function
    /// signature.
    ///
    /// This function is useful, for example, if you're building a language
    /// generator for WASI bindings. This will document how to translate
    /// language-specific values into the wasm types to call a WASI function,
    /// and it will also automatically convert the results of the WASI function
    /// back to a language-specific value.
    pub fn call(
        &self,
        variant: AbiVariant,
        lift_lower: LiftLower,
        func: &Function,
        bindgen: &mut impl Bindgen,
    ) {
        if Abi::Preview1 == func.abi {
            // The Preview1 ABI only works with WASI which is only intended
            // for use with these modes.
            if variant == AbiVariant::GuestExport {
                panic!("the preview1 ABI only supports import modes");
            }
        }
        Generator::new(self, func.abi, variant, lift_lower, bindgen).call(func);
    }
}
1234
/// Stack-machine state used by [`Interface::call`] to drive a [`Bindgen`]
/// implementation through the instruction stream for one function.
struct Generator<'a, B: Bindgen> {
    // ABI of the function currently being translated.
    abi: Abi,
    // Which side (import/export) of the call is being generated.
    variant: AbiVariant,
    // Direction of translation: lower args/lift results or vice versa.
    lift_lower: LiftLower,
    // The caller-provided code generator receiving instructions.
    bindgen: &'a mut B,
    // The interface owning the types referenced by the function.
    iface: &'a Interface,
    // Scratch buffer of operands handed to `Bindgen::emit`.
    operands: Vec<B::Operand>,
    // Scratch buffer of results received back from `Bindgen::emit`.
    results: Vec<B::Operand>,
    // The operand stack of the abstract stack machine.
    stack: Vec<B::Operand>,
    // Return-pointer operand(s) saved before a call and consumed afterwards
    // (see `call`).
    return_pointers: Vec<B::Operand>,
}
1246
1247impl<'a, B: Bindgen> Generator<'a, B> {
1248 fn new(
1249 iface: &'a Interface,
1250 abi: Abi,
1251 variant: AbiVariant,
1252 lift_lower: LiftLower,
1253 bindgen: &'a mut B,
1254 ) -> Generator<'a, B> {
1255 Generator {
1256 iface,
1257 abi,
1258 variant,
1259 lift_lower,
1260 bindgen,
1261 operands: Vec::new(),
1262 results: Vec::new(),
1263 stack: Vec::new(),
1264 return_pointers: Vec::new(),
1265 }
1266 }
1267
    /// Drives the full instruction stream for `func` through the bindings
    /// generator, in the direction given by `self.lift_lower`.
    ///
    /// Ends with the abstract operand stack empty (asserted); every value
    /// pushed along the way is consumed by an emitted instruction.
    fn call(&mut self, func: &Function) {
        let sig = self.iface.wasm_signature(self.variant, func);

        match self.lift_lower {
            LiftLower::LowerArgsLiftResults => {
                // Push all parameters for this function onto the stack, and
                // then batch-lower everything all at once.
                for nth in 0..func.params.len() {
                    self.emit(&Instruction::GetArg { nth });
                }
                self.lower_all(&func.params, None);

                if func.is_async {
                    // We emit custom instructions for async calls since they
                    // have different parameters synthesized by the bindings
                    // generator depending on what kind of call is being made.
                    //
                    // Note that no return pointer goop happens here because
                    // that's all done through parameters of callbacks instead.
                    let tys = sig.retptr.as_ref().unwrap();
                    match self.variant {
                        AbiVariant::GuestImport => {
                            // Two trailing params (function-table index and
                            // context) are synthesized by the generator.
                            assert_eq!(self.stack.len(), sig.params.len() - 2);
                            self.emit(&Instruction::CallWasmAsyncImport {
                                module: &self.iface.name,
                                name: &func.name,
                                params: &sig.params,
                                results: tys,
                            });
                        }
                        AbiVariant::GuestExport => {
                            // One trailing param (the context) is synthesized.
                            assert_eq!(self.stack.len(), sig.params.len() - 1);
                            self.emit(&Instruction::CallWasmAsyncExport {
                                module: &self.iface.name,
                                name: &func.name,
                                params: &sig.params,
                                results: tys,
                            });
                        }
                    }
                } else {
                    // If necessary we may need to prepare a return pointer for this
                    // ABI. The `Preview1` ABI has most return values returned
                    // through pointers, and the `Canonical` ABI returns more-than-one
                    // values through a return pointer.
                    if self.variant == AbiVariant::GuestImport {
                        self.prep_return_pointer(&sig, &func.results);
                    }

                    // Now that all the wasm args are prepared we can call the
                    // actual wasm function.
                    assert_eq!(self.stack.len(), sig.params.len());
                    self.emit(&Instruction::CallWasm {
                        module: &self.iface.name,
                        name: &func.name,
                        sig: &sig,
                    });

                    // In the `Canonical` ABI we model multiple return values by going
                    // through memory. Remove that indirection here by loading
                    // everything to simulate the function having many return values
                    // in our stack discipline.
                    if let Some(actual) = &sig.retptr {
                        if self.variant == AbiVariant::GuestImport {
                            assert_eq!(self.return_pointers.len(), 1);
                            self.stack.push(self.return_pointers.pop().unwrap());
                        }
                        self.load_retptr(actual);
                    }
                }

                // Batch-lift all result values now that all the function's return
                // values are on the stack.
                self.lift_all(&func.results);

                self.emit(&Instruction::Return {
                    func,
                    amt: func.results.len(),
                });
            }
            LiftLower::LiftArgsLowerResults => {
                // Use `GetArg` to push all relevant arguments onto the stack.
                // Note that we can't use the signature of this function
                // directly due to various conversions and return pointers, so
                // we need to somewhat manually calculate all the arguments
                // which are converted as interface types arguments below.
                let nargs = match self.abi {
                    Abi::Preview1 => {
                        // Every list parameter occupies two raw args
                        // (pointer/length), so count an extra slot per list.
                        func.params.len()
                            + func
                                .params
                                .iter()
                                .filter(|(_, t)| match t {
                                    Type::Id(id) => {
                                        matches!(&self.iface.types[*id].kind, TypeDefKind::List(_))
                                    }
                                    _ => false,
                                })
                                .count()
                    }
                    Abi::Canonical => {
                        // Skip the trailing synthesized params: async
                        // context/function-table args, or a sync import's
                        // return pointer.
                        let skip_cnt = if func.is_async {
                            match self.variant {
                                AbiVariant::GuestExport => 1,
                                AbiVariant::GuestImport => 2,
                            }
                        } else {
                            (sig.retptr.is_some() && self.variant == AbiVariant::GuestImport)
                                as usize
                        };
                        sig.params.len() - skip_cnt
                    }
                };
                for nth in 0..nargs {
                    self.emit(&Instruction::GetArg { nth });
                }

                // Once everything is on the stack we can lift all arguments
                // one-by-one into their interface-types equivalent.
                self.lift_all(&func.params);

                // ... and that allows us to call the interface types function
                self.emit(&Instruction::CallInterface {
                    module: &self.iface.name,
                    func,
                });

                // ... and at the end we lower everything back into return
                // values.
                self.lower_all(&func.results, Some(nargs));

                if func.is_async {
                    let tys = sig.retptr.as_ref().unwrap();
                    match self.variant {
                        AbiVariant::GuestImport => {
                            assert_eq!(self.stack.len(), tys.len());
                            let operands = mem::take(&mut self.stack);
                            // function index to call
                            self.emit(&Instruction::GetArg {
                                nth: sig.params.len() - 2,
                            });
                            // environment for the function
                            self.emit(&Instruction::GetArg {
                                nth: sig.params.len() - 1,
                            });
                            self.stack.extend(operands);
                            self.emit(&Instruction::ReturnAsyncImport {
                                func,
                                params: tys.len(),
                            });
                        }
                        AbiVariant::GuestExport => {
                            // Store all results, if any, into the general
                            // return pointer area.
                            let retptr = if !tys.is_empty() {
                                let op = self.bindgen.i64_return_pointer_area(tys.len());
                                self.stack.push(op);
                                Some(self.store_retptr(tys))
                            } else {
                                None
                            };

                            // Get the caller's context index.
                            self.emit(&Instruction::GetArg {
                                nth: sig.params.len() - 1,
                            });
                            match retptr {
                                Some(ptr) => self.stack.push(ptr),
                                None => self.emit(&Instruction::I32Const { val: 0 }),
                            }

                            // This will call the "done" function with the
                            // context/pointer argument
                            self.emit(&Instruction::ReturnAsyncExport { func });
                        }
                    }
                } else {
                    // Our ABI dictates that a list of returned types are
                    // returned through memories, so after we've got all the
                    // values on the stack perform all of the stores here.
                    if let Some(tys) = &sig.retptr {
                        match self.variant {
                            AbiVariant::GuestImport => {
                                // The caller passed the return pointer as the
                                // last raw argument.
                                self.emit(&Instruction::GetArg {
                                    nth: sig.params.len() - 1,
                                });
                            }
                            AbiVariant::GuestExport => {
                                // Exports allocate their own area and return
                                // the pointer to it.
                                let op = self.bindgen.i64_return_pointer_area(tys.len());
                                self.stack.push(op);
                            }
                        }
                        let retptr = self.store_retptr(tys);
                        if self.variant == AbiVariant::GuestExport {
                            self.stack.push(retptr);
                        }
                    }

                    self.emit(&Instruction::Return {
                        func,
                        amt: sig.results.len(),
                    });
                }
            }
        }

        assert!(
            self.stack.is_empty(),
            "stack has {} items remaining",
            self.stack.len()
        );
    }
1480
1481 fn load_retptr(&mut self, types: &[WasmType]) {
1482 let rp = self.stack.pop().unwrap();
1483 for (i, ty) in types.iter().enumerate() {
1484 self.stack.push(rp.clone());
1485 let offset = (i * 8) as i32;
1486 match ty {
1487 WasmType::I32 => self.emit(&Instruction::I32Load { offset }),
1488 WasmType::I64 => self.emit(&Instruction::I64Load { offset }),
1489 WasmType::F32 => self.emit(&Instruction::F32Load { offset }),
1490 WasmType::F64 => self.emit(&Instruction::F64Load { offset }),
1491 }
1492 }
1493 }
1494
    /// Assumes that the wasm values to create `tys` are all located on the
    /// stack.
    ///
    /// Inserts instructions necessary to lift those types into their
    /// interface types equivalent.
    fn lift_all(&mut self, tys: &[(String, Type)]) {
        let mut temp = Vec::new();
        // Walk the types in reverse so each can slice its raw wasm values off
        // the top of the stack; the groups are re-reversed below so lifting
        // still happens in declaration order.
        let operands = tys
            .iter()
            .rev()
            .map(|(_, ty)| {
                // Number of raw wasm values this interface type occupies.
                let ntys = match self.abi {
                    Abi::Preview1 => match ty {
                        Type::Id(id) => match &self.iface.types[*id].kind {
                            // Lists take two raw values (pointer/length).
                            TypeDefKind::List(_) => 2,
                            _ => 1,
                        },
                        _ => 1,
                    },
                    Abi::Canonical => {
                        temp.truncate(0);
                        self.iface.push_wasm(self.abi, self.variant, ty, &mut temp);
                        temp.len()
                    }
                };
                self.stack
                    .drain(self.stack.len() - ntys..)
                    .collect::<Vec<_>>()
            })
            .collect::<Vec<_>>();
        for (operands, (_, ty)) in operands.into_iter().rev().zip(tys) {
            self.stack.extend(operands);
            self.lift(ty);
        }
    }
1530
1531 /// Assumes that the value for `tys` is already on the stack, and then
1532 /// converts all of those values into their wasm types by lowering each
1533 /// argument in-order.
1534 fn lower_all(&mut self, tys: &[(String, Type)], mut nargs: Option<usize>) {
1535 let operands = self
1536 .stack
1537 .drain(self.stack.len() - tys.len()..)
1538 .collect::<Vec<_>>();
1539 for (operand, (_, ty)) in operands.into_iter().zip(tys) {
1540 self.stack.push(operand);
1541 self.lower(ty, nargs.as_mut());
1542 }
1543 }
1544
1545 /// Assumes `types.len()` values are on the stack and stores them all into
1546 /// the return pointer of this function, specified in the last argument.
1547 ///
1548 /// This is only used with `Abi::Next`.
1549 fn store_retptr(&mut self, types: &[WasmType]) -> B::Operand {
1550 let retptr = self.stack.pop().unwrap();
1551 for (i, ty) in types.iter().enumerate().rev() {
1552 self.stack.push(retptr.clone());
1553 let offset = (i * 8) as i32;
1554 match ty {
1555 WasmType::I32 => self.emit(&Instruction::I32Store { offset }),
1556 WasmType::I64 => self.emit(&Instruction::I64Store { offset }),
1557 WasmType::F32 => self.emit(&Instruction::F32Store { offset }),
1558 WasmType::F64 => self.emit(&Instruction::F64Store { offset }),
1559 }
1560 }
1561 retptr
1562 }
1563
    /// Convenience wrapper that emits a witx-specific instruction wrapped in
    /// `Instruction::Witx`.
    fn witx(&mut self, instr: &WitxInstruction<'_>) {
        self.emit(&Instruction::Witx { instr });
    }
1567
    /// Pops this instruction's operands off the abstract stack, hands them to
    /// the bindings generator, and pushes the generator's results back on.
    ///
    /// Panics if the stack holds fewer than `inst.operands_len()` values, or
    /// if the generator pushed a number of results different from
    /// `inst.results_len()`.
    fn emit(&mut self, inst: &Instruction<'_>) {
        // Reuse the scratch buffers across calls rather than allocating.
        self.operands.clear();
        self.results.clear();

        let operands_len = inst.operands_len();
        assert!(
            self.stack.len() >= operands_len,
            "not enough operands on stack for {:?}",
            inst
        );
        self.operands
            .extend(self.stack.drain((self.stack.len() - operands_len)..));
        self.results.reserve(inst.results_len());

        self.bindgen
            .emit(self.iface, inst, &mut self.operands, &mut self.results);

        assert_eq!(
            self.results.len(),
            inst.results_len(),
            "{:?} expected {} results, got {}",
            inst,
            inst.results_len(),
            self.results.len()
        );
        self.stack.append(&mut self.results);
    }
1595
    /// Forwards a block-open notification to the bindings generator (see
    /// `Bindgen::push_block`).
    fn push_block(&mut self) {
        self.bindgen.push_block();
    }
1599
1600 fn finish_block(&mut self, size: usize) {
1601 self.operands.clear();
1602 assert!(
1603 size <= self.stack.len(),
1604 "not enough operands on stack for finishing block",
1605 );
1606 self.operands
1607 .extend(self.stack.drain((self.stack.len() - size)..));
1608 self.bindgen.finish_block(&mut self.operands);
1609 }
1610
1611 fn lower(&mut self, ty: &Type, retptr: Option<&mut usize>) {
1612 use Instruction::*;
1613 use WitxInstruction::*;
1614
1615 match *ty {
1616 Type::S8 => self.emit(&I32FromS8),
1617 Type::U8 => self.emit(&I32FromU8),
1618 Type::CChar => self.emit(&I32FromChar8),
1619 Type::S16 => self.emit(&I32FromS16),
1620 Type::U16 => self.emit(&I32FromU16),
1621 Type::S32 => self.emit(&I32FromS32),
1622 Type::U32 => self.emit(&I32FromU32),
1623 Type::Usize => self.emit(&I32FromUsize),
1624 Type::S64 => self.emit(&I64FromS64),
1625 Type::U64 => self.emit(&I64FromU64),
1626 Type::Char => self.emit(&I32FromChar),
1627 Type::F32 => self.emit(&F32FromIf32),
1628 Type::F64 => self.emit(&F64FromIf64),
1629 Type::Handle(ty) => {
1630 let borrowed = match self.lift_lower {
1631 // This means that a return value is being lowered, which is
1632 // never borrowed.
1633 LiftLower::LiftArgsLowerResults => false,
1634 // There's one of three possible situations we're in:
1635 //
1636 // * The handle is defined by the wasm module itself. This
1637 // is the only actual possible scenario today due to how
1638 // witx is defined. In this situation the handle is owned
1639 // by the host and "proof of ownership" is being offered
1640 // and there's no need to relinquish ownership.
1641 //
1642 // * The handle is defined by the host, and it's passing it
1643 // to a wasm module. This should use an owned conversion.
1644 // This isn't expressible in today's `*.witx` format.
1645 //
1646 // * The handle is defined by neither the host or the wasm
                //   module. This means that the host is passing a
1648 // capability from another wasm module into this one,
1649 // meaning it's doing so by reference since the host is
1650 // retaining access to its own
1651 //
1652 // Note, again, only the first bullet here is possible
1653 // today, hence the hardcoded `true` value. We'll need to
1654 // refactor `witx` to expose the other possibilities.
1655 LiftLower::LowerArgsLiftResults => true,
1656 };
1657 if borrowed {
1658 self.emit(&I32FromBorrowedHandle { ty });
1659 } else {
1660 self.emit(&I32FromOwnedHandle { ty });
1661 }
1662 }
1663 Type::Id(id) => match &self.iface.types[id].kind {
1664 TypeDefKind::Type(t) => self.lower(t, retptr),
1665 TypeDefKind::Pointer(_) => self.witx(&I32FromPointer),
1666 TypeDefKind::ConstPointer(_) => self.witx(&I32FromConstPointer),
1667 TypeDefKind::List(element) => match self.abi {
1668 Abi::Preview1 => self.emit(&ListCanonLower {
1669 element,
1670 realloc: None,
1671 }),
1672 Abi::Canonical => {
1673 // Lowering parameters calling a wasm import means
1674 // we don't need to pass ownership, but we pass
1675 // ownership in all other cases.
1676 let realloc = match (self.variant, self.lift_lower) {
1677 (AbiVariant::GuestImport, LiftLower::LowerArgsLiftResults) => None,
1678 _ => Some("canonical_abi_realloc"),
1679 };
1680 if self.is_char(element)
1681 || self.bindgen.is_list_canonical(self.iface, element)
1682 {
1683 self.emit(&ListCanonLower { element, realloc });
1684 } else {
1685 self.push_block();
1686 self.emit(&IterElem { element });
1687 self.emit(&IterBasePointer);
1688 let addr = self.stack.pop().unwrap();
1689 self.write_to_memory(element, addr, 0);
1690 self.finish_block(0);
1691 self.emit(&ListLower { element, realloc });
1692 }
1693 }
1694 },
1695 TypeDefKind::PushBuffer(ty) | TypeDefKind::PullBuffer(ty) => {
1696 let push = matches!(&self.iface.types[id].kind, TypeDefKind::PushBuffer(_));
1697 self.translate_buffer(push, ty);
1698
1699 // Buffers are only used in the parameter position, so if we
1700 // are lowering them, then we had better be lowering args
1701 // and lifting results.
1702 assert!(self.lift_lower == LiftLower::LowerArgsLiftResults);
1703
1704 match self.variant {
1705 AbiVariant::GuestImport => {
1706 // When calling an imported function we're passing a raw view
1707 // into memory, and the adapter will convert it into something
1708 // else if necessary.
1709 self.emit(&BufferLowerPtrLen { push, ty });
1710 }
1711 AbiVariant::GuestExport => {
1712 // When calling an exported function we're passing a handle to
1713 // the caller's memory, and this part of the adapter is
1714 // responsible for converting it into something that's a handle.
1715 self.emit(&BufferLowerHandle { push, ty });
1716 }
1717 }
1718 }
1719 TypeDefKind::Record(record) if record.is_flags() => {
1720 match self.iface.flags_repr(record) {
1721 Some(Int::U64) => self.emit(&FlagsLower64 {
1722 record,
1723 ty: id,
1724 name: self.iface.types[id].name.as_ref().unwrap(),
1725 }),
1726 _ => self.emit(&FlagsLower {
1727 record,
1728 ty: id,
1729 name: self.iface.types[id].name.as_ref().unwrap(),
1730 }),
1731 }
1732 }
1733 TypeDefKind::Record(record) => match self.abi {
1734 Abi::Preview1 => self.witx(&AddrOf),
1735
1736 Abi::Canonical => {
1737 self.emit(&RecordLower {
1738 record,
1739 ty: id,
1740 name: self.iface.types[id].name.as_deref(),
1741 });
1742 let values = self
1743 .stack
1744 .drain(self.stack.len() - record.fields.len()..)
1745 .collect::<Vec<_>>();
1746 for (field, value) in record.fields.iter().zip(values) {
1747 self.stack.push(value);
1748 self.lower(&field.ty, None);
1749 }
1750 }
1751 },
1752
1753 // Variants in the return position of an import must be a Result in
1754 // the preview1 ABI and they're a bit special about where all the
1755 // pieces are.
1756 TypeDefKind::Variant(v)
1757 if self.abi == Abi::Preview1
1758 && self.variant == AbiVariant::GuestImport
1759 && self.lift_lower == LiftLower::LiftArgsLowerResults
1760 && !v.is_enum() =>
1761 {
1762 let retptr = retptr.unwrap();
1763 let (ok, err) = v.as_expected().unwrap();
1764 self.push_block();
1765 self.emit(&VariantPayloadName);
1766 let payload_name = self.stack.pop().unwrap();
1767 if let Some(ok) = ok {
1768 self.stack.push(payload_name);
1769 let store = |me: &mut Self, ty: &Type, n| {
1770 me.emit(&GetArg { nth: *retptr + n });
1771 let addr = me.stack.pop().unwrap();
1772 me.write_to_memory(ty, addr, 0);
1773 };
1774 match *ok {
1775 Type::Id(okid) => match &self.iface.types[okid].kind {
1776 TypeDefKind::Record(record) if record.is_tuple() => {
1777 self.emit(&RecordLower {
1778 record,
1779 ty: id,
1780 name: self.iface.types[okid].name.as_deref(),
1781 });
1782 // Note that `rev()` is used here due to the order
1783 // that tuples are pushed onto the stack and how we
1784 // consume the last item first from the stack.
1785 for (i, field) in record.fields.iter().enumerate().rev() {
1786 store(self, &field.ty, i);
1787 }
1788 }
1789 _ => store(self, ok, 0),
1790 },
1791 _ => store(self, ok, 0),
1792 }
1793 };
1794 self.emit(&I32Const { val: 0 });
1795 self.finish_block(1);
1796
1797 self.push_block();
1798 self.emit(&VariantPayloadName);
1799 let payload_name = self.stack.pop().unwrap();
1800 if let Some(ty) = err {
1801 self.stack.push(payload_name);
1802 self.lower(ty, None);
1803 }
1804 self.finish_block(1);
1805
1806 self.emit(&VariantLower {
1807 variant: v,
1808 ty: id,
1809 name: self.iface.types[id].name.as_deref(),
1810 results: &[WasmType::I32],
1811 });
1812 }
1813
1814 // Variant arguments in the Preview1 ABI are all passed by pointer
1815 TypeDefKind::Variant(v)
1816 if self.abi == Abi::Preview1
1817 && self.variant == AbiVariant::GuestImport
1818 && self.lift_lower == LiftLower::LowerArgsLiftResults
1819 && !v.is_enum() =>
1820 {
1821 self.witx(&AddrOf)
1822 }
1823
1824 TypeDefKind::Variant(v) => {
1825 let mut results = Vec::new();
1826 let mut temp = Vec::new();
1827 let mut casts = Vec::new();
1828 self.iface
1829 .push_wasm(self.abi, self.variant, ty, &mut results);
1830 for (i, case) in v.cases.iter().enumerate() {
1831 self.push_block();
1832 self.emit(&VariantPayloadName);
1833 let payload_name = self.stack.pop().unwrap();
1834 self.emit(&I32Const { val: i as i32 });
1835 let mut pushed = 1;
1836 if let Some(ty) = &case.ty {
1837 // Using the payload of this block we lower the type to
1838 // raw wasm values.
1839 self.stack.push(payload_name.clone());
1840 self.lower(ty, None);
1841
1842 // Determine the types of all the wasm values we just
1843 // pushed, and record how many. If we pushed too few
1844 // then we'll need to push some zeros after this.
1845 temp.truncate(0);
1846 self.iface.push_wasm(self.abi, self.variant, ty, &mut temp);
1847 pushed += temp.len();
1848
1849 // For all the types pushed we may need to insert some
1850 // bitcasts. This will go through and cast everything
1851 // to the right type to ensure all blocks produce the
1852 // same set of results.
1853 casts.truncate(0);
1854 for (actual, expected) in temp.iter().zip(&results[1..]) {
1855 casts.push(cast(*actual, *expected));
1856 }
1857 if casts.iter().any(|c| *c != Bitcast::None) {
1858 self.emit(&Bitcasts { casts: &casts });
1859 }
1860 }
1861
1862 // If we haven't pushed enough items in this block to match
1863 // what other variants are pushing then we need to push
1864 // some zeros.
1865 if pushed < results.len() {
1866 self.emit(&ConstZero {
1867 tys: &results[pushed..],
1868 });
1869 }
1870 self.finish_block(results.len());
1871 }
1872 self.emit(&VariantLower {
1873 variant: v,
1874 ty: id,
1875 results: &results,
1876 name: self.iface.types[id].name.as_deref(),
1877 });
1878 }
1879 },
1880 }
1881 }
1882
    /// Allocates scratch space for any return pointers this call needs and
    /// pushes the pointer operands onto the stack ahead of the call itself.
    ///
    /// The pointers are also remembered in `self.return_pointers` so the
    /// result-lifting code can read the callee's results back out of them.
    fn prep_return_pointer(&mut self, sig: &WasmSignature, results: &[(String, Type)]) {
        match self.abi {
            Abi::Preview1 => {
                // The preview1 ABI returns at most one value.
                assert!(results.len() <= 1);
                let ty = match results.get(0) {
                    Some((_, ty)) => ty,
                    None => return,
                };
                // Return pointers are only needed for `Result<T, _>`...
                let variant = match ty {
                    Type::Id(id) => match &self.iface.types[*id].kind {
                        TypeDefKind::Variant(v) => v,
                        _ => return,
                    },
                    _ => return,
                };
                // ... and only if `T` is actually present in `Result<T, _>`
                let ok = match &variant.cases[0].ty {
                    Some(Type::Id(id)) => *id,
                    _ => return,
                };

                // Tuples have each individual item in a separate return pointer while
                // all other types go through a singular return pointer.
                let iface = self.iface;
                let mut prep = |ty: TypeId| {
                    // Record the pointer for later result-lifting and also
                    // pass it as an argument to the callee.
                    let ptr = self.bindgen.allocate_typed_space(iface, ty);
                    self.return_pointers.push(ptr.clone());
                    self.stack.push(ptr);
                };
                match &iface.types[ok].kind {
                    TypeDefKind::Record(r) if r.is_tuple() => {
                        for field in r.fields.iter() {
                            match field.ty {
                                Type::Id(id) => prep(id),
                                _ => unreachable!(),
                            }
                        }
                    }
                    _ => prep(ok),
                }
            }
            // If a return pointer was automatically injected into this function
            // then we need to allocate a proper amount of space for it and then
            // add it to the stack to get passed to the callee.
            Abi::Canonical => {
                if let Some(results) = &sig.retptr {
                    let ptr = self.bindgen.i64_return_pointer_area(results.len());
                    self.return_pointers.push(ptr.clone());
                    self.stack.push(ptr);
                }
            }
        }
    }
1937
1938 /// Note that in general everything in this function is the opposite of the
1939 /// `lower` function above. This is intentional and should be kept this way!
1940 fn lift(&mut self, ty: &Type) {
1941 use Instruction::*;
1942 use WitxInstruction::*;
1943
1944 match *ty {
1945 Type::S8 => self.emit(&S8FromI32),
1946 Type::CChar => self.emit(&Char8FromI32),
1947 Type::U8 => self.emit(&U8FromI32),
1948 Type::S16 => self.emit(&S16FromI32),
1949 Type::U16 => self.emit(&U16FromI32),
1950 Type::S32 => self.emit(&S32FromI32),
1951 Type::Usize => self.emit(&UsizeFromI32),
1952 Type::U32 => self.emit(&U32FromI32),
1953 Type::S64 => self.emit(&S64FromI64),
1954 Type::U64 => self.emit(&U64FromI64),
1955 Type::Char => self.emit(&CharFromI32),
1956 Type::F32 => self.emit(&If32FromF32),
1957 Type::F64 => self.emit(&If64FromF64),
1958 Type::Handle(ty) => {
1959 // For more information on these values see the comments in
1960 // `lower` above.
1961 let borrowed = match self.lift_lower {
1962 LiftLower::LiftArgsLowerResults => true,
1963 LiftLower::LowerArgsLiftResults => false,
1964 };
1965 if borrowed {
1966 self.emit(&HandleBorrowedFromI32 { ty });
1967 } else {
1968 self.emit(&HandleOwnedFromI32 { ty });
1969 }
1970 }
1971 Type::Id(id) => match &self.iface.types[id].kind {
1972 TypeDefKind::Type(t) => self.lift(t),
1973 TypeDefKind::Pointer(ty) => self.witx(&PointerFromI32 { ty }),
1974 TypeDefKind::ConstPointer(ty) => self.witx(&ConstPointerFromI32 { ty }),
1975 TypeDefKind::List(element) => match self.abi {
1976 Abi::Preview1 => self.emit(&ListCanonLift {
1977 element,
1978 free: None,
1979 ty: id,
1980 }),
1981 Abi::Canonical => {
1982 // Lifting the arguments of a defined import means that, if
1983 // possible, the caller still retains ownership and we don't
1984 // free anything.
1985 let free = match (self.variant, self.lift_lower) {
1986 (AbiVariant::GuestImport, LiftLower::LiftArgsLowerResults) => None,
1987 _ => Some("canonical_abi_free"),
1988 };
1989 if self.is_char(element)
1990 || self.bindgen.is_list_canonical(self.iface, element)
1991 {
1992 self.emit(&ListCanonLift {
1993 element,
1994 free,
1995 ty: id,
1996 });
1997 } else {
1998 self.push_block();
1999 self.emit(&IterBasePointer);
2000 let addr = self.stack.pop().unwrap();
2001 self.read_from_memory(element, addr, 0);
2002 self.finish_block(1);
2003 self.emit(&ListLift {
2004 element,
2005 free,
2006 ty: id,
2007 });
2008 }
2009 }
2010 },
2011 TypeDefKind::PushBuffer(ty) | TypeDefKind::PullBuffer(ty) => {
2012 let push = matches!(&self.iface.types[id].kind, TypeDefKind::PushBuffer(_));
2013 self.translate_buffer(push, ty);
2014 // Buffers are only used in the parameter position, which
2015 // means lifting a buffer should only happen when we are
2016 // lifting arguments and lowering results.
2017 assert!(self.lift_lower == LiftLower::LiftArgsLowerResults);
2018
2019 match self.variant {
2020 AbiVariant::GuestImport => {
2021 // When calling a defined imported function then we're coming
2022 // from a pointer/length, and the embedding context will figure
2023 // out what to do with that pointer/length.
2024 self.emit(&BufferLiftPtrLen { push, ty })
2025 }
2026 AbiVariant::GuestExport => {
2027 // When calling an exported function we're given a handle to the
2028 // buffer, which is then interpreted in the calling context.
2029 self.emit(&BufferLiftHandle { push, ty })
2030 }
2031 }
2032 }
2033 TypeDefKind::Record(record) if record.is_flags() => {
2034 match self.iface.flags_repr(record) {
2035 Some(Int::U64) => self.emit(&FlagsLift64 {
2036 record,
2037 ty: id,
2038 name: self.iface.types[id].name.as_ref().unwrap(),
2039 }),
2040 _ => self.emit(&FlagsLift {
2041 record,
2042 ty: id,
2043 name: self.iface.types[id].name.as_ref().unwrap(),
2044 }),
2045 }
2046 }
2047 TypeDefKind::Record(record) => match self.abi {
2048 Abi::Preview1 => {
2049 let addr = self.stack.pop().unwrap();
2050 self.read_from_memory(ty, addr, 0);
2051 }
2052 Abi::Canonical => {
2053 let mut temp = Vec::new();
2054 self.iface.push_wasm(self.abi, self.variant, ty, &mut temp);
2055 let mut args = self
2056 .stack
2057 .drain(self.stack.len() - temp.len()..)
2058 .collect::<Vec<_>>();
2059 for field in record.fields.iter() {
2060 temp.truncate(0);
2061 self.iface
2062 .push_wasm(self.abi, self.variant, &field.ty, &mut temp);
2063 self.stack.extend(args.drain(..temp.len()));
2064 self.lift(&field.ty);
2065 }
2066 self.emit(&RecordLift {
2067 record,
2068 ty: id,
2069 name: self.iface.types[id].name.as_deref(),
2070 });
2071 }
2072 },
2073
2074 // Variants in the return position of an import must be a Result in
2075 // the preview1 ABI and they're a bit special about where all the
2076 // pieces are.
2077 TypeDefKind::Variant(v)
2078 if self.abi == Abi::Preview1
2079 && self.variant == AbiVariant::GuestImport
2080 && self.lift_lower == LiftLower::LowerArgsLiftResults
2081 && !v.is_enum() =>
2082 {
2083 let (ok, err) = v.as_expected().unwrap();
2084 self.push_block();
2085 if let Some(ok) = ok {
2086 let mut n = 0;
2087 let mut load = |me: &mut Self, ty: &Type| {
2088 me.read_from_memory(ty, me.return_pointers[n].clone(), 0);
2089 n += 1;
2090 };
2091 match *ok {
2092 Type::Id(okid) => match &self.iface.types[okid].kind {
2093 TypeDefKind::Record(record) if record.is_tuple() => {
2094 for field in record.fields.iter() {
2095 load(self, &field.ty);
2096 }
2097 self.emit(&RecordLift {
2098 record,
2099 ty: okid,
2100 name: self.iface.types[okid].name.as_deref(),
2101 });
2102 }
2103 _ => load(self, ok),
2104 },
2105 _ => load(self, ok),
2106 }
2107 }
2108 self.finish_block(ok.is_some() as usize);
2109
2110 self.push_block();
2111 if let Some(ty) = err {
2112 self.witx(&ReuseReturn);
2113 self.lift(ty);
2114 }
2115 self.finish_block(err.is_some() as usize);
2116
2117 self.emit(&VariantLift {
2118 variant: v,
2119 ty: id,
2120 name: self.iface.types[id].name.as_deref(),
2121 });
2122 }
2123
2124 // Variant arguments in the Preview1 ABI are all passed by pointer,
2125 // so we read them here.
2126 TypeDefKind::Variant(v)
2127 if self.abi == Abi::Preview1
2128 && self.variant == AbiVariant::GuestImport
2129 && self.lift_lower == LiftLower::LiftArgsLowerResults
2130 && !v.is_enum() =>
2131 {
2132 let addr = self.stack.pop().unwrap();
2133 self.read_from_memory(ty, addr, 0)
2134 }
2135
2136 TypeDefKind::Variant(v) => {
2137 let mut params = Vec::new();
2138 let mut temp = Vec::new();
2139 let mut casts = Vec::new();
2140 self.iface
2141 .push_wasm(self.abi, self.variant, ty, &mut params);
2142 let block_inputs = self
2143 .stack
2144 .drain(self.stack.len() + 1 - params.len()..)
2145 .collect::<Vec<_>>();
2146 for case in v.cases.iter() {
2147 self.push_block();
2148 if let Some(ty) = &case.ty {
2149 // Push only the values we need for this variant onto
2150 // the stack.
2151 temp.truncate(0);
2152 self.iface.push_wasm(self.abi, self.variant, ty, &mut temp);
2153 self.stack
2154 .extend(block_inputs[..temp.len()].iter().cloned());
2155
2156 // Cast all the types we have on the stack to the actual
2157 // types needed for this variant, if necessary.
2158 casts.truncate(0);
2159 for (actual, expected) in temp.iter().zip(¶ms[1..]) {
2160 casts.push(cast(*expected, *actual));
2161 }
2162 if casts.iter().any(|c| *c != Bitcast::None) {
2163 self.emit(&Bitcasts { casts: &casts });
2164 }
2165
2166 // Then recursively lift this variant's payload.
2167 self.lift(ty);
2168 }
2169 self.finish_block(case.ty.is_some() as usize);
2170 }
2171 self.emit(&VariantLift {
2172 variant: v,
2173 ty: id,
2174 name: self.iface.types[id].name.as_deref(),
2175 });
2176 }
2177 },
2178 }
2179 }
2180
    /// Lowers the interface value currently on top of the stack and stores
    /// its core wasm representation into linear memory at `addr + offset`,
    /// recursing through compound types field-by-field / case-by-case.
    fn write_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: i32) {
        use Instruction::*;

        match *ty {
            // Builtin types need different flavors of storage instructions
            // depending on the size of the value written.
            Type::U8 | Type::S8 | Type::CChar => {
                self.lower_and_emit(ty, addr, &I32Store8 { offset })
            }
            Type::U16 | Type::S16 => self.lower_and_emit(ty, addr, &I32Store16 { offset }),
            Type::U32 | Type::S32 | Type::Usize | Type::Handle(_) | Type::Char => {
                self.lower_and_emit(ty, addr, &I32Store { offset })
            }
            Type::U64 | Type::S64 => self.lower_and_emit(ty, addr, &I64Store { offset }),
            Type::F32 => self.lower_and_emit(ty, addr, &F32Store { offset }),
            Type::F64 => self.lower_and_emit(ty, addr, &F64Store { offset }),

            Type::Id(id) => match &self.iface.types[id].kind {
                // Aliases write as their underlying type.
                TypeDefKind::Type(t) => self.write_to_memory(t, addr, offset),
                TypeDefKind::Pointer(_) | TypeDefKind::ConstPointer(_) => {
                    self.lower_and_emit(ty, addr, &I32Store { offset });
                }

                // After lowering the list there's two i32 values on the stack
                // which we write into memory, writing the pointer into the low address
                // and the length into the high address.
                TypeDefKind::List(_) => {
                    self.lower(ty, None);
                    self.stack.push(addr.clone());
                    self.emit(&I32Store { offset: offset + 4 });
                    self.stack.push(addr);
                    self.emit(&I32Store { offset });
                }

                // Lower the buffer to its raw values, and then write the values
                // into memory, which may be more than one value depending on
                // our import/export direction.
                TypeDefKind::PushBuffer(_) | TypeDefKind::PullBuffer(_) => {
                    self.lower(ty, None);
                    if self.variant == AbiVariant::GuestImport {
                        self.stack.push(addr.clone());
                        self.emit(&I32Store { offset: offset + 8 });
                        self.stack.push(addr.clone());
                        self.emit(&I32Store { offset: offset + 4 });
                    }
                    self.stack.push(addr);
                    self.emit(&I32Store { offset });
                }

                TypeDefKind::Record(r) if r.is_flags() => {
                    self.lower(ty, None);
                    match self.iface.flags_repr(r) {
                        // Flags with a known integer representation store that
                        // single integer.
                        Some(repr) => {
                            self.stack.push(addr);
                            self.store_intrepr(offset, repr);
                        }
                        // Otherwise the flags were lowered to a run of i32s,
                        // stored consecutively 4 bytes apart.
                        None => {
                            for i in 0..r.num_i32s() {
                                self.stack.push(addr.clone());
                                self.emit(&I32Store {
                                    offset: offset + (i as i32) * 4,
                                });
                            }
                        }
                    }
                }

                // Decompose the record into its components and then write all
                // the components into memory one-by-one.
                TypeDefKind::Record(record) => {
                    self.emit(&RecordLower {
                        record,
                        ty: id,
                        name: self.iface.types[id].name.as_deref(),
                    });
                    let fields = self
                        .stack
                        .drain(self.stack.len() - record.fields.len()..)
                        .collect::<Vec<_>>();
                    for ((field_offset, op), field) in self
                        .bindgen
                        .sizes()
                        .field_offsets(record)
                        .into_iter()
                        .zip(fields)
                        .zip(&record.fields)
                    {
                        self.stack.push(op);
                        self.write_to_memory(
                            &field.ty,
                            addr.clone(),
                            offset + (field_offset as i32),
                        );
                    }
                }

                // Each case will get its own block, and the first item in each
                // case is writing the discriminant. After that if we have a
                // payload we write the payload after the discriminant, aligned up
                // to the type's alignment.
                TypeDefKind::Variant(v) => {
                    let payload_offset = offset + (self.bindgen.sizes().payload_offset(v) as i32);
                    for (i, case) in v.cases.iter().enumerate() {
                        self.push_block();
                        self.emit(&VariantPayloadName);
                        let payload_name = self.stack.pop().unwrap();
                        self.emit(&I32Const { val: i as i32 });
                        self.stack.push(addr.clone());
                        self.store_intrepr(offset, v.tag);
                        if let Some(ty) = &case.ty {
                            self.stack.push(payload_name.clone());
                            self.write_to_memory(ty, addr.clone(), payload_offset);
                        }
                        self.finish_block(0);
                    }
                    self.emit(&VariantLower {
                        variant: v,
                        ty: id,
                        results: &[],
                        name: self.iface.types[id].name.as_deref(),
                    });
                }
            },
        }
    }
2306
    /// Lowers `ty`, pushes the destination `addr` on top of the lowered
    /// value, then emits `instr` — the common shape of a store operation.
    fn lower_and_emit(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
        self.lower(ty, None);
        self.stack.push(addr);
        self.emit(instr);
    }
2312
    /// Reads an interface value of type `ty` out of linear memory starting at
    /// `addr + offset` and lifts the loaded raw values back to the interface
    /// level, recursing through compound types field-by-field / case-by-case.
    fn read_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: i32) {
        use Instruction::*;

        match *ty {
            // Scalars pick the load instruction matching their width;
            // sub-32-bit loads sign- or zero-extend into an i32.
            Type::U8 | Type::CChar => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
            Type::S8 => self.emit_and_lift(ty, addr, &I32Load8S { offset }),
            Type::U16 => self.emit_and_lift(ty, addr, &I32Load16U { offset }),
            Type::S16 => self.emit_and_lift(ty, addr, &I32Load16S { offset }),
            Type::U32 | Type::S32 | Type::Char | Type::Usize | Type::Handle(_) => {
                self.emit_and_lift(ty, addr, &I32Load { offset })
            }
            Type::U64 | Type::S64 => self.emit_and_lift(ty, addr, &I64Load { offset }),
            Type::F32 => self.emit_and_lift(ty, addr, &F32Load { offset }),
            Type::F64 => self.emit_and_lift(ty, addr, &F64Load { offset }),

            Type::Id(id) => match &self.iface.types[id].kind {
                // Aliases read as their underlying type.
                TypeDefKind::Type(t) => self.read_from_memory(t, addr, offset),
                TypeDefKind::Pointer(_) | TypeDefKind::ConstPointer(_) => {
                    self.emit_and_lift(ty, addr, &I32Load { offset })
                }

                // Read the pointer/len and then perform the standard lifting
                // process.
                TypeDefKind::List(_) => {
                    self.stack.push(addr.clone());
                    self.emit(&I32Load { offset });
                    self.stack.push(addr);
                    self.emit(&I32Load { offset: offset + 4 });
                    self.lift(ty);
                }

                // Read the requisite number of values from memory and then lift as
                // appropriate.
                TypeDefKind::PushBuffer(_) | TypeDefKind::PullBuffer(_) => {
                    self.stack.push(addr.clone());
                    self.emit(&I32Load { offset });
                    if self.variant == AbiVariant::GuestImport
                        && self.lift_lower == LiftLower::LiftArgsLowerResults
                    {
                        self.stack.push(addr.clone());
                        self.emit(&I32Load { offset: offset + 4 });
                        self.stack.push(addr);
                        self.emit(&I32Load { offset: offset + 8 });
                    }
                    self.lift(ty);
                }

                TypeDefKind::Record(r) if r.is_flags() => {
                    match self.iface.flags_repr(r) {
                        // Flags with a known integer representation load that
                        // single integer.
                        Some(repr) => {
                            self.stack.push(addr);
                            self.load_intrepr(offset, repr);
                        }
                        // Otherwise load the run of i32s the flags were stored
                        // as, 4 bytes apart.
                        None => {
                            for i in 0..r.num_i32s() {
                                self.stack.push(addr.clone());
                                self.emit(&I32Load {
                                    offset: offset + (i as i32) * 4,
                                });
                            }
                        }
                    }
                    self.lift(ty);
                }

                // Read and lift each field individually, adjusting the offset
                // as we go along, then aggregate all the fields into the
                // record.
                TypeDefKind::Record(record) => {
                    for (field_offset, field) in self
                        .bindgen
                        .sizes()
                        .field_offsets(record)
                        .into_iter()
                        .zip(&record.fields)
                    {
                        self.read_from_memory(
                            &field.ty,
                            addr.clone(),
                            offset + (field_offset as i32),
                        );
                    }
                    self.emit(&RecordLift {
                        record,
                        ty: id,
                        name: self.iface.types[id].name.as_deref(),
                    });
                }

                // Each case will get its own block, and we'll dispatch to the
                // right block based on the `i32.load` we initially perform. Each
                // individual block is pretty simple and just reads the payload type
                // from the corresponding offset if one is available.
                TypeDefKind::Variant(variant) => {
                    self.stack.push(addr.clone());
                    self.load_intrepr(offset, variant.tag);
                    let payload_offset =
                        offset + (self.bindgen.sizes().payload_offset(variant) as i32);
                    for case in variant.cases.iter() {
                        self.push_block();
                        if let Some(ty) = &case.ty {
                            self.read_from_memory(ty, addr.clone(), payload_offset);
                        }
                        self.finish_block(case.ty.is_some() as usize);
                    }
                    self.emit(&VariantLift {
                        variant,
                        ty: id,
                        name: self.iface.types[id].name.as_deref(),
                    });
                }
            },
        }
    }
2427
    /// Pushes `addr`, emits `instr`, then lifts the resulting raw value to
    /// type `ty` — the common shape of a load operation.
    fn emit_and_lift(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
        self.stack.push(addr);
        self.emit(instr);
        self.lift(ty);
    }
2433
2434 fn load_intrepr(&mut self, offset: i32, repr: Int) {
2435 self.emit(&match repr {
2436 Int::U64 => Instruction::I64Load { offset },
2437 Int::U32 => Instruction::I32Load { offset },
2438 Int::U16 => Instruction::I32Load16U { offset },
2439 Int::U8 => Instruction::I32Load8U { offset },
2440 });
2441 }
2442
2443 fn store_intrepr(&mut self, offset: i32, repr: Int) {
2444 self.emit(&match repr {
2445 Int::U64 => Instruction::I64Store { offset },
2446 Int::U32 => Instruction::I32Store { offset },
2447 Int::U16 => Instruction::I32Store16 { offset },
2448 Int::U8 => Instruction::I32Store8 { offset },
2449 });
2450 }
2451
    /// Emits the block that moves a push/pull buffer's element type across
    /// the boundary, either writing the payload into memory or reading it
    /// back out depending on the buffer kind and the call direction.
    fn translate_buffer(&mut self, push: bool, ty: &Type) {
        let do_write = match self.lift_lower {
            // For declared items, input/output is defined in the context of
            // what the callee will do. The callee will read input buffers,
            // meaning we write to them, and write to output buffers, meaning
            // we'll read from them.
            LiftLower::LowerArgsLiftResults => !push,

            // Defined items mirror declared imports because buffers are
            // defined from the caller's perspective, so we don't invert the
            // `out` setting like above.
            LiftLower::LiftArgsLowerResults => push,
        };
        self.emit(&Instruction::IterBasePointer);
        let addr = self.stack.pop().unwrap();

        self.push_block();

        // A write block consumes the payload and produces no results; a read
        // block produces the single value it lifted out of memory.
        let size = if do_write {
            self.emit(&Instruction::BufferPayloadName);
            self.write_to_memory(ty, addr, 0);
            0
        } else {
            self.read_from_memory(ty, addr, 0);
            1
        };

        self.finish_block(size);
    }
2481
2482 fn is_char(&self, ty: &Type) -> bool {
2483 match ty {
2484 Type::Char => true,
2485 Type::Id(id) => match &self.iface.types[*id].kind {
2486 TypeDefKind::Type(t) => self.is_char(t),
2487 _ => false,
2488 },
2489 _ => false,
2490 }
2491 }
2492}
2493
2494fn cast(from: WasmType, to: WasmType) -> Bitcast {
2495 use WasmType::*;
2496
2497 match (from, to) {
2498 (I32, I32) | (I64, I64) | (F32, F32) | (F64, F64) => Bitcast::None,
2499
2500 (I32, I64) => Bitcast::I32ToI64,
2501 (F32, F64) => Bitcast::F32ToF64,
2502 (F32, I32) => Bitcast::F32ToI32,
2503 (F64, I64) => Bitcast::F64ToI64,
2504
2505 (I64, I32) => Bitcast::I64ToI32,
2506 (F64, F32) => Bitcast::F64ToF32,
2507 (I32, F32) => Bitcast::I32ToF32,
2508 (I64, F64) => Bitcast::I64ToF64,
2509
2510 (F32, I64) => Bitcast::F32ToI64,
2511 (I64, F32) => Bitcast::I64ToF32,
2512 (F64, I32) | (I32, F64) => unreachable!(),
2513 }
2514}