witnext2/abi.rs
use crate::{
    Function, Int, Interface, Record, RecordKind, ResourceId, Type, TypeDefKind, TypeId, Variant,
};
use std::mem;

/// A raw WebAssembly signature with params and results.
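///
/// As an illustrative sketch (not generated output), an interface function
/// taking two `u32`s and returning a single `u64` would correspond roughly to:
///
/// ```text
/// WasmSignature {
///     params: vec![WasmType::I32, WasmType::I32],
///     results: vec![WasmType::I64],
///     retptr: None,
/// }
/// ```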
#[derive(Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct WasmSignature {
    /// The WebAssembly parameters of this function.
    pub params: Vec<WasmType>,
    /// The WebAssembly results of this function.
    pub results: Vec<WasmType>,
    /// The raw types, if needed, returned through the return pointer located
    /// in `params`.
    pub retptr: Option<Vec<WasmType>>,
}

/// Enumerates wasm types used by interface types when lowering/lifting.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum WasmType {
    I32,
    I64,
    F32,
    F64,
    // NOTE: we don't lower interface types to any other Wasm type,
    // e.g. externref, so we don't need to define them here.
}

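/// Computes a wasm type that can hold values of both `a` and `b`.
///
/// This is used when flattening variant payloads: each case's lowered types
/// are merged slot-by-slot into one list, bitcasting where needed. A few
/// illustrative results:
///
/// ```text
/// unify(I32, F32) == I32   // an f32 can be bitcast into an i32
/// unify(I32, F64) == I64   // 64 bits are needed to hold either value
/// unify(F32, F64) == F64
/// ```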
fn unify(a: WasmType, b: WasmType) -> WasmType {
    use WasmType::*;

    match (a, b) {
        (I64, _) | (_, I64) | (I32, F64) | (F64, I32) => I64,

        (I32, I32) | (I32, F32) | (F32, I32) => I32,

        (F32, F32) => F32,
        (F64, F64) | (F32, F64) | (F64, F32) => F64,
    }
}

impl From<Int> for WasmType {
    fn from(i: Int) -> WasmType {
        match i {
            Int::U8 | Int::U16 | Int::U32 => WasmType::I32,
            Int::U64 => WasmType::I64,
        }
    }
}

/// Possible ABIs for interface functions to have.
///
/// Note that this is a stopgap until we have more of interface types. Interface
/// types functions do not have ABIs, they have APIs. In the meantime, however,
/// we mandate ABIs to ensure we can all talk to each other.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum Abi {
    /// The only stable ABI currently; this is the historical WASI ABI since it
    /// was first created.
    ///
    /// Note that this ABI is notably limited in its return values: it can only
    /// return zero results or one `Result<T, enum>` lookalike.
    Preview1,

    /// In-progress "canonical ABI" as proposed for interface types.
    Canonical,
}

// Helper macro for defining instructions without having to have tons of
// exhaustive `match` statements to update
macro_rules! def_instruction {
    (
        $( #[$enum_attr:meta] )*
        pub enum $name:ident<'a> {
            $(
                $( #[$attr:meta] )*
                $variant:ident $( {
                    $($field:ident : $field_ty:ty $(,)* )*
                } )?
                :
                [$num_popped:expr] => [$num_pushed:expr],
            )*
        }
    ) => {
        $( #[$enum_attr] )*
        pub enum $name<'a> {
            $(
                $( #[$attr] )*
                $variant $( {
                    $(
                        $field : $field_ty,
                    )*
                } )? ,
            )*
        }

        impl $name<'_> {
            /// How many operands does this instruction pop from the stack?
            #[allow(unused_variables)]
            pub fn operands_len(&self) -> usize {
                match self {
                    $(
                        Self::$variant $( {
                            $(
                                $field,
                            )*
                        } )? => $num_popped,
                    )*
                }
            }

            /// How many results does this instruction push onto the stack?
            #[allow(unused_variables)]
            pub fn results_len(&self) -> usize {
                match self {
                    $(
                        Self::$variant $( {
                            $(
                                $field,
                            )*
                        } )? => $num_pushed,
                    )*
                }
            }
        }
    };
}
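
// As an illustration of the macro above: each instruction definition lists its
// stack effect as `[operands] => [results]`, and the generated
// `operands_len`/`results_len` methods simply return those expressions. For
// example, with the `Instruction` enum defined below:
//
//     assert_eq!(Instruction::I32Const { val: 0 }.operands_len(), 0);
//     assert_eq!(Instruction::I32Const { val: 0 }.results_len(), 1);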

def_instruction! {
    #[derive(Debug)]
    pub enum Instruction<'a> {
        /// Acquires the specified parameter and places it on the stack.
        /// Depending on the context this may refer to wasm parameters or
        /// interface types parameters.
        GetArg { nth: usize } : [0] => [1],

        // Integer const/manipulation instructions

        /// Pushes the constant `val` onto the stack.
        I32Const { val: i32 } : [0] => [1],
        /// Casts the top N items on the stack using the `Bitcast` enum
        /// provided. Consumes the same number of operands as it produces.
        Bitcasts { casts: &'a [Bitcast] } : [casts.len()] => [casts.len()],
        /// Pushes a constant zero onto the stack for each wasm type in `tys`.
        ConstZero { tys: &'a [WasmType] } : [0] => [tys.len()],

        // Memory load/store instructions

        /// Pops an `i32` from the stack and loads a little-endian `i32` from
        /// it, using the specified constant offset.
        I32Load { offset: i32 } : [1] => [1],
        /// Pops an `i32` from the stack and loads a little-endian `i8` from
        /// it, using the specified constant offset. The value loaded is
        /// zero-extended to 32 bits.
        I32Load8U { offset: i32 } : [1] => [1],
        /// Pops an `i32` from the stack and loads a little-endian `i8` from
        /// it, using the specified constant offset. The value loaded is
        /// sign-extended to 32 bits.
        I32Load8S { offset: i32 } : [1] => [1],
        /// Pops an `i32` from the stack and loads a little-endian `i16` from
        /// it, using the specified constant offset. The value loaded is
        /// zero-extended to 32 bits.
        I32Load16U { offset: i32 } : [1] => [1],
        /// Pops an `i32` from the stack and loads a little-endian `i16` from
        /// it, using the specified constant offset. The value loaded is
        /// sign-extended to 32 bits.
        I32Load16S { offset: i32 } : [1] => [1],
        /// Pops an `i32` from the stack and loads a little-endian `i64` from
        /// it, using the specified constant offset.
        I64Load { offset: i32 } : [1] => [1],
        /// Pops an `i32` from the stack and loads a little-endian `f32` from
        /// it, using the specified constant offset.
        F32Load { offset: i32 } : [1] => [1],
        /// Pops an `i32` from the stack and loads a little-endian `f64` from
        /// it, using the specified constant offset.
        F64Load { offset: i32 } : [1] => [1],

        /// Pops an `i32` address from the stack and then an `i32` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        I32Store { offset: i32 } : [2] => [0],
        /// Pops an `i32` address from the stack and then an `i32` value.
        /// Stores the low 8 bits of the value in little-endian at the pointer
        /// specified plus the constant `offset`.
        I32Store8 { offset: i32 } : [2] => [0],
        /// Pops an `i32` address from the stack and then an `i32` value.
        /// Stores the low 16 bits of the value in little-endian at the pointer
        /// specified plus the constant `offset`.
        I32Store16 { offset: i32 } : [2] => [0],
        /// Pops an `i32` address from the stack and then an `i64` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        I64Store { offset: i32 } : [2] => [0],
        /// Pops an `i32` address from the stack and then an `f32` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        F32Store { offset: i32 } : [2] => [0],
        /// Pops an `i32` address from the stack and then an `f64` value.
        /// Stores the value in little-endian at the pointer specified plus the
        /// constant `offset`.
        F64Store { offset: i32 } : [2] => [0],

        // Scalar lifting/lowering

        /// Converts an interface type `char` value to a 32-bit integer
        /// representing the unicode scalar value.
        I32FromChar : [1] => [1],
        /// Converts an interface type `u64` value to a wasm `i64`.
        I64FromU64 : [1] => [1],
        /// Converts an interface type `s64` value to a wasm `i64`.
        I64FromS64 : [1] => [1],
        /// Converts an interface type `u32` value to a wasm `i32`.
        I32FromU32 : [1] => [1],
        /// Converts an interface type `s32` value to a wasm `i32`.
        I32FromS32 : [1] => [1],
        /// Converts an interface type `u16` value to a wasm `i32`.
        I32FromU16 : [1] => [1],
        /// Converts an interface type `s16` value to a wasm `i32`.
        I32FromS16 : [1] => [1],
        /// Converts an interface type `u8` value to a wasm `i32`.
        I32FromU8 : [1] => [1],
        /// Converts an interface type `s8` value to a wasm `i32`.
        I32FromS8 : [1] => [1],
        /// Converts a language-specific `usize` value to a wasm `i32`.
        I32FromUsize : [1] => [1],
        /// Converts a language-specific C `char` value to a wasm `i32`.
        I32FromChar8 : [1] => [1],
        /// Converts an interface type `f32` value to a wasm `f32`.
        ///
        /// This may be a noop for some implementations, but it's here in case the
        /// native language representation of `f32` is different than the wasm
        /// representation of `f32`.
        F32FromIf32 : [1] => [1],
        /// Converts an interface type `f64` value to a wasm `f64`.
        ///
        /// This may be a noop for some implementations, but it's here in case the
        /// native language representation of `f64` is different than the wasm
        /// representation of `f64`.
        F64FromIf64 : [1] => [1],

        /// Converts a native wasm `i32` to an interface type `s8`.
        ///
        /// This will truncate the upper bits of the `i32`.
        S8FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u8`.
        ///
        /// This will truncate the upper bits of the `i32`.
        U8FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `s16`.
        ///
        /// This will truncate the upper bits of the `i32`.
        S16FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u16`.
        ///
        /// This will truncate the upper bits of the `i32`.
        U16FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `s32`.
        S32FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `u32`.
        U32FromI32 : [1] => [1],
        /// Converts a native wasm `i64` to an interface type `s64`.
        S64FromI64 : [1] => [1],
        /// Converts a native wasm `i64` to an interface type `u64`.
        U64FromI64 : [1] => [1],
        /// Converts a native wasm `i32` to an interface type `char`.
        ///
        /// It's safe to assume that the `i32` is indeed a valid unicode code point.
        CharFromI32 : [1] => [1],
        /// Converts a native wasm `f32` to an interface type `f32`.
        If32FromF32 : [1] => [1],
        /// Converts a native wasm `f64` to an interface type `f64`.
        If64FromF64 : [1] => [1],
        /// Converts a native wasm `i32` to a language-specific C `char`.
        ///
        /// This will truncate the upper bits of the `i32`.
        Char8FromI32 : [1] => [1],
        /// Converts a native wasm `i32` to a language-specific `usize`.
        UsizeFromI32 : [1] => [1],

        // Handles

        /// Converts a "borrowed" handle into a wasm `i32` value.
        ///
        /// > **Note**: this documentation is outdated and does not reflect the
        /// > current implementation of the canonical ABI. This needs to be
        /// > updated.
        ///
        /// A "borrowed" handle in this case means one where ownership is not
        /// being relinquished. This is only used for lowering interface types
        /// parameters.
        ///
        /// Situations that this is used are:
        ///
        /// * A wasm exported function receives, as a parameter, handles defined
        ///   by the wasm module itself. This is effectively proof of ownership
        ///   by an external caller (be it host or wasm module) and the
        ///   ownership of the handle still lies with the caller. The wasm
        ///   module is only receiving a reference to the resource.
        ///
        /// * A wasm module is calling an import with a handle defined by the
        ///   import's module. Sort of the converse of the previous case, this
        ///   means that the wasm module is handing out a reference to a
        ///   resource that it owns. The type in the wasm module, for example,
        ///   needs to reflect this.
        ///
        /// This instruction is not used for return values in either
        /// export/import positions.
        I32FromBorrowedHandle { ty: ResourceId } : [1] => [1],

        /// Converts an "owned" handle into a wasm `i32` value.
        ///
        /// > **Note**: this documentation is outdated and does not reflect the
        /// > current implementation of the canonical ABI. This needs to be
        /// > updated.
        ///
        /// This conversion is used for handle values which are crossing a
        /// module boundary for perhaps the first time. Some example cases of
        /// when this conversion is used are:
        ///
        /// * When a host defines a function to be imported, returned handles
        ///   use this instruction. Handles being returned to wasm are granting
        ///   a capability, which means that this new capability is typically
        ///   wrapped up in a new integer descriptor.
        ///
        /// * When a wasm module calls an imported function with a type defined
        ///   by itself, then it's granting a capability to the callee. This
        ///   means that the wasm module's type is being granted for the first
        ///   time, possibly, so it needs to be an owned value that's consumed.
        ///   Note that this doesn't actually happen with `*.witx` today due to
        ///   the lack of handle type imports.
        ///
        /// * When a wasm module export returns a handle defined within the
        ///   module, then it's similar to calling an imported function with
        ///   that handle. The capability is being granted to the caller of the
        ///   export, so the owned value is wrapped up in an `i32`.
        ///
        /// * When a host is calling a wasm module with a capability defined by
        ///   the host, it's similar to the host import returning a capability.
        ///   This would be granting the wasm module the capability, so an
        ///   owned version with a fresh handle is passed to the wasm module.
        ///   Note that this doesn't happen today with `*.witx` due to the lack
        ///   of handle type imports.
        ///
        /// Basically this instruction is used for handle->wasm conversions
        /// depending on the calling context and where the handle type in
        /// question was defined.
        I32FromOwnedHandle { ty: ResourceId } : [1] => [1],

        /// Converts a native wasm `i32` into an owned handle value.
        ///
        /// > **Note**: this documentation is outdated and does not reflect the
        /// > current implementation of the canonical ABI. This needs to be
        /// > updated.
        ///
        /// This is the converse of `I32FromOwnedHandle` and is used in similar
        /// situations:
        ///
        /// * A host definition of an import receives a handle defined in the
        ///   module itself.
        /// * A wasm module calling an import receives a handle defined by the
        ///   import.
        /// * A wasm module's export receives a handle defined by an external
        ///   module.
        /// * A host calling a wasm export receives a handle defined in the
        ///   module.
        ///
        /// Note that like `I32FromOwnedHandle` the first and third bullets
        /// above don't happen today because witx can't express type imports
        /// just yet.
        HandleOwnedFromI32 { ty: ResourceId } : [1] => [1],

        /// Converts a native wasm `i32` into a borrowed handle value.
        ///
        /// > **Note**: this documentation is outdated and does not reflect the
        /// > current implementation of the canonical ABI. This needs to be
        /// > updated.
        ///
        /// This is the converse of `I32FromBorrowedHandle` and is used in similar
        /// situations:
        ///
        /// * An exported wasm function receives, as a parameter, a handle that
        ///   is defined by the wasm module.
        /// * A host-defined imported function is receiving a handle, as a
        ///   parameter, that is defined by the host itself.
        HandleBorrowedFromI32 { ty: ResourceId } : [1] => [1],

        // lists

        /// Lowers a list where the element's layout in the native language is
        /// expected to match the canonical ABI definition of interface types.
        ///
        /// Pops a list value from the stack and pushes the pointer/length onto
        /// the stack. If `realloc` is set to `Some` then this is expected to
        /// *consume* the list which means that the data needs to be copied. An
        /// allocation/copy is expected when:
        ///
        /// * A host is calling a wasm export with a list (it needs to copy the
        ///   list into the callee's module, allocating space with `realloc`)
        /// * A wasm export is returning a list (it's expected to use `realloc`
        ///   to give ownership of the list to the caller)
        /// * A host is returning a list in an import definition (meaning that
        ///   space needs to be allocated in the caller with `realloc`)
        ///
        /// A copy does not happen (e.g. `realloc` is `None`) when:
        ///
        /// * A wasm module calls an import with the list. In this situation
        ///   it's expected the caller will know how to access this module's
        ///   memory (e.g. the host has raw access or wasm-to-wasm communication
        ///   would copy the list).
        ///
        /// If `realloc` is `Some` then the adapter is not responsible for
        /// cleaning up this list because the other end is receiving the
        /// allocation. If `realloc` is `None` then the adapter is responsible
        /// for cleaning up any temporary allocation it created, if any.
        ListCanonLower {
            element: &'a Type,
            realloc: Option<&'a str>,
        } : [1] => [2],

        /// Lowers a list where the element's layout in the native language is
        /// not expected to match the canonical ABI definition of interface
        /// types.
        ///
        /// Pops a list value from the stack and pushes the pointer/length onto
        /// the stack. This operation also pops a block from the block stack
        /// which is used as the iteration body of writing each element of the
        /// list consumed.
        ///
        /// The `realloc` field here behaves the same way as `ListCanonLower`.
        /// It's only set to `None` when a wasm module calls a declared import.
        /// Otherwise lowering in other contexts requires allocating memory for
        /// the receiver to own.
        ListLower {
            element: &'a Type,
            realloc: Option<&'a str>,
        } : [1] => [2],

        /// Lifts a list which has a canonical representation into an interface
        /// types value.
        ///
        /// The term "canonical" representation here means that the
        /// representation of the interface types value in the native language
        /// exactly matches the canonical ABI definition of the type.
        ///
        /// This will consume two `i32` values from the stack, a pointer and a
        /// length, and then produces an interface value list. If the `free`
        /// field is set to `Some` then the pointer/length should be considered
        /// an owned allocation and needs to be deallocated by the receiver. If
        /// it is set to `None` then a view is provided but it does not need to
        /// be deallocated.
        ///
        /// The `free` field is set to `Some` in similar situations as described
        /// by `ListCanonLower`. If `free` is `Some` then the memory must be
        /// deallocated after the lifted list is done being consumed. If it is
        /// `None` then the receiver of the lifted list does not own the memory
        /// and must leave the memory as-is.
        ListCanonLift {
            element: &'a Type,
            free: Option<&'a str>,
            ty: TypeId,
        } : [2] => [1],

        /// Lifts a list into an interface types value.
        ///
        /// This will consume two `i32` values from the stack, a pointer and a
        /// length, and then produces an interface value list. Note that the
        /// pointer/length popped are **owned** and need to be deallocated with
        /// the wasm `free` function when the list is no longer needed.
        ///
        /// This will also pop a block from the block stack which is how to
        /// read each individual element from the list.
        ListLift {
            element: &'a Type,
            free: Option<&'a str>,
            ty: TypeId,
        } : [2] => [1],

        /// Pushes an operand onto the stack representing the list item from
        /// each iteration of the list.
        ///
        /// This is only used inside of blocks related to lowering lists.
        IterElem { element: &'a Type } : [0] => [1],

        /// Pushes an operand onto the stack representing the base pointer of
        /// the next element in a list.
        ///
        /// This is used for both lifting and lowering lists.
        IterBasePointer : [0] => [1],

        // buffers

        /// Pops a buffer value, pushes the pointer/length of where it points
        /// to in memory.
        BufferLowerPtrLen { push: bool, ty: &'a Type } : [1] => [3],
        /// Pops a buffer value, pushes an integer handle for the buffer.
        BufferLowerHandle { push: bool, ty: &'a Type } : [1] => [1],
        /// Pops a ptr/len, pushes a buffer wrapping that ptr/len of the memory
        /// from the origin module.
        BufferLiftPtrLen { push: bool, ty: &'a Type } : [3] => [1],
        /// Pops an i32, pushes a buffer wrapping that i32 handle.
        BufferLiftHandle { push: bool, ty: &'a Type } : [1] => [1],

        // records

        /// Pops a record value off the stack, decomposes the record to all of
        /// its fields, and then pushes the fields onto the stack.
        RecordLower {
            record: &'a Record,
            name: Option<&'a str>,
            ty: TypeId,
        } : [1] => [record.fields.len()],

        /// Pops all fields for a record off the stack and then composes them
        /// into a record.
        RecordLift {
            record: &'a Record,
            name: Option<&'a str>,
            ty: TypeId,
        } : [record.fields.len()] => [1],

        /// Converts a language-specific record-of-bools to a list of `i32`.
        FlagsLower {
            record: &'a Record,
            name: &'a str,
            ty: TypeId,
        } : [1] => [record.num_i32s()],
        FlagsLower64 {
            record: &'a Record,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],
        /// Converts a list of native wasm `i32` to a language-specific
        /// record-of-bools.
        FlagsLift {
            record: &'a Record,
            name: &'a str,
            ty: TypeId,
        } : [record.num_i32s()] => [1],
        FlagsLift64 {
            record: &'a Record,
            name: &'a str,
            ty: TypeId,
        } : [1] => [1],

        // variants

        /// This is a special instruction used by the `VariantLower`
        /// instruction to determine the name of the payload, if present, to use
        /// within each block.
        ///
        /// Each sub-block will have this be the first instruction, and if it
        /// lowers a payload it will expect something bound to this name.
        VariantPayloadName : [0] => [1],

        /// TODO
        BufferPayloadName : [0] => [1],

        /// Pops a variant off the stack as well as `variant.cases.len()` blocks
        /// from the code generator. Uses each of those blocks and the value
        /// from the stack to produce `results.len()` items.
        VariantLower {
            variant: &'a Variant,
            name: Option<&'a str>,
            ty: TypeId,
            results: &'a [WasmType],
        } : [1] => [results.len()],

        /// Pops an `i32` off the stack as well as `variant.cases.len()` blocks
        /// from the code generator. Uses each of those blocks and the value
        /// from the stack to produce a final variant.
        VariantLift {
            variant: &'a Variant,
            name: Option<&'a str>,
            ty: TypeId,
        } : [1] => [1],

        // calling/control flow

        /// Represents a call to a raw WebAssembly API. The module/name are
        /// provided inline as well as the types if necessary.
        CallWasm {
            module: &'a str,
            name: &'a str,
            sig: &'a WasmSignature,
        } : [sig.params.len()] => [sig.results.len()],

        /// Same as `CallWasm`, except the dual where an interface is being
        /// called rather than a raw wasm function.
        CallInterface {
            module: &'a str,
            func: &'a Function,
        } : [func.params.len()] => [func.results.len()],

        /// Returns `amt` values on the stack. This is always the last
        /// instruction.
        Return { amt: usize, func: &'a Function } : [*amt] => [0],

        // ...

        /// An instruction from an extended instruction set that's specific to
        /// `*.witx` and the "Preview1" ABI.
        Witx {
            instr: &'a WitxInstruction<'a>,
        } : [instr.operands_len()] => [instr.results_len()],
    }
}

#[derive(Debug, PartialEq)]
pub enum Bitcast {
    // Upcasts
    F32ToF64,
    F32ToI32,
    F64ToI64,
    I32ToI64,
    F32ToI64,

    // Downcasts
    F64ToF32,
    I32ToF32,
    I64ToF64,
    I64ToI32,
    I64ToF32,

    None,
}

def_instruction! {
    #[derive(Debug)]
    pub enum WitxInstruction<'a> {
        /// Takes the value off the top of the stack and writes it into linear
        /// memory. Pushes the address in linear memory as an `i32`.
        AddrOf : [1] => [1],

        /// Converts a language-specific pointer value to a wasm `i32`.
        I32FromPointer : [1] => [1],
        /// Converts a language-specific pointer value to a wasm `i32`.
        I32FromConstPointer : [1] => [1],
        /// Converts a native wasm `i32` to a language-specific pointer.
        PointerFromI32 { ty: &'a Type } : [1] => [1],
        /// Converts a native wasm `i32` to a language-specific pointer.
        ConstPointerFromI32 { ty: &'a Type } : [1] => [1],

        /// This is a special instruction specifically for the original ABI of
        /// WASI. The raw return `i32` of a function is re-pushed onto the
        /// stack for reuse.
        ReuseReturn : [0] => [1],
    }
}

/// Whether the glue code surrounding a call is lifting arguments and lowering
/// results or vice versa.
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum LiftLower {
    /// When the glue code lifts arguments and lowers results.
    ///
    /// ```text
    /// Wasm --lift-args--> SourceLanguage; call; SourceLanguage --lower-results--> Wasm
    /// ```
    LiftArgsLowerResults,
    /// When the glue code lowers arguments and lifts results.
    ///
    /// ```text
    /// SourceLanguage --lower-args--> Wasm; call; Wasm --lift-results--> SourceLanguage
    /// ```
    LowerArgsLiftResults,
}

/// Whether we are generating glue code to call an import or an export.
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum Direction {
    /// We are generating glue code to call an import.
    Import,
    /// We are generating glue code to call an export.
    Export,
}

/// Trait for language implementors to use to generate glue code between native
/// WebAssembly signatures and interface types signatures.
///
/// This is used as an implementation detail in interpreting the ABI between
/// interface types and wasm types. Eventually this will be driven by interface
/// types adapters themselves, but for now the ABI of a function dictates what
/// instructions are fed in.
///
/// Types implementing `Bindgen` are incrementally fed `Instruction` values to
/// generate code for. Instructions operate like a stack machine where each
/// instruction has a list of inputs and a list of outputs (provided by the
/// `emit` function).
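///
/// As a minimal sketch (assuming a `String`-based generator; not a complete or
/// official implementation), handling one instruction inside `emit` might look
/// like:
///
/// ```ignore
/// match inst {
///     // one operand in, one result out, per the instruction's definition
///     Instruction::I32FromU32 => results.push(format!("({}) as i32", operands[0])),
///     _ => unimplemented!(),
/// }
/// ```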
pub trait Bindgen {
    /// The intermediate type for fragments of code for this type.
    ///
    /// For most languages `String` is a suitable intermediate type.
    type Operand: Clone;

    /// Emit code to implement the given instruction.
    ///
    /// Each operand is given in `operands` and can be popped off if ownership
    /// is required. It's guaranteed that `operands` has the appropriate length
    /// for the `inst` given, as specified with [`Instruction`].
    ///
    /// Each result variable should be pushed onto `results`. This function must
    /// push the appropriate number of results or binding generation will panic.
    fn emit(
        &mut self,
        iface: &Interface,
        inst: &Instruction<'_>,
        operands: &mut Vec<Self::Operand>,
        results: &mut Vec<Self::Operand>,
    );

    /// Allocates temporary space in linear memory for the type `ty`.
    ///
    /// This is called when calling some wasm functions where a return pointer
    /// is needed. Only used for the `Abi::Preview1` ABI.
    ///
    /// Returns an `Operand` which has type `i32` and is the base of the typed
    /// allocation in memory.
    fn allocate_typed_space(&mut self, iface: &Interface, ty: TypeId) -> Self::Operand;

    /// Allocates temporary space in linear memory for a fixed number of `i64`
    /// values.
    ///
    /// This is only called in the `Abi::Canonical` ABI for when a function
    /// would otherwise have multiple results.
    ///
    /// Returns an `Operand` which has type `i32` and points to the base of the
    /// fixed-size-array allocation.
    fn i64_return_pointer_area(&mut self, amt: usize) -> Self::Operand;

    /// Enters a new block of code to generate code for.
    ///
    /// This is currently exclusively used for constructing variants. When a
    /// variant is constructed a block here will be pushed for each case of a
    /// variant, generating the code necessary to translate a variant case.
    ///
    /// Blocks are completed with `finish_block` below. It's expected that `emit`
    /// will always push code (if necessary) into the "current block", which is
    /// updated by calling this method and `finish_block` below.
    fn push_block(&mut self);

    /// Indicates to the code generator that a block is completed, and the
    /// `operand` specified was the resulting value of the block.
    ///
    /// This method will be used to compute the value of each arm of lifting a
    /// variant. The `operand` will be `None` if the variant case didn't
    /// actually have any type associated with it. Otherwise it will be `Some`
    /// as the last value remaining on the stack representing the value
    /// associated with a variant's `case`.
    ///
    /// It's expected that this will resume code generation in the previous
    /// block before `push_block` was called. This must also save the results
    /// of the current block internally for instructions like `VariantLift` to
    /// use later.
    fn finish_block(&mut self, operand: &mut Vec<Self::Operand>);

    /// Returns size information that was previously calculated for all types.
    fn sizes(&self) -> &crate::sizealign::SizeAlign;

    /// Returns whether or not the specified element type is represented in a
    /// "canonical" form for lists. This dictates whether the `ListCanonLower`
    /// and `ListCanonLift` instructions are used or not.
    fn is_list_canonical(&self, iface: &Interface, element: &Type) -> bool;
}

impl Interface {
    /// Validates that the parameters/results of a function are representable
    /// in its ABI.
    ///
    /// Returns an error string if they're not representable or returns `Ok` if
    /// they're indeed representable.
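    ///
    /// For example (a sketch of the behavior), a `Preview1` function declared
    /// with two results is rejected here with a "more than one result" error.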
    pub fn validate_abi(&self, func: &Function) -> Result<(), String> {
        for (_, ty) in func.params.iter() {
            self.validate_abi_ty(func.abi, ty, true)?;
        }
        for (_, ty) in func.results.iter() {
            self.validate_abi_ty(func.abi, ty, false)?;
        }
        match func.abi {
            Abi::Preview1 => {
                // validated below...
            }
            Abi::Canonical => return Ok(()),
        }
        match func.results.len() {
            0 => Ok(()),
            1 => self.validate_preview1_return(&func.results[0].1),
            _ => Err("more than one result".to_string()),
        }
    }

    fn validate_preview1_return(&self, ty: &Type) -> Result<(), String> {
        let id = match ty {
            Type::Id(id) => *id,
            _ => return Ok(()),
        };
        match &self.types[id].kind {
            TypeDefKind::Type(t) => self.validate_preview1_return(t),
            TypeDefKind::Variant(v) => {
                let (ok, err) = match v.as_expected() {
                    Some(pair) => pair,
                    None => return Err("invalid return type".to_string()),
                };
                if let Some(ty) = ok {
                    let id = match ty {
                        Type::Id(id) => *id,
                        _ => return Err("only named types are allowed in results".to_string()),
                    };
                    match &self.types[id].kind {
                        TypeDefKind::Record(r) if r.is_tuple() => {
                            for field in r.fields.iter() {
                                self.validate_ty_named(&field.ty)?;
                            }
                        }
                        _ => {
                            self.validate_ty_named(ty)?;
                        }
                    }
                }

                if let Some(ty) = err {
                    let kind = self.validate_ty_named(ty)?;
                    if let TypeDefKind::Variant(v) = kind {
                        if v.is_enum() {
                            return Ok(());
                        }
                    }
                    return Err("invalid type in error payload of result".to_string());
                }
                Ok(())
            }
            TypeDefKind::Record(r) if r.is_flags() => Ok(()),
            TypeDefKind::Record(_)
            | TypeDefKind::List(_)
            | TypeDefKind::PushBuffer(_)
            | TypeDefKind::PullBuffer(_) => Err("invalid return type".to_string()),
            TypeDefKind::Pointer(_) | TypeDefKind::ConstPointer(_) => Ok(()),
        }
    }

    fn validate_ty_named(&self, ty: &Type) -> Result<&TypeDefKind, String> {
        let id = match ty {
            Type::Id(id) => *id,
            _ => return Err("only named types are allowed in results".to_string()),
        };
        let ty = &self.types[id];
        if ty.name.is_none() {
            return Err("only named types are allowed in results".to_string());
        }
        Ok(&ty.kind)
    }

    fn validate_abi_ty(&self, abi: Abi, ty: &Type, param: bool) -> Result<(), String> {
        let id = match ty {
            Type::Id(id) => *id,
            // Type::U8 { lang_c_char: true } => {
            //     if let Abi::Next = self {
            //         return Err("cannot use `(@witx char8)` in this ABI".to_string());
            //     }
            //     Ok(())
            // }
            // Type::U32 { lang_ptr_size: true } => {
            //     if let Abi::Next = self {
            //         return Err("cannot use `(@witx usize)` in this ABI".to_string());
            //     }
            //     Ok(())
            // }
            _ => return Ok(()),
        };
        match &self.types[id].kind {
            TypeDefKind::Type(t) => self.validate_abi_ty(abi, t, param),
            TypeDefKind::Record(r) => {
                for r in r.fields.iter() {
                    self.validate_abi_ty(abi, &r.ty, param)?;
                }
                Ok(())
            }
            TypeDefKind::Variant(v) => {
                for case in v.cases.iter() {
                    if let Some(ty) = &case.ty {
                        self.validate_abi_ty(abi, ty, param)?;
                    }
                }
                Ok(())
            }
            TypeDefKind::List(t) => self.validate_abi_ty(abi, t, param),
            TypeDefKind::Pointer(t) => {
                if let Abi::Canonical = abi {
                    return Err("cannot use `(@witx pointer)` in this ABI".to_string());
                }
                self.validate_abi_ty(abi, t, param)
            }
            TypeDefKind::ConstPointer(t) => {
                if let Abi::Canonical = abi {
                    return Err("cannot use `(@witx const_pointer)` in this ABI".to_string());
                }
                self.validate_abi_ty(abi, t, param)
            }
            TypeDefKind::PushBuffer(t) | TypeDefKind::PullBuffer(t) => {
                if !param {
                    return Err("cannot use buffers in the result position".to_string());
                }
                let param = match &self.types[id].kind {
                    TypeDefKind::PushBuffer(_) => false,
                    TypeDefKind::PullBuffer(_) => param,
                    _ => unreachable!(),
                };
                // If this is an output buffer then validate `t` as if it were a
                // result because the callee can't give us buffers back.
                self.validate_abi_ty(abi, t, param)
            }
        }
    }

    /// Get the WebAssembly type signature for this interface function.
    ///
    /// The first entry returned is the list of parameters and the second entry
    /// is the list of results for the wasm function signature.
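    ///
    /// As an illustrative sketch: in the `Canonical` ABI an imported function
    /// with more than one result, e.g. returning `(u32, u64)`, has its results
    /// moved behind a return pointer, roughly:
    ///
    /// ```text
    /// params:  [ ...lowered args..., I32 /* return pointer */ ]
    /// results: [ ]
    /// retptr:  Some([ I32, I64 ])
    /// ```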
    pub fn wasm_signature(&self, dir: Direction, func: &Function) -> WasmSignature {
        let mut params = Vec::new();
        let mut results = Vec::new();
        for (_, param) in func.params.iter() {
            if let (Abi::Preview1, Type::Id(id)) = (func.abi, param) {
                match &self.types[*id].kind {
                    TypeDefKind::Variant(_) => {
                        params.push(WasmType::I32);
                        continue;
                    }
                    TypeDefKind::Record(r) if !r.is_flags() => {
                        params.push(WasmType::I32);
                        continue;
                    }
                    _ => {}
                }
            }
            self.push_wasm(func.abi, dir, param, &mut params);
        }

        for (_, result) in func.results.iter() {
            if let (Abi::Preview1, Type::Id(id)) = (func.abi, result) {
                match &self.types[*id].kind {
                    TypeDefKind::Variant(v) => {
                        results.push(v.tag.into());
                        if v.is_enum() {
                            continue;
                        }
                        // return pointer for the payload, if any
                        if let Some(ty) = &v.cases[0].ty {
                            for _ in 0..self.preview1_num_types(ty) {
                                params.push(WasmType::I32);
                            }
                        }
                        continue;
                    }
                    _ => {}
                }
            }
            self.push_wasm(func.abi, dir, result, &mut results);
        }

        // Rust/C don't support multi-value well right now, so if a function
        // would have multiple results then they are returned through memory
        // instead. Imports take a return pointer to write into and exports
        // return a pointer they wrote into.
        let mut retptr = None;
        if results.len() > 1 {
            retptr = Some(mem::take(&mut results));
            match dir {
                Direction::Import => {
                    params.push(WasmType::I32);
                }
                Direction::Export => {
                    results.push(WasmType::I32);
                }
            }
        }

        WasmSignature {
            params,
            results,
            retptr,
        }
    }

    fn preview1_num_types(&self, ty: &Type) -> usize {
        match ty {
            Type::Id(id) => match &self.types[*id].kind {
                TypeDefKind::Record(r) if r.is_tuple() => r.fields.len(),
                _ => 1,
            },
            _ => 1,
        }
    }

    fn push_wasm(&self, abi: Abi, dir: Direction, ty: &Type, result: &mut Vec<WasmType>) {
        match ty {
            Type::S8
            | Type::U8
            | Type::S16
            | Type::U16
            | Type::S32
            | Type::U32
            | Type::Char
            | Type::Handle(_)
            | Type::CChar
            | Type::Usize => result.push(WasmType::I32),

            Type::U64 | Type::S64 => result.push(WasmType::I64),
            Type::F32 => result.push(WasmType::F32),
            Type::F64 => result.push(WasmType::F64),

            Type::Id(id) => match &self.types[*id].kind {
                TypeDefKind::Type(t) => self.push_wasm(abi, dir, t, result),

                TypeDefKind::Record(r) if r.is_flags() => match self.flags_repr(r) {
                    Some(int) => result.push(int.into()),
                    None => {
                        for _ in 0..r.num_i32s() {
                            result.push(WasmType::I32);
                        }
                    }
                },

                TypeDefKind::Record(r) => {
                    for field in r.fields.iter() {
                        self.push_wasm(abi, dir, &field.ty, result);
                    }
                }

                TypeDefKind::List(_) => {
                    result.push(WasmType::I32);
                    result.push(WasmType::I32);
                }

                TypeDefKind::Pointer(_) | TypeDefKind::ConstPointer(_) => {
                    result.push(WasmType::I32);
                }

                TypeDefKind::PushBuffer(_) | TypeDefKind::PullBuffer(_) => {
                    result.push(WasmType::I32);
                    if dir == Direction::Import {
                        result.push(WasmType::I32);
                        result.push(WasmType::I32);
                    }
                }

                TypeDefKind::Variant(v) => {
                    result.push(v.tag.into());
                    let start = result.len();
                    let mut temp = Vec::new();

                    // Push each case's type onto a temporary vector, and then
                    // merge that vector into our final list starting at
                    // `start`. Note that this requires some degree of
                    // "unification" so we can handle things like `Result<i32,
                    // f32>` where that turns into `[i32 i32]` where the second
                    // `i32` might be the `f32` bitcasted.
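                    //
                    // For example (illustrative): a `Result`-like variant with
                    // a `u64` ok payload and an `f32` err payload flattens to
                    // `[<tag>, I64]`, and the `f32` case is bitcast into that
                    // `i64` slot when lowered.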
                    for case in v.cases.iter() {
                        let ty = match &case.ty {
                            Some(ty) => ty,
                            None => continue,
                        };
                        self.push_wasm(abi, dir, ty, &mut temp);

                        for (i, ty) in temp.drain(..).enumerate() {
                            match result.get_mut(start + i) {
                                Some(prev) => *prev = unify(*prev, ty),
                                None => result.push(ty),
                            }
                        }
                    }
                }
            },
        }
    }

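    /// Determines the integer representation of a flags record: an explicit
    /// hint wins, otherwise the smallest unsigned integer that fits every
    /// field is chosen (illustrative):
    ///
    /// ```text
    /// 8 or fewer flags  -> Int::U8
    /// 9..=16 flags      -> Int::U16
    /// 17..=32 flags     -> Int::U32
    /// 33..=64 flags     -> Int::U64
    /// more than 64      -> None (lowered as multiple i32s instead)
    /// ```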
    pub fn flags_repr(&self, record: &Record) -> Option<Int> {
        match record.kind {
            RecordKind::Flags(Some(hint)) => Some(hint),
            RecordKind::Flags(None) if record.fields.len() <= 8 => Some(Int::U8),
            RecordKind::Flags(None) if record.fields.len() <= 16 => Some(Int::U16),
            RecordKind::Flags(None) if record.fields.len() <= 32 => Some(Int::U32),
            RecordKind::Flags(None) if record.fields.len() <= 64 => Some(Int::U64),
            RecordKind::Flags(None) => None,
            _ => panic!("not a flags record"),
        }
    }

    /// Generates an abstract sequence of instructions which represents this
    /// function being adapted as an imported function.
    ///
    /// The instructions here, when executed, will emulate a language with
    /// interface types calling the concrete wasm implementation. The parameters
    /// for the returned instruction sequence are the language's own
    /// interface-types parameters. One instruction in the instruction stream
    /// will be a `CallWasm` or `CallInterface` which represents calling the
    /// actual function.
    ///
    /// This function is useful, for example, if you're building a language
    /// generator for WASI bindings. This will document how to translate
    /// language-specific values into the wasm types to call a WASI function,
    /// and it will also automatically convert the results of the WASI function
    /// back to a language-specific value.
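    ///
    /// As a rough sketch (the exact stream depends on the function and ABI),
    /// adapting a call to an import like `get: function(id: u32) -> u64` in
    /// the canonical ABI yields an instruction sequence along the lines of:
    ///
    /// ```text
    /// GetArg { nth: 0 }
    /// I32FromU32
    /// CallWasm { module, name, sig }
    /// U64FromI64
    /// Return { amt: 1, func }
    /// ```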
    pub fn call(
        &self,
        dir: Direction,
        lift_lower: LiftLower,
        func: &Function,
        bindgen: &mut impl Bindgen,
    ) {
        if Abi::Preview1 == func.abi {
            // The Preview1 ABI only works with WASI, which is only intended
            // for use with these modes.
            if dir == Direction::Export {
                panic!("the preview1 ABI only supports import modes");
            }
        }
        Generator::new(self, func.abi, dir, lift_lower, bindgen).call(func);
    }
}

struct Generator<'a, B: Bindgen> {
    abi: Abi,
    dir: Direction,
    lift_lower: LiftLower,
    bindgen: &'a mut B,
    iface: &'a Interface,
    operands: Vec<B::Operand>,
    results: Vec<B::Operand>,
    stack: Vec<B::Operand>,
    return_pointers: Vec<B::Operand>,
}

impl<'a, B: Bindgen> Generator<'a, B> {
    fn new(
        iface: &'a Interface,
        abi: Abi,
        dir: Direction,
        lift_lower: LiftLower,
        bindgen: &'a mut B,
    ) -> Generator<'a, B> {
        Generator {
            iface,
            abi,
            dir,
            lift_lower,
            bindgen,
            operands: Vec::new(),
            results: Vec::new(),
            stack: Vec::new(),
            return_pointers: Vec::new(),
        }
    }

    fn call(&mut self, func: &Function) {
        let sig = self.iface.wasm_signature(self.dir, func);

        match self.lift_lower {
            LiftLower::LowerArgsLiftResults => {
                // Push all parameters for this function onto the stack, and
                // then batch-lower everything all at once.
                for nth in 0..func.params.len() {
                    self.emit(&Instruction::GetArg { nth });
                }
                self.lower_all(&func.params, None);

                // If necessary we may need to prepare a return pointer for this
                // ABI. The `Preview1` ABI has most return values returned
                // through pointers, and the `Canonical` ABI returns more than
                // one value through a return pointer.
                if self.dir == Direction::Import {
                    self.prep_return_pointer(&sig, &func.results);
                }

                // Now that all the wasm args are prepared we can call the
                // actual wasm function.
                assert_eq!(self.stack.len(), sig.params.len());
                self.emit(&Instruction::CallWasm {
                    module: &self.iface.name,
                    name: &func.name,
                    sig: &sig,
                });

                // In the `Canonical` ABI we model multiple return values by going
                // through memory. Remove that indirection here by loading
                // everything to simulate the function having many return values
                // in our stack discipline.
                if let Some(actual) = &sig.retptr {
                    if self.dir == Direction::Import {
                        assert_eq!(self.return_pointers.len(), 1);
                        self.stack.push(self.return_pointers.pop().unwrap());
                    }
                    self.load_retptr(actual);
                }

                // Batch-lift all result values now that all the function's return
                // values are on the stack.
                self.lift_all(&func.results);

                self.emit(&Instruction::Return {
                    func,
                    amt: func.results.len(),
                });
            }
            LiftLower::LiftArgsLowerResults => {
                // Use `GetArg` to push all relevant arguments onto the stack.
                // Note that we can't use the signature of this function
                // directly due to various conversions and return pointers, so
                // we need to somewhat manually calculate all the arguments
                // which are converted as interface types arguments below.
                let nargs = match self.abi {
                    Abi::Preview1 => {
                        func.params.len()
                            + func
                                .params
                                .iter()
                                .filter(|(_, t)| match t {
                                    Type::Id(id) => match &self.iface.types[*id].kind {
                                        TypeDefKind::List(_) => true,
                                        _ => false,
                                    },
                                    _ => false,
                                })
                                .count()
                    }
                    Abi::Canonical => {
                        sig.params.len()
                            - (self.dir == Direction::Import && sig.retptr.is_some()) as usize
                    }
                };
                for nth in 0..nargs {
                    self.emit(&Instruction::GetArg { nth });
                }

                // Once everything is on the stack we can lift all arguments
                // one-by-one into their interface-types equivalent.
                self.lift_all(&func.params);

                // ... and that allows us to call the interface types function
                self.emit(&Instruction::CallInterface {
                    module: &self.iface.name,
                    func,
                });

                // ... and at the end we lower everything back into return
                // values.
                self.lower_all(&func.results, Some(nargs));

                // Our ABI dictates that a list of returned types are returned
                // through memory, so after we've got all the values on the
                // stack perform all of the stores here.
                if let Some(tys) = &sig.retptr {
                    match self.dir {
                        Direction::Import => {
                            self.emit(&Instruction::GetArg {
                                nth: sig.params.len() - 1,
                            });
                        }
                        Direction::Export => {
                            let op = self.bindgen.i64_return_pointer_area(tys.len());
                            self.stack.push(op);
                        }
                    }
                    let retptr = self.store_retptr(tys);
                    if self.dir == Direction::Export {
                        self.stack.push(retptr);
                    }
                }

                self.emit(&Instruction::Return {
                    func,
                    amt: sig.results.len(),
                });
            }
        }

        assert!(
            self.stack.is_empty(),
            "stack has {} items remaining",
            self.stack.len()
        );
    }

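    /// Pops the return pointer off the stack and emits a load for each type in
    /// `types`, using a fixed 8-byte stride (a sketch of the layout):
    ///
    /// ```text
    /// retptr + 0    first return value
    /// retptr + 8    second return value
    /// retptr + 16   third return value
    /// ```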
    fn load_retptr(&mut self, types: &[WasmType]) {
        let rp = self.stack.pop().unwrap();
        for (i, ty) in types.iter().enumerate() {
            self.stack.push(rp.clone());
            let offset = (i * 8) as i32;
            match ty {
                WasmType::I32 => self.emit(&Instruction::I32Load { offset }),
                WasmType::I64 => self.emit(&Instruction::I64Load { offset }),
                WasmType::F32 => self.emit(&Instruction::F32Load { offset }),
                WasmType::F64 => self.emit(&Instruction::F64Load { offset }),
            }
        }
    }

    /// Assumes that the wasm values to create `tys` are all located on the
    /// stack.
    ///
    /// Inserts instructions necessary to lift those types into their
    /// interface types equivalent.
    fn lift_all(&mut self, tys: &[(String, Type)]) {
        let mut temp = Vec::new();
        let operands = tys
            .iter()
            .rev()
            .map(|(_, ty)| {
                let ntys = match self.abi {
                    Abi::Preview1 => match ty {
                        Type::Id(id) => match &self.iface.types[*id].kind {
                            TypeDefKind::List(_) => 2,
                            _ => 1,
                        },
                        _ => 1,
                    },
                    Abi::Canonical => {
                        temp.truncate(0);
                        self.iface.push_wasm(self.abi, self.dir, ty, &mut temp);
                        temp.len()
                    }
                };
                self.stack
                    .drain(self.stack.len() - ntys..)
                    .collect::<Vec<_>>()
            })
            .collect::<Vec<_>>();
        for (operands, (_, ty)) in operands.into_iter().rev().zip(tys) {
            self.stack.extend(operands);
            self.lift(ty);
        }
    }

    /// Assumes that the value for `tys` is already on the stack, and then
    /// converts all of those values into their wasm types by lowering each
    /// argument in-order.
    fn lower_all(&mut self, tys: &[(String, Type)], mut nargs: Option<usize>) {
        let operands = self
            .stack
            .drain(self.stack.len() - tys.len()..)
            .collect::<Vec<_>>();
        for (operand, (_, ty)) in operands.into_iter().zip(tys) {
            self.stack.push(operand);
            self.lower(ty, nargs.as_mut());
        }
    }

    /// Assumes `types.len()` values are on the stack and stores them all into
    /// the return pointer of this function, specified in the last argument.
    ///
    /// This is only used with `Abi::Canonical`.
    fn store_retptr(&mut self, types: &[WasmType]) -> B::Operand {
        let retptr = self.stack.pop().unwrap();
        for (i, ty) in types.iter().enumerate().rev() {
            self.stack.push(retptr.clone());
            let offset = (i * 8) as i32;
            match ty {
                WasmType::I32 => self.emit(&Instruction::I32Store { offset }),
                WasmType::I64 => self.emit(&Instruction::I64Store { offset }),
                WasmType::F32 => self.emit(&Instruction::F32Store { offset }),
                WasmType::F64 => self.emit(&Instruction::F64Store { offset }),
            }
        }
        return retptr;
    }

    fn witx(&mut self, instr: &WitxInstruction<'_>) {
        self.emit(&Instruction::Witx { instr });
    }

    fn emit(&mut self, inst: &Instruction<'_>) {
        self.operands.clear();
        self.results.clear();

        let operands_len = inst.operands_len();
        assert!(
            self.stack.len() >= operands_len,
            "not enough operands on stack for {:?}",
            inst
        );
        self.operands
            .extend(self.stack.drain((self.stack.len() - operands_len)..));
        self.results.reserve(inst.results_len());

        self.bindgen
            .emit(self.iface, inst, &mut self.operands, &mut self.results);

        assert_eq!(
            self.results.len(),
            inst.results_len(),
            "{:?} expected {} results, got {}",
            inst,
            inst.results_len(),
            self.results.len()
        );
        self.stack.extend(self.results.drain(..));
    }

    fn push_block(&mut self) {
        self.bindgen.push_block();
    }

    fn finish_block(&mut self, size: usize) {
        self.operands.clear();
        assert!(
            size <= self.stack.len(),
            "not enough operands on stack for finishing block",
        );
        self.operands
            .extend(self.stack.drain((self.stack.len() - size)..));
        self.bindgen.finish_block(&mut self.operands);
    }
1412
1413 fn lower(&mut self, ty: &Type, retptr: Option<&mut usize>) {
1414 use Instruction::*;
1415 use WitxInstruction::*;
1416
1417 match *ty {
1418 Type::S8 => self.emit(&I32FromS8),
1419 Type::U8 => self.emit(&I32FromU8),
1420 Type::CChar => self.emit(&I32FromChar8),
1421 Type::S16 => self.emit(&I32FromS16),
1422 Type::U16 => self.emit(&I32FromU16),
1423 Type::S32 => self.emit(&I32FromS32),
1424 Type::U32 => self.emit(&I32FromU32),
1425 Type::Usize => self.emit(&I32FromUsize),
1426 Type::S64 => self.emit(&I64FromS64),
1427 Type::U64 => self.emit(&I64FromU64),
1428 Type::Char => self.emit(&I32FromChar),
1429 Type::F32 => self.emit(&F32FromIf32),
1430 Type::F64 => self.emit(&F64FromIf64),
1431 Type::Handle(ty) => {
1432 let borrowed = match self.lift_lower {
1433 // This means that a return value is being lowered, which is
1434 // never borrowed.
1435 LiftLower::LiftArgsLowerResults => false,
1436 // There's one of three possible situations we're in:
1437 //
1438 // * The handle is defined by the wasm module itself. This
1439 // is the only actual possible scenario today due to how
1440 // witx is defined. In this situation the handle is owned
1441 // by the host and "proof of ownership" is being offered
1442 // and there's no need to relinquish ownership.
1443 //
1444 // * The handle is defined by the host, and it's passing it
1445 // to a wasm module. This should use an owned conversion.
1446 // This isn't expressible in today's `*.witx` format.
1447 //
1448 // * The handle is defined by neither the host or the wasm
1449 // mdoule. This means that the host is passing a
1450 // capability from another wasm module into this one,
1451 // meaning it's doing so by reference since the host is
1452 // retaining access to its own
1453 //
1454 // Note, again, only the first bullet here is possible
1455 // today, hence the hardcoded `true` value. We'll need to
1456 // refactor `witx` to expose the other possibilities.
1457 LiftLower::LowerArgsLiftResults => true,
1458 };
1459 if borrowed {
1460 self.emit(&I32FromBorrowedHandle { ty });
1461 } else {
1462 self.emit(&I32FromOwnedHandle { ty });
1463 }
1464 }
1465 Type::Id(id) => match &self.iface.types[id].kind {
1466 TypeDefKind::Type(t) => self.lower(t, retptr),
1467 TypeDefKind::Pointer(_) => self.witx(&I32FromPointer),
1468 TypeDefKind::ConstPointer(_) => self.witx(&I32FromConstPointer),
1469 TypeDefKind::List(element) => match self.abi {
1470 Abi::Preview1 => self.emit(&ListCanonLower {
1471 element,
1472 realloc: None,
1473 }),
1474 Abi::Canonical => {
1475 // Lowering parameters calling a wasm import means
1476 // we don't need to pass ownership, but we pass
1477 // ownership in all other cases.
1478 let realloc = match (self.dir, self.lift_lower) {
1479 (Direction::Import, LiftLower::LowerArgsLiftResults) => None,
1480 _ => Some("canonical_abi_realloc"),
1481 };
1482 if self.is_char(element)
1483 || self.bindgen.is_list_canonical(self.iface, element)
1484 {
1485 self.emit(&ListCanonLower { element, realloc });
1486 } else {
1487 self.push_block();
1488 self.emit(&IterElem { element });
1489 self.emit(&IterBasePointer);
1490 let addr = self.stack.pop().unwrap();
1491 self.write_to_memory(element, addr, 0);
1492 self.finish_block(0);
1493 self.emit(&ListLower { element, realloc });
1494 }
1495 }
1496 },
1497 TypeDefKind::PushBuffer(ty) | TypeDefKind::PullBuffer(ty) => {
1498 let push = match &self.iface.types[id].kind {
1499 TypeDefKind::PushBuffer(_) => true,
1500 _ => false,
1501 };
1502 self.translate_buffer(push, ty);
1503
1504 // Buffers are only used in the parameter position, so if we
1505 // are lowering them, then we had better be lowering args
1506 // and lifting results.
1507 assert!(self.lift_lower == LiftLower::LowerArgsLiftResults);
1508
1509 match self.dir {
1510 Direction::Import => {
1511 // When calling an imported function we're passing a raw view
1512 // into memory, and the adapter will convert it into something
1513 // else if necessary.
1514 self.emit(&BufferLowerPtrLen { push, ty });
1515 }
1516 Direction::Export => {
1517 // When calling an exported function we're passing a handle to
1518 // the caller's memory, and this part of the adapter is
1519 // responsible for converting it into something that's a handle.
1520 self.emit(&BufferLowerHandle { push, ty });
1521 }
1522 }
1523 }
1524 TypeDefKind::Record(record) if record.is_flags() => {
1525 match self.iface.flags_repr(record) {
1526 Some(Int::U64) => self.emit(&FlagsLower64 {
1527 record,
1528 ty: id,
1529 name: self.iface.types[id].name.as_ref().unwrap(),
1530 }),
1531 _ => self.emit(&FlagsLower {
1532 record,
1533 ty: id,
1534 name: self.iface.types[id].name.as_ref().unwrap(),
1535 }),
1536 }
1537 }
1538 TypeDefKind::Record(record) => match self.abi {
1539 Abi::Preview1 => self.witx(&AddrOf),
1540
1541 Abi::Canonical => {
1542 self.emit(&RecordLower {
1543 record,
1544 ty: id,
1545 name: self.iface.types[id].name.as_deref(),
1546 });
1547 let values = self
1548 .stack
1549 .drain(self.stack.len() - record.fields.len()..)
1550 .collect::<Vec<_>>();
1551 for (field, value) in record.fields.iter().zip(values) {
1552 self.stack.push(value);
1553 self.lower(&field.ty, None);
1554 }
1555 }
1556 },
1557
1558 // Variants in the return position of an import must be a Result in
1559 // the preview1 ABI and they're a bit special about where all the
1560 // pieces are.
1561 TypeDefKind::Variant(v)
1562 if self.abi == Abi::Preview1
1563 && self.dir == Direction::Import
1564 && self.lift_lower == LiftLower::LiftArgsLowerResults
1565 && !v.is_enum() =>
1566 {
1567 let retptr = retptr.unwrap();
1568 let (ok, err) = v.as_expected().unwrap();
1569 self.push_block();
1570 self.emit(&VariantPayloadName);
1571 let payload_name = self.stack.pop().unwrap();
1572 if let Some(ok) = ok {
1573 self.stack.push(payload_name.clone());
1574 let store = |me: &mut Self, ty: &Type, n| {
1575 me.emit(&GetArg { nth: *retptr + n });
1576 let addr = me.stack.pop().unwrap();
1577 me.write_to_memory(ty, addr, 0);
1578 };
1579 match *ok {
1580 Type::Id(okid) => match &self.iface.types[okid].kind {
1581 TypeDefKind::Record(record) if record.is_tuple() => {
1582 self.emit(&RecordLower {
1583 record,
1584 ty: id,
1585 name: self.iface.types[okid].name.as_deref(),
1586 });
1587 // Note that `rev()` is used here due to the order
1588 // that tuples are pushed onto the stack and how we
1589 // consume the last item first from the stack.
1590 for (i, field) in record.fields.iter().enumerate().rev() {
1591 store(self, &field.ty, i);
1592 }
1593 }
1594 _ => store(self, ok, 0),
1595 },
1596 _ => store(self, ok, 0),
1597 }
1598 };
1599 self.emit(&I32Const { val: 0 });
1600 self.finish_block(1);
1601
1602 self.push_block();
1603 self.emit(&VariantPayloadName);
1604 let payload_name = self.stack.pop().unwrap();
1605 if let Some(ty) = err {
1606 self.stack.push(payload_name.clone());
1607 self.lower(ty, None);
1608 }
1609 self.finish_block(1);
1610
1611 self.emit(&VariantLower {
1612 variant: v,
1613 ty: id,
1614 name: self.iface.types[id].name.as_deref(),
1615 results: &[WasmType::I32],
1616 });
1617 }
1618
1619 // Variant arguments in the Preview1 ABI are all passed by pointer
1620 TypeDefKind::Variant(v)
1621 if self.abi == Abi::Preview1
1622 && self.dir == Direction::Import
1623 && self.lift_lower == LiftLower::LowerArgsLiftResults
1624 && !v.is_enum() =>
1625 {
1626 self.witx(&AddrOf)
1627 }
1628
1629 TypeDefKind::Variant(v) => {
1630 let mut results = Vec::new();
1631 let mut temp = Vec::new();
1632 let mut casts = Vec::new();
1633 self.iface.push_wasm(self.abi, self.dir, ty, &mut results);
1634 for (i, case) in v.cases.iter().enumerate() {
1635 self.push_block();
1636 self.emit(&VariantPayloadName);
1637 let payload_name = self.stack.pop().unwrap();
1638 self.emit(&I32Const { val: i as i32 });
1639 let mut pushed = 1;
1640 if let Some(ty) = &case.ty {
1641 // Using the payload of this block we lower the type to
1642 // raw wasm values.
1643 self.stack.push(payload_name.clone());
1644 self.lower(ty, None);
1645
1646 // Determine the types of all the wasm values we just
1647 // pushed, and record how many. If we pushed too few
1648 // then we'll need to push some zeros after this.
1649 temp.truncate(0);
1650 self.iface.push_wasm(self.abi, self.dir, ty, &mut temp);
1651 pushed += temp.len();
1652
1653 // For all the types pushed we may need to insert some
1654 // bitcasts. This will go through and cast everything
1655 // to the right type to ensure all blocks produce the
1656 // same set of results.
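//
// Purely as an illustration: for a variant whose payload cases
// are `f32` and `u64`, `push_wasm` reports `[i32, i64]` (the
// discriminant plus one unified payload slot), so the `f32`
// case's single lowered value gets `Bitcast::F32ToI64` to fill
// the shared `i64` slot.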
1657 casts.truncate(0);
1658 for (actual, expected) in temp.iter().zip(&results[1..]) {
1659 casts.push(cast(*actual, *expected));
1660 }
1661 if casts.iter().any(|c| *c != Bitcast::None) {
1662 self.emit(&Bitcasts { casts: &casts });
1663 }
1664 }
1665
1666 // If we haven't pushed enough items in this block to match
1667 // what other variants are pushing then we need to push
1668 // some zeros.
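//
// Continuing the illustration above: an empty case pushes only
// its discriminant, so `ConstZero` fills the remaining joined
// slots (a single zero `i64` in that example) and every block
// produces the same number of results.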
1669 if pushed < results.len() {
1670 self.emit(&ConstZero {
1671 tys: &results[pushed..],
1672 });
1673 }
1674 self.finish_block(results.len());
1675 }
1676 self.emit(&VariantLower {
1677 variant: v,
1678 ty: id,
1679 results: &results,
1680 name: self.iface.types[id].name.as_deref(),
1681 });
1682 }
1683 },
1684 }
1685 }
1686
1687 fn prep_return_pointer(&mut self, sig: &WasmSignature, results: &[(String, Type)]) {
1688 match self.abi {
1689 Abi::Preview1 => {
1690 assert!(results.len() <= 1);
1691 let ty = match results.get(0) {
1692 Some((_, ty)) => ty,
1693 None => return,
1694 };
1695 // Return pointers are only needed for `Result<T, _>`...
1696 let variant = match ty {
1697 Type::Id(id) => match &self.iface.types[*id].kind {
1698 TypeDefKind::Variant(v) => v,
1699 _ => return,
1700 },
1701 _ => return,
1702 };
1703 // ... and only if `T` is actually present in `Result<T, _>`
1704 let ok = match &variant.cases[0].ty {
1705 Some(Type::Id(id)) => *id,
1706 _ => return,
1707 };
1708
1709 // Tuples have each individual item in a separate return pointer while
1710 // all other types go through a singular return pointer.
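//
// For a hypothetical `expected<tuple<size, filesize>, errno>`
// (tuple items being named types) this allocates one typed
// return-pointer per tuple item, while `expected<size, errno>`
// gets a single return pointer.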
1711 let iface = self.iface;
1712 let mut prep = |ty: TypeId| {
1713 let ptr = self.bindgen.allocate_typed_space(iface, ty);
1714 self.return_pointers.push(ptr.clone());
1715 self.stack.push(ptr);
1716 };
1717 match &iface.types[ok].kind {
1718 TypeDefKind::Record(r) if r.is_tuple() => {
1719 for field in r.fields.iter() {
1720 match field.ty {
1721 Type::Id(id) => prep(id),
1722 _ => unreachable!(),
1723 }
1724 }
1725 }
1726 _ => prep(ok),
1727 }
1728 }
1729 // If a return pointer was automatically injected into this function
1730 // then we need to allocate a proper amount of space for it and then
1731 // add it to the stack to get passed to the callee.
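// As a sketch: if `sig.retptr` were, say, `[I32, I32]` (e.g. a lowered
// pointer/length pair), this asks the bindgen for an area of two
// i64-sized slots and pushes its address as the extra argument for the
// callee to fill in.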
1732 Abi::Canonical => {
1733 if let Some(results) = &sig.retptr {
1734 let ptr = self.bindgen.i64_return_pointer_area(results.len());
1735 self.return_pointers.push(ptr.clone());
1736 self.stack.push(ptr.clone());
1737 }
1738 }
1739 }
1740 }
1741
1742 /// Note that in general everything in this function is the opposite of the
1743 /// `lower` function above. This is intentional and should be kept this way!
1744 fn lift(&mut self, ty: &Type) {
1745 use Instruction::*;
1746 use WitxInstruction::*;
1747
1748 match *ty {
1749 Type::S8 => self.emit(&S8FromI32),
1750 Type::CChar => self.emit(&Char8FromI32),
1751 Type::U8 => self.emit(&U8FromI32),
1752 Type::S16 => self.emit(&S16FromI32),
1753 Type::U16 => self.emit(&U16FromI32),
1754 Type::S32 => self.emit(&S32FromI32),
1755 Type::Usize => self.emit(&UsizeFromI32),
1756 Type::U32 => self.emit(&U32FromI32),
1757 Type::S64 => self.emit(&S64FromI64),
1758 Type::U64 => self.emit(&U64FromI64),
1759 Type::Char => self.emit(&CharFromI32),
1760 Type::F32 => self.emit(&If32FromF32),
1761 Type::F64 => self.emit(&If64FromF64),
1762 Type::Handle(ty) => {
1763 // For more information on these values see the comments in
1764 // `lower` above.
1765 let borrowed = match self.lift_lower {
1766 LiftLower::LiftArgsLowerResults => true,
1767 LiftLower::LowerArgsLiftResults => false,
1768 };
1769 if borrowed {
1770 self.emit(&HandleBorrowedFromI32 { ty });
1771 } else {
1772 self.emit(&HandleOwnedFromI32 { ty });
1773 }
1774 }
1775 Type::Id(id) => match &self.iface.types[id].kind {
1776 TypeDefKind::Type(t) => self.lift(t),
1777 TypeDefKind::Pointer(ty) => self.witx(&PointerFromI32 { ty }),
1778 TypeDefKind::ConstPointer(ty) => self.witx(&ConstPointerFromI32 { ty }),
1779 TypeDefKind::List(element) => match self.abi {
1780 Abi::Preview1 => self.emit(&ListCanonLift {
1781 element,
1782 free: None,
1783 ty: id,
1784 }),
1785 Abi::Canonical => {
1786 // Lifting the arguments of a defined import means that, if
1787 // possible, the caller still retains ownership and we don't
1788 // free anything.
1789 let free = match (self.dir, self.lift_lower) {
1790 (Direction::Import, LiftLower::LiftArgsLowerResults) => None,
1791 _ => Some("canonical_abi_free"),
1792 };
1793 if self.is_char(element)
1794 || self.bindgen.is_list_canonical(self.iface, element)
1795 {
1796 self.emit(&ListCanonLift {
1797 element,
1798 free,
1799 ty: id,
1800 });
1801 } else {
1802 self.push_block();
1803 self.emit(&IterBasePointer);
1804 let addr = self.stack.pop().unwrap();
1805 self.read_from_memory(element, addr, 0);
1806 self.finish_block(1);
1807 self.emit(&ListLift {
1808 element,
1809 free,
1810 ty: id,
1811 });
1812 }
1813 }
1814 },
1815 TypeDefKind::PushBuffer(ty) | TypeDefKind::PullBuffer(ty) => {
1816 let push = match &self.iface.types[id].kind {
1817 TypeDefKind::PushBuffer(_) => true,
1818 _ => false,
1819 };
1820 self.translate_buffer(push, ty);
1821 // Buffers are only used in the parameter position, which
1822 // means lifting a buffer should only happen when we are
1823 // lifting arguments and lowering results.
1824 assert!(self.lift_lower == LiftLower::LiftArgsLowerResults);
1825
1826 match self.dir {
1827 Direction::Import => {
1828 // When calling a defined imported function we're coming from a
1829 // pointer/length pair, and the embedding context will figure out
1830 // what to do with it.
1831 self.emit(&BufferLiftPtrLen { push, ty })
1832 }
1833 Direction::Export => {
1834 // When calling an exported function we're given a handle to the
1835 // buffer, which is then interpreted in the calling context.
1836 self.emit(&BufferLiftHandle { push, ty })
1837 }
1838 }
1839 }
1840 TypeDefKind::Record(record) if record.is_flags() => {
1841 match self.iface.flags_repr(record) {
1842 Some(Int::U64) => self.emit(&FlagsLift64 {
1843 record,
1844 ty: id,
1845 name: self.iface.types[id].name.as_ref().unwrap(),
1846 }),
1847 _ => self.emit(&FlagsLift {
1848 record,
1849 ty: id,
1850 name: self.iface.types[id].name.as_ref().unwrap(),
1851 }),
1852 }
1853 }
1854 TypeDefKind::Record(record) => match self.abi {
1855 Abi::Preview1 => {
1856 let addr = self.stack.pop().unwrap();
1857 self.read_from_memory(ty, addr, 0);
1858 }
1859 Abi::Canonical => {
1860 let mut temp = Vec::new();
1861 self.iface.push_wasm(self.abi, self.dir, ty, &mut temp);
1862 let mut args = self
1863 .stack
1864 .drain(self.stack.len() - temp.len()..)
1865 .collect::<Vec<_>>();
1866 for field in record.fields.iter() {
1867 temp.truncate(0);
1868 self.iface
1869 .push_wasm(self.abi, self.dir, &field.ty, &mut temp);
1870 self.stack.extend(args.drain(..temp.len()));
1871 self.lift(&field.ty);
1872 }
1873 self.emit(&RecordLift {
1874 record,
1875 ty: id,
1876 name: self.iface.types[id].name.as_deref(),
1877 });
1878 }
1879 },
1880
1881 // Variants in the return position of an import must be a Result in
1882 // the preview1 ABI and they're a bit special about where all the
1883 // pieces are.
1884 TypeDefKind::Variant(v)
1885 if self.abi == Abi::Preview1
1886 && self.dir == Direction::Import
1887 && self.lift_lower == LiftLower::LowerArgsLiftResults
1888 && !v.is_enum() =>
1889 {
1890 let (ok, err) = v.as_expected().unwrap();
1891 self.push_block();
1892 if let Some(ok) = ok {
1893 let mut n = 0;
1894 let mut load = |me: &mut Self, ty: &Type| {
1895 me.read_from_memory(ty, me.return_pointers[n].clone(), 0);
1896 n += 1;
1897 };
1898 match *ok {
1899 Type::Id(okid) => match &self.iface.types[okid].kind {
1900 TypeDefKind::Record(record) if record.is_tuple() => {
1901 for field in record.fields.iter() {
1902 load(self, &field.ty);
1903 }
1904 self.emit(&RecordLift {
1905 record,
1906 ty: okid,
1907 name: self.iface.types[okid].name.as_deref(),
1908 });
1909 }
1910 _ => load(self, ok),
1911 },
1912 _ => load(self, ok),
1913 }
1914 }
1915 self.finish_block(ok.is_some() as usize);
1916
1917 self.push_block();
1918 if let Some(ty) = err {
1919 self.witx(&ReuseReturn);
1920 self.lift(ty);
1921 }
1922 self.finish_block(err.is_some() as usize);
1923
1924 self.emit(&VariantLift {
1925 variant: v,
1926 ty: id,
1927 name: self.iface.types[id].name.as_deref(),
1928 });
1929 }
1930
1931 // Variant arguments in the Preview1 ABI are all passed by pointer,
1932 // so we read them here.
1933 TypeDefKind::Variant(v)
1934 if self.abi == Abi::Preview1
1935 && self.dir == Direction::Import
1936 && self.lift_lower == LiftLower::LiftArgsLowerResults
1937 && !v.is_enum() =>
1938 {
1939 let addr = self.stack.pop().unwrap();
1940 self.read_from_memory(ty, addr, 0)
1941 }
1942
1943 TypeDefKind::Variant(v) => {
1944 let mut params = Vec::new();
1945 let mut temp = Vec::new();
1946 let mut casts = Vec::new();
1947 self.iface.push_wasm(self.abi, self.dir, ty, &mut params);
1948 let block_inputs = self
1949 .stack
1950 .drain(self.stack.len() + 1 - params.len()..)
1951 .collect::<Vec<_>>();
1952 for case in v.cases.iter() {
1953 self.push_block();
1954 if let Some(ty) = &case.ty {
1955 // Push only the values we need for this variant onto
1956 // the stack.
1957 temp.truncate(0);
1958 self.iface.push_wasm(self.abi, self.dir, ty, &mut temp);
1959 self.stack
1960 .extend(block_inputs[..temp.len()].iter().cloned());
1961
1962 // Cast all the types we have on the stack to the actual
1963 // types needed for this variant, if necessary.
1964 casts.truncate(0);
1965 for (actual, expected) in temp.iter().zip(¶ms[1..]) {
1966 casts.push(cast(*expected, *actual));
1967 }
1968 if casts.iter().any(|c| *c != Bitcast::None) {
1969 self.emit(&Bitcasts { casts: &casts });
1970 }
1971
1972 // Then recursively lift this variant's payload.
1973 self.lift(ty);
1974 }
1975 self.finish_block(case.ty.is_some() as usize);
1976 }
1977 self.emit(&VariantLift {
1978 variant: v,
1979 ty: id,
1980 name: self.iface.types[id].name.as_deref(),
1981 });
1982 }
1983 },
1984 }
1985 }
1986
1987 fn write_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: i32) {
1988 use Instruction::*;
1989
1990 match *ty {
1991 // Builtin types need different flavors of storage instructions
1992 // depending on the size of the value written.
1993 Type::U8 | Type::S8 | Type::CChar => {
1994 self.lower_and_emit(ty, addr, &I32Store8 { offset })
1995 }
1996 Type::U16 | Type::S16 => self.lower_and_emit(ty, addr, &I32Store16 { offset }),
1997 Type::U32 | Type::S32 | Type::Usize | Type::Handle(_) | Type::Char => {
1998 self.lower_and_emit(ty, addr, &I32Store { offset })
1999 }
2000 Type::U64 | Type::S64 => self.lower_and_emit(ty, addr, &I64Store { offset }),
2001 Type::F32 => self.lower_and_emit(ty, addr, &F32Store { offset }),
2002 Type::F64 => self.lower_and_emit(ty, addr, &F64Store { offset }),
2003
2004 Type::Id(id) => match &self.iface.types[id].kind {
2005 TypeDefKind::Type(t) => self.write_to_memory(t, addr, offset),
2006 TypeDefKind::Pointer(_) | TypeDefKind::ConstPointer(_) => {
2007 self.lower_and_emit(ty, addr, &I32Store { offset });
2008 }
2009
2010 // After lowering the list there are two i32 values on the stack
2011 // which we write into memory: the pointer goes into the low address
2012 // and the length into the high address.
2013 TypeDefKind::List(_) => {
2014 self.lower(ty, None);
2015 self.stack.push(addr.clone());
2016 self.emit(&I32Store { offset: offset + 4 });
2017 self.stack.push(addr);
2018 self.emit(&I32Store { offset });
2019 }
2020
2021 // Lower the buffer to its raw values, and then write the values
2022 // into memory, which may be more than one value depending on
2023 // our import/export direction.
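//
// Concretely, per the stores below: in the import direction the
// lowered buffer occupies three consecutive i32 slots (written at
// `offset`, `offset + 4`, and `offset + 8`), while in the export
// direction only a single i32 is written at `offset`.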
2024 TypeDefKind::PushBuffer(_) | TypeDefKind::PullBuffer(_) => {
2025 self.lower(ty, None);
2026 if self.dir == Direction::Import {
2027 self.stack.push(addr.clone());
2028 self.emit(&I32Store { offset: offset + 8 });
2029 self.stack.push(addr.clone());
2030 self.emit(&I32Store { offset: offset + 4 });
2031 }
2032 self.stack.push(addr);
2033 self.emit(&I32Store { offset });
2034 }
2035
2036 TypeDefKind::Record(r) if r.is_flags() => {
2037 self.lower(ty, None);
2038 match self.iface.flags_repr(r) {
2039 Some(repr) => {
2040 self.stack.push(addr);
2041 self.store_intrepr(offset, repr);
2042 }
2043 None => {
2044 for i in 0..r.num_i32s() {
2045 self.stack.push(addr.clone());
2046 self.emit(&I32Store {
2047 offset: offset + (i as i32) * 4,
2048 });
2049 }
2050 }
2051 }
2052 }
2053
2054 // Decompose the record into its components and then write all
2055 // the components into memory one-by-one.
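//
// For a hypothetical record with a `u8` field followed by a `u32`
// field, and the usual layout computed by `sizes()`, the `u8` is
// written with `I32Store8` at `offset + 0` and the `u32` with
// `I32Store` at `offset + 4`.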
2056 TypeDefKind::Record(record) => {
2057 self.emit(&RecordLower {
2058 record,
2059 ty: id,
2060 name: self.iface.types[id].name.as_deref(),
2061 });
2062 let fields = self
2063 .stack
2064 .drain(self.stack.len() - record.fields.len()..)
2065 .collect::<Vec<_>>();
2066 for ((field_offset, op), field) in self
2067 .bindgen
2068 .sizes()
2069 .field_offsets(record)
2070 .into_iter()
2071 .zip(fields)
2072 .zip(&record.fields)
2073 {
2074 self.stack.push(op);
2075 self.write_to_memory(
2076 &field.ty,
2077 addr.clone(),
2078 offset + (field_offset as i32),
2079 );
2080 }
2081 }
2082
2083 // Each case will get its own block, and the first item in each
2084 // case is writing the discriminant. After that if we have a
2085 // payload we write the payload after the discriminant, aligned up
2086 // to the type's alignment.
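//
// For an option-like variant with a `u64` payload (illustrative
// only), the tag is stored at `offset` via `store_intrepr` and the
// payload, when present, at `offset + payload_offset`, which
// `sizes()` aligns to the payload's alignment.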
2087 TypeDefKind::Variant(v) => {
2088 let payload_offset = offset + (self.bindgen.sizes().payload_offset(v) as i32);
2089 for (i, case) in v.cases.iter().enumerate() {
2090 self.push_block();
2091 self.emit(&VariantPayloadName);
2092 let payload_name = self.stack.pop().unwrap();
2093 self.emit(&I32Const { val: i as i32 });
2094 self.stack.push(addr.clone());
2095 self.store_intrepr(offset, v.tag);
2096 if let Some(ty) = &case.ty {
2097 self.stack.push(payload_name.clone());
2098 self.write_to_memory(ty, addr.clone(), payload_offset);
2099 }
2100 self.finish_block(0);
2101 }
2102 self.emit(&VariantLower {
2103 variant: v,
2104 ty: id,
2105 results: &[],
2106 name: self.iface.types[id].name.as_deref(),
2107 });
2108 }
2109 },
2110 }
2111 }
2112
2113 fn lower_and_emit(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
2114 self.lower(ty, None);
2115 self.stack.push(addr);
2116 self.emit(instr);
2117 }
2118
2119 fn read_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: i32) {
2120 use Instruction::*;
2121
2122 match *ty {
2123 Type::U8 | Type::CChar => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
2124 Type::S8 => self.emit_and_lift(ty, addr, &I32Load8S { offset }),
2125 Type::U16 => self.emit_and_lift(ty, addr, &I32Load16U { offset }),
2126 Type::S16 => self.emit_and_lift(ty, addr, &I32Load16S { offset }),
2127 Type::U32 | Type::S32 | Type::Char | Type::Usize | Type::Handle(_) => {
2128 self.emit_and_lift(ty, addr, &I32Load { offset })
2129 }
2130 Type::U64 | Type::S64 => self.emit_and_lift(ty, addr, &I64Load { offset }),
2131 Type::F32 => self.emit_and_lift(ty, addr, &F32Load { offset }),
2132 Type::F64 => self.emit_and_lift(ty, addr, &F64Load { offset }),
2133
2134 Type::Id(id) => match &self.iface.types[id].kind {
2135 TypeDefKind::Type(t) => self.read_from_memory(t, addr, offset),
2136 TypeDefKind::Pointer(_) | TypeDefKind::ConstPointer(_) => {
2137 self.emit_and_lift(ty, addr, &I32Load { offset })
2138 }
2139
2140 // Read the pointer/len and then perform the standard lifting
2141 // process.
2142 TypeDefKind::List(_) => {
2143 self.stack.push(addr.clone());
2144 self.emit(&I32Load { offset });
2145 self.stack.push(addr);
2146 self.emit(&I32Load { offset: offset + 4 });
2147 self.lift(ty);
2148 }
2149
2150 // Read the requisite number of values from memory and then lift as
2151 // appropriate.
2152 TypeDefKind::PushBuffer(_) | TypeDefKind::PullBuffer(_) => {
2153 self.stack.push(addr.clone());
2154 self.emit(&I32Load { offset });
2155 if self.dir == Direction::Import
2156 && self.lift_lower == LiftLower::LiftArgsLowerResults
2157 {
2158 self.stack.push(addr.clone());
2159 self.emit(&I32Load { offset: offset + 4 });
2160 self.stack.push(addr);
2161 self.emit(&I32Load { offset: offset + 8 });
2162 }
2163 self.lift(ty);
2164 }
2165
2166 TypeDefKind::Record(r) if r.is_flags() => {
2167 match self.iface.flags_repr(r) {
2168 Some(repr) => {
2169 self.stack.push(addr);
2170 self.load_intrepr(offset, repr);
2171 }
2172 None => {
2173 for i in 0..r.num_i32s() {
2174 self.stack.push(addr.clone());
2175 self.emit(&I32Load {
2176 offset: offset + (i as i32) * 4,
2177 });
2178 }
2179 }
2180 }
2181 self.lift(ty);
2182 }
2183
2184 // Read and lift each field individually, adjusting the offset
2185 // as we go along, then aggregate all the fields into the
2186 // record.
2187 TypeDefKind::Record(record) => {
2188 for (field_offset, field) in self
2189 .bindgen
2190 .sizes()
2191 .field_offsets(record)
2192 .into_iter()
2193 .zip(&record.fields)
2194 {
2195 self.read_from_memory(
2196 &field.ty,
2197 addr.clone(),
2198 offset + (field_offset as i32),
2199 );
2200 }
2201 self.emit(&RecordLift {
2202 record,
2203 ty: id,
2204 name: self.iface.types[id].name.as_deref(),
2205 });
2206 }
2207
2208 // Each case will get its own block, and we'll dispatch to the right
2209 // block based on the discriminant we initially load, sized by the
2210 // variant's tag representation. Each individual block is pretty simple
2211 // and just reads the payload from the corresponding offset if one is available.
2212 TypeDefKind::Variant(variant) => {
2213 self.stack.push(addr.clone());
2214 self.load_intrepr(offset, variant.tag);
2215 let payload_offset =
2216 offset + (self.bindgen.sizes().payload_offset(variant) as i32);
2217 for case in variant.cases.iter() {
2218 self.push_block();
2219 if let Some(ty) = &case.ty {
2220 self.read_from_memory(ty, addr.clone(), payload_offset);
2221 }
2222 self.finish_block(case.ty.is_some() as usize);
2223 }
2224 self.emit(&VariantLift {
2225 variant,
2226 ty: id,
2227 name: self.iface.types[id].name.as_deref(),
2228 });
2229 }
2230 },
2231 }
2232 }
2233
2234 fn emit_and_lift(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
2235 self.stack.push(addr);
2236 self.emit(instr);
2237 self.lift(ty);
2238 }
2239
2240 fn load_intrepr(&mut self, offset: i32, repr: Int) {
2241 self.emit(&match repr {
2242 Int::U64 => Instruction::I64Load { offset },
2243 Int::U32 => Instruction::I32Load { offset },
2244 Int::U16 => Instruction::I32Load16U { offset },
2245 Int::U8 => Instruction::I32Load8U { offset },
2246 });
2247 }
2248
2249 fn store_intrepr(&mut self, offset: i32, repr: Int) {
2250 self.emit(&match repr {
2251 Int::U64 => Instruction::I64Store { offset },
2252 Int::U32 => Instruction::I32Store { offset },
2253 Int::U16 => Instruction::I32Store16 { offset },
2254 Int::U8 => Instruction::I32Store8 { offset },
2255 });
2256 }
2257
2258 fn translate_buffer(&mut self, push: bool, ty: &Type) {
2259 let do_write = match self.lift_lower {
2260 // For declared items, input/output is defined in the context of
2261 // what the callee will do. The callee will read input buffers,
2262 // meaning we write to them, and write to output buffers, meaning
2263 // we'll read from them.
2264 LiftLower::LowerArgsLiftResults => !push,
2265
2266 // Defined items mirror declared imports because buffers are
2267 // defined from the caller's perspective, so we don't invert the
2268 // `push` setting like above.
2269 LiftLower::LiftArgsLowerResults => push,
2270 };
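// A concrete reading of the table above: lowering the arguments of a
// declared item (`LowerArgsLiftResults`) with a pull buffer
// (`push == false`) yields `do_write == true`, so the block emitted
// below takes the buffer's payload and writes it into memory for the
// callee to read.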
2271 self.emit(&Instruction::IterBasePointer);
2272 let addr = self.stack.pop().unwrap();
2273 if do_write {
2274 self.push_block();
2275 self.emit(&Instruction::BufferPayloadName);
2276 self.write_to_memory(ty, addr, 0);
2277 self.finish_block(0);
2278 } else {
2279 self.push_block();
2280 self.read_from_memory(ty, addr, 0);
2281 self.finish_block(1);
2282 }
2283 }
2284
2285 fn is_char(&self, ty: &Type) -> bool {
2286 match ty {
2287 Type::Char => true,
2288 Type::Id(id) => match &self.iface.types[*id].kind {
2289 TypeDefKind::Type(t) => self.is_char(t),
2290 _ => false,
2291 },
2292 _ => false,
2293 }
2294 }
2295}
2296
2297fn cast(from: WasmType, to: WasmType) -> Bitcast {
2298 use WasmType::*;
2299
2300 match (from, to) {
2301 (I32, I32) | (I64, I64) | (F32, F32) | (F64, F64) => Bitcast::None,
2302
2303 (I32, I64) => Bitcast::I32ToI64,
2304 (F32, F64) => Bitcast::F32ToF64,
2305 (F32, I32) => Bitcast::F32ToI32,
2306 (F64, I64) => Bitcast::F64ToI64,
2307
2308 (I64, I32) => Bitcast::I64ToI32,
2309 (F64, F32) => Bitcast::F64ToF32,
2310 (I32, F32) => Bitcast::I32ToF32,
2311 (I64, F64) => Bitcast::I64ToF64,
2312
2313 (F32, I64) => Bitcast::F32ToI64,
2314 (I64, F32) => Bitcast::I64ToF32,
2315 (F64, I32) | (I32, F64) => unreachable!(),
2316 }
2317}