#include "wasm/WasmStubs.h"
#include "mozilla/ArrayUtils.h"
#include "jit/RegisterAllocator.h"
#include "js/Printf.h"
#include "wasm/WasmCode.h"
#include "wasm/WasmGenerator.h"
#include "wasm/WasmInstance.h"
#include "jit/MacroAssembler-inl.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::ArrayLength;
// Type aliases used throughout the stub generators. (Modernized from
// `typedef` to `using`; the file already relies on C++11+ features.)
using MIRTypeVector = Vector<jit::MIRType, 8, SystemAllocPolicy>;
using ABIArgMIRTypeIter = jit::ABIArgIter<MIRTypeVector>;
using ABIArgValTypeIter = jit::ABIArgIter<ValTypeVector>;
#ifdef WASM_CODEGEN_DEBUG
// Emits code that, at runtime, prints a debug message on the given channel.
// All registers are saved/restored around the emitted ABI call so the
// surrounding code is unaffected. `taken` names a register that must not be
// chosen as the temporary; `passArgAndCall` emits the argument setup and the
// actual call, and receives whether we are compiling wasm plus the temp
// register. No code is emitted when the channel is disabled.
template <class Closure>
static void GenPrint(DebugChannel channel, MacroAssembler& masm,
                     const Maybe<Register>& taken, Closure passArgAndCall) {
  if (!IsCodegenDebugEnabled(channel)) {
    return;
  }

  // Preserve the entire register state across the printing call.
  AllocatableRegisterSet regs(RegisterSet::All());
  LiveRegisterSet save(regs.asLiveSet());
  masm.PushRegsInMask(save);

  if (taken) {
    regs.take(taken.value());
  }
  Register temp = regs.takeAnyGeneral();

  {
    MOZ_ASSERT(MaybeGetJitContext(),
               "codegen debug checks require a jit context");
    masm.setupUnalignedABICall(temp);
    passArgAndCall(IsCompilingWasm(), temp);
  }

  masm.PopRegsInMask(save);
}
// printf-style helper: formats the message at stub-generation time, then
// emits code that prints the resulting text at runtime.
static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
                      const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  UniqueChars str = JS_vsmprintf(fmt, ap);
  va_end(ap);

  // The formatted string is intentionally leaked (release() with no matching
  // free): the generated code embeds the raw pointer below, so the text must
  // outlive this function. This is debug-only codegen, so the leak is
  // acceptable.
  const char* text = str.release();

  GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
    masm.movePtr(ImmPtr((void*)text, ImmPtr::NoCheckToken()), temp);
    masm.passABIArg(temp);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintText);
    } else {
      masm.callWithABI((void*)PrintText, MoveOp::GENERAL,
                       CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}
// Emits code to print the integer held in `src`. `src` is excluded from the
// temp-register pool so it is not clobbered before being passed.
static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
                          const Register& src) {
  GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
    masm.passABIArg(src);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintI32);
    } else {
      masm.callWithABI((void*)PrintI32, MoveOp::GENERAL,
                       CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}
// Emits code to print the pointer held in `src`.
static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
                        const Register& src) {
  GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
    masm.passABIArg(src);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintPtr);
    } else {
      masm.callWithABI((void*)PrintPtr, MoveOp::GENERAL,
                       CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}
// Emits code to print a 64-bit integer; on 32-bit targets the low and high
// halves are printed as two separate integers.
static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
                        const Register64& src) {
#  if JS_BITS_PER_WORD == 64
  GenPrintf(channel, masm, "i64 ");
  GenPrintIsize(channel, masm, src.reg);
#  else
  GenPrintf(channel, masm, "i64(");
  GenPrintIsize(channel, masm, src.low);
  GenPrintIsize(channel, masm, src.high);
  GenPrintf(channel, masm, ") ");
#  endif
}
// Emits code to print the float32 held in `src`.
static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {
  GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
    masm.passABIArg(src, MoveOp::FLOAT32);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintF32);
    } else {
      masm.callWithABI((void*)PrintF32, MoveOp::GENERAL,
                       CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}
// Emits code to print the double held in `src`.
static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {
  GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
    masm.passABIArg(src, MoveOp::DOUBLE);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintF64);
    } else {
      masm.callWithABI((void*)PrintF64, MoveOp::GENERAL,
                       CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}
#else
// Non-debug builds: the printing helpers compile to no-ops so call sites can
// remain unconditional.
static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
                      const char* fmt, ...) {}
static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
                          const Register& src) {}
static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
                        const Register& src) {}
static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
                        const Register64& src) {}
static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {}
static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {}
#endif
// Records the end offset of the just-emitted stub into `offsets` and reports
// whether assembly succeeded (i.e. the assembler did not run out of memory).
static bool FinishOffsets(MacroAssembler& masm, Offsets* offsets) {
  // Flush pending buffers first so masm.size() reflects the final code size.
  masm.flushBuffer();
  offsets->end = masm.size();
  if (masm.oom()) {
    return false;
  }
  return true;
}
// Asserts, both at stub-generation time and via an emitted runtime check,
// that the stack (Frame + pushed bytes + addBeforeAssert) is aligned to
// `alignment`.
static void AssertStackAlignment(MacroAssembler& masm, uint32_t alignment,
                                 uint32_t addBeforeAssert = 0) {
  MOZ_ASSERT(
      (sizeof(Frame) + masm.framePushed() + addBeforeAssert) % alignment == 0);
  masm.assertStackAlignment(alignment, addBeforeAssert);
}
// Returns the number of bytes of outgoing stack space the given argument
// types consume under the ABI, by walking the ABI iterator to completion.
template <class VectorT>
static unsigned StackArgBytes(const VectorT& args) {
  ABIArgIter<VectorT> iter(args);
  for (; !iter.done(); iter++) {
    // Nothing to do per-argument; the iterator accumulates the stack usage.
  }
  return iter.stackBytesConsumedSoFar();
}
// Copies the wasm call's arguments out of the ExportArg array `argv` into the
// locations (registers or outgoing stack slots) dictated by the wasm ABI.
// `scratch` stages values destined for stack slots.
static void SetupABIArguments(MacroAssembler& masm, const FuncExport& fe,
                              Register argv, Register scratch) {
  for (ABIArgValTypeIter iter(fe.funcType().args()); !iter.done(); iter++) {
    unsigned argOffset = iter.index() * sizeof(ExportArg);
    Address src(argv, argOffset);
    MIRType type = iter.mirType();
    switch (iter->kind()) {
      case ABIArg::GPR:
        if (type == MIRType::Int32) {
          masm.load32(src, iter->gpr());
        } else if (type == MIRType::Int64) {
          masm.load64(src, iter->gpr64());
        } else if (type == MIRType::RefOrNull) {
          masm.loadPtr(src, iter->gpr());
        } else {
          MOZ_CRASH("unknown GPR type");
        }
        break;
#ifdef JS_CODEGEN_REGISTER_PAIR
      case ABIArg::GPR_PAIR:
        if (type == MIRType::Int64) {
          masm.load64(src, iter->gpr64());
        } else {
          MOZ_CRASH("wasm uses hardfp for function calls.");
        }
        break;
#endif
      case ABIArg::FPU: {
        static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
                      "ExportArg must be big enough to store SIMD values");
        switch (type) {
          case MIRType::Double:
            masm.loadDouble(src, iter->fpu());
            break;
          case MIRType::Float32:
            masm.loadFloat32(src, iter->fpu());
            break;
          default:
            MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected FPU type");
            break;
        }
        break;
      }
      case ABIArg::Stack:
        switch (type) {
          case MIRType::Int32:
            masm.load32(src, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(),
                                           iter->offsetFromArgBase()));
            break;
          case MIRType::Int64: {
            RegisterOrSP sp = masm.getStackPointer();
#if JS_BITS_PER_WORD == 32
            // 32-bit targets copy an i64 as two 32-bit halves through the
            // single scratch register.
            masm.load32(LowWord(src), scratch);
            masm.store32(scratch,
                         LowWord(Address(sp, iter->offsetFromArgBase())));
            masm.load32(HighWord(src), scratch);
            masm.store32(scratch,
                         HighWord(Address(sp, iter->offsetFromArgBase())));
#else
            Register64 scratch64(scratch);
            masm.load64(src, scratch64);
            masm.store64(scratch64, Address(sp, iter->offsetFromArgBase()));
#endif
            break;
          }
          case MIRType::RefOrNull:
            masm.loadPtr(src, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(),
                                           iter->offsetFromArgBase()));
            break;
          case MIRType::Double: {
            ScratchDoubleScope fpscratch(masm);
            masm.loadDouble(src, fpscratch);
            masm.storeDouble(fpscratch, Address(masm.getStackPointer(),
                                                iter->offsetFromArgBase()));
            break;
          }
          case MIRType::Float32: {
            ScratchFloat32Scope fpscratch(masm);
            masm.loadFloat32(src, fpscratch);
            masm.storeFloat32(fpscratch, Address(masm.getStackPointer(),
                                                 iter->offsetFromArgBase()));
            break;
          }
          default:
            MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE(
                "unexpected stack arg type");
        }
        break;
      case ABIArg::Uninitialized:
        MOZ_CRASH("Uninitialized ABIArg kind");
    }
  }
}
// Stores the wasm call's result (from the ABI return register for its type)
// into argv[0], canonicalizing NaNs for floating-point results.
static void StoreABIReturn(MacroAssembler& masm, const FuncExport& fe,
                           Register argv) {
  switch (fe.funcType().ret().code()) {
    case ExprType::Void:
      break;
    case ExprType::I32:
      masm.store32(ReturnReg, Address(argv, 0));
      break;
    case ExprType::I64:
      masm.store64(ReturnReg64, Address(argv, 0));
      break;
    case ExprType::F32:
      masm.canonicalizeFloat(ReturnFloat32Reg);
      masm.storeFloat32(ReturnFloat32Reg, Address(argv, 0));
      break;
    case ExprType::F64:
      masm.canonicalizeDouble(ReturnDoubleReg);
      masm.storeDouble(ReturnDoubleReg, Address(argv, 0));
      break;
    case ExprType::Ref:
    case ExprType::AnyRef:
      masm.storePtr(ReturnReg, Address(argv, 0));
      break;
    case ExprType::NullRef:
      MOZ_CRASH("NullRef not expressible");
    case ExprType::Limit:
      MOZ_CRASH("Limit");
  }
}
// Registers the entry stubs must preserve across the wasm call.
#if defined(JS_CODEGEN_ARM)
// lr is excluded from the GPR set because it is saved/restored separately;
// d15/s31 are OR'ed into the float set (presumably not covered by
// NonVolatileMask on ARM -- confirm against the ARM register definitions).
static const LiveRegisterSet NonVolatileRegs =
    LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask &
                                       ~(uint32_t(1) << Registers::lr)),
                    FloatRegisterSet(FloatRegisters::NonVolatileMask |
                                     (1ULL << FloatRegisters::d15) |
                                     (1ULL << FloatRegisters::s31)));
#elif defined(JS_CODEGEN_ARM64)
// As on ARM, lr is excluded; x16 is added, and the non-allocatable float
// registers are preserved too.
static const LiveRegisterSet NonVolatileRegs =
    LiveRegisterSet(GeneralRegisterSet((Registers::NonVolatileMask &
                                        ~(uint32_t(1) << Registers::lr)) |
                                       (uint32_t(1) << Registers::x16)),
                    FloatRegisterSet(FloatRegisters::NonVolatileMask |
                                     FloatRegisters::NonAllocatableMask));
#else
static const LiveRegisterSet NonVolatileRegs =
    LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask),
                    FloatRegisterSet(FloatRegisters::NonVolatileMask));
#endif
// Byte size of the block pushed by PushRegsInMask(NonVolatileRegs); zero on
// the no-codegen target.
#if defined(JS_CODEGEN_NONE)
static const unsigned NonVolatileRegsPushSize = 0;
#else
static const unsigned NonVolatileRegsPushSize =
    NonVolatileRegs.gprs().size() * sizeof(intptr_t) +
    NonVolatileRegs.fpus().getPushSizeInBytes();
#endif
// Number of extra words the interp entry stub pushes beyond the non-volatile
// registers: argv always, plus WasmTlsReg when reftypes are enabled.
// (Fixed: the #else/#endif directives were fused onto the ends of the code
// lines, which is invalid -- preprocessor directives must begin on their own
// line.)
#ifdef ENABLE_WASM_REFTYPES
static const unsigned NumExtraPushed = 2;  // WasmTlsReg and argv
#else
static const unsigned NumExtraPushed = 1;  // argv
#endif
// Size of a single WasmPush slot: ARM64 always reserves 16 bytes so the
// stack pointer stays 16-byte aligned; other targets push one machine word.
#ifdef JS_CODEGEN_ARM64
static const unsigned WasmPushSize = 16;
#else
static const unsigned WasmPushSize = sizeof(void*);
#endif

// Frame size after the non-volatile registers and the NumExtraPushed values
// have been pushed, before the dynamic stack alignment step.
static const unsigned FramePushedBeforeAlign =
    NonVolatileRegsPushSize + NumExtraPushed * WasmPushSize;
// Asserts that the real stack pointer (not the pseudo stack pointer) is the
// active one; only meaningful on ARM64, a no-op elsewhere.
static void AssertExpectedSP(const MacroAssembler& masm) {
#ifdef JS_CODEGEN_ARM64
  MOZ_ASSERT(sp.Is(masm.GetStackPointer64()));
#endif
}
// Pushes `op`, consuming exactly WasmPushSize bytes. On ARM64 a full 16-byte
// slot is reserved so SP keeps its required alignment.
template <class Operand>
static void WasmPush(MacroAssembler& masm, const Operand& op) {
#ifdef JS_CODEGEN_ARM64
  // Reserve the whole (padded) slot, then store the value at its base.
  masm.reserveStack(WasmPushSize);
  masm.storePtr(op, Address(masm.getStackPointer(), 0));
#else
  masm.Push(op);
#endif
}
// Pops into `r`, undoing a matching WasmPush (including ARM64 padding).
static void WasmPop(MacroAssembler& masm, Register r) {
#ifdef JS_CODEGEN_ARM64
  masm.loadPtr(Address(masm.getStackPointer(), 0), r);
  masm.freeStack(WasmPushSize);
#else
  masm.Pop(r);
#endif
}
// Synchronizes ARM64's pseudo stack pointer with the real SP before code
// that follows the JIT ABI runs; a no-op on other targets.
static void MoveSPForJitABI(MacroAssembler& masm) {
#ifdef JS_CODEGEN_ARM64
  masm.moveStackPtrTo(PseudoStackPointer);
#endif
}
// Emits the call to the exported function: lazy stubs receive an absolute
// code pointer (`funcPtr`); eager stubs call by function index (the assert
// ties the two cases together).
static void CallFuncExport(MacroAssembler& masm, const FuncExport& fe,
                           const Maybe<ImmPtr>& funcPtr) {
  MOZ_ASSERT(fe.hasEagerStubs() == !funcPtr);
  if (!funcPtr) {
    masm.call(CallSiteDesc(CallSiteDesc::Func), fe.funcIndex());
  } else {
    masm.call(*funcPtr);
  }
}
// Generates the stub through which C++ enters wasm: receives (argv, tls)
// per the native ABI, marshals argv into wasm ABI locations, calls the
// function, writes the result back to argv[0], and returns a bool success
// flag in ReturnReg (false when FramePointer carries the failure sentinel).
static bool GenerateInterpEntry(MacroAssembler& masm, const FuncExport& fe,
                                const Maybe<ImmPtr>& funcPtr,
                                Offsets* offsets) {
  AssertExpectedSP(masm);

  masm.haltingAlign(CodeAlignment);

  offsets->begin = masm.currentOffset();

  // Save the return address on targets where the call did not push it.
#ifdef JS_USE_LINK_REGISTER
# if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || \
    defined(JS_CODEGEN_MIPS64)
  masm.pushReturnAddress();
# elif defined(JS_CODEGEN_ARM64)
  // WasmPush, unlike pushReturnAddress, updates framePushed().
  WasmPush(masm, lr);
# else
  MOZ_CRASH("Implement this");
# endif
#endif

  // Save all caller non-volatile registers before we clobber them here and
  // in the wasm callee.
  masm.setFramePushed(0);
  masm.PushRegsInMask(NonVolatileRegs);
  MOZ_ASSERT(masm.framePushed() == NonVolatileRegsPushSize);

  Register argv = ABINonArgReturnReg0;
  Register scratch = ABINonArgReturnReg1;

  // Read the two native ABI arguments: argv (ExportArg array) and the TLS
  // pointer; each may arrive in a register or on the stack.
  const unsigned argBase = sizeof(void*) + masm.framePushed();
  ABIArgGenerator abi;
  ABIArg arg;

  arg = abi.next(MIRType::Pointer);
  if (arg.kind() == ABIArg::GPR) {
    masm.movePtr(arg.gpr(), argv);
  } else {
    masm.loadPtr(
        Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()),
        argv);
  }

  arg = abi.next(MIRType::Pointer);
  if (arg.kind() == ABIArg::GPR) {
    masm.movePtr(arg.gpr(), WasmTlsReg);
  } else {
    masm.loadPtr(
        Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()),
        WasmTlsReg);
  }

  // Preserve the values we must restore after the call (argv for the return
  // store, and WasmTlsReg when reftypes are enabled).
#ifdef ENABLE_WASM_REFTYPES
  WasmPush(masm, WasmTlsReg);
#endif

  WasmPush(masm, argv);

  MOZ_ASSERT(masm.framePushed() == FramePushedBeforeAlign);
  masm.setFramePushed(0);

  // Dynamically align the stack for the wasm ABI, saving the old SP so it
  // can be restored afterwards. ARM64's SP is statically known-aligned.
#ifdef JS_CODEGEN_ARM64
  static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
#else
  masm.moveStackPtrTo(scratch);
  masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));
  masm.Push(scratch);
#endif

  // Reserve outgoing argument space and marshal the arguments.
  unsigned argDecrement =
      StackDecrementForCall(WasmStackAlignment, masm.framePushed(),
                            StackArgBytes(fe.funcType().args()));
  masm.reserveStack(argDecrement);

  SetupABIArguments(masm, fe, argv, scratch);

  // Zero FramePointer so a failure sentinel written by the callee is
  // distinguishable below; then load the pinned registers and call.
  masm.movePtr(ImmWord(0), FramePointer);
  masm.loadWasmPinnedRegsFromTls();

  masm.assertStackAlignment(WasmStackAlignment);
  CallFuncExport(masm, fe, funcPtr);
  masm.assertStackAlignment(WasmStackAlignment);

  masm.freeStack(argDecrement);

  // Undo the dynamic alignment.
#ifdef JS_CODEGEN_ARM64
  static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
#else
  masm.PopStackPtr();
#endif
  MOZ_ASSERT(masm.framePushed() == 0);
  masm.setFramePushed(FramePushedBeforeAlign);

  WasmPop(masm, argv);
#ifdef ENABLE_WASM_REFTYPES
  WasmPop(masm, WasmTlsReg);
#endif

  StoreABIReturn(masm, fe, argv);

  // FramePointer == 0 means success; otherwise it must hold the FailFP
  // sentinel (checked in DEBUG builds). Translate into a bool in ReturnReg.
  Label success, join;
  masm.branchTestPtr(Assembler::Zero, FramePointer, FramePointer, &success);
#ifdef DEBUG
  Label ok;
  masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &ok);
  masm.breakpoint();
  masm.bind(&ok);
#endif
  masm.move32(Imm32(false), ReturnReg);
  masm.jump(&join);
  masm.bind(&success);
  masm.move32(Imm32(true), ReturnReg);
  masm.bind(&join);

  // Restore clobbered registers and return to the C++ caller.
  masm.PopRegsInMask(NonVolatileRegs);
  MOZ_ASSERT(masm.framePushed() == 0);

#if defined(JS_CODEGEN_ARM64)
  // Pop lr (pushed via WasmPush above) and return through it.
  masm.setFramePushed(WasmPushSize);
  WasmPop(masm, lr);
  masm.abiret();
#else
  masm.ret();
#endif

  return FinishOffsets(masm, offsets);
}
// Scratch registers for the JIT entry stubs below. A boxed Value fits in one
// register on 64-bit (punboxed) targets and needs a pair on 32-bit targets.
#ifdef JS_PUNBOX64
static const ValueOperand ScratchValIonEntry = ValueOperand(ABINonArgReg0);
#else
static const ValueOperand ScratchValIonEntry =
    ValueOperand(ABINonArgReg0, ABINonArgReg1);
#endif
static const Register ScratchIonEntry = ABINonArgReg2;
// Calls a runtime function identified by a SymbolicAddress. When
// `isAbsolute` (callers pass !fe.hasEagerStubs()), the resolved absolute
// target is embedded directly instead of the patchable symbolic form.
static void CallSymbolicAddress(MacroAssembler& masm, bool isAbsolute,
                                SymbolicAddress sym) {
  if (!isAbsolute) {
    masm.call(sym);
    return;
  }
  masm.call(ImmPtr(SymbolicAddressTarget(sym), ImmPtr::NoCheckToken()));
}
// Loads the TlsData* into WasmTlsReg by following the callee token in the
// JitFrameLayout (at `frameSize` above SP) to the JSFunction and unboxing
// its WASM_TLSDATA_SLOT extended slot.
static void GenerateJitEntryLoadTls(MacroAssembler& masm, unsigned frameSize) {
  AssertExpectedSP(masm);

  // ScratchIonEntry := callee token => JSFunction*
  unsigned offset = frameSize + JitFrameLayout::offsetOfCalleeToken();
  masm.loadFunctionFromCalleeToken(Address(masm.getStackPointer(), offset),
                                   ScratchIonEntry);

  // ScratchValIonEntry := function->getExtendedSlot(WASM_TLSDATA_SLOT)
  offset = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_TLSDATA_SLOT);
  masm.loadValue(Address(ScratchIonEntry, offset), ScratchValIonEntry);

  // WasmTlsReg := unboxed private pointer (TlsData*)
  masm.unboxPrivate(ScratchValIonEntry, WasmTlsReg);
}
// Throw path for the JIT entry: tears down the stub frame, installs a fake
// exit frame (so frame iteration works), and jumps to the instance's JS JIT
// exception handler.
static void GenerateJitEntryThrow(MacroAssembler& masm, unsigned frameSize) {
  AssertExpectedSP(masm);

  MOZ_ASSERT(masm.framePushed() == frameSize);

  GenerateJitEntryLoadTls(masm, frameSize);

  masm.freeStack(frameSize);
  MoveSPForJitABI(masm);

  masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, cx)), ScratchIonEntry);
  masm.enterFakeExitFrameForWasm(ScratchIonEntry, ScratchIonEntry,
                                 ExitFrameType::WasmGenericJitEntry);

  // Jump to instance->jsJitExceptionHandler.
  masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, instance)),
               ScratchIonEntry);
  masm.loadPtr(
      Address(ScratchIonEntry, Instance::offsetOfJSJitExceptionHandler()),
      ScratchIonEntry);
  masm.jump(ScratchIonEntry);
}
// Generates the stub through which JIT-compiled JS calls the wasm function
// `fe`: coerces boxed JS Values in-place to the wasm argument types (with an
// out-of-line slow path), unboxes them into ABI locations, calls the
// function, and boxes the result for the JS caller.
//
// Fixed here: the SOURCE was mis-encoded -- every `&not...` label argument
// had been corrupted to `¬...` (e.g. `¬Double`), which is not valid C++;
// the `&notDouble`, `&notNullOrUndefined`, `&notInt32`, `&notUndefined` and
// `&notNull` tokens are restored.
static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex,
                             const FuncExport& fe, const Maybe<ImmPtr>& funcPtr,
                             Offsets* offsets) {
  AssertExpectedSP(masm);

  RegisterOrSP sp = masm.getStackPointer();

  GenerateJitEntryPrologue(masm, offsets);

  // The frame must be big enough for both the normal wasm stack arguments
  // and the out-of-line coercion call's three arguments.
  unsigned normalBytesNeeded = StackArgBytes(fe.funcType().args());

  MIRTypeVector coerceArgTypes;
  MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Int32));
  MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
  MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
  unsigned oolBytesNeeded = StackArgBytes(coerceArgTypes);

  unsigned bytesNeeded = Max(normalBytesNeeded, oolBytesNeeded);

  unsigned frameSize = StackDecrementForCall(WasmStackAlignment,
                                             masm.framePushed(), bytesNeeded);

  masm.reserveStack(frameSize);

  GenerateJitEntryLoadTls(masm, frameSize);

  // i64 is not representable as a JS Value: report and throw.
  if (fe.funcType().hasI64ArgOrRet()) {
    CallSymbolicAddress(masm, !fe.hasEagerStubs(),
                        SymbolicAddress::ReportInt64JSCall);
    GenerateJitEntryThrow(masm, frameSize);
    return FinishOffsets(masm, offsets);
  }

  FloatRegister scratchF = ABINonArgDoubleReg;
  Register scratchG = ScratchIonEntry;
  ValueOperand scratchV = ScratchValIonEntry;

  GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
            fe.funcIndex());

  // First pass: coerce each boxed actual argument, in place, to the wasm
  // type. Anything not handled inline falls through to the oolCall slow
  // path (CoerceInPlace_JitEntry).
  Label oolCall;
  for (size_t i = 0; i < fe.funcType().args().length(); i++) {
    unsigned jitArgOffset = frameSize + JitFrameLayout::offsetOfActualArg(i);
    Address jitArgAddr(sp, jitArgOffset);
    masm.loadValue(jitArgAddr, scratchV);

    Label next;
    switch (fe.funcType().args()[i].code()) {
      case ValType::I32: {
        ScratchTagScope tag(masm, scratchV);
        masm.splitTagForTest(scratchV, tag);

        // int32 inputs need no work.
        masm.branchTestInt32(Assembler::Equal, tag, &next);

        // Doubles: truncate (ool if not exactly representable) and store
        // back as int32.
        Label storeBack, notDouble;
        masm.branchTestDouble(Assembler::NotEqual, tag, &notDouble);
        {
          ScratchTagScopeRelease _(&tag);
          masm.unboxDouble(scratchV, scratchF);
          masm.branchTruncateDoubleMaybeModUint32(scratchF, scratchG, &oolCall);
          masm.jump(&storeBack);
        }
        masm.bind(&notDouble);

        // null/undefined coerce to 0.
        Label nullOrUndefined, notNullOrUndefined;
        masm.branchTestUndefined(Assembler::Equal, tag, &nullOrUndefined);
        masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
        masm.bind(&nullOrUndefined);
        {
          ScratchTagScopeRelease _(&tag);
          masm.storeValue(Int32Value(0), jitArgAddr);
        }
        masm.jump(&next);
        masm.bind(&notNullOrUndefined);

        // Booleans coerce to 0/1; anything else goes out of line.
        masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
        masm.unboxBoolean(scratchV, scratchG);

        masm.bind(&storeBack);
        {
          ScratchTagScopeRelease _(&tag);
          masm.storeValue(JSVAL_TYPE_INT32, scratchG, jitArgAddr);
        }
        break;
      }
      case ValType::F32:
      case ValType::F64: {
        ScratchTagScope tag(masm, scratchV);
        masm.splitTagForTest(scratchV, tag);

        // Doubles need no work.
        masm.branchTestDouble(Assembler::Equal, tag, &next);

        // int32 converts to double.
        Label storeBack, notInt32;
        {
          ScratchTagScopeRelease _(&tag);
          masm.branchTestInt32(Assembler::NotEqual, scratchV, &notInt32);
          masm.int32ValueToDouble(scratchV, scratchF);
          masm.jump(&storeBack);
        }
        masm.bind(&notInt32);

        // undefined coerces to NaN.
        Label notUndefined;
        masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
        {
          ScratchTagScopeRelease _(&tag);
          masm.storeValue(DoubleValue(JS::GenericNaN()), jitArgAddr);
          masm.jump(&next);
        }
        masm.bind(&notUndefined);

        // null coerces to 0.0.
        Label notNull;
        masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
        {
          ScratchTagScopeRelease _(&tag);
          masm.storeValue(DoubleValue(0.), jitArgAddr);
        }
        masm.jump(&next);
        masm.bind(&notNull);

        // Booleans coerce to 0.0/1.0; anything else goes out of line.
        masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
        masm.boolValueToDouble(scratchV, scratchF);

        masm.bind(&storeBack);
        masm.boxDouble(scratchF, jitArgAddr);
        break;
      }
      default: {
        MOZ_CRASH("unexpected argument type when calling from the jit");
      }
    }
    masm.nopAlign(CodeAlignment);
    masm.bind(&next);
  }

  // Second pass (rejoined after an ool coercion): unbox the now-typed
  // Values into the wasm ABI registers / stack slots.
  Label rejoinBeforeCall;
  masm.bind(&rejoinBeforeCall);

  for (ABIArgValTypeIter iter(fe.funcType().args()); !iter.done(); iter++) {
    unsigned jitArgOffset =
        frameSize + JitFrameLayout::offsetOfActualArg(iter.index());
    Address argv(sp, jitArgOffset);
    bool isStackArg = iter->kind() == ABIArg::Stack;
    switch (iter.mirType()) {
      case MIRType::Int32: {
        Register target = isStackArg ? ScratchIonEntry : iter->gpr();
        masm.unboxInt32(argv, target);
        GenPrintIsize(DebugChannel::Function, masm, target);
        if (isStackArg) {
          masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
        }
        break;
      }
      case MIRType::Float32: {
        // Boxed floats are stored as doubles; narrow after unboxing.
        FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
        masm.unboxDouble(argv, ABINonArgDoubleReg);
        masm.convertDoubleToFloat32(ABINonArgDoubleReg, target);
        GenPrintF32(DebugChannel::Function, masm, target.asSingle());
        if (isStackArg) {
          masm.storeFloat32(target, Address(sp, iter->offsetFromArgBase()));
        }
        break;
      }
      case MIRType::Double: {
        FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
        masm.unboxDouble(argv, target);
        GenPrintF64(DebugChannel::Function, masm, target);
        if (isStackArg) {
          masm.storeDouble(target, Address(sp, iter->offsetFromArgBase()));
        }
        break;
      }
      default: {
        MOZ_CRASH("unexpected input argument when calling from jit");
      }
    }
  }

  GenPrintf(DebugChannel::Function, masm, "\n");

  masm.loadWasmPinnedRegsFromTls();

  masm.assertStackAlignment(WasmStackAlignment);
  CallFuncExport(masm, fe, funcPtr);
  masm.assertStackAlignment(WasmStackAlignment);

  // FramePointer == FailFP signals a wasm failure.
  Label exception;
  masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &exception);

  masm.freeStack(frameSize);

  // Box the result into JSReturnOperand for the JS caller.
  GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
            fe.funcIndex());
  switch (fe.funcType().ret().code()) {
    case ExprType::Void:
      GenPrintf(DebugChannel::Function, masm, "void");
      masm.moveValue(UndefinedValue(), JSReturnOperand);
      break;
    case ExprType::I32:
      GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
      masm.boxNonDouble(JSVAL_TYPE_INT32, ReturnReg, JSReturnOperand);
      break;
    case ExprType::F32: {
      masm.canonicalizeFloat(ReturnFloat32Reg);
      masm.convertFloat32ToDouble(ReturnFloat32Reg, ReturnDoubleReg);
      GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
      ScratchDoubleScope fpscratch(masm);
      masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
      break;
    }
    case ExprType::F64: {
      masm.canonicalizeDouble(ReturnDoubleReg);
      GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
      ScratchDoubleScope fpscratch(masm);
      masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
      break;
    }
    case ExprType::Ref:
      MOZ_CRASH("return ref in jitentry NYI");
      break;
    case ExprType::AnyRef:
      MOZ_CRASH("return anyref in jitentry NYI");
      break;
    case ExprType::I64:
      MOZ_CRASH("unexpected return type when calling from ion to wasm");
    case ExprType::NullRef:
      MOZ_CRASH("NullRef not expressible");
    case ExprType::Limit:
      MOZ_CRASH("Limit");
  }

  GenPrintf(DebugChannel::Function, masm, "\n");
  MOZ_ASSERT(masm.framePushed() == 0);
#ifdef JS_CODEGEN_ARM64
  // Pop the return address pushed by the prologue and return through it.
  masm.loadPtr(Address(sp, 0), lr);
  masm.addToStackPtr(Imm32(8));
  masm.moveStackPtrTo(PseudoStackPointer);
  masm.abiret();
#else
  masm.ret();
#endif

  // Out-of-line coercion path: call
  // CoerceInPlace_JitEntry(funcExportIndex, tls, argv) per the native ABI,
  // then retry the unboxing pass on success or throw on failure.
  if (fe.funcType().args().length()) {
    masm.bind(&oolCall);
    masm.setFramePushed(frameSize);

    ABIArgMIRTypeIter argsIter(coerceArgTypes);

    // argument 0: function export index.
    if (argsIter->kind() == ABIArg::GPR) {
      masm.movePtr(ImmWord(funcExportIndex), argsIter->gpr());
    } else {
      masm.storePtr(ImmWord(funcExportIndex),
                    Address(sp, argsIter->offsetFromArgBase()));
    }
    argsIter++;

    // argument 1: tlsData.
    if (argsIter->kind() == ABIArg::GPR) {
      masm.movePtr(WasmTlsReg, argsIter->gpr());
    } else {
      masm.storePtr(WasmTlsReg, Address(sp, argsIter->offsetFromArgBase()));
    }
    argsIter++;

    // argument 2: effective address of the first actual argument Value.
    Address argv(sp, masm.framePushed() + JitFrameLayout::offsetOfActualArg(0));
    if (argsIter->kind() == ABIArg::GPR) {
      masm.computeEffectiveAddress(argv, argsIter->gpr());
    } else {
      masm.computeEffectiveAddress(argv, ScratchIonEntry);
      masm.storePtr(ScratchIonEntry,
                    Address(sp, argsIter->offsetFromArgBase()));
    }
    argsIter++;
    MOZ_ASSERT(argsIter.done());

    masm.assertStackAlignment(ABIStackAlignment);
    CallSymbolicAddress(masm, !fe.hasEagerStubs(),
                        SymbolicAddress::CoerceInPlace_JitEntry);
    masm.assertStackAlignment(ABIStackAlignment);

    masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg,
                      &rejoinBeforeCall);
  }

  // Exception path.
  masm.bind(&exception);
  masm.setFramePushed(frameSize);
  GenerateJitEntryThrow(masm, frameSize);

  return FinishOffsets(masm, offsets);
}
// Generates a direct (fast-path) call from Ion-compiled JS into the wasm
// function `fe`, bypassing the generic JIT entry stub: builds a fake exit
// frame, places the stack arguments, calls the function's normal entry, and
// leaves the result in the register Ion expects for the type.
//
// Fixed here: in the Tag::Address / MIRType::Double case the debug print of
// the scratch register was emitted BEFORE the load from `src`, so the
// (garbage) pre-load contents were printed; the load now precedes the print,
// matching the Float32 and Int32 cases.
void wasm::GenerateDirectCallFromJit(MacroAssembler& masm, const FuncExport& fe,
                                     const Instance& inst,
                                     const JitCallStackArgVector& stackArgs,
                                     bool profilingEnabled, Register scratch,
                                     uint32_t* callOffset) {
  MOZ_ASSERT(!IsCompilingWasm());

  size_t framePushedAtStart = masm.framePushed();

  if (profilingEnabled) {
    // FramePointer is clobbered below (fake exit frame + failure sentinel),
    // so preserve it explicitly.
    masm.Push(FramePointer);
  } else {
#ifdef DEBUG
    // Without profiling, FramePointer should be a plain allocatable register
    // that the JIT caller already handles.
    AllocatableRegisterSet set(RegisterSet::All());
    TakeJitRegisters(/* profiling = */ false, &set);
    MOZ_ASSERT(set.has(FramePointer),
               "replace the whole if branch by the then body when this fails");
#endif
  }

  // Build the fake exit frame so stack iteration can find this call.
  *callOffset = masm.buildFakeExitFrame(scratch);
  masm.loadJSContext(scratch);

  masm.moveStackPtrTo(FramePointer);
  masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::DirectWasmJitCall);
  masm.orPtr(Imm32(ExitOrJitEntryFPTag), FramePointer);

  // Reserve aligned space for the outgoing stack arguments.
  unsigned bytesNeeded = StackArgBytes(fe.funcType().args());
  bytesNeeded = StackDecrementForCall(WasmStackAlignment, masm.framePushed(),
                                      bytesNeeded);
  if (bytesNeeded) {
    masm.reserveStack(bytesNeeded);
  }

  GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
            fe.funcIndex());

  for (ABIArgValTypeIter iter(fe.funcType().args()); !iter.done(); iter++) {
    MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != scratch);
    MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != FramePointer);
    if (iter->kind() != ABIArg::Stack) {
      // Register arguments were already placed by the JIT caller; only log.
      switch (iter.mirType()) {
        case MIRType::Int32:
          GenPrintIsize(DebugChannel::Function, masm, iter->gpr());
          break;
        case MIRType::Float32:
          GenPrintF32(DebugChannel::Function, masm, iter->fpu());
          break;
        case MIRType::Double:
          GenPrintF64(DebugChannel::Function, masm, iter->fpu());
          break;
        default:
          MOZ_CRASH("ion to wasm fast path can only handle i32/f32/f64");
      }
      continue;
    }

    Address dst(masm.getStackPointer(), iter->offsetFromArgBase());

    const JitCallStackArg& stackArg = stackArgs[iter.index()];
    switch (stackArg.tag()) {
      case JitCallStackArg::Tag::Imm32:
        GenPrintf(DebugChannel::Function, masm, "%d ", stackArg.imm32());
        masm.storePtr(ImmWord(stackArg.imm32()), dst);
        break;
      case JitCallStackArg::Tag::GPR:
        MOZ_ASSERT(stackArg.gpr() != scratch);
        MOZ_ASSERT(stackArg.gpr() != FramePointer);
        GenPrintIsize(DebugChannel::Function, masm, stackArg.gpr());
        masm.storePtr(stackArg.gpr(), dst);
        break;
      case JitCallStackArg::Tag::FPU:
        switch (iter.mirType()) {
          case MIRType::Double:
            GenPrintF64(DebugChannel::Function, masm, stackArg.fpu());
            masm.storeDouble(stackArg.fpu(), dst);
            break;
          case MIRType::Float32:
            GenPrintF32(DebugChannel::Function, masm, stackArg.fpu());
            masm.storeFloat32(stackArg.fpu(), dst);
            break;
          default:
            MOZ_CRASH(
                "unexpected MIR type for a float register in wasm fast call");
        }
        break;
      case JitCallStackArg::Tag::Address: {
        // The source offset was relative to SP at the start of this stub;
        // adjust for what has been pushed since.
        Address src = stackArg.addr();
        src.offset += masm.framePushed() - framePushedAtStart;
        switch (iter.mirType()) {
          case MIRType::Double: {
            ScratchDoubleScope fpscratch(masm);
            // Bug fix: load before printing (the print used to come first
            // and showed the scratch register's stale contents).
            masm.loadDouble(src, fpscratch);
            GenPrintF64(DebugChannel::Function, masm, fpscratch);
            masm.storeDouble(fpscratch, dst);
            break;
          }
          case MIRType::Float32: {
            ScratchFloat32Scope fpscratch(masm);
            masm.loadFloat32(src, fpscratch);
            GenPrintF32(DebugChannel::Function, masm, fpscratch);
            masm.storeFloat32(fpscratch, dst);
            break;
          }
          case MIRType::Int32:
            masm.loadPtr(src, scratch);
            GenPrintIsize(DebugChannel::Function, masm, scratch);
            masm.storePtr(scratch, dst);
            break;
          default:
            MOZ_CRASH("unexpected MIR type for a stack slot in wasm fast call");
        }
        break;
      }
      case JitCallStackArg::Tag::Undefined: {
        MOZ_CRASH("can't happen because of arg.kind() check");
      }
    }
  }

  GenPrintf(DebugChannel::Function, masm, "\n");

  // Load tls and the pinned registers; WasmTlsReg is live from here on.
  masm.movePtr(ImmPtr(inst.tlsData()), WasmTlsReg);
  masm.loadWasmPinnedRegsFromTls();

  // Resolve the callee's normal entry from its best-tier code.
  const CodeTier& codeTier = inst.code().codeTier(inst.code().bestTier());
  const MetadataTier& metadata = codeTier.metadata();
  const CodeRange& codeRange = metadata.codeRange(fe);
  void* callee = codeTier.segment().base() + codeRange.funcNormalEntry();

  masm.assertStackAlignment(WasmStackAlignment);
  masm.callJit(ImmPtr(callee));
#ifdef JS_CODEGEN_ARM64
  // Re-sync the pseudo stack pointer after the call.
  masm.initPseudoStackPtr();
#endif
  masm.assertStackAlignment(WasmStackAlignment);

  // FramePointer == FailFP signals a wasm failure: branch to the JIT's
  // exception label.
  masm.branchPtr(Assembler::Equal, FramePointer, Imm32(wasm::FailFP),
                 masm.exceptionLabel());

  GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
            fe.funcIndex());
  switch (fe.funcType().ret().code()) {
    case wasm::ExprType::Void:
      masm.moveValue(UndefinedValue(), JSReturnOperand);
      GenPrintf(DebugChannel::Function, masm, "void");
      break;
    case wasm::ExprType::I32:
      // Result already sits in ReturnReg where Ion expects it.
      GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
      break;
    case wasm::ExprType::F32:
      masm.canonicalizeFloat(ReturnFloat32Reg);
      GenPrintF32(DebugChannel::Function, masm, ReturnFloat32Reg);
      break;
    case wasm::ExprType::F64:
      masm.canonicalizeDouble(ReturnDoubleReg);
      GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
      break;
    case wasm::ExprType::Ref:
    case wasm::ExprType::AnyRef:
    case wasm::ExprType::I64:
      MOZ_CRASH("unexpected return type when calling from ion to wasm");
    case wasm::ExprType::NullRef:
      MOZ_CRASH("NullRef not expressible");
    case wasm::ExprType::Limit:
      MOZ_CRASH("Limit");
  }
  GenPrintf(DebugChannel::Function, masm, "\n");

  // Free the argument space plus the exit frame.
  masm.leaveExitFrame(bytesNeeded + ExitFrameLayout::Size());

  if (profilingEnabled) {
    masm.Pop(FramePointer);
  }

  MOZ_ASSERT(framePushedAtStart == masm.framePushed());
}
// Copies a value of the given MIRType from one stack address to another,
// routing through `scratch` (or a scratch float register for FP types), and
// logging the value on the Import debug channel.
static void StackCopy(MacroAssembler& masm, MIRType type, Register scratch,
                      Address src, Address dst) {
  if (type == MIRType::Int32) {
    masm.load32(src, scratch);
    GenPrintIsize(DebugChannel::Import, masm, scratch);
    masm.store32(scratch, dst);
  } else if (type == MIRType::Int64) {
#if JS_BITS_PER_WORD == 32
    // 32-bit targets move an i64 as two 32-bit halves through the single
    // scratch register.
    GenPrintf(DebugChannel::Import, masm, "i64(");
    masm.load32(LowWord(src), scratch);
    GenPrintIsize(DebugChannel::Import, masm, scratch);
    masm.store32(scratch, LowWord(dst));
    masm.load32(HighWord(src), scratch);
    GenPrintIsize(DebugChannel::Import, masm, scratch);
    masm.store32(scratch, HighWord(dst));
    GenPrintf(DebugChannel::Import, masm, ") ");
#else
    Register64 scratch64(scratch);
    masm.load64(src, scratch64);
    GenPrintIsize(DebugChannel::Import, masm, scratch);
    masm.store64(scratch64, dst);
#endif
  } else if (type == MIRType::RefOrNull) {
    masm.loadPtr(src, scratch);
    GenPrintPtr(DebugChannel::Import, masm, scratch);
    masm.storePtr(scratch, dst);
  } else if (type == MIRType::Float32) {
    ScratchFloat32Scope fpscratch(masm);
    masm.loadFloat32(src, fpscratch);
    GenPrintF32(DebugChannel::Import, masm, fpscratch);
    masm.storeFloat32(fpscratch, dst);
  } else if (type == MIRType::Double) {
    ScratchDoubleScope fpscratch(masm);
    masm.loadDouble(src, fpscratch);
    GenPrintF64(DebugChannel::Import, masm, fpscratch);
    masm.storeDouble(fpscratch, dst);
  } else {
    MOZ_CRASH("StackCopy: unexpected type");
  }
}
// When true, FillArgumentArray boxes each argument as a JS Value; when
// false, it stores the raw machine representation.
typedef bool ToValue;

// Copies the wasm ABI arguments (from registers and caller stack slots) into
// a contiguous array of Value-sized slots at `argOffset` on the current
// frame, either boxed (toValue) or raw.
static void FillArgumentArray(MacroAssembler& masm, unsigned funcImportIndex,
                              const ValTypeVector& args, unsigned argOffset,
                              unsigned offsetToCallerStackArgs,
                              Register scratch, ToValue toValue) {
  GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; arguments ",
            funcImportIndex);
  for (ABIArgValTypeIter i(args); !i.done(); i++) {
    Address dst(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));

    MIRType type = i.mirType();
    switch (i->kind()) {
      case ABIArg::GPR:
        if (type == MIRType::Int32) {
          GenPrintIsize(DebugChannel::Import, masm, i->gpr());
          if (toValue) {
            masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dst);
          } else {
            masm.store32(i->gpr(), dst);
          }
        } else if (type == MIRType::Int64) {
          // An i64 cannot be boxed as a JS Value here; emit a breakpoint.
          if (toValue) {
            masm.breakpoint();
          } else {
            GenPrintI64(DebugChannel::Import, masm, i->gpr64());
            masm.store64(i->gpr64(), dst);
          }
        } else if (type == MIRType::RefOrNull) {
          if (toValue) {
            MOZ_CRASH("generating a jit exit for anyref NYI");
          }
          GenPrintPtr(DebugChannel::Import, masm, i->gpr());
          masm.storePtr(i->gpr(), dst);
        } else {
          MOZ_CRASH("FillArgumentArray, ABIArg::GPR: unexpected type");
        }
        break;
#ifdef JS_CODEGEN_REGISTER_PAIR
      case ABIArg::GPR_PAIR:
        if (type == MIRType::Int64) {
          GenPrintI64(DebugChannel::Import, masm, i->gpr64());
          masm.store64(i->gpr64(), dst);
        } else {
          MOZ_CRASH("wasm uses hardfp for function calls.");
        }
        break;
#endif
      case ABIArg::FPU: {
        MOZ_ASSERT(IsFloatingPointType(type));
        FloatRegister srcReg = i->fpu();
        if (type == MIRType::Double) {
          if (toValue) {
            // Canonicalize a copy so the source register is not mutated.
            ScratchDoubleScope fpscratch(masm);
            masm.moveDouble(srcReg, fpscratch);
            masm.canonicalizeDouble(fpscratch);
            GenPrintF64(DebugChannel::Import, masm, fpscratch);
            masm.storeDouble(fpscratch, dst);
          } else {
            GenPrintF64(DebugChannel::Import, masm, srcReg);
            masm.storeDouble(srcReg, dst);
          }
        } else {
          MOZ_ASSERT(type == MIRType::Float32);
          if (toValue) {
            // Boxed numbers are doubles, so widen the float32 first.
            ScratchDoubleScope fpscratch(masm);
            masm.convertFloat32ToDouble(srcReg, fpscratch);
            masm.canonicalizeDouble(fpscratch);
            GenPrintF64(DebugChannel::Import, masm, fpscratch);
            masm.storeDouble(fpscratch, dst);
          } else {
            GenPrintF32(DebugChannel::Import, masm, srcReg);
            masm.storeFloat32(srcReg, dst);
          }
        }
        break;
      }
      case ABIArg::Stack: {
        Address src(masm.getStackPointer(),
                    offsetToCallerStackArgs + i->offsetFromArgBase());
        if (toValue) {
          if (type == MIRType::Int32) {
            masm.load32(src, scratch);
            GenPrintIsize(DebugChannel::Import, masm, scratch);
            masm.storeValue(JSVAL_TYPE_INT32, scratch, dst);
          } else if (type == MIRType::Int64) {
            // An i64 cannot be boxed as a JS Value here; emit a breakpoint.
            masm.breakpoint();
          } else if (type == MIRType::RefOrNull) {
            MOZ_CRASH("generating a jit exit for anyref NYI");
          } else if (IsFloatingPointType(type)) {
            ScratchDoubleScope dscratch(masm);
            FloatRegister fscratch = dscratch.asSingle();
            if (type == MIRType::Float32) {
              masm.loadFloat32(src, fscratch);
              masm.convertFloat32ToDouble(fscratch, dscratch);
            } else {
              masm.loadDouble(src, dscratch);
            }
            masm.canonicalizeDouble(dscratch);
            GenPrintF64(DebugChannel::Import, masm, dscratch);
            masm.storeDouble(dscratch, dst);
          } else {
            MOZ_CRASH("FillArgumentArray, ABIArg::Stack: unexpected type");
          }
        } else {
          StackCopy(masm, type, scratch, src, dst);
        }
        break;
      }
      case ABIArg::Uninitialized:
        MOZ_CRASH("Uninitialized ABIArg kind");
    }
  }
  GenPrintf(DebugChannel::Import, masm, "\n");
}
// Generate a stub with the standard intra-wasm call ABI that forwards its
// incoming stack arguments into a new frame and calls the import |fi|
// through the import table (CalleeDesc::import). After the call, the wasm
// TLS register, pinned registers, and realm are restored.
static bool GenerateImportFunction(jit::MacroAssembler& masm,
                                   const FuncImport& fi,
                                   FuncTypeIdDesc funcTypeId,
                                   FuncOffsets* offsets) {
  AssertExpectedSP(masm);

  GenerateFunctionPrologue(masm, funcTypeId, Nothing(), offsets);

  MOZ_ASSERT(masm.framePushed() == 0);
  unsigned framePushed =
      StackDecrementForCall(WasmStackAlignment,
                            sizeof(Frame),  // pushed by prologue
                            StackArgBytes(fi.funcType().args()));
  masm.wasmReserveStackChecked(framePushed, BytecodeOffset(0));
  MOZ_ASSERT(masm.framePushed() == framePushed);

  // Copy our frame's stack arguments to the callee frame's stack arguments.
  // Register arguments stay where they are; only ABIArg::Stack arguments
  // need to be moved.
  Register scratch = ABINonArgReg0;
  unsigned offsetToCallerStackArgs = sizeof(Frame) + masm.framePushed();
  ABIArgValTypeIter i(fi.funcType().args());
  for (; !i.done(); i++) {
    if (i->kind() != ABIArg::Stack) {
      continue;
    }

    Address src(masm.getStackPointer(),
                offsetToCallerStackArgs + i->offsetFromArgBase());
    Address dst(masm.getStackPointer(), i->offsetFromArgBase());
    GenPrintf(DebugChannel::Import, masm,
              "calling exotic import function with arguments: ");
    StackCopy(masm, i.mirType(), scratch, src, dst);
    GenPrintf(DebugChannel::Import, masm, "\n");
  }

  // Call the import exit stub.
  CallSiteDesc desc(CallSiteDesc::Dynamic);
  MoveSPForJitABI(masm);
  masm.wasmCallImport(desc, CalleeDesc::import(fi.tlsDataOffset()));

  // Restore the TLS register and pinned registers, per wasm function ABI,
  // and switch back to this instance's realm.
  masm.loadWasmTlsRegFromFrame();
  masm.loadWasmPinnedRegsFromTls();
  masm.switchToWasmTlsRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);

  GenerateFunctionEpilogue(masm, framePushed, offsets);
  return FinishOffsets(masm, offsets);
}
static const unsigned STUBS_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
bool wasm::GenerateImportFunctions(const ModuleEnvironment& env,
const FuncImportVector& imports,
CompiledCode* code) {
LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE);
TempAllocator alloc(&lifo);
WasmMacroAssembler masm(alloc);
for (uint32_t funcIndex = 0; funcIndex < imports.length(); funcIndex++) {
const FuncImport& fi = imports[funcIndex];
FuncOffsets offsets;
if (!GenerateImportFunction(masm, fi, env.funcTypes[funcIndex]->id,
&offsets)) {
return false;
}
if (!code->codeRanges.emplaceBack(funcIndex, 0,
offsets)) {
return false;
}
}
masm.finish();
if (masm.oom()) {
return false;
}
return code->swap(masm);
}
// Generate a stub that is called via the internal ABI derived from the
// signature of the import. It boxes the wasm arguments into an argv array
// on the stack and calls the C++ import entry point (CallImport_*) with
// (instance, funcImportIndex, argc, argv). The result, when there is one,
// is read back out of argv[0].
static bool GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi,
                                     uint32_t funcImportIndex,
                                     Label* throwLabel,
                                     CallableOffsets* offsets) {
  AssertExpectedSP(masm);
  masm.setFramePushed(0);

  // Argument types for the C++ entry point: instance pointer, import index,
  // argument count, and the argv array pointer.
  static const MIRType typeArray[] = {MIRType::Pointer, MIRType::Pointer,
                                      MIRType::Int32, MIRType::Pointer};
  MIRTypeVector invokeArgTypes;
  MOZ_ALWAYS_TRUE(invokeArgTypes.append(typeArray, ArrayLength(typeArray)));

  // The argv array starts after the native stack arguments, aligned for
  // doubles; it must hold at least one Value slot since the return value is
  // written back into it.
  unsigned argOffset =
      AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double));
  unsigned argBytes =
      Max<size_t>(1, fi.funcType().args().length()) * sizeof(Value);
  unsigned framePushed =
      StackDecrementForCall(ABIStackAlignment,
                            sizeof(Frame),  // pushed by prologue
                            argOffset + argBytes);

  GenerateExitPrologue(masm, framePushed, ExitReason::Fixed::ImportInterp,
                       offsets);

  // Fill the argv array with the incoming wasm arguments.
  unsigned offsetToCallerStackArgs = sizeof(Frame) + masm.framePushed();
  Register scratch = ABINonArgReturnReg0;
  FillArgumentArray(masm, funcImportIndex, fi.funcType().args(), argOffset,
                    offsetToCallerStackArgs, scratch, ToValue(false));

  // Prepare the native-ABI arguments for the C++ call.
  ABIArgMIRTypeIter i(invokeArgTypes);

  // argument 0: the Instance*, loaded from TLS.
  Address instancePtr(WasmTlsReg, offsetof(TlsData, instance));
  if (i->kind() == ABIArg::GPR) {
    masm.loadPtr(instancePtr, i->gpr());
  } else {
    masm.loadPtr(instancePtr, scratch);
    masm.storePtr(scratch,
                  Address(masm.getStackPointer(), i->offsetFromArgBase()));
  }
  i++;

  // argument 1: the index of the import being called.
  if (i->kind() == ABIArg::GPR) {
    masm.mov(ImmWord(funcImportIndex), i->gpr());
  } else {
    masm.store32(Imm32(funcImportIndex),
                 Address(masm.getStackPointer(), i->offsetFromArgBase()));
  }
  i++;

  // argument 2: argc.
  unsigned argc = fi.funcType().args().length();
  if (i->kind() == ABIArg::GPR) {
    masm.mov(ImmWord(argc), i->gpr());
  } else {
    masm.store32(Imm32(argc),
                 Address(masm.getStackPointer(), i->offsetFromArgBase()));
  }
  i++;

  // argument 3: pointer to the argv array filled above.
  Address argv(masm.getStackPointer(), argOffset);
  if (i->kind() == ABIArg::GPR) {
    masm.computeEffectiveAddress(argv, i->gpr());
  } else {
    masm.computeEffectiveAddress(argv, scratch);
    masm.storePtr(scratch,
                  Address(masm.getStackPointer(), i->offsetFromArgBase()));
  }
  i++;
  MOZ_ASSERT(i.done());

  // Make the call. A zero return value indicates failure; jump to the throw
  // stub in that case. Otherwise read the result (if any) from argv[0].
  AssertStackAlignment(masm, ABIStackAlignment);
  switch (fi.funcType().ret().code()) {
    case ExprType::Void:
      masm.call(SymbolicAddress::CallImport_Void);
      masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
      GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
                funcImportIndex);
      GenPrintf(DebugChannel::Import, masm, "void");
      break;
    case ExprType::I32:
      masm.call(SymbolicAddress::CallImport_I32);
      masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
      masm.load32(argv, ReturnReg);
      GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
                funcImportIndex);
      GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
      break;
    case ExprType::I64:
      // After the call we unconditionally jump to the throw stub.
      masm.call(SymbolicAddress::CallImport_I64);
      masm.jump(throwLabel);
      break;
    case ExprType::F32:
      // The C++ entry point produces a double; narrow it to float32 here.
      masm.call(SymbolicAddress::CallImport_F64);
      masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
      masm.loadDouble(argv, ReturnDoubleReg);
      masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
      GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
                funcImportIndex);
      GenPrintF32(DebugChannel::Import, masm, ReturnFloat32Reg);
      break;
    case ExprType::F64:
      masm.call(SymbolicAddress::CallImport_F64);
      masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
      masm.loadDouble(argv, ReturnDoubleReg);
      GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
                funcImportIndex);
      GenPrintF64(DebugChannel::Import, masm, ReturnDoubleReg);
      break;
    case ExprType::Ref:
      MOZ_CRASH("No Ref support here yet");
    case ExprType::AnyRef:
      masm.call(SymbolicAddress::CallImport_AnyRef);
      masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
      masm.loadPtr(argv, ReturnReg);
      GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
                funcImportIndex);
      GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
      break;
    case ExprType::NullRef:
      MOZ_CRASH("NullRef not expressible");
    case ExprType::Limit:
      MOZ_CRASH("Limit");
  }

  GenPrintf(DebugChannel::Import, masm, "\n");

  // The TLS and heap registers are non-volatile in the native ABI, so the
  // callee must have preserved them.
  MOZ_ASSERT(NonVolatileRegs.has(WasmTlsReg));
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || \
    defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \
    defined(JS_CODEGEN_MIPS64)
  MOZ_ASSERT(NonVolatileRegs.has(HeapReg));
#endif

  GenerateExitEpilogue(masm, framePushed, ExitReason::Fixed::ImportInterp,
                       offsets);

  return FinishOffsets(masm, offsets);
}
// Generate a stub that is called via the internal ABI derived from the
// signature of the import and then calls into a compatible JIT function,
// having boxed the arguments as JS Values in a JIT-ABI frame built on the
// stack (descriptor, callee, argc, |this|, args...).
static bool GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi,
                                  unsigned funcImportIndex, Label* throwLabel,
                                  JitExitOffsets* offsets) {
  AssertExpectedSP(masm);
  masm.setFramePushed(0);

  // Compute the JIT frame size: return address + pre-frame (descriptor,
  // callee, argc) + |this| + boxed args, aligned so that sp is
  // JitStackAlignment-aligned once the return address is pushed by the call.
  static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes");
  const unsigned sizeOfRetAddr = sizeof(void*);
  const unsigned sizeOfPreFrame =
      WasmToJSJitFrameLayout::Size() - sizeOfRetAddr;
  const unsigned sizeOfThisAndArgs =
      (1 + fi.funcType().args().length()) * sizeof(Value);
  const unsigned totalJitFrameBytes =
      sizeOfRetAddr + sizeOfPreFrame + sizeOfThisAndArgs;
  const unsigned jitFramePushed =
      StackDecrementForCall(JitStackAlignment,
                            sizeof(Frame),  // pushed by prologue
                            totalJitFrameBytes) -
      sizeOfRetAddr;
  const unsigned sizeOfThisAndArgsAndPadding = jitFramePushed - sizeOfPreFrame;

#ifdef JS_CODEGEN_ARM64
  // An extra pointer-sized slot keeps sp 16-aligned; it is consumed/restored
  // around the JIT call below.
  const unsigned frameAlignExtra = sizeof(void*);
#else
  const unsigned frameAlignExtra = 0;
#endif

  GenerateJitExitPrologue(masm, jitFramePushed + frameAlignExtra, offsets);

  // 1. The frame descriptor.
  size_t argOffset = frameAlignExtra;
  uint32_t descriptor =
      MakeFrameDescriptor(sizeOfThisAndArgsAndPadding, FrameType::WasmToJSJit,
                          WasmToJSJitFrameLayout::Size());
  masm.storePtr(ImmWord(uintptr_t(descriptor)),
                Address(masm.getStackPointer(), argOffset));
  argOffset += sizeof(size_t);

  // 2. The callee function, loaded from the import's TLS entry.
  Register callee = ABINonArgReturnReg0;
  Register scratch = ABINonArgReturnReg1;
  masm.loadWasmGlobalPtr(fi.tlsDataOffset() + offsetof(FuncImportTls, fun),
                         callee);
  masm.storePtr(callee, Address(masm.getStackPointer(), argOffset));
  argOffset += sizeof(size_t);

  // 3. Argc.
  unsigned argc = fi.funcType().args().length();
  masm.storePtr(ImmWord(uintptr_t(argc)),
                Address(masm.getStackPointer(), argOffset));
  argOffset += sizeof(size_t);
  MOZ_ASSERT(argOffset == sizeOfPreFrame + frameAlignExtra);

  // 4. The |this| value: undefined.
  masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), argOffset));
  argOffset += sizeof(Value);

  // 5. The wasm arguments, boxed as JS Values.
  unsigned offsetToCallerStackArgs =
      jitFramePushed + sizeof(Frame) + frameAlignExtra;
  FillArgumentArray(masm, funcImportIndex, fi.funcType().args(), argOffset,
                    offsetToCallerStackArgs, scratch, ToValue(true));
  argOffset += fi.funcType().args().length() * sizeof(Value);
  MOZ_ASSERT(argOffset == sizeOfThisAndArgs + sizeOfPreFrame + frameAlignExtra);

  // If the callee's formal-argument count exceeds what we pass, go through
  // the arguments rectifier instead of the direct JIT entry.
  masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), scratch);

  Label rectify;
  masm.branch32(Assembler::Above, scratch, Imm32(fi.funcType().args().length()),
                &rectify);

  // Load the JIT entry point, skipping the argument-count check.
  masm.loadJitCodeNoArgCheck(callee, callee);

  Label rejoinBeforeCall;
  masm.bind(&rejoinBeforeCall);

  AssertStackAlignment(masm, JitStackAlignment,
                       sizeOfRetAddr + frameAlignExtra);
#ifdef JS_CODEGEN_ARM64
  // Discard the extra alignment slot for the duration of the JIT call.
  masm.addToStackPtr(Imm32(8));
#endif
  MoveSPForJitABI(masm);
  masm.callJitNoProfiler(callee);
#ifdef JS_CODEGEN_ARM64
  // Restore the extra alignment slot consumed above.
  masm.subFromStackPtr(Imm32(8));
#endif

  // Re-derive FP from sp; the offsets bracket the region in which the frame
  // pointer has not yet been re-established after the JIT call.
  offsets->untrustedFPStart = masm.currentOffset();
  AssertStackAlignment(masm, JitStackAlignment,
                       sizeOfRetAddr + frameAlignExtra);

  masm.loadWasmTlsRegFromFrame();
  masm.moveStackPtrTo(FramePointer);
  masm.addPtr(Imm32(masm.framePushed()), FramePointer);
  offsets->untrustedFPEnd = masm.currentOffset();

  // Re-establish ABI stack alignment for the native helper calls below.
  static_assert(ABIStackAlignment <= JitStackAlignment, "subsumes");
#ifdef JS_CODEGEN_ARM64
  static_assert(sizeOfRetAddr == frameAlignExtra, "ARM64 SP alignment");
#else
  masm.reserveStack(sizeOfRetAddr);
#endif
  unsigned nativeFramePushed = masm.framePushed();
  AssertStackAlignment(masm, ABIStackAlignment);

#ifdef DEBUG
  // Sanity check: the JIT callee must not return a magic value.
  {
    Label ok;
    masm.branchTestMagic(Assembler::NotEqual, JSReturnOperand, &ok);
    masm.breakpoint();
    masm.bind(&ok);
  }
#endif

  GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
            funcImportIndex);

  // Convert the JS return value to the wasm return type, jumping to the
  // out-of-line path (oolConvert) when an in-place conversion is needed.
  Label oolConvert;
  switch (fi.funcType().ret().code()) {
    case ExprType::Void:
      GenPrintf(DebugChannel::Import, masm, "void");
      break;
    case ExprType::I32:
      masm.truncateValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg,
                                &oolConvert);
      GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
      break;
    case ExprType::I64:
      // Unsupported here; hard-trap.
      masm.breakpoint();
      break;
    case ExprType::F32:
      masm.convertValueToFloat(JSReturnOperand, ReturnFloat32Reg, &oolConvert);
      GenPrintF32(DebugChannel::Import, masm, ReturnFloat32Reg);
      break;
    case ExprType::F64:
      masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg, &oolConvert);
      GenPrintF64(DebugChannel::Import, masm, ReturnDoubleReg);
      break;
    case ExprType::Ref:
      MOZ_CRASH("ref returned by import (jit exit) NYI");
      break;
    case ExprType::AnyRef:
      MOZ_CRASH("anyref returned by import (jit exit) NYI");
      break;
    case ExprType::NullRef:
      MOZ_CRASH("NullRef not expressible");
    case ExprType::Limit:
      MOZ_CRASH("Limit");
  }

  GenPrintf(DebugChannel::Import, masm, "\n");

  Label done;
  masm.bind(&done);

  GenerateJitExitEpilogue(masm, masm.framePushed(), offsets);

  // Rectifier path: route the call through the arguments rectifier
  // trampoline (loaded from the instance), then rejoin before the call.
  {
    masm.bind(&rectify);
    masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, instance)), callee);
    masm.loadPtr(Address(callee, Instance::offsetOfJSJitArgsRectifier()),
                 callee);
    masm.jump(&rejoinBeforeCall);
  }

  // Out-of-line conversion path: store the JS value into an aligned stack
  // slot and call a C++ CoerceInPlace_* helper that converts it in place.
  if (oolConvert.used()) {
    masm.bind(&oolConvert);
    masm.setFramePushed(nativeFramePushed);

    // The helper takes a single pointer argument: &value.
    MIRTypeVector coerceArgTypes;
    MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
    unsigned offsetToCoerceArgv =
        AlignBytes(StackArgBytes(coerceArgTypes), sizeof(Value));
    MOZ_ASSERT(nativeFramePushed >= offsetToCoerceArgv + sizeof(Value));
    AssertStackAlignment(masm, ABIStackAlignment);

    // Store the return value into the slot the helper reads and writes.
    masm.storeValue(JSReturnOperand,
                    Address(masm.getStackPointer(), offsetToCoerceArgv));

    // Mark the exit frame for the duration of the helper call.
    SetExitFP(masm, ExitReason::Fixed::ImportJit, scratch);

    // Pass the &value pointer per the native ABI.
    ABIArgMIRTypeIter i(coerceArgTypes);
    Address argv(masm.getStackPointer(), offsetToCoerceArgv);
    if (i->kind() == ABIArg::GPR) {
      masm.computeEffectiveAddress(argv, i->gpr());
    } else {
      masm.computeEffectiveAddress(argv, scratch);
      masm.storePtr(scratch,
                    Address(masm.getStackPointer(), i->offsetFromArgBase()));
    }
    i++;
    MOZ_ASSERT(i.done());

    // Call the helper; a zero return value means a pending failure, so jump
    // to the throw stub. Otherwise reload the converted result.
    AssertStackAlignment(masm, ABIStackAlignment);
    switch (fi.funcType().ret().code()) {
      case ExprType::I32:
        masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
        masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv),
                        ReturnReg);
        break;
      case ExprType::F64:
      case ExprType::F32:
        masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
        masm.loadDouble(Address(masm.getStackPointer(), offsetToCoerceArgv),
                        ReturnDoubleReg);
        if (fi.funcType().ret() == ExprType::F32) {
          masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
        }
        break;
      default:
        MOZ_CRASH("Unsupported convert type");
    }

    ClearExitFP(masm, scratch);

    masm.jump(&done);
    masm.setFramePushed(0);
  }

  MOZ_ASSERT(masm.framePushed() == 0);
  return FinishOffsets(masm, offsets);
}
// A lightweight view over the argument portion of an ABIFunctionType. The
// return-type field (low bits) is stripped in the constructor; each
// remaining ArgType_Shift-wide field encodes one argument, with the first
// argument in the lowest bits.
struct ABIFunctionArgs {
  ABIFunctionType abiType;  // argument fields only; return type stripped
  size_t len;               // number of encoded arguments

  explicit ABIFunctionArgs(ABIFunctionType sig)
      : abiType(ABIFunctionType(sig >> ArgType_Shift)) {
    // Count the populated argument fields.
    len = 0;
    for (uint32_t bits = uint32_t(abiType); bits != 0;
         bits >>= ArgType_Shift) {
      len++;
    }
  }

  size_t length() const { return len; }

  // Decode the MIRType of the i'th argument (0-based, call order).
  MIRType operator[](size_t i) const {
    MOZ_ASSERT(i < len);
    uint32_t bits = uint32_t(abiType);
    for (size_t skipped = 0; skipped < i; skipped++) {
      bits = bits >> ArgType_Shift;
    }
    return ToMIRType(ABIArgType(bits & ArgType_Mask));
  }
};
// Generate a thunk with the native ABI signature |abiType| that forwards its
// arguments to the C++ function |funcPtr|, fixing up soft-FP argument
// passing on ARM and x87 float returns on x86.
bool wasm::GenerateBuiltinThunk(MacroAssembler& masm, ABIFunctionType abiType,
                                ExitReason exitReason, void* funcPtr,
                                CallableOffsets* offsets) {
  AssertExpectedSP(masm);
  masm.setFramePushed(0);

  ABIFunctionArgs args(abiType);
  uint32_t framePushed =
      StackDecrementForCall(ABIStackAlignment,
                            sizeof(Frame),  // pushed by prologue
                            StackArgBytes(args));

  GenerateExitPrologue(masm, framePushed, exitReason, offsets);

  // Copy out and convert caller arguments, if needed.
  unsigned offsetToCallerStackArgs = sizeof(Frame) + masm.framePushed();
  Register scratch = ABINonArgReturnReg0;
  for (ABIArgIter<ABIFunctionArgs> i(args); !i.done(); i++) {
    if (i->argInRegister()) {
#ifdef JS_CODEGEN_ARM
      // On soft-FP ARM, floating-point arguments must be moved from FP
      // registers into the GPR(s) the native ABI expects.
      if (!UseHardFpABI() && IsFloatingPointType(i.mirType())) {
        FloatRegister input = i->fpu();
        if (i.mirType() == MIRType::Float32) {
          masm.ma_vxfer(input, Register::FromCode(input.id()));
        } else if (i.mirType() == MIRType::Double) {
          // Doubles occupy a pair of consecutive GPRs.
          uint32_t regId = input.singleOverlay().id();
          masm.ma_vxfer(input, Register::FromCode(regId),
                        Register::FromCode(regId + 1));
        }
      }
#endif
      continue;
    }

    // Stack argument: copy from the caller's frame into ours.
    Address src(masm.getStackPointer(),
                offsetToCallerStackArgs + i->offsetFromArgBase());
    Address dst(masm.getStackPointer(), i->offsetFromArgBase());
    StackCopy(masm, i.mirType(), scratch, src, dst);
  }

  AssertStackAlignment(masm, ABIStackAlignment);
  MoveSPForJitABI(masm);
  masm.call(ImmPtr(funcPtr, ImmPtr::NoCheckToken()));

#if defined(JS_CODEGEN_X86)
  // x86 native calls return floats on the x87 stack; spill to memory and
  // reload into the expected FP return register.
  Operand op(esp, 0);
  MIRType retType = ToMIRType(ABIArgType(abiType & ArgType_Mask));
  if (retType == MIRType::Float32) {
    masm.fstp32(op);
    masm.loadFloat32(op, ReturnFloat32Reg);
  } else if (retType == MIRType::Double) {
    masm.fstp(op);
    masm.loadDouble(op, ReturnDoubleReg);
  }
#elif defined(JS_CODEGEN_ARM)
  // Soft-FP ARM returns floating-point values in r0/r1; move them into d0.
  MIRType retType = ToMIRType(ABIArgType(abiType & ArgType_Mask));
  if (!UseHardFpABI() && IsFloatingPointType(retType)) {
    masm.ma_vxfer(r0, r1, d0);
  }
#endif

  GenerateExitEpilogue(masm, framePushed, exitReason, offsets);
  return FinishOffsets(masm, offsets);
}
// The set of registers the trap exit stub saves and restores (see
// GenerateTrapExit): all GPRs except the stack pointer, plus per-arch
// exclusions for registers that cannot be pushed/popped this way, and all
// double FP registers.
#if defined(JS_CODEGEN_ARM)
// Exclude sp and pc.
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask & ~((uint32_t(1) << Registers::sp) |
                                              (uint32_t(1) << Registers::pc))),
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
static_assert(!SupportsSimd,
              "high lanes of SIMD registers need to be saved too.");
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
// Exclude the kernel-reserved registers k0/k1, sp, and the zero register.
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~((uint32_t(1) << Registers::k0) |
                         (uint32_t(1) << Registers::k1) |
                         (uint32_t(1) << Registers::sp) |
                         (uint32_t(1) << Registers::zero))),
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
static_assert(!SupportsSimd,
              "high lanes of SIMD registers need to be saved too.");
#elif defined(JS_CODEGEN_ARM64)
// Exclude the stack pointer and the link register.
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~((uint32_t(1) << Registers::StackPointer) |
                         (uint32_t(1) << Registers::lr))),
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
static_assert(!SupportsSimd,
              "high lanes of SIMD registers need to be saved too");
#else
// x86/x64 and other platforms: exclude only the stack pointer.
static const LiveRegisterSet RegsToPreserve(
    GeneralRegisterSet(Registers::AllMask &
                       ~(uint32_t(1) << Registers::StackPointer)),
    FloatRegisterSet(FloatRegisters::AllDoubleMask));
static_assert(!SupportsSimd,
              "high lanes of SIMD registers need to be saved too");
#endif
// Populate |machine| with the stack locations of the registers that the trap
// exit stub (GenerateTrapExit) pushes, and report the total number of words
// the stub pushes via |numWords|.
void wasm::GenerateTrapExitMachineState(MachineState* machine,
                                        size_t* numWords) {
  // Account for the words pushed before the register block (see WasmPush of
  // the dummy value in GenerateTrapExit).
  *numWords = WasmPushSize / sizeof(void*);
  MOZ_ASSERT(*numWords == TrapExitDummyValueOffsetFromTop + 1);

  // Walk the preserved GPRs backward so the recorded word offsets mirror the
  // layout produced by PushRegsInMask.
  GeneralRegisterBackwardIterator iter(RegsToPreserve.gprs());
  while (iter.more()) {
    machine->setRegisterLocation(*iter,
                                 reinterpret_cast<uintptr_t*>(*numWords));
    (*numWords)++;
    ++iter;
  }
}
// Generate a stub that saves all registers, calls HandleTrap, and either
// resumes at the address HandleTrap returns or jumps to the throw stub when
// HandleTrap returns null.
static bool GenerateTrapExit(MacroAssembler& masm, Label* throwLabel,
                             Offsets* offsets) {
  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);
  masm.setFramePushed(0);

  offsets->begin = masm.currentOffset();

  // Push a dummy word; it is overwritten below with the resume address so
  // that the final ret transfers control there.
  WasmPush(masm, ImmWord(TrapExitDummyValue));
  unsigned framePushedBeforePreserve = masm.framePushed();
  masm.PushRegsInMask(RegsToPreserve);
  unsigned offsetOfReturnWord = masm.framePushed() - framePushedBeforePreserve;

  // The stack pointer may not be ABI-aligned here; save it in a
  // non-volatile register before aligning for the native call.
  Register preAlignStackPointer = ABINonVolatileReg;
  masm.moveStackPtrTo(preAlignStackPointer);
  masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  if (ShadowStackSpace) {
    masm.subFromStackPtr(Imm32(ShadowStackSpace));
  }

  masm.assertStackAlignment(ABIStackAlignment);
  masm.call(SymbolicAddress::HandleTrap);

  // A null return value means the trap is not recoverable: go throw.
  masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);

  // Otherwise the return value is the address to resume at. Restore the
  // saved sp, store the resume address into the dummy word pushed above,
  // restore all registers, and return to the resume address.
  masm.moveToStackPtr(preAlignStackPointer);
  masm.storePtr(ReturnReg, Address(masm.getStackPointer(), offsetOfReturnWord));
  masm.PopRegsInMask(RegsToPreserve);
#ifdef JS_CODEGEN_ARM64
  WasmPop(masm, lr);
  masm.abiret();
#else
  masm.ret();
#endif

  return FinishOffsets(masm, offsets);
}
// Generate the throw stub: call HandleThrow, restore the stack pointer it
// returns, and signal failure to the entry stub via FramePointer = FailFP.
static bool GenerateThrowStub(MacroAssembler& masm, Label* throwLabel,
                              Offsets* offsets) {
  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);

  masm.bind(throwLabel);

  offsets->begin = masm.currentOffset();

  // Conservatively re-align the stack pointer for the native call.
  masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  if (ShadowStackSpace) {
    masm.subFromStackPtr(Imm32(ShadowStackSpace));
  }

  // HandleThrow returns the stack pointer to restore before returning.
  masm.call(SymbolicAddress::HandleThrow);
  masm.moveToStackPtr(ReturnReg);

  // FailFP marks the unwound state for the entry stub's caller.
  masm.move32(Imm32(FailFP), FramePointer);
#ifdef JS_CODEGEN_ARM64
  // Reload lr from the restored stack and pop its slot before returning.
  masm.loadPtr(Address(ReturnReg, 0), lr);
  masm.addToStackPtr(Imm32(8));
  masm.abiret();
#else
  masm.ret();
#endif

  return FinishOffsets(masm, offsets);
}
// All registers the register allocator may have live; the debug trap stub
// must preserve every one of them across the HandleDebugTrap call.
static const LiveRegisterSet AllAllocatableRegs =
    LiveRegisterSet(GeneralRegisterSet(Registers::AllocatableMask),
                    FloatRegisterSet(FloatRegisters::AllMask));

// Generate the debug trap stub: preserve all allocatable registers, call
// HandleDebugTrap, and either resume (true result) or jump to the throw
// stub (false result).
static bool GenerateDebugTrapStub(MacroAssembler& masm, Label* throwLabel,
                                  CallableOffsets* offsets) {
  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);
  masm.setFramePushed(0);

  GenerateExitPrologue(masm, 0, ExitReason::Fixed::DebugTrap, offsets);

  // Preserve every allocatable register across the call.
  masm.PushRegsInMask(AllAllocatableRegs);
  uint32_t framePushed = masm.framePushed();

#ifdef JS_CODEGEN_ARM64
  // On ARM64 the stack pointer is kept aligned, so no fixup is needed.
  static_assert(ABIStackAlignment == 16, "ARM64 SP alignment");
#else
  // The stack may be unaligned here: save the current sp on the stack,
  // below the alignment boundary, so it can be restored after the call.
  Register scratch = ABINonArgReturnReg0;
  masm.moveStackPtrTo(scratch);
  masm.subFromStackPtr(Imm32(sizeof(intptr_t)));
  masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
  masm.storePtr(scratch, Address(masm.getStackPointer(), 0));
#endif

  if (ShadowStackSpace) {
    masm.subFromStackPtr(Imm32(ShadowStackSpace));
  }
  masm.assertStackAlignment(ABIStackAlignment);
  masm.call(SymbolicAddress::HandleDebugTrap);

  // A false result means an exception is pending: go throw.
  masm.branchIfFalseBool(ReturnReg, throwLabel);

  if (ShadowStackSpace) {
    masm.addToStackPtr(Imm32(ShadowStackSpace));
  }
#ifndef JS_CODEGEN_ARM64
  // Restore the pre-alignment stack pointer saved above.
  masm.Pop(scratch);
  masm.moveToStackPtr(scratch);
#endif

  masm.setFramePushed(framePushed);
  masm.PopRegsInMask(AllAllocatableRegs);

  GenerateExitEpilogue(masm, 0, ExitReason::Fixed::DebugTrap, offsets);

  return FinishOffsets(masm, offsets);
}
// Emit the entry stubs for one exported function: always the interpreter
// entry, plus the JIT entry when the export supports it. A code range is
// recorded for each stub that is emitted.
bool wasm::GenerateEntryStubs(MacroAssembler& masm, size_t funcExportIndex,
                              const FuncExport& fe, const Maybe<ImmPtr>& callee,
                              bool isAsmJS, CodeRangeVector* codeRanges) {
  MOZ_ASSERT(!callee == fe.hasEagerStubs());
  MOZ_ASSERT_IF(isAsmJS, fe.hasEagerStubs());

  Offsets offsets;
  if (!GenerateInterpEntry(masm, fe, callee, &offsets) ||
      !codeRanges->emplaceBack(CodeRange::InterpEntry, fe.funcIndex(),
                               offsets)) {
    return false;
  }

  // asm.js exports and signatures with unsupported anyref types only get
  // the interpreter entry.
  if (isAsmJS || fe.funcType().temporarilyUnsupportedAnyRef()) {
    return true;
  }

  return GenerateJitEntry(masm, funcExportIndex, fe, callee, &offsets) &&
         codeRanges->emplaceBack(CodeRange::JitEntry, fe.funcIndex(), offsets);
}
bool wasm::GenerateStubs(const ModuleEnvironment& env,
const FuncImportVector& imports,
const FuncExportVector& exports, CompiledCode* code) {
LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE);
TempAllocator alloc(&lifo);
WasmMacroAssembler masm(alloc);
if (!code->swap(masm)) {
return false;
}
Label throwLabel;
JitSpew(JitSpew_Codegen, "# Emitting wasm import stubs");
for (uint32_t funcIndex = 0; funcIndex < imports.length(); funcIndex++) {
const FuncImport& fi = imports[funcIndex];
CallableOffsets interpOffsets;
if (!GenerateImportInterpExit(masm, fi, funcIndex, &throwLabel,
&interpOffsets)) {
return false;
}
if (!code->codeRanges.emplaceBack(CodeRange::ImportInterpExit, funcIndex,
interpOffsets)) {
return false;
}
if (fi.funcType().temporarilyUnsupportedAnyRef()) {
continue;
}
JitExitOffsets jitOffsets;
if (!GenerateImportJitExit(masm, fi, funcIndex, &throwLabel, &jitOffsets)) {
return false;
}
if (!code->codeRanges.emplaceBack(funcIndex, jitOffsets)) {
return false;
}
}
JitSpew(JitSpew_Codegen, "# Emitting wasm export stubs");
Maybe<ImmPtr> noAbsolute;
for (size_t i = 0; i < exports.length(); i++) {
const FuncExport& fe = exports[i];
if (!fe.hasEagerStubs()) {
continue;
}
if (!GenerateEntryStubs(masm, i, fe, noAbsolute, env.isAsmJS(),
&code->codeRanges)) {
return false;
}
}
JitSpew(JitSpew_Codegen, "# Emitting wasm exit stubs");
Offsets offsets;
if (!GenerateTrapExit(masm, &throwLabel, &offsets)) {
return false;
}
if (!code->codeRanges.emplaceBack(CodeRange::TrapExit, offsets)) {
return false;
}
CallableOffsets callableOffsets;
if (!GenerateDebugTrapStub(masm, &throwLabel, &callableOffsets)) {
return false;
}
if (!code->codeRanges.emplaceBack(CodeRange::DebugTrap, callableOffsets)) {
return false;
}
if (!GenerateThrowStub(masm, &throwLabel, &offsets)) {
return false;
}
if (!code->codeRanges.emplaceBack(CodeRange::Throw, offsets)) {
return false;
}
masm.finish();
if (masm.oom()) {
return false;
}
return code->swap(masm);
}