#include "jit/arm64/CodeGenerator-arm64.h"
#include "mozilla/MathAlgorithms.h"
#include "jsnum.h"
#include "jit/CodeGenerator.h"
#include "jit/JitFrames.h"
#include "jit/JitRealm.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "vm/JSContext.h"
#include "vm/Realm.h"
#include "vm/Shape.h"
#include "vm/TraceLogging.h"
#include "jit/shared/CodeGenerator-shared-inl.h"
#include "vm/JSScript-inl.h"
using namespace js;
using namespace js::jit;
using JS::GenericNaN;
using mozilla::FloorLog2;
using mozilla::NegativeInfinity;
// Construct the ARM64 code generator. All shared state is initialized by
// CodeGeneratorShared; this class adds no members of its own here.
CodeGeneratorARM64::CodeGeneratorARM64(MIRGenerator* gen, LIRGraph* graph,
MacroAssembler* masm)
: CodeGeneratorShared(gen, graph, masm) {}
// Emit all shared out-of-line code, then, if any bailout stub was emitted,
// the single shared bailout tail that those stubs branch to.
bool CodeGeneratorARM64::generateOutOfLineCode() {
if (!CodeGeneratorShared::generateOutOfLineCode()) {
return false;
}
if (deoptLabel_.used()) {
// All OutOfLineBailout stubs branch here (see visitOutOfLineBailout),
// having already pushed their snapshot offset.
masm.bind(&deoptLabel_);
// Push the frame size so the bailout handler can walk this frame.
masm.push(Imm32(frameSize()));
TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
masm.jump(handler);
}
return !masm.oom();
}
// Emit a conditional two-way branch. When the false target is the textually
// next block, a single conditional branch to the true target suffices;
// otherwise branch to the false target on the inverted condition and fall
// through (or jump) to the true target.
void CodeGeneratorARM64::emitBranch(Assembler::Condition cond,
                                    MBasicBlock* mirTrue,
                                    MBasicBlock* mirFalse) {
  if (isNextBlock(mirFalse->lir())) {
    jumpToBlock(mirTrue, cond);
    return;
  }
  jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
  jumpToBlock(mirTrue);
}
// Visitor dispatch: route this out-of-line bailout stub to the code
// generator's emitter.
void OutOfLineBailout::accept(CodeGeneratorARM64* codegen) {
codegen->visitOutOfLineBailout(this);
}
// Branch on an int32 truth test: non-zero takes the true edge.
void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
Register input = ToRegister(test->input());
MBasicBlock* mirTrue = test->ifTrue();
MBasicBlock* mirFalse = test->ifFalse();
// Set flags from the input; Zero <=> input == 0.
masm.test32(input, input);
if (isNextBlock(mirFalse->lir())) {
// False block falls through; only a conditional jump to true is needed.
jumpToBlock(mirTrue, Assembler::NonZero);
} else {
jumpToBlock(mirFalse, Assembler::Zero);
// Skip the unconditional jump when the true block falls through.
if (!isNextBlock(mirTrue->lir())) {
jumpToBlock(mirTrue);
}
}
}
// Materialize the result of a comparison as 0/1 in the output register.
// Object/symbol comparisons compare full pointers; everything else is a
// 32-bit compare, with an immediate form for constant right-hand sides.
void CodeGenerator::visitCompare(LCompare* comp) {
  const MCompare* mir = comp->mir();
  const MCompare::CompareType type = mir->compareType();
  const Assembler::Condition cond = JSOpToCondition(type, comp->jsop());
  const Register lhs = ToRegister(comp->getOperand(0));
  const LAllocation* rhs = comp->getOperand(1);
  const Register dest = ToRegister(comp->getDef(0));

  const bool isPointerCompare =
      type == MCompare::Compare_Object || type == MCompare::Compare_Symbol;
  if (isPointerCompare) {
    masm.cmpPtrSet(cond, lhs, ToRegister(rhs), dest);
  } else if (rhs->isConstant()) {
    masm.cmp32Set(cond, lhs, Imm32(ToInt32(rhs)), dest);
  } else {
    masm.cmp32Set(cond, lhs, ToRegister(rhs), dest);
  }
}
// Compare two values and branch on the result. Object/symbol comparisons
// are pointer-width; other compares are 32-bit, using an immediate when the
// right-hand side is a constant.
void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
  const MCompare* mir = comp->cmpMir();
  const MCompare::CompareType type = mir->compareType();
  const LAllocation* lhs = comp->left();
  const LAllocation* rhs = comp->right();

  const bool isPointerCompare =
      type == MCompare::Compare_Object || type == MCompare::Compare_Symbol;
  if (isPointerCompare) {
    masm.cmpPtr(ToRegister(lhs), ToRegister(rhs));
  } else if (rhs->isConstant()) {
    masm.cmp32(ToRegister(lhs), Imm32(ToInt32(rhs)));
  } else {
    masm.cmp32(ToRegister(lhs), ToRegister(rhs));
  }

  Assembler::Condition cond = JSOpToCondition(type, comp->jsop());
  emitBranch(cond, comp->ifTrue(), comp->ifFalse());
}
// Emit a conditional bailout: record the snapshot, create an out-of-line
// stub that pushes the snapshot offset, and branch to it on |condition|.
void CodeGeneratorARM64::bailoutIf(Assembler::Condition condition,
LSnapshot* snapshot) {
encode(snapshot);
// arm64 does not use frame size classes (FrameSizeClass::frameSize crashes),
// so this assertion is vacuous here but kept for parity with other backends.
MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None() && deoptTable_,
frameClass_.frameSize() == masm.framePushed());
InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
OutOfLineBailout* ool = new (alloc()) OutOfLineBailout(snapshot);
addOutOfLineCode(ool,
new (alloc()) BytecodeSite(tree, tree->script()->code()));
masm.B(ool->entry(), condition);
}
// Retarget a pending (used but unbound) label to a fresh out-of-line
// bailout stub for |snapshot|.
void CodeGeneratorARM64::bailoutFrom(Label* label, LSnapshot* snapshot) {
MOZ_ASSERT_IF(!masm.oom(), label->used());
MOZ_ASSERT_IF(!masm.oom(), !label->bound());
encode(snapshot);
// See bailoutIf: frame size classes are unused on arm64.
MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None() && deoptTable_,
frameClass_.frameSize() == masm.framePushed());
InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
OutOfLineBailout* ool = new (alloc()) OutOfLineBailout(snapshot);
addOutOfLineCode(ool,
new (alloc()) BytecodeSite(tree, tree->script()->code()));
// Point every branch through |label| at the stub's entry.
masm.retarget(label, ool->entry());
}
// Unconditional bailout: emit a branch through a temporary label and let
// bailoutFrom retarget it to the out-of-line stub.
void CodeGeneratorARM64::bailout(LSnapshot* snapshot) {
Label label;
masm.b(&label);
bailoutFrom(&label, snapshot);
}
// Out-of-line bailout stub: push this bailout's snapshot offset and branch
// to the shared tail bound in generateOutOfLineCode.
void CodeGeneratorARM64::visitOutOfLineBailout(OutOfLineBailout* ool) {
masm.push(Imm32(ool->snapshot()->snapshotOffset()));
masm.B(&deoptLabel_);
}
// Double-precision min/max using the native Fmin/Fmax instructions.
void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
  ARMFPRegister first(ToFloatRegister(ins->first()), 64);
  ARMFPRegister second(ToFloatRegister(ins->second()), 64);
  ARMFPRegister result(ToFloatRegister(ins->output()), 64);
  if (!ins->mir()->isMax()) {
    masm.Fmin(result, first, second);
  } else {
    masm.Fmax(result, first, second);
  }
}
// Single-precision min/max using the native Fmin/Fmax instructions.
void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
  ARMFPRegister first(ToFloatRegister(ins->first()), 32);
  ARMFPRegister second(ToFloatRegister(ins->second()), 32);
  ARMFPRegister result(ToFloatRegister(ins->output()), 32);
  if (!ins->mir()->isMax()) {
    masm.Fmin(result, first, second);
  } else {
    masm.Fmax(result, first, second);
  }
}
// Double absolute value, computed in place (input register is the output).
void CodeGenerator::visitAbsD(LAbsD* ins) {
  ARMFPRegister inOut(ToFloatRegister(ins->input()), 64);
  masm.Fabs(inOut, inOut);
}
// Float absolute value, computed in place (input register is the output).
void CodeGenerator::visitAbsF(LAbsF* ins) {
  ARMFPRegister inOut(ToFloatRegister(ins->input()), 32);
  masm.Fabs(inOut, inOut);
}
// Double square root via the native Fsqrt instruction.
void CodeGenerator::visitSqrtD(LSqrtD* ins) {
  ARMFPRegister src(ToFloatRegister(ins->input()), 64);
  ARMFPRegister dst(ToFloatRegister(ins->output()), 64);
  masm.Fsqrt(dst, src);
}
// Float square root via the native Fsqrt instruction.
void CodeGenerator::visitSqrtF(LSqrtF* ins) {
  ARMFPRegister src(ToFloatRegister(ins->input()), 32);
  ARMFPRegister dst(ToFloatRegister(ins->output()), 32);
  masm.Fsqrt(dst, src);
}
// View an LIR allocation/definition as a 32-bit (W) ARM register.
template <typename T>
ARMRegister toWRegister(const T* a) {
return ARMRegister(ToRegister(a), 32);
}
// View an LIR allocation/definition as a 64-bit (X) ARM register.
template <typename T>
ARMRegister toXRegister(const T* a) {
return ARMRegister(ToRegister(a), 64);
}
// Convert an LIR allocation to a vixl Operand: constants become immediates,
// registers become 32-bit (W) register operands.
Operand toWOperand(const LAllocation* a) {
  return a->isConstant() ? Operand(ToInt32(a)) : Operand(toWRegister(a));
}
// Map an LIR allocation to a vixl CPURegister: float registers are sized by
// the scalar type (Float64 -> D, Float32 -> S); general registers are always
// 32-bit here. Any other combination is a bug.
vixl::CPURegister ToCPURegister(const LAllocation* a, Scalar::Type type) {
  if (a->isFloatReg()) {
    if (type == Scalar::Float64) {
      return ARMFPRegister(ToFloatRegister(a), 64);
    }
    if (type == Scalar::Float32) {
      return ARMFPRegister(ToFloatRegister(a), 32);
    }
  } else if (a->isGeneralReg()) {
    return ARMRegister(ToRegister(a), 32);
  }
  MOZ_CRASH("Unknown LAllocation");
}
// Overload for LDefinition: delegate to the LAllocation version via its
// output allocation.
vixl::CPURegister ToCPURegister(const LDefinition* d, Scalar::Type type) {
return ToCPURegister(d->output(), type);
}
// Int32 addition. A fallible add (one carrying a snapshot) uses the
// flag-setting Adds so we can bail out on signed overflow; an infallible
// add uses plain Add.
void CodeGenerator::visitAddI(LAddI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);
  MOZ_ASSERT(!ins->recoversInput());

  if (!ins->snapshot()) {
    masm.Add(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
    return;
  }
  masm.Adds(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
  bailoutIf(Assembler::Overflow, ins->snapshot());
}
// Int32 subtraction. A fallible subtract (one carrying a snapshot) uses the
// flag-setting Subs so we can bail out on signed overflow; an infallible
// subtract uses plain Sub.
void CodeGenerator::visitSubI(LSubI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);
  MOZ_ASSERT(!ins->recoversInput());

  if (!ins->snapshot()) {
    masm.Sub(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
    return;
  }
  masm.Subs(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
  bailoutIf(Assembler::Overflow, ins->snapshot());
}
// Int32 multiplication, with optional overflow and negative-zero bailouts.
// Constant right-hand sides get strength-reduced forms (-1, 0, 1, 2, powers
// of two); everything else goes through masm.mul32, which reports overflow
// and/or a zero result via labels.
void CodeGenerator::visitMulI(LMulI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);
  MMul* mul = ins->mir();
  MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
                !mul->canBeNegativeZero() && !mul->canOverflow());

  Register lhsreg = ToRegister(lhs);
  const ARMRegister lhsreg32 = ARMRegister(lhsreg, 32);
  Register destreg = ToRegister(dest);
  const ARMRegister destreg32 = ARMRegister(destreg, 32);

  if (!rhs->isConstant()) {
    // Variable RHS: mul32 computes the product and branches to |bailout|
    // on overflow and/or when the result is zero (possible -0).
    Register rhsreg = ToRegister(rhs);
    Label bailout;
    Label* onZero = mul->canBeNegativeZero() ? &bailout : nullptr;
    Label* onOverflow = mul->canOverflow() ? &bailout : nullptr;
    masm.mul32(lhsreg, rhsreg, destreg, onOverflow, onZero);
    if (onZero || onOverflow) {
      bailoutFrom(&bailout, ins->snapshot());
    }
    return;
  }

  int32_t constant = ToInt32(rhs);

  // A product is -0 only when the constant is <= 0: bail when the constant
  // is 0 and lhs < 0, or when the constant is negative and lhs == 0.
  if (mul->canBeNegativeZero() && constant <= 0) {
    Assembler::Condition bailoutCond =
        (constant == 0) ? Assembler::LessThan : Assembler::Equal;
    masm.Cmp(toWRegister(lhs), Operand(0));
    bailoutIf(bailoutCond, ins->snapshot());
  }

  switch (constant) {
    case -1:
      masm.Negs(destreg32, Operand(lhsreg32));
      break;  // Flags set; fall through to the overflow check below.
    case 0:
      masm.Mov(destreg32, wzr);
      return;  // Result is exact; no overflow check needed.
    case 1:
      if (destreg != lhsreg) {
        masm.Mov(destreg32, lhsreg32);
      }
      return;  // Result is exact; no overflow check needed.
    case 2:
      masm.Adds(destreg32, lhsreg32, Operand(lhsreg32));
      break;  // Flags set; fall through to the overflow check below.
    default: {
      // Infallible multiplies by a positive power of two become shifts.
      if (!mul->canOverflow() && constant > 0) {
        int32_t shift = FloorLog2(constant);
        if ((1 << shift) == constant) {
          masm.Lsl(destreg32, lhsreg32, shift);
          return;
        }
      }
      // General constant: materialize it in a scratch register and use the
      // same mul32 bailout protocol as the variable-RHS path.
      Label bailout;
      Label* onZero = mul->canBeNegativeZero() ? &bailout : nullptr;
      Label* onOverflow = mul->canOverflow() ? &bailout : nullptr;
      vixl::UseScratchRegisterScope temps(&masm.asVIXL());
      const Register scratch = temps.AcquireW().asUnsized();
      masm.move32(Imm32(constant), scratch);
      masm.mul32(lhsreg, scratch, destreg, onOverflow, onZero);
      if (onZero || onOverflow) {
        bailoutFrom(&bailout, ins->snapshot());
      }
      return;
    }
  }

  // Reached only from the -1 and 2 cases above, which set the flags.
  if (mul->canOverflow()) {
    bailoutIf(Assembler::Overflow, ins->snapshot());
  }
}
// Int32 division with the full set of JS/wasm edge-case checks:
//  - divide by zero: wasm traps, truncating division yields 0, otherwise bail;
//  - INT32_MIN / -1 overflow: wasm traps, truncation returns INT32_MIN
//    (i.e. the lhs), otherwise bail;
//  - negative zero (0 / negative): bail unless truncation permits it;
//  - non-integer quotient: when the remainder matters, recompute
//    quotient * rhs and bail if it differs from lhs.
//
// NOTE: this block previously failed to compile because an HTML-entity
// decode had corrupted "&notOverflow" into "¬Overflow" (U+00AC) in three
// places; the label references are restored here.
void CodeGenerator::visitDivI(LDivI* ins) {
  const Register lhs = ToRegister(ins->lhs());
  const Register rhs = ToRegister(ins->rhs());
  const Register output = ToRegister(ins->output());
  const ARMRegister lhs32 = toWRegister(ins->lhs());
  const ARMRegister rhs32 = toWRegister(ins->rhs());
  const ARMRegister temp32 = toWRegister(ins->getTemp(0));
  const ARMRegister output32 = toWRegister(ins->output());
  MDiv* mir = ins->mir();
  Label done;

  // Handle division by zero.
  if (mir->canBeDivideByZero()) {
    masm.test32(rhs, rhs);
    if (mir->trapOnError()) {
      // wasm: trap on x / 0.
      Label nonZero;
      masm.j(Assembler::NonZero, &nonZero);
      masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
      masm.bind(&nonZero);
    } else if (mir->canTruncateInfinities()) {
      // Truncated result of x / 0 is 0.
      Label nonZero;
      masm.j(Assembler::NonZero, &nonZero);
      masm.Mov(output32, wzr);
      masm.jump(&done);
      masm.bind(&nonZero);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutIf(Assembler::Zero, ins->snapshot());
    }
  }

  // Handle the INT32_MIN / -1 overflow case.
  if (mir->canBeNegativeOverflow()) {
    Label notOverflow;
    masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
    masm.branch32(Assembler::NotEqual, rhs, Imm32(-1), &notOverflow);
    if (mir->trapOnError()) {
      masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
    } else if (mir->canTruncateOverflow()) {
      // Truncated result of INT32_MIN / -1 is INT32_MIN, which is lhs.
      masm.move32(lhs, output);
      masm.jump(&done);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailout(ins->snapshot());
    }
    masm.bind(&notOverflow);
  }

  // Handle negative zero: 0 / y with y < 0 produces -0.
  if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
    Label nonZero;
    masm.branch32(Assembler::NotEqual, lhs, Imm32(0), &nonZero);
    masm.cmp32(rhs, Imm32(0));
    bailoutIf(Assembler::LessThan, ins->snapshot());
    masm.bind(&nonZero);
  }

  if (mir->canTruncateRemainder()) {
    masm.Sdiv(output32, lhs32, rhs32);
  } else {
    // Bail if the quotient is inexact: quotient * rhs must equal lhs.
    vixl::UseScratchRegisterScope temps(&masm.asVIXL());
    ARMRegister scratch32 = temps.AcquireW();
    masm.Sdiv(scratch32, lhs32, rhs32);
    masm.Mul(temp32, scratch32, rhs32);
    masm.Cmp(lhs32, temp32);
    bailoutIf(Assembler::NotEqual, ins->snapshot());
    masm.Mov(output32, scratch32);
  }

  masm.bind(&done);
}
// Not yet implemented on arm64; deliberately crashes if reached.
void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
MOZ_CRASH("CodeGenerator::visitDivPowTwoI");
}
// Not yet implemented on arm64; deliberately crashes if reached.
void CodeGeneratorARM64::modICommon(MMod* mir, Register lhs, Register rhs,
Register output, LSnapshot* snapshot,
Label& done) {
MOZ_CRASH("CodeGeneratorARM64::modICommon");
}
// Int32 modulus: computed as lhs - (lhs / rhs) * rhs via Sdiv + Msub.
// Bails out (when not truncated) for division by zero and for a -0 result
// (remainder 0 with a negative dividend).
void CodeGenerator::visitModI(LModI* ins) {
if (gen->compilingWasm()) {
MOZ_CRASH("visitModI while compilingWasm");
}
MMod* mir = ins->mir();
ARMRegister lhs = toWRegister(ins->lhs());
ARMRegister rhs = toWRegister(ins->rhs());
ARMRegister output = toWRegister(ins->output());
Label done;
if (mir->canBeDivideByZero() && !mir->isTruncated()) {
// Non-truncated x % 0 must bail (result would be NaN).
masm.Cmp(rhs, Operand(0));
bailoutIf(Assembler::Equal, ins->snapshot());
} else if (mir->canBeDivideByZero()) {
// Truncated x % 0 yields 0: rhs is 0 here, so output = rhs works.
masm.Mov(output, rhs);
masm.Cbz(rhs, &done);
}
masm.Sdiv(output, lhs, rhs);
// output = lhs - output * rhs, i.e. the remainder.
masm.Msub(output, output, rhs, lhs);
if (mir->canBeNegativeDividend() && !mir->isTruncated()) {
// A zero remainder with a negative dividend is -0: bail.
masm.Cbnz(output, &done);
bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
}
if (done.used()) {
masm.bind(&done);
}
}
// Int32 modulus by a power of two (mask = 2^shift - 1). Negative dividends
// are negated, masked, then negated back; a -0 result bails out when the
// operation is not truncated.
void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
Register lhs = ToRegister(ins->getOperand(0));
ARMRegister lhsw = toWRegister(ins->getOperand(0));
ARMRegister outw = toWRegister(ins->output());
int32_t shift = ins->shift();
bool canBeNegative =
!ins->mir()->isUnsigned() && ins->mir()->canBeNegativeDividend();
Label negative;
if (canBeNegative) {
masm.branchTest32(Assembler::Signed, lhs, lhs, &negative);
}
// Non-negative case: a simple mask.
masm.And(outw, lhsw, Operand((uint32_t(1) << shift) - 1));
if (canBeNegative) {
Label done;
masm.jump(&done);
masm.bind(&negative);
// Negative case: |x| & mask, then negate the result back.
masm.Neg(outw, Operand(lhsw));
masm.And(outw, outw, Operand((uint32_t(1) << shift) - 1));
if (!ins->mir()->isTruncated()) {
// Flag-setting negate: a zero result here means -0, so bail.
masm.Negs(outw, Operand(outw));
bailoutIf(Assembler::Zero, ins->snapshot());
} else {
masm.Neg(outw, Operand(outw));
}
masm.bind(&done);
}
}
// Int32 modulus via the shift-and-add digit-sum technique: the dividend is
// processed |shift| bits at a time, accumulating partial sums that are
// reduced modulo the mask. |hold| records the dividend's original sign so
// the result can be negated at the end.
void CodeGenerator::visitModMaskI(LModMaskI* ins) {
MMod* mir = ins->mir();
int32_t shift = ins->shift();
const Register src = ToRegister(ins->getOperand(0));
const Register dest = ToRegister(ins->getDef(0));
const Register hold = ToRegister(ins->getTemp(0));
const Register remain = ToRegister(ins->getTemp(1));
const ARMRegister src32 = ARMRegister(src, 32);
const ARMRegister dest32 = ARMRegister(dest, 32);
const ARMRegister remain32 = ARMRegister(remain, 32);
vixl::UseScratchRegisterScope temps(&masm.asVIXL());
const ARMRegister scratch32 = temps.AcquireW();
const Register scratch = scratch32.asUnsized();
int32_t mask = (1 << shift) - 1;
Label loop;
// remain = |src|; dest accumulates the result, starting at 0.
masm.Mov(remain32, src32);
masm.Mov(dest32, wzr);
{
// hold = +1 for a non-negative dividend, -1 for a negative one; the
// negative case also flips remain to its absolute value.
Label negative;
masm.branch32(Assembler::Signed, remain, Imm32(0), &negative);
masm.move32(Imm32(1), hold);
masm.jump(&loop);
masm.bind(&negative);
masm.move32(Imm32(-1), hold);
masm.neg32(remain);
}
masm.bind(&loop);
{
// Add the low |shift| bits of remain into dest, then reduce dest
// modulo mask if the subtraction dest - mask did not go negative.
masm.And(scratch32, remain32, Operand(mask));
masm.Add(dest32, dest32, scratch32);
masm.Subs(scratch32, dest32, Operand(mask));
{
Label sumSigned;
masm.branch32(Assembler::Signed, scratch, scratch, &sumSigned);
masm.Mov(dest32, scratch32);
masm.bind(&sumSigned);
}
// Consume the processed bits; loop while any remain.
masm.Lsr(remain32, remain32, shift);
masm.branchTest32(Assembler::NonZero, remain, remain, &loop);
}
{
// Restore the sign for negative dividends; a zero result from a
// negative dividend is -0 and bails when not truncated.
Label done;
masm.branchTest32(Assembler::NotSigned, hold, hold, &done);
if (mir->canBeNegativeDividend() && !mir->isTruncated()) {
bailoutTest32(Assembler::Zero, hold, hold, ins->snapshot());
}
masm.neg32(dest);
masm.bind(&done);
}
}
// Bitwise NOT (~x) via the MVN (move-negated) instruction.
void CodeGenerator::visitBitNotI(LBitNotI* ins) {
  const LAllocation* src = ins->getOperand(0);
  const LDefinition* dst = ins->getDef(0);
  masm.Mvn(toWRegister(dst), toWOperand(src));
}
// Int32 bitwise AND/OR/XOR, dispatched on the JS opcode.
void CodeGenerator::visitBitOpI(LBitOpI* ins) {
  const ARMRegister src1 = toWRegister(ins->getOperand(0));
  const Operand src2 = toWOperand(ins->getOperand(1));
  const ARMRegister out = toWRegister(ins->getDef(0));
  switch (ins->bitop()) {
    case JSOP_BITAND:
      masm.And(out, src1, src2);
      break;
    case JSOP_BITXOR:
      masm.Eor(out, src1, src2);
      break;
    case JSOP_BITOR:
      masm.Orr(out, src1, src2);
      break;
    default:
      MOZ_CRASH("unexpected binary opcode");
  }
}
// Int32 shifts (<<, >>, >>>). Constant counts are masked to 0..31. A
// fallible unsigned shift (>>>) must bail when the result does not fit in
// a signed int32 (i.e. the sign bit of the 32-bit result is set).
void CodeGenerator::visitShiftI(LShiftI* ins) {
  const ARMRegister lhs = toWRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  const ARMRegister dest = toWRegister(ins->output());

  if (!rhs->isConstant()) {
    const ARMRegister count = toWRegister(rhs);
    switch (ins->bitop()) {
      case JSOP_LSH:
        masm.Lsl(dest, lhs, count);
        break;
      case JSOP_RSH:
        masm.Asr(dest, lhs, count);
        break;
      case JSOP_URSH:
        masm.Lsr(dest, lhs, count);
        if (ins->mir()->toUrsh()->fallible()) {
          // A negative 32-bit result means the unsigned value exceeds
          // INT32_MAX: bail out.
          masm.Cmp(dest, Operand(0));
          bailoutIf(Assembler::LessThan, ins->snapshot());
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
    return;
  }

  const int32_t shift = ToInt32(rhs) & 0x1F;
  switch (ins->bitop()) {
    case JSOP_LSH:
      masm.Lsl(dest, lhs, shift);
      break;
    case JSOP_RSH:
      masm.Asr(dest, lhs, shift);
      break;
    case JSOP_URSH:
      if (shift) {
        masm.Lsr(dest, lhs, shift);
      } else if (ins->mir()->toUrsh()->fallible()) {
        // x >>> 0 only copies the value, but a negative input is out of
        // int32 range: the flag-setting AND detects the sign bit.
        masm.Ands(dest, lhs, Operand(0xFFFFFFFF));
        bailoutIf(Assembler::Signed, ins->snapshot());
      } else {
        masm.Mov(dest, lhs);
      }
      break;
    default:
      MOZ_CRASH("Unexpected shift op");
  }
}
// Unsigned right shift producing a double: shift, then convert the
// unsigned 32-bit result to double.
void CodeGenerator::visitUrshD(LUrshD* ins) {
  const ARMRegister lhs = toWRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  const FloatRegister out = ToFloatRegister(ins->output());
  const Register temp = ToRegister(ins->temp());
  const ARMRegister temp32 = toWRegister(ins->temp());

  if (!rhs->isConstant()) {
    // Variable count: mask to 0..31, shift into the temp, convert.
    masm.And(temp32, toWRegister(rhs), Operand(0x1F));
    masm.Lsr(temp32, lhs, temp32);
    masm.convertUInt32ToDouble(temp, out);
    return;
  }

  const int32_t shift = ToInt32(rhs) & 0x1F;
  if (shift == 0) {
    // Nothing to shift: convert the input directly.
    masm.convertUInt32ToDouble(ToRegister(ins->lhs()), out);
  } else {
    masm.Lsr(temp32, lhs, shift);
    masm.convertUInt32ToDouble(temp, out);
  }
}
// Math.pow(x, 0.5). Unlike sqrt, pow(-Infinity, 0.5) is +Infinity, and
// pow(-0, 0.5) is +0, so both cases need special handling.
void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
FloatRegister input = ToFloatRegister(ins->input());
FloatRegister output = ToFloatRegister(ins->output());
ScratchDoubleScope scratch(masm);
Label done, sqrt;
if (!ins->mir()->operandIsNeverNegativeInfinity()) {
// If input == -Infinity, produce +Infinity directly (0 - (-Infinity)).
masm.loadConstantDouble(NegativeInfinity<double>(), scratch);
Assembler::DoubleCondition cond = Assembler::DoubleNotEqualOrUnordered;
if (ins->mir()->operandIsNeverNaN()) {
cond = Assembler::DoubleNotEqual;
}
masm.branchDouble(cond, input, scratch, &sqrt);
masm.zeroDouble(output);
masm.subDouble(scratch, output);
masm.jump(&done);
masm.bind(&sqrt);
}
if (!ins->mir()->operandIsNeverNegativeZero()) {
// Adding 0 first turns a -0 input into +0 before taking the root.
masm.zeroDouble(scratch);
masm.addDouble(input, scratch);
masm.sqrtDouble(scratch, output);
} else {
masm.sqrtDouble(input, output);
}
masm.bind(&done);
}
// Convert an LIR allocation to a MoveOperand for the move resolver:
// a general register, a float register, or a stack slot addressed off the
// stack pointer.
MoveOperand CodeGeneratorARM64::toMoveOperand(const LAllocation a) const {
  if (a.isFloatReg()) {
    return MoveOperand(ToFloatRegister(a));
  }
  if (a.isGeneralReg()) {
    return MoveOperand(ToRegister(a));
  }
  return MoveOperand(AsRegister(masm.getStackPointer()), ToStackOffset(a));
}
// Out-of-line jump table for a table switch: the actual table of code
// pointers is emitted out-of-line (see visitOutOfLineTableSwitch) and is
// addressed through jumpLabel_.
class js::jit::OutOfLineTableSwitch
: public OutOfLineCodeBase<CodeGeneratorARM64> {
MTableSwitch* mir_;
// Labels the start of the emitted pointer table.
CodeLabel jumpLabel_;
void accept(CodeGeneratorARM64* codegen) override {
codegen->visitOutOfLineTableSwitch(this);
}
public:
explicit OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}
MTableSwitch* mir() const { return mir_; }
CodeLabel* jumpLabel() { return &jumpLabel_; }
};
// Emit the jump table itself: one code pointer per case, pointer-aligned.
// Constant pools are forbidden over the table so the entries stay
// contiguous and indexable.
void CodeGeneratorARM64::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool) {
MTableSwitch* mir = ool->mir();
AutoForbidPools afp(
&masm, (mir->numCases() + 1) * (sizeof(void*) / vixl::kInstructionSize));
masm.haltingAlign(sizeof(void*));
masm.bind(ool->jumpLabel());
masm.addCodeLabel(*ool->jumpLabel());
for (size_t i = 0; i < mir->numCases(); i++) {
LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
Label* caseheader = caseblock->label();
uint32_t caseoffset = caseheader->offset();
// Write a pointer to the case's entry, patched via a CodeLabel.
CodeLabel cl;
masm.writeCodePointer(&cl);
cl.target()->bind(caseoffset);
masm.addCodeLabel(cl);
}
}
// Dispatch a table switch: rebase the index to zero, bounds-check against
// the default case, then jump through the out-of-line pointer table.
void CodeGeneratorARM64::emitTableSwitchDispatch(MTableSwitch* mir,
Register index,
Register base) {
Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
if (mir->low() != 0) {
masm.sub32(Imm32(mir->low()), index);
}
// Unsigned compare also catches indexes that went negative above.
int32_t cases = mir->numCases();
masm.branch32(Assembler::AboveOrEqual, index, Imm32(cases), defaultcase);
OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
addOutOfLineCode(ool, mir);
// base = address of the table; jump through table[index].
masm.mov(ool->jumpLabel(), base);
BaseIndex pointer(base, index, ScalePointer);
masm.branchToComputedAddress(pointer);
}
// Double-precision arithmetic (+, -, *, /), dispatched on the JS opcode.
void CodeGenerator::visitMathD(LMathD* math) {
  ARMFPRegister src1(ToFloatRegister(math->lhs()), 64);
  ARMFPRegister src2(ToFloatRegister(math->rhs()), 64);
  ARMFPRegister result(ToFloatRegister(math->output()), 64);
  switch (math->jsop()) {
    case JSOP_DIV:
      masm.Fdiv(result, src1, src2);
      break;
    case JSOP_MUL:
      masm.Fmul(result, src1, src2);
      break;
    case JSOP_SUB:
      masm.Fsub(result, src1, src2);
      break;
    case JSOP_ADD:
      masm.Fadd(result, src1, src2);
      break;
    default:
      MOZ_CRASH("unexpected opcode");
  }
}
// Single-precision arithmetic (+, -, *, /), dispatched on the JS opcode.
void CodeGenerator::visitMathF(LMathF* math) {
  ARMFPRegister src1(ToFloatRegister(math->lhs()), 32);
  ARMFPRegister src2(ToFloatRegister(math->rhs()), 32);
  ARMFPRegister result(ToFloatRegister(math->output()), 32);
  switch (math->jsop()) {
    case JSOP_DIV:
      masm.Fdiv(result, src1, src2);
      break;
    case JSOP_MUL:
      masm.Fmul(result, src1, src2);
      break;
    case JSOP_SUB:
      masm.Fsub(result, src1, src2);
      break;
    case JSOP_ADD:
      masm.Fadd(result, src1, src2);
      break;
    default:
      MOZ_CRASH("unexpected opcode");
  }
}
// Math.floor on a double; masm.floor branches to |bail| on inputs it cannot
// represent as an int32, which becomes a bailout.
void CodeGenerator::visitFloor(LFloor* lir) {
  Label bail;
  masm.floor(ToFloatRegister(lir->input()), ToRegister(lir->output()), &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Math.floor on a float32; masm.floorf branches to |bail| on inputs it
// cannot represent as an int32, which becomes a bailout.
void CodeGenerator::visitFloorF(LFloorF* lir) {
  Label bail;
  masm.floorf(ToFloatRegister(lir->input()), ToRegister(lir->output()), &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Math.ceil on a double; masm.ceil branches to |bail| on inputs it cannot
// represent as an int32, which becomes a bailout.
void CodeGenerator::visitCeil(LCeil* lir) {
  Label bail;
  masm.ceil(ToFloatRegister(lir->input()), ToRegister(lir->output()), &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Math.ceil on a float32; masm.ceilf branches to |bail| on inputs it cannot
// represent as an int32, which becomes a bailout.
void CodeGenerator::visitCeilF(LCeilF* lir) {
  Label bail;
  masm.ceilf(ToFloatRegister(lir->input()), ToRegister(lir->output()), &bail);
  bailoutFrom(&bail, lir->snapshot());
}
// Math.round on a double. Non-negative inputs use Fcvtas (round to nearest,
// ties away from zero); negative inputs add ~0.5 and round toward -Infinity
// (Fcvtms) to get round-half-up behavior. Bails for results that do not fit
// in int32, for NaN, and for results that would be -0.
void CodeGenerator::visitRound(LRound* lir) {
const FloatRegister input = ToFloatRegister(lir->input());
const ARMFPRegister input64(input, 64);
const FloatRegister temp = ToFloatRegister(lir->temp());
const Register output = ToRegister(lir->output());
ScratchDoubleScope scratch(masm);
Label negative, done;
// Branch to the negative path when input < 0 (after Fcmp, |lo| is not
// taken for NaN, which falls into the non-negative path's checks).
masm.Fcmp(input64, 0.0);
masm.B(&negative, Assembler::Condition::lo);
{
masm.Fcvtas(ARMRegister(output, 32), input64);
// INT_MAX is the conversion's saturation value: out of range, bail.
bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
masm.branch32(Assembler::NotEqual, output, Imm32(0), &done);
{
// Result is 0: distinguish +0 from NaN and -0.
masm.Fcmp(input64, 0.0);
// Overflow (V) is set for an unordered compare, i.e. NaN input: bail.
bailoutIf(Assembler::Overflow, lir->snapshot());
vixl::UseScratchRegisterScope temps(&masm.asVIXL());
const ARMRegister scratchGPR64 = temps.AcquireX();
// Bit-compare against the -0.0 pattern; Math.round(-0) must bail.
masm.Fmov(scratchGPR64, input64);
masm.Cmp(scratchGPR64, vixl::Operand(uint64_t(0x8000000000000000)));
bailoutIf(Assembler::Equal, lir->snapshot());
}
masm.jump(&done);
}
masm.bind(&negative);
{
// Add 0.5 and round down. For inputs where adding exactly 0.5 would be
// rounded up to a representable boundary, use the largest double below
// 0.5 instead. (Selection threshold is -0.5; see branch below.)
Label join;
masm.loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);
masm.loadConstantDouble(-0.5, scratch);
masm.branchDouble(Assembler::DoubleLessThan, input, scratch, &join);
masm.loadConstantDouble(0.5, temp);
masm.bind(&join);
masm.addDouble(input, temp);
// Fcvtms rounds toward -Infinity; INT_MIN signals saturation: bail.
masm.Fcvtms(ARMRegister(output, 32), temp);
bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
// A zero result from a negative input is -0: bail.
bailoutTest32(Assembler::Zero, output, output, lir->snapshot());
}
masm.bind(&done);
}
// Math.round on a float32; mirrors visitRound (see its comments).
// Non-negative inputs use Fcvtas; negative inputs add ~0.5 and use Fcvtms.
void CodeGenerator::visitRoundF(LRoundF* lir) {
const FloatRegister input = ToFloatRegister(lir->input());
const ARMFPRegister input32(input, 32);
const FloatRegister temp = ToFloatRegister(lir->temp());
const Register output = ToRegister(lir->output());
ScratchFloat32Scope scratch(masm);
Label negative, done;
// Branch to the negative path when input < 0 (NaN does not take |lo|).
masm.Fcmp(input32, 0.0);
masm.B(&negative, Assembler::Condition::lo);
{
masm.Fcvtas(ARMRegister(output, 32), input32);
// INT_MAX is the conversion's saturation value: out of range, bail.
bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
masm.branch32(Assembler::NotEqual, output, Imm32(0), &done);
{
// Result is 0: distinguish +0 from NaN and -0.
masm.Fcmp(input32, 0.0f);
// Overflow (V) is set for an unordered compare, i.e. NaN input: bail.
bailoutIf(Assembler::Overflow, lir->snapshot());
vixl::UseScratchRegisterScope temps(&masm.asVIXL());
const ARMRegister scratchGPR32 = temps.AcquireW();
// Bit-compare against the float -0.0 pattern; round(-0) must bail.
masm.Fmov(scratchGPR32, input32);
masm.Cmp(scratchGPR32, vixl::Operand(uint32_t(0x80000000)));
bailoutIf(Assembler::Equal, lir->snapshot());
}
masm.jump(&done);
}
masm.bind(&negative);
{
// Add 0.5 (or the largest float below 0.5 near the -0.5 boundary) and
// round toward -Infinity.
Label join;
masm.loadConstantFloat32(GetBiggestNumberLessThan(0.5f), temp);
masm.loadConstantFloat32(-0.5f, scratch);
masm.branchFloat(Assembler::DoubleLessThan, input, scratch, &join);
masm.loadConstantFloat32(0.5f, temp);
masm.bind(&join);
masm.addFloat32(input, temp);
// INT_MIN signals saturation: bail.
masm.Fcvtms(ARMRegister(output, 32), temp);
bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
// A zero result from a negative input is -0: bail.
bailoutTest32(Assembler::Zero, output, output, lir->snapshot());
}
masm.bind(&done);
}
// Math.trunc on a double: Fcvtzs rounds toward zero. Saturated results
// (INT_MAX/INT_MIN) bail; a zero result needs extra checks to reject
// negative inputs (which would truncate to -0) and -0 itself.
void CodeGenerator::visitTrunc(LTrunc* lir) {
const FloatRegister input = ToFloatRegister(lir->input());
const ARMFPRegister input64(input, 64);
const Register output = ToRegister(lir->output());
const ARMRegister output32(output, 32);
const ARMRegister output64(output, 64);
Label done, zeroCase;
masm.Fcvtzs(output32, input64);
masm.branch32(Assembler::Equal, output, Imm32(0), &zeroCase);
// Saturation values indicate the input was out of int32 range.
bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
masm.jump(&done);
{
masm.bind(&zeroCase);
// Input in (-1, 0) would produce -0: bail on input < 0.
masm.Fcmp(input64, 0.0);
bailoutIf(vixl::lt, lir->snapshot());
// Check the sign bit of the raw input bits to catch -0 exactly.
masm.Fmov(output64, input64);
bailoutTestPtr(Assembler::Signed, output, output, lir->snapshot());
masm.movePtr(ImmPtr(0), output);
}
masm.bind(&done);
}
// Math.trunc on a float32; mirrors visitTrunc (see its comments).
void CodeGenerator::visitTruncF(LTruncF* lir) {
const FloatRegister input = ToFloatRegister(lir->input());
const ARMFPRegister input32(input, 32);
const Register output = ToRegister(lir->output());
const ARMRegister output32(output, 32);
Label done, zeroCase;
masm.Fcvtzs(output32, input32);
masm.branch32(Assembler::Equal, output, Imm32(0), &zeroCase);
// Saturation values indicate the input was out of int32 range.
bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
masm.jump(&done);
{
masm.bind(&zeroCase);
// Input in (-1, 0) would produce -0: bail on input < 0.
masm.Fcmp(input32, 0.0f);
bailoutIf(vixl::lt, lir->snapshot());
// Check the sign bit of the raw input bits to catch -0 exactly.
masm.Fmov(output32, input32);
bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
masm.move32(Imm32(0), output);
}
masm.bind(&done);
}
// Count leading zeroes with the native CLZ instruction.
void CodeGenerator::visitClzI(LClzI* lir) {
  masm.Clz(toWRegister(lir->output()), toWRegister(lir->input()));
}
// Count trailing zeroes of a 32-bit value.
void CodeGenerator::visitCtzI(LCtzI* lir) {
  Register in = ToRegister(lir->input());
  Register out = ToRegister(lir->output());
  masm.ctz32(in, out, false);
}
// Truncate a double to int32 via the shared emitter.
void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
ins->mir());
}
// Truncate a float32 to int32 via the shared emitter.
void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
ins->mir());
}
// arm64 does not use frame size classes; every depth maps to None.
FrameSizeClass FrameSizeClass::FromDepth(uint32_t frameDepth) {
return FrameSizeClass::None();
}
// arm64 does not use frame size classes; the limit is trivially 0.
FrameSizeClass FrameSizeClass::ClassLimit() { return FrameSizeClass(0); }
// Never called on arm64 (no frame size classes); crash if reached.
uint32_t FrameSizeClass::frameSize() const {
MOZ_CRASH("arm64 does not use frame size classes");
}
// On arm64 a JS Value occupies a single 64-bit register.
ValueOperand CodeGeneratorARM64::ToValue(LInstruction* ins, size_t pos) {
return ValueOperand(ToRegister(ins->getOperand(pos)));
}
// Not yet implemented on arm64; deliberately crashes if reached.
ValueOperand CodeGeneratorARM64::ToTempValue(LInstruction* ins, size_t pos) {
MOZ_CRASH("CodeGeneratorARM64::ToTempValue");
}
// Materialize a constant JS Value into the output value register.
void CodeGenerator::visitValue(LValue* value) {
ValueOperand result = ToOutValue(value);
masm.moveValue(value->value(), result);
}
// Box a typed payload into a JS Value in the output register.
void CodeGenerator::visitBox(LBox* box) {
const LAllocation* in = box->getOperand(0);
ValueOperand result = ToOutValue(box);
masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
}
// Unbox a JS Value to a typed payload. Fallible unboxes test the tag and
// bail out on mismatch; infallible unboxes assert the tag in debug builds
// only, then extract the payload.
void CodeGenerator::visitUnbox(LUnbox* unbox) {
MUnbox* mir = unbox->mir();
if (mir->fallible()) {
// Tag-test the value and bail if it is not of the expected type.
const ValueOperand value = ToValue(unbox, LUnbox::Input);
Assembler::Condition cond;
switch (mir->type()) {
case MIRType::Int32:
cond = masm.testInt32(Assembler::NotEqual, value);
break;
case MIRType::Boolean:
cond = masm.testBoolean(Assembler::NotEqual, value);
break;
case MIRType::Object:
cond = masm.testObject(Assembler::NotEqual, value);
break;
case MIRType::String:
cond = masm.testString(Assembler::NotEqual, value);
break;
case MIRType::Symbol:
cond = masm.testSymbol(Assembler::NotEqual, value);
break;
case MIRType::BigInt:
cond = masm.testBigInt(Assembler::NotEqual, value);
break;
default:
MOZ_CRASH("Given MIRType cannot be unboxed.");
}
bailoutIf(cond, unbox->snapshot());
} else {
#ifdef DEBUG
// Infallible path: verify the tag anyway in debug builds.
JSValueTag tag = MIRTypeToTag(mir->type());
Label ok;
ValueOperand input = ToValue(unbox, LUnbox::Input);
ScratchTagScope scratch(masm, input);
masm.splitTagForTest(input, scratch);
masm.cmpTag(scratch, ImmTag(tag));
masm.B(&ok, Assembler::Condition::Equal);
masm.assumeUnreachable("Infallible unbox type mismatch");
masm.bind(&ok);
#endif
}
// Extract the payload into the output register.
ValueOperand input = ToValue(unbox, LUnbox::Input);
Register result = ToRegister(unbox->output());
switch (mir->type()) {
case MIRType::Int32:
masm.unboxInt32(input, result);
break;
case MIRType::Boolean:
masm.unboxBoolean(input, result);
break;
case MIRType::Object:
masm.unboxObject(input, result);
break;
case MIRType::String:
masm.unboxString(input, result);
break;
case MIRType::Symbol:
masm.unboxSymbol(input, result);
break;
case MIRType::BigInt:
masm.unboxBigInt(input, result);
break;
default:
MOZ_CRASH("Given MIRType cannot be unboxed.");
}
}
// Materialize a double constant into the output FP register.
void CodeGenerator::visitDouble(LDouble* ins) {
  ARMFPRegister dest(ToFloatRegister(ins->getDef(0)), 64);
  masm.Fmov(dest, ins->getDouble());
}
// Materialize a float32 constant into the output FP register.
void CodeGenerator::visitFloat32(LFloat32* ins) {
  ARMFPRegister dest(ToFloatRegister(ins->getDef(0)), 32);
  masm.Fmov(dest, ins->getFloat());
}
// Branch on the truthiness of a double: 0, -0, and NaN are falsy.
void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
const LAllocation* opd = test->input();
MBasicBlock* ifTrue = test->ifTrue();
MBasicBlock* ifFalse = test->ifFalse();
masm.Fcmp(ARMFPRegister(ToFloatRegister(opd), 64), 0.0);
// Zero flag: input compared equal to 0.0 (covers -0 too).
jumpToBlock(ifFalse, Assembler::Zero);
// Overflow (V) flag: the compare was unordered, i.e. input is NaN.
jumpToBlock(ifFalse, Assembler::Overflow);
jumpToBlock(ifTrue);
}
// Branch on the truthiness of a float32: 0, -0, and NaN are falsy.
void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
const LAllocation* opd = test->input();
MBasicBlock* ifTrue = test->ifTrue();
MBasicBlock* ifFalse = test->ifFalse();
masm.Fcmp(ARMFPRegister(ToFloatRegister(opd), 32), 0.0);
// Zero flag: input compared equal to 0.0 (covers -0 too).
jumpToBlock(ifFalse, Assembler::Zero);
// Overflow (V) flag: the compare was unordered, i.e. input is NaN.
jumpToBlock(ifFalse, Assembler::Overflow);
jumpToBlock(ifTrue);
}
// Double comparison producing 0/1: compare, then set the output from the
// resulting condition flags.
void CodeGenerator::visitCompareD(LCompareD* comp) {
  const FloatRegister lhs = ToFloatRegister(comp->left());
  const FloatRegister rhs = ToFloatRegister(comp->right());
  const ARMRegister dest = toWRegister(comp->output());
  const Assembler::DoubleCondition cond =
      JSOpToDoubleCondition(comp->mir()->jsop());
  masm.compareDouble(cond, lhs, rhs);
  masm.cset(dest, Assembler::ConditionFromDoubleCondition(cond));
}
// Float32 comparison producing 0/1: compare, then set the output from the
// resulting condition flags.
void CodeGenerator::visitCompareF(LCompareF* comp) {
  const FloatRegister lhs = ToFloatRegister(comp->left());
  const FloatRegister rhs = ToFloatRegister(comp->right());
  const ARMRegister dest = toWRegister(comp->output());
  const Assembler::DoubleCondition cond =
      JSOpToDoubleCondition(comp->mir()->jsop());
  masm.compareFloat(cond, lhs, rhs);
  masm.cset(dest, Assembler::ConditionFromDoubleCondition(cond));
}
// Double comparison followed by a branch on the resulting condition.
void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
  const Assembler::DoubleCondition dblCond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());
  masm.compareDouble(dblCond, ToFloatRegister(comp->left()),
                     ToFloatRegister(comp->right()));
  emitBranch(Assembler::ConditionFromDoubleCondition(dblCond), comp->ifTrue(),
             comp->ifFalse());
}
// Float32 comparison followed by a branch on the resulting condition.
void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
  const Assembler::DoubleCondition dblCond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());
  masm.compareFloat(dblCond, ToFloatRegister(comp->left()),
                    ToFloatRegister(comp->right()));
  emitBranch(Assembler::ConditionFromDoubleCondition(dblCond), comp->ifTrue(),
             comp->ifFalse());
}
// Strict equality of a Value against a boolean: box the boolean RHS into a
// scratch register and compare the full 64-bit Value bits.
void CodeGenerator::visitCompareB(LCompareB* lir) {
MCompare* mir = lir->mir();
const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
const LAllocation* rhs = lir->rhs();
const Register output = ToRegister(lir->output());
const Assembler::Condition cond =
JSOpToCondition(mir->compareType(), mir->jsop());
vixl::UseScratchRegisterScope temps(&masm.asVIXL());
const Register scratch = temps.AcquireX().asUnsized();
MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
// Materialize the boxed boolean RHS in the scratch register.
if (rhs->isConstant()) {
masm.moveValue(rhs->toConstant()->toJSValue(), ValueOperand(scratch));
} else {
masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), scratch);
}
masm.cmpPtrSet(cond, lhs.valueReg(), scratch, output);
}
void CodeGenerator::visitCompareBAndBranch(LCompareBAndBranch* lir) {
  // Branching form of CompareB: strict (in)equality of a boxed Value
  // against a boolean.
  MCompare* mir = lir->cmpMir();
  MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

  const ValueOperand lhsValue = ToValue(lir, LCompareBAndBranch::Lhs);
  const LAllocation* rhsAlloc = lir->rhs();
  const Assembler::Condition branchCond =
      JSOpToCondition(mir->compareType(), mir->jsop());

  vixl::UseScratchRegisterScope temps(&masm.asVIXL());
  const Register scratch = temps.AcquireX().asUnsized();

  // Materialize the boxed rhs into the scratch register.
  if (rhsAlloc->isConstant()) {
    masm.moveValue(rhsAlloc->toConstant()->toJSValue(), ValueOperand(scratch));
  } else {
    masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhsAlloc), scratch);
  }
  masm.cmpPtr(lhsValue.valueReg(), scratch);
  emitBranch(branchCond, lir->ifTrue(), lir->ifFalse());
}
void CodeGenerator::visitCompareBitwise(LCompareBitwise* lir) {
  // Raw 64-bit comparison of two boxed Values; only valid for equality ops.
  MCompare* mir = lir->mir();
  MOZ_ASSERT(IsEqualityOp(mir->jsop()));
  const ValueOperand lhsValue = ToValue(lir, LCompareBitwise::LhsInput);
  const ValueOperand rhsValue = ToValue(lir, LCompareBitwise::RhsInput);
  masm.cmpPtrSet(JSOpToCondition(mir->compareType(), mir->jsop()),
                 lhsValue.valueReg(), rhsValue.valueReg(),
                 ToRegister(lir->output()));
}
void CodeGenerator::visitCompareBitwiseAndBranch(
    LCompareBitwiseAndBranch* lir) {
  // Branch on a raw 64-bit comparison of two boxed Values.
  MCompare* mir = lir->cmpMir();
  MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
             mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
  const ValueOperand lhsValue = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
  const ValueOperand rhsValue = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);
  masm.cmpPtr(lhsValue.valueReg(), rhsValue.valueReg());
  emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(),
             lir->ifFalse());
}
void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) {
  // Test (left & right) and branch on the flags without materializing
  // the AND result.
  const ARMRegister lhs = toWRegister(baab->left());
  const LAllocation* rhs = baab->right();
  if (rhs->isConstant()) {
    masm.Tst(lhs, Operand(ToInt32(rhs)));
  } else {
    masm.Tst(lhs, toWRegister(rhs));
  }
  emitBranch(baab->cond(), baab->ifTrue(), baab->ifFalse());
}
void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
  // Convert an unsigned 32-bit integer to a double (always exact).
  Register src = ToRegister(lir->input());
  FloatRegister dest = ToFloatRegister(lir->output());
  masm.convertUInt32ToDouble(src, dest);
}
void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
  // Convert an unsigned 32-bit integer to a float32 (may round).
  Register src = ToRegister(lir->input());
  FloatRegister dest = ToFloatRegister(lir->output());
  masm.convertUInt32ToFloat32(src, dest);
}
void CodeGenerator::visitNotI(LNotI* ins) {
  // Logical NOT of an int32: output = (input == 0) ? 1 : 0.
  ARMRegister in32 = toWRegister(ins->input());
  ARMRegister out32 = toWRegister(ins->output());
  masm.Cmp(in32, ZeroRegister32);
  masm.Cset(out32, Assembler::Zero);
}
// Logical NOT of a double: output = 1 iff input is +/-0.0 or NaN (the double
// values whose JS boolean conversion is false), else 0.
void CodeGenerator::visitNotD(LNotD* ins) {
  ARMFPRegister input(ToFloatRegister(ins->input()), 64);
  ARMRegister output = toWRegister(ins->output());
  // Compare against zero; an unordered (NaN) compare sets the V flag.
  masm.Fcmp(input, 0.0);
  // output = 1 when input compared equal to 0.0.
  masm.Cset(output, Assembler::Equal);
  // Csinc: keep output when NoOverflow (ordered compare); otherwise the
  // compare was unordered (NaN input), so force output = zr + 1 = 1.
  masm.Csinc(output, output, ZeroRegister32, Assembler::NoOverflow);
}
// Logical NOT of a float32: output = 1 iff input is +/-0.0 or NaN (the
// float values whose JS boolean conversion is false), else 0.
void CodeGenerator::visitNotF(LNotF* ins) {
  ARMFPRegister input(ToFloatRegister(ins->input()), 32);
  ARMRegister output = toWRegister(ins->output());
  // Compare against zero; an unordered (NaN) compare sets the V flag.
  masm.Fcmp(input, 0.0);
  // output = 1 when input compared equal to 0.0.
  masm.Cset(output, Assembler::Equal);
  // Csinc: keep output when NoOverflow (ordered compare); otherwise the
  // compare was unordered (NaN input), so force output = zr + 1 = 1.
  masm.Csinc(output, output, ZeroRegister32, Assembler::NoOverflow);
}
// Not implemented for ARM64; crashes deliberately if this path is ever
// reached so missing support is caught immediately.
void CodeGeneratorARM64::storeElementTyped(const LAllocation* value,
                                           MIRType valueType,
                                           MIRType elementType,
                                           Register elements,
                                           const LAllocation* index) {
  MOZ_CRASH("CodeGeneratorARM64::storeElementTyped");
}
// Emit the epilogue run when this code is invalidated: push the return
// address and a patchable IonScript word, then jump to the invalidation
// thunk, which never returns here.
void CodeGeneratorARM64::generateInvalidateEpilogue() {
  // Pad with nops so there is room for later patching without clobbering
  // the invalidation epilogue itself.
  for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
    masm.nop();
  }
  masm.bind(&invalidate_);
  // Push the return address of the point that was invalidated.
  masm.push(lr);
  // Push a placeholder word (-1); pushWithPatch records the offset so the
  // real value can be patched in later.
  invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
  // Transfer control to the invalidation thunk; control should not come back.
  TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
  masm.call(thunk);
  masm.assumeUnreachable(
      "Should have returned directly to its caller instead of here.");
}
// Map a heap-access base kind to the register reserved for it, or
// InvalidReg for any other base.
template <class U>
Register getBase(U* mir) {
  if (mir->base() == U::Heap) {
    return HeapReg;
  }
  return InvalidReg;
}
// The asm.js/wasm heap and stack-argument operations below are not yet
// implemented for the ARM64 backend; each crashes deliberately if reached.
void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
  MOZ_CRASH("visitAsmJSLoadHeap");
}
void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
  MOZ_CRASH("visitAsmJSStoreHeap");
}
void CodeGenerator::visitWasmCompareExchangeHeap(
    LWasmCompareExchangeHeap* ins) {
  MOZ_CRASH("visitWasmCompareExchangeHeap");
}
void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
  MOZ_CRASH("visitWasmAtomicBinopHeap");
}
void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
  MOZ_CRASH("visitWasmStackArg");
}
// Unsigned 32-bit division, with optional divide-by-zero handling,
// remainder check, and overflow-into-sign-bit bailout.
void CodeGenerator::visitUDiv(LUDiv* ins) {
  MDiv* mir = ins->mir();
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  ARMRegister lhs32 = ARMRegister(lhs, 32);
  ARMRegister rhs32 = ARMRegister(rhs, 32);
  ARMRegister output32 = ARMRegister(output, 32);

  // Handle a possibly-zero divisor.
  if (mir->canBeDivideByZero()) {
    if (mir->isTruncated()) {
      if (mir->trapOnError()) {
        // Wasm semantics: trap on a zero divisor.
        Label nonZero;
        masm.branchTest32(Assembler::NonZero, rhs, rhs, &nonZero);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
        masm.bind(&nonZero);
      }
      // Otherwise no check is needed: the ARM64 UDIV instruction returns 0
      // for a zero divisor, which is the desired truncated result.
      // (Removed an empty `else {}` branch here.)
    } else {
      // Non-truncated division by zero can't produce an int32; bail out.
      bailoutTest32(Assembler::Zero, rhs, rhs, ins->snapshot());
    }
  }

  masm.Udiv(output32, lhs32, rhs32);

  // If the remainder is observable, recompute it as lhs - output * rhs and
  // bail out when it is non-zero (the result wouldn't be an exact int32).
  if (!mir->canTruncateRemainder()) {
    Register remainder = ToRegister(ins->remainder());
    ARMRegister remainder32 = ARMRegister(remainder, 32);
    masm.Msub(remainder32, output32, rhs32, lhs32);
    bailoutTest32(Assembler::NonZero, remainder, remainder, ins->snapshot());
  }

  // An unsigned quotient with the sign bit set doesn't fit a non-negative
  // int32; bail out unless the result is truncated.
  if (!mir->isTruncated()) {
    bailoutTest32(Assembler::Signed, output, output, ins->snapshot());
  }
}
// Unsigned 32-bit modulus: output = lhs - (lhs / rhs) * rhs, with
// divide-by-zero and sign-bit bailouts where required.
void CodeGenerator::visitUMod(LUMod* ins) {
  MMod* mir = ins->mir();
  ARMRegister lhs = toWRegister(ins->lhs());
  ARMRegister rhs = toWRegister(ins->rhs());
  ARMRegister output = toWRegister(ins->output());
  Label done;
  if (mir->canBeDivideByZero() && !mir->isTruncated()) {
    // Non-truncated x % 0 is not representable as an int32; bail out.
    masm.Cmp(rhs, Operand(0));
    bailoutIf(Assembler::Equal, ins->snapshot());
  } else if (mir->canBeDivideByZero()) {
    // Truncated case: when rhs is zero, take the branch with output already
    // holding rhs (i.e. 0) and skip the division entirely.
    masm.Mov(output, rhs);
    masm.Cbz(rhs, &done);
  }
  // output = lhs - (lhs udiv rhs) * rhs.
  masm.Udiv(output, lhs, rhs);
  masm.Msub(output, output, rhs, lhs);
  if (!mir->isTruncated()) {
    // A remainder with the sign bit set doesn't fit a non-negative int32.
    bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
  }
  // Only bound if the zero-divisor fast path above emitted a jump to it.
  if (done.used()) {
    masm.bind(&done);
  }
}
void CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins) {
  // output = base + (index << scale) + displacement.
  const MEffectiveAddress* mir = ins->mir();
  ARMRegister dest = toXRegister(ins->output());
  masm.Add(dest, toXRegister(ins->base()),
           Operand(toXRegister(ins->index()), vixl::LSL, mir->scale()));
  masm.Add(dest, dest, Operand(mir->displacement()));
}
void CodeGenerator::visitNegI(LNegI* ins) {
  // 32-bit two's-complement negation: output = -input.
  masm.Neg(toWRegister(ins->output()), toWRegister(ins->input()));
}
// Double negation: output = -input.
void CodeGenerator::visitNegD(LNegD* ins) {
  const ARMFPRegister input(ToFloatRegister(ins->input()), 64);
  // Bug fix: the destination register must come from ins->output(); it
  // previously read ins->input() again, which is only correct when the
  // register allocator happens to reuse the input register as the output.
  const ARMFPRegister output(ToFloatRegister(ins->output()), 64);
  masm.Fneg(output, input);
}
// Float32 negation: output = -input.
void CodeGenerator::visitNegF(LNegF* ins) {
  const ARMFPRegister input(ToFloatRegister(ins->input()), 32);
  // Bug fix: the destination register must come from ins->output(); it
  // previously read ins->input() again, which is only correct when the
  // register allocator happens to reuse the input register as the output.
  const ARMFPRegister output(ToFloatRegister(ins->output()), 32);
  masm.Fneg(output, input);
}
void CodeGenerator::visitCompareExchangeTypedArrayElement(
    LCompareExchangeTypedArrayElement* lir) {
  // Atomic compare-and-exchange on a typed-array element; the previous
  // value ends up in `output`.
  Scalar::Type arrayType = lir->mir()->arrayType();
  size_t elemSize = Scalar::byteSize(arrayType);

  Register elements = ToRegister(lir->elements());
  Register oldval = ToRegister(lir->oldval());
  Register newval = ToRegister(lir->newval());
  Register temp =
      lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
  AnyRegister output = ToAnyRegister(lir->output());

  // The element index is either a compile-time constant or a register.
  const LAllocation* index = lir->index();
  if (index->isConstant()) {
    Address dest(elements, ToInt32(index) * elemSize);
    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
                           newval, temp, output);
  } else {
    BaseIndex dest(elements, ToRegister(index), ScaleFromElemWidth(elemSize));
    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
                           newval, temp, output);
  }
}
void CodeGenerator::visitAtomicExchangeTypedArrayElement(
    LAtomicExchangeTypedArrayElement* lir) {
  // Atomic exchange on a typed-array element; the previous value ends up
  // in `output`.
  Scalar::Type arrayType = lir->mir()->arrayType();
  size_t elemSize = Scalar::byteSize(arrayType);

  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  Register temp =
      lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
  AnyRegister output = ToAnyRegister(lir->output());

  // The element index is either a compile-time constant or a register.
  const LAllocation* index = lir->index();
  if (index->isConstant()) {
    Address dest(elements, ToInt32(index) * elemSize);
    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp,
                          output);
  } else {
    BaseIndex dest(elements, ToRegister(index), ScaleFromElemWidth(elemSize));
    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp,
                          output);
  }
}
// The 64-bit integer, wasm, and miscellaneous operations below are not yet
// implemented for the ARM64 backend; each crashes deliberately if reached so
// that unsupported codegen is caught immediately.
void CodeGenerator::visitAddI64(LAddI64*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitClzI64(LClzI64*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitCtzI64(LCtzI64*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitMulI64(LMulI64*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitNotI64(LNotI64*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitSubI64(LSubI64*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitPopcntI(LPopcntI*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitBitOpI64(LBitOpI64*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitShiftI64(LShiftI64*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitWasmLoad(LWasmLoad*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitCopySignD(LCopySignD*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitCopySignF(LCopySignF*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitNearbyInt(LNearbyInt*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitPopcntI64(LPopcntI64*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitRotateI64(LRotateI64*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitWasmStore(LWasmStore*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitCompareI64(LCompareI64*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitNearbyIntF(LNearbyIntF*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitWasmSelect(LWasmSelect*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitWasmLoadI64(LWasmLoadI64*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitWasmStoreI64(LWasmStoreI64*) { MOZ_CRASH("NYI"); }
// Emit the memory barrier requested by the LIR node's barrier type.
void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
  masm.memoryBarrier(ins->type());
}
// More not-yet-implemented ARM64 operations; each crashes deliberately if
// reached so that unsupported codegen is caught immediately.
void CodeGenerator::visitWasmAddOffset(LWasmAddOffset*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitWasmSelectI64(LWasmSelectI64*) { MOZ_CRASH("NYI"); }
void CodeGenerator::visitSignExtendInt64(LSignExtendInt64*) {
  MOZ_CRASH("NYI");
}
void CodeGenerator::visitWasmReinterpret(LWasmReinterpret*) {
  MOZ_CRASH("NYI");
}
void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64*) {
  MOZ_CRASH("NYI");
}
void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch*) {
  MOZ_CRASH("NYI");
}
void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32*) {
  MOZ_CRASH("NYI");
}
void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64*) {
  MOZ_CRASH("NYI");
}
void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch*) {
  MOZ_CRASH("NYI");
}
void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32*) {
  MOZ_CRASH("NYI");
}
void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64*) {
  MOZ_CRASH("NYI");
}
void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap*) {
  MOZ_CRASH("NYI");
}
void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64*) {
  MOZ_CRASH("NYI");
}
// Atomic read-modify-write on a typed-array element via atomicFetchOpJS;
// the previous element value is produced in `output`.
void CodeGenerator::visitAtomicTypedArrayElementBinop(
    LAtomicTypedArrayElementBinop* lir) {
  // This node is only used when the result is observed (hasUses); the
  // ForEffect variant covers the discarded-result case.
  MOZ_ASSERT(lir->mir()->hasUses());
  AnyRegister output = ToAnyRegister(lir->output());
  Register elements = ToRegister(lir->elements());
  // Scratch register consumed by the underlying atomic sequence.
  Register flagTemp = ToRegister(lir->temp1());
  // Optional second temp; InvalidReg when the allocator supplied none.
  Register outTemp =
      lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
  Register value = ToRegister(lir->value());
  Scalar::Type arrayType = lir->mir()->arrayType();
  size_t width = Scalar::byteSize(arrayType);
  // Address the element with either a constant or a register index.
  if (lir->index()->isConstant()) {
    Address mem(elements, ToInt32(lir->index()) * width);
    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
                         lir->mir()->operation(), value, mem, flagTemp, outTemp,
                         output);
  } else {
    BaseIndex mem(elements, ToRegister(lir->index()),
                  ScaleFromElemWidth(width));
    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
                         lir->mir()->operation(), value, mem, flagTemp, outTemp,
                         output);
  }
}
// Discarded-result atomic variants are not yet implemented for ARM64; each
// crashes deliberately if reached.
void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
    LWasmAtomicBinopHeapForEffect*) {
  MOZ_CRASH("NYI");
}
void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
    LAtomicTypedArrayElementBinopForEffect*) {
  MOZ_CRASH("NYI");
}