#include "jit/mips-shared/CodeGenerator-mips-shared.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"
#include "jsnum.h"
#include "jit/CodeGenerator.h"
#include "jit/JitFrames.h"
#include "jit/JitRealm.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "js/Conversions.h"
#include "vm/JSContext.h"
#include "vm/Realm.h"
#include "vm/Shape.h"
#include "vm/TraceLogging.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"
#include "vm/JSScript-inl.h"
using namespace js;
using namespace js::jit;
using JS::GenericNaN;
using JS::ToInt32;
using mozilla::DebugOnly;
using mozilla::FloorLog2;
using mozilla::NegativeInfinity;
// Construct the MIPS-shared code generator; all state is held by the
// CodeGeneratorShared base class.
CodeGeneratorMIPSShared::CodeGeneratorMIPSShared(MIRGenerator* gen,
                                                 LIRGraph* graph,
                                                 MacroAssembler* masm)
    : CodeGeneratorShared(gen, graph, masm) {}
// Map an LAllocation onto an assembler Operand: a general-purpose register,
// a float register, or a stack slot addressed off the stack pointer.
Operand CodeGeneratorMIPSShared::ToOperand(const LAllocation& a) {
  if (a.isFloatReg()) {
    return Operand(a.toFloatReg()->reg());
  }
  if (a.isGeneralReg()) {
    return Operand(a.toGeneralReg()->reg());
  }
  // Neither register kind: the value lives in a stack slot.
  return Operand(masm.getStackPointer(), ToStackOffset(&a));
}
// Pointer convenience overload; forwards to the reference version.
Operand CodeGeneratorMIPSShared::ToOperand(const LAllocation* a) {
  return ToOperand(*a);
}
// Convenience overload: convert a definition's output allocation.
Operand CodeGeneratorMIPSShared::ToOperand(const LDefinition* def) {
  return ToOperand(def->output());
}
#ifdef JS_PUNBOX64
// On 64-bit (punboxed) targets an int64 fits in a single allocation, so it
// can be handed to the assembler as one Operand.
Operand CodeGeneratorMIPSShared::ToOperandOrRegister64(
    const LInt64Allocation input) {
  return ToOperand(input.value());
}
#else
// On 32-bit targets an int64 occupies a register pair.
Register64 CodeGeneratorMIPSShared::ToOperandOrRegister64(
    const LInt64Allocation input) {
  return ToRegister64(input);
}
#endif
// Emit a floating-point compare-and-branch to |mir|'s block (after trivial
// blocks are skipped), using the double or single-precision branch form
// according to |fmt|.
void CodeGeneratorMIPSShared::branchToBlock(Assembler::FloatFormat fmt,
                                            FloatRegister lhs,
                                            FloatRegister rhs, MBasicBlock* mir,
                                            Assembler::DoubleCondition cond) {
  Label* target = skipTrivialBlocks(mir)->lir()->label();
  if (fmt != Assembler::DoubleFloat) {
    masm.branchFloat(cond, lhs, rhs, target);
  } else {
    masm.branchDouble(cond, lhs, rhs, target);
  }
}
// MIPS does not bucket frames into size classes; every depth maps to None.
FrameSizeClass FrameSizeClass::FromDepth(uint32_t frameDepth) {
  return FrameSizeClass::None();
}
// With no size classes in use, the limit is the zero class.
FrameSizeClass FrameSizeClass::ClassLimit() { return FrameSizeClass(0); }
// Querying a class's frame size is meaningless on MIPS (see FromDepth).
uint32_t FrameSizeClass::frameSize() const {
  MOZ_CRASH("MIPS does not use frame size classes");
}
// Double-dispatch hook: route this out-of-line path to the code generator's
// visitOutOfLineBailout.
void OutOfLineBailout::accept(CodeGeneratorMIPSShared* codegen) {
  codegen->visitOutOfLineBailout(this);
}
// Branch on an int32 truth test: a nonzero input goes to the true block.
void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
  Register input = ToRegister(test->getOperand(0));
  emitBranch(input, Imm32(0), Assembler::NonZero, test->ifTrue(),
             test->ifFalse());
}
// Materialize the boolean result of an int32 (or, on MIPS64, pointer-sized
// object/symbol) comparison into the output register.
void CodeGenerator::visitCompare(LCompare* comp) {
  MCompare* mir = comp->mir();
  Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
  const LAllocation* left = comp->getOperand(0);
  const LAllocation* right = comp->getOperand(1);
  const LDefinition* def = comp->getDef(0);

#ifdef JS_CODEGEN_MIPS64
  // Object/symbol comparisons are full pointer width on 64-bit targets.
  if (mir->compareType() == MCompare::Compare_Object ||
      mir->compareType() == MCompare::Compare_Symbol) {
    if (right->isGeneralReg()) {
      masm.cmpPtrSet(cond, ToRegister(left), ToRegister(right),
                     ToRegister(def));
    } else {
      // Rhs spilled to the stack: compare against its address.
      masm.cmpPtrSet(cond, ToRegister(left), ToAddress(right), ToRegister(def));
    }
    return;
  }
#endif

  // 32-bit compare; the rhs may be an immediate, a register, or a stack slot.
  if (right->isConstant()) {
    masm.cmp32Set(cond, ToRegister(left), Imm32(ToInt32(right)),
                  ToRegister(def));
  } else if (right->isGeneralReg()) {
    masm.cmp32Set(cond, ToRegister(left), ToRegister(right), ToRegister(def));
  } else {
    masm.cmp32Set(cond, ToRegister(left), ToAddress(right), ToRegister(def));
  }
}
// Compare two int32 (or, on MIPS64, pointer-sized object/symbol) values and
// branch to the true/false successor blocks.
void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
  MCompare* mir = comp->cmpMir();
  Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());

#ifdef JS_CODEGEN_MIPS64
  // Object/symbol comparisons are full pointer width on 64-bit targets.
  if (mir->compareType() == MCompare::Compare_Object ||
      mir->compareType() == MCompare::Compare_Symbol) {
    if (comp->right()->isGeneralReg()) {
      emitBranch(ToRegister(comp->left()), ToRegister(comp->right()), cond,
                 comp->ifTrue(), comp->ifFalse());
    } else {
      // Stack-slot rhs: load the pointer into the scratch register first.
      masm.loadPtr(ToAddress(comp->right()), ScratchRegister);
      emitBranch(ToRegister(comp->left()), ScratchRegister, cond,
                 comp->ifTrue(), comp->ifFalse());
    }
    return;
  }
#endif

  if (comp->right()->isConstant()) {
    emitBranch(ToRegister(comp->left()), Imm32(ToInt32(comp->right())), cond,
               comp->ifTrue(), comp->ifFalse());
  } else if (comp->right()->isGeneralReg()) {
    emitBranch(ToRegister(comp->left()), ToRegister(comp->right()), cond,
               comp->ifTrue(), comp->ifFalse());
  } else {
    // Stack-slot rhs: load the 32-bit value into the scratch register first.
    masm.load32(ToAddress(comp->right()), ScratchRegister);
    emitBranch(ToRegister(comp->left()), ScratchRegister, cond, comp->ifTrue(),
               comp->ifFalse());
  }
}
// Emit all out-of-line code, then — if any bailout was emitted — the shared
// bailout tail: every OOL bailout jumps to deoptLabel_, which passes the
// frame size to the generic bailout trampoline in |ra|.
bool CodeGeneratorMIPSShared::generateOutOfLineCode() {
  if (!CodeGeneratorShared::generateOutOfLineCode()) {
    return false;
  }

  if (deoptLabel_.used()) {
    // All non-table-based bailouts will go here.
    masm.bind(&deoptLabel_);

    // Pass the frame size in ra so the handler can recover the IonScript.
    masm.move32(Imm32(frameSize()), ra);

    TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
    masm.jump(handler);
  }

  return !masm.oom();
}
// Retarget |label| to a fresh out-of-line bailout stub for |snapshot|.
// The label must have been used (jumped to) but not yet bound.
void CodeGeneratorMIPSShared::bailoutFrom(Label* label, LSnapshot* snapshot) {
  // Nothing to do if the macro-assembler has already bailed out.
  if (masm.bailed()) {
    return;
  }

  MOZ_ASSERT_IF(!masm.oom(), label->used());
  MOZ_ASSERT_IF(!masm.oom(), !label->bound());

  encode(snapshot);

  // Sanity check: if a frame size class is in use, it must agree with the
  // assembler's notion of how much has been pushed.
  MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
                frameClass_.frameSize() == masm.framePushed());

  // Create the OOL stub at the bytecode site of the snapshot's block and
  // redirect every jump through |label| to it.
  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
  OutOfLineBailout* ool =
      new (alloc()) OutOfLineBailout(snapshot, masm.framePushed());
  addOutOfLineCode(ool,
                   new (alloc()) BytecodeSite(tree, tree->script()->code()));

  masm.retarget(label, ool->entry());
}
// Unconditional bailout: emit a jump and wire it to a fresh OOL bailout
// stub for |snapshot|.
void CodeGeneratorMIPSShared::bailout(LSnapshot* snapshot) {
  Label bail;
  masm.jump(&bail);
  bailoutFrom(&bail, snapshot);
}
// Double-precision min/max; the first operand doubles as the output
// register. The trailing boolean flag is passed through unchanged
// (NaN-handling mode of the masm helper — see MacroAssembler).
void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
  FloatRegister lhs = ToFloatRegister(ins->first());
  FloatRegister rhs = ToFloatRegister(ins->second());

  MOZ_ASSERT(lhs == ToFloatRegister(ins->output()));

  if (!ins->mir()->isMax()) {
    masm.minDouble(rhs, lhs, true);
  } else {
    masm.maxDouble(rhs, lhs, true);
  }
}
// Single-precision min/max; the first operand doubles as the output
// register. The trailing boolean flag is passed through unchanged
// (NaN-handling mode of the masm helper — see MacroAssembler).
void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
  FloatRegister lhs = ToFloatRegister(ins->first());
  FloatRegister rhs = ToFloatRegister(ins->second());

  MOZ_ASSERT(lhs == ToFloatRegister(ins->output()));

  if (!ins->mir()->isMax()) {
    masm.minFloat32(rhs, lhs, true);
  } else {
    masm.maxFloat32(rhs, lhs, true);
  }
}
// |x| for doubles, computed in place (input register is the output).
void CodeGenerator::visitAbsD(LAbsD* ins) {
  FloatRegister in = ToFloatRegister(ins->input());
  MOZ_ASSERT(ToFloatRegister(ins->output()) == in);
  masm.as_absd(in, in);
}
// |x| for floats, computed in place (input register is the output).
void CodeGenerator::visitAbsF(LAbsF* ins) {
  FloatRegister in = ToFloatRegister(ins->input());
  MOZ_ASSERT(ToFloatRegister(ins->output()) == in);
  masm.as_abss(in, in);
}
// Square root of a double.
void CodeGenerator::visitSqrtD(LSqrtD* ins) {
  masm.as_sqrtd(ToFloatRegister(ins->output()), ToFloatRegister(ins->input()));
}
// Square root of a float.
void CodeGenerator::visitSqrtF(LSqrtF* ins) {
  masm.as_sqrts(ToFloatRegister(ins->output()), ToFloatRegister(ins->input()));
}
// Int32 addition. If the instruction carries no snapshot it cannot bail, so
// a plain add suffices; otherwise emit the overflow-testing form and bail
// out when the 32-bit add overflows.
void CodeGenerator::visitAddI(LAddI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);

  MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());

  // If there is no snapshot, we can't bail out on overflow.
  if (!ins->snapshot()) {
    if (rhs->isConstant()) {
      masm.ma_addu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
    } else {
      masm.as_addu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
    }
    return;
  }

  Label overflow;
  if (rhs->isConstant()) {
    masm.ma_addTestOverflow(ToRegister(dest), ToRegister(lhs),
                            Imm32(ToInt32(rhs)), &overflow);
  } else {
    masm.ma_addTestOverflow(ToRegister(dest), ToRegister(lhs), ToRegister(rhs),
                            &overflow);
  }

  bailoutFrom(&overflow, ins->snapshot());
}
// 64-bit addition; the lhs register (pair) is also the output.
void CodeGenerator::visitAddI64(LAddI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);

  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));

  if (IsConstant(rhs)) {
    masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
  } else {
    masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
  }
}
// Int32 subtraction. If the instruction carries no snapshot it cannot bail,
// so a plain subtract suffices; otherwise emit the overflow-testing form
// and bail out when the 32-bit subtract overflows.
void CodeGenerator::visitSubI(LSubI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);

  MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());

  // If there is no snapshot, we can't bail out on overflow.
  if (!ins->snapshot()) {
    if (rhs->isConstant()) {
      masm.ma_subu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
    } else {
      masm.as_subu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
    }
    return;
  }

  Label overflow;
  if (rhs->isConstant()) {
    masm.ma_subTestOverflow(ToRegister(dest), ToRegister(lhs),
                            Imm32(ToInt32(rhs)), &overflow);
  } else {
    masm.ma_subTestOverflow(ToRegister(dest), ToRegister(lhs), ToRegister(rhs),
                            &overflow);
  }

  bailoutFrom(&overflow, ins->snapshot());
}
// 64-bit subtraction; the lhs register (pair) is also the output.
void CodeGenerator::visitSubI64(LSubI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);

  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));

  if (IsConstant(rhs)) {
    masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
  } else {
    masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
  }
}
// Int32 multiplication, with optional overflow and negative-zero bailouts.
// Constant rhs values are strength-reduced to negation, moves, adds and
// shifts where that is safe.
void CodeGenerator::visitMulI(LMulI* ins) {
  const LAllocation* lhs = ins->lhs();
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());
  MMul* mul = ins->mir();

  MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
                !mul->canBeNegativeZero() && !mul->canOverflow());

  if (rhs->isConstant()) {
    int32_t constant = ToInt32(rhs);
    Register src = ToRegister(lhs);

    // Bailout on -0.0: with constant == 0, any negative lhs yields -0;
    // with a negative constant, lhs == 0 yields -0.
    if (mul->canBeNegativeZero() && constant <= 0) {
      Assembler::Condition cond =
          (constant == 0) ? Assembler::LessThan : Assembler::Equal;
      bailoutCmp32(cond, src, Imm32(0), ins->snapshot());
    }

    switch (constant) {
      case -1:
        // -INT32_MIN overflows int32; check before negating.
        if (mul->canOverflow()) {
          bailoutCmp32(Assembler::Equal, src, Imm32(INT32_MIN),
                       ins->snapshot());
        }
        masm.ma_negu(dest, src);
        break;
      case 0:
        masm.move32(Imm32(0), dest);
        break;
      case 1:
        masm.move32(src, dest);
        break;
      case 2:
        // x * 2 == x + x, with an overflow test when needed.
        if (mul->canOverflow()) {
          Label mulTwoOverflow;
          masm.ma_addTestOverflow(dest, src, src, &mulTwoOverflow);

          bailoutFrom(&mulTwoOverflow, ins->snapshot());
        } else {
          masm.as_addu(dest, src, src);
        }
        break;
      default:
        uint32_t shift = FloorLog2(constant);

        if (!mul->canOverflow() && (constant > 0)) {
          // If it cannot overflow, we can do lots of optimizations.
          uint32_t rest = constant - (1 << shift);

          // If the constant has a single bit set, it is a shift.
          if ((1 << shift) == constant) {
            masm.ma_sll(dest, src, Imm32(shift));
            return;
          }

          // If the constant is (1 << C1) | (1 << C2), compute it with a
          // shift, an add, and another shift.
          uint32_t shift_rest = FloorLog2(rest);
          if (src != dest && (1u << shift_rest) == rest) {
            masm.ma_sll(dest, src, Imm32(shift - shift_rest));
            masm.add32(src, dest);
            if (shift_rest != 0) {
              masm.ma_sll(dest, dest, Imm32(shift_rest));
            }
            return;
          }
        }

        if (mul->canOverflow() && (constant > 0) && (src != dest)) {
          // Overflow-checked power-of-two multiply: shift, then verify
          // that shifting back recovers the original value.
          if ((1 << shift) == constant) {
            masm.ma_sll(dest, src, Imm32(shift));
            masm.ma_sra(ScratchRegister, dest, Imm32(shift));
            bailoutCmp32(Assembler::NotEqual, src, ScratchRegister,
                         ins->snapshot());
            return;
          }
        }

        // General constant multiply, with or without overflow test.
        if (mul->canOverflow()) {
          Label mulConstOverflow;
          masm.ma_mul_branch_overflow(dest, ToRegister(lhs),
                                      Imm32(ToInt32(rhs)), &mulConstOverflow);

          bailoutFrom(&mulConstOverflow, ins->snapshot());
        } else {
          masm.ma_mul(dest, src, Imm32(ToInt32(rhs)));
        }
        break;
    }
  } else {
    Label multRegOverflow;

    if (mul->canOverflow()) {
      masm.ma_mul_branch_overflow(dest, ToRegister(lhs), ToRegister(rhs),
                                  &multRegOverflow);
      bailoutFrom(&multRegOverflow, ins->snapshot());
    } else {
      masm.as_mul(dest, ToRegister(lhs), ToRegister(rhs));
    }

    if (mul->canBeNegativeZero()) {
      Label done;
      masm.ma_b(dest, dest, &done, Assembler::NonZero, ShortJump);

      // A zero result is -0 if either operand was negative: bail then.
      Register scratch = SecondScratchReg;
      masm.as_or(scratch, ToRegister(lhs), ToRegister(rhs));
      bailoutCmp32(Assembler::Signed, scratch, scratch, ins->snapshot());

      masm.bind(&done);
    }
  }
}
// 64-bit multiplication; lhs doubles as the output. Small constant rhs
// values are strength-reduced to negation, xor, shifts and shift+add/sub.
void CodeGenerator::visitMulI64(LMulI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
  const Register64 output = ToOutRegister64(lir);

  if (IsConstant(rhs)) {
    int64_t constant = ToInt64(rhs);
    switch (constant) {
      case -1:
        masm.neg64(ToRegister64(lhs));
        return;
      case 0:
        masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
        return;
      case 1:
        // Identity: nothing to do.
        return;
      default:
        if (constant > 0) {
          // x * (2^n - 1) == (x << n) - x.
          if (mozilla::IsPowerOfTwo(static_cast<uint32_t>(constant + 1))) {
            masm.move64(ToRegister64(lhs), output);
            masm.lshift64(Imm32(FloorLog2(constant + 1)), output);
            masm.sub64(ToRegister64(lhs), output);
            return;
          } else if (mozilla::IsPowerOfTwo(
                         static_cast<uint32_t>(constant - 1))) {
            // x * (2^n + 1) == (x << n) + x.
            masm.move64(ToRegister64(lhs), output);
            masm.lshift64(Imm32(FloorLog2(constant - 1u)), output);
            masm.add64(ToRegister64(lhs), output);
            return;
          }
          // x * 2^n == x << n.
          int32_t shift = mozilla::FloorLog2(constant);
          if (int64_t(1) << shift == constant) {
            masm.lshift64(Imm32(shift), ToRegister64(lhs));
            return;
          }
        }
        // General constant multiply.
        Register temp = ToTempRegisterOrInvalid(lir->temp());
        masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
    }
  } else {
    Register temp = ToTempRegisterOrInvalid(lir->temp());
    masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
  }
}
// Int32 division. The divide-by-zero, INT32_MIN / -1 overflow and negative
// zero cases are handled by trapping (wasm), truncating the result, or
// bailing out, depending on the MIR instruction's flags.
void CodeGenerator::visitDivI(LDivI* ins) {
  // Extract the registers from this instruction.
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register dest = ToRegister(ins->output());
  Register temp = ToRegister(ins->getTemp(0));
  MDiv* mir = ins->mir();

  Label done;

  // Handle divide by zero.
  if (mir->canBeDivideByZero()) {
    if (mir->trapOnError()) {
      // wasm: x / 0 traps.
      Label nonZero;
      masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
      masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
      masm.bind(&nonZero);
    } else if (mir->canTruncateInfinities()) {
      // Truncated division by zero is zero: (Infinity|0 == 0).
      Label notzero;
      masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
      masm.move32(Imm32(0), dest);
      masm.ma_b(&done, ShortJump);
      masm.bind(&notzero);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot());
    }
  }

  // Handle the integer overflow from INT32_MIN / -1.
  if (mir->canBeNegativeOverflow()) {
    Label notMinInt;
    masm.move32(Imm32(INT32_MIN), temp);
    masm.ma_b(lhs, temp, &notMinInt, Assembler::NotEqual, ShortJump);

    masm.move32(Imm32(-1), temp);
    if (mir->trapOnError()) {
      // wasm: INT32_MIN / -1 traps.
      Label ok;
      masm.ma_b(rhs, temp, &ok, Assembler::NotEqual);
      masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
      masm.bind(&ok);
    } else if (mir->canTruncateOverflow()) {
      // (-INT32_MIN)|0 == INT32_MIN.
      Label skip;
      masm.ma_b(rhs, temp, &skip, Assembler::NotEqual, ShortJump);
      masm.move32(Imm32(INT32_MIN), dest);
      masm.ma_b(&done, ShortJump);
      masm.bind(&skip);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, rhs, temp, ins->snapshot());
    }
    masm.bind(&notMinInt);
  }

  // Handle negative zero: 0 / y with y < 0 is -0, which is not an int32.
  if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
    Label nonzero;
    masm.ma_b(lhs, lhs, &nonzero, Assembler::NonZero, ShortJump);
    bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot());
    masm.bind(&nonzero);
  }

  if (mir->canTruncateRemainder()) {
    masm.as_div(lhs, rhs);
    masm.as_mflo(dest);
  } else {
    MOZ_ASSERT(mir->fallible());

    // A nonzero remainder means the true result is fractional: bail out.
    Label remainderNonZero;
    masm.ma_div_branch_overflow(dest, lhs, rhs, &remainderNonZero);
    bailoutFrom(&remainderNonZero, ins->snapshot());
  }

  masm.bind(&done);
}
// Int32 division by a power of two (divisor == 1 << shift), implemented as
// an arithmetic shift with a round-toward-zero correction for negative
// dividends.
void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
  Register lhs = ToRegister(ins->numerator());
  Register dest = ToRegister(ins->output());
  Register tmp = ToRegister(ins->getTemp(0));
  int32_t shift = ins->shift();

  if (shift != 0) {
    MDiv* mir = ins->mir();
    if (!mir->isTruncated()) {
      // If any of the low |shift| bits is set, the remainder is nonzero
      // and the true result is fractional: bail out.
      masm.ma_sll(tmp, lhs, Imm32(32 - shift));
      bailoutCmp32(Assembler::NonZero, tmp, tmp, ins->snapshot());
    }

    if (!mir->canBeNegativeDividend()) {
      // Non-negative dividend needs no adjustment; just shift.
      masm.ma_sra(dest, lhs, Imm32(shift));
      return;
    }

    // Add (2^shift - 1) to negative dividends so that the arithmetic shift
    // rounds toward zero rather than toward -Infinity.
    if (shift > 1) {
      masm.ma_sra(tmp, lhs, Imm32(31));
      masm.ma_srl(tmp, tmp, Imm32(32 - shift));
      masm.add32(lhs, tmp);
    } else {
      masm.ma_srl(tmp, lhs, Imm32(32 - shift));
      masm.add32(lhs, tmp);
    }

    // Do the shift.
    masm.ma_sra(dest, tmp, Imm32(shift));
  } else {
    // Dividing by 1 is the identity.
    masm.move32(lhs, dest);
  }
}
// Int32 modulus via MIPS div/mfhi. Guards the INT_MIN % -1 and x % 0 corner
// cases, and bails when a zero result came from a negative dividend (-0)
// and the result is observable.
void CodeGenerator::visitModI(LModI* ins) {
  // Extract the registers from this instruction.
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register dest = ToRegister(ins->output());
  Register callTemp = ToRegister(ins->callTemp());
  MMod* mir = ins->mir();
  Label done, prevent;

  // Save the lhs so the -0 check at the end can inspect its sign.
  masm.move32(lhs, callTemp);

  // Prevent INT_MIN % -1, whose machine result is undefined/overflowing.
  if (mir->canBeNegativeDividend()) {
    masm.ma_b(lhs, Imm32(INT_MIN), &prevent, Assembler::NotEqual, ShortJump);
    if (mir->isTruncated()) {
      // (INT_MIN % -1)|0 == 0.
      Label skip;
      masm.ma_b(rhs, Imm32(-1), &skip, Assembler::NotEqual, ShortJump);
      masm.move32(Imm32(0), dest);
      masm.ma_b(&done, ShortJump);
      masm.bind(&skip);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, rhs, Imm32(-1), ins->snapshot());
    }
    masm.bind(&prevent);
  }

  // Handle x % 0 (NaN in JS): trap for wasm, 0 when truncated, else bail.
  if (mir->canBeDivideByZero()) {
    if (mir->isTruncated()) {
      if (mir->trapOnError()) {
        Label nonZero;
        masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
        masm.bind(&nonZero);
      } else {
        // (x % 0)|0 == 0.
        Label skip;
        masm.ma_b(rhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
        masm.move32(Imm32(0), dest);
        masm.ma_b(&done, ShortJump);
        masm.bind(&skip);
      }
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
    }
  }

  // Handle 0 % y with y < 0 (a -0 result): 0 when truncated, else bail.
  if (mir->canBeNegativeDividend()) {
    Label notNegative;
    masm.ma_b(rhs, Imm32(0), &notNegative, Assembler::GreaterThan, ShortJump);
    if (mir->isTruncated()) {
      // (0 % -y)|0 == 0.
      Label skip;
      masm.ma_b(lhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
      masm.move32(Imm32(0), dest);
      masm.ma_b(&done, ShortJump);
      masm.bind(&skip);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, lhs, Imm32(0), ins->snapshot());
    }
    masm.bind(&notNegative);
  }

  // The remainder of the MIPS divide is read from the HI register.
  masm.as_div(lhs, rhs);
  masm.as_mfhi(dest);

  // A zero result from a negative dividend should really be -0: bail out
  // unless the consumer truncates it to an int32 anyway.
  if (mir->canBeNegativeDividend()) {
    if (mir->isTruncated()) {
      // -0|0 == 0: nothing to do.
    } else {
      MOZ_ASSERT(mir->fallible());
      masm.ma_b(dest, Imm32(0), &done, Assembler::NotEqual, ShortJump);
      bailoutCmp32(Assembler::Signed, callTemp, Imm32(0), ins->snapshot());
    }
  }
  masm.bind(&done);
}
// Int32 modulus by a power of two, computed with a bitmask. Negative
// dividends are negated around the mask so the result keeps the dividend's
// sign; a zero result from a negative dividend (-0) bails out unless
// truncated.
void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
  Register in = ToRegister(ins->getOperand(0));
  Register out = ToRegister(ins->getDef(0));
  MMod* mir = ins->mir();
  Label negative, done;

  masm.move32(in, out);
  // 0 % anything is 0: short-circuit straight to done.
  masm.ma_b(in, in, &done, Assembler::Zero, ShortJump);
  // Switch based on the sign of the dividend.
  masm.ma_b(in, in, &negative, Assembler::Signed, ShortJump);
  {
    // Non-negative dividend: just mask, then jump past the -0 check.
    masm.and32(Imm32((1 << ins->shift()) - 1), out);
    masm.ma_b(&done, ShortJump);
  }
  {
    // Negative dividend: negate, mask, negate back so the result is <= 0.
    masm.bind(&negative);
    masm.neg32(out);
    masm.and32(Imm32((1 << ins->shift()) - 1), out);
    masm.neg32(out);
  }
  // Only the negative path falls through to here.
  if (mir->canBeNegativeDividend()) {
    if (!mir->isTruncated()) {
      MOZ_ASSERT(mir->fallible());
      // A zero result from a negative dividend is -0: bail.
      bailoutCmp32(Assembler::Equal, out, zero, ins->snapshot());
    } else {
      // -0|0 == 0: nothing to do.
    }
  }
  masm.bind(&done);
}
// Modulus by a power-of-two mask via ma_mod_mask. When the dividend may be
// negative and the result is observable (not truncated), ma_mod_mask is
// given a bail label to branch to.
void CodeGenerator::visitModMaskI(LModMaskI* ins) {
  Register src = ToRegister(ins->getOperand(0));
  Register dest = ToRegister(ins->getDef(0));
  Register tmp0 = ToRegister(ins->getTemp(0));
  Register tmp1 = ToRegister(ins->getTemp(1));
  MMod* mir = ins->mir();

  bool needsBailout = !mir->isTruncated() && mir->canBeNegativeDividend();
  if (!needsBailout) {
    masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), nullptr);
    return;
  }

  MOZ_ASSERT(mir->fallible());
  Label bail;
  masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), &bail);
  bailoutFrom(&bail, ins->snapshot());
}
// Bitwise NOT of an int32; the operand is always a register here.
void CodeGenerator::visitBitNotI(LBitNotI* ins) {
  const LAllocation* src = ins->getOperand(0);
  MOZ_ASSERT(!src->isConstant());
  masm.ma_not(ToRegister(ins->getDef(0)), ToRegister(src));
}
// Int32 bitwise or/xor/and with an immediate or register rhs.
void CodeGenerator::visitBitOpI(LBitOpI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);
  // All of these bitops should be either imm32's, or integer registers.
  switch (ins->bitop()) {
    case JSOP_BITOR:
      if (rhs->isConstant()) {
        masm.ma_or(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
      } else {
        masm.as_or(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
      }
      break;
    case JSOP_BITXOR:
      if (rhs->isConstant()) {
        masm.ma_xor(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
      } else {
        masm.as_xor(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
      }
      break;
    case JSOP_BITAND:
      if (rhs->isConstant()) {
        masm.ma_and(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
      } else {
        masm.as_and(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
      }
      break;
    default:
      MOZ_CRASH("unexpected binary opcode");
  }
}
// 64-bit bitwise or/xor/and; the lhs register (pair) is also the output.
void CodeGenerator::visitBitOpI64(LBitOpI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
  const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);

  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));

  switch (lir->bitop()) {
    case JSOP_BITOR:
      if (IsConstant(rhs)) {
        masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
      } else {
        masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
      }
      break;
    case JSOP_BITXOR:
      if (IsConstant(rhs)) {
        masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
      } else {
        masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
      }
      break;
    case JSOP_BITAND:
      if (IsConstant(rhs)) {
        masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
      } else {
        masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
      }
      break;
    default:
      MOZ_CRASH("unexpected binary opcode");
  }
}
// Int32 shifts. Shift counts are masked to five bits. JSOP_URSH with a zero
// shift leaves the value interpreted as uint32, which overflows int32 when
// the sign bit is set, so it bails out when fallible.
void CodeGenerator::visitShiftI(LShiftI* ins) {
  Register lhs = ToRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());

  if (rhs->isConstant()) {
    int32_t shift = ToInt32(rhs) & 0x1F;
    switch (ins->bitop()) {
      case JSOP_LSH:
        if (shift) {
          masm.ma_sll(dest, lhs, Imm32(shift));
        } else {
          masm.move32(lhs, dest);
        }
        break;
      case JSOP_RSH:
        if (shift) {
          masm.ma_sra(dest, lhs, Imm32(shift));
        } else {
          masm.move32(lhs, dest);
        }
        break;
      case JSOP_URSH:
        if (shift) {
          masm.ma_srl(dest, lhs, Imm32(shift));
        } else {
          // x >>> 0 with a negative x yields a uint32 > INT32_MAX: bail.
          if (ins->mir()->toUrsh()->fallible()) {
            bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
          }
          masm.move32(lhs, dest);
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  } else {
    // The shift amounts should be AND'ed into the 0-31 range.
    masm.ma_and(dest, ToRegister(rhs), Imm32(0x1F));

    switch (ins->bitop()) {
      case JSOP_LSH:
        masm.ma_sll(dest, lhs, dest);
        break;
      case JSOP_RSH:
        masm.ma_sra(dest, lhs, dest);
        break;
      case JSOP_URSH:
        masm.ma_srl(dest, lhs, dest);
        if (ins->mir()->toUrsh()->fallible()) {
          // A negative result means the uint32 value exceeds INT32_MAX.
          bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot());
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  }
}
// 64-bit shifts; the lhs register (pair) is also the output. Shift counts
// are masked to six bits, and a zero count is a no-op.
void CodeGenerator::visitShiftI64(LShiftI64* lir) {
  const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
  LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);

  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));

  if (rhs->isConstant()) {
    int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
    switch (lir->bitop()) {
      case JSOP_LSH:
        if (shift) {
          masm.lshift64(Imm32(shift), ToRegister64(lhs));
        }
        break;
      case JSOP_RSH:
        if (shift) {
          masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
        }
        break;
      case JSOP_URSH:
        if (shift) {
          masm.rshift64(Imm32(shift), ToRegister64(lhs));
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
    return;
  }

  // Variable shift count in a register.
  switch (lir->bitop()) {
    case JSOP_LSH:
      masm.lshift64(ToRegister(rhs), ToRegister64(lhs));
      break;
    case JSOP_RSH:
      masm.rshift64Arithmetic(ToRegister(rhs), ToRegister64(lhs));
      break;
    case JSOP_URSH:
      masm.rshift64(ToRegister(rhs), ToRegister64(lhs));
      break;
    default:
      MOZ_CRASH("Unexpected shift op");
  }
}
// 64-bit rotate. On MIPS64 the rotate happens in place (input == output);
// on MIPS32 a zero-count rotate still needs an explicit register-pair move.
void CodeGenerator::visitRotateI64(LRotateI64* lir) {
  MRotate* mir = lir->mir();
  LAllocation* count = lir->count();

  Register64 input = ToRegister64(lir->input());
  Register64 output = ToOutRegister64(lir);
  Register temp = ToTempRegisterOrInvalid(lir->temp());

#ifdef JS_CODEGEN_MIPS64
  MOZ_ASSERT(input == output);
#endif

  if (count->isConstant()) {
    // Counts are taken modulo 64.
    int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
    if (!c) {
      // Rotate by zero is the identity; MIPS32 may still need a copy.
#ifdef JS_CODEGEN_MIPS32
      masm.move64(input, output);
#endif
      return;
    }
    if (mir->isLeftRotate()) {
      masm.rotateLeft64(Imm32(c), input, output, temp);
    } else {
      masm.rotateRight64(Imm32(c), input, output, temp);
    }
  } else {
    if (mir->isLeftRotate()) {
      masm.rotateLeft64(ToRegister(count), input, output, temp);
    } else {
      masm.rotateRight64(ToRegister(count), input, output, temp);
    }
  }
}
// Unsigned right shift whose result is consumed as a double: shift into an
// integer temp, then convert the unsigned 32-bit value to a double.
void CodeGenerator::visitUrshD(LUrshD* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register shifted = ToRegister(ins->temp());
  const LAllocation* rhs = ins->rhs();

  if (!rhs->isConstant()) {
    masm.ma_srl(shifted, lhs, ToRegister(rhs));
  } else {
    masm.ma_srl(shifted, lhs, Imm32(ToInt32(rhs)));
  }

  masm.convertUInt32ToDouble(shifted, ToFloatRegister(ins->output()));
}
// Count leading zeroes of an int32.
void CodeGenerator::visitClzI(LClzI* ins) {
  masm.as_clz(ToRegister(ins->output()), ToRegister(ins->input()));
}
// Count trailing zeroes of an int32.
void CodeGenerator::visitCtzI(LCtzI* ins) {
  masm.ma_ctz(ToRegister(ins->output()), ToRegister(ins->input()));
}
// 32-bit population count; uses one integer temp.
void CodeGenerator::visitPopcntI(LPopcntI* ins) {
  Register in = ToRegister(ins->input());
  Register out = ToRegister(ins->output());
  masm.popcnt32(in, out, ToRegister(ins->temp()));
}
// 64-bit population count; uses one integer temp.
void CodeGenerator::visitPopcntI64(LPopcntI64* ins) {
  Register64 in = ToRegister64(ins->getInt64Operand(0));
  Register64 out = ToOutRegister64(ins);
  masm.popcnt64(in, out, ToRegister(ins->getTemp(0)));
}
// Math.pow(x, 0.5). This is not a plain sqrt: pow(-Infinity, 0.5) must be
// +Infinity, and pow(-0, 0.5) must be +0 (sqrt(-0) would yield -0).
void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister output = ToFloatRegister(ins->output());

  Label done, skip;

  // If input == -Infinity, return +Infinity by negating the scratch value.
  masm.loadConstantDouble(NegativeInfinity<double>(), ScratchDoubleReg);
  masm.ma_bc1d(input, ScratchDoubleReg, &skip,
               Assembler::DoubleNotEqualOrUnordered, ShortJump);
  masm.as_negd(output, ScratchDoubleReg);
  masm.ma_b(&done, ShortJump);

  masm.bind(&skip);
  // Adding 0.0 converts any -0 input to +0 before the sqrt.
  masm.loadConstantDouble(0.0, ScratchDoubleReg);
  masm.as_addd(output, input, ScratchDoubleReg);
  masm.as_sqrtd(output, output);

  masm.bind(&done);
}
// Translate an LAllocation into a MoveOperand for the move resolver:
// a GPR, an FPR, or a word-aligned stack slot.
MoveOperand CodeGeneratorMIPSShared::toMoveOperand(LAllocation a) const {
  if (a.isFloatReg()) {
    return MoveOperand(ToFloatRegister(a));
  }
  if (a.isGeneralReg()) {
    return MoveOperand(ToRegister(a));
  }
  // Stack slot: the offset must be word-aligned.
  int32_t offset = ToStackOffset(a);
  MOZ_ASSERT((offset & 3) == 0);
  return MoveOperand(StackPointer, offset);
}
// Double-precision +, -, * and /, selected by the JS opcode.
void CodeGenerator::visitMathD(LMathD* math) {
  FloatRegister lhs = ToFloatRegister(math->getOperand(0));
  FloatRegister rhs = ToFloatRegister(math->getOperand(1));
  FloatRegister out = ToFloatRegister(math->getDef(0));

  auto op = math->jsop();
  if (op == JSOP_ADD) {
    masm.as_addd(out, lhs, rhs);
  } else if (op == JSOP_SUB) {
    masm.as_subd(out, lhs, rhs);
  } else if (op == JSOP_MUL) {
    masm.as_muld(out, lhs, rhs);
  } else if (op == JSOP_DIV) {
    masm.as_divd(out, lhs, rhs);
  } else {
    MOZ_CRASH("unexpected opcode");
  }
}
// Single-precision +, -, * and /, selected by the JS opcode.
void CodeGenerator::visitMathF(LMathF* math) {
  FloatRegister lhs = ToFloatRegister(math->getOperand(0));
  FloatRegister rhs = ToFloatRegister(math->getOperand(1));
  FloatRegister out = ToFloatRegister(math->getDef(0));

  auto op = math->jsop();
  if (op == JSOP_ADD) {
    masm.as_adds(out, lhs, rhs);
  } else if (op == JSOP_SUB) {
    masm.as_subs(out, lhs, rhs);
  } else if (op == JSOP_MUL) {
    masm.as_muls(out, lhs, rhs);
  } else if (op == JSOP_DIV) {
    masm.as_divs(out, lhs, rhs);
  } else {
    MOZ_CRASH("unexpected opcode");
  }
}
// Math.floor(double) to int32. Inputs that compare equal to 0.0 have their
// raw high word inspected to reject NaN and -0; results of INT_MIN/INT_MAX
// (floor.w.d's out-of-range outputs) bail out.
void CodeGenerator::visitFloor(LFloor* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  FloatRegister scratch = ScratchDoubleReg;
  Register output = ToRegister(lir->output());

  Label skipCheck, done;

  // Skip the NaN/-0 check for nonzero, non-NaN inputs.
  masm.loadConstantDouble(0.0, scratch);
  masm.ma_bc1d(input, scratch, &skipCheck, Assembler::DoubleNotEqual,
               ShortJump);

  // A nonzero high word here means the input is NaN or -0: bail.
  masm.moveFromDoubleHi(input, SecondScratchReg);
  bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0),
               lir->snapshot());

  // Input was +0: the result is 0.
  masm.move32(Imm32(0), output);
  masm.ma_b(&done, ShortJump);

  masm.bind(&skipCheck);
  masm.as_floorwd(scratch, input);
  masm.moveFromDoubleLo(scratch, output);

  // Bail when the conversion produced its saturation values.
  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());

  masm.bind(&done);
}
// Math.floor(float32) to int32. Inputs that compare equal to 0.0f have
// their raw bits inspected (the float payload sits in the low word) to
// reject NaN and -0; saturated results bail out.
void CodeGenerator::visitFloorF(LFloorF* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  FloatRegister scratch = ScratchFloat32Reg;
  Register output = ToRegister(lir->output());

  Label skipCheck, done;

  // Skip the NaN/-0 check for nonzero, non-NaN inputs.
  masm.loadConstantFloat32(0.0f, scratch);
  masm.ma_bc1s(input, scratch, &skipCheck, Assembler::DoubleNotEqual,
               ShortJump);

  // A nonzero bit pattern here means the input is NaN or -0: bail.
  masm.moveFromDoubleLo(input, SecondScratchReg);
  bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0),
               lir->snapshot());

  // Input was +0: the result is 0.
  masm.move32(Imm32(0), output);
  masm.ma_b(&done, ShortJump);

  masm.bind(&skipCheck);
  masm.as_floorws(scratch, input);
  masm.moveFromDoubleLo(scratch, output);

  // Bail when the conversion produced its saturation values.
  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());

  masm.bind(&done);
}
// Math.ceil(double) to int32. Inputs in (-1, 0] (and NaN, which fails both
// range branches) fall through to a raw-bits check: a nonzero high word
// means NaN or a negative value whose ceil would be -0, so bail.
void CodeGenerator::visitCeil(LCeil* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  FloatRegister scratch = ScratchDoubleReg;
  Register output = ToRegister(lir->output());

  Label performCeil, done;

  // If x > 0 or x <= -1, the ordinary ceil path is safe.
  masm.loadConstantDouble(0, scratch);
  masm.branchDouble(Assembler::DoubleGreaterThan, input, scratch, &performCeil);
  masm.loadConstantDouble(-1, scratch);
  masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, scratch,
                    &performCeil);

  // x is in (-1, 0] or NaN: any nonzero high word (sign bit or NaN) bails.
  masm.moveFromDoubleHi(input, SecondScratchReg);
  bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0),
               lir->snapshot());

  // Input was +0: the result is 0.
  masm.move32(Imm32(0), output);
  masm.ma_b(&done, ShortJump);

  masm.bind(&performCeil);
  masm.as_ceilwd(scratch, input);
  masm.moveFromDoubleLo(scratch, output);

  // Bail when the conversion produced its saturation values.
  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());

  masm.bind(&done);
}
// Math.ceil(float32) to int32. Inputs in (-1, 0] (and NaN, which fails both
// range branches) fall through to a raw-bits check: a nonzero bit pattern
// means NaN or a negative value whose ceil would be -0, so bail.
void CodeGenerator::visitCeilF(LCeilF* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  FloatRegister scratch = ScratchFloat32Reg;
  Register output = ToRegister(lir->output());

  Label performCeil, done;

  // If x > 0 or x <= -1, the ordinary ceil path is safe.
  masm.loadConstantFloat32(0.0f, scratch);
  masm.branchFloat(Assembler::DoubleGreaterThan, input, scratch, &performCeil);
  masm.loadConstantFloat32(-1.0f, scratch);
  masm.branchFloat(Assembler::DoubleLessThanOrEqual, input, scratch,
                   &performCeil);

  // x is in (-1, 0] or NaN: any nonzero bit pattern bails.
  masm.moveFromFloat32(input, SecondScratchReg);
  bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0),
               lir->snapshot());

  // Input was +0: the result is 0.
  masm.move32(Imm32(0), output);
  masm.ma_b(&done, ShortJump);

  masm.bind(&performCeil);
  masm.as_ceilws(scratch, input);
  masm.moveFromFloat32(scratch, output);

  // Bail when the conversion produced its saturation values.
  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());

  masm.bind(&done);
}
// Math.round(double) to int32. Non-negative inputs add the largest double
// below 0.5 and get floored; negative inputs take a slow path so that a
// would-be -0 result (inputs in [-0.5, 0)) bails out.
void CodeGenerator::visitRound(LRound* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  FloatRegister temp = ToFloatRegister(lir->temp());
  FloatRegister scratch = ScratchDoubleReg;
  Register output = ToRegister(lir->output());

  Label bail, negative, end, skipCheck;

  // Addend for the fast path: the largest double below 0.5, so values just
  // under 0.5 do not incorrectly round up.
  masm.loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);

  // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
  masm.loadConstantDouble(0.0, scratch);
  masm.ma_bc1d(input, scratch, &negative, Assembler::DoubleLessThan, ShortJump);

  // Inputs that compare equal to 0.0 are +0, -0 or NaN: check the raw bits.
  masm.ma_bc1d(input, scratch, &skipCheck, Assembler::DoubleNotEqual,
               ShortJump);

  // A nonzero high word means NaN or -0: bail.
  masm.moveFromDoubleHi(input, SecondScratchReg);
  bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0),
               lir->snapshot());

  // Input was +0: the result is 0.
  masm.move32(Imm32(0), output);
  masm.ma_b(&end, ShortJump);

  masm.bind(&skipCheck);
  masm.as_addd(scratch, input, temp);
  masm.as_floorwd(scratch, scratch);
  masm.moveFromDoubleLo(scratch, output);

  // Bail when the conversion produced its saturation values (out of range).
  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());

  masm.jump(&end);

  // Input is negative, but isn't -0.
  masm.bind(&negative);

  // Inputs in [-0.5, 0) must add exactly 0.5 (reaching >= 0, which bails
  // below); more-negative inputs keep the biggest-double-below-0.5 addend
  // already in temp so that e.g. -1.5 rounds to -1.
  Label loadJoin;
  masm.loadConstantDouble(-0.5, scratch);
  masm.branchDouble(Assembler::DoubleLessThan, input, scratch, &loadJoin);
  masm.loadConstantDouble(0.5, temp);
  masm.bind(&loadJoin);

  masm.addDouble(input, temp);

  // If input + 0.5 >= 0, the input was in [-0.5, 0) and the result would be
  // -0, which cannot be represented as an int32: bail. Fix: scratch still
  // held -0.5 from the branch above, which made every valid input in
  // (-1, -0.5) bail on each execution; the comparison must be against 0.0.
  masm.loadConstantDouble(0.0, scratch);
  masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, temp, scratch, &bail);
  bailoutFrom(&bail, lir->snapshot());

  // Floor the adjusted value; with the addend this is round-half-up.
  masm.as_floorwd(scratch, temp);
  masm.moveFromDoubleLo(scratch, output);

  // Bail on the saturation value INT_MIN (out-of-range result).
  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());

  masm.bind(&end);
}
// Math.round(float32) -> int32: float variant of visitRound; round half
// toward +Infinity, bailing for results not representable as int32.
void CodeGenerator::visitRoundF(LRoundF* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  FloatRegister temp = ToFloatRegister(lir->temp());
  FloatRegister scratch = ScratchFloat32Reg;
  Register output = ToRegister(lir->output());

  Label bail, negative, end, skipCheck;

  // Load biggest number less than 0.5 in the temp register.
  masm.loadConstantFloat32(GetBiggestNumberLessThan(0.5f), temp);

  // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
  masm.loadConstantFloat32(0.0f, scratch);
  masm.ma_bc1s(input, scratch, &negative, Assembler::DoubleLessThan, ShortJump);

  // If input is not NaN, 0 or -0, skip the zero check below.
  masm.ma_bc1s(input, scratch, &skipCheck, Assembler::DoubleNotEqual,
               ShortJump);

  // If the bit pattern is non-zero the input is NaN or -0, so bail.
  masm.moveFromFloat32(input, SecondScratchReg);
  bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0),
               lir->snapshot());

  // Input was +0, so the result is 0.
  masm.move32(Imm32(0), output);
  masm.ma_b(&end, ShortJump);

  masm.bind(&skipCheck);
  // Adding the biggest float < 0.5 then flooring implements round-half-up
  // without double-rounding artifacts.
  masm.as_adds(scratch, input, temp);
  masm.as_floorws(scratch, scratch);

  masm.moveFromFloat32(scratch, output);

  // INT_MIN/INT_MAX are the saturated results of an out-of-range conversion.
  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());

  masm.jump(&end);

  // Input is negative, but isn't -0.
  masm.bind(&negative);

  // Inputs in ]-0.5; 0] need to be added 0.5, other negative inputs need to
  // be added the biggest float less than 0.5.
  Label loadJoin;
  masm.loadConstantFloat32(-0.5f, scratch);
  masm.branchFloat(Assembler::DoubleLessThan, input, scratch, &loadJoin);
  masm.loadConstantFloat32(0.5f, temp);
  masm.bind(&loadJoin);

  masm.as_adds(temp, input, temp);

  // If input + 0.5 >= 0, the result is zero or positive => bail (a negative
  // input rounding to 0 would produce -0). Reload 0.0f into scratch first: it
  // still holds -0.5f from the range check above, and comparing against -0.5f
  // would spuriously bail for every input <= -0.5.
  masm.loadConstantFloat32(0.0f, scratch);
  masm.branchFloat(Assembler::DoubleGreaterThanOrEqual, temp, scratch, &bail);
  bailoutFrom(&bail, lir->snapshot());

  // Truncate and round toward zero.
  // This is off-by-one for everything but integer-valued inputs.
  masm.as_floorws(scratch, temp);
  masm.moveFromFloat32(scratch, output);

  bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());

  masm.bind(&end);
}
// Math.trunc(double) -> int32: truncate toward zero, bailing when the result
// is out of int32 range, NaN, or would be -0.
void CodeGenerator::visitTrunc(LTrunc* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register output = ToRegister(lir->output());

  Label notZero;
  masm.as_truncwd(ScratchFloat32Reg, input);
  // Read FCSR: CauseV is set when the conversion was invalid (NaN or out of
  // int32 range).
  masm.as_cfc1(ScratchRegister, Assembler::FCSR);
  masm.moveFromFloat32(ScratchFloat32Reg, output);
  masm.ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
  // (was corrupted to "¬Zero" — restore the label reference)
  masm.ma_b(output, Imm32(0), &notZero, Assembler::NotEqual, ShortJump);
  // Result is zero: check the sign bit of the input; a set sign bit means the
  // input was in ]-1; -0], whose truncation is -0 => bail.
  masm.moveFromDoubleHi(input, ScratchRegister);
  masm.as_slt(ScratchRegister, ScratchRegister, zero);
  masm.bind(&notZero);

  bailoutCmp32(Assembler::NotEqual, ScratchRegister, Imm32(0), lir->snapshot());
}
// Math.trunc(float32) -> int32: float variant of visitTrunc.
void CodeGenerator::visitTruncF(LTruncF* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register output = ToRegister(lir->output());

  Label notZero;
  masm.as_truncws(ScratchFloat32Reg, input);
  // Read FCSR: CauseV is set when the conversion was invalid (NaN or out of
  // int32 range).
  masm.as_cfc1(ScratchRegister, Assembler::FCSR);
  masm.moveFromFloat32(ScratchFloat32Reg, output);
  masm.ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
  // (was corrupted to "¬Zero" — restore the label reference)
  masm.ma_b(output, Imm32(0), &notZero, Assembler::NotEqual, ShortJump);
  // Result is zero: check the sign bit of the input; a set sign bit means the
  // input was in ]-1; -0], whose truncation is -0 => bail.
  masm.moveFromFloat32(input, ScratchRegister);
  masm.as_slt(ScratchRegister, ScratchRegister, zero);
  masm.bind(&notZero);

  bailoutCmp32(Assembler::NotEqual, ScratchRegister, Imm32(0), lir->snapshot());
}
// Wasm/asm.js-style truncation of a double to int32 (modulo 2^32 semantics).
void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  Register output = ToRegister(ins->output());
  emitTruncateDouble(input, output, ins->mir());
}
// Wasm/asm.js-style truncation of a float32 to int32 (modulo 2^32 semantics).
void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  Register output = ToRegister(ins->output());
  emitTruncateFloat32(input, output, ins->mir());
}
// Wasm i32.trunc_* : truncate a double/float32 to a (signed or unsigned)
// int32, with the failure cases (NaN, out of range) handled out-of-line.
void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
  auto input = ToFloatRegister(lir->input());
  auto output = ToRegister(lir->output());
  MWasmTruncateToInt32* mir = lir->mir();
  MIRType fromType = mir->input()->type();

  MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);

  // The out-of-line path either traps or saturates, then rejoins.
  auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
  addOutOfLineCode(ool, mir);
  Label* oolEntry = ool->entry();

  const bool isSaturating = mir->isSaturating();
  const bool isUnsigned = mir->isUnsigned();

  if (fromType == MIRType::Double) {
    if (isUnsigned) {
      masm.wasmTruncateDoubleToUInt32(input, output, isSaturating, oolEntry);
    } else {
      masm.wasmTruncateDoubleToInt32(input, output, isSaturating, oolEntry);
    }
  } else if (fromType == MIRType::Float32) {
    if (isUnsigned) {
      masm.wasmTruncateFloat32ToUInt32(input, output, isSaturating, oolEntry);
    } else {
      masm.wasmTruncateFloat32ToInt32(input, output, isSaturating, oolEntry);
    }
  } else {
    MOZ_CRASH("unexpected type");
  }

  masm.bind(ool->rejoin());
}
// Out-of-line path taken by a failed guard: push the snapshot offset on the
// stack and jump to the shared bailout tail.
void CodeGeneratorMIPSShared::visitOutOfLineBailout(OutOfLineBailout* ool) {
  // Push snapshot offset.
  masm.subPtr(Imm32(sizeof(Value)), StackPointer);
  masm.storePtr(ImmWord(ool->snapshot()->snapshotOffset()),
                Address(StackPointer, 0));

  masm.jump(&deoptLabel_);
}
// Slow path for wasm float->int truncation: dispatch on the destination
// width (int32 vs int64) to the corresponding MacroAssembler check.
void CodeGeneratorMIPSShared::visitOutOfLineWasmTruncateCheck(
    OutOfLineWasmTruncateCheck* ool) {
  if (ool->toType() == MIRType::Int32) {
    masm.outOfLineWasmTruncateToInt32Check(
        ool->input(), ool->output(), ool->fromType(), ool->flags(),
        ool->rejoin(), ool->bytecodeOffset());
  } else {
    MOZ_ASSERT(ool->toType() == MIRType::Int64);
    masm.outOfLineWasmTruncateToInt64Check(
        ool->input(), ool->output64(), ool->fromType(), ool->flags(),
        ool->rejoin(), ool->bytecodeOffset());
  }
}
// Math.fround copysign: output has lhs's magnitude and rhs's sign.
void CodeGenerator::visitCopySignF(LCopySignF* ins) {
  FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
  FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
  FloatRegister output = ToFloatRegister(ins->getDef(0));

  Register lhsi = ToRegister(ins->getTemp(0));
  Register rhsi = ToRegister(ins->getTemp(1));

  // Move the raw bit patterns into GPRs.
  masm.moveFromFloat32(lhs, lhsi);
  masm.moveFromFloat32(rhs, rhsi);

  // Insert the low 31 bits (exponent+mantissa) of lhs into rhs, keeping
  // only rhs's sign bit (bit 31).
  masm.ma_ins(rhsi, lhsi, 0, 31);

  masm.moveToFloat32(rhsi, output);
}
// Double copysign: output has lhs's magnitude and rhs's sign. Only the high
// word of the double carries the sign, so the low word needs no work.
void CodeGenerator::visitCopySignD(LCopySignD* ins) {
  FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
  FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
  FloatRegister output = ToFloatRegister(ins->getDef(0));

  Register lhsi = ToRegister(ins->getTemp(0));
  Register rhsi = ToRegister(ins->getTemp(1));

  // Take the high words, which hold the sign bits.
  masm.moveFromDoubleHi(lhs, lhsi);
  masm.moveFromDoubleHi(rhs, rhsi);

  // Insert the low 31 bits of lhs's high word into rhs's high word, keeping
  // only rhs's sign bit (bit 31).
  masm.ma_ins(rhsi, lhsi, 0, 31);

  masm.moveToDoubleHi(rhsi, output);
}
// Materialize a boxed constant Value into its output value operand.
void CodeGenerator::visitValue(LValue* value) {
  masm.moveValue(value->value(), ToOutValue(value));
}
// Load a double constant into the destination FPU register.
void CodeGenerator::visitDouble(LDouble* ins) {
  masm.loadConstantDouble(ins->getDouble(), ToFloatRegister(ins->getDef(0)));
}
// Load a float32 constant into the destination FPU register.
void CodeGenerator::visitFloat32(LFloat32* ins) {
  masm.loadConstantFloat32(ins->getFloat(), ToFloatRegister(ins->getDef(0)));
}
// Branch on the truthiness of a double: 0, -0 and NaN are falsey.
void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
  FloatRegister input = ToFloatRegister(test->input());

  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.loadConstantDouble(0.0, ScratchDoubleReg);
  // If the false block is the fall-through, only emit the taken branch;
  // otherwise branch on the inverted (false) condition and jump to ifTrue.
  // EqualOrUnordered routes NaN to the false block.
  if (isNextBlock(ifFalse->lir())) {
    branchToBlock(Assembler::DoubleFloat, input, ScratchDoubleReg, ifTrue,
                  Assembler::DoubleNotEqual);
  } else {
    branchToBlock(Assembler::DoubleFloat, input, ScratchDoubleReg, ifFalse,
                  Assembler::DoubleEqualOrUnordered);
    jumpToBlock(ifTrue);
  }
}
// Branch on the truthiness of a float32: 0, -0 and NaN are falsey.
void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
  FloatRegister input = ToFloatRegister(test->input());

  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.loadConstantFloat32(0.0f, ScratchFloat32Reg);
  // Same fall-through optimization as visitTestDAndBranch; EqualOrUnordered
  // routes NaN to the false block.
  if (isNextBlock(ifFalse->lir())) {
    branchToBlock(Assembler::SingleFloat, input, ScratchFloat32Reg, ifTrue,
                  Assembler::DoubleNotEqual);
  } else {
    branchToBlock(Assembler::SingleFloat, input, ScratchFloat32Reg, ifFalse,
                  Assembler::DoubleEqualOrUnordered);
    jumpToBlock(ifTrue);
  }
}
// Double comparison producing a boolean in a GPR.
void CodeGenerator::visitCompareD(LCompareD* comp) {
  // Map the JS comparison operator onto an FPU condition, then materialize
  // the result directly with a compare-and-set.
  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
  masm.ma_cmp_set_double(ToRegister(comp->output()),
                         ToFloatRegister(comp->left()),
                         ToFloatRegister(comp->right()), cond);
}
// Float32 comparison producing a boolean in a GPR.
void CodeGenerator::visitCompareF(LCompareF* comp) {
  // Map the JS comparison operator onto an FPU condition, then materialize
  // the result directly with a compare-and-set.
  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
  masm.ma_cmp_set_float32(ToRegister(comp->output()),
                          ToFloatRegister(comp->left()),
                          ToFloatRegister(comp->right()), cond);
}
// Fused double comparison + branch.
void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());

  Assembler::DoubleCondition cond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());
  MBasicBlock* ifTrue = comp->ifTrue();
  MBasicBlock* ifFalse = comp->ifFalse();

  // When the false block falls through, branch only on the true condition;
  // otherwise branch to the false block on the inverted condition and jump
  // to the true block explicitly.
  if (isNextBlock(ifFalse->lir())) {
    branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifTrue, cond);
  } else {
    branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifFalse,
                  Assembler::InvertCondition(cond));
    jumpToBlock(ifTrue);
  }
}
// Fused float32 comparison + branch.
void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());

  Assembler::DoubleCondition cond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());
  MBasicBlock* ifTrue = comp->ifTrue();
  MBasicBlock* ifFalse = comp->ifFalse();

  // Same fall-through optimization as visitCompareDAndBranch.
  if (isNextBlock(ifFalse->lir())) {
    branchToBlock(Assembler::SingleFloat, lhs, rhs, ifTrue, cond);
  } else {
    branchToBlock(Assembler::SingleFloat, lhs, rhs, ifFalse,
                  Assembler::InvertCondition(cond));
    jumpToBlock(ifTrue);
  }
}
// Fused (left & right) + branch: compute the mask into the scratch register
// and branch on it without materializing a boolean.
void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* lir) {
  if (lir->right()->isConstant()) {
    masm.ma_and(ScratchRegister, ToRegister(lir->left()),
                Imm32(ToInt32(lir->right())));
  } else {
    masm.as_and(ScratchRegister, ToRegister(lir->left()),
                ToRegister(lir->right()));
  }
  emitBranch(ScratchRegister, ScratchRegister, lir->cond(), lir->ifTrue(),
             lir->ifFalse());
}
// Convert an unsigned 32-bit integer to a double.
void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
  Register input = ToRegister(lir->input());
  FloatRegister output = ToFloatRegister(lir->output());
  masm.convertUInt32ToDouble(input, output);
}
// Convert an unsigned 32-bit integer to a float32.
void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
  Register input = ToRegister(lir->input());
  FloatRegister output = ToFloatRegister(lir->output());
  masm.convertUInt32ToFloat32(input, output);
}
// Logical not of an int32: !x is true exactly when x == 0.
void CodeGenerator::visitNotI(LNotI* ins) {
  Register in = ToRegister(ins->input());
  Register out = ToRegister(ins->output());
  masm.cmp32Set(Assembler::Equal, in, Imm32(0), out);
}
void CodeGenerator::visitNotD(LNotD* ins) {
  // Since this operation is not, we want to set a bit if
  // the double is falsey, which means 0.0, -0.0 or NaN.
  FloatRegister in = ToFloatRegister(ins->input());
  Register dest = ToRegister(ins->output());

  // EqualOrUnordered catches NaN in addition to +/-0.
  masm.loadConstantDouble(0.0, ScratchDoubleReg);
  masm.ma_cmp_set_double(dest, in, ScratchDoubleReg,
                         Assembler::DoubleEqualOrUnordered);
}
void CodeGenerator::visitNotF(LNotF* ins) {
  // Since this operation is not, we want to set a bit if
  // the float32 is falsey, which means 0.0, -0.0 or NaN.
  FloatRegister in = ToFloatRegister(ins->input());
  Register dest = ToRegister(ins->output());

  // EqualOrUnordered catches NaN in addition to +/-0.
  masm.loadConstantFloat32(0.0f, ScratchFloat32Reg);
  masm.ma_cmp_set_float32(dest, in, ScratchFloat32Reg,
                          Assembler::DoubleEqualOrUnordered);
}
// Emit the memory fence requested by this LIR node.
void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
  auto barrierType = ins->type();
  masm.memoryBarrier(barrierType);
}
// Emit the invalidation epilogue that patched OsiPoints jump to when the
// script is invalidated.
void CodeGeneratorMIPSShared::generateInvalidateEpilogue() {
  // Ensure that there is enough space in the buffer for the OsiPoint patching
  // to occur. Otherwise, we could overwrite the invalidation epilogue.
  for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
    masm.nop();
  }
  masm.bind(&invalidate_);

  // Push the return address of the point that we bailed out at.
  masm.Push(ra);

  // Push a placeholder that is patched later with the IonScript pointer.
  invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));

  // Jump to the invalidator, which will replace the current frame.
  TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
  masm.jump(thunk);

  // The invalidation thunk returns directly to our caller, never here.
  masm.assumeUnreachable(
      "Should have returned directly to its caller instead of here.");
}
// Out-of-line jump table for a table switch: the table of absolute code
// pointers is emitted after the main code stream, and jumpLabel_ marks its
// start so the dispatch code can compute the table address.
class js::jit::OutOfLineTableSwitch
    : public OutOfLineCodeBase<CodeGeneratorMIPSShared> {
  MTableSwitch* mir_;
  CodeLabel jumpLabel_;

  void accept(CodeGeneratorMIPSShared* codegen) override {
    codegen->visitOutOfLineTableSwitch(this);
  }

 public:
  explicit OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}

  MTableSwitch* mir() const { return mir_; }

  // Label marking the start of the emitted jump table.
  CodeLabel* jumpLabel() { return &jumpLabel_; }
};
// Emit the out-of-line jump table itself: one code pointer per case.
void CodeGeneratorMIPSShared::visitOutOfLineTableSwitch(
    OutOfLineTableSwitch* ool) {
  MTableSwitch* mir = ool->mir();

  // Align the table so the pointer-sized entries are naturally aligned.
  masm.haltingAlign(sizeof(void*));
  masm.bind(ool->jumpLabel());
  masm.addCodeLabel(*ool->jumpLabel());

  for (size_t i = 0; i < mir->numCases(); i++) {
    LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
    Label* caseheader = caseblock->label();
    uint32_t caseoffset = caseheader->offset();

    // The entries of the jump table need to be absolute addresses, so they
    // are registered as CodeLabels to be patched after codegen finishes.
    CodeLabel cl;
    masm.writeCodePointer(&cl);
    cl.target()->bind(caseoffset);
    masm.addCodeLabel(cl);
  }
}
// Dispatch a table switch: normalize the index, range-check it against the
// case count, then jump through the out-of-line pointer table.
void CodeGeneratorMIPSShared::emitTableSwitchDispatch(MTableSwitch* mir,
                                                      Register index,
                                                      Register base) {
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  // Rebase the index so case `low` maps to table entry 0.
  if (mir->low() != 0) {
    masm.subPtr(Imm32(mir->low()), index);
  }

  // Unsigned comparison also sends negative (rebased) indices to default.
  int32_t cases = mir->numCases();
  masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase);

  // The table entries are only known after the cases are generated, so the
  // table itself is emitted out of line.
  OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
  addOutOfLineCode(ool, mir);

  // base = address of the table; then jump through entry [index].
  masm.ma_li(base, ool->jumpLabel());
  BaseIndex pointer(base, index, ScalePointer);
  masm.branchToComputedAddress(pointer);
}
// Shared emitter for aligned and unaligned wasm loads (integer and FP),
// used by both visitWasmLoad and visitWasmUnalignedLoad.
template <typename T>
void CodeGeneratorMIPSShared::emitWasmLoad(T* lir) {
  const MWasmLoad* mir = lir->mir();

  // ptrCopy is only allocated when the access needs a clobberable copy of
  // the pointer; otherwise pass InvalidReg.
  Register ptrScratch = InvalidReg;
  if (!lir->ptrCopy()->isBogusTemp()) {
    ptrScratch = ToRegister(lir->ptrCopy());
  }

  if (IsUnaligned(mir->access())) {
    if (IsFloatingPointType(mir->type())) {
      masm.wasmUnalignedLoadFP(mir->access(), HeapReg, ToRegister(lir->ptr()),
                               ptrScratch, ToFloatRegister(lir->output()),
                               ToRegister(lir->getTemp(1)), InvalidReg,
                               InvalidReg);
    } else {
      masm.wasmUnalignedLoad(mir->access(), HeapReg, ToRegister(lir->ptr()),
                             ptrScratch, ToRegister(lir->output()),
                             ToRegister(lir->getTemp(1)));
    }
  } else {
    masm.wasmLoad(mir->access(), HeapReg, ToRegister(lir->ptr()), ptrScratch,
                  ToAnyRegister(lir->output()));
  }
}
// Aligned wasm load: defer to the shared emitter.
void CodeGenerator::visitWasmLoad(LWasmLoad* lir) {
  emitWasmLoad(lir);
}
// Unaligned wasm load: defer to the shared emitter.
void CodeGenerator::visitWasmUnalignedLoad(LWasmUnalignedLoad* lir) {
  emitWasmLoad(lir);
}
// Shared emitter for aligned and unaligned wasm stores (integer and FP),
// used by both visitWasmStore and visitWasmUnalignedStore.
template <typename T>
void CodeGeneratorMIPSShared::emitWasmStore(T* lir) {
  const MWasmStore* mir = lir->mir();

  // ptrCopy is only allocated when the access needs a clobberable copy of
  // the pointer; otherwise pass InvalidReg.
  Register ptrScratch = InvalidReg;
  if (!lir->ptrCopy()->isBogusTemp()) {
    ptrScratch = ToRegister(lir->ptrCopy());
  }

  if (IsUnaligned(mir->access())) {
    if (mir->access().type() == Scalar::Float32 ||
        mir->access().type() == Scalar::Float64) {
      masm.wasmUnalignedStoreFP(mir->access(), ToFloatRegister(lir->value()),
                                HeapReg, ToRegister(lir->ptr()), ptrScratch,
                                ToRegister(lir->getTemp(1)));
    } else {
      masm.wasmUnalignedStore(mir->access(), ToRegister(lir->value()), HeapReg,
                              ToRegister(lir->ptr()), ptrScratch,
                              ToRegister(lir->getTemp(1)));
    }
  } else {
    masm.wasmStore(mir->access(), ToAnyRegister(lir->value()), HeapReg,
                   ToRegister(lir->ptr()), ptrScratch);
  }
}
// Aligned wasm store: defer to the shared emitter.
void CodeGenerator::visitWasmStore(LWasmStore* lir) {
  emitWasmStore(lir);
}
// Unaligned wasm store: defer to the shared emitter.
void CodeGenerator::visitWasmUnalignedStore(LWasmUnalignedStore* lir) {
  emitWasmStore(lir);
}
// asm.js heap load: unlike wasm, out-of-bounds does not trap — integer loads
// yield 0 and FP loads yield NaN.
void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
  const MAsmJSLoadHeap* mir = ins->mir();
  const LAllocation* ptr = ins->ptr();
  const LDefinition* out = ins->output();
  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();

  bool isSigned;
  int size;
  bool isFloat = false;
  // Decode the access type into width / signedness / FP-ness.
  switch (mir->access().type()) {
    case Scalar::Int8:
      isSigned = true;
      size = 8;
      break;
    case Scalar::Uint8:
      isSigned = false;
      size = 8;
      break;
    case Scalar::Int16:
      isSigned = true;
      size = 16;
      break;
    case Scalar::Uint16:
      isSigned = false;
      size = 16;
      break;
    case Scalar::Int32:
      isSigned = true;
      size = 32;
      break;
    case Scalar::Uint32:
      isSigned = false;
      size = 32;
      break;
    case Scalar::Float64:
      isFloat = true;
      size = 64;
      break;
    case Scalar::Float32:
      isFloat = true;
      size = 32;
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  // Constant pointer: statically known in-bounds, load at a fixed offset
  // from the heap base.
  if (ptr->isConstant()) {
    MOZ_ASSERT(!mir->needsBoundsCheck());
    int32_t ptrImm = ptr->toConstant()->toInt32();
    MOZ_ASSERT(ptrImm >= 0);
    if (isFloat) {
      if (size == 32) {
        masm.loadFloat32(Address(HeapReg, ptrImm), ToFloatRegister(out));
      } else {
        masm.loadDouble(Address(HeapReg, ptrImm), ToFloatRegister(out));
      }
    } else {
      masm.ma_load(ToRegister(out), Address(HeapReg, ptrImm),
                   static_cast<LoadStoreSize>(size),
                   isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  Register ptrReg = ToRegister(ptr);

  // Dynamic pointer without a bounds check: load directly.
  if (!mir->needsBoundsCheck()) {
    if (isFloat) {
      if (size == 32) {
        masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
                         ToFloatRegister(out));
      } else {
        masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
                        ToFloatRegister(out));
      }
    } else {
      masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
                   static_cast<LoadStoreSize>(size),
                   isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  // Bounds-checked path: out-of-range loads produce 0 (int) or NaN (FP).
  Label done, outOfRange;
  masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptrReg,
                       ToRegister(boundsCheckLimit), &outOfRange);
  if (isFloat) {
    if (size == 32) {
      masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
                       ToFloatRegister(out));
    } else {
      masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
                      ToFloatRegister(out));
    }
  } else {
    masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
                 static_cast<LoadStoreSize>(size),
                 isSigned ? SignExtend : ZeroExtend);
  }
  masm.ma_b(&done, ShortJump);
  masm.bind(&outOfRange);
  // Out-of-range result.
  if (isFloat) {
    if (size == 32) {
      masm.loadConstantFloat32(float(GenericNaN()), ToFloatRegister(out));
    } else {
      masm.loadConstantDouble(GenericNaN(), ToFloatRegister(out));
    }
  } else {
    masm.move32(Imm32(0), ToRegister(out));
  }
  masm.bind(&done);
}
// asm.js heap store: unlike wasm, out-of-bounds does not trap — the store is
// simply skipped.
void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
  const MAsmJSStoreHeap* mir = ins->mir();
  const LAllocation* value = ins->value();
  const LAllocation* ptr = ins->ptr();
  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();

  bool isSigned;
  int size;
  bool isFloat = false;
  // Decode the access type into width / signedness / FP-ness.
  switch (mir->access().type()) {
    case Scalar::Int8:
      isSigned = true;
      size = 8;
      break;
    case Scalar::Uint8:
      isSigned = false;
      size = 8;
      break;
    case Scalar::Int16:
      isSigned = true;
      size = 16;
      break;
    case Scalar::Uint16:
      isSigned = false;
      size = 16;
      break;
    case Scalar::Int32:
      isSigned = true;
      size = 32;
      break;
    case Scalar::Uint32:
      isSigned = false;
      size = 32;
      break;
    case Scalar::Float64:
      isFloat = true;
      size = 64;
      break;
    case Scalar::Float32:
      isFloat = true;
      size = 32;
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  // Constant pointer: statically known in-bounds, store at a fixed offset
  // from the heap base.
  if (ptr->isConstant()) {
    MOZ_ASSERT(!mir->needsBoundsCheck());
    int32_t ptrImm = ptr->toConstant()->toInt32();
    MOZ_ASSERT(ptrImm >= 0);

    if (isFloat) {
      FloatRegister freg = ToFloatRegister(value);
      Address addr(HeapReg, ptrImm);
      if (size == 32) {
        masm.storeFloat32(freg, addr);
      } else {
        masm.storeDouble(freg, addr);
      }
    } else {
      masm.ma_store(ToRegister(value), Address(HeapReg, ptrImm),
                    static_cast<LoadStoreSize>(size),
                    isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  Register ptrReg = ToRegister(ptr);

  // Dynamic pointer without a bounds check: store directly.
  if (!mir->needsBoundsCheck()) {
    if (isFloat) {
      FloatRegister freg = ToFloatRegister(value);
      BaseIndex bi(HeapReg, ptrReg, TimesOne);
      if (size == 32) {
        masm.storeFloat32(freg, bi);
      } else {
        masm.storeDouble(freg, bi);
      }
    } else {
      masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
                    static_cast<LoadStoreSize>(size),
                    isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  // Bounds-checked path: branch over the store when out of range.
  Label outOfRange;
  masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptrReg,
                       ToRegister(boundsCheckLimit), &outOfRange);
  if (isFloat) {
    if (size == 32) {
      masm.storeFloat32(ToFloatRegister(value),
                        BaseIndex(HeapReg, ptrReg, TimesOne));
    } else {
      masm.storeDouble(ToFloatRegister(value),
                       BaseIndex(HeapReg, ptrReg, TimesOne));
    }
  } else {
    masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
                  static_cast<LoadStoreSize>(size),
                  isSigned ? SignExtend : ZeroExtend);
  }
  masm.bind(&outOfRange);
}
// Wasm atomic compare-exchange on the heap.
void CodeGenerator::visitWasmCompareExchangeHeap(
    LWasmCompareExchangeHeap* ins) {
  MWasmCompareExchangeHeap* mir = ins->mir();
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  // Effective address: heap base + dynamic pointer (+ constant offset).
  BaseIndex srcAddr(HeapReg, ToRegister(ins->ptr()), TimesOne,
                    mir->access().offset());

  masm.wasmCompareExchange(mir->access(), srcAddr, ToRegister(ins->oldValue()),
                           ToRegister(ins->newValue()),
                           ToTempRegisterOrInvalid(ins->valueTemp()),
                           ToTempRegisterOrInvalid(ins->offsetTemp()),
                           ToTempRegisterOrInvalid(ins->maskTemp()),
                           ToRegister(ins->output()));
}
// Wasm atomic exchange on the heap.
void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
  MWasmAtomicExchangeHeap* mir = ins->mir();
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  // Effective address: heap base + dynamic pointer (+ constant offset).
  BaseIndex srcAddr(HeapReg, ToRegister(ins->ptr()), TimesOne,
                    mir->access().offset());

  masm.wasmAtomicExchange(mir->access(), srcAddr, ToRegister(ins->value()),
                          ToTempRegisterOrInvalid(ins->valueTemp()),
                          ToTempRegisterOrInvalid(ins->offsetTemp()),
                          ToTempRegisterOrInvalid(ins->maskTemp()),
                          ToRegister(ins->output()));
}
// Wasm atomic read-modify-write on the heap whose old value is used.
void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
  MOZ_ASSERT(ins->mir()->hasUses());
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  MWasmAtomicBinopHeap* mir = ins->mir();
  Register ptrReg = ToRegister(ins->ptr());
  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());

  // Effective address: heap base + dynamic pointer (+ constant offset).
  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());

  masm.wasmAtomicFetchOp(mir->access(), mir->operation(),
                         ToRegister(ins->value()), srcAddr, valueTemp,
                         offsetTemp, maskTemp, ToRegister(ins->output()));
}
// Wasm atomic read-modify-write on the heap whose old value is unused, so no
// output register is needed.
void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
    LWasmAtomicBinopHeapForEffect* ins) {
  MOZ_ASSERT(!ins->mir()->hasUses());
  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

  MWasmAtomicBinopHeap* mir = ins->mir();
  Register ptrReg = ToRegister(ins->ptr());
  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());

  // Effective address: heap base + dynamic pointer (+ constant offset).
  BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
  masm.wasmAtomicEffectOp(mir->access(), mir->operation(),
                          ToRegister(ins->value()), srcAddr, valueTemp,
                          offsetTemp, maskTemp);
}
// Spill an outgoing wasm call argument to its assigned stack slot.
void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
  const MWasmStackArg* mir = ins->mir();
  if (ins->arg()->isConstant()) {
    masm.storePtr(ImmWord(ToInt32(ins->arg())),
                  Address(StackPointer, mir->spOffset()));
  } else {
    if (ins->arg()->isGeneralReg()) {
      masm.storePtr(ToRegister(ins->arg()),
                    Address(StackPointer, mir->spOffset()));
    } else if (mir->input()->type() == MIRType::Double) {
      masm.storeDouble(ToFloatRegister(ins->arg()).doubleOverlay(),
                       Address(StackPointer, mir->spOffset()));
    } else {
      masm.storeFloat32(ToFloatRegister(ins->arg()),
                        Address(StackPointer, mir->spOffset()));
    }
  }
}
// Spill an outgoing 64-bit wasm call argument to its assigned stack slot.
void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
  Address dst(StackPointer, ins->mir()->spOffset());
  if (IsConstant(ins->arg())) {
    masm.store64(Imm64(ToInt64(ins->arg())), dst);
  } else {
    masm.store64(ToRegister64(ins->arg()), dst);
  }
}
// Wasm select: output = cond ? trueExpr : falseExpr. The register allocator
// reuses trueExpr's register as the output, so only the cond == 0 case needs
// a move.
void CodeGenerator::visitWasmSelect(LWasmSelect* ins) {
  MIRType mirType = ins->mir()->type();

  Register cond = ToRegister(ins->condExpr());
  const LAllocation* falseExpr = ins->falseExpr();

  if (mirType == MIRType::Int32 || mirType == MIRType::RefOrNull) {
    Register out = ToRegister(ins->output());
    MOZ_ASSERT(ToRegister(ins->trueExpr()) == out,
               "true expr input is reused for output");
    // movz: overwrite out with falseExpr when cond == 0.
    masm.as_movz(out, ToRegister(falseExpr), cond);
    return;
  }

  FloatRegister out = ToFloatRegister(ins->output());
  MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out,
             "true expr input is reused for output");

  if (falseExpr->isFloatReg()) {
    // FP conditional move: overwrite out with falseExpr when cond == 0.
    if (mirType == MIRType::Float32) {
      masm.as_movz(Assembler::SingleFloat, out, ToFloatRegister(falseExpr),
                   cond);
    } else if (mirType == MIRType::Double) {
      masm.as_movz(Assembler::DoubleFloat, out, ToFloatRegister(falseExpr),
                   cond);
    } else {
      MOZ_CRASH("unhandled type in visitWasmSelect!");
    }
  } else {
    // falseExpr is in memory: skip the load when cond is non-zero.
    Label done;
    masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);

    if (mirType == MIRType::Float32) {
      masm.loadFloat32(ToAddress(falseExpr), out);
    } else if (mirType == MIRType::Double) {
      masm.loadDouble(ToAddress(falseExpr), out);
    } else {
      MOZ_CRASH("unhandled type in visitWasmSelect!");
    }

    masm.bind(&done);
  }
}
// Wasm reinterpret: bitwise move between an int32 GPR and a float32 FPU
// register without conversion. 64-bit variants are handled elsewhere.
void CodeGenerator::visitWasmReinterpret(LWasmReinterpret* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  MWasmReinterpret* ins = lir->mir();

  MIRType to = ins->type();
  DebugOnly<MIRType> from = ins->input()->type();

  switch (to) {
    case MIRType::Int32:
      MOZ_ASSERT(from == MIRType::Float32);
      // Move the raw FPU bits to a GPR.
      masm.as_mfc1(ToRegister(lir->output()), ToFloatRegister(lir->input()));
      break;
    case MIRType::Float32:
      MOZ_ASSERT(from == MIRType::Int32);
      // Move the raw GPR bits to an FPU register.
      masm.as_mtc1(ToRegister(lir->input()), ToFloatRegister(lir->output()));
      break;
    case MIRType::Double:
    case MIRType::Int64:
      MOZ_CRASH("not handled by this LIR opcode");
    default:
      MOZ_CRASH("unexpected WasmReinterpret");
  }
}
// Unsigned 32-bit division/modulus. On MIPS, divu produces the quotient in
// LO and the remainder in HI.
void CodeGenerator::visitUDivOrMod(LUDivOrMod* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  Label done;

  // Prevent divide by zero.
  if (ins->canBeDivideByZero()) {
    if (ins->mir()->isTruncated()) {
      if (ins->trapOnError()) {
        // Wasm semantics: division by zero traps.
        Label nonZero;
        masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->bytecodeOffset());
        masm.bind(&nonZero);
      } else {
        // Truncated JS semantics: (x / 0) | 0 and (x % 0) | 0 are 0.
        // (was corrupted to "¬zero" — restore the label references)
        Label notzero;
        masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
        masm.move32(Imm32(0), output);
        masm.ma_b(&done, ShortJump);
        masm.bind(&notzero);
      }
    } else {
      bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
    }
  }

  masm.as_divu(lhs, rhs);
  masm.as_mfhi(output);

  // If the remainder is > 0, bailout since this must be a double.
  if (ins->mir()->isDiv()) {
    if (!ins->mir()->toDiv()->canTruncateRemainder()) {
      bailoutCmp32(Assembler::NonZero, output, output, ins->snapshot());
    }
    // Get quotient.
    masm.as_mflo(output);
  }

  // An unsigned result that looks negative as an int32 doesn't fit and must
  // bail unless the result is truncated.
  if (!ins->mir()->isTruncated()) {
    bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
  }

  masm.bind(&done);
}
// Compute base + index * scale + displacement without touching memory.
void CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins) {
  const MEffectiveAddress* mir = ins->mir();
  BaseIndex address(ToRegister(ins->base()), ToRegister(ins->index()),
                    mir->scale(), mir->displacement());
  masm.computeEffectiveAddress(address, ToRegister(ins->output()));
}
// Integer negation: output = 0 - input.
void CodeGenerator::visitNegI(LNegI* ins) {
  masm.ma_negu(ToRegister(ins->output()), ToRegister(ins->input()));
}
// Double negation (flips the sign bit, so it is -0/NaN correct).
void CodeGenerator::visitNegD(LNegD* ins) {
  masm.as_negd(ToFloatRegister(ins->output()), ToFloatRegister(ins->input()));
}
// Float32 negation (flips the sign bit, so it is -0/NaN correct).
void CodeGenerator::visitNegF(LNegF* ins) {
  masm.as_negs(ToFloatRegister(ins->output()), ToFloatRegister(ins->input()));
}
// Add a constant offset to a wasm heap pointer, trapping on unsigned
// overflow (the effective address would wrap).
void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
  MWasmAddOffset* mir = lir->mir();
  Register base = ToRegister(lir->base());
  Register out = ToRegister(lir->output());

  Label ok;
  // out = base + offset; fall through to the trap when the add carries.
  masm.ma_addTestCarry(Assembler::CarryClear, out, base, Imm32(mir->offset()),
                       &ok);
  masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
  masm.bind(&ok);
}
// Atomic read-modify-write on a typed-array element whose old value is used.
void CodeGenerator::visitAtomicTypedArrayElementBinop(
    LAtomicTypedArrayElementBinop* lir) {
  MOZ_ASSERT(lir->mir()->hasUses());

  AnyRegister output = ToAnyRegister(lir->output());
  Register elements = ToRegister(lir->elements());
  Register outTemp = ToTempRegisterOrInvalid(lir->temp2());
  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
  Register value = ToRegister(lir->value());

  Scalar::Type arrayType = lir->mir()->arrayType();
  size_t width = Scalar::byteSize(arrayType);

  // Constant indices fold into the address; dynamic indices use scaled
  // base-index addressing.
  if (lir->index()->isConstant()) {
    Address mem(elements, ToInt32(lir->index()) * width);
    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
                         lir->mir()->operation(), value, mem, valueTemp,
                         offsetTemp, maskTemp, outTemp, output);
  } else {
    BaseIndex mem(elements, ToRegister(lir->index()),
                  ScaleFromElemWidth(width));
    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
                         lir->mir()->operation(), value, mem, valueTemp,
                         offsetTemp, maskTemp, outTemp, output);
  }
}
// Atomic read-modify-write on a typed-array element whose old value is
// unused, so no output register is needed.
void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
    LAtomicTypedArrayElementBinopForEffect* lir) {
  MOZ_ASSERT(!lir->mir()->hasUses());

  Register elements = ToRegister(lir->elements());
  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
  Register value = ToRegister(lir->value());
  Scalar::Type arrayType = lir->mir()->arrayType();
  size_t width = Scalar::byteSize(arrayType);

  // Constant indices fold into the address; dynamic indices use scaled
  // base-index addressing.
  if (lir->index()->isConstant()) {
    Address mem(elements, ToInt32(lir->index()) * width);
    masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
                          lir->mir()->operation(), value, mem, valueTemp,
                          offsetTemp, maskTemp);
  } else {
    BaseIndex mem(elements, ToRegister(lir->index()),
                  ScaleFromElemWidth(width));
    masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
                          lir->mir()->operation(), value, mem, valueTemp,
                          offsetTemp, maskTemp);
  }
}
// Atomics.compareExchange on a typed-array element.
void CodeGenerator::visitCompareExchangeTypedArrayElement(
    LCompareExchangeTypedArrayElement* lir) {
  Register elements = ToRegister(lir->elements());
  AnyRegister output = ToAnyRegister(lir->output());
  Register outTemp = ToTempRegisterOrInvalid(lir->temp());

  Register oldval = ToRegister(lir->oldval());
  Register newval = ToRegister(lir->newval());
  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());

  Scalar::Type arrayType = lir->mir()->arrayType();
  size_t width = Scalar::byteSize(arrayType);

  // Constant indices fold into the address; dynamic indices use scaled
  // base-index addressing.
  if (lir->index()->isConstant()) {
    Address dest(elements, ToInt32(lir->index()) * width);
    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
                           newval, valueTemp, offsetTemp, maskTemp, outTemp,
                           output);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromElemWidth(width));
    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
                           newval, valueTemp, offsetTemp, maskTemp, outTemp,
                           output);
  }
}
// Atomics.exchange on a typed-array element.
void CodeGenerator::visitAtomicExchangeTypedArrayElement(
    LAtomicExchangeTypedArrayElement* lir) {
  Register elements = ToRegister(lir->elements());
  AnyRegister output = ToAnyRegister(lir->output());
  Register outTemp = ToTempRegisterOrInvalid(lir->temp());

  Register value = ToRegister(lir->value());
  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());

  Scalar::Type arrayType = lir->mir()->arrayType();
  size_t width = Scalar::byteSize(arrayType);

  // Constant indices fold into the address; dynamic indices use scaled
  // base-index addressing.
  if (lir->index()->isConstant()) {
    Address dest(elements, ToInt32(lir->index()) * width);
    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
                          valueTemp, offsetTemp, maskTemp, outTemp, output);
  } else {
    BaseIndex dest(elements, ToRegister(lir->index()),
                   ScaleFromElemWidth(width));
    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
                          valueTemp, offsetTemp, maskTemp, outTemp, output);
  }
}
// 64-bit wasm atomic compare-exchange on the heap.
void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir) {
  // Effective address: heap base + dynamic pointer (+ constant offset).
  BaseIndex addr(HeapReg, ToRegister(lir->ptr()), TimesOne,
                 lir->mir()->access().offset());
  masm.wasmCompareExchange64(lir->mir()->access(), addr,
                             ToRegister64(lir->oldValue()),
                             ToRegister64(lir->newValue()),
                             ToOutRegister64(lir));
}
// 64-bit wasm atomic exchange on the heap.
void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir) {
  // Effective address: heap base + dynamic pointer (+ constant offset).
  BaseIndex addr(HeapReg, ToRegister(lir->ptr()), TimesOne,
                 lir->mir()->access().offset());
  masm.wasmAtomicExchange64(lir->mir()->access(), addr,
                            ToRegister64(lir->value()), ToOutRegister64(lir));
}
// 64-bit wasm atomic read-modify-write on the heap.
void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir) {
  Register ptr = ToRegister(lir->ptr());
  Register64 value = ToRegister64(lir->value());
  Register64 output = ToOutRegister64(lir);
#ifdef JS_CODEGEN_MIPS32
  // On MIPS32 a 64-bit temp occupies two GPRs.
  Register64 temp(ToRegister(lir->getTemp(0)), ToRegister(lir->getTemp(1)));
#else
  Register64 temp(ToRegister(lir->getTemp(0)));
#endif

  uint32_t offset = lir->mir()->access().offset();

  // Effective address: heap base + dynamic pointer (+ constant offset).
  BaseIndex addr(HeapReg, ptr, TimesOne, offset);

  masm.wasmAtomicFetchOp64(lir->mir()->access(), lir->mir()->operation(), value,
                           addr, temp, output);
}
// Not yet implemented for this backend.
void CodeGenerator::visitNearbyInt(LNearbyInt*) { MOZ_CRASH("NYI"); }
// Not yet implemented for this backend.
void CodeGenerator::visitNearbyIntF(LNearbyIntF*) { MOZ_CRASH("NYI"); }