// Lower an unsigned integer modulus for x86/x64. A constant power-of-two
// divisor reduces to a mask (LModPowTwoI, reusing the input register);
// other constants use the reciprocal-multiplication LUDivOrModConstant;
// everything else falls back to the generic LUDivOrMod, whose remainder
// is defined in edx with eax reserved as a temp.
void
LIRGeneratorX86Shared::lowerUMod(MMod* mod)
{
    // Non-constant divisor: emit the generic unsigned div/mod instruction.
    if (!mod->rhs()->isConstant()) {
        LUDivOrMod* ins = new(alloc()) LUDivOrMod(useRegister(mod->lhs()),
                                                  useRegister(mod->rhs()),
                                                  tempFixed(eax));
        if (mod->fallible())
            assignSnapshot(ins, Bailout_DoubleOutput);
        defineFixed(ins, mod, LAllocation(AnyRegister(edx)));
        return;
    }

    uint32_t divisor = mod->rhs()->toConstant()->toInt32();
    int32_t power = FloorLog2(divisor);
    bool isPowerOfTwo = divisor != 0 && uint32_t(1) << power == divisor;

    if (isPowerOfTwo) {
        // x % 2^power is a simple mask; the output reuses the lhs register.
        LModPowTwoI* ins = new(alloc()) LModPowTwoI(useRegisterAtStart(mod->lhs()), power);
        if (mod->fallible())
            assignSnapshot(ins, Bailout_DoubleOutput);
        defineReuseInput(ins, mod, 0);
    } else {
        // Other constants: reciprocal multiplication, result fixed in eax
        // with edx reserved as a temp.
        LUDivOrModConstant* ins = new(alloc()) LUDivOrModConstant(useRegister(mod->lhs()),
                                                                  divisor,
                                                                  tempFixed(edx));
        if (mod->fallible())
            assignSnapshot(ins, Bailout_DoubleOutput);
        defineFixed(ins, mod, LAllocation(AnyRegister(eax)));
    }
}
// Lower a signed integer modulus on ARM (older fallible-bool API).
// Unsigned operands are routed to lowerUMod. For constant divisors two
// strength reductions are attempted: a positive power of two (LModPowTwoI)
// and a divisor of the form 2^k - 1 (LModMaskI). Otherwise hardware
// division (LModI) is used when available, else a software routine
// (LSoftModI) with its operands pinned to the registers the call expects.
bool
LIRGeneratorARM::lowerModI(MMod *mod)
{
    if (mod->isUnsigned())
        return lowerUMod(mod);

    if (mod->rhs()->isConstant()) {
        int32_t rhs = mod->rhs()->toConstant()->value().toInt32();
        int32_t shift = FloorLog2(rhs);
        if (rhs > 0 && 1 << shift == rhs) {
            // Divisor is a positive power of two: x % 2^shift.
            LModPowTwoI *lir = new LModPowTwoI(useRegister(mod->lhs()), shift);
            // A fallible mod needs a snapshot so the bailout can resume.
            if (mod->fallible() && !assignSnapshot(lir))
                return false;
            return define(lir, mod);
        } else if (shift < 31 && (1 << (shift+1)) - 1 == rhs) {
            // Divisor has the form 2^(shift+1) - 1, handled by a
            // mask-based instruction sequence with one scratch register.
            LModMaskI *lir = new LModMaskI(useRegister(mod->lhs()),
                                           temp(LDefinition::GENERAL), shift+1);
            if (mod->fallible() && !assignSnapshot(lir))
                return false;
            return define(lir, mod);
        }
    }

    if (hasIDIV()) {
        // CPU has an integer divide instruction: lower directly.
        LModI *lir = new LModI(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
        if (mod->fallible() && !assignSnapshot(lir))
            return false;
        return define(lir, mod);
    } else {
        // Software fallback: lhs in r0, rhs in r1, r2/r3 clobbered as
        // temps; the result is defined in r1.
        LSoftModI *lir = new LSoftModI(useFixed(mod->lhs(), r0), use(mod->rhs(), r1),
                                       tempFixed(r2), tempFixed(r3),
                                       temp(LDefinition::GENERAL));
        if (mod->fallible() && !assignSnapshot(lir))
            return false;
        return defineFixed(lir, mod, LAllocation(AnyRegister(r1)));
    }
}
// Lower a signed integer modulus on MIPS, strength-reducing constant
// divisors (positive powers of two, and values of the form 2^k - 1)
// before falling back to the generic LModI instruction.
void
LIRGeneratorMIPS::lowerModI(MMod *mod)
{
    if (mod->isUnsigned()) {
        lowerUMod(mod);
        return;
    }

    if (mod->rhs()->isConstant()) {
        int32_t divisor = mod->rhs()->toConstant()->value().toInt32();
        int32_t power = FloorLog2(divisor);

        if (divisor > 0 && 1 << power == divisor) {
            // x % 2^power reduces to a masking sequence.
            LModPowTwoI *ins = new(alloc()) LModPowTwoI(useRegister(mod->lhs()), power);
            if (mod->fallible())
                assignSnapshot(ins, Bailout_DoubleOutput);
            define(ins, mod);
            return;
        }

        if (power < 31 && (1 << (power + 1)) - 1 == divisor) {
            // Divisor of the form 2^(power+1) - 1: mask-based lowering
            // with two scratch registers.
            LModMaskI *ins = new(alloc()) LModMaskI(useRegister(mod->lhs()),
                                                    temp(LDefinition::GENERAL),
                                                    temp(LDefinition::GENERAL),
                                                    power + 1);
            if (mod->fallible())
                assignSnapshot(ins, Bailout_DoubleOutput);
            define(ins, mod);
            return;
        }
    }

    // Generic case: hardware modulus with one scratch register.
    LModI *ins = new(alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()),
                                    temp(LDefinition::GENERAL));
    if (mod->fallible())
        assignSnapshot(ins, Bailout_DoubleOutput);
    define(ins, mod);
}
// Lower a signed integer modulus for x86/x64. Unsigned operands go to
// lowerUMod. A constant divisor whose magnitude is a power of two uses
// LModPowTwoI; other non-zero constants use the reciprocal-multiplication
// LDivOrModConstantI (skipped under the LSRA register allocator). The
// general case reserves eax and defines the remainder in edx.
bool
LIRGeneratorX86Shared::lowerModI(MMod *mod)
{
    if (mod->isUnsigned())
        return lowerUMod(mod);

    if (mod->rhs()->isConstant()) {
        int32_t rhs = mod->rhs()->toConstant()->value().toInt32();
        int32_t shift = FloorLog2(Abs(rhs));
        if (rhs != 0 && uint32_t(1) << shift == Abs(rhs)) {
            // |rhs| is a power of two; the remainder only depends on |rhs|,
            // so a masking lowering suffices. The output reuses the lhs
            // register.
            LModPowTwoI *lir = new(alloc()) LModPowTwoI(useRegisterAtStart(mod->lhs()), shift);
            if (mod->fallible() && !assignSnapshot(lir, Bailout_DoubleOutput))
                return false;
            return defineReuseInput(lir, mod, 0);
        } else if (rhs != 0 && gen->optimizationInfo().registerAllocator() != RegisterAllocator_LSRA) {
            // Other non-zero constants: multiply by a precomputed reciprocal.
            // NOTE(review): presumably disabled under LSRA because of the
            // fixed-register constraints below — confirm against the
            // allocator implementation.
            LDivOrModConstantI *lir;
            lir = new(alloc()) LDivOrModConstantI(useRegister(mod->lhs()), rhs, tempFixed(edx));
            if (mod->fallible() && !assignSnapshot(lir, Bailout_DoubleOutput))
                return false;
            return defineFixed(lir, mod, LAllocation(AnyRegister(eax)));
        }
    }

    // General case: remainder defined in edx, eax reserved as a temp.
    LModI *lir = new(alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()), tempFixed(eax));
    if (mod->fallible() && !assignSnapshot(lir, Bailout_DoubleOutput))
        return false;
    return defineFixed(lir, mod, LAllocation(AnyRegister(edx)));
}
// Lower a signed integer division on MIPS. Unsigned divisions take the
// lowerUDiv path; a constant divisor that is a positive power of two is
// reduced to a shift (LDivPowTwoI); everything else uses LDivI.
void
LIRGeneratorMIPS::lowerDivI(MDiv *div)
{
    if (div->isUnsigned()) {
        lowerUDiv(div);
        return;
    }

    // Hardware division is slow, so a constant power-of-two denominator is
    // worth rewriting as a shift. (Negative powers of two and reciprocal
    // multiplication are further optimizations that could be added here.)
    if (div->rhs()->isConstant()) {
        int32_t divisor = div->rhs()->toConstant()->value().toInt32();
        int32_t power = FloorLog2(divisor);
        if (divisor > 0 && 1 << power == divisor) {
            LDivPowTwoI *ins = new(alloc()) LDivPowTwoI(useRegister(div->lhs()), power, temp());
            if (div->fallible())
                assignSnapshot(ins, Bailout_DoubleOutput);
            define(ins, div);
            return;
        }
    }

    // Generic case: hardware divide with one scratch register.
    LDivI *ins = new(alloc()) LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
    if (div->fallible())
        assignSnapshot(ins, Bailout_DoubleOutput);
    define(ins, div);
}
// Lower a signed integer division (older x86 variant: raw 'new' and
// reason-less snapshots). A constant positive power-of-two divisor becomes
// a shift (LDivPowTwoI); the general case uses hardware division with lhs
// fixed in eax, edx reserved, and the quotient defined in eax.
bool
LIRGeneratorX86Shared::lowerDivI(MDiv *div)
{
    if (div->isUnsigned())
        return lowerUDiv(div);

    // Division instructions are slow. Division by constant denominators can be
    // rewritten to use other instructions.
    if (div->rhs()->isConstant()) {
        int32_t rhs = div->rhs()->toConstant()->value().toInt32();
        // Check for division by a positive power of two, which is an easy and
        // important case to optimize. Note that other optimizations are also
        // possible; division by negative powers of two can be optimized in a
        // similar manner as positive powers of two, and division by other
        // constants can be optimized by a reciprocal multiplication technique.
        int32_t shift = FloorLog2(rhs);
        if (rhs > 0 && 1 << shift == rhs) {
            // NOTE(review): the second operand looks like an extra copy of
            // lhs for adjusting signed dividends (compare the later variant
            // that keys this on canBeNegativeDividend) — confirm against
            // LDivPowTwoI's codegen.
            LDivPowTwoI *lir = new LDivPowTwoI(useRegisterAtStart(div->lhs()), useRegister(div->lhs()), shift);
            if (div->fallible() && !assignSnapshot(lir))
                return false;
            return defineReuseInput(lir, div, 0);
        }
    }

    LDivI *lir = new LDivI(useFixed(div->lhs(), eax), useRegister(div->rhs()), tempFixed(edx));
    if (div->fallible() && !assignSnapshot(lir))
        return false;
    return defineFixed(lir, div, LAllocation(AnyRegister(eax)));
}
// Lower a signed integer division for x86/x64 (variant with the x/x
// self-division optimization). Constant positive power-of-two divisors are
// reduced to LDivPowTwoI, with an extra lhs copy only when the dividend
// may be negative. x/x is special-cased to avoid a fixed-register conflict
// (see the comment below). The general case uses hardware division with
// lhs fixed in eax, edx reserved, and the quotient defined in eax.
bool
LIRGeneratorX86Shared::lowerDivI(MDiv *div)
{
    if (div->isUnsigned())
        return lowerUDiv(div);

    // Division instructions are slow. Division by constant denominators can be
    // rewritten to use other instructions.
    if (div->rhs()->isConstant()) {
        int32_t rhs = div->rhs()->toConstant()->value().toInt32();
        // Check for division by a positive power of two, which is an easy and
        // important case to optimize. Note that other optimizations are also
        // possible; division by negative powers of two can be optimized in a
        // similar manner as positive powers of two, and division by other
        // constants can be optimized by a reciprocal multiplication technique.
        int32_t shift = FloorLog2(rhs);
        if (rhs > 0 && 1 << shift == rhs) {
            LAllocation lhs = useRegisterAtStart(div->lhs());
            LDivPowTwoI *lir;
            if (!div->canBeNegativeDividend()) {
                // Numerator is unsigned, so does not need adjusting.
                lir = new(alloc()) LDivPowTwoI(lhs, lhs, shift);
            } else {
                // Numerator is signed, and needs adjusting, and an extra
                // lhs copy register is needed.
                lir = new(alloc()) LDivPowTwoI(lhs, useRegister(div->lhs()), shift);
            }
            if (div->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
                return false;
            return defineReuseInput(lir, div, 0);
        }
    }

    // Optimize x/x. This is quaint, but it also protects the LDivI code below.
    // Since LDivI requires lhs to be in %eax, and since the register allocator
    // can't put a virtual register in two physical registers at the same time,
    // this puts rhs in %eax too, and since rhs isn't marked usedAtStart, it
    // would conflict with the %eax output register. (rhs could be marked
    // usedAtStart but for the fact that LDivI clobbers %edx early and rhs could
    // happen to be in %edx).
    if (div->lhs() == div->rhs()) {
        // If x can't be zero, x/x is just the constant 1.
        if (!div->canBeDivideByZero())
            return define(new(alloc()) LInteger(1), div);

        LDivSelfI *lir = new(alloc()) LDivSelfI(useRegisterAtStart(div->lhs()));
        if (div->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
            return false;
        return define(lir, div);
    }

    LDivI *lir = new(alloc()) LDivI(useFixed(div->lhs(), eax), useRegister(div->rhs()), tempFixed(edx));
    if (div->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
        return false;
    return defineFixed(lir, div, LAllocation(AnyRegister(eax)));
}
// Lower a signed integer division for x86/x64 (newest void-returning
// variant). Three lowerings are tried in order for a constant divisor:
// a (possibly negative) power of two via shifting, then reciprocal
// multiplication via LDivOrModConstantI (skipped under LSRA), then the
// generic hardware division with edx reserved and the quotient in eax.
void
LIRGeneratorX86Shared::lowerDivI(MDiv *div)
{
    if (div->isUnsigned()) {
        lowerUDiv(div);
        return;
    }

    // Division instructions are slow. Division by constant denominators can be
    // rewritten to use other instructions.
    if (div->rhs()->isConstant()) {
        int32_t rhs = div->rhs()->toConstant()->value().toInt32();
        // Division by powers of two can be done by shifting, and division by
        // other numbers can be done by a reciprocal multiplication technique.
        int32_t shift = FloorLog2(Abs(rhs));
        if (rhs != 0 && uint32_t(1) << shift == Abs(rhs)) {
            LAllocation lhs = useRegisterAtStart(div->lhs());
            LDivPowTwoI *lir;
            // The final flag records whether the divisor is negative, so
            // codegen can negate the shifted result.
            if (!div->canBeNegativeDividend()) {
                // Numerator is unsigned, so does not need adjusting.
                lir = new(alloc()) LDivPowTwoI(lhs, lhs, shift, rhs < 0);
            } else {
                // Numerator is signed, and needs adjusting, and an extra
                // lhs copy register is needed.
                lir = new(alloc()) LDivPowTwoI(lhs, useRegister(div->lhs()), shift, rhs < 0);
            }
            if (div->fallible())
                assignSnapshot(lir, Bailout_DoubleOutput);
            defineReuseInput(lir, div, 0);
            return;
        }
        if (rhs != 0 && gen->optimizationInfo().registerAllocator() != RegisterAllocator_LSRA) {
            // Reciprocal multiplication: eax reserved, quotient fixed in edx.
            LDivOrModConstantI *lir;
            lir = new(alloc()) LDivOrModConstantI(useRegister(div->lhs()), rhs, tempFixed(eax));
            if (div->fallible())
                assignSnapshot(lir, Bailout_DoubleOutput);
            defineFixed(lir, div, LAllocation(AnyRegister(edx)));
            return;
        }
    }

    // General case: edx reserved as a temp, quotient defined in eax.
    LDivI *lir = new(alloc()) LDivI(useRegister(div->lhs()), useRegister(div->rhs()), tempFixed(edx));
    if (div->fallible())
        assignSnapshot(lir, Bailout_DoubleOutput);
    defineFixed(lir, div, LAllocation(AnyRegister(eax)));
}
// Lower a signed integer modulus on ARM (newer void-returning variant).
// Unsigned operands route to lowerUMod. Constant divisors try the
// power-of-two (LModPowTwoI) and 2^k - 1 mask (LModMaskI) reductions;
// otherwise hardware division is used when available (HasIDIV), else the
// software LSoftModI with its operands and temps pinned to r0-r3.
void
LIRGeneratorARM::lowerModI(MMod* mod)
{
    if (mod->isUnsigned()) {
        lowerUMod(mod);
        return;
    }

    if (mod->rhs()->isConstant()) {
        int32_t rhs = mod->rhs()->toConstant()->toInt32();
        int32_t shift = FloorLog2(rhs);
        if (rhs > 0 && 1 << shift == rhs) {
            // Divisor is a positive power of two: x % 2^shift.
            LModPowTwoI* lir = new(alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
            if (mod->fallible())
                assignSnapshot(lir, Bailout_DoubleOutput);
            define(lir, mod);
            return;
        }
        if (shift < 31 && (1 << (shift+1)) - 1 == rhs) {
            // Divisor of the form 2^(shift+1) - 1; the mask sequence uses
            // two scratch registers. rhs == 0 is excluded by the assert.
            MOZ_ASSERT(rhs);
            LModMaskI* lir = new(alloc()) LModMaskI(useRegister(mod->lhs()),
                                                    temp(), temp(), shift+1);
            if (mod->fallible())
                assignSnapshot(lir, Bailout_DoubleOutput);
            define(lir, mod);
            return;
        }
    }

    if (HasIDIV()) {
        // CPU has an integer divide instruction: lower directly.
        LModI* lir = new(alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
        if (mod->fallible())
            assignSnapshot(lir, Bailout_DoubleOutput);
        define(lir, mod);
        return;
    }

    // Software fallback: lhs in r0, rhs in r1, r0/r2/r3 clobbered; the
    // result is defined in r1.
    LSoftModI* lir = new(alloc()) LSoftModI(useFixedAtStart(mod->lhs(), r0),
                                            useFixedAtStart(mod->rhs(), r1),
                                            tempFixed(r0), tempFixed(r2), tempFixed(r3),
                                            temp(LDefinition::GENERAL));
    if (mod->fallible())
        assignSnapshot(lir, Bailout_DoubleOutput);
    defineFixed(lir, mod, LAllocation(AnyRegister(r1)));
}
// Lower a signed integer modulus for x86/x64 (older fallible-bool API).
// A constant positive power-of-two divisor reduces to a mask; otherwise
// the hardware instruction is used, with eax reserved as a temp and the
// remainder defined in edx.
bool
LIRGeneratorX86Shared::lowerModI(MMod *mod)
{
    // Unsigned modulus has its own lowering path.
    if (mod->isUnsigned())
        return lowerUMod(mod);

    if (mod->rhs()->isConstant()) {
        int32_t divisor = mod->rhs()->toConstant()->value().toInt32();
        int32_t power = FloorLog2(divisor);
        bool powerOfTwo = divisor > 0 && 1 << power == divisor;
        if (powerOfTwo) {
            // x % 2^power is a mask; the output reuses the input register.
            LModPowTwoI *ins = new LModPowTwoI(useRegisterAtStart(mod->lhs()), power);
            if (mod->fallible() && !assignSnapshot(ins))
                return false;
            return defineReuseInput(ins, mod, 0);
        }
    }

    // Generic case: eax is reserved as a temp; remainder is fixed in edx.
    LModI *ins = new LModI(useRegister(mod->lhs()), useRegister(mod->rhs()), tempFixed(eax));
    if (mod->fallible() && !assignSnapshot(ins))
        return false;
    return defineFixed(ins, mod, LAllocation(AnyRegister(edx)));
}
// Lower a signed integer division for x86/x64 (variant with
// Bailout_BaselineInfo snapshots). A constant positive power-of-two
// divisor becomes a shift, with an extra lhs copy only when the dividend
// may be negative; the general case uses hardware division with edx
// reserved and the quotient defined in eax.
bool
LIRGeneratorX86Shared::lowerDivI(MDiv *div)
{
    if (div->isUnsigned())
        return lowerUDiv(div);

    // Division instructions are slow. Division by constant denominators can be
    // rewritten to use other instructions.
    if (div->rhs()->isConstant()) {
        int32_t rhs = div->rhs()->toConstant()->value().toInt32();
        // Check for division by a positive power of two, which is an easy and
        // important case to optimize. Note that other optimizations are also
        // possible; division by negative powers of two can be optimized in a
        // similar manner as positive powers of two, and division by other
        // constants can be optimized by a reciprocal multiplication technique.
        int32_t shift = FloorLog2(rhs);
        if (rhs > 0 && 1 << shift == rhs) {
            LAllocation lhs = useRegisterAtStart(div->lhs());
            LDivPowTwoI *lir;
            if (!div->canBeNegativeDividend()) {
                // Numerator is unsigned, so does not need adjusting.
                lir = new(alloc()) LDivPowTwoI(lhs, lhs, shift);
            } else {
                // Numerator is signed, and needs adjusting, and an extra
                // lhs copy register is needed.
                lir = new(alloc()) LDivPowTwoI(lhs, useRegister(div->lhs()), shift);
            }
            if (div->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
                return false;
            return defineReuseInput(lir, div, 0);
        }
    }

    // General case: edx reserved as a temp, quotient defined in eax.
    LDivI *lir = new(alloc()) LDivI(useRegister(div->lhs()), useRegister(div->rhs()), tempFixed(edx));
    if (div->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
        return false;
    return defineFixed(lir, div, LAllocation(AnyRegister(eax)));
}
// Lower a signed integer modulus for x86/x64 (variant with the x%x
// self-modulus optimization). A constant positive power-of-two divisor
// becomes a mask; x%x is special-cased (see the comment below); the
// general case uses hardware division with lhs fixed in eax and the
// remainder defined in edx.
bool
LIRGeneratorX86Shared::lowerModI(MMod *mod)
{
    if (mod->isUnsigned())
        return lowerUMod(mod);

    if (mod->rhs()->isConstant()) {
        int32_t rhs = mod->rhs()->toConstant()->value().toInt32();
        int32_t shift = FloorLog2(rhs);
        if (rhs > 0 && 1 << shift == rhs) {
            // Power-of-two divisor: mask, reusing the input register.
            LModPowTwoI *lir = new LModPowTwoI(useRegisterAtStart(mod->lhs()), shift);
            if (mod->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
                return false;
            return defineReuseInput(lir, mod, 0);
        }
    }

    // Optimize x%x. The comments in lowerDivI apply here as well, except
    // that we return 0 for all cases except when x is 0 and we're not
    // truncated.
    if (mod->rhs() == mod->lhs()) {
        if (mod->isTruncated())
            return define(new LInteger(0), mod);

        LModSelfI *lir = new LModSelfI(useRegisterAtStart(mod->lhs()));
        if (mod->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
            return false;
        return define(lir, mod);
    }

    // General case: lhs fixed in eax, remainder defined in edx.
    LModI *lir = new LModI(useFixedAtStart(mod->lhs(), eax), useRegister(mod->rhs()), tempFixed(eax));
    if (mod->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
        return false;
    return defineFixed(lir, mod, LAllocation(AnyRegister(edx)));
}
// Emit x86 code for an integer multiply. Constant right-hand sides get
// strength reductions (negate for -1, clear for 0, nop for 1, add for 2,
// shift for larger powers of two when overflow is impossible); otherwise
// imul is emitted. Bailouts cover negative-zero results and overflow as
// required by the MIR node's flags.
bool
CodeGeneratorX86Shared::visitMulI(LMulI *ins)
{
    const LAllocation *lhs = ins->lhs();
    const LAllocation *rhs = ins->rhs();
    MMul *mul = ins->mir();
    JS_ASSERT_IF(mul->mode() == MMul::Integer, !mul->canBeNegativeZero() && !mul->canOverflow());

    if (rhs->isConstant()) {
        // Bailout on -0.0
        int32_t constant = ToInt32(rhs);
        if (mul->canBeNegativeZero() && constant <= 0) {
            // constant == 0: result is -0 when lhs is negative (Signed);
            // constant < 0: result is -0 when lhs is zero (Equal).
            Assembler::Condition bailoutCond = (constant == 0) ? Assembler::Signed : Assembler::Equal;
            masm.testl(ToRegister(lhs), ToRegister(lhs));
            if (!bailoutIf(bailoutCond, ins->snapshot()))
                return false;
        }

        switch (constant) {
          case -1:
            masm.negl(ToOperand(lhs));
            break;
          case 0:
            masm.xorl(ToOperand(lhs), ToRegister(lhs));
            return true; // escape overflow check;
          case 1:
            // nop
            return true; // escape overflow check;
          case 2:
            masm.addl(ToOperand(lhs), ToRegister(lhs));
            break;
          default:
            if (!mul->canOverflow() && constant > 0) {
                // Use shift if cannot overflow and constant is power of 2
                int32_t shift = FloorLog2(constant);
                if ((1 << shift) == constant) {
                    masm.shll(Imm32(shift), ToRegister(lhs));
                    return true;
                }
            }
            masm.imull(Imm32(ToInt32(rhs)), ToRegister(lhs));
        }

        // Bailout on overflow
        if (mul->canOverflow() && !bailoutIf(Assembler::Overflow, ins->snapshot()))
            return false;
    } else {
        masm.imull(ToOperand(rhs), ToRegister(lhs));

        // Bailout on overflow
        if (mul->canOverflow() && !bailoutIf(Assembler::Overflow, ins->snapshot()))
            return false;

        if (mul->canBeNegativeZero()) {
            // Jump to an OOL path if the result is 0.
            MulNegativeZeroCheck *ool = new MulNegativeZeroCheck(ins);
            if (!addOutOfLineCode(ool))
                return false;

            masm.testl(ToRegister(lhs), ToRegister(lhs));
            masm.j(Assembler::Zero, ool->entry());
            masm.bind(ool->rejoin());
        }
    }
    return true;
}
// Convert a decoded wasm linear-memory address into its AST form; the
// flags field carries log2 of the access alignment.
static AstLoadStoreAddress
AstDecodeLoadStoreAddress(const LinearMemoryAddress<AstDecodeStackItem>& addr)
{
    const uint32_t alignLog2 = FloorLog2(addr.align);
    return AstLoadStoreAddress(addr.base.expr, alignLog2, addr.offset);
}