Example #1
static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
         "mismatch in calculation");
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  int i;
  // For each general register that may hold an oop (G1, G3, G4, G5), record the
  // stack slot where its value was saved so the GC can locate and update it.
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }
  return oop_map;
}
Example #2
void
SnapshotWriter::addSlot(const FloatRegister &reg)
{
    IonSpew(IonSpew_Snapshots, "    slot %u: double (reg %s)", slotsWritten_, reg.name());

    writeSlotHeader(JSVAL_TYPE_DOUBLE, reg.code());
}
Example #3
void
SnapshotWriter::addFloat32Slot(const FloatRegister &reg)
{
    JS_ASSERT(reg.code() < MIN_REG_FIELD_ESC);
    IonSpew(IonSpew_Snapshots, "    slot %u: float32 (reg %s)", slotsWritten_, reg.name());
    writeSlotHeader(JSVAL_TYPE_NULL, ESC_REG_FIELD_FLOAT32_REG);
    writer_.writeUnsigned(reg.code());
}
void
MacroAssemblerX86::convertUInt64ToDouble(Register64 src, Register temp, FloatRegister dest)
{
    // SUBPD needs SSE2, HADDPD needs SSE3.
    if (!HasSSE3()) {
        convertUInt32ToDouble(src.high, dest);
        movePtr(ImmPtr(&TO_DOUBLE_HIGH_SCALE), temp);
        loadDouble(Address(temp, 0), ScratchDoubleReg);
        asMasm().mulDouble(ScratchDoubleReg, dest);
        convertUInt32ToDouble(src.low, ScratchDoubleReg);
        asMasm().addDouble(ScratchDoubleReg, dest);
        return;
    }

    // The following operation uses the entire 128 bits of the dest XMM register.
    // Currently the upper 64 bits are free when we have access to the lower 64 bits.
    MOZ_ASSERT(dest.size() == 8);
    FloatRegister dest128 = FloatRegister(dest.encoding(), FloatRegisters::Simd128);

    // Assume that src is represented as follows:
    //   src      = 0x HHHHHHHH LLLLLLLL

    // Move src to dest (=dest128) and ScratchInt32x4Reg (=scratch):
    //   dest     = 0x 00000000 00000000  00000000 LLLLLLLL
    //   scratch  = 0x 00000000 00000000  00000000 HHHHHHHH
    vmovd(src.low, dest128);
    vmovd(src.high, ScratchSimd128Reg);

    // Unpack and interleave dest and scratch to dest:
    //   dest     = 0x 00000000 00000000  HHHHHHHH LLLLLLLL
    vpunpckldq(ScratchSimd128Reg, dest128, dest128);

    // Unpack and interleave dest and a constant C1 to dest:
    //   C1       = 0x 00000000 00000000  45300000 43300000
    //   dest     = 0x 45300000 HHHHHHHH  43300000 LLLLLLLL
    // here, each 64-bit half of dest represents the following double:
    //   HI(dest) = 0x 1.00000HHHHHHHH * 2**84 == 2**84 + 0x HHHHHHHH 00000000
    //   LO(dest) = 0x 1.00000LLLLLLLL * 2**52 == 2**52 + 0x 00000000 LLLLLLLL
    movePtr(ImmPtr(TO_DOUBLE), temp);
    vpunpckldq(Operand(temp, 0), dest128, dest128);

    // Subtract a constant C2 from dest, for each 64-bit part:
    //   C2       = 0x 45300000 00000000  43300000 00000000
    // here, each 64-bit half of C2 represents the following double:
    //   HI(C2)   = 0x 1.0000000000000 * 2**84 == 2**84
    //   LO(C2)   = 0x 1.0000000000000 * 2**52 == 2**52
    // after the operation, each 64-bit half of dest represents the following:
    //   HI(dest) = double(0x HHHHHHHH 00000000)
    //   LO(dest) = double(0x 00000000 LLLLLLLL)
    vsubpd(Operand(temp, sizeof(uint64_t) * 2), dest128, dest128);

    // Add HI(dest) and LO(dest) as doubles and store the result into LO(dest):
    //   LO(dest) = double(0x HHHHHHHH 00000000) + double(0x 00000000 LLLLLLLL)
    //            = double(0x HHHHHHHH LLLLLLLL)
    //            = double(src)
    vhaddpd(dest128, dest128);
}
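The 2**52 / 2**84 constant trick above is easy to sanity-check in plain scalar C++. The sketch below is an illustration only, not part of the original port, and uint64ToDoubleScalar is a hypothetical helper name: it builds the same two biased bit patterns, strips the bias constants (C2 above), and adds the halves, which is what vpunpckldq/vsubpd/vhaddpd do on the packed pair.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Scalar sketch of the conversion: the bit pattern 0x43300000'LLLLLLLL is the
// double 2**52 + L, and 0x45300000'HHHHHHHH is the double 2**84 + H * 2**32.
static double uint64ToDoubleScalar(uint64_t src)
{
    uint64_t loBits = 0x4330000000000000ULL | (src & 0xffffffffULL); // 2**52 + LO
    uint64_t hiBits = 0x4530000000000000ULL | (src >> 32);           // 2**84 + HI * 2**32
    double lo, hi;
    std::memcpy(&lo, &loBits, sizeof(lo));
    std::memcpy(&hi, &hiBits, sizeof(hi));
    // Same as vsubpd with C2 followed by vhaddpd: remove the biases, then add.
    return (hi - 0x1.0p84) + (lo - 0x1.0p52);
}

int main()
{
    // Prints 1.8446744073709552e+19, i.e. double(2**64 - 1).
    std::printf("%.17g\n", uint64ToDoubleScalar(UINT64_C(0xffffffffffffffff)));
    return 0;
}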
Example #5
void
MacroAssemblerX86::addConstantDouble(double d, FloatRegister dest)
{
    Double* dbl = getDouble(d);
    if (!dbl)
        return;
    masm.vaddsd_mr(reinterpret_cast<const void*>(dbl->uses.prev()), dest.code(), dest.code());
    dbl->uses.setPrev(masm.size());
}
Example #6
void
MacroAssemblerX86::addConstantFloat32(float f, FloatRegister dest)
{
    Float* flt = getFloat(f);
    if (!flt)
        return;
    masm.vaddss_mr(reinterpret_cast<const void*>(flt->uses.prev()), dest.code(), dest.code());
    flt->uses.setPrev(masm.size());
}
void
MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
{
    FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
    unsigned numFpu = fpuSet.size();
    int32_t diffG = set.gprs().size() * sizeof(intptr_t);
    int32_t diffF = fpuSet.getPushSizeInBytes();
    const int32_t reservedG = diffG;
    const int32_t reservedF = diffF;

    for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
        FloatRegister reg = *iter;
        diffF -= reg.size();
        numFpu -= 1;
        if (ignore.has(reg))
            continue;

        Address spillAddress(StackPointer, diffF);
        if (reg.isDouble())
            loadDouble(spillAddress, reg);
        else if (reg.isSingle())
            loadFloat32(spillAddress, reg);
        else if (reg.isSimd128())
            loadUnalignedSimd128Float(spillAddress, reg);
        else
            MOZ_CRASH("Unknown register type.");
    }
    freeStack(reservedF);
    MOZ_ASSERT(numFpu == 0);
    // x64 padding to keep the stack aligned on uintptr_t. Keep in sync with
    // getPushSizeInBytes.
    diffF -= diffF % sizeof(uintptr_t);
    MOZ_ASSERT(diffF == 0);

    // On x86, use pop to pop the integer registers, if we're not going to
    // ignore any slots, as it's fast on modern hardware and it's a small
    // instruction.
    if (ignore.emptyGeneral()) {
        for (GeneralRegisterForwardIterator iter(set.gprs()); iter.more(); ++iter) {
            diffG -= sizeof(intptr_t);
            Pop(*iter);
        }
    } else {
        for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
            diffG -= sizeof(intptr_t);
            if (!ignore.has(*iter))
                loadPtr(Address(StackPointer, diffG), *iter);
        }
        freeStack(reservedG);
    }
    MOZ_ASSERT(diffG == 0);
}
void SharedInfo::set_regName() {
  Register reg = GR0;
  for (uint i = 0;
       i < RegisterImpl::number_of_registers;
       ++i, reg = reg->successor()) {
    regName[i] = reg->name();
  }

#ifdef COMPILER1
  uint fbase = RegisterImpl::number_of_registers;
  FloatRegister freg = FR0;
  for (uint i = 0;
       i < FloatRegisterImpl::number_of_registers;
       ++i, freg = freg->successor()) {
    regName[fbase+i] = freg->name();
  }

  uint pbase = RegisterImpl::number_of_registers +
               FloatRegisterImpl::number_of_registers;
  PredicateRegister preg = PR0;
  for (uint i = 0;
       i < PredicateRegisterImpl::number_of_registers;
       ++i, preg = preg->successor()) {
    regName[pbase+i] = preg->name();
  }

  uint bbase = RegisterImpl::number_of_registers +
               FloatRegisterImpl::number_of_registers +
               PredicateRegisterImpl::number_of_registers;
  BranchRegister breg = BR0;
  for (uint i = 0;
       i < BranchRegisterImpl::number_of_registers;
       ++i, breg = breg->successor()) {
    regName[bbase+i] = breg->name();
  }

  uint abase = RegisterImpl::number_of_registers +
               FloatRegisterImpl::number_of_registers +
               PredicateRegisterImpl::number_of_registers +
               BranchRegisterImpl::number_of_registers;
  ApplicationRegister areg = AR0;
  for (uint i = 0;
       i < ApplicationRegisterImpl::number_of_registers;
       ++i, areg = areg->successor()) {
    regName[abase+i] = areg->name();
  }
#endif
}
Example #9
void
MacroAssemblerX86::loadConstantFloat32(float f, const FloatRegister &dest)
{
    // Unlike loadConstantDouble, this one doesn't have a maybeInlineFloat fast
    // path, but adding one might be worthwhile in the future.
    if (!floatMap_.initialized()) {
        enoughMemory_ &= floatMap_.init();
        if (!enoughMemory_)
            return;
    }
    size_t floatIndex;
    FloatMap::AddPtr p = floatMap_.lookupForAdd(f);
    if (p) {
        floatIndex = p->value;
    } else {
        floatIndex = floats_.length();
        enoughMemory_ &= floats_.append(Float(f));
        enoughMemory_ &= floatMap_.add(p, f, floatIndex);
        if (!enoughMemory_)
            return;
    }
    Float &flt = floats_[floatIndex];
    JS_ASSERT(!flt.uses.bound());

    masm.movss_mr(reinterpret_cast<const void *>(flt.uses.prev()), dest.code());
    flt.uses.setPrev(masm.size());
}
void
MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest)
{
    if (maybeInlineFloat(f, dest))
        return;

    if (!floatMap_.initialized()) {
        enoughMemory_ &= floatMap_.init();
        if (!enoughMemory_)
            return;
    }
    size_t floatIndex;
    if (FloatMap::AddPtr p = floatMap_.lookupForAdd(f)) {
        floatIndex = p->value();
    } else {
        floatIndex = floats_.length();
        enoughMemory_ &= floats_.append(Float(f));
        enoughMemory_ &= floatMap_.add(p, f, floatIndex);
        if (!enoughMemory_)
            return;
    }
    Float& flt = floats_[floatIndex];
    MOZ_ASSERT(!flt.uses.bound());

    // See comment in loadConstantDouble
    JmpSrc j = masm.vmovss_ripr(dest.encoding());
    JmpSrc prev = JmpSrc(flt.uses.use(j.offset()));
    masm.setNextJump(j, prev);
}
void
MacroAssemblerX86::loadConstantDouble(double d, const FloatRegister &dest)
{
    if (maybeInlineDouble(d, dest))
        return;

    if (!doubleMap_.initialized()) {
        enoughMemory_ &= doubleMap_.init();
        if (!enoughMemory_)
            return;
    }
    size_t doubleIndex;
    DoubleMap::AddPtr p = doubleMap_.lookupForAdd(d);
    if (p) {
        doubleIndex = p->value;
    } else {
        doubleIndex = doubles_.length();
        enoughMemory_ &= doubles_.append(Double(d));
        enoughMemory_ &= doubleMap_.add(p, d, doubleIndex);
        if (!enoughMemory_)
            return;
    }
    Double &dbl = doubles_[doubleIndex];
    JS_ASSERT(!dbl.uses.bound());

    masm.movsd_mr(reinterpret_cast<const void *>(dbl.uses.prev()), dest.code());
    dbl.uses.setPrev(masm.size());
}
void
MacroAssemblerX64::loadConstantDouble(double d, FloatRegister dest)
{
    if (maybeInlineDouble(d, dest))
        return;

    if (!doubleMap_.initialized()) {
        enoughMemory_ &= doubleMap_.init();
        if (!enoughMemory_)
            return;
    }
    size_t doubleIndex;
    if (DoubleMap::AddPtr p = doubleMap_.lookupForAdd(d)) {
        doubleIndex = p->value();
    } else {
        doubleIndex = doubles_.length();
        enoughMemory_ &= doubles_.append(Double(d));
        enoughMemory_ &= doubleMap_.add(p, d, doubleIndex);
        if (!enoughMemory_)
            return;
    }
    Double& dbl = doubles_[doubleIndex];
    MOZ_ASSERT(!dbl.uses.bound());

    // The constants will be stored in a pool appended to the text (see
    // finish()), so they will always be a fixed distance from the
    // instructions which reference them. This allows the instructions to use
    // PC-relative addressing. Use "jump" label support code, because we need
    // the same PC-relative address patching that jumps use.
    JmpSrc j = masm.vmovsd_ripr(dest.encoding());
    JmpSrc prev = JmpSrc(dbl.uses.use(j.offset()));
    masm.setNextJump(j, prev);
}
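The "jump label" patching strategy described in the comment above can be modelled outside the assembler. The toy below is only an illustration of the idea, not SpiderMonkey code; ToyBuffer, emitUse, and finish are invented names, and int32_t fields stand in for instruction displacement slots. Each pending reference stores the offset of the previous one in its own placeholder, forming a chain threaded through the buffer; once the constant pool is appended, the chain is walked and every placeholder is patched with the pool offset.

#include <cstdint>
#include <cstdio>
#include <vector>

struct ToyBuffer {
    std::vector<int32_t> slots;  // one placeholder "field" per emitted reference
    int32_t lastUse = -1;        // head of the patch chain, -1 when empty

    // Emit a reference to the not-yet-placed constant: store the previous chain
    // head in this slot and make this slot the new head.
    void emitUse()
    {
        slots.push_back(lastUse);
        lastUse = int32_t(slots.size()) - 1;
    }

    // Append the constant "pool" and resolve every pending reference.
    void finish(int32_t constantValue)
    {
        int32_t poolOffset = int32_t(slots.size());
        slots.push_back(constantValue);
        for (int32_t use = lastUse; use != -1; ) {
            int32_t next = slots[use];   // follow the chain before overwriting
            slots[use] = poolOffset;     // patch with the constant's location
            use = next;
        }
        lastUse = -1;
    }
};

int main()
{
    ToyBuffer buf;
    buf.emitUse();
    buf.emitUse();
    buf.emitUse();
    buf.finish(42);
    for (int32_t v : buf.slots)
        std::printf("%d\n", v);  // prints 3 3 3 42
    return 0;
}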
void
MacroAssemblerX64::passABIArg(const MoveOperand& from, MoveOp::Type type)
{
    MoveOperand to;
    switch (type) {
      case MoveOp::FLOAT32:
      case MoveOp::DOUBLE: {
        FloatRegister dest;
        if (GetFloatArgReg(passedIntArgs_, passedFloatArgs_++, &dest)) {
            // Convert to the right type of register.
            if (type == MoveOp::FLOAT32)
                dest = dest.asSingle();
            if (from.isFloatReg() && from.floatReg() == dest) {
                // Nothing to do; the value is in the right register already
                return;
            }
            to = MoveOperand(dest);
        } else {
            to = MoveOperand(StackPointer, stackForCall_);
            switch (type) {
              case MoveOp::FLOAT32: stackForCall_ += sizeof(float);  break;
              case MoveOp::DOUBLE:  stackForCall_ += sizeof(double); break;
              default: MOZ_CRASH("Unexpected float register class argument type");
            }
        }
        break;
      }
      case MoveOp::GENERAL: {
        Register dest;
        if (GetIntArgReg(passedIntArgs_++, passedFloatArgs_, &dest)) {
            if (from.isGeneralReg() && from.reg() == dest) {
                // Nothing to do; the value is in the right register already
                return;
            }
            to = MoveOperand(dest);
        } else {
            to = MoveOperand(StackPointer, stackForCall_);
            stackForCall_ += sizeof(int64_t);
        }
        break;
      }
      default:
        MOZ_CRASH("Unexpected argument type");
    }

    enoughMemory_ = moveResolver_.addMove(from, to, type);
}
Example #14
void
MacroAssemblerMIPS::addConstantDouble(double d, const FloatRegister &dest)
{
    Double *dbl = getDouble(d);
    if (!dbl)
        return;
    // TODO (wangqing): needs a MIPS equivalent of
    //   masm.addsd_mr(reinterpret_cast<const void *>(dbl->uses.prev()), dest.code());
    mcss.loadDouble(reinterpret_cast<const void *>(dbl->uses.prev()), dest.code());
    dbl->uses.setPrev(masm.size());
}
Example #15
void
MacroAssemblerMIPS::addConstantFloat32(float f, const FloatRegister &dest)
{
    Float *flt = getFloat(f);
    if (!flt)
        return;
    // TODO (wangqing): needs a MIPS equivalent of
    //   masm.addss_mr(reinterpret_cast<const void *>(flt->uses.prev()), dest.code());
    mcss.loadFloat(reinterpret_cast<const void *>(flt->uses.prev()), dest.code());
    flt->uses.setPrev(masm.size());
}
void
MacroAssemblerX64::loadConstantSimd128Float(const SimdConstant& v, FloatRegister dest)
{
    if (maybeInlineSimd128Float(v, dest))
        return;
    SimdData* val = getSimdData(v);
    if (!val)
        return;
    JmpSrc j = masm.vmovaps_ripr(dest.encoding());
    propagateOOM(val->uses.append(CodeOffset(j.offset())));
}
void
MacroAssemblerX86::loadConstantDouble(double d, FloatRegister dest)
{
    if (maybeInlineDouble(d, dest))
        return;
    Double* dbl = getDouble(d);
    if (!dbl)
        return;
    masm.vmovsd_mr(nullptr, dest.encoding());
    propagateOOM(dbl->uses.append(CodeOffset(masm.size())));
}
void
MacroAssemblerX86::loadConstantSimd128Float(const SimdConstant& v, FloatRegister dest)
{
    if (maybeInlineSimd128Float(v, dest))
        return;
    SimdData* f4 = getSimdData(v);
    if (!f4)
        return;
    masm.vmovaps_mr(nullptr, dest.encoding());
    propagateOOM(f4->uses.append(CodeOffset(masm.size())));
}
void
MacroAssemblerX86::loadConstantFloat32(float f, FloatRegister dest)
{
    if (maybeInlineFloat(f, dest))
        return;
    Float* flt = getFloat(f);
    if (!flt)
        return;
    masm.vmovss_mr(nullptr, dest.encoding());
    propagateOOM(flt->uses.append(CodeOffset(masm.size())));
}
void
MacroAssemblerX86::loadConstantFloat32(float f, FloatRegister dest)
{
    if (maybeInlineFloat(f, dest))
        return;
    Float *flt = getFloat(f);
    if (!flt)
        return;
    masm.movss_mr(reinterpret_cast<const void *>(flt->uses.prev()), dest.code());
    flt->uses.setPrev(masm.size());
}
void
MacroAssemblerX86::loadConstantDouble(double d, FloatRegister dest)
{
    if (maybeInlineDouble(d, dest))
        return;
    Double *dbl = getDouble(d);
    if (!dbl)
        return;
    masm.movsd_mr(reinterpret_cast<const void *>(dbl->uses.prev()), dest.code());
    dbl->uses.setPrev(masm.size());
}
Example #22
void VMRegImpl::set_regName() {
  Register reg = ::as_Register(0);
  int i;
  // Each register spans two VMReg slots, so both slots get the register's name.
  for (i = 0; i < ConcreteRegisterImpl::max_gpr ; ) {
    regName[i++] = reg->name();
    regName[i++] = reg->name();
    reg = reg->successor();
  }

  FloatRegister freg = ::as_FloatRegister(0);
  for ( ; i < ConcreteRegisterImpl::max_fpr ; ) {
    regName[i++] = freg->name();
    regName[i++] = freg->name();
    freg = freg->successor();
  }

  for ( ; i < ConcreteRegisterImpl::number_of_registers ; i++ ) {
    regName[i] = "NON-GPR-FPR";
  }

}
Example #23
void
MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest)
{
    if (maybeInlineFloat(f, dest))
        return;
    Float* flt = getFloat(f);
    if (!flt)
        return;
    // See comment in loadConstantDouble
    JmpSrc j = masm.vmovss_ripr(dest.encoding());
    propagateOOM(flt->uses.append(CodeOffset(j.offset())));
}
Example #24
void
MacroAssemblerX86::loadConstantFloat32x4(const SimdConstant& v, FloatRegister dest)
{
    MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
    if (maybeInlineFloat32x4(v, dest))
        return;
    SimdData* f4 = getSimdData(v);
    if (!f4)
        return;
    MOZ_ASSERT(f4->type() == SimdConstant::Float32x4);
    masm.vmovaps_mr(nullptr, dest.encoding());
    propagateOOM(f4->uses.append(CodeOffset(masm.size())));
}
void
MacroAssemblerX86::loadConstantFloat32x4(const SimdConstant &v, FloatRegister dest)
{
    MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
    if (maybeInlineFloat32x4(v, dest))
        return;
    SimdData *f4 = getSimdData(v);
    if (!f4)
        return;
    MOZ_ASSERT(f4->type() == SimdConstant::Float32x4);
    masm.movaps_mr(reinterpret_cast<const void *>(f4->uses.prev()), dest.code());
    f4->uses.setPrev(masm.size());
}
Example #26
void
MacroAssemblerX86::loadConstantInt32x4(const SimdConstant& v, FloatRegister dest)
{
    MOZ_ASSERT(v.type() == SimdConstant::Int32x4);
    if (maybeInlineInt32x4(v, dest))
        return;
    SimdData* i4 = getSimdData(v);
    if (!i4)
        return;
    MOZ_ASSERT(i4->type() == SimdConstant::Int32x4);
    masm.vmovdqa_mr(reinterpret_cast<const void*>(i4->uses.prev()), dest.code());
    i4->uses.setPrev(masm.size());
}
void
MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest)
{
    if (maybeInlineFloat(f, dest))
        return;
    Float* flt = getFloat(f);
    if (!flt)
        return;
    // See comment in loadConstantDouble
    JmpSrc j = masm.vmovss_ripr(dest.encoding());
    JmpSrc prev = JmpSrc(flt->uses.use(j.offset()));
    masm.setNextJump(j, prev);
}
Example #28
void
MacroAssemblerX64::loadConstantFloat32x4(const SimdConstant& v, FloatRegister dest)
{
    MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
    if (maybeInlineFloat32x4(v, dest))
        return;
    SimdData* val = getSimdData(v);
    if (!val)
        return;
    MOZ_ASSERT(val->type() == SimdConstant::Float32x4);
    JmpSrc j = masm.vmovaps_ripr(dest.encoding());
    propagateOOM(val->uses.append(CodeOffset(j.offset())));
}
void
MacroAssemblerX64::loadConstantFloat32x4(const SimdConstant& v, FloatRegister dest)
{
    MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
    if (maybeInlineFloat32x4(v, dest))
        return;
    SimdData* val = getSimdData(v);
    if (!val)
        return;
    MOZ_ASSERT(val->type() == SimdConstant::Float32x4);
    JmpSrc j = masm.vmovaps_ripr(dest.encoding());
    JmpSrc prev = JmpSrc(val->uses.use(j.offset()));
    masm.setNextJump(j, prev);
}
void
MacroAssembler::PushRegsInMask(LiveRegisterSet set)
{
    FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
    unsigned numFpu = fpuSet.size();
    int32_t diffF = fpuSet.getPushSizeInBytes();
    int32_t diffG = set.gprs().size() * sizeof(intptr_t);

    // On x86, always use push to push the integer registers, as it's fast
    // on modern hardware and it's a small instruction.
    for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
        diffG -= sizeof(intptr_t);
        Push(*iter);
    }
    MOZ_ASSERT(diffG == 0);

    reserveStack(diffF);
    for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
        FloatRegister reg = *iter;
        diffF -= reg.size();
        numFpu -= 1;
        Address spillAddress(StackPointer, diffF);
        if (reg.isDouble())
            storeDouble(reg, spillAddress);
        else if (reg.isSingle())
            storeFloat32(reg, spillAddress);
        else if (reg.isSimd128())
            storeUnalignedSimd128Float(reg, spillAddress);
        else
            MOZ_CRASH("Unknown register type.");
    }
    MOZ_ASSERT(numFpu == 0);
    // x64 padding to keep the stack aligned on uintptr_t. Keep in sync with
    // getPushSizeInBytes.
    diffF -= diffF % sizeof(uintptr_t);
    MOZ_ASSERT(diffF == 0);
}