Example #1
/*
 * Insert a DbgAssertRefCount instruction after each place we produce
 * a refcounted value.  The value must be something we can safely dereference
 * to check the _count field.
 */
static void insertRefCountAsserts(IRInstruction& inst, IRUnit& unit) {
  for (SSATmp& dst : inst.dsts()) {
    Type t = dst.type();
    if (t <= (Type::Counted | Type::StaticStr | Type::StaticArr)) {
      insertAfter(&inst, unit.gen(DbgAssertRefCount, inst.marker(), &dst));
    }
  }
}
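
The guard t <= (Type::Counted | Type::StaticStr | Type::StaticArr) asks whether every value dst might hold is either refcounted or a static string/array, i.e. whether t is a subtype of the union on the right; only then is dereferencing the _count field safe. Below is a minimal, self-contained sketch of that bitset-lattice idiom. The Ty type is a hypothetical stand-in, not HHVM's actual Type class, which is far richer.

#include <cstdint>
#include <cassert>

// Each primitive type owns one bit; operator| builds a union type, and
// t <= u is a subset test on the bits (every bit of t is also set in u).
struct Ty {
  uint64_t bits;
  constexpr Ty operator|(Ty o) const { return Ty{bits | o.bits}; }
  constexpr bool operator<=(Ty o) const { return (bits & ~o.bits) == 0; }
};

constexpr Ty Counted  {1u << 0};
constexpr Ty StaticStr{1u << 1};
constexpr Ty StaticArr{1u << 2};
constexpr Ty Int      {1u << 3};

int main() {
  Ty t = Counted | StaticStr;                           // a union type
  assert(t <= (Counted | StaticStr | StaticArr));       // subtype holds
  assert(!(Int <= (Counted | StaticStr | StaticArr)));  // Int is excluded
}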
Example #2
/*
 * Insert a DbgAssertRefCount instruction after each place we produce
 * a refcounted value.  The value must be something we can safely dereference
 * to check the _count field.
 */
static void insertRefCountAsserts(IRInstruction& inst, IRFactory& factory) {
  for (SSATmp& dst : inst.dsts()) {
    Type t = dst.type();
    if (t.subtypeOf(Type::Counted | Type::StaticStr | Type::StaticArr)) {
      insertAfter(&inst, factory.gen(DbgAssertRefCount, inst.marker(), &dst));
    }
  }
}
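
Examples #1 and #2 appear to be the same pass at two revisions of HHVM: the IR-construction object was renamed (IRFactory became IRUnit), and the subtype test t.subtypeOf(u) gained the shorthand spelling t <= u. Both rely on an insertAfter() helper to splice the generated DbgAssertRefCount in directly behind the defining instruction. Here is a minimal sketch of what such a helper must do; Instr and the std::list layout are illustrative assumptions, not HHVM's actual intrusive instruction list.

#include <list>
#include <iterator>

struct Instr { int op; };

// Splice a freshly generated instruction into the block's instruction
// list right behind the given position.
void insertAfter(std::list<Instr>& block,
                 std::list<Instr>::iterator pos,
                 const Instr& newInst) {
  // std::list::insert inserts *before* the iterator, hence std::next.
  block.insert(std::next(pos), newInst);
}

int main() {
  std::list<Instr> block{{1}, {3}};
  insertAfter(block, block.begin(), Instr{2});  // block is now 1, 2, 3
}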
Example #3
void LinearScan::allocRegToInstruction(InstructionList::iterator it) {
  IRInstruction* inst = &*it;
  dumpIR<IRInstruction, kExtraLevel>(inst, "allocating to instruction");

  // Reload source operands from their spill slots where necessary.
  // Start by marking every register as unpinned.
  for (int regNo = 0; regNo < kNumRegs; ++regNo) {
    m_regs[regNo].m_pinned = false;
  }
  smart::vector<bool> needsReloading(inst->numSrcs(), true);
  for (uint32_t i = 0; i < inst->numSrcs(); ++i) {
    SSATmp* tmp = inst->src(i);
    int32_t slotId = m_spillSlots[tmp];
    if (slotId == -1) {
      needsReloading[i] = false;
    } else if ((tmp = m_slots[slotId].latestReload)) {
      needsReloading[i] = false;
      inst->setSrc(i, tmp);
    }
    if (!needsReloading[i]) {
      for (int locIndex = 0, n = m_allocInfo[tmp].numAllocatedRegs();
           locIndex < n; ++locIndex) {
        m_regs[int(m_allocInfo[tmp].reg(locIndex))].m_pinned = true;
      }
    }
  }
  for (uint32_t i = 0; i < inst->numSrcs(); ++i) {
    if (needsReloading[i]) {
      SSATmp* tmp = inst->src(i);
      int32_t slotId = m_spillSlots[tmp];
      // <tmp> is spilled and has not been reloaded yet, so we need to
      // reload the value into a new SSATmp.

      // Insert the Reload instruction.
      SSATmp* spillTmp = m_slots[slotId].spillTmp;
      IRInstruction* reload = m_unit.gen(Reload, inst->marker(), spillTmp);
      inst->block()->insert(it, reload);

      // Create <reloadTmp> which inherits <tmp>'s slot ID and
      // <spillTmp>'s last use ID.
      // Replace <tmp> with <reloadTmp> in <inst>.
      SSATmp* reloadTmp = reload->dst();
      m_uses[reloadTmp].lastUse = m_uses[spillTmp].lastUse;
      m_spillSlots[reloadTmp] = slotId;
      inst->setSrc(i, reloadTmp);
      // reloadTmp and tmp share the same type.  Since it was spilled, it
      // must be using its entire needed-count of registers.
      assert(reloadTmp->type() == tmp->type());
      for (int locIndex = 0; locIndex < tmp->numNeededRegs();) {
        locIndex += allocRegToTmp(reloadTmp, locIndex);
      }
      // Remember this reload tmp in case we can reuse it in later blocks.
      m_slots[slotId].latestReload = reloadTmp;
      dumpIR<IRInstruction, kExtraLevel>(reload, "created reload");
    }
  }

  freeRegsAtId(m_linear[inst]);
  // Update next native.
  if (nextNative() == inst) {
    assert(!m_natives.empty());
    m_natives.pop_front();
    computePreColoringHint();
  }

  Range<SSATmp*> dsts = inst->dsts();
  if (dsts.empty()) return;

  Opcode opc = inst->op();
  if (opc == DefMIStateBase) {
    assert(dsts[0].isA(Type::PtrToCell));
    assignRegToTmp(&m_regs[int(rsp)], &dsts[0], 0);
    return;
  }

  for (SSATmp& dst : dsts) {
    for (int numAllocated = 0, n = dst.numNeededRegs(); numAllocated < n; ) {
      // LdRaw, loading a generator's embedded AR, is the only time we have a
      // pointer to an AR that is not in rVmFp.
      const bool abnormalFramePtr =
        (opc == LdRaw &&
          inst->src(1)->getValInt() == RawMemSlot::ContARPtr);

      // Note that the point of StashGeneratorSP is to save a StkPtr
      // somewhere other than rVmSp.  (TODO(#2288359): make rbx not
      // special.)
      const bool abnormalStkPtr = opc == StashGeneratorSP;

      if (!abnormalStkPtr && dst.isA(Type::StkPtr)) {
        assert(opc == DefSP ||
               opc == ReDefSP ||
               opc == ReDefGeneratorSP ||
               opc == PassSP ||
               opc == DefInlineSP ||
               opc == Call ||
               opc == CallArray ||
               opc == SpillStack ||
               opc == SpillFrame ||
               opc == CufIterSpillFrame ||
               opc == ExceptionBarrier ||
               opc == RetAdjustStack ||
               opc == InterpOne ||
               opc == InterpOneCF ||
               opc == GenericRetDecRefs ||
               opc == CheckStk ||
               opc == GuardStk ||
               opc == AssertStk ||
               opc == CastStk ||
               opc == CoerceStk ||
               opc == SideExitGuardStk ||
               MInstrEffects::supported(opc));
        assignRegToTmp(&m_regs[int(rVmSp)], &dst, 0);
        numAllocated++;
        continue;
      }
      if (!abnormalFramePtr && dst.isA(Type::FramePtr)) {
        assignRegToTmp(&m_regs[int(rVmFp)], &dst, 0);
        numAllocated++;
        continue;
      }

      // Generally speaking, StkPtrs are special because of the tracelet
      // ABI registers. The asserts below catch any producer that should
      // have gone through the fixed-register assignment above.
      assert(!dst.isA(Type::FramePtr) || abnormalFramePtr);
      assert(!dst.isA(Type::StkPtr) || abnormalStkPtr);

      if (!RuntimeOption::EvalHHIRDeadCodeElim || m_uses[dst].lastUse != 0) {
        numAllocated += allocRegToTmp(&dst, numAllocated);
      } else {
        numAllocated++;
      }
    }
  }
  if (!RuntimeOption::EvalHHIRDeadCodeElim) {
    // If any outputs were unused, free their registers now.
    freeRegsAtId(m_linear[inst]);
  }
}
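
The source-operand handling in allocRegToInstruction is deliberately two-pass: the first loop decides which sources still need a reload and pins the registers of sources that are already available, so that the second loop, which materializes the reloads, cannot allocate those registers out from under them. Below is a condensed sketch of that shape, with hypothetical Src and state types standing in for SSATmp and the allocator bookkeeping.

#include <vector>
#include <cstdint>

// spillSlot == -1 means the value was never spilled; reloaded means an
// earlier reload of its slot is still live and can be reused.
struct Src { int32_t spillSlot; bool reloaded; int reg; };

void allocSources(std::vector<Src>& srcs, std::vector<bool>& pinned) {
  // Pass 1: find sources that are already in registers and pin them.
  std::vector<bool> needsReloading(srcs.size(), true);
  for (size_t i = 0; i < srcs.size(); ++i) {
    if (srcs[i].spillSlot == -1 || srcs[i].reloaded) {
      needsReloading[i] = false;
      pinned[srcs[i].reg] = true;  // keep this register off-limits below
    }
  }
  // Pass 2: emit a Reload for everything else. The register chosen for a
  // reload can never be one holding another source, because those are
  // pinned; the reload is then cached so later blocks can reuse it.
  for (size_t i = 0; i < srcs.size(); ++i) {
    if (needsReloading[i]) {
      // gen(Reload, ...), assign an unpinned register, cache the reload.
    }
  }
}

int main() {
  std::vector<Src> srcs{{-1, false, 0}, {3, false, 1}};
  std::vector<bool> pinned(16, false);
  allocSources(srcs, pinned);  // pins reg 0; the second src needs a reload
}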