Example #1
void LinearScan::removeUnusedSpillsAux(Trace* trace) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end(); ) {
    IRInstruction::Iterator next = it; ++next;
    IRInstruction* inst = *it;
    if (inst->getOpcode() == Spill && inst->getDst()->getUseCount() == 0) {
      instList.erase(it);
      SSATmp* src = inst->getSrc(0);
      if (src->decUseCount() == 0) {
        Opcode srcOpc = src->getInstruction()->getOpcode();
        // Not all instructions are able to take noreg as their dest
        // reg.  We pick LdLoc and IncRef because they occur often.
        if (srcOpc == IncRef || srcOpc == LdLoc) {
          for (int locIndex = 0;
               locIndex < src->numNeededRegs();
               ++locIndex) {
            src->setReg(InvalidReg, locIndex);
          }
        }
      }
    }
    it = next;
  }
}
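
Example #1 deletes Spill instructions while walking the list, so it captures the successor iterator before calling erase. Below is a minimal standalone sketch of that erase-while-iterating pattern on a plain std::list (C++ standard library only, not the HHVM types used above):

#include <cassert>
#include <list>

int main() {
  std::list<int> vals = {1, 0, 2, 0, 3};
  for (auto it = vals.begin(); it != vals.end(); ) {
    auto next = it; ++next;          // remember the successor first
    if (*it == 0) vals.erase(it);    // erase() only invalidates 'it'
    it = next;                       // safe: 'next' still points into the list
  }
  assert((vals == std::list<int>{1, 2, 3}));
}
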
Example #2
void LinearScan::preAllocSpillLoc(uint32_t numSpillLocs) {
  for (Block* block : m_blocks) {
    for (IRInstruction& inst : *block) {
      if (inst.getOpcode() == Spill) {
        SSATmp* dst = inst.getDst();
        for (int index = 0; index < dst->numNeededRegs(); ++index) {
          assert(!dst->hasReg(index));
          if (dst->getSpillInfo(index).type() == SpillInfo::Memory) {
            uint32_t spillLoc = dst->getSpillInfo(index).mem();
            // Native stack layout:
            // |               |
            // +---------------+
            // |               |  <-- spill[5..]
            // | pre allocated |  <-- spill[4]
            // |  (16 slots)   |  <-- spill[3]
            // +---------------+
            // |  return addr  |
            // +---------------+
            // |    extra      |  <-- spill[2]
            // |    spill      |  <-- spill[1]
            // |  locations    |  <-- spill[0]
            // +---------------+  <-- %rsp
            // If a spill location falls into the pre-allocated region, we
            // need to increase its index by 1 to avoid overwriting the
            // return address.
            if (spillLoc + NumPreAllocatedSpillLocs >= numSpillLocs) {
              dst->setSpillInfo(index, SpillInfo(spillLoc + 1));
            }
          }
        }
      }
    }
  }
}
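
A worked instance of the adjustment above, with hypothetical numbers: take NumPreAllocatedSpillLocs = 16 (the 16 slots in the diagram) and numSpillLocs = 19, so three extra slots sit below the return address. Slots 0..2 keep their index, while slots 3..18 satisfy spillLoc + 16 >= 19 and shift up to 4..19, skipping the return-address slot:

#include <cassert>
#include <cstdint>

// Standalone sketch of the index adjustment above; the constants are
// example values, not read from the HHVM build.
int main() {
  const uint32_t kPreAllocated = 16;
  const uint32_t numSpillLocs  = 19;
  auto adjust = [&](uint32_t spillLoc) {
    return spillLoc + kPreAllocated >= numSpillLocs ? spillLoc + 1 : spillLoc;
  };
  assert(adjust(2)  == 2);   // extra slot below the return address: unchanged
  assert(adjust(3)  == 4);   // first pre-allocated slot: skips the return addr
  assert(adjust(18) == 19);  // last pre-allocated slot
}
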
Example #3
void LinearScan::removeUnusedSpills() {
  for (SlotInfo& slot : m_slots) {
    IRInstruction* spill = slot.m_spillTmp->getInstruction();
    if (spill->getDst()->getUseCount() == 0) {
      Block* block = spill->getBlock();
      block->erase(block->iteratorTo(spill));
      SSATmp* src = spill->getSrc(0);
      if (src->decUseCount() == 0) {
        Opcode srcOpc = src->getInstruction()->getOpcode();
        // Not all instructions are able to take noreg as their dest
        // reg.  We pick LdLoc and IncRef because they occur often.
        if (srcOpc == IncRef || srcOpc == LdLoc) {
          for (int i = 0, n = src->numNeededRegs(); i < n; ++i) {
            src->setReg(InvalidReg, i);
          }
        }
      }
    }
  }
}
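
Example #3 is the Block-based counterpart of Example #1, and the bookkeeping is the same: erasing a dead Spill removes one use of its source, and when that was the last use, the source's register assignment can be dropped. A minimal sketch of that use-count protocol, using a hypothetical UseCounted stand-in rather than the real SSATmp API:

#include <cassert>

// Hypothetical stand-in: decUseCount() returns the count after decrementing,
// so "== 0" means the erased Spill was the last consumer of the value.
struct UseCounted {
  int uses;
  int reg;                       // -1 plays the role of InvalidReg
  int decUseCount() { return --uses; }
};

int main() {
  UseCounted src{/*uses=*/1, /*reg=*/5};
  if (src.decUseCount() == 0) {  // the dead Spill was the only consumer
    src.reg = -1;                // analogous to setReg(InvalidReg, i)
  }
  assert(src.uses == 0 && src.reg == -1);
}
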
Example #4
void LinearScan::preAllocSpillLocAux(Trace* trace, uint32 numSpillLocs) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end();
       ++it) {
    IRInstruction* inst = *it;
    if (inst->getOpcode() == Spill) {
      SSATmp* dst = inst->getDst();
      for (int index = 0; index < dst->numNeededRegs(); ++index) {
        ASSERT(!dst->hasReg(index));
        if (dst->getSpillInfo(index).type() == SpillInfo::Memory) {
          uint32 spillLoc = dst->getSpillInfo(index).mem();
          // Native stack layout:
          // |               |
          // +---------------+
          // |               |  <-- spill[5..]
          // | pre allocated |  <-- spill[4]
          // |  (16 slots)   |  <-- spill[3]
          // +---------------+
          // |  return addr  |
          // +---------------+
          // |    extra      |  <-- spill[2]
          // |    spill      |  <-- spill[1]
          // |  locations    |  <-- spill[0]
          // +---------------+  <-- %rsp
          // If a spill location falls into the pre-allocated region, we
          // need to increase its index by 1 to avoid overwriting the
          // return address.
          if (spillLoc + NumPreAllocatedSpillLocs >= numSpillLocs) {
            dst->setSpillInfo(index, SpillInfo(spillLoc + 1));
          }
        }
      }
    }
  }
}
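
The safety argument behind the "+ 1" is that, counting spill-sized slots up from %rsp, the return address occupies the slot immediately above the extra spill locations, so every adjusted index must skip it. The sketch below checks that invariant over a range of hypothetical numSpillLocs values; it is a standalone check, not HHVM code:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kPreAllocated = 16;   // matches the 16 slots in the diagram
  for (uint32_t numSpillLocs = 1; numSpillLocs <= 64; ++numSpillLocs) {
    // Number of extra slots below the return address, counting from %rsp.
    uint32_t extra = numSpillLocs > kPreAllocated ? numSpillLocs - kPreAllocated
                                                  : 0;
    uint32_t returnAddrSlot = extra;   // raw slot index of the return address
    for (uint32_t loc = 0; loc < numSpillLocs; ++loc) {
      uint32_t raw = loc + kPreAllocated >= numSpillLocs ? loc + 1 : loc;
      assert(raw != returnAddrSlot);   // no spill overwrites the return addr
    }
  }
}
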
Example #5
uint32 LinearScan::assignSpillLocAux(Trace* trace,
                                     uint32 nextSpillLoc,
                                     uint32 nextMmxReg) {
  IRInstruction::List& instructionList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instructionList.begin();
       it != instructionList.end();
       ++it) {
    IRInstruction* inst = *it;
    if (getNextNative() == inst) {
      ASSERT(!m_natives.empty());
      m_natives.pop_front();
    }
    if (inst->getOpcode() == Spill) {
      SSATmp* dst = inst->getDst();
      SSATmp* src = inst->getSrc(0);
      for (int locIndex = 0;
           locIndex < src->numNeededRegs();
           ++locIndex) {
        if (dst->getLastUseId() <= getNextNativeId()) {
          TRACE(3, "[counter] 1 spill a tmp that does not span native\n");
        } else {
          TRACE(3, "[counter] 1 spill a tmp that spans native\n");
        }

        const bool allowMmxSpill = RuntimeOption::EvalHHIREnableMmx &&
          // The live range of the spill slot doesn't span native calls,
          // and we still have free MMX registers.
          dst->getLastUseId() <= getNextNativeId() &&
          nextMmxReg < (uint32)NumMmxRegs;

        dst->setSpillInfo(locIndex,
          allowMmxSpill
            ? SpillInfo(RegNumber(nextMmxReg++))
            : SpillInfo(nextSpillLoc++)
        );
        if (allowMmxSpill) {
          TRACE(3, "[counter] 1 spill to mmx\n");
        } else {
          TRACE(3, "[counter] 1 spill to memory\n");
        }
      }
    }
    if (inst->getOpcode() == Reload) {
      SSATmp* src = inst->getSrc(0);
      for (int locIndex = 0;
           locIndex < src->numNeededRegs();
           ++locIndex) {
        if (src->getSpillInfo(locIndex).type() == SpillInfo::MMX) {
          TRACE(3, "[counter] reload from mmx\n");
        } else {
          TRACE(3, "[counter] reload from memory\n");
        }
      }
    }
    if (inst->isControlFlowInstruction()) {
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        nextSpillLoc = assignSpillLocAux(label->getTrace(),
                                         nextSpillLoc,
                                         nextMmxReg);
      }
    }
  }
  return nextSpillLoc;
}
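
The MMX-versus-memory choice in Examples #5 and #6 reduces to one predicate: spill to an MMX register only if the option is enabled, the value's live range does not span a native call, and a free MMX register remains. Here is a sketch of that predicate with plain parameters standing in for the RuntimeOption, SSATmp, and native-call queries:

#include <cassert>
#include <cstdint>

// Hedged sketch of the spill-destination decision; the real code reads these
// values from RuntimeOption, the SSATmp, and the native-call worklist.
bool allowMmxSpill(bool mmxEnabled,
                   uint32_t lastUseId,      // last use of the spilled dst
                   uint32_t nextNativeId,   // id of the next native call
                   uint32_t nextMmxReg,     // next free MMX register number
                   uint32_t numMmxRegs) {
  return mmxEnabled &&
         lastUseId <= nextNativeId &&       // live range doesn't span a native
         nextMmxReg < numMmxRegs;           // an MMX register is still free
}

int main() {
  assert(allowMmxSpill(true, 10, 20, 0, 8));   // fits in MMX
  assert(!allowMmxSpill(true, 30, 20, 0, 8));  // spans a native call: memory
  assert(!allowMmxSpill(true, 10, 20, 8, 8));  // MMX registers exhausted
  assert(!allowMmxSpill(false, 10, 20, 0, 8)); // option disabled
}
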
Example #6
// Assign spill location numbers to Spill/Reload.
uint32_t LinearScan::assignSpillLoc() {
  uint32_t nextSpillLoc = 0;
  uint32_t nextMmxReg = 0;

  // Visit blocks in reverse postorder and instructions in forward order,
  // assigning a spill slot id or MMX register number to each Spill.
  // We don't reuse slot ids or MMX registers, but both could be reused,
  // either by visiting the dominator tree in preorder or by analyzing
  // lifetimes and reusing ids/registers between non-conflicting spills.

  for (Block* block : m_blocks) {
    for (IRInstruction& inst : *block) {
      if (getNextNative() == &inst) {
        assert(!m_natives.empty());
        m_natives.pop_front();
      }
      if (inst.getOpcode() == Spill) {
        SSATmp* dst = inst.getDst();
        SSATmp* src = inst.getSrc(0);
        for (int locIndex = 0;
             locIndex < src->numNeededRegs();
             ++locIndex) {
          if (dst->getLastUseId() <= getNextNativeId()) {
            TRACE(3, "[counter] 1 spill a tmp that does not span native\n");
          } else {
            TRACE(3, "[counter] 1 spill a tmp that spans native\n");
          }

          const bool allowMmxSpill = RuntimeOption::EvalHHIREnableMmx &&
            // The live range of the spill slot doesn't span native calls,
            // and we still have free MMX registers.
            dst->getLastUseId() <= getNextNativeId() &&
            nextMmxReg < (uint32_t)NumMmxRegs;

          dst->setSpillInfo(locIndex,
            allowMmxSpill
              ? SpillInfo(RegNumber(nextMmxReg++))
              : SpillInfo(nextSpillLoc++)
          );
          if (allowMmxSpill) {
            TRACE(3, "[counter] 1 spill to mmx\n");
          } else {
            TRACE(3, "[counter] 1 spill to memory\n");
          }
        }
      }
      if (inst.getOpcode() == Reload) {
        SSATmp* src = inst.getSrc(0);
        for (int locIndex = 0;
             locIndex < src->numNeededRegs();
             ++locIndex) {
          if (src->getSpillInfo(locIndex).type() == SpillInfo::MMX) {
            TRACE(3, "[counter] reload from mmx\n");
          } else {
            TRACE(3, "[counter] reload from memory\n");
          }
        }
      }
    }
  }
  return nextSpillLoc;
}
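
Since each needed register of a Spill dst gets its own location, a two-register tmp that cannot go to MMX simply consumes two consecutive memory slots from the nextSpillLoc counter. A small standalone illustration of that arithmetic, with plain integers in place of the SpillInfo bookkeeping:

#include <cassert>
#include <cstdint>

int main() {
  // Two needed registers, MMX not allowed: each loop iteration above takes
  // the next memory slot, so the tmp ends up with consecutive locations.
  uint32_t nextSpillLoc = 4;
  uint32_t assigned[2];
  for (int locIndex = 0; locIndex < 2; ++locIndex) {
    assigned[locIndex] = nextSpillLoc++;   // mirrors SpillInfo(nextSpillLoc++)
  }
  assert(assigned[0] == 4 && assigned[1] == 5);
  assert(nextSpillLoc == 6);               // returned as the next free slot
}
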
Example #7
// Assign spill location numbers to Spill/Reload.
uint32_t LinearScan::assignSpillLoc() {
  uint32_t maxSpillLoc = 0;
  SpillLocManager spillLocManager(0);

  // Visit blocks in reverse postorder and instructions in forward order,
  // assigning a spill slot id to each Spill. We don't reuse slot ids,
  // but they could be reused, either by visiting the dominator tree in
  // preorder or by analyzing lifetimes and reusing ids between
  // non-conflicting spills.
  // As an intermediate step, we re-use ids for exit traces.

  smart::map<Block*, uint32_t> exitLocMap;

  for (Block* block : m_blocks) {
    auto it = exitLocMap.find(block);
    if (it != exitLocMap.end()) {
      spillLocManager.setNextSpillLoc(it->second);
    }
    for (IRInstruction& inst : *block) {
      if (nextNative() == &inst) {
        assert(!m_natives.empty());
        m_natives.pop_front();
      }
      if (inst.op() == Spill) {
        SSATmp* dst = inst.dst();
        SSATmp* src = inst.src(0);
        for (int locIndex = 0;
             locIndex < src->numNeededRegs();
             ++locIndex) {
          if (!crossNativeCall(dst)) {
            TRACE(3, "[counter] 1 spill a tmp that does not span native\n");
          } else {
            TRACE(3, "[counter] 1 spill a tmp that spans native\n");
          }

          // SSATmps with 2 regs are aligned to 16 bytes because they may be
          // allocated to XMM registers, either before or after being reloaded
          if (src->numNeededRegs() == 2 && locIndex == 0) {
            spillLocManager.alignTo16Bytes();
          }
          SpillInfo spillLoc = spillLocManager.allocSpillLoc();
          m_allocInfo[dst].setSpillInfo(locIndex, spillLoc);

          if (m_allocInfo[src].isFullXMM()) {
            // Allocate the next, consecutive spill slot for this SSATmp too
            assert(locIndex == 0);
            assert(spillLoc.offset() % 16 == 0);
            spillLoc = spillLocManager.allocSpillLoc();
            m_allocInfo[dst].setSpillInfo(locIndex + 1, spillLoc);
            break;
          }
        }
      }
      if (inst.op() == Reload) {
        SSATmp* src = inst.src(0);
        for (int locIndex = 0;
             locIndex < src->numNeededRegs();
             ++locIndex) {
          TRACE(3, "[counter] reload\n");
        }
      }
    }
    uint32_t totalSpillLocs = spillLocManager.getNumSpillLocs();
    if (totalSpillLocs > maxSpillLoc) maxSpillLoc = totalSpillLocs;
    if (block->trace()->isMain()) {
      if (Block* taken = block->taken()) {
        if (!taken->trace()->isMain()) {
          exitLocMap[taken] = totalSpillLocs;
        }
      }
    }
  }
  return maxSpillLoc;
}
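
Example #7 routes all slot numbering through a SpillLocManager whose implementation is not shown here. The following is a hedged sketch of what such a manager could look like, reconstructed only from the calls used above (setNextSpillLoc, alignTo16Bytes, allocSpillLoc, getNumSpillLocs) and assuming one slot per 8-byte word; the real HHVM class may differ:

#include <cassert>
#include <cstdint>

// Hypothetical sketch, not the HHVM implementation: slots are numbered
// sequentially, alignTo16Bytes() rounds the next slot up to an even index so
// a two-slot (XMM-sized) value starts on a 16-byte boundary, and the byte
// offset is simply slot * 8.
struct SpillInfoSketch {
  explicit SpillInfoSketch(uint32_t slot) : m_slot(slot) {}
  uint32_t offset() const { return m_slot * 8; }
  uint32_t m_slot;
};

struct SpillLocManagerSketch {
  explicit SpillLocManagerSketch(uint32_t next) : m_next(next) {}
  void setNextSpillLoc(uint32_t next) { m_next = next; }
  void alignTo16Bytes()               { if (m_next % 2) ++m_next; }
  SpillInfoSketch allocSpillLoc()     { return SpillInfoSketch(m_next++); }
  uint32_t getNumSpillLocs() const    { return m_next; }
  uint32_t m_next;
};

int main() {
  SpillLocManagerSketch mgr(0);
  mgr.allocSpillLoc();                  // slot 0
  mgr.alignTo16Bytes();                 // a 2-slot value must start aligned
  SpillInfoSketch lo = mgr.allocSpillLoc();
  SpillInfoSketch hi = mgr.allocSpillLoc();
  assert(lo.offset() % 16 == 0);        // as asserted in Example #7
  assert(hi.m_slot == lo.m_slot + 1);   // consecutive slot for the second half
  assert(mgr.getNumSpillLocs() == 4);   // slots 0..3, slot 1 left as padding
}
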
Example #8
void LinearScan::allocRegToInstruction(InstructionList::iterator it) {
  IRInstruction* inst = &*it;
  dumpIR<IRInstruction, kExtraLevel>(inst, "allocating to instruction");

  // Reload all source operands if necessary.
  // Mark registers as unpinned.
  for (int regNo = 0; regNo < kNumRegs; ++regNo) {
    m_regs[regNo].m_pinned = false;
  }
  smart::vector<bool> needsReloading(inst->numSrcs(), true);
  for (uint32_t i = 0; i < inst->numSrcs(); ++i) {
    SSATmp* tmp = inst->src(i);
    int32_t slotId = m_spillSlots[tmp];
    if (slotId == -1) {
      needsReloading[i] = false;
    } else if ((tmp = m_slots[slotId].latestReload)) {
      needsReloading[i] = false;
      inst->setSrc(i, tmp);
    }
    if (!needsReloading[i]) {
      for (int i = 0, n = m_allocInfo[tmp].numAllocatedRegs(); i < n; ++i) {
        m_regs[int(m_allocInfo[tmp].reg(i))].m_pinned = true;
      }
    }
  }
  for (uint32_t i = 0; i < inst->numSrcs(); ++i) {
    if (needsReloading[i]) {
      SSATmp* tmp = inst->src(i);
      int32_t slotId = m_spillSlots[tmp];
      // <tmp> is spilled, and not reloaded.
      // Therefore, we need to reload the value into a new SSATmp.

      // Insert the Reload instruction.
      SSATmp* spillTmp = m_slots[slotId].spillTmp;
      IRInstruction* reload = m_unit.gen(Reload, inst->marker(),
                                              spillTmp);
      inst->block()->insert(it, reload);

      // Create <reloadTmp> which inherits <tmp>'s slot ID and
      // <spillTmp>'s last use ID.
      // Replace <tmp> with <reloadTmp> in <inst>.
      SSATmp* reloadTmp = reload->dst();
      m_uses[reloadTmp].lastUse = m_uses[spillTmp].lastUse;
      m_spillSlots[reloadTmp] = slotId;
      inst->setSrc(i, reloadTmp);
      // reloadTmp and tmp share the same type.  Since it was spilled, it
      // must be using its entire needed-count of registers.
      assert(reloadTmp->type() == tmp->type());
      for (int locIndex = 0; locIndex < tmp->numNeededRegs();) {
        locIndex += allocRegToTmp(reloadTmp, locIndex);
      }
      // Remember this reload tmp in case we can reuse it in later blocks.
      m_slots[slotId].latestReload = reloadTmp;
      dumpIR<IRInstruction, kExtraLevel>(reload, "created reload");
    }
  }

  freeRegsAtId(m_linear[inst]);
  // Update next native.
  if (nextNative() == inst) {
    assert(!m_natives.empty());
    m_natives.pop_front();
    computePreColoringHint();
  }

  Range<SSATmp*> dsts = inst->dsts();
  if (dsts.empty()) return;

  Opcode opc = inst->op();
  if (opc == DefMIStateBase) {
    assert(dsts[0].isA(Type::PtrToCell));
    assignRegToTmp(&m_regs[int(rsp)], &dsts[0], 0);
    return;
  }

  for (SSATmp& dst : dsts) {
    for (int numAllocated = 0, n = dst.numNeededRegs(); numAllocated < n; ) {
      // LdRaw, loading a generator's embedded AR, is the only time we have a
      // pointer to an AR that is not in rVmFp.
      const bool abnormalFramePtr =
        (opc == LdRaw &&
          inst->src(1)->getValInt() == RawMemSlot::ContARPtr);

      // Note that the point of StashGeneratorSP is to save a StkPtr
      // somewhere other than rVmSp.  (TODO(#2288359): make rbx not
      // special.)
      const bool abnormalStkPtr = opc == StashGeneratorSP;

      if (!abnormalStkPtr && dst.isA(Type::StkPtr)) {
        assert(opc == DefSP ||
               opc == ReDefSP ||
               opc == ReDefGeneratorSP ||
               opc == PassSP ||
               opc == DefInlineSP ||
               opc == Call ||
               opc == CallArray ||
               opc == SpillStack ||
               opc == SpillFrame ||
               opc == CufIterSpillFrame ||
               opc == ExceptionBarrier ||
               opc == RetAdjustStack ||
               opc == InterpOne ||
               opc == InterpOneCF ||
               opc == GenericRetDecRefs ||
               opc == CheckStk ||
               opc == GuardStk ||
               opc == AssertStk ||
               opc == CastStk ||
               opc == CoerceStk ||
               opc == SideExitGuardStk  ||
               MInstrEffects::supported(opc));
        assignRegToTmp(&m_regs[int(rVmSp)], &dst, 0);
        numAllocated++;
        continue;
      }
      if (!abnormalFramePtr && dst.isA(Type::FramePtr)) {
        assignRegToTmp(&m_regs[int(rVmFp)], &dst, 0);
        numAllocated++;
        continue;
      }

      // Generally speaking, StkPtrs are pretty special due to
      // tracelet ABI registers. Keep track here of the allowed uses
      // that don't use the above allocation.
      assert(!dst.isA(Type::FramePtr) || abnormalFramePtr);
      assert(!dst.isA(Type::StkPtr) || abnormalStkPtr);

      if (!RuntimeOption::EvalHHIRDeadCodeElim || m_uses[dst].lastUse != 0) {
        numAllocated += allocRegToTmp(&dst, numAllocated);
      } else {
        numAllocated++;
      }
    }
  }
  if (!RuntimeOption::EvalHHIRDeadCodeElim) {
    // if any outputs were unused, free regs now.
    freeRegsAtId(m_linear[inst]);
  }
}
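
The reload handling above caches, per spill slot, the most recent reload tmp (latestReload) so later consumers can reuse it instead of emitting another Reload. A minimal sketch of that caching idea with a plain map and hypothetical names:

#include <cassert>
#include <string>
#include <unordered_map>

// Hypothetical sketch of latestReload reuse: the first consumer of a spilled
// value materializes a reload and caches it under the slot id; later
// consumers of the same slot reuse the cached value instead of reloading.
struct ReloadCache {
  std::unordered_map<int, std::string> latestReload;  // slotId -> reloaded tmp
  int reloadsEmitted = 0;

  std::string use(int slotId) {
    auto it = latestReload.find(slotId);
    if (it != latestReload.end()) return it->second;   // reuse existing reload
    std::string tmp = "reload_t" + std::to_string(slotId);
    ++reloadsEmitted;                                   // would insert a Reload
    latestReload[slotId] = tmp;
    return tmp;
  }
};

int main() {
  ReloadCache cache;
  std::string a = cache.use(3);   // first use: emits a Reload
  std::string b = cache.use(3);   // second use: reuses the cached tmp
  assert(a == b && cache.reloadsEmitted == 1);
}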