Example #1
void LinearScan::allocRegToTmp(SSATmp* ssaTmp, uint32_t index) {
  bool preferCallerSaved = true;
  if (RuntimeOption::EvalHHIREnableCalleeSavedOpt) {
    // Prefer caller-saved registers iff <ssaTmp> doesn't span a native call.
    preferCallerSaved = (ssaTmp->getLastUseId() <= getNextNativeId());
  }

  RegState* reg = NULL;
  if (!preferCallerSaved) {
    reg = getFreeReg(false);
    if (reg->isCallerSaved()) {
      // If we are out of callee-saved registers, fall into the logic of
      // assigning a caller-saved register.
      pushFreeReg(reg);
      // getFreeReg pins the reg; we need to unpin it here.
      reg->m_pinned = false;
      reg = NULL;
    }
  }
  if (reg == NULL && RuntimeOption::EvalHHIREnablePreColoring) {
    // Pre-colors ssaTmp if it's used as an argument of the next native call.
    // Search for the original tmp instead of <ssaTmp> itself, because
    // the pre-coloring hint is not aware of reloaded tmps.
    RegNumber targetRegNo =
      m_preColoringHint.getPreColoringReg(getOrigTmp(ssaTmp), index);
    if (targetRegNo != reg::noreg) {
      reg = getReg(&m_regs[int(targetRegNo)]);
    }
  }
  if (reg == NULL &&
      RuntimeOption::EvalHHIREnablePreColoring &&
      ssaTmp->getInstruction()->isNative()) {
    // Pre-colors ssaTmp if it's the return value of a native.
    ASSERT(index == 0);
    reg = getReg(&m_regs[int(rax)]);
  }
  if (reg == NULL) {
    // No pre-coloring for this tmp.
    // Pick a regular caller-saved reg.
    reg = getFreeReg(true);
  }

  ASSERT(reg);
  if (!preferCallerSaved && reg->isCallerSaved()) {
    // ssaTmp spans a native call, but we failed to find a free callee-saved
    // reg. We eagerly add a spill for ssaTmp and update its live range to
    // end at the next native, because we know we will have to spill it
    // there.
    // Setting the last use ID to the next native is conservative.
    // Setting it to the last use before the next native would be more precise,
    // but that would be more expensive to compute.
    if (ssaTmp->getSpillSlot() == -1) {
      createSpillSlot(ssaTmp);
    }
    ssaTmp->setLastUseId(getNextNativeId());
  }

  allocRegToTmp(reg, ssaTmp, index);
}
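
The caller/callee-saved preference above reduces to a single comparison of linear ids. Below is a minimal standalone sketch of that test; Interval, lastUseId, and nextNativeId are hypothetical stand-ins for the liveness data the allocator consults, not HHVM's API.

#include <cstdint>

// Hypothetical stand-in for a tmp's liveness info.
struct Interval {
  uint32_t lastUseId;  // linear position of the tmp's last use
};

// Caller-saved registers are clobbered by native helper calls, so a tmp
// is only safe in one if it dies before the next native call starts.
bool preferCallerSaved(const Interval& ival, uint32_t nextNativeId) {
  return ival.lastUseId <= nextNativeId;
}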
Example #2
bool checkRegisters(IRTrace* trace, const IRFactory& factory,
                    const RegAllocInfo& regs) {
  assert(checkCfg(trace, factory));

  auto blocks = rpoSortCfg(trace, factory);
  StateVector<Block, RegState> states(&factory, RegState());
  StateVector<Block, bool> reached(&factory, false);
  for (auto* block : blocks) {
    RegState state = states[block];
    for (IRInstruction& inst : *block) {
      for (SSATmp* src : inst.srcs()) {
        auto const &info = regs[src];
        if (!info.spilled() &&
            (info.reg(0) == Transl::rVmSp ||
             info.reg(0) == Transl::rVmFp)) {
          // hack - ignore rbx and rbp
          continue;
        }
        for (unsigned i = 0, n = info.numAllocatedRegs(); i < n; ++i) {
          assert(state.tmp(info, i) == src);
        }
      }
      for (SSATmp& dst : inst.dsts()) {
        auto const &info = regs[dst];
        for (unsigned i = 0, n = info.numAllocatedRegs(); i < n; ++i) {
          state.tmp(info, i) = &dst;
        }
      }
    }
    // State contains register/spill info at the current block's end.
    auto updateEdge = [&](Block* succ) {
      if (!reached[succ]) {
        states[succ] = state;
        reached[succ] = true;  // first edge into succ seeds its state
      } else {
        states[succ].merge(state);
      }
    };
    if (auto* next = block->next()) updateEdge(next);
    if (auto* taken = block->taken()) updateEdge(taken);
  }

  return true;
}
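
The checker depends on RegState acting as a reverse map from physical registers to the SSATmp they currently hold, with merge() keeping only the mappings every predecessor agrees on. Here is a minimal sketch of that merge semantics; RegStateSketch and its fields are illustrative stand-ins, not HHVM's RegState.

#include <array>
#include <cstddef>

struct SSATmp;

struct RegStateSketch {
  static constexpr std::size_t kNumRegs = 16;
  std::array<SSATmp*, kNumRegs> owner{};  // nullptr = unknown/free

  // At a join point a register provably holds a tmp only if it holds the
  // same tmp on every incoming edge; otherwise forget the mapping.
  void merge(const RegStateSketch& other) {
    for (std::size_t r = 0; r < kNumRegs; ++r) {
      if (owner[r] != other.owner[r]) owner[r] = nullptr;
    }
  }
};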
Example #3
void LinearScan::StateSave::restore(LinearScan* ls) {
  // Drop the derived lists; they're rebuilt below from the saved state.
  ls->m_allocatedRegs.clear();
  for (int i = 0; i < PhysReg::kNumTypes; i++) {
    ls->m_freeCalleeSaved[i].clear();
    ls->m_freeCallerSaved[i].clear();
  }

  // Copy back the saved per-register state and re-derive the free and
  // allocated lists from it.
  for (size_t i = 0; i < NumRegs; i++) {
    ls->m_regs[i] = m_regs[i];
    RegState* reg = &ls->m_regs[i];
    if (reg->isReserved()) continue;
    if (reg->isAllocated()) {
      SSATmp* tmp = reg->m_ssaTmp;
      for (int r = 0; r < ls->m_allocInfo[tmp].numAllocatedRegs(); r++) {
        if (ls->m_allocInfo[tmp].reg(r) == PhysReg(i)) {
          ls->assignRegToTmp(reg, tmp, r);
        }
      }
    } else {
      ls->pushFreeReg(reg);
    }
  }
}
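
restore() treats the saved m_regs array as the single source of truth and re-derives the free and allocated lists from it. The following self-contained sketch shows the same snapshot-and-rebuild pattern; all names here are illustrative, not HHVM's.

#include <vector>

struct Reg {
  bool reserved = false;
  void* tmp = nullptr;  // non-null means the register is allocated
};

struct Snapshot {
  std::vector<Reg> saved;

  void save(const std::vector<Reg>& live) { saved = live; }

  // Copy the array back, then rebuild the derived free/allocated lists,
  // mirroring what StateSave::restore does above.
  void restore(std::vector<Reg>& live,
               std::vector<Reg*>& freeList,
               std::vector<Reg*>& allocList) const {
    live = saved;
    freeList.clear();
    allocList.clear();
    for (auto& r : live) {
      if (r.reserved) continue;
      (r.tmp ? allocList : freeList).push_back(&r);
    }
  }
};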
Example #4
/*
 * Allocates a register to ssaTmp's index component (0 for value, 1 for type).
 * Returns the number of 64-bit register-space allocated.  This is normally 1,
 * but it's 2 when both the type and value need registers and they're allocated
 * together to one 128-bit XMM register.
 */
int LinearScan::allocRegToTmp(SSATmp* ssaTmp, uint32_t index) {
  bool preferCallerSaved = true;
  PhysReg::Type regType = getRegType(ssaTmp, index);
  FTRACE(6, "getRegType(SSATmp {}, {}) = {}\n", ssaTmp->id(),
         index, int(regType));
  assert(regType == PhysReg::GP || index == 0); // no type-only in XMM regs

  if (RuntimeOption::EvalHHIREnableCalleeSavedOpt) {
    preferCallerSaved = !crossNativeCall(ssaTmp);
  }

  RegState* reg = nullptr;
  if (!preferCallerSaved) {
    reg = getFreeReg(regType, false);
    if (reg->isCallerSaved()) {
      // If we are out of callee-saved registers, fall into the logic of
      // assigning a caller-saved register.
      pushFreeReg(reg);
      // getFreeReg pins the reg; we need to unpin it here.
      reg->m_pinned = false;
      reg = nullptr;
    }
  }
  if (reg == nullptr && RuntimeOption::EvalHHIREnablePreColoring) {
    // Pre-colors ssaTmp if it's used as an argument of the next native call.
    // Search for the original tmp instead of <ssaTmp> itself, because
    // the pre-coloring hint is not aware of reloaded tmps.
    SSATmp* orig = getOrigTmp(ssaTmp);
    RegNumber targetRegNo =
      m_preColoringHint.getPreColoringReg(orig, index);
    if (targetRegNo == reg::noreg) {
      targetRegNo = getJmpPreColor(orig, index, orig != ssaTmp);
    }
    if (targetRegNo == reg::noreg && ssaTmp->inst()->op() == AssertType) {
      targetRegNo = m_allocInfo[ssaTmp->inst()->src(0)].reg(index);
    }
    if (targetRegNo != reg::noreg) {
      reg = getReg(&m_regs[int(targetRegNo)]);
    }
  }
  if (reg == nullptr &&
      RuntimeOption::EvalHHIREnablePreColoring &&
      ssaTmp->inst()->isNative()) {
    // Pre-colors ssaTmp if it's the return value of a native.
    if (index == 0) {
      reg = getReg(&m_regs[int(rax)]);
    } else if (index == 1) {
      reg = getReg(&m_regs[int(rdx)]);
    } else {
      not_reached();
    }
  }
  if (reg == nullptr) {
    // No pre-coloring for this tmp.
    // Pick a regular caller-saved reg.
    reg = getFreeReg(regType, true);
  }

  assert(reg);
  if (!preferCallerSaved && reg->isCallerSaved()) {
    // ssaTmp spans a native call, but we failed to find a free callee-saved
    // reg. We eagerly add a spill for ssaTmp and update its live range to
    // end at the next native, because we know we will have to spill it
    // there.
    // Setting the last use ID to the next native is conservative.
    // Setting it to the last use before the next native would be more precise,
    // but that would be more expensive to compute.
    if (m_spillSlots[ssaTmp] == -1) {
      createSpillSlot(ssaTmp);
    }
    m_uses[ssaTmp].lastUse = nextNativeId();
  }

  assignRegToTmp(reg, ssaTmp, index);

  if (m_allocInfo[ssaTmp].isFullXMM()) {
    // Type and value allocated together to a single XMM register
    return 2;
  }
  return 1;
}
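
The return value lets a caller step through a tmp's components and have a full-XMM allocation consume both at once. A hedged sketch of such a driver loop follows; numNeededRegs() is an assumed helper for the number of 64-bit components the tmp requires, and the call is only meant to illustrate how the returned count is consumed.

// Hypothetical driver: advance the component index by the amount of
// register space each call allocated, so a full-XMM allocation (which
// returns 2) covers the value and type components in one step.
void allocAllComponents(LinearScan* ls, SSATmp* tmp) {
  for (uint32_t index = 0, n = tmp->numNeededRegs(); index < n; ) {
    index += ls->allocRegToTmp(tmp, index);
  }
}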
Example #5
File: check.cpp  Project: RdeWilde/hhvm
bool checkRegisters(const IRUnit& unit, const RegAllocInfo& regs) {
  assert(checkCfg(unit));
  auto blocks = rpoSortCfg(unit);
  StateVector<Block, RegState> states(unit, RegState());
  StateVector<Block, bool> reached(unit, false);
  for (auto* block : blocks) {
    RegState state = states[block];
    for (IRInstruction& inst : *block) {
      if (inst.op() == Jmp) continue; // handled by Shuffle
      auto& inst_regs = regs[inst];
      for (int i = 0, n = inst.numSrcs(); i < n; ++i) {
        auto const &rs = inst_regs.src(i);
        if (!rs.spilled()) {
          // hack - ignore rbx and rbp
          // Default to false so the flag is always initialized, even if
          // a new architecture is added to the switch below.
          bool ignore_frame_regs = false;

          switch (arch()) {
            case Arch::X64:
              ignore_frame_regs = (rs.reg(0) == X64::rVmSp ||
                                   rs.reg(0) == X64::rVmFp);
              break;
            case Arch::ARM:
              ignore_frame_regs = (rs.reg(0) == ARM::rVmSp ||
                                   rs.reg(0) == ARM::rVmFp);
              break;
          }
          if (ignore_frame_regs) continue;
        }
        DEBUG_ONLY auto src = inst.src(i);
        assert(rs.numWords() == src->numWords() ||
               (src->isConst() && rs.numWords() == 0));
        DEBUG_ONLY auto allocated = rs.numAllocated();
        if (allocated == 2) {
          if (rs.spilled()) {
            assert(rs.slot(0) != rs.slot(1));
          } else {
            assert(rs.reg(0) != rs.reg(1));
          }
        }
        for (unsigned j = 0, m = rs.numAllocated(); j < m; ++j) {
          assert(state.tmp(rs, j) == src);
        }
      }
      auto update = [&](SSATmp* tmp, const PhysLoc& loc) {
        for (unsigned i = 0, n = loc.numAllocated(); i < n; ++i) {
          state.tmp(loc, i) = tmp;
        }
      };
      if (inst.op() == Shuffle) {
        checkShuffle(inst, regs);
        for (unsigned i = 0; i < inst.numSrcs(); ++i) {
          update(inst.src(i), inst.extra<Shuffle>()->dests[i]);
        }
      } else {
        for (unsigned i = 0; i < inst.numDsts(); ++i) {
          update(inst.dst(i), inst_regs.dst(i));
        }
      }
    }
    // State contains the PhysLoc->SSATmp reverse mappings at block end;
    // propagate the state to succ
    auto updateEdge = [&](Block* succ) {
      if (!reached[succ]) {
        states[succ] = state;
        reached[succ] = true;  // first edge into succ seeds its state
      } else {
        states[succ].merge(state);
      }
    };
    if (auto* next = block->next()) updateEdge(next);
    if (auto* taken = block->taken()) updateEdge(taken);
  }

  return true;
}
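
checkShuffle itself is not shown; the invariant it is expected to enforce is that a Shuffle's destination locations are pairwise distinct, so its parallel copies cannot clobber one another. A minimal sketch of that uniqueness check, with PhysLocSketch standing in for HHVM's PhysLoc:

#include <cassert>
#include <set>
#include <tuple>
#include <vector>

struct PhysLocSketch {
  int reg0 = -1, reg1 = -1;  // -1 = word not allocated
  bool operator<(const PhysLocSketch& o) const {
    return std::tie(reg0, reg1) < std::tie(o.reg0, o.reg1);
  }
};

void checkShuffleSketch(const std::vector<PhysLocSketch>& dests) {
  std::set<PhysLocSketch> seen;
  for (auto const& d : dests) {
    bool inserted = seen.insert(d).second;
    assert(inserted && "Shuffle destinations must be pairwise distinct");
    (void)inserted;  // keep release builds warning-free
  }
}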