Example #1
void LinearScan::preAllocSpillLoc(uint32_t numSpillLocs) {
  for (Block* block : m_blocks) {
    for (IRInstruction& inst : *block) {
      if (inst.getOpcode() == Spill) {
        SSATmp* dst = inst.getDst();
        for (int index = 0; index < dst->numNeededRegs(); ++index) {
          assert(!dst->hasReg(index));
          if (dst->getSpillInfo(index).type() == SpillInfo::Memory) {
            uint32_t spillLoc = dst->getSpillInfo(index).mem();
            // Native stack layout:
            // |               |
            // +---------------+
            // |               |  <-- spill[5..]
            // | pre-allocated |  <-- spill[4]
            // |  (16 slots)   |  <-- spill[3]
            // +---------------+
            // |  return addr  |
            // +---------------+
            // |    extra      |  <-- spill[2]
            // |    spill      |  <-- spill[1]
            // |  locations    |  <-- spill[0]
            // +---------------+  <-- %rsp
            // If a spill location falls into the pre-allocated region, we
            // need to increase its index by 1 to avoid overwriting the
            // return address.
            if (spillLoc + NumPreAllocatedSpillLocs >= numSpillLocs) {
              dst->setSpillInfo(index, SpillInfo(spillLoc + 1));
            }
          }
        }
      }
    }
  }
}
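
The adjustment above is easy to check in isolation. The following is a minimal standalone sketch of the same arithmetic, not taken from the HHVM sources: `kNumPreAllocated` stands in for `NumPreAllocatedSpillLocs`, and `adjustedSlot` is a hypothetical helper that maps a logical spill location to its physical slot, stepping over the slot occupied by the return address.

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for NumPreAllocatedSpillLocs (assumed to be 16,
// per the diagram above).
constexpr uint32_t kNumPreAllocated = 16;

// Maps a logical spill location to a physical slot index. Locations at
// or above numSpillLocs - kNumPreAllocated fall in the pre-allocated
// region above the return address, so they are shifted up by one slot.
uint32_t adjustedSlot(uint32_t spillLoc, uint32_t numSpillLocs) {
  if (spillLoc + kNumPreAllocated >= numSpillLocs) {
    return spillLoc + 1;
  }
  return spillLoc;
}

int main() {
  // 19 total slots: 3 "extra" slots below the return address and the
  // 16 pre-allocated slots above it, matching the diagram.
  const uint32_t numSpillLocs = 19;
  assert(adjustedSlot(0, numSpillLocs) == 0);   // extra slot, unchanged
  assert(adjustedSlot(2, numSpillLocs) == 2);   // last extra slot
  assert(adjustedSlot(3, numSpillLocs) == 4);   // first pre-allocated slot
  assert(adjustedSlot(18, numSpillLocs) == 19); // last slot
}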
Example #2
void LinearScan::preAllocSpillLocAux(Trace* trace, uint32 numSpillLocs) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end();
       ++it) {
    IRInstruction* inst = *it;
    if (inst->getOpcode() == Spill) {
      SSATmp* dst = inst->getDst();
      for (int index = 0; index < dst->numNeededRegs(); ++index) {
        ASSERT(!dst->hasReg(index));
        if (dst->getSpillInfo(index).type() == SpillInfo::Memory) {
          uint32 spillLoc = dst->getSpillInfo(index).mem();
          // Native stack layout:
          // |               |
          // +---------------+
          // |               |  <-- spill[5..]
          // | pre-allocated |  <-- spill[4]
          // |  (16 slots)   |  <-- spill[3]
          // +---------------+
          // |  return addr  |
          // +---------------+
          // |    extra      |  <-- spill[2]
          // |    spill      |  <-- spill[1]
          // |  locations    |  <-- spill[0]
          // +---------------+  <-- %rsp
          // If a spill location falls into the pre-allocated region, we
          // need to increase its index by 1 to avoid overwriting the
          // return address.
          if (spillLoc + NumPreAllocatedSpillLocs >= numSpillLocs) {
            dst->setSpillInfo(index, SpillInfo(spillLoc + 1));
          }
        }
      }
    }
  }
}
Example #3
uint32 LinearScan::assignSpillLocAux(Trace* trace,
                                     uint32 nextSpillLoc,
                                     uint32 nextMmxReg) {
  IRInstruction::List& instructionList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instructionList.begin();
       it != instructionList.end();
       ++it) {
    IRInstruction* inst = *it;
    if (getNextNative() == inst) {
      ASSERT(!m_natives.empty());
      m_natives.pop_front();
    }
    if (inst->getOpcode() == Spill) {
      SSATmp* dst = inst->getDst();
      SSATmp* src = inst->getSrc(0);
      for (int locIndex = 0;
           locIndex < src->numNeededRegs();
           ++locIndex) {
        if (dst->getLastUseId() <= getNextNativeId()) {
          TRACE(3, "[counter] 1 spill a tmp that does not span native\n");
        } else {
          TRACE(3, "[counter] 1 spill a tmp that spans native\n");
        }

        const bool allowMmxSpill = RuntimeOption::EvalHHIREnableMmx &&
          // The live range of the spill slot doesn't span native calls,
          // and we still have free MMX registers.
          dst->getLastUseId() <= getNextNativeId() &&
          nextMmxReg < (uint32)NumMmxRegs;

        dst->setSpillInfo(locIndex,
          allowMmxSpill
            ? SpillInfo(RegNumber(nextMmxReg++))
            : SpillInfo(nextSpillLoc++)
        );
        if (allowMmxSpill) {
          TRACE(3, "[counter] 1 spill to mmx\n");
        } else {
          TRACE(3, "[counter] 1 spill to memory\n");
        }
      }
    }
    if (inst->getOpcode() == Reload) {
      SSATmp* src = inst->getSrc(0);
      for (int locIndex = 0;
           locIndex < src->numNeededRegs();
           ++locIndex) {
        if (src->getSpillInfo(locIndex).type() == SpillInfo::MMX) {
          TRACE(3, "[counter] reload from mmx\n");
        } else {
          TRACE(3, "[counter] reload from memory\n");
        }
      }
    }
    if (inst->isControlFlowInstruction()) {
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        nextSpillLoc = assignSpillLocAux(label->getTrace(),
                                         nextSpillLoc,
                                         nextMmxReg);
      }
    }
  }
  return nextSpillLoc;
}
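
Both this version and the block-based rewrite in Example #4 below hinge on the same spill-destination policy: use an MMX register when the value dies before the next native call (MMX registers are not preserved across such calls) and one is still free; otherwise take the next memory slot. A minimal restatement of that predicate with illustrative names, where `kNumMmxRegs` assumes the eight x86 registers mm0..mm7 as the value of NumMmxRegs:

#include <cstdint>

constexpr uint32_t kNumMmxRegs = 8;  // assumed: x86 exposes mm0..mm7

// True when a spill may target an MMX register: MMX spills are enabled,
// the tmp's live range ends before the next native call, and an MMX
// register remains unassigned.
bool canSpillToMmx(bool mmxEnabled,
                   uint32_t lastUseId,
                   uint32_t nextNativeId,
                   uint32_t nextMmxReg) {
  return mmxEnabled &&
         lastUseId <= nextNativeId &&
         nextMmxReg < kNumMmxRegs;
}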
Example #4
// Assign spill location numbers to Spill/Reload.
uint32_t LinearScan::assignSpillLoc() {
  uint32_t nextSpillLoc = 0;
  uint32_t nextMmxReg = 0;

  // Visit blocks in reverse postorder and instructions in forward order,
  // assigning a spill slot id or MMX register number to each Spill.
  // We don't reuse slot ids or MMX registers, but both could be reused,
  // either by visiting the dominator tree in preorder or by analyzing
  // lifetimes and reusing ids/registers between non-conflicting spills.

  for (Block* block : m_blocks) {
    for (IRInstruction& inst : *block) {
      if (getNextNative() == &inst) {
        assert(!m_natives.empty());
        m_natives.pop_front();
      }
      if (inst.getOpcode() == Spill) {
        SSATmp* dst = inst.getDst();
        SSATmp* src = inst.getSrc(0);
        for (int locIndex = 0;
             locIndex < src->numNeededRegs();
             ++locIndex) {
          if (dst->getLastUseId() <= getNextNativeId()) {
            TRACE(3, "[counter] 1 spill a tmp that does not span native\n");
          } else {
            TRACE(3, "[counter] 1 spill a tmp that spans native\n");
          }

          const bool allowMmxSpill = RuntimeOption::EvalHHIREnableMmx &&
            // The live range of the spill slot doesn't span native calls,
            // and we still have free MMX registers.
            dst->getLastUseId() <= getNextNativeId() &&
            nextMmxReg < (uint32_t)NumMmxRegs;

          dst->setSpillInfo(locIndex,
            allowMmxSpill
              ? SpillInfo(RegNumber(nextMmxReg++))
              : SpillInfo(nextSpillLoc++)
          );
          if (allowMmxSpill) {
            TRACE(3, "[counter] 1 spill to mmx\n");
          } else {
            TRACE(3, "[counter] 1 spill to memory\n");
          }
        }
      }
      if (inst.getOpcode() == Reload) {
        SSATmp* src = inst.getSrc(0);
        for (int locIndex = 0;
             locIndex < src->numNeededRegs();
             ++locIndex) {
          if (src->getSpillInfo(locIndex).type() == SpillInfo::MMX) {
            TRACE(3, "[counter] reload from mmx\n");
          } else {
            TRACE(3, "[counter] reload from memory\n");
          }
        }
      }
    }
  }
  return nextSpillLoc;
}
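
The call site that stitches these passes together is not shown in these examples; a plausible wiring, under the assumption that the extra-slot renumbering is only needed once the slot count exceeds the pre-allocated region, would look like this sketch:

// Sketch only; the actual caller in the allocator is not shown above.
uint32_t numSpillLocs = assignSpillLoc();
if (numSpillLocs > (uint32_t)NumPreAllocatedSpillLocs) {
  // Slots spilled past the 16 pre-allocated ones go below the return
  // address, so renumber the pre-allocated region to step over it
  // (see preAllocSpillLoc in Examples #1 and #2).
  preAllocSpillLoc(numSpillLocs);
}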