Example #1
void InstrEmitter::CreateVirtualRegisters(SDNode *Node, MachineInstr *MI,
        const MCInstrDesc &II,
        bool IsClone, bool IsCloned,
        DenseMap<SDValue, unsigned> &VRBaseMap) {
    assert(Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF &&
           "IMPLICIT_DEF should have been handled as a special case elsewhere!");

    for (unsigned i = 0; i < II.getNumDefs(); ++i) {
        // If the specific node value is only used by a CopyToReg and the dest reg
        // is a vreg in the same register class, use the CopyToReg'd destination
        // register instead of creating a new vreg.
        unsigned VRBase = 0;
        const TargetRegisterClass *RC = TII->getRegClass(II, i, TRI);
        if (II.OpInfo[i].isOptionalDef()) {
            // Optional def must be a physical register.
            unsigned NumResults = CountResults(Node);
            VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg();
            assert(TargetRegisterInfo::isPhysicalRegister(VRBase));
            MI->addOperand(MachineOperand::CreateReg(VRBase, true));
        }

        if (!VRBase && !IsClone && !IsCloned)
            for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
                    UI != E; ++UI) {
                SDNode *User = *UI;
                if (User->getOpcode() == ISD::CopyToReg &&
                        User->getOperand(2).getNode() == Node &&
                        User->getOperand(2).getResNo() == i) {
                    unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
                    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
                        const TargetRegisterClass *RegRC = MRI->getRegClass(Reg);
                        if (RegRC == RC) {
                            VRBase = Reg;
                            MI->addOperand(MachineOperand::CreateReg(Reg, true));
                            break;
                        }
                    }
                }
            }

        // Create the result registers for this node and add the result regs to
        // the machine instruction.
        if (VRBase == 0) {
            assert(RC && "Isn't a register operand!");
            VRBase = MRI->createVirtualRegister(RC);
            MI->addOperand(MachineOperand::CreateReg(VRBase, true));
        }

        SDValue Op(Node, i);
        if (IsClone)
            VRBaseMap.erase(Op);
        bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
        (void)isNew; // Silence compiler warning.
        assert(isNew && "Node emitted out of order - early");
    }
}
Example #2
// Insert Defs and Uses of MI into the sets RegDefs and RegUses.
void Filler::insertDefsUses(MachineBasicBlock::instr_iterator MI,
                            SmallSet<unsigned, 32> &RegDefs,
                            SmallSet<unsigned, 32> &RegUses) {
  // If MI is a call or return, just examine the explicit non-variadic operands.
  const MCInstrDesc &MCID = MI->getDesc();
  unsigned E = MI->isCall() || MI->isReturn() ? MCID.getNumOperands()
                                              : MI->getNumOperands();
  for (unsigned I = 0; I != E; ++I) {
    const MachineOperand &MO = MI->getOperand(I);
    unsigned Reg;

    if (!MO.isReg() || !(Reg = MO.getReg()))
      continue;

    if (MO.isDef())
      RegDefs.insert(Reg);
    else if (MO.isUse())
      RegUses.insert(Reg);
  }
}
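For context, a hedged sketch of how such def/use sets are typically consumed by a delay-slot filler; the helper name and the exact conflict rules below are assumptions for illustration, not the in-tree implementation.

// Hypothetical helper (not the in-tree code): a candidate instruction cannot
// be moved into the delay slot if one of its register operands conflicts with
// the defs/uses already collected for the branch and the instructions above it.
static bool hasRegisterHazard(const MachineInstr &Candidate,
                              const SmallSet<unsigned, 32> &RegDefs,
                              const SmallSet<unsigned, 32> &RegUses) {
  for (const MachineOperand &MO : Candidate.operands()) {
    if (!MO.isReg() || !MO.getReg())
      continue;
    unsigned Reg = MO.getReg();
    // A def conflicts with earlier defs and earlier uses; a use only conflicts
    // with earlier defs.
    if (MO.isDef() && (RegDefs.count(Reg) || RegUses.count(Reg)))
      return true;
    if (MO.isUse() && RegDefs.count(Reg))
      return true;
  }
  return false;
}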
Example #3
static void computeMaxLatency(InstrDesc &ID, const MCInstrDesc &MCDesc,
                              const MCSchedClassDesc &SCDesc,
                              const MCSubtargetInfo &STI) {
  if (MCDesc.isCall()) {
    // We cannot estimate how long this call will take.
    // Artificially set an arbitrarily high latency (100cy).
    ID.MaxLatency = 100U;
    return;
  }

  int Latency = MCSchedModel::computeInstrLatency(STI, SCDesc);
  // If latency is unknown, then conservatively assume a MaxLatency of 100cy.
  ID.MaxLatency = Latency < 0 ? 100U : static_cast<unsigned>(Latency);
}
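A hedged usage sketch showing how the inputs above are typically obtained. The wrapper below is an assumption (it ignores variant scheduling classes and presumes it lives in the same file as computeMaxLatency); only the MCInstrInfo and MCSubtargetInfo calls shown are existing APIs.

// Sketch: resolve the opcode descriptor and scheduling class for an MCInst,
// then delegate to computeMaxLatency above. Hypothetical wrapper, not mca code.
static void fillMaxLatency(InstrDesc &ID, const MCInst &MCI,
                           const MCInstrInfo &MCII, const MCSubtargetInfo &STI) {
  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
  const MCSchedModel &SM = STI.getSchedModel();
  const MCSchedClassDesc &SCDesc =
      *SM.getSchedClassDesc(MCDesc.getSchedClass());
  computeMaxLatency(ID, MCDesc, SCDesc, STI);
}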
Example #4
unsigned llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    const MachineOperand &RegMO, unsigned OpIdx) {
  unsigned Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: If it's a use, we can
  // skip constraining as the instruction defining the register would constrain
  // it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handle this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);

  if (!RegClass) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here could be not enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Please notice that to
    // reproduce the issue we likely need a destination pattern of a selection
    // rule producing such extra copies, not just an input GMIR with them as
    // every existing target using selectImpl handles copies before calling it
    // and they never reach this function.
    return Reg;
  }
  return constrainRegToClass(MRI, TII, RBI, InsertPt, Reg, *RegClass);
}
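For reference, a hedged sketch of a caller: after instruction selection, every explicit virtual-register operand of an instruction would typically be constrained against its opcode descriptor. The helper below is an assumption for illustration, not the in-tree caller.

// Hypothetical caller: constrain each explicit virtual-register operand of MI
// to the register class demanded by its opcode descriptor.
static void constrainAllRegOperands(MachineInstr &MI, MachineFunction &MF,
                                    const TargetRegisterInfo &TRI,
                                    MachineRegisterInfo &MRI,
                                    const TargetInstrInfo &TII,
                                    const RegisterBankInfo &RBI) {
  const MCInstrDesc &II = MI.getDesc();
  for (unsigned OpI = 0, OpE = MI.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = MI.getOperand(OpI);
    if (!MO.isReg() || !MO.getReg())
      continue;
    // Physical registers are assumed to be properly constrained already.
    if (!TargetRegisterInfo::isVirtualRegister(MO.getReg()))
      continue;
    MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, MI, II, MO, OpI));
  }
}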
Example #5
void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
                                       MachineInstrBuilder &MIB,
                                       const MCInstrDesc &II,
                                       bool IsClone, bool IsCloned,
                                       DenseMap<SDValue, unsigned> &VRBaseMap) {
  assert(Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF &&
         "IMPLICIT_DEF should have been handled as a special case elsewhere!");

  unsigned NumResults = CountResults(Node);
  for (unsigned i = 0; i < II.getNumDefs(); ++i) {
    // If the specific node value is only used by a CopyToReg and the dest reg
    // is a vreg in the same register class, use the CopyToReg'd destination
    // register instead of creating a new vreg.
    unsigned VRBase = 0;
    const TargetRegisterClass *RC =
      TRI->getAllocatableClass(TII->getRegClass(II, i, TRI, *MF));
    // Always let the value type influence the used register class. The
    // constraints on the instruction may be too lax to represent the value
    // type correctly. For example, a 64-bit float (X86::FR64) can't live in
    // the 32-bit float super-class (X86::FR32).
    if (i < NumResults && TLI->isTypeLegal(Node->getSimpleValueType(i))) {
      const TargetRegisterClass *VTRC =
        TLI->getRegClassFor(Node->getSimpleValueType(i));
      if (RC)
        VTRC = TRI->getCommonSubClass(RC, VTRC);
      if (VTRC)
        RC = VTRC;
    }

    if (II.OpInfo[i].isOptionalDef()) {
      // Optional def must be a physical register.
      VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg();
      assert(TargetRegisterInfo::isPhysicalRegister(VRBase));
      MIB.addReg(VRBase, RegState::Define);
    }

    if (!VRBase && !IsClone && !IsCloned)
      for (SDNode *User : Node->uses()) {
        if (User->getOpcode() == ISD::CopyToReg &&
            User->getOperand(2).getNode() == Node &&
            User->getOperand(2).getResNo() == i) {
          unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
          if (TargetRegisterInfo::isVirtualRegister(Reg)) {
            const TargetRegisterClass *RegRC = MRI->getRegClass(Reg);
            if (RegRC == RC) {
              VRBase = Reg;
              MIB.addReg(VRBase, RegState::Define);
              break;
            }
          }
        }
      }

    // Create the result registers for this node and add the result regs to
    // the machine instruction.
    if (VRBase == 0) {
      assert(RC && "Isn't a register operand!");
      VRBase = MRI->createVirtualRegister(RC);
      MIB.addReg(VRBase, RegState::Define);
    }

    // If this def corresponds to a result of the SDNode insert the VRBase into
    // the lookup map.
    if (i < NumResults) {
      SDValue Op(Node, i);
      if (IsClone)
        VRBaseMap.erase(Op);
      bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
      (void)isNew; // Silence compiler warning.
      assert(isNew && "Node emitted out of order - early");
    }
  }
}
Example #6
static void populateWrites(InstrDesc &ID, const MCInst &MCI,
                           const MCInstrDesc &MCDesc,
                           const MCSchedClassDesc &SCDesc,
                           const MCSubtargetInfo &STI) {
  // Set if writes through this opcode may update super registers.
  // TODO: on x86-64, a 4 byte write of a general purpose register always
  // fully updates the super-register.
  // More in general, (at least on x86) not all register writes perform
  // a partial (super-)register update.
  // For example, an AVX instruction that writes on a XMM register implicitly
  // zeroes the upper half of every aliasing super-register.
  //
  // For now, we pessimistically assume that writes are all potentially
  // partial register updates. This is a good default for most targets, except
  // for those like x86 which implement special semantics for certain opcodes.
  // At least on x86, this may lead to an inaccurate prediction of the
  // instruction level parallelism.
  bool FullyUpdatesSuperRegisters = false;

  // Now Populate Writes.

  // This algorithm currently works under the strong (and potentially incorrect)
  // assumption that information related to register def/uses can be obtained
  // from MCInstrDesc.
  //
  // However class MCInstrDesc is used to describe MachineInstr objects and not
  // MCInst objects. To be more specific, MCInstrDesc objects are opcode
  // descriptors that are automatically generated via tablegen based on the
  // instruction set information available from the target .td files.  That
  // means, the number of (explicit) definitions according to MCInstrDesc always
  // matches the cardinality of the `(outs)` set in tablegen.
  //
  // By construction, definitions must appear first in the operand sequence of
  // a MachineInstr. Also, the (outs) sequence is preserved (example: the first
  // element in the outs set is the first operand in the corresponding
  // MachineInstr).  That's the reason why MCInstrDesc only needs to declare the
  // total number of register definitions, and not where those definitions are
  // in the machine operand sequence.
  //
  // Unfortunately, it is not safe to use the information from MCInstrDesc to
  // also describe MCInst objects. An MCInst object can be obtained from a
  // MachineInstr through a lowering step which may restructure the operand
  // sequence (and even remove or introduce new operands). So, there is a high
  // risk that the lowering step breaks the assumptions that register
  // definitions are always at the beginning of the machine operand sequence.
  //
  // This is a fundamental problem, and it is still an open problem. Essentially
  // we have to find a way to correlate def/use operands of a MachineInstr to
  // operands of an MCInst. Otherwise, we cannot correctly reconstruct data
  // dependencies, nor can we correctly interpret the scheduling model, which
  // heavily uses machine operand indices to define processor read-advance
  // information, and to identify processor write resources.  Essentially, we
  // either need something like a MCInstrDesc, but for MCInst, or a way
  // to map MCInst operands back to MachineInstr operands.
  //
  // Unfortunately, we don't have that information now. So, this prototype
  // currently works under the strong assumption that we can always safely trust
  // the content of an MCInstrDesc.  For example, we can query a MCInstrDesc to
  // obtain the number of explicit and implicit register definitions.  We also
  // assume that register definitions always come first in the operand sequence.
  // This last assumption usually makes sense for MachineInstr, where register
  // definitions always appear at the beginning of the operands sequence. In
  // reality, these assumptions could be broken by the lowering step, which can
  // decide to lay out operands in a different order than the original order of
  // operand as specified by the MachineInstr.
  //
  // Things get even more complicated in the presence of "optional" register
  // definitions. For MachineInstr, optional register definitions are always at
  // the end of the operand sequence. Some ARM instructions that may update the
  // status flags specify that register as an optional operand.  Since we don't
  // have operand descriptors for MCInst, we assume for now that the optional
  // definition is always the last operand of a MCInst.  Again, this assumption
  // may be okay for most targets. However, there is no guarantee that targets
  // would respect that.
  //
  // In conclusion: these are for now the strong assumptions made by the tool:
  //  * The number of explicit and implicit register definitions in a MCInst
  //    matches the number of explicit and implicit definitions according to
  //    the opcode descriptor (MCInstrDesc).
  //  * Register definitions take precedence over register uses in the operands
  //    list.
  //  * If an opcode specifies an optional definition, then the optional
  //    definition is always the last operand in the sequence, and it can be
  //    set to zero (i.e. "no register").
  //
  // These assumptions work quite well for most out-of-order in-tree targets
  // like x86. This is mainly because the vast majority of instructions are
  // expanded to MCInst using a straightforward lowering logic that preserves
  // the ordering of the operands.
  //
  // In the longer term, we need to find a proper solution for this issue.
  unsigned NumExplicitDefs = MCDesc.getNumDefs();
  unsigned NumImplicitDefs = MCDesc.getNumImplicitDefs();
  unsigned NumWriteLatencyEntries = SCDesc.NumWriteLatencyEntries;
  unsigned TotalDefs = NumExplicitDefs + NumImplicitDefs;
  if (MCDesc.hasOptionalDef())
    TotalDefs++;
  ID.Writes.resize(TotalDefs);
  // Iterate over the operands list, and skip non-register operands.
  // The first NumExplicitDefs register operands are expected to be register
  // definitions.
  unsigned CurrentDef = 0;
  unsigned i = 0;
  for (; i < MCI.getNumOperands() && CurrentDef < NumExplicitDefs; ++i) {
    const MCOperand &Op = MCI.getOperand(i);
    if (!Op.isReg())
      continue;

    WriteDescriptor &Write = ID.Writes[CurrentDef];
    Write.OpIndex = i;
    if (CurrentDef < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, CurrentDef);
      // Conservatively default to MaxLatency.
      Write.Latency = WLE.Cycles == -1 ? ID.MaxLatency : WLE.Cycles;
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    } else {
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    }
    Write.FullyUpdatesSuperRegs = FullyUpdatesSuperRegisters;
    Write.IsOptionalDef = false;
    LLVM_DEBUG({
      dbgs() << "\t\tOpIdx=" << Write.OpIndex << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
    CurrentDef++;
  }
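  // (The remainder of the function, which records the implicit and optional
  // register definitions counted in TotalDefs above, is omitted in this
  // excerpt.)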
Example #7
static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) {
  for (const uint16_t *Regs = MCID.getImplicitDefs(); *Regs; ++Regs)
    if (*Regs == ARM::CPSR)
      return true;
  return false;
}
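A minimal alternative sketch, assuming an LLVM version that provides MCInstrDesc::hasImplicitDefOfPhysReg; it performs the same scan over the null-terminated implicit-def list as the loop above.

// Equivalent check via the MCInstrDesc helper (sketch).
static bool HasImplicitCPSRDefViaHelper(const MCInstrDesc &MCID) {
  return MCID.hasImplicitDefOfPhysReg(ARM::CPSR);
}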
Example #8
unsigned Disassembler::decodeInstruction(unsigned Address,
  MachineBasicBlock *Block) {
  // Disassemble instruction
  const MCDisassembler *DA = MC->getMCDisassembler();
  uint64_t InstSize;
  MCInst *Inst = new MCInst();
  StringRef Bytes;

  if (!(DA->getInstruction(*Inst, InstSize, *CurSectionMemory, Address,
        nulls(), nulls()))) {
    printError("Unknown instruction encountered, instruction decode failed!");
    return 1;
    // Instructions[Address] = NULL;
    // Block->push_back(NULL);	
    // TODO: Replace with default size for each target.
    // return 1;
    // outs() << format("%8" PRIx64 ":\t", SectAddr + Index);
    // Dism->rawBytesToString(StringRef(Bytes.data() + Index, Size));
    // outs() << "   unkn\n";
  }
  Instructions[Address] = Inst;

  // Recover Instruction information
  const MCInstrInfo *MII = MC->getMCInstrInfo();
  MCInstrDesc *MCID = new MCInstrDesc(MII->get(Inst->getOpcode()));
  MCID->Size = InstSize;

  // Check if the instruction can load to program counter and mark it as a Ret
  // FIXME: Better analysis would be to see if the PC value references memory
  // sent as a parameter or set locally in the function, but that would need to
  // happen after decompilation. In either case, this is definitely a BB
  // terminator or branch!
  if (MCID->mayLoad()
    && MCID->mayAffectControlFlow(*Inst, *MC->getMCRegisterInfo())) {
    MCID->Flags |= (1 << MCID::Return);
    MCID->Flags |= (1 << MCID::Terminator);
  }


  // Recover MachineInstr representation
  DebugLoc *Location = setDebugLoc(Address);
  MachineInstrBuilder MIB = BuildMI(Block, *Location, *MCID);
  unsigned int numDefs = MCID->getNumDefs();
  for (unsigned int i = 0; i < Inst->getNumOperands(); i++) {
    MCOperand MCO = Inst->getOperand(i);
    // FIXME: This hack is a workaround for the assert in MachineInstr.cpp:653,
    // where OpNo >= MCID->getNumOperands()...
    if (i >= MCID->getNumOperands() && !(MCID->isVariadic()))
      break;

    if (MCO.isReg()) {
      unsigned flags = 0;
      // Defs always start at the beginning of the operands list,
      // unfortunately BuildMI doesn't set default define flags so we have
      // to do it manually here.
      // NOTE: This should always be true, but might not be if operands list
      //       is not populated correctly by the MC Backend for the target.
      if (i < numDefs) {
        flags |= RegState::Define;
      }

      // NOTE: No need to worry about imp defs and uses, as these are already
      //       specified in the MCID attached to the MachineInstr object.
      MIB.addReg(MCO.getReg(), flags);
      continue;
    }
    if (MCO.isImm()) {
      MIB.addImm(MCO.getImm());
      continue;
    }
    //else if (MCO.isFPImm()) MIB.addFPImm(MCO.getFPImm());
    if (MCO.isExpr()) {
      MCOperandInfo MCOpInfo = MCID->OpInfo[i];
      switch (MCOpInfo.OperandType) {
        case MCOI::OPERAND_MEMORY:
        case MCOI::OPERAND_PCREL:
        case MCOI::OPERAND_UNKNOWN:
        default:
          printError("Unknown how to handle this Expression at this time.");
      }
    }
    printError("Unknown how to handle Operand!");
  }

  // NOTE: I tried MCOpInfo here, and it appears to be NULL
  // ... at least for ARM.
  unsigned flags = 0;
  if (MCID->mayLoad())
    flags |= MachineMemOperand::MOLoad;
  if (MCID->mayStore())
    flags |= MachineMemOperand::MOStore;
  if (flags != 0) {
    // Constant* cInt = ConstantInt::get(Type::getInt64Ty(ctx), MCO.getImm());
    // Value *Val = ConstantExpr::getIntToPtr(cInt,
    // PointerType::getUnqual(Type::getInt32Ty(ctx)));
    // FIXME: note size of 4 is known to be bad for
    // some targets

    // Copy & paste set getImm to zero
    MachineMemOperand *MMO = new MachineMemOperand(
        MachinePointerInfo(), flags, 4, 0); // MCO.getImm()
    MIB.addMemOperand(MMO);
    // outs() << "Name: " << MII->getName(Inst->getOpcode()) << " Flags: " << flags << "\n";
  }

  // Note: I don't know why they decided instruction size needed to be 64 bits,
  // but the following conversion shouldn't be an issue.
  return ((unsigned)InstSize);
}
Example #9
bool PatmosInstrInfo::canIssueInSlot(const MCInstrDesc &MID,
                                     unsigned Slot) const
{
  return PST.canIssueInSlot(MID.getSchedClass(), Slot);
}
Example #10
static inline bool mayAccessMemory(const MCInstrDesc &TID) {
  return TID.mayLoad() || TID.mayStore() || TID.isCall();
}
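A hedged usage sketch: a caller would typically look the descriptor up by opcode through MCInstrInfo before applying the predicate above; the wrapper name below is hypothetical.

// Hypothetical wrapper: query memory behaviour for a raw opcode.
static bool opcodeMayAccessMemory(const MCInstrInfo &MII, unsigned Opcode) {
  const MCInstrDesc &TID = MII.get(Opcode);
  return mayAccessMemory(TID);
}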