//
// runTargetDesc - Output the target register and register file descriptions.
//
void
RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
                                   CodeGenRegBank &RegBank){
  EmitSourceFileHeader("Target Register and Register Classes Information", OS);

  OS << "\n#ifdef GET_REGINFO_TARGET_DESC\n";
  OS << "#undef GET_REGINFO_TARGET_DESC\n";

  OS << "namespace llvm {\n\n";

  // Get access to MCRegisterClass data.
  OS << "extern const MCRegisterClass " << Target.getName()
     << "MCRegisterClasses[];\n";

  // Start out by emitting each of the register classes.
  ArrayRef<CodeGenRegisterClass*> RegisterClasses = RegBank.getRegClasses();

  // Collect all registers belonging to any allocatable class.
  std::set<Record*> AllocatableRegs;

  // Collect allocatable registers.
  for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
    const CodeGenRegisterClass &RC = *RegisterClasses[rc];
    ArrayRef<Record*> Order = RC.getOrder();

    if (RC.Allocatable)
      AllocatableRegs.insert(Order.begin(), Order.end());
  }

  // Build a shared array of value types.
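  // (SequenceToOffsetTable lays all of the sequences out in one flat array,
  // letting a sequence that is a suffix of another share its storage; the
  // VTSeqs.get(RC.VTs) calls below return each class's offset into VTLists.)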
  SequenceToOffsetTable<std::vector<MVT::SimpleValueType> > VTSeqs;
  for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc)
    VTSeqs.add(RegisterClasses[rc]->VTs);
  VTSeqs.layout();
  OS << "\nstatic const MVT::SimpleValueType VTLists[] = {\n";
  VTSeqs.emit(OS, printSimpleValueType, "MVT::Other");
  OS << "};\n";

  // Now that all of the structs have been emitted, emit the instances.
  if (!RegisterClasses.empty()) {
    std::map<unsigned, std::set<unsigned> > SuperRegClassMap;

    OS << "\nstatic const TargetRegisterClass *const "
       << "NullRegClasses[] = { NULL };\n\n";

    unsigned NumSubRegIndices = RegBank.getSubRegIndices().size();

    if (NumSubRegIndices) {
      // Compute the super-register classes for each RegisterClass
      for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
        const CodeGenRegisterClass &RC = *RegisterClasses[rc];
        for (DenseMap<Record*,Record*>::const_iterator
             i = RC.SubRegClasses.begin(),
             e = RC.SubRegClasses.end(); i != e; ++i) {
          // Find the register class number of i->second for SuperRegClassMap.
          const CodeGenRegisterClass *RC2 = RegBank.getRegClass(i->second);
          assert(RC2 && "Invalid register class in SubRegClasses");
          SuperRegClassMap[RC2->EnumValue].insert(rc);
        }
      }

      // Emit the super-register classes for each RegisterClass
      for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
        const CodeGenRegisterClass &RC = *RegisterClasses[rc];

        // Give the register class a legal C name if it's anonymous.
        std::string Name = RC.getName();

        OS << "// " << Name
           << " Super-register Classes...\n"
           << "static const TargetRegisterClass *const "
           << Name << "SuperRegClasses[] = {\n  ";

        bool Empty = true;
        std::map<unsigned, std::set<unsigned> >::iterator I =
          SuperRegClassMap.find(rc);
        if (I != SuperRegClassMap.end()) {
          for (std::set<unsigned>::iterator II = I->second.begin(),
                 EE = I->second.end(); II != EE; ++II) {
            const CodeGenRegisterClass &RC2 = *RegisterClasses[*II];
            if (!Empty)
              OS << ", ";
            OS << "&" << RC2.getQualifiedName() << "RegClass";
            Empty = false;
          }
        }

        OS << (!Empty ? ", " : "") << "NULL";
        OS << "\n};\n\n";
      }
    }

    // Emit the sub-classes array for each RegisterClass
    for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
      const CodeGenRegisterClass &RC = *RegisterClasses[rc];

      // Give the register class a legal C name if it's anonymous.
      std::string Name = RC.getName();

      OS << "static const uint32_t " << Name << "SubclassMask[] = {\n  ";
      printBitVectorAsHex(OS, RC.getSubClasses(), 32);
      OS << "\n};\n\n";
    }

    // Emit NULL terminated super-class lists.
    for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
      const CodeGenRegisterClass &RC = *RegisterClasses[rc];
      ArrayRef<CodeGenRegisterClass*> Supers = RC.getSuperClasses();

      // Skip classes without supers.  We can reuse NullRegClasses.
      if (Supers.empty())
        continue;

      OS << "static const TargetRegisterClass *const "
         << RC.getName() << "Superclasses[] = {\n";
      for (unsigned i = 0; i != Supers.size(); ++i)
        OS << "  &" << Supers[i]->getQualifiedName() << "RegClass,\n";
      OS << "  NULL\n};\n\n";
    }

    // Emit methods.
    for (unsigned i = 0, e = RegisterClasses.size(); i != e; ++i) {
      const CodeGenRegisterClass &RC = *RegisterClasses[i];
      if (!RC.AltOrderSelect.empty()) {
        OS << "\nstatic inline unsigned " << RC.getName()
           << "AltOrderSelect(const MachineFunction &MF) {"
           << RC.AltOrderSelect << "}\n\n"
           << "static ArrayRef<uint16_t> " << RC.getName()
           << "GetRawAllocationOrder(const MachineFunction &MF) {\n";
        for (unsigned oi = 1 , oe = RC.getNumOrders(); oi != oe; ++oi) {
          ArrayRef<Record*> Elems = RC.getOrder(oi);
          if (!Elems.empty()) {
            OS << "  static const uint16_t AltOrder" << oi << "[] = {";
            for (unsigned elem = 0; elem != Elems.size(); ++elem)
              OS << (elem ? ", " : " ") << getQualifiedName(Elems[elem]);
            OS << " };\n";
          }
        }
        OS << "  const MCRegisterClass &MCR = " << Target.getName()
           << "MCRegisterClasses[" << RC.getQualifiedName() + "RegClassID];\n"
           << "  const ArrayRef<uint16_t> Order[] = {\n"
           << "    makeArrayRef(MCR.begin(), MCR.getNumRegs()";
        for (unsigned oi = 1, oe = RC.getNumOrders(); oi != oe; ++oi)
          if (RC.getOrder(oi).empty())
            OS << "),\n    ArrayRef<uint16_t>(";
          else
            OS << "),\n    makeArrayRef(AltOrder" << oi;
        OS << ")\n  };\n  const unsigned Select = " << RC.getName()
           << "AltOrderSelect(MF);\n  assert(Select < " << RC.getNumOrders()
           << ");\n  return Order[Select];\n}\n";
      }
    }

    // Now emit the actual value-initialized register class instances.
    OS << "namespace " << RegisterClasses[0]->Namespace
       << " {   // Register class instances\n";

    for (unsigned i = 0, e = RegisterClasses.size(); i != e; ++i) {
      const CodeGenRegisterClass &RC = *RegisterClasses[i];
      OS << "  extern const TargetRegisterClass "
         << RegisterClasses[i]->getName() << "RegClass = {\n    "
         << '&' << Target.getName() << "MCRegisterClasses[" << RC.getName()
         << "RegClassID],\n    "
         << "VTLists + " << VTSeqs.get(RC.VTs) << ",\n    "
         << RC.getName() << "SubclassMask,\n    ";
      if (RC.getSuperClasses().empty())
        OS << "NullRegClasses,\n    ";
      else
        OS << RC.getName() << "Superclasses,\n    ";
      OS << (NumSubRegIndices ? RC.getName() + "Super" : std::string("Null"))
         << "RegClasses,\n    ";
      if (RC.AltOrderSelect.empty())
        OS << "0\n";
      else
        OS << RC.getName() << "GetRawAllocationOrder\n";
      OS << "  };\n\n";
    }

    OS << "}\n";
  }

  OS << "\nnamespace {\n";
  OS << "  const TargetRegisterClass* const RegisterClasses[] = {\n";
  for (unsigned i = 0, e = RegisterClasses.size(); i != e; ++i)
    OS << "    &" << RegisterClasses[i]->getQualifiedName()
       << "RegClass,\n";
  OS << "  };\n";
  OS << "}\n";       // End of anonymous namespace...

  // Emit extra information about registers.
  const std::string &TargetName = Target.getName();
  OS << "\nstatic const TargetRegisterInfoDesc "
     << TargetName << "RegInfoDesc[] = { // Extra Descriptors\n";
  OS << "  { 0, 0 },\n";

  const std::vector<CodeGenRegister*> &Regs = RegBank.getRegisters();
  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
    const CodeGenRegister &Reg = *Regs[i];
    OS << "  { ";
    OS << Reg.CostPerUse << ", "
       << int(AllocatableRegs.count(Reg.TheDef)) << " },\n";
  }
  OS << "};\n";      // End of register descriptors...


  // Calculate the mapping of subregister+index pairs to physical registers.
  // This will also create further anonymous indices.
  unsigned NamedIndices = RegBank.getNumNamedIndices();

  // Emit SubRegIndex names, skipping 0
  ArrayRef<CodeGenSubRegIndex*> SubRegIndices = RegBank.getSubRegIndices();
  OS << "\nstatic const char *const " << TargetName
     << "SubRegIndexTable[] = { \"";
  for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
    OS << SubRegIndices[i]->getName();
    if (i+1 != e)
      OS << "\", \"";
  }
  OS << "\" };\n\n";

  // Emit names of the anonymous subreg indices.
  if (SubRegIndices.size() > NamedIndices) {
    OS << "  enum {";
    for (unsigned i = NamedIndices, e = SubRegIndices.size(); i != e; ++i) {
      OS << "\n    " << SubRegIndices[i]->getName() << " = " << i+1;
      if (i+1 != e)
        OS << ',';
    }
    OS << "\n  };\n\n";
  }
  OS << "\n";

  std::string ClassName = Target.getName() + "GenRegisterInfo";

  // Emit composeSubRegIndices
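  // The emitted function is a nested switch: an outer case for each IdxA
  // with at least one composition, and an inner switch on IdxB that returns
  // the composite index, defaulting to IdxB itself.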
  OS << "unsigned " << ClassName
     << "::composeSubRegIndices(unsigned IdxA, unsigned IdxB) const {\n"
     << "  switch (IdxA) {\n"
     << "  default:\n    return IdxB;\n";
  for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
    bool Open = false;
    for (unsigned j = 0; j != e; ++j) {
      if (CodeGenSubRegIndex *Comp =
            SubRegIndices[i]->compose(SubRegIndices[j])) {
        if (!Open) {
          OS << "  case " << SubRegIndices[i]->getQualifiedName()
             << ": switch(IdxB) {\n    default: return IdxB;\n";
          Open = true;
        }
        OS << "    case " << SubRegIndices[j]->getQualifiedName()
           << ": return " << Comp->getQualifiedName() << ";\n";
      }
    }
    if (Open)
      OS << "    }\n";
  }
  OS << "  }\n}\n\n";

  // Emit getSubClassWithSubReg.
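  // The emitted method indexes a dense [RegClass][SubRegIdx] table of
  // register class IDs biased by one, so 0 can mean "no such sub-class".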
  OS << "const TargetRegisterClass *" << ClassName
     << "::getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx)"
        " const {\n";
  if (SubRegIndices.empty()) {
    OS << "  assert(Idx == 0 && \"Target has no sub-registers\");\n"
       << "  return RC;\n";
  } else {
    // Use the smallest type that can hold a regclass ID with room for a
    // sentinel.
    if (RegisterClasses.size() < UINT8_MAX)
      OS << "  static const uint8_t Table[";
    else if (RegisterClasses.size() < UINT16_MAX)
      OS << "  static const uint16_t Table[";
    else
      throw "Too many register classes.";
    OS << RegisterClasses.size() << "][" << SubRegIndices.size() << "] = {\n";
    for (unsigned rci = 0, rce = RegisterClasses.size(); rci != rce; ++rci) {
      const CodeGenRegisterClass &RC = *RegisterClasses[rci];
      OS << "    {\t// " << RC.getName() << "\n";
      for (unsigned sri = 0, sre = SubRegIndices.size(); sri != sre; ++sri) {
        CodeGenSubRegIndex *Idx = SubRegIndices[sri];
        if (CodeGenRegisterClass *SRC = RC.getSubClassWithSubReg(Idx))
          OS << "      " << SRC->EnumValue + 1 << ",\t// " << Idx->getName()
             << " -> " << SRC->getName() << "\n";
        else
          OS << "      0,\t// " << Idx->getName() << "\n";
      }
      OS << "    },\n";
    }
    OS << "  };\n  assert(RC && \"Missing regclass\");\n"
       << "  if (!Idx) return RC;\n  --Idx;\n"
       << "  assert(Idx < " << SubRegIndices.size() << " && \"Bad subreg\");\n"
       << "  unsigned TV = Table[RC->getID()][Idx];\n"
       << "  return TV ? getRegClass(TV - 1) : 0;\n";
  }
  OS << "}\n\n";

  // Emit getMatchingSuperRegClass.
  OS << "const TargetRegisterClass *" << ClassName
     << "::getMatchingSuperRegClass(const TargetRegisterClass *A,"
        " const TargetRegisterClass *B, unsigned Idx) const {\n";
  if (SubRegIndices.empty()) {
    OS << "  llvm_unreachable(\"Target has no sub-registers\");\n";
  } else {
    // We need to find the largest sub-class of A such that every register has
    // an Idx sub-register in B.  Map (B, Idx) to a bit-vector of
    // super-register classes that map into B. Then compute the largest common
    // sub-class with A by taking advantage of the register class ordering,
    // like getCommonSubClass().
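    // The emitted loop below scans TV & SC and returns the class of the
    // first common set bit; the register class ordering makes that the
    // largest common sub-class, as in getCommonSubClass().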

    // Bitvector table is NumRCs x NumSubIndexes x BVWords, where BVWords is
    // the number of 32-bit words required to represent all register classes.
    const unsigned BVWords = (RegisterClasses.size()+31)/32;
    BitVector BV(RegisterClasses.size());

    OS << "  static const uint32_t Table[" << RegisterClasses.size()
       << "][" << SubRegIndices.size() << "][" << BVWords << "] = {\n";
    for (unsigned rci = 0, rce = RegisterClasses.size(); rci != rce; ++rci) {
      const CodeGenRegisterClass &RC = *RegisterClasses[rci];
      OS << "    {\t// " << RC.getName() << "\n";
      for (unsigned sri = 0, sre = SubRegIndices.size(); sri != sre; ++sri) {
        CodeGenSubRegIndex *Idx = SubRegIndices[sri];
        BV.reset();
        RC.getSuperRegClasses(Idx, BV);
        OS << "      { ";
        printBitVectorAsHex(OS, BV, 32);
        OS << "},\t// " << Idx->getName() << '\n';
      }
      OS << "    },\n";
    }
    OS << "  };\n  assert(A && B && \"Missing regclass\");\n"
       << "  --Idx;\n"
       << "  assert(Idx < " << SubRegIndices.size() << " && \"Bad subreg\");\n"
       << "  const uint32_t *TV = Table[B->getID()][Idx];\n"
       << "  const uint32_t *SC = A->getSubClassMask();\n"
       << "  for (unsigned i = 0; i != " << BVWords << "; ++i)\n"
       << "    if (unsigned Common = TV[i] & SC[i])\n"
       << "      return getRegClass(32*i + CountTrailingZeros_32(Common));\n"
       << "  return 0;\n";
  }
  OS << "}\n\n";

  EmitRegUnitPressure(OS, RegBank, ClassName);

  // Emit the constructor of the class...
  OS << "extern const MCRegisterDesc " << TargetName << "RegDesc[];\n";
  OS << "extern const uint16_t " << TargetName << "RegLists[];\n";
  if (SubRegIndices.size() != 0)
    OS << "extern const uint16_t *get" << TargetName
       << "SubRegTable();\n";

  EmitRegMappingTables(OS, Regs, true);

  OS << ClassName << "::\n" << ClassName
     << "(unsigned RA, unsigned DwarfFlavour, unsigned EHFlavour)\n"
     << "  : TargetRegisterInfo(" << TargetName << "RegInfoDesc"
     << ", RegisterClasses, RegisterClasses+" << RegisterClasses.size() <<",\n"
     << "             " << TargetName << "SubRegIndexTable) {\n"
     << "  InitMCRegisterInfo(" << TargetName << "RegDesc, "
     << Regs.size()+1 << ", RA,\n                     " << TargetName
     << "MCRegisterClasses, " << RegisterClasses.size() << ",\n"
     << "                     " << TargetName << "RegLists,\n"
     << "                     ";
  if (SubRegIndices.size() != 0)
    OS << "get" << TargetName << "SubRegTable(), "
       << SubRegIndices.size() << ");\n\n";
  else
    OS << "NULL, 0);\n\n";

  EmitRegMapping(OS, Regs, true);

  OS << "}\n\n";


  // Emit CalleeSavedRegs information.
  std::vector<Record*> CSRSets =
    Records.getAllDerivedDefinitions("CalleeSavedRegs");
  for (unsigned i = 0, e = CSRSets.size(); i != e; ++i) {
    Record *CSRSet = CSRSets[i];
    const SetTheory::RecVec *Regs = RegBank.getSets().expand(CSRSet);
    assert(Regs && "Cannot expand CalleeSavedRegs instance");

    // Emit the *_SaveList list of callee-saved registers.
    OS << "static const uint16_t " << CSRSet->getName()
       << "_SaveList[] = { ";
    for (unsigned r = 0, re = Regs->size(); r != re; ++r)
      OS << getQualifiedName((*Regs)[r]) << ", ";
    OS << "0 };\n";

    // Emit the *_RegMask bit mask of call-preserved registers.
    OS << "static const uint32_t " << CSRSet->getName()
       << "_RegMask[] = { ";
    printBitVectorAsHex(OS, RegBank.computeCoveredRegisters(*Regs), 32);
    OS << "};\n";
  }
  OS << "\n\n";

  OS << "} // End llvm namespace \n";
  OS << "#endif // GET_REGINFO_TARGET_DESC\n\n";
}
Example #2
void llvm::printCOFFUnwindInfo(const COFFObjectFile *Obj) {
  const coff_file_header *Header;
  if (error(Obj->getHeader(Header))) return;

  if (Header->Machine != COFF::IMAGE_FILE_MACHINE_AMD64) {
    errs() << "Unsupported image machine type "
              "(currently only AMD64 is supported).\n";
    return;
  }

  const coff_section *Pdata = 0;

  error_code ec;
  for (section_iterator SI = Obj->begin_sections(),
                        SE = Obj->end_sections();
                        SI != SE; SI.increment(ec)) {
    if (error(ec)) return;

    StringRef Name;
    if (error(SI->getName(Name))) continue;

    if (Name != ".pdata") continue;

    Pdata = Obj->getCOFFSection(SI);
    std::vector<RelocationRef> Rels;
    for (relocation_iterator RI = SI->begin_relocations(),
                             RE = SI->end_relocations();
                             RI != RE; RI.increment(ec)) {
      if (error(ec)) break;
      Rels.push_back(*RI);
    }

    // Sort relocations by address.
    std::sort(Rels.begin(), Rels.end(), RelocAddressLess);

    ArrayRef<uint8_t> Contents;
    if (error(Obj->getSectionContents(Pdata, Contents))) continue;
    if (Contents.empty()) continue;

    ArrayRef<RuntimeFunction> RFs(
        reinterpret_cast<const RuntimeFunction *>(Contents.data()),
        Contents.size() / sizeof(RuntimeFunction));
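    // Each .pdata entry is a RUNTIME_FUNCTION record: three 32-bit values
    // (start RVA, end RVA, unwind info RVA), hence the 0/4/8 offsets below.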
    for (const RuntimeFunction *I = RFs.begin(), *E = RFs.end(); I < E; ++I) {
      const uint64_t SectionOffset = std::distance(RFs.begin(), I)
                                     * sizeof(RuntimeFunction);

      outs() << "Function Table:\n";

      outs() << "  Start Address: ";
      printCOFFSymbolAddress(outs(), Rels, SectionOffset +
                             /*offsetof(RuntimeFunction, StartAddress)*/ 0,
                             I->StartAddress);
      outs() << "\n";

      outs() << "  End Address: ";
      printCOFFSymbolAddress(outs(), Rels, SectionOffset +
                             /*offsetof(RuntimeFunction, EndAddress)*/ 4,
                             I->EndAddress);
      outs() << "\n";

      outs() << "  Unwind Info Address: ";
      printCOFFSymbolAddress(outs(), Rels, SectionOffset +
                             /*offsetof(RuntimeFunction, UnwindInfoOffset)*/ 8,
                             I->UnwindInfoOffset);
      outs() << "\n";

      ArrayRef<uint8_t> XContents;
      uint64_t UnwindInfoOffset = 0;
      if (error(getSectionContents(Obj, Rels, SectionOffset +
                              /*offsetof(RuntimeFunction, UnwindInfoOffset)*/ 8,
                                   XContents, UnwindInfoOffset))) continue;
      if (XContents.empty()) continue;

      UnwindInfoOffset += I->UnwindInfoOffset;
      if (UnwindInfoOffset > XContents.size()) continue;

      const Win64EH::UnwindInfo *UI =
                            reinterpret_cast<const Win64EH::UnwindInfo *>
                              (XContents.data() + UnwindInfoOffset);

      // The casts to int are required in order to output the value as a
      // number. Without the casts the value would be interpreted as char
      // data (which results in garbage output).
      outs() << "  Version: " << static_cast<int>(UI->getVersion()) << "\n";
      outs() << "  Flags: " << static_cast<int>(UI->getFlags());
      if (UI->getFlags()) {
          if (UI->getFlags() & UNW_ExceptionHandler)
            outs() << " UNW_ExceptionHandler";
          if (UI->getFlags() & UNW_TerminateHandler)
            outs() << " UNW_TerminateHandler";
          if (UI->getFlags() & UNW_ChainInfo)
            outs() << " UNW_ChainInfo";
      }
      outs() << "\n";
      outs() << "  Size of prolog: "
             << static_cast<int>(UI->PrologSize) << "\n";
      outs() << "  Number of Codes: "
             << static_cast<int>(UI->NumCodes) << "\n";
      // Maybe this should move to output of UOP_SetFPReg?
      if (UI->getFrameRegister()) {
        outs() << "  Frame register: "
                << getUnwindRegisterName(UI->getFrameRegister())
                << "\n";
        outs() << "  Frame offset: "
                << 16 * UI->getFrameOffset()
                << "\n";
      } else {
        outs() << "  No frame pointer used\n";
      }
      if (UI->getFlags() & (UNW_ExceptionHandler | UNW_TerminateHandler)) {
        // FIXME: Output exception handler data
      } else if (UI->getFlags() & UNW_ChainInfo) {
        // FIXME: Output chained unwind info
      }

      if (UI->NumCodes)
        outs() << "  Unwind Codes:\n";

      printAllUnwindCodes(ArrayRef<UnwindCode>(&UI->UnwindCodes[0],
                          UI->NumCodes));

      outs() << "\n\n";
      outs().flush();
    }
  }
}
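Example #3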
/// Force liveness of registers.
void RegPressureTracker::addLiveRegs(ArrayRef<unsigned> Regs) {
  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
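    // insert() reports whether the register was newly added, so pressure is
    // only increased for registers that were not already live.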
    if (LiveRegs.insert(Regs[i]))
      increaseRegPressure(Regs[i]);
  }
}
Example #4
ArrayRef<Ref<DataBlock> >
DataBlock::getDataBlocks(ArrayRef<unsigned char> rawCodewords,
                         Version *version,
                         ErrorCorrectionLevel &ecLevel) {

    // Figure out the number and size of data blocks used by this version and
    // error correction level
    Version::ECBlocks &ecBlocks = version->getECBlocksForLevel(ecLevel);

    // First count the total number of data blocks
    int totalBlocks = 0;
    vector<Version::ECB*> ecBlockArray = ecBlocks.getECBlocks();
    for (size_t i = 0; i < ecBlockArray.size(); i++) {
        totalBlocks += ecBlockArray[i]->getCount();
    }

    // Now establish DataBlocks of the appropriate size and number of data codewords
    ArrayRef<Ref<DataBlock> > result(totalBlocks);
    int numResultBlocks = 0;
    for (size_t j = 0; j < ecBlockArray.size(); j++) {
        Version::ECB *ecBlock = ecBlockArray[j];
        for (int i = 0; i < ecBlock->getCount(); i++) {
            int numDataCodewords = ecBlock->getDataCodewords();
            int numBlockCodewords = ecBlocks.getECCodewords() + numDataCodewords;
            ArrayRef<unsigned char> buffer(numBlockCodewords);
            Ref<DataBlock> blockRef(new DataBlock(numDataCodewords, buffer));
            result[numResultBlocks++] = blockRef;
        }
    }

    // All blocks have the same amount of data, except that the last n
    // (where n may be 0) have 1 more byte. Figure out where these start.
    int shorterBlocksTotalCodewords = result[0]->codewords_.size();
    int longerBlocksStartAt = result->size() - 1;
    while (longerBlocksStartAt >= 0) {
        int numCodewords = result[longerBlocksStartAt]->codewords_.size();
        if (numCodewords == shorterBlocksTotalCodewords) {
            break;
        }
        if (numCodewords != shorterBlocksTotalCodewords + 1) {
            throw IllegalArgumentException("Data block sizes differ by more than 1");
        }
        longerBlocksStartAt--;
    }
    longerBlocksStartAt++;
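    // longerBlocksStartAt now indexes the first block carrying one extra
    // codeword (== numResultBlocks when all blocks are the same size).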

    int shorterBlocksNumDataCodewords = shorterBlocksTotalCodewords - ecBlocks.getECCodewords();
    // The last elements of result may be 1 element longer;
    // first fill out as many elements as all of them have
    int rawCodewordsOffset = 0;
    for (int i = 0; i < shorterBlocksNumDataCodewords; i++) {
        for (int j = 0; j < numResultBlocks; j++) {
            result[j]->codewords_[i] = rawCodewords[rawCodewordsOffset++];
        }
    }
    // Fill out the last data block in the longer ones
    for (int j = longerBlocksStartAt; j < numResultBlocks; j++) {
        result[j]->codewords_[shorterBlocksNumDataCodewords] = rawCodewords[rawCodewordsOffset++];
    }
    // Now add in error correction blocks
    int max = result[0]->codewords_.size();
    for (int i = shorterBlocksNumDataCodewords; i < max; i++) {
        for (int j = 0; j < numResultBlocks; j++) {
            int iOffset = j < longerBlocksStartAt ? i : i + 1;
            result[j]->codewords_[iOffset] = rawCodewords[rawCodewordsOffset++];
        }
    }

    if ((size_t) rawCodewordsOffset != rawCodewords.size()) {
        throw IllegalArgumentException("rawCodewordsOffset != rawCodewords.length");
    }

    return result;
}
Example #5
SolutionDiff::SolutionDiff(ArrayRef<Solution> solutions) {
  if (solutions.size() <= 1)
    return;

  // Populate the type bindings with the first solution.
  llvm::DenseMap<TypeVariableType *, SmallVector<Type, 2>> typeBindings;
  for (auto binding : solutions[0].typeBindings) {
    typeBindings[binding.first].push_back(binding.second);
  }

  // Populate the overload choices with the first solution.
  llvm::DenseMap<ConstraintLocator *, SmallVector<OverloadChoice, 2>>
    overloadChoices;
  for (auto choice : solutions[0].overloadChoices) {
    overloadChoices[choice.first].push_back(choice.second.choice);
  }

  // Find the type variables and overload locators common to all of the
  // solutions.
  for (auto &solution : solutions.slice(1)) {
    // For each type variable bound in all of the previous solutions, check
    // whether we have a binding for this type variable in this solution.
    SmallVector<TypeVariableType *, 4> removeTypeBindings;
    for (auto &binding : typeBindings) {
      auto known = solution.typeBindings.find(binding.first);
      if (known == solution.typeBindings.end()) {
        removeTypeBindings.push_back(binding.first);
        continue;
      }

      // Add this solution's binding to the results.
      binding.second.push_back(known->second);
    }

    // Remove those type variables for which this solution did not have a
    // binding.
    for (auto typeVar : removeTypeBindings) {
      typeBindings.erase(typeVar);
    }
    removeTypeBindings.clear();

    // For each overload locator for which we have an overload choice in
    // all of the previous solutions. Check whether we have an overload choice
    // in this solution.
    SmallVector<ConstraintLocator *, 4> removeOverloadChoices;
    for (auto &overloadChoice : overloadChoices) {
      auto known = solution.overloadChoices.find(overloadChoice.first);
      if (known == solution.overloadChoices.end()) {
        removeOverloadChoices.push_back(overloadChoice.first);
        continue;
      }

      // Add this solution's overload choice to the results.
      overloadChoice.second.push_back(known->second.choice);
    }

    // Remove those overload locators for which this solution did not have
    // an overload choice.
    for (auto overloadChoice : removeOverloadChoices) {
      overloadChoices.erase(overloadChoice);
    }
  }

  // Look through the type variables that have bindings in all of the
  // solutions, and add those that have differences to the diff.
  for (auto &binding : typeBindings) {
    Type singleType;
    for (auto type : binding.second) {
      if (!singleType)
        singleType = type;
      else if (!singleType->isEqual(type)) {
        // We have a difference. Add this binding to the diff.
        this->typeBindings.push_back(
          SolutionDiff::TypeBindingDiff{
            binding.first,
            std::move(binding.second)
          });

        break;
      }
    }
  }

  // Look through the overload locators that have overload choices in all of
  // the solutions, and add those that have differences to the diff.
  for (auto &overloadChoice : overloadChoices) {
    OverloadChoice singleChoice = overloadChoice.second[0];
    for (auto choice : overloadChoice.second) {
      if (!sameOverloadChoice(singleChoice, choice)) {
        // We have a difference. Add this set of overload choices to the diff.
        this->overloads.push_back(
          SolutionDiff::OverloadDiff{
            overloadChoice.first,
            overloadChoice.second
          });
        // Record the difference only once per overload locator.
        break;
      }
    }
  }
}
Example #6
void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
  switch (getKind()) {
  case APValue::Uninitialized:
    Out << "<uninitialized>";
    return;
  case APValue::Int:
    if (Ty->isBooleanType())
      Out << (getInt().getBoolValue() ? "true" : "false");
    else
      Out << getInt();
    return;
  case APValue::Float:
    Out << GetApproxValue(getFloat());
    return;
  case APValue::Vector: {
    Out << '{';
    QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
    getVectorElt(0).printPretty(Out, Ctx, ElemTy);
    for (unsigned i = 1; i != getVectorLength(); ++i) {
      Out << ", ";
      getVectorElt(i).printPretty(Out, Ctx, ElemTy);
    }
    Out << '}';
    return;
  }
  case APValue::ComplexInt:
    Out << getComplexIntReal() << "+" << getComplexIntImag() << "i";
    return;
  case APValue::ComplexFloat:
    Out << GetApproxValue(getComplexFloatReal()) << "+"
        << GetApproxValue(getComplexFloatImag()) << "i";
    return;
  case APValue::LValue: {
    LValueBase Base = getLValueBase();
    if (!Base) {
      Out << "0";
      return;
    }

    bool IsReference = Ty->isReferenceType();
    QualType InnerTy
      = IsReference ? Ty.getNonReferenceType() : Ty->getPointeeType();

    if (!hasLValuePath()) {
      // No lvalue path: just print the offset.
      CharUnits O = getLValueOffset();
      CharUnits S = Ctx.getTypeSizeInChars(InnerTy);
      if (!O.isZero()) {
        if (IsReference)
          Out << "*(";
        if (O % S) {
          Out << "(char*)";
          S = CharUnits::One();
        }
        Out << '&';
      } else if (!IsReference)
        Out << '&';

      if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>())
        Out << *VD;
      else
        Base.get<const Expr*>()->printPretty(Out, 0, Ctx.getPrintingPolicy());
      if (!O.isZero()) {
        Out << " + " << (O / S);
        if (IsReference)
          Out << ')';
      }
      return;
    }

    // We have an lvalue path. Print it out nicely.
    if (!IsReference)
      Out << '&';
    else if (isLValueOnePastTheEnd())
      Out << "*(&";

    QualType ElemTy;
    if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) {
      Out << *VD;
      ElemTy = VD->getType();
    } else {
      const Expr *E = Base.get<const Expr*>();
      E->printPretty(Out, 0, Ctx.getPrintingPolicy());
      ElemTy = E->getType();
    }

    ArrayRef<LValuePathEntry> Path = getLValuePath();
    const CXXRecordDecl *CastToBase = 0;
    for (unsigned I = 0, N = Path.size(); I != N; ++I) {
      if (ElemTy->getAs<RecordType>()) {
        // The lvalue refers to a class type, so the next path entry is a base
        // or member.
        const Decl *BaseOrMember =
        BaseOrMemberType::getFromOpaqueValue(Path[I].BaseOrMember).getPointer();
        if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(BaseOrMember)) {
          CastToBase = RD;
          ElemTy = Ctx.getRecordType(RD);
        } else {
          const ValueDecl *VD = cast<ValueDecl>(BaseOrMember);
          Out << ".";
          if (CastToBase)
            Out << *CastToBase << "::";
          Out << *VD;
          ElemTy = VD->getType();
        }
      } else {
        // The lvalue must refer to an array.
        Out << '[' << Path[I].ArrayIndex << ']';
        ElemTy = Ctx.getAsArrayType(ElemTy)->getElementType();
      }
    }

    // Handle formatting of one-past-the-end lvalues.
    if (isLValueOnePastTheEnd()) {
      // FIXME: If CastToBase is non-0, we should prefix the output with
      // "(CastToBase*)".
      Out << " + 1";
      if (IsReference)
        Out << ')';
    }
    return;
  }
  case APValue::Array: {
    const ArrayType *AT = Ctx.getAsArrayType(Ty);
    QualType ElemTy = AT->getElementType();
    Out << '{';
    if (unsigned N = getArrayInitializedElts()) {
      getArrayInitializedElt(0).printPretty(Out, Ctx, ElemTy);
      for (unsigned I = 1; I != N; ++I) {
        Out << ", ";
        if (I == 10) {
          // Avoid printing out the entire contents of large arrays.
          Out << "...";
          break;
        }
        getArrayInitializedElt(I).printPretty(Out, Ctx, ElemTy);
      }
    }
    Out << '}';
    return;
  }
  case APValue::Struct: {
    Out << '{';
    const RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
    bool First = true;
    if (unsigned N = getStructNumBases()) {
      const CXXRecordDecl *CD = cast<CXXRecordDecl>(RD);
      CXXRecordDecl::base_class_const_iterator BI = CD->bases_begin();
      for (unsigned I = 0; I != N; ++I, ++BI) {
        assert(BI != CD->bases_end());
        if (!First)
          Out << ", ";
        getStructBase(I).printPretty(Out, Ctx, BI->getType());
        First = false;
      }
    }
    for (RecordDecl::field_iterator FI = RD->field_begin();
         FI != RD->field_end(); ++FI) {
      // Skip unnamed bitfields before emitting the separator so a skipped
      // field does not leave a stray ", ".
      if (FI->isUnnamedBitfield()) continue;
      if (!First)
        Out << ", ";
      getStructField(FI->getFieldIndex()).
        printPretty(Out, Ctx, FI->getType());
      First = false;
    }
    Out << '}';
    return;
  }
  case APValue::Union:
    Out << '{';
    if (const FieldDecl *FD = getUnionField()) {
      Out << "." << *FD << " = ";
      getUnionValue().printPretty(Out, Ctx, FD->getType());
    }
    Out << '}';
    return;
  case APValue::MemberPointer:
    // FIXME: This is not enough to unambiguously identify the member in a
    // multiple-inheritance scenario.
    if (const ValueDecl *VD = getMemberPointerDecl()) {
      Out << '&' << *cast<CXXRecordDecl>(VD->getDeclContext()) << "::" << *VD;
      return;
    }
    Out << "0";
    return;
  case APValue::AddrLabelDiff:
    Out << "&&" << getAddrLabelDiffLHS()->getLabel()->getName();
    Out << " - ";
    Out << "&&" << getAddrLabelDiffRHS()->getLabel()->getName();
    return;
  }
  llvm_unreachable("Unknown APValue kind!");
}
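Example #7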
/// This function goes through the arguments of F and sees if we have anything
/// to optimize in which case it returns true. If we have nothing to optimize,
/// it returns false.
bool FunctionAnalyzer::analyze() {
  // For now ignore functions with indirect results.
  if (F->getLoweredFunctionType()->hasIndirectResult())
    return false;

  ArrayRef<SILArgument *> Args = F->begin()->getBBArgs();

  // A map from consumed SILArguments to the release associated with an
  // argument.
  ConsumedArgToEpilogueReleaseMatcher ArgToReturnReleaseMap(RCIA, F);
  ConsumedArgToEpilogueReleaseMatcher ArgToThrowReleaseMap(
      RCIA, F, ConsumedArgToEpilogueReleaseMatcher::ExitKind::Throw);

  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    ArgumentDescriptor A(Allocator, Args[i]);
    bool HaveOptimizedArg = false;

    bool isABIRequired = isArgumentABIRequired(Args[i]);
    auto OnlyRelease = getNonTrivialNonDebugReleaseUse(Args[i]);

    // If this argument is not ABI-required and has no uses except for debug
    // instructions, remove it.
    if (!isABIRequired && OnlyRelease && OnlyRelease.getValue().isNull()) {
      A.IsDead = true;
      HaveOptimizedArg = true;
      ++NumDeadArgsEliminated;
    }

    // See if we can find a ref count equivalent strong_release or release_value
    // at the end of this function if our argument is an @owned parameter.
    if (A.hasConvention(ParameterConvention::Direct_Owned)) {
      if (auto *Release = ArgToReturnReleaseMap.releaseForArgument(A.Arg)) {
        SILInstruction *ReleaseInThrow = nullptr;

        // If the function has a throw block we must also find a matching
        // release in the throw block.
        if (!ArgToThrowReleaseMap.hasBlock() ||
            (ReleaseInThrow = ArgToThrowReleaseMap.releaseForArgument(A.Arg))) {

          // TODO: accept a second release in the throw block to let the
          // argument be dead.
          if (OnlyRelease && OnlyRelease.getValue().getPtrOrNull() == Release) {
            A.IsDead = true;
          }
          A.CalleeRelease = Release;
          A.CalleeReleaseInThrowBlock = ReleaseInThrow;
          HaveOptimizedArg = true;
          ++NumOwnedConvertedToGuaranteed;
        }
      }
    }

    if (A.shouldExplode()) {
      HaveOptimizedArg = true;
      ++NumSROAArguments;
    }

    if (HaveOptimizedArg) {
      ShouldOptimize = true;
      // Store that we have modified the self argument. We need to change the
      // calling convention later.
      if (Args[i]->isSelf())
        HaveModifiedSelfArgument = true;
    }

    // Add the argument to our list.
    ArgDescList.push_back(std::move(A));
  }

  return ShouldOptimize;
}
Example #8
// runEnums - Print out enum values for all of the registers.
void RegisterInfoEmitter::runEnums(raw_ostream &OS,
                                   CodeGenTarget &Target, CodeGenRegBank &Bank) {
  const std::vector<CodeGenRegister*> &Registers = Bank.getRegisters();

  // Register enums are stored as uint16_t in the tables. Make sure we'll fit.
  assert(Registers.size() <= 0xffff && "Too many regs to fit in tables");

  std::string Namespace = Registers[0]->TheDef->getValueAsString("Namespace");

  emitSourceFileHeader("Target Register Enum Values", OS);

  OS << "\n#ifdef GET_REGINFO_ENUM\n";
  OS << "#undef GET_REGINFO_ENUM\n";

  OS << "namespace llvm {\n\n";

  OS << "class MCRegisterClass;\n"
     << "extern const MCRegisterClass " << Namespace
     << "MCRegisterClasses[];\n\n";

  if (!Namespace.empty())
    OS << "namespace " << Namespace << " {\n";
  OS << "enum {\n  NoRegister,\n";

  for (unsigned i = 0, e = Registers.size(); i != e; ++i)
    OS << "  " << Registers[i]->getName() << " = " <<
      Registers[i]->EnumValue << ",\n";
  assert(Registers.size() == Registers[Registers.size()-1]->EnumValue &&
         "Register enum value mismatch!");
  OS << "  NUM_TARGET_REGS \t// " << Registers.size()+1 << "\n";
  OS << "};\n";
  if (!Namespace.empty())
    OS << "}\n";

  ArrayRef<CodeGenRegisterClass*> RegisterClasses = Bank.getRegClasses();
  if (!RegisterClasses.empty()) {

    // RegisterClass enums are stored as uint16_t in the tables.
    assert(RegisterClasses.size() <= 0xffff &&
           "Too many register classes to fit in tables");

    OS << "\n// Register classes\n";
    if (!Namespace.empty())
      OS << "namespace " << Namespace << " {\n";
    OS << "enum {\n";
    for (unsigned i = 0, e = RegisterClasses.size(); i != e; ++i) {
      if (i) OS << ",\n";
      OS << "  " << RegisterClasses[i]->getName() << "RegClassID";
      OS << " = " << i;
    }
    OS << "\n  };\n";
    if (!Namespace.empty())
      OS << "}\n";
  }

  const std::vector<Record*> RegAltNameIndices = Target.getRegAltNameIndices();
  // If the only definition is the default NoRegAltName, we don't need to
  // emit anything.
  if (RegAltNameIndices.size() > 1) {
    OS << "\n// Register alternate name indices\n";
    if (!Namespace.empty())
      OS << "namespace " << Namespace << " {\n";
    OS << "enum {\n";
    for (unsigned i = 0, e = RegAltNameIndices.size(); i != e; ++i)
      OS << "  " << RegAltNameIndices[i]->getName() << ",\t// " << i << "\n";
    OS << "  NUM_TARGET_REG_ALT_NAMES = " << RegAltNameIndices.size() << "\n";
    OS << "};\n";
    if (!Namespace.empty())
      OS << "}\n";
  }

  ArrayRef<CodeGenSubRegIndex*> SubRegIndices = Bank.getSubRegIndices();
  if (!SubRegIndices.empty()) {
    OS << "\n// Subregister indices\n";
    std::string Namespace =
      SubRegIndices[0]->getNamespace();
    if (!Namespace.empty())
      OS << "namespace " << Namespace << " {\n";
    OS << "enum {\n  NoSubRegister,\n";
    for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i)
      OS << "  " << SubRegIndices[i]->getName() << ",\t// " << i+1 << "\n";
    OS << "  NUM_TARGET_SUBREGS\n};\n";
    if (!Namespace.empty())
      OS << "}\n";
  }

  OS << "} // End llvm namespace \n";
  OS << "#endif // GET_REGINFO_ENUM\n\n";
}
Example #9
//
// runTargetDesc - Output the target register and register file descriptions.
//
void
RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
                                   CodeGenRegBank &RegBank){
  emitSourceFileHeader("Target Register and Register Classes Information", OS);

  OS << "\n#ifdef GET_REGINFO_TARGET_DESC\n";
  OS << "#undef GET_REGINFO_TARGET_DESC\n";

  OS << "namespace llvm {\n\n";

  // Get access to MCRegisterClass data.
  OS << "extern const MCRegisterClass " << Target.getName()
     << "MCRegisterClasses[];\n";

  // Start out by emitting each of the register classes.
  ArrayRef<CodeGenRegisterClass*> RegisterClasses = RegBank.getRegClasses();
  ArrayRef<CodeGenSubRegIndex*> SubRegIndices = RegBank.getSubRegIndices();

  // Collect all registers belonging to any allocatable class.
  std::set<Record*> AllocatableRegs;

  // Collect allocatable registers.
  for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
    const CodeGenRegisterClass &RC = *RegisterClasses[rc];
    ArrayRef<Record*> Order = RC.getOrder();

    if (RC.Allocatable)
      AllocatableRegs.insert(Order.begin(), Order.end());
  }

  // Build a shared array of value types.
  SequenceToOffsetTable<std::vector<MVT::SimpleValueType> > VTSeqs;
  for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc)
    VTSeqs.add(RegisterClasses[rc]->VTs);
  VTSeqs.layout();
  OS << "\nstatic const MVT::SimpleValueType VTLists[] = {\n";
  VTSeqs.emit(OS, printSimpleValueType, "MVT::Other");
  OS << "};\n";

  // Emit SubRegIndex names, skipping 0.
  OS << "\nstatic const char *const SubRegIndexNameTable[] = { \"";
  for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
    OS << SubRegIndices[i]->getName();
    if (i + 1 != e)
      OS << "\", \"";
  }
  OS << "\" };\n\n";

  // Emit SubRegIndex lane masks, including 0.
  OS << "\nstatic const unsigned SubRegIndexLaneMaskTable[] = {\n  ~0u,\n";
  for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
    OS << format("  0x%08x, // ", SubRegIndices[i]->LaneMask)
       << SubRegIndices[i]->getName() << '\n';
  }
  OS << " };\n\n";

  OS << "\n";

  // Now that all of the structs have been emitted, emit the instances.
  if (!RegisterClasses.empty()) {
    OS << "\nstatic const TargetRegisterClass *const "
       << "NullRegClasses[] = { NULL };\n\n";

    // Emit register class bit mask tables. The first bit mask emitted for a
    // register class, RC, is the set of sub-classes, including RC itself.
    //
    // If RC has super-registers, also create a list of subreg indices and bit
    // masks, (Idx, Mask). The bit mask has a bit for every superreg regclass,
    // SuperRC, that satisfies:
    //
    //   For all SuperReg in SuperRC: SuperReg:Idx in RC
    //
    // The 0-terminated list of subreg indices starts at:
    //
    //   RC->getSuperRegIndices() = SuperRegIdxSeqs + ...
    //
    // The corresponding bitmasks follow the sub-class mask in memory. Each
    // mask has RCMaskWords uint32_t entries.
    //
    // Every bit mask present in the list has at least one bit set.
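    //
    // For example, with 40 register classes RCMaskWords is (40+31)/32 = 2,
    // so every mask in this scheme, including the sub-class mask itself,
    // occupies two uint32_t words.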

    // Compress the sub-reg index lists.
    typedef std::vector<const CodeGenSubRegIndex*> IdxList;
    SmallVector<IdxList, 8> SuperRegIdxLists(RegisterClasses.size());
    SequenceToOffsetTable<IdxList> SuperRegIdxSeqs;
    BitVector MaskBV(RegisterClasses.size());

    for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
      const CodeGenRegisterClass &RC = *RegisterClasses[rc];
      OS << "static const uint32_t " << RC.getName() << "SubClassMask[] = {\n  ";
      printBitVectorAsHex(OS, RC.getSubClasses(), 32);

      // Emit super-reg class masks for any relevant SubRegIndices that can
      // project into RC.
      IdxList &SRIList = SuperRegIdxLists[rc];
      for (unsigned sri = 0, sre = SubRegIndices.size(); sri != sre; ++sri) {
        CodeGenSubRegIndex *Idx = SubRegIndices[sri];
        MaskBV.reset();
        RC.getSuperRegClasses(Idx, MaskBV);
        if (MaskBV.none())
          continue;
        SRIList.push_back(Idx);
        OS << "\n  ";
        printBitVectorAsHex(OS, MaskBV, 32);
        OS << "// " << Idx->getName();
      }
      SuperRegIdxSeqs.add(SRIList);
      OS << "\n};\n\n";
    }

    OS << "static const uint16_t SuperRegIdxSeqs[] = {\n";
    SuperRegIdxSeqs.layout();
    SuperRegIdxSeqs.emit(OS, printSubRegIndex);
    OS << "};\n\n";

    // Emit NULL terminated super-class lists.
    for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
      const CodeGenRegisterClass &RC = *RegisterClasses[rc];
      ArrayRef<CodeGenRegisterClass*> Supers = RC.getSuperClasses();

      // Skip classes without supers.  We can reuse NullRegClasses.
      if (Supers.empty())
        continue;

      OS << "static const TargetRegisterClass *const "
         << RC.getName() << "Superclasses[] = {\n";
      for (unsigned i = 0; i != Supers.size(); ++i)
        OS << "  &" << Supers[i]->getQualifiedName() << "RegClass,\n";
      OS << "  NULL\n};\n\n";
    }

    // Emit methods.
    for (unsigned i = 0, e = RegisterClasses.size(); i != e; ++i) {
      const CodeGenRegisterClass &RC = *RegisterClasses[i];
      if (!RC.AltOrderSelect.empty()) {
        OS << "\nstatic inline unsigned " << RC.getName()
           << "AltOrderSelect(const MachineFunction &MF) {"
           << RC.AltOrderSelect << "}\n\n"
           << "static ArrayRef<uint16_t> " << RC.getName()
           << "GetRawAllocationOrder(const MachineFunction &MF) {\n";
        for (unsigned oi = 1 , oe = RC.getNumOrders(); oi != oe; ++oi) {
          ArrayRef<Record*> Elems = RC.getOrder(oi);
          if (!Elems.empty()) {
            OS << "  static const uint16_t AltOrder" << oi << "[] = {";
            for (unsigned elem = 0; elem != Elems.size(); ++elem)
              OS << (elem ? ", " : " ") << getQualifiedName(Elems[elem]);
            OS << " };\n";
          }
        }
        OS << "  const MCRegisterClass &MCR = " << Target.getName()
           << "MCRegisterClasses[" << RC.getQualifiedName() + "RegClassID];\n"
           << "  const ArrayRef<uint16_t> Order[] = {\n"
           << "    makeArrayRef(MCR.begin(), MCR.getNumRegs()";
        for (unsigned oi = 1, oe = RC.getNumOrders(); oi != oe; ++oi)
          if (RC.getOrder(oi).empty())
            OS << "),\n    ArrayRef<uint16_t>(";
          else
            OS << "),\n    makeArrayRef(AltOrder" << oi;
        OS << ")\n  };\n  const unsigned Select = " << RC.getName()
           << "AltOrderSelect(MF);\n  assert(Select < " << RC.getNumOrders()
           << ");\n  return Order[Select];\n}\n";
      }
    }

    // Now emit the actual value-initialized register class instances.
    OS << "namespace " << RegisterClasses[0]->Namespace
       << " {   // Register class instances\n";

    for (unsigned i = 0, e = RegisterClasses.size(); i != e; ++i) {
      const CodeGenRegisterClass &RC = *RegisterClasses[i];
      OS << "  extern const TargetRegisterClass "
         << RegisterClasses[i]->getName() << "RegClass = {\n    "
         << '&' << Target.getName() << "MCRegisterClasses[" << RC.getName()
         << "RegClassID],\n    "
         << "VTLists + " << VTSeqs.get(RC.VTs) << ",\n    "
         << RC.getName() << "SubClassMask,\n    SuperRegIdxSeqs + "
         << SuperRegIdxSeqs.get(SuperRegIdxLists[i]) << ",\n    ";
      if (RC.getSuperClasses().empty())
        OS << "NullRegClasses,\n    ";
      else
        OS << RC.getName() << "Superclasses,\n    ";
      if (RC.AltOrderSelect.empty())
        OS << "0\n";
      else
        OS << RC.getName() << "GetRawAllocationOrder\n";
      OS << "  };\n\n";
    }

    OS << "}\n";
  }

  OS << "\nnamespace {\n";
  OS << "  const TargetRegisterClass* const RegisterClasses[] = {\n";
  for (unsigned i = 0, e = RegisterClasses.size(); i != e; ++i)
    OS << "    &" << RegisterClasses[i]->getQualifiedName()
       << "RegClass,\n";
  OS << "  };\n";
  OS << "}\n";       // End of anonymous namespace...

  // Emit extra information about registers.
  const std::string &TargetName = Target.getName();
  OS << "\nstatic const TargetRegisterInfoDesc "
     << TargetName << "RegInfoDesc[] = { // Extra Descriptors\n";
  OS << "  { 0, 0 },\n";

  const std::vector<CodeGenRegister*> &Regs = RegBank.getRegisters();
  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
    const CodeGenRegister &Reg = *Regs[i];
    OS << "  { ";
    OS << Reg.CostPerUse << ", "
       << int(AllocatableRegs.count(Reg.TheDef)) << " },\n";
  }
  OS << "};\n";      // End of register descriptors...


  std::string ClassName = Target.getName() + "GenRegisterInfo";

  // Emit composeSubRegIndices
  if (!SubRegIndices.empty()) {
    OS << "unsigned " << ClassName
      << "::composeSubRegIndices(unsigned IdxA, unsigned IdxB) const {\n"
      << "  switch (IdxA) {\n"
      << "  default:\n    return IdxB;\n";
    for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
      bool Open = false;
      for (unsigned j = 0; j != e; ++j) {
        CodeGenSubRegIndex *Comp = SubRegIndices[i]->compose(SubRegIndices[j]);
        if (Comp && Comp != SubRegIndices[j]) {
          if (!Open) {
            OS << "  case " << SubRegIndices[i]->getQualifiedName()
              << ": switch(IdxB) {\n    default: return IdxB;\n";
            Open = true;
          }
          OS << "    case " << SubRegIndices[j]->getQualifiedName()
            << ": return " << Comp->getQualifiedName() << ";\n";
        }
      }
      if (Open)
        OS << "    }\n";
    }
    OS << "  }\n}\n\n";
  }

  // Emit getSubClassWithSubReg.
  if (!SubRegIndices.empty()) {
    OS << "const TargetRegisterClass *" << ClassName
       << "::getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx)"
       << " const {\n";
    // Use the smallest type that can hold a regclass ID with room for a
    // sentinel.
    if (RegisterClasses.size() < UINT8_MAX)
      OS << "  static const uint8_t Table[";
    else if (RegisterClasses.size() < UINT16_MAX)
      OS << "  static const uint16_t Table[";
    else
      PrintFatalError("Too many register classes.");
    OS << RegisterClasses.size() << "][" << SubRegIndices.size() << "] = {\n";
    for (unsigned rci = 0, rce = RegisterClasses.size(); rci != rce; ++rci) {
      const CodeGenRegisterClass &RC = *RegisterClasses[rci];
      OS << "    {\t// " << RC.getName() << "\n";
      for (unsigned sri = 0, sre = SubRegIndices.size(); sri != sre; ++sri) {
        CodeGenSubRegIndex *Idx = SubRegIndices[sri];
        if (CodeGenRegisterClass *SRC = RC.getSubClassWithSubReg(Idx))
          OS << "      " << SRC->EnumValue + 1 << ",\t// " << Idx->getName()
             << " -> " << SRC->getName() << "\n";
        else
          OS << "      0,\t// " << Idx->getName() << "\n";
      }
      OS << "    },\n";
    }
    OS << "  };\n  assert(RC && \"Missing regclass\");\n"
       << "  if (!Idx) return RC;\n  --Idx;\n"
       << "  assert(Idx < " << SubRegIndices.size() << " && \"Bad subreg\");\n"
       << "  unsigned TV = Table[RC->getID()][Idx];\n"
       << "  return TV ? getRegClass(TV - 1) : 0;\n}\n\n";
  }

  EmitRegUnitPressure(OS, RegBank, ClassName);

  // Emit the constructor of the class...
  OS << "extern const MCRegisterDesc " << TargetName << "RegDesc[];\n";
  OS << "extern const uint16_t " << TargetName << "RegDiffLists[];\n";
  OS << "extern const char " << TargetName << "RegStrings[];\n";
  OS << "extern const uint16_t " << TargetName << "RegUnitRoots[][2];\n";
  OS << "extern const uint16_t " << TargetName << "SubRegIdxLists[];\n";
  OS << "extern const uint16_t " << TargetName << "RegEncodingTable[];\n";

  EmitRegMappingTables(OS, Regs, true);

  OS << ClassName << "::\n" << ClassName
     << "(unsigned RA, unsigned DwarfFlavour, unsigned EHFlavour)\n"
     << "  : TargetRegisterInfo(" << TargetName << "RegInfoDesc"
     << ", RegisterClasses, RegisterClasses+" << RegisterClasses.size() <<",\n"
     << "             SubRegIndexNameTable, SubRegIndexLaneMaskTable) {\n"
     << "  InitMCRegisterInfo(" << TargetName << "RegDesc, "
     << Regs.size()+1 << ", RA,\n                     " << TargetName
     << "MCRegisterClasses, " << RegisterClasses.size() << ",\n"
     << "                     " << TargetName << "RegUnitRoots,\n"
     << "                     " << RegBank.getNumNativeRegUnits() << ",\n"
     << "                     " << TargetName << "RegDiffLists,\n"
     << "                     " << TargetName << "RegStrings,\n"
     << "                     " << TargetName << "SubRegIdxLists,\n"
     << "                     " << SubRegIndices.size() + 1 << ",\n"
     << "                     " << TargetName << "RegEncodingTable);\n\n";

  EmitRegMapping(OS, Regs, true);

  OS << "}\n\n";


  // Emit CalleeSavedRegs information.
  std::vector<Record*> CSRSets =
    Records.getAllDerivedDefinitions("CalleeSavedRegs");
  for (unsigned i = 0, e = CSRSets.size(); i != e; ++i) {
    Record *CSRSet = CSRSets[i];
    const SetTheory::RecVec *Regs = RegBank.getSets().expand(CSRSet);
    assert(Regs && "Cannot expand CalleeSavedRegs instance");

    // Emit the *_SaveList list of callee-saved registers.
    OS << "static const uint16_t " << CSRSet->getName()
       << "_SaveList[] = { ";
    for (unsigned r = 0, re = Regs->size(); r != re; ++r)
      OS << getQualifiedName((*Regs)[r]) << ", ";
    OS << "0 };\n";

    // Emit the *_RegMask bit mask of call-preserved registers.
    OS << "static const uint32_t " << CSRSet->getName()
       << "_RegMask[] = { ";
    printBitVectorAsHex(OS, RegBank.computeCoveredRegisters(*Regs), 32);
    OS << "};\n";
  }
  OS << "\n\n";

  OS << "} // End llvm namespace \n";
  OS << "#endif // GET_REGINFO_TARGET_DESC\n\n";
}
Example #10
ManagedValue SILGenFunction::emitExistentialErasure(
                            SILLocation loc,
                            CanType concreteFormalType,
                            const TypeLowering &concreteTL,
                            const TypeLowering &existentialTL,
                            ArrayRef<ProtocolConformanceRef> conformances,
                            SGFContext C,
                            llvm::function_ref<ManagedValue (SGFContext)> F,
                            bool allowEmbeddedNSError) {
  // Mark the needed conformances as used.
  for (auto conformance : conformances)
    SGM.useConformance(conformance);

  // If we're erasing to the 'Error' type, we might be able to get an NSError
  // representation more efficiently.
  auto &ctx = getASTContext();
  if (ctx.LangOpts.EnableObjCInterop && conformances.size() == 1 &&
      conformances[0].getRequirement() == ctx.getErrorDecl() &&
      ctx.getNSErrorDecl()) {
    auto nsErrorDecl = ctx.getNSErrorDecl();

    // If the concrete type is NSError or a subclass thereof, just erase it
    // directly.
    auto nsErrorType = nsErrorDecl->getDeclaredType()->getCanonicalType();
    if (nsErrorType->isExactSuperclassOf(concreteFormalType, nullptr)) {
      ManagedValue nsError =  F(SGFContext());
      if (nsErrorType != concreteFormalType) {
        nsError = ManagedValue(B.createUpcast(loc, nsError.getValue(),
                                              getLoweredType(nsErrorType)),
                               nsError.getCleanup());
      }
      return emitBridgedToNativeError(loc, nsError);
    }

    // If the concrete type is known to conform to _BridgedStoredNSError,
    // call the _nsError witness getter to extract the NSError directly,
    // then just erase the NSError.
    if (auto storedNSErrorConformance =
          SGM.getConformanceToBridgedStoredNSError(loc, concreteFormalType)) {
      auto nsErrorVar = SGM.getNSErrorRequirement(loc);
      if (!nsErrorVar) return emitUndef(loc, existentialTL.getLoweredType());

      ArrayRef<Substitution> nsErrorVarSubstitutions;

      // Devirtualize.  Maybe this should be done implicitly by
      // emitPropertyLValue?
      if (storedNSErrorConformance->isConcrete()) {
        if (auto witnessVar = storedNSErrorConformance->getConcrete()
                                          ->getWitness(nsErrorVar, nullptr)) {
          nsErrorVar = cast<VarDecl>(witnessVar.getDecl());
          nsErrorVarSubstitutions = witnessVar.getSubstitutions();
        }
      }

      auto nativeError = F(SGFContext());

      WritebackScope writebackScope(*this);
      auto nsError =
        emitRValueForPropertyLoad(loc, nativeError, concreteFormalType,
                                  /*super*/ false, nsErrorVar,
                                  nsErrorVarSubstitutions,
                                  AccessSemantics::Ordinary, nsErrorType,
                                  SGFContext())
        .getAsSingleValue(*this, loc);

      return emitBridgedToNativeError(loc, nsError);
    }

    // Otherwise, if it's an archetype, try calling the _getEmbeddedNSError()
    // witness to try to dig out the embedded NSError.  But don't do this
    // when we're being called recursively.
    if (isa<ArchetypeType>(concreteFormalType) && allowEmbeddedNSError) {
      auto contBB = createBasicBlock();
      auto isNotPresentBB = createBasicBlock();
      auto isPresentBB = createBasicBlock();

      // Call swift_stdlib_getErrorEmbeddedNSError to attempt to extract an
      // NSError from the value.
      auto getEmbeddedNSErrorFn = SGM.getGetErrorEmbeddedNSError(loc);
      if (!getEmbeddedNSErrorFn)
        return emitUndef(loc, existentialTL.getLoweredType());

      Substitution getEmbeddedNSErrorSubstitutions[1] = {
        Substitution(concreteFormalType, conformances)
      };

      ManagedValue concreteValue = F(SGFContext());
      ManagedValue potentialNSError =
        emitApplyOfLibraryIntrinsic(loc,
                                    getEmbeddedNSErrorFn,
                                    getEmbeddedNSErrorSubstitutions,
                                    { concreteValue.copy(*this, loc) },
                                    SGFContext())
          .getAsSingleValue(*this, loc);

      // We're going to consume 'concreteValue' in exactly one branch,
      // so kill its cleanup now and recreate it on both branches.
      (void) concreteValue.forward(*this);

      // Check whether we got an NSError back.
      std::pair<EnumElementDecl*, SILBasicBlock*> cases[] = {
        { ctx.getOptionalSomeDecl(), isPresentBB },
        { ctx.getOptionalNoneDecl(), isNotPresentBB }
      };
      B.createSwitchEnum(loc, potentialNSError.forward(*this),
                         /*default*/ nullptr, cases);

      // If we did get an NSError, emit the existential erasure from that
      // NSError.
      B.emitBlock(isPresentBB);
      SILValue branchArg;
      {
        // Don't allow cleanups to escape the conditional block.
        FullExpr presentScope(Cleanups, CleanupLocation::get(loc));
        enterDestroyCleanup(concreteValue.getValue());

        // Receive the error value.  It's typed as an 'AnyObject' for
        // layering reasons, so perform an unchecked cast down to NSError.
        SILType anyObjectTy =
          potentialNSError.getType().getAnyOptionalObjectType();
        SILValue nsError = isPresentBB->createBBArg(anyObjectTy);
        nsError = B.createUncheckedRefCast(loc, nsError, 
                                           getLoweredType(nsErrorType));

        branchArg = emitBridgedToNativeError(loc,
                                        emitManagedRValueWithCleanup(nsError))
                      .forward(*this);
      }
      B.createBranch(loc, contBB, branchArg);

      // If we did not get an NSError, just directly emit the existential.
      // Since this is a recursive call, make sure we don't end up in this
      // path again.
      B.emitBlock(isNotPresentBB);
      {
        FullExpr presentScope(Cleanups, CleanupLocation::get(loc));
        concreteValue = emitManagedRValueWithCleanup(concreteValue.getValue());
        branchArg = emitExistentialErasure(loc, concreteFormalType, concreteTL,
                                           existentialTL, conformances,
                                           SGFContext(),
                                           [&](SGFContext C) {
                                             return concreteValue;
                                           },
                                           /*allowEmbeddedNSError=*/false)
                      .forward(*this);
      }
      B.createBranch(loc, contBB, branchArg);

      // Continue.
      B.emitBlock(contBB);

      SILValue existentialResult =
        contBB->createBBArg(existentialTL.getLoweredType());
      return emitManagedRValueWithCleanup(existentialResult, existentialTL);
    }
  }

  switch (existentialTL.getLoweredType().getObjectType()
            .getPreferredExistentialRepresentation(SGM.M, concreteFormalType)) {
  case ExistentialRepresentation::None:
    llvm_unreachable("not an existential type");
  case ExistentialRepresentation::Metatype: {
    assert(existentialTL.isLoadable());

    SILValue metatype = F(SGFContext()).getUnmanagedValue();
    assert(metatype->getType().castTo<AnyMetatypeType>()->getRepresentation()
             == MetatypeRepresentation::Thick);

    auto upcast =
      B.createInitExistentialMetatype(loc, metatype,
                                      existentialTL.getLoweredType(),
                                      conformances);
    return ManagedValue::forUnmanaged(upcast);
  }
  case ExistentialRepresentation::Class: {
    assert(existentialTL.isLoadable());

    ManagedValue sub = F(SGFContext());
    SILValue v = B.createInitExistentialRef(loc,
                                            existentialTL.getLoweredType(),
                                            concreteFormalType,
                                            sub.getValue(),
                                            conformances);
    return ManagedValue(v, sub.getCleanup());
  }
  case ExistentialRepresentation::Boxed: {
    // Allocate the existential.
    auto *existential = B.createAllocExistentialBox(loc,
                                           existentialTL.getLoweredType(),
                                           concreteFormalType,
                                           conformances);
    auto *valueAddr = B.createProjectExistentialBox(loc,
                                           concreteTL.getLoweredType(),
                                           existential);
    // Initialize the concrete value in-place.
    ExistentialInitialization init(existential, valueAddr, concreteFormalType,
                                   ExistentialRepresentation::Boxed, *this);
    ManagedValue mv = F(SGFContext(&init));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init.getAddress());
      init.finishInitialization(*this);
    }
    
    return emitManagedRValueWithCleanup(existential);
  }
  case ExistentialRepresentation::Opaque: {
  
    // If the concrete value is a pseudogeneric archetype, first erase it to
    // its upper bound.
    auto anyObjectProto = getASTContext()
      .getProtocol(KnownProtocolKind::AnyObject);
    auto anyObjectTy = anyObjectProto
      ? anyObjectProto->getDeclaredType()->getCanonicalType()
      : CanType();
    auto eraseToAnyObject =
    [&, concreteFormalType, F](SGFContext C) -> ManagedValue {
      auto concreteValue = F(SGFContext());
      auto anyObjectConformance = SGM.SwiftModule
        ->lookupConformance(concreteFormalType, anyObjectProto, nullptr);
      ProtocolConformanceRef buf[] = {
        *anyObjectConformance,
      };
      
      auto asAnyObject = B.createInitExistentialRef(loc,
                                  SILType::getPrimitiveObjectType(anyObjectTy),
                                  concreteFormalType,
                                  concreteValue.getValue(),
                                  getASTContext().AllocateCopy(buf));
      return ManagedValue(asAnyObject, concreteValue.getCleanup());
    };
    
    auto concreteTLPtr = &concreteTL;
    if (this->F.getLoweredFunctionType()->isPseudogeneric()) {
      if (anyObjectTy && concreteFormalType->is<ArchetypeType>()) {
        concreteFormalType = anyObjectTy;
        concreteTLPtr = &getTypeLowering(anyObjectTy);
        F = eraseToAnyObject;
      }
    }

    // Allocate the existential.
    SILValue existential =
      getBufferForExprResult(loc, existentialTL.getLoweredType(), C);

    // Allocate the concrete value inside the container.
    SILValue valueAddr = B.createInitExistentialAddr(
                            loc, existential,
                            concreteFormalType,
                            concreteTLPtr->getLoweredType(),
                            conformances);
    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr, concreteFormalType,
                                      ExistentialRepresentation::Opaque,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }

    return manageBufferForExprResult(existential, existentialTL, C);
  }
  }
}
Example #11
//
// runMCDesc - Print out MC register descriptions.
//
void
RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
                               CodeGenRegBank &RegBank) {
  emitSourceFileHeader("MC Register Information", OS);

  OS << "\n#ifdef GET_REGINFO_MC_DESC\n";
  OS << "#undef GET_REGINFO_MC_DESC\n";

  const std::vector<CodeGenRegister*> &Regs = RegBank.getRegisters();

  // The lists of sub-registers, super-registers, and overlaps all go in the
  // same array. That allows us to share suffixes.
  typedef std::vector<const CodeGenRegister*> RegVec;

  // Differentially encoded lists.
  SequenceToOffsetTable<DiffVec> DiffSeqs;
  SmallVector<DiffVec, 4> SubRegLists(Regs.size());
  SmallVector<DiffVec, 4> SuperRegLists(Regs.size());
  SmallVector<DiffVec, 4> OverlapLists(Regs.size());
  SmallVector<DiffVec, 4> RegUnitLists(Regs.size());
  SmallVector<unsigned, 4> RegUnitInitScale(Regs.size());

  // Keep track of sub-register names as well. These are not differentially
  // encoded.
  typedef SmallVector<const CodeGenSubRegIndex*, 4> SubRegIdxVec;
  SequenceToOffsetTable<SubRegIdxVec> SubRegIdxSeqs;
  SmallVector<SubRegIdxVec, 4> SubRegIdxLists(Regs.size());

  SequenceToOffsetTable<std::string> RegStrings;

  // Precompute register lists for the SequenceToOffsetTable.
  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
    const CodeGenRegister *Reg = Regs[i];

    RegStrings.add(Reg->getName());

    // Compute the ordered sub-register list.
    SetVector<const CodeGenRegister*> SR;
    Reg->addSubRegsPreOrder(SR, RegBank);
    diffEncode(SubRegLists[i], Reg->EnumValue, SR.begin(), SR.end());
    DiffSeqs.add(SubRegLists[i]);

    // Compute the corresponding sub-register indexes.
    SubRegIdxVec &SRIs = SubRegIdxLists[i];
    for (unsigned j = 0, je = SR.size(); j != je; ++j)
      SRIs.push_back(Reg->getSubRegIndex(SR[j]));
    SubRegIdxSeqs.add(SRIs);

    // Super-registers are already computed.
    const RegVec &SuperRegList = Reg->getSuperRegs();
    diffEncode(SuperRegLists[i], Reg->EnumValue,
               SuperRegList.begin(), SuperRegList.end());
    DiffSeqs.add(SuperRegLists[i]);

    // The list of overlaps doesn't need to have any particular order, and Reg
    // itself must be omitted.
    DiffVec &OverlapList = OverlapLists[i];
    CodeGenRegister::Set OSet;
    Reg->computeOverlaps(OSet, RegBank);
    OSet.erase(Reg);
    diffEncode(OverlapList, Reg->EnumValue, OSet.begin(), OSet.end());
    DiffSeqs.add(OverlapList);

    // Differentially encode the register unit list, seeded by register number.
    // First compute a scale factor that allows more diff-lists to be reused:
    //
    //   D0 -> (S0, S1)
    //   D1 -> (S2, S3)
    //
    // A scale factor of 2 allows D0 and D1 to share a diff-list. The initial
    // value for the differential decoder is the register number multiplied by
    // the scale.
    //
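    //   A concrete instance (hypothetical numbering): if D0 is register
    //   number 1 with units (2, 3) and D1 is register number 2 with units
    //   (4, 5), a scale of 2 seeds the decoder at 2 for D0 and at 4 for D1,
    //   so both registers encode to the same diff-list (+0, +1) and can
    //   share a single table entry.
    //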
    // Check the neighboring registers for arithmetic progressions.
    unsigned ScaleA = ~0u, ScaleB = ~0u;
    ArrayRef<unsigned> RUs = Reg->getNativeRegUnits();
    if (i > 0 && Regs[i-1]->getNativeRegUnits().size() == RUs.size())
      ScaleB = RUs.front() - Regs[i-1]->getNativeRegUnits().front();
    if (i+1 != Regs.size() &&
        Regs[i+1]->getNativeRegUnits().size() == RUs.size())
      ScaleA = Regs[i+1]->getNativeRegUnits().front() - RUs.front();
    unsigned Scale = std::min(ScaleB, ScaleA);
    // Default the scale to 0 if it can't be encoded in 4 bits.
    if (Scale >= 16)
      Scale = 0;
    RegUnitInitScale[i] = Scale;
    DiffSeqs.add(diffEncode(RegUnitLists[i], Scale * Reg->EnumValue, RUs));
  }

  // Compute the final layout of the sequence table.
  DiffSeqs.layout();
  SubRegIdxSeqs.layout();

  OS << "namespace llvm {\n\n";

  const std::string &TargetName = Target.getName();

  // Emit the shared table of differential lists.
  OS << "extern const uint16_t " << TargetName << "RegDiffLists[] = {\n";
  DiffSeqs.emit(OS, printDiff16);
  OS << "};\n\n";

  // Emit the table of sub-register indexes.
  OS << "extern const uint16_t " << TargetName << "SubRegIdxLists[] = {\n";
  SubRegIdxSeqs.emit(OS, printSubRegIndex);
  OS << "};\n\n";

  // Emit the string table.
  RegStrings.layout();
  OS << "extern const char " << TargetName << "RegStrings[] = {\n";
  RegStrings.emit(OS, printChar);
  OS << "};\n\n";

  OS << "extern const MCRegisterDesc " << TargetName
     << "RegDesc[] = { // Descriptors\n";
  OS << "  { " << RegStrings.get("") << ", 0, 0, 0, 0, 0 },\n";

  // Emit the register descriptors now.
  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
    const CodeGenRegister *Reg = Regs[i];
    OS << "  { " << RegStrings.get(Reg->getName()) << ", "
       << DiffSeqs.get(OverlapLists[i]) << ", "
       << DiffSeqs.get(SubRegLists[i]) << ", "
       << DiffSeqs.get(SuperRegLists[i]) << ", "
       << SubRegIdxSeqs.get(SubRegIdxLists[i]) << ", "
       << (DiffSeqs.get(RegUnitLists[i])*16 + RegUnitInitScale[i]) << " },\n";
  }
  OS << "};\n\n";      // End of register descriptors...

  // Emit the table of register unit roots. Each regunit has one or two root
  // registers.
  OS << "extern const uint16_t " << TargetName << "RegUnitRoots[][2] = {\n";
  for (unsigned i = 0, e = RegBank.getNumNativeRegUnits(); i != e; ++i) {
    ArrayRef<const CodeGenRegister*> Roots = RegBank.getRegUnit(i).getRoots();
    assert(!Roots.empty() && "All regunits must have a root register.");
    assert(Roots.size() <= 2 && "More than two roots not supported yet.");
    OS << "  { " << getQualifiedName(Roots.front()->TheDef);
    for (unsigned r = 1; r != Roots.size(); ++r)
      OS << ", " << getQualifiedName(Roots[r]->TheDef);
    OS << " },\n";
  }
  OS << "};\n\n";

  ArrayRef<CodeGenRegisterClass*> RegisterClasses = RegBank.getRegClasses();

  // Loop over all of the register classes... emitting each one.
  OS << "namespace {     // Register classes...\n";

  // Emit the register enum value arrays for each RegisterClass
  for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
    const CodeGenRegisterClass &RC = *RegisterClasses[rc];
    ArrayRef<Record*> Order = RC.getOrder();

    // Give the register class a legal C name if it's anonymous.
    std::string Name = RC.getName();

    // Emit the register list now.
    OS << "  // " << Name << " Register Class...\n"
       << "  const uint16_t " << Name
       << "[] = {\n    ";
    for (unsigned i = 0, e = Order.size(); i != e; ++i) {
      Record *Reg = Order[i];
      OS << getQualifiedName(Reg) << ", ";
    }
    OS << "\n  };\n\n";

    OS << "  // " << Name << " Bit set.\n"
       << "  const uint8_t " << Name
       << "Bits[] = {\n    ";
    BitVectorEmitter BVE;
    for (unsigned i = 0, e = Order.size(); i != e; ++i) {
      Record *Reg = Order[i];
      BVE.add(Target.getRegBank().getReg(Reg)->EnumValue);
    }
    BVE.print(OS);
    OS << "\n  };\n\n";

  }
  OS << "}\n\n";

  OS << "extern const MCRegisterClass " << TargetName
     << "MCRegisterClasses[] = {\n";

  for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
    const CodeGenRegisterClass &RC = *RegisterClasses[rc];

    // Asserts to make sure values will fit in table assuming types from
    // MCRegisterInfo.h
    assert((RC.SpillSize/8) <= 0xffff && "SpillSize too large.");
    assert((RC.SpillAlignment/8) <= 0xffff && "SpillAlignment too large.");
    assert(RC.CopyCost >= -128 && RC.CopyCost <= 127 && "Copy cost too large.");

    OS << "  { " << '\"' << RC.getName() << "\", "
       << RC.getName() << ", " << RC.getName() << "Bits, "
       << RC.getOrder().size() << ", sizeof(" << RC.getName() << "Bits), "
       << RC.getQualifiedName() + "RegClassID" << ", "
       << RC.SpillSize/8 << ", "
       << RC.SpillAlignment/8 << ", "
       << RC.CopyCost << ", "
       << RC.Allocatable << " },\n";
  }

  OS << "};\n\n";

  ArrayRef<CodeGenSubRegIndex*> SubRegIndices = RegBank.getSubRegIndices();

  EmitRegMappingTables(OS, Regs, false);

  // Emit Reg encoding table
  OS << "extern const uint16_t " << TargetName;
  OS << "RegEncodingTable[] = {\n";
  // Add entry for NoRegister
  OS << "  0,\n";
  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
    Record *Reg = Regs[i]->TheDef;
    BitsInit *BI = Reg->getValueAsBitsInit("HWEncoding");
    uint64_t Value = 0;
    for (unsigned b = 0, be = BI->getNumBits(); b != be; ++b) {
      if (BitInit *B = dyn_cast<BitInit>(BI->getBit(b)))
        Value |= (uint64_t)B->getValue() << b;
    }
    OS << "  " << Value << ",\n";
  }
  OS << "};\n";       // End of HW encoding table

  // MCRegisterInfo initialization routine.
  OS << "static inline void Init" << TargetName
     << "MCRegisterInfo(MCRegisterInfo *RI, unsigned RA, "
     << "unsigned DwarfFlavour = 0, unsigned EHFlavour = 0) {\n"
     << "  RI->InitMCRegisterInfo(" << TargetName << "RegDesc, "
     << Regs.size()+1 << ", RA, " << TargetName << "MCRegisterClasses, "
     << RegisterClasses.size() << ", "
     << TargetName << "RegUnitRoots, "
     << RegBank.getNumNativeRegUnits() << ", "
     << TargetName << "RegDiffLists, "
     << TargetName << "RegStrings, "
     << TargetName << "SubRegIdxLists, "
     << (SubRegIndices.size() + 1) << ",\n"
     << "  " << TargetName << "RegEncodingTable);\n\n";

  EmitRegMapping(OS, Regs, false);

  OS << "}\n\n";

  OS << "} // End llvm namespace \n";
  OS << "#endif // GET_REGINFO_MC_DESC\n\n";
}
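// A minimal sketch (not the MCRegisterInfo implementation) of how a consumer
// could walk one of the diff-lists emitted above, assuming the encoding the
// emitter implies: a seed value followed by 16-bit deltas, with a zero delta
// terminating the list. The function name and signature are illustrative.
#include <cstdint>
#include <vector>

static std::vector<uint16_t> walkDiffList(uint16_t InitVal,
                                          const uint16_t *DiffList) {
  std::vector<uint16_t> Values;
  uint16_t Val = InitVal;
  for (const uint16_t *D = DiffList; *D; ++D) {
    Val += *D;            // Each entry is a delta from the previous value.
    Values.push_back(Val);
  }
  return Values;
}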
Example #12
/// AggregateAvailableValues - Given a bunch of primitive subelement values,
/// build out the right aggregate type (LoadTy) by emitting tuple and struct
/// instructions as necessary.
static SILValue
AggregateAvailableValues(SILInstruction *Inst, SILType LoadTy,
                         SILValue Address,
                         ArrayRef<std::pair<SILValue, unsigned>> AvailableValues,
                         unsigned FirstElt) {
  assert(LoadTy.isObject());
  SILModule &M = Inst->getModule();
  
  // Check to see if the requested value is fully available, as an aggregate.
  // This is a super-common case for single-element structs, but is also a
  // general answer for arbitrary structs and tuples as well.
  if (FirstElt < AvailableValues.size()) {  // #Elements may be zero.
    SILValue FirstVal = AvailableValues[FirstElt].first;
    if (FirstVal && AvailableValues[FirstElt].second == 0 &&
        FirstVal->getType() == LoadTy) {
      // If the first element of this value is available, check any extra ones
      // before declaring success.
      bool AllMatch = true;
      for (unsigned i = 0, e = getNumSubElements(LoadTy, M); i != e; ++i)
        if (AvailableValues[FirstElt+i].first != FirstVal ||
            AvailableValues[FirstElt+i].second != i) {
          AllMatch = false;
          break;
        }
      
      if (AllMatch)
        return FirstVal;
    }
  }
  
  
  SILBuilderWithScope B(Inst);
  
  if (TupleType *TT = LoadTy.getAs<TupleType>()) {
    SmallVector<SILValue, 4> ResultElts;
    
    for (unsigned EltNo : indices(TT->getElements())) {
      SILType EltTy = LoadTy.getTupleElementType(EltNo);
      unsigned NumSubElt = getNumSubElements(EltTy, M);
      
      // If we are missing any of the available values in this struct element,
      // compute an address to load from.
      SILValue EltAddr;
      if (anyMissing(FirstElt, NumSubElt, AvailableValues))
        EltAddr = B.createTupleElementAddr(Inst->getLoc(), Address, EltNo,
                                           EltTy.getAddressType());
      
      ResultElts.push_back(AggregateAvailableValues(Inst, EltTy, EltAddr,
                                                    AvailableValues, FirstElt));
      FirstElt += NumSubElt;
    }
    
    return B.createTuple(Inst->getLoc(), LoadTy, ResultElts);
  }
  
  // Extract struct elements from fully referenceable structs.
  if (auto *SD = getFullyReferenceableStruct(LoadTy)) {
    SmallVector<SILValue, 4> ResultElts;
    
    for (auto *FD : SD->getStoredProperties()) {
      SILType EltTy = LoadTy.getFieldType(FD, M);
      unsigned NumSubElt = getNumSubElements(EltTy, M);
      
      // If we are missing any of the available values in this struct element,
      // compute an address to load from.
      SILValue EltAddr;
      if (anyMissing(FirstElt, NumSubElt, AvailableValues))
        EltAddr = B.createStructElementAddr(Inst->getLoc(), Address, FD,
                                            EltTy.getAddressType());
      
      ResultElts.push_back(AggregateAvailableValues(Inst, EltTy, EltAddr,
                                                    AvailableValues, FirstElt));
      FirstElt += NumSubElt;
    }
    return B.createStruct(Inst->getLoc(), LoadTy, ResultElts);
  }
  
  // Otherwise, we have a simple primitive.  If the value is available, use it,
  // otherwise emit a load of the value.
  auto Val = AvailableValues[FirstElt];
  if (!Val.first)
    return B.createLoad(Inst->getLoc(), Address);
  
  SILValue EltVal = ExtractSubElement(Val.first, Val.second, B, Inst->getLoc());
  // It must be the same type as LoadTy if available.
  assert(EltVal->getType() == LoadTy &&
         "Subelement types mismatch");
  return EltVal;
}
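// A hedged illustration (hypothetical types): for LoadTy = (Int, (Float,
// Float)) the flattened subelements are numbered 0:Int, 1:Float, 2:Float.
// If AvailableValues[FirstElt..FirstElt+2] all name the same value V with
// subelement indices 0, 1, 2, the fast path above returns V directly;
// otherwise the tuple path recurses element by element, loading any
// subelement that is not available.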
Example #13
/// Return true if we can evaluate the specified expression tree if the vector
/// elements were shuffled in a different order.
static bool CanEvaluateShuffled(Value *V, ArrayRef<int> Mask,
                                unsigned Depth = 5) {
  // We can always reorder the elements of a constant.
  if (isa<Constant>(V))
    return true;

  // We won't reorder vector arguments. No IPO here.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Two users may expect different orders of the elements. Don't try it.
  if (!I->hasOneUse())
    return false;

  if (Depth == 0) return false;

  switch (I->getOpcode()) {
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::GetElementPtr: {
      for (Value *Operand : I->operands()) {
        if (!CanEvaluateShuffled(Operand, Mask, Depth-1))
          return false;
      }
      return true;
    }
    case Instruction::InsertElement: {
      ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(2));
      if (!CI) return false;
      int ElementNumber = CI->getLimitedValue();

      // Verify that 'CI' does not occur twice in Mask. A single 'insertelement'
      // can't put an element into multiple indices.
      bool SeenOnce = false;
      for (int i = 0, e = Mask.size(); i != e; ++i) {
        if (Mask[i] == ElementNumber) {
          if (SeenOnce)
            return false;
          SeenOnce = true;
        }
      }
      return CanEvaluateShuffled(I->getOperand(0), Mask, Depth-1);
    }
  }
  return false;
}
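// A hedged illustration: with Mask = {1, 0}, an expression such as
// add <2 x i32> %a, %b can be evaluated with shuffled operands, because
// shuffle(add(a, b), mask) == add(shuffle(a, mask), shuffle(b, mask))
// holds lane-wise for every opcode accepted above.
Example #14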
bool LiveRangeCalc::findReachingDefs(LiveRange &LR, MachineBasicBlock &UseMBB,
                                     SlotIndex Use, unsigned PhysReg,
                                     ArrayRef<SlotIndex> Undefs) {
  unsigned UseMBBNum = UseMBB.getNumber();

  // Block numbers where LR should be live-in.
  SmallVector<unsigned, 16> WorkList(1, UseMBBNum);

  // Remember if we have seen more than one value.
  bool UniqueVNI = true;
  VNInfo *TheVNI = nullptr;

  bool FoundUndef = false;

  // Using Seen as a visited set, perform a BFS for all reaching defs.
  for (unsigned i = 0; i != WorkList.size(); ++i) {
    MachineBasicBlock *MBB = MF->getBlockNumbered(WorkList[i]);

#ifndef NDEBUG
    if (Undefs.size() > 0 && MBB->pred_empty()) {
      MBB->getParent()->verify();
      errs() << "Use of " << PrintReg(PhysReg)
             << " does not have a corresponding definition on every path:\n";
      const MachineInstr *MI = Indexes->getInstructionFromIndex(Use);
      if (MI != nullptr)
        errs() << Use << " " << *MI;
      llvm_unreachable("Use not jointly dominated by defs.");
    }

    if (TargetRegisterInfo::isPhysicalRegister(PhysReg) &&
        !MBB->isLiveIn(PhysReg)) {
      MBB->getParent()->verify();
      const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
      errs() << "The register " << PrintReg(PhysReg, TRI)
             << " needs to be live in to BB#" << MBB->getNumber()
             << ", but is missing from the live-in list.\n";
      llvm_unreachable("Invalid global physical register");
    }
#endif
    FoundUndef |= MBB->pred_empty();

    for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
       MachineBasicBlock *Pred = *PI;

       // Is this a known live-out block?
       if (Seen.test(Pred->getNumber())) {
         if (VNInfo *VNI = Map[Pred].first) {
           if (TheVNI && TheVNI != VNI)
             UniqueVNI = false;
           TheVNI = VNI;
         }
         continue;
       }

       SlotIndex Start, End;
       std::tie(Start, End) = Indexes->getMBBRange(Pred);

       // First time we see Pred.  Try to determine the live-out value, but set
       // it as null if Pred is live-through with an unknown value.
       auto EP = LR.extendInBlock(Undefs, Start, End);
       VNInfo *VNI = EP.first;
       FoundUndef |= EP.second;
       setLiveOutValue(Pred, VNI);
       if (VNI) {
         if (TheVNI && TheVNI != VNI)
           UniqueVNI = false;
         TheVNI = VNI;
       }
       if (VNI || EP.second)
         continue;

       // No, we need a live-in value for Pred as well
       if (Pred != &UseMBB)
         WorkList.push_back(Pred->getNumber());
       else
         // Loopback to UseMBB, so value is really live through.
         Use = SlotIndex();
    }
  }

  LiveIn.clear();
  FoundUndef |= (TheVNI == nullptr);
  if (Undefs.size() > 0 && FoundUndef)
    UniqueVNI = false;

  // Both updateSSA() and LiveRangeUpdater benefit from ordered blocks, but
  // neither requires it. Skip the sorting overhead for small updates.
  if (WorkList.size() > 4)
    array_pod_sort(WorkList.begin(), WorkList.end());

  // If a unique reaching def was found, blit in the live ranges immediately.
  if (UniqueVNI) {
    assert(TheVNI != nullptr);
    LiveRangeUpdater Updater(&LR);
    for (unsigned BN : WorkList) {
      SlotIndex Start, End;
      std::tie(Start, End) = Indexes->getMBBRange(BN);
      // Trim the live range in UseMBB.
      if (BN == UseMBBNum && Use.isValid())
        End = Use;
      else
        Map[MF->getBlockNumbered(BN)] = LiveOutPair(TheVNI, nullptr);
      Updater.add(Start, End, TheVNI);
    }
    return true;
  }

  // Prepare the defined/undefined bit vectors.
  auto EF = EntryInfoMap.find(&LR);
  if (EF == EntryInfoMap.end()) {
    unsigned N = MF->getNumBlockIDs();
    EF = EntryInfoMap.insert({&LR, {BitVector(), BitVector()}}).first;
    EF->second.first.resize(N);
    EF->second.second.resize(N);
  }
  BitVector &DefOnEntry = EF->second.first;
  BitVector &UndefOnEntry = EF->second.second;

  // Multiple values were found, so transfer the work list to the LiveIn array
  // where UpdateSSA will use it as a work list.
  LiveIn.reserve(WorkList.size());
  for (unsigned BN : WorkList) {
    MachineBasicBlock *MBB = MF->getBlockNumbered(BN);
    if (Undefs.size() > 0 &&
        !isDefOnEntry(LR, Undefs, *MBB, DefOnEntry, UndefOnEntry))
      continue;
    addLiveInBlock(LR, DomTree->getNode(MBB));
    if (MBB == &UseMBB)
      LiveIn.back().Kill = Use;
  }

  return false;
}
Example #15
void NonNullParamChecker::checkPreCall(const CallEvent &Call,
                                       CheckerContext &C) const {
  const Decl *FD = Call.getDecl();
  if (!FD)
    return;

  // Merge all non-null attributes
  unsigned NumArgs = Call.getNumArgs();
  llvm::SmallBitVector AttrNonNull(NumArgs);
  for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
    if (!NonNull->args_size()) {
      AttrNonNull.set(0, NumArgs);
      break;
    }
    for (unsigned Val : NonNull->args()) {
      if (Val >= NumArgs)
        continue;
      AttrNonNull.set(Val);
    }
  }

  ProgramStateRef state = C.getState();

  CallEvent::param_type_iterator TyI = Call.param_type_begin(),
                                 TyE = Call.param_type_end();

  for (unsigned idx = 0; idx < NumArgs; ++idx) {

    // Check if the parameter is a reference. We want to report when a
    // reference to a null pointer is passed as a parameter.
    bool haveRefTypeParam = false;
    if (TyI != TyE) {
      haveRefTypeParam = (*TyI)->isReferenceType();
      TyI++;
    }

    bool haveAttrNonNull = AttrNonNull[idx];
    if (!haveAttrNonNull) {
      // Check if the parameter is also marked 'nonnull'.
      ArrayRef<ParmVarDecl*> parms = Call.parameters();
      if (idx < parms.size())
        haveAttrNonNull = parms[idx]->hasAttr<NonNullAttr>();
    }

    if (!haveRefTypeParam && !haveAttrNonNull)
      continue;

    // If the value is unknown or undefined, we can't perform this check.
    const Expr *ArgE = Call.getArgExpr(idx);
    SVal V = Call.getArgSVal(idx);
    Optional<DefinedSVal> DV = V.getAs<DefinedSVal>();
    if (!DV)
      continue;

    // Process the case when the argument is not a location.
    assert(!haveRefTypeParam || DV->getAs<Loc>());

    if (haveAttrNonNull && !DV->getAs<Loc>()) {
      // If the argument is a union type, we want to handle a potential
      // transparent_union GCC extension.
      if (!ArgE)
        continue;

      QualType T = ArgE->getType();
      const RecordType *UT = T->getAsUnionType();
      if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
        continue;

      if (Optional<nonloc::CompoundVal> CSV =
              DV->getAs<nonloc::CompoundVal>()) {
        nonloc::CompoundVal::iterator CSV_I = CSV->begin();
        assert(CSV_I != CSV->end());
        V = *CSV_I;
        DV = V.getAs<DefinedSVal>();
        assert(++CSV_I == CSV->end());
        // FIXME: Handle (some_union){ some_other_union_val }, which turns into
        // a LazyCompoundVal inside a CompoundVal.
        if (!V.getAs<Loc>())
          continue;
        // Retrieve the corresponding expression.
        if (const CompoundLiteralExpr *CE = dyn_cast<CompoundLiteralExpr>(ArgE))
          if (const InitListExpr *IE =
                dyn_cast<InitListExpr>(CE->getInitializer()))
             ArgE = dyn_cast<Expr>(*(IE->begin()));

      } else {
        // FIXME: Handle LazyCompoundVals?
        continue;
      }
    }

    ConstraintManager &CM = C.getConstraintManager();
    ProgramStateRef stateNotNull, stateNull;
    std::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);

    if (stateNull) {
      if (!stateNotNull) {
        // Generate an error node.  Check for a null node in case
        // we cache out.
        if (ExplodedNode *errorNode = C.generateErrorNode(stateNull)) {

          std::unique_ptr<BugReport> R;
          if (haveAttrNonNull)
            R = genReportNullAttrNonNull(errorNode, ArgE);
          else if (haveRefTypeParam)
            R = genReportReferenceToNullPointer(errorNode, ArgE);

          // Highlight the range of the argument that was null.
          R->addRange(Call.getArgSourceRange(idx));

          // Emit the bug report.
          C.emitReport(std::move(R));
        }

        // Always return.  Either we cached out or we just emitted an error.
        return;
      }
      if (ExplodedNode *N = C.generateSink(stateNull, C.getPredecessor())) {
        ImplicitNullDerefEvent event = {
            V, false, N, &C.getBugReporter(),
            /*IsDirectDereference=*/haveRefTypeParam};
        dispatchEvent(event);
      }
    }

    // If a pointer value passed the check we should assume that it is
    // indeed not null from this point forward.
    assert(stateNotNull);
    state = stateNotNull;
  }

  // If we reach here, all of the arguments passed the nonnull check.
  // If 'state' has been updated, generate a new node.
  C.addTransition(state);
}
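// A hedged illustration of the transparent_union case handled above
// (hypothetical declarations):
//
//   typedef union { int *ip; float *fp; } U
//       __attribute__((transparent_union));
//   void f(U u) __attribute__((nonnull));
//
// A call f(NULL) passes the null pointer wrapped in a one-field compound
// value, so the checker unwraps the nonloc::CompoundVal to recover the
// underlying pointer before running the null check.
Example #16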
unsigned BasicTTI::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                         ArrayRef<Type *> Tys) const {
  unsigned ISD = 0;
  switch (IID) {
  default: {
    // Assume that we need to scalarize this intrinsic.
    unsigned ScalarizationCost = 0;
    unsigned ScalarCalls = 1;
    if (RetTy->isVectorTy()) {
      ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
      ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
    }
    for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
      if (Tys[i]->isVectorTy()) {
        ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
        ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
      }
    }

    return ScalarCalls + ScalarizationCost;
  }
  // Look for intrinsics that can be lowered directly or turned into a scalar
  // intrinsic call.
  case Intrinsic::sqrt:    ISD = ISD::FSQRT;  break;
  case Intrinsic::sin:     ISD = ISD::FSIN;   break;
  case Intrinsic::cos:     ISD = ISD::FCOS;   break;
  case Intrinsic::exp:     ISD = ISD::FEXP;   break;
  case Intrinsic::exp2:    ISD = ISD::FEXP2;  break;
  case Intrinsic::log:     ISD = ISD::FLOG;   break;
  case Intrinsic::log10:   ISD = ISD::FLOG10; break;
  case Intrinsic::log2:    ISD = ISD::FLOG2;  break;
  case Intrinsic::fabs:    ISD = ISD::FABS;   break;
  case Intrinsic::floor:   ISD = ISD::FFLOOR; break;
  case Intrinsic::ceil:    ISD = ISD::FCEIL;  break;
  case Intrinsic::trunc:   ISD = ISD::FTRUNC; break;
  case Intrinsic::rint:    ISD = ISD::FRINT;  break;
  case Intrinsic::pow:     ISD = ISD::FPOW;   break;
  case Intrinsic::fma:     ISD = ISD::FMA;    break;
  case Intrinsic::fmuladd: ISD = ISD::FMA;    break; // FIXME: mul + add?
  }

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(RetTy);

  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1.
    // If the type is split to multiple registers, assume that there is some
    // overhead to this.
    // TODO: Once we have extract/insert subvector cost we need to use them.
    if (LT.first > 1)
      return LT.first * 2;
    return LT.first * 1;
  }

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // If the operation is custom lowered, then assume that
    // the code is twice as expensive.
    return LT.first * 2;
  }

  // Else, assume that we need to scalarize this intrinsic. For math builtins
  // this will emit a costly libcall, adding call overhead and spills. Make it
  // very expensive.
  if (RetTy->isVectorTy()) {
    unsigned Num = RetTy->getVectorNumElements();
    unsigned Cost = TopTTI->getIntrinsicInstrCost(IID, RetTy->getScalarType(),
                                                  Tys);
    return 10 * Cost * Num;
  }

  // This is going to be turned into a library call, make it expensive.
  return 10;
}
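// A worked example under assumed numbers: for a <4 x float> llvm.sqrt on a
// target where FSQRT is legal for v4f32, LT.first is 1 and the returned cost
// is 1. If v4f32 had to be split across two registers, LT.first would be 2
// and the cost 4. If FSQRT expands, the scalarized fallback charges
// 10 * scalar-cost * 4 to reflect the expected libcall overhead.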
Example #17
/// Produce note diagnostics for a jump into a protected scope.
void JumpScopeChecker::NoteJumpIntoScopes(ArrayRef<unsigned> ToScopes) {
  assert(!ToScopes.empty());
  for (unsigned I = 0, E = ToScopes.size(); I != E; ++I)
    if (Scopes[ToScopes[I]].InDiag)
      S.Diag(Scopes[ToScopes[I]].Loc, Scopes[ToScopes[I]].InDiag);
}
Example #18
template <class ELFT>
void elf::ObjectFile<ELFT>::initializeSections(
    DenseSet<CachedHashStringRef> &ComdatGroups) {
  const ELFFile<ELFT> &Obj = this->getObj();

  ArrayRef<Elf_Shdr> ObjSections =
      check(this->getObj().sections(), toString(this));
  uint64_t Size = ObjSections.size();
  this->Sections.resize(Size);
  this->SectionStringTable =
      check(Obj.getSectionStringTable(ObjSections), toString(this));

  for (size_t I = 0, E = ObjSections.size(); I < E; I++) {
    if (this->Sections[I] == &InputSection::Discarded)
      continue;
    const Elf_Shdr &Sec = ObjSections[I];

    // SHF_EXCLUDE'ed sections are discarded by the linker. However,
    // if -r is given, we'll let the final link discard such sections.
    // This is compatible with GNU.
    if ((Sec.sh_flags & SHF_EXCLUDE) && !Config->Relocatable) {
      this->Sections[I] = &InputSection::Discarded;
      continue;
    }

    switch (Sec.sh_type) {
    case SHT_GROUP: {
      // De-duplicate section groups by their signatures.
      StringRef Signature = getShtGroupSignature(ObjSections, Sec);
      bool IsNew = ComdatGroups.insert(CachedHashStringRef(Signature)).second;
      this->Sections[I] = &InputSection::Discarded;

      // If it is a new section group, we want to keep group members.
      // Group leader sections, which contain indices of group members, are
      // discarded because they are useless beyond this point. The only
      // exception is the -r option because in order to produce re-linkable
      // object files, we want to pass through basically everything.
      if (IsNew) {
        if (Config->Relocatable)
          this->Sections[I] = createInputSection(Sec);
        continue;
      }

      // Otherwise, discard group members.
      for (uint32_t SecIndex : getShtGroupEntries(Sec)) {
        if (SecIndex >= Size)
          fatal(toString(this) +
                ": invalid section index in group: " + Twine(SecIndex));
        this->Sections[SecIndex] = &InputSection::Discarded;
      }
      break;
    }
    case SHT_SYMTAB:
      this->initSymtab(ObjSections, &Sec);
      break;
    case SHT_SYMTAB_SHNDX:
      this->SymtabSHNDX =
          check(Obj.getSHNDXTable(Sec, ObjSections), toString(this));
      break;
    case SHT_STRTAB:
    case SHT_NULL:
      break;
    default:
      this->Sections[I] = createInputSection(Sec);
    }

    // .ARM.exidx sections have a reverse dependency on the InputSection they
    // reference through a SHF_LINK_ORDER dependency; the target section is
    // identified by the sh_link field.
    if (Sec.sh_flags & SHF_LINK_ORDER) {
      if (Sec.sh_link >= this->Sections.size())
        fatal(toString(this) + ": invalid sh_link index: " +
              Twine(Sec.sh_link));
      this->Sections[Sec.sh_link]->DependentSections.push_back(
          this->Sections[I]);
    }
  }
}
Example #19
void OMPExecutableDirective::setClauses(ArrayRef<OMPClause *> Clauses) {
  assert(Clauses.size() == getNumClauses() &&
         "Number of clauses is not the same as the preallocated buffer");
  std::copy(Clauses.begin(), Clauses.end(), getClauses().begin());
}
Example #20
template <class ELFT>
InputSectionBase *
elf::ObjectFile<ELFT>::createInputSection(const Elf_Shdr &Sec) {
  StringRef Name = getSectionName(Sec);

  switch (Sec.sh_type) {
  case SHT_ARM_ATTRIBUTES:
    // FIXME: ARM meta-data section. Retain the first attribute section
    // we see. The eglibc ARM dynamic loaders require the presence of an
    // attribute section for dlopen to work.
    // In a full implementation we would merge all attribute sections.
    if (InX::ARMAttributes == nullptr) {
      InX::ARMAttributes = make<InputSection>(this, &Sec, Name);
      return InX::ARMAttributes;
    }
    return &InputSection::Discarded;
  case SHT_RELA:
  case SHT_REL: {
    // Find the relocation target section and associate this
    // section with it. The target can be discarded, for example
    // if it is a duplicated member of a SHT_GROUP section; in that
    // case we do not create or process the relocation section.
    InputSectionBase *Target = getRelocTarget(Sec);
    if (!Target)
      return nullptr;

    // This section contains relocation information.
    // If -r is given, we do not interpret or apply relocation
    // but just copy relocation sections to output.
    if (Config->Relocatable)
      return make<InputSection>(this, &Sec, Name);

    if (Target->FirstRelocation)
      fatal(toString(this) +
            ": multiple relocation sections to one section are not supported");

    // Mergeable sections with relocations are tricky because relocations
    // need to be taken into account when comparing section contents for
    // merging. It's not worth supporting such mergeable sections because
    // they are rare and it'd complicates the internal design (we usually
    // have to determine if two sections are mergeable early in the link
    // process much before applying relocations). We simply handle mergeable
    // sections with relocations as non-mergeable.
    if (auto *MS = dyn_cast<MergeInputSection>(Target)) {
      Target = toRegularSection(MS);
      this->Sections[Sec.sh_info] = Target;
    }

    size_t NumRelocations;
    if (Sec.sh_type == SHT_RELA) {
      ArrayRef<Elf_Rela> Rels =
          check(this->getObj().relas(&Sec), toString(this));
      Target->FirstRelocation = Rels.begin();
      NumRelocations = Rels.size();
      Target->AreRelocsRela = true;
    } else {
      ArrayRef<Elf_Rel> Rels = check(this->getObj().rels(&Sec), toString(this));
      Target->FirstRelocation = Rels.begin();
      NumRelocations = Rels.size();
      Target->AreRelocsRela = false;
    }
    assert(isUInt<31>(NumRelocations));
    Target->NumRelocations = NumRelocations;

    // Relocation sections processed by the linker are usually removed
    // from the output, so we return nullptr in the normal case.
    // However, if -emit-relocs is given, we need to leave them in the output.
    // (Some post link analysis tools need this information.)
    if (Config->EmitRelocs) {
      InputSection *RelocSec = make<InputSection>(this, &Sec, Name);
      // We will not emit a relocation section if the target was discarded.
      Target->DependentSections.push_back(RelocSec);
      return RelocSec;
    }
    return nullptr;
  }
  }

  // The GNU linker uses .note.GNU-stack section as a marker indicating
  // that the code in the object file does not expect that the stack is
  // executable (in terms of NX bit). If all input files have the marker,
  // the GNU linker adds a PT_GNU_STACK segment to tell the loader to
  // make the stack non-executable. Most object files have this section as
  // of 2017.
  //
  // But making the stack non-executable is a norm today for security
  // reasons. Failure to do so may result in a serious security issue.
  // Therefore, we make LLD always add PT_GNU_STACK unless it is
  // explicitly told to do otherwise (by -z execstack). Because the stack
  // executable-ness is controlled solely by command line options,
  // .note.GNU-stack sections are simply ignored.
  if (Name == ".note.GNU-stack")
    return &InputSection::Discarded;

  // Split stacks are a feature to support a discontiguous stack. At least
  // as of 2017, it seems that the feature is not being used widely.
  // Only GNU gold supports that. We don't. For the details about that,
  // see https://gcc.gnu.org/wiki/SplitStacks
  if (Name == ".note.GNU-split-stack") {
    error(toString(this) +
          ": object file compiled with -fsplit-stack is not supported");
    return &InputSection::Discarded;
  }

  if (Config->Strip != StripPolicy::None && Name.startswith(".debug"))
    return &InputSection::Discarded;

  // If -gdb-index is given, LLD creates .gdb_index section, and that
  // section serves the same purpose as .debug_gnu_pub{names,types} sections.
  // If that's the case, we want to eliminate .debug_gnu_pub{names,types}
  // because they are redundant and can waste a large amount of disk space
  // (for example, they are about 400 MiB in total for a clang debug build.)
  if (Config->GdbIndex &&
      (Name == ".debug_gnu_pubnames" || Name == ".debug_gnu_pubtypes"))
    return &InputSection::Discarded;

  // The linkonce feature is a sort of proto-comdat. Some glibc i386 object
  // files contain definitions of symbol "__x86.get_pc_thunk.bx" in linkonce
  // sections. Drop those sections to avoid duplicate symbol errors.
  // FIXME: This is glibc PR20543, we should remove this hack once that has been
  // fixed for a while.
  if (Name.startswith(".gnu.linkonce."))
    return &InputSection::Discarded;

  // The linker merges EH (exception handling) frames and creates a
  // .eh_frame_hdr section for runtime. So we handle them with a special
  // class. For relocatable outputs, they are just passed through.
  if (Name == ".eh_frame" && !Config->Relocatable)
    return make<EhInputSection>(this, &Sec, Name);

  if (shouldMerge(Sec))
    return make<MergeInputSection>(this, &Sec, Name);
  return make<InputSection>(this, &Sec, Name);
}
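Example #21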
/// compute - Compute the preferred allocation order for RC with reserved
/// registers filtered out. Volatile registers come first followed by CSR
/// aliases ordered according to the CSR order specified by the target.
void RegisterClassInfo::compute(const TargetRegisterClass *RC) const {
  RCInfo &RCI = RegClass[RC->getID()];

  // Raw register count, including all reserved regs.
  unsigned NumRegs = RC->getNumRegs();

  if (!RCI.Order)
    RCI.Order.reset(new MCPhysReg[NumRegs]);

  unsigned N = 0;
  SmallVector<MCPhysReg, 16> CSRAlias;
  unsigned MinCost = 0xff;
  unsigned LastCost = ~0u;
  unsigned LastCostChange = 0;

  // FIXME: Once targets reserve registers instead of removing them from the
  // allocation order, we can simply use begin/end here.
  ArrayRef<MCPhysReg> RawOrder = RC->getRawAllocationOrder(*MF);
  for (unsigned i = 0; i != RawOrder.size(); ++i) {
    unsigned PhysReg = RawOrder[i];
    // Remove reserved registers from the allocation order.
    if (Reserved.test(PhysReg))
      continue;
    unsigned Cost = TRI->getCostPerUse(PhysReg);
    MinCost = std::min(MinCost, Cost);

    if (CSRNum[PhysReg])
      // PhysReg aliases a CSR, save it for later.
      CSRAlias.push_back(PhysReg);
    else {
      if (Cost != LastCost)
        LastCostChange = N;
      RCI.Order[N++] = PhysReg;
      LastCost = Cost;
    }
  }
  RCI.NumRegs = N + CSRAlias.size();
  assert(RCI.NumRegs <= NumRegs && "Allocation order larger than regclass");

  // CSR aliases go after the volatile registers, preserve the target's order.
  for (unsigned i = 0, e = CSRAlias.size(); i != e; ++i) {
    unsigned PhysReg = CSRAlias[i];
    unsigned Cost = TRI->getCostPerUse(PhysReg);
    if (Cost != LastCost)
      LastCostChange = N;
    RCI.Order[N++] = PhysReg;
    LastCost = Cost;
  }

  // Register allocator stress test.  Clip register class to N registers.
  if (StressRA && RCI.NumRegs > StressRA)
    RCI.NumRegs = StressRA;

  // Check if RC is a proper sub-class.
  if (const TargetRegisterClass *Super = TRI->getLargestLegalSuperClass(RC))
    if (Super != RC && getNumAllocatableRegs(Super) > RCI.NumRegs)
      RCI.ProperSubClass = true;

  RCI.MinCost = uint8_t(MinCost);
  RCI.LastCostChange = LastCostChange;

  DEBUG({
    dbgs() << "AllocationOrder(" << RC->getName() << ") = [";
    for (unsigned I = 0; I != RCI.NumRegs; ++I)
      dbgs() << ' ' << PrintReg(RCI.Order[I], TRI);
    dbgs() << (RCI.ProperSubClass ? " ] (sub-class)\n" : " ]\n");
  });
}
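// A hedged illustration with made-up registers: if the raw allocation order
// is (R0, R1, R2, R3), R2 is reserved, and R3 aliases a callee-saved
// register, the computed order becomes (R0, R1, R3): volatile registers
// first, the CSR alias appended last, and the reserved register dropped.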
Example #22
/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          TLI.getPointerTy().getSizeInBits() ==
          cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          TLI.getPointerTy().getSizeInBits() ==
          cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
      NoopInput = Op;
    } else if (isa<CallInst>(I)) {
      // Look through call (skipping callee)
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 1;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<CallInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (isa<InvokeInst>(I)) {
      // Look through invoke (skipping BB, BB, Callee)
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 3;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<InvokeInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (std::equal(InsertLoc.rbegin(), InsertLoc.rend(),
                     ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in, no
        // change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      std::copy(ExtractLoc.rbegin(), ExtractLoc.rend(),
                std::back_inserter(ValLoc));
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}
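// A hedged illustration (hypothetical IR): given
//   %agg = insertvalue {i32, {i32, i32}} %outer, i32 %x, 1, 0
// and a caller tracing the element at extractvalue path (1, 0), the
// insertvalue indices match that path, so the walk strips the matched
// indices from ValLoc and continues through %x; for any other path it
// continues through %outer with ValLoc unchanged.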
Example #23
SILSpecializeAttr::SILSpecializeAttr(ArrayRef<Requirement> requirements,
                                     bool exported, SpecializationKind kind)
    : numRequirements(requirements.size()), kind(kind), exported(exported) {
  std::copy(requirements.begin(), requirements.end(), getRequirementsData());
}
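Example #24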
MCDisassembler::DecodeStatus WebAssemblyDisassembler::getInstruction(
    MCInst &MI, uint64_t &Size, ArrayRef<uint8_t> Bytes, uint64_t /*Address*/,
    raw_ostream &OS, raw_ostream &CS) const {
  Size = 0;
  uint64_t Pos = 0;

  // Read the opcode.
  if (Pos + sizeof(uint64_t) > Bytes.size())
    return MCDisassembler::Fail;
  uint64_t Opcode = support::endian::read64le(Bytes.data() + Pos);
  Pos += sizeof(uint64_t);

  if (Opcode >= WebAssembly::INSTRUCTION_LIST_END)
    return MCDisassembler::Fail;

  MI.setOpcode(Opcode);
  const MCInstrDesc &Desc = MCII->get(Opcode);
  unsigned NumFixedOperands = Desc.NumOperands;

  // If it's variadic, read the number of extra operands.
  unsigned NumExtraOperands = 0;
  if (Desc.isVariadic()) {
    if (Pos + sizeof(uint64_t) > Bytes.size())
      return MCDisassembler::Fail;
    NumExtraOperands = support::endian::read64le(Bytes.data() + Pos);
    Pos += sizeof(uint64_t);
  }

  // Read the fixed operands. These are described by the MCInstrDesc.
  for (unsigned i = 0; i < NumFixedOperands; ++i) {
    const MCOperandInfo &Info = Desc.OpInfo[i];
    switch (Info.OperandType) {
    case MCOI::OPERAND_IMMEDIATE:
    case WebAssembly::OPERAND_P2ALIGN:
    case WebAssembly::OPERAND_BASIC_BLOCK: {
      if (Pos + sizeof(uint64_t) > Bytes.size())
        return MCDisassembler::Fail;
      uint64_t Imm = support::endian::read64le(Bytes.data() + Pos);
      Pos += sizeof(uint64_t);
      MI.addOperand(MCOperand::createImm(Imm));
      break;
    }
    case MCOI::OPERAND_REGISTER: {
      if (Pos + sizeof(uint64_t) > Bytes.size())
        return MCDisassembler::Fail;
      uint64_t Reg = support::endian::read64le(Bytes.data() + Pos);
      Pos += sizeof(uint64_t);
      MI.addOperand(MCOperand::createReg(Reg));
      break;
    }
    case WebAssembly::OPERAND_FPIMM: {
      // TODO: MC converts all floating point immediate operands to double.
      // This is fine for numeric values, but may cause NaNs to change bits.
      if (Pos + sizeof(uint64_t) > Bytes.size())
        return MCDisassembler::Fail;
      uint64_t Bits = support::endian::read64le(Bytes.data() + Pos);
      Pos += sizeof(uint64_t);
      double Imm;
      memcpy(&Imm, &Bits, sizeof(Imm));
      MI.addOperand(MCOperand::createFPImm(Imm));
      break;
    }
    default:
      llvm_unreachable("unimplemented operand kind");
    }
  }

  // Read the extra operands.
  assert(NumExtraOperands == 0 || Desc.isVariadic());
  for (unsigned i = 0; i < NumExtraOperands; ++i) {
    if (Pos + sizeof(uint64_t) > Bytes.size())
      return MCDisassembler::Fail;
    if (Desc.TSFlags & WebAssemblyII::VariableOpIsImmediate) {
      // Decode extra immediate operands.
      uint64_t Imm = support::endian::read64le(Bytes.data() + Pos);
      MI.addOperand(MCOperand::createImm(Imm));
    } else {
      // Decode extra register operands.
      uint64_t Reg = support::endian::read64le(Bytes.data() + Pos);
      MI.addOperand(MCOperand::createReg(Reg));
    }
    Pos += sizeof(uint64_t);
  }

  Size = Pos;
  return MCDisassembler::Success;
}
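// A hedged sketch of the record layout the loop above implies, with every
// field read as a little-endian uint64_t:
//
//   [opcode] [extra-operand count, only if variadic]
//   [fixed operand 0] ... [fixed operand N-1]
//   [extra operand 0] ... [extra operand M-1]
//
// so Size comes out as 8 * (1 + variadic + NumFixedOperands +
// NumExtraOperands) bytes for a successfully decoded instruction.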
Example #25
void MachineFunction::addCatchTypeInfo(MachineBasicBlock *LandingPad,
                                       ArrayRef<const GlobalValue *> TyInfo) {
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  for (unsigned N = TyInfo.size(); N; --N)
    LP.TypeIds.push_back(getTypeIDFor(TyInfo[N - 1]));
}
Example #26
std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
                                                const LiveIntervals *lis,
                                                const MachineLoopInfo *loopInfo,
                                                const RegSet &vregs) {

  LiveIntervals *LIS = const_cast<LiveIntervals*>(lis);
  MachineRegisterInfo *mri = &mf->getRegInfo();
  const TargetRegisterInfo *tri = mf->getTarget().getRegisterInfo();

  std::auto_ptr<PBQPRAProblem> p(new PBQPRAProblem());
  PBQP::Graph &g = p->getGraph();
  RegSet pregs;

  // Collect the set of preg intervals, record that they're used in the MF.
  for (unsigned Reg = 1, e = tri->getNumRegs(); Reg != e; ++Reg) {
    if (mri->def_empty(Reg))
      continue;
    pregs.insert(Reg);
    mri->setPhysRegUsed(Reg);
  }

  BitVector reservedRegs = tri->getReservedRegs(*mf);

  // Iterate over vregs.
  for (RegSet::const_iterator vregItr = vregs.begin(), vregEnd = vregs.end();
       vregItr != vregEnd; ++vregItr) {
    unsigned vreg = *vregItr;
    const TargetRegisterClass *trc = mri->getRegClass(vreg);
    LiveInterval *vregLI = &LIS->getInterval(vreg);

    // Record any overlaps with regmask operands.
    BitVector regMaskOverlaps;
    LIS->checkRegMaskInterference(*vregLI, regMaskOverlaps);

    // Compute an initial allowed set for the current vreg.
    typedef std::vector<unsigned> VRAllowed;
    VRAllowed vrAllowed;
    ArrayRef<uint16_t> rawOrder = trc->getRawAllocationOrder(*mf);
    for (unsigned i = 0; i != rawOrder.size(); ++i) {
      unsigned preg = rawOrder[i];
      if (reservedRegs.test(preg))
        continue;

      // vregLI crosses a regmask operand that clobbers preg.
      if (!regMaskOverlaps.empty() && !regMaskOverlaps.test(preg))
        continue;

      // vregLI overlaps fixed regunit interference.
      bool Interference = false;
      for (MCRegUnitIterator Units(preg, tri); Units.isValid(); ++Units) {
        if (vregLI->overlaps(LIS->getRegUnit(*Units))) {
          Interference = true;
          break;
        }
      }
      if (Interference)
        continue;

      // preg is usable for this virtual register.
      vrAllowed.push_back(preg);
    }

    // Construct the node.
    PBQP::Graph::NodeItr node =
      g.addNode(PBQP::Vector(vrAllowed.size() + 1, 0));

    // Record the mapping and allowed set in the problem.
    p->recordVReg(vreg, node, vrAllowed.begin(), vrAllowed.end());

    PBQP::PBQPNum spillCost = (vregLI->weight != 0.0) ?
        vregLI->weight : std::numeric_limits<PBQP::PBQPNum>::min();

    addSpillCosts(g.getNodeCosts(node), spillCost);
  }

  for (RegSet::const_iterator vr1Itr = vregs.begin(), vrEnd = vregs.end();
         vr1Itr != vrEnd; ++vr1Itr) {
    unsigned vr1 = *vr1Itr;
    const LiveInterval &l1 = lis->getInterval(vr1);
    const PBQPRAProblem::AllowedSet &vr1Allowed = p->getAllowedSet(vr1);

    for (RegSet::const_iterator vr2Itr = llvm::next(vr1Itr);
         vr2Itr != vrEnd; ++vr2Itr) {
      unsigned vr2 = *vr2Itr;
      const LiveInterval &l2 = lis->getInterval(vr2);
      const PBQPRAProblem::AllowedSet &vr2Allowed = p->getAllowedSet(vr2);

      assert(!l2.empty() && "Empty interval in vreg set?");
      if (l1.overlaps(l2)) {
        PBQP::Graph::EdgeItr edge =
          g.addEdge(p->getNodeForVReg(vr1), p->getNodeForVReg(vr2),
                    PBQP::Matrix(vr1Allowed.size()+1, vr2Allowed.size()+1, 0));

        addInterferenceCosts(g.getEdgeCosts(edge), vr1Allowed, vr2Allowed, tri);
      }
    }
  }

  return p;
}
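// The two cost helpers invoked above are small. A plausible sketch, matching
// the conventions of this snippet (option 0 of every node's cost vector is
// the spill option; options 1..N correspond to the allowed pregs), though not
// necessarily the exact upstream bodies:
void PBQPBuilder::addSpillCosts(PBQP::Vector &costVec,
                                PBQP::PBQPNum spillCost) {
  // Option 0 is the spill option; charge it the vreg's spill cost.
  costVec[0] = spillCost;
}

void PBQPBuilder::addInterferenceCosts(
    PBQP::Matrix &costMat,
    const PBQPRAProblem::AllowedSet &vr1Allowed,
    const PBQPRAProblem::AllowedSet &vr2Allowed,
    const TargetRegisterInfo *tri) {
  assert(costMat.getRows() == vr1Allowed.size() + 1 && "Matrix height mismatch.");
  assert(costMat.getCols() == vr2Allowed.size() + 1 && "Matrix width mismatch.");
  for (unsigned i = 0; i != vr1Allowed.size(); ++i) {
    unsigned preg1 = vr1Allowed[i];
    for (unsigned j = 0; j != vr2Allowed.size(); ++j) {
      unsigned preg2 = vr2Allowed[j];
      // Overlapping pregs cannot be handed to both vregs at once, so make
      // that pair of options infinitely expensive. Row/column 0 (the spill
      // options) never interfere with anything.
      if (tri->regsOverlap(preg1, preg2))
        costMat[i + 1][j + 1] = std::numeric_limits<PBQP::PBQPNum>::infinity();
    }
  }
}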
Example #27
/// Simply decrease the current pressure as impacted by these registers.
void RegPressureTracker::decreaseRegPressure(ArrayRef<unsigned> RegUnits) {
  for (unsigned I = 0, E = RegUnits.size(); I != E; ++I)
    decreaseSetPressure(CurrSetPressure, MRI->getPressureSets(RegUnits[I]));
}
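// The per-set decrement behind decreaseRegPressure, as a hedged sketch: it
// assumes getPressureSets yields the pressure-set ids of a register unit
// together with their common weight (this follows the shape of the iterator
// interface, not a verified copy of the upstream body):
static void decreaseSetPressure(std::vector<unsigned> &CurrSetPressure,
                                PSetIterator PSetI) {
  unsigned Weight = PSetI.getWeight();
  for (; PSetI.isValid(); ++PSetI) {
    assert(CurrSetPressure[*PSetI] >= Weight && "register pressure underflow");
    CurrSetPressure[*PSetI] -= Weight;
  }
}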
Example #28
// Find the minimum offset that we may store a value of size Size bits at. If
// IsAfter is set, look for an offset before the object, otherwise look for an
// offset after the object.
uint64_t
wholeprogramdevirt::findLowestOffset(ArrayRef<VirtualCallTarget> Targets,
                                     bool IsAfter, uint64_t Size) {
  // Find a minimum offset taking into account only vtable sizes.
  uint64_t MinByte = 0;
  for (const VirtualCallTarget &Target : Targets) {
    if (IsAfter)
      MinByte = std::max(MinByte, Target.minAfterBytes());
    else
      MinByte = std::max(MinByte, Target.minBeforeBytes());
  }

  // Build a vector of arrays of bytes covering, for each target, a slice of the
  // used region (see AccumBitVector::BytesUsed in
  // llvm/Transforms/IPO/WholeProgramDevirt.h) starting at MinByte. Effectively,
  // this aligns the used regions to start at MinByte.
  //
  // In this example, A, B and C are vtables, # is a byte already allocated for
  // a virtual function pointer, AAAA... (etc.) are the used regions for the
  // vtables and Offset(X) is the value computed for the Offset variable below
  // for X.
  //
  //                    Offset(A)
  //                    |       |
  //                            |MinByte
  // A: ################AAAAAAAA|AAAAAAAA
  // B: ########BBBBBBBBBBBBBBBB|BBBB
  // C: ########################|CCCCCCCCCCCCCCCC
  //            |   Offset(B)   |
  //
  // This code produces the slices of A, B and C that appear after the divider
  // at MinByte.
  std::vector<ArrayRef<uint8_t>> Used;
  for (const VirtualCallTarget &Target : Targets) {
    ArrayRef<uint8_t> VTUsed = IsAfter ? Target.TM->Bits->After.BytesUsed
                                       : Target.TM->Bits->Before.BytesUsed;
    uint64_t Offset = IsAfter ? MinByte - Target.minAfterBytes()
                              : MinByte - Target.minBeforeBytes();

    // Disregard used regions that are smaller than Offset. These are
    // effectively all-free regions that do not need to be checked.
    if (VTUsed.size() > Offset)
      Used.push_back(VTUsed.slice(Offset));
  }

  if (Size == 1) {
    // Find a free bit in each member of Used.
    for (unsigned I = 0;; ++I) {
      uint8_t BitsUsed = 0;
      for (auto &&B : Used)
        if (I < B.size())
          BitsUsed |= B[I];
      if (BitsUsed != 0xff)
        return (MinByte + I) * 8 +
               countTrailingZeros(uint8_t(~BitsUsed), ZB_Undefined);
    }
  } else {
    // Find a free (Size/8) byte region in each member of Used.
    // FIXME: see if alignment helps.
    for (unsigned I = 0;; ++I) {
      for (auto &&B : Used) {
        unsigned Byte = 0;
        while ((I + Byte) < B.size() && Byte < (Size / 8)) {
          if (B[I + Byte])
            goto NextI;
          ++Byte;
        }
      }
      return (MinByte + I) * 8;
    NextI:;
    }
  }
}
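// Worked check of the Size == 1 branch: if the bytes at some index I across
// Used OR together to BitsUsed = 0xE7 (0b11100111), then ~BitsUsed = 0x18,
// countTrailingZeros(0x18) == 3, and the function returns
// (MinByte + I) * 8 + 3, i.e. the lowest free bit in that byte.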
Example #29
static inline uint32_t eatB32(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof eatB32(Bytes));
  const auto Res = support::endian::read32le(Bytes.data());
  Bytes = Bytes.slice(sizeof Res);
  return Res;
}
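// Note that "sizeof eatB32(Bytes)" is an unevaluated operand, so the assert
// above merely compares against sizeof(uint32_t). A hypothetical caller that
// consumes two consecutive words (readTwoWords is invented for illustration):
static std::pair<uint32_t, uint32_t> readTwoWords(ArrayRef<uint8_t> Bytes) {
  // Each call reads one little-endian word and advances Bytes past it.
  uint32_t First = eatB32(Bytes);
  uint32_t Second = eatB32(Bytes);
  return std::make_pair(First, Second);
}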
Example #30
//
// runMCDesc - Print out MC register descriptions.
//
void
RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
                               CodeGenRegBank &RegBank) {
  EmitSourceFileHeader("MC Register Information", OS);

  OS << "\n#ifdef GET_REGINFO_MC_DESC\n";
  OS << "#undef GET_REGINFO_MC_DESC\n";

  const std::vector<CodeGenRegister*> &Regs = RegBank.getRegisters();
  std::map<const CodeGenRegister*, CodeGenRegister::Set> Overlaps;
  RegBank.computeOverlaps(Overlaps);

  // The lists of sub-registers, super-registers, and overlaps all go in the
  // same array. That allows us to share suffixes.
  typedef std::vector<const CodeGenRegister*> RegVec;
  SmallVector<RegVec, 4> SubRegLists(Regs.size());
  SmallVector<RegVec, 4> OverlapLists(Regs.size());
  SequenceToOffsetTable<RegVec, CodeGenRegister::Less> RegSeqs;

  // Precompute register lists for the SequenceToOffsetTable.
  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
    const CodeGenRegister *Reg = Regs[i];

    // Compute the ordered sub-register list.
    SetVector<const CodeGenRegister*> SR;
    Reg->addSubRegsPreOrder(SR, RegBank);
    RegVec &SubRegList = SubRegLists[i];
    SubRegList.assign(SR.begin(), SR.end());
    RegSeqs.add(SubRegList);

    // Super-registers are already computed.
    const RegVec &SuperRegList = Reg->getSuperRegs();
    RegSeqs.add(SuperRegList);

    // The list of overlaps doesn't need any particular order, except that
    // Reg itself must be the first element. Pick an ordering that has one of
    // the other lists as a suffix.
    RegVec &OverlapList = OverlapLists[i];
    const RegVec &Suffix = SubRegList.size() > SuperRegList.size() ?
                           SubRegList : SuperRegList;
    CodeGenRegister::Set Omit(Suffix.begin(), Suffix.end());

    // First element is Reg itself.
    OverlapList.push_back(Reg);
    Omit.insert(Reg);

    // Any elements not in Suffix.
    const CodeGenRegister::Set &OSet = Overlaps[Reg];
    std::set_difference(OSet.begin(), OSet.end(),
                        Omit.begin(), Omit.end(),
                        std::back_inserter(OverlapList),
                        CodeGenRegister::Less());

    // Finally, Suffix itself.
    OverlapList.insert(OverlapList.end(), Suffix.begin(), Suffix.end());
    RegSeqs.add(OverlapList);
  }

  // Compute the final layout of the sequence table.
  RegSeqs.layout();

  OS << "namespace llvm {\n\n";

  const std::string &TargetName = Target.getName();

  // Emit the shared table of register lists.
  OS << "extern const uint16_t " << TargetName << "RegLists[] = {\n";
  RegSeqs.emit(OS, printRegister);
  OS << "};\n\n";

  OS << "extern const MCRegisterDesc " << TargetName
     << "RegDesc[] = { // Descriptors\n";
  OS << "  { \"NOREG\", 0, 0, 0 },\n";

  // Emit the register descriptors now.
  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
    const CodeGenRegister *Reg = Regs[i];
    OS << "  { \"" << Reg->getName() << "\", "
       << RegSeqs.get(OverlapLists[i]) << ", "
       << RegSeqs.get(SubRegLists[i]) << ", "
       << RegSeqs.get(Reg->getSuperRegs()) << " },\n";
  }
  OS << "};\n\n";      // End of register descriptors...

  ArrayRef<CodeGenRegisterClass*> RegisterClasses = RegBank.getRegClasses();

  // Loop over all of the register classes... emitting each one.
  OS << "namespace {     // Register classes...\n";

  // Emit the register enum value arrays for each RegisterClass
  for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
    const CodeGenRegisterClass &RC = *RegisterClasses[rc];
    ArrayRef<Record*> Order = RC.getOrder();

    // Give the register class a legal C name if it's anonymous.
    std::string Name = RC.getName();

    // Emit the register list now.
    OS << "  // " << Name << " Register Class...\n"
       << "  const uint16_t " << Name
       << "[] = {\n    ";
    for (unsigned i = 0, e = Order.size(); i != e; ++i) {
      Record *Reg = Order[i];
      OS << getQualifiedName(Reg) << ", ";
    }
    OS << "\n  };\n\n";

    OS << "  // " << Name << " Bit set.\n"
       << "  const uint8_t " << Name
       << "Bits[] = {\n    ";
    BitVectorEmitter BVE;
    for (unsigned i = 0, e = Order.size(); i != e; ++i) {
      Record *Reg = Order[i];
      BVE.add(Target.getRegBank().getReg(Reg)->EnumValue);
    }
    BVE.print(OS);
    OS << "\n  };\n\n";

  }
  OS << "}\n\n";

  OS << "extern const MCRegisterClass " << TargetName
     << "MCRegisterClasses[] = {\n";

  for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
    const CodeGenRegisterClass &RC = *RegisterClasses[rc];

    // Sanity checks: make sure the values fit in the table fields, given the
    // types declared in MCRegisterInfo.h.
    assert((RC.SpillSize/8) <= 0xffff && "SpillSize too large.");
    assert((RC.SpillAlignment/8) <= 0xffff && "SpillAlignment too large.");
    assert(RC.CopyCost >= -128 && RC.CopyCost <= 127 && "Copy cost too large.");

    OS << "  { " << '\"' << RC.getName() << "\", "
       << RC.getName() << ", " << RC.getName() << "Bits, "
       << RC.getOrder().size() << ", sizeof(" << RC.getName() << "Bits), "
       << RC.getQualifiedName() + "RegClassID" << ", "
       << RC.SpillSize/8 << ", "
       << RC.SpillAlignment/8 << ", "
       << RC.CopyCost << ", "
       << RC.Allocatable << " },\n";
  }

  OS << "};\n\n";

  // Emit the data table for getSubReg().
  ArrayRef<CodeGenSubRegIndex*> SubRegIndices = RegBank.getSubRegIndices();
  if (SubRegIndices.size()) {
    OS << "const uint16_t " << TargetName << "SubRegTable[]["
       << SubRegIndices.size() << "] = {\n";
    for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
      const CodeGenRegister::SubRegMap &SRM = Regs[i]->getSubRegs();
      OS << "  /* " << Regs[i]->TheDef->getName() << " */\n";
      if (SRM.empty()) {
        OS << "  {0},\n";
        continue;
      }
      OS << "  {";
      for (unsigned j = 0, je = SubRegIndices.size(); j != je; ++j) {
        // FIXME: We really should keep this to 80 columns...
        CodeGenRegister::SubRegMap::const_iterator SubReg =
          SRM.find(SubRegIndices[j]);
        if (SubReg != SRM.end())
          OS << getQualifiedName(SubReg->second->TheDef);
        else
          OS << "0";
        if (j != je - 1)
          OS << ", ";
      }
      OS << "}" << (i != e ? "," : "") << "\n";
    }
    OS << "};\n\n";
    OS << "const uint16_t *get" << TargetName
       << "SubRegTable() {\n  return (const uint16_t *)" << TargetName
       << "SubRegTable;\n}\n\n";
  }

  EmitRegMappingTables(OS, Regs, false);

  // MCRegisterInfo initialization routine.
  OS << "static inline void Init" << TargetName
     << "MCRegisterInfo(MCRegisterInfo *RI, unsigned RA, "
     << "unsigned DwarfFlavour = 0, unsigned EHFlavour = 0) {\n";
  OS << "  RI->InitMCRegisterInfo(" << TargetName << "RegDesc, "
     << Regs.size()+1 << ", RA, " << TargetName << "MCRegisterClasses, "
     << RegisterClasses.size() << ", " << TargetName << "RegLists, ";
  if (SubRegIndices.size() != 0)
    OS << "(uint16_t*)" << TargetName << "SubRegTable, "
       << SubRegIndices.size() << ");\n\n";
  else
    OS << "NULL, 0);\n\n";

  EmitRegMapping(OS, Regs, false);

  OS << "}\n\n";

  OS << "} // End llvm namespace \n";
  OS << "#endif // GET_REGINFO_MC_DESC\n\n";
}
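// A target's MC layer typically calls the generated initializer from its
// MCRegisterInfo factory. A hedged sketch for a hypothetical Foo target,
// where Foo::LR stands in for its return address register:
static MCRegisterInfo *createFooMCRegisterInfo(StringRef TT) {
  MCRegisterInfo *X = new MCRegisterInfo();
  InitFooMCRegisterInfo(X, Foo::LR); // RA = the return address register
  return X;
}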