Example #1
// get or create value
Pointer getPointer(Module *M, const char *name, int64_t off)
{
    Value *va;

    MapVector<TestPointer, Pointer>::iterator E = valueMap.end();
    Pointer& p = valueMap[TestPointer(name, off)];

    // if operator[] grew valueMap, the end() iterator saved above no longer
    // matches, so this pointer was just default-constructed and must be filled in
    if (E != valueMap.end()) {
        // use always the same llvm value
        std::map<const char *, Value *>::iterator VI = llvmValues.find(name);
        if (VI == llvmValues.end()) {
            if (strcmp(name, "null") == 0)
                va = ConstantPointerNull::get(llvm::Type::getInt32PtrTy(M->getContext()));
            else
                va = new GlobalVariable(*M, IntegerType::get(M->getContext(), 32),
                                        false,
                                        GlobalValue::CommonLinkage, 0, name);
            llvmValues.insert(std::make_pair(name, va));
        } else {
            va = VI->second;
        }

        // copy it into the map (p is a reference)
        p = Pointer(va, off);
    }

    return p;
}
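The end-iterator trick above is subtle: it detects the insertion only because MapVector's end() changes when the underlying vector grows. A less fragile pattern is to call insert() directly and read the returned pair (see Example #6). A minimal sketch of the same "get or create" idea, using placeholder int keys and values instead of the project's TestPointer/Pointer types:

// Sketch: MapVector::insert returns std::pair<iterator, bool>, so the
// "did the key already exist?" test falls out of the return value.
int getOrCreate(llvm::MapVector<int, int> &Cache, int Key)
{
    auto R = Cache.insert(std::make_pair(Key, 0));
    if (R.second)               // freshly inserted: initialize the value
        R.first->second = Key * 2;
    return R.first->second;     // existing or newly created value
}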
Example #2
int IniFile::ReadAndFillTDC(){
    std::ifstream ini( sFileName.c_str() );
    std::stringstream parser;
    std::string token, value, line, group;
    iError = INI_OK;
    // Loading the file into the parser
    if( ini ){
        parser << ini.rdbuf();
        ini.close();
    } else {
        iError = INI_ERROR_CANNOT_OPEN_READ_FILE;
        return iError;
    }
    group = "";
    TdcMap mapTDC;
    // Note: MapVector here is presumably a project typedef for a sequence of
    // group maps, not llvm::MapVector.
    MapVector mapVector;
    while( std::getline( parser, line ) && ( iError == INI_OK ) ){
        // Check if the line is a comment
        if( !CheckIfComment( line ) ){
            // Check for a group
            if( !CheckIfGroup( line, group ) ){
                // Check for a token
                if( CheckIfToken( line, token, value ) ){
                    // Make the key in format group.key if the group is not empty
                    //if( group.size() > 1 ) token = group + "." + token;
                    mData[ token ] = value;
                } else {
                    iError = INI_ERROR_WRONG_FORMAT;
                    return iError;
                }
            } else {
                mapVector.push_back(mData);
                mData.clear();
            }
        }
    }
    mapVector.push_back(mData);

    // The first three groups are skipped (assumed to hold non-TDC settings).
    for( size_t i = 3; i < mapVector.size(); i++ ){
      TDC tempTDC;
      tempTDC.SetName(mapVector[i]["Name"]);
      //std::cout<<"TriggerWindowWidth : "<<mapVector[i]["TriggerWindowWidth"]<<std::endl;
      tempTDC.SetTriggerWindowWidth(std::stoi(mapVector[i]["TriggerWindowWidth"], nullptr,10 ));
      tempTDC.SetTriggerWindowOffset(std::stoi(mapVector[i]["TriggerWindowOffset"], nullptr,10 ));
      tempTDC.SetTriggerExtraSearchMargin(std::stoi(mapVector[i]["TriggerExtraSearchMargin"], nullptr,10 ));
      tempTDC.SetTriggerRejectMargin(std::stoi(mapVector[i]["TriggerRejectMargin"], nullptr,10 ));
      tempTDC.SetEnableTriggerTimeSubstraction(std::stoi(mapVector[i]["EnableTriggerTimeSubstraction"], nullptr,10 ));
      tempTDC.SetIndividualLSB(std::stoi(mapVector[i]["IndividualLSB"], nullptr,10 ));

      fTdcVector.push_back(tempTDC);
      //std::cout<<"Tdc-NName : "<<mapVector[i]["Name"]<<std::endl;
      //std::cout<<"TDC-NAME : "<<mapVector[i]["Name"]<<std::endl;
    }

    std::cout<<"Num of Groups in INI file : "<<mapVector.size()<<std::endl;

    return iError;
}
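One caveat in the loop above: std::stoi throws std::invalid_argument when a key such as "TriggerWindowWidth" is missing or holds a non-numeric value, so a malformed INI group aborts the program. A minimal sketch of a guarded lookup, with a hypothetical readIntOr helper that is not part of IniFile:

// Hypothetical helper (not part of IniFile): returns `fallback` instead of
// throwing when the key is absent or its value is not a number.
static int readIntOr(const std::map<std::string, std::string> &group,
                     const std::string &key, int fallback)
{
    auto it = group.find(key);
    if (it == group.end())
        return fallback;
    try {
        return std::stoi(it->second, nullptr, 10);
    } catch (const std::exception &) {
        return fallback;
    }
}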
Example #3
MatrixEpetraStructured<DataType>::MatrixEpetraStructured ( const MapVector<MapEpetra>& vector, int numEntries, bool ignoreNonLocalValues ) :
    MatrixEpetra<DataType> ( typename MatrixEpetra<DataType>::matrix_ptrtype() ), M_blockStructure ( vector )
{
    ASSERT ( vector.nbMap() > 0 , "Map vector empty, impossible to construct a MatrixEpetraStructured!" );

    MapEpetra myMap ( vector.totalMap() );

    this->mapPtr().reset ( new MapEpetra ( myMap ) );
    this->matrixPtr().reset ( new typename MatrixEpetra<DataType>::matrix_type ( Copy, *myMap.map ( Unique ), numEntries, ignoreNonLocalValues ) );
}
Example #4
void
MatrixBlockMonolithicEpetra<DataType>::setBlockStructure (const MapVector<MapEpetra>& mapVector)
{
    ASSERT ( mapVector.nbMap() > 0 , "Map vector empty, impossible to set the block structure");

    M_blockNumRows.resize (mapVector.nbMap() );
    M_blockNumColumns.resize (mapVector.nbMap() );

    M_blockFirstRows.resize (mapVector.nbMap() );
    M_blockFirstColumns.resize (mapVector.nbMap() );

    UInt totalSize (0);

    for (UInt i (0); i < mapVector.nbMap(); ++i)
    {
        M_blockNumRows[i] = mapVector.mapSize (i);
        M_blockNumColumns[i] = mapVector.mapSize (i);

        M_blockFirstRows[i] = totalSize;
        M_blockFirstColumns[i] = totalSize;

        totalSize += mapVector.mapSize (i);
    }

    ASSERT ( this->matrixPtr()->NumGlobalCols() == totalSize, " Incompatible block structure (global size does not match) ");
    ASSERT ( this->matrixPtr()->NumGlobalRows() == totalSize, " Incompatible block structure (global size does not match) ");
}
Example #5
static void
writeIndex(MCStreamer &Out, MCSection *Section,
           ArrayRef<unsigned> ContributionOffsets,
           const MapVector<uint64_t, UnitIndexEntry> &IndexEntries) {
  if (IndexEntries.empty())
    return;

  unsigned Columns = 0;
  for (auto &C : ContributionOffsets)
    if (C)
      ++Columns;

  std::vector<unsigned> Buckets(NextPowerOf2(3 * IndexEntries.size() / 2));
  uint64_t Mask = Buckets.size() - 1;
  size_t i = 0;
  for (const auto &P : IndexEntries) {
    auto S = P.first;
    auto H = S & Mask;
    auto HP = ((S >> 32) & Mask) | 1;
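    // Buckets.size() is a power of two, so Mask selects a valid bucket index,
    // and forcing HP odd makes the probe step coprime with the table size,
    // guaranteeing the loop below eventually reaches an empty bucket.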
    while (Buckets[H]) {
      assert(S != IndexEntries.begin()[Buckets[H] - 1].first &&
             "Duplicate unit");
      H = (H + HP) & Mask;
    }
    Buckets[H] = i + 1;
    ++i;
  }

  Out.SwitchSection(Section);
  Out.EmitIntValue(2, 4);                   // Version
  Out.EmitIntValue(Columns, 4);             // Columns
  Out.EmitIntValue(IndexEntries.size(), 4); // Num Units
  Out.EmitIntValue(Buckets.size(), 4);      // Num Buckets

  // Write the signatures.
  for (const auto &I : Buckets)
    Out.EmitIntValue(I ? IndexEntries.begin()[I - 1].first : 0, 8);

  // Write the indexes.
  for (const auto &I : Buckets)
    Out.EmitIntValue(I, 4);

  // Write the column headers (which sections will appear in the table)
  for (size_t i = 0; i != ContributionOffsets.size(); ++i)
    if (ContributionOffsets[i])
      Out.EmitIntValue(i + DW_SECT_INFO, 4);

  // Write the offsets.
  writeIndexTable(Out, ContributionOffsets, IndexEntries,
                  &DWARFUnitIndex::Entry::SectionContribution::Offset);

  // Write the lengths.
  writeIndexTable(Out, ContributionOffsets, IndexEntries,
                  &DWARFUnitIndex::Entry::SectionContribution::Length);
}
Example #6
TEST(MapVectorTest, insert) {
  MapVector<int, int> MV;
  std::pair<MapVector<int, int>::iterator, bool> R;

  R = MV.insert(std::make_pair(1, 2));
  ASSERT_EQ(R.first, MV.begin());
  EXPECT_EQ(R.first->first, 1);
  EXPECT_EQ(R.first->second, 2);
  EXPECT_TRUE(R.second);

  R = MV.insert(std::make_pair(1, 3));
  ASSERT_EQ(R.first, MV.begin());
  EXPECT_EQ(R.first->first, 1);
  EXPECT_EQ(R.first->second, 2);
  EXPECT_FALSE(R.second);

  R = MV.insert(std::make_pair(4, 5));
  ASSERT_NE(R.first, MV.end());
  EXPECT_EQ(R.first->first, 4);
  EXPECT_EQ(R.first->second, 5);
  EXPECT_TRUE(R.second);

  EXPECT_EQ(MV.size(), 2u);
  EXPECT_EQ(MV[1], 2);
  EXPECT_EQ(MV[4], 5);
}
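What distinguishes MapVector from std::map or DenseMap shows up once you iterate: elements come back in insertion order, not key order. A short sketch in the same gtest style (not an actual test from the suite):

TEST(MapVectorTest, iteration_order_sketch) {
  MapVector<int, int> MV;
  MV.insert(std::make_pair(3, 30));
  MV.insert(std::make_pair(1, 10));
  MV.insert(std::make_pair(2, 20));

  // Iteration visits keys 3, 1, 2 -- insertion order, not sorted order.
  int ExpectedKeys[] = {3, 1, 2};
  int Idx = 0;
  for (auto &KV : MV)
    EXPECT_EQ(KV.first, ExpectedKeys[Idx++]);
}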
Example #7
static void addAllTypesFromDWP(
    MCStreamer &Out, MapVector<uint64_t, UnitIndexEntry> &TypeIndexEntries,
    const DWARFUnitIndex &TUIndex, MCSection *OutputTypes, StringRef Types,
    const UnitIndexEntry &TUEntry, uint32_t &TypesOffset) {
  Out.SwitchSection(OutputTypes);
  for (const DWARFUnitIndex::Entry &E : TUIndex.getRows()) {
    auto *I = E.getOffsets();
    if (!I)
      continue;
    auto P = TypeIndexEntries.insert(std::make_pair(E.getSignature(), TUEntry));
    if (!P.second)
      continue;
    auto &Entry = P.first->second;
    // Zero out the debug_info contribution
    Entry.Contributions[0] = {};
    for (auto Kind : TUIndex.getColumnKinds()) {
      auto &C = Entry.Contributions[Kind - DW_SECT_INFO];
      C.Offset += I->Offset;
      C.Length = I->Length;
      ++I;
    }
    auto &C = Entry.Contributions[DW_SECT_TYPES - DW_SECT_INFO];
    Out.EmitBytes(Types.substr(
        C.Offset - TUEntry.Contributions[DW_SECT_TYPES - DW_SECT_INFO].Offset,
        C.Length));
    C.Offset = TypesOffset;
    TypesOffset += C.Length;
  }
}
Example #8
static void addAllTypes(MCStreamer &Out,
                        MapVector<uint64_t, UnitIndexEntry> &TypeIndexEntries,
                        MCSection *OutputTypes,
                        const std::vector<StringRef> &TypesSections,
                        const UnitIndexEntry &CUEntry, uint32_t &TypesOffset) {
  for (StringRef Types : TypesSections) {
    Out.SwitchSection(OutputTypes);
    uint32_t Offset = 0;
    DataExtractor Data(Types, true, 0);
    while (Data.isValidOffset(Offset)) {
      UnitIndexEntry Entry = CUEntry;
      // Zero out the debug_info contribution
      Entry.Contributions[0] = {};
      auto &C = Entry.Contributions[DW_SECT_TYPES - DW_SECT_INFO];
      C.Offset = TypesOffset;
      auto PrevOffset = Offset;
      // Length of the unit, including the 4 byte length field.
      C.Length = Data.getU32(&Offset) + 4;

      Data.getU16(&Offset); // Version
      Data.getU32(&Offset); // Abbrev offset
      Data.getU8(&Offset);  // Address size
      auto Signature = Data.getU64(&Offset);
      Offset = PrevOffset + C.Length;

      auto P = TypeIndexEntries.insert(std::make_pair(Signature, Entry));
      if (!P.second)
        continue;

      Out.EmitBytes(Types.substr(PrevOffset, C.Length));
      TypesOffset += C.Length;
    }
  }
}
Example #9
TEST(MapVectorTest, erase) {
  MapVector<int, int> MV;

  MV.insert(std::make_pair(1, 2));
  MV.insert(std::make_pair(3, 4));
  MV.insert(std::make_pair(5, 6));
  ASSERT_EQ(MV.size(), 3u);

  MV.erase(MV.find(1));
  ASSERT_EQ(MV.size(), 2u);
  ASSERT_EQ(MV.find(1), MV.end());
  ASSERT_EQ(MV[3], 4);
  ASSERT_EQ(MV[5], 6);
}
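Erasing through an iterator keeps the remaining elements in order, but because they live in a contiguous vector, each erase() shifts the tail and costs linear time. For bulk removal, a single remove_if pass (see Example #19) avoids the repeated shifting; a short sketch:

// Sketch: one remove_if pass instead of a loop of erase() calls.
MapVector<int, int> MV;
MV.insert(std::make_pair(1, -1));
MV.insert(std::make_pair(2, 2));
MV.remove_if([](const std::pair<int, int> &KV) { return KV.second < 0; });
// MV now holds only (2, 2).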
Example #10
void
MatrixEpetraStructured<DataType>::setBlockStructure ( const MapVector<MapEpetra>& mapVector )
{
    ASSERT ( mapVector.nbMap() > 0 , "Map vector empty, impossible to set the block structure" );

    M_blockStructure.setBlockStructure ( mapVector );

    ASSERT ( this->matrixPtr()->NumGlobalCols() == M_blockStructure.numColumns(), " Incompatible block structure (global size does not match) " );
    ASSERT ( this->matrixPtr()->NumGlobalRows() == M_blockStructure.numRows(), " Incompatible block structure (global size does not match) " );
}
Example #11
int DialogReaderWriter::MapMemberName(MapVector& aVector, string& aName)
{
	// Note: this MapVector is presumably a project-local sequence of
	// CaseValues*, not llvm::MapVector.
	CaseValues* wCase;
	for(size_t i = 0; i < aVector.size(); i++)
	{
		wCase = aVector[i];
		int wId = wCase->isMatch(aName);
		if(wId > -1)
			return wId;
	}
	return -1;
}
Example #12
File: VectorUtils.cpp  Project: MatzeB/llvm
// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];      // (1)
//                                A[i] = b;      // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
                                 bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const ValueToValueMap &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup *Group = nullptr;
    if (isStrided(DesB.Stride) && 
        (!isPredicated(B->getParent()) || EnablePredicatedInterleavedMemAccesses)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a;  // (1)
      //                    | A[i-1] = b;  // (2) |
      //                      A[i-3] = c;  // (3)
      //                      A[i]   = d;  // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup *StoreGroup = getInterleaveGroup(A);
          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() and mayWriteToMemory() are not mutually
      // exclusive for atomic loads. We shouldn't see those here;
      // canVectorizeMemory() should have returned false, except when
      // optimization remarks were requested.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory objects of A and B don't belong to the same
      // address space.
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // All members of a predicated interleave-group must have the same predicate,
      // and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();  
      BasicBlock *BlockB = B->getParent();  
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Align)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << "    into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (InterleaveGroup *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor()) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
    }
  // Remove interleaved groups with gaps (currently only loads) whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer doesn't
  // wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. When we'll change to use Assume=true
  // we'll only need at most one runtime check per interleaved group.
  for (InterleaveGroup *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If the first and last members of the group don't wrap, then all
    // the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist)
    // and group member Factor - 1; if the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
    if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                      /*ShouldCheckWrap=*/true)) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved group due to "
                    "first group member potentially pointer-wrapping.\n");
      releaseGroup(Group);
      continue;
    }
    Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
    if (LastMember) {
      Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
      if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                        /*ShouldCheckWrap=*/true)) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "last group member potentially pointer-wrapping.\n");
        releaseGroup(Group);
      }
    } else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }
}
Example #13
static void
computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
                       const Function &F, BlockFrequencyInfo *BFI,
                       ProfileSummaryInfo *PSI, bool HasLocalsInUsed,
                       DenseSet<GlobalValue::GUID> &CantBePromoted) {
  // Summary not currently supported for anonymous functions; they should
  // have been named.
  assert(F.hasName());

  unsigned NumInsts = 0;
  // Map from callee ValueId to profile count. Used to accumulate profile
  // counts for all static calls to a given callee.
  MapVector<ValueInfo, CalleeInfo> CallGraphEdges;
  SetVector<ValueInfo> RefEdges;
  SetVector<GlobalValue::GUID> TypeTests;
  ICallPromotionAnalysis ICallAnalysis;

  bool HasInlineAsmMaybeReferencingInternal = false;
  SmallPtrSet<const User *, 8> Visited;
  for (const BasicBlock &BB : F)
    for (const Instruction &I : BB) {
      if (isa<DbgInfoIntrinsic>(I))
        continue;
      ++NumInsts;
      findRefEdges(&I, RefEdges, Visited);
      auto CS = ImmutableCallSite(&I);
      if (!CS)
        continue;

      const auto *CI = dyn_cast<CallInst>(&I);
      // Since we don't know exactly which local values are referenced in inline
      // assembly, conservatively mark the function as possibly referencing
      // a local value from inline assembly to ensure we don't export a
      // reference (which would require renaming and promotion of the
      // referenced value).
      if (HasLocalsInUsed && CI && CI->isInlineAsm())
        HasInlineAsmMaybeReferencingInternal = true;

      auto *CalledValue = CS.getCalledValue();
      auto *CalledFunction = CS.getCalledFunction();
      // Check if this is an alias to a function. If so, get the
      // called aliasee for the checks below.
      if (auto *GA = dyn_cast<GlobalAlias>(CalledValue)) {
        assert(!CalledFunction && "Expected null called function in callsite for alias");
        CalledFunction = dyn_cast<Function>(GA->getBaseObject());
      }
      // Check if this is a direct call to a known function or a known
      // intrinsic, or an indirect call with profile data.
      if (CalledFunction) {
        if (CalledFunction->isIntrinsic()) {
          if (CalledFunction->getIntrinsicID() != Intrinsic::type_test)
            continue;
          // Produce a summary from type.test intrinsics. We only summarize
          // type.test intrinsics that are used other than by an llvm.assume
          // intrinsic. Intrinsics that are assumed are relevant only to the
          // devirtualization pass, not the type test lowering pass.
          bool HasNonAssumeUses = llvm::any_of(CI->uses(), [](const Use &CIU) {
            auto *AssumeCI = dyn_cast<CallInst>(CIU.getUser());
            if (!AssumeCI)
              return true;
            Function *F = AssumeCI->getCalledFunction();
            return !F || F->getIntrinsicID() != Intrinsic::assume;
          });
          if (HasNonAssumeUses) {
            auto *TypeMDVal = cast<MetadataAsValue>(CI->getArgOperand(1));
            if (auto *TypeId = dyn_cast<MDString>(TypeMDVal->getMetadata()))
              TypeTests.insert(GlobalValue::getGUID(TypeId->getString()));
          }
        }
        // We should have named any anonymous globals
        assert(CalledFunction->hasName());
        auto ScaledCount = BFI ? BFI->getBlockProfileCount(&BB) : None;
        auto Hotness = ScaledCount ? getHotness(ScaledCount.getValue(), PSI)
                                   : CalleeInfo::HotnessType::Unknown;

        // Use the original CalledValue, in case it was an alias. We want
        // to record the call edge to the alias in that case. Eventually
        // an alias summary will be created to associate the alias and
        // aliasee.
        CallGraphEdges[cast<GlobalValue>(CalledValue)].updateHotness(Hotness);
      } else {
        // Skip inline assembly calls.
        if (CI && CI->isInlineAsm())
          continue;
        // Skip direct calls.
        if (!CS.getCalledValue() || isa<Constant>(CS.getCalledValue()))
          continue;

        uint32_t NumVals, NumCandidates;
        uint64_t TotalCount;
        auto CandidateProfileData =
            ICallAnalysis.getPromotionCandidatesForInstruction(
                &I, NumVals, TotalCount, NumCandidates);
        for (auto &Candidate : CandidateProfileData)
          CallGraphEdges[Candidate.Value].updateHotness(
              getHotness(Candidate.Count, PSI));
      }
    }

  bool NonRenamableLocal = isNonRenamableLocal(F);
  bool NotEligibleForImport =
      NonRenamableLocal || HasInlineAsmMaybeReferencingInternal ||
      // Inliner doesn't handle variadic functions.
      // FIXME: refactor this to use the same code that inliner is using.
      F.isVarArg();
  GlobalValueSummary::GVFlags Flags(F.getLinkage(), NotEligibleForImport,
                                    /* LiveRoot = */ false);
  auto FuncSummary = llvm::make_unique<FunctionSummary>(
      Flags, NumInsts, RefEdges.takeVector(), CallGraphEdges.takeVector(),
      TypeTests.takeVector());
  if (NonRenamableLocal)
    CantBePromoted.insert(F.getGUID());
  Index.addGlobalValueSummary(F.getName(), std::move(FuncSummary));
}
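One detail worth noting here (and in Example #17): CallGraphEdges is a MapVector so that the summary's edge list comes out in deterministic insertion order, and takeVector() hands the underlying vector to the FunctionSummary without copying, leaving the MapVector empty. A small sketch of that behavior:

// Sketch: takeVector() moves out the pairs in insertion order and empties
// the MapVector.
MapVector<int, int> Edges;
Edges.insert(std::make_pair(7, 1));
Edges.insert(std::make_pair(3, 1));
std::vector<std::pair<int, int>> V = Edges.takeVector();
assert(V.size() == 2 && V[0].first == 7 && Edges.empty());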
Example #14
static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
                              std::unique_ptr<raw_pwrite_stream> OS,
                              std::string SampleProfile) {
  StringMap<std::map<GlobalValue::GUID, GlobalValueSummary *>>
      ModuleToDefinedGVSummaries;
  CombinedIndex->collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);

  // We can simply import the values mentioned in the combined index, since
  // we should only invoke this using the individual indexes written out
  // via a WriteIndexesThinBackend.
  FunctionImporter::ImportMapTy ImportList;
  for (auto &GlobalList : *CombinedIndex) {
    auto GUID = GlobalList.first;
    assert(GlobalList.second.size() == 1 &&
           "Expected individual combined index to have one summary per GUID");
    auto &Summary = GlobalList.second[0];
    // Skip the summaries for the importing module. These are included to
    // e.g. record required linkage changes.
    if (Summary->modulePath() == M->getModuleIdentifier())
      continue;
    // It doesn't matter what value we plug into the map; it just needs an
    // entry to provoke importing by thinBackend.
    ImportList[Summary->modulePath()][GUID] = 1;
  }

  std::vector<std::unique_ptr<llvm::MemoryBuffer>> OwnedImports;
  MapVector<llvm::StringRef, llvm::BitcodeModule> ModuleMap;

  for (auto &I : ImportList) {
    ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> MBOrErr =
        llvm::MemoryBuffer::getFile(I.first());
    if (!MBOrErr) {
      errs() << "Error loading imported file '" << I.first()
             << "': " << MBOrErr.getError().message() << "\n";
      return;
    }

    Expected<std::vector<BitcodeModule>> BMsOrErr =
        getBitcodeModuleList(**MBOrErr);
    if (!BMsOrErr) {
      handleAllErrors(BMsOrErr.takeError(), [&](ErrorInfoBase &EIB) {
        errs() << "Error loading imported file '" << I.first()
               << "': " << EIB.message() << '\n';
      });
      return;
    }

    // The bitcode file may contain multiple modules, we want the one with a
    // summary.
    bool FoundModule = false;
    for (BitcodeModule &BM : *BMsOrErr) {
      Expected<bool> HasSummary = BM.hasSummary();
      if (HasSummary && *HasSummary) {
        ModuleMap.insert({I.first(), BM});
        FoundModule = true;
        break;
      }
    }
    if (!FoundModule) {
      errs() << "Error loading imported file '" << I.first()
             << "': Could not find module summary\n";
      return;
    }

    OwnedImports.push_back(std::move(*MBOrErr));
  }
  auto AddStream = [&](size_t Task) {
    return llvm::make_unique<lto::NativeObjectStream>(std::move(OS));
  };
  lto::Config Conf;
  Conf.SampleProfile = SampleProfile;
  if (Error E = thinBackend(
          Conf, 0, AddStream, *M, *CombinedIndex, ImportList,
          ModuleToDefinedGVSummaries[M->getModuleIdentifier()], ModuleMap)) {
    handleAllErrors(std::move(E), [&](ErrorInfoBase &EIB) {
      errs() << "Error running ThinLTO backend: " << EIB.message() << '\n';
    });
  }
}
Example #15
static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
                              const HeaderSearchOptions &HeaderOpts,
                              const CodeGenOptions &CGOpts,
                              const clang::TargetOptions &TOpts,
                              const LangOptions &LOpts,
                              std::unique_ptr<raw_pwrite_stream> OS,
                              std::string SampleProfile,
                              BackendAction Action) {
  StringMap<DenseMap<GlobalValue::GUID, GlobalValueSummary *>>
      ModuleToDefinedGVSummaries;
  CombinedIndex->collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);

  setCommandLineOpts(CGOpts);

  // We can simply import the values mentioned in the combined index, since
  // we should only invoke this using the individual indexes written out
  // via a WriteIndexesThinBackend.
  FunctionImporter::ImportMapTy ImportList;
  for (auto &GlobalList : *CombinedIndex) {
    // Ignore entries for undefined references.
    if (GlobalList.second.SummaryList.empty())
      continue;

    auto GUID = GlobalList.first;
    assert(GlobalList.second.SummaryList.size() == 1 &&
           "Expected individual combined index to have one summary per GUID");
    auto &Summary = GlobalList.second.SummaryList[0];
    // Skip the summaries for the importing module. These are included to
    // e.g. record required linkage changes.
    if (Summary->modulePath() == M->getModuleIdentifier())
      continue;
    // It doesn't matter what value we plug into the map; it just needs an
    // entry to provoke importing by thinBackend.
    ImportList[Summary->modulePath()][GUID] = 1;
  }

  std::vector<std::unique_ptr<llvm::MemoryBuffer>> OwnedImports;
  MapVector<llvm::StringRef, llvm::BitcodeModule> ModuleMap;

  for (auto &I : ImportList) {
    ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> MBOrErr =
        llvm::MemoryBuffer::getFile(I.first());
    if (!MBOrErr) {
      errs() << "Error loading imported file '" << I.first()
             << "': " << MBOrErr.getError().message() << "\n";
      return;
    }

    Expected<BitcodeModule> BMOrErr = FindThinLTOModule(**MBOrErr);
    if (!BMOrErr) {
      handleAllErrors(BMOrErr.takeError(), [&](ErrorInfoBase &EIB) {
        errs() << "Error loading imported file '" << I.first()
               << "': " << EIB.message() << '\n';
      });
      return;
    }
    ModuleMap.insert({I.first(), *BMOrErr});

    OwnedImports.push_back(std::move(*MBOrErr));
  }
  auto AddStream = [&](size_t Task) {
    return llvm::make_unique<lto::NativeObjectStream>(std::move(OS));
  };
  lto::Config Conf;
  Conf.CPU = TOpts.CPU;
  Conf.CodeModel = getCodeModel(CGOpts);
  Conf.MAttrs = TOpts.Features;
  Conf.RelocModel = getRelocModel(CGOpts);
  Conf.CGOptLevel = getCGOptLevel(CGOpts);
  initTargetOptions(Conf.Options, CGOpts, TOpts, LOpts, HeaderOpts);
  Conf.SampleProfile = std::move(SampleProfile);
  Conf.UseNewPM = CGOpts.ExperimentalNewPassManager;
  switch (Action) {
  case Backend_EmitNothing:
    Conf.PreCodeGenModuleHook = [](size_t Task, const Module &Mod) {
      return false;
    };
    break;
  case Backend_EmitLL:
    Conf.PreCodeGenModuleHook = [&](size_t Task, const Module &Mod) {
      M->print(*OS, nullptr, CGOpts.EmitLLVMUseLists);
      return false;
    };
    break;
  case Backend_EmitBC:
    Conf.PreCodeGenModuleHook = [&](size_t Task, const Module &Mod) {
      WriteBitcodeToFile(M, *OS, CGOpts.EmitLLVMUseLists);
      return false;
    };
    break;
  default:
    Conf.CGFileType = getCodeGenFileType(Action);
    break;
  }
  if (Error E = thinBackend(
          Conf, 0, AddStream, *M, *CombinedIndex, ImportList,
          ModuleToDefinedGVSummaries[M->getModuleIdentifier()], ModuleMap)) {
    handleAllErrors(std::move(E), [&](ErrorInfoBase &EIB) {
      errs() << "Error running ThinLTO backend: " << EIB.message() << '\n';
    });
  }
}
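In both ThinLTO variants, OwnedImports exists because ModuleMap's StringRef keys and BitcodeModule values point into the loaded MemoryBuffers rather than owning them. A minimal sketch of the same ownership discipline, with placeholder names:

// Sketch: StringRef does not own its characters, so keep the backing
// storage alive at least as long as the MapVector that refers to it
// (here, heap strings stand in for the MemoryBuffers above).
std::vector<std::unique_ptr<std::string>> OwnedStorage;
llvm::MapVector<llvm::StringRef, int> Map;

void addEntry(const std::string &Name, int V) {
  OwnedStorage.push_back(llvm::make_unique<std::string>(Name));
  Map.insert(std::make_pair(llvm::StringRef(*OwnedStorage.back()), V));
}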
Example #16
MatrixBlockMonolithicEpetra<DataType>::MatrixBlockMonolithicEpetra ( const MapVector<MapEpetra>& vector, int numEntries)
    :
    MatrixEpetra<DataType> (vector.totalMap() )
    //MatrixEpetra<DataType>( typename MatrixEpetra<DataType>::matrix_ptrtype())
{
    ASSERT ( vector.nbMap() > 0 , "Map vector empty, impossible to construct a MatrixBlockMonolithicEpetra!");

    MapEpetra myMap (vector.map (0) );
    M_blockNumRows.push_back (vector.mapSize (0) );
    M_blockNumColumns.push_back (vector.mapSize (0) );
    M_blockFirstRows.push_back (0);
    M_blockFirstColumns.push_back (0);

    UInt totalRows (vector.mapSize (0) );
    UInt totalColumns (vector.mapSize (0) );

    for (UInt i (1); i < vector.nbMap(); ++i)
    {
        myMap += vector.map (i);
        M_blockNumRows.push_back (vector.mapSize (i) );
        M_blockNumColumns.push_back (vector.mapSize (i) );
        M_blockFirstRows.push_back (totalRows);
        M_blockFirstColumns.push_back (totalColumns);

        totalRows += vector.mapSize (i);
        totalColumns += vector.mapSize (i);
    }

    this->mapPtr().reset (new MapEpetra (myMap) );
    this->matrixPtr().reset ( new typename MatrixEpetra<DataType>::matrix_type ( Copy, *myMap.map ( Unique ), numEntries, false) );
}
Example #17
static void
computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
                       const Function &F, BlockFrequencyInfo *BFI,
                       ProfileSummaryInfo *PSI, bool HasLocalsInUsedOrAsm,
                       DenseSet<GlobalValue::GUID> &CantBePromoted) {
  // Summary not currently supported for anonymous functions; they should
  // have been named.
  assert(F.hasName());

  unsigned NumInsts = 0;
  // Map from callee ValueId to profile count. Used to accumulate profile
  // counts for all static calls to a given callee.
  MapVector<ValueInfo, CalleeInfo> CallGraphEdges;
  SetVector<ValueInfo> RefEdges;
  SetVector<GlobalValue::GUID> TypeTests;
  SetVector<FunctionSummary::VFuncId> TypeTestAssumeVCalls,
      TypeCheckedLoadVCalls;
  SetVector<FunctionSummary::ConstVCall> TypeTestAssumeConstVCalls,
      TypeCheckedLoadConstVCalls;
  ICallPromotionAnalysis ICallAnalysis;
  SmallPtrSet<const User *, 8> Visited;

  // Add personality function, prefix data and prologue data to function's ref
  // list.
  findRefEdges(Index, &F, RefEdges, Visited);

  bool HasInlineAsmMaybeReferencingInternal = false;
  for (const BasicBlock &BB : F)
    for (const Instruction &I : BB) {
      if (isa<DbgInfoIntrinsic>(I))
        continue;
      ++NumInsts;
      findRefEdges(Index, &I, RefEdges, Visited);
      auto CS = ImmutableCallSite(&I);
      if (!CS)
        continue;

      const auto *CI = dyn_cast<CallInst>(&I);
      // Since we don't know exactly which local values are referenced in inline
      // assembly, conservatively mark the function as possibly referencing
      // a local value from inline assembly to ensure we don't export a
      // reference (which would require renaming and promotion of the
      // referenced value).
      if (HasLocalsInUsedOrAsm && CI && CI->isInlineAsm())
        HasInlineAsmMaybeReferencingInternal = true;

      auto *CalledValue = CS.getCalledValue();
      auto *CalledFunction = CS.getCalledFunction();
      if (CalledValue && !CalledFunction) {
        CalledValue = CalledValue->stripPointerCastsNoFollowAliases();
        // Stripping pointer casts can reveal a called function.
        CalledFunction = dyn_cast<Function>(CalledValue);
      }
      // Check if this is an alias to a function. If so, get the
      // called aliasee for the checks below.
      if (auto *GA = dyn_cast<GlobalAlias>(CalledValue)) {
        assert(!CalledFunction && "Expected null called function in callsite for alias");
        CalledFunction = dyn_cast<Function>(GA->getBaseObject());
      }
      // Check if this is a direct call to a known function or a known
      // intrinsic, or an indirect call with profile data.
      if (CalledFunction) {
        if (CI && CalledFunction->isIntrinsic()) {
          addIntrinsicToSummary(
              CI, TypeTests, TypeTestAssumeVCalls, TypeCheckedLoadVCalls,
              TypeTestAssumeConstVCalls, TypeCheckedLoadConstVCalls);
          continue;
        }
        // We should have named any anonymous globals
        assert(CalledFunction->hasName());
        auto ScaledCount = PSI->getProfileCount(&I, BFI);
        auto Hotness = ScaledCount ? getHotness(ScaledCount.getValue(), PSI)
                                   : CalleeInfo::HotnessType::Unknown;
        if (ForceSummaryEdgesCold != FunctionSummary::FSHT_None)
          Hotness = CalleeInfo::HotnessType::Cold;

        // Use the original CalledValue, in case it was an alias. We want
        // to record the call edge to the alias in that case. Eventually
        // an alias summary will be created to associate the alias and
        // aliasee.
        auto &ValueInfo = CallGraphEdges[Index.getOrInsertValueInfo(
            cast<GlobalValue>(CalledValue))];
        ValueInfo.updateHotness(Hotness);
        // Add the relative block frequency to CalleeInfo if there is no profile
        // information.
        if (BFI != nullptr && Hotness == CalleeInfo::HotnessType::Unknown) {
          uint64_t BBFreq = BFI->getBlockFreq(&BB).getFrequency();
          uint64_t EntryFreq = BFI->getEntryFreq();
          ValueInfo.updateRelBlockFreq(BBFreq, EntryFreq);
        }
      } else {
        // Skip inline assembly calls.
        if (CI && CI->isInlineAsm())
          continue;
        // Skip direct calls.
        if (!CalledValue || isa<Constant>(CalledValue))
          continue;

        // Check if the instruction has a callees metadata. If so, add callees
        // to CallGraphEdges to reflect the references from the metadata, and
        // to enable importing for subsequent indirect call promotion and
        // inlining.
        if (auto *MD = I.getMetadata(LLVMContext::MD_callees)) {
          for (auto &Op : MD->operands()) {
            Function *Callee = mdconst::extract_or_null<Function>(Op);
            if (Callee)
              CallGraphEdges[Index.getOrInsertValueInfo(Callee)];
          }
        }

        uint32_t NumVals, NumCandidates;
        uint64_t TotalCount;
        auto CandidateProfileData =
            ICallAnalysis.getPromotionCandidatesForInstruction(
                &I, NumVals, TotalCount, NumCandidates);
        for (auto &Candidate : CandidateProfileData)
          CallGraphEdges[Index.getOrInsertValueInfo(Candidate.Value)]
              .updateHotness(getHotness(Candidate.Count, PSI));
      }
    }

  // Explicitly add hot edges to enforce importing for designated GUIDs for
  // sample PGO, to enable the same inlines as in the profiled optimized binary.
  for (auto &I : F.getImportGUIDs())
    CallGraphEdges[Index.getOrInsertValueInfo(I)].updateHotness(
        ForceSummaryEdgesCold == FunctionSummary::FSHT_All
            ? CalleeInfo::HotnessType::Cold
            : CalleeInfo::HotnessType::Critical);

  bool NonRenamableLocal = isNonRenamableLocal(F);
  bool NotEligibleForImport =
      NonRenamableLocal || HasInlineAsmMaybeReferencingInternal ||
      // Inliner doesn't handle variadic functions.
      // FIXME: refactor this to use the same code that inliner is using.
      F.isVarArg() ||
      // Don't try to import functions with noinline attribute.
      F.getAttributes().hasFnAttribute(Attribute::NoInline);
  GlobalValueSummary::GVFlags Flags(F.getLinkage(), NotEligibleForImport,
                                    /* Live = */ false, F.isDSOLocal());
  FunctionSummary::FFlags FunFlags{
      F.hasFnAttribute(Attribute::ReadNone),
      F.hasFnAttribute(Attribute::ReadOnly),
      F.hasFnAttribute(Attribute::NoRecurse),
      F.returnDoesNotAlias(),
  };
  auto FuncSummary = llvm::make_unique<FunctionSummary>(
      Flags, NumInsts, FunFlags, RefEdges.takeVector(),
      CallGraphEdges.takeVector(), TypeTests.takeVector(),
      TypeTestAssumeVCalls.takeVector(), TypeCheckedLoadVCalls.takeVector(),
      TypeTestAssumeConstVCalls.takeVector(),
      TypeCheckedLoadConstVCalls.takeVector());
  if (NonRenamableLocal)
    CantBePromoted.insert(F.getGUID());
  Index.addGlobalValueSummary(F.getName(), std::move(FuncSummary));
}
Example #18
static std::error_code write(MCStreamer &Out, ArrayRef<std::string> Inputs) {
  const auto &MCOFI = *Out.getContext().getObjectFileInfo();
  MCSection *const StrSection = MCOFI.getDwarfStrDWOSection();
  MCSection *const StrOffsetSection = MCOFI.getDwarfStrOffDWOSection();
  MCSection *const TypesSection = MCOFI.getDwarfTypesDWOSection();
  MCSection *const CUIndexSection = MCOFI.getDwarfCUIndexSection();
  MCSection *const TUIndexSection = MCOFI.getDwarfTUIndexSection();
  const StringMap<std::pair<MCSection *, DWARFSectionKind>> KnownSections = {
      {"debug_info.dwo", {MCOFI.getDwarfInfoDWOSection(), DW_SECT_INFO}},
      {"debug_types.dwo", {MCOFI.getDwarfTypesDWOSection(), DW_SECT_TYPES}},
      {"debug_str_offsets.dwo", {StrOffsetSection, DW_SECT_STR_OFFSETS}},
      {"debug_str.dwo", {StrSection, static_cast<DWARFSectionKind>(0)}},
      {"debug_loc.dwo", {MCOFI.getDwarfLocDWOSection(), DW_SECT_LOC}},
      {"debug_line.dwo", {MCOFI.getDwarfLineDWOSection(), DW_SECT_LINE}},
      {"debug_abbrev.dwo", {MCOFI.getDwarfAbbrevDWOSection(), DW_SECT_ABBREV}},
      {"debug_cu_index", {CUIndexSection, static_cast<DWARFSectionKind>(0)}},
      {"debug_tu_index", {TUIndexSection, static_cast<DWARFSectionKind>(0)}}};

  MapVector<uint64_t, UnitIndexEntry> IndexEntries;
  MapVector<uint64_t, UnitIndexEntry> TypeIndexEntries;

  StringMap<uint32_t> Strings;
  uint32_t StringOffset = 0;

  // One running output offset per DWARF section kind, indexed by
  // Kind - DW_SECT_INFO (8 kinds, DW_SECT_INFO through DW_SECT_MACRO).
  uint32_t ContributionOffsets[8] = {};

  for (const auto &Input : Inputs) {
    auto ErrOrObj = object::ObjectFile::createObjectFile(Input);
    if (!ErrOrObj)
      return errorToErrorCode(ErrOrObj.takeError());

    UnitIndexEntry CurEntry = {};

    StringRef CurStrSection;
    StringRef CurStrOffsetSection;
    std::vector<StringRef> CurTypesSection;
    StringRef InfoSection;
    StringRef AbbrevSection;
    StringRef CurCUIndexSection;
    StringRef CurTUIndexSection;

    SmallVector<SmallString<32>, 4> UncompressedSections;

    for (const auto &Section : ErrOrObj->getBinary()->sections()) {
      if (Section.isBSS())
        continue;
      if (Section.isVirtual())
        continue;

      StringRef Name;
      if (std::error_code Err = Section.getName(Name))
        return Err;

      Name = Name.substr(Name.find_first_not_of("._"));

      StringRef Contents;
      if (auto Err = Section.getContents(Contents))
        return Err;

      if (Name.startswith("zdebug_")) {
        uint64_t OriginalSize;
        if (!zlib::isAvailable() ||
            !consumeCompressedDebugSectionHeader(Contents, OriginalSize))
          return make_error_code(std::errc::invalid_argument);
        UncompressedSections.resize(UncompressedSections.size() + 1);
        if (zlib::uncompress(Contents, UncompressedSections.back(), OriginalSize) !=
            zlib::StatusOK) {
          UncompressedSections.pop_back();
          continue;
        }
        Name = Name.substr(1);
        Contents = UncompressedSections.back();
      }

      auto SectionPair = KnownSections.find(Name);
      if (SectionPair == KnownSections.end())
        continue;

      if (DWARFSectionKind Kind = SectionPair->second.second) {
        auto Index = Kind - DW_SECT_INFO;
        if (Kind != DW_SECT_TYPES) {
          CurEntry.Contributions[Index].Offset = ContributionOffsets[Index];
          ContributionOffsets[Index] +=
              (CurEntry.Contributions[Index].Length = Contents.size());
        }

        switch (Kind) {
        case DW_SECT_INFO:
          InfoSection = Contents;
          break;
        case DW_SECT_ABBREV:
          AbbrevSection = Contents;
          break;
        default:
          break;
        }
      }

      MCSection *OutSection = SectionPair->second.first;
      if (OutSection == StrOffsetSection)
        CurStrOffsetSection = Contents;
      else if (OutSection == StrSection)
        CurStrSection = Contents;
      else if (OutSection == TypesSection)
        CurTypesSection.push_back(Contents);
      else if (OutSection == CUIndexSection)
        CurCUIndexSection = Contents;
      else if (OutSection == TUIndexSection)
        CurTUIndexSection = Contents;
      else {
        Out.SwitchSection(OutSection);
        Out.EmitBytes(Contents);
      }
    }

    if (InfoSection.empty())
      continue;

    if (!CurCUIndexSection.empty()) {
      DWARFUnitIndex CUIndex(DW_SECT_INFO);
      DataExtractor CUIndexData(CurCUIndexSection,
                                ErrOrObj->getBinary()->isLittleEndian(), 0);
      if (!CUIndex.parse(CUIndexData))
        return make_error_code(std::errc::invalid_argument);

      for (const DWARFUnitIndex::Entry &E : CUIndex.getRows()) {
        auto *I = E.getOffsets();
        if (!I)
          continue;
        auto P =
            IndexEntries.insert(std::make_pair(E.getSignature(), CurEntry));
        CompileUnitIdentifiers ID = getCUIdentifiers(
            getSubsection(AbbrevSection, E, DW_SECT_ABBREV),
            getSubsection(InfoSection, E, DW_SECT_INFO),
            getSubsection(CurStrOffsetSection, E, DW_SECT_STR_OFFSETS),
            CurStrSection);
        if (!P.second) {
          printDuplicateError(*P.first, ID, Input);
          return make_error_code(std::errc::invalid_argument);
        }
        auto &NewEntry = P.first->second;
        NewEntry.Name = ID.Name;
        NewEntry.DWOName = ID.DWOName;
        NewEntry.DWPName = Input;
        for (auto Kind : CUIndex.getColumnKinds()) {
          auto &C = NewEntry.Contributions[Kind - DW_SECT_INFO];
          C.Offset += I->Offset;
          C.Length = I->Length;
          ++I;
        }
      }

      if (!CurTypesSection.empty()) {
        assert(CurTypesSection.size() == 1);
        if (CurTUIndexSection.empty())
          return make_error_code(std::errc::invalid_argument);
        DWARFUnitIndex TUIndex(DW_SECT_TYPES);
        DataExtractor TUIndexData(CurTUIndexSection,
                                  ErrOrObj->getBinary()->isLittleEndian(), 0);
        if (!TUIndex.parse(TUIndexData))
          return make_error_code(std::errc::invalid_argument);
        addAllTypesFromDWP(Out, TypeIndexEntries, TUIndex, TypesSection,
                           CurTypesSection.front(), CurEntry,
                           ContributionOffsets[DW_SECT_TYPES - DW_SECT_INFO]);
      }
    } else {
      CompileUnitIdentifiers ID = getCUIdentifiers(
          AbbrevSection, InfoSection, CurStrOffsetSection, CurStrSection);
      auto P = IndexEntries.insert(std::make_pair(ID.Signature, CurEntry));
      if (!P.second) {
        printDuplicateError(*P.first, ID, "");
        return make_error_code(std::errc::invalid_argument);
      }
      P.first->second.Name = ID.Name;
      P.first->second.DWOName = ID.DWOName;
      addAllTypes(Out, TypeIndexEntries, TypesSection, CurTypesSection,
                  CurEntry, ContributionOffsets[DW_SECT_TYPES - DW_SECT_INFO]);
    }

    if (auto Err = writeStringsAndOffsets(Out, Strings, StringOffset,
                                          StrSection, StrOffsetSection,
                                          CurStrSection, CurStrOffsetSection))
      return Err;
  }

  // Lie about there being no info contributions so the TU index only includes
  // the type unit contribution
  ContributionOffsets[0] = 0;
  writeIndex(Out, MCOFI.getDwarfTUIndexSection(), ContributionOffsets,
             TypeIndexEntries);

  // Lie about the type contribution
  ContributionOffsets[DW_SECT_TYPES - DW_SECT_INFO] = 0;
  // Unlie about the info contribution
  ContributionOffsets[0] = 1;

  writeIndex(Out, MCOFI.getDwarfCUIndexSection(), ContributionOffsets,
             IndexEntries);

  return std::error_code();
}
Example #19
TEST(MapVectorTest, remove_if) {
  MapVector<int, int> MV;

  MV.insert(std::make_pair(1, 11));
  MV.insert(std::make_pair(2, 12));
  MV.insert(std::make_pair(3, 13));
  MV.insert(std::make_pair(4, 14));
  MV.insert(std::make_pair(5, 15));
  MV.insert(std::make_pair(6, 16));
  ASSERT_EQ(MV.size(), 6u);

  MV.remove_if([](const std::pair<int, int> &Val) { return Val.second % 2; });
  ASSERT_EQ(MV.size(), 3u);
  ASSERT_EQ(MV.find(1), MV.end());
  ASSERT_EQ(MV.find(3), MV.end());
  ASSERT_EQ(MV.find(5), MV.end());
  ASSERT_EQ(MV[2], 12);
  ASSERT_EQ(MV[4], 14);
  ASSERT_EQ(MV[6], 16);
}