static void
writeIndex(MCStreamer &Out, MCSection *Section,
           ArrayRef<unsigned> ContributionOffsets,
           const MapVector<uint64_t, UnitIndexEntry> &IndexEntries) {
  if (IndexEntries.empty())
    return;

  unsigned Columns = 0;
  for (auto &C : ContributionOffsets)
    if (C)
      ++Columns;

  std::vector<unsigned> Buckets(NextPowerOf2(3 * IndexEntries.size() / 2));
  uint64_t Mask = Buckets.size() - 1;
  size_t i = 0;
  for (const auto &P : IndexEntries) {
    auto S = P.first;
    auto H = S & Mask;
    auto HP = ((S >> 32) & Mask) | 1;
    while (Buckets[H]) {
      assert(S != IndexEntries.begin()[Buckets[H] - 1].first &&
             "Duplicate unit");
      H = (H + HP) & Mask;
    }
    Buckets[H] = i + 1;
    ++i;
  }

  Out.SwitchSection(Section);
  Out.EmitIntValue(2, 4);                   // Version
  Out.EmitIntValue(Columns, 4);             // Columns
  Out.EmitIntValue(IndexEntries.size(), 4); // Num Units
  Out.EmitIntValue(Buckets.size(), 4);      // Num Buckets

  // Write the signatures.
  for (const auto &I : Buckets)
    Out.EmitIntValue(I ? IndexEntries.begin()[I - 1].first : 0, 8);

  // Write the indexes.
  for (const auto &I : Buckets)
    Out.EmitIntValue(I, 4);

  // Write the column headers (which sections will appear in the table).
  for (size_t i = 0; i != ContributionOffsets.size(); ++i)
    if (ContributionOffsets[i])
      Out.EmitIntValue(i + DW_SECT_INFO, 4);

  // Write the offsets.
  writeIndexTable(Out, ContributionOffsets, IndexEntries,
                  &DWARFUnitIndex::Entry::SectionContribution::Offset);

  // Write the lengths.
  writeIndexTable(Out, ContributionOffsets, IndexEntries,
                  &DWARFUnitIndex::Entry::SectionContribution::Length);
}
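// Note on the open-addressed table built above (illustration, not part of the
// original source): the bucket count is a power of two, so `Mask` keeps the
// low bits, and the secondary hash HP is forced odd by the `| 1`. An odd step
// is coprime with a power-of-two table size, so the probe sequence
// H, H + HP, H + 2*HP, ... (mod Buckets.size()) visits every bucket, and
// insertion terminates whenever a free slot exists. Sizing the table with
// NextPowerOf2(3 * IndexEntries.size() / 2) keeps the load factor below 2/3.
// A consumer of the index can find a unit signature S with the same walk,
// where SignatureAt/IndexAt are hypothetical accessors over the two parallel
// arrays emitted above:
//
//   uint64_t H = S & Mask, HP = ((S >> 32) & Mask) | 1;
//   while (IndexAt(H) != 0) {        // 0 marks an empty bucket
//     if (SignatureAt(H) == S)
//       return IndexAt(H);           // found the unit's row
//     H = (H + HP) & Mask;
//   }
//   return 0;                        // S is not in the index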
// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];  // (1)
//                                A[i] = b;  // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const ValueToValueMap &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
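  //
  // E.g. (illustration, not from the original source), in a stride-2 loop
  // over i32 elements:
  //
  //   x = A[2 * i];      // (1)
  //   y = A[2 * i + 1];  // (2)
  //
  // the bottom-up scan initializes a group for (2) first and then considers
  // (1). Both loads satisfy rules 1 and 2 (stride 2, size 4), and the constant
  // distance from (1) to (2) is -4 bytes, a multiple of the size, so rule 3
  // places (1) one slot before (2) in the same group. The resulting factor-2
  // group can later be lowered to a single wide load plus shuffles.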
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup *Group = nullptr;
    if (isStrided(DesB.Stride) &&
        (!isPredicated(B->getParent()) ||
         EnablePredicatedInterleavedMemAccesses)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a; // (1)
      //                    | A[i-1] = b; // (2) |
      //                      A[i-3] = c; // (3)
      //                      A[i]   = d; // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup *StoreGroup = getInterleaveGroup(A);
          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B. Note that mayReadFromMemory() isn't mutually exclusive
      // with mayWriteToMemory() in the case of atomic loads. We shouldn't see
      // those here; canVectorizeMemory() should have returned false - except
      // when we've only been asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory objects of A and B don't belong to the same
      // address space.
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;
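      // Note (illustration, not from the original source): DistanceToB is
      // signed, since A precedes B in program order but may sit at a lower or
      // higher address. With 4-byte elements, DistanceToB == -4 places A one
      // slot before B, while DistanceToB == -2 fails the divisibility check
      // above and A is simply skipped. A negative resulting index is
      // acceptable here: as we understand it, insertMember() keys members
      // relative to the smallest index seen so far, so a group can grow
      // toward lower addresses.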
      // All members of a predicated interleave-group must have the same
      // predicate, and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();
      BasicBlock *BlockB = B->getParent();
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Align)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << "    into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (InterleaveGroup *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor()) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
    }

  // Remove interleaved groups with gaps (currently only loads) whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer
  // doesn't wrap, then we can deduce that all pointers in the group don't
  // wrap. This means that we can forcefully peel the loop in order to only
  // have to check the first pointer for no-wrap. Once we change to
  // Assume=true, we'll only need at most one runtime check per interleaved
  // group.
  for (InterleaveGroup *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;
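    // Illustration (not from the original source): a factor-2 load group with
    // a gap arises from, e.g., `x = A[2 * i];` where A[2 * i + 1] is never
    // read, giving one member against a factor of two. The widened load still
    // fetches both lanes, so the last vector iteration may touch memory past
    // the final element the scalar loop would access; the wrap checks and
    // epilogue logic below exist to keep that over-read safe.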
    // Case 2: If the first and last members of the group don't wrap, this
    // implies that none of the pointers in the group wrap. So we check only
    // group member 0 (which is always guaranteed to exist) and group member
    // Factor - 1; if the latter doesn't exist, we rely on peeling (if it is a
    // non-reversed access -- see Case 3).
    Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
    if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                      /*ShouldCheckWrap=*/true)) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved group due to "
                    "first group member potentially pointer-wrapping.\n");
      releaseGroup(Group);
      continue;
    }
    Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
    if (LastMember) {
      Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
      if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                        /*ShouldCheckWrap=*/true)) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "last group member potentially pointer-wrapping.\n");
        releaseGroup(Group);
      }
    } else {
      // Case 3: A non-reversed interleaved load group with gaps: we need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index Factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }
}
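// Note on Case 2 above (illustration, not from the original source): the
// addresses of a group's members all lie between those of member 0 and member
// Factor - 1, so if neither endpoint's address recurrence wraps around the
// address space, no interior member's can either. E.g., for a factor-3 group
// over i32 with members at indices 0 and 2, proving members 0 and 2 no-wrap
// also covers the missing index-1 lane, since its address always lies
// strictly between the two proven ones.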