/*
 * Despite its name, this function categorizes the l-block "line": it annotates
 * the l-block with ALWAYSMISS/ALWAYSHIT/FIRSTMISS/FIRSTHIT. In the case of
 * FIRSTMISS, it also annotates it with the loop header of the innermost
 * enclosing loop.
 */
void CATBuilder::worst(LBlock *line, ContextTree *node, LBlockSet *idset, int dec) {
    int number = idset->count();
    BasicBlock *bb = line->bb();
    LBlock *cacheline;
    BitSet *in = IN(bb);
    bool nonconflitdetected = false;
    bool continu = false;
    unsigned long tagcachline, tagline;

    // Test whether the only l-block reaching "line" maps to the same memory block.
    /*
     * If IN(line) = {LB} and cacheblock(line) == cacheblock(LB), then the
     * access is non-conflicting (Always Hit).
     */
    if (in->count() == 1) {
        for (int i = 0; i < number; i++) {
            if (in->contains(i)) {
                cacheline = idset->lblock(i);
                tagcachline = ((unsigned long)cacheline->address()) >> dec;
                tagline = ((unsigned long)line->address()) >> dec;
                if (tagcachline == tagline)
                    nonconflitdetected = true;
            }
        }
    }
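    /*
     * Worked example (illustrative, not from the original source): with
     * dec = 4, i.e. 16-byte cache blocks, an l-block at address 0x1004 and a
     * reaching l-block at address 0x100c both have tag 0x100 after the shift,
     * so the access is classified as non-conflicting (Always Hit).
     */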
/**
 * Test if the first basic block postdominates the second one.
 * @param bb1 Potential postdominator BB.
 * @param bb2 Potentially postdominated BB.
 * @return True if bb1 postdominates bb2.
 */
bool PostDominance::postDominates(BasicBlock *bb1, BasicBlock *bb2) {
    ASSERTP(bb1, "null BB 1");
    ASSERTP(bb2, "null BB 2");
    ASSERTP(bb1->cfg() == bb2->cfg(), "BB 1 and BB 2 are not owned by the same CFG");
    int index = bb1->number();
    ASSERTP(index >= 0, "no index for BB 1");
    BitSet *set = REVERSE_POSTDOM(bb2);
    ASSERTP(set, "no postdomination information for BB 2");
    ASSERTP(bb1 == bb2
        || !REVERSE_POSTDOM(bb1)->contains(bb2->number())
        || !REVERSE_POSTDOM(bb2)->contains(bb1->number()),
        "CFG with disconnected nodes");
    return set->contains(index);
}
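/*
 * Usage sketch (illustrative, not part of the original file): assuming the
 * post-dominance analysis has already been run on the enclosing CFG so that
 * REVERSE_POSTDOM sets are available, a client can query the relation
 * directly. "exitBB" and "someBB" are hypothetical blocks of the same CFG.
 */
static bool exitsThrough(PostDominance &pdom, BasicBlock *exitBB, BasicBlock *someBB) {
    // True when every path leaving someBB eventually reaches exitBB.
    return pdom.postDominates(exitBB, someBB);
}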
/*
 * This function categorizes the l-blocks in the ContextTree S (and in its children).
 *
 * @param lineset The l-block list.
 * @param S The root context tree.
 * @param dec In an address, the number of bits representing the offset in a cache block.
 */
void CATBuilder::setCATEGORISATION(LBlockSet *lineset, ContextTree *S, int dec) {
    int size = lineset->count();
    int ident;
    LBlock *cachelin;

    /*
     * First, categorize all the l-blocks in the child ContextTrees.
     */
    for (ContextTree::ChildrenIterator fils(S); fils; fils++) {
        setCATEGORISATION(lineset, fils, dec);
    }

    /* Now categorize the l-blocks in this ContextTree. */
    if (S->kind() == ContextTree::LOOP) {
        /*
         * Call worst() on each l-block of the loop.
         */
        BitSet *u = SET(S);
        for (int a = 0; a < size; a++) {
            if (u->contains(a)) {
                cachelin = lineset->lblock(a);
                worst(cachelin, S, lineset, dec);
            }
        }
    }
    else {
        /*
         * Call worst() on each l-block of this ContextTree.
         */
        for (ContextTree::BBIterator bk(S); bk; bk++) {
            for (BasicBlock::InstIter inst(bk); inst; inst++) {
                address_t adlbloc = inst->address();
                for (LBlockSet::Iterator lbloc(*lineset); lbloc; lbloc++) {
                    if ((adlbloc == lbloc->address()) && (bk == lbloc->bb())) {
                        ident = lbloc->id();
                        cachelin = lineset->lblock(ident);
                        worst(cachelin, S, lineset, dec);
                    }
                }
            }
        }
    }
}
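/*
 * Illustrative sketch (not from the original source): how the "dec" argument
 * of setCATEGORISATION()/worst() relates to the cache geometry. "dec" is the
 * number of block-offset bits, i.e. log2 of the cache block size in bytes,
 * which is exactly what the tag computation in worst() shifts away. The helper
 * name and the example sizes below are assumptions made for this example only.
 */
static inline int offsetBits(unsigned blockSizeBytes) {
    int dec = 0;
    while ((1u << dec) < blockSizeBytes)    // smallest dec such that 2^dec >= block size
        dec++;
    return dec;                             // e.g. offsetBits(16) == 4, offsetBits(32) == 5
}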
bool LiveRangeAllocator<VREG>::buildLivenessInfo() {
    if (!init())
        return false;

    Vector<MBasicBlock *, 1, SystemAllocPolicy> loopWorkList;
    BitSet *loopDone = BitSet::New(alloc(), graph.numBlockIds());
    if (!loopDone)
        return false;

    for (size_t i = graph.numBlocks(); i > 0; i--) {
        if (mir->shouldCancel("Build Liveness Info (main loop)"))
            return false;

        LBlock *block = graph.getBlock(i - 1);
        MBasicBlock *mblock = block->mir();

        BitSet *live = BitSet::New(alloc(), graph.numVirtualRegisters());
        if (!live)
            return false;
        liveIn[mblock->id()] = live;

        // Propagate liveIn from our successors to us
        for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
            MBasicBlock *successor = mblock->lastIns()->getSuccessor(i);
            // Skip backedges, as we fix them up at the loop header.
            if (mblock->id() < successor->id())
                live->insertAll(liveIn[successor->id()]);
        }

        // Add successor phis
        if (mblock->successorWithPhis()) {
            LBlock *phiSuccessor = mblock->successorWithPhis()->lir();
            for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
                LPhi *phi = phiSuccessor->getPhi(j);
                LAllocation *use = phi->getOperand(mblock->positionInPhiSuccessor());
                uint32_t reg = use->toUse()->virtualRegister();
                live->insert(reg);
            }
        }

        // Variables are assumed alive for the entire block, a define shortens
        // the interval to the point of definition.
        for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
            if (!vregs[*liveRegId].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                                  outputOf(block->lastId()).next()))
            {
                return false;
            }
        }

        // Shorten the front end of live intervals for live variables to their
        // point of definition, if found.
        for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
            // Calls may clobber registers, so force a spill and reload around the callsite.
            if (ins->isCall()) {
                for (AnyRegisterIterator iter(allRegisters_); iter.more(); iter++) {
                    if (forLSRA) {
                        if (!addFixedRangeAtHead(*iter, inputOf(*ins), outputOf(*ins)))
                            return false;
                    } else {
                        bool found = false;
                        for (size_t i = 0; i < ins->numDefs(); i++) {
                            if (ins->getDef(i)->isPreset() &&
                                *ins->getDef(i)->output() == LAllocation(*iter))
                            {
                                found = true;
                                break;
                            }
                        }
                        if (!found && !addFixedRangeAtHead(*iter, outputOf(*ins), outputOf(*ins).next()))
                            return false;
                    }
                }
            }

            for (size_t i = 0; i < ins->numDefs(); i++) {
                if (ins->getDef(i)->policy() != LDefinition::PASSTHROUGH) {
                    LDefinition *def = ins->getDef(i);

                    CodePosition from;
                    if (def->policy() == LDefinition::PRESET && def->output()->isRegister() && forLSRA) {
                        // The fixed range covers the current instruction so the
                        // interval for the virtual register starts at the next
                        // instruction. If the next instruction has a fixed use,
                        // this can lead to unnecessary register moves. To avoid
                        // special handling for this, assert the next instruction
                        // has no fixed uses. defineFixed guarantees this by inserting
                        // an LNop.
                        JS_ASSERT(!NextInstructionHasFixedUses(block, *ins));
                        AnyRegister reg = def->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins).next()))
                            return false;
                        from = outputOf(*ins).next();
                    } else {
                        from = forLSRA ? inputOf(*ins) : outputOf(*ins);
                    }

                    if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
                        // MUST_REUSE_INPUT is implemented by allocating an output
                        // register and moving the input to it. Register hints are
                        // used to avoid unnecessary moves. We give the input an
                        // LUse::ANY policy to avoid allocating a register for the
                        // input.
                        LUse *inputUse = ins->getOperand(def->getReusedInput())->toUse();
                        JS_ASSERT(inputUse->policy() == LUse::REGISTER);
                        JS_ASSERT(inputUse->usedAtStart());
                        *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
                    }

                    LiveInterval *interval = vregs[def].getInterval(0);
                    interval->setFrom(from);

                    // Ensure that if there aren't any uses, there's at least
                    // some interval for the output to go into.
                    if (interval->numRanges() == 0) {
                        if (!interval->addRangeAtHead(from, from.next()))
                            return false;
                    }
                    live->remove(def->virtualRegister());
                }
            }

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition *temp = ins->getTemp(i);
                if (temp->isBogusTemp())
                    continue;

                if (forLSRA) {
                    if (temp->policy() == LDefinition::PRESET) {
                        if (ins->isCall())
                            continue;
                        AnyRegister reg = temp->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                            return false;

                        // Fixed intervals are not added to safepoints, so do it
                        // here.
                        if (LSafepoint *safepoint = ins->safepoint())
                            AddRegisterToSafepoint(safepoint, reg, *temp);
                    } else {
                        JS_ASSERT(!ins->isCall());
                        if (!vregs[temp].getInterval(0)->addRangeAtHead(inputOf(*ins), outputOf(*ins)))
                            return false;
                    }
                } else {
                    // Normally temps are considered to cover both the input
                    // and output of the associated instruction. In some cases
                    // though we want to use a fixed register as both an input
                    // and clobbered register in the instruction, so watch for
                    // this and shorten the temp to cover only the output.
                    CodePosition from = inputOf(*ins);
                    if (temp->policy() == LDefinition::PRESET) {
                        AnyRegister reg = temp->output()->toRegister();
                        for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
                            if (alloc->isUse()) {
                                LUse *use = alloc->toUse();
                                if (use->isFixedRegister()) {
                                    if (GetFixedRegister(vregs[use].def(), use) == reg)
                                        from = outputOf(*ins);
                                }
                            }
                        }
                    }

                    CodePosition to = ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
                    if (!vregs[temp].getInterval(0)->addRangeAtHead(from, to))
                        return false;
                }
            }

            DebugOnly<bool> hasUseRegister = false;
            DebugOnly<bool> hasUseRegisterAtStart = false;

            for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) {
                if (inputAlloc->isUse()) {
                    LUse *use = inputAlloc->toUse();

                    // The first instruction, LLabel, has no uses.
                    JS_ASSERT(inputOf(*ins) > outputOf(block->firstId()));

                    // Call uses should always be at-start or fixed, since the fixed intervals
                    // use all registers.
                    JS_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
                                 use->isFixedRegister() || use->usedAtStart());

#ifdef DEBUG
                    // Don't allow at-start call uses if there are temps of the same kind,
                    // so that we don't assign the same register.
                    if (ins->isCall() && use->usedAtStart()) {
                        for (size_t i = 0; i < ins->numTemps(); i++)
                            JS_ASSERT(vregs[ins->getTemp(i)].isDouble() != vregs[use].isDouble());
                    }

                    // If there are both useRegisterAtStart(x) and useRegister(y)
                    // uses, we may assign the same register to both operands due to
                    // interval splitting (bug 772830). Don't allow this for now.
                    if (use->policy() == LUse::REGISTER) {
                        if (use->usedAtStart()) {
                            if (!IsInputReused(*ins, use))
                                hasUseRegisterAtStart = true;
                        } else {
                            hasUseRegister = true;
                        }
                    }

                    JS_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
#endif

                    // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
                    if (use->policy() == LUse::RECOVERED_INPUT)
                        continue;

                    CodePosition to;
                    if (forLSRA) {
                        if (use->isFixedRegister()) {
                            AnyRegister reg = GetFixedRegister(vregs[use].def(), use);
                            if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                                return false;
                            to = inputOf(*ins);

                            // Fixed intervals are not added to safepoints, so do it
                            // here.
                            LSafepoint *safepoint = ins->safepoint();
                            if (!ins->isCall() && safepoint)
                                AddRegisterToSafepoint(safepoint, reg, *vregs[use].def());
                        } else {
                            to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
                        }
                    } else {
                        to = (use->usedAtStart() || ins->isCall()) ? inputOf(*ins) : outputOf(*ins);
                        if (use->isFixedRegister()) {
                            LAllocation reg(AnyRegister::FromCode(use->registerCode()));
                            for (size_t i = 0; i < ins->numDefs(); i++) {
                                LDefinition *def = ins->getDef(i);
                                if (def->policy() == LDefinition::PRESET && *def->output() == reg)
                                    to = inputOf(*ins);
                            }
                        }
                    }

                    LiveInterval *interval = vregs[use].getInterval(0);
                    if (!interval->addRangeAtHead(inputOf(block->firstId()), forLSRA ? to : to.next()))
                        return false;
                    interval->addUse(new(alloc()) UsePosition(use, to));

                    live->insert(use->virtualRegister());
                }
            }
        }

        // Phis have simultaneous assignment semantics at block begin, so at
        // the beginning of the block we can be sure that liveIn does not
        // contain any phi outputs.
        for (unsigned int i = 0; i < block->numPhis(); i++) {
            LDefinition *def = block->getPhi(i)->getDef(0);
            if (live->contains(def->virtualRegister())) {
                live->remove(def->virtualRegister());
            } else {
                // This is a dead phi, so add a dummy range over all phis. This
                // can go away if we have an earlier dead code elimination pass.
                if (!vregs[def].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                               outputOf(block->firstId())))
                {
                    return false;
                }
            }
        }

        if (mblock->isLoopHeader()) {
            // A divergence from the published algorithm is required here, as
            // our block order does not guarantee that blocks of a loop are
            // contiguous. As a result, a single live interval spanning the
            // loop is not possible. Additionally, we require liveIn in a later
            // pass for resolution, so that must also be fixed up here.
            MBasicBlock *loopBlock = mblock->backedge();
            while (true) {
                // Blocks must already have been visited to have a liveIn set.
                JS_ASSERT(loopBlock->id() >= mblock->id());

                // Add an interval for this entire loop block
                CodePosition from = inputOf(loopBlock->lir()->firstId());
                CodePosition to = outputOf(loopBlock->lir()->lastId()).next();

                for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
                    if (!vregs[*liveRegId].getInterval(0)->addRange(from, to))
                        return false;
                }

                // Fix up the liveIn set to account for the new interval
                liveIn[loopBlock->id()]->insertAll(live);

                // Make sure we don't visit this node again
                loopDone->insert(loopBlock->id());

                // If this is the loop header, any predecessors are either the
                // backedge or out of the loop, so skip any predecessors of
                // this block
                if (loopBlock != mblock) {
                    for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
                        MBasicBlock *pred = loopBlock->getPredecessor(i);
                        if (loopDone->contains(pred->id()))
                            continue;
                        if (!loopWorkList.append(pred))
                            return false;
                    }
                }

                // Terminate loop if out of work.
                if (loopWorkList.empty())
                    break;

                // Grab the next block off the work list, skipping any OSR block.
                while (!loopWorkList.empty()) {
                    loopBlock = loopWorkList.popCopy();
                    if (loopBlock->lir() != graph.osrBlock())
                        break;
                }

                // If end is reached without finding a non-OSR block, then no more work items were found.
                if (loopBlock->lir() == graph.osrBlock()) {
                    JS_ASSERT(loopWorkList.empty());
                    break;
                }
            }

            // Clear the done set for other loops
            loopDone->clear();
        }

        JS_ASSERT_IF(!mblock->numPredecessors(), live->empty());
    }

    validateVirtualRegisters();

    // If the script has an infinite loop, there may be no MReturn and therefore
    // no fixed intervals. Add a small range to fixedIntervalsUnion so that the
    // rest of the allocator can assume it has at least one range.
    if (fixedIntervalsUnion->numRanges() == 0) {
        if (!fixedIntervalsUnion->addRangeAtHead(CodePosition(0, CodePosition::INPUT),
                                                 CodePosition(0, CodePosition::OUTPUT)))
        {
            return false;
        }
    }

    return true;
}
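/*
 * Minimal sketch (illustrative, not SpiderMonkey code) of the textbook
 * backward-liveness recurrence that the per-block walk above is based on:
 * live-out(B) is the union of the successors' live-in sets, and, walking the
 * instructions of B backward, a definition kills its virtual register while a
 * use makes it live. The Instr type and computeLiveIn() are hypothetical names
 * introduced only for this example.
 */
#include <set>
#include <vector>

struct Instr {
    std::set<unsigned> defs;    // virtual registers written by this instruction
    std::set<unsigned> uses;    // virtual registers read by this instruction
};

// Compute live-in(B) from live-out(B) for one straight-line block.
static std::set<unsigned>
computeLiveIn(const std::vector<Instr> &block, std::set<unsigned> liveOut)
{
    std::set<unsigned> live = liveOut;
    for (auto it = block.rbegin(); it != block.rend(); ++it) {
        for (unsigned d : it->defs)
            live.erase(d);      // a define ends the live range here
        for (unsigned u : it->uses)
            live.insert(u);     // a use extends the live range upward
    }
    return live;
}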