void MemMap::sinkStores(StoreList& stores) {
  // Sink dead stores into the exit edges that occur between each dead store
  // and the next store to the same location, so the side exits still observe
  // the stored value.
  for (auto rit = stores.rbegin(); rit != stores.rend(); ++rit) {
    IRInstruction* store = rit->first;
    if (store->getId() != DEAD) continue;
    // Re-materialize a clone of the dead store at the head of every guarded
    // exit block recorded for it.
    for (IRInstruction* guard : rit->second) {
      IRInstruction* sunk = store->clone(factory);
      if (store->getDst() != NULL) {
        factory->getSSATmp(sunk);
      }
      guard->getLabel()->getParent()->prependInstruction(sunk);
    }
    // StRefs cannot just be removed, they have to be converted into Movs
    // as the destination of the StRef still has the DecRef attached to it.
    Opcode storeOp = store->getOpcode();
    if (storeOp == StRef || storeOp == StRefNT) {
      store->setOpcode(Mov);
      store->setSrc(1, NULL);
      store->setNumSrcs(1);
      store->setId(LIVE);
    }
  }
}
/// Try to vectorize chains of consecutive stores from \p Stores, keeping a
/// vectorization only when its modeled cost is at or below \p costThreshold.
/// \returns true if the IR was changed.
bool BoUpSLP::vectorizeStores(StoreList &Stores, int costThreshold) {
  ValueSet Heads, Tails;
  SmallDenseMap<Value*, Value*> ConsecutiveChain;
  bool Changed = false;
  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  // NOTE(review): O(n^2) over the store list — fine for small lists, a
  // potential compile-time sink for very large ones.
  for (unsigned i = 0, e = Stores.size(); i < e; ++i)
    for (unsigned j = 0; j < e; ++j) {
      if (i == j) continue;
      if (isConsecutiveAccess(Stores[i], Stores[j])) {
        // Stores[j] immediately follows Stores[i] in memory.
        Tails.insert(Stores[j]);
        Heads.insert(Stores[i]);
        ConsecutiveChain[Stores[i]] = Stores[j];
      }
    }
  // For stores that start but don't end a link in the chain:
  for (ValueSet::iterator it = Heads.begin(), e = Heads.end(); it != e; ++it) {
    if (Tails.count(*it)) continue;
    // We found a store instr that starts a chain. Now follow the chain and try
    // to vectorize it.
    ValueList Operands;
    Value *I = *it;
    // Best (lowest) cost found over all candidate vector factors; MinVF == 0
    // means no profitable width was found yet.
    int MinCost = 0, MinVF = 0;
    while (Tails.count(I) || Heads.count(I)) {
      Operands.push_back(I);
      unsigned VF = Operands.size();
      // Only power-of-two prefixes of the chain are legal vector widths.
      if (isPowerOf2_32(VF) && VF > 1) {
        int cost = getTreeRollCost(Operands, 0);
        DEBUG(dbgs() << "Found cost=" << cost << " for VF=" << VF << "\n");
        if (cost < MinCost) {
          MinCost = cost;
          MinVF = VF;
        }
      }
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }
    if (MinCost <= costThreshold && MinVF > 1) {
      DEBUG(dbgs() << "Decided to vectorize cost=" << MinCost << "\n");
      vectorizeTree(Operands, MinVF);
      Stores.clear();
      // The current numbering is invalid because we added and removed instrs.
      numberInstructions();
      Changed = true;
    }
  }
  return Changed;
}
void MemMap::sinkStores(StoreList& stores) {
  // Sink dead stores into the exit edges that occur between each dead store
  // and the next store to the same location.
  for (auto rit = stores.rbegin(); rit != stores.rend(); ++rit) {
    IRInstruction* store = rit->first;
    if (!isLive(store)) {
      // Re-materialize the store at the head of every taken-edge target so
      // its effect is preserved on those side exits.
      for (IRInstruction* guard : rit->second) {
        guard->getTaken()->prepend(store->clone(m_factory));
      }
      // StRefs cannot just be removed, they have to be converted into Movs
      // as the destination of the StRef still has the DecRef attached to it.
      Opcode storeOp = store->getOpcode();
      if (storeOp == StRef || storeOp == StRefNT) {
        store->setOpcode(Mov);
        store->setSrc(1, nullptr);
        store->setNumSrcs(1);
        setLive(*store, true);
      }
    }
  }
}
void MemMap::optimizeMemoryAccesses(Trace* trace) {
  // Forward pass over the trace's instructions: simplify loads against known
  // memory state, mark stores that are provably overwritten as DEAD, and for
  // each still-live store record the guard instructions it would have to
  // survive past. Dead stores are sunk into exit edges at the end.
  StoreList tracking;  // (store, guards-seen-since-store) pairs
  for (IRInstruction* inst : trace->getInstructionList()) {
    // initialize each instruction as live
    inst->setId(LIVE);
    int offset = -1;  // property offset; -1 means "not a LdProp/StProp access"
    Opcode op = inst->getOpcode();
    if (isLoad(op)) {
      if (op == LdProp) {
        offset = inst->getSrc(1)->getConstValAsInt();
      }
      optimizeLoad(inst, offset);
    } else if (isStore(op)) {
      if (op == StProp || op == StPropNT) {
        offset = inst->getSrc(1)->getConstValAsInt();
      }
      // if we see a store, first check if its last available access is a store
      // if it is, then the last access is a dead store
      IRInstruction* access = getLastAccess(inst->getSrc(0), offset);
      if (access != NULL && isStore(access->getOpcode())) {
        // if a dead St* is followed by a St*NT, then the second store needs to
        // now write in the type because the first store will be removed
        if (access->getOpcode() == StProp && op == StPropNT) {
          inst->setOpcode(StProp);
        } else if (access->getOpcode() == StLoc && op == StLocNT) {
          inst->setOpcode(StLoc);
        } else if (access->getOpcode() == StRef && op == StRefNT) {
          inst->setOpcode(StRef);
        }
        access->setId(DEAD);
      }
      // start tracking the current store
      tracking.push_back(std::make_pair(inst, std::vector<IRInstruction*>()));
    } else if (inst->mayRaiseError()) {
      // if the function has an exit edge that we don't know anything about
      // (raising an error), then all stores we're currently tracking need to
      // be erased; all stores already declared dead are untouched
      StoreList::iterator it, end;
      for (it = tracking.begin(), end = tracking.end(); it != end; ) {
        // advance before erasing so the iterator stays valid
        StoreList::iterator copy = it;
        ++it;
        if (copy->first->getId() != DEAD) {
          // XXX: t1779667
          tracking.erase(copy);
        }
      }
    }
    // if the current instruction is guarded, make sure all of our stores that
    // are not yet dead know about it
    if (inst->getLabel() != NULL) {
      for (auto& entry : tracking) {
        if (entry.first->getId() != DEAD) {
          entry.second.push_back(inst);
        }
      }
    }
    Simplifier::copyProp(inst);
    processInstruction(inst);
  }
  sinkStores(tracking);
  // kill the dead stores
  removeDeadInstructions(trace);
}
yarp::os::Bottle& BottleImpl::addList() {
  // Append a freshly allocated sublist to this bottle and hand back a
  // reference to its inner Bottle. NOTE(review): presumably add() takes
  // ownership of the heap-allocated StoreList — verify against add()'s
  // contract.
  StoreList* sublist = new StoreList();
  add(sublist);
  return sublist->internal();
}
void MemMap::optimizeMemoryAccesses(Trace* trace) {
  // Forward pass over a linear trace: simplify loads against known memory
  // state, mark stores that are provably overwritten as dead, and for each
  // still-live store record the guard instructions it would have to survive
  // past. Dead stores are sunk into exit edges at the end.
  if (hasInternalFlow(trace)) {
    // This algorithm only works with linear traces.
    // TODO t2066994: reset state after each block, at least.
    return;
  }
  StoreList tracking;  // (store, guards-seen-since-store) pairs
  const Func* curFunc = nullptr;
  for (Block* block : trace->getBlocks()) {
    for (IRInstruction& inst : *block) {
      if (inst.getOpcode() == Marker) {
        curFunc = inst.getExtra<Marker>()->func;
      }
      // initialize each instruction as live
      setLive(inst, true);
      int offset = -1;  // property offset; -1 means "not a *Prop access"
      Opcode op = inst.getOpcode();
      if (isLoad(op)) {
        if (op == LdProp) {
          offset = inst.getSrc(1)->getValInt();
        }
        // (fixed: a redundant duplicate setLive(inst, true) lived here; the
        // unconditional call above already marks every instruction live)
        optimizeLoad(&inst, offset);
      } else if (isStore(op)) {
        if (op == StProp || op == StPropNT) {
          offset = inst.getSrc(1)->getValInt();
        }
        // if we see a store, first check if its last available access is a
        // store; if it is, then the last access is a dead store
        auto access = inst.getOpcode() == StLoc || inst.getOpcode() == StLocNT
          ? lastLocalAccess(inst.getExtra<LocalId>()->locId)
          : getLastAccess(inst.getSrc(0), offset);
        if (access && isStore(access->getOpcode())) {
          // if a dead St* is followed by a St*NT, then the second store needs
          // to now write in the type because the first store will be removed
          if (access->getOpcode() == StProp && op == StPropNT) {
            inst.setOpcode(StProp);
          } else if (access->getOpcode() == StLoc && op == StLocNT) {
            inst.setOpcode(StLoc);
          } else if (access->getOpcode() == StRef && op == StRefNT) {
            inst.setOpcode(StRef);
          }
          setLive(*access, false);
        }
        // start tracking the current store
        tracking.push_back(std::make_pair(&inst,
                                          std::vector<IRInstruction*>()));
      } else if (inst.mayRaiseError()) {
        // if the function has an exit edge that we don't know anything about
        // (raising an error), then all stores we're currently tracking need
        // to be erased; all stores already declared dead are untouched
        StoreList::iterator it, end;
        for (it = tracking.begin(), end = tracking.end(); it != end; ) {
          // advance before erasing so the iterator stays valid
          StoreList::iterator copy = it;
          ++it;
          if (isLive(copy->first)) {
            // XXX: t1779667
            tracking.erase(copy);
          }
        }
      }
      // if the current instruction is guarded, make sure all of our stores
      // that are not yet dead know about it
      if (inst.getTaken()) {
        for (auto& entry : tracking) {
          if (isLive(entry.first)) {
            entry.second.push_back(&inst);
          }
        }
      }
      Simplifier::copyProp(&inst);
      processInstruction(&inst, curFunc && curFunc->isPseudoMain());
    }
  }
  sinkStores(tracking);
  // kill the dead stores
  removeDeadInstructions(trace, m_liveInsts);
}