//_________________________________________________________________________________________________ void CallingConventionClient::layoutAuxilaryOpnds(Inst::OpndRole role, OpndKind kindForStackArgs) { StlVector<CallingConvention::OpndInfo> & infos = getInfos(role); StlVector<StackOpndInfo> & stackOpndInfos = getStackOpndInfos(role); U_32 slotSize=sizeof(POINTER_SIZE_INT); U_32 regArgCount=0, stackArgCount=0; Inst::Opnds opnds(ownerInst, Inst::OpndRole_Auxilary|role); Inst::Opnds::iterator handledOpnds=opnds.begin(); for (U_32 i=0, n=(U_32)infos.size(); i<n; i++){ const CallingConvention::OpndInfo& info=infos[i]; #ifdef _DEBUG bool eachSlotRequiresOpnd=false; #endif U_32 offset=0; for (U_32 j=0, cbCurrent=0; j<info.slotCount; j++){ Opnd * opnd=opnds.getOpnd(handledOpnds); OpndSize sz=opnd->getSize(); U_32 cb=getByteSize(sz); RegName r=(RegName)info.slots[j]; if (info.isReg) { r=Constraint::getAliasRegName(r,sz); assert(r!=RegName_Null); #ifdef _DEBUG eachSlotRequiresOpnd=true; #endif cbCurrent+=getByteSize(getRegSize(r)); }else{ if (cbCurrent==0) offset=(info.slots[j] & 0xffff)*slotSize; cbCurrent+=slotSize; } if (cbCurrent>=cb){ if (info.isReg){ ownerInst->setConstraint(handledOpnds, r); regArgCount++; }else{ ownerInst->setConstraint(handledOpnds, Constraint(kindForStackArgs, sz)); stackArgCount++; StackOpndInfo sainfo={ handledOpnds, offset }; stackOpndInfos.push_back(sainfo); } handledOpnds = opnds.next(handledOpnds); #ifdef _DEBUG eachSlotRequiresOpnd=false; #endif cbCurrent=0; } #ifdef _DEBUG assert(!eachSlotRequiresOpnd); #endif } } if (stackArgCount>0) sort(stackOpndInfos.begin(), stackOpndInfos.end()); assert(handledOpnds == opnds.end()); assert(stackArgCount==stackOpndInfos.size()); }
//_________________________________________________________________________________________________
// Dead code elimination pass.
//
// Walks all blocks (post-order), and within each block walks instructions backward while
// maintaining a live set (ls), removing instructions whose results are not live and that have
// no side effects. Also removes redundant duplicate conditional branches to the same target.
// Finishes with CFG cleanup (empty/unreachable node purging, adjacent node merging) and operand
// repacking; liveness info is invalidated since instructions were removed.
void DCE::runImpl()
{
    // When invoked as the "early" instance, bail out unless early DCE is enabled by flags.
    bool early = false;
    getArg("early", early);
    if (early && !irManager->getCGFlags()->earlyDCEOn) {
        return;
    }

    irManager->updateLivenessInfo();
    irManager->calculateOpndStatistics();
    BitSet ls(irManager->getMemoryManager(), irManager->getOpndCount());
    const Nodes& nodes = irManager->getFlowGraph()->getNodesPostOrder();

#ifdef ORDER
    // ORDER build: pre-scan all instructions and mark operands used as parent-object
    // loads/stores; such operands must never be considered dead below.
    MemoryManager mm("dce_parents");
    U_32 opndCount = irManager->getOpndCount();
    bool * isParentOpnd = new(mm) bool [opndCount];
    memset(isParentOpnd, 0, sizeof(bool) * opndCount);
    for (Nodes::const_iterator it = nodes.begin(),end = nodes.end(); it!=end; ++it) {
        Node* node = *it;
        for (Inst * inst=(Inst*)node->getLastInst(); inst!=NULL; inst=inst->getPrevInst()) {
            Opnd* load_obj = inst->getParentObjectLoad();
            if (load_obj) {
                isParentOpnd[load_obj->getId()] = true;
            }
            Opnd* store_obj = inst->getParentObjectStore();
            if (store_obj) {
                isParentOpnd[store_obj->getId()] = true;
            }
        }
    }
#endif

    for (Nodes::const_iterator it = nodes.begin(),end = nodes.end(); it!=end; ++it) {
        Node* node = *it;
        if (node->isBlockNode()) {
            //Here we'll try to remove redundant branches that could appear after
            //branch translations. All such branches are supposed to be conditional.
            Inst * inst = (Inst *)node->getLastInst();
            if(inst && node->getOutEdges().size() > 1) {
                // Note: iterate over a copy of the edge list so removeEdge() below does not
                // invalidate the iterators.
                Edges edges = node->getOutEdges();
                // Compare every unordered pair of out-edges (ite2 always precedes ite1).
                for (Edges::const_iterator ite1 = ++edges.begin(), end = edges.end(); ite1 != end; ++ite1) {
                    for (Edges::const_iterator ite2 = edges.begin(); ite1 != ite2; ++ite2) {
                        Edge *edge1 = *ite1;
                        Edge *edge2 = *ite2;
                        assert(edge1 != edge2);
                        //If this condition is satisfied then there are at least two branches with
                        //the same destination
                        if (edge1->getTargetNode() == edge2->getTargetNode()) {
                            //Check that edges are conditional and the last instruction is branch,
                            //the other situations are not permitted at the moment
                            assert(inst->hasKind(Inst::Kind_BranchInst));
                            assert(edge1->getKind() == Edge::Kind_True || edge1->getKind() == Edge::Kind_False);
                            assert(edge2->getKind() == Edge::Kind_True || edge2->getKind() == Edge::Kind_False);
                            //Remove last instruction if it is a branch
                            inst->unlink();
                            irManager->getFlowGraph()->removeEdge(edge2);
                        }
                    }
                }
            }

            // Backward sweep: seed ls with liveness at block exit, then for each instruction
            // either unlink it (dead) or fold its effect into ls.
            irManager->getLiveAtExit(node, ls);
            for (Inst * inst=(Inst*)node->getLastInst(), * prevInst=NULL; inst!=NULL; inst=prevInst) {
                // Grab the predecessor first — unlink() would detach inst from the list.
                prevInst=inst->getPrevInst();
                // Prevent debug traps or instructions with side effects
                // like (MOVS) from being removed.
                bool deadInst=!inst->hasSideEffect() && (inst->getMnemonic() != Mnemonic_INT3);
#ifdef ORDER //yzm
                // Keep any instruction that touches a parent-object operand (marked above).
                for (unsigned int i = 0 ; i < inst->getOpndCount() ; i ++) {
                    Opnd* opnd = inst->getOpnd(i);
                    if (isParentOpnd[opnd->getId()])
                        deadInst = false;
                }
#endif
                if (deadInst) {
                    // Keep FP copies whose source comes straight from a CALL — presumably
                    // the copy pops the x87 result; TODO confirm against FP return handling.
                    if (inst->hasKind(Inst::Kind_CopyPseudoInst)) {
                        Opnd * opnd=inst->getOpnd(1);
                        if (opnd->getType()->isFP() && opnd->getDefiningInst()!=NULL && opnd->getDefiningInst()->getMnemonic()==Mnemonic_CALL) {
                            deadInst=false;
                        }
                    }
                    if (deadInst) {
                        // The instruction is live if it defines a live operand, or if it
                        // touches heap / manually-laid-out stack memory (except pure address
                        // computation via LEA).
                        Inst::Opnds opnds(inst, Inst::OpndRole_All);
                        for (Inst::Opnds::iterator ito = opnds.begin(); ito != opnds.end(); ito = opnds.next(ito)) {
                            Opnd * opnd = inst->getOpnd(ito);
                            if ((ls.getBit(opnd->getId()) && (inst->getOpndRoles(ito) & Inst::OpndRole_Def)) ||
                                (((opnd->getMemOpndKind()&(MemOpndKind_Heap|MemOpndKind_StackManualLayout))!=0) && (inst->getMnemonic() != Mnemonic_LEA))) {
                                deadInst=false;
                                break;
                            }
                        }
                    }
                }
                if (deadInst) {
                    inst->unlink();
                } else {
                    irManager->updateLiveness(inst, ls);
                }
            }
            // ls now holds liveness at block entry; publish it.
            irManager->getLiveAtEntry(node)->copyFrom(ls);
        }
    }

    // Post-pass cleanup: remove now-trivial moves, tidy the CFG and compact operand ids.
    irManager->eliminateSameOpndMoves();
    irManager->getFlowGraph()->purgeEmptyNodes();
    irManager->getFlowGraph()->mergeAdjacentNodes(true, false);
    irManager->getFlowGraph()->purgeUnreachableNodes();
    irManager->packOpnds();
    irManager->invalidateLivenessInfo();
}
//___________________________________________________________________________________________________
// Early copy propagation pass.
//
// First phase (reverse post-order walk): for every MOV whose destination is defined exactly once,
// is not pinned to a physical location and whose source is not already a register, record the
// source as a propagation candidate in opndInfos[dst]. Candidates are invalidated when the
// destination gains more defs, when the source is redefined after the copy, or when a memory
// source cannot legally appear at a given use site (explicit-operand constraints, GC safe points,
// non-adjacent defining instruction, or a second memory operand already propagated into the
// same instruction).
// Second phase: unlink the now-redundant copies and rewrite all uses via replaceOpnds().
void EarlyPropagation::runImpl()
{
    irManager->updateLoopInfo();
    U_32 opndCount=irManager->getOpndCount();

    MemoryManager mm("early_prop");
    // Per-operand propagation state, indexed by operand id.
    OpndInfo * opndInfos = new(mm) OpndInfo[opndCount];
    Node * currentLoopHeader = NULL;

    bool anyInstHandled=false;

    LoopTree* lt = irManager->getFlowGraph()->getLoopTree();

    const Nodes& postOrdered = irManager->getFlowGraph()->getNodesPostOrder();
    for (Nodes::const_reverse_iterator it = postOrdered.rbegin(), end = postOrdered.rend(); it!=end; ++it) {
        Node * node=*it;
        if (!node->isBlockNode()) {
            continue;
        }
        Node * loopHeader = lt->getLoopHeader(node, false);
        if (currentLoopHeader != loopHeader){
            currentLoopHeader = loopHeader;
            // Crossing a loop boundary: artificially bump defCount of every pending candidate
            // so copies are not propagated across loop boundaries (the defCount checks below
            // will then reject them).
            for (U_32 i = 0; i < opndCount; ++i)
                if (opndInfos[i].sourceOpndId != EmptyUint32)
                    opndInfos[i].defCount++;
        }
        for (Inst * inst = (Inst*)node->getFirstInst(); inst != NULL; inst=inst->getNextInst()){
            bool assignedOpndPropagated = false;
            Inst::Opnds opnds(inst, Inst::OpndRole_All);
            for (Inst::Opnds::iterator it = opnds.begin(); it != opnds.end(); it = opnds.next(it)){
                Opnd * opnd=inst->getOpnd(it);
                U_32 roles=inst->getOpndRoles(it);
                U_32 opndId = opnd->getId();
                OpndInfo& opndInfo = opndInfos[opndId];
                U_32 mask = 0;
                if (roles & Inst::OpndRole_Def){
                    ++opndInfo.defCount;
                }else if (roles & Inst::OpndRole_Use){
                    if (opndInfo.sourceOpndId != EmptyUint32){
                        // Candidate's source was redefined since the copy — drop it.
                        if (opndInfo.sourceOpndDefCountAtCopy < opndInfos[opndInfo.sourceOpndId].defCount)
                            opndInfo.sourceOpndId = EmptyUint32;
                        else{
                            Opnd * srcOpnd = irManager->getOpnd(opndInfo.sourceOpndId);
                            Constraint co = srcOpnd->getConstraint(Opnd::ConstraintKind_Location);
                            if (co.getKind() == OpndKind_Mem){
                                // Memory source: only propagate into an explicit operand slot
                                // that accepts memory, immediately after the copy, at most once
                                // per instruction, and never across a GC safe point.
                                mask = (1<<it)-1;
                                if ((roles & Inst::OpndRole_Explicit) == 0 ||
                                    inst->hasKind(Inst::Kind_PseudoInst) || irManager->isGCSafePoint(inst) ||
                                    opndInfo.sourceInst != inst->getPrevInst() || assignedOpndPropagated ||
                                    (inst->getConstraint(it, mask, co.getSize())&co).isNull()
                                )
                                    opndInfo.sourceOpndId = EmptyUint32;
                                assignedOpndPropagated = true;
                            }
                        }
                    }
                }
                // Multiply-defined operands are never propagation targets.
                if (opndInfo.defCount > 1){
                    opndInfo.sourceOpndId = EmptyUint32;
                }
            }
            /*
            Here is the previous version to test whether the inst is copy or not.
            bool isCopy = inst->getMnemonic() == Mnemonic_MOV ||
                ( (inst->getMnemonic() == Mnemonic_ADD || inst->getMnemonic() == Mnemonic_SUB) &&
                  inst->getOpnd(3)->isPlacedIn(OpndKind_Imm) && inst->getOpnd(3)->getImmValue()==0 &&
                  inst->getOpnd(3)->getRuntimeInfo()==NULL );
            It considered special case of 'dst = src +/- 0' as copy.
            In fact there are more similar cases like 'IMUL src, 1 ; shift src, 0' etc.
            Such checks are obsolete now, as peephole takes care about such copies.
            Anyway, the code above had a bug: 'inst->getOpnd(3)' crashes in instructions in
            native form (like ADD def_use, use).
            */
            const bool isCopy = inst->getMnemonic() == Mnemonic_MOV;

            if (isCopy){ // CopyPseudoInst or mov
                Opnd * defOpnd = inst->getOpnd(0);
                Opnd * srcOpnd = inst->getOpnd(1);
                U_32 defOpndId = defOpnd->getId();
                OpndInfo * opndInfo = opndInfos + defOpndId;
                bool instHandled=false;
                bool typeConvOk = isTypeConversionAllowed(srcOpnd, defOpnd);
                // Record the copy only for single-def destinations without a fixed physical
                // location, and skip register sources (nothing to gain).
                if (typeConvOk && opndInfo->defCount == 1 && !srcOpnd->isPlacedIn(OpndKind_Reg)){
                    if (!defOpnd->hasAssignedPhysicalLocation()){
                        opndInfo->sourceInst = inst;
                        opndInfo->sourceOpndId = srcOpnd->getId();
                        instHandled=true;
                    }
                }
                if (instHandled){
                    // Chain collapse: if the source is itself a propagated copy, point
                    // straight at its ultimate source.
                    if (opndInfos[opndInfo->sourceOpndId].sourceOpndId != EmptyUint32)
                        opndInfo->sourceOpndId = opndInfos[opndInfo->sourceOpndId].sourceOpndId;
                    // Snapshot the source's defCount to detect later redefinitions.
                    opndInfo->sourceOpndDefCountAtCopy = opndInfos[opndInfo->sourceOpndId].defCount;
                    anyInstHandled=true;
                }
            }
        }
    }

    if (anyInstHandled){
        // Phase 2: build the id -> replacement operand table and apply it everywhere.
        Opnd ** replacements = new(mm) Opnd* [opndCount];
        memset(replacements, 0, sizeof(Opnd*) * opndCount);
        bool hasReplacements = false;
        for (U_32 i = 0; i < opndCount; ++i){
            if (opndInfos[i].sourceOpndId != EmptyUint32){
                // The copy instruction itself is now redundant — remove it.
                Inst * inst = opndInfos[i].sourceInst;
                if (inst !=NULL){
                    inst->unlink();
                }
                if (opndInfos[i].sourceOpndId != i){
                    Opnd* origOpnd= irManager->getOpnd(i);
                    Opnd* replacementOpnd = irManager->getOpnd(opndInfos[i].sourceOpndId);
                    assert(isTypeConversionAllowed(replacementOpnd, origOpnd));
                    // Preserve unmanaged-pointer typing when an integer source replaces a
                    // pointer-typed operand.
                    if (origOpnd->getType()->isUnmanagedPtr() && replacementOpnd->getType()->isInteger()) {
                        replacementOpnd->setType(origOpnd->getType());
                    }/* else if (origOpnd->getType()->isObject() && replacementOpnd->getType()->isUnmanagedPtr()) {
                        replacementOpnd->setType(origOpnd->getType());
                    }*/
                    replacements[i] = replacementOpnd;
                    hasReplacements = true;
                }
            }
        }
        if (hasReplacements){
            const Nodes& postOrdered = irManager->getFlowGraph()->getNodesPostOrder();
            for (Nodes::const_reverse_iterator it = postOrdered.rbegin(), end = postOrdered.rend(); it!=end; ++it) {
                Node * node=*it;
                if (!node->isBlockNode()) {
                    continue;
                }
                for (Inst * inst = (Inst*)node->getFirstInst(); inst != NULL; inst=inst->getNextInst()){
                    inst->replaceOpnds(replacements);
                }
            }
        }
    }
}
// Per-instruction driver of the peephole optimizer.
//
// Three stages:
//  1) Local copy propagation: rewrite eligible use-operands through copyMap, then kill all
//     copy relations invalidated by this instruction's defs.
//  2) If the instruction is a MOV of a suitable non-register source, record a new copy
//     relation dst <= src (collapsing through an existing relation for src).
//  3) Dispatch to the mnemonic-specific handler; pseudo-instructions other than copies are
//     left untouched.
// Returns the handler's Changed_* result, or Changed_Nothing when no handler applies.
PeepHoleOpt::Changed PeepHoleOpt::handleInst(Inst* inst) {
    PeepHoleOpt::Changed temp;

    // Local propagation
    Inst::Opnds opnds(inst, Inst::OpndRole_All);
    for (Inst::Opnds::iterator it=opnds.begin();it != opnds.end();it = opnds.next(it)) {
        Opnd * opnd=inst->getOpnd(it);
        U_32 roles=inst->getOpndRoles(it);
        if (roles & Inst::OpndRole_Use) {
            // Replace a pure use (not also a def) with its known copy source, but only for
            // changeable encoder/iterator operand slots.
            if ((roles & Inst::OpndRole_All & Inst::OpndRole_FromEncoder) &&
                (roles & Inst::OpndRole_All & Inst::OpndRole_ForIterator) &&
                (roles & Inst::OpndRole_Changeable) &&
                ((roles & Inst::OpndRole_Def) == 0) &&
                copyMap->has(opnd)) {
                // Keep unmanaged-pointer typing when substituting an integer-typed source.
                if (opnd->getType()->isUnmanagedPtr() && (*copyMap)[opnd]->getType()->isInteger())
                    (*copyMap)[opnd]->setType(opnd->getType());
                inst->setOpnd(it, (*copyMap)[opnd]);
            }
        }
    }
    for (Inst::Opnds::iterator it = opnds.begin();it != opnds.end();it = opnds.next(it)) {
        Opnd * opnd=inst->getOpnd(it);
        U_32 roles=inst->getOpndRoles(it);
        if (roles & Inst::OpndRole_Def) {
            // A def invalidates the relation keyed by this operand...
            if (copyMap->has(opnd)) {
                if (Log::isEnabled())
                    Log::out()<<"copy relation DELETED: " << opnd->getFirstId() << "<=" << (*copyMap)[opnd]->getFirstId() <<std::endl;
                copyMap->erase(opnd);
            }
            // ...and every relation whose source is this operand. Collect keys first
            // (tempSet) so we do not erase from copyMap while iterating it.
            tempSet->clear();
            for(StlHashMap<Opnd*, Opnd*>::iterator iter=copyMap->begin(); iter!=copyMap->end();++iter)
                if (iter->second == opnd) {
                    if (Log::isEnabled())
                        Log::out()<<"copy relation DELETED: " << iter->first->getFirstId() << "<=" << iter->second->getFirstId() <<std::endl;
                    tempSet->insert(iter->first);
                }
            for(StlSet<Opnd*>::iterator iter=tempSet->begin(); iter!=tempSet->end();++iter)
                copyMap->erase(*iter);
        }
    }

    // Record a new copy relation for a simple MOV: exactly one def and one use operand.
    if (inst->getMnemonic() == Mnemonic_MOV) {
        Inst::Opnds opnds(inst, Inst::OpndRole_All);
        Opnd * dst = NULL;
        Opnd * src = NULL;
        U_32 counterDef = 0;
        U_32 counterUse = 0;
        for (Inst::Opnds::iterator it=opnds.begin();it!=opnds.end();it=opnds.next(it)) {
            Opnd * opnd = inst->getOpnd(it);
            U_32 roles = inst->getOpndRoles(it);
            if (roles & Inst::OpndRole_Def) {
                counterDef++;
                dst = opnd;
            } else if (roles & Inst::OpndRole_Use) {
                counterUse++;
                src = opnd;
            }
        }
        if ((counterDef == 1) && (counterUse == 1) && (!dst->hasAssignedPhysicalLocation())) {
            // FP-register candidates: src and dst must have compatible calculated constraints.
            bool kindsAreOk = true;
            if(src->canBePlacedIn(OpndKind_FPReg) || dst->canBePlacedIn(OpndKind_FPReg)) {
                Constraint srcConstr = src->getConstraint(Opnd::ConstraintKind_Calculated);
                Constraint dstConstr = dst->getConstraint(Opnd::ConstraintKind_Calculated);
                kindsAreOk = ! (srcConstr&dstConstr).isNull();
            }
            bool typeConvOk = src->getSize() == dst->getSize() && isTypeConversionAllowed(src, dst);
            // Register sources are skipped — presumably not worth tracking; TODO confirm.
            if (typeConvOk && kindsAreOk && ! src->isPlacedIn(OpndKind_Reg)) {
                if (copyMap->has(src)) {
                    // Collapse chains: map dst straight to src's own source.
                    (*copyMap)[dst] = (*copyMap)[src];
                    if (Log::isEnabled())
                        Log::out()<<"copy relation INSERTED: " << dst->getFirstId() << "<=" << (*copyMap)[src]->getFirstId() <<std::endl;
                } else {
                    (*copyMap)[dst] = src;
                    if (Log::isEnabled())
                        Log::out()<<"copy relation INSERTED: " << dst->getFirstId() << "<=" << src->getFirstId() <<std::endl;
                }
            }
        }
    }

    // Pseudo-instructions (except copy pseudo-insts) have no mnemonic-level handlers.
    if (inst->hasKind(Inst::Kind_PseudoInst) && inst->getKind() != Inst::Kind_CopyPseudoInst) {
        return Changed_Nothing;
    }

    Mnemonic mnemonic = inst->getMnemonic();
    switch(mnemonic) {
    case Mnemonic_MOV:
        return handleInst_MOV(inst);
    case Mnemonic_CALL:
        return handleInst_Call(inst);
    case Mnemonic_ADD:
    case Mnemonic_ADC:
    case Mnemonic_SUB:
    case Mnemonic_SBB:
    case Mnemonic_NOT:
    case Mnemonic_AND:
    case Mnemonic_OR:
    case Mnemonic_XOR:
    case Mnemonic_TEST:
        return handleInst_ALU(inst);
    case Mnemonic_CMP:
        // CMP gets its own handler first; on no change, fall back to the generic ALU handler.
        temp = handleInst_CMP(inst);
        if ( temp == Changed_Nothing ) {
            return handleInst_ALU(inst);
        } else {
            return temp;
        }
    case Mnemonic_SETG:
    case Mnemonic_SETE:
    case Mnemonic_SETNE:
    case Mnemonic_SETL:
        return handleInst_SETcc(inst);
    case Mnemonic_IMUL:
    case Mnemonic_MUL:
        return handleInst_MUL(inst);
    case Mnemonic_MOVSS:
    case Mnemonic_MOVSD:
        // SSE handlers are disabled; intentional fall-through to default.
        //return handleInst_SSEMov(inst);
    case Mnemonic_XORPS:
    case Mnemonic_XORPD:
        //return handleInst_SSEXor(inst);
    default:
        break;
    }
    return Changed_Nothing;
}