/* Builds the semantics of a NEG instruction: creates the Inst record,
 * fills it via the NEG template, accounts its expressions in the global
 * statistics and updates RIP. Ownership of the returned Inst passes to
 * the caller; on failure the partially built Inst is freed and the
 * exception is rethrown. */
Inst *NegIRBuilder::process(AnalysisProcessor &ap) const {
  this->checkSetup();

  Inst *result = new Inst(ap.getThreadID(), this->address, this->disas);

  try {
    this->templateMethod(ap, *result, this->operands, "NEG");
    ap.incNumberOfExpressions(result->numberOfExpressions()); /* Used for statistics */
    ControlFlow::rip(*result, ap, this->nextAddress);
  }
  catch (std::exception &) {
    delete result;
    throw;
  }

  return result;
}
/* Builds the semantics of a CWDE instruction: creates the Inst record,
 * fills it via the CWDE template, updates RIP and accounts the produced
 * expressions in the global statistics. The caller owns the returned
 * Inst; on failure it is freed and the exception is rethrown. */
Inst *CwdeIRBuilder::process(void) const {
  this->checkSetup();

  Inst *result = new Inst(ap.getThreadID(), this->address, this->disas);

  try {
    this->templateMethod(*result, this->operands, "CWDE");
    ControlFlow::rip(*result, this->nextAddress);
    ap.incNumberOfExpressions(result->numberOfExpressions()); /* Used for statistics */
  }
  catch (std::exception &) {
    delete result;
    throw;
  }

  return result;
}
/**
 * Checks if Op_TauStInd (stind) instruction has a side effect.
 *
 * The store is reported side-effect free only when the stored base object
 * comes straight from a DefArg marked NonNullThisArg (the incoming "this")
 * and the method under optimization is an exception constructor
 * (isExceptionInit) — presumably because such a store is not observable
 * outside the constructor; confirm against callers.
 *
 * @param inst - checked instruction
 * @return <code>true</code> if an instruction has side effect;
 *         <code>false</code> if an instruction has no side effect.
 */
bool LazyExceptionOpt::fieldUsageHasSideEffect(Inst* inst) {
    // Source operand 0 is the stored reference; walk to its defining inst.
    Opnd* insOp = inst->getSrc(0);
    Inst* instDef = insOp->getInst();
    if (instDef->getOpcode() == Op_DefArg) {
#ifdef _DEBUG
        if (Log::isEnabled()) {
            Log::out() << " fieldUsageHasSideEffect: ";
            inst->print(Log::out());
            Log::out() << std::endl;
            Log::out() << " fieldUsageHasSideEffect: ";
            instDef->print(Log::out());
            Log::out() << std::endl;
            Log::out() << " fieldUsageHasSideEffect: ";
            Log::out() << (int)(instDef->getDefArgModifier()) << " " <<
                (instDef->getDefArgModifier()==DefArgNoModifier) << " " <<
                (instDef->getDefArgModifier()==NonNullThisArg) << " " <<
                (instDef->getDefArgModifier()==DefArgBothModifiers) << std::endl;
        }
#endif
        // Store into "this" inside an exception constructor: no side effect.
        if (instDef->getDefArgModifier()==NonNullThisArg && isExceptionInit)
            return false;
    }
    return true;
}
/**
 * Generates CFG edges for the switch instruction terminating the given tail
 * node: one branch edge per case target plus one shared edge for the default
 * target, registering every edge with the SwitchConstant (jump table).
 *
 * @param tailNodeId    id of the node whose last inst is the switch
 * @param numTargets    number of case targets
 * @param targets       case target node ids (numTargets entries)
 * @param probs         edge probabilities indexed by node id
 * @param defaultTarget node id of the default target
 */
void IpfCfgCodeSelector::genSwitchEdges(U_32 tailNodeId, U_32 numTargets, U_32 *targets,
    double *probs, U_32 defaultTarget) {

    BbNode *tailNode = (BbNode *)nodes[tailNodeId];
    InstVector &insts = tailNode->getInsts();
    Inst *switchInst = insts.back(); // the switch is the node's last inst
    Opnd *defTargetImm = switchInst->getOpnd(POS_SWITCH_DEFAULT);
    ConstantRef *constantRef = (ConstantRef *) switchInst->getOpnd(POS_SWITCH_TABLE);
    SwitchConstant *switchConstant = (SwitchConstant *)constantRef->getConstant();
    Edge *defedge = NULL;
    Edge *edge = NULL;
    bool defadded = false;
    U_32 i = 0;

    IPF_LOG << " Generate Switch tailNodeId=" << tailNodeId
        << "; defaultTarget=" << defaultTarget << endl;

    // The default edge is created once up front and shared by every case
    // that branches to the default target.
    defedge = new(mm) Edge(nodes[tailNodeId], nodes[defaultTarget],
        probs[defaultTarget], EDGE_BRANCH);
    defedge->insert();

    for(i=0; i<numTargets; i++) {
        if(targets[i] == defaultTarget) {
            // Case shares the default target: reuse defedge and record the
            // case index in the switch's default-target immediate.
            defTargetImm->setValue(i);
            switchConstant->addEdge(defedge);
            defadded = true;
            IPF_LOG << " default: " << i << endl;
            continue;
        }
        IPF_LOG << " case " << i << ": " << targets[i] << endl;
        edge = new(mm) Edge(nodes[tailNodeId], nodes[targets[i]], probs[i], EDGE_BRANCH);
        edge->insert();
        switchConstant->addEdge(edge);
    }

    // No case used the default target: register defedge at index numTargets
    // (i equals numTargets after the loop).
    if (!defadded) {
        defTargetImm->setValue(i);
        switchConstant->addEdge(defedge);
        defadded = true; // dead store, kept as-is
        IPF_LOG << " default: " << i << endl;
    }
}
//
// Aggressive escape analysis over the method's flow graph. Seeds a work list
// of instructions defining "free" (escaping) refs/ptrs, then is supposed to
// propagate freeness and count the non-escaping ("trapped") ones.
//
U_32 EscapeAnalyzer::doAggressiveAnalysis() {
    //
    // Initialization:
    //
    // (1) Ptrs & refs that are incoming args are free
    // (2) Ptrs & refs returned by calls are free
    // (3) Ptrs & refs returned by the method are free
    // (4) Refs that are thrown by the method are free
    // (5) Refs pass as args to calls are free (ptrs do not escape)
    //
    // Iteration:
    //
    // (6) Refs that are stored through free ptrs or refs are free
    // (7) Refs loaded through free ptrs or refs are free
    //
    MemoryManager memManager("EscapeAnalyzer::doAggressiveAnalysis");
    //
    // work list of instructions that define free refs & ptrs
    //
    StlDeque<Inst*> freeWorkList(memManager);
    DefUseBuilder defUseBuilder(memManager);
    //
    // Initialization step
    //
    const Nodes& nodes = irManager.getFlowGraph().getNodes();
    Nodes::const_iterator niter;
    Opnd *returnOpnd = irManager.getReturnOpnd();
    for(niter = nodes.begin(); niter != nodes.end(); ++niter) {
        Node* node = *niter;
        // getFirstInst() is the node label; real insts follow it.
        Inst *headInst = (Inst*)node->getFirstInst();
        for (Inst* inst=headInst->getNextInst();inst!=NULL; inst=inst->getNextInst()) {
            initialize(inst,freeWorkList,defUseBuilder,returnOpnd);
        }
    }
    //
    // Iteration step
    //
    // NOTE(review): this loop only drains the work list without applying
    // rules (6)-(7), and numTrapped is never incremented, so the function
    // always returns 0 — looks like an unfinished implementation; confirm
    // intent before relying on the result.
    while (freeWorkList.empty() == false) {
        freeWorkList.pop_front();
    }
    U_32 numTrapped = 0;
    return numTrapped;
}
// true if current input signatures reveal that the given signal is observable bool Circuit::observable_cover(string inst_name, string wire_name, CoverType cover) { Inst* inst = (Inst*) sym_table[inst_name]; Wire* wire = (Wire*) sym_table[wire_name]; assert(sim_patterns > 0); Wire* owire = inst->get_output(0)->get_wire(); int num_patterns = (sim_patterns - 1)/ SIGSTEP + 1; int leftover = sim_patterns%SIGSTEP; for (int j = 0; j < num_patterns; ++j) { for (int i = 0; i < int(input_wires.size()); ++i) { input_wires[i]->set_sig_temp(input_wires[i]->get_signature(j)); } for (int i = 0; i < int(linsts.size()); ++i) { if ((j == (num_patterns - 1)) && (leftover > 0)) { linsts[i]->evaluate(leftover); } else { linsts[i]->evaluate(SIGSTEP); } if (linsts[i] == inst) { if (cover == EQUAL) { owire->set_sig_temp((wire->get_sig_temp())); } else if (cover == AND) { owire->set_sig_temp((wire->get_sig_temp() & owire->get_sig_temp())); } else if (cover == OR) { owire->set_sig_temp((wire->get_sig_temp() | owire->get_sig_temp())); } else { assert(0); } } } for (int i = 0; i < int(output_wires.size()); ++i) { if (output_wires[i]->get_sig_temp() != output_wires[i]->get_signature(j)) { return true; } } } return false; }
/**
 * Transform an address to a smart string, that is,
 * if a source line is available, transform it to "source_file:source_line",
 * if the CFG has a label, it gives "label + 0xoffset", else return the
 * address.
 *
 * NOTE(review): the "source_file:source_line" case described above is not
 * handled in this implementation — confirm whether the doc or the code is
 * out of date.
 *
 * @param base Base address of the function containing the give address.
 * @param address Address to display.
 * @return Address transformed in string.
 */
string CFGProcessor::str(const Address& base, const Address& address) {
    // Resolve the reference instruction: entry of the current CFG when no
    // base is supplied, otherwise the instruction at the base address.
    Inst *first;
    if(base.isNull())
        first = _cfg->firstInst();
    else
        first = workspace()->findInstAt(base);
    String label;
    if(first)
        label = FUNCTION_LABEL(first);
    if(label) {
        // Print as "label +/- 0xoffset (raw address)".
        int offset = address.offset() - first->address().offset();
        if(offset < 0)
            return _ << label << " - 0x" << io::hex(-offset) << " (" << address << ")";
        else
            return _ << label << " + 0x" << io::hex(offset) << " (" << address << ")";
    }
    else
        // No label available: fall back to the plain hexadecimal address.
        return _ << "0x" << address;
}
/* Returns the mnemonic for a GCN extension instruction. The AMD GCN
 * append/consume opcodes carry a type suffix ("name_type"); plain
 * no-type instructions return just their name; anything else yields
 * an empty string. */
virtual string getMnemo(Inst inst) const {
    assert(inst);

    const unsigned opcode = inst.opcode();
    const ExtInstDesc* desc = getInstDesc(opcode);

    const bool hasTypeSuffix = (opcode == BRIG_OPCODE_AMD_GCN_APPEND)
                            || (opcode == BRIG_OPCODE_AMD_GCN_CONSUME);
    if (hasTypeSuffix)
        return string(desc->name) + "_" + type2str(inst.type());
    if (desc->parser == parseMnemoBasicNoType)
        return string(desc->name);
    return "";
}
/* PIN callback invoked after an instruction has executed: refreshes the
 * current context handler, updates branch-taken statistics for the last
 * processed instruction, then fires the Python "after" callback. Does
 * nothing while analysis is locked. */
static void callbackAfter(CONTEXT *ctx, THREADID threadId) {
  Inst *inst;

  if (!analysisTrigger.getState())
  /* Analysis locked */
    return;

  /* Update the current context handler */
  ap.updateCurrentCtxH(new PINContextHandler(ctx, threadId));

  /* Get the last instruction */
  inst = ap.getLastInstruction();

  /* Update statistics */
  ap.incNumberOfBranchesTaken(inst->isBranch());

  /* Python callback after instruction processing */
  processingPyConf.callbackAfter(inst, &ap);
}
/* Replaces the computation defining 'opnd' with its strength-reduced
 * equivalent built from induction variable 'iv' and region constant 'rc':
 * a copy of the reduced value is inserted before the old instruction,
 * which is then unlinked. */
void OSR::replace(SsaOpnd* opnd, SsaOpnd* iv, SsaOpnd* rc) {
    Inst* oldInst = opnd->getInst();
    SsaOpnd* reduced = reduce(oldInst->getDst()->getType(),
                              oldInst->getOpcode(),
                              oldInst->getOperation(),
                              iv, rc);
    Inst* copy = irManager.getInstFactory().makeCopy(opnd, reduced);
    // Order matters: splice the copy in first, then remove the old inst.
    copy->insertBefore(oldInst);
    oldInst->unlink();
    writeLeadingOperand(opnd, getLeadingOperand(iv));
}
/**
 * Fill-in the BSet structure by identifying each branch address class.
 *
 * A basic block ends with a conditional branch when it has exactly two
 * TAKEN/NOT_TAKEN out-edges; the class of that branch is its last
 * instruction's address masked with the BHT mask.
 *
 * @param cfg CFG to analyze.
 * @param bs BSets structure to fill-in.
 */
void BPredProcessor::generateClasses(CFG *cfg, BSets& bs) {
    for(CFG::BBIterator bb(cfg); bb; bb++) {
        // Count only the TAKEN / NOT_TAKEN out-edges of the block.
        unsigned int nb_OE = 0;
        for(BasicBlock::OutIterator edge(bb); edge; edge++) {
            if(edge->kind() == Edge::TAKEN || edge->kind() == Edge::NOT_TAKEN)
                nb_OE++;
        }
        // Exactly two such edges => the block ends with a conditional branch.
        if(nb_OE == 2) {
            // The branch is the last instruction of the block.
            Inst* inst = NULL;
            for(BasicBlock::InstIter i(bb); i; i++) {
                inst = i;
            }
            // Bug fix: guard against an empty block — the previous code
            // dereferenced 'inst' unconditionally and crashed on NULL.
            if(inst != NULL)
                bs.add((inst->address() & (this->BHT)), bb->number());
        }
    }
}
/* Strength-reduces the operation (op iv, rc): returns the operand already
 * created for this (op, iv, rc) triple when one exists in the hash table,
 * otherwise emits a new definition, registers it (before recursing into
 * replaceOperands, which may revisit the same triple), records it for
 * linear-function test replacement (LFTR) and returns its destination. */
SsaOpnd* OSR::reduce(Type* type, Opcode opcode, Operation op, SsaOpnd* iv, SsaOpnd* rc) {
    if (Log::isEnabled()) {
        Log::out() << "Reducing: ";
        iv->print(Log::out());
        Log::out() << std::endl;
        rc->print(Log::out());
        Log::out() << std::endl;
    }
    // CSE lookup keyed by the hashed operation and the two operand ids.
    Inst* newinst = hashTable->lookup(op.encodeForHashing(), iv->getId(), rc->getId());
    if (!newinst) {
        Inst* newDef = insertNewDef(type, iv, rc);
        if (Log::isEnabled()) {
            Log::out() << "NewDef" << std::endl;
            newDef->print(Log::out());
            Log::out() << std::endl;
        }
        SsaOpnd* result = newDef->getDst()->asSsaOpnd();
        writeLeadingOperand(result, getLeadingOperand(iv));
        // Insert into the table before replaceOperands recurses.
        hashTable->insert(op.encodeForHashing(), iv->getId(), rc->getId(), newDef);
        writeLFTR(iv, op, rc, newDef->getDst()->asSsaOpnd());
        replaceOperands(type, newDef, iv, rc, opcode, op);
        return result;
    } else {
        // Cache hit: reuse the previously created definition's destination.
        SsaOpnd* result = newinst->getDst()->asSsaOpnd();
        return result;
    }
}
/* Collects unroll candidates: finds all loop-exit edges (optionally skipping
 * parent loops), filters out everything but conditional-branch exits, then
 * builds a LoopUnrollInfo for each surviving exit and appends it to
 * 'result'. */
void findLoopsToUnroll(MemoryManager& tmpMM, IRManager& irm, UnrollInfos& result, const UnrollFlags& flags) {
    ControlFlowGraph& fg = irm.getFlowGraph();
    LoopTree* lt = fg.getLoopTree();

    //find all loop exits
    Edges loopExits(tmpMM);
    const Nodes& nodes = fg.getNodes();
    for (Nodes::const_iterator it = nodes.begin(), end = nodes.end(); it!=end; ++it) {
        Node* node = *it;
        LoopNode* loopNode = lt->getLoopNode(node, false);
        if (loopNode == NULL) {
            continue; //node not in a loop
        }
        if (!flags.unrollParentLoops && loopNode->getChild()!=NULL) {
            continue; //skip parent loops
        }
        const Edges& edges = node->getOutEdges();
        for (Edges::const_iterator ite = edges.begin(), ende = edges.end(); ite!=ende; ++ite) {
            Edge* edge = *ite;
            if (lt->isLoopExit(edge)) {
                loopExits.push_back(edge);
            }
        }
    }

    //filter out all edges except branches
    //(rejected entries are NULLed in place, then erased in one pass below)
    for (Edges::iterator ite = loopExits.begin(), ende = loopExits.end(); ite!=ende; ++ite) {
        Edge* edge = *ite;
        if (edge->isDispatchEdge() || edge->isUnconditionalEdge() || edge->isCatchEdge()) {
            *ite = NULL;
            continue;
        }
        Inst* lastInst = (Inst*)edge->getSourceNode()->getLastInst();
        if (lastInst->isSwitch()) {
            *ite = NULL;
            continue;
        }
        assert(lastInst->isBranch());
        assert(edge->isFalseEdge() || edge->isTrueEdge());
    }
    loopExits.erase(std::remove(loopExits.begin(), loopExits.end(), (Edge*)NULL), loopExits.end());

    // analyze every loop exit and prepare unroll info
    for (Edges::const_iterator ite = loopExits.begin(), ende = loopExits.end(); ite!=ende; ++ite) {
        Edge* edge = *ite;
        Node* sourceNode = edge->getSourceNode();
        Inst* lastInst = (Inst*)sourceNode->getLastInst();
        assert(lastInst->isBranch());
        // prepareUnrollInfo may reject the loop (returns NULL).
        LoopUnrollInfo* info = prepareUnrollInfo(tmpMM, lt, lastInst->asBranchInst());
        if (info == NULL) {
            continue;
        }
        if (Log::isEnabled()) {
            info->print(Log::out());
            Log::out()<<std::endl;
        }
        result.push_back(info);
    }
}
// Strict weak ordering over instructions: compares kind, then width, then
// kind-specific data, and finally the operands recursively in canonical
// order.
bool Inst::operator<(const Inst &Other) const {
  if (this == &Other)
    return false;

  if (K < Other.K)
    return true;
  if (K > Other.K)
    return false;

  if (Width < Other.Width)
    return true;
  if (Width > Other.Width)
    return false;

  switch (K) {
  case Const:
    // Unsigned value compare.
    return Val.ult(Other.Val);
  case UntypedConst: {
    // Bit widths may differ for untyped constants: sign-extend both values
    // to the common width before a signed compare.
    llvm::APInt Val1 = Val, Val2 = Other.Val;
    if (Val1.getBitWidth() < Val2.getBitWidth())
      Val1 = Val1.sext(Val2.getBitWidth());
    else if (Val1.getBitWidth() > Val2.getBitWidth())
      Val2 = Val2.sext(Val1.getBitWidth());
    return Val1.slt(Val2);
  }
  case Var:
    return Number < Other.Number;
  case Phi:
    // Order phis by their block number first; when the blocks are equal,
    // deliberately fall through to the operand comparison below.
    if (B->Number < Other.B->Number)
      return true;
    if (B->Number > Other.B->Number)
      return false;
  default:
    break;
  }

  if (Ops.size() < Other.Ops.size())
    return true;
  if (Ops.size() > Other.Ops.size())
    return false;

  // Compare operands pairwise in canonical order; the first differing pair
  // decides the ordering.
  const std::vector<Inst *> &OpsA = orderedOps();
  const std::vector<Inst *> &OpsB = Other.orderedOps();
  for (unsigned I = 0; I != OpsA.size(); ++I) {
    if (OpsA[I] == OpsB[I])
      continue;
    return (*OpsA[I] < *OpsB[I]);
  }

  // Callers must only reach here with unequal instructions.
  llvm_unreachable("Should have found an unequal operand");
}
/* Materializes 'inOpnd' as an SSA temporary. A tmp operand is returned
 * as-is; a var operand defined by a StVar reuses the stored tmp; any other
 * var operand gets a fresh tmp loaded via a new LdVar inserted at a
 * location chosen relative to 'place'. */
SsaTmpOpnd* OSR::makeTmp(SsaOpnd* inOpnd, Inst* place) {
    // Already a temporary: nothing to do.
    if (!inOpnd->isSsaVarOpnd())
        return inOpnd->asSsaTmpOpnd();

    SsaVarOpnd* varOpnd = inOpnd->asSsaVarOpnd();
    Inst* defInst = varOpnd->getInst();

    // Defined by a StVar: its source already holds the value as a tmp.
    if (defInst->getOpcode() == Op_StVar)
        return defInst->getSrc(0)->asSsaTmpOpnd();

    // Otherwise load the var into a fresh temporary.
    OpndManager& opndMgr = irManager.getOpndManager();
    InstFactory& instFactory = irManager.getInstFactory();
    SsaTmpOpnd* tmp = opndMgr.createSsaTmpOpnd(inOpnd->getType());
    Inst* load = instFactory.makeLdVar(tmp, varOpnd);
    insertInst(load, chooseLocationForConvert(defInst, place));
    writeLeadingOperand(tmp, getLeadingOperand(inOpnd));
    return tmp;
}
/*
 * asynUInt32Digital read method for the Love controller driver.
 *
 * Formats the instrument's read command into the port's output buffer,
 * executes it under the port lock, then converts the reply into *value via
 * the instrument-specific read method.
 *
 * Returns asynSuccess on success; asynError when the instrument defines no
 * read command; otherwise the failing status from command execution or
 * reply conversion (with pasynUser->errorMessage filled in).
 */
static asynStatus readUInt32(void* ppvt,asynUser* pasynUser,epicsUInt32* value,epicsUInt32 mask)
{
    asynStatus sts;
    Port* pport = (Port*)ppvt;
    Inst* pinst = (Inst*)pasynUser->drvUser;

    asynPrint(pasynUser,ASYN_TRACE_FLOW,"drvLove::readUInt32\n");

    if( pinst->pcmd->read )
        sprintf(pport->outMsg,"%s",pinst->pcmd->read);
    else
    {
        /* Bug fix: this path previously copied pport->pasynUser->errorMessage,
           which is never set here, producing a stale/garbage message. */
        epicsSnprintf(pasynUser->errorMessage,pasynUser->errorMessageSize,"%s error no read command defined",pport->name);
        return( asynError );
    }

    lockPort(pport,pasynUser);
    sts = executeCommand(pport,pasynUser);
    unlockPort(pport,pasynUser);

    if( ISNOTOK(sts) )
    {
        epicsSnprintf(pasynUser->errorMessage,pasynUser->errorMessageSize,"%s error %s",pport->name,pport->pasynUser->errorMessage);
        return( sts );
    }

    sts = pinst->read(pinst,(epicsInt32*)value);
    if( ISNOTOK(sts) )
    {
        epicsSnprintf(pasynUser->errorMessage,pasynUser->errorMessageSize,"%s error %s",pport->name,pport->pasynUser->errorMessage);
        return( sts );
    }

    /* sts is known OK here — the former `if( ISOK(sts) )` guard was
       redundant (both failure paths returned above). */
    asynPrint(pasynUser,ASYN_TRACEIO_FILTER,"drvLove::readUInt32 readback from %s is 0x%X,mask=0x%X\n",pport->name,*value,mask);

    return( asynSuccess );
}
/* Builds the qualifying-predicate (qp) tree for the instruction sequence:
 * every predicate defined by an instruction gets a QpNode whose predecessor
 * is the node of the instruction's own qualifying predicate; mutually
 * complementary predicate pairs are cross-linked, and finally the comp/live
 * masks of all nodes are precomputed. */
void QpTree::makeQpTree(InstVector &insts) {
    IPF_ASSERT(qpMap.size() == 1);
    slot = 1;                                                // init slot (position in mask)
    for (InstVector::iterator it=insts.begin(); it!=insts.end(); it++) {
        Inst *inst = *it;                                    // iterate insts
        if (isDefOnePred(inst)) {                            // if inst defs one predicate opnd
            OpndVector &opnds = inst->getOpnds();            // get opnds of the inst
            QpNode *qpNode = findQpNode(opnds[0]);           // inst qp is predecessor for predicates defined in the inst
            makeQpNode(qpNode, opnds[2]);                    // make qpNode for the predicate opnd (it is always second one)
            continue;
        }
        if (isDefTwoPreds(inst)) {                           // if inst defs two predicate opnds
            OpndVector &opnds = inst->getOpnds();            // get opnds of the inst
            QpNode *qpNode = findQpNode(opnds[0]);           // inst qp is predecessor for predicates defined in the inst
            QpNode *p1Node = makeQpNode(qpNode, opnds[1]);   // make qpNode for first predicate opnd
            QpNode *p2Node = makeQpNode(qpNode, opnds[2]);   // make qpNode for second predicate opnd
            if (isDefComps(inst) == false) continue;         // inst does not define mutually complementary predicates - continue
            if (p1Node != NULL) p1Node->setCompNode(p2Node); // p2Node complements p1Node
            if (p2Node != NULL) p2Node->setCompNode(p1Node); // p1Node complements p2Node
        }
    }
    for (QpMap::iterator it=qpMap.begin(); it!=qpMap.end(); it++) {
        QpNode *qpNode = it->second;                         // iterate all qpNodes in the tree
        qpNode->initCompMask();                              // set comp mask (to speed up getCompMask)
    }
    for (QpMap::iterator it=qpMap.begin(); it!=qpMap.end(); it++) {
        QpNode *qpNode = it->second;                         // iterate all qpNodes in the tree
        qpNode->initLiveMask();                              // set live masks (predicate spaces which do not complement)
    }
}
/* Returns the default rounding mode for 'inst': core instructions are
 * answered directly; extension instructions are delegated to the extension
 * that owns the opcode. Unknown opcodes yield BRIG_ROUND_NONE. */
unsigned ExtManager::getDefRounding(Inst inst, unsigned machineModel, unsigned profile) const
{
    assert(inst);
    assert(machineModel == BRIG_MACHINE_SMALL || machineModel == BRIG_MACHINE_LARGE);
    assert(profile == BRIG_PROFILE_BASE || profile == BRIG_PROFILE_FULL);

    if (isCoreInst(inst))
        return getCoreDefRounding(inst, machineModel, profile);

    const Extension* ext = getByProp(PROP_OPCODE, inst.opcode());
    return ext ? ext->getDefRounding(inst, machineModel, profile)
               : BRIG_ROUND_NONE;
}
/* Validates 'inst' for the given machine model and profile: core
 * instructions go through InstValidator (which is expected to throw/assert
 * on failure) and report true; extension instructions are delegated to the
 * owning extension. Returns false for an unknown opcode. */
bool ExtManager::validateInst(Inst inst, unsigned model, unsigned profile) const
{
    assert(inst);
    assert(model == BRIG_MACHINE_SMALL || model == BRIG_MACHINE_LARGE);
    assert(profile == BRIG_PROFILE_BASE || profile == BRIG_PROFILE_FULL);

    if (isCoreInst(inst)) {
        InstValidator(model, profile).validateInst(inst);
        return true;
    }

    const Extension* ext = getByProp(PROP_OPCODE, inst.opcode());
    return ext ? ext->validateInst(inst, model, profile) : false;
}
/* Returns the expected type of operand 'operandIdx' of 'inst': core
 * instructions are answered directly, extension instructions are delegated
 * to the extension owning the opcode. Unknown opcodes yield
 * BRIG_TYPE_INVALID. */
unsigned ExtManager::getOperandType(Inst inst, unsigned operandIdx, unsigned machineModel, unsigned profile) const
{
    assert(inst);
    assert(operandIdx < MAX_OPERANDS_NUM);
    assert(machineModel == BRIG_MACHINE_SMALL || machineModel == BRIG_MACHINE_LARGE);
    assert(profile == BRIG_PROFILE_BASE || profile == BRIG_PROFILE_FULL);

    if (isCoreInst(inst))
        return getCoreOperandType(inst, operandIdx, machineModel, profile);

    const Extension* ext = getByProp(PROP_OPCODE, inst.opcode());
    return ext ? ext->getOperandType(inst, operandIdx, machineModel, profile)
               : BRIG_TYPE_INVALID;
}
/* Escape analysis: collects candidate instructions, marks every instruction
 * whose result escapes the method (propagating through escaping source
 * operands), and returns the number of candidates that do NOT escape
 * ("trapped"). */
U_32 EscapeAnalyzer::doAnalysis() {
    MemoryManager memManager("EscapeAnalyzer::doAnalysis");
    StlDeque<Inst*> candidateSet(memManager);
    BitSet escapingInsts(memManager,irManager.getInstFactory().getNumInsts());

    const Nodes& nodes = irManager.getFlowGraph().getNodes();
    Nodes::const_iterator niter;

    //
    // Clear all marks on instructions
    // Collect instructions that are candidate for escape optimizations
    //
    for(niter = nodes.begin(); niter != nodes.end(); ++niter) {
        Node* node = *niter;
        // getFirstInst() is the node label; real insts follow it.
        Inst *headInst = (Inst*)node->getFirstInst();
        for (Inst* inst=headInst->getNextInst();inst!=NULL;inst=inst->getNextInst()) {
            if (isEscapeOptimizationCandidate(inst))
                candidateSet.push_back(inst);
        }
    }
    //
    // Iteratively mark instructions whose results escape the method
    //
    for(niter = nodes.begin(); niter != nodes.end(); ++niter) {
        Node* node = *niter;
        Inst *headInst = (Inst*)node->getFirstInst();
        for (Inst* inst=headInst->getNextInst();inst!=NULL;inst=inst->getNextInst()) {
            if (isPotentiallyEscapingInst(inst) == false)
                continue;
            escapingInsts.setBit(inst->getId(),true);
            // Propagate: the definition of each escaping source operand
            // escapes as well.
            for (U_32 i=0; i<inst->getNumSrcOperands(); i++) {
                if (isEscapingSrcObject(inst,i) == false)
                    continue;
                // src escapes
                markEscapingInst(inst->getSrc(i)->getInst(),escapingInsts);
            }
        }
    }
    //
    // Count the candidates that were never marked as escaping ("trapped").
    //
    U_32 numTrapped = 0;
    while (candidateSet.empty() == false) {
        Inst* inst = candidateSet.front();
        candidateSet.pop_front();
        if (escapingInsts.getBit(inst->getId()))
            continue;
        numTrapped++;
    }
    return numTrapped;
}
/* Returns true iff all phi instructions in 'node' form one contiguous group
 * at the top of the block; a single non-phi instruction at the second
 * position (right after the label) is tolerated before the group. */
bool SSABuilder::phiInstsOnRightPositionsInBB(Node* node) {
    Inst* cur = (Inst*)node->getSecondInst();

    // Tolerate one non-phi at the second position: start from the third.
    if (cur != NULL && !cur->isPhi())
        cur = cur->getNextInst();

    // Skip the contiguous run of phis.
    while (cur != NULL && cur->isPhi())
        cur = cur->getNextInst();

    // Any phi appearing after that run is misplaced.
    for (; cur != NULL; cur = cur->getNextInst()) {
        if (cur->isPhi())
            return false;
    }
    return true;
}
/* Dispatches 'vis' on the concrete BRIG instruction kind of 'item',
 * wrapping it in the matching proxy type. Cases are listed alphabetically;
 * an unknown kind asserts and yields a default-constructed RetType. */
RetType dispatchByItemKind_gen(Inst item,Visitor& vis) {
    using namespace Brig;
    switch(item.brig()->kind) {
    case BRIG_INST_ADDR:         return vis(InstAddr(item));
    case BRIG_INST_ATOMIC:       return vis(InstAtomic(item));
    case BRIG_INST_ATOMIC_IMAGE: return vis(InstAtomicImage(item));
    case BRIG_INST_BAR:          return vis(InstBar(item));
    case BRIG_INST_BASIC:        return vis(InstBasic(item));
    case BRIG_INST_BR:           return vis(InstBr(item));
    case BRIG_INST_CMP:          return vis(InstCmp(item));
    case BRIG_INST_CVT:          return vis(InstCvt(item));
    case BRIG_INST_FBAR:         return vis(InstFbar(item));
    case BRIG_INST_IMAGE:        return vis(InstImage(item));
    case BRIG_INST_MEM:          return vis(InstMem(item));
    case BRIG_INST_MOD:          return vis(InstMod(item));
    case BRIG_INST_SEG:          return vis(InstSeg(item));
    case BRIG_INST_SOURCE_TYPE:  return vis(InstSourceType(item));
    default:
        assert(false);
        break;
    }
    return RetType();
}
/* Assigns a PersistentInstructionId to every non-label instruction in the
 * method: ids are derived from the instruction's id normalized by the
 * IRManager's minimum inst id, paired with the method descriptor. */
void PersistentInstructionIdGenerator::runPass(IRManager& irm) {
    MemoryManager mm("PersistentInstructionIdGenerator::runPass");

    MethodDesc& method = irm.getMethodDesc();

    StlVector<Node*> postOrder(mm);
    irm.getFlowGraph().getNodesPostOrder(postOrder);

    // Walk nodes in reverse post-order, skipping each node's label inst.
    for (StlVector<Node*>::reverse_iterator it = postOrder.rbegin(); it != postOrder.rend(); ++it) {
        Inst* first = (Inst*)(*it)->getFirstInst();
        for (Inst* cur = first->getNextInst(); cur != NULL; cur = cur->getNextInst()) {
            PersistentInstructionId pid(&method, cur->getId() - irm.getMinimumInstId());
            cur->setPersistentInstructionId(pid);
        }
    }
}
/* Runs linear-function test replacement (LFTR) over the whole method:
 * visits nodes in reverse post-order and applies performLFTR to every
 * compare and branch instruction. */
void OSR::replaceLinearFuncTest(StlVector < Node* >&postOrderNodes) {
    // Reverse iteration over the post-order vector = reverse post-order.
    for (StlVector < Node* >::reverse_iterator rit = postOrderNodes.rbegin();
         rit != postOrderNodes.rend(); ++rit) {
        Node* node = *rit;
        Inst* label = (Inst*) node->getLabelInst();
        // The node's insts form a circular list headed by the label inst.
        for (Inst* cur = (Inst*) label->next();
             cur != 0 && cur != label;
             cur = (Inst*) cur->next()) {
            Opcode op = cur->getOpcode();
            if (op == Op_Cmp || op == Op_Branch)
                performLFTR(cur);
        }
    }
}
/* Dispatches 'vis' on the concrete instruction kind of 'item', wrapping the
 * instruction in its matching proxy type. An unknown kind asserts and
 * returns a default-constructed RetType. */
RetType dispatchByItemKind_gen(Inst item,Visitor& vis) {
  switch(item.kind()) {
  case BRIG_KIND_INST_ADDR: return vis(InstAddr(item));
  case BRIG_KIND_INST_ATOMIC: return vis(InstAtomic(item));
  case BRIG_KIND_INST_BASIC: return vis(InstBasic(item));
  case BRIG_KIND_INST_BR: return vis(InstBr(item));
  case BRIG_KIND_INST_CMP: return vis(InstCmp(item));
  case BRIG_KIND_INST_CVT: return vis(InstCvt(item));
  case BRIG_KIND_INST_IMAGE: return vis(InstImage(item));
  case BRIG_KIND_INST_LANE: return vis(InstLane(item));
  case BRIG_KIND_INST_MEM: return vis(InstMem(item));
  case BRIG_KIND_INST_MEM_FENCE: return vis(InstMemFence(item));
  case BRIG_KIND_INST_MOD: return vis(InstMod(item));
  case BRIG_KIND_INST_QUERY_IMAGE: return vis(InstQueryImage(item));
  case BRIG_KIND_INST_QUERY_SAMPLER: return vis(InstQuerySampler(item));
  case BRIG_KIND_INST_QUEUE: return vis(InstQueue(item));
  case BRIG_KIND_INST_SEG: return vis(InstSeg(item));
  case BRIG_KIND_INST_SEG_CVT: return vis(InstSegCvt(item));
  case BRIG_KIND_INST_SIGNAL: return vis(InstSignal(item));
  case BRIG_KIND_INST_SOURCE_TYPE: return vis(InstSourceType(item));
  default: assert(false); break;
  }
  return RetType();
}
/** * Prints information about optimization candidates. * @param os - output stream */ void LazyExceptionOpt::printOptCandidates(::std::ostream& os) { OptCandidates::iterator it; Inst* oinst; Inst* iinst; Inst* tinst; if (optCandidates == NULL) { return; } for (it = optCandidates->begin( ); it != optCandidates->end( ); it++ ) { os << "~~ opndId " << (*it)->opndId << std::endl; oinst = (*it)->objInst; os << " obj "; if (oinst != NULL) oinst->print(os); else os << "newobj NULL"; os << std::endl; iinst = (*it)->initInst; os << " init "; if (iinst != NULL) iinst->print(os); else os << "call init NULL"; os << std::endl; if ((*it)->throwInsts == NULL) { os << " thr throw NULL"; os << std::endl; continue; } ThrowInsts::iterator it1; for (it1 = (*it)->throwInsts->begin(); it1 !=(*it)->throwInsts->end(); it1++) { tinst = *it1; assert(tinst != NULL); os << " thr "; tinst->print(os); os << std::endl; } } os << "end~~" << std::endl; }
// // find def sites (blocks) of var operand // void SSABuilder::findDefSites(DefSites& allDefSites) { const Nodes& nodes = fg->getNodes(); Nodes::const_iterator niter; for(niter = nodes.begin(); niter != nodes.end(); ++niter) { Node* node = *niter; if (!node->isBlockNode()) continue; // go over each instruction to find var definition Inst* first = (Inst*)node->getFirstInst(); for (Inst* inst = first->getNextInst(); inst != NULL; inst = inst->getNextInst()) { // look for var definitions if (!inst->isStVar()) continue; assert(inst->isVarAccess()); // if inst->getVar() return NULL, then inst is accessing SSAOpnd. // Hence, there is no need to do SSA transformation (addVarDefSite() // immediately returns. allDefSites.addVarDefSite(((VarAccessInst*)inst)->getVar(),node); } } }
/* Classifies 'opnd' relative to the loop headed by 'loopHead':
 * LD_CONST (constant), DOL (defined outside the loop — the default type),
 * COUNTER (monotonic induction variable, with per-iteration increment) or
 * UNDEF. Walks the SSA def chain recursively; 'defStack' holds the
 * instructions on the current path to detect cycles through phi
 * back-edges. */
static OpndLoopInfo processOpnd(LoopNode* loopHead, LoopTree* lt, InstStack& defStack, Opnd* opnd) {
    OpndLoopInfo result;
    Inst* defInst = opnd->getInst();
    if (Log::isEnabled()) {
        log_ident(defStack.size());
        defInst->print(Log::out()); Log::out()<<"]"<<std::endl;
    }
    // Already on the current def path (phi back-edge): report a counter with
    // zero increment and stop the recursion.
    if (std::find(defStack.begin(), defStack.end(), defInst)!=defStack.end()) {
        result.setType(OpndLoopInfo::COUNTER);
        result.setIncrement(0);
        if (Log::isEnabled()) {
            log_ident(defStack.size());
            Log::out()<<"Found duplicate in def stack -> stopping recursion. ";result.print(Log::out()); Log::out()<<std::endl;
        }
        return result;
    }
    Node* defNode = defInst->getNode();
    Opcode opcode = defInst->getOpcode();
    if (opcode == Op_LdConstant) {
        result.setType(OpndLoopInfo::LD_CONST);
        result.setConst(defInst->asConstInst()->getValue().i4);
        if (Log::isEnabled()) {
            log_ident(defStack.size());
            Log::out()<<"assigning to const -> stopping recursion. ";result.print(Log::out());Log::out()<<std::endl;
        }
        return result;
    }
    // Defined outside the loop: keep the default (DOL) type.
    if (!loopHead->inLoop(defNode)) {
        if (Log::isEnabled()) {
            log_ident(defStack.size());
            Log::out()<<"Inst out of the loop -> stopping recursion. ";result.print(Log::out()); Log::out()<<std::endl;
        }
        return result;
    }
    defStack.push_back(defInst);
    if (opcode == Op_Phi) {
        // A counter merged with a loop-invariant value (and not already
        // split) is the canonical induction-variable phi.
        OpndLoopInfo info1 = processOpnd(loopHead, lt, defStack, defInst->getSrc(0));
        OpndLoopInfo info2 = processOpnd(loopHead, lt, defStack, defInst->getSrc(1));
        if (Log::isEnabled()) {
            log_ident(defStack.size());
            Log::out()<<"PHI(";info1.print(Log::out());Log::out()<<",";info2.print(Log::out());Log::out()<<")"<<std::endl;
        }
        if ( ((info1.isCounter() && !info1.isPhiSplit()) && (info2.isDOL() || info2.isLDConst()))
            || ((info2.isCounter() && !info2.isPhiSplit()) && (info1.isDOL() || info1.isLDConst())) ) {
            result.setType(OpndLoopInfo::COUNTER);
            result.setIncrement(info1.isCounter() ? info1.getIncrement() : info2.getIncrement());
            result.markPhiSplit();
        } else {
            result.setType(OpndLoopInfo::UNDEF);
        }
    } else if (opcode == Op_Add || opcode == Op_Sub) { //todo: LADD
        Opnd *op1 = defInst->getSrc(0);
        Opnd *op2 = defInst->getSrc(1);
        OpndLoopInfo info1 = processOpnd(loopHead, lt, defStack, op1);
        OpndLoopInfo info2 = processOpnd(loopHead, lt, defStack, op2);
        if ((info1.isLDConst() || info1.isDOL()) && (info2.isLDConst() || info2.isDOL())) {
            // Both sides loop-invariant: constant only when both are equal
            // constants.
            if (info1.isLDConst() && info2.isLDConst() && info1.getConst() == info2.getConst()) {
                result.setType(OpndLoopInfo::LD_CONST);
                result.setConst(info1.getConst());
            } else {
                //result is DOL (default type)
            }
        } else if ((info1.isCounter() && info2.isLDConst()) || (info2.isCounter() && info1.isLDConst())) {
            int increment = info1.isCounter()? info1.getIncrement(): info2.getIncrement();
            int diff = info1.isLDConst()? info1.getConst(): info2.getConst();
            //we use SSA form to analyze how opnd changes in loop and we do not analyze actual control flow,
            // so we can unroll loops with monotonically changing 'counters' only.
            //Example: when 'counter' changes not monotonically and we can't unroll:
            //idx=0; loop {idx+=100; if(idx>=100) break; idx-=99;} ->'increment'=1 but not monotonicaly.
            bool monotonousFlag = increment == 0 || diff == 0
                || (opcode == Op_Add && signof(diff) == signof(increment))
                || (opcode == Op_Sub && signof(diff) != signof(increment));
            if (monotonousFlag) {
                result.setType(OpndLoopInfo::COUNTER);
                if ((info1.isCounter() && info1.isPhiSplit()) || (info2.isCounter() && info2.isPhiSplit())) {
                    result.markPhiSplit();
                }
                //TO IMPROVE: for loops like: for (; length-1>=0;length--){...}
                //we have 2 SUBs by -1 => "-2", but real counter is changed by "-1".
                //Loop unroll will use "-2". It's ok, because this value is used in a guard inst
                //and ABS(increment_in_unroll) >= ABS(real_increment). This work only for monotonous loops.
                //To make increment_in_unroll == real_increment we must track modifications (SUB,ADD) that affects vars only.
                if (opcode == Op_Add) {
                    result.setIncrement(increment + diff);
                } else {
                    result.setIncrement(increment - diff);
                }
            } else {
                result.setType(OpndLoopInfo::UNDEF);
            }
        } else {
            result.setType(OpndLoopInfo::UNDEF);
        }
    } else if (opcode == Op_StVar || opcode == Op_LdVar) {
        // Copy-through var store/load: classify the source operand.
        Opnd* newOpnd = defInst->getSrc(0);
        result = processOpnd(loopHead, lt, defStack, newOpnd);
    } else if (opcode == Op_TauArrayLen) {
        // Array length is as invariant as the array operand itself.
        Opnd* arrayOpnd = defInst->getSrc(0);
        result = processOpnd(loopHead, lt, defStack, arrayOpnd);
    } else { //unsupported op
        result.setType(OpndLoopInfo::UNDEF);
        if (Log::isEnabled()) {
            log_ident(defStack.size());
            Log::out()<<"unknown op -> stopping recursion. ";
        }
    }
    defStack.pop_back();
    if (Log::isEnabled()) {
        log_ident(defStack.size());
        result.print(Log::out());Log::out()<<std::endl;
    }
    return result;
}
/* The code below is based on the loop_unroll processOpnd function. However it
 * gathers some additional OSR-specific information which is obsolete for
 * loop_unroll.
 *
 * Classifies 'opnd' relative to the loop headed by 'loopHead' (LD_CONST,
 * DOL, COUNTER or UNDEF), recording the operand itself and, where known,
 * its "header" operand. 'defStack' holds the instructions on the current
 * SSA def path to detect cycles through phi back-edges.
 *
 * TODO: create flexible mechanism for gathering info on the operands which
 * would help to avoid code duplication from the one hand, and be customizable
 * from another hand */
OSROpndInfo OSRInductionDetector::processOpnd(LoopTree* tree,
                                              LoopNode* loopHead,
                                              InstStack& defStack,
                                              const Opnd* opnd,
                                              iv_detection_flag flag) {
    if (Log::isEnabled()) {
        Log::out() << "Processing opnd: ";
        opnd->print(Log::out());
        Log::out() << "\n";
    }
    OSROpndInfo result;
    Inst* defInst = opnd->getInst();

    // Cycle in the SSA def chain (phi back-edge): report a counter with
    // zero increment and stop the recursion.
    if (std::find(defStack.begin(), defStack.end(), defInst) != defStack.end()) {
        result.setType(OSROpndInfo::COUNTER);
        result.setIncrement(0);
        result.setOpnd((Opnd*) opnd);
        return result;
    }

    Opcode opcode = defInst->getOpcode();
    if (opcode == Op_LdConstant) {
        result.setType(OSROpndInfo::LD_CONST);
        result.setConst(defInst->asConstInst()->getValue().i4);
        result.setOpnd((Opnd*) opnd);
        result.setHeader((Opnd*) opnd);
        result.setHeaderFound();
        return result;
    }

    // Defined outside this exact loop: keep the default (DOL) type.
    if (!inExactLoop(tree, (Opnd*) opnd, loopHead)) {
        result.setOpnd((Opnd*) opnd);
        result.setHeader((Opnd*) opnd);
        result.setHeaderFound();
        return result;
    }

    defStack.push_back(defInst);

    if (opcode == Op_Phi) {
        OSROpndInfo info1 = processOpnd(tree, loopHead, defStack, defInst->getSrc(0));
        // NOTE(review): for a single-operand phi, info1 is discarded and the
        // result keeps its default type — confirm this is intended.
        if (defInst->getNumSrcOperands() > 1) {
            OSROpndInfo info2 = processOpnd(tree, loopHead, defStack, defInst->getSrc(1));
            if (((info1.isCounter() && !info1.isPhiSplit()) && (info2.isDOL() || info2.isLDConst()))
                || ((info2.isCounter() && !info2.isPhiSplit()) && (info1.isDOL() || info1.isLDConst()))) {
                // Canonical induction-variable phi: counter merged with a
                // loop-invariant value.
                result.setType(OSROpndInfo::COUNTER);
                result.setIncrement(info1.isCounter() ? info1.getIncrement() : info2.getIncrement());
                result.markPhiSplit();
                result.setHeader((Opnd*) opnd);
                result.setHeaderFound();
            } else if ((flag == CHOOSE_MAX_IN_BRANCH)
                       && info1.isCounter() && info2.isCounter()
                       && signof(info1.getIncrement()) == signof(info2.getIncrement())) {
                // Both phi inputs are counters moving in the same direction:
                // pick the one with the larger absolute increment.
                result.setType(OSROpndInfo::COUNTER);
                result.setIncrement(std::abs(info1.getIncrement()) > std::abs(info2.getIncrement())
                                        ? info1.getIncrement() : info2.getIncrement());
                result.markPhiSplit();
                result.setHeader((Opnd*) opnd);
                result.setHeaderFound();
            } else {
                result.setType(OSROpndInfo::UNDEF);
            }
        }
    } else if (opcode == Op_Add || opcode == Op_Sub) {
        Opnd* opnd1 = defInst->getSrc(0);
        Opnd* opnd2 = defInst->getSrc(1);
        OSROpndInfo info1 = processOpnd(tree, loopHead, defStack, opnd1);
        OSROpndInfo info2 = processOpnd(tree, loopHead, defStack, opnd2);

        if ((info1.isLDConst() || info1.isDOL()) && (info2.isLDConst() || info2.isDOL())) {
            // Both sides loop-invariant: constant only when both are equal
            // constants; otherwise the result stays DOL (default type).
            if (info1.isLDConst() && info2.isLDConst() && info1.getConst() == info2.getConst()) {
                result.setType(OSROpndInfo::LD_CONST);
                result.setConst(info1.getConst());
                writeHeaderToResult(result, tree, info1, info2);
            }
        } else if ((info1.isCounter() && info2.isLDConst()) || (info2.isCounter() && info1.isLDConst())) {
            // Bug fix: 'increment' and 'diff' were declared U_32 (unsigned),
            // which made signof() always non-negative and broke the
            // monotonicity test for decreasing counters. The loop_unroll
            // original uses signed ints; do the same here.
            int increment = info1.isCounter() ? info1.getIncrement() : info2.getIncrement();
            int diff = info1.isLDConst() ? info1.getConst() : info2.getConst();

            // Only monotonically changing counters are usable (SSA-based
            // analysis, no control-flow modelling).
            bool monotonousFlag = increment == 0 || diff == 0
                || (opcode == Op_Add && signof(diff) == signof(increment))
                || (opcode == Op_Sub && signof(diff) != signof(increment));
            if (monotonousFlag) {
                result.setType(OSROpndInfo::COUNTER);
                if ((info1.isCounter() && info1.isPhiSplit()) || (info2.isCounter() && info2.isPhiSplit())) {
                    result.markPhiSplit();
                    writeHeaderToResult(result, tree, info1, info2);
                }
                if (opcode == Op_Add) {
                    result.setIncrement(increment + diff);
                    writeHeaderToResult(result, tree, info1, info2);
                } else {
                    result.setIncrement(increment - diff);
                    writeHeaderToResult(result, tree, info1, info2);
                }
            } else {
                result.setType(OSROpndInfo::UNDEF);
            }
        } else {
            result.setType(OSROpndInfo::UNDEF);
        }
    } else if (opcode == Op_StVar || opcode == Op_LdVar) {
        // Copy-through var store/load: classify the source operand.
        Opnd* newOpnd = defInst->getSrc(0);
        result = processOpnd(tree, loopHead, defStack, newOpnd);
    } else if (opcode == Op_TauArrayLen) {
        // Array length is as invariant as the array operand itself.
        Opnd* arrayOpnd = defInst->getSrc(0);
        result = processOpnd(tree, loopHead, defStack, arrayOpnd);
    } else {
        // Unsupported operation.
        result.setType(OSROpndInfo::UNDEF);
    }

    defStack.pop_back();
    result.setOpnd((Opnd*) opnd);
    return result;
}