// Verifies dominator information: first checks the dominators computed by the
// expensive TR_DominatorsChk algorithm, then cross-checks them against the
// dominators computed by the TR_Dominators implementation passed in.
// Fires a fatal assert (debug builds) if either check fails.
TR_DominatorVerifier::TR_DominatorVerifier(TR_Dominators &findDominators) : _compilation(findDominators.comp())
   {
   // Stack allocations made during verification die when this constructor returns
   TR::StackMemoryRegion stackMemoryRegion(*trMemory());

   _dominators = &findDominators;

   TR::CFG *cfg = comp()->getFlowGraph();
   _visitCount = comp()->incVisitCount();
   _numBlocks = cfg->getNumberOfNodes()+1; // one extra slot beyond the CFG node count

   // Optional tracing of the trees and the CFG before verification
   if (debug("traceVER"))
      {
      dumpOptDetails(comp(), "Printing out the TreeTops from DominatorVerifier\n");
      TR::TreeTop *currentTree = comp()->getStartTree();
      while (!(currentTree == NULL))
         {
         comp()->getDebug()->print(comp()->getOutFile(), currentTree);
         currentTree = currentTree->getNextTreeTop();
         }
      dumpOptDetails(comp(), "Printing out the CFG from DominatorVerifier\n");
      if (cfg != NULL)
         comp()->getDebug()->print(comp()->getOutFile(), cfg);
      }

   // Step 1: run the expensive algorithm and verify its result on its own
   TR_DominatorsChk expensiveAlgorithm(comp());
   expensiveAlgorithmCorrect = isExpensiveAlgorithmCorrect(expensiveAlgorithm);
   if (expensiveAlgorithmCorrect)
      {
      if (debug("traceVER"))
         dumpOptDetails(comp(), "Dominators computed by the expensive algorithm are correct\n");
      }
   else
      {
      if (debug("traceVER"))
         dumpOptDetails(comp(), "Dominators computed by the expensive algorithm are NOT correct\n");
      TR_ASSERT(0, "Dominators computed by the expensive algorithm are NOT correct\n");
      }

   // Step 2: verify the two implementations agree with each other
   bothImplementationsConsistent = areBothImplementationsConsistent(expensiveAlgorithm, findDominators);
   if (bothImplementationsConsistent)
      {
      if (debug("traceVER"))
         dumpOptDetails(comp(), "Dominators computed by the two implementations are consistent\n");
      }
   else
      {
      if (debug("traceVER"))
         dumpOptDetails(comp(), "Dominators computed by the two implementations are NOT consistent\n");
      TR_ASSERT(0, "Dominators computed by the two implementations are NOT consistent\n");
      }
   }
// Runs the reaching-blocks analysis over the method's structure.
// Returns the (fixed) cost of performing the analysis.
int32_t TR_ReachingBlocks::perform()
   {
   // The block info must be allocated before the stack mark below — the
   // caller consumes it after this method returns.
   initializeBlockInfo();

      {
      // Everything stack-allocated by the analysis is released at the
      // closing brace of this scope.
      TR::StackMemoryRegion analysisRegion(*trMemory());
      performAnalysis(comp()->getFlowGraph()->getStructure(), false);
      }

   return 10; // actual cost
   }
// Entry point for expression simplification: resets per-run state and
// delegates to the structure-based overload. Returns that overload's cost.
int32_t TR_ExpressionsSimplification::perform()
   {
   TR::StackMemoryRegion memRegion(*trMemory());

   _supportedExpressions = NULL;

   if (trace())
      comp()->dumpMethodTrees("Trees Before Performing Expression Simplification");

   return perform(comp()->getFlowGraph()->getStructure());
   }
// Drives the load-extension optimization: optionally (re)builds global
// use/def info, then makes two preorder passes over all trees — one to
// gather load extension preferences, one to flag the loads accordingly.
// Always returns 0.
int32_t TR_LoadExtensions::perform()
   {
   // Global use/def info is only built at warm and above, and only when the
   // optimizer allows it.
   if (comp()->getOptLevel() >= warm && !optimizer()->cantBuildGlobalsUseDefInfo())
      {
      if (!comp()->getFlowGraph()->getStructure())
         optimizer()->doStructuralAnalysis();

      // Profiler must stay alive for the duration of the use/def calculation.
      TR::LexicalMemProfiler memoryProfiler("Load Extensions: Usedef calculation", comp()->phaseMemProfiler());

      optimizer()->setUseDefInfo(NULL);

      TR_UseDefInfo* useDefInfo = new (comp()->allocator()) TR_UseDefInfo(comp(), comp()->getFlowGraph(), optimizer(), false, false, false, true, true);

      // Keep the info only if it was built successfully.
      if (useDefInfo->infoIsValid())
         optimizer()->setUseDefInfo(useDefInfo);
      else
         delete useDefInfo;
      }

   // Everything below lives only for the duration of this pass.
   TR::StackMemoryRegion stackRegion(*trMemory());

   excludedNodes = new (stackRegion) NodeToIntTable(NodeToIntTableComparator(), NodeToIntTableAllocator(stackRegion));
   loadExtensionPreference = new (stackRegion) NodeToIntTable(NodeToIntTableComparator(), NodeToIntTableAllocator(stackRegion));

   // Pass 1: collect extension preferences.
   for (TR::PreorderNodeIterator it(comp()->getStartTree(), comp()); it.currentTree() != NULL; ++it)
      findPreferredLoadExtensions(it.currentNode());

   // Pass 2: flag the preferred extensions.
   for (TR::PreorderNodeIterator it(comp()->getStartTree(), comp()); it.currentTree() != NULL; ++it)
      flagPreferredLoadExtensions(it.currentNode());

   return 0;
   }
int32_t TR_LocalLiveRangeReduction::perform() { if (TR::Compiler->target.cpu.isZ()) return false; TR::TreeTop * exitTT, * nextTT; TR::Block *b; TR::TreeTop * tt; //calculate number of TreeTops in each bb (or extended bb) for (tt = comp()->getStartTree(); tt; tt = nextTT) { TR::StackMemoryRegion stackMemoryRegion(*trMemory()); TR::Node *node = tt->getNode(); b = node->getBlock(); exitTT = b->getExit(); _numTreeTops = b->getNumberOfRealTreeTops()+2; //include both BBStart/BBend //support for extended blocks while ((nextTT = exitTT->getNextTreeTop()) && (b = nextTT->getNode()->getBlock(), b->isExtensionOfPreviousBlock())) { _numTreeTops += b->getNumberOfRealTreeTops()+2; exitTT = b->getExit(); } _treesRefInfoArray = (TR_TreeRefInfo**)trMemory()->allocateStackMemory(_numTreeTops*sizeof(TR_TreeRefInfo*)); memset(_treesRefInfoArray, 0, _numTreeTops*sizeof(TR_TreeRefInfo*)); _movedTreesList.deleteAll(); _depPairList.deleteAll(); transformExtendedBlock(tt,exitTT->getNextTreeTop()); } if (trace()) traceMsg(comp(), "\nEnding LocalLiveRangeReducer\n"); return 2; }
int32_t TR_AsyncCheckInsertion::perform() { TR::StackMemoryRegion stackMemoryRegion(*trMemory()); // If this is a large acyclic method - add a yield point at each return from this method // so that sampling will realize that we are actually in this method. // static const char *p; static uint32_t numNodesInLargeMethod = (p = feGetEnv("TR_LargeMethodNodes")) ? atoi(p) : NUMBER_OF_NODES_IN_LARGE_METHOD; const bool largeAcyclicMethod = !comp()->mayHaveLoops() && comp()->getNodeCount() > numNodesInLargeMethod; // If this method has loops whose asyncchecks were versioned out, it may // still spend a significant amount of time in each invocation without // yielding. In this case, insert yield points before returns whenever there // is a sufficiently frequent block somewhere in the method. // bool loopyMethodWithVersionedAsyncChecks = false; if (!largeAcyclicMethod && comp()->getLoopWasVersionedWrtAsyncChecks()) { // The max (normalized) block frequency is fixed, but very frequent // blocks push down the frequency of method entry. int32_t entry = comp()->getStartTree()->getNode()->getBlock()->getFrequency(); int32_t limit = comp()->getOptions()->getLoopyAsyncCheckInsertionMaxEntryFreq(); loopyMethodWithVersionedAsyncChecks = 0 <= entry && entry <= limit; } if (largeAcyclicMethod || loopyMethodWithVersionedAsyncChecks) { const char * counterPrefix = largeAcyclicMethod ? "acyclic" : "loopy"; int32_t numAsyncChecksInserted = insertReturnAsyncChecks(this, counterPrefix); if (trace()) traceMsg(comp(), "Inserted %d async checks\n", numAsyncChecksInserted); return 1; } return 0; }
// Runs the reaching-definitions analysis over the CFG's structure.
// Returns the (fixed) cost of performing the analysis.
int32_t TR_ReachingDefinitions::perform()
   {
   LexicalTimer tlex("reachingDefs_perform", comp()->phaseTimer());

   if (traceRD())
      traceMsg(comp(), "Starting ReachingDefinitions\n");

   // Block info is allocated up front; its bit vectors are created lazily.
   initializeBlockInfo(false);

      {
      // Stack allocations made by the analysis die at the end of this scope.
      TR::StackMemoryRegion analysisRegion(*trMemory());
      performAnalysis(_cfg->getStructure(), false);

      if (traceRD())
         traceMsg(comp(), "\nEnding ReachingDefinitions\n");
      }

   return 10; // actual cost
   }
// Moves treeToMove's tree top so it immediately precedes anchor's tree top,
// then incrementally updates the per-tree reference info (_treesRefInfoArray)
// for every tree the moved tree jumped over, and rotates the array so entries
// match the new tree order.
// Returns true if the tree was moved; false if no move happened (the trees
// are already adjacent, or the transformation was vetoed).
bool TR_LocalLiveRangeReduction::moveTreeBefore(TR_TreeRefInfo *treeToMove,TR_TreeRefInfo *anchor,int32_t passNumber)
   {
   TR::TreeTop *treeToMoveTT = treeToMove->getTreeTop();
   TR::TreeTop *anchorTT = anchor->getTreeTop();

   // Already immediately before the anchor: just record the dependency pair.
   if (treeToMoveTT->getNextRealTreeTop() == anchorTT)
      {
      addDepPair(treeToMove, anchor);
      return false;
      }

   if (!performTransformation(comp(), "%sPass %d: moving tree [%p] before Tree %p\n", OPT_DETAILS, passNumber, treeToMoveTT->getNode(),anchorTT->getNode()))
      return false;

   // printf("Moving [%p] before Tree %p\n", treeToMoveTT->getNode(),anchorTT->getNode());

   //changing location in block
   // Unlink the tree from its current position in the doubly-linked list...
   TR::TreeTop *origPrevTree = treeToMoveTT->getPrevTreeTop();
   TR::TreeTop *origNextTree = treeToMoveTT->getNextTreeTop();
   origPrevTree->setNextTreeTop(origNextTree);
   origNextTree->setPrevTreeTop(origPrevTree);

   // ...and splice it in immediately before the anchor.
   TR::TreeTop *prevTree = anchorTT->getPrevTreeTop();
   anchorTT->setPrevTreeTop(treeToMoveTT);
   treeToMoveTT->setNextTreeTop(anchorTT);
   treeToMoveTT->setPrevTreeTop(prevTree);
   prevTree->setNextTreeTop(treeToMoveTT);

   //UPDATE REFINFO
   //find locations of treeTops in TreeTopsRefInfo array
   //startIndex points to the currentTree that has moved
   //endIndex points to the treeTop after which we moved the tree (nextTree)
   int32_t startIndex = getIndexInArray(treeToMove);
   int32_t endIndex = getIndexInArray(anchor)-1;
   int32_t i=0;

   // Update ref info for every tree the moved tree jumped over.
   for ( i = startIndex+1; i<= endIndex ; i++)
      {
      TR_TreeRefInfo *currentTreeRefInfo = _treesRefInfoArray[i];
      // NOTE(review): the six list locals below appear unused in this loop —
      // the actual update happens via updateRefInfo(); confirm before removing.
      List<TR::Node> *firstList = currentTreeRefInfo->getFirstRefNodesList();
      List<TR::Node> *midList = currentTreeRefInfo->getMidRefNodesList();
      List<TR::Node> *lastList = currentTreeRefInfo->getLastRefNodesList();
      List<TR::Node> *M_firstList = treeToMove->getFirstRefNodesList();
      List<TR::Node> *M_midList = treeToMove->getMidRefNodesList();
      List<TR::Node> *M_lastList = treeToMove->getLastRefNodesList();

      if (trace())
         {
         traceMsg(comp(),"Before move:\n");
         printRefInfo(treeToMove);
         printRefInfo(currentTreeRefInfo);
         }

      updateRefInfo(treeToMove->getTreeTop()->getNode(), currentTreeRefInfo, treeToMove , false);
      treeToMove->resetSyms();
      currentTreeRefInfo->resetSyms();
      populatePotentialDeps(currentTreeRefInfo,currentTreeRefInfo->getTreeTop()->getNode());
      populatePotentialDeps(treeToMove,treeToMove->getTreeTop()->getNode());

      if (trace())
         {
         traceMsg(comp(),"After move:\n");
         printRefInfo(treeToMove);
         printRefInfo(currentTreeRefInfo);
         traceMsg(comp(),"------------------------\n");
         }
      }

   // Rotate the ref-info array: shift the jumped-over entries down one slot
   // and place the moved tree's entry at endIndex.
   TR_TreeRefInfo *temp = _treesRefInfoArray[startIndex];
   for (i = startIndex; i< endIndex ; i++)
      {
      _treesRefInfoArray[i] = _treesRefInfoArray[i+1];
      }
   _treesRefInfoArray[endIndex]=temp;

#if defined(DEBUG) || defined(PROD_WITH_ASSUMES)
   // Paranoid verifier: recompute the ref info from scratch and compare it
   // with the incrementally-updated version; assert on any mismatch.
   if (!(comp()->getOption(TR_EnableParanoidOptCheck) || debug("paranoidOptCheck")))
      return true;

   //verifier
      {
      TR::StackMemoryRegion stackMemoryRegion(*trMemory());

      vcount_t visitCount = comp()->getVisitCount();
      int32_t maxRefCount = 0;
      TR::TreeTop *tt;
      TR_TreeRefInfo **treesRefInfoArrayTemp = (TR_TreeRefInfo**)trMemory()->allocateStackMemory(_numTreeTops*sizeof(TR_TreeRefInfo*));
      memset(treesRefInfoArrayTemp, 0, _numTreeTops*sizeof(TR_TreeRefInfo*));
      TR_TreeRefInfo *treeRefInfoTemp;

      //collect info
      for ( int32_t i = 0; i<_numTreeTops-1; i++)
         {
         tt =_treesRefInfoArray[i]->getTreeTop();
         treeRefInfoTemp = new (trStackMemory()) TR_TreeRefInfo(tt, trMemory());
         collectRefInfo(treeRefInfoTemp, tt->getNode(),visitCount,&maxRefCount);
         treesRefInfoArrayTemp[i] = treeRefInfoTemp;
         }

      // Advance the visit count past everything collectRefInfo consumed.
      comp()->setVisitCount(visitCount+maxRefCount);

      // Compare the recomputed lists with the incrementally-maintained ones.
      for ( int32_t i = 0; i<_numTreeTops-1; i++)
         {
         if (!verifyRefInfo(treesRefInfoArrayTemp[i]->getFirstRefNodesList(),_treesRefInfoArray[i]->getFirstRefNodesList()))
            {
            printOnVerifyError(_treesRefInfoArray[i],treesRefInfoArrayTemp[i]);
            TR_ASSERT(0,"fail to verify firstRefNodesList for %p\n",_treesRefInfoArray[i]->getTreeTop()->getNode());
            }
         if (!verifyRefInfo(treesRefInfoArrayTemp[i]->getMidRefNodesList(),_treesRefInfoArray[i]->getMidRefNodesList()))
            {
            printOnVerifyError(_treesRefInfoArray[i],treesRefInfoArrayTemp[i]);
            TR_ASSERT(0,"fail to verify midRefNodesList for %p\n",_treesRefInfoArray[i]->getTreeTop()->getNode());
            }
         if (!verifyRefInfo(treesRefInfoArrayTemp[i]->getLastRefNodesList(),_treesRefInfoArray[i]->getLastRefNodesList()))
            {
            printOnVerifyError(_treesRefInfoArray[i],treesRefInfoArrayTemp[i]);
            TR_ASSERT(0,"fail to verify lastRefNodesList for %p\n",_treesRefInfoArray[i]->getTreeTop()->getNode());
            }
         }
      } // scope of the stack memory region
#endif

   return true;
   }
// Evaluates the arguments of callNode and moves them into their linkage
// locations (argument registers or the outgoing stack area) for the AArch64
// system linkage, recording register dependencies on `dependencies`.
// Returns the total size, in bytes, of the outgoing stack argument area
// (16-byte aligned; 0 when all arguments fit in registers).
int32_t TR::ARM64SystemLinkage::buildArgs(TR::Node *callNode, TR::RegisterDependencyConditions *dependencies)
   {
   const TR::ARM64LinkageProperties &properties = getProperties();
   TR::ARM64MemoryArgument *pushToMemory = NULL;
   TR::Register *argMemReg;  // base register for the outgoing stack area (set only when numMemArgs > 0)
   TR::Register *tempReg;
   int32_t argIndex = 0;
   int32_t numMemArgs = 0;
   int32_t argSize = 0;
   int32_t numIntegerArgs = 0;
   int32_t numFloatArgs = 0;
   int32_t totalSize;
   int32_t i;

   TR::Node *child;
   TR::DataType childType;
   TR::DataType resType = callNode->getType();

   uint32_t firstArgumentChild = callNode->getFirstArgumentIndex();

   /* Step 1 - figure out how many arguments are going to be spilled to memory i.e. not in registers */
   for (i = firstArgumentChild; i < callNode->getNumChildren(); i++)
      {
      child = callNode->getChild(i);
      childType = child->getDataType();

      switch (childType)
         {
         case TR::Int8:
         case TR::Int16:
         case TR::Int32:
         case TR::Int64:
         case TR::Address:
            if (numIntegerArgs >= properties.getNumIntArgRegs())
               numMemArgs++;
            numIntegerArgs++;
            break;

         case TR::Float:
         case TR::Double:
            if (numFloatArgs >= properties.getNumFloatArgRegs())
               numMemArgs++;
            numFloatArgs++;
            break;

         default:
            TR_ASSERT(false, "Argument type %s is not supported\n", childType.toString());
         }
      }

   // From here, down, any new stack allocations will expire / die when the function returns
   TR::StackMemoryRegion stackMemoryRegion(*trMemory());

   /* End result of Step 1 - determined number of memory arguments! */
   if (numMemArgs > 0)
      {
      pushToMemory = new (trStackMemory()) TR::ARM64MemoryArgument[numMemArgs];
      argMemReg = cg()->allocateRegister();
      }

   // Each memory argument occupies an 8-byte slot.
   totalSize = numMemArgs * 8;
   // align to 16-byte boundary
   totalSize = (totalSize + 15) & (~15);

   // Step 2: walk the arguments again, this time assigning registers/slots.
   numIntegerArgs = 0;
   numFloatArgs = 0;

   for (i = firstArgumentChild; i < callNode->getNumChildren(); i++)
      {
      TR::MemoryReference *mref = NULL;
      TR::Register *argRegister;
      TR::InstOpCode::Mnemonic op;

      child = callNode->getChild(i);
      childType = child->getDataType();

      switch (childType)
         {
         case TR::Int8:
         case TR::Int16:
         case TR::Int32:
         case TR::Int64:
         case TR::Address:
            if (childType == TR::Address)
               argRegister = pushAddressArg(child);
            else if (childType == TR::Int64)
               argRegister = pushLongArg(child);
            else
               argRegister = pushIntegerWordArg(child);

            if (numIntegerArgs < properties.getNumIntArgRegs())
               {
               // Copy into a fresh register when the child's register must not be clobbered.
               if (!cg()->canClobberNodesRegister(child, 0))
                  {
                  if (argRegister->containsCollectedReference())
                     tempReg = cg()->allocateCollectedReferenceRegister();
                  else
                     tempReg = cg()->allocateRegister();
                  generateMovInstruction(cg(), callNode, tempReg, argRegister);
                  argRegister = tempReg;
                  }
               if (numIntegerArgs == 0 && (resType.isAddress() || resType.isInt32() || resType.isInt64()))
                  {
                  // x0 is both the first integer argument register and the
                  // return register: pre-depend the argument, post-depend the result.
                  TR::Register *resultReg;
                  if (resType.isAddress())
                     resultReg = cg()->allocateCollectedReferenceRegister();
                  else
                     resultReg = cg()->allocateRegister();

                  dependencies->addPreCondition(argRegister, TR::RealRegister::x0);
                  dependencies->addPostCondition(resultReg, TR::RealRegister::x0);
                  }
               else
                  {
                  addDependency(dependencies, argRegister, properties.getIntegerArgumentRegister(numIntegerArgs), TR_GPR, cg());
                  }
               }
            else
               {
               // numIntegerArgs >= properties.getNumIntArgRegs()
               // Spill to the outgoing stack area with a post-indexed store.
               if (childType == TR::Address || childType == TR::Int64)
                  {
                  op = TR::InstOpCode::strpostx;
                  }
               else
                  {
                  op = TR::InstOpCode::strpostw;
                  }
               mref = getOutgoingArgumentMemRef(argMemReg, argRegister, op, pushToMemory[argIndex++]);
               argSize += 8; // always 8-byte aligned
               }
            numIntegerArgs++;
            break;

         case TR::Float:
         case TR::Double:
            if (childType == TR::Float)
               argRegister = pushFloatArg(child);
            else
               argRegister = pushDoubleArg(child);

            if (numFloatArgs < properties.getNumFloatArgRegs())
               {
               if (!cg()->canClobberNodesRegister(child, 0))
                  {
                  tempReg = cg()->allocateRegister(TR_FPR);
                  op = (childType == TR::Float) ? TR::InstOpCode::fmovs : TR::InstOpCode::fmovd;
                  generateTrg1Src1Instruction(cg(), op, callNode, tempReg, argRegister);
                  argRegister = tempReg;
                  }
               if ((numFloatArgs == 0 && resType.isFloatingPoint()))
                  {
                  // v0 is both the first FP argument register and the FP return register.
                  TR::Register *resultReg;
                  if (resType.getDataType() == TR::Float)
                     resultReg = cg()->allocateSinglePrecisionRegister();
                  else
                     resultReg = cg()->allocateRegister(TR_FPR);

                  dependencies->addPreCondition(argRegister, TR::RealRegister::v0);
                  dependencies->addPostCondition(resultReg, TR::RealRegister::v0);
                  }
               else
                  {
                  addDependency(dependencies, argRegister, properties.getFloatArgumentRegister(numFloatArgs), TR_FPR, cg());
                  }
               }
            else
               {
               // numFloatArgs >= properties.getNumFloatArgRegs()
               if (childType == TR::Double)
                  {
                  op = TR::InstOpCode::vstrpostd;
                  }
               else
                  {
                  op = TR::InstOpCode::vstrposts;
                  }
               mref = getOutgoingArgumentMemRef(argMemReg, argRegister, op, pushToMemory[argIndex++]);
               argSize += 8; // always 8-byte aligned
               }
            numFloatArgs++;
            break;
         } // end of switch
      } // end of for

   // NULL deps for non-preserved and non-system regs
   while (numIntegerArgs < properties.getNumIntArgRegs())
      {
      if (numIntegerArgs == 0 && resType.isAddress())
         {
         dependencies->addPreCondition(cg()->allocateRegister(), properties.getIntegerArgumentRegister(0));
         dependencies->addPostCondition(cg()->allocateCollectedReferenceRegister(), properties.getIntegerArgumentRegister(0));
         }
      else
         {
         addDependency(dependencies, NULL, properties.getIntegerArgumentRegister(numIntegerArgs), TR_GPR, cg());
         }
      numIntegerArgs++;
      }

   int32_t floatRegsUsed = (numFloatArgs > properties.getNumFloatArgRegs()) ? properties.getNumFloatArgRegs() : numFloatArgs;
   for (i = (TR::RealRegister::RegNum)((uint32_t)TR::RealRegister::v0 + floatRegsUsed); i <= TR::RealRegister::LastFPR; i++)
      {
      if (!properties.getPreserved((TR::RealRegister::RegNum)i))
         {
         // NULL dependency for non-preserved regs
         addDependency(dependencies, NULL, (TR::RealRegister::RegNum)i, TR_FPR, cg());
         }
      }

   if (numMemArgs > 0)
      {
      // Point argMemReg below the stack pointer, then emit the stores that
      // populate the outgoing argument area.
      TR::RealRegister *sp = cg()->machine()->getRealRegister(properties.getStackPointerRegister());
      generateTrg1Src1ImmInstruction(cg(), TR::InstOpCode::subimmx, callNode, argMemReg, sp, totalSize);

      for (argIndex = 0; argIndex < numMemArgs; argIndex++)
         {
         TR::Register *aReg = pushToMemory[argIndex].argRegister;
         generateMemSrc1Instruction(cg(), pushToMemory[argIndex].opCode, callNode, pushToMemory[argIndex].argMemory, aReg);
         cg()->stopUsingRegister(aReg);
         }

      cg()->stopUsingRegister(argMemReg);
      }

   return totalSize;
   }
// Removes exception edges that can never be taken (the source block raises
// nothing the catch block could catch) and then removes catch blocks all of
// whose exception predecessors were marked unreachable.
// Returns the optimization's cost (0 when there is no CFG).
int32_t TR_CatchBlockRemover::perform()
   {
   TR::CFG *cfg = comp()->getFlowGraph();
   if (cfg == NULL)
      {
      if (trace())
         traceMsg(comp(), "Can't do Catch Block Removal, no CFG\n");
      return 0;
      }

   if (trace())
      traceMsg(comp(), "Starting Catch Block Removal\n");

   bool thereMayBeRemovableCatchBlocks = false;

      {
      TR::StackMemoryRegion stackMemoryRegion(*trMemory());

      TR::Block *block;
      ListIterator<TR::CFGEdge> edgeIterator;

      // Go through all blocks that have exception successors and see if any of them
      // are not reached. Mark each of these edges with a visit count so they can
      // be identified later.
      //
      vcount_t visitCount = comp()->incOrResetVisitCount();

      TR::CFGNode *cfgNode;
      for (cfgNode = cfg->getFirstNode(); cfgNode; cfgNode = cfgNode->getNext())
         {
         if (cfgNode->getExceptionSuccessors().empty())
            continue;

         block = toBlock(cfgNode);
         uint32_t reachedExceptions = 0;
         TR::TreeTop *treeTop;
         // Union the exceptions raised by every tree in the block.
         for (treeTop = block->getEntry(); treeTop != block->getExit(); treeTop = treeTop->getNextTreeTop())
            {
            reachedExceptions |= treeTop->getNode()->exceptionsRaised();

            if (treeTop->getNode()->getOpCodeValue() == TR::monexitfence) // for live monitor metadata
               reachedExceptions |= TR::Block::CanCatchMonitorExit;
            }

         // Blocks that can raise user throws keep all their exception edges.
         if (reachedExceptions & TR::Block::CanCatchUserThrows)
            continue;

         // Note: the iterator is advanced before removeEdge may invalidate it.
         for (auto edge = block->getExceptionSuccessors().begin(); edge != block->getExceptionSuccessors().end();)
            {
            TR::CFGEdge * current = *(edge++);
            TR::Block *catchBlock = toBlock(current->getTo());

            // OSR blocks must not be removed.
            if (catchBlock->isOSRCodeBlock() || catchBlock->isOSRCatchBlock())
               continue;
            // If the block raises no exceptions at all, remove the edge
            // immediately; otherwise only mark edges whose catch block cannot
            // catch anything the block raises.
            if (!reachedExceptions &&
                performTransformation(comp(), "%sRemove redundant exception edge from block_%d at [%p] to catch block_%d at [%p]\n", optDetailString(), block->getNumber(), block, catchBlock->getNumber(), catchBlock))
               {
               cfg->removeEdge(block, catchBlock);
               thereMayBeRemovableCatchBlocks = true;
               }
            else
               {
               if (!catchBlock->canCatchExceptions(reachedExceptions))
                  {
                  current->setVisitCount(visitCount);
                  thereMayBeRemovableCatchBlocks = true;
                  }
               }
            }
         }

      bool edgesRemoved = false;

      // Now look to see if there are any catch blocks for which all exception
      // predecessors have the visit count set. If so, the block is unreachable and
      // can be removed.
      // If only some of the exception predecessors are marked, these edges are
      // left in place to identify the try/catch structure properly.
      //
      while (thereMayBeRemovableCatchBlocks)
         {
         thereMayBeRemovableCatchBlocks = false;
         for (cfgNode = cfg->getFirstNode(); cfgNode; cfgNode = cfgNode->getNext())
            {
            if (cfgNode->getExceptionPredecessors().empty())
               continue;

            // Check whether every exception predecessor edge was marked above.
            auto edgeIt = cfgNode->getExceptionPredecessors().begin();
            for (; edgeIt != cfgNode->getExceptionPredecessors().end(); ++edgeIt)
               {
               if ((*edgeIt)->getVisitCount() != visitCount)
                  break;
               }
            if (edgeIt == cfgNode->getExceptionPredecessors().end() &&
                performTransformation(comp(), "%sRemove redundant catch block_%d at [%p]\n", optDetailString(), cfgNode->getNumber(), cfgNode))
               {
               while (!cfgNode->getExceptionPredecessors().empty())
                  {
                  cfg->removeEdge(cfgNode->getExceptionPredecessors().front());
                  }
               edgesRemoved = true;
               // Removing these edges may expose more removable catch blocks.
               thereMayBeRemovableCatchBlocks = true;
               }
            }
         }

      // Any transformations invalidate use/def and value number information
      //
      if (edgesRemoved)
         {
         optimizer()->setUseDefInfo(NULL);
         optimizer()->setValueNumberInfo(NULL);
         requestOpt(OMR::treeSimplification, true);
         }
      } // scope of the stack memory region

   if (trace())
      traceMsg(comp(), "\nEnding Catch Block Removal\n");

   return 1; // actual cost
   }
const char *TR::DebugCounter::debugCounterBucketName(TR::Compilation *comp, int32_t value, const char *format, ...) { if (!comp->getOptions()->enableDebugCounters()) { return NULL; } TR::StackMemoryRegion stackMemoryRegion(*comp->trMemory()); char *bucketFormat = (char*)comp->trMemory()->allocateStackMemory(strlen(format) + 40); // appending "=%d..%d" where each %d could be 11 characters int32_t low = value; int32_t high = value; if (value != 0 && comp->getOptions()->getDebugCounterBucketGranularity() >= 1) { const int32_t magnitude = abs(value); low = high = magnitude; const int32_t granularity = comp->getOptions()->getDebugCounterBucketGranularity(); const double bucketRatio = pow(2.0, 1.0 / granularity); // TODO: calculate once const int32_t logLow = (int)(log((double)magnitude)/log(bucketRatio)); // Figure out which bucket sizing algorithm to use // const int32_t log2magnitude = 31-leadingZeroes(magnitude); // floor const int32_t doublingInterval = 1 << log2magnitude; if (doublingInterval <= granularity) { // Tiny buckets degenerate to one value per bucket high = low; } else { // We do this with some buckets of size smallBucketSize, plus // some that are 1 larger. // We'd like to make sure make sure the smaller ones come // before bigger ones (eg. 
we want to see 8-9, 10-12, 13-15 // rather than 8-10, 11-12, 13-15) // low = 1 << log2magnitude; // power-of-two starting point int32_t smallBucketSize = doublingInterval / granularity; int32_t numBigBuckets = doublingInterval - smallBucketSize * granularity; int32_t numSmallBuckets = granularity - numBigBuckets; int32_t totalSmallBucketSize = numSmallBuckets * smallBucketSize; int32_t offset = magnitude-low; if (offset < totalSmallBucketSize) { low += offset - offset % smallBucketSize; high = low + smallBucketSize - 1; } else { offset -= totalSmallBucketSize; low += totalSmallBucketSize + offset - offset % (smallBucketSize+1); high = low + smallBucketSize; } } TR_ASSERT(low <= magnitude && magnitude <= high, "Range (%d..%d) must contain %d\n", low, high, magnitude); if (value < 0) { low = -low; high = -high; } } if (low == high) sprintf(bucketFormat, "%s=%d", format, low); else sprintf(bucketFormat, "%s=%d..%d", format, low, high); va_list args; va_start(args, format); const char *result = comp->getPersistentInfo()->getStaticCounters()->counterName(comp, bucketFormat, args); va_end(args); return result; }