// Constructs a Spectrum from precomputed analysis values.
//
// @param spectrum   spectrum bins, copied into m_spectrum
// @param strongPeak strong-peak measure stored verbatim in m_strongPeak
// @param loudness   loudness value stored verbatim in m_loudness
//
// computeMax() is invoked once at construction so the maximum is cached
// up front — presumably over m_spectrum; confirm against computeMax()'s
// definition, which is not visible here.
Spectrum::Spectrum(const std::vector<essentia::Real>& spectrum,
                   essentia::Real strongPeak,
                   essentia::Real loudness)
    : m_spectrum(spectrum), m_strongPeak(strongPeak), m_loudness(loudness) {
  computeMax();
}
// Returns the maximum data point, computing it lazily on first access.
// If computeMax() fails to establish the maximum (maxComputed stays
// false), falls back to oneToZero().
PivPointData PivData::max() {
  if (!maxComputed) {
    computeMax();
  }
  if (maxComputed) {
    return _max;
  }
  return oneToZero();
}
int RefinerTemp::multirefine() { computeAverage(); double avg = averageLoad; int maxPe=-1; // double max = computeMax(); double max = computeMax(&maxPe); //const double overloadStep = 0.01; const double overloadStep = 0.01; const double overloadStart = 1.001; // double dCurOverload = max / avg; double dCurOverload = max /(totalInst*procFreqNew[maxPe]/sumFreqs); int minOverload = 0; int maxOverload = (int)((dCurOverload - overloadStart)/overloadStep + 1); double dMinOverload = minOverload * overloadStep + overloadStart; double dMaxOverload = maxOverload * overloadStep + overloadStart; int curOverload; int refineDone = 0; //CmiPrintf("maxPe=%d max=%f myAvg=%f dMinOverload: %f dMaxOverload: %f\n",maxPe,max,(totalInst*procFreqNew[maxPe]/sumFreqs), dMinOverload, dMaxOverload); if (_lb_args.debug()>=1) CmiPrintf("dMinOverload: %f dMaxOverload: %f\n", dMinOverload, dMaxOverload); overLoad = dMinOverload; if (refine()) refineDone = 1; else { overLoad = dMaxOverload; if (!refine()) { CmiPrintf("ERROR: Could not refine at max overload\n"); refineDone = 1; } } // Scan up, until we find a refine that works while (!refineDone) { if (maxOverload - minOverload <= 1) refineDone = 1; else { curOverload = (maxOverload + minOverload ) / 2; overLoad = curOverload * overloadStep + overloadStart; if (_lb_args.debug()>=1) CmiPrintf("Testing curOverload %d = %f [min,max]= %d, %d\n", curOverload, overLoad, minOverload, maxOverload); if (refine()) maxOverload = curOverload; else minOverload = curOverload; } } return 1; }
/*
 * Driver: reads numbers into a fixed-size array, finds their maximum,
 * prints the array, then reports the maximum.  Returns 0.
 */
int main() {
  int array[MAX];

  getNums(array);
  const int max = computeMax(array);
  printArray(array);
  printf("Max is %d\n", max);
  return 0;
}
void Alg7::strategy() { // double bestSize0, bestSize1, bestSize2; computeInfo *c; int numAssigned; processorInfo* goodP[3][3][2]; // goodP[# of real patches][# of proxies] processorInfo* poorP[3][3][2]; // fallback option double startTime = CmiWallTimer(); // iout << iINFO << "calling makeHeaps. \n"; adjustBackgroundLoadAndComputeAverage(); makeHeaps(); // iout << iINFO << "Before assignment\n" << endi; // printLoads(); /* int numOverloaded = 0; for (int ip=0; ip<P; ip++) { if ( processors[ip].backgroundLoad > averageLoad ) ++numOverloaded; } if ( numOverloaded ) { iout << iWARN << numOverloaded << " processors are overloaded due to background load.\n" << endi; } */ numAssigned = 0; // for (int i=0; i<numPatches; i++) // { std::cout << "(" << patches[i].Id << "," << patches[i].processor ;} overLoad = 1.2; for (int ic=0; ic<numComputes; ic++) { // place computes w/ patches on heavily background loaded nodes first // place pair before self, because self is more flexible c = (computeInfo *) computeBgPairHeap->deleteMax(); if ( ! c ) c = (computeInfo *) computeBgSelfHeap->deleteMax(); if ( ! c ) c = (computeInfo *) computePairHeap->deleteMax(); if ( ! c ) c = (computeInfo *) computeSelfHeap->deleteMax(); if (c->processor != -1) continue; // skip to the next compute; if ( ! c ) NAMD_bug("Alg7: computesHeap empty!"); int i,j,k; for(i=0;i<3;i++) for(j=0;j<3;j++) { for(k=0;k<2;k++) { goodP[i][j][k]=0; poorP[i][j][k]=0; } } // first try for at least one proxy { Iterator nextProc; processorInfo *p; p = &processors[patches[c->patch1].processor]; togrid(goodP, poorP, p, c); p = &processors[patches[c->patch2].processor]; togrid(goodP, poorP, p, c); p = (processorInfo *)patches[c->patch1]. proxiesOn.iterator((Iterator *)&nextProc); while (p) { togrid(goodP, poorP, p, c); p = (processorInfo *)patches[c->patch1]. proxiesOn.next((Iterator*)&nextProc); } p = (processorInfo *)patches[c->patch2]. 
proxiesOn.iterator((Iterator *)&nextProc); while (p) { togrid(goodP, poorP, p, c); p = (processorInfo *)patches[c->patch2]. proxiesOn.next((Iterator*)&nextProc); } p = 0; // prefer to place compute with existing proxies over home patches if ((p = goodP[0][2][0]) // No home, two proxies || (p = goodP[1][1][0]) // One home, one proxy || (p = goodP[2][0][0]) // Two home, no proxies || (p = goodP[0][1][0]) // No home, one proxy || (p = goodP[1][0][0]) // One home, no proxies || (p = goodP[0][0][0]) // No home, no proxies || (p = goodP[0][1][1]) // No home, one proxy || (p = goodP[1][0][1]) // One home, no proxies || (p = goodP[0][0][1]) // No home, no proxies ) { assign(c,p); numAssigned++; continue; } } // no luck, do it the long way heapIterator nextProcessor; processorInfo *p = (processorInfo *) pes->iterator((heapIterator *) &nextProcessor); while (p) { togrid(goodP, poorP, p, c); p = (processorInfo *) pes->next(&nextProcessor); } // if (numAssigned >= 0) { Else is commented out below p = 0; // prefer to place compute with existing proxies over home patches if ((p = goodP[0][2][0]) // No home, two proxies || (p = goodP[1][1][0]) // One home, one proxy || (p = goodP[2][0][0]) // Two home, no proxies || (p = goodP[0][1][0]) // No home, one proxy || (p = goodP[1][0][0]) // One home, no proxies || (p = goodP[0][0][0]) // No home, no proxies || (p = goodP[0][1][1]) // No home, one proxy || (p = goodP[1][0][1]) // One home, no proxies || (p = goodP[0][0][1]) // No home, no proxies ) { assign(c,p); numAssigned++; } else if ( // overloaded processors (p = poorP[0][2][0]) // No home, two proxies || (p = poorP[1][1][0]) // One home, one proxy || (p = poorP[2][0][0]) // Two home, no proxies || (p = poorP[0][1][0]) // No home, one proxy || (p = poorP[1][0][0]) // One home, no proxies || (p = poorP[0][0][0]) // No home, no proxies || (p = poorP[0][1][1]) // No home, one proxy || (p = poorP[1][0][1]) // One home, no proxies || (p = poorP[0][0][1]) // No home, no proxies ) { 
//iout << iWARN << "overload assign to " << p->Id << "\n" << endi; assign(c,p); numAssigned++; } else { NAMD_bug("*** Alg 7 No receiver found 1 ***"); break; } } printLoads(); if ( computeMax() <= origMaxLoad ) { // binary-search refinement procedure multirefine(1.05); printLoads(); } }
int Refiner::multirefine(bool reset) { computeAverage(); double avg = averageLoad; double max = computeMax(); const double overloadStep = 0.01; const double overloadStart = overLoad; double dCurOverload = max / avg; int minOverload = 0; int maxOverload = (int)((dCurOverload - overloadStart)/overloadStep + 1); double dMinOverload = minOverload * overloadStep + overloadStart; double dMaxOverload = maxOverload * overloadStep + overloadStart; int curOverload; int refineDone = 0; if (_lb_args.debug()>=1) CmiPrintf("dMinOverload: %f dMaxOverload: %f\n", dMinOverload, dMaxOverload); overLoad = dMinOverload; if (refine()) refineDone = 1; else { overLoad = dMaxOverload; if (!refine()) { CmiPrintf("ERROR: Could not refine at max overload\n"); refineDone = 1; } } // Scan up, until we find a refine that works while (!refineDone) { if (maxOverload - minOverload <= 1) refineDone = 1; else { curOverload = (maxOverload + minOverload ) / 2; overLoad = curOverload * overloadStep + overloadStart; if (_lb_args.debug()>=1) CmiPrintf("Testing curOverload %d = %f [min,max]= %d, %d\n", curOverload, overLoad, minOverload, maxOverload); // Reset the processors datastructure to the original if (reset) { int i; for (i = 0; i < P; i++) { processors[i].computeLoad = 0; delete processors[i].computeSet; processors[i].computeSet = new Set(); } for (i = 0; i < numComputes; i++) assign((computeInfo *) &(computes[i]), (processorInfo *) &(processors[computes[i].oldProcessor])); } if (refine()) maxOverload = curOverload; else minOverload = curOverload; } } return 1; }
//****************************************************************************
// Trains one Gaussian mixture model (cv::EM) per (reference, target)
// object-class pair from the pairwise feature matrices in FMPairObject,
// and records the per-pair normalization statistics (mean/std, min/max).
//
// @param nclusters  number of mixture components for each cv::EM model
//
// Side effects (all appended per reference class i):
//   learnedModelPairObject        — one vector<cv::EM> per reference class
//   meanNormalizationPair/std...  — z-score normalization stats per pair
//   minFeatPair/maxFeatPair       — min-max normalization stats per pair
void DatabaseInformation::computeGMM_PairObject_AllFeat(int nclusters) {
  if (TESTFLAG) {
    cout << "Inside DBInfo compute GMM PAIR O: start." << endl;
  }
  learnedModelPairObject.reserve(NOBJECTCLASSES);
  // NOTE(review): this counts FMPairObject[0]'s second dimension, while
  // the original comment suggests it should be the per-scene feature
  // count ("5; compute it") — confirm against how FMPairObject is built.
  int numberOfFeat = FMPairObject[0].size(); // 5; compute it
  int countFeat;
  // loop over reference object i
  for ( int i = 0 ; i < FMPairObject.size(); i++ ) {
    vector<cv::EM> internalEMvector; // to work with vector of vector
    vector<vector<double> > meanNormalizationPair_currentRef;
    vector<vector<double> > stdNormalizationPair_currentRef;
    vector<vector<double> > mintmp;
    vector<vector<double> > maxtmp;
    // loop over target object j
    for ( int j = 0; j < FMPairObject[i].size(); j++) {
      if (TESTFLAG) {
        cout << "Inside DBInfo compute GMM PAIR O: size of model is now: "
             << learnedModelPairObject.size() << " for object classes : "
             << i << " and " << j << endl;
      }
      int numberOfScenes = FMPairObject[i][j].size();
      int countScene = 0;
      // One row per scene, one column per scalar feature value.
      cv::Mat FeatMat = cv::Mat::zeros ( numberOfScenes, numberOfFeat, CV_64F );
      for(vector<vector<FeatureInformation> >::iterator it = (FMPairObject[i][j]).begin();
          it != (FMPairObject[i][j]).end(); ++it) {
        countFeat = 0;
        // for each feature of the current scene and belonging to current object
        for (vector<FeatureInformation>::iterator it2 = (*it).begin();
             it2 != (*it).end(); ++it2) {
          FeatureInformation currentFeature = *it2;
          vector<float> currentFeatureValues = currentFeature.getAllValues();
          // depending on dimensionality of that feature: add all values in
          // the current row, advancing the column for each scalar
          for ( int k = 0; k < currentFeatureValues.size() ; k++ ) {
            FeatMat.at<double>(countScene, countFeat) =
              (double) (currentFeatureValues.at(k));
            countFeat++;
          }
        }
        countScene++;
      }
      //*****************************************************************
      // NORMALIZATION of feature matrix
      // NOTE(review): the column count is hardcoded to 5 here even though
      // FeatMat was allocated with numberOfFeat columns — confirm these
      // always agree, otherwise trailing features are silently dropped.
      cv::Mat FeatMatreduced = FeatMat.colRange(0, 5);
      if (DEBUG) {cout << "Before normalization " << endl; }
      vector<double> meansVectorCurrentPair = computeMean(FeatMatreduced);
      vector<double> stdVectorCurrentPair = computeStd(FeatMatreduced, meansVectorCurrentPair);
      meanNormalizationPair_currentRef.push_back(meansVectorCurrentPair);
      stdNormalizationPair_currentRef.push_back(stdVectorCurrentPair);
      vector<double> maxVectorCurrentPair = computeMax(FeatMatreduced);
      vector<double> minVectorCurrentPair = computeMin(FeatMatreduced);
      maxtmp.push_back(maxVectorCurrentPair);
      mintmp.push_back(minVectorCurrentPair);
      // compute weight based on STD of features
      // NOTE(review): 'weight' is computed but never used below — confirm
      // computeStdWeights has no required side effects before removing.
      double weight = computeStdWeights(FeatMatreduced, maxVectorCurrentPair, minVectorCurrentPair);
      // Select the normalization scheme: 1 = z-score, 2 = min-max,
      // anything else = no normalization (plain copy).
      cv::Mat normalizedFeatMat;
      if (NORMALIZEPAIR == 1) {
        normalizedFeatMat = doNormalization(FeatMatreduced, meansVectorCurrentPair, stdVectorCurrentPair);
      } else if (NORMALIZEPAIR == 2) {
        cout << "Before normalization Min Max do Nornmalization" << endl;
        normalizedFeatMat = doNormalizationMinMax(FeatMatreduced, maxVectorCurrentPair, minVectorCurrentPair);
      } else {
        normalizedFeatMat = FeatMatreduced.clone();
      }
      //****************************************************************
      if (DEBUG) {
        cout << endl << endl << "Object : " << i << "and " << j << endl
             << "The feature matrix dim is " << normalizedFeatMat.size() << endl;
        cout << endl << endl << "The feature matrix N ROWS is "
             << normalizedFeatMat.rows << endl;
        cout << endl << "The features are " << endl << normalizedFeatMat << endl;
      }
      // Training EM model for the current object pair.
      cv::EM em_model(nclusters);
      cout << endl << endl << "The feature matrix N ROWS is "
           << normalizedFeatMat.rows << endl;
      // Constraint: trains the GMM model for object pair features ONLY if
      // the number of samples is sufficient (more than 14 scenes)!
      if (FeatMat.rows > 14) {
        if (TESTFLAG) {
          std::cout << "Training the EM model for: " << "Objects : "
                    << i << " and " << j << endl << std::endl;
        }
        em_model.train ( normalizedFeatMat );
        if (DEBUG) {
          std::cout << "Getting the parameters of the learned GMM model."
                    << std::endl;
        }
      } else {
        // Too few samples: the untrained model is still stored so the
        // pair indexing of internalEMvector stays aligned with (i, j).
        std::cout << "NOT Training the EM model for: " << "Objects : "
                  << i << " and " << j << endl << std::endl;
      }
      internalEMvector.push_back(em_model);
    }
    learnedModelPairObject.push_back(internalEMvector);
    meanNormalizationPair.push_back(meanNormalizationPair_currentRef);
    stdNormalizationPair.push_back(stdNormalizationPair_currentRef);
    minFeatPair.push_back(mintmp);
    maxFeatPair.push_back(maxtmp);
  }
}