void MDDAGClassifier::run(const string& dataFileName, const string& shypFileName,
                          int numIterations, const string& outResFileName,
                          int numRanksEnclosed)
{
    InputData* pData = loadInputData(dataFileName, shypFileName);

    if (_verbose > 0)
        cout << "Loading strong hypothesis..." << flush;

    // The class that loads the weak hypotheses
    UnSerialization us;

    // Where to put the weak hypotheses
    vector<BaseLearner*> weakHypotheses;

    // loads them
    us.loadHypotheses(shypFileName, weakHypotheses, pData);

    // where the results go
    vector< ExampleResults* > results;

    if (_verbose > 0)
        cout << "Classifying..." << flush;

    // get the results
    computeResults( pData, weakHypotheses, results, numIterations );

    const int numClasses = pData->getNumClasses();

    if (_verbose > 0)
    {
        // well.. if verbose = 0 no results are displayed! :)
        cout << "Done!" << endl;

        vector< vector<float> > rankedError(numRanksEnclosed);

        // Get the per-class error for the numRanksEnclosed-th ranks
        for (int i = 0; i < numRanksEnclosed; ++i)
            getClassError( pData, results, rankedError[i], i );

        // output it
        cout << endl;
        cout << "Error Summary" << endl;
        cout << "=============" << endl;

        for ( int l = 0; l < numClasses; ++l )
        {
            // first rank (winner): rankedError[0]
            cout << "Class '" << pData->getClassMap().getNameFromIdx(l) << "': "
                 << setprecision(4) << rankedError[0][l] * 100 << "%";

            // output the others on its side
            if (numRanksEnclosed > 1 && _verbose > 1)
            {
                cout << " (";
                for (int i = 1; i < numRanksEnclosed; ++i)
                    cout << " " << i+1 << ":[" << setprecision(4)
                         << rankedError[i][l] * 100 << "%]";
                cout << " )";
            }

            cout << endl;
        }

        // the overall error
        cout << "\n--> Overall Error: "
             << setprecision(4) << getOverallError(pData, results, 0) * 100 << "%";

        // output the others on its side
        if (numRanksEnclosed > 1 && _verbose > 1)
        {
            cout << " (";
            for (int i = 1; i < numRanksEnclosed; ++i)
                cout << " " << i+1 << ":[" << setprecision(4)
                     << getOverallError(pData, results, i) * 100 << "%]";
            cout << " )";
        }

        cout << endl;

    } // verbose

    // If asked, output the results
    if ( !outResFileName.empty() )
    {
        const int numExamples = pData->getNumExamples();
        ofstream outRes(outResFileName.c_str());

        outRes << "Instance" << '\t' << "Forecast" << '\t' << "Labels" << '\n';

        string exampleName;
        for (int i = 0; i < numExamples; ++i)
        {
            // output the name if it exists, otherwise the number
            // of the example
            exampleName = pData->getExampleName(i);
            if ( exampleName.empty() )
                outRes << i << '\t';
            else
                outRes << exampleName << '\t';

            // output the predicted class
            outRes << pData->getClassMap().getNameFromIdx( results[i]->getWinner().first ) << '\t';

            outRes << '|';
            vector<Label>& labels = pData->getLabels(i);
            for (vector<Label>::iterator lIt = labels.begin(); lIt != labels.end(); ++lIt)
            {
                if (lIt->y > 0)
                    outRes << ' ' << pData->getClassMap().getNameFromIdx(lIt->idx);
            }
            outRes << endl;
        }

        if (_verbose > 0)
            cout << "\nPredictions written to file <" << outResFileName << ">!" << endl;
    }

    // delete the input data file
    if (pData)
        delete pData;

    vector<ExampleResults*>::iterator it;
    for (it = results.begin(); it != results.end(); ++it)
        delete (*it);
}
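// computeResults() is defined elsewhere in this class. As a reference, the
// vote accumulation it is expected to perform is sketched below; it mirrors
// the inline copy inside saveLikelihoods() further down. The helper name
// accumulateVotes() is illustrative only and is not part of the actual API.
static void accumulateVotes(InputData* pData,
                            const vector<BaseLearner*>& weakHypotheses,
                            vector<ExampleResults*>& results,
                            int numIterations)
{
    const int numClasses  = pData->getNumClasses();
    const int numExamples = pData->getNumExamples();

    results.clear();
    results.reserve(numExamples);
    for (int i = 0; i < numExamples; ++i)
        results.push_back( new ExampleResults(i, numClasses) );

    vector<BaseLearner*>::const_iterator whyIt;
    int t;

    // add the alpha-weighted vote of each weak hypothesis, class by class;
    // the winner for example i is then the argmax over l of the accumulated votes
    for (whyIt = weakHypotheses.begin(), t = 0;
         whyIt != weakHypotheses.end() && t < numIterations; ++whyIt, ++t)
    {
        BaseLearner* currWeakHyp = *whyIt;
        AlphaReal alpha = currWeakHyp->getAlpha();

        for (int i = 0; i < numExamples; ++i)
        {
            vector<AlphaReal>& currVotesVector = results[i]->getVotesVector();
            for (int l = 0; l < numClasses; ++l)
                currVotesVector[l] += alpha * currWeakHyp->classify(pData, i, l);
        }
    }
}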
void MDDAGClassifier::saveCalibratedPosteriors(const string& dataFileName,
                                               const string& shypFileName,
                                               const string& outFileName,
                                               int numIterations)
{
    InputData* pData = loadInputData(dataFileName, shypFileName);

    if (_verbose > 0)
        cout << "Loading strong hypothesis..." << flush;

    // The class that loads the weak hypotheses
    UnSerialization us;

    // Where to put the weak hypotheses
    vector<BaseLearner*> weakHypotheses;

    // loads them
    us.loadHypotheses(shypFileName, weakHypotheses, pData);

    // where the results go
    vector< ExampleResults* > results;

    if (_verbose > 0)
        cout << "Classifying..." << flush;

    // get the results
    computeResults( pData, weakHypotheses, results, numIterations );

    const int numClasses = pData->getNumClasses();
    const int numExamples = pData->getNumExamples();

    ofstream outFile(outFileName.c_str());
    string exampleName;

    if (_verbose > 0)
        cout << "Output posteriors..." << flush;

    for (int i = 0; i < numExamples; ++i)
    {
        // output the name if it exists, otherwise the number
        // of the example
        exampleName = pData->getExampleName(i);
        if ( !exampleName.empty() )
            outFile << exampleName << ',';

        // output the posteriors
        outFile << results[i]->getVotesVector()[0];
        for (int l = 1; l < numClasses; ++l)
            outFile << ',' << results[i]->getVotesVector()[l];
        outFile << '\n';
    }

    if (_verbose > 0)
        cout << "Done!" << endl;

    if (_verbose > 1)
    {
        cout << "\nClass order (You can change it in the header of the data file):" << endl;
        for (int l = 0; l < numClasses; ++l)
            cout << "- " << pData->getClassMap().getNameFromIdx(l) << endl;
    }

    // delete the input data file
    if (pData)
        delete pData;

    vector<ExampleResults*>::iterator it;
    for (it = results.begin(); it != results.end(); ++it)
        delete (*it);
}
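// Note that the values written above are the raw accumulated votes f_l(x),
// not probabilities. If posteriors in [0,1] are needed, a softmax over the
// votes can be applied first, exactly as saveLikelihoods() does below. A
// minimal sketch (the helper name votesToPosteriors() is illustrative only):
static void votesToPosteriors(const vector<AlphaReal>& votes,
                              vector<AlphaReal>& posteriors)
{
    posteriors.resize( votes.size() );

    // exponentiate and normalize, guarding against a vanishing denominator
    AlphaReal sumExp = 0.0;
    for (size_t l = 0; l < votes.size(); ++l)
    {
        posteriors[l] = exp( votes[l] );
        sumExp += posteriors[l];
    }

    if ( sumExp > numeric_limits<AlphaReal>::epsilon() )
        for (size_t l = 0; l < votes.size(); ++l)
            posteriors[l] /= sumExp;
}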
void MDDAGClassifier::saveLikelihoods(const string& dataFileName,
                                      const string& shypFileName,
                                      const string& outFileName,
                                      int numIterations)
{
    InputData* pData = loadInputData(dataFileName, shypFileName);

    if (_verbose > 0)
        cout << "Loading strong hypothesis..." << flush;

    // The class that loads the weak hypotheses
    UnSerialization us;

    // Where to put the weak hypotheses
    vector<BaseLearner*> weakHypotheses;

    // loads them
    us.loadHypotheses(shypFileName, weakHypotheses, pData);

    // where the results go
    vector< ExampleResults* > results;

    if (_verbose > 0)
        cout << "Classifying..." << flush;

    const int numClasses = pData->getNumClasses();
    const int numExamples = pData->getNumExamples();

    ofstream outFile(outFileName.c_str());
    string exampleName;

    if (_verbose > 0)
        cout << "Output likelihoods..." << flush;

    // get the results; this is an inline version of
    // computeResults( pData, weakHypotheses, results, numIterations )
    /////////////////////////////////////////////////////////////////////
    assert( !weakHypotheses.empty() );

    // Initialize the output info
    OutputInfo* pOutInfo = NULL;
    if ( !_outputInfoFile.empty() )
        pOutInfo = new OutputInfo(_outputInfoFile, "err");

    // Creating the results structures. See file Structures.h for the
    // PointResults structure
    results.clear();
    results.reserve(numExamples);
    for (int i = 0; i < numExamples; ++i)
        results.push_back( new ExampleResults(i, numClasses) );

    // sum of votes for the classes
    vector< AlphaReal > votesForExamples( numClasses );
    vector< AlphaReal > expVotesForExamples( numClasses );

    // iterator over all the weak hypotheses
    vector<BaseLearner*>::const_iterator whyIt;
    int t;

    // initialize only if the output info was requested; an unguarded call
    // would dereference a NULL pointer when no output info file is given
    if ( pOutInfo )
        pOutInfo->initialize( pData );

    // for every weak hypothesis: 1..T
    for (whyIt = weakHypotheses.begin(), t = 0;
         whyIt != weakHypotheses.end() && t < numIterations; ++whyIt, ++t)
    {
        BaseLearner* currWeakHyp = *whyIt;
        AlphaReal alpha = currWeakHyp->getAlpha();

        // for every point
        for (int i = 0; i < numExamples; ++i)
        {
            // a reference for clarity and speed
            vector<AlphaReal>& currVotesVector = results[i]->getVotesVector();

            // for every class
            for (int l = 0; l < numClasses; ++l)
                currVotesVector[l] += alpha * currWeakHyp->classify(pData, i, l);
        }

        // if needed, output the step-by-step information
        if ( pOutInfo )
        {
            pOutInfo->outputIteration(t);
            pOutInfo->outputCustom(pData, currWeakHyp);

            // Margins and edge require an update of the weights,
            // therefore I keep them out for the moment
            //outInfo.outputMargins(pData, currWeakHyp);
            //outInfo.outputEdge(pData, currWeakHyp);
            pOutInfo->endLine();
        }

        // calculate likelihoods from votes
        fill( votesForExamples.begin(), votesForExamples.end(), 0.0 );
        AlphaReal lLambda = 0.0;

        for (int i = 0; i < numExamples; ++i)
        {
            // a reference for clarity and speed
            vector<AlphaReal>& currVotesVector = results[i]->getVotesVector();
            AlphaReal sumExp = 0.0;

            // for every class
            for (int l = 0; l < numClasses; ++l)
            {
                expVotesForExamples[l] = exp( currVotesVector[l] );
                sumExp += expVotesForExamples[l];
            }

            if ( sumExp > numeric_limits<AlphaReal>::epsilon() )
            {
                for (int l = 0; l < numClasses; ++l)
                    expVotesForExamples[l] /= sumExp;
            }

            Example ex = pData->getExample( results[i]->getIdx() );
            vector<Label> labs = ex.getLabels();
            AlphaReal m = numeric_limits<AlphaReal>::infinity();

            for (int l = 0; l < numClasses; ++l)
            {
                if ( labs[l].y > 0 )
                {
                    if ( expVotesForExamples[l] > numeric_limits<AlphaReal>::epsilon() )
                    {
                        AlphaReal logVal = log( expVotesForExamples[l] );

                        if ( logVal != m )
                            lLambda += ( ( 1.0 / (AlphaReal)numExamples ) * logVal );
                    }
                }
            }
        }

        outFile << t << "\t" << lLambda << '\n';
        outFile.flush();
    }

    if (pOutInfo)
        delete pOutInfo;

    // computeResults( pData, weakHypotheses, results, numIterations );
    ///////////////////////////////////////////////////////////////////////////////////

    /*
    for (int i = 0; i < numExamples; ++i)
    {
        // output the name if it exists, otherwise the number
        // of the example
        exampleName = pData->getExampleName(i);
        if ( !exampleName.empty() )
            outFile << exampleName << ',';

        // output the posteriors
        outFile << results[i]->getVotesVector()[0];
        for (int l = 1; l < numClasses; ++l)
            outFile << ',' << results[i]->getVotesVector()[l];
        outFile << '\n';
    }
    */

    if (_verbose > 0)
        cout << "Done!" << endl;

    if (_verbose > 1)
    {
        cout << "\nClass order (You can change it in the header of the data file):" << endl;
        for (int l = 0; l < numClasses; ++l)
            cout << "- " << pData->getClassMap().getNameFromIdx(l) << endl;
    }

    // delete the input data file
    if (pData)
        delete pData;

    vector<ExampleResults*>::iterator it;
    for (it = results.begin(); it != results.end(); ++it)
        delete (*it);
}
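// For reference, the quantity lLambda written at each iteration t above is
// the average log-likelihood of the correct labels under a softmax over the
// accumulated votes f_t:
//
//   lambda_t = (1/n) * sum_i sum_{l : y_{i,l} > 0}
//                      log( exp(f_t(x_i, l)) / sum_{l'} exp(f_t(x_i, l')) )
//
// so the output file contains one (t, lambda_t) pair per line.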
void MDDAGClassifier::saveConfusionMatrix(const string& dataFileName,
                                          const string& shypFileName,
                                          const string& outFileName)
{
    InputData* pData = loadInputData(dataFileName, shypFileName);

    if (_verbose > 0)
        cout << "Loading strong hypothesis..." << flush;

    // The class that loads the weak hypotheses
    UnSerialization us;

    // Where to put the weak hypotheses
    vector<BaseLearner*> weakHypotheses;

    // loads them
    us.loadHypotheses(shypFileName, weakHypotheses, pData);

    // where the results go
    vector< ExampleResults* > results;

    if (_verbose > 0)
        cout << "Classifying..." << flush;

    // get the results
    computeResults( pData, weakHypotheses, results, (int)weakHypotheses.size() );

    const int numClasses = pData->getNumClasses();
    const int numExamples = pData->getNumExamples();

    ofstream outFile(outFileName.c_str());

    //////////////////////////////////////////////////////////////////////////

    for (int l = 0; l < numClasses; ++l)
        outFile << '\t' << pData->getClassMap().getNameFromIdx(l);
    outFile << endl;

    for (int l = 0; l < numClasses; ++l)
    {
        vector<int> winnerCount(numClasses, 0);
        for (int i = 0; i < numExamples; ++i)
        {
            if ( pData->hasPositiveLabel(i, l) )
                ++winnerCount[ results[i]->getWinner().first ];
        }

        // class name
        outFile << pData->getClassMap().getNameFromIdx(l);

        for (int j = 0; j < numClasses; ++j)
            outFile << '\t' << winnerCount[j];
        outFile << endl;
    }

    //////////////////////////////////////////////////////////////////////////

    if (_verbose > 0)
        cout << "Done!" << endl;

    // delete the input data file
    if (pData)
        delete pData;

    vector<ExampleResults*>::iterator it;
    for (it = results.begin(); it != results.end(); ++it)
        delete (*it);
}
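// With three hypothetical classes "a", "b" and "c", the tab-separated file
// written above would look like this (counts are made up for illustration):
//
//       a    b    c
//   a   50   3    1
//   b   2    61   0
//   c   4    2    48
//
// where the row is the true class and the column is the predicted winner.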
void MDDAGClassifier::printConfusionMatrix(const string& dataFileName,
                                           const string& shypFileName)
{
    InputData* pData = loadInputData(dataFileName, shypFileName);

    if (_verbose > 0)
        cout << "Loading strong hypothesis..." << flush;

    // The class that loads the weak hypotheses
    UnSerialization us;

    // Where to put the weak hypotheses
    vector<BaseLearner*> weakHypotheses;

    // loads them
    us.loadHypotheses(shypFileName, weakHypotheses, pData);

    // where the results go
    vector< ExampleResults* > results;

    if (_verbose > 0)
        cout << "Classifying..." << flush;

    // get the results
    computeResults( pData, weakHypotheses, results, (int)weakHypotheses.size() );

    const int numClasses = pData->getNumClasses();
    const int numExamples = pData->getNumExamples();

    if (_verbose > 0)
        cout << "Done!" << endl;

    const int colSize = 7;

    if (_verbose > 0)
    {
        cout << "Raw Confusion Matrix:\n";
        cout << setw(colSize) << "Truth ";

        for (int l = 0; l < numClasses; ++l)
            cout << setw(colSize) << nor_utils::getAlphanumeric(l);

        cout << "\nClassification\n";

        for (int l = 0; l < numClasses; ++l)
        {
            vector<int> winnerCount(numClasses, 0);
            for (int i = 0; i < numExamples; ++i)
            {
                if ( pData->hasPositiveLabel(i, l) )
                    ++winnerCount[ results[i]->getWinner().first ];
            }

            // class
            cout << setw(colSize) << " " << nor_utils::getAlphanumeric(l);

            for (int j = 0; j < numClasses; ++j)
                cout << setw(colSize) << winnerCount[j];
            cout << endl;
        }
    }

    cout << "\nMatrix Key:\n";

    // Print the legend
    for (int l = 0; l < numClasses; ++l)
        cout << setw(5) << nor_utils::getAlphanumeric(l) << ": "
             << pData->getClassMap().getNameFromIdx(l) << "\n";

    // delete the input data file
    if (pData)
        delete pData;

    vector<ExampleResults*>::iterator it;
    for (it = results.begin(); it != results.end(); ++it)
        delete (*it);
}
void AdaBoostMHClassifier::saveROC(const string& dataFileName, const string& shypFileName,
                                   const string& outFileName, int numIterations)
{
    InputData* pData = loadInputData(dataFileName, shypFileName);
    ofstream outFile(outFileName.c_str());

    if ( !outFile.is_open() )
    {
        cout << "Cannot open output file!" << endl;
        exit( -1 );
    }

    if (_verbose > 0)
        cout << "Loading strong hypothesis..." << flush;

    // The class that loads the weak hypotheses
    UnSerialization us;

    // Where to put the weak hypotheses
    vector<BaseLearner*> weakHypotheses;

    // loads them
    us.loadHypotheses(shypFileName, weakHypotheses, pData);
    weakHypotheses.resize( numIterations );

    // where the results go
    vector< ExampleResults* > results;

    if (_verbose > 0)
        cout << "Classifying..." << flush;

    // get the results
    computeResults( pData, weakHypotheses, results, (int)weakHypotheses.size() );

    const int numClasses = pData->getNumClasses();
    const int numExamples = pData->getNumExamples();

    if (_verbose > 0)
        cout << "Done!" << endl;

    // sort the examples by decreasing score on class 0
    vector< pair<int, double> > sortedExample( numExamples );

    for ( int i = 0; i < numExamples; i++ )
    {
        sortedExample[i].first  = i;
        sortedExample[i].second = results[i]->getVotesVector()[0];
    }
    sort( sortedExample.begin(), sortedExample.end(),
          nor_utils::comparePair< 2, int, double, greater<double> >() );

    vector<double> positiveWeights( numExamples );
    double sumOfPositiveWeights = 0.0;

    vector<double> negativeWeights( numExamples );
    double sumOfNegativeWeights = 0.0;

    fill( positiveWeights.begin(), positiveWeights.end(), 0.0 );
    fill( negativeWeights.begin(), negativeWeights.end(), 0.0 );

    string className = pData->getClassMap().getNameFromIdx( 0 );

    vector<Label>& labels = pData->getLabels( sortedExample[0].first );
    vector<Label>::iterator labIt = find( labels.begin(), labels.end(), 0 );

    if ( labIt != labels.end() )
    {
        if ( labIt->y > 0.0 )
        {
            positiveWeights[0] = labIt->initialWeight;
            sumOfPositiveWeights += labIt->initialWeight;
        }
        else
        {
            negativeWeights[0] = labIt->initialWeight;
            sumOfNegativeWeights += labIt->initialWeight;
        }
    }

    for ( int i = 1; i < numExamples; i++ )
    {
        // re-bind to the current example's labels; assigning through the
        // reference declared above would overwrite the first example's
        // label vector instead of moving to the next example
        vector<Label>& currLabels = pData->getLabels( sortedExample[i].first );
        labIt = find( currLabels.begin(), currLabels.end(), 0 );

        if ( labIt != currLabels.end() )
        {
            if ( labIt->y > 0.0 )
            {
                negativeWeights[i] = negativeWeights[i-1];
                positiveWeights[i] = positiveWeights[i-1] + labIt->initialWeight;
                sumOfPositiveWeights += labIt->initialWeight;
            }
            else
            {
                positiveWeights[i] = positiveWeights[i-1];
                negativeWeights[i] = negativeWeights[i-1] + labIt->initialWeight;
                sumOfNegativeWeights += labIt->initialWeight;
            }
        }
        else
        {
            positiveWeights[i] = positiveWeights[i-1];
            negativeWeights[i] = negativeWeights[i-1];
        }
    }

    outFile << "Class name: " << className << endl;
    for ( int i = 0; i < numExamples; i++ )
    {
        outFile << sortedExample[i].first << " ";

        // true positive rate
        outFile << ( positiveWeights[i] / sumOfPositiveWeights ) << " ";

        // false positive rate
        outFile << ( negativeWeights[i] / sumOfNegativeWeights ) << endl;
    }

    outFile.close();

    // delete the input data file
    if (pData)
        delete pData;

    vector<ExampleResults*>::iterator it;
    for (it = results.begin(); it != results.end(); ++it)
        delete (*it);
}
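// For reference: after sorting the examples by decreasing score on class 0,
// the two ratios written per line are cumulative weight fractions, i.e. at
// cut-off position i
//
//   TPR(i) = sum over j <= i with y_j > 0 of w_j  /  total positive weight
//   FPR(i) = sum over j <= i with y_j < 0 of w_j  /  total negative weight
//
// which are exactly the coordinates of the (weighted) ROC curve for class 0.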
void FilterBoostLearner::run(const nor_utils::Args& args)
{
    // load the arguments
    this->getArgs(args);

    time_t startTime, currentTime;
    time(&startTime);

    // get the registered weak learner (type from name)
    BaseLearner* pWeakHypothesisSource =
        BaseLearner::RegisteredLearners().getLearner(_baseLearnerName);

    // initialize learning options; normally it's done in the strong loop
    // also, here we do it for Product learners, so input data can be created
    pWeakHypothesisSource->initLearningOptions(args);

    BaseLearner* pConstantWeakHypothesisSource =
        BaseLearner::RegisteredLearners().getLearner("ConstantLearner");

    // get the training input data, and load it
    InputData* pTrainingData = pWeakHypothesisSource->createInputData();
    pTrainingData->initOptions(args);
    pTrainingData->load(_trainFileName, IT_TRAIN, _verbose);

    const int numClasses = pTrainingData->getNumClasses();
    const int numExamples = pTrainingData->getNumExamples();

    // initialize the margins variable
    _margins.resize( numExamples );
    for ( int i = 0; i < numExamples; i++ )
    {
        _margins[i].resize( numClasses );
        fill( _margins[i].begin(), _margins[i].end(), 0.0 );
    }

    // get the testing input data, and load it
    InputData* pTestData = NULL;
    if ( !_testFileName.empty() )
    {
        pTestData = pWeakHypothesisSource->createInputData();
        pTestData->initOptions(args);
        pTestData->load(_testFileName, IT_TEST, _verbose);
    }

    // The output information object
    OutputInfo* pOutInfo = NULL;

    if ( !_outputInfoFile.empty() )
    {
        // Baseline: constant classifier - goes into the 0th iteration
        BaseLearner* pConstantWeakHypothesis = pConstantWeakHypothesisSource->create();
        pConstantWeakHypothesis->initLearningOptions(args);
        pConstantWeakHypothesis->setTrainingData(pTrainingData);
        AlphaReal constantEnergy = pConstantWeakHypothesis->run();

        pOutInfo = new OutputInfo(args);
        pOutInfo->initialize(pTrainingData);

        updateMargins( pTrainingData, pConstantWeakHypothesis );

        if (pTestData)
            pOutInfo->initialize(pTestData);

        pOutInfo->outputHeader(pTrainingData->getClassMap());

        pOutInfo->outputIteration(-1);
        pOutInfo->outputCustom(pTrainingData, pConstantWeakHypothesis);

        if (pTestData)
        {
            pOutInfo->separator();
            pOutInfo->outputCustom(pTestData, pConstantWeakHypothesis);
        }

        pOutInfo->outputCurrentTime();
        pOutInfo->endLine();

        pOutInfo->initialize(pTrainingData);
        if (pTestData)
            pOutInfo->initialize(pTestData);
    }

    // reload the previously found weak learners if -resume is set.
    // otherwise just return 0
    int startingIteration = resumeWeakLearners(pTrainingData);

    Serialization ss(_shypFileName, _isShypCompressed);
    ss.writeHeader(_baseLearnerName); // this must go after resumeProcess has been called

    // perform the resuming if necessary. If not, it will just return
    resumeProcess(ss, pTrainingData, pTestData, pOutInfo);

    if (_verbose == 1)
        cout << "Learning in progress..." << endl;

    ///////////////////////////////////////////////////////////////////////
    // Starting the FilterBoost main loop
    ///////////////////////////////////////////////////////////////////////
    for (int t = startingIteration; t < _numIterations; ++t)
    {
        if (_verbose > 1)
            cout << "------- WORKING ON ITERATION " << (t+1) << " -------" << endl;

        // create the weak learner
        BaseLearner* pWeakHypothesis;
        BaseLearner* pConstantWeakHypothesis;
        pWeakHypothesis = pWeakHypothesisSource->create();
        pWeakHypothesis->initLearningOptions(args);
        //pTrainingData->clearIndexSet();
        pWeakHypothesis->setTrainingData(pTrainingData);

        AlphaReal edge, energy = 0.0;

        // create the constant learner
        pConstantWeakHypothesis = pConstantWeakHypothesisSource->create();
        pConstantWeakHypothesis->initLearningOptions(args);
        pConstantWeakHypothesis->setTrainingData(pTrainingData);
        AlphaReal constantEdge = -numeric_limits<AlphaReal>::max();

        int currentNumberOfUsedData = static_cast<int>(_Cn * log(t + 3.0));

        if ( _onlineWeakLearning )
        {
            // check whether the weak learner supports stochastic updates;
            // casting to a reference makes a failed cast throw bad_cast
            // (a failed pointer cast would just return NULL)
            try
            {
                StochasticLearner& stochasticLearner =
                    dynamic_cast<StochasticLearner&>(*pWeakHypothesis);
                StochasticLearner& stochasticConstantWeakHypothesis =
                    dynamic_cast<StochasticLearner&>(*pConstantWeakHypothesis);

                stochasticLearner.initLearning();
                stochasticConstantWeakHypothesis.initLearning();

                if (_verbose > 1)
                    cout << "Number of random instances: \t" << currentNumberOfUsedData << endl;

                // set the weights
                setWeightToMargins(pTrainingData);

                // learning
                for (int i = 0; i < currentNumberOfUsedData; ++i)
                {
                    int randomIndex = (rand() % pTrainingData->getNumExamples());
                    //int randomIndex = getRandomIndex();
                    stochasticLearner.update(randomIndex);
                    stochasticConstantWeakHypothesis.update(randomIndex);
                }
                stochasticLearner.finishLearning();
                stochasticConstantWeakHypothesis.finishLearning();
            }
            catch (bad_cast& e)
            {
                cerr << "The weak learner must be a StochasticLearner!" << endl;
                exit(-1);
            }
        }
        else
        {
            filter( pTrainingData, currentNumberOfUsedData );
            if ( pTrainingData->getNumExamples() < 2 )
            {
                filter( pTrainingData, currentNumberOfUsedData, false );
            }

            if (_verbose > 1)
            {
                cout << "--> Size of training data = " << pTrainingData->getNumExamples() << endl;
            }

            energy = pWeakHypothesis->run();
            pConstantWeakHypothesis->run();
        }

        // estimate the edge
        filter( pTrainingData, currentNumberOfUsedData, false );
        edge = pWeakHypothesis->getEdge(true) / 2.0;
        constantEdge = pConstantWeakHypothesis->getEdge() / 2.0;

        if ( constantEdge > edge )
        {
            delete pWeakHypothesis;
            pWeakHypothesis = pConstantWeakHypothesis;
            edge = constantEdge;
        }
        else
        {
            delete pConstantWeakHypothesis;
        }

        // calculate alpha
        AlphaReal alpha = 0.0;
        alpha = 0.5 * log( ( 1 + edge ) / ( 1 - edge ) );
        pWeakHypothesis->setAlpha( alpha );
        _sumAlpha += alpha;

        if (_verbose > 1)
            cout << "Weak learner: " << pWeakHypothesis->getName() << endl;

        // Output the step-by-step information
        pTrainingData->clearIndexSet();
        printOutputInfo(pOutInfo, t, pTrainingData, pTestData, pWeakHypothesis);

        // Updates the weights and returns the edge
        //AlphaReal gamma = updateWeights(pTrainingData, pWeakHypothesis);

        if (_verbose > 1)
        {
            cout << setprecision(5)
                 << "--> Alpha = " << pWeakHypothesis->getAlpha() << endl
                 << "--> Edge = " << edge << endl
                 << "--> Energy = " << energy << endl
                 // << "--> ConstantEnergy = " << constantEnergy << endl
                 // << "--> difference = " << (energy - constantEnergy) << endl
                 ;
        }

        // update the margins
        //saveMargins();
        updateMargins( pTrainingData, pWeakHypothesis );

        // append the current weak learner to the strong hypothesis file,
        // that is, serialize it.
        ss.appendHypothesis(t, pWeakHypothesis);

        // Add it to the internal list of weak hypotheses
        _foundHypotheses.push_back(pWeakHypothesis);

        // check if the time limit has been reached
        if (_maxTime > 0)
        {
            time( &currentTime );
            float diff = difftime(currentTime, startTime); // difftime is in seconds
            diff /= 60; // = minutes

            if (diff > _maxTime)
            {
                if (_verbose > 0)
                    cout << "Time limit of " << _maxTime << " minutes has been reached!" << endl;
                break;
            }
        } // check for maxtime

        // ownership of pWeakHypothesis has been transferred to
        // _foundHypotheses, so it must not be deleted here
    } // loop on iterations
    /////////////////////////////////////////////////////////

    // write the footer of the strong hypothesis file
    ss.writeFooter();

    // Free the two input data objects
    if (pTrainingData)
        delete pTrainingData;
    if (pTestData)
        delete pTestData;

    if (pOutInfo)
        delete pOutInfo;

    if (_verbose > 0)
        cout << "Learning completed." << endl;
}
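// For reference, the coefficient computed above is the usual AdaBoost formula
// expressed in terms of the halved edge gamma estimated on a fresh filtered
// sample of size _Cn * log(t+3):
//
//   alpha_t = (1/2) * ln( (1 + gamma) / (1 - gamma) )
//
// Estimating gamma on freshly filtered data, rather than on the full weighted
// training set, is what distinguishes FilterBoost from plain AdaBoost here.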
void FilterBoostLearner::run(const nor_utils::Args& args)
{
    // load the arguments
    this->getArgs(args);

    time_t startTime, currentTime;
    time(&startTime);

    // get the registered weak learner (type from name)
    BaseLearner* pWeakHypothesisSource =
        BaseLearner::RegisteredLearners().getLearner(_baseLearnerName);

    // initialize learning options; normally it's done in the strong loop
    // also, here we do it for Product learners, so input data can be created
    pWeakHypothesisSource->initLearningOptions(args);

    BaseLearner* pConstantWeakHypothesisSource =
        BaseLearner::RegisteredLearners().getLearner("ConstantLearner");

    // get the training input data, and load it
    InputData* pTrainingData = pWeakHypothesisSource->createInputData();
    pTrainingData->initOptions(args);
    pTrainingData->load(_trainFileName, IT_TRAIN, _verbose);

    const int numClasses = pTrainingData->getNumClasses();
    const int numExamples = pTrainingData->getNumExamples();

    // initialize the margins variable
    _margins.resize( numExamples );
    for ( int i = 0; i < numExamples; i++ )
    {
        _margins[i].resize( numClasses );
        fill( _margins[i].begin(), _margins[i].end(), 0.0 );
    }

    // get the testing input data, and load it
    InputData* pTestData = NULL;
    if ( !_testFileName.empty() )
    {
        pTestData = pWeakHypothesisSource->createInputData();
        pTestData->initOptions(args);
        pTestData->load(_testFileName, IT_TEST, _verbose);
    }

    // The output information object
    OutputInfo* pOutInfo = NULL;

    if ( !_outputInfoFile.empty() )
    {
        // Baseline: constant classifier - goes into the 0th iteration
        BaseLearner* pConstantWeakHypothesis = pConstantWeakHypothesisSource->create();
        pConstantWeakHypothesis->initLearningOptions(args);
        pConstantWeakHypothesis->setTrainingData(pTrainingData);
        float constantEnergy = pConstantWeakHypothesis->run();

        pOutInfo = new OutputInfo(_outputInfoFile);
        pOutInfo->initialize(pTrainingData);

        updateMargins( pTrainingData, pConstantWeakHypothesis );

        if (pTestData)
            pOutInfo->initialize(pTestData);

        pOutInfo->outputHeader();

        pOutInfo->outputIteration(-1);
        pOutInfo->outputError(pTrainingData, pConstantWeakHypothesis);

        if (pTestData)
            pOutInfo->outputError(pTestData, pConstantWeakHypothesis);

        /*
        pOutInfo->outputMargins(pTrainingData, pConstantWeakHypothesis);
        pOutInfo->outputEdge(pTrainingData, pConstantWeakHypothesis);

        if (pTestData)
            pOutInfo->outputMargins(pTestData, pConstantWeakHypothesis);

        pOutInfo->outputMAE(pTrainingData);

        if (pTestData)
            pOutInfo->outputMAE(pTestData);
        */

        pOutInfo->outputCurrentTime();
        pOutInfo->endLine();

        pOutInfo->initialize(pTrainingData);
        if (pTestData)
            pOutInfo->initialize(pTestData);
    }

    // reload the previously found weak learners if -resume is set.
    // otherwise just return 0
    int startingIteration = resumeWeakLearners(pTrainingData);

    Serialization ss(_shypFileName, _isShypCompressed);
    ss.writeHeader(_baseLearnerName); // this must go after resumeProcess has been called

    // perform the resuming if necessary. If not, it will just return
    resumeProcess(ss, pTrainingData, pTestData, pOutInfo);

    if (_verbose == 1)
        cout << "Learning in progress..." << endl;

    ///////////////////////////////////////////////////////////////////////
    // Starting the FilterBoost main loop
    ///////////////////////////////////////////////////////////////////////
    for (int t = startingIteration; t < _numIterations; ++t)
    {
        if (_verbose > 1)
            cout << "------- WORKING ON ITERATION " << (t+1) << " -------" << endl;

        filter( pTrainingData, (int)(_Cn * log(t + 2.0)) );
        if ( pTrainingData->getNumExamples() < 2 )
        {
            filter( pTrainingData, (int)(_Cn * log(t + 2.0)), false );
        }

        if (_verbose > 1)
        {
            cout << "--> Size of training data = " << pTrainingData->getNumExamples() << endl;
        }

        BaseLearner* pWeakHypothesis = pWeakHypothesisSource->create();
        pWeakHypothesis->initLearningOptions(args);
        //pTrainingData->clearIndexSet();
        pWeakHypothesis->setTrainingData(pTrainingData);
        float energy = pWeakHypothesis->run();

        BaseLearner* pConstantWeakHypothesis = NULL;
        if (_withConstantLearner) // check constant learner if the user wants it
        {
            pConstantWeakHypothesis = pConstantWeakHypothesisSource->create();
            pConstantWeakHypothesis->initLearningOptions(args);
            pConstantWeakHypothesis->setTrainingData(pTrainingData);
            float constantEnergy = pConstantWeakHypothesis->run();
        }

        // estimate the edge
        filter( pTrainingData, (int)(_Cn * log(t + 2.0)), false );
        float edge = pWeakHypothesis->getEdge() / 2.0;

        if (_withConstantLearner) // check constant learner if the user wants it
        {
            float constantEdge = pConstantWeakHypothesis->getEdge() / 2.0;
            if ( constantEdge > edge )
            {
                delete pWeakHypothesis;
                pWeakHypothesis = pConstantWeakHypothesis;
                edge = constantEdge;
            }
            else
            {
                delete pConstantWeakHypothesis;
            }
        }

        // calculate alpha
        float alpha = 0.0;
        alpha = 0.5 * log( ( 0.5 + edge ) / ( 0.5 - edge ) );
        pWeakHypothesis->setAlpha( alpha );

        if (_verbose > 1)
            cout << "Weak learner: " << pWeakHypothesis->getName() << endl;

        // Output the step-by-step information
        pTrainingData->clearIndexSet();
        printOutputInfo(pOutInfo, t, pTrainingData, pTestData, pWeakHypothesis);

        // Updates the weights and returns the edge
        float gamma = updateWeights(pTrainingData, pWeakHypothesis);

        if (_verbose > 1)
        {
            cout << setprecision(5)
                 << "--> Alpha = " << pWeakHypothesis->getAlpha() << endl
                 << "--> Edge = " << gamma << endl
                 << "--> Energy = " << energy << endl
                 // << "--> ConstantEnergy = " << constantEnergy << endl
                 // << "--> difference = " << (energy - constantEnergy) << endl
                 ;
        }

        // update the margins
        updateMargins( pTrainingData, pWeakHypothesis );

        // append the current weak learner to the strong hypothesis file,
        // that is, serialize it.
        ss.appendHypothesis(t, pWeakHypothesis);

        // Add it to the internal list of weak hypotheses
        _foundHypotheses.push_back(pWeakHypothesis);

        // check if the time limit has been reached
        if (_maxTime > 0)
        {
            time( &currentTime );
            float diff = difftime(currentTime, startTime); // difftime is in seconds
            diff /= 60; // = minutes

            if (diff > _maxTime)
            {
                if (_verbose > 0)
                    cout << "Time limit of " << _maxTime << " minutes has been reached!" << endl;
                break;
            }
        } // check for maxtime

        // ownership of pWeakHypothesis has been transferred to
        // _foundHypotheses, so it must not be deleted here
    } // loop on iterations
    /////////////////////////////////////////////////////////

    // write the footer of the strong hypothesis file
    ss.writeFooter();

    // Free the two input data objects
    if (pTrainingData)
        delete pTrainingData;
    if (pTestData)
        delete pTestData;

    if (pOutInfo)
        delete pOutInfo;

    if (_verbose > 0)
        cout << "Learning completed." << endl;
}
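// For reference, since edge = getEdge() / 2 here, the coefficient
//
//   alpha_t = (1/2) * ln( (1/2 + edge) / (1/2 - edge) )
//
// is algebraically the same as (1/2) * ln( (1 + g) / (1 - g) ) with
// g = getEdge(), i.e. the standard AdaBoost formula in terms of the raw
// (unhalved) edge.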