bool HMM::convertDataToObservationSequence( TimeSeriesClassificationData &classData, vector< vector< UINT > > &observationSequences ){ observationSequences.resize( classData.getNumSamples() ); for(UINT i=0; i<classData.getNumSamples(); i++){ MatrixDouble ×eries = classData[i].getData(); observationSequences[i].resize( timeseries.getNumRows() ); for(UINT j=0; j<timeseries.getNumRows(); j++){ if( timeseries[j][0] >= numSymbols ){ errorLog << "train(TimeSeriesClassificationData &trainingData) - Found an observation sequence with a value outside of the symbol range! Value: " << timeseries[j][0] << endl; return false; } observationSequences[i][j] = (UINT)timeseries[j][0]; } } return true; }
bool TimeSeriesClassificationData::merge(const TimeSeriesClassificationData &labelledData){ if( labelledData.getNumDimensions() != numDimensions ){ errorLog << "merge(TimeSeriesClassificationData &labelledData) - The number of dimensions in the labelledData (" << labelledData.getNumDimensions() << ") does not match the number of dimensions of this dataset (" << numDimensions << ")" << std::endl; return false; } //The dataset has changed so flag that any previous cross validation setup will now not work crossValidationSetup = false; crossValidationIndexs.clear(); //Add the data from the labelledData to this instance for(UINT i=0; i<labelledData.getNumSamples(); i++){ addSample(labelledData[i].getClassLabel(), labelledData[i].getData()); } //Set the class names from the dataset Vector< ClassTracker > classTracker = labelledData.getClassTracker(); for(UINT i=0; i<classTracker.size(); i++){ setClassNameForCorrespondingClassLabel(classTracker[i].className, classTracker[i].classLabel); } return true; }
int main (int argc, const char * argv[]) { TimeSeriesClassificationData trainingData; //This will store our training data GestureRecognitionPipeline pipeline; //This is a wrapper for our classifier and any pre/post processing modules string dirPath = "/home/vlad/AndroidStudioProjects/DataCapture/dataSetGenerator/build"; if (!trainingData.loadDatasetFromFile(dirPath + "/acc-training-set-segmented.data")) { printf("Cannot open training segmented set\n"); return 0; } printf("Successfully opened training data set ...\n"); DTW dtw; // LowPassFilter lpf(0.1, 1, 1); // pipeline.setPreProcessingModule(lpf); // DoubleMovingAverageFilter filter( 1000, 3 ); // pipeline.setPreProcessingModule(filter); //dtw.enableNullRejection( true ); //Set the null rejection coefficient to 3, this controls the thresholds for the automatic null rejection //You can increase this value if you find that your real-time gestures are not being recognized //If you are getting too many false positives then you should decrease this value //dtw.setNullRejectionCoeff( 5 ); dtw.enableTrimTrainingData(true, 0.1, 90); // dtw.setOffsetTimeseriesUsingFirstSample(true); pipeline.setClassifier( dtw ); UINT KFolds = 5; /* Separate input dataset using KFold */ KfoldTimeSeriesData* kFoldTS = new KfoldTimeSeriesData(trainingData); if( !kFoldTS->spiltDataIntoKFolds(KFolds) ) { printf("BaseTGTestModel: Failed to spiltDataIntoKFolds!"); return 0; } UINT maxTrainigSetSize = trainingData.getNumSamples() * (KFolds - 1) / (KFolds * trainingData.getNumClasses()); // KFolds ofstream myfile; myfile.open ("example.txt"); Float acc = 0; for (GRT::UINT k = 1 ; k < KFolds; k++) { printf("Running tests for: %d fold", k); // maxTrainigSetSize // for (UINT trainingSetSize = 1; trainingSetSize <= maxTrainigSetSize; trainingSetSize ++) { /* Set up training datasets for current fold */ TimeSeriesClassificationData trainingDataset = kFoldTS->getTrainingFoldData(k, maxTrainigSetSize); /* Set up validation datasets for current fold */ 
TimeSeriesClassificationDataStream testDataset = kFoldTS->getTestFoldData(k); /* Log test dataset size */ //printf("Data set size: training %d; testing %d", // trainingDataset.getNumSamples(), testDataset.getNumSamples()); /* Run test for current fold */ pipeline.train(trainingDataset); pipeline.test(testDataset); myfile << pipeline.getTestAccuracy() << "\n"; // } } myfile.close(); printf("Accuracy = %f ; %d\n", acc, maxTrainigSetSize); }
int main (int argc, const char * argv[]) { //Create a new DTW instance, using the default parameters DTW dtw; //Load some training data to train the classifier - the DTW uses TimeSeriesClassificationData TimeSeriesClassificationData trainingData; if( !trainingData.load("DTWTrainingData.grt") ){ cout << "Failed to load training data!\n"; return EXIT_FAILURE; } //Use 20% of the training dataset to create a test dataset TimeSeriesClassificationData testData = trainingData.partition( 80 ); //Trim the training data for any sections of non-movement at the start or end of the recordings dtw.enableTrimTrainingData(true,0.1,90); //Train the classifier if( !dtw.train( trainingData ) ){ cout << "Failed to train classifier!\n"; return EXIT_FAILURE; } //Save the DTW model to a file if( !dtw.save("DTWModel.grt") ){ cout << "Failed to save the classifier model!\n"; return EXIT_FAILURE; } //Load the DTW model from a file if( !dtw.load("DTWModel.grt") ){ cout << "Failed to load the classifier model!\n"; return EXIT_FAILURE; } //Use the test dataset to test the DTW model double accuracy = 0; for(UINT i=0; i<testData.getNumSamples(); i++){ //Get the i'th test sample - this is a timeseries UINT classLabel = testData[i].getClassLabel(); MatrixDouble timeseries = testData[i].getData(); //Perform a prediction using the classifier if( !dtw.predict( timeseries ) ){ cout << "Failed to perform prediction for test sampel: " << i <<"\n"; return EXIT_FAILURE; } //Get the predicted class label UINT predictedClassLabel = dtw.getPredictedClassLabel(); double maximumLikelihood = dtw.getMaximumLikelihood(); VectorDouble classLikelihoods = dtw.getClassLikelihoods(); VectorDouble classDistances = dtw.getClassDistances(); //Update the accuracy if( classLabel == predictedClassLabel ) accuracy++; cout << "TestSample: " << i << "\tClassLabel: " << classLabel << "\tPredictedClassLabel: " << predictedClassLabel << "\tMaximumLikelihood: " << maximumLikelihood << endl; } cout << "Test Accuracy: " << 
accuracy/double(testData.getNumSamples())*100.0 << "%" << endl; return EXIT_SUCCESS; }
int main(int argc, const char * argv[]){ //Load the training data TimeSeriesClassificationData trainingData; if( !trainingData.loadDatasetFromFile("HMMTrainingData.grt") ){ cout << "ERROR: Failed to load training data!\n"; return false; } //Remove 20% of the training data to use as test data TimeSeriesClassificationData testData = trainingData.partition( 80 ); //The input to the HMM must be a quantized discrete value //We therefore use a KMeansQuantizer to covert the N-dimensional continuous data into 1-dimensional discrete data const UINT NUM_SYMBOLS = 10; KMeansQuantizer quantizer( NUM_SYMBOLS ); //Train the quantizer using the training data if( !quantizer.train( trainingData ) ){ cout << "ERROR: Failed to train quantizer!\n"; return false; } //Quantize the training data TimeSeriesClassificationData quantizedTrainingData( 1 ); for(UINT i=0; i<trainingData.getNumSamples(); i++){ UINT classLabel = trainingData[i].getClassLabel(); MatrixDouble quantizedSample; for(UINT j=0; j<trainingData[i].getLength(); j++){ quantizer.quantize( trainingData[i].getData().getRowVector(j) ); quantizedSample.push_back( quantizer.getFeatureVector() ); } if( !quantizedTrainingData.addSample(classLabel, quantizedSample) ){ cout << "ERROR: Failed to quantize training data!\n"; return false; } } //Create a new HMM instance HMM hmm; //Set the number of states in each model hmm.setNumStates( 4 ); //Set the number of symbols in each model, this must match the number of symbols in the quantizer hmm.setNumSymbols( NUM_SYMBOLS ); //Set the HMM model type to LEFTRIGHT with a delta of 1 hmm.setModelType( HiddenMarkovModel::LEFTRIGHT ); hmm.setDelta( 1 ); //Set the training parameters hmm.setMinImprovement( 1.0e-5 ); hmm.setMaxNumIterations( 100 ); hmm.setNumRandomTrainingIterations( 20 ); //Train the HMM model if( !hmm.train( quantizedTrainingData ) ){ cout << "ERROR: Failed to train the HMM model!\n"; return false; } //Save the HMM model to a file if( !hmm.save( "HMMModel.grt" ) ){ cout << 
"ERROR: Failed to save the model to a file!\n"; return false; } //Load the HMM model from a file if( !hmm.load( "HMMModel.grt" ) ){ cout << "ERROR: Failed to load the model from a file!\n"; return false; } //Quantize the test data TimeSeriesClassificationData quantizedTestData( 1 ); for(UINT i=0; i<testData.getNumSamples(); i++){ UINT classLabel = testData[i].getClassLabel(); MatrixDouble quantizedSample; for(UINT j=0; j<testData[i].getLength(); j++){ quantizer.quantize( testData[i].getData().getRowVector(j) ); quantizedSample.push_back( quantizer.getFeatureVector() ); } if( !quantizedTestData.addSample(classLabel, quantizedSample) ){ cout << "ERROR: Failed to quantize training data!\n"; return false; } } //Compute the accuracy of the HMM models using the test data double numCorrect = 0; double numTests = 0; for(UINT i=0; i<quantizedTestData.getNumSamples(); i++){ UINT classLabel = quantizedTestData[i].getClassLabel(); hmm.predict( quantizedTestData[i].getData() ); if( classLabel == hmm.getPredictedClassLabel() ) numCorrect++; numTests++; VectorDouble classLikelihoods = hmm.getClassLikelihoods(); VectorDouble classDistances = hmm.getClassDistances(); cout << "ClassLabel: " << classLabel; cout << " PredictedClassLabel: " << hmm.getPredictedClassLabel(); cout << " MaxLikelihood: " << hmm.getMaximumLikelihood(); cout << " ClassLikelihoods: "; for(UINT k=0; k<classLikelihoods.size(); k++){ cout << classLikelihoods[k] << "\t"; } cout << "ClassDistances: "; for(UINT k=0; k<classDistances.size(); k++){ cout << classDistances[k] << "\t"; } cout << endl; } cout << "Test Accuracy: " << numCorrect/numTests*100.0 << endl; return true; }
bool HMM::train_continuous(TimeSeriesClassificationData &trainingData){

    //Trains one continuous HMM per training sample; the resulting committee of
    //models is used at prediction time by the CHMM.

    clear();

    //There must be at least one training sample
    if( trainingData.getNumSamples() == 0 ){
        errorLog << "train_continuous(TimeSeriesClassificationData &trainingData) - There are no training samples to train the CHMM classifer!" << endl;
        return false;
    }

    //Reset the CHMM
    numInputDimensions = trainingData.getNumDimensions();
    numClasses = trainingData.getNumClasses();
    classLabels.resize( numClasses );
    for(UINT i=0; i<numClasses; i++){
        classLabels[i] = trainingData.getClassTracker()[i].classLabel;
    }

    //Store the data ranges and scale the training data if scaling is enabled
    ranges = trainingData.getRanges();
    if( useScaling ){
        trainingData.scale(0, 1);
    }

    //There will be exactly one model for each training sample
    const UINT numTrainingSamples = trainingData.getNumSamples();
    continuousModels.resize( numTrainingSamples );

    for(UINT i=0; i<numTrainingSamples; i++){

        //Configure the model before training
        continuousModels[i].setDownsampleFactor( downsampleFactor );
        continuousModels[i].setModelType( modelType );
        continuousModels[i].setDelta( delta );
        continuousModels[i].setSigma( sigma );
        continuousModels[i].setAutoEstimateSigma( autoEstimateSigma );
        continuousModels[i].enableScaling( false ); //Scaling is always handled by the CHMM itself, never by the individual models

        //Train this model on its single timeseries
        if( !continuousModels[i].train_( trainingData[i] ) ){
            errorLog << "train_continuous(TimeSeriesClassificationData &trainingData) - Failed to train CHMM for sample " << i << endl;
            return false;
        }
    }

    //The committee can never be larger than the number of trained models
    if( committeeSize > trainingData.getNumSamples() ){
        committeeSize = trainingData.getNumSamples();
        warningLog << "train_continuous(TimeSeriesClassificationData &trainingData) - The committeeSize is larger than the number of training sample. Setting committeeSize to number of training samples: " << trainingData.getNumSamples() << endl;
    }

    //Flag that the model has been trained
    trained = true;

    //Compute any null rejection thresholds if needed
    if( useNullRejection ){
        //Compute the rejection thresholds
        nullRejectionThresholds.resize(numClasses);
    }

    return true;
}
bool HMM::train_discrete(TimeSeriesClassificationData &trainingData){

    //Trains one discrete HMM per class, then estimates a per-class null
    //rejection threshold from the average log likelihood of each class's
    //own training sequences.

    clear();

    //There must be at least one training sample
    if( trainingData.getNumSamples() == 0 ){
        errorLog << "train_discrete(TimeSeriesClassificationData &trainingData) - There are no training samples to train the HMM classifer!" << endl;
        return false;
    }

    //Discrete HMMs only accept 1-dimensional (quantized) observations
    if( trainingData.getNumDimensions() != 1 ){
        errorLog << "train_discrete(TimeSeriesClassificationData &trainingData) - The number of dimensions in the training data must be 1. If your training data is not 1 dimensional then you must quantize the training data using one of the GRT quantization algorithms" << endl;
        return false;
    }

    //Reset the HMM
    numInputDimensions = trainingData.getNumDimensions();
    numClasses = trainingData.getNumClasses();
    discreteModels.resize( numClasses );
    classLabels.resize( numClasses );

    //Configure every model before training
    for(UINT i=0; i<numClasses; i++){
        discreteModels[i].resetModel(numStates,numSymbols,modelType,delta);
        discreteModels[i].setMaxNumEpochs( maxNumEpochs );
        discreteModels[i].setMinChange( minChange );
    }

    //Train one model per class
    for(UINT i=0; i<numClasses; i++){

        //Get the class ID of this gesture
        UINT classID = trainingData.getClassTracker()[i].classLabel;
        classLabels[i] = classID;

        //Convert this class's training data into a list of observation sequences
        TimeSeriesClassificationData classData = trainingData.getClassData( classID );
        vector< vector< UINT > > observationSequences;
        if( !convertDataToObservationSequence( classData, observationSequences ) ){
            return false;
        }

        //Train the model
        if( !discreteModels[i].train( observationSequences ) ){
            errorLog << "train_discrete(TimeSeriesClassificationData &trainingData) - Failed to train HMM for class " << classID << endl;
            return false;
        }
    }

    //Compute the rejection thresholds
    nullRejectionThresholds.resize(numClasses);
    for(UINT i=0; i<numClasses; i++){

        //Get the class ID of this gesture
        UINT classID = trainingData.getClassTracker()[i].classLabel;
        classLabels[i] = classID;

        //Rebuild the observation sequences for this class
        TimeSeriesClassificationData classData = trainingData.getClassData( classID );
        vector< vector< UINT > > observationSequences;
        if( !convertDataToObservationSequence( classData, observationSequences ) ){
            return false;
        }

        //Average the absolute log likelihood over every sequence of this class;
        //the threshold is the negated average
        double avgLoglikelihood = 0;
        for(UINT n=0; n<observationSequences.size(); n++){
            avgLoglikelihood += fabs( discreteModels[i].predict( observationSequences[n] ) );
        }
        nullRejectionThresholds[i] = -( avgLoglikelihood / double( observationSequences.size() ) );
    }

    //Flag that the model has been trained
    trained = true;

    return true;
}
int main() { vector<string> gestures(0,""); GetFilesInDirectory(gestures, "rawdata"); CreateDirectory("processed", NULL); sort(gestures.begin(), gestures.end()); data = vector<vector<vector<double > > >(gestures.size(), vector<vector<double > >(0,vector<double>(0,0))); for(size_t i = 0; i < gestures.size(); i++) { ifstream fin(gestures[i]); int n; fin >> n; // cerr << gestures[i] << endl; // cerr << n << endl; data[i] = vector<vector<double> >(n, vector<double>(NUMPARAM, 0)); for(int j = 0; j < n; j++) { for(int k = 0; k < NUMPARAM; k++) { fin >> data[i][j][k]; } } fin.close(); } //Create a new instance of the TimeSeriesClassificationDataStream TimeSeriesClassificationData trainingData; // ax, ay, az trainingData.setNumDimensions(3); trainingData.setDatasetName("processed\\GestureTrainingData.txt"); ofstream labelfile("processed\\GestureTrainingDataLabels.txt"); UINT currLabel = 1; Random random; map<string, int> gesturenames; for(size_t overall = 0; overall < gestures.size(); overall++) { string nam = gestures[overall].substr(8,gestures[overall].find_first_of('_')-8); if(gesturenames.count(nam)) currLabel = gesturenames[nam]; else { currLabel = gesturenames.size()+1; gesturenames[nam] = currLabel; labelfile << currLabel << " " << nam << endl; } MatrixDouble trainingSample; VectorDouble currVec( trainingData.getNumDimensions() ); for(size_t k = 1; k < data[overall].size(); k++) { for(UINT j=0; j<currVec.size(); j++){ currVec[j] = data[overall][k][j]; } trainingSample.push_back(currVec); } trainingData.addSample(currLabel, trainingSample); } for(size_t i = 0; i < gestures.size(); i++) { MatrixDouble trainingSample; VectorDouble currVec(trainingData.getNumDimensions()); for(UINT j = 0; j < currVec.size(); j++) { currVec[j] = random.getRandomNumberUniform(-1.0, 1.0); } for(size_t k = 0; k < 100; k++) { trainingSample.push_back(currVec); } trainingData.addSample(0, trainingSample); } //After recording your training data you can then save it to a file if( 
!trainingData.save( "processed\\TrainingData.grt" ) ){ cout << "ERROR: Failed to save dataset to file!\n"; return EXIT_FAILURE; } //This can then be loaded later if( !trainingData.load( "processed\\TrainingData.grt" ) ){ cout << "ERROR: Failed to load dataset from file!\n"; return EXIT_FAILURE; } //This is how you can get some stats from the training data string datasetName = trainingData.getDatasetName(); string infoText = trainingData.getInfoText(); UINT numSamples = trainingData.getNumSamples(); UINT numDimensions = trainingData.getNumDimensions(); UINT numClasses = trainingData.getNumClasses(); cout << "Dataset Name: " << datasetName << endl; cout << "InfoText: " << infoText << endl; cout << "NumberOfSamples: " << numSamples << endl; cout << "NumberOfDimensions: " << numDimensions << endl; cout << "NumberOfClasses: " << numClasses << endl; //You can also get the minimum and maximum ranges of the data vector< MinMax > ranges = trainingData.getRanges(); cout << "The ranges of the dataset are: \n"; for(UINT j=0; j<ranges.size(); j++){ cout << "Dimension: " << j << " Min: " << ranges[j].minValue << " Max: " << ranges[j].maxValue << endl; } DTW dtw; if( !dtw.train( trainingData ) ){ cerr << "Failed to train classifier!\n"; exit(EXIT_FAILURE); } dtw.enableNullRejection(true); dtw.setNullRejectionCoeff(4); dtw.enableTrimTrainingData(true, 0.1, 90); //Save the DTW model to a file if( !dtw.saveModelToFile("processed\\DTWModel.txt") ){ cerr << "Failed to save the classifier model!\n"; exit(EXIT_FAILURE); } trainingData.clear(); return EXIT_SUCCESS; }
int main (int argc, const char * argv[]) { //Create a new instance of the TimeSeriesClassificationData TimeSeriesClassificationData trainingData; //Set the dimensionality of the data (you need to do this before you can add any samples) trainingData.setNumDimensions( 3 ); //You can also give the dataset a name (the name should have no spaces) trainingData.setDatasetName("DummyData"); //You can also add some info text about the data trainingData.setInfoText("This data contains some dummy timeseries data"); //Here you would record a time series, when you have finished recording the time series then add the training sample to the training data UINT gestureLabel = 1; MatrixDouble trainingSample; //For now we will just add 10 x 20 random walk data timeseries Random random; for(UINT k=0; k<10; k++){//For the number of classes gestureLabel = k+1; //Get the init random walk position for this gesture VectorDouble startPos( trainingData.getNumDimensions() ); for(UINT j=0; j<startPos.size(); j++){ startPos[j] = random.getRandomNumberUniform(-1.0,1.0); } //Generate the 20 time series for(UINT x=0; x<20; x++){ //Clear any previous timeseries trainingSample.clear(); //Generate the random walk UINT randomWalkLength = random.getRandomNumberInt(90, 110); VectorDouble sample = startPos; for(UINT i=0; i<randomWalkLength; i++){ for(UINT j=0; j<startPos.size(); j++){ sample[j] += random.getRandomNumberUniform(-0.1,0.1); } //Add the sample to the training sample trainingSample.push_back( sample ); } //Add the training sample to the dataset trainingData.addSample( gestureLabel, trainingSample ); } } //After recording your training data you can then save it to a file if( !trainingData.saveDatasetToFile( "TrainingData.txt" ) ){ cout << "Failed to save dataset to file!\n"; return EXIT_FAILURE; } //This can then be loaded later if( !trainingData.loadDatasetFromFile( "TrainingData.txt" ) ){ cout << "Failed to load dataset from file!\n"; return EXIT_FAILURE; } //This is how you can get some 
stats from the training data string datasetName = trainingData.getDatasetName(); string infoText = trainingData.getInfoText(); UINT numSamples = trainingData.getNumSamples(); UINT numDimensions = trainingData.getNumDimensions(); UINT numClasses = trainingData.getNumClasses(); cout << "Dataset Name: " << datasetName << endl; cout << "InfoText: " << infoText << endl; cout << "NumberOfSamples: " << numSamples << endl; cout << "NumberOfDimensions: " << numDimensions << endl; cout << "NumberOfClasses: " << numClasses << endl; //You can also get the minimum and maximum ranges of the data vector< MinMax > ranges = trainingData.getRanges(); cout << "The ranges of the dataset are: \n"; for(UINT j=0; j<ranges.size(); j++){ cout << "Dimension: " << j << " Min: " << ranges[j].minValue << " Max: " << ranges[j].maxValue << endl; } //If you want to partition the dataset into a training dataset and a test dataset then you can use the partition function //A value of 80 means that 80% of the original data will remain in the training dataset and 20% will be returned as the test dataset TimeSeriesClassificationData testData = trainingData.partition( 80 ); //If you have multiple datasets that you want to merge together then use the merge function if( !trainingData.merge( testData ) ){ cout << "Failed to merge datasets!\n"; return EXIT_FAILURE; } //If you want to run K-Fold cross validation using the dataset then you should first spilt the dataset into K-Folds //A value of 10 splits the dataset into 10 folds and the true parameter signals that stratified sampling should be used if( !trainingData.spiltDataIntoKFolds( 10, true ) ){ cout << "Failed to spiltDataIntoKFolds!\n"; return EXIT_FAILURE; } //After you have called the spilt function you can then get the training and test sets for each fold for(UINT foldIndex=0; foldIndex<10; foldIndex++){ TimeSeriesClassificationData foldTrainingData = trainingData.getTrainingFoldData( foldIndex ); TimeSeriesClassificationData foldTestingData = 
trainingData.getTestFoldData( foldIndex ); } //If need you can clear any training data that you have recorded trainingData.clear(); return EXIT_SUCCESS; }
int main(int argc, const char * argv[]){ //Load the training data TimeSeriesClassificationData trainingData; if( !trainingData.load("HMMTrainingData.grt") ){ cout << "ERROR: Failed to load training data!\n"; return false; } //Remove 20% of the training data to use as test data TimeSeriesClassificationData testData = trainingData.partition( 80 ); //Create a new HMM instance HMM hmm; //Set the HMM as a Continuous HMM hmm.setHMMType( HMM_CONTINUOUS ); //Set the downsample factor, a higher downsample factor will speed up the prediction time, but might reduce the classification accuracy hmm.setDownsampleFactor( 5 ); //Set the committee size, this sets the (top) number of models that will be used to make a prediction hmm.setCommitteeSize( 10 ); //Tell the hmm algorithm that we want it to estimate sigma from the training data hmm.setAutoEstimateSigma( true ); //Set the minimum value for sigma, you might need to adjust this based on the range of your data //If you set setAutoEstimateSigma to false, then all sigma values will use the value below hmm.setSigma( 20.0 ); //Set the HMM model type to LEFTRIGHT with a delta of 1, this means the HMM can only move from the left-most state to the right-most state //in steps of 1 hmm.setModelType( HMM_LEFTRIGHT ); hmm.setDelta( 1 ); //Train the HMM model if( !hmm.train( trainingData ) ){ cout << "ERROR: Failed to train the HMM model!\n"; return false; } //Save the HMM model to a file if( !hmm.save( "HMMModel.grt" ) ){ cout << "ERROR: Failed to save the model to a file!\n"; return false; } //Load the HMM model from a file if( !hmm.load( "HMMModel.grt" ) ){ cout << "ERROR: Failed to load the model from a file!\n"; return false; } //Compute the accuracy of the HMM models using the test data double numCorrect = 0; double numTests = 0; for(UINT i=0; i<testData.getNumSamples(); i++){ UINT classLabel = testData[i].getClassLabel(); hmm.predict( testData[i].getData() ); if( classLabel == hmm.getPredictedClassLabel() ) numCorrect++; numTests++; 
VectorFloat classLikelihoods = hmm.getClassLikelihoods(); VectorFloat classDistances = hmm.getClassDistances(); cout << "ClassLabel: " << classLabel; cout << " PredictedClassLabel: " << hmm.getPredictedClassLabel(); cout << " MaxLikelihood: " << hmm.getMaximumLikelihood(); cout << " ClassLikelihoods: "; for(UINT k=0; k<classLikelihoods.size(); k++){ cout << classLikelihoods[k] << "\t"; } cout << "ClassDistances: "; for(UINT k=0; k<classDistances.size(); k++){ cout << classDistances[k] << "\t"; } cout << endl; } cout << "Test Accuracy: " << numCorrect/numTests*100.0 << endl; return true; }