bool ParticleClassifier::train_(TimeSeriesClassificationData &trainingData){

    clear();

    numClasses = trainingData.getNumClasses();
    numInputDimensions = trainingData.getNumDimensions();
    ranges = trainingData.getRanges();

    //Scale the training data if needed
    if( useScaling ){
        trainingData.scale(0, 1);
    }

    //Train the particle filter
    particleFilter.train( numParticles, trainingData, sensorNoise, transitionSigma, phaseSigma, velocitySigma );

    classLabels.resize(numClasses);
    classLikelihoods.resize(numClasses,0);
    classDistances.resize(numClasses,0);

    for(unsigned int i=0; i<numClasses; i++){
        classLabels[i] = trainingData.getClassTracker()[i].classLabel;
    }

    trained = true;

    return trained;
}
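// Usage sketch (assumption, not part of the source above): training a ParticleClassifier
// from a recorded dataset via the public train() method, which forwards to train_().
// The header path, file name, and load() call are illustrative placeholders.
#include <GRT/GRT.h>
using namespace GRT;

int main(){
    TimeSeriesClassificationData trainingData;
    if( !trainingData.load( "gestures.grt" ) ) return EXIT_FAILURE; //hypothetical dataset file

    ParticleClassifier classifier;
    classifier.enableScaling( true );              //scaling is applied inside train_()
    if( !classifier.train( trainingData ) ) return EXIT_FAILURE;
    return EXIT_SUCCESS;
}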
bool TimeSeriesClassificationData::merge(const TimeSeriesClassificationData &labelledData){

    if( labelledData.getNumDimensions() != numDimensions ){
        errorLog << "merge(TimeSeriesClassificationData &labelledData) - The number of dimensions in the labelledData (" << labelledData.getNumDimensions() << ") does not match the number of dimensions of this dataset (" << numDimensions << ")" << std::endl;
        return false;
    }

    //The dataset has changed so flag that any previous cross validation setup will now not work
    crossValidationSetup = false;
    crossValidationIndexs.clear();

    //Add the data from the labelledData to this instance
    for(UINT i=0; i<labelledData.getNumSamples(); i++){
        addSample(labelledData[i].getClassLabel(), labelledData[i].getData());
    }

    //Set the class names from the dataset
    Vector< ClassTracker > classTracker = labelledData.getClassTracker();
    for(UINT i=0; i<classTracker.size(); i++){
        setClassNameForCorrespondingClassLabel(classTracker[i].className, classTracker[i].classLabel);
    }

    return true;
}
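// Usage sketch (assumption): merging two recording sessions into one dataset before
// training. Both datasets must have the same number of dimensions or merge() returns
// false; samples and class names from the argument are appended to the callee.
// File names are illustrative placeholders.
#include <GRT/GRT.h>
using namespace GRT;

bool buildCombinedDataset( TimeSeriesClassificationData &combined ){
    TimeSeriesClassificationData sessionA, sessionB;
    if( !sessionA.load( "session_a.grt" ) ) return false; //hypothetical dataset files
    if( !sessionB.load( "session_b.grt" ) ) return false;
    combined = sessionA;
    return combined.merge( sessionB );
}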
bool HMM::train_continuous(TimeSeriesClassificationData &trainingData){

    clear();

    if( trainingData.getNumSamples() == 0 ){
        errorLog << "train_continuous(TimeSeriesClassificationData &trainingData) - There are no training samples to train the CHMM classifier!" << endl;
        return false;
    }

    //Reset the CHMM
    numInputDimensions = trainingData.getNumDimensions();
    numClasses = trainingData.getNumClasses();
    classLabels.resize( numClasses );
    for(UINT k=0; k<numClasses; k++){
        classLabels[k] = trainingData.getClassTracker()[k].classLabel;
    }

    //Scale the training data if needed
    ranges = trainingData.getRanges();
    if( useScaling ){
        trainingData.scale(0, 1);
    }

    //Setup the models, there will be one model for each training sample
    const UINT numTrainingSamples = trainingData.getNumSamples();
    continuousModels.resize( numTrainingSamples );

    //Train each of the models
    for(UINT k=0; k<numTrainingSamples; k++){

        //Init the model
        continuousModels[k].setDownsampleFactor( downsampleFactor );
        continuousModels[k].setModelType( modelType );
        continuousModels[k].setDelta( delta );
        continuousModels[k].setSigma( sigma );
        continuousModels[k].setAutoEstimateSigma( autoEstimateSigma );
        continuousModels[k].enableScaling( false ); //Scaling should always be off for the models, as any scaling is handled by the CHMM itself

        //Train the model
        if( !continuousModels[k].train_( trainingData[k] ) ){
            errorLog << "train_continuous(TimeSeriesClassificationData &trainingData) - Failed to train CHMM for sample " << k << endl;
            return false;
        }
    }

    if( committeeSize > trainingData.getNumSamples() ){
        committeeSize = trainingData.getNumSamples();
        warningLog << "train_continuous(TimeSeriesClassificationData &trainingData) - The committeeSize is larger than the number of training samples. Setting committeeSize to the number of training samples: " << trainingData.getNumSamples() << endl;
    }

    //Flag that the model has been trained
    trained = true;

    //Compute any null rejection thresholds if needed
    if( useNullRejection ){
        //Compute the rejection thresholds
        nullRejectionThresholds.resize(numClasses);
    }

    return true;
}
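// Usage sketch (assumption): configuring and training a continuous HMM with the
// parameters consumed above (downsampleFactor, committeeSize, sigma, modelType).
// The constructor arguments and setter names follow the standard GRT HMM interface;
// verify them against your GRT version. The parameter values are illustrative only.
#include <GRT/GRT.h>
using namespace GRT;

bool trainContinuousHMM( TimeSeriesClassificationData &trainingData ){
    HMM hmm( HMM_CONTINUOUS, HMM_LEFTRIGHT ); //one continuous model is built per training sample
    hmm.setDownsampleFactor( 5 );             //downsample each time series by a factor of 5
    hmm.setCommitteeSize( 10 );               //clamped to the number of training samples if too large
    hmm.setSigma( 10.0 );                     //measurement noise for the continuous models
    return hmm.train( trainingData );         //forwards to train_continuous() for HMM_CONTINUOUS
}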
bool HMM::train_discrete(TimeSeriesClassificationData &trainingData){

    clear();

    if( trainingData.getNumSamples() == 0 ){
        errorLog << "train_discrete(TimeSeriesClassificationData &trainingData) - There are no training samples to train the HMM classifier!" << endl;
        return false;
    }

    if( trainingData.getNumDimensions() != 1 ){
        errorLog << "train_discrete(TimeSeriesClassificationData &trainingData) - The number of dimensions in the training data must be 1. If your training data is not 1 dimensional then you must quantize the training data using one of the GRT quantization algorithms" << endl;
        return false;
    }

    //Reset the HMM
    numInputDimensions = trainingData.getNumDimensions();
    numClasses = trainingData.getNumClasses();
    discreteModels.resize( numClasses );
    classLabels.resize( numClasses );

    //Init the models
    for(UINT k=0; k<numClasses; k++){
        discreteModels[k].resetModel(numStates,numSymbols,modelType,delta);
        discreteModels[k].setMaxNumEpochs( maxNumEpochs );
        discreteModels[k].setMinChange( minChange );
    }

    //Train each of the models
    for(UINT k=0; k<numClasses; k++){

        //Get the class ID of this gesture
        UINT classID = trainingData.getClassTracker()[k].classLabel;
        classLabels[k] = classID;

        //Convert this class's training data into a list of observation sequences
        TimeSeriesClassificationData classData = trainingData.getClassData( classID );
        vector< vector< UINT > > observationSequences;
        if( !convertDataToObservationSequence( classData, observationSequences ) ){
            return false;
        }

        //Train the model
        if( !discreteModels[k].train( observationSequences ) ){
            errorLog << "train_discrete(TimeSeriesClassificationData &trainingData) - Failed to train HMM for class " << classID << endl;
            return false;
        }
    }

    //Compute the rejection thresholds
    nullRejectionThresholds.resize(numClasses);

    for(UINT k=0; k<numClasses; k++){

        //Get the class ID of this gesture
        UINT classID = trainingData.getClassTracker()[k].classLabel;
        classLabels[k] = classID;

        //Convert this class's training data into a list of observation sequences
        TimeSeriesClassificationData classData = trainingData.getClassData( classID );
        vector< vector< UINT > > observationSequences;
        if( !convertDataToObservationSequence( classData, observationSequences ) ){
            return false;
        }

        //Test the model against its own training data to estimate the rejection threshold
        double loglikelihood = 0;
        double avgLoglikelihood = 0;
        for(UINT i=0; i<observationSequences.size(); i++){
            loglikelihood = discreteModels[k].predict( observationSequences[i] );
            avgLoglikelihood += fabs( loglikelihood );
        }
        nullRejectionThresholds[k] = -( avgLoglikelihood / double( observationSequences.size() ) );
    }

    //Flag that the model has been trained
    trained = true;

    return true;
}
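// Usage sketch (assumption): training a discrete HMM on data that has already been
// quantized to a single dimension of symbol indices (for example with the GRT
// KMeansQuantizer). Setter names follow the standard GRT interface; the numStates,
// numSymbols, and epoch values below are illustrative only.
#include <GRT/GRT.h>
using namespace GRT;

bool trainDiscreteHMM( TimeSeriesClassificationData &quantizedData ){
    if( quantizedData.getNumDimensions() != 1 ) return false; //train_discrete() requires 1D symbol data

    HMM hmm( HMM_DISCRETE, HMM_LEFTRIGHT );
    hmm.setNumStates( 10 );              //hidden states per class model
    hmm.setNumSymbols( 20 );             //must match the quantizer's codebook size
    hmm.setMaxNumEpochs( 100 );          //training iteration limit per model
    hmm.enableNullRejection( true );     //thresholds come from the training log-likelihoods
    return hmm.train( quantizedData );   //forwards to train_discrete() for HMM_DISCRETE
}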