bool LabelledClassificationData::merge(LabelledClassificationData &labelledData){

    if( labelledData.getNumDimensions() != numDimensions ){
        errorLog << "merge(LabelledClassificationData &labelledData) - The number of dimensions in the labelledData (" << labelledData.getNumDimensions() << ") does not match the number of dimensions of this dataset (" << numDimensions << ")" << endl;
        return false;
    }

    //The dataset has changed, so flag that any previous cross validation setup is no longer valid
    crossValidationSetup = false;
    crossValidationIndexs.clear();

    //Add the data from the labelledData to this instance
    for(UINT i=0; i<labelledData.getNumSamples(); i++){
        addSample(labelledData[i].getClassLabel(), labelledData[i].getSample());
    }

    //Set the class names from the dataset
    vector< ClassTracker > classTracker = labelledData.getClassTracker();
    for(UINT i=0; i<classTracker.size(); i++){
        setClassNameForCorrespondingClassLabel(classTracker[i].className, classTracker[i].classLabel);
    }

    sortClassLabels();

    return true;
}
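//Usage sketch (illustrative, not part of the library source): merging two datasets with
//matching dimensionality, using the addSample/merge calls shown above. The include path,
//function name, and sample values below are hypothetical.
#include "GRT.h"
using namespace GRT;

void mergeExample(){
    LabelledClassificationData datasetA( 2 ), datasetB( 2 );
    VectorDouble sample(2);
    sample[0] = 1.0; sample[1] = 2.0;
    datasetA.addSample( 1, sample );  //Class label 1
    sample[0] = 5.0; sample[1] = 6.0;
    datasetB.addSample( 2, sample );  //Class label 2

    //merge(...) will fail if the two datasets have different dimensionality
    if( !datasetA.merge( datasetB ) ){
        std::cout << "Failed to merge datasets!" << std::endl;
    }
}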
bool Softmax::train(LabelledClassificationData trainingData){

    //Clear any previous model
    clear();

    const unsigned int M = trainingData.getNumSamples();
    const unsigned int N = trainingData.getNumDimensions();
    const unsigned int K = trainingData.getNumClasses();

    if( M == 0 ){
        errorLog << "train(LabelledClassificationData trainingData) - Training data has zero samples!" << endl;
        return false;
    }

    numFeatures = N;
    numClasses = K;
    models.resize(K);
    classLabels.resize(K);
    ranges = trainingData.getRanges();

    //Scale the training data if needed
    if( useScaling ){
        //Scale the training data between 0 and 1
        trainingData.scale(0, 1);
    }

    //Train a regression model for each class in the training data
    for(UINT k=0; k<numClasses; k++){

        //Set the class label
        classLabels[k] = trainingData.getClassTracker()[k].classLabel;

        //Train the model
        if( !trainSoftmaxModel(classLabels[k],models[k],trainingData) ){
            errorLog << "train(LabelledClassificationData trainingData) - Failed to train model for class: " << classLabels[k] << endl;
            return false;
        }
    }

    //Flag that the algorithm has been trained
    trained = true;
    return trained;
}
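//Usage sketch (illustrative, not part of the library source; same includes as the merge
//sketch above). Trains a Softmax classifier and classifies one of the training samples,
//assuming the standard GRT classifier interface (enableScaling, predict,
//getPredictedClassLabel); the dataset file name is hypothetical.
void softmaxExample(){
    LabelledClassificationData trainingData;
    if( !trainingData.loadDatasetFromFile( "TrainingData.txt" ) ) return;

    Softmax softmax;
    softmax.enableScaling( true );  //Scales the data to [0 1] before training, as above
    if( !softmax.train( trainingData ) ) return;

    softmax.predict( trainingData[0].getSample() );
    std::cout << "Predicted class label: " << softmax.getPredictedClassLabel() << std::endl;
}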
bool KNN::train_(LabelledClassificationData &trainingData,UINT K){

    //Clear any previous models
    clear();

    if( trainingData.getNumSamples() == 0 ){
        errorLog << "train_(LabelledClassificationData &trainingData,UINT K) - Training data has zero samples!" << endl;
        return false;
    }

    //Set K and the dimensionality of the input data
    this->K = K;
    this->numFeatures = trainingData.getNumDimensions();
    this->numClasses = trainingData.getNumClasses();

    //TODO: In the future we need to build a kd-tree from the training data to allow better realtime prediction
    this->trainingData = trainingData;

    if( useScaling ){
        ranges = this->trainingData.getRanges();
        this->trainingData.scale(ranges, 0, 1);
    }

    //Set the class labels
    classLabels.resize(numClasses);
    for(UINT k=0; k<numClasses; k++){
        classLabels[k] = trainingData.getClassTracker()[k].classLabel;
    }

    //Flag that the algorithm has been trained so we can compute the rejection thresholds
    trained = true;

    //If null rejection is enabled then compute the null rejection thresholds
    if( useNullRejection ){

        //Temporarily disable null rejection so we can compute the values for it (it will be restored to its current value later)
        bool tempUseNullRejection = useNullRejection;
        useNullRejection = false;
        rejectionThresholds.clear();

        //Compute the rejection thresholds for each of the K classes
        VectorDouble counter(numClasses,0);
        trainingMu.resize( numClasses, 0 );
        trainingSigma.resize( numClasses, 0 );
        rejectionThresholds.resize( numClasses, 0 );

        //Compute Mu for each of the classes
        const unsigned int numTrainingExamples = trainingData.getNumSamples();
        vector< IndexedDouble > predictionResults( numTrainingExamples );
        for(UINT i=0; i<numTrainingExamples; i++){
            predict( trainingData[i].getSample(), K);

            UINT classLabelIndex = 0;
            for(UINT k=0; k<numClasses; k++){
                if( predictedClassLabel == classLabels[k] ){
                    classLabelIndex = k;
                    break;
                }
            }

            predictionResults[ i ].index = classLabelIndex;
            predictionResults[ i ].value = classDistances[ classLabelIndex ];

            trainingMu[ classLabelIndex ] += predictionResults[ i ].value;
            counter[ classLabelIndex ]++;
        }

        for(UINT j=0; j<numClasses; j++){
            trainingMu[j] /= counter[j];
        }

        //Compute Sigma for each of the classes
        for(UINT i=0; i<numTrainingExamples; i++){
            trainingSigma[predictionResults[i].index] += SQR(predictionResults[i].value - trainingMu[predictionResults[i].index]);
        }

        for(UINT j=0; j<numClasses; j++){
            double count = counter[j];
            if( count > 1 ){
                trainingSigma[ j ] = sqrt( trainingSigma[j] / (count-1) );
            }else{
                trainingSigma[ j ] = 1.0;
            }
        }

        //Check to see if any of the mu or sigma values are zero or NaN
        bool errorFound = false;
        for(UINT j=0; j<numClasses; j++){
            if( trainingMu[j] == 0 ){
                warningLog << "TrainingMu[ " << j << " ] is zero for a K value of " << K << endl;
            }
            if( trainingSigma[j] == 0 ){
                warningLog << "TrainingSigma[ " << j << " ] is zero for a K value of " << K << endl;
            }
            if( isnan( trainingMu[j] ) ){
                errorLog << "TrainingMu[ " << j << " ] is NAN for a K value of " << K << endl;
                errorFound = true;
            }
            if( isnan( trainingSigma[j] ) ){
                errorLog << "TrainingSigma[ " << j << " ] is NAN for a K value of " << K << endl;
                errorFound = true;
            }
        }

        if( errorFound ){
            trained = false;
            return false;
        }

        //Recompute the rejection thresholds
        recomputeNullRejectionThresholds();

        //Restore the actual state of the null rejection
        useNullRejection = tempUseNullRejection;

    }else{
        //Resize the rejection thresholds but set the values to 0
        rejectionThresholds.clear();
        rejectionThresholds.resize( numClasses, 0 );
    }

    return true;
}
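//Usage sketch (illustrative, not part of the library source; same includes as above).
//Sets K and enables null rejection before training, since the rejection thresholds are
//computed at training time, as shown above. The setK/enableNullRejection/train calls
//are assumed from the standard GRT classifier interface; the file name is hypothetical.
void knnExample(){
    LabelledClassificationData trainingData;
    if( !trainingData.loadDatasetFromFile( "TrainingData.txt" ) ) return;

    KNN knn;
    knn.setK( 5 );                    //Number of neighbours used for each prediction
    knn.enableNullRejection( true );  //Thresholds are computed from trainingMu/trainingSigma
    if( !knn.train( trainingData ) ) return;

    knn.predict( trainingData[0].getSample() );
    std::cout << "Predicted class label: " << knn.getPredictedClassLabel() << std::endl;
}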
bool ANBC::train(LabelledClassificationData &labelledTrainingData,double gamma) {

    const unsigned int M = labelledTrainingData.getNumSamples();
    const unsigned int N = labelledTrainingData.getNumDimensions();
    const unsigned int K = labelledTrainingData.getNumClasses();
    trained = false;
    models.clear();
    classLabels.clear();

    if( M == 0 ) {
        errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - Training data has zero samples!" << endl;
        return false;
    }

    if( weightsDataSet ) {
        if( weightsData.getNumDimensions() != N ) {
            errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - The number of dimensions in the weights data (" << weightsData.getNumDimensions() << ") is not equal to the number of dimensions of the training data (" << N << ")" << endl;
            return false;
        }
    }

    numFeatures = N;
    numClasses = K;
    models.resize(K);
    classLabels.resize(K);
    ranges = labelledTrainingData.getRanges();

    //Train each of the models
    for(UINT k=0; k<numClasses; k++) {

        //Get the class label for the kth class
        UINT classLabel = labelledTrainingData.getClassTracker()[k].classLabel;

        //Set the kth class label
        classLabels[k] = classLabel;

        //Get the weights for this class
        VectorDouble weights(numFeatures);
        if( weightsDataSet ) {
            bool weightsFound = false;
            for(UINT i=0; i<weightsData.getNumSamples(); i++) {
                if( weightsData[i].getClassLabel() == classLabel ) {
                    weights = weightsData[i].getSample();
                    weightsFound = true;
                    break;
                }
            }

            if( !weightsFound ) {
                errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - Failed to find the weights for class " << classLabel << endl;
                return false;
            }
        } else {
            //If the weights data has not been set then all the weights are 1
            for(UINT j=0; j<numFeatures; j++) weights[j] = 1.0;
        }

        //Get all the training data for this class
        LabelledClassificationData classData = labelledTrainingData.getClassData(classLabel);
        MatrixDouble data(classData.getNumSamples(),N);

        //Copy the training data into a matrix, scaling the training data if needed
        for(UINT i=0; i<data.getNumRows(); i++) {
            for(UINT j=0; j<data.getNumCols(); j++) {
                if( useScaling ) {
                    data[i][j] = scale(classData[i][j],ranges[j].minValue,ranges[j].maxValue,MIN_SCALE_VALUE,MAX_SCALE_VALUE);
                } else data[i][j] = classData[i][j];
            }
        }

        //Train the model for this class
        models[k].gamma = gamma;
        if( !models[k].train(classLabel,data,weights) ) {
            errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - Failed to train model for class: " << classLabel << endl;

            //Try and work out why the training failed
            if( models[k].N == 0 ) {
                errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - N == 0!" << endl;
                models.clear();
                return false;
            }
            for(UINT j=0; j<numFeatures; j++) {
                if( models[k].mu[j] == 0 ) {
                    errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - The mean of column " << j+1 << " is zero! Check the training data" << endl;
                    models.clear();
                    return false;
                }
            }
            models.clear();
            return false;
        }

    }

    //Store the null rejection thresholds
    nullRejectionThresholds.resize(numClasses);
    for(UINT k=0; k<numClasses; k++) {
        nullRejectionThresholds[k] = models[k].threshold;
    }

    //Flag that the models have been trained
    trained = true;
    return trained;
}
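//Usage sketch (illustrative, not part of the library source; same includes as above).
//ANBC can weight each input dimension per class; if no weights dataset is set, every
//weight defaults to 1, as shown in the train function above. The setWeights call is
//assumed from the GRT ANBC interface; the file names are hypothetical.
void anbcExample(){
    LabelledClassificationData trainingData;
    LabelledClassificationData weightsData;
    if( !trainingData.loadDatasetFromFile( "TrainingData.txt" ) ) return;
    if( !weightsData.loadDatasetFromFile( "WeightsData.txt" ) ) return;

    ANBC anbc;
    anbc.setWeights( weightsData );  //One weights sample per class label
    if( !anbc.train( trainingData ) ) return;

    anbc.predict( trainingData[0].getSample() );
    std::cout << "Predicted class label: " << anbc.getPredictedClassLabel() << std::endl;
}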
bool MinDist::train(LabelledClassificationData &labelledTrainingData,double gamma){

    const unsigned int M = labelledTrainingData.getNumSamples();
    const unsigned int N = labelledTrainingData.getNumDimensions();
    const unsigned int K = labelledTrainingData.getNumClasses();
    trained = false;
    models.clear();
    classLabels.clear();

    if( M == 0 ){
        errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - Training data has zero samples!" << endl;
        return false;
    }

    if( M <= numClusters ){
        errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - There are not enough training samples for the number of clusters. Either reduce the number of clusters or increase the number of training samples!" << endl;
        return false;
    }

    numFeatures = N;
    numClasses = K;
    models.resize(K);
    classLabels.resize(K);
    ranges = labelledTrainingData.getRanges();

    //Train each of the models
    for(UINT k=0; k<numClasses; k++){

        //Get the class label for the kth class
        UINT classLabel = labelledTrainingData.getClassTracker()[k].classLabel;

        //Set the kth class label
        classLabels[k] = classLabel;

        //Get all the training data for this class
        LabelledClassificationData classData = labelledTrainingData.getClassData(classLabel);
        MatrixDouble data(classData.getNumSamples(),N);

        //Copy the training data into a matrix, scaling the training data if needed
        for(UINT i=0; i<data.getNumRows(); i++){
            for(UINT j=0; j<data.getNumCols(); j++){
                if( useScaling ){
                    data[i][j] = scale(classData[i][j],ranges[j].minValue,ranges[j].maxValue,0,1);
                }else data[i][j] = classData[i][j];
            }
        }

        //Train the model for this class
        models[k].setGamma( gamma );
        if( !models[k].train(classLabel,data,numClusters) ){
            errorLog << "train(LabelledClassificationData &labelledTrainingData,double gamma) - Failed to train model for class: " << classLabel;
            errorLog << ". This might be because this class does not have enough training samples! You should reduce the number of clusters or increase the number of training samples for this class." << endl;
            models.clear();
            return false;
        }
    }

    trained = true;
    return true;
}
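//Usage sketch (illustrative, not part of the library source; same includes as above).
//The number of clusters must be less than the number of training samples, as checked in
//the train function above. The setNumClusters call is assumed from the GRT MinDist
//interface; the file name is hypothetical.
void minDistExample(){
    LabelledClassificationData trainingData;
    if( !trainingData.loadDatasetFromFile( "TrainingData.txt" ) ) return;

    MinDist minDist;
    minDist.setNumClusters( 4 );  //Each class is modelled by 4 cluster centres
    if( !minDist.train( trainingData ) ) return;

    minDist.predict( trainingData[0].getSample() );
    std::cout << "Predicted class label: " << minDist.getPredictedClassLabel() << std::endl;
}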
bool GMM::train(LabelledClassificationData trainingData){

    //Clear any old models
    models.clear();
    trained = false;
    numFeatures = 0;
    numClasses = 0;

    if( trainingData.getNumSamples() == 0 ){
        errorLog << "train(LabelledClassificationData trainingData) - Training data is empty!" << endl;
        return false;
    }

    //Set the number of features and number of classes and resize the models buffer
    numFeatures = trainingData.getNumDimensions();
    numClasses = trainingData.getNumClasses();
    models.resize(numClasses);

    if( numFeatures >= 6 ){
        warningLog << "train(LabelledClassificationData trainingData) - The number of features in your training data is high (" << numFeatures << "). The GMM classifier does not work well with high dimensional data; you might get better results from one of the other classifiers." << endl;
    }

    //Get the ranges of the training data if the training data is going to be scaled
    if( useScaling ){
        ranges = trainingData.getRanges();
    }

    //Fit a Mixture Model to each class (independently)
    for(UINT k=0; k<numClasses; k++){
        UINT classLabel = trainingData.getClassTracker()[k].classLabel;
        LabelledClassificationData classData = trainingData.getClassData( classLabel );

        //Scale the training data if needed
        if( useScaling ){
            if( !classData.scale(ranges,GMM_MIN_SCALE_VALUE, GMM_MAX_SCALE_VALUE) ){
                errorLog << "train(LabelledClassificationData trainingData) - Failed to scale training data!" << endl;
                return false;
            }
        }

        //Convert the labelled data to unlabelled data
        UnlabelledClassificationData unlabelledData = classData.reformatAsUnlabelledClassificationData();

        //Train the Mixture Model for this class
        GaussianMixtureModels gaussianMixtureModel;
        gaussianMixtureModel.setMinChange( minChange );
        gaussianMixtureModel.setMaxIter( maxIter );
        if( !gaussianMixtureModel.train(unlabelledData, numMixtureModels) ){
            errorLog << "train(LabelledClassificationData trainingData) - Failed to train Mixture Model for class " << classLabel << endl;
            return false;
        }

        //Setup the model container
        models[k].resize( numMixtureModels );
        models[k].setClassLabel( classLabel );

        //Store the mixture model in the container
        for(UINT j=0; j<numMixtureModels; j++){
            models[k][j].mu = gaussianMixtureModel.getMu().getRowVector(j);
            models[k][j].sigma = gaussianMixtureModel.getSigma()[j];

            //Compute the determinant and invSigma for the realtime prediction
            LUDecomposition ludcmp(models[k][j].sigma);
            if( !ludcmp.inverse( models[k][j].invSigma ) ){
                models.clear();
                errorLog << "train(LabelledClassificationData trainingData) - Failed to invert Matrix for class " << classLabel << "!" << endl;
                return false;
            }
            models[k][j].det = ludcmp.det();
        }

        //Compute the normalization factor
        models[k].recomputeNormalizationFactor();

        //Compute the rejection thresholds
        double mu = 0;
        double sigma = 0;
        VectorDouble predictionResults(classData.getNumSamples(),0);
        for(UINT i=0; i<classData.getNumSamples(); i++){
            vector< double > sample = classData[i].getSample();
            predictionResults[i] = models[k].computeMixtureLikelihood( sample );
            mu += predictionResults[i];
        }

        //Update mu
        mu /= double( classData.getNumSamples() );

        //Calculate the standard deviation
        for(UINT i=0; i<classData.getNumSamples(); i++)
            sigma += SQR( (predictionResults[i]-mu) );
        sigma = sqrt( sigma / (double(classData.getNumSamples())-1.0) );

        //Set the model's training mu and sigma
        models[k].setTrainingMuAndSigma(mu,sigma);

        if( !models[k].recomputeNullRejectionThreshold(nullRejectionCoeff) && useNullRejection ){
            warningLog << "train(LabelledClassificationData trainingData) - Failed to recompute rejection threshold for class " << classLabel << " - the nullRejectionCoeff value is too high!" << endl;
        }
    }

    //Reset the class labels
    classLabels.resize(numClasses);
    for(UINT k=0; k<numClasses; k++){
        classLabels[k] = models[k].getClassLabel();
    }

    //Resize the rejection thresholds
    nullRejectionThresholds.resize(numClasses);
    for(UINT k=0; k<numClasses; k++){
        nullRejectionThresholds[k] = models[k].getNullRejectionThreshold();
    }

    //Flag that the models have been trained
    trained = true;
    return true;
}
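//Usage sketch (illustrative, not part of the library source; same includes as above).
//One mixture model is fitted per class, so numMixtureModels controls the number of
//Gaussians used for each class. The setNumMixtureModels call is assumed from the GRT
//GMM interface; the file name is hypothetical.
void gmmExample(){
    LabelledClassificationData trainingData;
    if( !trainingData.loadDatasetFromFile( "TrainingData.txt" ) ) return;

    GMM gmm;
    gmm.setNumMixtureModels( 2 );  //Two Gaussians per class
    gmm.enableScaling( true );     //Recommended, as the EM training is sensitive to the data range
    if( !gmm.train( trainingData ) ) return;

    gmm.predict( trainingData[0].getSample() );
    std::cout << "Predicted class label: " << gmm.getPredictedClassLabel() << std::endl;
}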