VectorFloat MatrixFloat::multiple(const VectorFloat &b) const{

    const unsigned int M = rows;
    const unsigned int N = cols;
    const unsigned int K = (unsigned int)b.size();

    if( N != K ){
        warningLog << "multiple(vector b) - The size of b (" << b.size() << ") does not match the number of columns in this matrix (" << N << ")" << std::endl;
        return VectorFloat();
    }

    VectorFloat c(M);
    const Float *pb = &b[0];
    Float *pc = &c[0];

    for(unsigned int i=0; i<M; i++){
        pc[i] = 0;
        for(unsigned int j=0; j<N; j++){
            pc[i] += dataPtr[i*N+j]*pb[j];
        }
    }

    return c;
}
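//For reference, a standalone sketch (plain C++, independent of GRT's types) of the
//same row-major matrix-vector product, with a tiny worked example; matVec and its
//parameters are hypothetical names.
#include <cstddef>
#include <vector>

std::vector<float> matVec(const std::vector<float> &A, std::size_t rows, std::size_t cols,
                          const std::vector<float> &b){
    std::vector<float> c(rows, 0.0f);
    for(std::size_t i=0; i<rows; i++)
        for(std::size_t j=0; j<cols; j++)
            c[i] += A[i*cols+j] * b[j];   //element (i,j) lives at offset i*cols+j
    return c;
}
//Example: A = [[1,2],[3,4]] stored as {1,2,3,4}, b = {1,1}  ->  c = {3,7}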
VectorFloat DoubleMovingAverageFilter::filter(const VectorFloat &x){

    //If the filter has not been initialised then return an empty vector, otherwise filter x and return y
    if( !initialized ){
        errorLog << "filter(const VectorFloat &x) - The filter has not been initialized!" << std::endl;
        return VectorFloat();
    }

    if( x.getSize() != numInputDimensions ){
        errorLog << "filter(const VectorFloat &x) - The size of the input vector (" << x.getSize() << ") does not match that of the number of dimensions of the filter (" << numInputDimensions << ")!" << std::endl;
        return VectorFloat();
    }

    //Perform the first filter
    VectorFloat y = filter1.filter( x );
    if( y.size() == 0 ) return y;

    //Perform the second filter
    VectorFloat yy = filter2.filter( y );
    if( yy.size() == 0 ) return VectorFloat(); //Signal failure consistently with an empty vector

    //Account for the filter lag: with y the first-pass average and yy the average of
    //averages, the output y + (y - yy) = 2*y - yy cancels (to first order) the lag
    //each averaging pass introduces on a trending signal
    const UINT N = y.getSize();
    for(UINT i=0; i<N; i++){
        yy[i] = y[i] + (y[i] - yy[i]);
        processedData[i] = yy[i];
    }

    return yy;
}
bool BernoulliRBM::predict_(VectorFloat &inputData,VectorFloat &outputData){

    if( !trained ){
        errorLog << "predict_(VectorFloat &inputData,VectorFloat &outputData) - Failed to run prediction - the model has not been trained." << std::endl;
        return false;
    }

    if( inputData.size() != numVisibleUnits ){
        errorLog << "predict_(VectorFloat &inputData,VectorFloat &outputData) - Failed to run prediction - the input data size (" << inputData.size() << ")";
        errorLog << " does not match the number of visible units (" << numVisibleUnits << "). " << std::endl;
        return false;
    }

    if( outputData.size() != numHiddenUnits ){
        outputData.resize( numHiddenUnits );
    }

    //Scale the data if needed
    if( useScaling ){
        for(UINT i=0; i<numVisibleUnits; i++){
            inputData[i] = grt_scale(inputData[i],ranges[i].minValue,ranges[i].maxValue,0.0,1.0);
        }
    }

    //Propagate the data up through the RBM: h[i] = sigmoid( sum_j W[i][j]*v[j] + b[i] )
    for(UINT i=0; i<numHiddenUnits; i++){
        Float x = 0.0; //The accumulator must be reset for every hidden unit
        for(UINT j=0; j<numVisibleUnits; j++) {
            x += weightsMatrix[i][j] * inputData[j];
        }
        outputData[i] = grt_sigmoid( x + hiddenLayerBias[i] );
    }

    return true;
}
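//For reference, a standalone sketch (plain C++, independent of GRT's types) of the
//same up-propagation, showing the per-unit accumulator reset; rbmUp and its
//parameters are hypothetical names.
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> rbmUp(const std::vector<std::vector<float>> &W,  //W[i][j]: hidden i, visible j
                         const std::vector<float> &v,               //visible unit states
                         const std::vector<float> &b){              //hidden layer biases
    std::vector<float> h(W.size());
    for(std::size_t i=0; i<W.size(); i++){
        float x = 0.0f;                                             //reset per hidden unit
        for(std::size_t j=0; j<v.size(); j++) x += W[i][j]*v[j];
        h[i] = 1.0f/(1.0f + std::exp(-(x + b[i])));                 //logistic sigmoid
    }
    return h;
}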
void ModelUsfCasCor::ProbabilitiesByClass (FeatureVectorPtr   _example,
                                           const MLClassList& _mlClasses,
                                           double*            _probabilities,
                                           RunLog&            _log
                                          )
{
  if (!usfCasCorClassifier)
  {
    KKStr errMsg = "ModelUsfCasCor::ProbabilitiesByClass ***ERROR*** (usfCasCorClassifier == NULL)";
    _log.Level (-1) << endl << endl << errMsg << endl << endl;
    throw KKException (errMsg);
  }

  VectorFloat probabilities;

  MLClassPtr pc1 = NULL;
  MLClassPtr pc2 = NULL;
  MLClassPtr kc  = NULL;
  float pc1p = 0.0f;
  float pc2p = 0.0f;
  float kcp  = 0.0f;

  bool newExampleCreated = false;
  FeatureVectorPtr encodedExample = PrepExampleForPrediction (_example, newExampleCreated);
  usfCasCorClassifier->PredictConfidences (encodedExample,
                                           kc,
                                           pc1, pc1p,
                                           pc2, pc2p,
                                           kcp,
                                           _mlClasses,
                                           probabilities
                                          );
  if (newExampleCreated)
  {
    delete encodedExample;
    encodedExample = NULL;
  }

  if (_mlClasses.size () != probabilities.size ())
  {
    _log.Level (-1) << endl
      << "ModelUsfCasCor::ProbabilitiesByClass   ***ERROR***" << endl
      << "\"_mlClasses.size () != probabilities.size ()\"  This should never happen." << endl
      << endl;
    for (int x = 0; x < _mlClasses.QueueSize (); ++x)
      _probabilities[x] = 0.0;
  }
  else
  {
    for (kkuint32 x = 0; x < probabilities.size (); ++x)
      _probabilities[x] = probabilities[x];
  }

  return;
}  /* ProbabilitiesByClass */
bool FFT::update(const VectorFloat &x){

    if( !initialized ){
        errorLog << "update(const VectorFloat &x) - Not initialized!" << std::endl;
        return false;
    }

    if( x.size() != numInputDimensions ){
        errorLog << "update(const VectorFloat &x) - The size of the input (" << x.size() << ") does not match that of the FeatureExtraction (" << numInputDimensions << ")!" << std::endl;
        return false;
    }

    //Add the current input to the data buffers
    dataBuffer.push_back( x );

    featureDataReady = false;

    if( ++hopCounter == hopSize ){
        hopCounter = 0;
        //Compute the FFT for each dimension
        for(UINT j=0; j<numInputDimensions; j++){

            //Copy the input data for this dimension into the temp buffer
            for(UINT i=0; i<dataBufferSize; i++){
                tempBuffer[i] = dataBuffer[i][j];
            }

            //Compute the FFT
            if( !fft[j].computeFFT( tempBuffer ) ){
                errorLog << "update(const VectorFloat &x) - Failed to compute FFT!" << std::endl;
                return false;
            }
        }

        //Flag that the FFT was computed during this update
        featureDataReady = true;

        //Copy the FFT data to the feature vector
        UINT index = 0;
        for(UINT j=0; j<numInputDimensions; j++){
            if( computeMagnitude ){
                Float *mag = fft[j].getMagnitudeDataPtr();
                for(UINT i=0; i<fft[j].getFFTSize()/2; i++){
                    featureVector[index++] = *mag++;
                }
            }
            if( computePhase ){
                Float *phase = fft[j].getPhaseDataPtr();
                for(UINT i=0; i<fft[j].getFFTSize()/2; i++){
                    featureVector[index++] = *phase++;
                }
            }
        }
    }

    return true;
}
int main (int argc, const char * argv[])
{
    //Load the example data
    ClassificationData data;
    if( !data.load("WiiAccShakeData.grt") ){
        cout << "ERROR: Failed to load data from file!\n";
        return EXIT_FAILURE;
    }

    //The variables used to initialize the MovementIndex feature extraction
    UINT windowSize = 10;
    UINT numDimensions = data.getNumDimensions();

    //Create a new instance of the MovementIndex feature extraction
    MovementIndex movementIndex(windowSize,numDimensions);

    //Loop over the accelerometer data; at each time sample (i) compute the features using the new sample and then print the results
    for(UINT i=0; i<data.getNumSamples(); i++){

        //Compute the features using this new sample
        movementIndex.computeFeatures( data[i].getSample() );

        //Write the input data
        cout << "InputVector: ";
        for(UINT j=0; j<data.getNumDimensions(); j++){
            cout << data[i].getSample()[j] << "\t";
        }

        //Get the latest feature vector
        VectorFloat featureVector = movementIndex.getFeatureVector();

        //Write the features
        cout << "FeatureVector: ";
        for(UINT j=0; j<featureVector.size(); j++){
            cout << featureVector[j];
            if( j != featureVector.size()-1 ) cout << "\t";
        }
        cout << endl;
    }

    //Save the MovementIndex settings to a file
    movementIndex.save("MovementIndexSettings.grt");

    //You can then load the settings again if you need them
    movementIndex.load("MovementIndexSettings.grt");

    return EXIT_SUCCESS;
}
bool LinearRegression::predict_(VectorFloat &inputVector){

    if( !trained ){
        errorLog << "predict_(VectorFloat &inputVector) - Model Not Trained!" << std::endl;
        return false;
    }

    if( inputVector.size() != numInputDimensions ){
        errorLog << "predict_(VectorFloat &inputVector) - The size of the input Vector (" << int( inputVector.size() ) << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }

    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = scale(inputVector[n], inputVectorRanges[n].minValue, inputVectorRanges[n].maxValue, 0, 1);
        }
    }

    //Compute the regression value: y = w0 + sum_j w[j]*x[j]
    regressionData[0] = w0;
    for(UINT j=0; j<numInputDimensions; j++){
        regressionData[0] += inputVector[j] * w[j];
    }

    //Scale the result back to the target range if needed
    if( useScaling ){
        for(UINT n=0; n<numOutputDimensions; n++){
            regressionData[n] = scale(regressionData[n], 0, 1, targetVectorRanges[n].minValue, targetVectorRanges[n].maxValue);
        }
    }

    return true;
}
bool RegressionTree::predict_(VectorFloat &inputVector){

    if( !trained ){
        Regressifier::errorLog << "predict_(VectorFloat &inputVector) - Model Not Trained!" << std::endl;
        return false;
    }

    if( tree == NULL ){
        Regressifier::errorLog << "predict_(VectorFloat &inputVector) - Tree pointer is null!" << std::endl;
        return false;
    }

    if( inputVector.size() != numInputDimensions ){
        Regressifier::errorLog << "predict_(VectorFloat &inputVector) - The size of the input Vector (" << inputVector.size() << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }

    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = scale(inputVector[n], inputVectorRanges[n].minValue, inputVectorRanges[n].maxValue, 0, 1);
        }
    }

    if( !tree->predict( inputVector, regressionData ) ){
        Regressifier::errorLog << "predict_(VectorFloat &inputVector) - Failed to predict!" << std::endl;
        return false;
    }

    return true;
}
VectorFloat MovingAverageFilter::filter(const VectorFloat &x){

    //If the filter has not been initialised then return an empty vector, otherwise filter x and return y
    if( !initialized ){
        errorLog << "filter(const VectorFloat &x) - The filter has not been initialized!" << std::endl;
        return VectorFloat();
    }

    if( x.size() != numInputDimensions ){
        errorLog << "filter(const VectorFloat &x) - The size of the input vector (" << x.size() << ") does not match that of the number of dimensions of the filter (" << numInputDimensions << ")!" << std::endl;
        return VectorFloat();
    }

    if( ++inputSampleCounter > filterSize ) inputSampleCounter = filterSize;

    //Add the new value to the buffer
    dataBuffer.push_back( x );

    //Average over however many samples have been seen, up to the filter size
    for(unsigned int j=0; j<numInputDimensions; j++){
        processedData[j] = 0;
        for(unsigned int i=0; i<inputSampleCounter; i++) {
            processedData[j] += dataBuffer[i][j];
        }
        processedData[j] /= Float(inputSampleCounter);
    }

    return processedData;
}
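//For reference, a standalone sketch (plain C++, independent of GRT's types) of a
//single-channel moving average with the same warm-up behaviour, i.e. it averages
//over however many samples have arrived until the window is full;
//SimpleMovingAverage is a hypothetical name.
#include <cstddef>
#include <vector>

class SimpleMovingAverage{
public:
    explicit SimpleMovingAverage(std::size_t windowSize)   //windowSize must be >= 1
        : buffer(windowSize, 0.0f), head(0), count(0) {}
    float filter(float x){
        buffer[head] = x;
        head = (head + 1) % buffer.size();                 //circular buffer write position
        if( count < buffer.size() ) count++;               //grow until the window is full
        float sum = 0.0f;
        for(std::size_t i=0; i<count; i++) sum += buffer[i];
        return sum / static_cast<float>(count);
    }
private:
    std::vector<float> buffer;
    std::size_t head, count;
};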
VectorFloat Derivative::computeDerivative(const VectorFloat &x){

    if( !initialized ){
        errorLog << "computeDerivative(const VectorFloat &x) - Not Initialized!" << std::endl;
        return VectorFloat();
    }

    if( x.size() != numInputDimensions ){
        errorLog << "computeDerivative(const VectorFloat &x) - The Number Of Input Dimensions (" << numInputDimensions << ") does not match the size of the input vector (" << x.size() << ")!" << std::endl;
        return VectorFloat();
    }

    //Optionally low-pass filter the input before differentiating
    VectorFloat y;
    if( filterData ){
        y = filter.filter( x );
    }else y = x;

    //First derivative: the first difference divided by the sample period
    for(UINT n=0; n<numInputDimensions; n++){
        processedData[n] = (y[n]-yy[n])/delta;
        yy[n] = y[n];
    }

    //Second derivative: difference the first derivative again
    if( derivativeOrder == SECOND_DERIVATIVE ){
        Float tmp = 0;
        for(UINT n=0; n<numInputDimensions; n++){
            tmp = processedData[n];
            processedData[n] = (processedData[n]-yyy[n])/delta;
            yyy[n] = tmp;
        }
    }

    return processedData;
}
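//For reference, a standalone sketch (plain C++, independent of GRT's types) of the
//first-difference derivative above; FirstDifference is a hypothetical name.
struct FirstDifference{
    float prev;     //previous sample
    float delta;    //sample period, e.g. 1.0f/sampleRate
    explicit FirstDifference(float delta) : prev(0.0f), delta(delta) {}
    float update(float y){
        float d = (y - prev) / delta;   //dy/dt ~= (y[n] - y[n-1]) / delta
        prev = y;
        return d;
    }
};
//Chaining two FirstDifference instances gives the second derivative, as in the
//SECOND_DERIVATIVE branch above.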
bool BAG::setWeights(const VectorFloat &weights){

    if( this->weights.size() != weights.size() ){
        return false;
    }
    this->weights = weights;
    return true;
}
bool Softmax::predict_(VectorFloat &inputVector){

    if( !trained ){
        errorLog << "predict_(VectorFloat &inputVector) - Model Not Trained!" << std::endl;
        return false;
    }

    predictedClassLabel = 0;
    maxLikelihood = -10000;

    if( inputVector.size() != numInputDimensions ){
        errorLog << "predict_(VectorFloat &inputVector) - The size of the input vector (" << inputVector.size() << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }

    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = scale(inputVector[n], ranges[n].minValue, ranges[n].maxValue, 0, 1);
        }
    }

    if( classLikelihoods.size() != numClasses ) classLikelihoods.resize(numClasses,0);
    if( classDistances.size() != numClasses ) classDistances.resize(numClasses,0);

    //Loop over each class and compute the likelihood of the input data coming from class k. Pick the class with the highest likelihood
    Float sum = 0;
    Float bestEstimate = -grt_numeric_limits< Float >::max();
    UINT bestIndex = 0;
    for(UINT k=0; k<numClasses; k++){
        Float estimate = models[k].compute( inputVector );

        if( estimate > bestEstimate ){
            bestEstimate = estimate;
            bestIndex = k;
        }

        classDistances[k] = estimate;
        classLikelihoods[k] = estimate;
        sum += estimate;
    }

    if( sum > 1.0e-5 ){
        for(UINT k=0; k<numClasses; k++){
            classLikelihoods[k] /= sum;
        }
    }else{
        //If the sum is less than the value above then none of the models found a positive class
        maxLikelihood = bestEstimate;
        predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
        return true;
    }

    maxLikelihood = classLikelihoods[bestIndex];
    predictedClassLabel = classLabels[bestIndex];

    return true;
}
bool MinDist::predict_(VectorFloat &inputVector){

    predictedClassLabel = 0;
    maxLikelihood = 0;

    if( !trained ){
        errorLog << "predict_(VectorFloat &inputVector) - MinDist Model Not Trained!" << std::endl;
        return false;
    }

    if( inputVector.size() != numInputDimensions ){
        errorLog << "predict_(VectorFloat &inputVector) - The size of the input vector (" << inputVector.size() << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }

    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = grt_scale(inputVector[n], ranges[n].minValue, ranges[n].maxValue, 0.0, 1.0);
        }
    }

    if( classLikelihoods.size() != numClasses ) classLikelihoods.resize(numClasses,0);
    if( classDistances.size() != numClasses ) classDistances.resize(numClasses,0);

    Float sum = 0;
    Float minDist = grt_numeric_limits< Float >::max();
    for(UINT k=0; k<numClasses; k++){

        //Compute the distance for class k
        classDistances[k] = models[k].predict( inputVector );

        //Keep track of the best value
        if( classDistances[k] < minDist ){
            minDist = classDistances[k];
            predictedClassLabel = k;
        }

        //Set the class likelihoods as 1.0 / dist[k]; the small number is to stop divide by zero
        classLikelihoods[k] = 1.0 / (classDistances[k] + 0.0001);
        sum += classLikelihoods[k];
    }

    //Normalize the class likelihoods
    if( sum != 0 ){
        for(UINT k=0; k<numClasses; k++){
            classLikelihoods[k] /= sum;
        }
    }
    maxLikelihood = classLikelihoods[predictedClassLabel];

    if( useNullRejection ){
        //Check to see if the best result is within the model's rejection threshold
        if( minDist <= models[predictedClassLabel].getRejectionThreshold() )
            predictedClassLabel = models[predictedClassLabel].getClassLabel();
        else predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
    }else predictedClassLabel = models[predictedClassLabel].getClassLabel();

    return true;
}
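//For reference, a standalone sketch (plain C++, independent of GRT's types) of the
//distance-to-likelihood mapping above; distancesToLikelihoods is a hypothetical name.
#include <cstddef>
#include <vector>

std::vector<float> distancesToLikelihoods(const std::vector<float> &dist, float eps = 0.0001f){
    std::vector<float> lik(dist.size());
    float sum = 0.0f;
    for(std::size_t k=0; k<dist.size(); k++){
        lik[k] = 1.0f / (dist[k] + eps);    //smaller distance -> larger likelihood; eps avoids divide by zero
        sum += lik[k];
    }
    if( sum > 0.0f ){
        for(std::size_t k=0; k<lik.size(); k++) lik[k] /= sum; //normalize so the likelihoods sum to 1
    }
    return lik;
}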
bool MovementDetector::predict_( VectorFloat &input ){

    movementDetected = false;
    noMovementDetected = false;

    if( !trained ){
        errorLog << "predict_(VectorFloat &input) - MovementDetector Model Not Trained!" << std::endl;
        return false;
    }

    if( input.size() != numInputDimensions ){
        errorLog << "predict_(VectorFloat &input) - The size of the input vector (" << input.size() << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }

    //Compute the movement index, unless we are in the first sample
    Float x = 0;
    if( !firstSample ){
        for(UINT n=0; n<numInputDimensions; n++){
            x += SQR( input[n] - lastSample[n] );
        }
        movementIndex = (movementIndex*gamma) + sqrt( x );
    }

    //Flag that this is not the first sample and store the input for the next prediction
    firstSample = false;
    lastSample = input;

    switch( state ){
        case SEARCHING_FOR_MOVEMENT:
            if( movementIndex >= upperThreshold ){
                movementDetected = true;
                state = SEARCHING_FOR_NO_MOVEMENT;
            }
            break;
        case SEARCHING_FOR_NO_MOVEMENT:
            if( movementIndex < lowerThreshold ){
                noMovementDetected = true;
                state = SEARCH_TIMEOUT;
                searchTimer.start();
            }
            break;
        case SEARCH_TIMEOUT:
            //searchTimeout is cast because of a C4018 warning on visual (signed/unsigned incompatibility)
            //Once the timeout has elapsed, resume searching for movement
            if( searchTimer.getMilliSeconds() >= (signed long)searchTimeout ){
                state = SEARCHING_FOR_MOVEMENT;
                searchTimer.stop();
            }
            break;
    }

    return true;
}
Float Derivative::computeDerivative(const Float x){

    if( numInputDimensions != 1 ){
        errorLog << "computeDerivative(const Float x) - The Number Of Input Dimensions is not 1! NumInputDimensions: " << numInputDimensions << std::endl;
        return 0;
    }

    VectorFloat y = computeDerivative( VectorFloat(1,x) );

    if( y.size() == 0 ) return 0;

    return y[0];
}
Float MovingAverageFilter::filter(const Float x){

    //If the filter has not been initialised then return 0, otherwise filter x and return y
    if( !initialized ){
        errorLog << "filter(const Float x) - The filter has not been initialized!" << std::endl;
        return 0;
    }

    VectorFloat y = filter(VectorFloat(1,x));

    if( y.size() == 0 ) return 0;
    return y[0];
}
Float SavitzkyGolayFilter::filter(const Float x){

    //If the filter has not been initialised then return 0, otherwise filter x and return y
    if( !initialized ){
        errorLog << "filter(Float x) - The filter has not been initialized!" << std::endl;
        return 0;
    }

    VectorFloat y = filter(VectorFloat(1,x));

    if( y.size() > 0 ) return y[0];
    return 0;
}
VectorFloat SavitzkyGolayFilter::filter(const VectorFloat &x){

    if( !initialized ){
        errorLog << "filter(const VectorFloat &x) - Not Initialized!" << std::endl;
        return VectorFloat();
    }

    if( x.size() != numInputDimensions ){
        errorLog << "filter(const VectorFloat &x) - The Number Of Input Dimensions (" << numInputDimensions << ") does not match the size of the input vector (" << x.size() << ")!" << std::endl;
        return VectorFloat();
    }

    //Add the new input data to the data buffer
    data.push_back( x );

    //Filter the data
    for(UINT j=0; j<x.size(); j++){
        processedData[j] = 0;
        for(UINT i=0; i<numPoints; i++)
            processedData[j] += data[i][j] * coeff[i];
    }

    return processedData;
}
bool TimeseriesBuffer::computeFeatures(const VectorFloat &inputVector){

    if( !initialized ){
        errorLog << "computeFeatures(const VectorFloat &inputVector) - Not initialized!" << std::endl;
        return false;
    }

    if( inputVector.size() != numInputDimensions ){
        errorLog << "computeFeatures(const VectorFloat &inputVector) - The size of the inputVector (" << inputVector.size() << ") does not match that of the filter (" << numInputDimensions << ")!" << std::endl;
        return false;
    }

    update( inputVector );

    return true;
}
bool DoubleMovingAverageFilter::process(const VectorFloat &inputVector){

    if( !initialized ){
        errorLog << "process(const VectorFloat &inputVector) - The filter has not been initialized!" << std::endl;
        return false;
    }

    if( inputVector.getSize() != numInputDimensions ){
        errorLog << "process(const VectorFloat &inputVector) - The size of the inputVector (" << inputVector.getSize() << ") does not match that of the filter (" << numInputDimensions << ")!" << std::endl;
        return false;
    }

    processedData = filter( inputVector );

    if( processedData.size() == numOutputDimensions ) return true;
    return false;
}
bool SavitzkyGolayFilter::process(const VectorFloat &inputVector){

    if( !initialized ){
        errorLog << "process(const VectorFloat &inputVector) - Not initialized!" << std::endl;
        return false;
    }

    if( inputVector.size() != numInputDimensions ){
        errorLog << "process(const VectorFloat &inputVector) - The size of the inputVector (" << inputVector.size() << ") does not match that of the filter (" << numInputDimensions << ")!" << std::endl;
        return false;
    }

    processedData = filter( inputVector );

    if( processedData.size() == numOutputDimensions ) return true;
    return false;
}
VectorFloat LowPassFilter::filter(const VectorFloat &x){

    if( !initialized ){
        errorLog << "filter(const VectorFloat &x) - Not Initialized!" << std::endl;
        return VectorFloat();
    }

    if( x.size() != numInputDimensions ){
        errorLog << "filter(const VectorFloat &x) - The Number Of Input Dimensions (" << numInputDimensions << ") does not match the size of the input vector (" << x.size() << ")!" << std::endl;
        return VectorFloat();
    }

    //Exponential moving average filter: y[n] = y[n-1]*alpha + (1-alpha)*gain*x[n]
    for(UINT n=0; n<numInputDimensions; n++){
        processedData[n] = (yy[n] * filterFactor) + (x[n] * (1.0 - filterFactor)) * gain;
        yy[n] = processedData[n];
    }
    return processedData;
}
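//For reference, a standalone sketch (plain C++, independent of GRT's types) of the
//exponential-moving-average update used above, y[n] = alpha*y[n-1] + (1-alpha)*gain*x[n];
//ExpMovingAverage is a hypothetical name.
struct ExpMovingAverage{
    float alpha;    //filter factor in [0,1]; larger values smooth more but add lag
    float gain;     //output gain applied to the input term
    float y;        //last output
    ExpMovingAverage(float alpha, float gain) : alpha(alpha), gain(gain), y(0.0f) {}
    float filter(float x){
        y = alpha*y + (1.0f - alpha)*gain*x;
        return y;
    }
};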
bool ClassificationDataStream::addSample(const UINT classLabel,const VectorFloat &sample){

    if( numDimensions != sample.size() ){
        errorLog << "addSample(const UINT classLabel, VectorFloat sample) - the size of the new sample (" << sample.size() << ") does not match the number of dimensions of the dataset (" << numDimensions << ")" << std::endl;
        return false;
    }

    bool searchForNewClass = true;
    if( trackingClass ){
        if( classLabel != lastClassID ){
            //The class ID has changed so update the time series tracker
            timeSeriesPositionTracker[ timeSeriesPositionTracker.size()-1 ].setEndIndex( totalNumSamples-1 );
        }else searchForNewClass = false;
    }

    if( searchForNewClass ){
        bool newClass = true;
        //Search to see if this class has been found before
        for(UINT k=0; k<classTracker.size(); k++){
            if( classTracker[k].classLabel == classLabel ){
                newClass = false;
                classTracker[k].counter++;
            }
        }
        if( newClass ){
            ClassTracker newCounter(classLabel,1);
            classTracker.push_back( newCounter );
        }

        //Set the timeSeriesPositionTracker start position
        trackingClass = true;
        lastClassID = classLabel;
        TimeSeriesPositionTracker newTracker(totalNumSamples,0,classLabel);
        timeSeriesPositionTracker.push_back( newTracker );
    }

    ClassificationSample labelledSample(classLabel,sample);
    data.push_back( labelledSample );
    totalNumSamples++;

    return true;
}
bool KNN::predict_(VectorFloat &inputVector){

    if( !trained ){
        errorLog << "predict_(VectorFloat &inputVector) - KNN model has not been trained" << std::endl;
        return false;
    }

    if( inputVector.size() != numInputDimensions ){
        errorLog << "predict_(VectorFloat &inputVector) - the size of the input vector " << inputVector.size() << " does not match the number of features " << numInputDimensions << std::endl;
        return false;
    }

    //Scale the input vector if needed
    if( useScaling ){
        for(UINT i=0; i<numInputDimensions; i++){
            inputVector[i] = scale(inputVector[i], ranges[i].minValue, ranges[i].maxValue, 0, 1);
        }
    }

    //Run the prediction
    return predict(inputVector,K);
}
bool PrincipalComponentAnalysis::setModel( const VectorFloat &mean, const MatrixFloat &eigenvectors ){

    if( (UINT)mean.size() != eigenvectors.getNumCols() ){
        return false;
    }

    trained = true;
    numInputDimensions = eigenvectors.getNumCols();
    numPrincipalComponents = eigenvectors.getNumRows();
    this->mean = mean;
    stdDev.clear();
    componentWeights.clear();
    eigenvalues.clear();
    sortedEigenvalues.clear();
    this->eigenvectors = eigenvectors;

    //The eigenvectors are already sorted, so the sorted eigenvalues just hold the default indices
    for(UINT i=0; i<numPrincipalComponents; i++){
        sortedEigenvalues.push_back( IndexedDouble(i,0.0) );
    }

    return true;
}
bool PrincipalComponentAnalysis::project(const VectorFloat &data,VectorFloat &prjData){

    const unsigned int N = (unsigned int)data.size();

    if( !trained ){
        warningLog << "project(const VectorFloat &data,VectorFloat &prjData) - The PrincipalComponentAnalysis module has not been trained!" << std::endl;
        return false;
    }

    if( N != numInputDimensions ){
        warningLog << "project(const VectorFloat &data,VectorFloat &prjData) - The size of the input vector (" << N << ") does not match the number of input dimensions (" << numInputDimensions << ")!" << std::endl;
        return false;
    }

    VectorFloat msData = data;

    if( normData ){
        //Mean subtract the data and divide by the standard deviation
        for(UINT j=0; j<numInputDimensions; j++)
            msData[j] = (msData[j]-mean[j])/stdDev[j];
    }else{
        //Mean subtract the data
        for(UINT j=0; j<numInputDimensions; j++)
            msData[j] -= mean[j];
    }

    //Projected data
    prjData.resize( numPrincipalComponents );
    for(UINT i=0; i<numPrincipalComponents; i++){ //For each principal component
        prjData[i]=0;
        for(UINT j=0; j<N; j++) //For each feature
            prjData[i] += msData[j] * eigenvectors[j][sortedEigenvalues[i].index];
    }

    return true;
}
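//For reference, a standalone sketch (plain C++, independent of GRT's types) of the
//projection above: mean-subtract (optionally z-normalize), then take the dot product
//with each retained eigenvector; pcaProject is a hypothetical name, and here each
//eigenvector is stored as a row, eigvecs[k][j] for component k and feature j.
#include <cstddef>
#include <vector>

std::vector<float> pcaProject(const std::vector<float> &x,
                              const std::vector<float> &mean,
                              const std::vector<std::vector<float>> &eigvecs){
    std::vector<float> out(eigvecs.size(), 0.0f);
    for(std::size_t k=0; k<eigvecs.size(); k++){
        for(std::size_t j=0; j<x.size(); j++){
            out[k] += (x[j] - mean[j]) * eigvecs[k][j];
        }
    }
    return out;
}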
VectorFloat HighPassFilter::filter(const VectorFloat &x){

    if( !initialized ){
        errorLog << "filter(const VectorFloat &x) - Not Initialized!" << std::endl;
        return VectorFloat();
    }

    if( x.size() != numInputDimensions ){
        errorLog << "filter(const VectorFloat &x) - The Number Of Input Dimensions (" << numInputDimensions << ") does not match the size of the input vector (" << x.size() << ")!" << std::endl;
        return VectorFloat();
    }

    for(UINT n=0; n<numInputDimensions; n++){
        //Compute the new output
        yy[n] = filterFactor * (yy[n] + x[n] - xx[n]) * gain;

        //Store the current input
        xx[n] = x[n];

        //Store the current output in processedData so it can be accessed by the base class
        processedData[n] = yy[n];
    }
    return processedData;
}
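//For reference, a standalone sketch (plain C++, independent of GRT's types) of the
//one-pole high-pass recurrence above, y[n] = alpha*(y[n-1] + x[n] - x[n-1])*gain;
//OnePoleHighPass is a hypothetical name.
struct OnePoleHighPass{
    float alpha;    //filter factor; values closer to 1 pass more low-frequency content
    float gain;     //output gain, applied every step and fed back
    float yPrev;    //last output
    float xPrev;    //last input
    OnePoleHighPass(float alpha, float gain) : alpha(alpha), gain(gain), yPrev(0.0f), xPrev(0.0f) {}
    float filter(float x){
        yPrev = alpha * (yPrev + x - xPrev) * gain;
        xPrev = x;
        return yPrev;
    }
};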
bool Derivative::process(const VectorFloat &inputVector){

    if( !initialized ){
        errorLog << "process(const VectorFloat &inputVector) - Not initialized!" << std::endl;
        return false;
    }

    if( inputVector.getSize() != numInputDimensions ){
        errorLog << "process(const VectorFloat &inputVector) - The size of the inputVector (" << inputVector.getSize() << ") does not match that of the filter (" << numInputDimensions << ")!" << std::endl;
        return false;
    }

    computeDerivative( inputVector );

    if( processedData.size() == numOutputDimensions ) return true;
    return false;
}
int main(int argc, const char *argv[])
{
    static bool is_running = true;
    string input_file = "-";
    cmdline::parser c;

    c.add<int>   ("verbose", 'v', "verbosity level: 0-4", false, 0);
    c.add        ("help",    'h', "print this message");
    c.add<string>("type",    't', "force classification, regression or timeseries input", false, "",
                  cmdline::oneof<string>("classification", "regression", "timeseries", "auto"));
    c.footer     ("<pre-processor> [<filename>]");

    /* parse common options */
    bool parse_ok = c.parse(argc,argv,false) && !c.exist("help");
    set_verbosity(c.get<int>("verbose"));

    /* do we have a pre-processor? */
    string preproc_name = c.rest().size() > 0 ? c.rest()[0] : "list";
    if (preproc_name == "list") {
        cout << c.usage() << endl;
        cout << list_preprocessors();
        exit(0);
    }

    PreProcessing *pp = apply_cmdline_args(preproc_name,c,1,input_file);
    if (pp==NULL) exit(-1);

    if (!parse_ok) {
        cerr << c.usage() << endl << c.error() << endl;
        exit(-1);
    }

    /* do we read from a file or from stdin? */
    ifstream fin; fin.open(input_file);
    istream &in = input_file=="-" ? cin : fin;
    string line;
    int linenum=0;

    while(getline(in,line)) {
        stringstream ss(line);

        /* check for empty lines before indexing into the string */
        if (line.size() == 0) { cout << endl; continue; }
        if (line[0] == '#')   { cout << line << endl; continue; }

        try {
            string label;
            ss >> label;
            cout << label << "\t";
        } catch (exception &e) { /* unlabeled data */ }

        VectorFloat vals;
        double value;
        while (ss >> value)
            vals.push_back(value);

        if (linenum == 0) {
            // workaround: the pre-processor resets only once initialized, it only
            // initializes once data has been seen, and it only sets the number of
            // output dimensions on reset, so prime it with a dummy sample:
            pp->setNumInputDimensions(vals.size());
            pp->process(VectorFloat(vals.size(), 1.));
            pp->reset();
        }

        bool ok = pp->process(vals);
        if (!ok) {
            cerr << "unable to process line " << linenum << endl;
            exit(-1);
        }

        for(auto value : pp->getProcessedData())
            cout << value << "\t";
        cout << endl;

        linenum++;
    }
}
int main(int argc, const char * argv[]){

    //Load the training data
    TimeSeriesClassificationData trainingData;
    if( !trainingData.load("HMMTrainingData.grt") ){
        cout << "ERROR: Failed to load training data!\n";
        return EXIT_FAILURE;
    }

    //Remove 20% of the training data to use as test data
    TimeSeriesClassificationData testData = trainingData.partition( 80 );

    //The input to the HMM must be a quantized discrete value
    //We therefore use a KMeansQuantizer to convert the N-dimensional continuous data into 1-dimensional discrete data
    const UINT NUM_SYMBOLS = 10;
    KMeansQuantizer quantizer( NUM_SYMBOLS );

    //Train the quantizer using the training data
    if( !quantizer.train( trainingData ) ){
        cout << "ERROR: Failed to train quantizer!\n";
        return EXIT_FAILURE;
    }

    //Quantize the training data
    TimeSeriesClassificationData quantizedTrainingData( 1 );
    for(UINT i=0; i<trainingData.getNumSamples(); i++){

        UINT classLabel = trainingData[i].getClassLabel();
        MatrixDouble quantizedSample;

        for(UINT j=0; j<trainingData[i].getLength(); j++){
            quantizer.quantize( trainingData[i].getData().getRow(j) );
            quantizedSample.push_back( quantizer.getFeatureVector() );
        }

        if( !quantizedTrainingData.addSample(classLabel, quantizedSample) ){
            cout << "ERROR: Failed to quantize training data!\n";
            return EXIT_FAILURE;
        }
    }

    //Create a new HMM instance
    HMM hmm;

    //Set the HMM as a Discrete HMM
    hmm.setHMMType( HMM_DISCRETE );

    //Set the number of states in each model
    hmm.setNumStates( 4 );

    //Set the number of symbols in each model, this must match the number of symbols in the quantizer
    hmm.setNumSymbols( NUM_SYMBOLS );

    //Set the HMM model type to LEFTRIGHT with a delta of 1
    hmm.setModelType( HMM_LEFTRIGHT );
    hmm.setDelta( 1 );

    //Set the training parameters
    hmm.setMinChange( 1.0e-5 );
    hmm.setMaxNumEpochs( 100 );
    hmm.setNumRandomTrainingIterations( 20 );

    //Train the HMM model
    if( !hmm.train( quantizedTrainingData ) ){
        cout << "ERROR: Failed to train the HMM model!\n";
        return EXIT_FAILURE;
    }

    //Save the HMM model to a file
    if( !hmm.save( "HMMModel.grt" ) ){
        cout << "ERROR: Failed to save the model to a file!\n";
        return EXIT_FAILURE;
    }

    //Load the HMM model from a file
    if( !hmm.load( "HMMModel.grt" ) ){
        cout << "ERROR: Failed to load the model from a file!\n";
        return EXIT_FAILURE;
    }

    //Quantize the test data
    TimeSeriesClassificationData quantizedTestData( 1 );
    for(UINT i=0; i<testData.getNumSamples(); i++){

        UINT classLabel = testData[i].getClassLabel();
        MatrixDouble quantizedSample;

        for(UINT j=0; j<testData[i].getLength(); j++){
            quantizer.quantize( testData[i].getData().getRow(j) );
            quantizedSample.push_back( quantizer.getFeatureVector() );
        }

        if( !quantizedTestData.addSample(classLabel, quantizedSample) ){
            cout << "ERROR: Failed to quantize test data!\n";
            return EXIT_FAILURE;
        }
    }

    //Compute the accuracy of the HMM models using the test data
    double numCorrect = 0;
    double numTests = 0;
    for(UINT i=0; i<quantizedTestData.getNumSamples(); i++){

        UINT classLabel = quantizedTestData[i].getClassLabel();
        hmm.predict( quantizedTestData[i].getData() );

        if( classLabel == hmm.getPredictedClassLabel() ) numCorrect++;
        numTests++;

        VectorFloat classLikelihoods = hmm.getClassLikelihoods();
        VectorFloat classDistances = hmm.getClassDistances();

        cout << "ClassLabel: " << classLabel;
        cout << " PredictedClassLabel: " << hmm.getPredictedClassLabel();
        cout << " MaxLikelihood: " << hmm.getMaximumLikelihood();

        cout << " ClassLikelihoods: ";
        for(UINT k=0; k<classLikelihoods.size(); k++){
            cout << classLikelihoods[k] << "\t";
        }

        cout << "ClassDistances: ";
        for(UINT k=0; k<classDistances.size(); k++){
            cout << classDistances[k] << "\t";
        }
        cout << endl;
    }

    cout << "Test Accuracy: " << numCorrect/numTests*100.0 << endl;

    return EXIT_SUCCESS;
}