Example #1
VectorFloat DoubleMovingAverageFilter::filter(const VectorFloat &x){
    
    //If the filter has not been initialized then return an empty vector, otherwise filter x and return y
    if( !initialized ){
        errorLog << "filter(const VectorFloat &x) - The filter has not been initialized!" << std::endl;
        return VectorFloat();
    }
    
    if( x.getSize() != numInputDimensions ){
        errorLog << "filter(const VectorFloat &x) - The size of the input vector (" << x.getSize() << ") does not match that of the number of dimensions of the filter (" << numInputDimensions << ")!" << std::endl;
        return VectorFloat();
    }
    
    //Perform the first filter
    VectorFloat y = filter1.filter( x );
    
    if( y.getSize() == 0 ) return y;
    
    //Perform the second filter
    VectorFloat yy = filter2.filter( y );
    
    if( yy.getSize() == 0 ) return yy; //Return the empty vector to signal that the second filter stage failed
    
    //Account for the filter lag
    const UINT N = y.getSize();
    for(UINT i=0; i<N; i++){
        yy[i] = y[i] + (y[i] - yy[i]); 
        processedData[i] = yy[i];
    }
    
    return yy;
}
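For context, here is a minimal sketch of how the vector overload above is typically driven. It assumes the usual GRT pre-processing constructor signature (filter size, number of dimensions), which is not shown in this snippet:

#include <GRT/GRT.h>
#include <cmath>
#include <cstdlib>
using namespace GRT;

int main(){
    //A minimal sketch, assuming a (filterSize, numDimensions) constructor
    DoubleMovingAverageFilter dmaFilter( 5, 1 ); //Window of 5 samples, 1-dimensional signal

    //Filter a short synthetic signal; the lag compensation (y + (y - yy)) above
    //makes the double moving average track trends better than a single pass
    for(UINT i=0; i<100; i++){
        VectorFloat x(1);
        x[0] = sin( i * 0.1 );
        VectorFloat y = dmaFilter.filter( x );
        if( y.getSize() == 0 ) return EXIT_FAILURE; //An empty vector signals an error
    }

    return EXIT_SUCCESS;
}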
Example #2
File: Softmax.cpp Project: nickgillian/grt
bool Softmax::predict_(VectorFloat &inputVector){
    
    if( !trained ){
        errorLog << __GRT_LOG__ << " Model Not Trained!" << std::endl;
        return false;
    }
    
    predictedClassLabel = 0;
    maxLikelihood = -10000;
    
    if( inputVector.getSize() != numInputDimensions ){
        errorLog << __GRT_LOG__ << " The size of the input vector (" << inputVector.getSize() << ") does not match the num features in the model (" << numInputDimensions << ")!" << std::endl;
        return false;
    }
    
    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = scale(inputVector[n], ranges[n].minValue, ranges[n].maxValue, 0, 1);
        }
    }
    
    if( classLikelihoods.size() != numClasses ) classLikelihoods.resize(numClasses,0);
    if( classDistances.size() != numClasses ) classDistances.resize(numClasses,0);
    
    //Loop over each class and compute the likelihood of the input data coming from class k. Pick the class with the highest likelihood
    Float sum = 0;
    Float bestEstimate = -grt_numeric_limits< Float >::max();
    UINT bestIndex = 0;
    for(UINT k=0; k<numClasses; k++){
        Float estimate = models[k].compute( inputVector );
        
        if( estimate > bestEstimate ){
            bestEstimate = estimate;
            bestIndex = k;
        }
        
        classDistances[k] = estimate;
        classLikelihoods[k] = estimate;
        sum += estimate;
    }
    
    if( sum > 1.0e-5 ){
        for(UINT k=0; k<numClasses; k++){
            classLikelihoods[k] /= sum;
        }
    }else{
        //If the sum is less than the value above then none of the models found a positive class
        maxLikelihood = bestEstimate;
        predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
        return true;
    }
    maxLikelihood = classLikelihoods[bestIndex];
    predictedClassLabel = classLabels[bestIndex];
    
    return true;
}
Example #3
bool ClassificationData::addSample(const UINT classLabel,const VectorFloat &sample){
    
	if( sample.getSize() != numDimensions ){
        if( totalNumSamples == 0 ){
            warningLog << "addSample(const UINT classLabel, VectorFloat &sample) - the size of the new sample (" << sample.getSize() << ") does not match the number of dimensions of the dataset (" << numDimensions << "), setting dimensionality to: " << sample.getSize() << std::endl;
            numDimensions = sample.getSize();
        }else{
            errorLog << "addSample(const UINT classLabel, VectorFloat &sample) - the size of the new sample (" << sample.getSize() << ") does not match the number of dimensions of the dataset (" << numDimensions << ")" << std::endl;
            return false;
        }
    }

    //The class label must be greater than zero (as zero is used for the null rejection class label)
    if( classLabel == GRT_DEFAULT_NULL_CLASS_LABEL && !allowNullGestureClass ){
        errorLog << "addSample(const UINT classLabel, VectorFloat &sample) - the class label can not be 0!" << std::endl;
        return false;
    }

    //The dataset has changed so flag that any previous cross validation setup will now not work
    crossValidationSetup = false;
    crossValidationIndexs.clear();

	ClassificationSample newSample(classLabel,sample);
	data.push_back( newSample );
	totalNumSamples++;

	if( classTracker.getSize() == 0 ){
		ClassTracker tracker(classLabel,1);
		classTracker.push_back(tracker);
	}else{
		bool labelFound = false;
		for(UINT i=0; i<classTracker.getSize(); i++){
			if( classLabel == classTracker[i].classLabel ){
				classTracker[i].counter++;
				labelFound = true;
				break;
			}
		}
		if( !labelFound ){
			ClassTracker tracker(classLabel,1);
			classTracker.push_back(tracker);
		}
	}

    //Update the class labels
    sortClassLabels();

	return true;
}
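A hedged usage sketch for addSample. The setNumDimensions and setAllowNullGestureClass calls are assumptions based on the standard GRT ClassificationData API rather than anything shown above:

#include <GRT/GRT.h>
#include <cstdlib>
using namespace GRT;

int main(){
    //A minimal sketch, assuming the standard ClassificationData API
    ClassificationData data;
    data.setNumDimensions( 3 );

    VectorFloat sample(3);
    sample[0] = 1.0; sample[1] = 2.0; sample[2] = 3.0;

    //Class labels must be >= 1 by default, as 0 is reserved for null rejection
    if( !data.addSample( 1, sample ) ) return EXIT_FAILURE;

    //To store label-0 (null gesture) samples, enable them first (method name is an assumption)
    //data.setAllowNullGestureClass( true );

    return EXIT_SUCCESS;
}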
Example #4
VectorFloat TimeseriesBuffer::update(const VectorFloat &x){
    
    if( !initialized ){
        errorLog << "update(const VectorFloat &x) - Not Initialized!" << std::endl;
        return VectorFloat();
    }
    
    if( x.getSize() != numInputDimensions ){
        errorLog << "update(const VectorFloat &x)- The Number Of Input Dimensions (" << numInputDimensions << ") does not match the size of the input vector (" << x.getSize() << ")!" << std::endl;
        return VectorFloat();
    }
    
    //Add the new data to the buffer
    dataBuffer.push_back( x );
    
    //Copy the buffered data into the feature vector
    UINT colIndex = 0;
    for(UINT j=0; j<numInputDimensions; j++){
        for(UINT i=0; i<dataBuffer.getSize(); i++){
            featureVector[ colIndex++ ] = dataBuffer[i][j];
        }
    }
    
    //Flag that the feature data has been computed
    if( dataBuffer.getBufferFilled() ){
        featureDataReady = true;
    }else featureDataReady = false;
    
    return featureVector;
}
Example #5
UINT RBMQuantizer::quantize(const VectorFloat &inputVector){
    
    if( !trained ){
        errorLog << "quantize(const VectorFloat &inputVector) - The quantizer model has not been trained!" << std::endl;
        return 0;
    }
    
    if( inputVector.getSize() != numInputDimensions ){
        errorLog << "quantize(const VectorFloat &inputVector) - The size of the inputVector (" << inputVector.getSize() << ") does not match that of the filter (" << numInputDimensions << ")!" << std::endl;
        return 0;
    }
    
    if( !rbm.predict( inputVector ) ){
        errorLog << "quantize(const VectorFloat &inputVector) - Failed to quantize input!" << std::endl;
        return 0;
    }
    
    quantizationDistances = rbm.getOutputData();
    
    //Search for the neuron with the maximum output
    UINT quantizedValue = 0;
    Float maxValue = 0;
    for(UINT k=0; k<numClusters; k++){
        if( quantizationDistances[k] > maxValue ){
            maxValue = quantizationDistances[k];
            quantizedValue = k;
        }
    }
    
    featureVector[0] = quantizedValue;
    featureDataReady = true;
    
    return quantizedValue;
}
Example #6
bool MultidimensionalRegression::predict_(VectorFloat &inputVector){
    
    if( !trained ){
        errorLog << "predict_(VectorFloat &inputVector) - Model Not Trained!" << std::endl;
        return false;
    }
    
    if( inputVector.getSize() != numInputDimensions ){
        errorLog << "predict_(VectorFloat &inputVector) - The size of the input Vector (" << inputVector.getSize() << ") does not match the num features in the model (" << numInputDimensions << ")!" << std::endl;
        return false;
    }
    
    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = grt_scale(inputVector[n], inputVectorRanges[n].minValue, inputVectorRanges[n].maxValue, 0.0, 1.0);
        }
    }
    
    for(UINT n=0; n<numOutputDimensions; n++){
        if( !regressionModules[ n ]->predict( inputVector ) ){
            errorLog << "predict_(VectorFloat &inputVector) - Failed to predict for regression module " << n << std::endl;
            return false; //Abort rather than continuing with stale regression data
        }
        regressionData[ n ] = regressionModules[ n ]->getRegressionData()[0];
    }
    
    if( useScaling ){
        for(UINT n=0; n<numOutputDimensions; n++){
            regressionData[n] = grt_scale(regressionData[n], 0.0, 1.0, targetVectorRanges[n].minValue, targetVectorRanges[n].maxValue);
        }
    }
    
    return true;
}
Example #7
UINT KMeansQuantizer::quantize(const VectorFloat &inputVector){
	
    if( !trained ){
        errorLog << "computeFeatures(const VectorFloat &inputVector) - The quantizer has not been trained!" << std::endl;
        return 0;
    }

    if( inputVector.getSize() != numInputDimensions ){
        errorLog << "computeFeatures(const VectorFloat &inputVector) - The size of the inputVector (" << inputVector.getSize() << ") does not match that of the filter (" << numInputDimensions << ")!" << std::endl;
        return 0;
    }

	//Find the minimum cluster
    Float minDist = grt_numeric_limits< Float >::max();
    UINT quantizedValue = 0;
    
    for(UINT k=0; k<numClusters; k++){
        //Compute the squared Euclidean distance
        quantizationDistances[k] = 0;
        for(UINT i=0; i<numInputDimensions; i++){
            quantizationDistances[k] += grt_sqr( inputVector[i]-clusters[k][i] );
        }
        if( quantizationDistances[k] < minDist ){
            minDist = quantizationDistances[k];
            quantizedValue = k;
        }
    }
    
    featureVector[0] = quantizedValue;
    featureDataReady = true;
	
	return quantizedValue;
}
Example #8
File: KMeans.cpp Project: BryanBo-Cao/grt
bool KMeans::predict_(VectorFloat &inputVector){
    
    if( !trained ){
        return false;
	}
	
	if( inputVector.getSize() != numInputDimensions ){
		return false;
	}
    
    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = grt_scale(inputVector[n], ranges[n].minValue, ranges[n].maxValue, 0.0, 1.0);
        }
    }
	
    const Float sigma = 1.0;
    const Float gamma = 1.0 / (2.0*grt_sqr(sigma));
    Float sum = 0;
    Float dist = 0;
	UINT minIndex = 0;
	bestDistance = grt_numeric_limits< Float >::max();
	predictedClusterLabel = 0;
	maxLikelihood = 0;
	if( clusterLikelihoods.getSize() != numClusters )
        clusterLikelihoods.resize( numClusters );
    if( clusterDistances.getSize() != numClusters )
        clusterDistances.resize( numClusters );
	
	for(UINT i=0; i<numClusters; i++){
		
        //We don't need the sqrt: relative distances are enough to find the nearest cluster, and skipping it is faster
		dist = 0;
		for(UINT j=0; j<numInputDimensions; j++){
			dist += grt_sqr( inputVector[j]-clusters[i][j] );
		}
    
        clusterDistances[i] = dist;
        clusterLikelihoods[i] = exp( - grt_sqr(gamma * dist) ); //1.0/(1.0+dist); //This will give us a value close to 1 for a dist of 0, and a value closer to 0 when the dist is large
        
		sum += clusterLikelihoods[i];
        
		if( dist < bestDistance ){
			bestDistance = dist;
			minIndex = i;
		}
	}
	
	//Normalize the likelihoods (guard against a zero sum, which can occur if every exp() underflows)
	if( sum > 0 ){
		for(UINT i=0; i<numClusters; i++){
			clusterLikelihoods[i] /= sum;
		}
	}
	
	predictedClusterLabel = clusterLabels[ minIndex ];
	maxLikelihood = clusterLikelihoods[ minIndex ];
    
    return true;
}
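For reference, a hedged sketch of how this prediction path is reached. The constructor argument and training call follow the usual GRT clusterer workflow and are assumptions, not taken from the snippet:

#include <GRT/GRT.h>
#include <cstdlib>
using namespace GRT;

int main(){
    //A minimal sketch, assuming the standard GRT clusterer workflow
    MatrixFloat trainingData( 100, 2 );
    Random random;
    for(UINT i=0; i<100; i++){
        trainingData[i][0] = random.getRandomNumberUniform( 0.0, 1.0 );
        trainingData[i][1] = random.getRandomNumberUniform( 0.0, 1.0 );
    }

    KMeans kmeans( 3 ); //3 clusters; the constructor signature is an assumption
    if( !kmeans.train_( trainingData ) ) return EXIT_FAILURE;

    VectorFloat sample(2);
    sample[0] = 0.5; sample[1] = 0.5;
    if( kmeans.predict_( sample ) ){
        UINT clusterLabel = kmeans.getPredictedClusterLabel();
        (void)clusterLabel;
    }

    return EXIT_SUCCESS;
}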
Example #9
// Tests the VectorFloat type
TEST(DynamicType, VectorFloatTest) {
  DynamicType type;
  VectorFloat a(3);
  a[0] = 1.1; a[1] = 1.2; a[2] = 1.3;
  EXPECT_TRUE( type.set( a ) );
  VectorFloat b = type.get< VectorFloat >();
  EXPECT_EQ( a.getSize(), b.getSize() );
  for(unsigned int i=0; i<a.getSize(); i++){
    EXPECT_EQ( a[i], b[i] );
  }
}
Example #10
bool GaussianMixtureModels::predict_(VectorFloat &x){
    
    if( !trained ){
        return false;
    }
    
    if( x.getSize() != numInputDimensions ){
        return false;
    }
    
    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            x[n] = grt_scale(x[n], ranges[n].minValue, ranges[n].maxValue, 0.0, 1.0);
        }
    }
    
    Float sum = 0;
    Float dist = 0;
    UINT bestIndex = 0;
    bestDistance = 0;
    predictedClusterLabel = 0;
    maxLikelihood = 0;
    if( clusterLikelihoods.size() != numClusters )
        clusterLikelihoods.resize( numClusters );
    if( clusterDistances.size() != numClusters )
        clusterDistances.resize( numClusters );
    
    for(UINT i=0; i<numClusters; i++){
        
        dist = gauss(x,i,det,mu,invSigma);
        
        clusterDistances[i] = dist;
        clusterLikelihoods[i] = dist;
        
        sum += clusterLikelihoods[i];
        
        if( dist > bestDistance ){
            bestDistance = dist;
            bestIndex = i;
        }
    }
    
    //Normalize the likelihoods (guard against a zero sum)
    if( sum > 0 ){
        for(UINT i=0; i<numClusters; i++){
            clusterLikelihoods[i] /= sum;
        }
    }
    
    predictedClusterLabel = clusterLabels[ bestIndex ];
    maxLikelihood = clusterLikelihoods[ bestIndex ];
    
    return true;
}
Example #11
Float DoubleMovingAverageFilter::filter(const Float x){
    
    //If the filter has not been initialised then return 0, otherwise filter x and return y
    if( !initialized ){
        errorLog << "filter(const Float x) - The filter has not been initialized!" << std::endl;
        return 0;
    }
    
    VectorFloat y = filter(VectorFloat(1,x));
    
    if( y.getSize() == 0 ) return 0;
    return y[0];
}
Example #12
VectorFloat ZeroCrossingCounter::update(const VectorFloat &x){
    
    if( !initialized ){
        errorLog << "update(const VectorFloat &x) - Not Initialized!" << std::endl;
        return VectorFloat();
    }
    
    if( x.getSize() != numInputDimensions ){
        errorLog << "update(const VectorFloat &x)- The Number Of Input Dimensions (" << numInputDimensions << ") does not match the size of the input vector (" << x.getSize() << ")!" << std::endl;
        return VectorFloat();
    }
    
    //Clear the feature vector
    std::fill(featureVector.begin(),featureVector.end(),0);
    
    //Update the derivative data
    derivative.computeDerivative( x );
    
    //Dead zone the derivative data
    deadZone.filter( derivative.getProcessedData() );
    
    //Add the deadzone data to the buffer
    dataBuffer.push_back( deadZone.getProcessedData() );
    
    //Search the buffer for the zero crossing features
    for(UINT j=0; j<numInputDimensions; j++){
        UINT colIndex = (featureMode == INDEPENDANT_FEATURE_MODE ? (TOTAL_NUM_ZERO_CROSSING_FEATURES*j) : 0);
        for(UINT i=1; i<dataBuffer.getSize(); i++){
            //Search for a zero crossing
            if( (dataBuffer[i][j] > 0 && dataBuffer[i-1][j] <= 0) || (dataBuffer[i][j] < 0 && dataBuffer[i-1][j] >= 0) ){
                //Update the zero crossing count
                featureVector[ NUM_ZERO_CROSSINGS_COUNTED + colIndex ]++;
                
                //Update the magnitude, search the last 5 values around the zero crossing to make sure we get the maxima of the peak
                Float maxValue = 0;
                UINT searchSize = i > 5 ? 5 : i;
                for(UINT n=0; n<searchSize; n++){
                    Float value = fabs( dataBuffer[ i-n ][j] );
                    if( value > maxValue ) maxValue = value;
                }
                featureVector[ ZERO_CROSSING_MAGNITUDE + colIndex ] += maxValue;
            }
        }
    }
    
    //Flag that the feature data has been computed
    featureDataReady = true;

    return featureVector;
}
Example #13
bool ContinuousHiddenMarkovModel::predict_(VectorFloat &x){
    
    if( !trained ){
        errorLog << "predict_(VectorFloat &x) - The model is not trained!" << std::endl;
        return false;
    }
    
    if( x.getSize() != numInputDimensions ){
        errorLog << "predict_(VectorFloat &x) - The input vector size (" << x.getSize() << ") does not match the number of input dimensions (" << numInputDimensions << ")" << std::endl;
        return false;
    }
    
    //Add the new sample to the circular buffer
    observationSequence.push_back( x );
    
    //Convert the circular buffer to MatrixFloat
    for(unsigned int i=0; i<observationSequence.getSize(); i++){
        for(unsigned int j=0; j<numInputDimensions; j++){
            obsSequence[i][j] = observationSequence[i][j];
        }
    }
    
    return predict_( obsSequence );
}
Example #14
bool Derivative::process(const VectorFloat &inputVector) {

    if( !initialized ) {
        errorLog << "process(const VectorFloat &inputVector) - Not initialized!" << std::endl;
        return false;
    }

    if( inputVector.getSize() != numInputDimensions ) {
        errorLog << "process(const VectorFloat &inputVector) - The size of the inputVector (" << inputVector.size() << ") does not match that of the filter (" << numInputDimensions << ")!" << std::endl;
        return false;
    }

    computeDerivative( inputVector );

    return processedData.getSize() == numOutputDimensions;
}
Example #15
bool MovingAverageFilter::process(const VectorFloat &inputVector){
    
    if( !initialized ){
        errorLog << "process(const VectorFloat &inputVector) - The filter has not been initialized!" << std::endl;
        return false;
    }

    if( inputVector.getSize() != numInputDimensions ){
        errorLog << "process(const VectorFloat &inputVector) - The size of the inputVector (" << inputVector.getSize() << ") does not match that of the filter (" << numInputDimensions << ")!" << std::endl;
        return false;
    }
    
    filter( inputVector );
    
    return processedData.getSize() == numOutputDimensions;
}
Example #16
int main (int argc, const char * argv[])
{
    //Create a new instance of an FFT with a window size of 256 and a hop size of 1
    FFT fft(256,1);
    
    //Create some variables to help generate the signal data
    const UINT numSeconds = 10;                         //The number of seconds of data we want to generate
    double t = 0;                                       //This keeps track of the time
    double tStep = 1.0/1000.0;                          //This is how much the time will be updated at each iteration in the for loop
    double freq = 100;                                  //Stores the frequency
    
    //Generate the signal and filter the data
    for(UINT i=0; i<numSeconds*1000; i++){
        
        //Generate the signal
        double signal = sin( t * TWO_PI*freq );
        
        //Compute the FFT of the input signal (and the previous buffer data)
        fft.update( signal );
        
        //Update the time
        t += tStep;
    }
    
    //Take the output of the last FFT
    Vector<FastFourierTransform> fftResults = fft.getFFTResults();
    
    //The input signal is a 1 dimensional signal, so get the magnitude data for dimension 1 (which is at element 0)
    VectorFloat magnitudeData = fftResults[0].getMagnitudeData();
    
    //Print the magnitude data
    cout << "Magnitude Data:\n";
    for(UINT i=0; i<magnitudeData.getSize(); i++){
        cout << magnitudeData[i] << endl;
    }
    
    return EXIT_SUCCESS;
    
}
Example #17
bool DecisionTreeClusterNode::computeFeatureWeights( VectorFloat &weights ) const{

    if( isLeafNode ){ //If we reach a leaf node, no weight update needed
        return true;
    }
    
    if( featureIndex >= weights.getSize() ){ //Feature index is out of bounds
        warningLog << __GRT_LOG__ << " Feature index is greater than weights Vector size!" << std::endl;
        return false;
    }else{
        weights[ featureIndex ]++;
    }
    
    if( leftChild ){ //Recursively compute the weights for the left child
        leftChild->computeFeatureWeights( weights );
    }
    if( rightChild ){ //Recursively compute the weights for the right child
        rightChild->computeFeatureWeights( weights );
    }

    return true;
}
Example #18
bool computeFeatureWeights( CommandLineParser &parser ){

    infoLog << "Computing feature weights..." << endl;

    string resultsFilename = "";
    string modelFilename = "";
    bool combineWeights = false;

    //Get the model filename
    if( !parser.get("model-filename",modelFilename) ){
        errorLog << "Failed to parse filename from command line! You can set the model filename using the --model." << endl;
        printUsage();
        return false;
    }

    //Get the results filename
    if( !parser.get("filename",resultsFilename) ){
        errorLog << "Failed to parse results filename from command line! You can set the results filename using the -f." << endl;
        printUsage();
        return false;
    }

    //Check if the weights should be combined across the forest
    parser.get("combine-weights",combineWeights);

    //Load the model
    GestureRecognitionPipeline pipeline;

    if( !pipeline.load( modelFilename ) ){
        errorLog << "Failed to load model from file: " << modelFilename << endl;
        printUsage();
        return false;
    }

    //Make sure the pipeline contains a random forest model and that it is trained
    RandomForests *forest = pipeline.getClassifier< RandomForests >();

    if( !forest ){
        errorLog << "Model loaded, but the pipeline does not contain a RandomForests classifier!" << endl;
        printUsage();
        return false;
    }

    if( !forest->getTrained() ){
        errorLog << "Model loaded, but the RandomForests classifier is not trained!" << endl;
        printUsage();
        return false;
    }

    //Compute the feature weights
    if( combineWeights ){
        VectorFloat weights = forest->getFeatureWeights();
        if( weights.getSize() == 0 ){
            errorLog << "Failed to compute feature weights!" << endl;
            printUsage();
            return false;
        }

        //Save the results to a file
        fstream file;
        file.open( resultsFilename.c_str(), fstream::out );
        
        const unsigned int N = weights.getSize();
        for(unsigned int i=0; i<N; i++){
            file << weights[i] << endl;
        }
        
        file.close();
    }else{

        double norm = 0.0;
        const unsigned int K = forest->getForestSize();
        const unsigned int N = forest->getNumInputDimensions();
        VectorFloat tmp( N, 0.0 );
        MatrixDouble weights(K,N);

        for(unsigned int i=0; i<K; i++){

            DecisionTreeNode *tree = forest->getTree(i);
            tree->computeFeatureWeights( tmp );
            norm = 1.0 / Util::sum( tmp );
            for(unsigned int j=0; j<N; j++){
                tmp[j] *= norm;
                weights[i][j] = tmp[j];
                tmp[j] = 0;
            }
        }

        //Save the results to a file
        weights.save( resultsFilename );
    }
    

    return true;
}
Example #19
File: DictTest.cpp Project: sgrignard/grt
// Tests the basic functionality
TEST(Dict, DictTest) {

  //Create a new dictionary
  Dict dict;

  //Create some values to add to the dictionary
  const int apple = 1;
  const int orange = 2;
  const int melon = 3;
  const Float pi = 3.14;
  VectorFloat buf(3);
  buf[0] = 1; buf[1] = 2; buf[2] = 3;
  int expectedSize = 0;

  //Check the size, it should be zero
  EXPECT_TRUE( dict.getSize() == expectedSize );

  //Add some key-value pairs to the dictionary
  EXPECT_TRUE( dict.add( "apple", apple ) );
  EXPECT_TRUE( dict.getSize() == ++expectedSize );
  EXPECT_TRUE( dict.add( "orange", orange ) );
  EXPECT_TRUE( dict.getSize() == ++expectedSize );
  EXPECT_TRUE( dict.add( "melon", melon ) );
  EXPECT_TRUE( dict.getSize() == ++expectedSize );
  EXPECT_TRUE( dict.add( "pi", pi ) );
  EXPECT_TRUE( dict.getSize() == ++expectedSize );
  EXPECT_TRUE( dict.add( "pi", pi ) ); //Add it twice, the first value will be overwritten
  EXPECT_TRUE( dict.getSize() == expectedSize );
  EXPECT_FALSE( dict.add( "pi", pi, false ) ); //Try and add it, but disable overwrites
  EXPECT_TRUE( dict.getSize() == expectedSize );
  EXPECT_TRUE( dict.add( "buf", buf ) );
  EXPECT_TRUE( dict.getSize() == ++expectedSize );

  //Remove some values
  EXPECT_TRUE( dict.remove( "orange" ) );
  EXPECT_TRUE( dict.getSize() == --expectedSize );
  EXPECT_FALSE( dict.remove( "orange" ) ); //Try and remove the value a second time
  EXPECT_TRUE( dict.getSize() == expectedSize );
  EXPECT_FALSE( dict.remove( "pear" ) ); //Try and remove a value that does not exist
  EXPECT_TRUE( dict.getSize() == expectedSize );

  //Test some keys exist
  EXPECT_TRUE( dict.exists( "apple" ) );
  EXPECT_TRUE( dict.exists( "pi" ) );
  EXPECT_FALSE( dict.exists( "orange" ) );

  //Test the getter
  EXPECT_EQ( dict.get< int >( "apple" ), apple );
  EXPECT_EQ( dict.get< Float >( "pi" ), pi );

  //Test the reference update
  int &v = dict.get< int >( "apple" );
  v++;
  EXPECT_EQ( dict.get< int >( "apple" ), apple+1 );

  //Test the vector
  VectorFloat vec = dict.get< VectorFloat >( "buf" );
  EXPECT_TRUE( buf.getSize() == vec.getSize() );
  for(UINT i=0; i<buf.getSize(); i++){
    EXPECT_TRUE( buf[i] == vec[i] );
  }

  //Test the keys
  Vector< std::string > keys = dict.getKeys();
  EXPECT_EQ( keys.getSize(), dict.getSize() );

  //Test the setter
  EXPECT_TRUE( dict.set( "pi", 3.14159 ) );
  EXPECT_FALSE( dict.set( "foo", 3.14159 ) ); //This wil fail, as foo does not exist in the dictionary

  //Test the () operator
  int x = 0;
  EXPECT_TRUE( dict("melon", x) );
  EXPECT_EQ( x, melon );

  //This should return false
  EXPECT_FALSE( dict("pear", x) );

  //Test the copy constructor
  Dict d2( dict );

  EXPECT_EQ( dict.getSize(), d2.getSize() );
  EXPECT_EQ( dict.getSize(), expectedSize );

  //The values should match
  EXPECT_TRUE( dict.get< int >( "apple" ) == d2.get< int >( "apple" ) );

  //Change something in the original dict, test that d2 has not changed
  EXPECT_TRUE( dict.set( "apple", 5 ) );

  //The values should now not match
  EXPECT_FALSE( dict.get< int >( "apple" ) == d2.get< int >( "apple" ) );

  //Clear all the values
  EXPECT_TRUE( dict.clear() );
  EXPECT_TRUE( dict.getSize() == 0 );

}
Example #20
File: FFTFeatures.cpp Project: CV-IP/grt
bool FFTFeatures::computeFeatures(const VectorFloat &inputVector){

    if( !initialized ){
        errorLog << "computeFeatures(const VectorFloat &inputVector) - Not initialized!" << std::endl;
        return false;
    }
    
    //The input vector should be the magnitude data from an FFT
    if( inputVector.getSize() != fftWindowSize*numChannelsInFFTSignal ){
        errorLog << "computeFeatures(const VectorFloat &inputVector) - The size of the inputVector (" << inputVector.getSize() << ") does not match the expected size! Verify that the FFT module that generated this inputVector has a window size of " << fftWindowSize << " and the number of input channels is: " << numChannelsInFFTSignal << ". Also verify that only the magnitude values are being computed (and not the phase)." << std::endl;
        return false;
    }
    
    featureDataReady = false;
    
    UINT featureIndex = 0;
    IndexedDouble maxFreq(0,0);
    Vector< IndexedDouble > fftMagData(fftWindowSize);
    
    for(UINT i=0; i<numChannelsInFFTSignal; i++){
        Float spectrumSum = 0;
        maxFreq.value = 0;
        maxFreq.index = 0;
        centroidFeature = 0;

        for(UINT n=0; n<fftWindowSize; n++){
            
            //Find the max freq
            if( inputVector[i*fftWindowSize + n] > maxFreq.value ){
                maxFreq.value = inputVector[i*fftWindowSize + n];
                maxFreq.index = n;
            }

			centroidFeature += (n+1) * inputVector[i*fftWindowSize + n];
            
            spectrumSum += inputVector[i*fftWindowSize + n];
            
            //Copy the magnitude data so we can sort it later if needed
            fftMagData[n].value = inputVector[i*fftWindowSize + n];
            fftMagData[n].index = n;
        }
        
        maxFreqFeature = maxFreq.index;
        maxFreqSpectrumRatio = spectrumSum > 0 ? maxFreq.value/spectrumSum : 0;
        centroidFeature = spectrumSum > 0 ? centroidFeature/spectrumSum : 0;
        
        if( computeMaxFreqFeature ){
            featureVector[ featureIndex++ ] = maxFreqFeature;
        }
        
        if( computeMaxFreqSpectrumRatio ){
            featureVector[ featureIndex++ ] = maxFreqSpectrumRatio;
        }
        
        if( computeCentroidFeature ){
            featureVector[ featureIndex++ ] = centroidFeature;
        }
        
        if( computeTopNFreqFeatures ){
            
            sort(fftMagData.begin(),fftMagData.end(),sortIndexDoubleDecendingValue);
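            //N (the number of top frequencies to keep) is a class member defined outside this snippet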
            for(UINT n=0; n<N; n++){
                topNFreqFeatures[n] = fftMagData[n].index;
            }
            
            for(UINT n=0; n<N; n++){
                featureVector[ featureIndex++ ] = topNFreqFeatures[n];
            }
        }
    }
    
    //Flag that the features are ready
    featureDataReady = true;
    
    return true;
}
Example #21
int main (int argc, const char * argv[])
{
    //Create a matrix for the test data
    MatrixFloat data(4,2);

    //Populate the test data
    data[0][0] = 1;
    data[0][1] = 2;
    data[1][0] = 3;
    data[1][1] = 4;
    data[2][0] = 5;
    data[2][1] = 6;
    data[3][0] = 7;
    data[3][1] = 8;

    cout << "Data:\n";
    for(UINT i=0; i<data.getNumRows(); i++) {
        for(UINT j=0; j<data.getNumCols(); j++) {
            cout << data[i][j] << "\t";
        }
        cout << endl;
    }

    //Create a new instance of the SVD class
    SVD svd;

    //Computes the singular value decomposition of the data matrix
    if( !svd.solve(data) ) {
        cout << "ERROR: Failed to solve SVD solution!\n";
        return EXIT_FAILURE;
    }

    //Get the U, V, and W results (W holds the singular values, often called S in packages such as MATLAB)
    MatrixFloat u = svd.getU();
    MatrixFloat v = svd.getV();
    VectorFloat w = svd.getW();

    cout << "U:\n";
    for(UINT i=0; i<u.getNumRows(); i++) {
        for(UINT j=0; j<u.getNumCols(); j++) {
            cout << u[i][j] << "\t";
        }
        cout << endl;
    }

    cout << "V:\n";
    for(UINT i=0; i<v.getNumRows(); i++) {
        for(UINT j=0; j<v.getNumCols(); j++) {
            cout << v[i][j] << "\t";
        }
        cout << endl;
    }

    cout << "W:\n";
    for(UINT i=0; i<w.getSize(); i++) {
        cout << w[i] << "\t";
    }
    cout << endl;

    return EXIT_SUCCESS;
}
Example #22
VectorFloat TimeDomainFeatures::update(const VectorFloat &x){
    
    if( !initialized ){
        errorLog << "update(const VectorFloat &x) - Not Initialized!" << std::endl;
        return VectorFloat();
    }
    
    if( x.getSize() != numInputDimensions ){
        errorLog << "update(const VectorFloat &x)- The Number Of Input Dimensions (" << numInputDimensions << ") does not match the size of the input vector (" << x.getSize() << ")!" << std::endl;
        return VectorFloat();
    }
    
    //Add the new data to the data buffer
    dataBuffer.push_back( x );
    
    //Only flag that the feature data is ready if the buffer is full
    if( dataBuffer.getBufferFilled() ){
        featureDataReady = true;
    }else featureDataReady = false;
    
    MatrixFloat meanFeatures(numInputDimensions,numFrames);
    MatrixFloat stdDevFeatures(numInputDimensions,numFrames);
    MatrixFloat normFeatures(numInputDimensions,numFrames);
    MatrixFloat rmsFeatures(numInputDimensions,numFrames);
    MatrixFloat data(bufferLength,numInputDimensions);
    
    if( offsetInput ){
        for(UINT n=0; n<numInputDimensions; n++){
            data[0][n] = dataBuffer[0][n];
            for(UINT i=1; i<bufferLength; i++){
                data[i][n] = dataBuffer[i][n]-dataBuffer[0][n];
            }
        }
    }else{
        for(UINT n=0; n<numInputDimensions; n++){
            for(UINT i=0; i<bufferLength; i++){
                data[i][n] = dataBuffer[i][n];
            }
        }
    }
    
    if( useMean || useStdDev ){ meanFeatures.setAllValues(0); stdDevFeatures.setAllValues(0); }
    if( useEuclideanNorm ) normFeatures.setAllValues(0);
    if( useRMS ) rmsFeatures.setAllValues(0);
    
    UINT frameSize = bufferLength / numFrames;
    UINT frame = 0;
    UINT index = 0;
    for(UINT n=0; n<numInputDimensions; n++){
        frame = 0;
        index = 0;
        for(UINT i=0; i<bufferLength; i++){
            //Update the mean
            meanFeatures[n][frame] += data[i][n];
            
            //Update the norm features
            if( useEuclideanNorm )
                normFeatures[n][frame] += data[i][n]*data[i][n];
            
            //Update the rms features
            if( useRMS )
                rmsFeatures[n][frame] += data[i][n]*data[i][n];
            
            if( ++index == frameSize ){
                frame++;
                index = 0;
            }
        }
        
        //Update the mean
        for(UINT j=0; j<numFrames; j++){
            meanFeatures[n][j] /= frameSize;
        }
        
        //Update the std dev if needed
        if( useStdDev ){
            frame = 0;
            index = 0;
            for(UINT i=0; i<bufferLength; i++){
                stdDevFeatures[n][frame] += (data[i][n]-meanFeatures[n][frame]) * (data[i][n]-meanFeatures[n][frame]);
                if( ++index == frameSize ){
                    frame++;
                    index = 0;
                }
            }
            Float norm = frameSize>1 ? frameSize-1 : 1;
            for(UINT j=0; j<numFrames; j++){
                stdDevFeatures[n][j] = sqrt( stdDevFeatures[n][j]/norm );
            }
        }
        
        //Update the euclidean norm if needed
        if( useEuclideanNorm ){
            for(UINT j=0; j<numFrames; j++){
                normFeatures[n][j] = sqrt( normFeatures[n][j] );
            }
        }
        
        //Update the rms if needed
        if( useRMS ){
            for(UINT j=0; j<numFrames; j++){
                rmsFeatures[n][j] = sqrt( rmsFeatures[n][j] / frameSize );
            }
        }
    }
    
    //Update the features
    index = 0;
    frame = 0;
    for(UINT n=0; n<numInputDimensions; n++){
        for(UINT j=0; j<numFrames; j++){
            if( useMean ){
                featureVector[index++] = meanFeatures[n][j];
            }
            if( useStdDev ){
                featureVector[index++] = stdDevFeatures[n][j];
            }
            if( useEuclideanNorm ){
                featureVector[index++] = normFeatures[n][j];
            }
            if( useRMS ){
                featureVector[index++] = rmsFeatures[n][j];
            }
        }
    }
    
    return featureVector;
}
Example #23
bool RandomForests::train_(ClassificationData &trainingData){
    
    //Clear any previous model
    clear();
    
    const unsigned int M = trainingData.getNumSamples();
    const unsigned int N = trainingData.getNumDimensions();
    const unsigned int K = trainingData.getNumClasses();
    
    if( M == 0 ){
        errorLog << "train_(ClassificationData &trainingData) - Training data has zero samples!" << std::endl;
        return false;
    }

    if( bootstrappedDatasetWeight <= 0.0 || bootstrappedDatasetWeight > 1.0 ){
        errorLog << "train_(ClassificationData &trainingData) - Bootstrapped Dataset Weight must be [> 0.0 and <= 1.0]" << std::endl;
        return false;
    }
    
    numInputDimensions = N;
    numClasses = K;
    classLabels = trainingData.getClassLabels();
    ranges = trainingData.getRanges();
    
    //Scale the training data if needed
    if( useScaling ){
        //Scale the training data between 0 and 1
        trainingData.scale(0, 1);
    }

    if( useValidationSet ){
        validationSetAccuracy = 0;
        validationSetPrecision.resize( useNullRejection ? K+1 : K, 0 );
        validationSetRecall.resize( useNullRejection ? K+1 : K, 0 );
    }
    
    //Flag that the main algorithm has been trained in case we need to trigger any callbacks
    trained = true;
    
    //Train the random forest
    forest.reserve( forestSize );

    for(UINT i=0; i<forestSize; i++){
        
        //Get a balanced bootstrapped dataset
        UINT datasetSize = (UINT)(trainingData.getNumSamples() * bootstrappedDatasetWeight);
        ClassificationData data = trainingData.getBootstrappedDataset( datasetSize, true );

        Timer timer;
        timer.start();
 
        DecisionTree tree;
        tree.setDecisionTreeNode( *decisionTreeNode );
        tree.enableScaling( false ); //We have already scaled the training data so we do not need to scale it again
        tree.setUseValidationSet( useValidationSet );
        tree.setValidationSetSize( validationSetSize );
        tree.setTrainingMode( trainingMode );
        tree.setNumSplittingSteps( numRandomSplits );
        tree.setMinNumSamplesPerNode( minNumSamplesPerNode );
        tree.setMaxDepth( maxDepth );
        tree.enableNullRejection( useNullRejection );
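        //Note: "Spilt" (sic) is the spelling used by the GRT method name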
        tree.setRemoveFeaturesAtEachSpilt( removeFeaturesAtEachSpilt );

        trainingLog << "Training decision tree " << i+1 << "/" << forestSize << "..." << std::endl;
        
        //Train this tree
        if( !tree.train_( data ) ){
            errorLog << "train_(ClassificationData &trainingData) - Failed to train tree at forest index: " << i << std::endl;
            clear();
            return false;
        }

        Float computeTime = timer.getMilliSeconds();
        trainingLog << "Decision tree trained in " << (computeTime*0.001)/60.0 << " minutes" << std::endl;

        if( useValidationSet ){
            Float forestNorm = 1.0 / forestSize;
            validationSetAccuracy += tree.getValidationSetAccuracy();
            VectorFloat precision = tree.getValidationSetPrecision();
            VectorFloat recall = tree.getValidationSetRecall();

            grt_assert( precision.getSize() == validationSetPrecision.getSize() );
            grt_assert( recall.getSize() == validationSetRecall.getSize() );

            for(UINT j=0; j<validationSetPrecision.getSize(); j++){
                validationSetPrecision[j] += precision[j] * forestNorm;
            }

            for(UINT j=0; j<validationSetRecall.getSize(); j++){
                validationSetRecall[j] += recall[j] * forestNorm;
            }

        }
        
        //Deep copy the tree into the forest
        forest.push_back( tree.deepCopyTree() );
    }

    if( useValidationSet ){
        validationSetAccuracy /= forestSize;
        trainingLog << "Validation set accuracy: " << validationSetAccuracy << std::endl;

        trainingLog << "Validation set precision: ";
        for(UINT i=0; i<validationSetPrecision.getSize(); i++){
            trainingLog << validationSetPrecision[i] << " ";
        }
        trainingLog << std::endl;

        trainingLog << "Validation set recall: ";
        for(UINT i=0; i<validationSetRecall.getSize(); i++){
            trainingLog << validationSetRecall[i] << " ";
        }
        trainingLog << std::endl;
    }

    return true;
}
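A hedged sketch of configuring and training the forest above through its public setters; the setter names mirror the parameters used in train_ but are assumptions, as is the training-data filename:

#include <GRT/GRT.h>
#include <cstdlib>
using namespace GRT;

int main(){
    //A minimal sketch, assuming GRT setter names that mirror the members used in train_
    ClassificationData trainingData;
    if( !trainingData.load( "TrainingData.grt" ) ) return EXIT_FAILURE; //Hypothetical filename

    RandomForests forest;
    forest.setForestSize( 10 ); //Number of trees in the forest
    forest.setMaxDepth( 10 );   //Maximum depth of each tree (setter name is an assumption)

    if( !forest.train( trainingData ) ) return EXIT_FAILURE;

    return EXIT_SUCCESS;
}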
Example #24
File: BAG.cpp Project: sgrignard/grt
bool BAG::predict_(VectorFloat &inputVector){
    
    if( !trained ){
        errorLog << "predict_(VectorFloat &inputVector) - Model Not Trained!" << std::endl;
        return false;
    }
    
    predictedClassLabel = 0;
    maxLikelihood = -10000;
    
    if( inputVector.getSize() != numInputDimensions ){
        errorLog << "predict_(VectorFloat &inputVector) - The size of the input Vector (" << inputVector.getSize() << ") does not match the num features in the model (" << numInputDimensions << std::endl;
        return false;
    }
    
    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = scale(inputVector[n], ranges[n].minValue, ranges[n].maxValue, 0, 1);
        }
    }
    
    if( classLikelihoods.getSize() != numClasses ) classLikelihoods.resize(numClasses);
    if( classDistances.getSize() != numClasses ) classDistances.resize(numClasses);
    
    //Reset the likelihoods and distances
    for(UINT k=0; k<numClasses; k++){
        classLikelihoods[k] = 0;
        classDistances[k] = 0;
    }
    
    //Run the prediction for each classifier
    Float sum = 0;
    UINT ensembleSize = ensemble.getSize();
    for(UINT i=0; i<ensembleSize; i++){
        
        if( !ensemble[i]->predict(inputVector) ){
            errorLog << "predict_(VectorFloat &inputVector) - The " << i << " classifier in the ensemble failed prediction!" << std::endl;
            return false;
        }
        
        classLikelihoods[ getClassLabelIndexValue( ensemble[i]->getPredictedClassLabel() ) ] += weights[i];
        classDistances[ getClassLabelIndexValue( ensemble[i]->getPredictedClassLabel() ) ] += ensemble[i]->getMaximumLikelihood() * weights[i];
        
        sum += weights[i];
    }
    
    //Set the predicted class label as the most common class
    Float maxCount = 0;
    UINT maxIndex = 0;
    for(UINT i=0; i<numClasses; i++){
        if( classLikelihoods[i] > maxCount ){
            maxIndex = i;
            maxCount = classLikelihoods[i];
        }
        classLikelihoods[i] /= sum;
        classDistances[i] /= Float(ensembleSize);
    }
    
    predictedClassLabel = classLabels[ maxIndex ];
    maxLikelihood = classLikelihoods[ maxIndex ];
    
    return true;
}
Example #25
VectorFloat MovingAverageFilter::filter(const VectorFloat &x){
    
    //If the filter has not been initialized then return an empty vector, otherwise filter x and return y
    if( !initialized ){
        errorLog << "filter(const VectorFloat &x) - The filter has not been initialized!" << std::endl;
        return VectorFloat();
    }
    
    if( x.getSize() != numInputDimensions ){
        errorLog << "filter(const VectorFloat &x) - The size of the input vector (" << x.getSize() << ") does not match that of the number of dimensions of the filter (" << numInputDimensions << ")!" << std::endl;
        return VectorFloat();
    }
    
    if( ++inputSampleCounter > filterSize ) inputSampleCounter = filterSize;
    
    //Add the new value to the buffer
    dataBuffer.push_back( x );
    
    for(unsigned int j=0; j<numInputDimensions; j++){
        processedData[j] = 0;
        for(unsigned int i=0; i<inputSampleCounter; i++) {
            processedData[j] += dataBuffer[i][j];
        }
        processedData[j] /= Float(inputSampleCounter);
    }
    
    return processedData;
}
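And a matching usage sketch for the single-pass filter. Note how inputSampleCounter above lets the average ramp up over the first filterSize samples; the constructor signature is assumed, as in the earlier examples:

#include <GRT/GRT.h>
#include <cmath>
#include <cstdlib>
using namespace GRT;

int main(){
    //A minimal sketch, assuming a (filterSize, numDimensions) constructor
    MovingAverageFilter maFilter( 5, 1 );

    for(UINT i=0; i<50; i++){
        VectorFloat x(1);
        x[0] = sin( i * 0.1 );
        VectorFloat y = maFilter.filter( x );
        if( y.getSize() == 0 ) return EXIT_FAILURE; //An empty vector signals an error
    }

    return EXIT_SUCCESS;
}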
Example #26
// Tests the GetClassLikelihoods function
TEST(Classifier, GetClassLikelihoods) {
  Classifier classifier;
  VectorFloat classLikelihoods = classifier.getClassLikelihoods();
  EXPECT_TRUE( classLikelihoods.getSize() == 0 ); //The default size should be zero
}
Example #27
// Tests the GetClassDistances function
TEST(Classifier, GetClassDistances) {
  Classifier classifier;
  VectorFloat classDistances = classifier.getClassDistances();
  EXPECT_TRUE( classDistances.getSize() == 0 ); //The default size should be zero
}
Example #28
// Tests the GetNullRejectionThresholds function
TEST(Classifier, GetNullRejectionThresholds) {
  Classifier classifier;
  VectorFloat thresholds = classifier.getNullRejectionThresholds();
  EXPECT_TRUE( thresholds.getSize() == 0 ); //The default size should be zero
}
Example #29
File: GMM.cpp Project: BryanBo-Cao/grt
bool GMM::predict_(VectorFloat &x){

	predictedClassLabel = 0;
	
    if( classDistances.getSize() != numClasses || classLikelihoods.getSize() != numClasses ){
        classDistances.resize(numClasses);
        classLikelihoods.resize(numClasses);
    }
    
    if( !trained ){
        errorLog << "predict_(VectorFloat &x) - Mixture Models have not been trained!" << std::endl;
        return false;
    }
    
    if( x.getSize() != numInputDimensions ){
        errorLog << "predict_(VectorFloat &x) - The size of the input vector (" << x.getSize() << ") does not match that of the number of features the model was trained with (" << numInputDimensions << ")." << std::endl;
        return false;
    }
    
    if( useScaling ){
        for(UINT i=0; i<numInputDimensions; i++){
            x[i] = grt_scale(x[i], ranges[i].minValue, ranges[i].maxValue, GMM_MIN_SCALE_VALUE, GMM_MAX_SCALE_VALUE);
        }
    }

	UINT bestIndex = 0;
	maxLikelihood = 0;
    bestDistance = 0;
    Float sum = 0;
	for(UINT k=0; k<numClasses; k++){
        classDistances[k] = computeMixtureLikelihood(x,k);
        
        //cout << "K: " << k << " Dist: " << classDistances[k] << std::endl;
        classLikelihoods[k] = classDistances[k];
        sum += classLikelihoods[k];
		if( classLikelihoods[k] > bestDistance ){
			bestDistance = classLikelihoods[k];
			bestIndex = k;
		}
	}
    
    //Normalize the likelihoods (guard against a zero sum)
    if( sum > 0 ){
        for(unsigned int k=0; k<numClasses; k++){
            classLikelihoods[k] /= sum;
        }
    }
    maxLikelihood = classLikelihoods[bestIndex];
    
    if( useNullRejection ){
        
        //cout << "Dist: " << classDistances[bestIndex] << " RejectionThreshold: " << models[bestIndex].getRejectionThreshold() << std::endl;
        
        //If the best distance is at or above the model's rejection threshold then set the predicted class label as the best class label
        //Otherwise set the predicted class label as the default null rejection class label of 0
        if( classDistances[bestIndex] >= models[bestIndex].getNullRejectionThreshold() ){
            predictedClassLabel = models[bestIndex].getClassLabel();
        }else predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
   }else{
       //Get the predicted class label
       predictedClassLabel = models[bestIndex].getClassLabel();
   }
	
	return true;
}
Example #30
// Tests the default c'tor.
TEST(VectorFloat, DefaultConstructor) {
	VectorFloat vec;
	EXPECT_EQ(0, vec.getSize());
}