Example #1
bool MatrixFloat::subtract(const MatrixFloat &b){
    
    if( b.getNumRows() != rows ){
        errorLog << "subtract(const MatrixFloat &b) - Failed to add matrix! The rows do not match!" << std::endl;
        errorLog << " rows: " << rows << " b rows: " << b.getNumRows() << std::endl;
        return false;
    }
    
    if( b.getNumCols() != cols ){
        errorLog << "subtract(const MatrixFloat &b) - Failed to add matrix! The rows do not match!" << std::endl;
        errorLog << "  cols: " << cols << " b cols: " << b.getNumCols() << std::endl;
        return false;
    }
    
    unsigned int i;
    
    //Using direct pointers really helps speed up the computation time
    Float *pb = b.getData();
    
    const unsigned int size = rows*cols;
    for(i=0; i<size; i++){
        dataPtr[i] -= pb[i];
    }
    
    return true;
}
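A minimal usage sketch for the in-place subtract above (the values are illustrative; assumes the GRT header and namespace):

#include <GRT/GRT.h>
using namespace GRT;

int main(){
    MatrixFloat a(2,2), b(2,2);
    a[0][0] = 3; a[0][1] = 4; a[1][0] = 5; a[1][1] = 6;
    b[0][0] = 1; b[0][1] = 1; b[1][0] = 2; b[1][1] = 2;
    //subtract(b) overwrites a with a-b and returns false on a dimension mismatch
    if( !a.subtract( b ) ) return EXIT_FAILURE;
    return EXIT_SUCCESS; //a is now [[2,3],[3,4]]
}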
Example #2
bool MatrixFloat::subtract(const MatrixFloat &a,const MatrixFloat &b){
    
    const unsigned int M = a.getNumRows();
    const unsigned int N = a.getNumCols();
    
    if( M != b.getNumRows() ){
        errorLog << "subtract(const MatrixFloat &a,const MatrixFloat &b) - Failed to add matrix! The rows do not match!";
        errorLog << " a rows: " << M << " b rows: " << b.getNumRows() << std::endl;
        return false;
    }
    
    if( N != b.getNumCols() ){
        errorLog << "subtract(const MatrixFloat &a,const MatrixFloat &b) - Failed to add matrix! The columns do not match!";
        errorLog << " a cols: " << N << " b cols: " << b.getNumCols() << std::endl;
        return false;
    }
    
    resize( M, N );
    
    UINT i,j;
    
    //Using direct pointers really helps speed up the computation time
    Float **pa = a.getDataPointer();
    Float **pb = b.getDataPointer();
    
    for(i=0; i<M; i++){
        for(j=0; j<N; j++){
            dataPtr[i*cols+j] = pa[i][j] - pb[i][j];
        }
    }
    
    return true;
}
Example #3
bool MatrixFloat::add(const MatrixFloat &a,const MatrixFloat &b){
    
    const unsigned int M = a.getNumRows();
    const unsigned int N = a.getNumCols();
    
    if( M != b.getNumRows() ){
        errorLog << "add(const MatrixFloat &a,const MatrixFloat &b) - Failed to add matrix! The rows do not match!";
        errorLog << " a rows: " << M << " b rows: " << b.getNumRows() << std::endl;
        return false;
    }
    
    if( N != b.getNumCols() ){
        errorLog << "add(const MatrixFloat &a,const MatrixFloat &b) - Failed to add matrix! The columns do not match!";
        errorLog << " a cols: " << N << " b cols: " << b.getNumCols() << std::endl;
        return false;
    }
    
    resize( M, N );
    
    UINT i;
    
    //Using direct pointers really helps speed up the computation time
    Float *pa = a.getData();
    Float *pb = b.getData();
    
    const unsigned int size = M*N;
    for(i=0; i<size; i++){
        dataPtr[i] = pa[i] + pb[i];
    }
    
    return true;
}
Example #4
bool MatrixFloat::multiple(const MatrixFloat &a,const MatrixFloat &b,const bool aTranspose){
    
    const unsigned int M = !aTranspose ? a.getNumRows() : a.getNumCols();
    const unsigned int N = !aTranspose ? a.getNumCols() : a.getNumRows();
    const unsigned int K = b.getNumRows();
    const unsigned int L = b.getNumCols();
    
    if( N != K ) {
        errorLog << "multiple(const MatrixFloat &a,const MatrixFloat &b,const bool aTranspose) - The number of rows in a (" << K << ") does not match the number of columns in matrix b (" << N << ")" << std::endl;
        return false;
    }
    
    if( !resize( M, L ) ){
        errorLog << "multiple(const MatrixFloat &b,const MatrixFloat &c,const bool bTranspose) - Failed to resize matrix!" << std::endl;
        return false;
    }
    
    unsigned int i, j, k = 0;
    
    //Using direct pointers really helps speed up the computation time
    Float **pa = a.getDataPointer();
    Float **pb = b.getDataPointer();
    
    if( aTranspose ){
        
        for(j=0; j<L; j++){
            for(i=0; i<M; i++){
                dataPtr[i*cols+j] = 0;
                for(k=0; k<K; k++){
                    dataPtr[i*cols+j] += pa[k][i] * pb[k][j];
                }
            }
        }
        
    }else{
        
        for(j=0; j<L; j++){
            for(i=0; i<M; i++){
                dataPtr[i*cols+j] = 0;
                for(k=0; k<K; k++){
                    dataPtr[i*cols+j] += pa[i][k] * pb[k][j];
                }
            }
        }
        
    }
    
    return true;
}
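A short sketch of how the aTranspose flag above might be used to compute a' * b without explicitly forming the transpose (dimensions are illustrative):

//Hypothetical usage: c = a' * b via the aTranspose flag
MatrixFloat a(3,2); //a is 3x2, so a' is 2x3
MatrixFloat b(3,4); //b is 3x4
MatrixFloat c;
if( c.multiple( a, b, true ) ){
    //c has been resized to 2x4 and holds a' * b
}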
Example #5
bool ClassificationDataStream::addSample(const UINT classLabel,const MatrixFloat &sample){

    if( numDimensions != sample.getNumCols() ){
        errorLog << "addSample(const UINT classLabel, const MatrixFloat &sample) - the number of columns in the sample (" << sample.getNumCols() << ") does not match the number of dimensions of the dataset (" << numDimensions << ")" << std::endl;
        return false;
    }

    bool searchForNewClass = true;
    if( trackingClass ){
        if( classLabel != lastClassID ){
            //The class ID has changed so update the time series tracker
            timeSeriesPositionTracker[ timeSeriesPositionTracker.size()-1 ].setEndIndex( totalNumSamples-1 );
        }else searchForNewClass = false;
    }
    
    if( searchForNewClass ){
        bool newClass = true;
        //Search to see if this class has been found before
        for(UINT k=0; k<classTracker.size(); k++){
            if( classTracker[k].classLabel == classLabel ){
                newClass = false;
                classTracker[k].counter += sample.getNumRows();
            }
        }
        if( newClass ){
            ClassTracker newCounter(classLabel,1);
            classTracker.push_back( newCounter );
        }

        //Set the timeSeriesPositionTracker start position
        trackingClass = true;
        lastClassID = classLabel;
        TimeSeriesPositionTracker newTracker(totalNumSamples,0,classLabel);
        timeSeriesPositionTracker.push_back( newTracker );
    }

    ClassificationSample labelledSample( numDimensions );
    for(UINT i=0; i<sample.getNumRows(); i++){
        data.push_back( labelledSample );
        data.back().setClassLabel( classLabel );
        for(UINT j=0; j<numDimensions; j++){
            data.back()[j] = sample[i][j];
        }
    }
    totalNumSamples += sample.getNumRows();
    return true;

}
Example #6
bool RBMQuantizer::train_(MatrixFloat &trainingData){
    
    //Clear any previous model
    clear();
    
    if( trainingData.getNumRows() == 0 ){
        errorLog << "train_(MatrixFloat &trainingData) - Failed to train quantizer, the training data is empty!" << std::endl;
        return false;
    }
    
    //Train the RBM model
    rbm.setNumHiddenUnits( numClusters );
    rbm.setLearningRate( learningRate );
    rbm.setMinNumEpochs( minNumEpochs );
    rbm.setMaxNumEpochs( maxNumEpochs );
    rbm.setMinChange( minChange );
    
    if( !rbm.train_( trainingData ) ){
        errorLog << "train_(MatrixFloat &trainingData) - Failed to train quantizer!" << std::endl;
        return false;
    }
    
    //Flag that the feature vector is now initialized
    initialized = true;
    trained = true;
    numInputDimensions = trainingData.getNumCols();
    numOutputDimensions = 1; //This is always 1 for the quantizer
    featureVector.resize(numOutputDimensions,0);
    quantizationDistances.resize(numClusters,0);
    
    return true;
}
Example #7
bool KMeans::setClusters(const MatrixFloat &clusters){
    clear();
    numClusters = clusters.getNumRows();
    numInputDimensions = clusters.getNumCols();
    this->clusters = clusters;
    return true;
}
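Since setClusters simply copies the cluster matrix, it can be used to seed a KMeans instance with known centers, as in this minimal sketch (note that setClusters does not mark the model as trained):

//Hypothetical usage: preload two 3-dimensional cluster centers
MatrixFloat centers(2,3);
centers[0][0] = 0.0; centers[0][1] = 0.0; centers[0][2] = 0.0;
centers[1][0] = 1.0; centers[1][1] = 1.0; centers[1][2] = 1.0;
KMeans kmeans;
kmeans.setClusters( centers ); //numClusters = 2 and numInputDimensions = 3 are derived from the matrix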
Example #8
MatrixFloat MatrixFloat::multiple(const MatrixFloat &b) const{
    
    const unsigned int M = rows;
    const unsigned int N = cols;
    const unsigned int K = b.getNumRows();
    const unsigned int L = b.getNumCols();
    
    if( N != K ) {
        errorLog << "multiple(MatrixFloat b) - The number of rows in b (" << K << ") does not match the number of columns in this matrix (" << N << ")" << std::endl;
        return MatrixFloat();
    }
    
    MatrixFloat c(M,L);
    Float **pb = b.getDataPointer();
    Float **pc = c.getDataPointer();
    
    unsigned int i,j,k = 0;
    for(i=0; i<M; i++){
        for(j=0; j<L; j++){
            pc[i][j] = 0;
            for(k=0; k<K; k++){
                pc[i][j] += dataPtr[i*cols+k] * pb[k][j];
            }
        }
    }
    
    return c;
}
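The const overload returns the product as a new matrix, so it can be used directly in an expression; a minimal sketch (fill-in elided):

//Hypothetical usage: c = a * b with the const multiple overload
MatrixFloat a(2,3);
MatrixFloat b(3,2);
//...fill a and b...
MatrixFloat c = a.multiple( b ); //c is 2x2; an empty MatrixFloat is returned if the inner dimensions disagree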
Example #9
bool FFT::update(const MatrixFloat &x){

    if( !initialized ){
        errorLog << "update(const MatrixFloat &x) - Not initialized!" << std::endl;
        return false;
    }
    
    if( x.getNumCols() != numInputDimensions ){
        errorLog << "update(const MatrixFloat &x) - The number of columns in the inputMatrix (" << x.getNumCols() << ") does not match that of the FeatureExtraction (" << numInputDimensions << ")!" << std::endl;
        return false;
    }
    
    featureDataReady = false;
    
    for(UINT k=0; k<x.getNumRows(); k++){

        //Add the current input to the data buffers
        dataBuffer.push_back( x.getRow(k) );

        if( ++hopCounter == hopSize ){
            hopCounter = 0;
            //Compute the FFT for each dimension
            for(UINT j=0; j<numInputDimensions; j++){
                
                //Copy the input data for this dimension into the temp buffer
                for(UINT i=0; i<dataBufferSize; i++){
                    tempBuffer[i] = dataBuffer[i][j];
                }
                
                //Compute the FFT
                if( !fft[j].computeFFT( tempBuffer ) ){
                    errorLog << "update(const VectorFloat &x) - Failed to compute FFT!" << std::endl;
                    return false;
                }
            }
            
            //Flag that the fft was computed during this update
            featureDataReady = true;
            
            //Copy the FFT data to the feature vector
            UINT index = 0;
            for(UINT j=0; j<numInputDimensions; j++){
                if( computeMagnitude ){
                    Float *mag = fft[j].getMagnitudeDataPtr();
                    for(UINT i=0; i<fft[j].getFFTSize()/2; i++){
                        featureVector[index++] = *mag++;
                    }
                }
                if( computePhase ){
                    Float *phase = fft[j].getPhaseDataPtr();
                    for(UINT i=0; i<fft[j].getFFTSize()/2; i++){
                        featureVector[index++] = *phase++;
                    }
                }
            }
        }
    }
    
    return true;
}
Example #10
bool PrincipalComponentAnalysis::project(const MatrixFloat &data,MatrixFloat &prjData){
	
    if( !trained ){
        warningLog << "project(const MatrixFloat &data,MatrixFloat &prjData) - The PrincipalComponentAnalysis module has not been trained!" << std::endl;
        return false;
    }

    if( data.getNumCols() != numInputDimensions ){
        warningLog << "project(const MatrixFloat &data,MatrixFloat &prjData) - The number of columns in the input vector (" << data.getNumCols() << ") does not match the number of input dimensions (" << numInputDimensions << ")!" << std::endl;
        return false;
    }
	
    MatrixFloat msData( data );
    prjData.resize(data.getNumRows(),numPrincipalComponents);
	
    if( normData ){
        //Normalize the data (mean subtract, then divide by the standard deviation)
        for(UINT i=0; i<data.getNumRows(); i++)
            for(UINT j=0; j<numInputDimensions; j++)
                msData[i][j] = (msData[i][j]-mean[j])/stdDev[j];
    }else{
        //Mean subtract the data
        for(UINT i=0; i<data.getNumRows(); i++)
            for(UINT j=0; j<numInputDimensions; j++)
                msData[i][j] -= mean[j];
    }
	
    //Projected Data
    for(UINT row=0; row<msData.getNumRows(); row++){//For each row in the final data
        for(UINT i=0; i<numPrincipalComponents; i++){//For each PC
            prjData[row][i]=0;
            for(UINT j=0; j<data.getNumCols(); j++)//For each feature
                prjData[row][i] += msData[row][j] * eigenvectors[j][sortedEigenvalues[i].index];
        }
    }
	
    return true;
}
Example #11
bool DecisionTreeClusterNode::computeLeafNodeWeights( MatrixFloat &weights ) const{

    if( isLeafNode ){ //If we reach a leaf node, there is nothing to do
        return true;
    }

    if( featureIndex >= weights.getNumCols() ){ //Feature index is out of bounds
        warningLog << __GRT_LOG__ << " Feature index is greater than weights Vector size!" << std::endl;
        return false;
    }

    if( leftChild ){ //Recursively compute the weights for the left child until we reach the node above a leaf node
        if( leftChild->getIsLeafNode() ){
            if( classProbabilities.getSize() != weights.getNumRows() ){
                warningLog << __GRT_LOG__ << " The number of rows in the weights matrix does not match the class probabilities Vector size!" << std::endl;
                return false;
            }
            for(UINT i=0; i<classProbabilities.getSize(); i++){
                weights[ i ][ featureIndex ] += classProbabilities[ i ];
            }
            
        }
        leftChild->computeLeafNodeWeights( weights );
    }
    if( rightChild ){ //Recursively compute the weights for the right child until we reach the node above a leaf node
        if( rightChild->getIsLeafNode() ){
            if( classProbabilities.getSize() != weights.getNumRows() ){
                warningLog << __GRT_LOG__ << " The number of rows in the weights matrix does not match the class probabilities Vector size!" << std::endl;
                return false;
            }
            for(UINT i=0; i<classProbabilities.getSize(); i++){
                weights[ i ][ featureIndex ] += classProbabilities[ i ];
            }
        }
        rightChild->computeLeafNodeWeights( weights );
    }

    return true;
}
Example #12
// Tests the MatrixFloat type
TEST(DynamicType, MatrixFloatTest) {
  DynamicType type;
  MatrixFloat a(3,1);
  a[0][0] = 1.1; a[1][0] = 1.2; a[2][0] = 1.3;
  EXPECT_TRUE( type.set( a ) );
  MatrixFloat b = type.get< MatrixFloat >();
  EXPECT_EQ( a.getSize(), b.getSize() );
  EXPECT_EQ( a.getNumRows(), b.getNumRows() );
  EXPECT_EQ( a.getNumCols(), b.getNumCols() );
  for(unsigned int i=0; i<a.getNumRows(); i++){
    for(unsigned int j=0; j<a.getNumCols(); j++){
      EXPECT_EQ( a[i][j], b[i][j] );
    }
  }
}
Example #13
bool KMeans::train_(MatrixFloat &data){
	
	trained = false;
	
	if( numClusters == 0 ){
        errorLog << "train_(MatrixFloat &data) - Failed to train model. NumClusters is zero!" << std::endl;
		return false;
	}
    
    if( data.getNumRows() == 0 || data.getNumCols() == 0 ){
        errorLog << "train_(MatrixFloat &data) - The number of rows or columns in the data is zero!" << std::endl;
		return false;
	}
    
	numTrainingSamples = data.getNumRows();
	numInputDimensions = data.getNumCols();

	clusters.resize(numClusters,numInputDimensions);
	assign.resize(numTrainingSamples);
	count.resize(numClusters);

	//Randomly pick k data points as the starting clusters
	Random random;
	Vector< UINT > randIndexs(numTrainingSamples);
	for(UINT i=0; i<numTrainingSamples; i++) randIndexs[i] = i;
    std::random_shuffle(randIndexs.begin(), randIndexs.end());

    //Copy the clusters
	for(UINT k=0; k<numClusters; k++){
		for(UINT j=0; j<numInputDimensions; j++){
            clusters[k][j] = data[ randIndexs[k] ][j];
		}
	}

	return trainModel( data );
}
Example #14
bool saveResults( const GestureRecognitionPipeline &pipeline, const string &filename ){
    
    infoLog << "Saving results to file: " << filename << endl;

    fstream file( filename.c_str(), fstream::out );

    if( !file.is_open() ){
        errorLog << "Failed to open results file: " << filename << endl;
        return false;
    }

    file << pipeline.getTestAccuracy() << endl;

    Vector< UINT > classLabels = pipeline.getClassLabels();

    for(UINT k=0; k<pipeline.getNumClassesInModel(); k++){
        file << pipeline.getTestPrecision( classLabels[k] );
        if( k+1 < pipeline.getNumClassesInModel() ) file << "\t";
        else file << endl;
    }

    for(UINT k=0; k<pipeline.getNumClassesInModel(); k++){
        file << pipeline.getTestRecall( classLabels[k] );
        if( k+1 < pipeline.getNumClassesInModel() ) file << "\t";
        else file << endl;
    }

    for(UINT k=0; k<pipeline.getNumClassesInModel(); k++){
        file << pipeline.getTestFMeasure( classLabels[k] );
        if( k+1 < pipeline.getNumClassesInModel() ) file << "\t";
        else file << endl;
    }

    MatrixFloat confusionMatrix = pipeline.getTestConfusionMatrix();
    for(UINT i=0; i<confusionMatrix.getNumRows(); i++){
        for(UINT j=0; j<confusionMatrix.getNumCols(); j++){
            file << confusionMatrix[i][j];
            if( j+1 < confusionMatrix.getNumCols() ) file << "\t";
        }
        file << endl;
    }

    file.close();

    infoLog << "Results saved." << endl;

    return true;
}
Example #15
int main (int argc, const char * argv[])
{
    //Create a new KMeans instance
    KMeans kmeans;
    kmeans.setComputeTheta( true );
    kmeans.setMinChange( 1.0e-10 );
    kmeans.setMinNumEpochs( 10 );
	kmeans.setMaxNumEpochs( 10000 );

	//There are a number of ways of training the KMeans algorithm, depending on what you need the KMeans for
	//These are:
	//- with labelled training data (in the ClassificationData format)
	//- with unlabelled training data (in the UnlabelledData format)
	//- with unlabelled training data (in a simple MatrixDouble format)
	
	//This example shows you how to train the algorithm with ClassificationData
	
	//Load some training data to train the KMeans algorithm
    ClassificationData trainingData;
    
    if( !trainingData.load("LabelledClusterData.csv") ){
        cout << "Failed to load training data!\n";
        return EXIT_FAILURE;
    }
	
    //Train the KMeans algorithm - K will automatically be set to the number of classes in the training dataset
    if( !kmeans.train( trainingData ) ){
        cout << "Failed to train model!\n";
        return EXIT_FAILURE;
    }
	
	//Get the K clusters from the KMeans instance and print them
	cout << "\nClusters:\n";
	MatrixFloat clusters = kmeans.getClusters();
    for(unsigned int k=0; k<clusters.getNumRows(); k++){
		for(unsigned int n=0; n<clusters.getNumCols(); n++){
			cout << clusters[k][n] << "\t";
		}
		cout << endl;
	}
	
    return EXIT_SUCCESS;
}
Example #16
ClassificationData ClassificationDataStream::getClassificationData( const bool includeNullGestures ) const {
    
    ClassificationData classificationData;
    
    classificationData.setNumDimensions( getNumDimensions() );
    classificationData.setAllowNullGestureClass( includeNullGestures );

    bool addSample = false;
    for(UINT i=0; i<timeSeriesPositionTracker.size(); i++){
        addSample = includeNullGestures ? true : timeSeriesPositionTracker[i].getClassLabel() != GRT_DEFAULT_NULL_CLASS_LABEL;
        if( addSample ){
            MatrixFloat dataSegment = getTimeSeriesData( timeSeriesPositionTracker[i] );
            for(UINT j=0; j<dataSegment.getNumRows(); j++){
                classificationData.addSample(timeSeriesPositionTracker[i].getClassLabel(), dataSegment.getRow(j) );
            }
        }
    }
    
    return classificationData;
}
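Examples 5 and 16 work together: windows of timeseries data are streamed in with addSample, then flattened row-by-row into a standard ClassificationData set. A minimal sketch under those assumptions (setNumDimensions is the usual GRT dimension setter):

//Hypothetical usage: stream labelled windows, then flatten the stream
ClassificationDataStream stream;
stream.setNumDimensions( 3 );
MatrixFloat window(10,3); //10 time steps of 3-dimensional data
//...fill window...
stream.addSample( 1, window ); //label every row of the window as class 1
stream.addSample( 2, window ); //starting a new class closes the previous segment
ClassificationData flat = stream.getClassificationData( false ); //false: skip null-gesture segments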
Example #17
bool PrincipalComponentAnalysis::setModel( const VectorFloat &mean, const MatrixFloat &eigenvectors ){

    if( (UINT)mean.size() != eigenvectors.getNumCols() ){
        return false;
    }

    trained = true;
    numInputDimensions = eigenvectors.getNumCols();
    numPrincipalComponents = eigenvectors.getNumRows();
    this->mean = mean;
    stdDev.clear();
    componentWeights.clear();
    eigenvalues.clear();
    sortedEigenvalues.clear();
    this->eigenvectors = eigenvectors;
    
    //The eigenvectors are already sorted, so the sorted eigenvalues just holds the default index
    for(UINT i=0; i<numPrincipalComponents; i++){
        sortedEigenvalues.push_back( IndexedDouble(i,0.0) );
    }
    return true;
}
Example #18
bool MatrixFloat::add(const MatrixFloat &b){
    
    if( b.getNumRows() != rows ){
        errorLog << "add(const MatrixFloat &b) - Failed to add matrix! The rows do not match!" << std::endl;
        return false;
    }
    
    if( b.getNumCols() != cols ){
        errorLog << "add(const MatrixFloat &b) - Failed to add matrix! The rows do not match!" << std::endl;
        return false;
    }
    
    unsigned int i = 0;
    
    //Using direct pointers really helps speed up the computation time
    const Float *p_b = &(b[0][0]);
    
    for(i=0; i<rows*cols; i++){
        dataPtr[i] += p_b[i];
    }
    
    return true;
}
Example #19
bool GaussianMixtureModels::train_(MatrixFloat &data){
    
    trained = false;
    
    //Clear any previous training results
    det.clear();
    invSigma.clear();
    numTrainingIterationsToConverge = 0;
    
    if( data.getNumRows() == 0 ){
        errorLog << "train_(MatrixFloat &data) - Training Failed! Training data is empty!" << std::endl;
        return false;
    }
    
    //Resize the variables
    numTrainingSamples = data.getNumRows();
    numInputDimensions = data.getNumCols();
    
    //Resize mu and resp
    mu.resize(numClusters,numInputDimensions);
    resp.resize(numTrainingSamples,numClusters);
    
    //Resize sigma
    sigma.resize(numClusters);
    for(UINT k=0; k<numClusters; k++){
        sigma[k].resize(numInputDimensions,numInputDimensions);
    }
    
    //Resize frac and lndets
    frac.resize(numClusters);
    lndets.resize(numClusters);
    
    //Scale the data if needed
    ranges = data.getRanges();
    if( useScaling ){
        for(UINT i=0; i<numTrainingSamples; i++){
            for(UINT j=0; j<numInputDimensions; j++){
                data[i][j] = scale(data[i][j],ranges[j].minValue,ranges[j].maxValue,0,1);
            }
        }
    }
    
    //Pick K random starting points for the initial guesses of Mu
    Random random;
    Vector< UINT > randomIndexs(numTrainingSamples);
    for(UINT i=0; i<numTrainingSamples; i++) randomIndexs[i] = i;
    for(UINT i=0; i<numClusters; i++){
        SWAP(randomIndexs[ i ],randomIndexs[ random.getRandomNumberInt(0,numTrainingSamples) ]);
    }
    for(UINT k=0; k<numClusters; k++){
        for(UINT n=0; n<numInputDimensions; n++){
            mu[k][n] = data[ randomIndexs[k] ][n];
        }
    }
    
    //Setup sigma and the uniform prior on P(k)
    for(UINT k=0; k<numClusters; k++){
        frac[k] = 1.0/Float(numClusters);
        for(UINT i=0; i<numInputDimensions; i++){
            for(UINT j=0; j<numInputDimensions; j++) sigma[k][i][j] = 0;
            sigma[k][i][i] = 1.0e-2;   //Set the diagonal to a small number
        }
    }
    
    loglike = 0;
    bool keepGoing = true;
    Float change = 99.9e99;
    UINT numIterationsNoChange = 0;
    VectorFloat u(numInputDimensions);
    VectorFloat v(numInputDimensions);
    
    while( keepGoing ){
        
        //Run the estep
        if( estep( data, u, v, change ) ){
            
            //Run the mstep
            mstep( data );
        
            //Check for convergence
            if( fabs( change ) < minChange ){
                if( ++numIterationsNoChange >= minNumEpochs ){
                    keepGoing = false;
                }
            }else numIterationsNoChange = 0;
            if( ++numTrainingIterationsToConverge >= maxNumEpochs ) keepGoing = false;
            
        }else{
            errorLog << "train_(MatrixFloat &data) - Estep failed at iteration " << numTrainingIterationsToConverge << std::endl;
            return false;
        }
    }
    
    //Compute the inverse of sigma and the determinants for prediction
    if( !computeInvAndDet() ){
        det.clear();
        invSigma.clear();
        errorLog << "train_(MatrixFloat &data) - Failed to compute inverse and determinat!" << std::endl;
        return false;
    }
    
    //Flag that the model was trained
    trained = true;
    
    //Setup the cluster labels
    clusterLabels.resize(numClusters);
    for(UINT i=0; i<numClusters; i++){
        clusterLabels[i] = i+1;
    }
    clusterLikelihoods.resize(numClusters,0);
    clusterDistances.resize(numClusters,0);
    
    return true;
}
Example #20
int main (int argc, const char * argv[])
{
    //Create some input data for the PCA algorithm - this data comes from the Matlab PCA example
	MatrixFloat data(13,4);
	
	data[0][0] = 7; data[0][1] = 26; data[0][2] = 6; data[0][3] = 60;
	data[1][0] = 1; data[1][1] = 29; data[1][2] = 15; data[1][3] = 52;
	data[2][0] = 11; data[2][1] = 56; data[2][2] = 8; data[2][3] = 20;
	data[3][0] = 11; data[3][1] = 31; data[3][2] = 8; data[3][3] = 47;
	data[4][0] = 7; data[4][1] = 52; data[4][2] = 6; data[4][3] = 33;
	data[5][0] = 11; data[5][1] = 55; data[5][2] = 9; data[5][3] = 22;
	data[6][0] = 3; data[6][1] = 71; data[6][2] = 17; data[6][3] = 6;
	data[7][0] = 1; data[7][1] = 31; data[7][2] = 22; data[7][3] = 44;
	data[8][0] = 2; data[8][1] = 54; data[8][2] = 18; data[8][3] = 22;
	data[9][0] = 21; data[9][1] = 47; data[9][2] = 4; data[9][3] = 26;
	data[10][0] = 1; data[10][1] = 40; data[10][2] = 23; data[10][3] = 34;
	data[11][0] = 11; data[11][1] = 66; data[11][2] = 9; data[11][3] = 12;
	data[12][0] = 10; data[12][1] = 68; data[12][2] = 8; data[12][3] = 12;
    
    //Print the input data
    data.print("Input Data:");
	
    //Create a new principal component analysis instance
	PrincipalComponentAnalysis pca;
	
    //Run pca on the input data, setting the maximum variance value to 95% of the variance
	if( !pca.computeFeatureVector( data, 0.95 ) ){
		cout << "ERROR: Failed to compute feature vector!\n";
		return EXIT_FAILURE;
	}
    
    //Get the number of principal components
    UINT numPrincipalComponents = pca.getNumPrincipalComponents();
    cout << "Number of Principal Components: " << numPrincipalComponents << endl;
	
    //Project the original data onto the principal subspace
	MatrixFloat prjData;
	if( !pca.project( data, prjData ) ){
		cout << "ERROR: Failed to project data!\n";
		return EXIT_FAILURE;
	}
    
    //Print out the pca info
    pca.print("PCA Info:");
    
    //Print the projected data
    cout << "ProjectedData:\n";
	for(UINT i=0; i<prjData.getNumRows(); i++){
		for(UINT j=0; j<prjData.getNumCols(); j++){
			cout << prjData[i][j] << "\t";
		}
		cout << endl;
	}

	//Save the model to a file
	if( !pca.save( "pca-model.grt" ) ){
		cout << "ERROR: Failed to save model to file!\n";
		return EXIT_FAILURE;
	}

	//Load the model from the file
	if( !pca.load( "pca-model.grt" ) ){
		cout << "ERROR: Failed to load model from file!\n";
		return EXIT_FAILURE;
	}

	//Print out the pca info again to make sure it matches
    pca.print("PCA Info:");
    
    return EXIT_SUCCESS;
}
Example #21
bool SelfOrganizingMap::train_( MatrixFloat &data ){
    
    //Clear any previous models
    clear();
    
    const UINT M = data.getNumRows();
    const UINT N = data.getNumCols();
    numInputDimensions = N;
    numOutputDimensions = numClusters*numClusters;
    Random rand;
    
    //Setup the neurons
    neurons.resize( numClusters, numClusters );
    
    if( neurons.getSize() != numClusters*numClusters ){
        errorLog << "train_( MatrixFloat &data ) - Failed to resize neurons matrix, there might not be enough memory!" << std::endl;
        return false;
    }
    
    //Init the neurons
    for(UINT i=0; i<numClusters; i++){
        for(UINT j=0; j<numClusters; j++){
            neurons[i][j].init( N, 0.5, SOM_MIN_TARGET, SOM_MAX_TARGET );
        }
    }
    
    //Scale the data if needed
    ranges = data.getRanges();
    if( useScaling ){
        for(UINT i=0; i<M; i++){
            for(UINT j=0; j<numInputDimensions; j++){
                data[i][j] = scale(data[i][j],ranges[j].minValue,ranges[j].maxValue,SOM_MIN_TARGET,SOM_MAX_TARGET);
            }
        }
    }
    
    Float error = 0;
    Float lastError = 0;
    Float trainingSampleError = 0;
    Float delta = 0;
    Float minChange = 0;
    Float weightUpdate = 0;
    Float alpha = 1.0;
    Float neuronDiff = 0;
    Float neuronWeightFunction = 0;
    Float gamma = 0;
    UINT iter = 0;
    bool keepTraining = true;
    VectorFloat trainingSample;
    Vector< UINT > randomTrainingOrder(M);
    
    //In most cases, the training data is grouped into classes (100 samples for class 1, followed by 100 samples for class 2, etc.)
    //This can cause a problem for stochastic gradient descent algorithm. To avoid this issue, we randomly shuffle the order of the
    //training samples. This random order is then used at each epoch.
    for(UINT i=0; i<M; i++){
        randomTrainingOrder[i] = i;
    }
    std::random_shuffle(randomTrainingOrder.begin(), randomTrainingOrder.end());
    
    //Enter the main training loop
    while( keepTraining ){
        
        //Update alpha based on the current iteration
        alpha = Util::scale(iter,0,maxNumEpochs,alphaStart,alphaEnd);
        
        //Run one epoch of training using the online best-matching-unit algorithm
        error = 0;
        for(UINT m=0; m<M; m++){
            
            trainingSampleError = 0;
            
            //Get the i'th random training sample
            trainingSample = data.getRowVector( randomTrainingOrder[m] );
            
            //Find the best matching unit
            Float dist = 0;
            Float bestDist = grt_numeric_limits< Float >::max();
            UINT bestIndexRow = 0;
            UINT bestIndexCol = 0;
            for(UINT i=0; i<numClusters; i++){
                for(UINT j=0; j<numClusters; j++){
                    dist = neurons[i][j].getSquaredWeightDistance( trainingSample );
                    if( dist < bestDist ){
                        bestDist = dist;
                        bestIndexRow = i;
                        bestIndexCol = j;
                    }
                }
            }
            error += bestDist;
            
            //Update the weights based on the distance to the winning neuron
            //Neurons closer to the winning neuron will have their weights update more
            const Float bir = bestIndexRow;
            const Float bic = bestIndexCol;
            for(UINT i=0; i<numClusters; i++){  
                for(UINT j=0; j<numClusters; j++){
                
                    //Update the weights for all the neurons, pulling them a little closer to the input example
                    neuronDiff = 0;
                    gamma = 2.0 * grt_sqr( numClusters * sigmaWeight );
                    neuronWeightFunction = exp( -grt_sqr(bir-i)/gamma ) * exp( -grt_sqr(bic-j)/gamma );
                    //std::cout << "best index: " << bestIndexRow << " " << bestIndexCol << " bestDist: " << bestDist << " pos: " << i << " " << j << " neuronWeightFunction: " << neuronWeightFunction << std::endl;
                    for(UINT n=0; n<N; n++){
                        neuronDiff = trainingSample[n] - neurons[i][j][n];
                        weightUpdate = neuronWeightFunction * alpha * neuronDiff;
                        neurons[i][j][n] += weightUpdate;
                    }
                }
            }
        }

        error = error / M;

        trainingLog << "iter: " << iter << " average error: " << error << std::endl;
        
        //Compute the error
        delta = fabs( error-lastError );
        lastError = error;
        
        //Check to see if we should stop (note: the '&& false' disables this convergence check, so training always runs until maxNumEpochs is reached)
        if( delta <= minChange && false ){
            converged = true;
            keepTraining = false;
        }
        
        if( grt_isinf( error ) ){
            errorLog << "train_(MatrixFloat &data) - Training failed! Error is NAN!" << std::endl;
            return false;
        }
        
        if( ++iter >= maxNumEpochs ){
            keepTraining = false;
        }
        
        trainingLog << "Epoch: " << iter << " Squared Error: " << error << " Delta: " << delta << " Alpha: " << alpha << std::endl;
    }
    
    numTrainingIterationsToConverge = iter;
    trained = true;
    
    return true;
}
Example #22
bool ContinuousHiddenMarkovModel::predict_( MatrixFloat &timeseries ){
    
    if( !trained ){
        errorLog << "predict_( MatrixFloat &timeseries ) - The model is not trained!" << std::endl;
        return false;
    }
    
    if( timeseries.getNumCols() != numInputDimensions ){
        errorLog << "predict_( MatrixFloat &timeseries ) - The matrix column size (" << timeseries.getNumCols() << ") does not match the number of input dimensions (" << numInputDimensions << ")" << std::endl;
        return false;
    }
    
    unsigned int t,i,j,k,index = 0;
    Float maxAlpha = 0;
    Float norm = 0;
    
    //Downsample the observation timeseries using the same downsample factor of the training data
    const unsigned int timeseriesLength = (unsigned int)timeseries.getNumRows();
    const unsigned int T = downsampleFactor < timeseriesLength ? (unsigned int)floor( timeseriesLength / Float(downsampleFactor) ) : timeseriesLength;
    const unsigned int K = downsampleFactor < timeseriesLength ? downsampleFactor : 1; //K is used to average over multiple bins
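    //e.g. a 100-row timeseries with a downsampleFactor of 5 gives T = 20 output rows, each averaging K = 5 consecutive input rows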
    MatrixFloat obs(T,numInputDimensions);
    for(j=0; j<numInputDimensions; j++){
        index = 0;
        for(i=0; i<T; i++){
            norm = 0;
            obs[i][j] = 0;
            for(k=0; k<K; k++){
                if( index < timeseriesLength ){
                    obs[i][j] += timeseries[index++][j];
                    norm += 1;
                }
            }
            if( norm > 1 )
                obs[i][j] /= norm;
        }
    }
    
    //Resize alpha, c, and the estimated states vector as needed
    if( alpha.getNumRows() != T || alpha.getNumCols() != numStates ) alpha.resize(T,numStates);
    if( (unsigned int)c.size() != T ) c.resize(T);
    if( (unsigned int)estimatedStates.size() != T ) estimatedStates.resize(T);
    
    ////////////////// Run the forward algorithm ////////////////////////
    //Step 1: Init at t=0
    t = 0;
    c[t] = 0;
    maxAlpha = 0;
    for(i=0; i<numStates; i++){
        alpha[t][i] = pi[i]*gauss(b,obs,sigmaStates,i,t,numInputDimensions);
        c[t] += alpha[t][i];
        
        //Keep track of the best state at time t
        if( alpha[t][i] > maxAlpha ){
            maxAlpha = alpha[t][i];
            estimatedStates[t] = i;
        }
    }
    
    //Set the initial scaling coeff
    c[t] = 1.0/c[t];
    
    //Scale alpha
    for(i=0; i<numStates; i++) alpha[t][i] *= c[t];
    
    //Step 2: Induction
    for(t=1; t<T; t++){
        c[t] = 0.0;
        maxAlpha = 0;
        for(j=0; j<numStates; j++){
            alpha[t][j] = 0.0;
            for(i=0; i<numStates; i++){
                alpha[t][j] +=  alpha[t-1][i] * a[i][j];
            }
            alpha[t][j] *= gauss(b,obs,sigmaStates,j,t,numInputDimensions);
            c[t] += alpha[t][j];
            
            //Keep track of the best state at time t
            if( alpha[t][j] > maxAlpha ){
                maxAlpha = alpha[t][j];
                estimatedStates[t] = j;
            }
        }
        
        //Set the scaling coeff
        c[t] = 1.0/c[t];
        
        //Scale Alpha
        for(j=0; j<numStates; j++) alpha[t][j] *= c[t];
    }
    
    //Termination
    loglikelihood = 0.0;
    for(t=0; t<T; t++) loglikelihood += log( c[t] );
    loglikelihood = -loglikelihood; //Store the negative log likelihood
    
    //Set the phase as the last estimated state, this will give a phase between [0 1]
    phase = (estimatedStates[T-1]+1.0)/Float(numStates);
    
    return true;
}
Example #23
int main (int argc, const char * argv[])
{
    //Parse the data filename from the argument list, you should pass in the data path to the iris data set in the GRT data folder
    if( argc != 2 ){
        cout << "Error: failed to parse data filename from command line. You should run this example with one argument pointing to the data filename!\n";
        return EXIT_FAILURE;
    }
    const string filename = argv[1];

    //We are going to use the Iris dataset, you can find more about the original dataset at: http://en.wikipedia.org/wiki/Iris_flower_data_set
    
    //Create a new instance of ClassificationData to hold the training data
    ClassificationData trainingData;
    
    //Load the training dataset from a file, the file should be in the same directory as this program
    if( !trainingData.load( filename ) ){
        cout << "Failed to load Iris data from file!\n";
        return EXIT_FAILURE;
    }
    
    //Print some basic stats about the dataset we have loaded
    trainingData.printStats();
    
    //Partition the training dataset into a training dataset and test dataset
    //We will use 60% of the data to train the algorithm and 40% of the data to test it
    //The true parameter flags that we want to use stratified sampling, which means there 
    //should be an equal class distribution between the training and test datasets
    ClassificationData testData = trainingData.split( 60, true );
    
    //Setup the gesture recognition pipeline
    GestureRecognitionPipeline pipeline;
    
    //Add a KNN classification algorithm as the main classifier with a K value of 10
    pipeline << KNN(10);
    
    //Train the KNN algorithm using the training dataset
    if( !pipeline.train( trainingData ) ){
        cout << "Failed to train the pipeline!\n";
        return EXIT_FAILURE;
    }
    
    //Test the KNN model using the test dataset
    if( !pipeline.test( testData ) ){
        cout << "Failed to test the pipeline!\n";
        return EXIT_FAILURE;
    }
    
    //Print some metrics about how successful the classification was
    //Print the accuracy
    cout << "The classification accuracy was: " << pipeline.getTestAccuracy() << "%\n" << endl;
    
    //Print the precision for each class
    for(UINT k=0; k<pipeline.getNumClassesInModel(); k++){
        UINT classLabel = pipeline.getClassLabels()[k];
        double classPrecision = pipeline.getTestPrecision( classLabel );
        cout << "The precision for class " << classLabel << " was " << classPrecision << endl;
    }
    cout << endl;
    
    //Print the recall for each class
    for(UINT k=0; k<pipeline.getNumClassesInModel(); k++){
        UINT classLabel = pipeline.getClassLabels()[k];
        double classRecall = pipeline.getTestRecall( classLabel );
        cout << "The recall for class " << classLabel << " was " << classRecall << endl;
    }
    cout << endl;
    
    //Print the confusion matrix
    MatrixFloat confusionMatrix = pipeline.getTestConfusionMatrix();
    cout << "Confusion Matrix: \n";
    for(UINT i=0; i<confusionMatrix.getNumRows(); i++){
        for(UINT j=0; j<confusionMatrix.getNumCols(); j++){
            cout << confusionMatrix[i][j] << "\t";
        }
        cout << endl;
        
    }
    cout << endl;

    return EXIT_SUCCESS;
}
Example #24
bool BernoulliRBM::train_(MatrixFloat &data){
    
    const UINT numTrainingSamples = data.getNumRows();
    numInputDimensions = data.getNumCols();
    numOutputDimensions = numHiddenUnits;
    numVisibleUnits = numInputDimensions;
    
    trainingLog << "NumInputDimensions: " << numInputDimensions << std::endl;
    trainingLog << "NumOutputDimensions: " << numOutputDimensions << std::endl;
    
    if( randomizeWeightsForTraining ){
    
        //Init the weights matrix
        weightsMatrix.resize(numHiddenUnits, numVisibleUnits);
        
        Float a = 1.0 / numVisibleUnits;
        for(UINT i=0; i<numHiddenUnits; i++) {
            for(UINT j=0; j<numVisibleUnits; j++) {
                weightsMatrix[i][j] = rand.getRandomNumberUniform(-a, a);
            }
        }

        //Init the bias units
        visibleLayerBias.resize( numVisibleUnits );
        hiddenLayerBias.resize( numHiddenUnits );
        std::fill(visibleLayerBias.begin(),visibleLayerBias.end(),0);
        std::fill(hiddenLayerBias.begin(),hiddenLayerBias.end(),0);
        
    }else{
        if( weightsMatrix.getNumRows() != numHiddenUnits ){
            errorLog << "train_(MatrixFloat &data) - Weights matrix row size does not match the number of hidden units!" << std::endl;
            return false;
        }
        if( weightsMatrix.getNumCols() != numVisibleUnits ){
            errorLog << "train_(MatrixFloat &data) - Weights matrix row size does not match the number of visible units!" << std::endl;
            return false;
        }
        if( visibleLayerBias.size() != numVisibleUnits ){
            errorLog << "train_(MatrixFloat &data) - Visible layer bias size does not match the number of visible units!" << std::endl;
            return false;
        }
        if( hiddenLayerBias.size() != numHiddenUnits ){
            errorLog << "train_(MatrixFloat &data) - Hidden layer bias size does not match the number of hidden units!" << std::endl;
            return false;
        }
    }
    
    //Flag that the model has been trained in case the user wants to save the model during a training iteration using an observer
    trained = true;
    
    //Make sure the data is scaled between [0 1]
    ranges = data.getRanges();
    if( useScaling ){
        for(UINT i=0; i<numTrainingSamples; i++){
            for(UINT j=0; j<numInputDimensions; j++){
                data[i][j] = grt_scale(data[i][j], ranges[j].minValue, ranges[j].maxValue, 0.0, 1.0);
            }
        }
    }
    

    const UINT numBatches = static_cast<UINT>( ceil( Float(numTrainingSamples)/batchSize ) );
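    //e.g. 1050 training samples with a batchSize of 100 gives ceil(10.5) = 11 batches; the final batch is clipped to 50 samples below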
    
    //Setup the batch indexs
    Vector< BatchIndexs > batchIndexs( numBatches );
    UINT startIndex = 0;
    for(UINT i=0; i<numBatches; i++){
        batchIndexs[i].startIndex = startIndex;
        batchIndexs[i].endIndex = startIndex + batchSize;
        
        //Make sure the last batch end index is not larger than the number of training examples
        if( batchIndexs[i].endIndex >= numTrainingSamples ){
            batchIndexs[i].endIndex = numTrainingSamples;
        }
        
        //Get the batch size
        batchIndexs[i].batchSize = batchIndexs[i].endIndex - batchIndexs[i].startIndex;
        
        //Set the start index for the next batch
        startIndex = batchIndexs[i].endIndex;
    }
    
    Timer timer;
    UINT i,j,n,epoch,noChangeCounter = 0;
    Float startTime = 0;
    Float alpha = learningRate;
    Float error = 0;
    Float err = 0;
    Float delta = 0;
    Float lastError = 0;
    Vector< UINT > indexList(numTrainingSamples);
    TrainingResult trainingResult;
    MatrixFloat wT( numVisibleUnits, numHiddenUnits );       //Stores a transposed copy of the weights vector
    MatrixFloat vW( numHiddenUnits, numVisibleUnits );       //Stores the weight velocity updates
    MatrixFloat tmpW( numHiddenUnits, numVisibleUnits );     //Stores the weight values that will be used to update the main weights matrix at each batch update
    MatrixFloat v1( batchSize, numVisibleUnits );            //Stores the real batch data during a batch update
    MatrixFloat v2( batchSize, numVisibleUnits );            //Stores the sampled batch data during a batch update
    MatrixFloat h1( batchSize, numHiddenUnits );             //Stores the hidden states given v1 and the current weightsMatrix
    MatrixFloat h2( batchSize, numHiddenUnits );             //Stores the sampled hidden states given v2 and the current weightsMatrix
    MatrixFloat c1( numHiddenUnits, numVisibleUnits );       //Stores h1' * v1
    MatrixFloat c2( numHiddenUnits, numVisibleUnits );       //Stores h2' * v2
    MatrixFloat vDiff( batchSize, numVisibleUnits );         //Stores the difference between v1-v2
    MatrixFloat hDiff( batchSize, numHiddenUnits );          //Stores the difference between h1-h2
    MatrixFloat cDiff( numHiddenUnits, numVisibleUnits );    //Stores the difference between c1-c2
    VectorFloat vDiffSum( numVisibleUnits );                 //Stores the column sum of vDiff
    VectorFloat hDiffSum( numHiddenUnits );                  //Stores the column sum of hDiff
    VectorFloat visibleLayerBiasVelocity( numVisibleUnits ); //Stores the velocity update of the visibleLayerBias
    VectorFloat hiddenLayerBiasVelocity( numHiddenUnits );   //Stores the velocity update of the hiddenLayerBias
    
    //Set all the velocity weights to zero
    vW.setAllValues( 0 );
    std::fill(visibleLayerBiasVelocity.begin(),visibleLayerBiasVelocity.end(),0);
    std::fill(hiddenLayerBiasVelocity.begin(),hiddenLayerBiasVelocity.end(),0);
    
    //Randomize the order that the training samples will be used in
    for(UINT i=0; i<numTrainingSamples; i++) indexList[i] = i;
    if( randomiseTrainingOrder ){
        std::random_shuffle(indexList.begin(), indexList.end());
    }
    
    //Start the main training loop
    timer.start();
    for(epoch=0; epoch<maxNumEpochs; epoch++) {
        startTime = timer.getMilliSeconds();
        error = 0;
        
        //Randomize the batch order
        std::random_shuffle(batchIndexs.begin(),batchIndexs.end());
        
        //Run each of the batch updates
        for(UINT k=0; k<numBatches; k+=batchStepSize){
            
            //Resize the data matrices, the matrices will only be resized if the rows cols are different
            v1.resize( batchIndexs[k].batchSize, numVisibleUnits );
            h1.resize( batchIndexs[k].batchSize, numHiddenUnits );
            v2.resize( batchIndexs[k].batchSize, numVisibleUnits );
            h2.resize( batchIndexs[k].batchSize, numHiddenUnits );
            
            //Setup the data pointers, using data pointers saves a few ms on large matrix updates
            Float **w_p = weightsMatrix.getDataPointer();
            Float **wT_p = wT.getDataPointer();
            Float **vW_p = vW.getDataPointer();
            Float **data_p = data.getDataPointer();
            Float **v1_p = v1.getDataPointer();
            Float **v2_p = v2.getDataPointer();
            Float **h1_p = h1.getDataPointer();
            Float **h2_p = h2.getDataPointer();
            Float *vlb_p = &visibleLayerBias[0];
            Float *hlb_p = &hiddenLayerBias[0];
            
            //Get the batch data
            UINT index = 0;
            for(i=batchIndexs[k].startIndex; i<batchIndexs[k].endIndex; i++){
                for(j=0; j<numVisibleUnits; j++){
                    v1_p[index][j] = data_p[ indexList[i] ][j];
                }
                index++;
            }
            
            //Copy a transposed version of the weights matrix, this is used to compute h1 and h2
            for(i=0; i<numHiddenUnits; i++)
                for(j=0; j<numVisibleUnits; j++)
                    wT_p[j][i] = w_p[i][j];
            
            //Compute h1
            h1.multiple(v1, wT);
            for(n=0; n<batchIndexs[k].batchSize; n++){
                for(i=0; i<numHiddenUnits; i++){
                    h1_p[n][i] = sigmoidRandom( h1_p[n][i] + hlb_p[i] );
                }
            }
            
            //Compute v2
            v2.multiple(h1, weightsMatrix);
            for(n=0; n<batchIndexs[k].batchSize; n++){
                for(i=0; i<numVisibleUnits; i++){
                    v2_p[n][i] = sigmoidRandom( v2_p[n][i] + vlb_p[i] );
                }
            }
            
            //Compute h2
            h2.multiple(v2,wT);
            for(n=0; n<batchIndexs[k].batchSize; n++){
                for(i=0; i<numHiddenUnits; i++){
                    h2_p[n][i] = grt_sigmoid( h2_p[n][i] + hlb_p[i] );
                }
            }
            
            //Compute c1, c2 and the difference between v1-v2
            c1.multiple(h1,v1,true);
            c2.multiple(h2,v2,true);
            vDiff.subtract(v1, v2);
            
            //Compute the sum of vdiff
            for(j=0; j<numVisibleUnits; j++){
                vDiffSum[j] = 0;
                for(i=0; i<batchIndexs[k].batchSize; i++){
                    vDiffSum[j] += vDiff[i][j];
                }
            }
            
            //Compute the difference between h1 and h2
            hDiff.subtract(h1, h2);
            for(j=0; j<numHiddenUnits; j++){
                hDiffSum[j] = 0;
                for(i=0; i<batchIndexs[k].batchSize; i++){
                    hDiffSum[j] += hDiff[i][j];
                }
            }
            
            //Compute the difference between c1 and c2
            cDiff.subtract(c1,c2);
            
            //Update the weight velocities
            for(i=0; i<numHiddenUnits; i++){
                for(j=0; j<numVisibleUnits; j++){
                    vW_p[i][j] = ((momentum * vW_p[i][j]) + (alpha * cDiff[i][j])) / batchIndexs[k].batchSize;
                }
            }
            for(i=0; i<numVisibleUnits; i++){
                visibleLayerBiasVelocity[i] = ((momentum * visibleLayerBiasVelocity[i]) + (alpha * vDiffSum[i])) / batchIndexs[k].batchSize;
            }
            for(i=0; i<numHiddenUnits; i++){
                hiddenLayerBiasVelocity[i] = ((momentum * hiddenLayerBiasVelocity[i]) + (alpha * hDiffSum[i])) / batchIndexs[k].batchSize;
            }
            
            //Update the weights
            weightsMatrix.add( vW );
            
            //Update the bias for the visible layer
            for(i=0; i<numVisibleUnits; i++){
                visibleLayerBias[i] += visibleLayerBiasVelocity[i];
            }
            
            //Update the bias for the hidden layer
            for(i=0; i<numHiddenUnits; i++){
                hiddenLayerBias[i] += hiddenLayerBiasVelocity[i];
            }
            
            //Compute the reconstruction error
            err = 0;
            for(i=0; i<batchIndexs[k].batchSize; i++){
                for(j=0; j<numVisibleUnits; j++){
                    err += SQR( v1[i][j] - v2[i][j] );
                }
            }
            
            error += err / batchIndexs[k].batchSize;
        }
        error /= numBatches;
        delta = lastError - error;
        lastError = error;
        
        trainingLog << "Epoch: " << epoch+1 << "/" << maxNumEpochs;
        trainingLog << " Epoch time: " << (timer.getMilliSeconds()-startTime)/1000.0 << " seconds";
        trainingLog << " Learning rate: " << alpha;
        trainingLog << " Momentum: " << momentum;
        trainingLog << " Average reconstruction error: " << error;
        trainingLog << " Delta: " << delta << std::endl;
        
        //Update the learning rate
        alpha *= learningRateUpdate;
        
        trainingResult.setClassificationResult(epoch, error, this);
        trainingResults.push_back(trainingResult);
        trainingResultsObserverManager.notifyObservers( trainingResult );
        
        //Check for convergence
        if( fabs(delta) < minChange ){
            if( ++noChangeCounter >= minNumEpochs ){
                trainingLog << "Stopping training. MinChange limit reached!" << std::endl;
                break;
            }
        }else noChangeCounter = 0;
        
    }
    trainingLog << "Training complete after " << epoch << " epochs. Total training time: " << timer.getMilliSeconds()/1000.0 << " seconds" << std::endl;
    
    trained = true;
    
    return true;
}
Example #25
bool SelfOrganizingMap::train_( MatrixFloat &data ){
    
    //Clear any previous models
    clear();
    
    const UINT M = data.getNumRows();
    const UINT N = data.getNumCols();
    numInputDimensions = N;
    numOutputDimensions = numClusters;
    Random rand;
    
    //Setup the neurons
    neurons.resize( numClusters );
    
    if( neurons.size() != numClusters ){
        errorLog << "train_( MatrixFloat &data ) - Failed to resize neurons Vector, there might not be enough memory!" << std::endl;
        return false;
    }
    
    for(UINT j=0; j<numClusters; j++){
        
        //Init the neuron
        neurons[j].init( N, 0.5 );
        
        //Set the weights as a random training example
        neurons[j].weights = data.getRowVector( rand.getRandomNumberInt(0, M) );
    }
    
    //Setup the network weights
    switch( networkTypology ){
        case RANDOM_NETWORK:
            networkWeights.resize(numClusters, numClusters);
            
            //Set the diagonal weights as 1 (as i==j)
            for(UINT i=0; i<numClusters; i++){
                networkWeights[i][i] = 1;
            }
            
            //Randomize the other weights
            UINT indexA = 0;
            UINT indexB = 0;
            Float weight = 0;
            for(UINT i=0; i<numClusters*numClusters; i++){
                indexA = rand.getRandomNumberInt(0, numClusters);
                indexB = rand.getRandomNumberInt(0, numClusters);
                
                //Make sure the two random indices are not the same (the diagonal weights should stay 1)
                if( indexA != indexB ){
                    //Pick a random weight between these two neurons
                    weight = rand.getRandomNumberUniform(0,1);
                    
                    //The weight between neurons a and b is mirrored
                    networkWeights[indexA][indexB] = weight;
                    networkWeights[indexB][indexA] = weight;
                }
            }
            break;
    }
    
    //Scale the data if needed
    ranges = data.getRanges();
    if( useScaling ){
        for(UINT i=0; i<M; i++){
            for(UINT j=0; j<numInputDimensions; j++){
                data[i][j] = scale(data[i][j],ranges[j].minValue,ranges[j].maxValue,0,1);
            }
        }
    }
    
    Float error = 0;
    Float lastError = 0;
    Float trainingSampleError = 0;
    Float delta = 0;
    Float minChange = 0;
    Float weightUpdate = 0;
    Float weightUpdateSum = 0;
    Float alpha = 1.0;
    Float neuronDiff = 0;
    UINT iter = 0;
    bool keepTraining = true;
    VectorFloat trainingSample;
    Vector< UINT > randomTrainingOrder(M);
    
    //In most cases, the training data is grouped into classes (100 samples for class 1, followed by 100 samples for class 2, etc.)
    //This can cause a problem for stochastic gradient descent algorithm. To avoid this issue, we randomly shuffle the order of the
    //training samples. This random order is then used at each epoch.
    for(UINT i=0; i<M; i++){
        randomTrainingOrder[i] = i;
    }
    std::random_shuffle(randomTrainingOrder.begin(), randomTrainingOrder.end());
    
    //Enter the main training loop
    while( keepTraining ){
        
        //Update alpha based on the current iteration
        alpha = Util::scale(iter,0,maxNumEpochs,alphaStart,alphaEnd);
        
        //Run one epoch of training using the online best-matching-unit algorithm
        error = 0;
        for(UINT i=0; i<M; i++){
            
            trainingSampleError = 0;
            
            //Get the i'th random training sample
            trainingSample = data.getRowVector( randomTrainingOrder[i] );
            
            //Find the best matching unit
            Float dist = 0;
            Float bestDist = grt_numeric_limits< Float >::max();
            UINT bestIndex = 0;
            for(UINT j=0; j<numClusters; j++){
                dist = neurons[j].getSquaredWeightDistance( trainingSample );
                if( dist < bestDist ){
                    bestDist = dist;
                    bestIndex = j;
                }
            }
            
            //Update the weights based on the distance to the winning neuron
            //Neurons closer to the winning neuron will have their weights update more
            for(UINT j=0; j<numClusters; j++){
                
                //Update the weights for the j'th neuron
                weightUpdateSum = 0;
                neuronDiff = 0;
                for(UINT n=0; n<N; n++){
                    neuronDiff = trainingSample[n] - neurons[j][n];
                    weightUpdate = networkWeights[bestIndex][j] * alpha * neuronDiff;
                    neurons[j][n] += weightUpdate;
                    weightUpdateSum += neuronDiff;
                }
                
                trainingSampleError += grt_sqr( weightUpdateSum );
            }
            
            error += grt_sqrt( trainingSampleError / numClusters );
        }
        
        //Compute the error
        delta = fabs( error-lastError );
        lastError = error;
        
        //Check to see if we should stop
        if( delta <= minChange ){
            converged = true;
            keepTraining = false;
        }
        
        if( grt_isinf( error ) ){
            errorLog << "train_(MatrixFloat &data) - Training failed! Error is NAN!" << std::endl;
            return false;
        }
        
        if( ++iter >= maxNumEpochs ){
            keepTraining = false;
        }
        
        trainingLog << "Epoch: " << iter << " Squared Error: " << error << " Delta: " << delta << " Alpha: " << alpha << std::endl;
    }
    
    numTrainingIterationsToConverge = iter;
    trained = true;
    
    return true;
}
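For context, the sketch below shows how this training routine is typically driven. It is a minimal sketch, assuming the GRT-style public API: a SelfOrganizingMap clusterer whose train() wrapper calls the train_() method above, and MatrixFloat::load() for reading a CSV file of samples (the file name is hypothetical).

#include "GRT.h"
using namespace GRT;

int main(){
    //Load the training data, one sample per row (hypothetical file name)
    MatrixFloat data;
    if( !data.load( "som_training_data.csv" ) ){
        std::cout << "ERROR: Failed to load the training data!\n";
        return EXIT_FAILURE;
    }

    //Train the self-organizing map; train() wraps the train_() method shown above
    SelfOrganizingMap som;
    if( !som.train( data ) ){
        std::cout << "ERROR: Failed to train the model!\n";
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}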
Example No. 26
0
bool PrincipalComponentAnalysis::computeFeatureVector_(const MatrixFloat &data,const UINT analysisMode){

    trained = false;
    const UINT M = data.getNumRows();
    const UINT N = data.getNumCols();
    this->numInputDimensions = N;

    MatrixFloat msData( M, N );

    //Compute the mean and standard deviation of the input data
    mean = data.getMean();
    stdDev = data.getStdDev();

    if( normData ){
        //Normalize the data
        for(UINT i=0; i<M; i++)
            for(UINT j=0; j<N; j++)
                msData[i][j] = (data[i][j]-mean[j]) / stdDev[j];

    }else{
        //Mean Subtract Data
        for(UINT i=0; i<M; i++)
            for(UINT j=0; j<N; j++)
                msData[i][j] = data[i][j] - mean[j];
    }

    //Get the covariance matrix
    MatrixFloat cov = msData.getCovarianceMatrix();

    //Use Eigen Value Decomposition to find eigenvectors of the covariance matrix
    EigenvalueDecomposition eig;

    if( !eig.decompose( cov ) ){
        mean.clear();
        stdDev.clear();
        componentWeights.clear();
        sortedEigenvalues.clear();
        eigenvectors.clear();
        errorLog << "computeFeatureVector(const MatrixFloat &data,UINT analysisMode) - Failed to decompose input matrix!" << std::endl;
        return false;
    }

    //Get the eigenvectors and eigenvalues
    eigenvectors = eig.getEigenvectors();
    eigenvalues = eig.getRealEigenvalues();

    //Any eigenvalues less than zero are not useful, so clamp them to zero
    for(UINT i=0; i<eigenvalues.size(); i++){
        if( eigenvalues[i] < 0 )
            eigenvalues[i] = 0;
    }

    //Sort the eigenvalues and compute the component weights
    Float sum = 0;
    UINT componentIndex = 0;
    sortedEigenvalues.clear();
    componentWeights.resize(N,0);

    while( true ){
        Float maxValue = 0;
        UINT index = 0;
        for(UINT i=0; i<eigenvalues.size(); i++){
            if( eigenvalues[i] > maxValue ){
                maxValue = eigenvalues[i];
                index = i;
            }
        }
        if( maxValue == 0 || componentIndex >= eigenvalues.size() ){
            break;
        }
        sortedEigenvalues.push_back( IndexedDouble(index,maxValue) );
        componentWeights[ componentIndex++ ] = eigenvalues[ index ];
        sum += eigenvalues[ index ];
        eigenvalues[ index ] = 0; //Set the maxValue to zero so it won't be used again
    }

    Float cumulativeVariance = 0;
    switch( analysisMode ){
        case MAX_VARIANCE:
            //Normalize the component weights and work out how many components we need to use to reach the maxVariance
            numPrincipalComponents = 0;
            for(UINT k=0; k<N; k++){
                componentWeights[k] /= sum;
                cumulativeVariance += componentWeights[k];
                if( cumulativeVariance >= maxVariance && numPrincipalComponents==0 ){
                    numPrincipalComponents = k+1;
                }
            }
        break;
        case MAX_NUM_PCS:
            //Normalize the component weights and compute the maxVariance
            maxVariance = 0;
            for(UINT k=0; k<N; k++){
                componentWeights[k] /= sum;
                if( k < numPrincipalComponents ){
                    maxVariance += componentWeights[k];
                }
            }
        break;
        default:
            errorLog << "computeFeatureVector_(const MatrixFloat &data,const UINT analysisMode) - Unknown analysis mode!" << std::endl;
        break;
    }
    
    //Get the raw eigenvalues (in case the user asks for these later)
    eigenvalues = eig.getRealEigenvalues();

    //Flag that the features have been computed
    trained = true;

    return true;
}
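The usual entry point for this analysis is the public wrapper rather than computeFeatureVector_() directly. Below is a minimal sketch, assuming the GRT-style calls computeFeatureVector( data, maxVariance ), project( data, prjData ), and getNumPrincipalComponents(); treat the exact signatures as assumptions.

#include "GRT.h"
using namespace GRT;

int main(){
    //Build some arbitrary input data, one sample per row
    Random random;
    MatrixFloat data(100,5);
    for(UINT i=0; i<data.getNumRows(); i++)
        for(UINT j=0; j<data.getNumCols(); j++)
            data[i][j] = random.getRandomNumberUniform(0.0,1.0);

    //Run the analysis, keeping enough components to reach 95% of the total variance
    PrincipalComponentAnalysis pca;
    if( !pca.computeFeatureVector( data, 0.95 ) ){
        std::cout << "ERROR: Failed to compute the PCA feature vector!\n";
        return EXIT_FAILURE;
    }

    //Project the data onto the principal components
    MatrixFloat prjData;
    if( !pca.project( data, prjData ) ){
        std::cout << "ERROR: Failed to project the data!\n";
        return EXIT_FAILURE;
    }

    std::cout << "Number of principal components: " << pca.getNumPrincipalComponents() << std::endl;
    return EXIT_SUCCESS;
}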
Example No. 27
0
//Assumes the main GRT header is on the include path
#include "GRT.h"
using namespace GRT;
using namespace std;

int main (int argc, const char * argv[])
{
    //Create a matrix for the test data
    MatrixFloat data(4,2);

    //Populate the test data
    data[0][0] = 1;
    data[0][1] = 2;
    data[1][0] = 3;
    data[1][1] = 4;
    data[2][0] = 5;
    data[2][1] = 6;
    data[3][0] = 7;
    data[3][1] = 8;

    cout << "Data:\n";
    for(UINT i=0; i<data.getNumRows(); i++) {
        for(UINT j=0; j<data.getNumCols(); j++) {
            cout << data[i][j] << "\t";
        }
        cout << endl;
    }

    //Create a new instance of the SVD class
    SVD svd;

    //Compute the singular value decomposition of the data matrix
    if( !svd.solve(data) ) {
        cout << "ERROR: Failed to compute the SVD solution!\n";
        return EXIT_FAILURE;
    }

    //Get the U, V, and W results (W holds the singular values, called S in other packages such as MATLAB)
    MatrixFloat u = svd.getU();
    MatrixFloat v = svd.getV();
    VectorFloat w = svd.getW();

    cout << "U:\n";
    for(UINT i=0; i<u.getNumRows(); i++) {
        for(UINT j=0; j<u.getNumCols(); j++) {
            cout << u[i][j] << "\t";
        }
        cout << endl;
    }

    cout << "V:\n";
    for(UINT i=0; i<v.getNumRows(); i++) {
        for(UINT j=0; j<v.getNumCols(); j++) {
            cout << v[i][j] << "\t";
        }
        cout << endl;
    }

    cout << "W:\n";
    for(UINT i=0; i<w.getSize(); i++) {
        cout << w[i] << "\t";
    }
    cout << endl;

    return EXIT_SUCCESS;
}
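As a quick sanity check, the factorization can be multiplied back together: the original matrix should be approximately U * diag(W) * V^T. The snippet below could be appended before the return statement above; it is a sketch that assumes MatrixFloat provides setAllValues(), getTranspose(), and a multiple( a, b, aTranspose ) routine.

    //Rebuild the data matrix: reconstructed ~= U * diag(W) * V^T
    MatrixFloat s( w.getSize(), w.getSize() );
    s.setAllValues( 0 );
    for(UINT i=0; i<w.getSize(); i++){
        s[i][i] = w[i]; //Place the singular values on the diagonal
    }

    MatrixFloat us, reconstructed;
    us.multiple( u, s, false );                            //us = U * diag(W)
    reconstructed.multiple( us, v.getTranspose(), false ); //reconstructed = us * V^T

    //Each entry of reconstructed should now match data to within numerical precision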
Example No. 28
0
bool HierarchicalClustering::train_(MatrixFloat &data){
    
    trained = false;
    clusters.clear();
    distanceMatrix.clear();
    
    if( data.getNumRows() == 0 || data.getNumCols() == 0 ){
        return false;
    }
    
    //Set the rows and columns
    M = data.getNumRows();
    N = data.getNumCols();
    
    //Build the distance matrix
    distanceMatrix.resize(M,M);

    //Compute the pairwise squared Euclidean distances
    for(UINT i=0; i<M; i++){
        for(UINT j=0; j<M; j++){
            if( i == j ) distanceMatrix[i][j] = grt_numeric_limits< Float >::max();
            else{
                distanceMatrix[i][j] = squaredEuclideanDistance(data[i], data[j]);
            }
        }
    }

    //Build the initial clusters, at the start each sample gets its own cluster
    UINT uniqueClusterID = 0;
    Vector< ClusterInfo > clusterData(M);
    for(UINT i=0; i<M; i++){
        clusterData[i].uniqueClusterID = uniqueClusterID++;
        clusterData[i].addSampleToCluster(i);
    }
    
    trainingLog << "Starting clustering..." << std::endl;
    
    //Create the first cluster level, each sample is its own cluster
    UINT level = 0;
    ClusterLevel newLevel;
    newLevel.level = level;
    for(UINT i=0; i<M; i++){
        newLevel.clusters.push_back( clusterData[i] );
    }
    clusters.push_back( newLevel );
    
    //Move to level 1 and start the search
    level++;
    bool keepClustering = true;
    
    while( keepClustering ){
        
        //Find the closest two clusters within the cluster data
        Float minDist = grt_numeric_limits< Float >::max();
        Vector< Vector< UINT > > clusterPairs;
        UINT K = (UINT)clusterData.size();
        for(UINT i=0; i<K; i++){
            for(UINT j=0; j<K; j++){
                if( i != j ){
                    Float dist = computeClusterDistance( clusterData[i], clusterData[j] );
             
                    if( dist < minDist ){
                        minDist = dist;
                        Vector< UINT > clusterPair(2);
                        clusterPair[0] = i;
                        clusterPair[1] = j;
                        clusterPairs.clear();
                        clusterPairs.push_back( clusterPair );
                    }
                    
                }
            }
        }
        
        if( minDist == grt_numeric_limits< Float >::max() ){
            keepClustering = false;
            warningLog << "train_(MatrixFloat &data) - Failed to find any clusters to merge at level: " << level << std::endl;
            return false;
        }else{
        
            //Merge the two closest clusters together and create a new level
            ClusterLevel newLevel;
            newLevel.level = level;
            
            //Create the new cluster
            ClusterInfo newCluster;
            newCluster.uniqueClusterID = uniqueClusterID++;
            
            const UINT numClusterPairs = clusterPairs.getSize();
            
            for(UINT k=0; k<numClusterPairs; k++){
                //Add all the samples in the first cluster to the new cluster
                UINT numSamplesInClusterA = clusterData[ clusterPairs[k][0] ].getNumSamplesInCluster();
                for(UINT i=0; i<numSamplesInClusterA; i++){
                    UINT index = clusterData[ clusterPairs[k][0] ][ i ];
                    newCluster.addSampleToCluster( index );
                }
                
                //Add all the samples in the second cluster to the new cluster
                UINT numSamplesInClusterB = clusterData[ clusterPairs[k][1] ].getNumSamplesInCluster();
                for(UINT i=0; i<numSamplesInClusterB; i++){
                    UINT index = clusterData[ clusterPairs[k][1] ][ i ];
                    newCluster.addSampleToCluster( index );
                }
                
                //Compute the cluster variance
                newCluster.clusterVariance = computeClusterVariance( newCluster, data );
                
                //Remove the two merged clusters from the cluster data (so they will not be used in the next search)
                UINT idA = clusterData[ clusterPairs[k][0] ].getUniqueClusterID();
                UINT idB = clusterData[ clusterPairs[k][1] ].getUniqueClusterID();
                UINT numRemoved = 0;
                Vector< ClusterInfo >::iterator iter = clusterData.begin();
                while( iter != clusterData.end() ){
                    if( iter->getUniqueClusterID() == idA || iter->getUniqueClusterID() == idB ){
                        iter = clusterData.erase( iter );
                        if( ++numRemoved >= 2 ) break;
                    }else iter++;
                }
            }
            
            //Add the merged cluster to the clusterData
            clusterData.push_back( newCluster );
            
            //Add the new level and cluster data to the main cluster buffer
            newLevel.clusters.push_back( newCluster );
            
            clusters.push_back( newLevel );
            
            //Update the level
            level++;
        }
        
        //Check to see if we should stop clustering
        if( level >= M ){
            keepClustering = false;
        }
        
        if( clusterData.size() == 0 ){
            keepClustering = false;
        }
        
        trainingLog << "Cluster level: " << level << " Number of clusters: " << clusters.back().getNumClusters() << std::endl;
    }
    
    //Flag that the model is trained
    trained = true;
    
    //Setup the cluster labels
    clusterLabels.resize(numClusters);
    for(UINT i=0; i<numClusters; i++){
        clusterLabels[i] = i+1;
    }
    clusterLikelihoods.resize(numClusters,0);
    clusterDistances.resize(numClusters,0);

    return true;
}
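Finally, a minimal driver for this clusterer, again assuming the GRT-style train() wrapper around the train_() method above and MatrixFloat::load(); the file name is illustrative only.

#include "GRT.h"
using namespace GRT;

int main(){
    //Load the data to cluster, one sample per row (hypothetical file name)
    MatrixFloat data;
    if( !data.load( "cluster_data.csv" ) ){
        std::cout << "ERROR: Failed to load the data!\n";
        return EXIT_FAILURE;
    }

    //Run the agglomerative clustering; each level merges the two closest clusters
    HierarchicalClustering hc;
    if( !hc.train( data ) ){
        std::cout << "ERROR: Failed to train the model!\n";
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}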