Example #1
void CProblem::set_d_function()
{
	d.resize(n);
	for (unsigned int u = 0; u < n; u++)
	{
		d[u].resize(n);
		for (unsigned int v = 0; v < u; v++)
		{
			d[u][v] = dist_linear(distances[u][v]);
			d[v][u] = d[u][v];
			if (d[u][v] <= max_distance && u != v)
			{ /* save the vertex pairs for which additional constraints will be created */
				Info.numAddVar++;
				Info.ConstrIndex.push_back(u);
				Info.ConstrIndex.push_back(v);
			}
		}
	}
	Info.numVar += Info.numAddVar;
	std::cout << " numAddVar " << Info.numAddVar << "\n";
}
Пример #2
0
void CProblem::coordTOdist()
{
	distances.resize(n);
	//max_distance = 0.0;
	for (unsigned int u = 0; u < n; u++)
	{
		distances[u].resize(n);
		for (unsigned int v = 0; v < u; v++)
		{
			distances[u][v] = (sqrt(
					(coordinates[u][0] - coordinates[v][0])
							* (coordinates[u][0] - coordinates[v][0])
							+ (coordinates[u][1] - coordinates[v][1])
									* (coordinates[u][1] - coordinates[v][1])));
			distances[v][u] = distances[u][v];
			//if (distances[u][v] > max_distance) max_distance = distances[u][v];
		}
		distances[u][u] = 0; /* the distance from a vertex to itself is zero */
	}
}
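The squared-term expression above can be written more compactly with std::hypot (C++11), which also behaves better numerically for large coordinates; a minimal sketch, assuming the same two-column coordinates layout as CProblem:

#include <cmath>
#include <vector>

// Euclidean distance between two 2-D points stored as {x, y},
// matching the rows of the coordinates matrix used above.
double euclidean(const std::vector<double> &a, const std::vector<double> &b)
{
	return std::hypot(a[0] - b[0], a[1] - b[1]);
}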
Example #3
CalibrateResult restingDataCollected(const MatrixDouble& data)
{
    // take average of X and Y acceleration as the zero G value
    zeroG = (data.getMean()[0] + data.getMean()[1]) / 2;
    oneG = data.getMean()[2]; // use Z acceleration as one G value
    
    double range = fabs(oneG - zeroG);
    vector<double> stddev = data.getStdDev();
    
    if (stddev[0] / range > 0.05 ||
        stddev[1] / range > 0.05 ||
        stddev[2] / range > 0.05)
        return CalibrateResult(CalibrateResult::WARNING,
            "Accelerometer seemed to be moving; consider recollecting the "
            "calibration sample.");
    
    if (fabs(data.getMean()[0] - data.getMean()[1]) / range > 0.1)
        return CalibrateResult(CalibrateResult::WARNING,
            "X and Y axes differ by " + std::to_string(
            fabs(data.getMean()[0] - data.getMean()[1]) / range * 100) +
            " percent. Check that accelerometer is flat.");

    return CalibrateResult::SUCCESS;
}
Example #4
void CProblem::print()
{
	std::cout << "\n";
	std::cout << "Graph (V,E) with |V|= " << n << " and diameter = " << diameter
			<< "\n";
	std::cout << "\n";

	std::cout << "Coordinates: " << "\n";
	for (unsigned int u = 0; u < n; u++)
	{
		std::cout << "vertex " << u + 1 << " : " << coordinates[u][0] << " "
				<< coordinates[u][1] << "\n";
	}
	std::cout << "\n";

	std::cout << "Distances: " << "\n";
	for (unsigned int u = 0; u < n; u++)
	{
		for (unsigned int v = 0; v < n; v++)
		{
			std::cout << std::setw(3) << distances[u][v] << " ";
		}
		std::cout << "\n";
	}

	std::cout << "d: " << "\n";
	for (unsigned int u = 0; u < d.size(); u++)
	{
		for (unsigned int v = 0; v < d[u].size(); v++)
		{
			std::cout << std::setw(3) << d[u][v] << " ";
		}
		std::cout << "\n";
	}
	std::cout << "\n";
	std::cout << "max distance : " << max_distance;
	std::cout << "\n";
}
Example #5
bool TimeSeriesClassificationData::addSample(const UINT classLabel,const MatrixDouble &trainingSample){
	
    if( trainingSample.getNumCols() != numDimensions ){
        errorLog << "addSample(UINT classLabel, MatrixDouble trainingSample) - The dimensionality of the training sample (" << trainingSample.getNumCols() << ") does not match that of the dataset (" << numDimensions << ")" << endl;
        return false;
    }
    
    //The class label must not be the null rejection class label (zero), unless the null gesture class is explicitly allowed
    if( classLabel == GRT_DEFAULT_NULL_CLASS_LABEL && !allowNullGestureClass ){
        errorLog << "addSample(UINT classLabel, MatrixDouble sample) - the class label can not be 0!" << endl;
        return false;
    }

    TimeSeriesClassificationSample newSample(classLabel,trainingSample);
    data.push_back( newSample );
    totalNumSamples++;

    if( classTracker.size() == 0 ){
        ClassTracker tracker(classLabel,1);
        classTracker.push_back(tracker);
    }else{
        bool labelFound = false;
        for(UINT i=0; i<classTracker.size(); i++){
            if( classLabel == classTracker[i].classLabel ){
                classTracker[i].counter++;
                labelFound = true;
                break;
            }
        }
        if( !labelFound ){
            ClassTracker tracker(classLabel,1);
            classTracker.push_back(tracker);
        }
    }
    return true;
}
Example #6
bool PrincipalComponentAnalysis::project(const MatrixDouble &data,MatrixDouble &prjData) {

    if( !trained ) {
        warningLog << "project(const MatrixDouble &data,MatrixDouble &prjData) - The PrincipalComponentAnalysis module has not been trained!" << endl;
        return false;
    }
    if( data.getNumCols() != numInputDimensions ) {
        warningLog << "project(const MatrixDouble &data,MatrixDouble &prjData) - The number of columns in the input vector (" << data.getNumCols() << ") does not match the number of input dimensions (" << numInputDimensions << ")!" << endl;
        return false;
    }

    MatrixDouble msData( data );
    prjData.resize(data.getNumRows(),numPrincipalComponents);

    if( normData ) {
        //Z-normalize the data (subtract the mean, then divide by the standard deviation)
        for(UINT i=0; i<data.getNumRows(); i++)
            for(UINT j=0; j<numInputDimensions; j++)
                msData[i][j] = (msData[i][j]-mean[j])/stdDev[j];
    } else {
        //Mean subtract the data
        for(UINT i=0; i<data.getNumRows(); i++)
            for(UINT j=0; j<numInputDimensions; j++)
                msData[i][j] -= mean[j];
    }

    //Projected Data
    for(UINT row=0; row<msData.getNumRows(); row++) { //For each row in the final data
        for(UINT i=0; i<numPrincipalComponents; i++) { //For each PC
            prjData[row][i]=0;
            for(UINT j=0; j<data.getNumCols(); j++)//For each feature
                prjData[row][i] += msData[row][j] * eigenvectors[j][sortedEigenvalues[i].index];
        }
    }

    return true;
}
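A minimal usage sketch for the method above. It assumes the GRT API in which a PCA model is first fitted with computeFeatureVector() before project() can be called; treat the exact signatures as assumptions rather than a definitive reference:

#include <GRT/GRT.h>
using namespace GRT;
using namespace std;

int main()
{
    //Build a small dummy dataset: 100 samples, 4 dimensions
    MatrixDouble data(100, 4);
    Random random;
    for(UINT i=0; i<data.getNumRows(); i++)
        for(UINT j=0; j<data.getNumCols(); j++)
            data[i][j] = random.getRandomNumberUniform(-1.0, 1.0);

    //Fit the PCA model, keeping enough components to explain 95% of the variance
    PrincipalComponentAnalysis pca;
    if( !pca.computeFeatureVector( data, 0.95 ) ) return EXIT_FAILURE;

    //Project the data into the principal subspace using the method above
    MatrixDouble prjData;
    if( !pca.project( data, prjData ) ) return EXIT_FAILURE;

    cout << "Projected from " << data.getNumCols() << " to " << prjData.getNumCols() << " dimensions\n";
    return EXIT_SUCCESS;
}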
Example #7
int main(int argc, const char * argv[]){
    
    //Load the training data
    TimeSeriesClassificationData trainingData;
    
    if( !trainingData.loadDatasetFromFile("HMMTrainingData.grt") ){
        cout << "ERROR: Failed to load training data!\n";
        return EXIT_FAILURE;
    }
    
    //Remove 20% of the training data to use as test data
    TimeSeriesClassificationData testData = trainingData.partition( 80 );
    
    //The input to the HMM must be a quantized discrete value
    //We therefore use a KMeansQuantizer to convert the N-dimensional continuous data into 1-dimensional discrete data
    const UINT NUM_SYMBOLS = 10;
    KMeansQuantizer quantizer( NUM_SYMBOLS );
    
    //Train the quantizer using the training data
    if( !quantizer.train( trainingData ) ){
        cout << "ERROR: Failed to train quantizer!\n";
        return EXIT_FAILURE;
    }
    
    //Quantize the training data
    TimeSeriesClassificationData quantizedTrainingData( 1 );
    
    for(UINT i=0; i<trainingData.getNumSamples(); i++){
        
        UINT classLabel = trainingData[i].getClassLabel();
        MatrixDouble quantizedSample;
        
        for(UINT j=0; j<trainingData[i].getLength(); j++){
            quantizer.quantize( trainingData[i].getData().getRowVector(j) );
            
            quantizedSample.push_back( quantizer.getFeatureVector() );
        }
        
        if( !quantizedTrainingData.addSample(classLabel, quantizedSample) ){
            cout << "ERROR: Failed to quantize training data!\n";
            return EXIT_FAILURE;
        }
        
    }
    
    //Create a new HMM instance
    HMM hmm;
    
    //Set the number of states in each model
    hmm.setNumStates( 4 );
    
    //Set the number of symbols in each model, this must match the number of symbols in the quantizer
    hmm.setNumSymbols( NUM_SYMBOLS );
    
    //Set the HMM model type to LEFTRIGHT with a delta of 1
    hmm.setModelType( HiddenMarkovModel::LEFTRIGHT );
    hmm.setDelta( 1 );
    
    //Set the training parameters
    hmm.setMinImprovement( 1.0e-5 );
    hmm.setMaxNumIterations( 100 );
    hmm.setNumRandomTrainingIterations( 20 );
    
    //Train the HMM model
    if( !hmm.train( quantizedTrainingData ) ){
        cout << "ERROR: Failed to train the HMM model!\n";
        return EXIT_FAILURE;
    }
    
    //Save the HMM model to a file
    if( !hmm.save( "HMMModel.grt" ) ){
        cout << "ERROR: Failed to save the model to a file!\n";
        return EXIT_FAILURE;
    }
    
    //Load the HMM model from a file
    if( !hmm.load( "HMMModel.grt" ) ){
        cout << "ERROR: Failed to load the model from a file!\n";
        return EXIT_FAILURE;
    }
    
    //Quantize the test data
    TimeSeriesClassificationData quantizedTestData( 1 );
    
    for(UINT i=0; i<testData.getNumSamples(); i++){
        
        UINT classLabel = testData[i].getClassLabel();
        MatrixDouble quantizedSample;
        
        for(UINT j=0; j<testData[i].getLength(); j++){
            quantizer.quantize( testData[i].getData().getRowVector(j) );
            
            quantizedSample.push_back( quantizer.getFeatureVector() );
        }
        
        if( !quantizedTestData.addSample(classLabel, quantizedSample) ){
            cout << "ERROR: Failed to quantize training data!\n";
            return false;
        }
    }
    
    //Compute the accuracy of the HMM models using the test data
    double numCorrect = 0;
    double numTests = 0;
    for(UINT i=0; i<quantizedTestData.getNumSamples(); i++){
        
        UINT classLabel = quantizedTestData[i].getClassLabel();
        hmm.predict( quantizedTestData[i].getData() );
        
        if( classLabel == hmm.getPredictedClassLabel() ) numCorrect++;
        numTests++;
        
        VectorDouble classLikelihoods = hmm.getClassLikelihoods();
        VectorDouble classDistances = hmm.getClassDistances();
        
        cout << "ClassLabel: " << classLabel;
        cout << " PredictedClassLabel: " << hmm.getPredictedClassLabel();
        cout << " MaxLikelihood: " << hmm.getMaximumLikelihood();
        
        cout << "  ClassLikelihoods: ";
        for(UINT k=0; k<classLikelihoods.size(); k++){
            cout << classLikelihoods[k] << "\t";
        }
        
        cout << "ClassDistances: ";
        for(UINT k=0; k<classDistances.size(); k++){
            cout << classDistances[k] << "\t";
        }
        cout << endl;
    }
    
    cout << "Test Accuracy: " << numCorrect/numTests*100.0 << endl;
    
    return EXIT_SUCCESS;
}
Example #8
bool GaussianMixtureModels::train_(MatrixDouble &data){
    
    trained = false;
    
    //Clear any previous training results
    det.clear();
    invSigma.clear();
    numTrainingIterationsToConverge = 0;
    
    if( data.getNumRows() == 0 ){
        errorLog << "train_(MatrixDouble &data) - Training Failed! Training data is empty!" << endl;
        return false;
    }
    
    //Resize the variables
    numTrainingSamples = data.getNumRows();
    numInputDimensions = data.getNumCols();
    
    //Resize mu and resp
    mu.resize(numClusters,numInputDimensions);
    resp.resize(numTrainingSamples,numClusters);
    
    //Resize sigma
    sigma.resize(numClusters);
    for(UINT k=0; k<numClusters; k++){
        sigma[k].resize(numInputDimensions,numInputDimensions);
    }
    
    //Resize frac and lndets
    frac.resize(numClusters);
    lndets.resize(numClusters);
    
    //Scale the data if needed
    ranges = data.getRanges();
    if( useScaling ){
        for(UINT i=0; i<numTrainingSamples; i++){
            for(UINT j=0; j<numInputDimensions; j++){
                data[i][j] = scale(data[i][j],ranges[j].minValue,ranges[j].maxValue,0,1);
            }
        }
    }
    
    //Pick K random starting points for the initial guesses of mu
    Random random;
    vector< UINT > randomIndexs(numTrainingSamples);
    for(UINT i=0; i<numTrainingSamples; i++) randomIndexs[i] = i;
    for(UINT i=0; i<numClusters; i++){
        SWAP(randomIndexs[ i ],randomIndexs[ random.getRandomNumberInt(0,numTrainingSamples) ]);
    }
    for(UINT k=0; k<numClusters; k++){
        for(UINT n=0; n<numInputDimensions; n++){
            mu[k][n] = data[ randomIndexs[k] ][n];
        }
    }
    
    //Setup sigma and the uniform prior on P(k)
    for(UINT k=0; k<numClusters; k++){
        frac[k] = 1.0/double(numClusters);
        for(UINT i=0; i<numInputDimensions; i++){
            for(UINT j=0; j<numInputDimensions; j++) sigma[k][i][j] = 0;
            sigma[k][i][i] = 1.0e-2;   //Set the diagonal to a small number
        }
    }
    
    loglike = 0;
    bool keepGoing = true;
    double change = 99.9e99;
    UINT numIterationsNoChange = 0;
    VectorDouble u(numInputDimensions);
	VectorDouble v(numInputDimensions);
    
    while( keepGoing ){
        
        //Run the estep
        if( estep( data, u, v, change ) ){
            
            //Run the mstep
            mstep( data );
        
            //Check for convergence
            if( fabs( change ) < minChange ){
                if( ++numIterationsNoChange >= minNumEpochs ){
                    keepGoing = false;
                }
            }else numIterationsNoChange = 0;
            if( ++numTrainingIterationsToConverge >= maxNumEpochs ) keepGoing = false;
            
        }else{
            errorLog << "train_(MatrixDouble &data) - Estep failed at iteration " << numTrainingIterationsToConverge << endl;
            return false;
        }
    }
    
    //Compute the inverse of sigma and the determinants for prediction
    if( !computeInvAndDet() ){
        det.clear();
        invSigma.clear();
        errorLog << "train_(MatrixDouble &data) - Failed to compute inverse and determinat!" << endl;
        return false;
    }
    
    //Flag that the model was trained
    trained = true;
    
    return true;
}
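The scale() call used above when useScaling is set maps each value from its observed range into [0,1]. A one-function sketch of the min-max mapping assumed here (GRT provides this as a member helper; the free-function form below is only an illustration):

//Min-max scaling: map x from [minSource,maxSource] to [minTarget,maxTarget]
double scale(const double x,
             const double minSource, const double maxSource,
             const double minTarget, const double maxTarget)
{
    return ((x - minSource) * (maxTarget - minTarget)) / (maxSource - minSource) + minTarget;
}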
Example #9
bool KMeans::trainModel(MatrixDouble &data){
    
    if( numClusters == 0 ){
        errorLog << "trainModel(MatrixDouble &data) - Failed to train model. NumClusters is zero!" << endl;
		return false;
	}
    
    if( clusters.getNumRows() != numClusters ){
        errorLog << "trainModel(MatrixDouble &data) - Failed to train model. The number of rows in the cluster matrix does not match the number of clusters! You should need to initalize the clusters matrix first before calling this function!" << endl;
		return false;
	}
    
    if( clusters.getNumCols() != numInputDimensions ){
        errorLog << "trainModel(MatrixDouble &data) - Failed to train model. The number of columns in the cluster matrix does not match the number of input dimensions! You should need to initalize the clusters matrix first before calling this function!" << endl;
		return false;
	}

    Timer timer;
	UINT currentIter = 0;
    UINT numChanged = 0;
	bool keepTraining = true;
    double theta = 0;
    double lastTheta = 0;
    double delta = 0;
    double startTime = 0;
    thetaTracker.clear();
    finalTheta = 0;
    numTrainingIterationsToConverge = 0;
    trained = false;
    converged = false;
    
    //Scale the data if needed
    ranges = data.getRanges();
    if( useScaling ){
        data.scale(0,1);
    }

    //Init the assign and count vectors
    //Assign is set to K+1 so that the numChanged value in the estep of the first iteration will be updated correctly
    for(UINT m=0; m<numTrainingSamples; m++) assign[m] = numClusters+1;
	for(UINT k=0; k<numClusters; k++) count[k] = 0;

    //Run the training loop
    timer.start();
	while( keepTraining ){
        startTime = timer.getMilliSeconds();

		//Compute the E step
		numChanged = estep( data );

        //Compute the M step
        mstep( data );

        //Update the iteration counter
		currentIter++;

		//Compute theta if needed
		if( computeTheta ){
            theta = calculateTheta(data);
            delta = lastTheta - theta;
            lastTheta = theta;
        }else theta = delta = 0;
        
        //Check convergence
		if( numChanged == 0 && currentIter > minNumEpochs ){ converged = true; keepTraining = false; }
		if( currentIter >= maxNumEpochs ){ keepTraining = false; }
		if( fabs( delta ) < minChange && computeTheta && currentIter > minNumEpochs ){ converged = true; keepTraining = false; }
        if( computeTheta )  thetaTracker.push_back( theta );
        
        trainingLog << "Epoch: " << currentIter << "/" << maxNumEpochs;
        trainingLog << " Epoch time: " << (timer.getMilliSeconds()-startTime)/1000.0 << " seconds";
        trainingLog << " Theta: " << theta << " Delta: " << delta << endl;
	}
    trainingLog << "Model Trained at epoch: " << currentIter << " with a theta value of: " << theta << endl;

    finalTheta = theta;
    numTrainingIterationsToConverge = currentIter;
	trained = true;
	
	return true;
}
Example #10
bool KMeansFeatures::train_(MatrixDouble &trainingData){
    
    if( !initialized ){
        errorLog << "train_(MatrixDouble &trainingData) - The quantizer has not been initialized!" << endl;
        return false;
    }
    
    //Reset any previous model
    featureDataReady = false;
    
    const UINT M = trainingData.getNumRows();
    const UINT N = trainingData.getNumCols();
    
    numInputDimensions = N;
    numOutputDimensions = numClustersPerLayer[ numClustersPerLayer.size()-1 ];
    
    //Scale the input data if needed
    ranges = trainingData.getRanges();
    if( useScaling ){
        for(UINT i=0; i<M; i++){
            for(UINT j=0; j<N; j++){
                trainingData[i][j] = scale(trainingData[i][j],ranges[j].minValue,ranges[j].maxValue,0,1.0);
            }
        }
    }
    
    //Train the KMeans model at each layer
    const UINT K = (UINT)numClustersPerLayer.size();
    for(UINT k=0; k<K; k++){
        KMeans kmeans;
        kmeans.setNumClusters( numClustersPerLayer[k] );
        kmeans.setComputeTheta( true );
        kmeans.setMinChange( minChange );
        kmeans.setMinNumEpochs( minNumEpochs );
        kmeans.setMaxNumEpochs( maxNumEpochs );
        
        trainingLog << "Layer " << k+1 << "/" << K << " NumClusters: " << numClustersPerLayer[k] << endl;
        if( !kmeans.train_( trainingData ) ){
            errorLog << "train_(MatrixDouble &trainingData) - Failed to train kmeans model at layer: " << k << endl;
            return false;
        }
        
        //Save the clusters
        clusters.push_back( kmeans.getClusters() );
        
        //Project the data through the current layer to use as training data for the next layer
        if( k+1 != K ){
            MatrixDouble data( M, numClustersPerLayer[k] );
            VectorDouble input( trainingData.getNumCols() );
            VectorDouble output( data.getNumCols() );
            
            for(UINT i=0; i<M; i++){
                
                //Copy the data into the sample
                for(UINT j=0; j<input.size(); j++){
                    input[j] = trainingData[i][j];
                }
                
                //Project the sample through the current layer
                if( !projectDataThroughLayer( input, output, k ) ){
                    errorLog << "train_(MatrixDouble &trainingData) - Failed to project sample through layer: " << k << endl;
                    return false;
                }
                
                //Copy the result into the training data for the next layer
                for(UINT j=0; j<output.size(); j++){
                    data[i][j] = output[j];
                }
            }
            
            //Use the projected data as the training data for the next layer
            trainingData = data;
            
        }
        
    }
    
    //Flag that the kmeans model has been trained
    trained = true;
    featureVector.resize( numOutputDimensions, 0 );
    
    return true;
}
Example #11
/*!
 * Solve linear system using Gauss-Jordan procedure
 *
 * \throw	ExceptionDimension	incompatible matrix dimensions
 * \throw	ExceptionRuntime	the system has either no solution or an infinity of solutions
 *
 * \param[in]	Coefficients	matrix of equations' coefficients
 * \param[in]	ConstantTerms	column matrix of constant terms
 *
 * \return	solutions packed in a column matrix (MatrixDouble)
 */
MatrixDouble LinearSystem::GaussJordan(const SquareMatrixDouble &Coefficients, const MatrixDouble &ConstantTerms)
{
	size_t n = Coefficients.GetRows();
		
	if (ConstantTerms.GetRows() != n)
	{
		throw ExceptionDimension(StringUTF8("LinearEquationsSystem::GaussJordanSolver("
					"const SquareMatrixDouble *Coefficients, const MatrixDouble *ConstantTerms): ") + 
				_("invalid or incompatible matrix dimensions"));
	}
	else
	{
		USquareMatrixDouble CopyCoefficients = CloneAs<SquareMatrixDouble>(Coefficients);
		UMatrixDouble CopyConstantTerms = CloneAs<MatrixDouble>(ConstantTerms);
	
		for (size_t c = 0; c < n - 1; c++)
		{
			// Search the greatest pivot in column
				
			double Pivot = CopyCoefficients->At(c, c);
			double AbsMaxPivot = fabs(Pivot);
			size_t RowIndex = c;
				
			for (size_t r = c + 1 ; r < n; r++)
			{
				double Candidate = CopyCoefficients->At(r, c);
					
				if (fabs(Candidate) > AbsMaxPivot)
				{
					Pivot = Candidate;
					AbsMaxPivot = fabs(Pivot);
					RowIndex = r;
				}
			}
			
			// If no non-null pivot found, system may have infinite number of solutions
				
			if (Pivot == 0.0)
			{
				throw ExceptionRuntime(_("Equation has either no solution or an infinity of solutions."));
			}
			
			if (RowIndex != c)
			{
				CopyCoefficients->SwapRows(c, RowIndex);
				CopyConstantTerms->SwapRows(c, RowIndex);
			}
				// Elimination
			
			for (size_t r = c + 1; r < n; r++)
			{
				double Coeff = CopyCoefficients->At(r, c);
				
				if (Coeff != 0.0)
				{
					double Scale = - Coeff / Pivot;
					
					for (size_t k = c; k < n; k++)
					{
						CopyCoefficients->IncreaseElement(r, k, CopyCoefficients->At(c, k) * Scale);
					}
					
					CopyConstantTerms->IncreaseElement(r, 0, CopyConstantTerms->At(c, 0) * Scale);
				}
			}
		}
		// End of loop for column
			
		MatrixDouble Solutions(n, 1, 0.0);
			
		Solutions.At(n - 1, 0) = CopyConstantTerms->At(n - 1, 0) / CopyCoefficients->At(n - 1, n - 1);
			
		for (auto r = int(n) - 2; r >= 0; --r)
		{
			double Cumul = 0.0;
			
			for (auto c = int(n) - 1; c > r; --c)
			{
				Cumul += CopyCoefficients->At(r, c) * Solutions.At(c, 0);
			}
				
			Solutions.At(r, 0) = (CopyConstantTerms->At(r, 0) - Cumul) / CopyCoefficients->At(r, r);
		}
			
		return Solutions;
	}
}
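For reference, the same technique (partial pivoting, forward elimination, then back-substitution) as a self-contained sketch over plain std::vector storage; this illustrates the algorithm only and does not use the libcrn API above:

#include <cmath>
#include <stdexcept>
#include <utility>
#include <vector>

std::vector<double> SolveLinearSystem(std::vector< std::vector<double> > A, std::vector<double> b)
{
	const size_t n = A.size();
	for (size_t c = 0; c + 1 < n; c++)
	{
		// Search the greatest pivot in column c
		size_t pivotRow = c;
		for (size_t r = c + 1; r < n; r++)
			if (std::fabs(A[r][c]) > std::fabs(A[pivotRow][c]))
				pivotRow = r;
		if (A[pivotRow][c] == 0.0)
			throw std::runtime_error("no solution or an infinity of solutions");
		std::swap(A[c], A[pivotRow]);
		std::swap(b[c], b[pivotRow]);
		// Eliminate column c below the pivot
		for (size_t r = c + 1; r < n; r++)
		{
			const double scale = -A[r][c] / A[c][c];
			for (size_t k = c; k < n; k++)
				A[r][k] += A[c][k] * scale;
			b[r] += b[c] * scale;
		}
	}
	// Back-substitution, starting from the last row
	std::vector<double> x(n);
	for (int r = int(n) - 1; r >= 0; --r)
	{
		double cumul = 0.0;
		for (size_t c = r + 1; c < n; c++)
			cumul += A[r][c] * x[c];
		x[r] = (b[r] - cumul) / A[r][r];
	}
	return x;
}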
Example #12
int CProblem::setProblem() {
	std::string line;
	std::fstream oFile;
	std::stringstream buf;
	//int bufInt;

	switch (lattice) {
		case 0:
			double bufDouble;
			oFile.open(fileName.c_str());
			if (!oFile.is_open()) {
				std::cerr<< "Cannot open file!\n";
				exit(1);
			}

			std::getline(oFile, line);	/* number of vertices and diameter */
			std::getline(oFile, line);
			buf<<line;
			buf >> n;
			buf >> diameter;
			buf.clear();

			std::getline(oFile, line);	/* coordinates */
			coordinates.resize(n);
			for (unsigned int i=0; i< n; i++) {
				coordinates[i].resize(2);
				std::getline(oFile, line);
				buf << line;
				buf >> bufDouble;	/* the first number on the line is the vertex index, so read and discard it */
				buf >> bufDouble;
				coordinates[i][0] = bufDouble;
				buf >> bufDouble;
				coordinates[i][1] = bufDouble;
				buf.clear();
			}
			oFile.close();
			break;

		case 1:
			n=24;
			coordinates = hexagonalLattice2(n, 1); // number of vertices, number of vertices in first line
			diameter = 7;
			fileName = "hexagonal_lattice.dat";
			break;

		case 2:
			n=23;
			coordinates = triangularLattice2(n,5); // number of vertices, number of vertices in first line
			diameter = 6;
			fileName = "triangular_lattice.dat";
			break;

		case 3:
			n=25;
			//n = 4;
			//diameter = 2;
			coordinates = squareLattice2(n);
			diameter = 8;
			fileName = "square_lattice.dat";
			break;

		default:
			std::cerr << "Wrong lattice type\n";
			exit(-1);
	}	//switch

	coordTOdist();

	set_d_function();

	return 0;
}
Example #13
bool ANBC_Model::train(UINT classLabel,MatrixDouble &trainingData,VectorDouble &weightsVector){

	//Check to make sure the column sizes match
	if( trainingData.getNumCols() != weightsVector.size() ){
		N = 0;
		return false;
	}
    
	UINT M = trainingData.getNumRows();
	N = trainingData.getNumCols();
    this->classLabel = classLabel;

	//Update the weights buffer
	weights = weightsVector;

	//Resize the buffers
	mu.resize( N );
	sigma.resize( N );

	//Calculate the mean for each dimension
	for(UINT j=0; j<N; j++){
		mu[j] = 0.0;

		for(UINT i=0; i<M; i++){
			mu[j] += trainingData[i][j];
        }

		mu[j] /= double(M);
        
        if( mu[j] == 0 ){
            return false;
        }
	}

	//Calculate the sample standard deviation
	for(UINT j=0; j<N; j++){
		sigma[j] = 0.0;

		for(UINT i=0; i<M; i++){
			sigma[j] += SQR( trainingData[i][j]-mu[j] );
        }

		sigma[j] = sqrt( sigma[j]/double(M-1) );
        
        if( sigma[j] == 0 ){
            return false;
        }
	}

	//Now compute the threshold
    double meanPrediction = 0.0;
	VectorDouble predictions(M);
	for(UINT i=0; i<M; i++){
		//Test the ith training example
		vector<double> testData(N);
		for(UINT j=0; j<N; j++) {
			testData[j] = trainingData[i][j];
        }
        
		predictions[i] = predict(testData);
        meanPrediction += predictions[i];
	}

	//Calculate the mean prediction value
	meanPrediction /= double(M);

	//Calculate the standard deviation
	double stdDev = 0.0;
	for(UINT i=0; i<M; i++) {
		stdDev += SQR( predictions[i]-meanPrediction );
    }
	stdDev = sqrt( stdDev / (double(M)-1.0) );

	threshold = meanPrediction-(stdDev*gamma);

	//Update the training mu and sigma values so the threshold value can be dynamically computed at a later stage
	trainingMu = meanPrediction;
	trainingSigma = stdDev;

	return true;
}
Example #14
bool DTW::train_NDDTW(LabelledTimeSeriesClassificationData &trainingData,DTWTemplate &dtwTemplate,UINT &bestIndex){

   UINT numExamples = trainingData.getNumSamples();
   VectorDouble results(numExamples,0.0);
   MatrixDouble distanceResults(numExamples,numExamples);
   dtwTemplate.averageTemplateLength = 0;
    
   for(UINT m=0; m<numExamples; m++){
       
	   MatrixDouble templateA; //The m'th template
	   MatrixDouble templateB; //The n'th template
	   dtwTemplate.averageTemplateLength += trainingData[m].getLength();

	   //Smooth the data if required
	   if( useSmoothing ) smoothData(trainingData[m].getData(),smoothingFactor,templateA);
	   else templateA = trainingData[m].getData();
       
       if( offsetUsingFirstSample ){
           offsetTimeseries(templateA);
       }

	   for(UINT n=0; n<numExamples; n++){
		if(m!=n){
		    //Smooth the data if required
		    if( useSmoothing ) smoothData(trainingData[n].getData(),smoothingFactor,templateB);
		    else templateB = trainingData[n].getData();
            
            if( offsetUsingFirstSample ){
                offsetTimeseries(templateB);
            }

			//Compute the distance between the two time series
            MatrixDouble distanceMatrix(templateA.getNumRows(),templateB.getNumRows());
            vector< IndexDist > warpPath;
			double dist = computeDistance(templateA,templateB,distanceMatrix,warpPath);
            
            trainingLog << "Template: " << m << " Timeseries: " << n << " Dist: " << dist << endl;

			//Update the results values
			distanceResults[m][n] = dist;
			results[m] += dist;
		}else distanceResults[m][n] = 0; //The distance is zero because the two timeseries are the same
	   }
   }

	for(UINT m=0; m<numExamples; m++) results[m]/=(numExamples-1);
	//Find the best average result, this is the result with the minimum value
	bestIndex = 0;
	double bestAverage = results[0];
	for(UINT m=1; m<numExamples; m++){
		if( results[m] < bestAverage ){
			bestAverage = results[m];
			bestIndex = m;
		}
	}

    if( numExamples > 2 ){

        //Work out the threshold value for the best template
        dtwTemplate.trainingMu = results[bestIndex];
        dtwTemplate.trainingSigma = 0.0;

        for(UINT n=0; n<numExamples; n++){
            if(n!=bestIndex){
                dtwTemplate.trainingSigma += SQR( distanceResults[ bestIndex ][n] - dtwTemplate.trainingMu );
            }
        }
        dtwTemplate.trainingSigma = sqrt( dtwTemplate.trainingSigma / double(numExamples-2) );
    }else{
        warningLog << "_train_NDDTW(LabelledTimeSeriesClassificationData &trainingData,DTWTemplate &dtwTemplate,UINT &bestIndex - There are not enough examples to compute the trainingMu and trainingSigma for the template for class " << dtwTemplate.classLabel << endl;
        dtwTemplate.trainingMu = 0.0;
        dtwTemplate.trainingSigma = 0.0;
    }

	//Set the average length of the training examples
	dtwTemplate.averageTemplateLength = (UINT) (dtwTemplate.averageTemplateLength/double(numExamples));
    
    trainingLog << "AverageTemplateLength: " << dtwTemplate.averageTemplateLength << endl;

    //Flag that the training was successful
	return true;
}
Example #15
double DTW::computeDistance(MatrixDouble &timeSeriesA,MatrixDouble &timeSeriesB,MatrixDouble &distanceMatrix,vector< IndexDist > &warpPath){

	const int M = timeSeriesA.getNumRows();
	const int N = timeSeriesB.getNumRows();
	const int C = timeSeriesA.getNumCols();
	int i,j,k,index = 0;
	double totalDist,v,normFactor = 0.;
    
    warpPath.clear();
    if( int(distanceMatrix.getNumRows()) != M || int(distanceMatrix.getNumCols()) != N ){
        distanceMatrix.resize(M, N);
    }

	switch (distanceMethod) {
		case (ABSOLUTE_DIST):
			for(i=0; i<M; i++){
				for(j=0; j<N; j++){
					distanceMatrix[i][j] = 0.0;
					for(k=0; k< C; k++){
					   distanceMatrix[i][j] += fabs(timeSeriesA[i][k]-timeSeriesB[j][k]);
					}
				}
			}
			break;
		case (EUCLIDEAN_DIST):
			//Calculate Euclidean Distance for all possible values
			for(i=0; i<M; i++){
				for(j=0; j<N; j++){
					distanceMatrix[i][j] = 0.0;
					for(k=0; k< C; k++){
						distanceMatrix[i][j] += SQR( timeSeriesA[i][k]-timeSeriesB[j][k] );
					}
					distanceMatrix[i][j] = sqrt( distanceMatrix[i][j] );
				}
			}
			break;
		case (NORM_ABSOLUTE_DIST):
			for(i=0; i<M; i++){
				for(j=0; j<N; j++){
					distanceMatrix[i][j] = 0.0;
					for(k=0; k< C; k++){
					   distanceMatrix[i][j] += fabs(timeSeriesA[i][k]-timeSeriesB[j][k]);
					}
					distanceMatrix[i][j]/=N;
				}
			}
			break;
		default:
			errorLog<<"ERROR: Unknown distance method: "<<distanceMethod<<endl;
			return -1;
			break;
	}

    //Run the recursive search function to build the cost matrix
    double distance = sqrt( d(M-1,N-1,distanceMatrix,M,N) );

    if( isinf(distance) || isnan(distance) ){
        warningLog << "DTW computeDistance(...) - Distance Matrix Values are INF!" << endl;
        return INFINITY;
    }
    
    //cout << "DIST: " << distance << endl;

    //The distMatrix values are negative so make them positive
    for(i=0; i<M; i++){
        for(j=0; j<N; j++){
            distanceMatrix[i][j] = fabs( distanceMatrix[i][j] );
        }
    }

	//Now Create the Warp Path through the cost matrix, starting at the end
    i=M-1;
	j=N-1;
	totalDist = distanceMatrix[i][j];
    warpPath.push_back( IndexDist(i,j,distanceMatrix[i][j]) );
    
	//Use dynamic programming to navigate through the cost matrix until [0][0] has been reached
    normFactor = 1;
	while( true ) {
        if( i==0 && j==0 ) break;
		if( i==0 ){ j--; }
        else{ 
            if( j==0 ) i--;
            else{
                //Find the minimum cell to move to
                v = numeric_limits<double>::max();
                index = 0;
                if( distanceMatrix[i-1][j] < v ){ v = distanceMatrix[i-1][j]; index = 1; }
                if( distanceMatrix[i][j-1] < v ){ v = distanceMatrix[i][j-1]; index = 2; }
                if( distanceMatrix[i-1][j-1] <= v ){ index = 3; }
                switch(index){
                    case(1):
                        i--;
                        break;
                    case(2):
                        j--;
                        break;
                    case(3):
                        i--;
                        j--;
                        break;
                    default:
                        warningLog << "DTW computeDistance(...) - Could not compute a warping path for the input matrix! Dist: " << distanceMatrix[i-1][j] << " i: " << i << " j: " << j << endl;
                        return INFINITY;
                        break;
                }
            }
        }
        normFactor++;
		totalDist += distanceMatrix[i][j];
		warpPath.push_back( IndexDist(i,j,distanceMatrix[i][j]) );
	}

	return totalDist/normFactor;
}
Example #16
bool HMM::predict_continuous(MatrixDouble &timeseries){
    
    if( !trained ){
        errorLog << "predict_continuous(MatrixDouble &timeseries) - The HMM classifier has not been trained!" << endl;
        return false;
    }
    
	if( timeseries.getNumCols() != numInputDimensions ){
        errorLog << "predict_continuous(MatrixDouble &timeseries) - The number of columns in the input matrix (" << timeseries.getNumCols() << ") does not match the num features in the model (" << numInputDimensions << endl;
		return false;
	}
    
    //Scale the input vector if needed
    if( useScaling ){
        const UINT timeseriesLength = timeseries.getNumRows();
        for(UINT j=0; j<numInputDimensions; j++){
            for(UINT i=0; i<timeseriesLength; i++){
                timeseries[i][j] = scale(timeseries[i][j], ranges[j].minValue, ranges[j].maxValue, 0, 1);
            }
        }
    }
    
    if( classLikelihoods.size() != numClasses ) classLikelihoods.resize(numClasses,0);
    if( classDistances.size() != numClasses ) classDistances.resize(numClasses,0);
    
    std::fill(classLikelihoods.begin(),classLikelihoods.end(),0);
    std::fill(classDistances.begin(),classDistances.end(),0);
    
    bestDistance = -1000;
    UINT bestIndex = 0;
    double minValue = -1000;
    
    const UINT numModels = (UINT)continuousModels.size();
    vector< IndexedDouble > results(numModels);
    for(UINT i=0; i<numModels; i++){
        
        //Run the prediction for this model
        if( continuousModels[i].predict_( timeseries ) ){
            results[i].value = continuousModels[i].getLoglikelihood();
            results[i].index = continuousModels[i].getClassLabel();
        }else{
            errorLog << "predict_(VectorDouble &inputVector) - Prediction failed for model: " << i << endl;
            return false;
        }
        
        if( results[i].value < minValue ){
            minValue = results[i].value;
        }
        
        if( results[i].value > bestDistance ){
            bestDistance = results[i].value;
            bestIndex = i;
        }
    }
    
    //Store the phase from the best model
    phase = continuousModels[ bestIndex ].getPhase();
    
    //Sort the results
    std::sort(results.begin(),results.end(),IndexedDouble::sortIndexedDoubleByValueDescending);
    
    //Run the majority vote
    const double committeeWeight = 1.0 / committeeSize;
    for(UINT i=0; i<committeeSize; i++){
        classDistances[ getClassLabelIndexValue( results[i].index ) ] += Util::scale(results[i].value, -1000, 0, 0, committeeWeight, true);
    }
    
    //Turn the class distances into likelihoods
    double sum = Util::sum(classDistances);
    if( sum > 0 ){
        for(UINT k=0; k<numClasses; k++){
            classLikelihoods[k] = classDistances[k] / sum;
        }
        
        //Find the maximum label
        for(UINT k=0; k<numClasses; k++){
            if( classDistances[k] > bestDistance ){
                bestDistance = classDistances[k];
                bestIndex = k;
            }
        }
        
        maxLikelihood = classLikelihoods[ bestIndex ];
        predictedClassLabel = classLabels[ bestIndex ];
    }else{
        //If the sum is not greater than zero, then no class is close to any model
        maxLikelihood = 0;
        predictedClassLabel = 0;
    }
    return true;
}
Example #17
bool HMM::predict_discrete(MatrixDouble &timeseries){
    
    if( !trained ){
        errorLog << "predict_continuous(MatrixDouble &timeseries) - The HMM classifier has not been trained!" << endl;
        return false;
    }
    
    if( timeseries.getNumCols() != 1 ){
        errorLog << "predict_discrete(MatrixDouble &timeseries) The number of columns in the input matrix must be 1. It is: " << timeseries.getNumCols() << endl;
        return false;
    }
    
    //Convert the matrix double to an observation sequence
    const UINT M = timeseries.getNumRows();
    vector<UINT> observationSequence( M );
    
    for(UINT i=0; i<M; i++){
        observationSequence[i] = (UINT)timeseries[i][0];
        
        if( observationSequence[i] >= numSymbols ){
            errorLog << "predict_discrete(VectorDouble &inputVector) - The new observation is not a valid symbol! It should be in the range [0 numSymbols-1]" << endl;
            return false;
        }
    }
    
    if( classLikelihoods.size() != numClasses ) classLikelihoods.resize(numClasses,0);
    if( classDistances.size() != numClasses ) classDistances.resize(numClasses,0);
    
    bestDistance = -99e+99;
    UINT bestIndex = 0;
    double sum = 0;
	for(UINT k=0; k<numClasses; k++){
		classDistances[k] = discreteModels[k].predict( observationSequence );
        
        //Set the class likelihood as the antilog of the class distances
        classLikelihoods[k] = antilog( classDistances[k] );
        
        //The loglikelihood values are negative so we want the values closest to 0
		if( classDistances[k] > bestDistance ){
			bestDistance = classDistances[k];
			bestIndex = k;
		}
        
        sum += classLikelihoods[k];
    }
    
    //Turn the class distances into proper likelihoods
    for(UINT k=0; k<numClasses; k++){
		classLikelihoods[k] /= sum;
    }
    
    maxLikelihood = classLikelihoods[ bestIndex ];
    predictedClassLabel = classLabels[ bestIndex ];
    
    if( useNullRejection ){
        if( maxLikelihood > nullRejectionThresholds[ bestIndex ] ){
            predictedClassLabel = classLabels[ bestIndex ];
        }else predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
    }
    
    return true;
}
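predict_discrete() converts each model's loglikelihood back into a likelihood via antilog before normalizing. A minimal sketch of that helper, assuming natural logarithms (as the discrete HMM's loglikelihoods use):

#include <cmath>

//Antilog of a natural-log likelihood: exp(log p) = p
inline double antilog(const double logLikelihood)
{
    return std::exp(logLikelihood);
}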
Example #18
int main() {
    vector<string> gestures(0,"");
    GetFilesInDirectory(gestures, "rawdata");
    CreateDirectory("processed", NULL);
    sort(gestures.begin(), gestures.end());
    data = vector<vector<vector<double > > >(gestures.size(), vector<vector<double > >(0,vector<double>(0,0)));
    for(size_t i = 0; i < gestures.size(); i++) {
        ifstream fin(gestures[i]);
        int n; fin >> n;
       // cerr << gestures[i] << endl;
       // cerr << n << endl;
        data[i] = vector<vector<double> >(n, vector<double>(NUMPARAM, 0));
        for(int j = 0; j < n; j++) {
            for(int k = 0; k < NUMPARAM; k++) {
                fin >> data[i][j][k];
            }
        }
        fin.close();
    }


    //Create a new instance of the TimeSeriesClassificationDataStream
    TimeSeriesClassificationData trainingData;

    // ax, ay, az
    trainingData.setNumDimensions(3);
    trainingData.setDatasetName("processed\\GestureTrainingData.txt");
    ofstream labelfile("processed\\GestureTrainingDataLabels.txt");
    UINT currLabel = 1;
    Random random;
    map<string, int> gesturenames;
    for(size_t overall = 0; overall < gestures.size(); overall++) {

        string nam = gestures[overall].substr(8,gestures[overall].find_first_of('_')-8);
        if(gesturenames.count(nam)) currLabel = gesturenames[nam];
        else {
            currLabel = gesturenames.size()+1;
            gesturenames[nam] = currLabel;
            labelfile << currLabel << " " << nam << endl;
        }
        MatrixDouble trainingSample;
        VectorDouble currVec( trainingData.getNumDimensions() );
        for(size_t k = 1; k < data[overall].size(); k++) {
            for(UINT j=0; j<currVec.size(); j++){
                currVec[j] = data[overall][k][j];
            }
            trainingSample.push_back(currVec);
        }
        trainingData.addSample(currLabel, trainingSample);

    }
    for(size_t i = 0; i < gestures.size(); i++) {
        MatrixDouble trainingSample;
        VectorDouble currVec(trainingData.getNumDimensions());
        for(UINT j = 0; j < currVec.size(); j++) {
            currVec[j] = random.getRandomNumberUniform(-1.0, 1.0);
        }
        for(size_t k = 0; k < 100; k++) {
            trainingSample.push_back(currVec);
        }
        trainingData.addSample(0, trainingSample);
    }

    //After recording your training data you can then save it to a file
    if( !trainingData.save( "processed\\TrainingData.grt" ) ){
        cout << "ERROR: Failed to save dataset to file!\n";
        return EXIT_FAILURE;
    }

    //This can then be loaded later
    if( !trainingData.load( "processed\\TrainingData.grt" ) ){
        cout << "ERROR: Failed to load dataset from file!\n";
        return EXIT_FAILURE;
    }

    //This is how you can get some stats from the training data
    string datasetName = trainingData.getDatasetName();
    string infoText = trainingData.getInfoText();
    UINT numSamples = trainingData.getNumSamples();
    UINT numDimensions = trainingData.getNumDimensions();
    UINT numClasses = trainingData.getNumClasses();

    cout << "Dataset Name: " << datasetName << endl;
    cout << "InfoText: " << infoText << endl;
    cout << "NumberOfSamples: " << numSamples << endl;
    cout << "NumberOfDimensions: " << numDimensions << endl;
    cout << "NumberOfClasses: " << numClasses << endl;

    //You can also get the minimum and maximum ranges of the data
    vector< MinMax > ranges = trainingData.getRanges();

    cout << "The ranges of the dataset are: \n";
    for(UINT j=0; j<ranges.size(); j++){
        cout << "Dimension: " << j << " Min: " << ranges[j].minValue << " Max: " << ranges[j].maxValue << endl;
    }

    DTW dtw;

    //Configure the classifier before training it, so the settings take effect
    dtw.enableNullRejection(true);
    dtw.setNullRejectionCoeff(4);
    dtw.enableTrimTrainingData(true, 0.1, 90);

    if( !dtw.train( trainingData ) ){
        cerr << "Failed to train classifier!\n";
        exit(EXIT_FAILURE);
    }

    //Save the DTW model to a file
    if( !dtw.saveModelToFile("processed\\DTWModel.txt") ){
        cerr << "Failed to save the classifier model!\n";
        exit(EXIT_FAILURE);
    }

    trainingData.clear();

    return EXIT_SUCCESS;
}
Example #19
int main (int argc, const char * argv[])
{
    //Create a new instance of the TimeSeriesClassificationData
    TimeSeriesClassificationData trainingData;
    
    //Set the dimensionality of the data (you need to do this before you can add any samples)
    trainingData.setNumDimensions( 3 );
    
    //You can also give the dataset a name (the name should have no spaces)
    trainingData.setDatasetName("DummyData");
    
    //You can also add some info text about the data
    trainingData.setInfoText("This data contains some dummy timeseries data");
    
    //Here you would record a time series, when you have finished recording the time series then add the training sample to the training data
    UINT gestureLabel = 1;
    MatrixDouble trainingSample;
    
    //For now we will just add 10 x 20 random walk data timeseries
    Random random;
    for(UINT k=0; k<10; k++){//For the number of classes
        gestureLabel = k+1;
        
        //Get the init random walk position for this gesture
        VectorDouble startPos( trainingData.getNumDimensions() );
        for(UINT j=0; j<startPos.size(); j++){
            startPos[j] = random.getRandomNumberUniform(-1.0,1.0);
        }
                
        //Generate the 20 time series
        for(UINT x=0; x<20; x++){
            
            //Clear any previous timeseries
            trainingSample.clear();
            
            //Generate the random walk
            UINT randomWalkLength = random.getRandomNumberInt(90, 110);
            VectorDouble sample = startPos;
            for(UINT i=0; i<randomWalkLength; i++){
                for(UINT j=0; j<startPos.size(); j++){
                    sample[j] += random.getRandomNumberUniform(-0.1,0.1);
                }
                
                //Add the sample to the training sample
                trainingSample.push_back( sample );
            }
            
            //Add the training sample to the dataset
            trainingData.addSample( gestureLabel, trainingSample );
            
        }
    }
    
    //After recording your training data you can then save it to a file
    if( !trainingData.saveDatasetToFile( "TrainingData.txt" ) ){
	    cout << "Failed to save dataset to file!\n";
	    return EXIT_FAILURE;
	}
    
    //This can then be loaded later
    if( !trainingData.loadDatasetFromFile( "TrainingData.txt" ) ){
		cout << "Failed to load dataset from file!\n";
		return EXIT_FAILURE;
	}
    
    //This is how you can get some stats from the training data
    string datasetName = trainingData.getDatasetName();
    string infoText = trainingData.getInfoText();
    UINT numSamples = trainingData.getNumSamples();
    UINT numDimensions = trainingData.getNumDimensions();
    UINT numClasses = trainingData.getNumClasses();
    
    cout << "Dataset Name: " << datasetName << endl;
    cout << "InfoText: " << infoText << endl;
    cout << "NumberOfSamples: " << numSamples << endl;
    cout << "NumberOfDimensions: " << numDimensions << endl;
    cout << "NumberOfClasses: " << numClasses << endl;
    
    //You can also get the minimum and maximum ranges of the data
    vector< MinMax > ranges = trainingData.getRanges();
    
    cout << "The ranges of the dataset are: \n";
    for(UINT j=0; j<ranges.size(); j++){
        cout << "Dimension: " << j << " Min: " << ranges[j].minValue << " Max: " << ranges[j].maxValue << endl;
    }
    
    //If you want to partition the dataset into a training dataset and a test dataset then you can use the partition function
    //A value of 80 means that 80% of the original data will remain in the training dataset and 20% will be returned as the test dataset
    TimeSeriesClassificationData testData = trainingData.partition( 80 );
    
    //If you have multiple datasets that you want to merge together then use the merge function
    if( !trainingData.merge( testData ) ){
		cout << "Failed to merge datasets!\n";
		return EXIT_FAILURE;
	}
    
    //If you want to run K-Fold cross validation using the dataset then you should first split the dataset into K folds
    //A value of 10 splits the dataset into 10 folds and the true parameter signals that stratified sampling should be used
    if( !trainingData.spiltDataIntoKFolds( 10, true ) ){
		cout << "Failed to spiltDataIntoKFolds!\n";
		return EXIT_FAILURE;
	}
    
    //After you have called the split function you can then get the training and test sets for each fold
    for(UINT foldIndex=0; foldIndex<10; foldIndex++){
        TimeSeriesClassificationData foldTrainingData = trainingData.getTrainingFoldData( foldIndex );
        TimeSeriesClassificationData foldTestingData = trainingData.getTestFoldData( foldIndex );
    }
    
    //If need you can clear any training data that you have recorded
    trainingData.clear();
    
    return EXIT_SUCCESS;
}
Example #20
int main (int argc, const char * argv[])
{
    //Create an empty matrix double
    MatrixDouble matrix;
    
    //Resize the matrix
    matrix.resize( 100, 2 );
    
    //Set all the values in the matrix to zero
    matrix.setAllValues( 0 );
    
    //Loop over the data and set the values to incrementing counter values
    UINT counter = 0;
    for(UINT i=0; i<matrix.getNumRows(); i++){
        for(UINT j=0; j<matrix.getNumCols(); j++){
            matrix[i][j] = counter++;
        }
    }
    
    //Add a new row at the very end of the matrix
    VectorDouble newRow(2);
    newRow[0] = 1000;
    newRow[1] = 2000;
    matrix.push_back( newRow );
    
    //Print the values
    cout << "Matrix Data: \n";
    for(UINT i=0; i<matrix.getNumRows(); i++){
        for(UINT j=0; j<matrix.getNumCols(); j++){
            cout << matrix[i][j] << "\t";
        }
        cout << endl;
    }
    cout << endl;
    
    //Get the second row as a vector
    VectorDouble rowVector = matrix.getRowVector( 1 );
    
    cout << "Row Vector Data: \n";
    for(UINT i=0; i<rowVector.size(); i++){
        cout << rowVector[i] << "\t";
    }
    cout << endl;
    
    //Get the second column as a vector
    VectorDouble colVector = matrix.getColVector( 1 );
    
    cout << "Column Vector Data: \n";
    for(UINT i=0; i<colVector.size(); i++){
        cout << colVector[i] << "\n";
    }
    cout << endl;
    
    //Get the mean of each column
	VectorDouble mean = matrix.getMean();
	
	cout << "Mean: \n";
    for(UINT i=0; i<mean.size(); i++){
        cout << mean[i] << "\n";
    }
    cout << endl;
	
	//Get the Standard Deviation of each column
	VectorDouble stdDev = matrix.getStdDev();
	
	cout << "StdDev: \n";
    for(UINT i=0; i<stdDev.size(); i++){
        cout << stdDev[i] << "\n";
    }
    cout << endl;
	
	//Get the covariance matrix
	MatrixDouble cov = matrix.getCovarianceMatrix();
	
	cout << "Covariance Matrix: \n";
    for(UINT i=0; i<cov.getNumRows(); i++){
        for(UINT j=0; j<cov.getNumCols(); j++){
            cout << cov[i][j] << "\t";
        }
        cout << endl;
    }
    cout << endl;

	vector< MinMax > ranges = matrix.getRanges();
	
	cout << "Ranges: \n";
    for(UINT i=0; i<ranges.size(); i++){
        cout << "i: " << i << "\tMinValue: " << ranges[i].minValue << "\tMaxValue:" << ranges[i].maxValue << "\n";
    }
    cout << endl;
    
    //Save the matrix data to a csv file
    matrix.save( "data.csv" );
    
    //load the matrix data from a csv file
    matrix.load( "data.csv" );
    
    return EXIT_SUCCESS;
}
Example #21
bool DTW::predict(MatrixDouble inputTimeSeries){

    if( !trained ){
        errorLog << "predict(Matrix<double> &inputTimeSeries) - The DTW templates have not been trained!" << endl;
        return false;
    }

    if( classLikelihoods.size() != numTemplates ) classLikelihoods.resize(numTemplates);
    if( classDistances.size() != numTemplates ) classDistances.resize(numTemplates);

    predictedClassLabel = 0;
    maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
    for(UINT k=0; k<classLikelihoods.size(); k++){
        classLikelihoods[k] = 0;
        classDistances[k] = DEFAULT_NULL_LIKELIHOOD_VALUE;
    }

	if( numFeatures != inputTimeSeries.getNumCols() ){
        errorLog << "predict(Matrix<double> &inputTimeSeries) - The number of features in the model (" << numFeatures << ") do not match that of the input time series (" << inputTimeSeries.getNumCols() << ")" << endl;
        return false;
    }

	//Perform any preprocessing if required
    MatrixDouble *timeSeriesPtr = &inputTimeSeries;
    MatrixDouble processedTimeSeries;
    MatrixDouble tempMatrix;
	if(useScaling){
        scaleData(*timeSeriesPtr,processedTimeSeries);
        timeSeriesPtr = &processedTimeSeries;
    }
    
    //Normalize the data if needed
	if( useZNormalisation ){
        znormData(*timeSeriesPtr,processedTimeSeries);
        timeSeriesPtr = &processedTimeSeries;
    }

	//Smooth the data if required
	if( useSmoothing ){
		smoothData(*timeSeriesPtr,smoothingFactor,tempMatrix);
		timeSeriesPtr = &tempMatrix;
	}
    
    //Offset the timeseries if required
    if( offsetUsingFirstSample ){
        offsetTimeseries( *timeSeriesPtr );
    }

	//Make the prediction by finding the closest template
    double sum = 0;
    if( distanceMatrices.size() != numTemplates ) distanceMatrices.resize( numTemplates );
    if( warpPaths.size() != numTemplates ) warpPaths.resize( numTemplates );
    
	//Test the timeSeries against all the templates in the timeSeries buffer
	for(UINT k=0; k<numTemplates; k++){
		//Perform DTW
		classDistances[k] = computeDistance(templatesBuffer[k].timeSeries,*timeSeriesPtr,distanceMatrices[k],warpPaths[k]);
        classLikelihoods[k] = classDistances[k];
        sum += classLikelihoods[k];
	}

	//See which gave the min distance
	UINT closestTemplateIndex = 0;
	bestDistance = classDistances[0];
	for(UINT k=1; k<numTemplates; k++){
		if( classDistances[k] < bestDistance ){
			bestDistance = classDistances[k];
			closestTemplateIndex = k;
		}
	}
    
    //Normalize the class likelihoods and check which class has the maximum likelihood
    UINT maxLikelihoodIndex = 0;
    maxLikelihood = 0;
    for(UINT k=0; k<numTemplates; k++){
        classLikelihoods[k] = (sum-classLikelihoods[k])/sum;
        if( classLikelihoods[k] > maxLikelihood ){
            maxLikelihood = classLikelihoods[k];
            maxLikelihoodIndex = k;
        }
    }

    if( useNullRejection ){

        switch( rejectionMode ){
            case TEMPLATE_THRESHOLDS:
                if( bestDistance <= nullRejectionThresholds[ closestTemplateIndex ] ) predictedClassLabel = templatesBuffer[ closestTemplateIndex ].classLabel;
                else predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
                break;
            case CLASS_LIKELIHOODS:
                if( maxLikelihood >= 0.99 )  predictedClassLabel = templatesBuffer[ maxLikelihoodIndex ].classLabel;
                else predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
                break;
            case THRESHOLDS_AND_LIKELIHOODS:
                if( bestDistance <= nullRejectionThresholds[ closestTemplateIndex ] && maxLikelihood >= 0.99 )
                    predictedClassLabel = templatesBuffer[ closestTemplateIndex ].classLabel;
                else predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
                break;
            default:
                errorLog << "predict(Matrix<double> &timeSeries) - Unknown RejectionMode!" << endl;
                return false;
                break;
        }

	}else predictedClassLabel = templatesBuffer[ closestTemplateIndex ].classLabel;

    return true;
}
Example #22
bool GaussianMixtureModels::train(const MatrixDouble &data,const UINT K){

	modelTrained = false;
    failed = false;

	//Clear any previous training results
	det.clear();
	invSigma.clear();

    if( data.getNumRows() == 0 ){
        errorLog << "train(const MatrixDouble &trainingData,const unsigned int K) - Training Failed! Training data is empty!" << endl;
        return false;
    }

	//Resize the variables
	M = data.getNumRows();
	N = data.getNumCols();
	this->K = K;
	
	//Resize mu and resp
	mu.resize(K,N);
	resp.resize(M,K);
	
	//Resize sigma
	sigma.resize(K);
	for(UINT k=0; k<K; k++){
		sigma[k].resize(N,N);
	}
	
	//Resize frac and lndets
	frac.resize(K);
	lndets.resize(K);

	//Pick K random starting points for the initial guesses of mu
	Random random;
	vector< UINT > randomIndexs(M);
	for(UINT i=0; i<M; i++) randomIndexs[i] = i;
	for(UINT i=0; i<M; i++){
		SWAP(randomIndexs[ random.getRandomNumberInt(0,M) ],randomIndexs[ random.getRandomNumberInt(0,M) ]);
	}
	for(UINT k=0; k<K; k++){
		for(UINT n=0; n<N; n++){
			mu[k][n] = data[ randomIndexs[k] ][n];
		}
	}

	//Setup sigma and the uniform prior on P(k)
	for(UINT k=0; k<K; k++){
		frac[k] = 1.0/double(K);
		for(UINT i=0; i<N; i++){
			for(UINT j=0; j<N; j++) sigma[k][i][j] = 0;
			sigma[k][i][i] = 1.0e-10;   //Set the diagonal to a small number
		}
	}

	loglike = 0;
	UINT iterCounter = 0;
	bool keepGoing = true;
	double change = 99.9e99;
    
	while( keepGoing ){
		change = estep( data );
		mstep( data );

		if( fabs( change ) < minChange ) keepGoing = false;
		if( ++iterCounter >= maxIter ) keepGoing = false;
		if( failed ) keepGoing = false;
	}

	if( failed ){
		errorLog << "train(UnlabelledClassificationData &trainingData,unsigned int K) - Training failed!" << endl;
		return modelTrained;
	}

	//Compute the inverse of sigma and the determinants for prediction
	if( !computeInvAndDet() ){
        det.clear();
        invSigma.clear();
        errorLog << "train(UnlabelledClassificationData &trainingData,unsigned int K) - Failed to compute inverse and determinat!" << endl;
        return false;
    }

    //Flag that the model was trained
	modelTrained = true;

	return true;
}
Example #23
void metrics_subset_data(){
    ANBC anbc;
    anbc.enableScaling(true);
    anbc.enableNullRejection(true);
    
    MinDist minDist;
    minDist.setNumClusters(4);
    minDist.enableScaling(true);
    minDist.enableNullRejection(true);
    
    //    ofstream opRecall("anbc-recall-nr-0-10.csv");
    //    opRecall <<"nrCoeff,class0,class1,class2,class3,class4,class5\n";
    //
    //    ofstream opInstanceRes("anbc-prediction-nr-2.csv");
    //    opInstanceRes <<"actualClass,predictedClass,maximumLikelihood,lX,lY,lZ,rX,rY,rZ\n";
    //
    //    ofstream opMetrics("anbc-precision-recall-fmeasure-nr-2.csv");
    //    opMetrics <<"class1,class2,class3,class4,class5\n";
    //
    //    ofstream opConfusion("anbc-confusion-nr-2.csv");
    //    opConfusion <<"class0,class1,class2,class3,class4,class5\n";
    
    
    ofstream opRecall("mindist-recall-nr-0-10.csv");
    opRecall <<"nrCoeff,class0,class1,class2,class3,class4,class5\n";
    
    ofstream opInstanceRes("mindist-prediction-nr-2.csv");
    opInstanceRes <<"actualClass,predictedClass,maximumLikelihood,lZ,lY,lZ,rZ,rY,rZ\n";
    
    ofstream opMetrics("mindist-precision-recall-fmeasure-nr-2.csv");
    opMetrics <<"class1,class2,class3,class4,class5\n";
    
    ofstream opConfusion("mindist-confusion-nr-2.csv");
    opConfusion <<"class0,class1,class2,class3,class4,class5\n";
    
    // Training and test data
    ClassificationData trainingData;
    ClassificationData testData;
    ClassificationData nullGestureData;
    
    string file_path = "../../../data/";
    
    if( !trainingData.loadDatasetFromFile(file_path +  "train/grt/hri-training-dataset.txt") ){
        std::cout <<"Failed to load training data!\n";
    }
    
    if( !nullGestureData.loadDatasetFromFile(file_path +  "test/grt/0.txt") ){
        std::cout <<"Failed to load null gesture data!\n";
    }
    
    
    testData = trainingData.partition(90);
    testData.sortClassLabels();
//    testData.saveDatasetToFile("anbc-validation-subset.txt");
    testData.saveDatasetToFile("mindist-validation-subset.txt");
    
    
    for(double nullRejectionCoeff = 0; nullRejectionCoeff <= 10; nullRejectionCoeff=nullRejectionCoeff+0.2){
        
        //        anbc.setNullRejectionCoeff(nullRejectionCoeff);
        //        GestureRecognitionPipeline pipeline;
        //        pipeline.setClassifier(anbc);
        
        minDist.setNullRejectionCoeff(nullRejectionCoeff);
        GestureRecognitionPipeline pipeline;
        pipeline.setClassifier(minDist);
        
        pipeline.train(trainingData);
        
        pipeline.test(testData);
        TestResult testRes = pipeline.getTestResults();
        
        opRecall << nullRejectionCoeff << ",";
        
        
        //null rejection prediction
        double accuracy = 0;
        for(UINT i=0; i<nullGestureData.getNumSamples(); i++){
            
            vector< double > inputVector = nullGestureData[i].getSample();
            
            if( !pipeline.predict( inputVector )){
                std::cout << "Failed to perform prediction for test sampel: " << i <<"\n";
            }
            
            UINT predictedClassLabel = pipeline.getPredictedClassLabel();
            if(predictedClassLabel == 0 ) accuracy++;
        }
        
        opRecall << accuracy/double(nullGestureData.getNumSamples()) << ",";
        
        
        // other classes prediction
        for(int cl = 0; cl < testRes.recall.size(); cl++ ){
            opRecall << testRes.recall[cl];
            if(cl < testRes.recall.size() - 1){
                opRecall << ",";
            }
        }
        
        opRecall<< endl;
        
        
        // Calculate instance prediction, precision, recall, fmeasure and confusion matrix for nullRejection 2.0
        if(AreDoubleSame(nullRejectionCoeff, 2.0))
        {
            //instance prediction
            for(UINT i=0; i<testData.getNumSamples(); i++){
                
                UINT actualClassLabel = testData[i].getClassLabel();
                vector< double > inputVector = testData[i].getSample();
                
                if( !pipeline.predict( inputVector )){
                    std::cout << "Failed to perform prediction for test sampel: " << i <<"\n";
                }
                
                UINT predictedClassLabel = pipeline.getPredictedClassLabel();
                double maximumLikelihood = pipeline.getMaximumLikelihood();
                
                opInstanceRes << actualClassLabel << "," << predictedClassLabel << "," << maximumLikelihood << ","
                << inputVector[0] << "," << inputVector[1] << ","  << inputVector[2] << ","  << inputVector[3] << ","  << inputVector[4] << ","  << inputVector[5] << "\n";
                
            }
            
            //precision, recall, fmeasure
            for(int cl = 0; cl < testRes.precision.size(); cl++ ){
                opMetrics << testRes.precision[cl];
                if(cl < testRes.precision.size() - 1){
                    opMetrics << ",";
                }
            }
            opMetrics<< endl;
            
            for(int cl = 0; cl < testRes.recall.size(); cl++ ){
                opMetrics << testRes.recall[cl];
                
                if(cl < testRes.recall.size() - 1){
                    opMetrics << ",";
                }
            }
            opMetrics<< endl;
            
            for(int cl = 0; cl < testRes.fMeasure.size(); cl++ ){
                opMetrics << testRes.fMeasure[cl];
                
                if(cl < testRes.fMeasure.size() - 1){
                    opMetrics << ",";
                }
            }
            opMetrics<< endl;
            
            //confusion matrix
            MatrixDouble matrix = testRes.confusionMatrix;
            for(UINT i=0; i<matrix.getNumRows(); i++){
                for(UINT j=0; j<matrix.getNumCols(); j++){
                    opConfusion << matrix[i][j];
                    
                    if(j < matrix.getNumCols() - 1){
                        opConfusion << ",";
                    }
                    
                }
                opConfusion << endl;
            }
            opConfusion << endl;
            
        }
        
        
        
    }
    
    cout << "Done\n";
}
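The AreDoubleSame helper used above is not defined in this example; since the loop accumulates 0.2 steps, an exact equality test against 2.0 would be unreliable and a tolerance-based comparison is needed. A minimal sketch (the name matches the call site; the tolerance is an assumption):

#include <cmath>

//Hypothetical helper: treat two doubles as equal when they differ by less
//than a small tolerance (exact equality rarely holds for accumulated 0.2 steps)
bool AreDoubleSame(double a, double b, double eps = 1e-9){
    return std::fabs(a - b) < eps;
}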
int main (int argc, const char * argv[])
{
    //Load some training data from a file
    ClassificationData trainingData;
    
    if( !trainingData.loadDatasetFromFile("HelloWorldTrainingData.grt") ){
        cout << "ERROR: Failed to load training data from file\n";
        return EXIT_FAILURE;
    }
    
    cout << "Data Loaded\n";
    
    //Print out some stats about the training data
    trainingData.printStats();
    
    //Partition the training data into a training dataset and a test dataset. 80 means that 80%
    //of the data will be used for the training data and 20% will be returned as the test dataset
    ClassificationData testData = trainingData.partition(80);
    
    //Create a new Gesture Recognition Pipeline using an Adaptive Naive Bayes Classifier
    GestureRecognitionPipeline pipeline;
    pipeline.setClassifier( ANBC() );
    
    //Train the pipeline using the training data
    if( !pipeline.train( trainingData ) ){
        cout << "ERROR: Failed to train the pipeline!\n";
        return EXIT_FAILURE;
    }
    
    //Save the pipeline to a file
	if( !pipeline.savePipelineToFile( "HelloWorldPipeline.grt" ) ){
        cout << "ERROR: Failed to save the pipeline!\n";
        return EXIT_FAILURE;
    }
    
	//Load the pipeline from a file
	if( !pipeline.loadPipelineFromFile( "HelloWorldPipeline.grt" ) ){
        cout << "ERROR: Failed to load the pipeline!\n";
        return EXIT_FAILURE;
    }
    
    //Test the pipeline using the test data
    if( !pipeline.test( testData ) ){
        cout << "ERROR: Failed to test the pipeline!\n";
        return EXIT_FAILURE;
    }
    
    //Print some stats about the testing
    cout << "Test Accuracy: " << pipeline.getTestAccuracy() << endl;
    
    cout << "Precision: ";
    for(UINT k=0; k<pipeline.getNumClassesInModel(); k++){
        UINT classLabel = pipeline.getClassLabels()[k];
        cout << "\t" << pipeline.getTestPrecision(classLabel);
    }cout << endl;
    
    cout << "Recall: ";
    for(UINT k=0; k<pipeline.getNumClassesInModel(); k++){
        UINT classLabel = pipeline.getClassLabels()[k];
        cout << "\t" << pipeline.getTestRecall(classLabel);
    }cout << endl;
    
    cout << "FMeasure: ";
    for(UINT k=0; k<pipeline.getNumClassesInModel(); k++){
        UINT classLabel = pipeline.getClassLabels()[k];
        cout << "\t" << pipeline.getTestFMeasure(classLabel);
    }cout << endl;
    
    MatrixDouble confusionMatrix = pipeline.getTestConfusionMatrix();
    cout << "ConfusionMatrix: \n";
    for(UINT i=0; i<confusionMatrix.getNumRows(); i++){
        for(UINT j=0; j<confusionMatrix.getNumCols(); j++){
            cout << confusionMatrix[i][j] << "\t";
        }cout << endl;
    }
    
    return EXIT_SUCCESS;
}
Example #25
0
bool PrincipalComponentAnalysis::computeFeatureVector_(const MatrixDouble &data,const UINT analysisMode) {

    trained = false;
    const UINT M = data.getNumRows();
    const UINT N = data.getNumCols();
    this->numInputDimensions = N;

    MatrixDouble msData( M, N );

    //Compute the mean and standard deviation of the input data
    mean = data.getMean();
    stdDev = data.getStdDev();

    if( normData ) {
        //Normalize the data
        for(UINT i=0; i<M; i++)
            for(UINT j=0; j<N; j++)
                msData[i][j] = (data[i][j]-mean[j]) / stdDev[j];

    } else {
        //Mean Subtract Data
        for(UINT i=0; i<M; i++)
            for(UINT j=0; j<N; j++)
                msData[i][j] = data[i][j] - mean[j];
    }

    //Get the covariance matrix
    MatrixDouble cov = msData.getCovarianceMatrix();

    //Use Eigen Value Decomposition to find eigenvectors of the covariance matrix
    EigenvalueDecomposition eig;

    if( !eig.decompose( cov ) ) {
        mean.clear();
        stdDev.clear();
        componentWeights.clear();
        sortedEigenvalues.clear();
        eigenvectors.clear();
        errorLog << "computeFeatureVector(const MatrixDouble &data,UINT analysisMode) - Failed to decompose input matrix!" << endl;
        return false;
    }

    //Get the eigenvectors and eigenvalues
    eigenvectors = eig.getEigenvectors();
    VectorDouble eigenvalues = eig.getRealEigenvalues();

    //Any eigenvalues less than 0 are not worth anything so set to 0
    for(UINT i=0; i<eigenvalues.size(); i++) {
        if( eigenvalues[i] < 0 )
            eigenvalues[i] = 0;
    }

    //Sort the eigenvalues and compute the component weights
    double sum = 0;
    UINT componentIndex = 0;
    sortedEigenvalues.clear();
    componentWeights.resize(N,0);

    while( true ) {
        double maxValue = 0;
        UINT index = 0;
        for(UINT i=0; i<eigenvalues.size(); i++) {
            if( eigenvalues[i] > maxValue ) {
                maxValue = eigenvalues[i];
                index = i;
            }
        }
        if( maxValue == 0 || componentIndex >= eigenvalues.size() ) {
            break;
        }
        sortedEigenvalues.push_back( IndexedDouble(index,maxValue) );
        componentWeights[ componentIndex++ ] = eigenvalues[ index ];
        sum += eigenvalues[ index ];
        eigenvalues[ index ] = 0; //Zero this eigenvalue so it won't be selected again
    }

    double cumulativeVariance = 0;
    switch( analysisMode ) {
    case MAX_VARIANCE:
        //Normalize the component weights and work out how many components are needed to reach the maxVariance
        numPrincipalComponents = 0;
        for(UINT k=0; k<N; k++) {
            componentWeights[k] /= sum;
            cumulativeVariance += componentWeights[k];
            if( cumulativeVariance >= maxVariance && numPrincipalComponents==0 ) {
                numPrincipalComponents = k+1;
            }
        }
        break;
    case MAX_NUM_PCS:
        //Normalize the component weights and compute the maxVariance
        maxVariance = 0;
        for(UINT k=0; k<N; k++) {
            componentWeights[k] /= sum;
            if( k < numPrincipalComponents ) {
                maxVariance += componentWeights[k];
            }
        }
        break;
    default:
        errorLog << "computeFeatureVector(const MatrixDouble &data,UINT analysisMode) - Unknown analysis mode!" << endl;
        break;
    }

    //Flag that the features have been computed
    trained = true;

    return true;
}
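A short usage sketch for the routine above, assuming the public GRT wrappers computeFeatureVector(data, maxVariance) and project(data, prjData) that call into it; the 0.95 variance target and the dataset are illustrative:

#include <GRT/GRT.h>
#include <iostream>
using namespace GRT;

//Reduce a dataset to the components that explain 95% of its variance (a sketch)
void pcaExample(const MatrixDouble &data){
    PrincipalComponentAnalysis pca;
    if( !pca.computeFeatureVector(data, 0.95) ){
        std::cout << "Failed to run PCA\n";
        return;
    }
    std::cout << "Components kept: " << pca.getNumPrincipalComponents() << std::endl;

    MatrixDouble projected;
    if( pca.project(data, projected) ){
        std::cout << "Projected size: " << projected.getNumRows()
                  << " x " << projected.getNumCols() << std::endl;
    }
}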
bool TimeSeriesClassificationData::loadDatasetFromCSVFile(const string &filename){
    
    numDimensions = 0;
    datasetName = "NOT_SET";
    infoText = "";
    
    //Clear any previous data
    clear();
    
    //Parse the CSV file
    FileParser parser;
    
    if( !parser.parseCSVFile(filename,true) ){
        errorLog << "loadDatasetFromCSVFile(const string &filename) - Failed to parse CSV file!" << endl;
        return false;
    }
    
    if( !parser.getConsistentColumnSize() ){
        errorLog << "loadDatasetFromCSVFile(const string &filename) - The CSV file does not have a consistent number of columns!" << endl;
        return false;
    }
    
    if( parser.getColumnSize() <= 2 ){
        errorLog << "loadDatasetFromCSVFile(const string &filename) - The CSV file does not have enough columns! It should contain at least three columns!" << endl;
        return false;
    }
    
    //Set the number of dimensions
    numDimensions = parser.getColumnSize()-2;
    
    //Reserve the memory for the data
    data.reserve( parser.getRowSize() );
    
    UINT sampleCounter = 0;
    UINT lastSampleCounter = 0;
    UINT classLabel = 0;
    UINT j = 0;
    UINT n = 0;
    VectorDouble sample(numDimensions);
    MatrixDouble timeseries;
    for(UINT i=0; i<parser.getRowSize(); i++){
        
        sampleCounter = Util::stringToInt( parser[i][0] );
        
        //Check to see if a new timeseries has started, if so then add the previous time series as a sample and start recording the new time series
        if( sampleCounter != lastSampleCounter && i != 0 ){
            //Add the labelled sample to the dataset
            if( !addSample(classLabel, timeseries) ){
                warningLog << "loadDatasetFromCSVFile(const string &filename,const UINT classLabelColumnIndex) - Could not add sample " << i << " to the dataset!" << endl;
            }
            timeseries.clear();
        }
        lastSampleCounter = sampleCounter;
        
        //Get the class label
        classLabel = Util::stringToInt( parser[i][1] );
        
        //Get the sample data
        j=0;
        n=2;
        while( j != numDimensions ){
            sample[j++] = Util::stringToDouble( parser[i][n] );
            n++;
        }
        
        //Add the sample to the timeseries
        timeseries.push_back( sample );
    }
	//Add the final time series to the dataset
	if( timeseries.getSize() > 0 ){
        if( !addSample(classLabel, timeseries) ){
            warningLog << "loadDatasetFromCSVFile(const string &filename) - Could not add sample " << parser.getRowSize()-1 << " to the dataset!" << endl;
        }
    }
    
    return true;
}
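For reference, the loader above expects each CSV row to be: a time-series counter, a class label, then numDimensions sample values; consecutive rows with the same counter belong to one time series. A hedged sketch of a matching call (the file name and values are illustrative):

//Example rows for a 3-dimensional dataset with two time series:
//  1,1,0.1,0.2,0.3    <- time series 1, class 1
//  1,1,0.2,0.3,0.4
//  2,2,0.9,0.8,0.7    <- counter changed: a new time series, class 2
//  2,2,0.8,0.7,0.6
TimeSeriesClassificationData data;
if( !data.loadDatasetFromCSVFile("timeseries.csv") ){
    std::cout << "Failed to load CSV dataset\n";
}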
Example #27
0
MatrixDouble squareLattice2(int n) {
	MatrixDouble coord;

	//NOTE: the coordinates below are hardcoded for a 5x5 lattice, so n is expected to be 25
	coord.resize(n);

	coord[0].resize(2);
	coord[0][0] = 0; coord[0][1] = 0; 
	coord[22].resize(2);
	coord[22][0] = 1; coord[22][1] = 0; 
	coord[19].resize(2);
	coord[19][0] = 2; coord[19][1] = 0; 
	coord[11].resize(2);
	coord[11][0] = 3; coord[11][1] = 0; 
	coord[8].resize(2);
	coord[8][0] = 4; coord[8][1] = 0; 

	coord[5].resize(2);
	coord[5][0] = 0; coord[5][1] = 1; 
	coord[2].resize(2);
	coord[2][0] = 1; coord[2][1] = 1; 
	coord[24].resize(2);
	coord[24][0] = 2; coord[24][1] = 1; 
	coord[16].resize(2);
	coord[16][0] = 3; coord[16][1] = 1; 
	coord[13].resize(2);
	coord[13][0] = 4; coord[13][1] = 1; 
	
	coord[10].resize(2);
	coord[10][0] = 0; coord[10][1] = 2; 
	coord[7].resize(2);
	coord[7][0] = 1; coord[7][1] = 2; 
	coord[4].resize(2);
	coord[4][0] = 2; coord[4][1] = 2; 
	coord[21].resize(2);
	coord[21][0] = 3; coord[21][1] = 2; 
	coord[18].resize(2);
	coord[18][0] = 4; coord[18][1] = 2; 
	
	coord[15].resize(2);
	coord[15][0] = 0; coord[15][1] = 3; 
	coord[12].resize(2);
	coord[12][0] = 1; coord[12][1] = 3; 
	coord[9].resize(2);
	coord[9][0] = 2; coord[9][1] = 3; 
	coord[1].resize(2);
	coord[1][0] = 3; coord[1][1] = 3; 
	coord[23].resize(2);
	coord[23][0] = 4; coord[23][1] = 3; 
	
	coord[20].resize(2);
	coord[20][0] = 0; coord[20][1] = 4; 
	coord[17].resize(2);
	coord[17][0] = 1; coord[17][1] = 4; 
	coord[14].resize(2);
	coord[14][0] = 2; coord[14][1] = 4; 
	coord[6].resize(2);
	coord[6][0] = 3; coord[6][1] = 4; 
	coord[3].resize(2);
	coord[3][0] = 4; coord[3][1] = 4; 

	return coord;
}
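The function above hardcodes a 5x5 lattice with a scrambled vertex numbering; a generalized row-major sketch (sequential numbering is an assumption here, so it drops that scrambling) could look like this:

#include <cmath>

//Sketch: square lattice with side = sqrt(n), vertices numbered row by row
MatrixDouble squareLatticeN(int n) {
	int side = (int)std::round(std::sqrt((double)n)); //n is assumed to be a perfect square
	MatrixDouble coord;
	coord.resize(n);
	for (int i = 0; i < n; i++) {
		coord[i].resize(2);
		coord[i][0] = i % side; //x coordinate
		coord[i][1] = i / side; //y coordinate
	}
	return coord;
}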
Example #28
0
void rand_covariance(MatrixDouble& cov, double s, int rank) {
	//Fill a (cov.width x rank) matrix with uniform values in [-s,s], then set cov
	//to its covariance, yielding a random covariance of at most the given rank
	MatrixDouble x(cov.width, rank);
	x.rand(-s,s);
	cov.covariance(x);
}
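A hedged usage sketch, reusing the (rows, cols)-style constructor already used inside the function; the dimensions are illustrative:

//Build a random covariance of rank at most 3 for a 5-dimensional problem
MatrixDouble cov(5, 5);
rand_covariance(cov, 1.0, 3);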
Example #29
0
MatrixDouble hexagonalLattice2(int n, int n1) {
  	MatrixDouble coord;

	//NOTE: the coordinates below are hardcoded for a 24-vertex hexagonal lattice (n is expected to be 24); n1 is unused
	coord.resize(n);

	coord[0].resize(2);
	coord[0][0] = 0; coord[0][1] = 0; 
	coord[3].resize(2);
	coord[3][0] = sqrt(3); coord[3][1] = 0; 
	
	coord[15].resize(2);
	coord[15][0] = -sqrt(3)/2.; coord[15][1] = 0.5; 
	coord[19].resize(2);
	coord[19][0] = sqrt(3)/2.; coord[19][1] = 0.5; 
	coord[23].resize(2);
	coord[23][0] = 3*sqrt(3)/2.; coord[23][1] = 0.5; 
	
	coord[21].resize(2);
	coord[21][0] = -sqrt(3)/2.; coord[21][1] = 1.5; 
	coord[13].resize(2);
	coord[13][0] = sqrt(3)/2.; coord[13][1] = 1.5; 
	coord[17].resize(2);
	coord[17][0] = 3*sqrt(3)/2.; coord[17][1] = 1.5; 

	coord[8].resize(2);
	coord[8][0] = -sqrt(3); coord[8][1] = 2; 	
	coord[10].resize(2);
	coord[10][0] = 0; coord[10][1] = 2; 
	coord[7].resize(2);
	coord[7][0] = sqrt(3); coord[7][1] = 2; 
	coord[11].resize(2);
	coord[11][0] = 2*sqrt(3); coord[11][1] = 2;
	
	coord[2].resize(2);
	coord[2][0] = -sqrt(3); coord[2][1] = 3; 	
	coord[4].resize(2);
	coord[4][0] = 0; coord[4][1] = 3; 
	coord[1].resize(2);
	coord[1][0] = sqrt(3); coord[1][1] = 3; 
	coord[5].resize(2);
	coord[5][0] = 2*sqrt(3); coord[5][1] = 3;

	coord[18].resize(2);
	coord[18][0] = -sqrt(3)/2.; coord[18][1] = 3.5; 
	coord[22].resize(2);
	coord[22][0] = sqrt(3)/2.; coord[22][1] = 3.5; 
	coord[14].resize(2);
	coord[14][0] = 3*sqrt(3)/2.; coord[14][1] = 3.5; 
	
	coord[12].resize(2);
	coord[12][0] = -sqrt(3)/2.; coord[12][1] = 4.5; 
	coord[16].resize(2);
	coord[16][0] = sqrt(3)/2.; coord[16][1] = 4.5; 
	coord[20].resize(2);
	coord[20][0] = 3*sqrt(3)/2.; coord[20][1] = 4.5; 
	
	coord[6].resize(2);
	coord[6][0] = 0; coord[6][1] = 5; 
	coord[9].resize(2);
	coord[9][0] = sqrt(3); coord[9][1] = 5;

	return coord;
}
Example #30
0
bool BernoulliRBM::train_(MatrixDouble &data){
    
    const UINT numTrainingSamples = data.getNumRows();
    numInputDimensions = data.getNumCols();
    numOutputDimensions = numHiddenUnits;
    numVisibleUnits = numInputDimensions;
    
    trainingLog << "NumInputDimensions: " << numInputDimensions << endl;
    trainingLog << "NumOutputDimensions: " << numOutputDimensions << endl;
    
    if( randomizeWeightsForTraining ){
    
        //Init the weights matrix
        weightsMatrix.resize(numHiddenUnits, numVisibleUnits);
        
        double a = 1.0 / numVisibleUnits;
        for(UINT i=0; i<numHiddenUnits; i++) {
            for(UINT j=0; j<numVisibleUnits; j++) {
                weightsMatrix[i][j] = rand.getRandomNumberUniform(-a, a);
            }
        }

        //Init the bias units
        visibleLayerBias.resize( numVisibleUnits );
        hiddenLayerBias.resize( numHiddenUnits );
        std::fill(visibleLayerBias.begin(),visibleLayerBias.end(),0);
        std::fill(hiddenLayerBias.begin(),hiddenLayerBias.end(),0);
        
    }else{
        if( weightsMatrix.getNumRows() != numHiddenUnits ){
            errorLog << "train_(MatrixDouble &data) - Weights matrix row size does not match the number of hidden units!" << endl;
            return false;
        }
        if( weightsMatrix.getNumCols() != numVisibleUnits ){
            errorLog << "train_(MatrixDouble &data) - Weights matrix row size does not match the number of visible units!" << endl;
            return false;
        }
        if( visibleLayerBias.size() != numVisibleUnits ){
            errorLog << "train_(MatrixDouble &data) - Visible layer bias size does not match the number of visible units!" << endl;
            return false;
        }
        if( hiddenLayerBias.size() != numHiddenUnits ){
            errorLog << "train_(MatrixDouble &data) - Hidden layer bias size does not match the number of hidden units!" << endl;
            return false;
        }
    }
    
    //Flag that the model has been trained, in case the user wants to save the model during a training iteration using an observer
    trained = true;
    
    //Make sure the data is scaled between [0 1]
    ranges = data.getRanges();
    if( useScaling ){
        for(UINT i=0; i<numTrainingSamples; i++){
            for(UINT j=0; j<numInputDimensions; j++){
                data[i][j] = scale(data[i][j], ranges[j].minValue, ranges[j].maxValue, 0, 1);
            }
        }
    }
    
    const UINT numBatches = (UINT)ceil( numTrainingSamples/double(batchSize) );
    
    //Setup the batch indices
    vector< BatchIndexs > batchIndexs( numBatches );
    UINT startIndex = 0;
    for(UINT i=0; i<numBatches; i++){
        batchIndexs[i].startIndex = startIndex;
        batchIndexs[i].endIndex = startIndex + batchSize;
        
        //Make sure the last batch end index is not larger than the number of training examples
        if( batchIndexs[i].endIndex >= numTrainingSamples ){
            batchIndexs[i].endIndex = numTrainingSamples;
        }
        
        //Get the batch size
        batchIndexs[i].batchSize = batchIndexs[i].endIndex - batchIndexs[i].startIndex;
        
        //Set the start index for the next batch
        startIndex = batchIndexs[i].endIndex;
    }
    
    Timer timer;
    UINT i,j,n,epoch,noChangeCounter = 0;
    double startTime = 0;
    double alpha = learningRate;
    double error = 0;
    double err = 0;
    double delta = 0;
    double lastError = 0;
    vector< UINT > indexList(numTrainingSamples);
    TrainingResult trainingResult;
    MatrixDouble wT( numVisibleUnits, numHiddenUnits );       //Stores a transposed copy of the weights vector
    MatrixDouble vW( numHiddenUnits, numVisibleUnits );       //Stores the weight velocity updates
    MatrixDouble tmpW( numHiddenUnits, numVisibleUnits );     //Stores the weight values that will be used to update the main weights matrix at each batch update
    MatrixDouble v1( batchSize, numVisibleUnits );            //Stores the real batch data during a batch update
    MatrixDouble v2( batchSize, numVisibleUnits );            //Stores the sampled batch data during a batch update
    MatrixDouble h1( batchSize, numHiddenUnits );             //Stores the hidden states given v1 and the current weightsMatrix
    MatrixDouble h2( batchSize, numHiddenUnits );             //Stores the sampled hidden states given v2 and the current weightsMatrix
    MatrixDouble c1( numHiddenUnits, numVisibleUnits );       //Stores h1' * v1
    MatrixDouble c2( numHiddenUnits, numVisibleUnits );       //Stores h2' * v2
    MatrixDouble vDiff( batchSize, numVisibleUnits );         //Stores the difference between v1-v2
    MatrixDouble hDiff( batchSize, numHiddenUnits );          //Stores the difference between h1-h2
    MatrixDouble cDiff( numHiddenUnits, numVisibleUnits );    //Stores the difference between c1-c2
    VectorDouble vDiffSum( numVisibleUnits );                 //Stores the column sum of vDiff
    VectorDouble hDiffSum( numHiddenUnits );                  //Stores the column sum of hDiff
    VectorDouble visibleLayerBiasVelocity( numVisibleUnits ); //Stores the velocity update of the visibleLayerBias
    VectorDouble hiddenLayerBiasVelocity( numHiddenUnits );   //Stores the velocity update of the hiddenLayerBias
    
    //Set all the velocity weights to zero
    vW.setAllValues( 0 );
    std::fill(visibleLayerBiasVelocity.begin(),visibleLayerBiasVelocity.end(),0);
    std::fill(hiddenLayerBiasVelocity.begin(),hiddenLayerBiasVelocity.end(),0);
    
    //Randomize the order that the training samples will be used in
    for(UINT i=0; i<numTrainingSamples; i++) indexList[i] = i;
    if( randomiseTrainingOrder ){
        std::random_shuffle(indexList.begin(), indexList.end());
    }
    
    //Start the main training loop
    timer.start();
    for(epoch=0; epoch<maxNumEpochs; epoch++) {
        startTime = timer.getMilliSeconds();
        error = 0;
        
        //Randomize the batch order
        std::random_shuffle(batchIndexs.begin(),batchIndexs.end());
        
        //Run each of the batch updates
        for(UINT k=0; k<numBatches; k+=batchStepSize){
            
            //Resize the data matrices; they will only be reallocated if the rows/cols differ
            v1.resize( batchIndexs[k].batchSize, numVisibleUnits );
            h1.resize( batchIndexs[k].batchSize, numHiddenUnits );
            v2.resize( batchIndexs[k].batchSize, numVisibleUnits );
            h2.resize( batchIndexs[k].batchSize, numHiddenUnits );
            
            //Setup the data pointers, using data pointers saves a few ms on large matrix updates
            double **w_p = weightsMatrix.getDataPointer();
            double **wT_p = wT.getDataPointer();
            double **vW_p = vW.getDataPointer();
            double **data_p = data.getDataPointer();
            double **v1_p = v1.getDataPointer();
            double **v2_p = v2.getDataPointer();
            double **h1_p = h1.getDataPointer();
            double **h2_p = h2.getDataPointer();
            double *vlb_p = &visibleLayerBias[0];
            double *hlb_p = &hiddenLayerBias[0];
            
            //Get the batch data
            UINT index = 0;
            for(i=batchIndexs[k].startIndex; i<batchIndexs[k].endIndex; i++){
                for(j=0; j<numVisibleUnits; j++){
                    v1_p[index][j] = data_p[ indexList[i] ][j];
                }
                index++;
            }
            
            //Copy a transposed version of the weights matrix; this is used to compute h1 and h2
            for(i=0; i<numHiddenUnits; i++)
                for(j=0; j<numVisibleUnits; j++)
                    wT_p[j][i] = w_p[i][j];
            
            //Compute h1
            h1.multiple(v1, wT);
            for(n=0; n<batchIndexs[k].batchSize; n++){
                for(i=0; i<numHiddenUnits; i++){
                    h1_p[n][i] = sigmoidRandom( h1_p[n][i] + hlb_p[i] );
                }
            }
            
            //Compute v2
            v2.multiple(h1, weightsMatrix);
            for(n=0; n<batchIndexs[k].batchSize; n++){
                for(i=0; i<numVisibleUnits; i++){
                    v2_p[n][i] = sigmoidRandom( v2_p[n][i] + vlb_p[i] );
                }
            }
            
            //Compute h2
            h2.multiple(v2,wT);
            for(n=0; n<batchIndexs[k].batchSize; n++){
                for(i=0; i<numHiddenUnits; i++){
                    h2_p[n][i] = sigmoid( h2_p[n][i] + hlb_p[i] );
                }
            }
            
            //Compute c1, c2 and the difference between v1-v2
            c1.multiple(h1,v1,true);
            c2.multiple(h2,v2,true);
            vDiff.subtract(v1, v2);
            
            //Compute the column sums of vDiff
            for(j=0; j<numVisibleUnits; j++){
                vDiffSum[j] = 0;
                for(i=0; i<batchIndexs[k].batchSize; i++){
                    vDiffSum[j] += vDiff[i][j];
                }
            }
            
            //Compute the difference between h1 and h2
            hDiff.subtract(h1, h2);
            for(j=0; j<numHiddenUnits; j++){
                hDiffSum[j] = 0;
                for(i=0; i<batchIndexs[k].batchSize; i++){
                    hDiffSum[j] += hDiff[i][j];
                }
            }
            
            //Compute the difference between c1 and c2
            cDiff.subtract(c1,c2);
            
            //Update the weight velocities
            for(i=0; i<numHiddenUnits; i++){
                for(j=0; j<numVisibleUnits; j++){
                    vW_p[i][j] = ((momentum * vW_p[i][j]) + (alpha * cDiff[i][j])) / batchIndexs[k].batchSize;
                }
            }
            for(i=0; i<numVisibleUnits; i++){
                visibleLayerBiasVelocity[i] = ((momentum * visibleLayerBiasVelocity[i]) + (alpha * vDiffSum[i])) / batchIndexs[k].batchSize;
            }
            for(i=0; i<numHiddenUnits; i++){
                hiddenLayerBiasVelocity[i] = ((momentum * hiddenLayerBiasVelocity[i]) + (alpha * hDiffSum[i])) / batchIndexs[k].batchSize;
            }
            
            //Update the weights
            weightsMatrix.add( vW );
            
            //Update the bias for the visible layer
            for(i=0; i<numVisibleUnits; i++){
                visibleLayerBias[i] += visibleLayerBiasVelocity[i];
            }
            
            //Update the bias for the hidden layer
            for(i=0; i<numHiddenUnits; i++){
                hiddenLayerBias[i] += hiddenLayerBiasVelocity[i];
            }
            
            //Compute the reconstruction error
            err = 0;
            for(i=0; i<batchIndexs[k].batchSize; i++){
                for(j=0; j<numVisibleUnits; j++){
                    err += SQR( v1[i][j] - v2[i][j] );
                }
            }
            
            error += err / batchIndexs[k].batchSize;
        }
        error /= numBatches;
        delta = lastError - error;
        lastError = error;
        
        trainingLog << "Epoch: " << epoch+1 << "/" << maxNumEpochs;
        trainingLog << " Epoch time: " << (timer.getMilliSeconds()-startTime)/1000.0 << " seconds";
        trainingLog << " Learning rate: " << alpha;
        trainingLog << " Momentum: " << momentum;
        trainingLog << " Average reconstruction error: " << error;
        trainingLog << " Delta: " << delta << endl;
        
        //Update the learning rate
        alpha *= learningRateUpdate;
        
        trainingResult.setClassificationResult(epoch, error, this);
        trainingResults.push_back(trainingResult);
        trainingResultsObserverManager.notifyObservers( trainingResult );
        
        //Check for convergence
        if( fabs(delta) < minChange ){
            if( ++noChangeCounter >= minNumEpochs ){
                trainingLog << "Stopping training. MinChange limit reached!" << endl;
                break;
            }
        }else noChangeCounter = 0;
        
    }
    trainingLog << "Training complete after " << epoch << " epochs. Total training time: " << timer.getMilliSeconds()/1000.0 << " seconds" << endl;
    
    trained = true;
    
    return true;
}
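A minimal training sketch for the RBM above, assuming GRT's usual setters (setNumHiddenUnits, setLearningRate, setMaxNumEpochs); the binary toy data and all hyperparameters are illustrative:

#include <GRT/GRT.h>
#include <iostream>
#include <cstdlib>
using namespace GRT;

int main(){
    //Illustrative data: 200 samples of 16 roughly binary visible units
    Random random;
    MatrixDouble data(200, 16);
    for(UINT i=0; i<200; i++)
        for(UINT j=0; j<16; j++)
            data[i][j] = random.getRandomNumberUniform(0.0, 1.0) > 0.5 ? 1.0 : 0.0;

    BernoulliRBM rbm;
    rbm.setNumHiddenUnits(8);   //learn an 8-unit hidden representation
    rbm.setLearningRate(0.1);
    rbm.setMaxNumEpochs(100);

    if( !rbm.train_(data) ){    //contrastive-divergence training, as above
        std::cout << "RBM training failed\n";
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}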