// Builds an evaluation result directly from pre-computed confusion-matrix
// counts and derives the aggregate detection metrics from them.
DetectorEvaluationResult::DetectorEvaluationResult(size_t truePositives, size_t trueNegatives, size_t falsePositives, size_t falseNegatives) :
	_truePositives(truePositives), _trueNegatives(trueNegatives), _falsePositives(falsePositives), _falseNegatives(falseNegatives) {

	// The three metrics are independent of each other, so the order of these
	// computations does not matter; each reads only the stored counts.
	_accuracy = computeAccuracy(_truePositives, _trueNegatives, _falsePositives, _falseNegatives);
	_recall = computeRecall(_truePositives, _falseNegatives);
	_precision = computePrecision(_truePositives, _falsePositives);
}
// Builds an evaluation result by thresholding a detector voting mask and
// comparing it against the merged ground-truth target masks.
// FIX: _precision, _recall and _accuracy were previously left uninitialized
// whenever ImageUtils::mergeTargetMasks failed; they are now zeroed first so
// the object is always in a defined state.
DetectorEvaluationResult::DetectorEvaluationResult(Mat& votingMask, vector<Mat>& targetMasks, unsigned short votingMaskThreshold) :
_truePositives(0), _trueNegatives(0), _falsePositives(0), _falseNegatives(0){
	// Give the metrics a defined value even if mask merging fails below.
	_precision = 0;
	_recall = 0;
	_accuracy = 0;

	Mat mergedTargetsMask;
	if (ImageUtils::mergeTargetMasks(targetMasks, mergedTargetsMask)) {
		// Fills the four confusion-matrix counters via the out-pointers.
		computeMasksSimilarity(votingMask, mergedTargetsMask, votingMaskThreshold, &_truePositives, &_trueNegatives, &_falsePositives, &_falseNegatives);

		_precision = computePrecision(_truePositives, _falsePositives);
		_recall = computeRecall(_truePositives, _falseNegatives);
		_accuracy = computeAccuracy(_truePositives, _trueNegatives, _falsePositives, _falseNegatives);
	}
}
int main()
{
	// Store images, 784 rows, 10000 columns
	double** images = create2DDoubleArray(IMGSIZE, NUMIMGS);

	// Store layer 1 weights, 200 rows, 784 columns
	double** wL1 = create2DDoubleArray(LAYER1, IMGSIZE);

	// Stores layer 2 weights, 200 rows, 200 columns
	double** wL2 = create2DDoubleArray(LAYER2, LAYER1);

	// Stores layer 3 weights, 10 rows, 200 columns
	double** wL3 = create2DDoubleArray(LAYERFINAL, LAYER2);

	// Stores bias data, two sets of 200 rows, 1 column
	double** bias = create2DDoubleArray(LAYER1, 2);

	// Where the computed probabilities are stored
	double** probabilities = create2DDoubleArray(LAYERFINAL, NUMTEST);

	// Stores solutions
	int solutions[NUMIMGS];


	if(loadData(images, wL1, wL2, wL3, bias, solutions) == -1)
	{
		return -1;
	}

	computeProb(images, wL1, wL2, wL3, bias, &probabilities);

	double accuracy = computeAccuracy(probabilities, solutions);

	printf("Hit Percentage: %3.2lf%%\n", accuracy * 100);

	return 0;
}
Example #4
0
// Trains a one-vs-all softmax regression model for every class in the data
// set, then measures training (and optional validation) accuracy.
// Returns true only when every class model trains and the accuracies can be
// computed; on any failure the trained/converged flags are cleared.
bool Softmax::train_(ClassificationData &trainingData){

    //Wipe any model left over from a previous training run
    clear();

    const unsigned int numSamples = trainingData.getNumSamples();
    const unsigned int numDims = trainingData.getNumDimensions();
    const unsigned int numTrainingClasses = trainingData.getNumClasses();

    if( numSamples == 0 ){
        errorLog << __GRT_LOG__ << " Training data has zero samples!" << std::endl;
        return false;
    }

    //Cache the problem dimensions and size the per-class model storage
    numInputDimensions = numDims;
    numOutputDimensions = numTrainingClasses;
    numClasses = numTrainingClasses;
    models.resize(numTrainingClasses);
    classLabels.resize(numTrainingClasses);
    ranges = trainingData.getRanges();
    ClassificationData validationData;

    //Normalize the inputs to the [0, 1] range when scaling is enabled
    if( useScaling ){
        trainingData.scale(0, 1);
    }

    //Carve off the validation split before training, if requested
    if( useValidationSet ){
        validationData = trainingData.split( 100-validationSetSize );
    }

    //Fit one regression model per class (one-vs-all)
    for(UINT classIndex=0; classIndex<numClasses; classIndex++){

        //Record the label this model is responsible for
        classLabels[classIndex] = trainingData.getClassTracker()[classIndex].classLabel;

        if( !trainSoftmaxModel(classLabels[classIndex],models[classIndex],trainingData) ){
            errorLog << __GRT_LOG__ << " Failed to train model for class: " << classLabels[classIndex] << std::endl;
            return false;
        }
    }

    //All per-class models trained successfully
    trained = true;
    converged = true;

    //Reset the training statistics before measuring them
    trainingSetAccuracy = 0;
    validationSetAccuracy = 0;

    //The data is already scaled at this point (if scaling is on), so disable
    //scaling while testing the model and restore the caller's setting after
    bool scalingState = useScaling;
    useScaling = false;
    if( !computeAccuracy( trainingData, trainingSetAccuracy ) ){
        trained = false;
        converged = false;
        errorLog << __GRT_LOG__ << " Failed to compute training set accuracy! Failed to fully train model!" << std::endl;
        return false;
    }

    if( useValidationSet ){
        if( !computeAccuracy( validationData, validationSetAccuracy ) ){
            trained = false;
            converged = false;
            errorLog << __GRT_LOG__ << " Failed to compute validation set accuracy! Failed to fully train model!" << std::endl;
            return false;
        }
    }

    trainingLog << "Training set accuracy: " << trainingSetAccuracy << std::endl;
    if( useValidationSet ){
        trainingLog << "Validation set accuracy: " << validationSetAccuracy << std::endl;
    }

    //Restore the scaling flag for future predictions
    useScaling = scalingState;

    return trained;
}
Example #5
0
// Memory-mapped variant: image data is streamed through SDRAM, then the
// forward pass runs over it and the classification hit rate plus the elapsed
// compute time are reported.
// FIX: the early error returns after loadImagesSDRAM/loadData previously
// leaked the mmap region and the file descriptor; both are now released on
// every exit path. The void* pointer arithmetic (a GCC extension) is now done
// through char* so it is well-defined standard C++.
int main()
{
	void* virtual_base;
	int fd;
	void* sdram_ptr;

	if(setup(&fd, &virtual_base) != 0)
	{
		return(1);
	}

	// Offset into the mapped region; arithmetic via char* (1-byte units).
	sdram_ptr = (void*)((char*)virtual_base + ((unsigned long)(SDRAM_OFST + 0x00) & (unsigned long)(HW_REGS_MASK)));

	// Store layer 1 weights, 200 rows, 784 columns
	double** wL1 = create2DDoubleArray(LAYER1, IMGSIZE);

	// Stores layer 2 weights, 200 rows, 200 columns
	double** wL2 = create2DDoubleArray(LAYER2, LAYER1);

	// Stores layer 3 weights, 10 rows, 200 columns
	double** wL3 = create2DDoubleArray(LAYERFINAL, LAYER2);

	// Stores bias data, two sets of 200 rows, 1 column
	double** bias = create2DDoubleArray(LAYER1, 2);

	// Where the computed probabilities are stored
	double** probabilities = create2DDoubleArray(LAYERFINAL, NUMTEST);

	// Stores solutions
	int solutions[NUMIMGS];

	printf("Loading images to SDRAM...\n");
	// Loads image data into the SDRAM
	if(loadImagesSDRAM(sdram_ptr) == -1)
	{
		// Release the mapping and descriptor on this error path too.
		munmap(virtual_base, HW_REGS_SPAN);
		close(fd);
		return -1;
	}

	printf("Loading weights, biases and solutions...\n");
	if(loadData(wL1, wL2, wL3, bias, solutions) == -1)
	{
		munmap(virtual_base, HW_REGS_SPAN);
		close(fd);
		return -1;
	}
	clock_t begin, end;

	printf("Computing Probabilities...\n");
	// Computes probabilities for each image, timing only the forward pass
	begin = clock();
	computeProb(sdram_ptr, wL1, wL2, wL3, bias, &probabilities);
	end = clock();
	printf("Computing accuracy...\n");
	// Stores and computes the accuracy, 0 < accuracy < 1
	double accuracy = computeAccuracy(probabilities, solutions);

	printf("Hit Percentage: %3.2lf%%\n", accuracy * 100);
	printf("Time Elapsed: %f\n", (double)(end - begin) / CLOCKS_PER_SEC);

	if(munmap(virtual_base, HW_REGS_SPAN) != 0)
	{
		printf("ERROR: munmap() failed...\n");
		close(fd);
		return(1);
	}
	close(fd);

	return 0;
}
// Convenience overload: converts the dense input/reference matrices into the
// block-sparse layouts expected by the first and last layers, then delegates
// to the block-sparse computeAccuracy overload.
float NeuralNetwork::computeAccuracy(const Matrix& input, const Matrix& reference) const
{
	auto sparseInput = convertToBlockSparseForLayerInput(front(), input);
	auto sparseReference = convertToBlockSparseForLayerOutput(back(), reference);

	return computeAccuracy(sparseInput, sparseReference);
}
Example #7
0
// Trains one minimum-distance (cluster-based) model per class, derives the
// per-class null-rejection thresholds, then measures training (and optional
// validation) accuracy.
// Returns true only when every class model trains and the accuracies can be
// computed; on any failure the trained/converged flags are cleared and false
// is returned.
bool MinDist::train_(ClassificationData &trainingData){
    
    //Clear any previous models
    clear();
    
    const unsigned int M = trainingData.getNumSamples();
    const unsigned int N = trainingData.getNumDimensions();
    const unsigned int K = trainingData.getNumClasses();
    
    if( M == 0 ){
        errorLog << __GRT_LOG__ << " Training data has zero samples!" << std::endl;
        return false;
    }
    
    // Each class model clusters its samples, so there must be more samples
    // than clusters overall.
    if( M <= numClusters ){
        errorLog << __GRT_LOG__ << " There are not enough training samples for the number of clusters. Either reduce the number of clusters or increase the number of training samples!" << std::endl;
        return false;
    }
    
    // Cache the problem dimensions and size the per-class storage.
    numInputDimensions = N;
    numOutputDimensions = K;
    numClasses = K;
    models.resize(K);
    classLabels.resize(K);
    nullRejectionThresholds.resize(K);
    ranges = trainingData.getRanges();
    ClassificationData validationData;
    
    //Scale the training data if needed
    if( useScaling ){
        //Scale the training data between 0 and 1
        trainingData.scale(0, 1);
    }

    // Carve off the validation split before training, if requested.
    if( useValidationSet ){
        validationData = trainingData.split( 100-validationSetSize );
    }
    
    //Train each of the models
    for(UINT k=0; k<numClasses; k++){
        
        trainingLog << "Training model for class: " << trainingData.getClassTracker()[k].classLabel << std::endl;

        //Pass the logging state onto the kmeans algorithm
        models[k].setTrainingLoggingEnabled( this->getTrainingLoggingEnabled() );
            
        //Get the class label for the kth class
        UINT classLabel = trainingData.getClassTracker()[k].classLabel;
        
        //Set the kth class label
        classLabels[k] = classLabel;
        
        //Get all the training data for this class
        ClassificationData classData = trainingData.getClassData(classLabel);
        MatrixFloat data(classData.getNumSamples(),N);
        
        //Copy the training data into a matrix
        for(UINT i=0; i<data.getNumRows(); i++){
            for(UINT j=0; j<data.getNumCols(); j++){
                data[i][j] = classData[i][j];
            }
        }
        
        //Train the model for this class
        models[k].setGamma( nullRejectionCoeff );
        if( !models[k].train(classLabel,data,numClusters,minChange,maxNumEpochs) ){
            errorLog << __GRT_LOG__ << " Failed to train model for class: " << classLabel;
            errorLog << ". This is might be because this class does not have enough training samples! You should reduce the number of clusters or increase the number of training samples for this class." << std::endl;
            // Discard all partially-trained models so the classifier is not
            // left in a half-trained state.
            models.clear();
            return false;
        }
            
        //Set the null rejection threshold
        nullRejectionThresholds[k] = models[k].getRejectionThreshold();
    }

    //Flag that the models have been trained
    trained = true;
    converged = true;

    //Compute the final training stats
    trainingSetAccuracy = 0;
    validationSetAccuracy = 0;

    //If scaling was on, then the data will already be scaled, so turn it off temporially so we can test the model accuracy
    bool scalingState = useScaling;
    useScaling = false;
    if( !computeAccuracy( trainingData, trainingSetAccuracy ) ){
        trained = false;
        converged = false;
        errorLog << __GRT_LOG__ << " Failed to compute training set accuracy! Failed to fully train model!" << std::endl;
        return false;
    }
    
    if( useValidationSet ){
        if( !computeAccuracy( validationData, validationSetAccuracy ) ){
            trained = false;
            converged = false;
            errorLog << __GRT_LOG__ << " Failed to compute validation set accuracy! Failed to fully train model!" << std::endl;
            return false;
        }
        
    }

    trainingLog << "Training set accuracy: " << trainingSetAccuracy << std::endl;

    if( useValidationSet ){
        trainingLog << "Validation set accuracy: " << validationSetAccuracy << std::endl;
    }

    //Reset the scaling state for future prediction
    useScaling = scalingState;

    return trained;
}