Example #1
// Write this event's descriptor matrix under the "<id>_desc" key.
void EventType::writeDescriptors(cv::FileStorage &outfile) {
    if(outfile.isOpened()) {
        outfile << getId() + "_desc" << getDescriptors();
    } else {
        throw runtime_error("File is not open for writing");
    }
}
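For context, a minimal sketch of reading such a matrix back with cv::FileStorage; readDescriptors, the path, and the id are hypothetical, and only the "<id>_desc" key convention comes from the code above:

#include <opencv2/core/core.hpp>
#include <stdexcept>
#include <string>

// Read a descriptor matrix back by its "<id>_desc" key.
cv::Mat readDescriptors(const std::string &path, const std::string &id) {
    cv::FileStorage infile(path, cv::FileStorage::READ);
    if(!infile.isOpened()) {
        throw std::runtime_error("File is not open for reading");
    }
    cv::Mat desc;
    infile[id + "_desc"] >> desc;  // key mirrors getId() + "_desc"
    return desc;
}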
Example #2
int main(int argc, char *argv[])
{
    int time = getTimeMill();
    QCoreApplication a(argc, argv);
    
    QString dirPath = (argc > 1) ? a.arguments()[1] : 
            a.applicationDirPath();
    QString patternPath = (argc > 2) ? a.arguments()[2] :
            QString("input.jpg");
    QImage pattern(patternPath);
    if (pattern.isNull()) {
        pattern = QImage("input.jpg");
    }
    if (pattern.isNull())
        cout << "No pattern image." << endl;
    else {
        gpattern = GImage(pattern);
        GPyramid pyr(gpattern, 1.6f, 0.5f, 3);
        poivec p = getDOGDetection(pyr);
        p = calculateOrientations(pyr, p);
        pdescs = getDescriptors(pyr, p);
        processDir(dirPath);
    }
    
    time = getTimeMill() - time;
    cout << "Completed in " << time << "ms." << endl;
    return 0;
//    return a.exec();
}
Example #3
/**
 * Calculate dense descriptors for multiple images.
 *
 * @param images	The source images
 * @param xyStep	Dense xy step size
 *
 * @return Vector of descriptor matrices, one per image
 */
vector<Mat> Extractor::getDescriptors(vector<Mat> images, int xyStep)
{
	vector<Mat> sifts;

	for (size_t i = 0; i < images.size(); i++)
	{
		sifts.push_back(getDescriptors(images[i], xyStep));
	}

	return sifts;
}
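For intuition about xyStep: dense extraction samples descriptors on a regular grid rather than at detected interest points. A self-contained sketch of such a grid (illustrative only; the source's single-image getDescriptors overload is not shown):

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <vector>

// Place keypoints on a regular grid with the given step; a dense extractor
// then computes one descriptor per grid point instead of per detected point.
std::vector<cv::KeyPoint> denseGrid(const cv::Mat &img, int xyStep, float size = 8.f)
{
	std::vector<cv::KeyPoint> grid;
	for (int y = xyStep / 2; y < img.rows; y += xyStep)
		for (int x = xyStep / 2; x < img.cols; x += xyStep)
			grid.push_back(cv::KeyPoint((float)x, (float)y, size));
	return grid;
}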
Example #4
void Faze::computePupil(int mode) {
	assert(mode == MODE_PUPIL_SP || mode == MODE_PUPIL_CDF);

	std::vector<cv::Point> leftEyePoints = getDescriptors(INDEX_LEFT_EYE);
	cv::Rect rectLeftEye = cv::boundingRect(leftEyePoints);
	cv::Mat roiLeftEye = imageGray(rectLeftEye);
	preprocessROI(roiLeftEye);

	std::vector<cv::Point> rightEyePoints = getDescriptors(INDEX_RIGHT_EYE);
	cv::Rect rectRightEye = cv::boundingRect(rightEyePoints);
	cv::Mat roiRightEye = imageGray(rectRightEye);
	preprocessROI(roiRightEye);

	if(mode == MODE_PUPIL_SP) {
		descriptors.push_back(get_pupil_coordinates(roiLeftEye,rectLeftEye));
		descriptors.push_back(get_pupil_coordinates(roiRightEye,rectRightEye));
	}
	else {
		descriptors.push_back(computePupilCDF(roiLeftEye));
		descriptors.push_back(computePupilCDF(roiRightEye));
	}
}
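Neither get_pupil_coordinates nor computePupilCDF is shown here. For the CDF variant, a common approach is to threshold the darkest fraction of the eye ROI via the grayscale histogram's cumulative distribution and take the centroid of the dark pixels; a hedged, self-contained sketch of that idea (not the source's implementation):

#include <opencv2/core/core.hpp>

// Estimate a pupil center from an 8-bit grayscale eye ROI: find the gray
// level where the CDF reaches `fraction`, then return the centroid of all
// pixels at or below that threshold.
cv::Point pupilByCDF(const cv::Mat &roiGray, float fraction = 0.05f)
{
	int hist[256] = {0};
	for (int y = 0; y < roiGray.rows; ++y)
		for (int x = 0; x < roiGray.cols; ++x)
			hist[roiGray.at<uchar>(y, x)]++;
	int total = roiGray.rows * roiGray.cols, cum = 0, thresh = 0;
	for (int v = 0; v < 256; ++v) {
		cum += hist[v];
		if (cum >= total * fraction) { thresh = v; break; }
	}
	long sx = 0, sy = 0, n = 0;
	for (int y = 0; y < roiGray.rows; ++y)
		for (int x = 0; x < roiGray.cols; ++x)
			if (roiGray.at<uchar>(y, x) <= thresh) { sx += x; sy += y; ++n; }
	return n ? cv::Point(int(sx / n), int(sy / n))
	         : cv::Point(roiGray.cols / 2, roiGray.rows / 2);
}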
Example #5
void processFile(const QString &path) {
    QImage img(path);
    if (img.isNull())
        return;
    GImage gimg = GImage(img);
    GPyramid pyr(gimg, 1.6f, 0.5f, 3);
    auto p = getDOGDetection(pyr);
    p = calculateOrientations(pyr, p);
    auto descs = getDescriptors(pyr, p);
    auto m = getMatchingPOIs(pdescs, descs, 
                             numeric_limits<float>::max());
    int k = getHough(m.first, m.second, gimg.width, gimg.height, 
                     1e-3f, 1e3f, 100, 100, 22, 10).second;
    if (k >= 4)
        cout << path.toStdString() << endl;
}
Example #6
// Read images from a list of file names and, for each successfully read image, extract descriptors for testing
void annTrain::readImagesToTest(vec_iter begin, vec_iter end)
{
    for (auto it = begin; it != end; ++it)
    {
        std::string filename = *it;
        std::cout << "Reading image " << filename << "..." << std::endl;
        cv::Mat img = cv::imread(filename, 0);
        if (img.empty())
        {
            std::cerr << "WARNING: Could not read image." << std::endl;
            continue;
        }
        std::string classname = getClassName(filename);
        cv::Mat descriptors = getDescriptors(img);
        processClassAndDescForTest(classname, descriptors);
    }
}
Example #7
void EventType::writeForSVM(ofstream &outfile, const string &label, bool add_keypoints) {
    cv::Mat desc = getDescriptors();
    vector<cv::Point2f> points = getKeypoints();
    if(outfile.is_open()) {
        for(int i=0; i<desc.rows; i++) {
            outfile << label << " ";
            for(int j=0; j<desc.cols; j++) {
                outfile << j+1 << ":" << desc.at<float>(i, j) << " ";
            }
            if(add_keypoints) {
                outfile << desc.cols+1 << ":" << points.at(i).x << " ";
                outfile << desc.cols+2 << ":" << points.at(i).y << " ";
            }
            outfile << endl;
        }
    } else {
        throw runtime_error("File is not open for writing");
    }
}
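Each descriptor row becomes one line in the sparse "label index:value" format read by libsvm-style tools, with feature indices starting at 1; when add_keypoints is set, the keypoint's x and y coordinates are appended as two extra features. For a 3-column descriptor, an output line would look like (values hypothetical):

positive 1:0.12 2:0.5 3:0.33 4:101 5:57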
Example #8
const IPropertyDescriptor& getDescriptor(uint32 type, uint32 name_hash)
{
	Array<IPropertyDescriptor*>& props = getDescriptors(type);
	for (int i = 0; i < props.size(); ++i)
	{
		if (props[i]->getNameHash() == name_hash)
		{
			return *props[i];
		}
		auto& children = props[i]->getChildren();
		for (int j = 0; j < children.size(); ++j)
		{
			if (children[j]->getNameHash() == name_hash)
			{
				return *children[j];
			}
		}
	}
	ASSERT(false);
	return *props[0];
}
Example #9
const IPropertyDescriptor* getDescriptor(ComponentType type, uint32 name_hash)
{
	Array<IPropertyDescriptor*>& props = getDescriptors(type);
	for (int i = 0; i < props.size(); ++i)
	{
		if (props[i]->getNameHash() == name_hash)
		{
			return props[i];
		}
		if (props[i]->getType() == IPropertyDescriptor::ARRAY)
		{
			auto* array_desc = static_cast<IArrayDescriptor*>(props[i]);
			for (auto* child : array_desc->getChildren())
			{
				if (child->getNameHash() == name_hash)
				{
					return child;
				}
			}
		}
	}
	return nullptr;
}
Example #10
Matching::Matching(char *img1, char *img2, double thr):threshold(thr){
    image1 = cv::imread(img1, CV_LOAD_IMAGE_UNCHANGED);
    image2 = cv::imread(img2, CV_LOAD_IMAGE_UNCHANGED);
    // Image identity: strip the directory, then strip a trailing "l.jpg"
    // (keeping the base name) or a plain ".jpg".
    std::string str = std::string(img1);
    //remove directory
    size_t lastSlash = str.find_last_of("/");
    if(lastSlash != std::string::npos){
        str.replace(str.begin(), str.begin() + lastSlash + 1, "");
    }
    // Test the suffix explicitly: find_last_of("l.") matches any single
    // 'l' or '.' character, not the substring "l.".
    if(str.size() >= 5 && str.compare(str.size() - 5, 5, "l.jpg") == 0){
        imageIdentify = str.replace(str.end() - 5, str.end(), ""); //remove l.jpg
    }else{
        imageIdentify = str.replace(str.end() - 4, str.end(), ""); //remove .jpg
    }
    debug(imageIdentify);
    writeRansac.open(OUTPUT_DIR + RANSAC + TEXT_OUTPUT, std::ios::out | std::ios::app);
    writeHeuristic.open(OUTPUT_DIR + HEURISTIC + TEXT_OUTPUT, std::ios::out | std::ios::app);

    getDescriptors();
}
Example #11
void add(const char* component_type, IPropertyDescriptor* descriptor)
{
	getDescriptors(crc32(component_type)).push(descriptor);
}
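getDescriptors(crc32(component_type)) above keys each descriptor list by a hash of the component type name. A self-contained sketch of the same registry pattern with standard containers (the names here are illustrative, not the engine's API):

#include <cstdint>
#include <unordered_map>
#include <vector>

struct Descriptor;  // stand-in for IPropertyDescriptor

// Map a type-name hash to that type's descriptor list, mirroring the
// crc32-keyed lookup behind getDescriptors()/add() above.
std::unordered_map<uint32_t, std::vector<Descriptor*>>& registry()
{
	static std::unordered_map<uint32_t, std::vector<Descriptor*>> r;
	return r;
}

void addDescriptor(uint32_t type_hash, Descriptor* desc)
{
	registry()[type_hash].push_back(desc);
}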
Example #12
GPU_TEST_P(HOG, GetDescriptors)
{
    // Load image (e.g. train data, composed from windows)
    cv::Mat img_rgb = readImage("hog/train_data.png");
    ASSERT_FALSE(img_rgb.empty());

    // Convert to C4
    cv::Mat img;
    cv::cvtColor(img_rgb, img, CV_BGR2BGRA);

    cv::gpu::GpuMat d_img(img);

    // Convert train images into feature vectors (train table)
    cv::gpu::GpuMat descriptors, descriptors_by_cols;
    getDescriptors(d_img, win_size, descriptors, DESCR_FORMAT_ROW_BY_ROW);
    getDescriptors(d_img, win_size, descriptors_by_cols, DESCR_FORMAT_COL_BY_COL);

    // Check size of the result train table
    wins_per_img_x = 3;
    wins_per_img_y = 2;
    blocks_per_win_x = 7;
    blocks_per_win_y = 15;
    block_hist_size = 36;
    cv::Size descr_size_expected = cv::Size(blocks_per_win_x * blocks_per_win_y * block_hist_size,
                                            wins_per_img_x * wins_per_img_y);
    ASSERT_EQ(descr_size_expected, descriptors.size());

    // Check both formats of output descriptors are handled correctly
    cv::Mat dr(descriptors);
    cv::Mat dc(descriptors_by_cols);
    for (int i = 0; i < wins_per_img_x * wins_per_img_y; ++i)
    {
        const float* l = dr.rowRange(i, i + 1).ptr<float>();
        const float* r = dc.rowRange(i, i + 1).ptr<float>();
        for (int y = 0; y < blocks_per_win_y; ++y)
            for (int x = 0; x < blocks_per_win_x; ++x)
                for (int k = 0; k < block_hist_size; ++k)
                    ASSERT_EQ(l[(y * blocks_per_win_x + x) * block_hist_size + k],
                              r[(x * blocks_per_win_y + y) * block_hist_size + k]);
    }

    /* Now we want to extract the same feature vectors, but from single images. NOTE: the results
    will differ due to border-value interpolation. Using many small images is slower; here we
    don't call getDescriptors and use computeBlockHistograms instead. computeBlockHistograms
    works well, as can be checked in the gpu_hog sample. */

    img_rgb = readImage("hog/positive1.png");
    ASSERT_TRUE(!img_rgb.empty());
    cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
    computeBlockHistograms(cv::gpu::GpuMat(img));
    // Everything is fine with interpolation for left top subimage
    ASSERT_EQ(0.0, cv::norm((cv::Mat)block_hists, (cv::Mat)descriptors.rowRange(0, 1)));

    img_rgb = readImage("hog/positive2.png");
    ASSERT_TRUE(!img_rgb.empty());
    cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
    computeBlockHistograms(cv::gpu::GpuMat(img));
    compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(1, 2)));

    img_rgb = readImage("hog/negative1.png");
    ASSERT_TRUE(!img_rgb.empty());
    cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
    computeBlockHistograms(cv::gpu::GpuMat(img));
    compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(2, 3)));

    img_rgb = readImage("hog/negative2.png");
    ASSERT_TRUE(!img_rgb.empty());
    cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
    computeBlockHistograms(cv::gpu::GpuMat(img));
    compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(3, 4)));

    img_rgb = readImage("hog/positive3.png");
    ASSERT_TRUE(!img_rgb.empty());
    cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
    computeBlockHistograms(cv::gpu::GpuMat(img));
    compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(4, 5)));

    img_rgb = readImage("hog/negative3.png");
    ASSERT_TRUE(!img_rgb.empty());
    cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
    computeBlockHistograms(cv::gpu::GpuMat(img));
    compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(5, 6)));
}
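The assertion in the nested loops above pins down the two output layouts: for block (x, y) inside a window, ROW_BY_ROW and COL_BY_COL differ only in how the block index is linearized before scaling by the histogram size. A small sketch of the two mappings:

// Offset of block (x, y) when a window's blocks are stored row by row.
int rowByRowOffset(int x, int y, int blocksPerWinX, int blockHistSize)
{
    return (y * blocksPerWinX + x) * blockHistSize;
}

// Offset of the same block when the blocks are stored column by column.
int colByColOffset(int x, int y, int blocksPerWinY, int blockHistSize)
{
    return (x * blocksPerWinY + y) * blockHistSize;
}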
Example #13
int annTrain::train(std::string imagesDir, int networkInputSize, float testRatio)
{

    std::cout << "Reading training set..." << std::endl;
    uint64 start = ofGetElapsedTimeMillis();
    std::vector<std::string> files = getFilesInDirectory(imagesDir);
    // Shuffle so the train/test split below is random.
    std::shuffle(files.begin(), files.end(), std::mt19937(std::random_device()()));
    
    cv::Mat img;
    
    // Train only on the files before the split point; the remainder is
    // read by readImagesToTest below, so it must not be trained on here.
    auto trainEnd = files.begin() + (size_t)(files.size() * testRatio);
    for (auto it = files.begin(); it != trainEnd; ++it)
    {
        std::string filename = *it;
        //std::cout << "Reading image " << filename << "..." << std::endl;
        img = cv::imread(filename, 0);

        if (img.empty())
        {
            std::cerr << "WARNING: Could not read image." << std::endl;
            continue;
        }
        std::string classname = getClassName(filename);
        cv::Mat descriptors = getDescriptors(img);
        processClassAndDesc(classname, descriptors);
    }
    
    std::cout << " Seconds : " << (ofGetElapsedTimeMillis() - start) / 1000.0 << std::endl;
    
    std::cout << "Creating vocabulary..." << std::endl;
    start = ofGetElapsedTimeMillis();
    cv::Mat labels;
    cv::Mat vocabulary;
    // Use k-means to find k centroids (the words of our vocabulary)
    cv::kmeans(descriptorsSet, networkInputSize, labels, cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 10, 0.01), 1, cv::KMEANS_PP_CENTERS, vocabulary);
    // No need to keep it on memory anymore
    descriptorsSet.release();
    std::cout << " Seconds : " << (ofGetElapsedTimeMillis() - start) / 1000.0 << std::endl;
    
    // Convert the set of local features for each image into a single
    // descriptor using the bag-of-words technique
    std::cout << "Getting histograms of visual words..." << std::endl;
    int* ptrLabels = (int*)(labels.data);
    int size = labels.rows * labels.cols;
    for (int i = 0; i < size; i++)
    {
        int label = *ptrLabels++;
        ImageData* data = descriptorsMetadata[i];
        data->bowFeatures.at<float>(label)++;
    }
    
    // Fill the matrices used by the neural network
    std::cout << "Preparing neural network..." << std::endl;
    std::set<ImageData*> uniqueMetadata(descriptorsMetadata.begin(), descriptorsMetadata.end());
    for (auto it = uniqueMetadata.begin(); it != uniqueMetadata.end(); ++it)
    {
        ImageData* data = *it;
        cv::Mat normalizedHist;
        cv::normalize(data->bowFeatures, normalizedHist, 0, data->bowFeatures.rows, cv::NORM_MINMAX, -1, cv::Mat());
        trainSamples.push_back(normalizedHist);
        trainResponses.push_back(getClassCode(classes, data->classname));
        delete *it; // free the per-image metadata
    }
    descriptorsMetadata.clear();
    
    // Training neural network
    std::cout << "Training neural network..." << std::endl;
    start = ofGetElapsedTimeMillis();
    mlp = getTrainedNeuralNetwork(trainSamples, trainResponses);
    std::cout << " Seconds : " << (ofGetElapsedTimeMillis() - start) / 1000.0 << std::endl;
    
    // We can clear memory now
    trainSamples.release();
    trainResponses.release();
    
    // Train FLANN
    std::cout << "Training FLANN..." << std::endl;
    start = ofGetElapsedTimeMillis();
    
    flann = cv::Ptr<cv::FlannBasedMatcher>(new cv::FlannBasedMatcher());
    
    flann->add(vocabulary);
    flann->train();
    std::cout << " Seconds : " << (ofGetElapsedTimeMillis() - start) / 1000.0 << std::endl;
    
    // Reading test set
    std::cout << "Reading test set..." << std::endl;
    start = ofGetElapsedTimeMillis();
    readImagesToTest(files.begin() + (size_t)(files.size() * testRatio), files.end());
    std::cout << " Seconds : " << (ofGetElapsedTimeMillis() - start) / 1000.0 << std::endl;
    
    // Get confusion matrix of the test set
    std::vector<std::vector<int> > confusionMatrix = getConfusionMatrix();
    
    // how accurate is our model
    std::cout << "Confusion matrix " << std::endl;
    printConfusionMatrix(confusionMatrix, classes);
    std::cout << "Accuracy " << getAccuracy(confusionMatrix) << std::endl;
    
    // now save everything
    std::cout << "saving models" << std::endl;
    saveModels(vocabulary, classes);
    
    return 0;
}
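At prediction time, the FLANN matcher trained above maps each local descriptor to its nearest vocabulary word. A hedged sketch of building one image's bag-of-words histogram that way (getBOWFeatures is a hypothetical helper, not shown in the source):

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <vector>

// Build a visual-word histogram for one image by matching each of its
// local descriptors to the nearest vocabulary centroid.
cv::Mat getBOWFeatures(cv::FlannBasedMatcher &flann, const cv::Mat &descriptors,
                       int vocabularySize)
{
    cv::Mat histogram = cv::Mat::zeros(cv::Size(vocabularySize, 1), CV_32F);
    std::vector<cv::DMatch> matches;
    flann.match(descriptors, matches);
    for (size_t i = 0; i < matches.size(); i++)
    {
        histogram.at<float>(matches[i].trainIdx)++;  // nearest visual word
    }
    return histogram;
}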