Example #1
TEST(KMajority, Regression) {

	std::vector<std::string> filenames;
	filenames.push_back("brief_0.bin");

	vlr::Mat descriptors(filenames);

	cv::Mat centroids;
	std::vector<int> labels;

	double mytime = cv::getTickCount();

	clustering::kmajority(16, 100, descriptors, centroids, labels);

	mytime = ((double) cv::getTickCount() - mytime) / cv::getTickFrequency()
			* 1000;

	printf("Clustered [%d] points into [%d] clusters in [%lf] ms\n",
			descriptors.rows, centroids.rows, mytime);

	for (int j = 0; j < centroids.rows; ++j) {
		printf("   Cluster %d:\n", j + 1);
		FunctionUtils::printDescriptors(centroids.row(j));
	}

	for (int i = 0; i < descriptors.rows; ++i) {
		printf("   Data point [%d] was assigned to cluster [%d]\n", i,
				labels[i]);
	}

	EXPECT_FALSE(centroids.empty());
	EXPECT_FALSE(labels.empty());
}
Example #2
TEST(KMajority, Clustering) {

	std::vector<std::string> filenames;
	filenames.push_back("brief_0.bin");

	vlr::Mat descriptors(filenames);

	vlr::KMajorityParams params;
	params["num.clusters"] = 10;
	params["max.iterations"] = 10;

	vlr::KMajority bofModel(descriptors, params);

	bofModel.build();

	EXPECT_FALSE(bofModel.getCentroids().empty());

	EXPECT_TRUE(bofModel.getCentroids().rows == 10);

	EXPECT_TRUE(descriptors.rows >= 0
			&& (size_t) descriptors.rows == bofModel.getClusterAssignments().size());

	// Check that every data point has been assigned to a cluster
	int cumRes = 0;
	for (int k = 0; k < int(bofModel.getClusterCounts().size()); ++k) {
		cumRes = cumRes + bofModel.getClusterCounts().at(k);
	}

	EXPECT_TRUE(cumRes == descriptors.rows);

}
Example #3
  void mitk::CoreExtActivator::StartInputDeviceModules()
  {
    IInputDeviceRegistry::Pointer inputDeviceRegistry(new mitk::InputDeviceRegistry());
    berry::Platform::GetServiceRegistry().RegisterService(
          mitk::CoreExtConstants::INPUTDEVICE_SERVICE,
          inputDeviceRegistry);

    // Gets the last setting of the preferences; if a device was selected,
    // it will still be activated after a restart
    berry::IPreferencesService::Pointer prefService = berry::Platform::GetServiceRegistry()
        .GetServiceById<berry::IPreferencesService>(berry::IPreferencesService::ID);
    berry::IPreferences::Pointer extPreferencesNode =
        prefService->GetSystemPreferences()->Node(CoreExtConstants::INPUTDEVICE_PREFERENCES);

    // Initializes the modules
    std::vector<IInputDeviceDescriptor::Pointer> descriptors(inputDeviceRegistry->GetInputDevices());
    for (std::vector<IInputDeviceDescriptor::Pointer>::const_iterator it = descriptors.begin();
         it != descriptors.end(); ++it)
    {
      if (extPreferencesNode->GetBool((*it)->GetID(), false))
      {
        IInputDevice::Pointer temp = (*it)->CreateInputDevice();
        temp->RegisterInputDevice();
      }
    }
  } // end method StartInputDeviceModules
void AffineAdaptedFeature2D::operator()(InputArray _image, InputArray _mask,
                                        vector<KeyPoint>& _keypoints,
                                        OutputArray _descriptors,
                                        bool useProvidedKeypoints) const
{
    Mat image = _image.getMat();
    Mat mask = _mask.getMat();

    CV_Assert(useProvidedKeypoints == false);
    CV_Assert(!affineTransformParams.empty());

    vector<vector<KeyPoint> > keypoints(affineTransformParams.size());
    vector<Mat> descriptors(affineTransformParams.size());

#pragma omp parallel for
    for(int paramsIndex = 0; paramsIndex < static_cast<int>(affineTransformParams.size()); paramsIndex++)
    {
        const Vec2f& params = affineTransformParams[paramsIndex];
        const float tilt = params[0];
        const float phi = params[1];

        if(tilt == 1.f) // no tilt: process the original image directly
        {
            detectAndComputeImpl(image, mask, keypoints[paramsIndex], descriptors[paramsIndex]);
        }
        else
        {
            Mat transformedImage, transformedMask;
            Mat Ainv = affineSkew(image, mask, tilt, phi, transformedImage, transformedMask);

            detectAndComputeImpl(transformedImage, transformedMask, keypoints[paramsIndex], descriptors[paramsIndex]);

            // correct keypoints coordinates
            CV_Assert(Ainv.type() == CV_32FC1);
            const float* Ainv_ptr = Ainv.ptr<const float>();
            for(size_t kpIndex = 0; kpIndex < keypoints[paramsIndex].size(); kpIndex++)
            {
                KeyPoint& kp = keypoints[paramsIndex][kpIndex];
                float tx = Ainv_ptr[0] * kp.pt.x + Ainv_ptr[1] * kp.pt.y + Ainv_ptr[2];
                float ty = Ainv_ptr[3] * kp.pt.x + Ainv_ptr[4] * kp.pt.y + Ainv_ptr[5];
                kp.pt.x = tx;
                kp.pt.y = ty;
            }
        }
    }

    // copy keypoints and descriptors to the output
    _keypoints.clear();
    Mat allDescriptors;
    for(size_t paramsIndex = 0; paramsIndex < affineTransformParams.size(); paramsIndex++)
    {
        _keypoints.insert(_keypoints.end(), keypoints[paramsIndex].begin(), keypoints[paramsIndex].end());
        allDescriptors.push_back(descriptors[paramsIndex]);
    }

    _descriptors.create(allDescriptors.size(), allDescriptors.type());
    Mat _descriptorsMat = _descriptors.getMat();
    allDescriptors.copyTo(_descriptorsMat);
}
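The operator above assumes affineTransformParams has already been filled with (tilt, phi) pairs, which this snippet does not show. A minimal sketch of the usual ASIFT-style sampling grid such a member could be populated with follows; the helper name and the sampling constants (tilt = 2^(k/2), rotation step 72/tilt degrees) are illustrative assumptions, not part of the original class.

// Sketch: build a (tilt, phi) grid like the one consumed by AffineAdaptedFeature2D above.
// Assumes the standard ASIFT sampling scheme; tune tiltLevels for speed vs. coverage.
#include <opencv2/core/core.hpp>
#include <cmath>
#include <vector>

static std::vector<cv::Vec2f> makeAffineTransformParams(int tiltLevels = 6)
{
    std::vector<cv::Vec2f> params;
    params.push_back(cv::Vec2f(1.f, 0.f)); // tilt == 1: original image, no rotation
    for (int k = 1; k < tiltLevels; ++k)
    {
        float tilt = std::pow(2.f, 0.5f * k);
        for (float phi = 0.f; phi < 180.f; phi += 72.f / tilt)
            params.push_back(cv::Vec2f(tilt, phi));
    }
    return params;
}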
Example #5
int BundlerMatcher::extractSiftFeature(int fileIndex)
{
    std::stringstream filepath;
    filepath << mInputPath << mFilenames[fileIndex];

    std::string tmp = filepath.str();
    char* filename = &tmp[0];
    bool extracted = true;

    unsigned int imgId = 0;
    ilGenImages(1, &imgId);
    ilBindImage(imgId);
    int nbFeatureFound = -1;

    if(ilLoadImage(filename))
    {
        int w = ilGetInteger(IL_IMAGE_WIDTH);
        int h = ilGetInteger(IL_IMAGE_HEIGHT);
        int format = ilGetInteger(IL_IMAGE_FORMAT);

        if (mSift->RunSIFT(w, h, ilGetData(), format, GL_UNSIGNED_BYTE))
        {
            int num = mSift->GetFeatureNum();

            SiftKeyDescriptors descriptors(128*num);
            SiftKeyPoints keys(num);

            mSift->GetFeatureVector(&keys[0], &descriptors[0]);

            //Save Feature in RAM
            mFeatureInfos.push_back(FeatureInfo(w, h, keys, descriptors));

            nbFeatureFound = num;
        }
        else
        {
            extracted = false;
        }
    }
    else
    {
        extracted = false;
    }

    ilDeleteImages(1, &imgId);

    if (!extracted)
    {
        std::cout << "Error while reading : " <<filename <<std::endl;
    }

    return nbFeatureFound;
}
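SiftKeyPoints and SiftKeyDescriptors are not defined in this snippet. Given that SiftGPU::GetFeatureVector takes a SiftKeypoint array and a float array with 128 values per feature, plausible definitions are sketched below as an assumption; the header name is the one commonly shipped with the SiftGPU library.

// Sketch: assumed typedefs behind the containers used in extractSiftFeature above.
#include <vector>
#include <SiftGPU.h>  // assumed header for the SiftGPU library

typedef std::vector<SiftGPU::SiftKeypoint> SiftKeyPoints;      // x, y, scale, orientation per feature
typedef std::vector<float>                 SiftKeyDescriptors; // 128 floats per feature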
std::vector<cv::Mat> SiftMatcher::compute(Keypoints& keypoints)
{
  std::vector<cv::Mat> descriptors(keypoints.size());
  cv::SiftDescriptorExtractor extractor;

  for (size_t i = 0; i < keypoints.size(); ++i)
  {
    extractor.compute(m_images[i], keypoints[i], descriptors[i]);
  }

  return descriptors;
}
Example #7
int
main(int argc, char** argv)
{
	// Object for storing the point cloud.
	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
	// Object for storing the normals.
	pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
	// Object for storing the SHOT descriptors for each point.
	pcl::PointCloud<pcl::SHOT352>::Ptr descriptors(new pcl::PointCloud<pcl::SHOT352>());
 
	// Read a PCD file from disk (the filename is expected as the first argument).
	if (argc < 2 || pcl::io::loadPCDFile<pcl::PointXYZ>(argv[1], *cloud) != 0)
	{
		return -1;
	}
 
	// Note: you would usually perform downsampling now. It has been omitted here
	// for simplicity, but be aware that computation can take a long time.
 
	// Estimate the normals.
	pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normalEstimation;
	normalEstimation.setInputCloud(cloud);
	normalEstimation.setRadiusSearch(10);
	pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree(new pcl::search::KdTree<pcl::PointXYZ>);
	normalEstimation.setSearchMethod(kdtree);
	normalEstimation.compute(*normals);
 
	// SHOT estimation object.
	pcl::SHOTEstimation<pcl::PointXYZ, pcl::Normal, pcl::SHOT352> shot;
	shot.setInputCloud(cloud);
	shot.setInputNormals(normals);
	// The radius that defines which of the keypoint's neighbors are described.
	// If too large, there may be clutter, and if too small, not enough points may be found.
	shot.setRadiusSearch(2);
 
	shot.compute(*descriptors);
  
	// Derive the output filename from the input: "SHOT_" + basename.
	std::string ss_temp;
	ss_temp.append(argv[1]);
	std::size_t found = ss_temp.find_last_of("/\\");
	ss_temp = ss_temp.substr(found + 1);
	std::string ss = "SHOT_";
	ss.append(ss_temp);
	std::cout << ss << '\n';

	pcl::io::savePCDFileASCII(ss, *descriptors);

	return 0;
}
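The example above notes that downsampling has been omitted. A minimal sketch of that step using pcl::VoxelGrid is given below; the leaf size is a placeholder and must be tuned to the units and density of the input cloud.

// Sketch: voxel-grid downsampling step that the example above omits.
// The 0.01 leaf size is a placeholder; choose it relative to the cloud's scale.
#include <pcl/filters/voxel_grid.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

void downsample(const pcl::PointCloud<pcl::PointXYZ>::Ptr& cloud,
                pcl::PointCloud<pcl::PointXYZ>::Ptr& filtered)
{
    pcl::VoxelGrid<pcl::PointXYZ> grid;
    grid.setInputCloud(cloud);
    grid.setLeafSize(0.01f, 0.01f, 0.01f);
    grid.filter(*filtered);
}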
SamplesDesctiptors ParseDescriptors(const std::string &descriptorsFilename) {
  std::ifstream descriptors(descriptorsFilename.c_str());
  SamplesDesctiptors result;
  while(!descriptors.eof()) {
    std::string name;
    std::getline(descriptors, name, '\t');
    if (name.empty()) {
      break;
    }
    descriptors.get();
    std::string gender;
    std::getline(descriptors, gender, '\n');
    result.push_back(std::make_pair(TRAIN_IMAGES + name,
                                    gender == "Male" ? 0 : 1));
  }
  descriptors.close();
  return result;
}
void PatternDetector::train_matcher(const Pattern& pattern)
{
    // Store the pattern object
    m_pattern = pattern;

    // The API of cv::DescriptorMatcher is somewhat tricky.
    // First we clear the old train data:
    m_matcher->clear();

    // Then we add a vector of descriptors (each descriptor matrix describes one image).
    // This allows us to perform search across multiple images:
    std::vector<cv::Mat> descriptors(1);
    descriptors[0] = pattern.descriptors.clone(); 
    m_matcher->add(descriptors);

    // After adding train data perform actual train:
    m_matcher->train();
}
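Once the matcher has been trained as above, query descriptors are matched against the stored pattern descriptors. A minimal sketch of that query step is shown below; the free function, parameter names, and the 0.8 ratio-test threshold are illustrative assumptions, with `matcher` standing in for the trained m_matcher member.

// Sketch: matching query descriptors against a matcher trained as in train_matcher above.
// Uses cv::DescriptorMatcher::knnMatch plus a Lowe-style ratio test (threshold assumed).
#include <opencv2/features2d/features2d.hpp>
#include <vector>

void matchAgainstPattern(const cv::Ptr<cv::DescriptorMatcher>& matcher,
                         const cv::Mat& queryDescriptors,
                         std::vector<cv::DMatch>& goodMatches)
{
    std::vector<std::vector<cv::DMatch> > knnMatches;
    matcher->knnMatch(queryDescriptors, knnMatches, 2);

    goodMatches.clear();
    for (size_t i = 0; i < knnMatches.size(); ++i)
    {
        if (knnMatches[i].size() < 2)
            continue;
        // Keep a match only if it is clearly better than the second-best candidate.
        if (knnMatches[i][0].distance < 0.8f * knnMatches[i][1].distance)
            goodMatches.push_back(knnMatches[i][0]);
    }
}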
pcl::PointCloud<DescriptorType>::Ptr PCLHighLevelCtl::computeFPFHDescriptors(float radius_search) {
  pcl::PointCloud<DescriptorType>::Ptr descriptors(new pcl::PointCloud<DescriptorType>());
  pcl::FPFHEstimationOMP<PointType, NormalType, DescriptorType> fpfh;
  pcl::search::KdTree<PointType>::Ptr tree(new pcl::search::KdTree<PointType>);
	
  if(!this->normals)
    this->computeNormals();
		
  if(!this->keypoints)
    this->keypointsExtraction();
	
  fpfh.setInputCloud(this->keypoints);
  fpfh.setRadiusSearch(radius_search);
  fpfh.setSearchSurface(this->cloud);
  fpfh.setInputNormals(this->normals);
  fpfh.setSearchMethod(tree);
  fpfh.compute(*descriptors);
	
  return descriptors;
}
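DescriptorType is not defined in this snippet; for FPFH it is presumably pcl::FPFHSignature33. A hedged usage sketch that matches two such descriptor clouds with pcl::registration::CorrespondenceEstimation follows; the typedef and the function are assumptions, not part of PCLHighLevelCtl.

// Sketch: assumed typedef and a simple correspondence search between two FPFH clouds.
// Adjust DescriptorType if the project defines it differently.
#include <pcl/point_types.h>
#include <pcl/registration/correspondence_estimation.h>

typedef pcl::FPFHSignature33 DescriptorType;

pcl::CorrespondencesPtr matchDescriptors(
    const pcl::PointCloud<DescriptorType>::Ptr& source,
    const pcl::PointCloud<DescriptorType>::Ptr& target)
{
    pcl::registration::CorrespondenceEstimation<DescriptorType, DescriptorType> est;
    est.setInputSource(source);
    est.setInputTarget(target);

    pcl::CorrespondencesPtr correspondences(new pcl::Correspondences());
    est.determineCorrespondences(*correspondences);
    return correspondences;
}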
Example #11
std::vector<rafl::Descriptor_CPtr> ForestUtil::make_descriptors(const ORUtils::MemoryBlock<float>& featuresMB, size_t descriptorCount, size_t featureCount)
{
  // Make sure that the features are available and up-to-date on the CPU.
  featuresMB.UpdateHostFromDevice();

  // Make the rafl feature descriptors.
  const float *features = featuresMB.GetData(MEMORYDEVICE_CPU);
  std::vector<rafl::Descriptor_CPtr> descriptors(descriptorCount);

#ifdef WITH_OPENMP
  #pragma omp parallel for
#endif
  for(int i = 0; i < static_cast<int>(descriptorCount); ++i)
  {
    // Copy the relevant features into a descriptor and add it.
    const float *featuresForDescriptor = features + i * featureCount;
    descriptors[i].reset(new rafl::Descriptor(featuresForDescriptor, featuresForDescriptor + featureCount));
  }

  return descriptors;
}
int
main(int argc, char** argv)
{
	// Object for storing the point cloud.
	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
	// Object for storing the normals.
	pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
	// Object for storing the spin image for each point.
	pcl::PointCloud<SpinImage>::Ptr descriptors(new pcl::PointCloud<SpinImage>());

	// Read a PCD file from disk (the filename is expected as the first argument).
	if (argc < 2 || pcl::io::loadPCDFile<pcl::PointXYZ>(argv[1], *cloud) != 0)
	{
		std::cout << "Failed to load the input cloud, returning -1\n";
		return -1;
	}

	// Note: you would usually perform downsampling now. It has been omitted here
	// for simplicity, but be aware that computation can take a long time.

	// Estimate the normals.
	pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normalEstimation;
	normalEstimation.setInputCloud(cloud);
	normalEstimation.setRadiusSearch(0.03);
	pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree(new pcl::search::KdTree<pcl::PointXYZ>);
	normalEstimation.setSearchMethod(kdtree);
	normalEstimation.compute(*normals);

	// Spin image estimation object.
	pcl::SpinImageEstimation<pcl::PointXYZ, pcl::Normal, SpinImage> si;
	si.setInputCloud(cloud);
	si.setInputNormals(normals);
	// Radius of the support cylinder.
	si.setRadiusSearch(0.02);
	// Set the resolution of the spin image (the number of bins along one dimension).
	// Note: you must change the output histogram size to reflect this.
	si.setImageWidth(8);

	si.compute(*descriptors);

	return 0;
}
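The SpinImage type used above is not defined in the snippet. With si.setImageWidth(8), PCL's SpinImageEstimation produces a 153-bin histogram, so a plausible definition, matching the well-known PCL spin-image tutorial this code resembles, is the typedef below; treat it as an assumption about the surrounding project.

// Sketch: assumed definition of the SpinImage type used in the example above.
// An image width of 8 yields a 153-bin histogram in pcl::SpinImageEstimation.
#include <pcl/point_types.h>

typedef pcl::Histogram<153> SpinImage;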
void YawAngleEstimator::train()
{
	printf("YawAngleEstimator:train\n");
	vector<vector<KeyPoint>> kp(AngleNum,vector<KeyPoint>());
	vector<Mat> descriptors(AngleNum,Mat());
	featureExtract(YawTemplate, kp, descriptors, Feature);

	if (useIndex)
	{
		//build one FLANN index per template: KD-tree/L2 for SIFT, LSH/Hamming otherwise
		for (int i = 0; i < AngleNum; i++)
		{
			flann::Index tempIndex;
			if (Feature == USE_SIFT)
			{
				tempIndex.build(descriptors[i], flann::KDTreeIndexParams(4), cvflann::FLANN_DIST_L2);
			}
			else
			{
				tempIndex.build(descriptors[i], flann::LshIndexParams(12, 20, 2), cvflann::FLANN_DIST_HAMMING);
			}
			YawIndex.push_back(tempIndex);
		}
	}
	else
	{
		//build BFMatchers
		for (int i = 0; i < AngleNum; i++)
		{
			//record
			fss<<"\nKeypoints number of template "<< i <<" is "<< descriptors[i].rows << endl;

			BFMatcher tempMatcher;
			vector<Mat> train_des(1, descriptors[i]);
			tempMatcher.add(train_des);
			tempMatcher.train();
			matchers.push_back(tempMatcher);
		}
	}
}
Example #14
TEST(KMajority, SaveLoad) {

	std::vector<std::string> filenames;
	filenames.push_back("brief_0.bin");
	vlr::Mat descriptors(filenames);

	vlr::KMajorityParams params;
	params["num.clusters"] = 10;
	params["max.iterations"] = 10;
	params["nn.type"] = vlr::indexType::LINEAR;

	vlr::KMajority bofModel(descriptors, params);
	bofModel.build();
	bofModel.save("test_vocab.yaml.gz");

	vlr::KMajority bofModelLoaded;
	bofModelLoaded.load("test_vocab.yaml.gz");

	EXPECT_TRUE(
			std::equal(bofModel.getCentroids().begin<uchar>(),
					bofModel.getCentroids().end<uchar>(),
					bofModelLoaded.getCentroids().begin<uchar>()));

}
folly::Future<int32_t> service_with_special_namesSvIf::future_descriptors() {
  return apache::thrift::detail::si::future([&] { return descriptors(); });
}
int main(int argc, char* argv[])
{
    //-- Main program parameters (default values)
    double threshold = 0.03;
    std::string output_histogram_image = "histogram_image.m";
    std::string output_rsd_data = "rsd_data.m";
    double rsd_normal_radius = 0.05;
    double rsd_curvature_radius = 0.07;
    double rsd_plane_threshold = 0.2;

    //-- Show usage
    if (pcl::console::find_switch(argc, argv, "-h") || pcl::console::find_switch(argc, argv, "--help"))
    {
        show_usage(argv[0]);
        return 0;
    }

    //-- Check for parameters in arguments
    if (pcl::console::find_switch(argc, argv, "-t"))
        pcl::console::parse_argument(argc, argv, "-t", threshold);
    else if (pcl::console::find_switch(argc, argv, "--threshold"))
        pcl::console::parse_argument(argc, argv, "--threshold", threshold);

    if (pcl::console::find_switch(argc, argv, "--histogram"))
        pcl::console::parse_argument(argc, argv, "--histogram", output_histogram_image);

    if (pcl::console::find_switch(argc, argv, "-r"))
        pcl::console::parse_argument(argc, argv, "-r", output_rsd_data);
    else if (pcl::console::find_switch(argc, argv, "--rsd"))
        pcl::console::parse_argument(argc, argv, "--rsd", output_rsd_data);

    if (pcl::console::find_switch(argc, argv, "--rsd-params"))
        pcl::console::parse_3x_arguments(argc, argv, "--rsd-params", rsd_normal_radius, rsd_curvature_radius,
                                         rsd_plane_threshold);

    //-- Get point cloud file from arguments
    std::vector<int> filenames;
    bool file_is_pcd = false;

    filenames = pcl::console::parse_file_extension_argument(argc, argv, ".ply");

    if (filenames.size() != 1)
    {
        filenames = pcl::console::parse_file_extension_argument(argc, argv, ".pcd");

        if (filenames.size() != 1)
        {
            std::cerr << "No input file specified!" << std::endl;
            show_usage(argv[0]);
            return -1;
        }

        file_is_pcd = true;
    }

    //-- Print arguments to user
    std::cout << "Selected arguments: " << std::endl
              << "\tRANSAC threshold: " << threshold << std::endl
#ifdef HISTOGRAM
              << "\tHistogram image: " << output_histogram_image << std::endl
#endif
#ifdef CURVATURE
              << "\tRSD:" << std::endl
              << "\t\tFile: " << output_rsd_data << std::endl
              << "\t\tNormal search radius: " << rsd_normal_radius << std::endl
              << "\t\tCurvature search radius: " << rsd_curvature_radius << std::endl
              << "\t\tPlane threshold: " << rsd_plane_threshold << std::endl
#endif
              << "\tInput file: " << argv[filenames[0]] << std::endl;



    //-- Load point cloud data
    pcl::PointCloud<pcl::PointXYZ>::Ptr source_cloud(new pcl::PointCloud<pcl::PointXYZ>);

    if (file_is_pcd)
    {
        if (pcl::io::loadPCDFile(argv[filenames[0]], *source_cloud) < 0)
        {
            std::cout << "Error loading point cloud " << argv[filenames[0]] << std::endl << std::endl;
            show_usage(argv[0]);
            return -1;
        }
    }
    else
    {
        if (pcl::io::loadPLYFile(argv[filenames[0]], *source_cloud) < 0)
        {
            std::cout << "Error loading point cloud " << argv[filenames[0]] << std::endl << std::endl;
            show_usage(argv[0]);
            return -1;
        }
    }

    /********************************************************************************************
    * Main processing of the garment point cloud
    *********************************************************************************************/
    //-- Initial pre-processing of the mesh
    //------------------------------------------------------------------------------------
    std::cout << "[+] Pre-processing the mesh..." << std::endl;
    pcl::PointCloud<pcl::PointXYZ>::Ptr garment_points(new pcl::PointCloud<pcl::PointXYZ>);
    MeshPreprocessor<pcl::PointXYZ> preprocessor;
    preprocessor.setRANSACThresholdDistance(threshold);
    preprocessor.setInputCloud(source_cloud);
    preprocessor.process(*garment_points);

    //-- Find bounding box (not really required)
    pcl::MomentOfInertiaEstimation<pcl::PointXYZ> feature_extractor;
    pcl::PointXYZ min_point_AABB, max_point_AABB;
    pcl::PointXYZ min_point_OBB,  max_point_OBB;
    pcl::PointXYZ position_OBB;
    Eigen::Matrix3f rotational_matrix_OBB;

    feature_extractor.setInputCloud(garment_points);
    feature_extractor.compute();
    feature_extractor.getAABB(min_point_AABB, max_point_AABB);
    feature_extractor.getOBB(min_point_OBB, max_point_OBB, position_OBB, rotational_matrix_OBB);

#ifdef CURVATURE
    //-- Curvature stuff
    //-----------------------------------------------------------------------------------
    std::cout << "[+] Calculating curvature descriptors..." << std::endl;
    // Object for storing the normals.
    pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
    // Object for storing the RSD descriptors for each point.
    pcl::PointCloud<pcl::PrincipalRadiiRSD>::Ptr descriptors(new pcl::PointCloud<pcl::PrincipalRadiiRSD>());


    // Note: you would usually perform downsampling now. It has been omitted here
    // for simplicity, but be aware that computation can take a long time.

    // Estimate the normals.
    pcl::NormalEstimationOMP<pcl::PointXYZ, pcl::Normal> normalEstimation;
    normalEstimation.setInputCloud(garment_points);
    normalEstimation.setRadiusSearch(rsd_normal_radius);
    pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree(new pcl::search::KdTree<pcl::PointXYZ>);
    normalEstimation.setSearchMethod(kdtree);
    normalEstimation.setViewPoint(0, 0 , 2);
    normalEstimation.compute(*normals);

    // RSD estimation object.
    pcl::RSDEstimation<pcl::PointXYZ, pcl::Normal, pcl::PrincipalRadiiRSD> rsd;
    rsd.setInputCloud(garment_points);
    rsd.setInputNormals(normals);
    rsd.setSearchMethod(kdtree);
    // Search radius, to look for neighbors. Note: the value given here has to be
    // larger than the radius used to estimate the normals.
    rsd.setRadiusSearch(rsd_curvature_radius);
    // Plane radius. Any radius larger than this is considered infinite (a plane).
    rsd.setPlaneRadius(rsd_plane_threshold);
    // Do we want to save the full distance-angle histograms?
    rsd.setSaveHistograms(false);

    rsd.compute(*descriptors);

    //-- Save to mat file
    std::ofstream rsd_file(output_rsd_data.c_str());
    for (size_t i = 0; i < garment_points->points.size(); i++)
    {
        rsd_file << garment_points->points[i].x << " "
                 << garment_points->points[i].y << " "
                 << garment_points->points[i].z << " "
                 << descriptors->points[i].r_min << " "
                 << descriptors->points[i].r_max << "\n";
    }
    rsd_file.close();

#endif

#ifdef HISTOGRAM
    //-- Obtain histogram image
    //-----------------------------------------------------------------------------------
    std::cout << "[+] Calculating histogram image..." << std::endl;
    HistogramImageCreator<pcl::PointXYZ> histogram_image_creator;
    histogram_image_creator.setInputPointCloud(garment_points);
    histogram_image_creator.setResolution(1024);
    histogram_image_creator.setUpsampling(true);
    histogram_image_creator.compute();
    Eigen::MatrixXi image = histogram_image_creator.getDepthImageAsMatrix();

    //-- Temporal fix to get image (through file)
    std::ofstream file(output_histogram_image.c_str());
    file << image;
    file.close();
#endif

    return 0;
}
Example #17
int SiftGPU::DetectAndGenerateDesc()
{
	printf("\n ----------- DetectAndGenerateDesc inside --------------- \n");
	float prelim_contrastThreshold = 0.5 * contrastThreshold / intvls;
	struct detection_data* ddata;
	int o, i, r, c;
	int num=0;				// Number of keypoins detected
	int numRemoved=0;		// The number of key points rejected because they failed a test
	int numberExtrema = 0;
	int number = 0;

	int intvlsSum = intvls + 3;
	int OffsetAct = 0;
	int OffsetNext = 0;
	int OffsetPrev = 0;

	Keys keysArray[SIFT_MAX_NUMBER_KEYS];
	/*for (int j = 0 ; j < SIFT_MAX_NUMBER_KEYS ; j++)
	{
		keysArray[j].x = 0.0;
		keysArray[j].y = 0.0;
		keysArray[j].intvl = 0.0;
		keysArray[j].octv = 0.0;
		keysArray[j].subintvl = 0.0;
		keysArray[j].scx = 0.0;
		keysArray[j].scy = 0.0;
		keysArray[j].ori = 0.0;
	}*/


	for( o = 0; o < octvs; o++ )
	{
		for( i = 0; i < intvlsSum; i++ )
		{
			OffsetNext += sizeOfImages[o];
				
			if( i > 0 && i <= intvls )
			{
				num = 0;
				detectExtremaGPU->Process(subtractGPU->cmBufPyramid, gaussFilterGPU->cmBufPyramid, imageWidthInPyramid[o], imageHeightInPyramid[o], OffsetPrev, OffsetAct, OffsetNext, &num, prelim_contrastThreshold, i, o, keysArray);

				number = num;

				// Note: this matrix is local to the block; the computed descriptors
				// are not stored anywhere beyond it yet.
				cv::Mat descriptors(number, 128, CV_32F, 0.0);

				for(int ik = 0; ik < number; ik++)
				{
					// Convert each detected key into a cv::KeyPoint (stack allocation, no leak).
					cv::KeyPoint key(keysArray[ik].scx, keysArray[ik].scy, 1);
					key.octave = keysArray[ik].octv;
					key.angle = keysArray[ik].ori;
					keypoints.push_back(key);

					// Copy the 128-dimensional descriptor of this key.
					for(int d = 0; d < 128; d++)
					{
						descriptors.at<float>(ik, d) = keysArray[ik].desc[d];
					}
				}
			}
			OffsetPrev = OffsetAct;
			OffsetAct += sizeOfImages[o];
		}
	}

	return 0;
}
void xyzrgb2xyzsift::compute() {
    CLOG(LTRACE) << "xyzrgb2xyzsift::compute";
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud = in_cloud_xyzrgb.read();

    cloud->is_dense = true ;

    cv::Mat rgbimg(cloud->height, cloud->width, CV_8UC3, cv::Scalar::all(0)) ;
    for (int row = 0; row < (int) cloud->height; row++)
        for (int col = 0; col < (int) cloud->width; col++) {
            pcl::PointXYZRGB& pt = cloud->at(col, row) ;
            cv::Vec3b &rgbvec = rgbimg.at<cv::Vec3b>(row, col) ;
            rgbvec[2] = pt.r ; //Creating image for all pixels: normal and NaN
            rgbvec[1] = pt.g ;
            rgbvec[0] = pt.b ;
    }


    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;
    try {
        //-- Step 1: Detect the keypoints.
        cv::SiftFeatureDetector detector;

        detector.detect(rgbimg, keypoints);

        //-- Step 2: Calculate descriptors (feature vectors).
        cv::SiftDescriptorExtractor extractor;

        extractor.compute( rgbimg, keypoints, descriptors);
    } catch (...) {
        LOG(LERROR) << "SIFT keypoint detection or description failed\n";
    }

    pcl::PointCloud<PointXYZSIFT>::Ptr cloud_xyzsift(new pcl::PointCloud<PointXYZSIFT>);

    //Create output point cloud (this time - unorganized)
    cloud_xyzsift->height = 1 ;
    cloud_xyzsift->width = keypoints.size() ;
    cloud_xyzsift->is_dense = false ;
    cloud_xyzsift->points.resize(cloud_xyzsift->height * cloud_xyzsift->width) ;



    //Convert SIFT keypoints/descriptors into PointXYZRGBSIFT cloud
    pcl::PointCloud<PointXYZSIFT>::iterator pt_iter = cloud_xyzsift->begin();
    for (size_t i = 0; i < keypoints.size(); i++) {

        //std::cout << " " << keypoints[i].pt.x << " " << keypoints[i].pt.y << std::endl ;
        // Clamp to the last valid column/row so cloud->at() is never called out of range.
        int keyx = std::min(std::max((unsigned int) round(keypoints[i].pt.x), 0u), cloud->width - 1) ;
        int keyy = std::min(std::max((unsigned int) round(keypoints[i].pt.y), 0u), cloud->height - 1) ;
        //std::cout << keyx << " " << keyy << std::endl ;
        cv::Mat desc = descriptors(cv::Range(i,i + 1), cv::Range::all()) ;
        //std::cout << "descriptor " << desc << std::endl ;

        //Make XYZSIFT point
        PointXYZSIFT& pt_out = *pt_iter++ ;
        pcl::PointXYZRGB& pt_in = cloud->at(keyx, keyy) ;
        pt_out.x = pt_in.x; pt_out.y = pt_in.y; pt_out.z = pt_in.z ;
        for(int j=0; j<descriptors.cols;j++){
            pt_out.descriptor[j] = descriptors.row(i).at<float>(j);
        }
    }

    out_cloud_xyzsift.write(cloud_xyzsift);
}
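PointXYZSIFT is a project-specific point type that is not shown here. Based on how it is used above (x, y, z plus a 128-element descriptor array), a plausible sketch of its definition with PCL point registration is given below; the exact layout of the original type may differ, so treat this as an assumption.

// Sketch: assumed layout of the PointXYZSIFT type used above (x, y, z + 128-float SIFT descriptor).
#include <pcl/point_types.h>
#include <pcl/register_point_struct.h>

struct PointXYZSIFT
{
    PCL_ADD_POINT4D;        // adds x, y, z (and padding), as in other PCL point types
    float descriptor[128];  // SIFT descriptor, written as pt_out.descriptor[j] above
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
} EIGEN_ALIGN16;

POINT_CLOUD_REGISTER_POINT_STRUCT(PointXYZSIFT,
    (float, x, x)
    (float, y, y)
    (float, z, z)
    (float[128], descriptor, descriptor))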