Example #1
    math::DynMatrix<bool> CurvatureFeatureExtractor::apply(const core::Img32f &depthImg, core::DataSegment<float,4> &xyz, math::DynMatrix<bool> &initialMatrix,
                          std::vector<SurfaceFeatureExtractor::SurfaceFeature> features,
                          std::vector<std::vector<int> > &surfaces, core::DataSegment<float,4> &normals, bool useOpenObjects, bool useOccludedObjects, 
                          float histogramSimilarity, int distance, float maxError, int ransacPasses, float distanceTolerance, float outlierTolerance){
      int w = depthImg.getSize().width;
      math::DynMatrix<bool> curvature(initialMatrix.rows(), initialMatrix.rows(), true); // result matrix (square: one entry per surface pair)
      //initialize
      for(size_t i=0; i<initialMatrix.rows(); i++){
        for(size_t j=0; j<initialMatrix.rows(); j++){
          //only test pairs of non-adjacent curved surfaces 
          if(initialMatrix(i,j)==true || 
              (features[i].curvatureFactor!=SurfaceFeatureExtractor::CURVED_1D && features[i].curvatureFactor!=SurfaceFeatureExtractor::CURVED_2D) || 
              (features[j].curvatureFactor!=SurfaceFeatureExtractor::CURVED_1D && features[j].curvatureFactor!=SurfaceFeatureExtractor::CURVED_2D) ){
            curvature(i,j)=false;
          }
        }
      }
      for(size_t i=0; i<curvature.rows(); i++){
        for(size_t j=i+1; j<curvature.cols(); j++){ // don't check each pair twice
          if(curvature(i,j)==true){//candidate
            bool proceed=true;

            //joint criterion: similar surface shape and orientation (normal histogram matching)
            float similarityScore = SurfaceFeatureExtractor::matchNormalHistograms(features[i].normalHistogram, features[j].normalHistogram);
            if(similarityScore<histogramSimilarity){
              proceed=false;
            }
            
            //compute cases
            if(proceed){
              proceed=false;
              if(useOpenObjects){
                proceed = computeOpenObject(normals, features[i], features[j], 
                                    surfaces[i], surfaces[j], distance, w);
              }
              if(useOccludedObjects && !proceed){//only if first case does not match
                proceed = computeOccludedObject(depthImg, xyz, normals, features[i], features[j], 
                                    surfaces[i], surfaces[j], w, maxError, ransacPasses, distanceTolerance, outlierTolerance);
              }
            } 
 
            if(!proceed){//remove if no case succeeded
              curvature(i,j)=false;
              curvature(j,i)=false;
            }                     
          }          
        }
      }
           
      return curvature;
    }
Пример #2
0
vector<float> GMExperiment6_1::computeCurvature(const vector<Vector2f> &data)
{
    vector<float> curvature(data.size());
    // interior points: central differences
    for(size_t i = 1; i + 1 < data.size(); ++i)
    {
        Vector2f c1 = (data[i+1]-data[i-1])*0.5;      // ~ first derivative (x', y')
        Vector2f c2 = data[i+1]-data[i]*2+data[i-1];  // ~ second derivative (x'', y'')
        // signed curvature k = (x'y'' - y'x'') / (x'^2 + y'^2)^(3/2)
        curvature[i] = (-c2[0]*c1[1] + c2[1]*c1[0])/pow(c1.getSqrNorm(), 1.5);
    }
    // endpoints: copy the curvature of the nearest interior point
    curvature[0] = curvature[1];
    curvature[data.size()-1] = curvature[data.size()-2];
    return curvature;
}
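A quick standalone sanity check of the scheme above (a minimal sketch, not part of the original class: Vec2 stands in for Vector2f, and getSqrNorm() is expanded inline). Points sampled from a circle of radius R should give a curvature close to 1/R at interior points:

#include <cmath>
#include <cstdio>
#include <vector>

struct Vec2 { float x, y; };

int main()
{
    const float R = 2.0f;   // circle of radius R -> expected curvature 1/R = 0.5
    const int   n = 100;
    std::vector<Vec2> data(n);
    for (int i = 0; i < n; ++i) {
        float t = 2.0f * float(M_PI) * i / n;
        data[i] = { R * std::cos(t), R * std::sin(t) };
    }
    // same central-difference scheme as computeCurvature, at one interior point
    int i = n / 2;
    Vec2 c1 = { (data[i+1].x - data[i-1].x) * 0.5f, (data[i+1].y - data[i-1].y) * 0.5f };
    Vec2 c2 = { data[i+1].x - 2*data[i].x + data[i-1].x,
                data[i+1].y - 2*data[i].y + data[i-1].y };
    float k = (-c2.x*c1.y + c2.y*c1.x) / std::pow(c1.x*c1.x + c1.y*c1.y, 1.5f);
    std::printf("curvature: %f (expected %f)\n", k, 1.0f / R);
    return 0;
}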
Example #3
float BezierCurve::maxCurvature(const Vector3 &_p1,
                                const Vector3 &_p2,
                                const Vector3 &_p3,
                                int _steps,
                                float _dt) {
	float max = 0.f;
	for (int i = 0; i < _steps; ++i) {
		float t = (float)i/(float)(_steps-1);
		float temp = curvature(_p1, _p2, _p3, t, _dt);
		if (temp > max) max = temp;
	}
	//std::cout << "max local: " << max << std::endl;
	return max;
}
Example #4
#include <cmath>
#include <cstdio>
#include <vector>

int main(int argc, char **argv) {
    std::vector<double> p1(2),p2(2),p3(2);
    p1.at(0)=0.0;
    p1.at(1)=1.0;
    p2.at(0)=1.0;
    p2.at(1)=0.0;
    p3.at(0)=1.0-sqrt(2.0)/2.0;
    p3.at(1)=1.0-sqrt(2.0)/2.0;
    
    double r=curvature(p1,p2,p3);
    printf("r: %f\n",r);
    
    return 0;
}
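The three-point curvature() helper called above isn't shown on this page. A minimal sketch consistent with the call site (2-D points passed as std::vector<double>; the printed value r suggests the routine returns the radius of the circumscribed circle, whose reciprocal is the curvature) — both the body and that choice are assumptions:

#include <cmath>
#include <vector>

// Hypothetical helper: radius of the circle through p1, p2, p3 (R = abc / (4 * area)).
double curvature(const std::vector<double> &p1,
                 const std::vector<double> &p2,
                 const std::vector<double> &p3)
{
    double a = std::hypot(p2[0] - p1[0], p2[1] - p1[1]);
    double b = std::hypot(p3[0] - p2[0], p3[1] - p2[1]);
    double c = std::hypot(p1[0] - p3[0], p1[1] - p3[1]);
    // twice the signed triangle area, via the 2-D cross product
    double cross = (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0]);
    double area = 0.5 * std::fabs(cross);
    if (area == 0.0) return INFINITY;   // collinear points: infinite radius
    return (a * b * c) / (4.0 * area);
}

The three points in main() all lie on the unit circle centered at (1, 1), so this sketch prints r: 1.000000.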
Example #5
vector<double> curvature(vector<Point> contour) {
    vector<double> cs;
    // wrap the contour so the first and last points have both neighbours
    contour.insert(contour.begin(), contour[contour.size() - 1]);
    contour.push_back(contour[1]);   // contour[1] is the original first point after the insert
    // compute the curvature at every point of the original contour
    for (size_t i = 1; i + 1 < contour.size(); ++i) {
        cs.push_back(curvature(contour[i-1], contour[i], contour[i+1]));
    }

    normalize(cs);

    return cs;
}
Example #6
int main(int argc, char *argv[])
{
#   include "setRootCase.H"
#   include "createTime.H"
#   include "createMesh.H"

#   include "createFaMesh.H"

    volSurfaceMapping vsm(aMesh);

    // Curvature calculated from point data

    volScalarField curvature
    (
        IOobject
        (
            "curvature",
            runTime.timeName(),
            mesh,
            IOobject::NO_READ,
            IOobject::AUTO_WRITE
        ),
        mesh,
        dimensionedScalar("0", dimless, 0)
    );

    const areaScalarField& c = aMesh.faceCurvatures();

    Info<< "Curvature: min = " << Foam::min(c).value()
        << " max = " << Foam::max(c).value() << endl;

    vsm.mapToVolume(c, curvature.boundaryField());

    curvature.write();

    return(0);
}
Example #7
Foam::tmp<Foam::triSurfacePointScalarField> Foam::automatic::load()
{
    Info<< indent
        << "Calculating cell size on surface: " << surfaceName_ << endl;

    tmp<triSurfacePointScalarField> tPointCellSize
    (
        new triSurfacePointScalarField
        (
            IOobject
            (
                surfaceName_ + ".cellSize",
                surface_.searchableSurface::time().constant(),
                "triSurface",
                surface_.searchableSurface::time(),
                IOobject::NO_READ,
                IOobject::NO_WRITE
            ),
            surface_,
            dimLength,
            scalarField(surface_.nPoints(), maximumCellSize_)
        )
    );

    triSurfacePointScalarField& pointCellSize = tPointCellSize();

    if (readCurvature_)
    {
        Info<< indent
            << "Reading curvature         : " << curvatureFile_ << endl;

        triSurfacePointScalarField curvature
        (
            IOobject
            (
                curvatureFile_,
                surface_.searchableSurface::time().constant(),
                "triSurface",
                surface_.searchableSurface::time(),
                IOobject::MUST_READ,
                IOobject::NO_WRITE
            ),
            surface_,
            dimLength,
            true
        );

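        // Target size per point: curvatureCellSizeCoeff_/mag(curvature),
        // clamped so it never exceeds maximumCellSize_ and never grows past
        // the size already stored in pointCellSize.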
        forAll(pointCellSize, pI)
        {
            pointCellSize[pI] =
                min
                (
                    1.0
                   /max
                    (
                        (1.0/curvatureCellSizeCoeff_)*mag(curvature[pI]),
                        1.0/maximumCellSize_
                    ),
                    pointCellSize[pI]
                );
        }
    }

    return tPointCellSize;
}
Example #8
int main(int argc, char* argv[])
{
	/// generate XML for microglia video images
	std::string str = std::string("XML");
	if( argc == 5 && std::string(argv[1]) == str && atoi(argv[2]) >= 1)  // generate project file for the image sequences
	{
		SomaExtractor *Somas = new SomaExtractor();	
		SomaExtractor::OutputImageType::Pointer inputImage = Somas->Read8BitImage("1_8bit.tif");
		int width = inputImage->GetLargestPossibleRegion().GetSize()[0];
		int height = inputImage->GetLargestPossibleRegion().GetSize()[1];
		int row = atoi(argv[3]);   // arrange them row by col
		int col = atoi(argv[4]);
		std::ofstream ofs("Project.xml");
		ofs<< "<?xml	version=\"1.0\"	?>"<<std::endl;
		ofs<< "<Source>"<<std::endl;
		for( int i = 1; i <= atoi(argv[2]); i++)
		{
			int rown = (i-1) / col;
			int coln = (i-1) % col;
			int tx = width * 1.1 * coln;
			int ty = height * 1.1 * rown;
			ofs<<"\t"<<"<File	FileName=\""<<i<<"_CV_ANT.swc\""<<"\t"<<"Type=\"Trace\"\ttX=\""<<tx<<"\"\ttY=\""<<ty<<"\"\ttZ=\"0\"/>"<<std::endl;
			ofs<<"\t"<<"<File	FileName=\""<<i<<"_8bit.tif\""<<"\t"<<"Type=\"Image\"\ttX=\""<<tx<<"\"\ttY=\""<<ty<<"\"\ttZ=\"0\"/>"<<std::endl;
			ofs<<"\t"<<"<File	FileName=\""<<i<<"_soma.mhd\""<<"\t"<<"Type=\"Soma\"\ttX=\""<<tx<<"\"\ttY=\""<<ty<<"\"\ttZ=\"0\"/>"<<std::endl;
			//ofs<<"\t"<<"<File	FileName=\""<<i<<"_soma_features.txt\""<<"\t"<<"Type=\"Nuclei_Table\"\ttX=\""<<tx<<"\"\ttY=\""<<ty<<"\"\ttZ=\"0\"/>"<<std::endl;
		}
		ofs<< "</Source>"<<std::endl;
		delete Somas;
		return 0;
	}

	if( argc < 2  || atoi(argv[1]) < 0 || atoi(argv[1]) > 6)
	{
		std::cout<<"Debris: SomaExtraction <0> <IntensityImage> <DebrisImage> <SomaSeeds.txt>"<<std::endl;
		//std::cout<<"Derbis: SomaExtraction <InputImageFileName> <Centroids.txt> <DiceWidth (typically 100)> <hole filling (typically 10)>\n";
		std::cout<<"SomaExtraction: SomaExtraction <1> <InputImageFileName> <InitialContourLabeledImage> <Options>\n";
		std::cout<<"SomaExtraction without seeds: SomaExtraction <2> <InputImageFileName> <Options>\n";
		std::cout<<"Get Statistics of the image: SomaExtraction <3> <InputImageFileName> \n";
		std::cout<<"Normalize intensity: SomaExtraction <4> <InputImageFileName> <Gaussian Blur Sigma> <Global Median> <ratio threshold, if below, autothresholding to keep background>\n";
		return 0;
	}

	SomaExtractor *Somas = new SomaExtractor();	
	if( atoi(argv[1]) == 0)   /// debris accumulation  
	{
		std::cout<< "Reading Montage1"<<std::endl;
		SomaExtractor::OutputImageType::Pointer inputImage = Somas->Read8BitImage(argv[2]);
		std::cout<< "Reading Montage2"<<std::endl;
		SomaExtractor::OutputImageType::Pointer debrisImage = Somas->Read8BitImage(argv[3]);
		std::vector< itk::Index<3> > somaSeeds;
		std::vector< itk::Index<3> > debrisSeeds;

		Somas->ReadSeedpoints( argv[4], somaSeeds, 0);
		std::cout<< somaSeeds.size()<<std::endl;
		std::cout<< "Generating Debris Centroids..."<<std::endl;
		Somas->GetDebrisCentroids( debrisImage, debrisSeeds);
		//Somas->ReadSeedpoints( argv[3], debrisSeeds, 0);
		std::cout<< debrisSeeds.size()<<std::endl;
		std::cout<< "Associating Debris with Nucleus..."<<std::endl;
		Somas->AssociateDebris(inputImage, somaSeeds, debrisSeeds);
	}
	else if( atoi(argv[1]) == 1) /// soma segmentation with initial contours
	{
		Somas->LoadOptions( argv[4]); // Load params
		
		std::cout << "Set input image" << std::endl;
		//SomaExtractor::ProbImageType::Pointer image = Somas->SetInputImageByPortion(argv[1]); // Load microglia image by portion
		SomaExtractor::ProbImageType::Pointer image = Somas->SetInputImage(argv[2]); // Load microglia image 16bit

		std::string InputFilename = std::string(argv[2]);
		std::string somaImageName = InputFilename;
		somaImageName.erase(somaImageName.length()-4,somaImageName.length());
		somaImageName.append("_soma.mhd");
		std::string somaCentroidName = InputFilename;
		somaCentroidName.erase(somaCentroidName.length()-4,somaCentroidName.length());
		somaCentroidName.append("_centroids.txt");
		std::string somaFeatureName = InputFilename;
		somaFeatureName.erase(somaFeatureName.length()-4,somaFeatureName.length());
		somaFeatureName.append("_soma_features.txt");

		std::cout << "Set initial contour" << std::endl;
		SomaExtractor::SegmentedImageType::Pointer initialContourImage = Somas->SetInitalContourImage(argv[3]); // Load labeled nucleus image
		
		clock_t SomaExtraction_start_time = clock();
		
		std::cout<< "Segmenting..."<<std::endl;

		std::vector< itk::Index<3> > seedVector;
		/// SegmentSoma2: GVF Active Contour
		SomaExtractor::SegmentedImageType::Pointer segImage = Somas->SegmentSomaUsingGradient(image, initialContourImage, seedVector);
		std::cout << "Total time for SomaExtraction is: " << (clock() - SomaExtraction_start_time) / (float) CLOCKS_PER_SEC << std::endl;

		/// Compute soma features and write new seeds back
		if( segImage)
		{
			std::cout<< "Writing "<< somaImageName<<std::endl;
			Somas->writeImage(somaImageName.c_str(), segImage);
			std::cout<< "Writing "<< somaCentroidName<<std::endl;
			Somas->writeCentroids( somaCentroidName.c_str() ,seedVector);
			vtkSmartPointer<vtkTable> table = Somas->ComputeSomaFeatures(segImage);
			std::cout<< "Writing "<< somaFeatureName<<std::endl;
			ftk::SaveTable(somaFeatureName.c_str(), table);
		}
	}
	else if( atoi(argv[1]) == 2)  /// soma segmentation without initial seeds
	{
		Somas->LoadOptions( argv[3]); // Load params
		std::cout << "Set input image" << std::endl;
		SomaExtractor::OutputImageType::Pointer image = Somas->Read16BitImage(argv[2]); // Load microglia image 16bit	
		
		std::string InputFilename = std::string(argv[2]);
		//std::string bit8FileName = InputFilename;
		//bit8FileName.erase(bit8FileName.length()-4,bit8FileName.length());
		//bit8FileName.append("_8bit.tif");
		//Somas->writeImage(bit8FileName.c_str(), image);
        
		std::string somaImageName = InputFilename;
		somaImageName.erase(somaImageName.length()-4,somaImageName.length());
		somaImageName.append("_soma.nrrd");
		std::string somaCentroidName = InputFilename;
		somaCentroidName.erase(somaCentroidName.length()-4,somaCentroidName.length());
		somaCentroidName.append("_centroids.txt");
		std::string somaFeatureName = InputFilename;
		somaFeatureName.erase(somaFeatureName.length()-4,somaFeatureName.length());
		somaFeatureName.append("_soma_features.txt");

		std::vector< itk::Index<3> > seedVector;
		SomaExtractor::ProbImageType::Pointer binImagePtr = Somas->GenerateSeedPoints(image, seedVector);
		//Somas->ReadSeedpoints(argv[3], seedVector, false);

		clock_t SomaExtraction_start_time = clock();
		
		std::cout<< "Segmenting..."<<std::endl;

		/// SegmentSoma1: Active Contour without GVF, eliminate small objects
		SomaExtractor::SegmentedImageType::Pointer segImage = Somas->SegmentSoma(seedVector, binImagePtr);
		std::cout << "Total time for SomaExtraction is: " << (clock() - SomaExtraction_start_time) / (float) CLOCKS_PER_SEC << std::endl;

		/// Compute soma features and write new seeds back
		if( segImage)
		{
			std::cout<< "Writing "<< somaImageName<<std::endl;
			Somas->writeImage(somaImageName.c_str(), segImage);
			std::cout<< "Writing "<< somaCentroidName<<std::endl;
			Somas->writeCentroids( somaCentroidName.c_str() ,seedVector);
			vtkSmartPointer<vtkTable> table = Somas->ComputeSomaFeatures(segImage);
			std::cout<< "Writing "<< somaFeatureName<<std::endl;
			ftk::SaveTable(somaFeatureName.c_str(), table);
		}
	} 
	else if( atoi(argv[1]) == 3 && argc == 3)   // print out mean and std and the ratio
	{
		std::string InputFilename = std::string(argv[2]);
		char * pch = argv[2];
		std::string str;
		str += "/";
		char * token1 = strtok(pch,"/");
		char * token2 = strtok(NULL,"/");
		while( token2 != NULL)
		{
			str += std::string(token1) + "/";
			token1 = token2;
			token2 = strtok(NULL,"/");
		}
		str += "statistics.txt";
		std::cout<<str<<std::endl;
		SomaExtractor::ProbImageType::Pointer image = Somas->SetInputImage(InputFilename.c_str()); 
		Somas->CaculateMeanStd(str, image);
	}
	else if( atoi(argv[1]) == 4 && argc == 6)  /// normalize the intensity: get background image
	{
		std::string InputFilename = std::string(argv[2]);
		SomaExtractor::ProbImageType::Pointer image = Somas->SetInputImage(InputFilename.c_str()); 
		SomaExtractor::ProbImageType2D::Pointer backgroundImage = Somas->GetBackgroundImageByFirstSlice(image, atof(argv[3]));
		std::string imageName = InputFilename;
		imageName.erase(imageName.length()-7,imageName.length());
		imageName.append("Ndsu.TIF");
		SomaExtractor::UShortImageType::Pointer rescaledImage = Somas->DevideAndScale(image, backgroundImage, atof(argv[4]), atof(argv[5]));
		Somas->writeImage(imageName.c_str(), rescaledImage);
	}
	else if( atoi( argv[1]) == 5)
	{
		SomaExtractor::ProbImageType::Pointer inputImage = Somas->SetInputImage8bit(argv[2]);
		//std::vector< itk::Index<3> > seedVector;
		//Somas->ReadSeedpoints(argv[3], seedVector, true);
		Somas->LoadOptions( argv[4]); // Load params

		vnl_vector<int> seperator(4);

		seperator[0] = 16;
		seperator[1] = 23;
		seperator[2] = 30;
		seperator[3] = 49;
		vnl_vector<double> curvature(4);
		curvature[0] = 0.5;
		curvature[1] = 0.45;
		curvature[2] = 0.4;
		curvature[3] = 0.45;
		SomaExtractor::SegmentedImageType::Pointer segImage = Somas->SegmentHeart(argv[2], argv[3], inputImage, seperator, curvature);
		if( segImage)
		{
			std::string inputName = std::string(argv[2]);
			std::string somaImageName = inputName;
			somaImageName.erase(somaImageName.length()-4,somaImageName.length());
			somaImageName.append("_seg.mhd");
			Somas->writeImage(somaImageName.c_str(), segImage);

			//Somas->writeCentroids( argv[3],seedVector);
			inputName.erase(inputName.length()-4,inputName.length());
			inputName.erase(inputName.begin());
			int id = atoi(inputName.c_str());
			vtkSmartPointer<vtkTable> table = Somas->ComputeHeartFeatures(segImage);
			ftk::SaveTableAppend("features.txt", table, id);
		}
	}
	//else if( atoi(argv[1]) == 5 && argc == 5)  /// normalize by the input background
	//{
	//	std::string InputFilename = std::string(argv[2]);
	//	SomaExtractor::ProbImageType::Pointer image = Somas->SetInputImage(InputFilename.c_str()); 
	//	SomaExtractor::ProbImageType2D::Pointer backgroundImage = Somas->SetInputImageFloat2D(argv[3]);
	//	SomaExtractor::UShortImageType::Pointer rescaledImage = Somas->DevideAndScaleToOriginalMean(image, backgroundImage, atoi(argv[4]));
	//	
	//	std::string imageName = InputFilename;
	//	imageName.erase(imageName.length()-4,imageName.length());
	//	imageName.append("_normalize.tif");
	//	Somas->writeImage(imageName.c_str(), rescaledImage);
	//}
	//else if( atoi(argv[1]) == 6 && argc == 5)  /// normalize the intensity: get background image
	//{
	//	std::string InputFilename = std::string(argv[2]);
	//	//SomaExtractor::ProbImageType::Pointer image = Somas->SetInputImage(argv[2]); 
	//	//SomaExtractor::ProbImageType2D::Pointer backgroundImage = Somas->GetBackgroundImage(image, atof(argv[3]));
	//	SomaExtractor::ProbImageType2D::Pointer backModel = Somas->SetInputImage2D(argv[2]);
	//	SomaExtractor::ProbImageType2D::Pointer backimage = Somas->SetInputImage2D(argv[3]);

	//	//std::string imageName = InputFilename;
	//	//imageName.erase(imageName.length()-4,imageName.length());
	//	//imageName.append("_background.nrrd");
	//	//Somas->WriteFloat2DImage(imageName.c_str(), backgroundImage);

	//	//std::cout<<"Read background image"<<std::endl;
	//	//
	//	//std::cout<<"NormalizeUsingBackgroundImage"<<std::endl;
	//	Somas->NormalizeUsingBackgroundImage(backModel, backimage, atof(argv[4]));
	//	/*std::string imageName = InputFilename;
	//	imageName.erase(imageName.length()-4,imageName.length());
	//	imageName.append("_normalize.tif");
	//	SomaExtractor::UShortImageType::Pointer normalizedImage = Somas->NormalizeUsingBackgroundImage(image, backgroundImage);
	//	Somas->writeImage(imageName.c_str(), normalizedImage);*/
	//}
	//else if( atoi(argv[1]) == 7 && argc == 7)   // get seed coordinates 
	//{
	//	std::vector< itk::Index<3> > seedVector;
	//	Somas->ReadSeedpoints(argv[2], seedVector, false);
	//	std::cout<< "Original Seed Size: "<<seedVector.size()<<std::endl;

	//	std::string InputFilename = std::string(argv[2]);
	//	std::string outputFilename = InputFilename;
	//	outputFilename.erase(outputFilename.length()-4,outputFilename.length());
	//	outputFilename.append("_crop.txt");
	//	std::vector< itk::Index<3> > seedInRegion;
	//	Somas->GetSeedpointsInRegion(seedVector, seedInRegion, atoi(argv[3]), atoi(argv[4]), atoi(argv[5]), atoi(argv[6]));  
	//	std::cout<< "Seed Size in region: "<<seedInRegion.size()<<std::endl;
	//	Somas->writeCentroids(outputFilename.c_str(), seedInRegion);
	//}

	delete Somas;
	return 0;
}
Example #9
int main (int argc, char** argv)
{

	/* DOWNSAMPLING ********************************************************************************************************************/
	std::ofstream output_file("properties.txt");
	std::ofstream curvature("curvature.txt");
	std::ofstream normals_text("normals.txt");

	/*

	std::cerr << "PointCloud before filtering: " << cloud->width * cloud->height 
		<< " data points (" << pcl::getFieldsList (*cloud) << ")." << std::endl;

	// Create the filtering object
	pcl::VoxelGrid<pcl::PCLPointCloud2> sor;
	sor.setInputCloud (cloud);
	sor.setLeafSize (0.01f, 0.01f, 0.01f);
	sor.filter (*cloud_filtered);

	output_file << "Number of filtered points" << std::endl;
	output_file << cloud_filtered->width * cloud_filtered->height<<std::endl;

	std::cerr << "PointCloud after filtering: " << cloud_filtered->width * cloud_filtered->height 
		<< " data points (" << pcl::getFieldsList (*cloud_filtered) << ")." << std::endl;


	pcl::PointCloud<pcl::PointXYZ>::Ptr descriptor (new pcl::PointCloud<pcl::PointXYZ>);

	descriptor->width = 5000 ;
	descriptor->height = 1 ;
	descriptor->points.resize (descriptor->width * descriptor->height) ;
	std::cerr << descriptor->points.size() << std::endl ;

	pcl::PCDWriter writer;
	writer.write ("out.pcd", *cloud_filtered, Eigen::Vector4f::Zero (), Eigen::Quaternionf::Identity (), false);

	*/
		
	/***********************************************************************************************************************************/

	/* CALCULATING VOLUME AND SURFACE AREA ********************************************************************************************************************/		

	/*pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_hull (new pcl::PointCloud<pcl::PointXYZ>);
	  pcl::ConcaveHull<pcl::PointXYZ> chull;
	  chull.setInputCloud(test);

	  chull.reconstruct(*cloud_hull);*/

	/*for (size_t i=0; i < test->points.size (); i++)
	  {
	  std::cout<< test->points[i].x <<std::endl;
	  std::cout<< test->points[i].y <<std::endl;
	  std::cout<< test->points[i].z <<std::endl;
	  }*/
	//std::cout<<data.size()<<std::endl;

	// Load input file into a PointCloud<T> with an appropriate type
	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
	pcl::PCLPointCloud2 cloud_blob;
	pcl::io::loadPCDFile ("mini_soccer_ball_downsampled.pcd", cloud_blob);
	pcl::fromPCLPointCloud2 (cloud_blob, *cloud);		
	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud1 (new pcl::PointCloud<pcl::PointXYZ>);
	pcl::fromPCLPointCloud2 (cloud_blob, *cloud1);
		//* the data should be available in cloud

	// Normal estimation*
	pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> n;
	pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
	pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ>);
	tree->setInputCloud (cloud);
	n.setInputCloud (cloud);
	n.setSearchMethod (tree);
	n.setKSearch (20);
	n.compute (*normals);
	//* normals should now contain the point normals + surface curvatures

	// Concatenate the XYZ and normal fields*
	pcl::PointCloud<pcl::PointNormal>::Ptr cloud_with_normals (new pcl::PointCloud<pcl::PointNormal>);
	pcl::concatenateFields (*cloud, *normals,*cloud_with_normals);
	//* cloud_with_normals = cloud + normals

	// Create search tree*
	pcl::search::KdTree<pcl::PointNormal>::Ptr tree2 (new pcl::search::KdTree<pcl::PointNormal>);
	tree2->setInputCloud (cloud_with_normals);

	// Initialize objects
	pcl::GreedyProjectionTriangulation<pcl::PointNormal> gp3;
	pcl::PolygonMesh triangles;

	// Set the maximum distance between connected points (maximum edge length)
	gp3.setSearchRadius (0.025);

	// Set typical values for the parameters
	gp3.setMu (2.5);
	gp3.setMaximumNearestNeighbors (100);
	gp3.setMaximumSurfaceAngle(M_PI/4); // 45 degrees
	gp3.setMinimumAngle(M_PI/18); // 10 degrees
	gp3.setMaximumAngle(2*M_PI/3); // 120 degrees
	gp3.setNormalConsistency(false);


	// Get result
	gp3.setInputCloud (cloud_with_normals);
	gp3.setSearchMethod (tree2);
	gp3.reconstruct (triangles);

	// Additional vertex information
	std::vector<int> parts = gp3.getPartIDs();
	std::vector<int> states = gp3.getPointStates();	

	pcl::PointCloud<pcl::PointXYZ>::Ptr triangle_cloud(new pcl::PointCloud<pcl::PointXYZ>);
	pcl::fromPCLPointCloud2(triangles.cloud, *triangle_cloud);	// copy mesh vertices; don't wrap the stack object in a Ptr
	/*
	for(int i = 0; i < 2; i++){
		std::cout<<triangles.polygons[i]<<std::endl;
	}
	*/
	std::cout<<"first vertex "<<triangles.polygons[0].vertices[0] << std::endl;
	
	//std::cout<<"Prolly not gonna work "<<triangle_cloud->points[triangles.polygons[0].vertices[0]] << std::endl; 	


	//pcl::fromPCLPointCloud2(triangles.cloud, triangle_cloud); 
	std::cout << "size of points " << triangle_cloud->points.size() << std::endl ;
	
	std::cout<<triangle_cloud->points[0]<<std::endl;
	for(unsigned i = 0; i < triangle_cloud->points.size(); i++){
		std::cout << triangle_cloud->points[i].getVector3fMap() <<"test"<< std::endl;
	}
	//std::cout<<"surface: "<<calculateAreaPolygon(triangles, triangle_cloud)<<std::endl;

	/******************************************************************************************************************************************/	

	/* CALCULATING CURVATURE AND NORMALS ********************************************************************************************************************/

	// Create the normal estimation class, and pass the input dataset to it
	pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> ne;	

	ne.setInputCloud (cloud);

	// Create an empty kdtree representation, and pass it to the normal estimation object.
	// Its content will be filled inside the object, based on the given input dataset (as no other search surface is given).
	pcl::search::KdTree<pcl::PointXYZ>::Ptr tree3 (new pcl::search::KdTree<pcl::PointXYZ> ());

	ne.setSearchMethod (tree3);

	// Output datasets
	pcl::PointCloud<pcl::Normal>::Ptr cloud_normals (new pcl::PointCloud<pcl::Normal>);

	// Use all neighbors in a sphere of radius 3cm
	ne.setRadiusSearch (0.03);

	// Compute the features
	ne.compute (*cloud_normals);

	output_file << "size of points " << cloud->points.size() << std::endl ;

	output_file << "size of the normals " << cloud_normals->points.size() << std::endl ; 	

	//pcl::PointCloud<pcl::PointXYZRGB>::Ptr point_cloud_ptr (new pcl::PointCloud<pcl::PointXYZRGB>);

	/************************************************************************************************************************************/

	/* PARSING DATA ********************************************************************************************************************/

	int k=0 ;
	float dist ;
	float square_of_dist ;
	float x1,y1,z1,x2,y2,z2 ;
	float nu[3], nv[3], pv_pu[3], pu_pv[3] ;
	float highest = triangle_cloud->points[0].z;
	float lowest = triangle_cloud->points[0].z;


	for ( int i = 0; i < cloud_normals->points.size() ; i++)
	{
		output_file<<i<<": triangulated "<<triangle_cloud->points[i].x<<", "<<triangle_cloud->points[i].y<<", "<<triangle_cloud->points[i].z<<std::endl;
		output_file<<i<<": normal"<<cloud1->points[i].x<<", "<<cloud1->points[i].y<<", "<<cloud1->points[i].z<<std::endl;
		if(triangle_cloud->points[i].z > highest)
		{
			highest = triangle_cloud->points[i].z;
		}
		if(triangle_cloud->points[i].z < lowest)
		{
			lowest = triangle_cloud->points[i].z;
		}
		normals_text <<i+1 <<": "<<" x-normal-> "<<cloud_normals->points[i].normal_x<<" y-normal-> "<<cloud_normals->points[i].normal_y<<" z-normal-> "<<cloud_normals->points[i].normal_z<<std::endl;
		curvature <<i+1 <<": curvature: "<<cloud_normals->points[i].curvature<<std::endl;
		
		float x, y, z, dist, nx, ny, nz, ndist;
		
		/*

		if(i != cloud_normals->points.size()-1){
			x = cloud->points[i+1].x - cloud->points[i].x;
			y = cloud->points[i+1].y - cloud->points[i].y;
			z = cloud->points[i+1].z - cloud->points[i].z;
			dist = sqrt(pow(x, 2)+pow(y, 2) + pow(z, 2));
			output_file << i+1 <<" -> "<< i+2 << " distance normal: " << dist <<std::endl;
			
			nx = triangle_cloud[i+1].indices[0] - triangle_cloud.points[i].x;
			ny = triangle_cloud.points[i+1].y - triangle_cloud.points[i].y;
			nz = triangle_cloud.points[i+1].z - triangle_cloud.points[i].z;
			ndist = sqrt(pow(nx, 2)+pow(ny, 2) + pow(nz, 2));
			output_file << i+1 <<" -> "<< i+2 << " distance triangulated: " << ndist <<std::endl;
		}

		*/	
		/*
		   pcl::PointXYZRGB point;
		   point.x = cloud_filtered_converted->points[i].x;
		   point.y = cloud_filtered_converted->points[i].y;
		   point.z = cloud_filtered_converted->points[i].z;
		   point.r = 0;
		   point.g = 100;
		   point.b = 200;
		   point_cloud_ptr->points.push_back (point);
		 */
	}
	output_file << "highest point: "<< highest<<std::endl;
	output_file << "lowest point: "<< lowest<<std::endl;	
	//pcl::PointCloud<pcl::PointXYZ>::Ptr test (new pcl::PointCloud<pcl::PointXYZ>);
	//float surface_area = calculateAreaPolygon(test);
	//std::cout<< surface_area<<std::endl;

	/*

	   descriptor->width = k ;
	   descriptor->height = 1 ;
	   descriptor->points.resize (descriptor->width * descriptor->height) ;
	   std::cerr << descriptor->points.size() << std::endl ;
	   float voxelSize = 0.01f ;  // how to find appropriate voxel resolution
	   pcl::octree::OctreePointCloud<pcl::PointXYZ> octree (voxelSize);
	   octree.setInputCloud(descriptor) ;
	   ctree.defineBoundingBox(0.0,0.0,0.0,3.14,3.14,3.14) ;   //octree.defineBoundingBox (minX, minY, minZ, maxX, maxY, maxZ)
	   octree.addPointsFromInputCloud ();   // octree created for block

	   int k_ball=0 ;
	   float dist_ball ;
	   float square_of_dist_ball ;
	   double X,Y,Z ;
	   bool occupied ;
	   highest = cloud_ball->points[0].z;

	   for ( int i = 0; i < cloud_normals_ball->points.size() ; i++)
	   {
	   if(cloud->points[i].z > highest){
	   highest = cloud_ball->points[i].z;
	   }
	   for (int j = i+1 ; (j < cloud_normals_ball->points.size()) ; j++)     
	   {
	   x1 = cloud_ball->points[i].x ;
	   y1 = cloud_ball->points[i].y ;
	   z1 = cloud_ball->points[i].z ;
	   nu[0] = cloud_normals_ball->points[i].normal_x ;
	   nu[1] = cloud_normals_ball->points[i].normal_y ;
	   nu[2] = cloud_normals_ball->points[i].normal_z ;
	   x2 = cloud_ball->points[j].x ;
	   y2 = cloud_ball->points[j].y ;
	   z2 = cloud_ball->points[j].z ;
	   nv[0] = cloud_normals_ball->points[j].normal_x ;
	   nv[1] = cloud_normals_ball->points[j].normal_y ;
	   nv[2] = cloud_normals_ball->points[j].normal_z ;
	   square_of_dist = ((x2-x1)*(x2-x1)) + ((y2-y1)*(y2-y1)) + ((z2-z1)*(z2-z1)) ;
	   dist = sqrt(square_of_dist) ;
	//std::cerr << dist ;
	pv_pu[0] = x2-x1 ;
	pv_pu[1] = y2-y1 ;
	pv_pu[2] = z2-z1 ;
	pu_pv[0] = x1-x2 ;
	pu_pv[1] = y1-y2 ;
	pu_pv[2] = z1-z2 ;
	if ((dist > 0.0099) && (dist < 0.0101))
	{
	X = angle_between_vectors (nu, nv) ;
	Y  = angle_between_vectors (nu, pv_pu) ;
	Z = angle_between_vectors (nv, pu_pv) ;
	// output_file << descriptor->points[k].x << "\t" << descriptor->points[k].y << "\t" << descriptor->points[k].z  ;
	// output_file << "\n";	
	//k_ball = k_ball + 1 ;
	occupied = octree.isVoxelOccupiedAtPoint (X,Y,Z) ;
	if (occupied == 1)
	{
	//k_ball = k_ball + 1 ;
	std::cerr << "Objects Matched" << "\t" << k_ball << std::endl ;
	return(0); 
	}

	}

	}

	}	

	 */

	/***********************************************************************************************************************************/


	/*

	   points.open("secondItemPoints.txt");
	   myfile<<"Second point \n";
	   myfile<<"second highest "<<highest;
	   for(int i = 0; i < cloud_normals->points.size(); i++){
	   points<<cloud->points[i].x<<", "<<cloud->points[i].y<<", "<<cloud->points[i].z<<"\n";
	   if(cloud->points[i].z >= highest - (highest/100)){
	   myfile<<cloud->points[i].x<<", "<<cloud->points[i].y<<", "<<cloud->points[i].z<<"\n";
	   }
	   }
	   points.close();
	   myfile.close();

	   std::cerr << "Objects Not Matched" << "\t" << k_ball << std::endl ;

	 */

	//output_file <<"Volume: "<<volume <<std::endl;
	//output_file <<"Surface Area: "<<surface_area <<std::endl;

	return (0);
}
Example #10
		inline bool operator!=(const State& b) const    {
            return !(xCordinate_==b.x() && yCordinate_==b.y() && theta_==b.theta() && curvature_==b.curvature());
        }
// -----------------------------------------------------------------------------
//
// -----------------------------------------------------------------------------
void FeatureFaceCurvatureFilter::execute()
{
  setErrorCondition(0);
  dataCheck();
  if(getErrorCondition() < 0) { return; }

  DataContainer::Pointer sm = getDataContainerArray()->getDataContainer(getSurfaceMeshFaceLabelsArrayPath().getDataContainerName());

  // Get our Reference counted Array of Face Structures
  TriangleGeom::Pointer triangleGeom = sm->getGeometryAs<TriangleGeom>();

  // Just to double check we have everything.
  int64_t numTriangles = triangleGeom->getNumberOfTris();

  // Make sure the Face Connectivity is created because the FindNRing algorithm needs this and will
  // assert if the data is NOT in the SurfaceMesh Data Container
  ElementDynamicList::Pointer vertLinks = triangleGeom->getElementsContainingVert();
  if (NULL == vertLinks.get())
  {
    triangleGeom->findElementsContainingVert();
  }

  // get the QMap from the SharedFeatureFaces filter
  SharedFeatureFaces_t sharedFeatureFaces;

  int32_t maxFaceId = 0;
  for (int64_t t = 0; t < numTriangles; ++t)
  {
    if (m_SurfaceMeshFeatureFaceIds[t] > maxFaceId) { maxFaceId = m_SurfaceMeshFeatureFaceIds[t]; }
  }
  std::vector<int32_t> faceSizes(maxFaceId + 1, 0);
  // Loop through all the Triangles and assign each one to a unique Feature Face Id.
  for (int64_t t = 0; t < numTriangles; ++t)
  {
    faceSizes[m_SurfaceMeshFeatureFaceIds[t]]++;
  }

  // Allocate all the vectors that we need
  for (size_t iter = 0; iter < faceSizes.size(); ++iter)
  {
    FaceIds_t v;
    v.reserve(faceSizes[iter]);
    sharedFeatureFaces[iter] = v;
  }

  // Loop through all the Triangles and assign each one to a unique Feature Face Id.
  for(int64_t t = 0; t < numTriangles; ++t)
  {
    sharedFeatureFaces[m_SurfaceMeshFeatureFaceIds[t]].push_back(t);
  }

  m_TotalFeatureFaces = sharedFeatureFaces.size();
  m_CompletedFeatureFaces = 0;

#ifdef SIMPLib_USE_PARALLEL_ALGORITHMS
  tbb::task_scheduler_init init;
  bool doParallel = true;
#endif


#ifdef SIMPLib_USE_PARALLEL_ALGORITHMS
  tbb::task_group* g = new tbb::task_group;
#else

#endif
  // typedef here for convenience
  typedef SharedFeatureFaces_t::iterator SharedFeatureFaceIterator_t;

  for(SharedFeatureFaceIterator_t iter = sharedFeatureFaces.begin(); iter != sharedFeatureFaces.end(); ++iter)
  {
    QString ss = QObject::tr("Working on Face Id %1/%2").arg((*iter).first).arg(maxFaceId);
    notifyStatusMessage(getMessagePrefix(), getHumanLabel(), ss);

    FaceIds_t& triangleIds = (*iter).second;
#ifdef SIMPLib_USE_PARALLEL_ALGORITHMS
    if (doParallel == true)
    {
      g->run(CalculateTriangleGroupCurvatures(m_NRing, triangleIds, m_UseNormalsForCurveFitting,
                                              m_SurfaceMeshPrincipalCurvature1sPtr.lock(), m_SurfaceMeshPrincipalCurvature2sPtr.lock(),
                                              m_SurfaceMeshPrincipalDirection1sPtr.lock(), m_SurfaceMeshPrincipalDirection2sPtr.lock(),
                                              m_SurfaceMeshGaussianCurvaturesPtr.lock(), m_SurfaceMeshMeanCurvaturesPtr.lock(), triangleGeom,
                                              m_SurfaceMeshFaceLabelsPtr.lock(),
                                              m_SurfaceMeshFaceNormalsPtr.lock(),
                                              m_SurfaceMeshTriangleCentroidsPtr.lock(),
                                              this ) );
    }
    else
#endif
    {
      CalculateTriangleGroupCurvatures curvature(m_NRing, triangleIds, m_UseNormalsForCurveFitting,
                                                 m_SurfaceMeshPrincipalCurvature1sPtr.lock(), m_SurfaceMeshPrincipalCurvature2sPtr.lock(),
                                                 m_SurfaceMeshPrincipalDirection1sPtr.lock(), m_SurfaceMeshPrincipalDirection2sPtr.lock(),
                                                 m_SurfaceMeshGaussianCurvaturesPtr.lock(), m_SurfaceMeshMeanCurvaturesPtr.lock(), triangleGeom,
                                                 m_SurfaceMeshFaceLabelsPtr.lock(),
                                                 m_SurfaceMeshFaceNormalsPtr.lock(),
                                                 m_SurfaceMeshTriangleCentroidsPtr.lock(),
                                                 this );
      curvature();
    }
  }
  // *********************** END END END END END END  ********************************************************************

#ifdef SIMPLib_USE_PARALLEL_ALGORITHMS
  g->wait(); // Wait for all the threads to complete before moving on.
  delete g;
#endif

  /* Let the GUI know we are done with this filter */
  notifyStatusMessage(getHumanLabel(), "Complete");
}
Example #12
int
main(int argc,
     char ** argv)
{
/// @section csv_segmentation Chan-Sandberg-Vese segmentation
///
/// @subsection csv_theory Theory
/// Since the routine contains too many free parameters to sensibly factor into a separate
/// function, all the code is kept in main(). Here's a rough explanation of what Chan-Sandberg-Vese
/// is all about, based on paper @cite Getreuer2012.
///
/// The Chan-Vese method seeks a contour @f$\mathcal{C}@f$ which minimizes the functional
/// @f[
///    \mathcal{F}[I;\,\mathcal{C},\,c_{1},\,c_{2}]=
///        \mu\mathrm{Length}(\mathcal{C})+
///        \nu\mathrm{Area}(\mathcal{C})+
///        \lambda_{1}\int_{\mathcal{C}}|I-c_{1}|^{2}\,\mathrm{d}x\mathrm{d}y+
///        \lambda_{2}\int_{\Omega\setminus\mathcal{C}}|I-c_{2}|^{2}\,\mathrm{d}x\mathrm{d}y\,,
/// @f]
/// where
///    - the single-channel image @f$I=I(x,\,y)@f$ is defined on the region @f$\Omega=[0,\,a]\times[0,\,b]@f$;
///         - regions in the integral limits, @f$\mathcal{C}@f$ and @f$\Omega\setminus\mathcal{C}@f$,
///           denote the region enclosed by the contour and the region outside the contour, respectively;
///    -  @f$\mu(=0.5)@f$, @f$\nu(=0)@f$, @f$\lambda_{1}(=1)@f$ and @f$\lambda_{2}(=1)@f$ are free parameters,
///       whereby only @f$\nu@f$ can be negative (default values in parentheses);
///    - @f$c_{1}@f$ and @f$c_{2}@f$ are constants that depend on the information of the regions enclosed by and
///      outside of the contour.
///
/// Instead of dealing with @f$\mathcal{C}@f$ explicitly, it's customary to define a level set function @f$u(x,\,y;\,t)@f$
/// so that its zero-level iso-surface (also: zero level set) coincides with the contour:
/// @f$\mathcal{C}=\{\Omega\ni(x,\,y)\,:\,u(x,\,y;\,t)=0\forall t\}@f$. This in turn leads us to a new definition
/// of the functional:
/// @f[
///      \mathcal{F}[I;\,u,\,c_{1},\,c_{2}] =
///             \mu\left(\int_{\Omega}|\nabla H(u)|\,\mathrm{d}x\mathrm{d}y\right)^{p}+
///             \nu\int_{\Omega}H(u)\,\mathrm{d}x\mathrm{d}y+
///             \lambda_{1}\int_{\Omega}|I-c_{1}|^{2}H(u)\,\mathrm{d}x\mathrm{d}y+
///             \lambda_{2}\int_{\Omega}|I-c_{2}|^{2}(1-H(u))\,\mathrm{d}x\mathrm{d}y\,.
/// @f]
/// In our implementation we've picked @f$p=1@f$, so that the first integral reduces to
/// @f[
///      \left.\mu\left(\int_{\Omega}|\nabla H(u)|\,\mathrm{d}x\mathrm{d}y\right)^{p}\right|_{p=1}=
///       \mu\int_{\Omega}\delta(u)|\nabla u|\,\mathrm{d}x\mathrm{d}y\,,
/// @f]
/// where @f$H(x)@f$ and @f$\delta(x)=H'(x)@f$ are Heaviside's step and Dirac's delta functions.
/// In this prescription @f$c_{1}@f$ and @f$c_{2}@f$ are now region averages and take the following form:
/// @f[
///      c_{1}=\frac{\int_{\Omega}IH(u)\mathrm{d}x\mathrm{d}y}{\int_{\Omega}H(u)\,\mathrm{d}x\mathrm{d}y}\,,\quad
///      c_{2}=\frac{\int_{\Omega}I(1-H(u))\mathrm{d}x\mathrm{d}y}{\int_{\Omega}(1 - H(u))\,\mathrm{d}x\mathrm{d}y}\,.
/// @f]
/// For practical reasons the functions are replaced by smooth/regularized versions (see regularized_heaviside() and
/// regularized_delta()):
/// @f[
///      H_{\epsilon}(x)=\frac{1}{2}\left[1+\frac{2}{\pi}\arctan\left(\frac{x}{\epsilon}\right)\right]\,,\quad
///      \delta_{\epsilon}(x)=\frac{\epsilon}{\pi\left(\epsilon^{2}+x^{2}\right)}\,,
/// @f]
/// with @f$\epsilon=1@f$ by default.
/// The interpretation of the functional @f$\mathcal{F}@f$ is the following:
///     - the first term penalizes the length of @f$\mathcal{C}@f$;
///     - the second term penalizes the area enclosed by the curve;
///     - the 3rd and 4th term penalize region averages inside and outside of the contour; in other words
///       it keeps track of the discrepancy between the two regions.
///
/// A stationary solution to @f$\mathcal{F}@f$, or equivalently the equation of motion (e.o.m) for the contour,
/// can be found by solving it with Euler-Lagrange equation, which results in
/// @f[
///    u_{t} = \delta_{\epsilon}(u)\left[\mu\kappa-\nu-\lambda_{1}(I-c_{1})^{2}+\lambda_{2}(I-c_{2})^{2}\right]\,,
/// @f]
/// where @f$\kappa=\nabla\cdot\left(\frac{\nabla u}{|\nabla u|}\right)@f$ is curvature of @f$u@f$.
///
/// If the (still 2D) image @f$I@f$ has @f$N@f$ channels @f$\{I_{i}(x,\,y)\}_{i=1}^{N}@f$, there should still
/// be a single level set @f$u@f$, which leads us to the following functional:
/// @f[
///    \mathcal{F}[I;\,u,\,\mathbf{c_{1}},\,\mathbf{c}_{2}]=
///      \mu\int_{\Omega}|\nabla H(u)|\mathrm{d}x\mathrm{d}y+
///      \nu\int_{\Omega}H(u)\mathrm{d}x\mathrm{d}y+
///      \int_{\Omega}\frac{1}{N}\sum_{i=1}^{N}\lambda_{1}^{(i)}|I_{i}-c_{1}^{(i)}|^{2}H(u)\mathrm{d}x\mathrm{d}y+
///      \int_{\Omega}\frac{1}{N}\sum_{i=1}^{N}\lambda_{2}^{(i)}|I_{i}-c_{2}^{(i)}|^{2}(1-H(u))\mathrm{d}x\mathrm{d}y\,.
/// @f]
/// Variables @f$\{c_{1}^{(i)},\,c_{2}^{(i)}\}_{i=1}^{N}@f$ retain their original meaning,
/// @f[
///     c_{1}^{(i)}=\frac{\int_{\Omega}I_{i}H(u)\mathrm{d}x\mathrm{d}y}{\int_{\Omega}H(u)\mathrm{d}x\mathrm{d}y}\,,\quad
///     c_{2}^{(i)}=\frac{\int_{\Omega}I_{i}(1-H(u))\mathrm{d}x\mathrm{d}y}{\int_{\Omega}(1-H(u))\mathrm{d}x\mathrm{d}y}
///     \quad\forall i=\{1,\,\ldots,\,N\}\,;
/// @f]
/// the constants @f$\{\lambda_{1}^{(i)},\,\lambda_{2}^{(i)}\}_{i=1}^{N}@f$ are defined for each channel separately.
/// This implementation considers only grayscale (@f$N=1@f$) and RGB (@f$N=3@f$) images, for which
/// @f$\lambda_{1}^{(i)}=\lambda_{2}^{(i)}=1@f$ by default for every channel @f$i@f$.
/// The corresponding e.o.m reads
/// @f[
///      u_{t}=\delta_{\epsilon}(u)\left[\mu\kappa-\nu-
///            \frac{1}{N}\sum_{i=1}^{N}\lambda_{1}^{(i)}\left(I_{i}-c_{1}^{(i)}\right)^{2}+
///            \frac{1}{N}\sum_{i=1}^{N}\lambda_{2}^{(i)}\left(I_{i}-c_{2}^{(i)}\right)^{2}\right]\,.
/// @f]
///
/// @subsection csv_numsch Numerical scheme
///
/// The finite difference expression for the curvature @f$\kappa@f$ is explained in curvature(). The advantage of this scheme
/// is that we only need the nearest neighbours of the current point while keeping the derivative centered at it,
/// whereas a naive implementation would use more distant points. Since we're dealing with a finite domain and therefore
/// with boundaries, we don't have to extend the region by two pixels in each direction; it's enough to duplicate the border pixels.
///
/// Rest of the calculation is actually quite straightforward -- the zero level set is iteratively updated with
/// @f[
///      u_{i,j}^{n+1}=u_{i,j}^{n}+\mathrm{d}t\;\delta_{\epsilon}(u_{i,j}^{n})\left[\mu\kappa_{i,j}^{n}-\nu-
///      \frac{1}{N}\sum_{k=1}^N\lambda_{1}^{(k)}\left(I_{i,j}^{(k)}-c_{1}^{n,(k)}\right)^{2}+
///      \frac{1}{N}\sum_{k=1}^N\lambda_{2}^{(k)}\left(I_{i,j}^{(k)}-c_{2}^{n,(k)}\right)^{2}\right]\,.
/// @f]
/// The scheme is fully explicit and is implemented with ordinary matrix operations. The first term in the brackets
/// has already been discussed; the second term is trivial; the final two terms are explained in region_variance() and
/// variance_penalty().
///
/// There are various ways to initialize the level set, and since we're solving a differential equation, different initial
/// conditions lead to different outcomes. The simplest way is to let the user draw either a rectangular or a circular contour.
/// The level set will be initialized with @f$+1@f$'s inside the contour and with @f$-1@f$'s outside of it.
/// A better (and here the default) initialization is the checkerboard
/// @f[
///      u(i,\,j;\,0)=\sin\left(\frac{\pi}{5}i\right)\sin\left(\frac{\pi}{5}j\right)\,,
/// @f]
/// because it converges faster to a solution (see levelset_checkerboard()). The solution is reached when the maximum number
/// of iterations, @f$T_\max@f$, is reached or when @f$||u_{i,j}^{n+1}-u_{i,j}^{n}||_{2}\leqslant\delta ||\bar{I}||_{2}@f$,
/// where the subscript denotes the @f$L_{2}@f$-norm, @f$\delta(=10^{-3})@f$ is the tolerance parameter and @f$\bar{I}@f$ is
/// the average intensity of the image (averaged across the channels).
///
/// @subsection csv_summary Summary
///
/// The main logic described above starts with the timestep loop (look for the comment below); everything preceding
/// it is actually sugar coating just to make the program usable for anyone.
///
/// If it isn't clear from above text or the code below, here is the list of variables which the user can pass as an argument
/// (the default values in the parentheses): @f$\mu(=0.5)@f$, @f$\nu(=0)@f$, @f$\mathrm{d}t(=1)@f$,
/// @f$\lambda_{1}^{(i)}(=1)@f$ and @f$\lambda_{2}^{(i)}(=1)@f$ @f$\forall i=1\ldots N@f$, @f$\epsilon(=1)@f$,
/// @f$\delta(=10^{-3})@f$, @f$T_\max@f$(=INT_MAX), @f$N(=1\;\mbox{or}\;3)@f$ (number of channels).
///
/// Other general options include:
///    - object selection (-s) -- the region enclosed by the contour will be cut out and placed onto white canvas and saved;
///    - region inversion (-I) -- sometimes the ROI is inverted; there's an option to circumvent that (goes with -s option);
///    - video output (-V) -- see contour evolution in a video (*.avi, the same filename as the image; see VideoWriterManager);
///    - overlay text (-O) -- puts overlay text (timesteps) on the video (goes with the previous option);
///    - frame rate (-f) -- frame rate of the video;
///    - line color (-l) -- color of the contour line (see Colors);
///    - rectangular (-R) or circular (-C) contour -- lets the user draw it on the image (see InteractiveData and its subclasses);
///    - grayscale image (-g) -- sometimes we just want to perform it on a black-and-white image, even though the original source is RGB.
///
/// For Perona-Malik-specific parameters @f$K@f$, @f$L@f$, @f$T@f$, see perona_malik().
///
/// @sa curvature, region_variance, variance_penalty, levelset_checkerboard, VideoWriterManager, InteractiveData, Colors, perona_malik

  double mu, nu, eps, tol, dt, fps, K, L, T;
  int max_steps;
  std::vector<double> lambda1,
                      lambda2;
  std::string input_filename,
              text_position,
              line_color_str;
  bool grayscale         = false,
       write_video       = false,
       overlay_text      = false,
       object_selection  = false,
       invert            = false,
       segment           = false,
       rectangle_contour = false,
       circle_contour    = false;
  ChanVese::TextPosition pos = ChanVese::TextPosition::TopLeft;
  cv::Scalar contour_color = ChanVese::Colors::blue;

//-- Parse command line arguments
//   Negative values in multitoken are not an issue, because it doesn't make much sense
//   to use negative values for lambda1 and lambda2
  try
  {
    namespace po = boost::program_options;
    po::options_description desc("Allowed options", get_terminal_width());
    desc.add_options()
      ("help,h",                                                                               "this message")
      ("input,i",            po::value<std::string>(&input_filename),                          "input image")
      ("mu",                 po::value<double>(&mu) -> default_value(0.5),                     "length penalty parameter (must be positive or zero)")
      ("nu",                 po::value<double>(&nu) -> default_value(0),                       "area penalty parameter")
      ("dt",                 po::value<double>(&dt) -> default_value(1),                       "timestep")
      ("lambda1",            po::value<std::vector<double>>(&lambda1) -> multitoken(),         "penalty of variance inside the contour (default: 1's)")
      ("lambda2",            po::value<std::vector<double>>(&lambda2) -> multitoken(),         "penalty of variance outside the contour (default: 1's)")
      ("epsilon,e",          po::value<double>(&eps) -> default_value(1),                      "smoothing parameter in Heaviside/delta")
      ("tolerance,t",        po::value<double>(&tol) -> default_value(0.001),                  "tolerance in stopping condition")
      ("max-steps,N",        po::value<int>(&max_steps) -> default_value(-1),                  "maximum nof iterations (negative means unlimited)")
      ("fps,f",              po::value<double>(&fps) -> default_value(10),                     "video fps")
      ("overlay-pos,P",      po::value<std::string>(&text_position) -> default_value("TL"),    "overlay tex position; allowed only: TL, BL, TR, BR")
      ("line-color,l",       po::value<std::string>(&line_color_str) -> default_value("blue"), "contour color (allowed only: black, white, R, G, B, Y, M, C")
      ("edge-coef,K",        po::value<double>(&K) -> default_value(10),                       "coefficient for enhancing edge detection in Perona-Malik")
      ("laplacian-coef,L",   po::value<double>(&L) -> default_value(0.25),                     "coefficient in the gradient FD scheme of Perona-Malik (must be [0, 1/4])")
      ("segment-time,T",     po::value<double>(&T) -> default_value(20),                       "number of smoothing steps in Perona-Malik")
      ("segment,S",          po::bool_switch(&segment),                                        "segment the image with Perona-Malik beforehand")
      ("grayscale,g",        po::bool_switch(&grayscale),                                      "read in as grayscale")
      ("video,V",            po::bool_switch(&write_video),                                    "enable video output (changes the extension to '.avi')")
      ("overlay-text,O",     po::bool_switch(&overlay_text),                                   "add overlay text")
      ("invert-selection,I", po::bool_switch(&invert),                                         "invert selected region (see: select)")
      ("select,s",           po::bool_switch(&object_selection),                               "separate the region encolosed by the contour (adds suffix '_selection')")
      ("rectangle,R",        po::bool_switch(&rectangle_contour),                              "select rectangular contour interactively")
      ("circle,C",           po::bool_switch(&circle_contour),                                 "select circular contour interactively")
    ;
    po::variables_map vm;
    po::store(po::command_line_parser(argc, argv).options(desc).run(), vm);
    po::notify(vm);

    if(vm.count("help"))
    {
      std::cout << desc << "\n";
      return EXIT_SUCCESS;
    }
    if(! vm.count("input"))
      msg_exit("Error: you have to specify input file name!");
    else if(vm.count("input") && ! boost::filesystem::exists(input_filename))
      msg_exit("Error: file \"" + input_filename + "\" does not exists!");
    if(vm.count("dt") && dt <= 0)
      msg_exit("Cannot have negative or zero timestep: " + std::to_string(dt) + ".");
    if(vm.count("mu") && mu < 0)
      msg_exit("Length penalty parameter cannot be negative: " + std::to_string(mu) + ".");
    if(vm.count("lambda1"))
    {
      if(grayscale && lambda1.size() != 1)
        msg_exit("Too many lambda1 values for a grayscale image.");
      else if(! grayscale && lambda1.size() != 3)
        msg_exit("Number of lambda1 values must be 3 for a colored input image.");
      else if(grayscale && lambda1[0] < 0)
        msg_exit("The value of lambda1 cannot be negative.");
      else if(! grayscale && (lambda1[0] < 0 || lambda1[1] < 0 || lambda1[2] < 0))
        msg_exit("Any value of lambda1 cannot be negative.");
    }
    else if(! vm.count("lambda1"))
    {
      if(grayscale) lambda1 = {1};
      else          lambda1 = {1, 1, 1};
    }
    if(vm.count("lambda2"))
    {
      if(grayscale && lambda2.size() != 1)
        msg_exit("Too many lambda2 values for a grayscale image.");
      else if(! grayscale && lambda2.size() != 3)
        msg_exit("Number of lambda2 values must be 3 for a colored input image.");
      else if(grayscale && lambda2[0] < 0)
        msg_exit("The value of lambda2 cannot be negative.");
      else if(! grayscale && (lambda2[0] < 0 || lambda2[1] < 0 || lambda2[2] < 0))
        msg_exit("Any value of lambda2 cannot be negative.");
    }
    else if(! vm.count("lambda2"))
    {
      if(grayscale) lambda2 = {1};
      else          lambda2 = {1, 1, 1};
    }
    if(vm.count("eps") && eps < 0)
      msg_exit("Cannot have negative smoothing parameter: " + std::to_string(eps) + ".");
    if(vm.count("tol") && tol < 0)
      msg_exit("Cannot have negative tolerance: " + std::to_string(tol) + ".");
    if(vm.count("overlay-pos"))
    {
      if     (boost::iequals(text_position, "TL")) pos = ChanVese::TextPosition::TopLeft;
      else if(boost::iequals(text_position, "BL")) pos = ChanVese::TextPosition::BottomLeft;
      else if(boost::iequals(text_position, "TR")) pos = ChanVese::TextPosition::TopRight;
      else if(boost::iequals(text_position, "BR")) pos = ChanVese::TextPosition::BottomRight;
      else
        msg_exit("Invalid text position requested.\n"\
                 "Correct values are: TL -- top left\n"\
                 "                    BL -- bottom left\n"\
                 "                    TR -- top right\n"\
                 "                    BR -- bottom right"\
                );
    }
    if(vm.count("line-color"))
    {
      if     (boost::iequals(line_color_str, "red"))     contour_color = ChanVese::Colors::red;
      else if(boost::iequals(line_color_str, "green"))   contour_color = ChanVese::Colors::green;
      else if(boost::iequals(line_color_str, "blue"))    contour_color = ChanVese::Colors::blue;
      else if(boost::iequals(line_color_str, "black"))   contour_color = ChanVese::Colors::black;
      else if(boost::iequals(line_color_str, "white"))   contour_color = ChanVese::Colors::white;
      else if(boost::iequals(line_color_str, "magenta")) contour_color = ChanVese::Colors::magenta;
      else if(boost::iequals(line_color_str, "yellow"))  contour_color = ChanVese::Colors::yellow;
      else if(boost::iequals(line_color_str, "cyan"))    contour_color = ChanVese::Colors::cyan;
      else
        msg_exit("Invalid contour color requested.\n"\
                 "Correct values are: red, green, blue, black, white, magenta, yellow, cyan.");
    }
    if(vm.count("laplacian-coef") && (L > 0.25 || L < 0))
      msg_exit("The Laplacian coefficient in Perona-Malik segmentation must be between 0 and 0.25.");
    if(vm.count("segment-time") && (T < L))
      msg_exit("The segmentation duration must exceed the value of Laplacian coefficient, " +
               std::to_string(L) + ".");
    if(rectangle_contour && circle_contour)
      msg_exit("Cannot initialize with both rectangular and circular contour");
  }
  catch(std::exception & e)
  {
    msg_exit("error: " + std::string(e.what()));
  }

//-- Read the image (grayscale or BGR color)
  cv::Mat _img;
  if(grayscale) _img = cv::imread(input_filename, CV_LOAD_IMAGE_GRAYSCALE);
  else          _img = cv::imread(input_filename, CV_LOAD_IMAGE_COLOR);
  if(! _img.data)
    msg_exit("Error on opening \"" + input_filename + "\" (probably not an image)!");

//-- Second conversion needed since we want to display a colored contour on a grayscale image
  cv::Mat img;
  if(grayscale) cv::cvtColor(_img, img, CV_GRAY2RGB);
  else          img = _img;
  _img.release();

//-- Determine the constants and define functionals
  max_steps = max_steps < 0 ? std::numeric_limits<int>::max() : max_steps;
  const int h = img.rows;
  const int w = img.cols;
  const int nof_channels = grayscale ? 1 : img.channels();
  const auto heaviside = std::bind(regularized_heaviside, std::placeholders::_1, eps);
  const auto delta = std::bind(regularized_delta, std::placeholders::_1, eps);

//-- Construct the level set
  cv::Mat u;
  if(rectangle_contour || circle_contour)
  {
    std::unique_ptr<InteractiveData> id;
    cv::startWindowThread();
    cv::namedWindow(WINDOW_TITLE, cv::WINDOW_NORMAL);

    if     (rectangle_contour)
      id = std::unique_ptr<InteractiveDataRect>(new InteractiveDataRect(&img, contour_color));
    else if(circle_contour)
      id = std::unique_ptr<InteractiveDataCirc>(new InteractiveDataCirc(&img, contour_color));

    if(id) cv::setMouseCallback(WINDOW_TITLE, on_mouse, id.get());
    cv::imshow(WINDOW_TITLE, img);
    cv::waitKey();
    cv::destroyWindow(WINDOW_TITLE);

    if(id)
    {
      if(! id -> is_ok())
        msg_exit("You must specify the contour with non-zero dimensions");
      u = id -> get_levelset(h, w);
    }
  }
  else
    u = levelset_checkerboard(h, w);

//-- Set up the video writer (and save the first frame)
  VideoWriterManager vwm;
  if(write_video)
  {
    vwm = VideoWriterManager(input_filename, img, contour_color, fps, pos, overlay_text);
    vwm.write_frame(u, overlay_text ? "t = 0" : "");
  }

//-- Split the channels
  std::vector<cv::Mat> channels;
  channels.reserve(nof_channels);
  cv::split(img, channels);
  if(grayscale) channels.erase(channels.begin() + 1, channels.end());

//-- Smooth the image with Perona-Malik
  cv::Mat smoothed_img;
  if(segment)
  {
    smoothed_img = perona_malik(channels, h, w, K, L, T);
    channels.clear();
    cv::split(smoothed_img, channels);
    cv::imwrite(add_suffix(input_filename, "pm"), smoothed_img);
  }

//-- Find intensity sum and derive the stopping condition
  cv::Mat intensity_avg = cv::Mat::zeros(h, w, CV_64FC1);
#pragma omp parallel for num_threads(nof_channels)
  for(int k = 0; k < nof_channels; ++k)
  {
    cv::Mat channel(h, w, intensity_avg.type());
    channels[k].convertTo(channel, channel.type());
#pragma omp critical
    intensity_avg += channel; // += on the shared Mat must be serialized
  }
  intensity_avg /= nof_channels;
  double stop_cond = tol * cv::norm(intensity_avg, cv::NORM_L2);
  intensity_avg.release();

//-- Timestep loop
  for(int t = 1; t <= max_steps; ++t)
  {
    cv::Mat u_diff(cv::Mat::zeros(h, w, CV_64FC1));

//-- Channel loop
#pragma omp parallel for num_threads(nof_channels)
    for(int k = 0; k < nof_channels; ++k)
    {
      cv::Mat channel = channels[k];
//-- Find the average regional variances
      const double c1 = region_variance(channel, u, h, w, ChanVese::Region::Inside, heaviside);
      const double c2 = region_variance(channel, u, h, w, ChanVese::Region::Outside, heaviside);

//-- Calculate the contribution of one channel to the level set
      const cv::Mat variance_inside = variance_penalty(channel, h, w, c1, lambda1[k]);
      const cv::Mat variance_outside = variance_penalty(channel, h, w, c2, lambda2[k]);
#pragma omp critical
      u_diff += -variance_inside + variance_outside; // serialize the shared accumulation
    }
//-- Calculate the curvature (divergence of normalized gradient)
    const cv::Mat kappa = curvature(u, h, w);

//-- Mash the terms together
    u_diff = dt * (mu * kappa - nu + u_diff / nof_channels);

//-- Run delta function on the level set
    cv::Mat u_cp = u.clone();
    cv::parallel_for_(cv::Range(0, h * w), ParallelPixelFunction(u_cp, w, delta));

//-- Shift the level set
    cv::multiply(u_diff, u_cp, u_diff);
    const double u_diff_norm = cv::norm(u_diff, cv::NORM_L2);
    u += u_diff;

//-- Save the frame
    if(write_video) vwm.write_frame(u, overlay_text ? "t = " + std::to_string(t) : "");

//-- Check if we have achieved the desired precision
    if(u_diff_norm <= stop_cond) break;
  }

//-- Select the region enclosed by the contour and save it to the disk
  if(object_selection)
    cv::imwrite(add_suffix(input_filename, "selection"), separate(img, u, h, w, invert));

  return EXIT_SUCCESS;
}
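For reference, here are minimal sketches of the helpers referenced in the doc comment above — regularized_heaviside(), regularized_delta(), levelset_checkerboard() and the curvature term kappa = div(grad u / |grad u|). They are reconstructed from the formulas in the text, not taken from the original sources; in particular the small eta2 regularizer that keeps the denominator nonzero is an assumption:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <cmath>

// H_eps(x) = 1/2 [1 + 2/pi * atan(x/eps)]   (see csv_theory)
double regularized_heaviside(double x, double eps) { return 0.5 * (1 + 2 / M_PI * std::atan(x / eps)); }

// delta_eps(x) = eps / (pi (eps^2 + x^2))
double regularized_delta(double x, double eps) { return eps / (M_PI * (eps * eps + x * x)); }

// u(i, j; 0) = sin(pi/5 i) sin(pi/5 j)
cv::Mat levelset_checkerboard(int h, int w)
{
  cv::Mat u(h, w, CV_64FC1);
  for(int i = 0; i < h; ++i)
    for(int j = 0; j < w; ++j)
      u.at<double>(i, j) = std::sin(M_PI / 5 * i) * std::sin(M_PI / 5 * j);
  return u;
}

// kappa = div(grad u / |grad u|) with central differences; border pixels are
// duplicated (cv::BORDER_REPLICATE) as described in csv_numsch.
cv::Mat curvature(const cv::Mat & u, int h, int w)
{
  const double eta2 = 1e-8; // keeps |grad u| away from zero (assumed regularizer)
  cv::Mat upad;
  cv::copyMakeBorder(u, upad, 1, 1, 1, 1, cv::BORDER_REPLICATE);
  cv::Mat kappa = cv::Mat::zeros(h, w, CV_64FC1);
  for(int i = 1; i <= h; ++i)
    for(int j = 1; j <= w; ++j)
    {
      const double ux  = (upad.at<double>(i, j + 1) - upad.at<double>(i, j - 1)) / 2;
      const double uy  = (upad.at<double>(i + 1, j) - upad.at<double>(i - 1, j)) / 2;
      const double uxx = upad.at<double>(i, j + 1) - 2 * upad.at<double>(i, j) + upad.at<double>(i, j - 1);
      const double uyy = upad.at<double>(i + 1, j) - 2 * upad.at<double>(i, j) + upad.at<double>(i - 1, j);
      const double uxy = (upad.at<double>(i + 1, j + 1) - upad.at<double>(i + 1, j - 1)
                        - upad.at<double>(i - 1, j + 1) + upad.at<double>(i - 1, j - 1)) / 4;
      // div(grad u/|grad u|) = (uxx uy^2 - 2 ux uy uxy + uyy ux^2) / (ux^2 + uy^2)^(3/2)
      kappa.at<double>(i - 1, j - 1) =
        (uxx * uy * uy - 2 * ux * uy * uxy + uyy * ux * ux) / std::pow(ux * ux + uy * uy + eta2, 1.5);
    }
  return kappa;
}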