Code example #1
void CTRCalibration::Initialize()
{
	// set variables
	m_calibratedCTR = NULL;
	m_nPointsAtEachConf = 1;
	m_tol = 1e-15;
	m_maxIter = 1000;
	m_numOfGridPoints = 100;

	// set init params
	CTR* tempRobot = CTRFactory::buildCTR("");
	int num_of_free_params = tempRobot->GetFreeParameters().size();
	m_params.resize(num_of_free_params);
	for(int i = 0 ; i < m_params.size(); i++)
		m_params(i) = *(tempRobot->GetFreeParameters()[i]);
	delete tempRobot;

	// set scale factors
	m_scaleFactor.resize(num_of_free_params);
	double temp_scale_factor[9] = {100, 100, 1, 100, 100, 1, 100, 100, 1};
	//double temp_scale_factor[8] = {100, 100, 1, 100, 100, 1, 100, 100};
	for(int i = 0 ; i < m_scaleFactor.size(); i++)
		m_scaleFactor(i) = temp_scale_factor[i];
	

	// init timer
	Tic();
}
Code example #2
File: extrai_bic.c  Project: mhgoncalves/wild
int main(int argc, char** argv)
{
    timer *tic, *toc;
    CImage *cimg=NULL;
    Image *mask=NULL;
    Histogram *hist=NULL;

    if (argc != 4) {
        fprintf(stderr,"usage: extrai_bic <image> <mask> <outputfile>\n");
        exit(-1);
    }

    cimg = ReadJPEGFile(argv[1]);
    mask = ReadImage(argv[2]);

    tic = Tic();
    hist = BIC(cimg, mask);
    toc = Toc();
    printf("BIC extracted in %f milliseconds\n\n",CTime(tic, toc));

    FILE *fp;
    fp = fopen(argv[3],"w");
    if (fp == NULL) {
        fprintf(stderr,"Cannot open %s\n",argv[3]);
        exit(-1);
    }

    WriteFHist(hist, fp);

    DestroyCImage(&cimg);
    DestroyHistogram(&hist);
    DestroyImage(&mask);
    fclose(fp);
    return(0);
}
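
The timer *t1 = Tic(); ... t2 = Toc(); CTime(t1,t2) pattern above (also used in examples #5, #9, #10, and #17 below) comes from IFT-style image-processing libraries, where Tic() and Toc() heap-allocate timestamps that are later differenced. As a minimal sketch of what such an API plausibly looks like, assuming timer wraps struct timeval and elapsed time is reported in milliseconds (both details are assumptions; the library's own timer header is authoritative):

#include <stdlib.h>
#include <sys/time.h>

typedef struct timeval timer;

/* Sketch: allocate and return a stamp of the current wall-clock time.
 * Toc() is assumed to do the same; the two names only mark intent. */
timer *Tic(void)
{
    timer *t = (timer *)malloc(sizeof(timer));
    gettimeofday(t, NULL);
    return t;
}

timer *Toc(void)
{
    return Tic();
}

/* Elapsed milliseconds between two stamps; CompTime() in examples #5 and #9
 * is presumably the same computation under another name. */
float CTime(timer *t1, timer *t2)
{
    return (t2->tv_sec  - t1->tv_sec)  * 1000.0f +
           (t2->tv_usec - t1->tv_usec) / 1000.0f;
}

Note that the callers above never free the returned stamps, so each measurement leaks a few bytes; harmless in short-lived demos, but worth knowing.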
Code example #3
File: ppu.c  Project: wolfgarnet/numpycbe
double SNRM2( PyArrayObject *pyobj1, unsigned int shadersize, unsigned int *shader )
{
	Tic();
	Operation_t op1;
	op1.shaderSize = shadersize;
	op1.EA_shader  = shader;
	op1.obj[0]     = pyobj1;
	op1.num_SPES   = speThreads;

	float *results[6];

	unsigned int state = 2, r;
	unsigned int i;
	//printf( "Sending states to SPEs\n" );
  	for ( i = 0 ; i < speThreads ; i++ )
	{
  		results[i] = (float *)memalign( 128, ( 4 + 127 ) & ~127 );
  		r = (unsigned int)results[i];
  		spe_pointer_addr[i][0] = (unsigned int)&op1;
  		spe_in_mbox_write ( speData[i].spe_ctx, &state, 1, SPE_MBOX_ALL_BLOCKING );
  		spe_in_mbox_write ( speData[i].spe_ctx, &r, 1, SPE_MBOX_ALL_BLOCKING );
	}

  	// Waiting for SPEs!
  	//printf( "Waiting for SPEs\n" );
  	unsigned int checked = 0;
	while( checked < speThreads )
	{
		if ( spe_out_mbox_status( speData[checked].spe_ctx ) )
		{
			spe_out_mbox_read( speData[checked].spe_ctx, &r, 1 );
			checked++;
		}
	}
	float r1 = 0.0f;
	for ( i = 0 ; i < speThreads ; i++ )
	{
		r1 += results[i][0];
		// Cleanup
		free( results[i] );
	}

	r1 = sqrtf( r1 );

	double time = Toc_d();
	PrintTicToc( "Finished at ", Toc() );

	printf( "Result=%f\n", r1 );

	return time;
}
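
The numpycbe snippets (this one, plus #6, #7, and #21 below) use a stateful convention instead: a void Tic() records a global start time, and Toc_d() returns the elapsed seconds as a double. A sketch of plausible definitions, assuming a single global stamp and gettimeofday() (the real ppu.c may differ; Toc() and PrintTicToc() are omitted because their types are not visible in these snippets):

#include <sys/time.h>

static double tic_start;   /* hypothetical global start stamp */

static double wall_seconds(void)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec + 1e-6 * tv.tv_usec;
}

void Tic(void)     { tic_start = wall_seconds(); }
double Toc_d(void) { return wall_seconds() - tic_start; }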
Code example #4
void CTRCalibration::Calibrate()
{
	double error_prev = 1e10;
	double max_error = 0.0;
	double error = ComputeErrorOnTrainingSet(max_error);
	int iter = 0;
	double init_Step = 0.0001;
	double step = init_Step;

	double error_val = 0;
	double error_val_max = 0;
	int id;

	// start timer
	Tic();
	::std::cout << "Calibration started" << ::std::endl;
	while( ::std::abs(error - error_prev) > m_tol && iter < m_maxIter )
	{
		error_prev = error;

		// update parameters
		max_error = 0.0;
		error = UpdateParams(step, max_error);

		error_val = this->ComputeErrorOnValidationSet(error_val_max, id);
		// update stepsize
		if (error > error_prev)
			step = ::std::max(0.95*step, 0.5*init_Step);

		// update iter
		iter++;

		// print intermediate results
		if (iter % 10 == 0)
			PrintOnConsole(error, max_error, iter, step, error_val);

		if (iter % 50 == 0)
			PrintInFile(error, max_error, error_val);
	}
	m_stream.close();

	// print result
	::std::cout << "Calibrated model parameters:" << m_params.transpose() << ::std::endl;  

	// build calibrated CTR
	m_calibratedCTR = CTRFactory::buildCTR("");
	SetParamsToCTR(m_calibratedCTR, m_params);
}
Code example #5
int main(int argc, char *argv[]) 
{
  Image  *img[2]; 
  Kernel *K;
  timer  *t1, *t2; 

  /*--------------------------------------------------------*/

  void *trash = malloc(1);                 
  struct mallinfo info;   
  int MemDinInicial, MemDinFinal;
  free(trash); 
  info = mallinfo();
  MemDinInicial = info.uordblks;

  /*--------------------------------------------------------*/

  if (argc != 4)
    Error("Usage: linearfilter <input.scn> <output.scn> <adj. radius>","windowlevel.c");


  t1 = Tic();

  img[0] = ReadImage(argv[1]); 
  K      = GaussianKernel(atof(argv[3]));
  img[1] = LinearFilter(img[0],K);
  WriteImage(img[1],argv[2]); 
  DestroyImage(img[0]); 
  DestroyImage(img[1]); 
  DestroyKernel(K);

  t2 = Toc();
  fprintf(stdout,"Linear filtering in %f ms\n",CompTime(t1,t2)); 


  /* ------------------------------------------------------ */

  info = mallinfo();
  MemDinFinal = info.uordblks;
  if (MemDinInicial!=MemDinFinal)
    printf("\n\nDynamic memory was not completely deallocated (%d, %d)\n",
	   MemDinInicial,MemDinFinal);   

  return(0); 
}
Code example #6
File: strsm.c  Project: wolfgarnet/numpycbe
void RUNSGEMM( unsigned int size )
{
	float *A = (float*)malloc( size*size*4 );
	float *B = (float*)malloc( size*size*4 );

	unsigned int i;

	for(i=0;i<size*size;i++)
	{
		A[i] = 1;
		B[i] = 1;
	}

	Tic();

	cblas_sgemm( 101, 111, 111, size, size, size, -1, A, size, A, size, 1, B, size );

	PrintTicToc( "Time taken: ", Toc() );
}
Code example #7
File: ppu.c  Project: wolfgarnet/numpycbe
double SSCAL( PyArrayObject *pyobj1, PyArrayObject *pyscalar1, unsigned int shadersize, unsigned int *shader )
{
	Tic();
	Operation_t op1;
	op1.shaderSize = shadersize;
	op1.EA_shader  = shader;
	op1.obj[0]     = pyobj1;
	op1.scalar[0]  = pyscalar1;
	op1.num_SPES   = speThreads;


	unsigned int state = 1, r;
	unsigned int i;
	//printf( "Sending states to SPEs\n" );
  	for ( i = 0 ; i < speThreads ; i++ )
	{
  		spe_pointer_addr[i][0] = (unsigned int)&op1;
  		spe_in_mbox_write ( speData[i].spe_ctx, &state, 1, SPE_MBOX_ALL_BLOCKING );
	}

  	// Waiting for SPEs!
  	//printf( "Waiting for SPEs\n" );
  	unsigned int checked = 0;
	while( checked < speThreads )
	{
		if ( spe_out_mbox_status( speData[checked].spe_ctx ) )
		{
			spe_out_mbox_read( speData[checked].spe_ctx, &r, 1 );
			checked++;
		}
	}

	double time = Toc_d();
	PrintTicToc( "Finished at ", Toc() );

	for( i = 0 ; i < 3 ; i++ )
	{
		printf( "%u=%f\n", i, ((float*)pyobj1->blockData[0])[i] );
	}

	return time;
}
Code example #8
File: main.cpp  Project: jfalquez/Junkbox
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
// da main
int main( int argc, char** argv )
{
//	rpg::Robot Agnos;
//	Agnos.Init("../../RPG/dev/jmf/GwSim/rpg_config.xml");


    // init driver
	CameraDevice Cam;
	Cam.SetProperty("Host", "localhost:5556");
	Cam.InitDriver("NodeCam");
	std::vector< rpg::ImageWrapper >   Images;       // Image to show on screen


	long unsigned int count = 0;
	double t = Tic();

    while(1) {
        if( Cam.Capture(Images) ) {
			cv::imshow( "SimL", Images[0].Image );
//			cv::imshow( "SimR", Images[1] );
		}


		count ++;
		if(count > 5000) {
			break;
		}


		char c;
		c = cv::waitKey(2);
		if (c == 27) break;
    }

	double time = Toc(t);
	printf("Framerate %.2f\n",count/time);


    return 0;
}
Code example #9
File: clientUDP.c  Project: vitorbraga/networks1
int main(int argc, char *argv[]){

 	int sockfd, n;
 	timer *t1, *t2; 
   	struct sockaddr_in servaddr, cliaddr;
   	struct hostent *hp;
  	char sendline[9999];
   	char recvline[9999], resp[9999];

   	if (argc != 3) {
      	printf("usage: ./clientUDP <IP address> <port>\n");
      	exit(1);
   	}

	sprintf(resp, "\nChoose an option:\nCheck Position: [Position [posX,posY]]\n1 - All registered merchants: [1]\n2 - All merchants within 100 meters radius: [2 posX posY]\n3 - Merchants in 100 meters area with category: [3 posX posY category]\n4 - Merchant info (id): [4 merchantId]\n5 - Punctuate merchant: [5 merchantId newPoints]\n\n");
	printf("%s", resp); 

	char *host = argv[1]; 			/* ip */
	int port = atoi(argv[2]);		/* port */


	hp = gethostbyname(host);
   	sockfd = socket(AF_INET, SOCK_DGRAM, 0);

   	bzero(&servaddr, sizeof(servaddr));
   	servaddr.sin_family = AF_INET;
   	bcopy(hp->h_addr, (char *) &servaddr.sin_addr.s_addr, hp->h_length);
   	servaddr.sin_port = htons(port);

   	while (fgets(sendline, sizeof(sendline), stdin) != NULL) {
   		t1 = Tic();
    	sendto(sockfd, sendline, strlen(sendline), 0, (struct sockaddr *)&servaddr, sizeof(servaddr));
      	n = recvfrom(sockfd, recvline, sizeof(recvline) - 1, 0, NULL, NULL);
      	recvline[n] = '\0';
      	fputs(recvline, stdout);
      	t2 = Toc();
  		fprintf(stdout, "Message sent and received in %f ms\n", CompTime(t1,t2)); 
   	}

   	return 0;
}
Code example #10
File: diffwatershed.c  Project: gagallo7/OpenCLStudy
int main(int argc, char **argv)
{
  timer    *t1=NULL,*t2=NULL;
  Image    *img=NULL,*grad=NULL;
  ImageForest *fst=NULL;
  CImage   *cimg=NULL;
  Set      *Obj=NULL,*Bkg=NULL;
  char     outfile[100];
  char     *file_noext;
  /*--------------------------------------------------------*/

  void *trash = malloc(1);
  struct mallinfo info;
  int MemDinInicial, MemDinFinal;
  free(trash);
  info = mallinfo();
  MemDinInicial = info.uordblks;

  /*--------------------------------------------------------*/

  if (argc!=4){
    fprintf(stderr,"Usage: diffwatershed <image.pgm> <gradient.pgm> <seeds.txt>\n");
    fprintf(stderr,"image.pgm: image to overlay the watershed lines on it\n");
    fprintf(stderr,"gradient.pgm: gradient image to compute the watershed segmentation\n");
    fprintf(stderr,"seeds.txt: seed pixels\n");
    exit(-1);
  }

  img   = ReadImage(argv[1]);
  grad  = ReadImage(argv[2]);
  ReadSeeds(argv[3],&Obj,&Bkg);
  fst   = CreateImageForest(img);

  file_noext = strtok(argv[1],".");

  // Add object and background seeds

  t1 = Tic();
  DiffWatershed(grad,fst,Obj,Bkg,NULL); 
  t2 = Toc();
  fprintf(stdout,"Adding object and background seeds in %f ms\n",CTime(t1,t2));
  cimg = DrawLabeledRegions(img,fst->label);
  sprintf(outfile,"%s_result-a.ppm",file_noext);
  WriteCImage(cimg,outfile);
  DestroyCImage(&cimg);
  
  // Remove background trees 

  t1 = Tic();
  DiffWatershed(grad,fst,NULL,NULL,Bkg);
  t2 = Toc();
  fprintf(stdout,"Removing background trees in %f ms\n",CTime(t1,t2));
  cimg = DrawLabeledRegions(img,fst->label);
  sprintf(outfile,"%s_result-b.ppm",file_noext);
  WriteCImage(cimg,outfile);
  DestroyCImage(&cimg);

  // Adding background trees back to the forest  

  t1 = Tic();
  DiffWatershed(grad,fst,NULL,Bkg,NULL);
  t2 = Toc();
  fprintf(stdout,"Adding the removed background trees in %f ms\n",CTime(t1,t2));
  cimg = DrawLabeledRegions(img,fst->label);
  sprintf(outfile,"%s_result-c.ppm",file_noext);
  WriteCImage(cimg,outfile);
  DestroyCImage(&cimg);


  DestroyImageForest(&fst);
  DestroyImage(&img);
  DestroyImage(&grad);
  DestroySet(&Obj);
  DestroySet(&Bkg);


  /* ---------------------------------------------------------- */

  info = mallinfo();
  MemDinFinal = info.uordblks;
  if (MemDinInicial!=MemDinFinal)
    printf("\n\nDynamic memory was not completely deallocated (%d, %d)\n",
	   MemDinInicial,MemDinFinal);

  return(0);
}
Code example #11
File: timer.cpp  Project: erik-nelson/berkeley_sfm
Timer::Timer() {
  Tic();
}
Code example #12
// CLUSTERING METHOD
std::vector<poseEstimationSI::clusterPtr> poseEstimationSI::poseClustering(std::vector < posePtr > & bestPoses)
{
	// Sort clusters
	std::sort(bestPoses.begin(), bestPoses.end(), pose::compare);

	std::vector< boost::shared_ptr<poseCluster> > centroids;
	std::vector< boost::shared_ptr<cluster> > transformations;    

	float _orientationDistance;	
	float _positionDistance;
	float _scaleDistance;

	bool found;
	
	int nearestCentroid;

	///////////////////////////////
	// Initialize first centroid //
	///////////////////////////////

	// If there are no poses, return
	if(bestPoses.empty())
		return transformations;


	centroids.push_back(boost::shared_ptr<poseCluster> (new poseCluster(0,bestPoses[0])));
	//centroids.back()->poses.push_back(bestPoses[0]);

	Tic();	
	// For each pose
	for(size_t p=1; p< bestPoses.size(); ++p)
	{
		found=false;
		for(size_t c=0; c < centroids.size(); ++c)
		{
			//std::cout << std::endl << "\tcheck if centroid:" << std::endl;
			// Compute (squared) distance to the cluster center
			_orientationDistance=orientationDistance(bestPoses[p],bestPoses[centroids[c]->poseIndex]);
			_positionDistance=positionDistance(bestPoses[p],bestPoses[centroids[c]->poseIndex]);
			_scaleDistance=scaleDistance(bestPoses[p],bestPoses[centroids[c]->poseIndex]);
			// If the cluster is nearer than a threshold add current pose to the cluster
			//if((_orientationDistance>=poseAngleStepCos) && _positionDistance<=((model->maxModelDistSquared)*0.05*0.05))
			//if((_orientationDistance>=cos(acos(poseAngleStepCos)) ) && _positionDistance<=((model->maxModelDistSquared)*0.1*0.1))
			if((_orientationDistance>=pointPair::angleStepCos) && (_positionDistance<=model->halfDistanceStepSquared) && (equalFloat(_scaleDistance,0.0))) // CHECK THIS
			{

				nearestCentroid=c;
				found=true;
				break;
			}		
		}			
			
		if(found)
		{
			centroids[nearestCentroid]->update(bestPoses[p]);
		}
		else
		{
			boost::shared_ptr<poseCluster> clus(new poseCluster(p,bestPoses[p]));
			centroids.push_back(clus);
		}
	}
	Tac();	

	//////////////////////
	// Compute clusters //
	//////////////////////
	std::sort(centroids.begin(), centroids.end(), poseCluster::compare);
	//std::cout << std::endl << "Number of poses:" <<bestPoses.size() << std::endl << std::endl;
	//std::cout << std::endl << "Best pose votes:" <<bestPoses[0]->votes << std::endl << std::endl;
	//for(size_t c=0; c < centroids.size(); ++c)
	if(filterOn)
	{
		for(size_t c=0; c < centroids.size(); ++c)
		//for(size_t c=0; c < 1; ++c)
		{
			//std::cout << centroids[c]->votes << std::endl;
			transformations.push_back(boost::shared_ptr<cluster> (new cluster( boost::shared_ptr<pose> (new pose (centroids[c]->votes, boost::shared_ptr<transformation>( new transformation(centroids[c]->rotation(),centroids[c]->translation(),centroids[c]->scaling() ) ) ) ),centroids[c]->poses ) ) );
		}
	}
	else
	{
		transformations.push_back(boost::shared_ptr<cluster> (new cluster( boost::shared_ptr<pose> (new pose (centroids[0]->votes, boost::shared_ptr<transformation>( new transformation(centroids[0]->rotation(),centroids[0]->translation(),centroids[0]->scaling() ) ) ) ),centroids[0]->poses ) ) );
	}

	return transformations;
}
Code example #13
File: CaptureDelegate.cpp  Project: aharrison24/HAL
inline double TocMS(double dTic) {
    return ( Tic() - dTic)*1000.;
}
Code example #14
File: CaptureDelegate.cpp  Project: aharrison24/HAL
inline double Toc(double dTic) {
    return Tic() - dTic;
}
Code example #15
File: CaptureDelegate.cpp  Project: aharrison24/HAL
inline double RealTime() {
    return Tic();
}
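
Examples #13 through #16 (and the Toc(t) call in example #8) show a third convention, from HAL: Tic() returns the current time directly as a double in seconds, so that Toc(dTic) = Tic() - dTic and TocMS() scales the difference by 1000. Given those helpers, Tic() itself is presumably something like this sketch (the gettimeofday() body is an assumption):

#include <sys/time.h>

// Sketch: wall-clock time in seconds, consistent with Toc(dTic) = Tic() - dTic.
inline double Tic() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec + 1e-6 * tv.tv_usec;
}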
Code example #16
File: CaptureDelegate.cpp  Project: aharrison24/HAL

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////

inline double Toc(double dTic) {
    return Tic() - dTic;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

inline double TocMS(double dTic) {
    return ( Tic() - dTic)*1000.;
}

double ttime = Tic();
long unsigned int count = 0;


CaptureDelegate::CaptureDelegate(const int bufferCount, int nNumImages, int nImageWidth, int nImageHeight) : m_maxFrames(-1), m_timecodeFormat(0) {
    m_frameCount = 0;
    m_refCount = 0;
    m_nImageWidth = nImageWidth;
    m_nImageHeight = nImageHeight;
    m_nNumImages = nNumImages;

    m_nBufferCount = bufferCount;
    pthread_mutex_init(&m_mutex, NULL);

    //fill the used and free buffers
    for (unsigned int ii = 0; ii < m_nBufferCount; ii++) {
Code example #17
File: watergray.c  Project: afalcao/ift-demo
int main(int argc, char **argv)
{
  timer    *t1=NULL,*t2=NULL;
  Image    *img=NULL,*grad=NULL,*marker=NULL;
  Image    *label=NULL;
  CImage   *cimg=NULL;
  AdjRel   *A=NULL;
  char     outfile[100];
  char     *file_noext;
  /*--------------------------------------------------------*/

  void *trash = malloc(1);
  struct mallinfo info;
  int MemDinInicial, MemDinFinal;
  free(trash);
  info = mallinfo();
  MemDinInicial = info.uordblks;

  /*--------------------------------------------------------*/

  if (argc!=4){
    fprintf(stderr,"Usage: watergray <image.pgm> <gradient.pgm> <H>\n");
    fprintf(stderr,"image.pgm: grayscale image to overlay the watershed lines on it\n");
    fprintf(stderr,"gradient.pgm: gradient image to compute the watershed segmentation\n");
    fprintf(stderr,"H: an integer that will be added to the gradient to eliminate irrelevant basins (typically <= 100)\n");
    exit(-1);
  }

  img    = ReadImage(argv[1]);
  grad   = ReadImage(argv[2]);

  file_noext = strtok(argv[1],".");

  // A grayscale marker can be created by any extensive operation:
  // A value H may be added to eliminate irrelevant basins, for instance.

  marker = AddValue(grad,atoi(argv[3]));

  // Watershed from grayscale marker

  A = Circular(1.0); // try also higher adjacency radii: 1.5, 2.5, etc.

  t1 = Tic();

  label = WaterGray(grad,marker,A);

  t2 = Toc();

  fprintf(stdout,"Processing time in %f ms\n",CTime(t1,t2));

  // Draw watershed lines

  cimg = DrawLabeledRegions(img,label);
  sprintf(outfile,"%s_result.ppm",file_noext);
  WriteCImage(cimg,outfile);
  DestroyImage(&grad);
  DestroyImage(&img);
  DestroyImage(&marker);
  DestroyCImage(&cimg);
  DestroyImage(&label);
  DestroyAdjRel(&A);


  /* ---------------------------------------------------------- */

  info = mallinfo();
  MemDinFinal = info.uordblks;
  if (MemDinInicial!=MemDinFinal)
    printf("\n\nDynamic memory was not completely deallocated (%d, %d)\n",
	   MemDinInicial,MemDinFinal);

  return(0);
}
Code example #18
File: InvertedIndex.C  Project: aalto-cbir/PicSOM
  bool InvertedIndex::ReadMetaDataFile(bool force, bool nodata, bool wrlocked, 
			       string filename, size_t maxn) {
    bool debug = false;
    string msg = "Index::ReadMetaDataFile() : ";

    if (force || /*nodata ||*/ wrlocked) {
      cerr << "Index::ReadMetaDataFile(" << force << ", " << nodata << ", "
	   << wrlocked << ")" << endl;
      return ShowError(msg+"strange arguments");
    }

    // obs! feature_target should be solved here!
    if (filename=="")
      filename = metfile_str;
    ifstream met(filename.c_str());
    if (!met)
      return ShowError(msg+"could not read <"+filename+">");

    FloatVectorSet cols(DataSetSize());
    string comment = "Automatically generated meta data\n"
      "Field names (components):\n";

    Tic("ReadMetaDataFile");

    size_t linecount=0;
    for (;;) {
      string line;
      getline(met, line);
      if (!met)
	break;
    
      if (line.find_first_not_of(" \t")==string::npos)
	continue;

      if (line[0]=='#') {
	if (line.find("indices")!=string::npos)
	  binary_classindices = true;

	continue;
      }

      int i = line.size()-1;
      while (i>=0 && isspace(line[i]))
	line.erase(i--);

      Tic("GroundTruthExpression");
      bool expand = false; // was true (and much slower) until 5.4.2006
      ground_truth ivec = db->GroundTruthExpression(line, feature_target, -1,
						    expand);
      Tac("GroundTruthExpression");

      if (debug)
	db->GroundTruthSummaryTable(ivec);

      if (ivec.empty()) {
	meta_data_fields.clear();
	return ShowError(msg+"<"+filename+"> field \"", line, "\" not known");
      }

      if (!nodata) {
	Tic("loop-1");
	FloatVector fvec(ivec.size());
	for (int j=0; j<fvec.Length(); j++)
	  fvec[j] = ivec[j];
	cols.AppendCopy(fvec);
	linecount++;
	Tac("loop-1");
      }

      int ncols = 1;
      pair<string,int> mdf(line, ncols);
      meta_data_fields.push_back(mdf);

      stringstream ss;
      ss << " " << line << " (" << ncols << ")";
      comment += ss.str();
      // cout << "!!! [" << line << "] OK !!!" << endl;
      if (maxn != 0 && linecount >= maxn)
	break;
    }

    if (!nodata) {
      bool check_type = true, check_restr = true;  // added 3.5.2006
      Tic("loop-2");
      data.Delete();
      data.VectorLength(cols.Nitems());
      for (int i=0; i<cols.VectorLength(); i++) {
	if (check_type && !db->ObjectsTargetTypeContains(i, feature_target))
	  continue;

	if (check_restr && !db->OkWithRestriction(i))
	  continue;

	FloatVector v(cols.Nitems(), NULL, db->LabelP(i));
	v.Number(i);
	for (int j=0; j<v.Length(); j++)
	  v[j] = cols[j][i];
	data.AppendCopy(v);
      }
      data.SetDescription(comment.c_str());
      Tac("loop-2");
    }

    Tac("ReadMetaDataFile");

    string log = "Read meta data file <"+ShortFileName(filename)+">";
    if (!nodata)
      log += " and processed to "+ToStr(data.Nitems())+" vectors of length "+
	ToStr(data.VectorLength());
    else
      log += " without processing";

    WriteLog(log);

    return true;
  }
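
PicSOM's InvertedIndex above uses yet another style: Tic("tag")/Tac("tag") bracket named sections, including nested ones ("GroundTruthExpression" runs inside "ReadMetaDataFile"), and accumulate a total per tag across calls. A toy sketch of that accumulate-by-name idea, using an invented fixed-size table (PicSOM's real bookkeeping is class-based and richer):

#include <string.h>
#include <sys/time.h>

#define MAX_TAGS 64

struct tag_clock { const char *tag; double start; double total; };
static struct tag_clock clocks[MAX_TAGS];
static int n_clocks;

static double now_seconds(void)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec + 1e-6 * tv.tv_usec;
}

/* Find or register a named stopwatch; assumes fewer than MAX_TAGS distinct
 * tags, all passed as string literals. */
static struct tag_clock *find_clock(const char *tag)
{
    int i;
    for (i = 0; i < n_clocks; i++)
        if (strcmp(clocks[i].tag, tag) == 0)
            return &clocks[i];
    clocks[n_clocks].tag = tag;
    clocks[n_clocks].total = 0.0;
    return &clocks[n_clocks++];
}

void Tic(const char *tag) { find_clock(tag)->start = now_seconds(); }

void Tac(const char *tag)
{
    struct tag_clock *c = find_clock(tag);
    c->total += now_seconds() - c->start;
}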
Code example #19
// METHOD THAT RECEIVES SURFLET CLOUDS (USED FOR THE MIAN DATASET)
std::vector<cluster> poseEstimationSV::poseEstimationCore(pcl::PointCloud<pcl::PointNormal>::Ptr cloud)
{

	int bestPoseAlpha;
	int bestPosePoint;
	int bestPoseVotes;
	
	pcl::PointIndices normals_nan_indices;


	float alpha;
	unsigned int alphaBin,index;
	// Iterators
	std::vector<int>::iterator sr; // scene reference point
	pcl::PointCloud<pcl::PointNormal>::iterator si;	// scene paired point
	std::vector<pointPairSV>::iterator sameFeatureIt; // same key on hash table
	std::vector<boost::shared_ptr<pose> >::iterator bestPosesIt;

	Eigen::Vector4f feature;

	/*std::cout << "\tDownsample dense surflet cloud... " << std::endl;
	cout << "\t\tSurflet cloud size before downsampling: " << cloud->size() << endl;
 	// Create the filtering object
  	pcl::VoxelGrid<pcl::PointNormal> sor;
  	sor.setInputCloud (cloud);
  	sor.setLeafSize (model->distanceStep,model->distanceStep,model->distanceStep);
  	sor.filter (*cloudNormals);
	cout << "\t\tSurflet cloud size after downsampling: " << cloudNormals->size() << endl;
	std::cout<< "\tDone" << std::endl;*/

	cloudNormals=cloud;
	/*boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer2 = objectModel::viewportsVis(cloudNormals);

  	while (!viewer2->wasStopped ())
  	{
   		viewer2->spinOnce (100);
    		boost::this_thread::sleep (boost::posix_time::microseconds (100000));
  	}*/
	/*viewer2 = objectModel::viewportsVis(cloudNormals);

  	while (!viewer2->wasStopped ())
  	{
   		viewer2->spinOnce (100);
    		boost::this_thread::sleep (boost::posix_time::microseconds (100000));
  	}*/
	/*std::cout<< "  Re-estimate normals... " << std::endl;
	pcl::PointCloud<pcl::PointXYZ>::Ptr modelCloudPoint(new pcl::PointCloud<pcl::PointXYZ>);
	for(si=cloudNormals->begin(); si<cloudNormals->end(); ++si)
	{
		modelCloudPoint->push_back(pcl::PointXYZ(si->x,si->y,si->z));
	}
	model->computeNormals(modelCloudPoint,cloudNormals);
	std::cout << "  Done" << std::endl;*/

	//////////////////////////////////////////////////////////////////////////////
	// Filter again to remove spurious normals nans (and it's associated point) //
	//////////////////////////////////////////////////////////////////////////////

	for (unsigned int i = 0; i < cloudNormals->points.size(); ++i) 
	{
		if (isnan(cloudNormals->points[i].normal[0]) || isnan(cloudNormals->points[i].normal[1]) || isnan(cloudNormals->points[i].normal[2]))
		{
	   		normals_nan_indices.indices.push_back(i);
		}
	}


	pcl::ExtractIndices<pcl::PointNormal> nan_extract;
	nan_extract.setInputCloud(cloudNormals);
	nan_extract.setIndices(boost::make_shared<pcl::PointIndices> (normals_nan_indices));
	nan_extract.setNegative(true);
	nan_extract.filter(*cloudWithNormalsDownSampled);
	std::cout<< "\tCloud size after removing NaN normals: " << cloudWithNormalsDownSampled->size() << endl;

	//////////////
	// VOTATION //
	//////////////

	

	/////////////////////////////////////////////
	// Extract reference points from the scene //
	/////////////////////////////////////////////
	//pcl::RandomSample< pcl::PointCloud<pcl::PointNormal> > randomSampler;
	//randomSampler.setInputCloud(cloudWithNormalsDownSampled);

	int numberOfPoints=(int) (cloudWithNormalsDownSampled->size () )*referencePointsPercentage;
	int totalPoints=(int) (cloudWithNormalsDownSampled->size ());
	std::cout << "\tUniformly sampling " << numberOfPoints << " points (" << referencePointsPercentage*100 << "%)... ";
	referencePointsIndices->indices.clear();
	extractReferencePointsUniform(referencePointsPercentage,totalPoints);
	std::cout << "Done" << std::endl;
	//std::cout << referencePointsIndices->indices.size() << std::endl;
	//pcl::PointCloud<pcl::PointNormal>::Ptr testeCloud=pcl::PointCloud<pcl::PointNormal>::Ptr (new pcl::PointCloud<pcl::PointNormal>);
	/*for(sr=referencePointsIndices->indices.begin(); sr < referencePointsIndices->indices.end(); ++sr)
	{
		testeCloud->push_back(cloudWithNormalsDownSampled->points[*sr]);
	}*/
	//std::cout <<  totalPoints << std::endl;
	/*boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer2 = objectModel::viewportsVis(cloudWithNormalsDownSampled);

  	while (!viewer2->wasStopped ())
  	{
   		viewer2->spinOnce (100);
    		boost::this_thread::sleep (boost::posix_time::microseconds (100000));
  	}*/
	//////////////
	// Votation //
	//////////////
	// Clear all transformations stored
	std::cout<< "\tVotation... ";
  //std::vector<int>::size_type sz;

  bestPoses.clear();
  //sz = bestPoses.capacity();

  std::vector<int> matches_per_feature;



	std::vector<double> scene_to_global_time;
	std::vector<double> reset_accumulator_time;
	std::vector<double> ppf_time;
	std::vector<double> hash_time;
	std::vector<double> matching_time;
	std::vector<double> get_best_peak_time;

	//Tic();
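	// NOTE: only the first reference point is processed here (begin()+1); the commented-out line below is the full loop over all reference points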
	for(sr=referencePointsIndices->indices.begin(); sr < referencePointsIndices->indices.begin()+1; ++sr)
	//for(sr=referencePointsIndices->indices.begin(); sr < referencePointsIndices->indices.end(); ++sr)
	{
		Tic();
		Eigen::Vector3f scenePoint=cloudWithNormalsDownSampled->points[*sr].getVector3fMap();
		Eigen::Vector3f sceneNormal=cloudWithNormalsDownSampled->points[*sr].getNormalVector3fMap ();


		Eigen::Vector3f cross=sceneNormal.cross (Eigen::Vector3f::UnitX ()). normalized();
		Eigen::Affine3f rotationSceneToGlobal;

		// Get transformation from scene frame to global frame
		if(sceneNormal.isApprox(Eigen::Vector3f::UnitX (),0.00001))
		{
			rotationSceneToGlobal=Eigen::AngleAxisf(0.0,Eigen::Vector3f::UnitX ());
		}
		else
		{
			cross=sceneNormal.cross (Eigen::Vector3f::UnitX ()). normalized();

			if (isnan(cross[0]))
			{
				std::cout << "YA"<< std::endl;
				exit(-1);
				rotationSceneToGlobal=Eigen::AngleAxisf(0.0,Eigen::Vector3f::UnitX ());
			}
			else
			{
				rotationSceneToGlobal=Eigen::AngleAxisf(acosf (sceneNormal.dot (Eigen::Vector3f::UnitX ())),cross);
			}
		}

		Eigen::Affine3f transformSceneToGlobal = Eigen::Translation3f ( rotationSceneToGlobal* ((-1)*scenePoint)) * rotationSceneToGlobal;

		Tac();
		scene_to_global_time.push_back(timeEnd);
		//////////////////////
		// Choose best pose //
		//////////////////////

		// Reset pose accumulator
		Tic();
		for(std::vector<std::vector<int> >::iterator accumulatorIt=accumulator.begin();accumulatorIt < accumulator.end(); ++accumulatorIt)
		{
			std::fill(accumulatorIt->begin(),accumulatorIt->end(),0); 
		}
		Tac();
		reset_accumulator_time.push_back(timeEnd);

		for(si=cloudWithNormalsDownSampled->begin(); si < cloudWithNormalsDownSampled->end();++si)
		{
			// if same point, skip point pair
			if( (cloudWithNormalsDownSampled->points[*sr].x==si->x) && (cloudWithNormalsDownSampled->points[*sr].y==si->y) && (cloudWithNormalsDownSampled->points[*sr].z==si->z))
			{
				continue;
			}	
			float squaredDist=(cloudWithNormalsDownSampled->points[*sr].x-si->x)*
					  (cloudWithNormalsDownSampled->points[*sr].x-si->x)+
					  (cloudWithNormalsDownSampled->points[*sr].y-si->y)*
					  (cloudWithNormalsDownSampled->points[*sr].y-si->y)+
					  (cloudWithNormalsDownSampled->points[*sr].z-si->z)*
					  (cloudWithNormalsDownSampled->points[*sr].z-si->z);
			if(squaredDist>model->maxModelDistSquared)
			{
				continue;
			}
			Tic();
			float dist=sqrt(squaredDist);
			// Compute PPF
			pointPairSV PPF=pointPairSV(cloudWithNormalsDownSampled->points[*sr],*si,transformSceneToGlobal);
			Tac();
			ppf_time.push_back(timeEnd);
			Tic();
			// Compute index
			index=PPF.getHash(*si,model->distanceStepInverted);
			Tac();
			hash_time.push_back(timeEnd);
			// If distance between point pairs is bigger than the maximum for this model, skip point pair
			if(index>=pointPairSV::maxHash)
			{
				//std::cout << "DEBUG" << std::endl;
				continue;
			}

			// If there is no similar point pair features in the model, skip point pair and avoid computing the alpha
			if(model->hashTable[index].size()==0)
				continue; 

			int matches=0; //Tests
			Tic();
			// Iterate over similar point pairs
			for(sameFeatureIt=model->hashTable[index].begin(); sameFeatureIt<model->hashTable[index].end(); ++sameFeatureIt)
			{
				// Vote on the reference point and angle (and object)
				alpha=sameFeatureIt->alpha-PPF.alpha; // alpha values between [-360,360]

				// alpha values should be between [-180,180] ANGLE_MAX = 2*PI
				if(alpha<(-PI))
					alpha=ANGLE_MAX+alpha;
				else if(alpha>(PI))
					alpha=alpha-ANGLE_MAX;

				alphaBin=static_cast<unsigned int> ( round((alpha+PI)*pointPair::angleStepInverted) ); // division is slower than multiplication

				if(alphaBin>=pointPair::angleBins)
				{	
					alphaBin=0;
				}

				accumulator[sameFeatureIt->id][alphaBin]+=sameFeatureIt->weight;
				++matches;//Tests
			}
			Tac();
			matching_time.push_back(timeEnd);
			matches_per_feature.push_back(matches);
		}
		//Tac();

		// Choose best pose (highest peak on the accumulator[peak with more votes])
		bestPosePoint=0;
		bestPoseAlpha=0;
		bestPoseVotes=0;
		Tic();
		for(size_t p=0; p < model->modelCloud->size(); ++p)
		{
			for(unsigned int a=0; a < pointPair::angleBins; ++a)
			{
				if(accumulator[p][a]>bestPoseVotes)
				{
					bestPoseVotes=accumulator[p][a];
					bestPosePoint=p;
					bestPoseAlpha=a;
				}
			}
		}
		Tac();
		get_best_peak_time.push_back(timeEnd);

		// A candidate pose was found
		if(bestPoseVotes!=0)
		{
			// Compute and store transformation from model to scene
			bestPoses.push_back(pose( bestPoseVotes,model->modelToScene(bestPosePoint,transformSceneToGlobal,static_cast<float>(bestPoseAlpha)*pointPair::angleStep-PI) ));
		}
		else 
		{
			continue;
		}

		// Choose poses whose votes are a percentage above a given threshold of the best pose
		accumulator[bestPosePoint][bestPoseAlpha]=0; 	// This is more efficient than having an if condition to verify if we are considering the best pose again
		for(size_t p=0; p < model->modelCloud->size(); ++p)
		{
			for(unsigned int a=0; a < pointPair::angleBins; ++a)
			{
				if(accumulator[p][a]>=accumulatorPeakThreshold*bestPoseVotes)
				{
					bestPoses.push_back(pose( accumulator[p][a],model->modelToScene(p,transformSceneToGlobal,static_cast<float>(a)*pointPair::angleStep-PI) ));
				}
			}
		}

   /* if (sz!=bestPoses.capacity()) {
      sz = bestPoses.capacity();
      std::cout << "capacity changed: " << sz << '\n';
      exit(-1);
    }*/

	}
//Tac();
	std::cout << "Done" << std::endl;

	if(bestPoses.size()==0)
	{

		clusters.clear();
		return clusters;
	}


	//////////////////////
	// Compute clusters //
	//////////////////////

	std::cout << "\tCompute clusters... ";
Tic();
	clusters=poseClustering(bestPoses);
	std::cout << "Done" << std::endl;
	clusters[0].voting_time=timeEnd;
	std::cout << timeEnd << " " << clusters[0].voting_time << std::endl;
Tac();
	clusters[0].clustering_time=timeEnd;
	clusters[0].matches_per_feature=matches_per_feature;
  clusters[0].scene_to_global_time=scene_to_global_time;
	clusters[0].reset_accumulator_time=reset_accumulator_time;
	clusters[0].ppf_time=ppf_time;
	clusters[0].hash_time=hash_time;
	clusters[0].matching_time=matching_time;
	clusters[0].get_best_peak_time=get_best_peak_time;

	std::cout << "clusters size:" << clusters.size()<< std::endl;
	return clusters;
}
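
Examples #19 and #20 read a variable named timeEnd immediately after each Tac(), so their Tic()/Tac() pair apparently mirrors the numpycbe sketch after example #3 but leaves the elapsed seconds in a global instead of returning them. A minimal sketch under that assumption:

#include <sys/time.h>

static double time_start;   /* hypothetical start stamp */
double timeEnd;             /* read by the calling code after Tac() */

static double wall_clock(void)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec + 1e-6 * tv.tv_usec;
}

void Tic(void) { time_start = wall_clock(); }
void Tac(void) { timeEnd = wall_clock() - time_start; }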
Code example #20
// METHOD THAT RECEIVES POINT CLOUDS (OPEN MP)
std::vector<cluster> poseEstimationSV::poseEstimationCore_openmp(pcl::PointCloud<pcl::PointXYZ>::ConstPtr cloud)
{
Tic();
	std::vector <std::vector < pose > > bestPosesAux;
	bestPosesAux.resize(omp_get_num_procs());

	//int bestPoseAlpha;
	//int bestPosePoint;
	//int bestPoseVotes;
	
	Eigen::Vector3f scenePoint;
	Eigen::Vector3f sceneNormal;


	pcl::PointIndices normals_nan_indices;
	pcl::ExtractIndices<pcl::PointNormal> nan_extract;

	float alpha;
	unsigned int alphaBin,index;
	// Iterators
	//unsigned int sr; // scene reference point
	pcl::PointCloud<pcl::PointNormal>::iterator si;	// scene paired point
	std::vector<pointPairSV>::iterator sameFeatureIt; // same key on hash table
	std::vector<boost::shared_ptr<pose> >::iterator bestPosesIt;

	Eigen::Vector4f feature;
	Eigen::Vector3f _pointTwoTransformed;
	std::cout<< "\tCloud size: " << cloud->size() << endl;
	//////////////////////////////////////////////
	// Downsample point cloud using a voxelgrid //
	//////////////////////////////////////////////

	pcl::PointCloud<pcl::PointXYZ>::Ptr cloudDownsampled(new pcl::PointCloud<pcl::PointXYZ> ());
  	// Create the filtering object
  	pcl::VoxelGrid<pcl::PointXYZ> sor;
  	sor.setInputCloud (cloud);
  	sor.setLeafSize (model->distanceStep,model->distanceStep,model->distanceStep);
  	sor.filter (*cloudDownsampled);
	std::cout<< "\tCloud size after downsampling: " << cloudDownsampled->size() << endl;

	// Compute point cloud normals (using cloud before downsampling information)
	std::cout<< "\tCompute normals... ";
	cloudNormals=model->computeSceneNormals(cloudDownsampled);
	std::cout<< "Done" << endl;

	/*boost::shared_ptr<pcl_visualization::PCLVisualizer> viewer2 = objectModel::viewportsVis(cloudFilteredNormals);

  	while (!viewer2->wasStopped ())
  	{
   		viewer2->spinOnce (100);
    		boost::this_thread::sleep (boost::posix_time::microseconds (100000));
  	}*/

	/*boost::shared_ptr<pcl_visualization::PCLVisualizer> viewer2 = objectModel::viewportsVis(model->modelCloud);

  	while (!viewer2->wasStopped ())
  	{
   		viewer2->spinOnce (100);
    		boost::this_thread::sleep (boost::posix_time::microseconds (100000));
  	}*/
	//////////////////////////////////////////////////////////////////////////////
	// Filter again to remove spurious normals nans (and it's associated point) //
	//////////////////////////////////////////////////////////////////////////////

	for (unsigned int i = 0; i < cloudNormals->points.size(); ++i) 
	{
		if (isnan(cloudNormals->points[i].normal[0]) || isnan(cloudNormals->points[i].normal[1]) || isnan(cloudNormals->points[i].normal[2]))
		{
	   		normals_nan_indices.indices.push_back(i);
		}
	}

	nan_extract.setInputCloud(cloudNormals);
	nan_extract.setIndices(boost::make_shared<pcl::PointIndices> (normals_nan_indices));
	nan_extract.setNegative(true);
	nan_extract.filter(*cloudWithNormalsDownSampled);
	std::cout<< "\tCloud size after removing NaN normals: " << cloudWithNormalsDownSampled->size() << endl;


	/////////////////////////////////////////////
	// Extract reference points from the scene //
	/////////////////////////////////////////////

	//pcl::RandomSample< pcl::PointCloud<pcl::PointNormal> > randomSampler;
	//randomSampler.setInputCloud(cloudWithNormalsDownSampled);
	// Create the filtering object
	int numberOfPoints=(int) (cloudWithNormalsDownSampled->size () )*referencePointsPercentage;
	int totalPoints=(int) (cloudWithNormalsDownSampled->size ());
	std::cout << "\tUniformly sampling " << numberOfPoints << " points (" << referencePointsPercentage*100 << "%)... ";
	referencePointsIndices->indices.clear();
	extractReferencePointsUniform(referencePointsPercentage,totalPoints);
	std::cout << "Done" << std::endl;
	//std::cout << referencePointsIndices->indices.size() << std::endl;

	//////////////
	// Votation //
	//////////////

	std::cout<< "\tVotation... ";

	omp_set_num_threads(omp_get_num_procs());
	//omp_set_num_threads(1);
	//int iteration=0;

        bestPoses.clear();
	// scenePoint and sceneNormal are assigned inside the loop, so they must be private;
	// the undeclared "alphaScene" has been dropped from the original private list
	#pragma omp parallel for private(alpha,alphaBin,sameFeatureIt,index,feature,si,_pointTwoTransformed,scenePoint,sceneNormal) //reduction(+:iteration)  //nowait
	for(unsigned int sr=0; sr < referencePointsIndices->indices.size(); ++sr)
	{
	
		//++iteration;
		//std::cout << "iteration: " << iteration << " thread:" << omp_get_thread_num() << std::endl;
		//printf("Hello from thread %d, nthreads %d\n", omp_get_thread_num(), omp_get_num_threads());
		scenePoint=cloudWithNormalsDownSampled->points[referencePointsIndices->indices[sr]].getVector3fMap();
		sceneNormal=cloudWithNormalsDownSampled->points[referencePointsIndices->indices[sr]].getNormalVector3fMap();

		// Get transformation from scene frame to global frame
		Eigen::Vector3f cross=sceneNormal.cross (Eigen::Vector3f::UnitX ()). normalized();

		Eigen::Affine3f rotationSceneToGlobal;
		if(isnan(cross[0]))
		{
			rotationSceneToGlobal=Eigen::AngleAxisf(0.0,Eigen::Vector3f::UnitX ());
		}
		else
			rotationSceneToGlobal=Eigen::AngleAxisf(acosf (sceneNormal.dot (Eigen::Vector3f::UnitX ())),cross);

		Eigen::Affine3f transformSceneToGlobal = Eigen::Translation3f ( rotationSceneToGlobal* ((-1)*scenePoint)) * rotationSceneToGlobal;

		//////////////////////
		// Choose best pose //
		//////////////////////

		// Reset pose accumulator
		for(std::vector<std::vector<int> >::iterator accumulatorIt=accumulatorParallelAux[omp_get_thread_num()].begin();accumulatorIt < accumulatorParallelAux[omp_get_thread_num()].end(); ++accumulatorIt)
		{
			std::fill(accumulatorIt->begin(),accumulatorIt->end(),0); 
		}
		

		//std::cout << std::endl;
		for(si=cloudWithNormalsDownSampled->begin(); si < cloudWithNormalsDownSampled->end();++si)
		{
			// if same point, skip point pair
			if( (cloudWithNormalsDownSampled->points[referencePointsIndices->indices[sr]].x==si->x) && (cloudWithNormalsDownSampled->points[referencePointsIndices->indices[sr]].y==si->y) && (cloudWithNormalsDownSampled->points[referencePointsIndices->indices[sr]].z==si->z))
			{
				//std::cout << si->x << " " << si->y << " " << si->z << std::endl;
				continue;
			}	

			// Compute PPF
			pointPairSV PPF=pointPairSV(cloudWithNormalsDownSampled->points[referencePointsIndices->indices[sr]],*si, transformSceneToGlobal); // use the reference-point index, matching the same-point check above

			// Compute index
			index=PPF.getHash(*si,model->distanceStepInverted);

			// If distance between point pairs is bigger than the maximum for this model, skip point pair
			if(index>=pointPairSV::maxHash)
			{
				//std::cout << "DEBUG" << std::endl;
				continue;
			}

			// If there is no similar point pair features in the model, skip point pair and avoid computing the alpha
			if(model->hashTable[index].size()==0)
				continue; 

			for(sameFeatureIt=model->hashTable[index].begin(); sameFeatureIt<model->hashTable[index].end(); ++sameFeatureIt)
			{
				// Vote on the reference point and angle (and object)
				alpha=sameFeatureIt->alpha-PPF.alpha; // alpha values between [-360,360]

				// alpha values should be between [-180,180] ANGLE_MAX = 2*PI
				if(alpha<(-PI))
					alpha=ANGLE_MAX+alpha;
				else if(alpha>(PI))
					alpha=alpha-ANGLE_MAX;
				//std::cout << "alpha after: " << alpha*RAD_TO_DEG << std::endl;
				//std::cout << "alpha after2: " << (alpha+PI)*RAD_TO_DEG << std::endl;
				alphaBin=static_cast<unsigned int> ( round((alpha+PI)*pointPair::angleStepInverted) ); // division is slower than multiplication
				//std::cout << "angle1: " << alphaBin << std::endl;
           			/*alphaBin = static_cast<unsigned int> (floor (alpha) + floor (PI *poseAngleStepInverted));
				std::cout << "angle2: " << alphaBin << std::endl;*/
				//alphaBin=static_cast<unsigned int> ( floor(alpha*poseAngleStepInverted) + floor(PI*poseAngleStepInverted) );
				if(alphaBin>=pointPair::angleBins)
				{	
					alphaBin=0;
					//ROS_INFO("naoooo");
					//exit(1);
				}

//#pragma omp critical
//{std::cout << index <<" "<<sameFeatureIt->id << " " << alphaBin << " " << omp_get_thread_num() << " " << accumulatorParallelAux[omp_get_thread_num()][sameFeatureIt->id][alphaBin] << std::endl;}

				accumulatorParallelAux[omp_get_thread_num()][sameFeatureIt->id][alphaBin]+=sameFeatureIt->weight;
			}
		}
		//ROS_INFO("DISTANCE:%f DISTANCE SQUARED:%f", model->maxModelDist, model->maxModel

		// Choose best pose (highest peak on the accumulator[peak with more votes])

		int bestPoseAlpha=0;
		int bestPosePoint=0;
		int bestPoseVotes=0;

		for(size_t p=0; p < model->modelCloud->size(); ++p)
		{
			for(unsigned int a=0; a < pointPair::angleBins; ++a)
			{
				if(accumulatorParallelAux[omp_get_thread_num()][p][a]>bestPoseVotes)
				{
					bestPoseVotes=accumulatorParallelAux[omp_get_thread_num()][p][a];
					bestPosePoint=p;
					bestPoseAlpha=a;
				}
			}
		}

		// A candidate pose was found
		if(bestPoseVotes!=0)
		{
			// Compute and store transformation from model to scene
			//boost::shared_ptr<pose> bestPose(new pose( bestPoseVotes,model->modelToScene(model->modelCloud->points[bestPosePoint],transformSceneToGlobal,static_cast<float>(bestPoseAlpha)*pointPair::angleStep-PI) ));

			bestPosesAux[omp_get_thread_num()].push_back(pose( bestPoseVotes,model->modelToScene(bestPosePoint,transformSceneToGlobal,static_cast<float>(bestPoseAlpha)*pointPair::angleStep-PI) ));
			//bestPoses.push_back(bestPose);

			//std::cout << bestPosesAux[omp_get_thread_num()].size() <<" " <<omp_get_thread_num()<< std::endl;
		}
		else 
		{
			continue;
		}

		// Choose poses whose votes are a percentage above a given threshold of the best pose
		accumulatorParallelAux[omp_get_thread_num()][bestPosePoint][bestPoseAlpha]=0; 	// This is more efficient than having an if condition to verify if we are considering the best pose again
		for(size_t p=0; p < model->modelCloud->size(); ++p)
		{
			for(unsigned int a=0; a < pointPair::angleBins; ++a)
			{
				if(accumulatorParallelAux[omp_get_thread_num()][p][a]>=accumulatorPeakThreshold*bestPoseVotes)
				{
					// Compute and store transformation from model to scene
					//boost::shared_ptr<pose> bestPose(new pose( accumulatorParallelAux[omp_get_thread_num()][p][a],model->modelToScene(model->modelCloud->points[p],transformSceneToGlobal,static_cast<float>(a)*pointPair::angleStep-PI ) ));


					//bestPoses.push_back(bestPose);
					bestPosesAux[omp_get_thread_num()].push_back(pose( accumulatorParallelAux[omp_get_thread_num()][p][a],model->modelToScene(p,transformSceneToGlobal,static_cast<float>(a)*pointPair::angleStep-PI) )); // store this peak's own votes and pose, as in the serial version (#19)
					//std::cout << bestPosesAux[omp_get_thread_num()].size() <<" " <<omp_get_thread_num()<< std::endl;
				}
			}
		}
	}

	std::cout << "Done" << std::endl;


	for(int i=0; i<omp_get_num_procs(); ++i)
	{
		for(unsigned int j=0; j<bestPosesAux[i].size(); ++j)
			bestPoses.push_back(bestPosesAux[i][j]);
	}
	std::cout << "\thypothesis number: " << bestPoses.size() << std::endl << std::endl;

	if(bestPoses.size()==0)
	{
		clusters.clear();
		return clusters;
	}

	
	//////////////////////
	// Compute clusters //
	//////////////////////
Tac();
	std::cout << "\tCompute clusters... ";
Tic();
	clusters=poseClustering(bestPoses);
Tac();
	std::cout << "Done" << std::endl;

	return clusters;
}
Code example #21
File: ppu.c  Project: wolfgarnet/numpycbe
double SDOT2( PyArrayObject *pyobj1, PyArrayObject *pyobj2, unsigned int shadersize, unsigned int *shader )
{
	Operation_t op1;
	op1.shaderSize = shadersize;
	op1.EA_shader  = shader;
	op1.obj[0]     = pyobj1;
	op1.obj[1]     = pyobj2;
	op1.num_SPES   = speThreads;

	unsigned int state = 4;
	unsigned int i, r;
	unsigned int checked = 0;
	float *results[6];

	// Setup SPEs
  	for ( i = 0 ; i < speThreads ; i++ )
	{
  		results[i] = (float *)memalign( 128, ( 4 + 127 ) & ~127 );
  		r = (unsigned int)results[i];
  		spe_pointer_addr[i][0] = (unsigned int)&op1;
  		spe_in_mbox_write ( speData[i].spe_ctx, &state, 1, SPE_MBOX_ALL_BLOCKING );
  		spe_in_mbox_write ( speData[i].spe_ctx, &r, 1, SPE_MBOX_ALL_BLOCKING );
	}

  	// Waiting for SPEs!
  	while( checked < speThreads )
	{
		if ( spe_out_mbox_status( speData[checked].spe_ctx ) )
		{
			spe_out_mbox_read( speData[checked].spe_ctx, &r, 1 );
			checked++;
		}
	}

  	Tic();
  	// Make SPEs run
  	state = 6;
 	for ( i = 0 ; i < speThreads ; i++ )
	{
  		spe_in_mbox_write ( speData[i].spe_ctx, &state, 1, SPE_MBOX_ALL_BLOCKING );
	}

  	// Waiting for SPEs!
 	checked = 0;
  	while( checked < speThreads )
	{
		if ( spe_out_mbox_status( speData[checked].spe_ctx ) )
		{
			spe_out_mbox_read( speData[checked].spe_ctx, &r, 1 );
			checked++;
		}
	}


  	// Get result
	float r1 = 0.0f;
	for ( i = 0 ; i < speThreads ; i++ )
	{
		r1 += results[i][0];
		// Cleanup
		free( results[i] );
	}

	double time = Toc_d();
	PrintTicToc( "Finished at ", Toc() );

	printf( "Result=%f\n", r1 );

	return time;
}