Example #1
/*!
  Initialise the tracking by extracting KLT keypoints on the provided image.

  \param I : Grey level image used as input. This image should have only 1 channel.
  \param mask : Image mask used to restrict the keypoint detection area.
  If mask is NULL, the whole image is considered.

  \exception vpTrackingException::initializationError : If the image I is not
  initialized, or if the image or the mask has a bad pixel format.
*/
void vpKltOpencv::initTracking(const IplImage *I, const IplImage *mask)
{
  if (!I) {
    throw(vpException(vpTrackingException::initializationError,  "Image Not initialized")) ;
  }

  if (I->depth != IPL_DEPTH_8U || I->nChannels != 1)  {
    throw(vpException(vpTrackingException::initializationError,  "Bad Image format")) ;
  }

  if (mask) {
    if (mask->depth != IPL_DEPTH_8U || mask->nChannels != 1) {
      throw(vpException(vpTrackingException::initializationError,  "Bad Image format")) ;
    }
  }

  // Create the buffers
  CvSize Sizeim, SizeI;
  SizeI = cvGetSize(I);
  bool b_imOK = true;
  if(image != NULL){
    Sizeim = cvGetSize(image);
    if(SizeI.width != Sizeim.width || SizeI.height != Sizeim.height) b_imOK = false;
  }
  if(image == NULL || prev_image == NULL || pyramid==NULL || prev_pyramid ==NULL || !b_imOK){
    reset();
    image = cvCreateImage(cvGetSize(I), 8, 1);
    image->origin = I->origin;
    prev_image = cvCreateImage(cvGetSize(I), IPL_DEPTH_8U, 1);
    pyramid = cvCreateImage(cvGetSize(I), IPL_DEPTH_8U, 1);
    prev_pyramid = cvCreateImage(cvGetSize(I), IPL_DEPTH_8U, 1);
  }else{
    swap_temp = 0;
    countFeatures = 0;
    countPrevFeatures = 0;
    flags = 0;
    initialized = 0;
    globalcountFeatures = 0;   
  }

  initialized = 1;

  //Import
  cvCopy(I, image, 0);

  // Search for interest points
  countFeatures = maxFeatures;
  countPrevFeatures = 0;
  IplImage* eig = cvCreateImage(cvGetSize(image), 32, 1);
  IplImage* temp = cvCreateImage(cvGetSize(image), 32, 1);
  cvGoodFeaturesToTrack(image, eig, temp, features,
			&countFeatures, quality, min_distance,
      mask, block_size, use_harris, harris_free_parameter);
  cvFindCornerSubPix(image, features, countFeatures, cvSize(win_size, win_size),
		     cvSize(-1,-1),cvTermCriteria(CV_TERMCRIT_ITER|
						  CV_TERMCRIT_EPS,20,0.03));
  cvReleaseImage(&eig);
  cvReleaseImage(&temp);

  if (OnInitialize)
    OnInitialize(_tid);

  //printf("Number of features at init: %d\n", countFeatures);
  for (int boucle=0; boucle<countFeatures;boucle++)  {
    featuresid[boucle] = globalcountFeatures;
    globalcountFeatures++;
    
    if (OnNewFeature){
      OnNewFeature(_tid, boucle, featuresid[boucle], features[boucle].x,
		   features[boucle].y);
    }
  }
}
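
A minimal usage sketch for this tracker, assuming a default-constructed vpKltOpencv with its default parameters; the file names are placeholders and track() is the method shown in Example #14:

// Sketch: initialize on the first frame, then track into the next one.
vpKltOpencv tracker;
IplImage *I0 = cvLoadImage("frame0.pgm", CV_LOAD_IMAGE_GRAYSCALE);  // placeholder frames
IplImage *I1 = cvLoadImage("frame1.pgm", CV_LOAD_IMAGE_GRAYSCALE);
tracker.initTracking(I0, NULL);  // detect the initial KLT keypoints, no mask
tracker.track(I1);               // track them into the next frame (see Example #14)
cvReleaseImage(&I0);
cvReleaseImage(&I1);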
Example #2
// Lucas-Kanade
Eigen::Matrix<double, 4, 150> lk2(IplImage* imgI, IplImage* imgJ, Eigen::Matrix<double, 2,
		150> const & pointsI, Eigen::Matrix<double, 2, 150> const & pointsJ,
		unsigned int sizeI, unsigned int sizeJ, unsigned int level) {

	double nan = std::numeric_limits<double>::quiet_NaN();

	int Level;
	if (level != 0) {
		Level = (int) level;
	} else {
		Level = 5;
	}

	int I = 0;
	int J = 1;
	int Winsize = 10;

	// Images (note: the IMG buffer created below is immediately overwritten by the
	// input pointer, so only the PYR pyramid buffer allocated here is actually kept)
	if (IMG[I] != 0) {
		IMG[I] = imgI;
	} else {
		CvSize imageSize = cvGetSize(imgI);
		IMG[I] = cvCreateImage(imageSize, 8, 1);
		PYR[I] = cvCreateImage(imageSize, 8, 1);
		IMG[I] = imgI;
	}

	if (IMG[J] != 0) {
		IMG[J] = imgJ;
	} else {
		CvSize imageSize = cvGetSize(imgJ);
		IMG[J] = cvCreateImage(imageSize, 8, 1);
		PYR[J] = cvCreateImage(imageSize, 8, 1);
		IMG[J] = imgJ;
	}

	// Points
	int nPts = sizeI;

	if (nPts != sizeJ) {
		std::cout << "Inconsistent input!" << std::endl;
		return Eigen::Matrix<double, 4, 150>::Constant(nan); // must match the declared return type
	}

	points[0] = (CvPoint2D32f*) cvAlloc(nPts * sizeof(CvPoint2D32f)); // template
	points[1] = (CvPoint2D32f*) cvAlloc(nPts * sizeof(CvPoint2D32f)); // target
	points[2] = (CvPoint2D32f*) cvAlloc(nPts * sizeof(CvPoint2D32f)); // forward-backward

	for (int i = 0; i < nPts; i++) {
		points[0][i].x = pointsI(0, i);
		points[0][i].y = pointsI(1, i);
		points[1][i].x = pointsJ(0, i);
		points[1][i].y = pointsJ(1, i);
		points[2][i].x = pointsI(0, i);
		points[2][i].y = pointsI(1, i);
	}

	float *ncc = (float*) cvAlloc(nPts * sizeof(float));
	float *fb = (float*) cvAlloc(nPts * sizeof(float));
	char *status = (char*) cvAlloc(nPts);

	cvCalcOpticalFlowPyrLK(IMG[I], IMG[J], PYR[I], PYR[J], points[0],
			points[1], nPts, cvSize(win_size, win_size), Level, status, 0,
			cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03),
			CV_LKFLOW_INITIAL_GUESSES);
	cvCalcOpticalFlowPyrLK(IMG[J], IMG[I], PYR[J], PYR[I], points[1],
			points[2], nPts, cvSize(win_size, win_size), Level, 0, 0,
			cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03),
			CV_LKFLOW_INITIAL_GUESSES | CV_LKFLOW_PYR_A_READY
					| CV_LKFLOW_PYR_B_READY );

	normCrossCorrelation(IMG[I], IMG[J], points[0], points[1], nPts, status,
			ncc, Winsize, CV_TM_CCOEFF_NORMED);
	euclideanDistance(points[0], points[2], fb, nPts);

	// Output
	int M = 4;
	Eigen::MatrixXd output = Eigen::MatrixXd::Constant(M, 150, nan); // columns beyond nPts stay NaN
	for (int i = 0; i < nPts; i++) {
		if (status[i] == 1) {
			output(0, i) = (double) points[1][i].x;
			output(1, i) = (double) points[1][i].y;
			output(2, i) = (double) fb[i];
			output(3, i) = (double) ncc[i];
		} else {
			output(0, i) = nan;
			output(1, i) = nan;
			output(2, i) = nan;
			output(3, i) = nan;
		}
	}

	// free the per-call buffers (the points[] arrays are module-level)
	cvFree(&ncc);
	cvFree(&fb);
	cvFree(&status);

	return output;
}
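
A sketch of how lk2 might be invoked, assuming the module-level IMG/PYR buffers start out NULL as the function above expects; the file names and point count are placeholders:

IplImage *imgI = cvLoadImage("frame0.png", CV_LOAD_IMAGE_GRAYSCALE);  // placeholder frames
IplImage *imgJ = cvLoadImage("frame1.png", CV_LOAD_IMAGE_GRAYSCALE);
Eigen::Matrix<double, 2, 150> ptsI = Eigen::Matrix<double, 2, 150>::Zero();
// ... fill the first nPts columns of ptsI with pixel coordinates ...
Eigen::Matrix<double, 2, 150> ptsJ = ptsI;  // initial guesses in the target frame
unsigned int nPts = 100;                    // number of valid columns (placeholder)
Eigen::Matrix<double, 4, 150> res = lk2(imgI, imgJ, ptsI, ptsJ, nPts, nPts, 5);
// Rows 0-1: tracked x,y; row 2: forward-backward error; row 3: NCC score.
// Columns whose track failed (and columns beyond nPts) hold NaN.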
Example #3
int main(int argc, char* argv[]) {
  if(argc != 6){
	printf("Usage: %s board_w board_h intrinsics.xml distortion.xml image\n", argv[0]);
	return -1;
  }
  // INPUT PARAMETERS:
  //
  int       board_w    = atoi(argv[1]);
  int       board_h    = atoi(argv[2]);
  int       board_n    = board_w * board_h;
  CvSize    board_sz   = cvSize( board_w, board_h );
  CvMat*    intrinsic  = (CvMat*)cvLoad(argv[3]);
  CvMat*    distortion = (CvMat*)cvLoad(argv[4]);
  IplImage* image      = 0;
  IplImage* gray_image = 0;
  if( (image = cvLoadImage(argv[5])) == 0 ) {
    printf("Error: Couldn't load %s\n",argv[5]);
    return -1;
  }
  
  CvMat* image_points      = cvCreateMat(1*board_n,2,CV_32FC1);
  CvMat* object_points     = cvCreateMat(1*board_n,3,CV_32FC1);
  
  CvMat* objdrawpoints = cvCreateMat(1,1,CV_32FC3);
  CvMat* imgdrawpoints = cvCreateMat(1,1,CV_32FC2);
  float x=0;
  float y=0;
  float z=0;
  
  double grid_width=2.85;
  gray_image = cvCreateImage( cvGetSize(image), 8, 1 );
  cvCvtColor(image, gray_image, CV_BGR2GRAY );

  CvPoint2D32f* corners = new CvPoint2D32f[ board_n ];
  int corner_count = 0;
  int found = cvFindChessboardCorners(
	gray_image,
	board_sz,
	corners,
	&corner_count,
	CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
  );
  if(!found){
	printf("Couldn't aquire chessboard on %s, "
	  "only found %d of %d corners\n",
	  argv[5],corner_count,board_n
	);
	return -1;
  }
  //Get Subpixel accuracy on those corners:
  cvFindCornerSubPix(
	gray_image,
	corners,
	corner_count,
	cvSize(11,11),
	cvSize(-1,-1),
	cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1 )
  );

// If we got a good board, add it to our data
  for( int i=0, j=0; j<board_n; ++i,++j ) {
	CV_MAT_ELEM(*image_points, float,i,0) = corners[j].x;
	CV_MAT_ELEM(*image_points, float,i,1) = corners[j].y;
	CV_MAT_ELEM(*object_points,float,i,0) =grid_width*( j/board_w);
	//  cout<<j/board_w<<" "<<j%board_w<<endl;
	CV_MAT_ELEM(*object_points,float,i,1) = grid_width*(j%board_w);
	CV_MAT_ELEM(*object_points,float,i,2) = 0.0f;
  }

  // DRAW THE FOUND CHESSBOARD
  //
  cvDrawChessboardCorners(
	image,
	board_sz,
	corners,
	corner_count,
	found
  );

  // FIND THE EXTRINSICS (ROTATION AND TRANSLATION)
  //
  CvMat *trans = cvCreateMat( 1, 3, CV_32F);
  CvMat *rot = cvCreateMat( 1, 3, CV_32F);

  cvFindExtrinsicCameraParams2(object_points,image_points,intrinsic,distortion,rot,trans);
  
//  cvSave("trans.xml",trans); 
//  cvSave("rot.xml",rot); 
  int key = 0;
  IplImage *drawn_image = cvCloneImage(image);
  cvNamedWindow("translation");

  // LOOP TO ALLOW USER TO PLAY WITH HEIGHT:
  //
  // escape key stops
  //
  
//  cvSetZero(trans);
//  cvSetZero(rot);
  while(key != 27) {
	cvCopy(image,drawn_image);
	
	if(key=='a')      x--;
	else if(key=='q') x++;
	else if(key=='s') y--;
	else if(key=='w') y++;
	else if(key=='d') z--;
	else if(key=='e') z++;
	
	((float*)(objdrawpoints->data.ptr))[0]=x;
	((float*)(objdrawpoints->data.ptr))[1]=y;
	((float*)(objdrawpoints->data.ptr))[2]=z;
	printf("%f %f %f\n",x,y,z);
	cvProjectPoints2(objdrawpoints,rot,trans,intrinsic,distortion,imgdrawpoints);
	cvCircle(drawn_image,cvPoint(((float*)(imgdrawpoints->data.ptr))[0],((float*)(imgdrawpoints->data.ptr))[1]),5,cvScalar(255,0,0),-1);
	printf("%f %f\n",((float*)(imgdrawpoints->data.ptr))[0],((float*)(imgdrawpoints->data.ptr))[1]);
	cvShowImage( "translation", drawn_image );
	key = cvWaitKey(3);
  }
  cvDestroyWindow( "translation" );
  // release everything allocated above
  delete [] corners;
  cvReleaseImage( &drawn_image );
  cvReleaseImage( &gray_image );
  cvReleaseImage( &image );
  cvReleaseMat( &image_points );
  cvReleaseMat( &object_points );
  cvReleaseMat( &objdrawpoints );
  cvReleaseMat( &imgdrawpoints );
  cvReleaseMat( &trans );
  cvReleaseMat( &rot );
  cvReleaseMat( &intrinsic );
  cvReleaseMat( &distortion );
  return 0;
}
Example #4
int main(int argc, char * argv[])
{
	if(argc < 3) {
		fprintf(stderr, "Usage: %s image1 image2\n", argv[0]);
		return 1;
	}

	char * im1fname = argv[1];
	char * im2fname = argv[2];

	IplImage * image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_GRAYSCALE);

	IplImage * eigenvalues = cvCreateImage(cvGetSize(image1), 32, 1);
	IplImage * temp = cvCreateImage(cvGetSize(image1), 32, 1);

	int count = MAX_COUNT;
	double quality = 0.5;
	// double min_distance = 2;
	double min_distance = 50;
	int block_size = 7;
	int use_harris = 0;
	int win_size = 10;
	int flags = 0;

	CvPoint2D32f * source_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));
	CvPoint2D32f * dest_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));
	CvPoint2D32f * delaunay_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));

	cvGoodFeaturesToTrack( image1, eigenvalues, temp, source_points, &count,
			quality, min_distance, 0, block_size, use_harris, 0.04 );

	printf("%d features\n",count);

	setbuf(stdout, NULL);

	printf("Finding corner subpix...");
	cvFindCornerSubPix( image1, source_points, count,
			cvSize(win_size,win_size), cvSize(-1,-1),
			cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
	printf("done.\n");

	cvReleaseImage(&eigenvalues);
	cvReleaseImage(&temp);

	IplImage * image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_GRAYSCALE);

	char * status = (char*)cvAlloc(sizeof(char)*MAX_COUNT);

	IplImage * pyramid = cvCreateImage( cvGetSize(image1), IPL_DEPTH_8U, 1 );
	IplImage * second_pyramid = cvCreateImage( cvGetSize(image2), IPL_DEPTH_8U, 1 );

	printf("Computing optical flow...");	
	cvCalcOpticalFlowPyrLK(image1, image2, pyramid, second_pyramid, source_points,
		dest_points, count, cvSize(win_size,win_size), 4, status, 0,
		cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03),
		flags);
	printf("done.\n");

	int num_matches = 0;
	int num_out_matches = 0;
	int max_dist = 30;
	int offset = 200;
	
	CvMemStorage * storage = cvCreateMemStorage(0);
	CvSubdiv2D * delaunay = cvCreateSubdivDelaunay2D( cvRect(0,0,image1->width,image1->height), storage);

	cvReleaseImage(&image1);
	cvReleaseImage(&image2);
	
	image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_COLOR);
	image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_COLOR);

	cvSet( image1, cvScalarAll(255) );

	std::map<CvPoint, CvPoint> point_lookup_map;
	std::vector<std::pair<CvPoint, CvPoint> > point_lookup;

	// put corners in the point lookup as going to themselves
	point_lookup_map[cvPoint(0,0)] = cvPoint(0,0);
	point_lookup_map[cvPoint(0,image1->height-1)] = cvPoint(0,image1->height-1);
	point_lookup_map[cvPoint(image1->width-1,0)] = cvPoint(image1->width-1,0);
	point_lookup_map[cvPoint(image1->width-1,image1->height-1)] = cvPoint(image1->width-1,image1->height-1);

	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,0), cvPoint(0,0)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,image1->height-1), cvPoint(0,image1->height-1)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,0), cvPoint(image1->width-1,0)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,image1->height-1), cvPoint(image1->width-1,image1->height-1)));

	printf("Inserting corners...");
	// put corners in the Delaunay subdivision
	for(unsigned int i = 0; i < point_lookup.size(); i++) {
		cvSubdivDelaunay2DInsert( delaunay, cvPointTo32f(point_lookup[i].first) );
	}
	printf("done.\n");

	CvSubdiv2DEdge proxy_edge;
	for(int i = 0; i < count; i++) {
		if(status[i]) {
			CvPoint source = cvPointFrom32f(source_points[i]);
			CvPoint dest = cvPointFrom32f(dest_points[i]);
	
			if((((int)fabs((double)(source.x - dest.x))) > max_dist) ||
				 (((int)fabs((double)(source.y - dest.y))) > max_dist)) {	
				num_out_matches++;
			}
			else if((dest.x >= 0) && (dest.y >= 0) && (dest.x < (image1->width)) && (dest.y < (image1->height))) {
				if(point_lookup_map.find(source) == point_lookup_map.end()) {
					num_matches++;
				
					point_lookup_map[source] = dest;
					point_lookup.push_back(std::pair<CvPoint,CvPoint>(source,dest));
					delaunay_points[i] = (cvSubdivDelaunay2DInsert( delaunay, cvPointTo32f(source) ))->pt;
					cvSetImageROI( image1, cvRect(source.x-8,source.y-8,8*2,8*2) );
					cvResetImageROI( image2 );
					cvGetRectSubPix( image2, image1, dest_points[i] );
				}
				/*
				cvSet2D( image1, source.y, source.x, cvGet2D( image2, dest.y, dest.x ) );
				cvSet2D( image1, source.y, source.x+1, cvGet2D( image2, dest.y, dest.x+1 ) );
				cvSet2D( image1, source.y, source.x-1, cvGet2D( image2, dest.y, dest.x-1 ) );
				cvSet2D( image1, source.y+1, source.x, cvGet2D( image2, dest.y+1, dest.x ) );
				cvSet2D( image1, source.y-1, source.x, cvGet2D( image2, dest.y-1, dest.x ) );
				cvSet2D( image1, source.y+1, source.x+1, cvGet2D( image2, dest.y+1, dest.x+1 ) );
				cvSet2D( image1, source.y-1, source.x-1, cvGet2D( image2, dest.y-1, dest.x-1 ) );
				cvSet2D( image1, source.y+1, source.x-1, cvGet2D( image2, dest.y+1, dest.x-1 ) );
				cvSet2D( image1, source.y-1, source.x+1, cvGet2D( image2, dest.y-1, dest.x+1 ) );
				*/

				// cvCircle( image1, source, 4, CV_RGB(255,0,0), 2, CV_AA );
				// cvCircle( image2, dest, 4, CV_RGB(255,0,0), 2, CV_AA );
			}

			/*
			cvSetImageROI( image1, cvRect(source.x-offset,source.y-offset,offset*2,offset*2) );
			cvSetImageROI( image2, cvRect(dest.x-offset,dest.y-offset,offset*2,offset*2) );
			cvNamedWindow("image1",0);
			cvNamedWindow("image2",0);
			cvShowImage("image1",image1);
			cvShowImage("image2",image2);
			printf("%d,%d -> %d,%d\n",source.x,source.y,dest.x,dest.y);
			cvWaitKey(0);
			cvDestroyAllWindows();
			*/
		}
	}
	printf("%d %d\n",num_matches,num_out_matches);
	printf("%d lookups\n",point_lookup_map.size());

	cvResetImageROI( image1 );

	cvSaveImage("sparse.jpg", image1);

	cvReleaseImage(&image1);
	image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_COLOR);
	cvSet( image1, cvScalarAll(255) );
	printf("Warping image...");

	CvSeqReader  reader;
	int total = delaunay->edges->total;
	int elem_size = delaunay->edges->elem_size;

	cvStartReadSeq( (CvSeq*)(delaunay->edges), &reader, 0 );

	std::vector<Triangle> trivec;
	std::vector<CvMat *> baryinvvec;

	for( int i = 0; i < total; i++ ) {
		CvQuadEdge2D* edge = (CvQuadEdge2D*)(reader.ptr);

		if( CV_IS_SET_ELEM( edge ))	{
			CvSubdiv2DEdge curedge = (CvSubdiv2DEdge)edge;
			CvSubdiv2DEdge t = curedge;
			Triangle temptri;
			int count = 0;
			
			// construct a triangle from this edge
			do {
				CvSubdiv2DPoint* pt = cvSubdiv2DEdgeOrg( t );
				if(count < 3) {
					pt->pt.x = pt->pt.x >= image1->width ? image1->width-1 : pt->pt.x;
					pt->pt.y = pt->pt.y >= image1->height ? image1->height-1 : pt->pt.y;
					pt->pt.x = pt->pt.x < 0 ? 0 : pt->pt.x;
					pt->pt.y = pt->pt.y < 0 ? 0 : pt->pt.y;

					temptri.points[count] = cvPointFrom32f( pt->pt );
				}
				else {
					printf("More than 3 edges\n");
				}
				count++;
				t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
			} while( t != curedge );
			
			// check that triangle is not already in
			if( std::find(trivec.begin(), trivec.end(), temptri) == trivec.end() ) {
				// push triangle in and draw
				trivec.push_back(temptri);
				cvLine( image1, temptri.points[0], temptri.points[1], CV_RGB(255,0,0), 1, CV_AA, 0 );
				cvLine( image1, temptri.points[1], temptri.points[2], CV_RGB(255,0,0), 1, CV_AA, 0 );
				cvLine( image1, temptri.points[2], temptri.points[0], CV_RGB(255,0,0), 1, CV_AA, 0 );

				// compute barycentric computation vector for this triangle
				CvMat * barycen = cvCreateMat( 3, 3, CV_32FC1 );
				CvMat * baryceninv = cvCreateMat( 3, 3, CV_32FC1 );

				barycen->data.fl[3*0+0] = temptri.points[0].x;
				barycen->data.fl[3*0+1] = temptri.points[1].x;
				barycen->data.fl[3*0+2] = temptri.points[2].x;
				barycen->data.fl[3*1+0] = temptri.points[0].y;
				barycen->data.fl[3*1+1] = temptri.points[1].y;
				barycen->data.fl[3*1+2] = temptri.points[2].y;
				barycen->data.fl[3*2+0] = 1;
				barycen->data.fl[3*2+1] = 1;
				barycen->data.fl[3*2+2] = 1;

				cvInvert( barycen, baryceninv, CV_LU );
				baryinvvec.push_back(baryceninv);

				cvReleaseMat( &barycen );
			}
		}

		CV_NEXT_SEQ_ELEM( elem_size, reader );
	}
	printf("%d triangles...", trivec.size());
	cvSaveImage("triangles.jpg", image1);
	
	cvSet( image1, cvScalarAll(255) );
	IplImage * clean_nonthresh = cvLoadImage( "conhull-clean.jpg", CV_LOAD_IMAGE_COLOR );

	// for each triangle
	for(unsigned int i = 0; i < trivec.size(); i++) {
		Triangle curtri = trivec[i];
		CvMat * curpoints = cvCreateMat( 1, 3, CV_32SC2 );
		Triangle target;
		std::map<CvPoint,CvPoint>::iterator piter[3];
		
		printf("Triangle %d / %d\n",i,trivec.size());
		bool is_corner = false;
		for(int j = 0; j < 3; j++) {
			/*
			curpoints->data.i[2*j+0] = curtri.points[j].x;
			curpoints->data.i[2*j+1] = curtri.points[j].y;
			*/
			CV_MAT_ELEM( *curpoints, CvPoint, 0, j ) = curtri.points[j];
			printf("%d,%d\n",curtri.points[j].x,curtri.points[j].y);
	
			/*	
			if((curtri.points[j] == cvPoint(0,0)) ||  (curtri.points[j] == cvPoint(0,image1->height)) ||(curtri.points[j] == cvPoint(image1->width,0)) ||(curtri.points[j] == cvPoint(image1->width,image1->height))) {
				is_corner = true;
				break;
			}
			*/

			for(unsigned int k = 0; k < point_lookup.size(); k++) {
				std::pair<CvPoint,CvPoint> thispair = point_lookup[k];
				if(thispair.first == curtri.points[j]) {
					target.points[j] = thispair.second;
					break;
				}
			}

			/*
			piter[j] = point_lookup_map.find(curtri.points[j]);
			if(piter[j] != point_lookup_map.end() ) {
				target.points[j] = piter[j]->second;
			}
			*/
		}
			
		// if((piter[0] != point_lookup_map.end()) && (piter[1] != point_lookup_map.end()) && (piter[2] != point_lookup_map.end())) {
		if(!is_corner) {
			CvMat * newcorners = cvCreateMat( 3, 3, CV_32FC1 );
			newcorners->data.fl[3*0+0] = target.points[0].x;
			newcorners->data.fl[3*0+1] = target.points[1].x;
			newcorners->data.fl[3*0+2] = target.points[2].x;
			newcorners->data.fl[3*1+0] = target.points[0].y;
			newcorners->data.fl[3*1+1] = target.points[1].y;
			newcorners->data.fl[3*1+2] = target.points[2].y;
			newcorners->data.fl[3*2+0] = 1;
			newcorners->data.fl[3*2+1] = 1;
			newcorners->data.fl[3*2+2] = 1;

			CvContour hdr;
			CvSeqBlock blk;
			CvRect trianglebound = cvBoundingRect( cvPointSeqFromMat(CV_SEQ_KIND_CURVE+CV_SEQ_FLAG_CLOSED, curpoints, &hdr, &blk), 1 );
			printf("Bounding box: %d,%d,%d,%d\n",trianglebound.x,trianglebound.y,trianglebound.width,trianglebound.height);
			for(int y = trianglebound.y; (y < (trianglebound.y + trianglebound.height)) && ( y < image1->height); y++) {
				for(int x = trianglebound.x; (x < (trianglebound.x + trianglebound.width)) && (x < image1->width); x++) {
					// check to see if we're inside this triangle
					/*
					CvPoint v0 = cvPoint( curtri.points[2].x - curtri.points[0].x, curtri.points[2].y - curtri.points[0].y );
					CvPoint v1 = cvPoint( curtri.points[1].x - curtri.points[0].x, curtri.points[1].y - curtri.points[0].y );
					CvPoint v2 = cvPoint( x - curtri.points[0].x, y - curtri.points[0].y );
					
					int dot00 = v0.x * v0.x + v0.y * v0. y;
					int dot01 = v0.x * v1.x + v0.y * v1. y;
					int dot02 = v0.x * v2.x + v0.y * v2. y;
					int dot11 = v1.x * v1.x + v1.y * v1. y;
					int dot12 = v1.x * v2.x + v1.y * v2. y;

					double invDenom = 1.0 / (double)(dot00 * dot11 - dot01 * dot01);
					double u = (double)(dot11 * dot02 - dot01 * dot12) * invDenom;
					double v = (double)(dot00 * dot12 - dot01 * dot02) * invDenom;
					*/

					CvMat * curp = cvCreateMat(3, 1, CV_32FC1);
					CvMat * result = cvCreateMat(3, 1, CV_32FC1);
					curp->data.fl[0] = x;
					curp->data.fl[1] = y;
					curp->data.fl[2] = 1;
					cvMatMul( baryinvvec[i], curp, result );
					// double u = result->data.fl[0]/result->data.fl[2];
					// double v = result->data.fl[1]/result->data.fl[2];
			

					if( (result->data.fl[0] > 0) && (result->data.fl[1] > 0) && (fabs(1.0 - (result->data.fl[0]+result->data.fl[1]+result->data.fl[2])) <= 0.01) ) {
					// if((u > 0) || (v > 0) /*&& ((u +v) < 1)*/ ) {
						// printf("Barycentric: %f %f %f\n", result->data.fl[0], result->data.fl[1], result->data.fl[2]);
						// this point is inside this triangle
						// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
						//	trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);

						CvMat * sourcepoint = cvCreateMat(3, 1, CV_32FC1);
						cvMatMul( newcorners, result, sourcepoint );
						double sourcex = sourcepoint->data.fl[0]/*/sourcepoint->data.fl[2]*/;
						double sourcey = sourcepoint->data.fl[1]/*/sourcepoint->data.fl[2]*/;
						if((sourcex >= 0) && (sourcey >= 0) && (sourcex < (image1->width)) && (sourcey < (image1->height))) {
							// printf("%d,%d %d,%d\n",x,y,(int)sourcex,(int)sourcey);
							cvSet2D( image1, y, x, cvGet2D( clean_nonthresh, (int)sourcey, (int)sourcex ) );
						}
	
						/*
						if((i == 143) && (y == 3577) && (x > 2055) && (x < 2087)) {
							printf("%d: %f, %f, %f\t%f, %f, %f\n",x,result->data.fl[0],result->data.fl[1],result->data.fl[2],
									sourcepoint->data.fl[0],sourcepoint->data.fl[1],sourcepoint->data.fl[2]);
						}
						*/
	
						cvReleaseMat( &sourcepoint );
						
						// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
						//		trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);

					}
					cvReleaseMat( &result );
					cvReleaseMat( &curp );
				}
			}
			cvReleaseMat( &newcorners );
		}
		cvReleaseMat( &curpoints );
	}
	/*
	for(int y = 0; y < image1->height; y++) {
		for(int x = 0; x < image1->width; x++) {
			CvMat * curp = cvCreateMat(3, 1, CV_32FC1);
			CvMat * result = cvCreateMat(3, 1, CV_32FC1);
			curp->data.fl[0] = x;
			curp->data.fl[1] = y;
			curp->data.fl[2] = 1;
			for(unsigned int i = 0; i < baryinvvec.size(); i++) {
				cvMatMul( baryinvvec[i], curp, result );
				double u = result->data.fl[0]/result->data.fl[2];
				double v = result->data.fl[1]/result->data.fl[2];
				if((u > 0) && (v > 0) && (u + v < 1)) {
					// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
					//		trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);

					break;
				}
			}
			cvReleaseMat( &result );
			cvReleaseMat( &curp );
		}
	}
	*/

	cvReleaseImage( &clean_nonthresh );

#ifdef OLD_BUSTED
	for(int y = 0; y < image1->height; y++) {
		for(int x = 0; x < image1->width; x++) {
			CvSubdiv2DPointLocation locate_result;
			CvSubdiv2DEdge on_edge;
			CvSubdiv2DPoint * on_vertex;
			CvPoint curpoint = cvPoint( x, y );
			locate_result = cvSubdiv2DLocate( delaunay, cvPointTo32f( curpoint ),
				&on_edge, &on_vertex );
			if( (locate_result != CV_PTLOC_OUTSIDE_RECT) && (locate_result != CV_PTLOC_ERROR) ) {
				if( locate_result == CV_PTLOC_VERTEX ) { // this point is on a vertex
					for(int i = 0; i < count; i++) {
						if(((on_vertex->pt).x == delaunay_points[i].x) && ((on_vertex->pt).y == delaunay_points[i].y)) {
							cvSet2D( image1, y, x, cvGet2D( image2, cvPointFrom32f(dest_points[i]).y, cvPointFrom32f(dest_points[i]).x ) );
							break;
						}
					}
				}
				else if( locate_result == CV_PTLOC_ON_EDGE ) { // this point is on an edge
					CvSubdiv2DPoint* org_pt;
					CvSubdiv2DPoint* dst_pt;
					CvPoint org_pt_warp;
					CvPoint dst_pt_warp;
						
					org_pt = cvSubdiv2DEdgeOrg(on_edge);
					dst_pt = cvSubdiv2DEdgeDst(on_edge);

					for(int i = 0; i < count; i++) {
						if(((org_pt->pt).x == delaunay_points[i].x) && ((org_pt->pt).y == delaunay_points[i].y)) {
							org_pt_warp = cvPointFrom32f(dest_points[i]);
						}
						if(((dst_pt->pt).x == delaunay_points[i].x) && ((dst_pt->pt).y == delaunay_points[i].y)) {
							dst_pt_warp = cvPointFrom32f(dest_points[i]);
						}
					}

					// compute vector length of original edge and current point
					double original_length;
					double cur_length; 
					if( (int)((org_pt->pt).x) == curpoint.x ) { // vertical line
						original_length = fabs((org_pt->pt).y - (dst_pt->pt).y);
						cur_length = fabs((org_pt->pt).y - curpoint.y);
					}
					else if( (int)((org_pt->pt).y) == curpoint.y ) { // horizontal line
						original_length = fabs((org_pt->pt).x - (dst_pt->pt).x);
						cur_length = fabs((org_pt->pt).x - curpoint.x);
					}
					else { // sloped line
				 		original_length = sqrt(pow((org_pt->pt).x - (dst_pt->pt).x, 2.0) + pow((org_pt->pt).y - (dst_pt->pt).y, 2.0));
						cur_length = sqrt(pow((org_pt->pt).x - curpoint.x, 2.0) + pow((org_pt->pt).y - curpoint.y, 2.0));
					}
					// compute ratio of this point on the edge
					double ratio = cur_length / original_length;
					// copy this point from the destination edge
					CvPoint point_in_original;
					int warped_x = (int)(org_pt_warp.x - dst_pt_warp.x);
					int warped_y = (int)(org_pt_warp.y - dst_pt_warp.y);
					if( org_pt_warp.x == curpoint.x ) { // vertical line
						point_in_original.y = (int)(org_pt_warp.y + (ratio * (org_pt_warp.y - dst_pt_warp.y)));
						point_in_original.x = org_pt_warp.x;
					}
					else if(org_pt_warp.y == curpoint.y) { // horizontal line
						point_in_original.x = (int)(org_pt_warp.x + (ratio * (org_pt_warp.x - dst_pt_warp.x)));
						point_in_original.y = org_pt_warp.y;
					}
					else { // sloped line
						double destination_length = sqrt(pow((org_pt_warp).x - (dst_pt_warp).x, 2.0) + pow((org_pt_warp).y - (dst_pt_warp).y, 2.0));
						double scaled_length = ratio * destination_length;
						double dest_angle = atan(fabs( (double)warped_y / (double)warped_x ));
						double xdist = scaled_length * cos(dest_angle);
						double ydist = scaled_length * sin(dest_angle);
						xdist = warped_x > 0 ? xdist : xdist * -1;
						ydist = warped_y > 0 ? ydist : ydist * -1;
						point_in_original.x = (int)( org_pt_warp.x + xdist);
						point_in_original.y = (int)( org_pt_warp.y + ydist);
					}
					
					if((point_in_original.x >= 0) && (point_in_original.y >= 0) && (point_in_original.x < (image1->width)) && (point_in_original.y < (image1->height))) {
						cvSet2D( image1, y, x, cvGet2D( image2, point_in_original.y, point_in_original.x ) );
					}
					else {
						printf("Edge point outside image\n");
					}
					// cvSet2D( image1, y, x, cvGet2D( image2, (int)(org_pt_warp.x + (ratio * (org_pt_warp.x - dst_pt_warp.x))), 
					//			(int)(org_pt_warp.y + (ratio * (org_pt_warp.y - dst_pt_warp.y))) ) );
				}
				else if( locate_result == CV_PTLOC_INSIDE ) { // this point is inside a facet (triangle)
					/*
					printf("Point inside facet: %d, %d\n",curpoint.x,curpoint.y);
					int count = 0;
					CvPoint * origins = (CvPoint*)malloc(sizeof(CvPoint)*3);
					CvSubdiv2DEdge t = on_edge;
					// count number of edges
					do {
						CvSubdiv2DPoint* pt = cvSubdiv2DEdgeOrg( t );
						if(count < 3) {
							origins[count] = cvPoint( cvRound(pt->pt.x), cvRound(pt->pt.y));
							printf("%d,%d\t",origins[count].x,origins[count].y);
						}
						count++;
						t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
					} while(t != on_edge);
					printf("\n");

					free(origins);
					*/
				}
			}
		}
	}
#endif // OLD_BUSTED
	printf("done.\n");

	cvSaveImage("fullwarp.jpg", image1);

	printf("Drawing subdivisions on warped image...");
	draw_subdiv( image1, delaunay, NULL, NULL, 0, NULL );
	// draw_subdiv( image1, delaunay, delaunay_points, source_points, count, status );
	printf("done.\n");
	
	cvSaveImage("edgeswarp.jpg", image1);

	cvReleaseImage(&image2);

	image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_COLOR);
	// cvCreateImage( cvGetSize(image2), IPL_DEPTH_8U, 3 );

	// cvCalcSubdivVoronoi2D( delaunay );
	printf("Drawing subdivisions on unwarped image...");
	draw_subdiv( image2, delaunay, delaunay_points, dest_points, count, status );
	// draw_subdiv( image2, delaunay, NULL, NULL, 0, NULL );
	printf("done.\n");

	cvSaveImage("edges.jpg",image2);

	cvReleaseImage(&image1);
	cvFree(&source_points);
	cvFree(&dest_points);
	cvFree(&status);
	cvReleaseMemStorage(&storage);
	cvFree(&delaunay_points);

	cvReleaseImage(&image2);

	return 0;
}
#define CV_CALIB_FIX_K5  4096
#define CV_CALIB_FIX_K6  8192
#define CV_CALIB_RATIONAL_MODEL 16384

/* Finds intrinsic and extrinsic camera parameters
   from a few views of known calibration pattern */
CVAPI(double) cvCalibrateCamera2( const CvMat* object_points,
                                const CvMat* image_points,
                                const CvMat* point_counts,
                                CvSize image_size,
                                CvMat* camera_matrix,
                                CvMat* distortion_coeffs,
                                CvMat* rotation_vectors CV_DEFAULT(NULL),
                                CvMat* translation_vectors CV_DEFAULT(NULL),
                                int flags CV_DEFAULT(0),
                                CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
                                    CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );

/* Computes various useful characteristics of the camera from the data computed by
   cvCalibrateCamera2 */
CVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix,
                                CvSize image_size,
                                double aperture_width CV_DEFAULT(0),
                                double aperture_height CV_DEFAULT(0),
                                double *fovx CV_DEFAULT(NULL),
                                double *fovy CV_DEFAULT(NULL),
                                double *focal_length CV_DEFAULT(NULL),
                                CvPoint2D64f *principal_point CV_DEFAULT(NULL),
                                double *pixel_aspect_ratio CV_DEFAULT(NULL));

#define CV_CALIB_FIX_INTRINSIC  256
#define CV_CALIB_SAME_FOCAL_LENGTH 512
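
A hedged sketch of how these declarations fit together, with point matrices shaped like the ones filled in Example #7; the image size and the matrices' contents are placeholders:

// Assumes object_points (N*board_n x 3, CV_32FC1), image_points (N*board_n x 2,
// CV_32FC1) and point_counts (N x 1, CV_32SC1) were populated as in Example #7.
CvMat* camera_matrix     = cvCreateMat(3, 3, CV_32FC1);
CvMat* distortion_coeffs = cvCreateMat(5, 1, CV_32FC1);
double rms = cvCalibrateCamera2(object_points, image_points, point_counts,
                                cvSize(640, 480),  // placeholder image size
                                camera_matrix, distortion_coeffs, NULL, NULL, 0);
printf("RMS reprojection error: %f\n", rms);
double fovx = 0, fovy = 0;
cvCalibrationMatrixValues(camera_matrix, cvSize(640, 480), 0, 0,
                          &fovx, &fovy, NULL, NULL, NULL);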
Example #6
int color_cluster(char *filename)
{
	IplImage* originimg=cvLoadImage(filename);

	int i,j;
	CvMat *samples=cvCreateMat((originimg->width)*(originimg->height),1,CV_32FC3);// sample matrix; CV_32FC3 = 32-bit float, 3 channels (color image)
	CvMat *clusters=cvCreateMat((originimg->width)*(originimg->height),1,CV_32SC1);// cluster-label matrix; CV_32SC1 = 32-bit signed int, 1 channel

	int k=0;
	for (i=0;i<originimg->width;i++)
	{
		for (j=0;j<originimg->height;j++)
		{
			CvScalar s;
			// read the pixel's three channel values (BGR)
			s.val[0]=(float)cvGet2D(originimg,j,i).val[0];//B
			s.val[1]=(float)cvGet2D(originimg,j,i).val[1];//G
			s.val[2]=(float)cvGet2D(originimg,j,i).val[2];//R
			cvSet2D(samples,k++,0,s);// append the pixel's channel values to the sample matrix in order
		}
	}

	int nCuster=2;// number of clusters; could later be chosen automatically
	cvKMeans2(samples,nCuster,clusters,cvTermCriteria(CV_TERMCRIT_ITER,100,1.0));// run k-means: at most 100 iterations, accuracy 1.0

	// image for displaying the overall clustering result
	IplImage *clusterimg=cvCreateImage(cvSize(originimg->width,originimg->height),IPL_DEPTH_8U,1);
	
	// images for displaying each cluster separately
	IplImage *cluster_img0=cvCreateImage(cvSize(originimg->width,originimg->height),IPL_DEPTH_8U,1);
	IplImage *cluster_img1=cvCreateImage(cvSize(originimg->width,originimg->height),IPL_DEPTH_8U,1);
	IplImage *cluster_img2=cvCreateImage(cvSize(originimg->width,originimg->height),IPL_DEPTH_8U,1);



	k=0;
	int val=0;
	float step=255.0f/(nCuster-1);
	CvScalar bg={223,124,124,0};// background color
	for (i=0;i<originimg->width;i++)
	{
		for (j=0;j<originimg->height;j++)
		{
			cvSet2D(cluster_img0,j,i,bg);
			cvSet2D(cluster_img1,j,i,bg);
			cvSet2D(cluster_img2,j,i,bg);
		}
	}

	for (i=0;i<originimg->width;i++)
	{
		for (j=0;j<originimg->height;j++)
		{
			val=(int)clusters->data.i[k++];
			CvScalar s;
			s.val[0]=255-val*step;// map each cluster label to a different gray value
			cvSet2D(clusterimg,j,i,s);	// store the clustered image

			// split each cluster into its own image
			switch(val)
			{
				case 0:
					cvSet2D(cluster_img0,j,i,s);break;// white cluster
				case 1:
					cvSet2D(cluster_img1,j,i,s);break;// gray cluster
				case 2:
					cvSet2D(cluster_img2,j,i,s);break;// black cluster
				default:
					break;
			}	
		
		}
    }


	//cvSaveImage("PicVideo//cluster_img0.png",cluster_img0);
	//cvSaveImage("PicVideo//cluster_img1.png",cluster_img1);
	//cvSaveImage("PicVideo//cluster_img2.png",cluster_img2);


	cvNamedWindow( "原始图像", 1 ); 
	cvNamedWindow( "聚类图像", 1 );

	cvShowImage( "原始图像", originimg  );
	cvShowImage( "聚类图像", clusterimg  );
	cvSaveImage("clusterimg.png",clusterimg);//结果保存
	
	cvWaitKey(0); 

	cvDestroyWindow( "原始图像" );
	cvDestroyWindow( "聚类图像" );

	cvReleaseImage( &originimg ); 
	cvReleaseImage( &clusterimg );
	cvReleaseImage(&cluster_img0);
	cvReleaseImage(&cluster_img1);
	cvReleaseImage(&cluster_img2);
	cvReleaseMat(&samples);
	cvReleaseMat(&clusters);

	return 0;

}
Example #7
int main(int argc, char * argv[])
{
	int corner_count;
	int successes = 0;
	int step, frame = 0;

	const char* intrinsics_path = argv[1];
	const char* distortions_path = argv[2];
	int total = argc - 3 ; 
	int start = 3;

	const char* loc = argv[start] ;

	board_w = 7; // Board width in squares
	board_h = 4; // Board height 
	n_boards = total; // Number of boards
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize( board_w, board_h );
	// Allocate Storage
	CvMat* image_points		= cvCreateMat( n_boards*board_n, 2, CV_32FC1 );
	CvMat* object_points		= cvCreateMat( n_boards*board_n, 3, CV_32FC1 );
	CvMat* point_counts		= cvCreateMat( n_boards, 1, CV_32SC1 );
	CvMat* intrinsic_matrix		= cvCreateMat( 3, 3, CV_32FC1 );
	CvMat* distortion_coeffs	= cvCreateMat( 5, 1, CV_32FC1 );

	CvPoint2D32f* corners = new CvPoint2D32f[ board_n ];

	IplImage *image = cvLoadImage( loc );
	//IplImage *image = cvQueryFrame(capture);

	IplImage *gray_image = cvCreateImage( cvGetSize( image ), 8, 1 );

	// Capture corner views: loop until we've got n_boards
	// successful captures (all corners on the board are found)

	while( start < total ){
		// Skip every board_dt frames to allow the user to move the chessboard
		//		if( frame++ % board_dt == 0 ){
		// Find chessboard corners:
		int found = cvFindChessboardCorners( image, board_sz, corners,
				&corner_count, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS );


		// Get subpixel accuracy on those corners
		cvCvtColor( image, gray_image, CV_BGR2GRAY );
		cvFindCornerSubPix( gray_image, corners, corner_count, cvSize( 11, 11 ), 
				cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));

		// Draw it
		cvDrawChessboardCorners( image, board_sz, corners, corner_count, found );
		if( found )
		{            
			cvSaveImage( "/tmp/grid_save.png", image);  
		}

		// If we got a good board, add it to our data
		if( corner_count == board_n ){
			step = successes*board_n;
			for( int i=step, j=0; j < board_n; ++i, ++j ){
				CV_MAT_ELEM( *image_points, float, i, 0 ) = corners[j].x;
				CV_MAT_ELEM( *image_points, float, i, 1 ) = corners[j].y;
				CV_MAT_ELEM( *object_points, float, i, 0 ) = j/board_w;
				CV_MAT_ELEM( *object_points, float, i, 1 ) = j%board_w;
				CV_MAT_ELEM( *object_points, float, i, 2 ) = 0.0f;
			}
			CV_MAT_ELEM( *point_counts, int, successes, 0 ) = board_n;
			successes++;
		}
		//		} 


		if( start < total )
		{
			start++; 
		}
		else if ( start == total )
		{
			start = 1;
			//  return -1;
		}

		loc = argv[start] ;

		image = cvLoadImage( loc );
	} // End collection while loop
Example #8
// compute the optical flow
void OpticalFlowLK::make()
{
	if(!imgA || !imgB || !eig_image || !tmp_image)
	{
		return;
	}

	int i=0;

#if 1
	cornerCount = LK_MAX_CORNERS;
	//
	// find features to track
	//
	cvGoodFeaturesToTrack( imgA, eig_image, tmp_image, 
							cornersA,		// output: the detected corners
							&cornerCount,	// in/out: number of detected corners
							0.01,			// quality level: minimal accepted corner quality
							5.0,			// minimum possible distance between corners
							0,				// mask defining the ROI (NULL = search the whole image)
							5,				// averaging block size
							0,				// if !=0 use cvCornerHarris(), otherwise cvCornerMinEigenVal()
							0.04 );			// free parameter of cvCornerHarris()
#else
	//
	// cover the image with a uniform grid of points
	//
	int step_x = imgA->width / 5;
	int step_y = imgA->height / 5;

	int points_count = (imgA->width / step_x + 1) * (imgA->height / step_y + 1);

	if(points_count>LK_MAX_CORNERS){
		delete []cornersA;
		cornersA=0;
		delete []cornersB;
		cornersB=0;

		cornersA= new CvPoint2D32f[ points_count ];
		cornersB= new CvPoint2D32f[ points_count ];
		featuresFound = new char[ points_count ];
		featureErrors = new float[ points_count ];
		assert(cornersA);
		assert(cornersB);
		assert(featuresFound);
		assert(featureErrors);
	}

	cornerCount = 0;
	for (int j = 1; j < imgA->height; j += step_y){
		for ( i = 1; i < imgA->width; i += step_x){
			cornersA[cornerCount] = cvPoint2D32f((float)i, (float)j);
			cornerCount++;
		}
	}
#endif

	//
	// refine the corner coordinates to sub-pixel accuracy
	//
	cvFindCornerSubPix( imgA, cornersA, cornerCount,
						cvSize(LK_WINDOW_SIZE, LK_WINDOW_SIZE), // half size of the search window
						cvSize(-1,-1),
						cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, LK_ITER_COUNT, 0.03) );

	// determine the pyramid buffer size
	CvSize pyr_sz = cvSize( imgA->width+8, imgA->height/3 );

	if(pyrA!=0)
	{
		cvReleaseImage(&pyrA);
		cvReleaseImage(&pyrB);
	}
	pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
	pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );

	//
	// compute the optical flow
	//
	cvCalcOpticalFlowPyrLK( imgA, imgB, pyrA, pyrB,
							cornersA,
							cornersB,
							cornerCount,
							cvSize( LK_WINDOW_SIZE, LK_WINDOW_SIZE ),// search window size at each pyramid level
							5,										 // maximal pyramid level
							featuresFound, // element is set to 1 if the corresponding feature was found
							featureErrors, // per-point tracking error (may be NULL)
							cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, LK_ITER_COUNT, .3 ),
							0 );

	center.x=0.0;
	center.y=0.0;
	cornerCountGood = 0;
	for( i=0; i<cornerCount; i++ )
	{
		// skip features that were not found or whose error is too large
		if( featuresFound[i]==0 || featureErrors[i]>LK_MAX_FEATURE_ERROR ) {
			continue;
		}
		center.x += cornersB[i].x;
		center.y += cornersB[i].y;
		cornerCountGood++;
	}

	if(cornerCountGood)
	{
		center.x /= cornerCountGood;
		center.y /= cornerCountGood;
	}

}
Example #9
SVMConstructor::SVMConstructor(){
    _params.svm_type = CvSVM::C_SVC;
    _params.kernel_type = CvSVM::LINEAR;
    _params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER,1000,1e-6);
}
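
A sketch of training and using a classifier with parameters like the ones this constructor sets up; the two-sample data set is purely illustrative:

// Train a linear C-SVC on toy data and classify one new sample.
float train_data[] = { 1.f, 1.f,   -1.f, -1.f };  // two 2-D samples (toy data)
float labels[]     = { 1.f, -1.f };
CvMat data_mat  = cvMat(2, 2, CV_32FC1, train_data);
CvMat label_mat = cvMat(2, 1, CV_32FC1, labels);
CvSVMParams params(CvSVM::C_SVC, CvSVM::LINEAR, 0, 0, 0, 1, 0, 0, 0,
                   cvTermCriteria(CV_TERMCRIT_ITER, 1000, 1e-6));
CvSVM svm;
svm.train(&data_mat, &label_mat, 0, 0, params);
float sample[] = { 0.9f, 1.1f };
CvMat sample_mat = cvMat(1, 2, CV_32FC1, sample);
float predicted = svm.predict(&sample_mat);  // returns the predicted class label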
Example #10
int main222( int argc,   char** argv )
{
    CvCapture* capture = 0;

    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromAVI( argv[1] );

    if( !capture )
    {
        fprintf(stderr,"Could not initialize capturing...\n");
        return -1;
    }

    printf( "Hot keys: \n"
        "\tESC - quit the program\n"
        "\tc - stop the tracking\n"
        "\tb - switch to/from backprojection view\n"
        "\th - show/hide object histogram\n"
        "To initialize tracking, select the object with mouse\n" );

    cvNamedWindow( "Histogram", 1 );
    cvNamedWindow( "CamShiftDemo", 1 );
    cvSetMouseCallback( "CamShiftDemo", on_mouse, 0 );
    cvCreateTrackbar( "Vmin", "CamShiftDemo", &vmin, 256, 0 );
    cvCreateTrackbar( "Vmax", "CamShiftDemo", &vmax, 256, 0 );
    cvCreateTrackbar( "Smin", "CamShiftDemo", &smin, 256, 0 );

    for(;;)
    {
        IplImage* frame = 0;
        int i, bin_w, c;

        frame = cvQueryFrame( capture );
        if( !frame )
            break;

        if( !image )
        {
            /* allocate all the buffers */
            image = cvCreateImage( cvGetSize(frame), 8, 3 );
            image->origin = frame->origin;
            hsv = cvCreateImage( cvGetSize(frame), 8, 3 );
            hue = cvCreateImage( cvGetSize(frame), 8, 1 );
            mask = cvCreateImage( cvGetSize(frame), 8, 1 );
            backproject = cvCreateImage( cvGetSize(frame), 8, 1 );
            hist = cvCreateHist( 1, &hdims, CV_HIST_ARRAY, &hranges, 1 );
            histimg = cvCreateImage( cvSize(320,200), 8, 3 );
            cvZero( histimg );
        }

        cvCopy( frame, image, 0 );
        cvCvtColor( image, hsv, CV_BGR2HSV );

        if( track_object )
        {
            int _vmin = vmin, _vmax = vmax;

            cvInRangeS( hsv, cvScalar(0,smin,MIN(_vmin,_vmax),0),
                        cvScalar(180,256,MAX(_vmin,_vmax),0), mask );
            cvSplit( hsv, hue, 0, 0, 0 );

            if( track_object < 0 )
            {
                float max_val = 0.f;
                cvSetImageROI( hue, selection );
                cvSetImageROI( mask, selection );
                cvCalcHist( &hue, hist, 0, mask );
                cvGetMinMaxHistValue( hist, 0, &max_val, 0, 0 );
                cvConvertScale( hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0 );
                cvResetImageROI( hue );
                cvResetImageROI( mask );
                track_window = selection;
                track_object = 1;

                cvZero( histimg );
                bin_w = histimg->width / hdims;
                for( i = 0; i < hdims; i++ )
                {
                    int val = cvRound( cvGetReal1D(hist->bins,i)*histimg->height/255 );
                    CvScalar color = hsv2rgb(i*180.f/hdims);
                    cvRectangle( histimg, cvPoint(i*bin_w,histimg->height),
                                 cvPoint((i+1)*bin_w,histimg->height - val),
                                 color, -1, 8, 0 );
                }
            }

            cvCalcBackProject( &hue, backproject, hist );
            cvAnd( backproject, mask, backproject, 0 );
            cvCamShift( backproject, track_window,
                        cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
                        &track_comp, &track_box );
            track_window = track_comp.rect;

            if( backproject_mode )
                cvCvtColor( backproject, image, CV_GRAY2BGR );
            if( !image->origin )
                track_box.angle = -track_box.angle;
            cvEllipseBox( image, track_box, CV_RGB(255,0,0), 3, CV_AA, 0 );
        }

        if( select_object && selection.width > 0 && selection.height > 0 )
        {
            cvSetImageROI( image, selection );
            cvXorS( image, cvScalarAll(255), image, 0 );
            cvResetImageROI( image );
        }

        cvShowImage( "CamShiftDemo", image );
        cvShowImage( "Histogram", histimg );

        c = cvWaitKey(10);
        if( (char) c == 27 )
            break;
        switch( (char) c )
        {
        case 'b':
            backproject_mode ^= 1;
            break;
        case 'c':
            track_object = 0;
            cvZero( histimg );
            break;
        case 'h':
            show_hist ^= 1;
            if( !show_hist )
                cvDestroyWindow( "Histogram" );
            else
                cvNamedWindow( "Histogram", 1 );
            break;
        default:
            ;
        }
    }

    cvReleaseCapture( &capture );
    cvDestroyWindow("CamShiftDemo");

    return 0;
}
Example #11
void mexFunction(int plhs_size, mxArray *plhs[], int prhs_size, const mxArray *prhs[])
{
    // Load images

    if (prhs_size ==4) {
        win_size = *mxGetPr(prhs[3]);
    }

    int N = mxGetN(prhs[0]);
    int M = mxGetM(prhs[0]);
    grey0 = cvCreateImage( cvSize(N, M), 8, 1 );
    grey1 = cvCreateImage( cvSize(N, M), 8, 1 );
    loadImageFromMatlab(prhs[0],grey0);
    loadImageFromMatlab(prhs[1],grey1);

    // Load feature points
    double *fp = mxGetPr(prhs[2]);

    int num_pts = mxGetN(prhs[2]);
    points[0] = (CvPoint2D32f*)cvAlloc(num_pts*sizeof(points[0][0]));
    points[1] = (CvPoint2D32f*)cvAlloc(num_pts*sizeof(points[0][0]));
    char *status = (char*)cvAlloc(num_pts);
    float *error = (float*) cvAlloc(num_pts*sizeof(float));
    for (int i = 0; i < num_pts; i++) {
        points[0][i].x = fp[2*i];
        points[0][i].y = fp[2*i+1];
    }
    // not needed; skipping it speeds things up from 40 fps to 200 fps
    //cvFindCornerSubPix( grey0, points[0], num_pts, cvSize(win_size,win_size), cvSize(-1,-1), cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));

    pyramid1 = cvCreateImage( cvGetSize(grey1), 8, 1 );
    pyramid0 = cvCreateImage( cvGetSize(grey1), 8, 1 );

    cvCalcOpticalFlowPyrLK( grey0, grey1, pyramid0, pyramid1, points[0], points[1], num_pts, cvSize(win_size,win_size), 6, status, error, cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), 0 );

    // Output

    plhs[0] = mxCreateDoubleMatrix(6, num_pts, mxREAL);
    double *output = mxGetPr(plhs[0]);
    for (int i = 0; i < num_pts; i++) {
        output[6*i]   = (double) points[0][i].x;
        output[6*i+1] = (double) points[0][i].y;
        output[6*i+2] = (double) points[1][i].x;
        output[6*i+3] = (double) points[1][i].y;
        output[6*i+4] = (double) error[i];
        output[6*i+5] = (double) status[i];

        //output[5*i+5] = (double) error[i];
    }

    // Tidy up
    cvFree( &points[0] );
    cvFree( &points[1] );
    cvFree( &status );
    cvFree( &error );
    cvReleaseImage( &pyramid0 );
    cvReleaseImage( &pyramid1 );
    cvReleaseImage( &grey0 );
    cvReleaseImage( &grey1 );
    return;
}
/*!
    \fn CvBinGabAdaFeatureSelect::svmlearning(const char* path, int nofeatures, CvSVM * svm)
 */
void CvBinGabAdaFeatureSelect::svmlearning(const char* path, int nofeatures, CvSVM * svm)
{
  if( db_type == XM2VTS )
  {
    printf("Training an SVM classifier  ................\n");
    CvXm2vts *xm2vts = (CvXm2vts*)database;
    int nTrainingExample = 200*4;
    CvMat* trainData = cvCreateMat(nTrainingExample, nofeatures, CV_32FC1);
    CvMat* response = cvCreateMat(nTrainingExample, 1, CV_32FC1);
    
    for (int i = 0; i < nofeatures; i++)
    {
      /* load feature value */
      CvGaborFeature *feature;
      feature = new_pool->getfeature(i);
      printf("Getting the %d feature ............\n", i+1);
      
      char *filename = new char[50];
      //training validation
      double l, t;
      int fal = 0;
      for(int sub = 1; sub <= 200; sub++)
      {
        if (((CvXm2vts*)database)->getGender( sub )) t = 1.0;
        else t = 2.0;
        
        for(int pic = 1; pic <= 4; pic++)
        {
          sprintf(filename, "%s/%d_%d.bmp", path, sub, pic);
          IplImage *img = cvLoadImage( filename, CV_LOAD_IMAGE_ANYCOLOR );
          IplImage *grayimg = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
          if ( img->nChannels == 1 )  cvCopy( img, grayimg, NULL );
          else if (img->nChannels == 3)   cvCvtColor( img, grayimg, CV_RGB2GRAY );
          
          double vfeature = feature->val( img );
          cvSetReal2D( trainData, ((sub-1)*4+(pic-1)), i, vfeature );
          cvSetReal1D( response, ((sub-1)*4+(pic-1)), t );
          cvReleaseImage(&img);
          cvReleaseImage(&grayimg);
        }
      }
      delete [] filename;
    }
    
    
    printf("building the svm classifier .........................\n");
    CvTermCriteria term_crit = cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 200, 0.8);
  /*Type of SVM, one of the following types:
    CvSVM::C_SVC - n-class classification (n>=2), allows imperfect separation of classes with penalty multiplier C for outliers.
    CvSVM::NU_SVC - n-class classification with possible imperfect separation. Parameter nu (in the range 0..1, the larger the value, the smoother the decision boundary) is used instead of C.
    CvSVM::ONE_CLASS - one-class SVM. All the training data are from the same class, SVM builds a boundary that separates the class from the rest of the feature space.
    CvSVM::EPS_SVR - regression. The distance between feature vectors from the training set and the fitting hyperplane must be less than p. For outliers the penalty multiplier C is used.
    CvSVM::NU_SVR - regression; nu is used instead of p. */
    int _svm_type = CvSVM::NU_SVC;
  /*The kernel type, one of the following types:
    CvSVM::LINEAR - no mapping is done; linear discrimination (or regression) is done in the original feature space. It is the fastest option. d(x,y) = x•y == (x,y)
    CvSVM::POLY - polynomial kernel: d(x,y) = (gamma*(x•y)+coef0)^degree
    CvSVM::RBF - radial basis function kernel, a good choice in most cases: d(x,y) = exp(-gamma*|x-y|^2)
    CvSVM::SIGMOID - sigmoid function used as a kernel: d(x,y) = tanh(gamma*(x•y)+coef0) */
    
    int _kernel_type = CvSVM::POLY;
    
    double _degree = 3.0;
    double _gamma = 1.0;
    double _coef0 = 0.0;
    double _C = 1.0;
    double _nu = 1.0;
    double _p = 1.0;
    
    CvSVMParams  params( _svm_type, _kernel_type, _degree, _gamma, _coef0, _C, _nu, _p,
                         0, term_crit );
    
    svm->train( trainData, response, 0, 0, params );
    
    svm->save( "svm.xml", "svm" );
    cvReleaseMat(&response);
    cvReleaseMat(&trainData);
  }
}
int main(int argc, char *argv[])
{
	if (argc != 6) {
		printf("\nERROR: too few parameters\n");
		help();
		return -1;
	}
	help();
	//INPUT PARAMETERS:
	int board_w = atoi(argv[1]);
	int board_h = atoi(argv[2]);
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	CvMat *intrinsic = (CvMat *) cvLoad(argv[3]);
	CvMat *distortion = (CvMat *) cvLoad(argv[4]);
	IplImage *image = 0, *gray_image = 0;
	if ((image = cvLoadImage(argv[5])) == 0) {
		printf("Error: Couldn't load %s\n", argv[5]);
		return -1;
	}
	gray_image = cvCreateImage(cvGetSize(image), 8, 1);
	cvCvtColor(image, gray_image, CV_BGR2GRAY);

	//UNDISTORT OUR IMAGE
	IplImage *mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	IplImage *mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	cvInitUndistortMap(intrinsic, distortion, mapx, mapy);
	IplImage *t = cvCloneImage(image);
	cvRemap(t, image, mapx, mapy);

	//GET THE CHECKERBOARD ON THE PLANE
	cvNamedWindow("Checkers");
	CvPoint2D32f *corners = new CvPoint2D32f[board_n];
	int corner_count = 0;
	int found = cvFindChessboardCorners(image,
										board_sz,
										corners,
										&corner_count,
										CV_CALIB_CB_ADAPTIVE_THRESH |
										CV_CALIB_CB_FILTER_QUADS);
	if (!found) {
		printf
			("Couldn't acquire checkerboard on %s, only found %d of %d corners\n",
			 argv[5], corner_count, board_n);
		return -1;
	}
	//Get Subpixel accuracy on those corners
	cvFindCornerSubPix(gray_image, corners, corner_count,
					   cvSize(11, 11), cvSize(-1, -1),
					   cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30,
									  0.1));

	//GET THE IMAGE AND OBJECT POINTS:
	//Object points are at (r,c): (0,0), (board_w-1,0), (0,board_h-1), (board_w-1,board_h-1)
	//That means corners are at: corners[r*board_w + c]
	CvPoint2D32f objPts[4], imgPts[4];
	objPts[0].x = 0;
	objPts[0].y = 0;
	objPts[1].x = board_w - 1;
	objPts[1].y = 0;
	objPts[2].x = 0;
	objPts[2].y = board_h - 1;
	objPts[3].x = board_w - 1;
	objPts[3].y = board_h - 1;
	imgPts[0] = corners[0];
	imgPts[1] = corners[board_w - 1];
	imgPts[2] = corners[(board_h - 1) * board_w];
	imgPts[3] = corners[(board_h - 1) * board_w + board_w - 1];

	//DRAW THE POINTS in order: B,G,R,YELLOW
	cvCircle(image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0, 0, 255), 3);
	cvCircle(image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0, 255, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255, 0, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255, 255, 0), 3);

	//DRAW THE FOUND CHECKERBOARD
	cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
	cvShowImage("Checkers", image);

	//FIND THE HOMOGRAPHY
	CvMat *H = cvCreateMat(3, 3, CV_32F);
	CvMat *H_invt = cvCreateMat(3, 3, CV_32F);
	cvGetPerspectiveTransform(objPts, imgPts, H);

	//LET THE USER ADJUST THE Z HEIGHT OF THE VIEW
	float Z = 25;
	int key = 0;
	IplImage *birds_image = cvCloneImage(image);
	cvNamedWindow("Birds_Eye");
	while (key != 27) {			//escape key stops
		CV_MAT_ELEM(*H, float, 2, 2) = Z;
//     cvInvert(H,H_invt); //If you want to invert the homography directly
//     cvWarpPerspective(image,birds_image,H_invt,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS );
		//USE HOMOGRAPHY TO REMAP THE VIEW
		cvWarpPerspective(image, birds_image, H,
						  CV_INTER_LINEAR + CV_WARP_INVERSE_MAP +
						  CV_WARP_FILL_OUTLIERS);
		cvShowImage("Birds_Eye", birds_image);
		key = cvWaitKey();
		if (key == 'u')
			Z += 0.5;
		if (key == 'd')
			Z -= 0.5;
	}

	//SHOW ROTATION AND TRANSLATION VECTORS
	CvMat *image_points = cvCreateMat(4, 1, CV_32FC2);
	CvMat *object_points = cvCreateMat(4, 1, CV_32FC3);
	for (int i = 0; i < 4; ++i) {
		CV_MAT_ELEM(*image_points, CvPoint2D32f, i, 0) = imgPts[i];
		CV_MAT_ELEM(*object_points, CvPoint3D32f, i, 0) =
			cvPoint3D32f(objPts[i].x, objPts[i].y, 0);
	}

	CvMat *RotRodrigues = cvCreateMat(3, 1, CV_32F);
	CvMat *Rot = cvCreateMat(3, 3, CV_32F);
	CvMat *Trans = cvCreateMat(3, 1, CV_32F);
	cvFindExtrinsicCameraParams2(object_points, image_points,
								 intrinsic, distortion, RotRodrigues, Trans);
	cvRodrigues2(RotRodrigues, Rot);

	//SAVE AND EXIT
	cvSave("Rot.xml", Rot);
	cvSave("Trans.xml", Trans);
	cvSave("H.xml", H);
	cvInvert(H, H_invt);
	cvSave("H_invt.xml", H_invt);	//Bottom row of H invert is horizon line
	return 0;
}
Example #14
void vpKltOpencv::track(const IplImage *I)
{
  if (!initialized) {
    vpERROR_TRACE("KLT Not initialized") ;
    throw(vpException(vpTrackingException::initializationError,
		      "KLT Not initialized")) ;
  }

  if (!I) {
    throw(vpException(vpTrackingException::initializationError,
		      "Image Not initialized")) ;
  }

  if (I->depth != IPL_DEPTH_8U || I->nChannels != 1)  {
    throw(vpException(vpTrackingException::initializationError,
		      "Bad Image format")) ;
  }

  

  CV_SWAP(prev_image, image, swap_temp);
  CV_SWAP(prev_pyramid, pyramid, swap_temp);
  
  cvCopy(I, image, 0);
  
  if(!initial_guess){
    // Save current features as previous features
    countPrevFeatures = countFeatures;
    for (int boucle=0; boucle<countFeatures;boucle++)  {
      prev_featuresid[boucle] = featuresid[boucle];
    }
    
    CvPoint2D32f *swap_features = 0;
    CV_SWAP(prev_features, features, swap_features);
  }
  
  if (countFeatures <= 0) return;

  cvCalcOpticalFlowPyrLK( prev_image, image, prev_pyramid, pyramid,
			  prev_features, features, countFeatures,
			  cvSize(win_size, win_size), pyramid_level,
			  status, 0, cvTermCriteria(CV_TERMCRIT_ITER
						    |CV_TERMCRIT_EPS,20,0.03),
			  flags );
  
  if(!initial_guess)
    flags |= CV_LKFLOW_PYR_A_READY;
  else{
    flags = CV_LKFLOW_PYR_A_READY;
    initial_guess = false;
  }

  int i,k;
  for (i = k = 0; i < countFeatures ; i++)  {
    if (!status[i]) 	{
      lostDuringTrack[i] = 1;
      if (OnFeatureLost)
	OnFeatureLost(_tid, i, featuresid[i], features[i].x,
		      features[i].y);
      continue;
    }
    
    if (IsFeatureValid)	{
      if (!IsFeatureValid(_tid, features[i].x, features[i].y))   {
	lostDuringTrack[i] = 1;
	if (OnFeatureLost)
	  OnFeatureLost(_tid, i, featuresid[i], features[i].x, features[i].y);
	continue;
      }
    }
    features[k] = features[i];
    featuresid[k] = featuresid[i];

    if (OnMeasureFeature) OnMeasureFeature(_tid, k, featuresid[k], features[k].x, features[k].y);
    
    lostDuringTrack[i] = 0;
    k++;
  }
  countFeatures = k;
}
Example #15
int main(int argc, char** argv) 
{
  // GLOBAL SETTINGS
  static int framecounter=0;
  const CvSize imsize = cvSize(320,240);
  int delay = 0;
  
  const int win_size = 10;
  CvSize pyr_sz = cvSize( imsize.width+8, imsize.height/3 );
  IplImage * pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
  IplImage * pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
  IplImage * rawImage_resized = cvCreateImage( imsize, IPL_DEPTH_8U, 3);

  cvNamedWindow("Test");
  CvGenericTracker tracker;

  // LOAD INPUT FILE
  CvCapture * capture = NULL;
  if (argc==1) {
    capture = cvCreateCameraCapture(0);
	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, imsize.width);
	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, imsize.height);
  }else{
    capture = cvCreateFileCapture(argv[1]);
  }
  if (!capture) {fprintf(stderr, "Error: fail to open source video!\n");return 0;}
  cvSetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES, framecounter);

  // START ENDLESS LOOP
  while(1)
  {
	// GET NEXT FRAME
    if (1){
      cvSetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES, framecounter++);
    }else{
      framecounter++;
    }
    IplImage * rawImage = cvQueryFrame(capture);
    if (!rawImage) {fprintf(stderr, "Info: end of video!\n"); break;}
    cvResize(rawImage, rawImage_resized);
    if (tracker.initialized()){
      tracker.update(rawImage_resized);
    }else{
      tracker.initialize(rawImage_resized);
      tracker.m_framecounter=framecounter;
    }

    // START PROCESSING HERE
    {
	  // Grab the current and next frames from the tracker and
	  // allocate the other structures we will need for results.
	  CvMat * imgA = tracker.m_currImage;
	  IplImage * imgB = tracker.m_nextImage;
	  IplImage * imgC = cvCloneImage(rawImage_resized);
  
	  // The first thing we need to do is get the features
	  // we want to track.
	  IplImage * eig_image = cvCreateImage( imsize, IPL_DEPTH_32F, 1 );
	  IplImage * tmp_image = cvCreateImage( imsize, IPL_DEPTH_32F, 1 );
	  int corner_count = MAX_CORNERS;
	  CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
	  cvGoodFeaturesToTrack(imgA,eig_image,tmp_image,cornersA,&corner_count,0.01,5.0,0,3,0,0.04);
	  cvFindCornerSubPix(imgA,cornersA,corner_count,cvSize(win_size,win_size),cvSize(-1,-1),
						 cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));

	  // Call the Lucas Kanade algorithm
	  char features_found[ MAX_CORNERS ];
	  float feature_errors[ MAX_CORNERS ];
	  CvPoint2D32f * cornersB = new CvPoint2D32f[ MAX_CORNERS ];
	  cvCalcOpticalFlowPyrLK(imgA,imgB,pyrA,pyrB,
							 cornersA,cornersB,corner_count,cvSize( win_size,win_size ),
							 5,features_found,feature_errors,
							 cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 ),
							 (framecounter<2)?0:CV_LKFLOW_PYR_B_READY);

	  // Now make some image of what we are looking at:
	  for( int i=0; i<corner_count; i++ ) {
		if( features_found[i]==0|| feature_errors[i]>550 ) {
		  fprintf(stderr,"error=%f\n",feature_errors[i]);continue;
		}
		CvPoint p0 = cvPoint(cvRound( cornersA[i].x ),cvRound( cornersA[i].y ));
		CvPoint p1 = cvPoint(cvRound( cornersB[i].x ),cvRound( cornersB[i].y ));
		cvLine( imgC, p0, p1, CV_RGB(255,0,0), 1 );
	  }

	  cvShowImage("Test",imgC);
	  cvReleaseImage(&imgC);
	  cvReleaseImage(&eig_image);
	  cvReleaseImage(&tmp_image);
	  delete [] cornersA;
	  delete [] cornersB;
	}
	
	// DISPLAY PROCESSING RESULT
	int key = cvWaitKey(delay)&0xff;
	if (key==27){
	  break;
	}else if (key==' '){
	  if (delay){ delay = 0; }else{ delay = 30; }
	}else if (key=='f'){ // skip to next frame
	}else if (key=='S'){ // skip forward 10 frames
	  framecounter+=10;fprintf(stderr,"framecount:%d\n",framecounter);
	}else if (key=='Q'){ // skip back 10 frames
	  framecounter=MAX(1,framecounter-10);fprintf(stderr,"framecount:%d\n",framecounter);
	}else if (key!=0xff){
	  fprintf(stderr, "Warning: Unknown key press : %c\n", key);
	} // end of key press processing
  } // end of video

  cvReleaseImage(&pyrA);
  cvReleaseImage(&pyrB);
  cvReleaseImage(&rawImage_resized);

  return 0;
}
int main(int argc, char **argv) {
  // Check parameters
  if (argc < 2) {
    fprintf(stderr, "%s: %s\n", APP_NAME, "No video name given");
    fprintf(stderr, "Usage: %s <video file name> [output file name]\n", APP_NAME);

    exit(EXIT_FAILURE);
  }

  char *output_file_name;
  if (argc == 3) {
    output_file_name = argv[2];
  }
  else {
    output_file_name = OUTPUT_FILE_NAME;
  }

  // Load video
  char *file_name = argv[1];
  CvCapture *video = cvCaptureFromFile(file_name);

  if (!video) {
    exit(EXIT_FAILURE);
  }

  // Extract video parameters
  CvSize video_frame_size;
  video_frame_size.width = cvGetCaptureProperty(video, CV_CAP_PROP_FRAME_WIDTH);
  video_frame_size.height = cvGetCaptureProperty(video, CV_CAP_PROP_FRAME_HEIGHT);
  double video_fps = cvGetCaptureProperty(video, CV_CAP_PROP_FPS);
  long video_frame_count = cvGetCaptureProperty(video, CV_CAP_PROP_FRAME_COUNT);

  // Initialize video writer
  CvVideoWriter *video_writer = cvCreateVideoWriter(output_file_name,
    FOURCC, video_fps, video_frame_size, true);

  // Initialize variables for optical flow calculation
  IplImage *current_frame = cvCreateImage(video_frame_size, IPL_DEPTH_8U, 3);
  IplImage *eigen_image = cvCreateImage(video_frame_size, IPL_DEPTH_32F, 1);
  IplImage *temp_image = cvCreateImage(video_frame_size, IPL_DEPTH_32F, 1);

  int corner_count = MAX_CORNERS;
  CvPoint2D32f corners[2][MAX_CORNERS];
  char features_found[MAX_CORNERS];
  float feature_errors[MAX_CORNERS];

  IplImage *frame_buffer[2];
  IplImage *pyramid_images[2];
  CvSize pyramid_size = cvSize(video_frame_size.width + 8, video_frame_size.height / 3);

  int i;
  for (i = 0; i < 2; i++) {
    frame_buffer[i] = cvCreateImage(video_frame_size, IPL_DEPTH_8U, 1);
    pyramid_images[i] = cvCreateImage(pyramid_size, IPL_DEPTH_32F, 1);
  }

  // Process video
  while (query_frame(video, frame_buffer, current_frame)) {
    // Corner finding with Shi and Tomasi
    cvGoodFeaturesToTrack(
      frame_buffer[0],
      eigen_image,
      temp_image,
      corners[0],
      &corner_count,
      0.01,
      5.0,
      0,
      3,
      0,
      0.4);

    cvFindCornerSubPix(
      frame_buffer[0],
      corners[0],
      corner_count,
      cvSize(WINDOW_SIZE, WINDOW_SIZE),
      cvSize(-1, -1),
      cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3));

    // Pyramid Lucas-Kanade
    cvCalcOpticalFlowPyrLK(
      frame_buffer[0],
      frame_buffer[1],
      pyramid_images[0],
      pyramid_images[1],
      corners[0],
      corners[1],
      corner_count,
      cvSize(WINDOW_SIZE, WINDOW_SIZE),
      5,
      features_found,
      feature_errors,
      cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3),
      0);

    // Draw optical flow vectors
    int i;
	double l_max = 0, l;
	for (i = 0; i < corner_count; i++) {
      if (features_found[i] == 0 || feature_errors[i] > 550) {
        continue;
      }
	  l = sqrt(corners[1][i].x*corners[1][i].x+corners[1][i].y*corners[1][i].y);
	  if(l>l_max) l_max = l;	  
	}
	
    for (i = 0; i < corner_count; i++) {
      if (features_found[i] == 0 || feature_errors[i] > 550) {
        continue;
      }
	  
	  l = sqrt(corners[1][i].x*corners[1][i].x + corners[1][i].y*corners[1][i].y);
	  double spinSize = 5.0 * l / l_max;

      CvPoint points[2];
      points[0] = cvPoint(cvRound(corners[0][i].x), cvRound(corners[0][i].y));
      points[1] = cvPoint(cvRound(corners[1][i].x), cvRound(corners[1][i].y));

      cvLine(current_frame, points[0], points[1], CV_RGB(0, 255, 0), 1, 8, 0);
	  
	  double angle;                                                                          
	  angle = atan2( (double) points[0].y - points[1].y, (double) points[0].x - points[1].x );
	
	  points[0].x = (int) (points[1].x + spinSize * cos(angle + 3.1416 / 4));
	  points[0].y = (int) (points[1].y + spinSize * sin(angle + 3.1416 / 4));
	  cvLine(current_frame, points[0], points[1], CV_RGB(0, 255, 0), 1, 8, 0);

	  points[0].x = (int) (points[1].x + spinSize * cos(angle - 3.1416 / 4));
	  points[0].y = (int) (points[1].y + spinSize * sin(angle - 3.1416 / 4));
	  cvLine( current_frame, points[0], points[1], CV_RGB(0, 255, 0), 1, 8, 0);
    }

    cvWriteFrame(video_writer, current_frame);
  }

  // Clean up
  cvReleaseImage(&current_frame);
  cvReleaseImage(&eigen_image);
  cvReleaseImage(&temp_image);

  for (i = 0; i < 2; i++) {
    cvReleaseImage(&frame_buffer[i]);
    cvReleaseImage(&pyramid_images[i]);
  }
  cvReleaseCapture(&video);
  cvReleaseVideoWriter(&video_writer);

  return 0;
}
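Both drawing loops above build the arrowhead the same way: atan2 gives the direction from the tip back toward the tail, and the two head strokes are that direction rotated by plus and minus pi/4. A self-contained helper, as a sketch (the name drawFlowArrow is ours, not from these examples):

#include <math.h>
#include <opencv/cv.h>

// Sketch: draw the segment a->b with a two-stroke arrowhead of length
// headLen at b. The head strokes point from b back toward a, rotated
// by +/- 45 degrees.
static void drawFlowArrow(IplImage *img, CvPoint2D32f a, CvPoint2D32f b,
                          CvScalar color, double headLen)
{
  CvPoint p = cvPoint(cvRound(a.x), cvRound(a.y));
  CvPoint q = cvPoint(cvRound(b.x), cvRound(b.y));
  double angle = atan2((double)(a.y - b.y), (double)(a.x - b.x));
  cvLine(img, p, q, color, 1, CV_AA, 0);
  CvPoint h = cvPoint(cvRound(b.x + headLen * cos(angle + CV_PI / 4)),
                      cvRound(b.y + headLen * sin(angle + CV_PI / 4)));
  cvLine(img, h, q, color, 1, CV_AA, 0);
  h = cvPoint(cvRound(b.x + headLen * cos(angle - CV_PI / 4)),
              cvRound(b.y + headLen * sin(angle - CV_PI / 4)));
  cvLine(img, h, q, color, 1, CV_AA, 0);
}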
Example #17
0
// Parameters
// - imgA and imgB: frames in sequence (8U single channel)
// - imgC: original frame to mark (RGB is fine)
void calcOpticalFlowAndMark(IplImage *imgA, IplImage *imgB, IplImage *imgC) {
    // Create buffers if necessary
    CvSize img_sz = cvGetSize( imgA );
    if( !eig_image )
        eig_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
    if( !tmp_image )
        tmp_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );

    // Find features to track
    int corner_count = MAX_CORNERS;
    cvGoodFeaturesToTrack(
        imgA,
        eig_image,
        tmp_image,
        cornersA,
        &corner_count,
        0.03, // quality_level
        5.0, // min_distance
        NULL,
        3, // block_size (default)
        0, // use_harris (default)
        0.04 // k (default)
    );
    cvFindCornerSubPix(
        imgA,
        cornersA,
        corner_count,
        cvSize(win_size, win_size),
        cvSize(-1, -1),
        cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03)
    );

    // Call the Lucas-Kanade algorithm
    int flags = CV_LKFLOW_PYR_A_READY;
    CvSize pyr_sz = cvSize( imgA->width+8, imgA->height/3 );
    if( !pyrA || !pyrB ) {
        pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
        pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
        flags = 0;
    }

    cvCalcOpticalFlowPyrLK(
        imgA,
        imgB,
        pyrA,
        pyrB,
        cornersA,
        cornersB,
        corner_count,
        cvSize( win_size, win_size ),
        5,
        features_found,
        feature_errors,
        cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 ),
        flags
    );

    // Draw resulting velocity vectors
    for( int i = 0; i < corner_count; i++ ) {
        if( features_found[i] == 0 || feature_errors[i] > 550 ) {
            // printf("Error is %f/n", feature_errors[i]);
            continue;
        }

        double x0 = cornersA[i].x;
        double y0 = cornersA[i].y;
        CvPoint p = cvPoint( cvRound(x0), cvRound(y0) );
        double x1 = cornersB[i].x;
        double y1 = cornersB[i].y;
        CvPoint q = cvPoint( cvRound(x1), cvRound(y1) );
        //if( sqrt( (double) (y1-y0)*(y1-y0) + (x1-x0)*(x1-x0) ) < 0.1 )
        //if(fabs(y1 - y0) < .5 || fabs(x1 - x0) < .5)
        //  continue;
        //printf("%.4lf %.4lf -> %.4lf %.4lf\n", x0, y0, x1, y1);

        CvScalar line_color = CV_RGB(255, 0, 0);
        int line_thickness = 1;
        // Main line (p -> q)
        //cvLine( imgC, p, q, CV_RGB(255,0,0), 2 );

        // Main line (p -> q) lengthened
        double angle = atan2( (double) y1 - y0, (double) x1 - x0 );
        double hypotenuse = sqrt( (double) (y1-y0)*(y1-y0) + (x1-x0)*(x1-x0) );
        if(hypotenuse < 1.01)
            hypotenuse = 1.01;
        if(hypotenuse > 1.99)
            hypotenuse = 1.99;
        q.x = cvRound(x0 + 6 * hypotenuse * cos(angle));
        q.y = cvRound(y0 + 6 * hypotenuse * sin(angle));
        cvLine( imgC, p, q, line_color, line_thickness, CV_AA, 0 );

        // Arrows
        p.x = (int) (x0 + 5 * hypotenuse * cos(angle + pi / 4));
        p.y = (int) (y0 + 5 * hypotenuse * sin(angle + pi / 4));
        cvLine( imgC, p, q, line_color, line_thickness, CV_AA, 0 );

        p.x = (int) (x0 + 5 * hypotenuse * cos(angle - pi / 4));
        p.y = (int) (y0 + 5 * hypotenuse * sin(angle - pi / 4));
        cvLine( imgC, p, q, line_color, line_thickness, CV_AA, 0 );
    }
}
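The pyramid scratch buffers in these examples are all sized with the same rule, cvSize(width + 8, height / 3). The rationale (from the classic book examples these snippets derive from): the stacked pyramid levels occupy roughly width*height*(1/4 + 1/16 + ...) = width*height/3 pixels, and the extra 8 columns leave room for row alignment. A one-line helper, as a sketch:

// Sketch: scratch-buffer size for cvCalcOpticalFlowPyrLK's pyramid arguments.
static CvSize lkPyramidSize(CvSize frame)
{
    return cvSize(frame.width + 8, frame.height / 3);
}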
Example #18
0
	static
	int build_mlp_classifier( char* data_filename,
	   char* filename_to_save, char* filename_to_load )
	{
	    const int class_count = 26;
	    CvMat* data = 0;
	    CvMat train_data;
	    CvMat* responses = 0;
	    CvMat* mlp_response = 0;

	    int ok = read_num_class_data( data_filename, 16, &data, &responses );
	    int nsamples_all = 0, ntrain_samples = 0;
	    int i, j;
	    double train_hr = 0, test_hr = 0;
	    CvANN_MLP mlp;

	    if( !ok )
	    {
	        printf( "Could not read the database %s\n", data_filename );
	        return -1;
	    }

	    printf( "The database %s is loaded.\n", data_filename );
	    nsamples_all = data->rows;
	    ntrain_samples = (int)(nsamples_all*0.8);

	    // Create or load MLP classifier
	    if( filename_to_load )
	    {
	        // load classifier from the specified file
	        mlp.load( filename_to_load );
	        ntrain_samples = 0;
	        if( !mlp.get_layer_count() )
	        {
	            printf( "Could not read the classifier %s\n", filename_to_load );
	            return -1;
	        }
	        printf( "The classifier %s is loaded.\n", data_filename );
	    }
	    else
	    {
	        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
	        //
	        // MLP does not support categorical variables explicitly.
	        // So, instead of the output class label, we will use
	        // a binary vector of <class_count> components for training and,
	        // therefore, MLP will give us a vector of "probabilities" at the
	        // prediction stage
	        //
	        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

	        CvMat* new_responses = cvCreateMat( ntrain_samples, class_count, CV_32F );

	        // 1. unroll the responses
	        printf( "Unrolling the responses...\n");
	        for( i = 0; i < ntrain_samples; i++ )
	        {
	            int cls_label = cvRound(responses->data.fl[i]) - 'A';
	            float* bit_vec = (float*)(new_responses->data.ptr + i*new_responses->step);
	            for( j = 0; j < class_count; j++ )
	                bit_vec[j] = 0.f;
	            bit_vec[cls_label] = 1.f;
	        }
	        cvGetRows( data, &train_data, 0, ntrain_samples );

	        // 2. train classifier

	        int layer_sz[] = { data->cols, 100, 100, class_count };
	        CvMat layer_sizes =
	            cvMat( 1, (int)(sizeof(layer_sz)/sizeof(layer_sz[0])), CV_32S, layer_sz );
	        mlp.create( &layer_sizes );
	        printf( "Training the classifier (may take a few minutes)...\n");
	        mlp.train( &train_data, new_responses, 0, 0,
	            CvANN_MLP_TrainParams(cvTermCriteria(CV_TERMCRIT_ITER,300,0.01),
	            CvANN_MLP_TrainParams::BACKPROP,0.001));
	            // alternative: CvANN_MLP_TrainParams::RPROP, 0.05
	        cvReleaseMat( &new_responses );
	        printf("\n");
	    }

	    mlp_response = cvCreateMat( 1, class_count, CV_32F );

	    // compute prediction error on train and test data
	    for( i = 0; i < nsamples_all; i++ )
	    {
	        int best_class;
	        CvMat sample;
	        cvGetRow( data, &sample, i );
	        CvPoint max_loc = {0,0};
	        mlp.predict( &sample, mlp_response );
	        cvMinMaxLoc( mlp_response, 0, 0, 0, &max_loc, 0 );
	        best_class = max_loc.x + 'A';

	        int r = fabs((double)best_class - responses->data.fl[i]) < FLT_EPSILON ? 1 : 0;

	        if( i < ntrain_samples )
	            train_hr += r;
	        else
	            test_hr += r;
	    }

	    test_hr /= (double)(nsamples_all-ntrain_samples);
	    train_hr /= (double)ntrain_samples;
	    printf( "Recognition rate: train = %.1f%%, test = %.1f%%\n",
	            train_hr*100., test_hr*100. );

	    // Save classifier to file if needed
	    if( filename_to_save )
	        mlp.save( filename_to_save );

	    cvReleaseMat( &mlp_response );
	    cvReleaseMat( &data );
	    cvReleaseMat( &responses );

	    return 0;
	}
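The "unrolling" step above is a one-hot encoding, and prediction inverts it with an argmax over the response row (via cvMinMaxLoc). In isolation, as a sketch:

	    // Sketch: one-hot encode class 'C', then decode it back by argmax.
	    float row[26] = {0};
	    row['C' - 'A'] = 1.f;                  // training target for class 'C'

	    CvMat response = cvMat(1, 26, CV_32F, row);
	    CvPoint max_loc = {0, 0};
	    cvMinMaxLoc(&response, 0, 0, 0, &max_loc, 0);
	    int predicted = max_loc.x + 'A';
	    printf("predicted class: %c\n", predicted);  // prints 'C'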
Example #19
0
/*F///////////////////////////////////////////////////////////////////////////////////////
//    Name: InitMixSegm
//    Purpose: The function implements the mixture segmentation of the states of the embedded HMM
//    Context: used with the Viterbi training of the embedded HMM
//             Function uses K-Means algorithm for clustering
//
//    Parameters:  obs_info_array - array of pointers to image observations
//                 num_img - length of above array
//                 hmm - pointer to HMM structure   
//     
//    Returns: error status
//
//    Notes: 
//F*/
CvStatus icvInit1DMixSegm(Cv1DObsInfo** obs_info_array, int num_img, CvEHMM* hmm)
{                                      
    int  k, i, j; 
    int* num_samples; /* number of observations in every state */
    int* counter;     /* array of counters for every state */
    
    int**  a_class;   /* for every state - characteristic array */
    
    CvVect32f** samples; /* for every state - pointer to observation vectors */
    int***  samples_mix;   /* for every state - array of pointers to vectors mixtures */   
    
    CvTermCriteria criteria = cvTermCriteria( CV_TERMCRIT_EPS|CV_TERMCRIT_ITER,
                                              1000,    /* iter */
                                              0.01f ); /* eps  */
    
    int total = hmm->num_states; 
    CvEHMMState* first_state = hmm->u.state; 
    
    /* for every state integer is allocated - number of vectors in state */
    num_samples = (int*)icvAlloc( total * sizeof(int) );
    
    /* integer counter is allocated for every state */
    counter = (int*)icvAlloc( total * sizeof(int) );
    
    samples = (CvVect32f**)icvAlloc( total * sizeof(CvVect32f*) ); 
    samples_mix = (int***)icvAlloc( total * sizeof(int**) ); 
    
    /* clear */
    memset( num_samples, 0 , total*sizeof(int) );
    memset( counter, 0 , total*sizeof(int) );
    
    
    /* for every state the number of vectors which belong to it is computed (something like a histogram) */
    for (k = 0; k < num_img; k++)
    {
        CvImgObsInfo* obs = obs_info_array[k];
        
        for (i = 0; i < obs->obs_x; i++)
        {
            int state = obs->state[ i ];
            num_samples[state] += 1;
        }
    } 
    
    /* for every state int* is allocated */
    a_class = (int**)icvAlloc( total*sizeof(int*) );
    
    for (i = 0; i < total; i++)
    {
        a_class[i] = (int*)icvAlloc( num_samples[i] * sizeof(int) );
        samples[i] = (CvVect32f*)icvAlloc( num_samples[i] * sizeof(CvVect32f) );
        samples_mix[i] = (int**)icvAlloc( num_samples[i] * sizeof(int*) );
    }
    
    /* for every state vectors which belong to state are gathered */
    for (k = 0; k < num_img; k++)
    {  
        CvImgObsInfo* obs = obs_info_array[k];
        int num_obs = obs->obs_x;
        float* vector = obs->obs;

        for (i = 0; i < num_obs; i++, vector+=obs->obs_size )
        {
            int state = obs->state[i];
            
            samples[state][counter[state]] = vector;
            samples_mix[state][counter[state]] = &(obs->mix[i]);
            counter[state]++;            
        }
    } 
    
    /* clear counters */
    memset( counter, 0, total*sizeof(int) );
    
    /* do the actual clustering using the K Means algorithm */
    for (i = 0; i < total; i++)
    {
        if ( first_state[i].num_mix == 1)
        {   
            for (k = 0; k < num_samples[i]; k++)
            {  
                /* all vectors belong to one mixture */
                a_class[i][k] = 0;
            }
        }      
        else if( num_samples[i] )
        {
            /* clusterize vectors  */
            icvKMeans( first_state[i].num_mix, samples[i], num_samples[i], 
                obs_info_array[0]->obs_size, criteria, a_class[i] );
        } 
    }
    
    /* for every vector number of mixture is assigned */
    for( i = 0; i < total; i++ )
    {
        for (j = 0; j < num_samples[i]; j++)
        {
            samples_mix[i][j][0] = a_class[i][j];
        }
    }
    
    for (i = 0; i < total; i++)
    {
        icvFree( &(a_class[i]) );
        icvFree( &(samples[i]) );
        icvFree( &(samples_mix[i]) );
    }

    icvFree( &a_class );
    icvFree( &samples );
    icvFree( &samples_mix );
    icvFree( &counter );
    icvFree( &num_samples );  

    
    return CV_NO_ERR;
}
Example #20
0
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{

	double nan = std::numeric_limits<double>::quiet_NaN();
	double inf = std::numeric_limits<double>::infinity();

	if (nrhs == 0) {
		mexPrintf("Lucas-Kanade\n");
		return;
	}


	switch ((int) *mxGetPr(prhs[0])) {

		// initialize or clean up
		case 0: {
	
			if (IMG!=0 && PYR!=0) {

				for (int i = 0; i < MAX_IMG; i++) {
					cvReleaseImage(&(IMG[i])); IMG[i] = 0;
					cvReleaseImage(&(PYR[i])); PYR[i] = 0;
				}
				free(IMG); IMG = 0;
				free(PYR); PYR = 0;
				//mexPrintf("LK: deallocated\n");
			}

			IMG = (IplImage**) calloc(MAX_IMG,sizeof(IplImage*));
			PYR = (IplImage**) calloc(MAX_IMG,sizeof(IplImage*));
			//mexPrintf("LK: initialized\n");

			return;
				}

		// tracking
		case 2: {

			if (IMG == 0 || (nrhs != 5 && nrhs != 6)) {
				mexPrintf("lk(2,imgI,imgJ,ptsI,ptsJ,Level)\n");
				//            0 1    2    3    4   
				return;
			}
			int Level;
			if (nrhs == 6) {
				Level = (int) *mxGetPr(prhs[5]);
			} else {
				Level = 5;
			}


			int I       = 0;
			int J       = 1;
			int Winsize = 10;

			// Images
			if (IMG[I] != 0) {
				loadImageFromMatlab(prhs[1],IMG[I]);
			} else {
				CvSize imageSize = cvSize(mxGetN(prhs[1]),mxGetM(prhs[1]));
				IMG[I] = cvCreateImage( imageSize, 8, 1 );
				PYR[I] = cvCreateImage( imageSize, 8, 1 );
				loadImageFromMatlab(prhs[1],IMG[I]);
			}

			if (IMG[J] != 0) {
				loadImageFromMatlab(prhs[2],IMG[J]);
			} else {
				CvSize imageSize = cvSize(mxGetN(prhs[2]),mxGetM(prhs[2]));
				IMG[J] = cvCreateImage( imageSize, 8, 1 );
				PYR[J] = cvCreateImage( imageSize, 8, 1 );
				loadImageFromMatlab(prhs[2],IMG[J]);
			}

			// Points
			double *ptsI = mxGetPr(prhs[3]); int nPts = mxGetN(prhs[3]);
			double *ptsJ = mxGetPr(prhs[4]); 

			if (nPts != mxGetN(prhs[4])) {
				mexPrintf("Inconsistent input!\n");
				return;
			}

			points[0] = (CvPoint2D32f*)cvAlloc(nPts*sizeof(CvPoint2D32f)); // template
			points[1] = (CvPoint2D32f*)cvAlloc(nPts*sizeof(CvPoint2D32f)); // target
			points[2] = (CvPoint2D32f*)cvAlloc(nPts*sizeof(CvPoint2D32f)); // forward-backward

			for (int i = 0; i < nPts; i++) {
				points[0][i].x = ptsI[2*i]; points[0][i].y = ptsI[2*i+1];
				points[1][i].x = ptsJ[2*i]; points[1][i].y = ptsJ[2*i+1];
				points[2][i].x = ptsI[2*i]; points[2][i].y = ptsI[2*i+1];
			}

			float *ncc    = (float*) cvAlloc(nPts*sizeof(float));
			float *ssd    = (float*) cvAlloc(nPts*sizeof(float));
			float *fb     = (float*) cvAlloc(nPts*sizeof(float));
			char  *status = (char*)  cvAlloc(nPts);

			cvCalcOpticalFlowPyrLK( IMG[I], IMG[J], PYR[I], PYR[J], points[0], points[1], nPts, cvSize(win_size,win_size), Level, status, 0, cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), CV_LKFLOW_INITIAL_GUESSES);
			cvCalcOpticalFlowPyrLK( IMG[J], IMG[I], PYR[J], PYR[I], points[1], points[2], nPts, cvSize(win_size,win_size), Level, 0     , 0, cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), CV_LKFLOW_INITIAL_GUESSES | CV_LKFLOW_PYR_A_READY | CV_LKFLOW_PYR_B_READY );
			
			normCrossCorrelation(IMG[I],IMG[J],points[0],points[1],nPts, status, ncc, Winsize,CV_TM_CCOEFF_NORMED);
			//normCrossCorrelation(IMG[I],IMG[J],points[0],points[1],nPts, status, ssd, Winsize,CV_TM_SQDIFF);
			euclideanDistance( points[0],points[2],fb,nPts);
			

			// Output
			int M = 4;
			plhs[0] = mxCreateDoubleMatrix(M, nPts, mxREAL);
			double *output = mxGetPr(plhs[0]);
			for (int i = 0; i < nPts; i++) {
				if (status[i] == 1) {
					output[M*i]   = (double) points[1][i].x;
					output[M*i+1] = (double) points[1][i].y;
					output[M*i+2] = (double) fb[i];
					output[M*i+3] = (double) ncc[i];
					//output[M*i+4] = (double) ssd[i];
				} else {
					output[M*i]   = nan;
					output[M*i+1] = nan;
					output[M*i+2] = nan;
					output[M*i+3] = nan;
					//output[M*i+4] = nan;
				}
			}

			cvFree(&points[0]); cvFree(&points[1]); cvFree(&points[2]);
			cvFree(&ncc); cvFree(&ssd); cvFree(&fb); cvFree(&status);

			return;
				}

	}

}
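The second cvCalcOpticalFlowPyrLK call above tracks the already-tracked points back from J to I, and euclideanDistance then measures the forward-backward error: how far each point ends up from where it started after the round trip (points[0] vs points[2]). That helper is not shown in this excerpt; a plausible sketch, assuming it is a plain point-wise distance:

#include <math.h>

// Sketch of the assumed behaviour of euclideanDistance: distance between
// the original template point and its position after tracking I->J->I.
void euclideanDistance(CvPoint2D32f *a, CvPoint2D32f *b, float *dist, int n)
{
	for (int i = 0; i < n; i++) {
		float dx = a[i].x - b[i].x;
		float dy = a[i].y - b[i].y;
		dist[i] = sqrtf(dx * dx + dy * dy);
	}
}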
Example #21
0
File: flow.cpp Project: pushkar/xyz
void Flow::calculate_flow_hs() {
	cvCalcOpticalFlowHS(ampl_img_2, ampl_img_1, 1, velx, vely, 1, cvTermCriteria(1, 10, 0.5));
}
// Estimate face absolute orientations
vector<float> CRecognitionAlgs::CalcAbsoluteOrientations(
    const VO_Shape& iShape2D,
    const VO_Shape& iShape3D,
    VO_Shape& oShape2D)
{
    assert (iShape2D.GetNbOfPoints() == iShape3D.GetNbOfPoints() );
    unsigned int NbOfPoints = iShape3D.GetNbOfPoints();
    Point3f pt3d;
    Point2f pt2d;
    float height1 = iShape2D.GetHeight();
    float height2 = iShape3D.GetHeight();
    VO_Shape tempShape2D = iShape2D;
    tempShape2D.Scale(height2/height1);

    //Create the model points
    std::vector<CvPoint3D32f> modelPoints;
    for(unsigned int i = 0; i < NbOfPoints; ++i)
    {
        pt3d = iShape3D.GetA3DPoint(i);
        modelPoints.push_back(cvPoint3D32f(pt3d.x, pt3d.y, pt3d.z));
    }

    //Create the image points
    std::vector<CvPoint2D32f> srcImagePoints;
    for(unsigned int i = 0; i < NbOfPoints; ++i)
    {
        pt2d = tempShape2D.GetA2DPoint(i);
        srcImagePoints.push_back(cvPoint2D32f(pt2d.x, pt2d.y));
    }

    //Create the POSIT object with the model points
    CvPOSITObject *positObject = cvCreatePOSITObject( &modelPoints[0], NbOfPoints );

    //Estimate the pose
    CvMatr32f rotation_matrix = new float[9];
    CvVect32f translation_vector = new float[3];
    CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 100, 1.0e-4f);
    cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector );

    //rotation_matrix to Euler angles, refer to VO_Shape::GetRotation
    float sin_beta  = -rotation_matrix[0 * 3 + 2];
    float tan_alpha = rotation_matrix[1 * 3 + 2] / rotation_matrix[2 * 3 + 2];
    float tan_gamma = rotation_matrix[0 * 3 + 1] / rotation_matrix[0 * 3 + 0];

    //Project the model points with the estimated pose
    oShape2D = tempShape2D;
    for ( unsigned int i=0; i < NbOfPoints; ++i )
    {
        pt3d.x = rotation_matrix[0] * modelPoints[i].x +
            rotation_matrix[1] * modelPoints[i].y +
            rotation_matrix[2] * modelPoints[i].z +
            translation_vector[0];
        pt3d.y = rotation_matrix[3] * modelPoints[i].x +
            rotation_matrix[4] * modelPoints[i].y +
            rotation_matrix[5] * modelPoints[i].z +
            translation_vector[1];
        pt3d.z = rotation_matrix[6] * modelPoints[i].x +
            rotation_matrix[7] * modelPoints[i].y +
            rotation_matrix[8] * modelPoints[i].z +
            translation_vector[2];
        if ( pt3d.z != 0 )
        {
            pt2d.x = FOCAL_LENGTH * pt3d.x / pt3d.z;
            pt2d.y = FOCAL_LENGTH * pt3d.y / pt3d.z;
        }
        oShape2D.SetA2DPoint(pt2d, i);
    }

    //return Euler angles
    vector<float> pos(3);
    pos[0] = atan(tan_alpha);    // yaw
    pos[1] = asin(sin_beta);     // pitch
    pos[2] = atan(tan_gamma);    // roll
    return pos;
}
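One caveat with the Euler extraction above: atan() on a ratio loses the signs of its numerator and denominator, so yaw and roll are only recovered up to a half-turn. A sketch of the same extraction written with atan2, which keeps the correct quadrant (this is our variant, not the original code):

    // Sketch: quadrant-safe Euler extraction from the same row-major
    // rotation_matrix used above.
    float yaw   = atan2(rotation_matrix[1 * 3 + 2], rotation_matrix[2 * 3 + 2]);
    float pitch = asin(-rotation_matrix[0 * 3 + 2]);
    float roll  = atan2(rotation_matrix[0 * 3 + 1], rotation_matrix[0 * 3 + 0]);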
Example #23
0
int lk_work(CAMOBJ * st)
{
        int i, k;
		float mx,my,cx,cy;

    //    frame = cvQueryFrame( capture );
        if( !frame )
            return(1);

        if( !image ) 	            // allocate all the buffers 
		{
            image = cvCreateImage( cvGetSize(frame), 8, 3 );
            image->origin = frame->origin;
            grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            save_grey = cvCreateImage( cvGetSize(frame), 8, 1 );

            pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            save_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );

            points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
            points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
            save_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));

            status = (char*)cvAlloc(MAX_COUNT);
			for (i=0;i<MAX_COUNT;i++) pt_mode[i]=0;
            flags = 0;
			statuscount++;
		}
        cvCopy( frame, image, 0 );

		if (st->mode==1)
		{
		  if (!video_writer)
		       video_writer = cvCreateVideoWriter(st->videofilename,-1,15,cvGetSize(image));
		  	
		  cvWriteFrame(video_writer,image);
  		}


        if (st->enable_tracking)
		{
		cvCvtColor( image, grey, CV_BGR2GRAY );

        if( night_mode )
            cvZero( image );

		if (need_to_init)
		{
		  need_to_init=0;
		  init_flag=0;

		  if (st->trackface)
		  {
			if (detect_face())
			{
				int x;

				count=2;

				cvFindCornerSubPix( grey, points[1], count,
					cvSize(win_size,win_size), cvSize(-1,-1),
					cvTermCriteria(CV_TERMCRIT_ITER,1,1.0));
//					cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
  
	            cvCopy(grey,save_grey,0 );
		        cvCopy(pyramid,save_pyramid,0 );
				cvCopy(grey,prev_grey,0 );
		        cvCopy(pyramid,prev_pyramid,0 );

			    for (x=0;x<count;x++)
				{
					save_points[x].x=points[1][x].x;
					save_points[x].y=points[1][x].y;
					points[0][x].x=points[1][x].x;
					points[0][x].y=points[1][x].y;
					save_pt_mode[x]=pt_mode[x];
				}
				calc_distances(1);
				save_count=count;
				add_remove_pt = 0;
	            flags = 0;
				time_to_restore=0;
				
			} 
		  }
		  else
		  {
			    save_points[0].x=PT1_xpos*100;
				save_points[0].y=PT1_ypos*100;
				points[0][0].x=PT1_xpos*100;
				points[0][0].y=PT1_ypos*100;
				save_pt_mode[0]=0;
				count=1;MAX_COUNT=1;
				calc_distances(1);

  				cvFindCornerSubPix( grey, points[1], 1,
					cvSize(win_size,win_size), cvSize(-1,-1),
					cvTermCriteria(CV_TERMCRIT_ITER,1,1.0));
	            
				// report("hallo");
				cvCopy(grey,save_grey,0 );
		        cvCopy(pyramid,save_pyramid,0 );
				cvCopy(grey,prev_grey,0 );
		        cvCopy(pyramid,prev_pyramid,0 );
				
				save_count=1;
				add_remove_pt = 0;
	            flags = 0;
				//time_to_restore=0;
				
		  }

		}        

		if(count < MAX_COUNT) need_to_init=1;
		else
        {
			
            cvCalcOpticalFlowPyrLK( prev_grey, grey, prev_pyramid, pyramid,
                points[0], points[1], count, cvSize(win_size,win_size), 5, status, 0,
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), flags );
            flags |= CV_LKFLOW_PYR_A_READY;

			mx=0;my=0;
			cx=0;cy=0;mcount=0;ccount=0;
            for( i = k = 0; i < count; i++ )
            {

                if( add_remove_pt )
                {
                    double dx = pt.x - points[1][i].x;
                    double dy = pt.y - points[1][i].y;

                    if( dx*dx + dy*dy <= 25 )
                    {
                        add_remove_pt = 0;
                        if (pt_mode[i]==1) {pt_mode[i]=0; continue;}
						pt_mode[i]=1;
                    }
                }
                
                if( !status[i] ) { need_to_init=1; status[i]=true; }
                    

				if (pt_mode[i]==1)
				{
					cx+= (points[0][i].x - points[1][i].x);
					cy+= (points[0][i].y - points[1][i].y);
					ccount++;
				}
				else
				{
					mx += (points[0][i].x - points[1][i].x);
					my += (points[0][i].y - points[1][i].y);
					mcount++;
				}
				
				points[1][k] = points[1][i];
				pt_mode[k++]=pt_mode[i];
				if (need_to_init)
				  cvCircle( image, cvPointFrom32f(points[1][i]), 4, CV_RGB(255,0,0), 2, 8,0);
				else if (pt_mode[i]==1)
                  cvCircle( image, cvPointFrom32f(points[1][i]), 4, CV_RGB(255,255,0), 2, 8,0);
				  else
				   cvCircle( image, cvPointFrom32f(points[1][i]), 4, CV_RGB(0,210,0), 2, 8,0);
            }
            count = k;
			if (k==MAX_COUNT)
			{
				if (init_flag>1)
				{
				if (mcount) { x_move=mx/mcount; y_move=my/mcount; }
				if (ccount) { x_click=cx/ccount; y_click=cy/ccount; }
				}
				if (st->trackface) calc_distances(0); else calc_distances(2);
				
				
				if ((autorestore)) // && (init_flag>5))
				{
				  if (st->trackface)
				  {
					if ((dist_error>=dist_threshold) || (angle_error>=angle_threshold))
						time_to_restore++;
					else time_to_restore=0;

					if (time_to_restore>threshold_time)
					{ need_to_init=1; time_to_restore=0; }
				  }
				  else
				  {
					if ((dist_error>=dist_threshold))
						time_to_restore++;
					else time_to_restore=0;

					if (time_to_restore>threshold_time)
					{ need_to_init=1; time_to_restore=0; }

				  }
				  
				}
				
					
			} 
        }

        if( add_remove_pt && count < MAX_COUNT )
        {
            points[1][count++] = cvPointTo32f(pt);
            cvFindCornerSubPix( grey, points[1] + count - 1, 1,
                cvSize(win_size,win_size), cvSize(-1,-1),
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
            add_remove_pt = 0;
        }

	  }

	  CV_SWAP( prev_grey, grey, swap_temp );
	  CV_SWAP( prev_pyramid, pyramid, swap_temp );
	  CV_SWAP( points[0], points[1], swap_points );
		
	  if (init_flag<1000) init_flag++;

	  if (st->showlive) cvShowImage( "Camera", image );
	

	return(0);
}
Example #24
0
void findelectrodes::find()
{
    thresh = img.clone();

    //thresholding the image using adaptive thresholding; 203 is the block size and has to be adapted if thresholding fails
    cv::adaptiveThreshold(img,thresh,255,CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY,203,0);

    //searching for contours in the thresholded image, building a hierarchy of contours (outer contour -> inner contours)
    cv::findContours( thresh.clone() , contours, hierarchy,CV_RETR_TREE, CV_CHAIN_APPROX_NONE );

    //checking every found contour...
    for( int i = 0; i< contours.size(); i++ )
    {
        //counting innercountours from the left and from the right side
        int innercontours = -1;
        for(int next = hierarchy[i][2]; next >= 0; next = hierarchy[next][0])
        {
            innercontours++;
        }
        for(int prev = hierarchy[i][2]; prev >= 0; prev = hierarchy[prev][1])
        {
            innercontours++;
        }

        //checking whether the contour matches the expected perimeter range and inner-contour count; has to be adapted if image properties change drastically
        if( cv::arcLength(contours[i], true) > img.cols/4 && cv::arcLength(contours[i], true) < 30*img.cols &&
            innercontours >= 7 && innercontours <= 24)
        {
            //checking for image contours which are large enough to be considered as an electrode marker
            int largeinnercontours = 0;
            for(int next = hierarchy[i][2]; next >= 0; next = hierarchy[next][0])
            {
                if( (cv::contourArea( contours[next], false) >=  cv::contourArea( contours[i], false)/200) &&
                        (cv::contourArea( contours[next], false) <=  cv::contourArea( contours[i], false)/20) )
                {
                    largeinnercontours++;
                }
            }
            for(int prev = hierarchy[ hierarchy[i][2] ][1]; prev >= 0; prev = hierarchy[prev][1])
            {
                if( (cv::contourArea( contours[prev], false) >=  cv::contourArea( contours[i], false)/200) &&
                        (cv::contourArea( contours[prev], false) <=  cv::contourArea( contours[i], false)/20) )
                {
                    largeinnercontours++;
                }
            }

            if(largeinnercontours >= 7 && largeinnercontours <= 24)
            {
                //saving contour id in list for further analysis
                std::cout << "Contour-Id: " << i << " Innercontours:" << innercontours << " Largeinnercontours:" << largeinnercontours << std::endl;
                stripes.push_back(i);
            }
        }
    }

    for(int i = 0; i < stripes.size(); i++) //analysing every found stripe
    {
        //checking the sizes of inner contours; if a size is in the range of an electrode marker, calculate the centroid of the marker
        cv::Moments moment;
        std::vector<cv::Point2f> centroids;
        std::vector<double> centroidPerimeter;
        std::vector<int> centroidContour;
        for(int next = hierarchy[stripes[i]][2]; next >= 0; next = hierarchy[next][0])
        {
            if( (cv::contourArea( contours[next], false) >=  cv::contourArea( contours[stripes[i]], false)/300) &&
                    (cv::contourArea( contours[next], false) <=  cv::contourArea( contours[stripes[i]], false)/20) )
            {
                moment = cv::moments(contours[next], false);
                centroids.push_back( cv::Point2f(moment.m10/moment.m00 ,moment.m01/moment.m00));
                centroidPerimeter.push_back( cv::arcLength( contours[next], true));
                centroidContour.push_back( next);
            }
        }
        for(int prev = hierarchy[ hierarchy[stripes[i]][2] ][1]; prev >= 0; prev = hierarchy[prev][1])
        {
            if( (cv::contourArea( contours[prev], false) >=  cv::contourArea( contours[stripes[i]], false)/300) &&
                    (cv::contourArea( contours[prev], false) <=  cv::contourArea( contours[stripes[i]], false)/20) )
            {
                moment = cv::moments(contours[prev], false);
                centroids.push_back( cv::Point2f(moment.m10/moment.m00 ,moment.m01/moment.m00));
                centroidPerimeter.push_back( cv::arcLength( contours[prev], true));
                centroidContour.push_back( prev);
            }
        }

        //if two centroids are too close, thresholding for marker might be wrong and both centroids have to be joined
        for( int j=0; j < centroids.size(); j++)
        {
            for( int k = j+1; k < centroids.size(); k++)
            {
                double dist = sqrt( (centroids[j].x - centroids[k].x)*(centroids[j].x - centroids[k].x) + (centroids[j].y - centroids[k].y)*(centroids[j].y - centroids[k].y) );
                double averagePerimeter = ( cv::arcLength( contours[centroidContour[j]], true) +  cv::arcLength( contours[centroidContour[k]], true))/2;

                    //additional shape requirement removed (no reason to merge
                    //rectangular markers but not rhombic ones):
                    //  cv::minAreaRect(contours[centroidContour[j]]).size.area() <= 1.7 * cv::contourArea(contours[centroidContour[j]], false) &&
                    //  cv::minAreaRect(contours[centroidContour[k]]).size.area() <= 1.7 * cv::contourArea(contours[centroidContour[k]], false)
                    if( dist <= averagePerimeter/2 )
                    {
                        centroids[j].x = (centroids[j].x + centroids[k].x)/2;
                        centroids[j].y = (centroids[j].y + centroids[k].y)/2;
                        centroidPerimeter[j] += centroidPerimeter[k];
                        centroids[k].x = -1;
                        centroids[k].y = -1;
                    }
            }
        }

        //delete second marker of two joined markers (joined marker is written to first marker position)
        for( int j=0; j < centroids.size(); j++)
        {
            if( centroids[j].x == -1 )
            {
                centroids.erase(centroids.begin()+j);
                centroidPerimeter.erase(centroidPerimeter.begin()+j);
                j--;
            }
        }

        sortCentroids(centroids); //sort found centroids by x or y coordinates

        std::cout << centroids << std::endl;

        std::cout << "\nPotentieller Streifen: ";

        if(centroids.size() <= 12 && centroids.size() > 0) //stripe size is not allowed to be bigger than 12 or smaller than 1
        {
            //getting subpixel information for found centroids
            cv::cornerSubPix(img, centroids, cvSize(4,4), cvSize(1,1) ,cvTermCriteria ( CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,100,0.001 ));

            std::vector<int> id;

            for( int j=0; j < centroids.size(); j++)
            {
                //for every centroid: calculate the connection vector between the current and the next centroid
                //scale the vector to half the diagonal length of one of the four rectangles belonging to an electrode marker
                //rotate the vector into four directions roughly pointing to the diagonal of each rectangle
                //check the colors at the calculated points to read one bit of the electrode marker
                //add the bit to the id vector: '0' and '1' are bit values, '-2' marks errors (bit not decoded)
                cv::Point2f direction;
                cv::Point2f rot1;
                cv::Point2f rot2;
                cv::Point2f rot3;
                cv::Point2f rot4;
                if( j!= (centroids.size()-1) )
                {
                    direction.x = centroids[j+1].x - centroids[j].x;
                    direction.y = centroids[j+1].y - centroids[j].y;
                }
                else
                {
                    direction.x = centroids[j].x - centroids[j-1].x;
                    direction.y = centroids[j].y - centroids[j-1].y;
                }
                double length = sqrt( direction.x * direction.x + direction.y * direction.y );
                direction.x = 0.09 * centroidPerimeter[j] * direction.x/length;
                direction.y = 0.09 * centroidPerimeter[j] * direction.y/length;
                rot1.x = ( 0.94 * direction.x - 0.34 * direction.y ); //20°
                rot1.y = ( 0.34 * direction.x + 0.94 * direction.y );
                rot2.x = ( -0.94 * direction.x - 0.34 * direction.y );
                rot2.y = ( 0.34 * direction.x - 0.94 * direction.y );
                rot3.x = ( -0.94 * direction.x + 0.34 * direction.y );
                rot3.y = ( -0.34 * direction.x - 0.94 * direction.y );
                rot4.x = ( 0.94 * direction.x + 0.34 * direction.y );
                rot4.y = ( -0.34 * direction.x + 0.94 * direction.y );

                if( thresh.at<uchar>(centroids[j].y + rot1.y, centroids[j].x + rot1.x) == 255 &&
                        thresh.at<uchar>(centroids[j].y + rot2.y, centroids[j].x + rot2.x) == 0 &&
                        thresh.at<uchar>(centroids[j].y + rot3.y, centroids[j].x + rot3.x) == 255 &&
                        thresh.at<uchar>(centroids[j].y + rot4.y, centroids[j].x + rot4.x) == 0 )
                {
                    id.push_back(1);
                    std::cout << 1;
                }
                else if( thresh.at<uchar>(centroids[j].y + rot1.y, centroids[j].x + rot1.x) == 0 &&
                         thresh.at<uchar>(centroids[j].y + rot2.y, centroids[j].x + rot2.x) == 255 &&
                         thresh.at<uchar>(centroids[j].y + rot3.y, centroids[j].x + rot3.x) == 0 &&
                         thresh.at<uchar>(centroids[j].y + rot4.y, centroids[j].x + rot4.x) == 255 )
                {
                    id.push_back(0);
                    std::cout << 0;
                }
                else if( thresh.at<uchar>(centroids[j].y + 0.8 * rot1.y, centroids[j].x + 0.8 * rot1.x) == 255 &&
                         thresh.at<uchar>(centroids[j].y + 0.8 *rot2.y, centroids[j].x + 0.8 * rot2.x) == 0 &&
                         thresh.at<uchar>(centroids[j].y + 0.8 *rot3.y, centroids[j].x + 0.8 * rot3.x) == 255 &&
                         thresh.at<uchar>(centroids[j].y + 0.8 *rot4.y, centroids[j].x + 0.8 * rot4.x) == 0 )
                {
                    id.push_back(1);
                    std::cout << 1;
                }
                else if( thresh.at<uchar>(centroids[j].y + 0.8 *rot1.y, centroids[j].x + 0.8 * rot1.x) == 0 &&
                         thresh.at<uchar>(centroids[j].y + 0.8 *rot2.y, centroids[j].x + 0.8 * rot2.x) == 255 &&
                         thresh.at<uchar>(centroids[j].y + 0.8 *rot3.y, centroids[j].x + 0.8 * rot3.x) == 0 &&
                         thresh.at<uchar>(centroids[j].y + 0.8 *rot4.y, centroids[j].x + 0.8 * rot4.x) == 255 )
                {
                    id.push_back(0);
                    std::cout << 0;
                }
                else
                {
                    id.push_back(-2);
                    std::cout << -2;
                }

                /*cv::circle(thresh, centroids[j] + rot1 , 2, cv::Scalar(0,0,0), 1);
                cv::circle(thresh, centroids[j] + rot2 , 2, cv::Scalar(0,0,0), 1);
                cv::circle(thresh, centroids[j] + rot3 , 2, cv::Scalar(0,0,0), 1);
                cv::circle(thresh, centroids[j] + rot4 , 2, cv::Scalar(0,0,0), 1);*/
            }
            std::cout << std::endl << std::endl;

            checkid(id, centroids);
        }

        centroids.clear();
    }
}
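The hard-coded coefficients 0.94 and 0.34 in the bit-reading code above are cos 20 and sin 20 degrees: rot1..rot4 are the connection vector rotated by +20, 160, -160 and -20 degrees, i.e. four rays pointing roughly along the diagonals of the marker's rectangles. Written out with an explicit rotation helper, as a sketch (rotate2f is ours):

#include <cmath>
#include <opencv2/core/core.hpp>

// Sketch: rotate a 2D vector by rad radians (counter-clockwise in standard
// axes; with image y pointing down this appears clockwise on screen).
static cv::Point2f rotate2f(const cv::Point2f &v, double rad)
{
    float c = (float)std::cos(rad), s = (float)std::sin(rad);
    return cv::Point2f(c * v.x - s * v.y, s * v.x + c * v.y);
}

// Equivalent to the hand-expanded code above:
//   rot1 = rotate2f(direction,   20 * CV_PI / 180);
//   rot2 = rotate2f(direction,  160 * CV_PI / 180);
//   rot3 = rotate2f(direction, -160 * CV_PI / 180);
//   rot4 = rotate2f(direction,  -20 * CV_PI / 180);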
Example #25
0
int StereoVision::calibrationEnd() {
    calibrationStarted = false;

    // ARRAY AND VECTOR STORAGE:
    double M1[3][3], M2[3][3], D1[5], D2[5];
    double R[3][3], T[3], E[3][3], F[3][3];
    CvMat _M1,_M2,_D1,_D2,_R,_T,_E,_F;

    _M1 = cvMat(3, 3, CV_64F, M1 );
    _M2 = cvMat(3, 3, CV_64F, M2 );
    _D1 = cvMat(1, 5, CV_64F, D1 );
    _D2 = cvMat(1, 5, CV_64F, D2 );
    _R = cvMat(3, 3, CV_64F, R );
    _T = cvMat(3, 1, CV_64F, T );
    _E = cvMat(3, 3, CV_64F, E );
    _F = cvMat(3, 3, CV_64F, F );

    // HARVEST CHESSBOARD 3D OBJECT POINT LIST:
    objectPoints.resize(sampleCount*cornersN);

    for(int k=0; k<sampleCount; k++)
        for(int i = 0; i < cornersY; i++ )
            for(int j = 0; j < cornersX; j++ )
                objectPoints[k*cornersY*cornersX + i*cornersX + j] = cvPoint3D32f(i, j, 0);


    npoints.resize(sampleCount,cornersN);

    int N = sampleCount * cornersN;


    CvMat _objectPoints = cvMat(1, N, CV_32FC3, &objectPoints[0] );
    CvMat _imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
    CvMat _imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
    CvMat _npoints = cvMat(1, npoints.size(), CV_32S, &npoints[0] );
    cvSetIdentity(&_M1);
    cvSetIdentity(&_M2);
    cvZero(&_D1);
    cvZero(&_D2);

    //CALIBRATE THE STEREO CAMERAS
    cvStereoCalibrate( &_objectPoints, &_imagePoints1,
                       &_imagePoints2, &_npoints,
                       &_M1, &_D1, &_M2, &_D2,
                       imageSize, &_R, &_T, &_E, &_F,
                       cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5),
                       CV_CALIB_FIX_ASPECT_RATIO + CV_CALIB_ZERO_TANGENT_DIST + CV_CALIB_SAME_FOCAL_LENGTH
                     );

    //Always work in undistorted space
    cvUndistortPoints( &_imagePoints1, &_imagePoints1,&_M1, &_D1, 0, &_M1 );
    cvUndistortPoints( &_imagePoints2, &_imagePoints2,&_M2, &_D2, 0, &_M2 );

    //COMPUTE AND DISPLAY RECTIFICATION


    double R1[3][3], R2[3][3];
    CvMat _R1 = cvMat(3, 3, CV_64F, R1);
    CvMat _R2 = cvMat(3, 3, CV_64F, R2);

    //HARTLEY'S RECTIFICATION METHOD
    double H1[3][3], H2[3][3], iM[3][3];
    CvMat _H1 = cvMat(3, 3, CV_64F, H1);
    CvMat _H2 = cvMat(3, 3, CV_64F, H2);
    CvMat _iM = cvMat(3, 3, CV_64F, iM);

    cvStereoRectifyUncalibrated(
        &_imagePoints1,&_imagePoints2, &_F,
        imageSize,
        &_H1, &_H2, 3
    );
    cvInvert(&_M1, &_iM);
    cvMatMul(&_H1, &_M1, &_R1);
    cvMatMul(&_iM, &_R1, &_R1);
    cvInvert(&_M2, &_iM);
    cvMatMul(&_H2, &_M2, &_R2);
    cvMatMul(&_iM, &_R2, &_R2);


    //Precompute map for cvRemap()
    cvReleaseMat(&mx1);
    cvReleaseMat(&my1);
    cvReleaseMat(&mx2);
    cvReleaseMat(&my2);
    mx1 = cvCreateMat( imageSize.height,imageSize.width, CV_32F );
    my1 = cvCreateMat( imageSize.height,imageSize.width, CV_32F );
    mx2 = cvCreateMat( imageSize.height,imageSize.width, CV_32F );
    my2 = cvCreateMat( imageSize.height,imageSize.width, CV_32F );

    cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_M1,mx1,my1);
    cvInitUndistortRectifyMap(&_M2,&_D2,&_R2,&_M2,mx2,my2);

    calibrationDone = true;

    return RESULT_OK;
}
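A note on the Hartley branch above: cvStereoRectifyUncalibrated returns pixel-space homographies H1, H2, while cvInitUndistortRectifyMap expects rotations in camera coordinates, hence the conjugation R = M^-1 * H * M performed with cvInvert/cvMatMul. Once the mx/my maps are filled in, rectifying a live frame pair is one cvRemap per camera. A usage sketch (frameL/frameR and rectL/rectR are hypothetical IplImage* of the calibrated size):

    // Sketch: apply the precomputed rectification maps to a frame pair.
    cvRemap(frameL, rectL, mx1, my1,
            CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
    cvRemap(frameR, rectR, mx2, my2,
            CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));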
int main( int argc, char** argv ) 
{ 
     
    FILE *ptr; 
    ptr=fopen("dataerr.dat","w+"); 
    CvCapture* capture = 0; 
 
    int counter1=0; 
 
    IplImage* image2 = 0; 
 
    float sumX=0; 
    float sumY=0; 
 
    float err_X; 
    float err_Y; 
 
    int XX=0; 
    int YY=0; 
 
    CvPoint ipt1; 
 
    int tempxx1=0; 
    int tempyy1=0; 
    int tempxx2=0; 
    int tempyy2=0; 
 
     
 
    char *imgFmt="pgm"; 
    char str1[100]; 
 
    /* Initialize the error array */ 
    for(int kk=0;kk<=400;kk++) 
    { 
        optical_flow_error[0][kk]=0; 
        optical_flow_errorP[0][kk]=0; 
        optical_flow_error[1][kk]=0; 
        optical_flow_errorP[1][kk]=0; 
    } 
 
    //capturing frame from video 
    capture = cvCaptureFromAVI("soccer_track.mpeg"); 
 
    cvNamedWindow( "KLT-Tracking Group_R", 0 ); 
    cvSetMouseCallback( "KLT-Tracking Group_R", on_mouse, 0 ); 
 
    if(add_remove_pt==1) 
    { 
        flagg=1; 
    } 
 
    for(;;) 
    { 
        IplImage* frame = 0; 
         
        int i, k, c; 
 
        //creating file name 
        sprintf(str1,"%d.%s",counter1,imgFmt); 
        err_X=0; 
        err_Y=0; 
        sumX=0; 
        sumY=0; 
 
        //grab and decode the next frame 
 
        frame = cvQueryFrame( capture ); 
 
     
        if( !frame ) 
            break; 
 
        if( !image ) 
            //The first frame: allocate buffers and do some initialization work 
        { 
            // allocate all the image buffers  
            image = cvCreateImage( cvGetSize(frame), 8, 3 ); 
            image->origin = frame->origin; 
            grey = cvCreateImage( cvGetSize(frame), 8, 1 );//make it grey 
            prev_grey = cvCreateImage( cvGetSize(frame), 8, 1 );//the previous frame in grey mode 
            pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );//pyramid frame 
            prev_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );//previous pyramid frame 
            /* Define two pointers */ 
            points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0])); 
            points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0])); 
            status = (char*)cvAlloc(MAX_COUNT); 
            flags = 0; 
        } 
 
        cvCopy( frame, image, 0 );//frame->image 
 
        //converting the image into gray scale for further computation 
        cvCvtColor( image, grey, CV_BGR2GRAY ); 
         
        if( need_to_init ) 
        { 
             
            IplImage* eig = cvCreateImage( cvGetSize(grey), 32, 1 ); 
            IplImage* temp = cvCreateImage( cvGetSize(grey), 32, 1 ); 
            double quality = 0.01; 
            double min_distance = 10; 
         
 
            //using good features to track 
            count = MAX_COUNT; 
            cvGoodFeaturesToTrack( grey, eig, temp, points[1], &count, 
                                   quality, min_distance, 0, 3, 0, 0.04 ); 
            cvFindCornerSubPix( grey, points[1], count, 
            cvSize(win_size,win_size), cvSize(-1,-1), 
            cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03)); 
            cvReleaseImage( &eig ); 
            cvReleaseImage( &temp ); 
 
 
 
            add_remove_pt = 0; 
        } 
        else if( count > 0 ) 
        { 
            //using pyramidal optical flow method 
            cvCalcOpticalFlowPyrLK(  
                    prev_grey, grey,  
                    prev_pyramid, pyramid, 
                    points[0], points[1],  
                    count, cvSize(win_size,win_size),  
                    5, status,0, 
                    cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), flags ); 
             
            flags |= CV_LKFLOW_PYR_A_READY|CV_LKFLOW_PYR_B_READY; 
 
            for( i = k = 0; i < count; i++ ) 
            { 
                /* When need to add or remove the point */ 
                if( add_remove_pt ) 
                { 
 
                    double dx = pt.x - points[1][i].x; 
                    double dy = pt.y - points[1][i].y; 
                    /* Calculate the distance between the selected point and the 
                    tracked point; if it is within 5 pixels, cancel the add 
                    and drop the existing point instead */ 
                    if( dx*dx + dy*dy <= 25 ) 
                    { 
                        add_remove_pt = 0; 
                        continue; 
                    } 
                } 
                 
                if( !status[i] )//if the point is not tracked correctly, skip it 
                    continue; 
                
                points[1][k++] = points[1][i]; 
 
                ipt1=cvPointFrom32f(points[1][i]);//get a point 
                 
            //calculating the error here, updating the error array 
                optical_flow_error[0][i]=ipt1.x; 
                optical_flow_error[1][i]=ipt1.y; 
 
 
            } 
            //taking average error for moving the window 
 
            for(int zz=0; zz<count;zz++) 
                { 
                    errX[zz]=optical_flow_error[0][zz]- optical_flow_errorP[0][zz]; 
                    errY[zz]=optical_flow_error[1][zz]- optical_flow_errorP[1][zz]; 
 
                    sumX=sumX+errX[zz]; 
                    sumY=sumY+errY[zz]; 
 
                    optical_flow_errorP[0][zz]=optical_flow_error[0][zz]; 
                    optical_flow_errorP[1][zz]=optical_flow_error[1][zz]; 
 
                } 
 
                fprintf(ptr,"%d\n",count); 
                 
                err_X=sumX/count; 
                err_Y=sumY/count; 
 
            if(flagg==1) 
            { 
              int static startonce=0; 
 
            if(startonce==0) 
            { 
                 
             
            tempxx1=pt.x-20; 
            tempyy1=pt.y-20; 
 
            tempxx2=pt.x+20; 
            tempyy2=pt.y+20; 
 
            XX=pt.x; 
            YY=pt.y; 
 
            startonce=1; 
 
            } 
            if(err_X<3) 
            { 
                tempxx1=tempxx1+err_X; 
                tempyy1=tempyy1+err_Y; 
                tempxx2=tempxx2+err_X; 
                tempyy2=tempyy2+err_Y; 
 
                XX=XX+err_X; 
                YY=YY+err_Y; 
                fprintf(ptr,"%f %f\n",err_X,err_Y); 
            } 
 
            printf("\n%f",err_X); 
 
            //moving window 
 
            cvRectangle(image, cvPoint(tempxx1,tempyy1), cvPoint(tempxx2,tempyy2), cvScalar(255,0,0), 1); 
            cvCircle(image, cvPoint(XX,YY), 3, cvScalar(0,0,255), 1); 
        } 
            count = k; 
        } 
 
 
        if( add_remove_pt && count < MAX_COUNT ) 
        { 
            points[1][count++] = cvPointTo32f(pt); 
            cvFindCornerSubPix( grey, points[1] + count - 1, 1, 
                cvSize(win_size,win_size), cvSize(-1,-1), 
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03)); 
            add_remove_pt = 0; 
        } 
 
        CV_SWAP( prev_grey, grey, swap_temp ); 
        CV_SWAP( prev_pyramid, pyramid, swap_temp ); 
        CV_SWAP( points[0], points[1], swap_points ); 
        need_to_init = 0; 
 
       
        //writing image file to the file 
        //if(!cvSaveImage(str1,image)) printf("Could not save: %s\n",str1); 
        //storing in a video also 
  
         
        cvShowImage( "KLT-Tracking Group_R", image ); 
 
        c = cvWaitKey(100); 
        if( (char)c == 27 ) 
            break; 
        switch( (char) c ) 
        { 
        case 's': 
            need_to_init = 1; 
          } 
 
        counter1++; 
    } 
 
    cvReleaseCapture( &capture ); 
    cvDestroyWindow("KLT-Tracking Group_R"); 
 
    fcloseall(); 
     
    return 0; 
} 
Example #27
0
void processImagePair(const char *file1, const char *file2, CvVideoWriter *out, struct CvMat *currentOrientation) {
  // Load two images and allocate other structures
	IplImage* imgA = cvLoadImage(file1, CV_LOAD_IMAGE_GRAYSCALE);
	IplImage* imgB = cvLoadImage(file2, CV_LOAD_IMAGE_GRAYSCALE);
	IplImage* imgBcolor = cvLoadImage(file2);
 
	CvSize img_sz = cvGetSize( imgA );
	int win_size = 15;
  
	// Get the features for tracking
	IplImage* eig_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
	IplImage* tmp_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
 
	int corner_count = MAX_CORNERS;
	CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
 
	cvGoodFeaturesToTrack( imgA, eig_image, tmp_image, cornersA, &corner_count,
		0.05, 3.0, 0, 3, 0, 0.04 );
 
  fprintf(stderr, "%s: Corner count = %d\n", file1, corner_count);
 
	cvFindCornerSubPix( imgA, cornersA, corner_count, cvSize( win_size, win_size ),
		cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 50, 0.03 ) );
 
	// Call Lucas Kanade algorithm
	char features_found[ MAX_CORNERS ];
	float feature_errors[ MAX_CORNERS ];
 
	CvSize pyr_sz = cvSize( imgA->width+8, imgA->height/3 );
 
	IplImage* pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
	IplImage* pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
 
	CvPoint2D32f* cornersB = new CvPoint2D32f[ MAX_CORNERS ];
 
  calcNecessaryImageRotation(imgA);
 
	cvCalcOpticalFlowPyrLK( imgA, imgB, pyrA, pyrB, cornersA, cornersB, corner_count, 
		cvSize( win_size, win_size ), 5, features_found, feature_errors,
		 cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ), 0 );
 
   CvMat *transform = cvCreateMat(3,3, CV_32FC1);
   CvMat *invTransform = cvCreateMat(3,3, CV_32FC1);
	// Find a homography based on the gradient
   CvMat cornersAMat = cvMat(1, corner_count, CV_32FC2, cornersA);
   CvMat cornersBMat = cvMat(1, corner_count, CV_32FC2, cornersB);
   cvFindHomography(&cornersAMat, &cornersBMat, transform, CV_RANSAC, 15, NULL);

   cvInvert(transform, invTransform);
   // Accumulate the inverse homography. cvMatMul (cvGEMM) does not support
   // in-place multiplication, so go through a temporary matrix.
   CvMat *accumulated = cvCreateMat(3, 3, CV_32FC1);
   cvMatMul(currentOrientation, invTransform, accumulated);
   cvCopy(accumulated, currentOrientation);
   cvReleaseMat(&accumulated);
   // save the translated image
 	 IplImage* trans_image = cvCloneImage(imgBcolor);
   cvWarpPerspective(imgBcolor, trans_image, currentOrientation, CV_INTER_CUBIC+CV_WARP_FILL_OUTLIERS);

   printf("%s:\n", file1);
   PrintMat(currentOrientation);

  // cvSaveImage(out, trans_image);
  cvWriteFrame(out, trans_image);

  cvReleaseImage(&eig_image);
  cvReleaseImage(&tmp_image);  
  cvReleaseImage(&trans_image);
  cvReleaseImage(&imgA);
  cvReleaseImage(&imgB);
  cvReleaseImage(&imgBcolor);
  cvReleaseImage(&pyrA);
  cvReleaseImage(&pyrB);
  
  cvReleaseMat(&transform);
  cvReleaseMat(&invTransform);
  delete [] cornersA;
  delete [] cornersB;
  
  
}
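A minimal driver for processImagePair() might look like the sketch below. The frame naming scheme, frame count, output filename, and codec are all assumptions; currentOrientation starts as the identity so the first warp is a no-op:

// Hypothetical driver; frame names such as "frame000.jpg" are assumed.
#include <stdio.h>
#include <cv.h>
#include <highgui.h>

int main(void)
{
    const int num_frames = 10;  // assumed sequence length

    // Accumulated orientation starts as the 3x3 identity.
    CvMat* currentOrientation = cvCreateMat(3, 3, CV_32FC1);
    cvSetIdentity(currentOrientation);

    // Size the video writer from the first frame.
    char prev[64], cur[64];
    sprintf(prev, "frame%03d.jpg", 0);
    IplImage* first = cvLoadImage(prev);
    CvVideoWriter* out = cvCreateVideoWriter("stabilized.avi",
        CV_FOURCC('M','J','P','G'), 15, cvGetSize(first), 1);
    cvReleaseImage(&first);

    // Chain each consecutive pair through the accumulated homography.
    for (int i = 1; i < num_frames; i++) {
        sprintf(prev, "frame%03d.jpg", i - 1);
        sprintf(cur,  "frame%03d.jpg", i);
        processImagePair(prev, cur, out, currentOrientation);
    }

    cvReleaseVideoWriter(&out);
    cvReleaseMat(&currentOrientation);
    return 0;
}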
/////////////////////////////////
// cv3dTrackerCalibrateCameras //
/////////////////////////////////
CV_IMPL CvBool cv3dTrackerCalibrateCameras(int num_cameras,
                   const Cv3dTrackerCameraIntrinsics camera_intrinsics[], // size is num_cameras
                   CvSize etalon_size,
                   float square_size,
                   IplImage *samples[],                                   // size is num_cameras
                   Cv3dTrackerCameraInfo camera_info[])                   // size is num_cameras
{
    CV_FUNCNAME("cv3dTrackerCalibrateCameras");
    const int num_points = etalon_size.width * etalon_size.height;
    int cameras_done = 0;        // the number of cameras whose positions have been determined
    CvPoint3D32f *object_points = NULL; // real-world coordinates of checkerboard points
    CvPoint2D32f *points = NULL; // 2d coordinates of checkerboard points as seen by a camera
    IplImage *gray_img = NULL;   // temporary image for color conversion
    IplImage *tmp_img = NULL;    // temporary image used by FindChessboardCornerGuesses
    int c, i, j;

    if (etalon_size.width < 3 || etalon_size.height < 3)
        CV_ERROR(CV_StsBadArg, "Chess board size is invalid");

    for (c = 0; c < num_cameras; c++)
    {
        // CV_CHECK_IMAGE is not available in the cvaux library
        // so perform the checks inline.

        //CV_CALL(CV_CHECK_IMAGE(samples[c]));

        if( samples[c] == NULL )
            CV_ERROR( CV_HeaderIsNull, "Null image" );

        if( samples[c]->dataOrder != IPL_DATA_ORDER_PIXEL && samples[c]->nChannels > 1 )
            CV_ERROR( CV_BadOrder, "Unsupported image format" );

        if( samples[c]->maskROI != 0 || samples[c]->tileInfo != 0 )
            CV_ERROR( CV_StsBadArg, "Unsupported image format" );

        if( samples[c]->imageData == 0 )
            CV_ERROR( CV_BadDataPtr, "Null image data" );

        if( samples[c]->roi &&
            ((samples[c]->roi->xOffset | samples[c]->roi->yOffset
              | samples[c]->roi->width | samples[c]->roi->height) < 0 ||
             samples[c]->roi->xOffset + samples[c]->roi->width > samples[c]->width ||
             samples[c]->roi->yOffset + samples[c]->roi->height > samples[c]->height ||
             (unsigned) (samples[c]->roi->coi) > (unsigned) (samples[c]->nChannels)))
            CV_ERROR( CV_BadROISize, "Invalid ROI" );

        // End of CV_CHECK_IMAGE inline expansion

        if (samples[c]->depth != IPL_DEPTH_8U)
            CV_ERROR(CV_BadDepth, "Channel depth of source image must be 8");

        if (samples[c]->nChannels != 3 && samples[c]->nChannels != 1)
            CV_ERROR(CV_BadNumChannels, "Source image must have 1 or 3 channels");
    }

    CV_CALL(object_points = (CvPoint3D32f *)cvAlloc(num_points * sizeof(CvPoint3D32f)));
    CV_CALL(points = (CvPoint2D32f *)cvAlloc(num_points * sizeof(CvPoint2D32f)));

    // fill in the real-world coordinates of the checkerboard points
    FillObjectPoints(object_points, etalon_size, square_size);

    for (c = 0; c < num_cameras; c++)
    {
        CvSize image_size = cvSize(samples[c]->width, samples[c]->height);
        IplImage *img;

        // The input samples are not required to all have the same size or color
        // format. If they have different sizes, the temporary images are
        // reallocated as necessary.
        if (samples[c]->nChannels == 3)
        {
            // convert to gray
            if (gray_img == NULL || gray_img->width != samples[c]->width ||
                gray_img->height != samples[c]->height )
            {
                if (gray_img != NULL)
                    cvReleaseImage(&gray_img);
                CV_CALL(gray_img = cvCreateImage(image_size, IPL_DEPTH_8U, 1));
            }
            
            CV_CALL(cvCvtColor(samples[c], gray_img, CV_BGR2GRAY));

            img = gray_img;
        }
        else
        {
            // no color conversion required
            img = samples[c];
        }

        if (tmp_img == NULL || tmp_img->width != samples[c]->width ||
            tmp_img->height != samples[c]->height )
        {
            if (tmp_img != NULL)
                cvReleaseImage(&tmp_img);
            CV_CALL(tmp_img = cvCreateImage(image_size, IPL_DEPTH_8U, 1));
        }

        int count = num_points;
        bool found = cvFindChessBoardCornerGuesses(img, tmp_img, 0,
                                                   etalon_size, points, &count) != 0;
        if (count == 0)
            continue;
        
        // If found is true, it means all the points were found (count = num_points).
        // If found is false but count is non-zero, it means that not all points were found.

        cvFindCornerSubPix(img, points, count, cvSize(5,5), cvSize(-1,-1),
                    cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.01f));

        // If the image origin is BL (bottom-left), fix the y coordinates
        // so they are relative to the true top of the image.
        if (samples[c]->origin == IPL_ORIGIN_BL)
        {
            for (i = 0; i < count; i++)
                points[i].y = samples[c]->height - 1 - points[i].y;
        }

        if (found)
        {
            // Make sure x coordinates are increasing and y coordinates are decreasing.
            // (The y coordinate of point (0,0) should be the greatest, because the point
            // on the checkerboard that is the origin is nearest the bottom of the image.)
            // This is done after adjusting the y coordinates according to the image origin.
            if (points[0].x > points[1].x)
            {
                // reverse points in each row
                for (j = 0; j < etalon_size.height; j++)
                {
                    CvPoint2D32f *row = &points[j*etalon_size.width];
                    for (i = 0; i < etalon_size.width/2; i++)
                        std::swap(row[i], row[etalon_size.width-i-1]);
                }
            }

            if (points[0].y < points[etalon_size.width].y)
            {
                // reverse points in each column
                for (i = 0; i < etalon_size.width; i++)
                {
                    for (j = 0; j < etalon_size.height/2; j++)
                        std::swap(points[i+j*etalon_size.width],
                                  points[i+(etalon_size.height-j-1)*etalon_size.width]);
                }
            }
        }

        DrawEtalon(samples[c], points, count, etalon_size, found);

        if (!found)
            continue;

        float rotVect[3];
        float rotMatr[9];
        float transVect[3];

        cvFindExtrinsicCameraParams(count,
                                    image_size,
                                    points,
                                    object_points,
                                    const_cast<float *>(camera_intrinsics[c].focal_length),
                                    camera_intrinsics[c].principal_point,
                                    const_cast<float *>(camera_intrinsics[c].distortion),
                                    rotVect,
                                    transVect);

        // Check result against an arbitrary limit to eliminate impossible values.
        // (If the chess board were truly that far away, the camera wouldn't be able to
        // see the squares.)
        if (transVect[0] > 1000*square_size
            || transVect[1] > 1000*square_size
            || transVect[2] > 1000*square_size)
        {
            // ignore impossible results
            continue;
        }

        CvMat rotMatrDescr = cvMat(3, 3, CV_32FC1, rotMatr);
        CvMat rotVectDescr = cvMat(3, 1, CV_32FC1, rotVect);

        /* Calc rotation matrix by Rodrigues Transform */
        cvRodrigues2( &rotVectDescr, &rotMatrDescr );

        //combine the two transformations into one matrix
        //order is important! rotations are not commutative
        float tmat[4][4] = { { 1.f, 0.f, 0.f, 0.f },
                             { 0.f, 1.f, 0.f, 0.f },
                             { 0.f, 0.f, 1.f, 0.f },
                             { transVect[0], transVect[1], transVect[2], 1.f } };
        
        float rmat[4][4] = { { rotMatr[0], rotMatr[1], rotMatr[2], 0.f },
                             { rotMatr[3], rotMatr[4], rotMatr[5], 0.f },
                             { rotMatr[6], rotMatr[7], rotMatr[8], 0.f },
                             { 0.f, 0.f, 0.f, 1.f } };


        MultMatrix(camera_info[c].mat, tmat, rmat);

        // change the transformation of the cameras to put them in the world coordinate 
        // system we want to work with.

        // Start with an identity matrix; then fill in the values to accomplish
        // the desired transformation.
        float smat[4][4] = { { 1.f, 0.f, 0.f, 0.f },
                             { 0.f, 1.f, 0.f, 0.f },
                             { 0.f, 0.f, 1.f, 0.f },
                             { 0.f, 0.f, 0.f, 1.f } };

        // First, reflect through the origin by inverting all three axes.
        smat[0][0] = -1.f;
        smat[1][1] = -1.f;
        smat[2][2] = -1.f;
        MultMatrix(tmat, camera_info[c].mat, smat);

        // Scale x and y coordinates by the focal length (allowing for non-square pixels
        // and/or non-symmetrical lenses).
        smat[0][0] = 1.0f / camera_intrinsics[c].focal_length[0];
        smat[1][1] = 1.0f / camera_intrinsics[c].focal_length[1];
        smat[2][2] = 1.0f;
        MultMatrix(camera_info[c].mat, smat, tmat);

        camera_info[c].principal_point = camera_intrinsics[c].principal_point;
        camera_info[c].valid = true;

        cameras_done++;
    }

exit:
    cvReleaseImage(&gray_img);
    cvReleaseImage(&tmp_img);
    cvFree(&object_points);
    cvFree(&points);

    return cameras_done == num_cameras;
}
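A hedged sketch of how this calibration routine might be driven. Intrinsics would normally come from a prior intrinsic calibration; the focal lengths, principal points, checkerboard geometry, and image filenames below are placeholder assumptions:

// Hypothetical two-camera setup; all numeric intrinsics are placeholders.
Cv3dTrackerCameraIntrinsics intrinsics[2];
Cv3dTrackerCameraInfo info[2];
IplImage* samples[2];

for (int c = 0; c < 2; c++) {
    intrinsics[c].principal_point = cvPoint2D32f(320.0f, 240.0f); // assumes 640x480
    intrinsics[c].focal_length[0] = 600.0f;
    intrinsics[c].focal_length[1] = 600.0f;
    for (int k = 0; k < 4; k++)
        intrinsics[c].distortion[k] = 0.0f;  // assume negligible distortion
}

samples[0] = cvLoadImage("cam0_checkerboard.png");  // assumed filenames
samples[1] = cvLoadImage("cam1_checkerboard.png");

// 6x4 inner-corner checkerboard with 30 mm squares (assumed etalon).
if (cv3dTrackerCalibrateCameras(2, intrinsics, cvSize(6, 4), 30.0f,
                                samples, info))
    printf("both cameras calibrated\n");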
Example #29
IplImage * find_macbeth( const char *img )
{
    IplImage * macbeth_img = cvLoadImage( img,
        CV_LOAD_IMAGE_ANYCOLOR|CV_LOAD_IMAGE_ANYDEPTH );
    if( !macbeth_img )
        return NULL;  // guard before the image is dereferenced below

    IplImage * macbeth_original = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, macbeth_img->nChannels );
    cvCopy(macbeth_img, macbeth_original);
        
    IplImage * macbeth_split[3];
    IplImage * macbeth_split_thresh[3];
    
    for(int i = 0; i < 3; i++) {
        macbeth_split[i] = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, 1 );
        macbeth_split_thresh[i] = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, 1 );
    }
    
    cvSplit(macbeth_img, macbeth_split[0], macbeth_split[1], macbeth_split[2], NULL);
    
    if( macbeth_img )
    {
        int adaptive_method = CV_ADAPTIVE_THRESH_MEAN_C;
        int threshold_type = CV_THRESH_BINARY_INV;
        int block_size = cvRound(
            MIN(macbeth_img->width,macbeth_img->height)*0.02)|1;
        fprintf(stderr,"Using %d as block size\n", block_size);
        
        double offset = 6;
        
        // do an adaptive threshold on each channel
        for(int i = 0; i < 3; i++) {
            cvAdaptiveThreshold(macbeth_split[i], macbeth_split_thresh[i], 255, adaptive_method, threshold_type, block_size, offset);
        }
        
        IplImage * adaptive = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), IPL_DEPTH_8U, 1 );
        
        // OR the binary threshold results together
        cvOr(macbeth_split_thresh[0],macbeth_split_thresh[1],adaptive);
        cvOr(macbeth_split_thresh[2],adaptive,adaptive);
        
        for(int i = 0; i < 3; i++) {
            cvReleaseImage( &(macbeth_split[i]) );
            cvReleaseImage( &(macbeth_split_thresh[i]) );
        }
                
        int element_size = (block_size/10)+2;
        fprintf(stderr,"Using %d as element size\n", element_size);
        
        // do an opening on the threshold image
        IplConvKernel * element = cvCreateStructuringElementEx(element_size,element_size,element_size/2,element_size/2,CV_SHAPE_RECT);
        cvMorphologyEx(adaptive,adaptive,NULL,element,CV_MOP_OPEN);
        cvReleaseStructuringElement(&element);
        
        CvMemStorage* storage = cvCreateMemStorage(0);
        
        CvSeq* initial_quads = cvCreateSeq( 0, sizeof(*initial_quads), sizeof(void*), storage );
        CvSeq* initial_boxes = cvCreateSeq( 0, sizeof(*initial_boxes), sizeof(CvBox2D), storage );
        
        // find contours in the threshold image
        CvSeq * contours = NULL;
        cvFindContours(adaptive,storage,&contours);
        
        int min_size = (macbeth_img->width*macbeth_img->height)/
            (MACBETH_SQUARES*100);
        
        if(contours) {
            int count = 0;
            
            for( CvSeq* c = contours; c != NULL; c = c->h_next) {
                CvRect rect = ((CvContour*)c)->rect;
                // only interested in contours with these restrictions
                if(CV_IS_SEQ_HOLE(c) && rect.width*rect.height >= min_size) {
                    // only interested in quad-like contours
                    CvSeq * quad_contour = find_quad(c, storage, min_size);
                    if(quad_contour) {
                        cvSeqPush( initial_quads, &quad_contour );
                        count++;
                        rect = ((CvContour*)quad_contour)->rect;
                        
                        CvScalar average = contour_average((CvContour*)quad_contour, macbeth_img);
                        
                        CvBox2D box = cvMinAreaRect2(quad_contour,storage);
                        cvSeqPush( initial_boxes, &box );
                        
                        // fprintf(stderr,"Center: %f %f\n", box.center.x, box.center.y);
                        
                        double min_distance = MAX_RGB_DISTANCE;
                        CvPoint closest_color_idx = cvPoint(-1,-1);
                        for(int y = 0; y < MACBETH_HEIGHT; y++) {
                            for(int x = 0; x < MACBETH_WIDTH; x++) {
                                double distance = euclidean_distance_lab(average,colorchecker_srgb[y][x]);
                                if(distance < min_distance) {
                                    closest_color_idx.x = x;
                                    closest_color_idx.y = y;
                                    min_distance = distance;
                                }
                            }
                        }
                        
                        CvScalar closest_color = colorchecker_srgb[closest_color_idx.y][closest_color_idx.x];
                        // fprintf(stderr,"Closest color: %f %f %f (%d %d)\n",
                        //     closest_color.val[2],
                        //     closest_color.val[1],
                        //     closest_color.val[0],
                        //     closest_color_idx.x,
                        //     closest_color_idx.y
                        // );
                        
                        // cvDrawContours(
                        //     macbeth_img,
                        //     quad_contour,
                        //     cvScalar(255,0,0),
                        //     cvScalar(0,0,255),
                        //     0,
                        //     element_size
                        // );
                        // cvCircle(
                        //     macbeth_img,
                        //     cvPointFrom32f(box.center),
                        //     element_size*6,
                        //     cvScalarAll(255),
                        //     -1
                        // );
                        // cvCircle(
                        //     macbeth_img,
                        //     cvPointFrom32f(box.center),
                        //     element_size*6,
                        //     closest_color,
                        //     -1
                        // );
                        // cvCircle(
                        //     macbeth_img,
                        //     cvPointFrom32f(box.center),
                        //     element_size*4,
                        //     average,
                        //     -1
                        // );
                        // CvRect rect = contained_rectangle(box);
                        // cvRectangle(
                        //     macbeth_img,
                        //     cvPoint(rect.x,rect.y),
                        //     cvPoint(rect.x+rect.width, rect.y+rect.height),
                        //     cvScalarAll(0),
                        //     element_size
                        // );
                    }
                }
            }
            
            ColorChecker found_colorchecker;

            fprintf(stderr,"%d initial quads found", initial_quads->total);
            if(count > MACBETH_SQUARES) {
                fprintf(stderr," (probably a Passport)\n");
                
                CvMat* points = cvCreateMat( initial_quads->total , 1, CV_32FC2 );
                CvMat* clusters = cvCreateMat( initial_quads->total , 1, CV_32SC1 );
                
                CvSeq* partitioned_quads[2];
                CvSeq* partitioned_boxes[2];
                for(int i = 0; i < 2; i++) {
                    partitioned_quads[i] = cvCreateSeq( 0, sizeof(**partitioned_quads), sizeof(void*), storage );
                    partitioned_boxes[i] = cvCreateSeq( 0, sizeof(**partitioned_boxes), sizeof(CvBox2D), storage );
                }
                
                // set up the points sequence for cvKMeans2, using the box centers
                for(int i = 0; i < initial_quads->total; i++) {
                    CvBox2D box = (*(CvBox2D*)cvGetSeqElem(initial_boxes, i));
                    
                    cvSet1D(points, i, cvScalar(box.center.x,box.center.y));
                }
                
                // partition into two clusters: passport and colorchecker
                cvKMeans2( points, 2, clusters, 
                           cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,
                                           10, 1.0 ) );
        
                for(int i = 0; i < initial_quads->total; i++) {
                    CvPoint2D32f pt = ((CvPoint2D32f*)points->data.fl)[i];
                    int cluster_idx = clusters->data.i[i];
                    
                    cvSeqPush( partitioned_quads[cluster_idx],
                               cvGetSeqElem(initial_quads, i) );
                    cvSeqPush( partitioned_boxes[cluster_idx],
                               cvGetSeqElem(initial_boxes, i) );

                    // cvCircle(
                    //     macbeth_img,
                    //     cvPointFrom32f(pt),
                    //     element_size*2,
                    //     cvScalar(255*cluster_idx,0,255-(255*cluster_idx)),
                    //     -1
                    // );
                }
                
                ColorChecker partitioned_checkers[2];
                
                // check each of the two partitioned sets for the best colorchecker
                for(int i = 0; i < 2; i++) {
                    partitioned_checkers[i] =
                        find_colorchecker(partitioned_quads[i], partitioned_boxes[i],
                                      storage, macbeth_img, macbeth_original);
                }
                
                // use the colorchecker with the lowest error
                found_colorchecker = partitioned_checkers[0].error < partitioned_checkers[1].error ?
                    partitioned_checkers[0] : partitioned_checkers[1];
                
                cvReleaseMat( &points );
                cvReleaseMat( &clusters );
            }
            else { // just one colorchecker to test
                fprintf(stderr,"\n");
                found_colorchecker = find_colorchecker(initial_quads, initial_boxes,
                                  storage, macbeth_img, macbeth_original);
            }
            
            // render the found colorchecker
            draw_colorchecker(found_colorchecker.values,found_colorchecker.points,macbeth_img,found_colorchecker.size);
            
            // print out the colorchecker info
            for(int y = 0; y < MACBETH_HEIGHT; y++) {            
                for(int x = 0; x < MACBETH_WIDTH; x++) {
                    CvScalar this_value = cvGet2D(found_colorchecker.values,y,x);
                    CvScalar this_point = cvGet2D(found_colorchecker.points,y,x);
                    
                    printf("%.0f,%.0f,%.0f,%.0f,%.0f\n",
                        this_point.val[0],this_point.val[1],
                        this_value.val[2],this_value.val[1],this_value.val[0]);
                }
            }
            printf("%0.f\n%f\n",found_colorchecker.size,found_colorchecker.error);
            
        }
                
        cvReleaseMemStorage( &storage );
        
        if( macbeth_original ) cvReleaseImage( &macbeth_original );
        if( adaptive ) cvReleaseImage( &adaptive );
        
        return macbeth_img;
    }

    if( macbeth_img ) cvReleaseImage( &macbeth_img );

    return NULL;
}
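A possible entry point for the detector above; the command-line handling and output filename are assumptions:

// Hypothetical driver: detect the chart and save the annotated image.
int main(int argc, char** argv)
{
    if (argc != 3) {
        fprintf(stderr, "Usage: %s input_image output_image\n", argv[0]);
        return 1;
    }

    IplImage* result = find_macbeth(argv[1]);
    if (!result) {
        fprintf(stderr, "Could not load or process %s\n", argv[1]);
        return 1;
    }

    cvSaveImage(argv[2], result);
    cvReleaseImage(&result);
    return 0;
}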
Example #30
Model::Model(const char* type_name)
:m_pModel(NULL)
,m_trainPara(NULL)
{
	m_type_name = type_name;
	if (!strcmp(type_name, CV_TYPE_NAME_ML_SVM))
	{
		m_pModel = new CvSVM();
		// construct and initialize default training parameters
		CvSVMParams* para = new CvSVMParams();
		para->svm_type = CvSVM::C_SVC;
		para->kernel_type = CvSVM::RBF;
		para->C = 27.68;
		para->gamma = 0.023;
		m_trainPara = para;
	}
	else if (!strcmp(type_name, CV_TYPE_NAME_ML_KNN))
	{
		m_pModel = new CvKNearest();
		m_trainPara = NULL;
	}
	else if (!strcmp(type_name, CV_TYPE_NAME_ML_NBAYES))
	{
		m_pModel = new CvNormalBayesClassifier();
		m_trainPara = NULL;
	}
	else if (!strcmp(type_name, CV_TYPE_NAME_ML_EM))
	{
		m_pModel = new CvEM();
		m_trainPara = new CvEMParams();
	}
	else if (!strcmp(type_name, CV_TYPE_NAME_ML_BOOSTING))
	{
		m_pModel = new CvBoost();
		m_trainPara = new CvBoostParams();
	}
	else if (!strcmp(type_name, CV_TYPE_NAME_ML_TREE))
	{
		m_pModel = new CvDTree();
		m_trainPara = new CvDTreeParams();
	}
	else if (!strcmp(type_name, CV_TYPE_NAME_ML_ANN_MLP))
	{
		m_pModel = new CvANN_MLP();
		m_trainPara = new CvANN_MLP_TrainParams(cvTermCriteria(CV_TERMCRIT_ITER,30000,0.001),CvANN_MLP_TrainParams::BACKPROP, 0.001);
	}
	else if (!strcmp(type_name, CV_TYPE_NAME_ML_RTREES))
	{
		m_pModel = new CvRTrees();
		m_trainPara = new CvRTParams();
	}
	else if (!strcmp(type_name, CV_TYPE_NAME_ML_GBT))
	{
		m_pModel = new CvGBTrees();
		m_trainPara = new CvGBTreesParams(CvGBTrees::DEVIANCE_LOSS, 100, 0.1f, 0.1f, 3, false );
	}
	else
	{
		cerr<<type_name<<"is not supported"<<endl;
		exit(1);
		//m_pModel = NULL;
		//m_trainPara = NULL;
	}
}
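A brief usage sketch of the constructor above. Only the constructor is taken from the source; the rest of the Model class is not shown here, so nothing beyond construction is assumed:

// Type names are standard OpenCV ML constants.
Model svm_model(CV_TYPE_NAME_ML_SVM);     // wraps CvSVM with C_SVC/RBF defaults
Model knn_model(CV_TYPE_NAME_ML_KNN);     // wraps CvKNearest, no train parameters
Model mlp_model(CV_TYPE_NAME_ML_ANN_MLP); // wraps CvANN_MLP with BACKPROP params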