Example #1
static void
icvCalcFMM(const CvMat *f, CvMat *t, CvPriorityQueueFloat *Heap, bool negate) {
   int i, j, ii = 0, jj = 0, q;
   float dist;

   while (Heap->Pop(&ii,&jj)) {

      unsigned known=(negate)?CHANGE:KNOWN;
      CV_MAT_ELEM(*f,uchar,ii,jj) = (uchar)known;

      for (q=0; q<4; q++) {
         i=0; j=0;
         if     (q==0) {i=ii-1; j=jj;}
         else if(q==1) {i=ii;   j=jj-1;}
         else if(q==2) {i=ii+1; j=jj;}
         else {i=ii;   j=jj+1;}
         if ((i<=0)||(j<=0)||(i>f->rows-1)||(j>f->cols-1)) continue;

         if (CV_MAT_ELEM(*f,uchar,i,j)==INSIDE) {
            dist = min4(FastMarching_solve(i-1,j,i,j-1,f,t),
                        FastMarching_solve(i+1,j,i,j-1,f,t),
                        FastMarching_solve(i-1,j,i,j+1,f,t),
                        FastMarching_solve(i+1,j,i,j+1,f,t));
            CV_MAT_ELEM(*t,float,i,j) = dist;
            CV_MAT_ELEM(*f,uchar,i,j) = BAND;
            Heap->Push(i,j,dist);
         }
      }
   }
}
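Every snippet on this page goes through the CV_MAT_ELEM macro from OpenCV's legacy C API, which expands to a typed element access on a CvMat header. A minimal, self-contained sketch of the access pattern (matrix size, type and file layout are arbitrary, chosen only for illustration):

#include <opencv2/core/core_c.h>   /* legacy C API; <opencv/cv.h> also works on older installs */

int main(void)
{
    CvMat *m = cvCreateMat(3, 3, CV_32FC1);   /* 3x3 single-channel float matrix */
    cvSetZero(m);

    /* CV_MAT_ELEM(matrix, elem_type, row, col) is an lvalue: readable and writable */
    CV_MAT_ELEM(*m, float, 1, 2) = 7.5f;
    float v = CV_MAT_ELEM(*m, float, 1, 2);

    cvReleaseMat(&m);
    return (v == 7.5f) ? 0 : 1;
}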
void compose_grabcut_seedmatrix2(CvMat* output, CvRect facebox,  IplImage* seeds, gboolean confidence)
{
  cvSet(output, cvScalar(GCS_PR_BGD), NULL);

  double a=(confidence) ? 0.85 : 0.55;  //extra growing of body box region
  double A=1.20;  // body bbox is lower than face bbox (neck)
  double b=1+a;

  int bodyx0 = ((facebox.x-facebox.width*   a) < 0 ) ? 0 : (facebox.x-facebox.width* a);
  int bodyy0 = ((facebox.y+facebox.height * A) > output->rows) ? output->rows : (facebox.y+facebox.height*A );
  int bodyx1 = ((facebox.x+facebox.width*   b) > output->cols) ? output->cols : (facebox.x+facebox.width*b);
  int bodyy1 = (output->rows);

  double c=-0.10;  //extra growing of face bbox region, horizontal axis
  double C= 0.00;  //extra growing of face bbox region, vertical axis
  double d= 1+c;
  double D= 4+C;

  int facex0 = ((facebox.x-facebox.width  *c) < 0 ) ? 0 : (facebox.x - facebox.width *c);
  int facey0 = ((facebox.y-facebox.height *C) < 0)  ? 0 : (facebox.y - facebox.height*C);
  int facex1 = ((facebox.x+facebox.width  *d) > output->cols) ? output->cols : (facebox.x+facebox.width *d);
  int facey1 = ((facebox.y+facebox.height *D) > output->rows) ? output->rows : (facebox.y+facebox.height*D);

  int x, y;
  for( x = 0; x < output->cols; x++ ){
    for( y = 0; y < output->rows; y++ ){

      // large bbox around face
      if( ( x >= facex0 ) && ( x <= facex1) && ( y >= facey0 ) && ( y <= facey1))
        CV_MAT_ELEM(*output, uchar, y, x) = GCS_PR_FGD;
      // body bbox: ONLY IF WE DON'T GET IT FROM CLUSTERING COLOURS (K-MEANS)
#ifndef KMEANS
#if 0
      if( ( x >= bodyx0)  && ( x <= bodyx1) && ( y >= bodyy0)  && ( y <= bodyy1))
        CV_MAT_ELEM(*output, uchar, y, x) = GCS_PR_FGD;
#else
      double delta= 0.30 * facebox.width;
      int bodyxdelta = (delta)*(y-bodyy0)/(bodyy1-bodyy0); //pyramid-like shape
      if( ( x >= bodyx0-bodyxdelta)  && ( x <= bodyx1+bodyxdelta) && 
          ( y >= bodyy0)             && ( y <= bodyy1))
        CV_MAT_ELEM(*output, uchar, y, x) = GCS_PR_FGD;

#endif
#endif //!KMEANS

      // seeds, usually coming from movement, could also be skin or a combination
      if( seeds && ( (cvGetReal2D( seeds, y, x) > 10) ) )
        CV_MAT_ELEM(*output, uchar, y, x) = GCS_PR_FGD ;
    }
  }

  
}
/*	Member function( private )
 * Draws the calculated optical flow on the canvas
 */
void airGest::drawFlowMap( void )
{
	int x, y;
	CvMat flowMat = flowMap;
	
	for( y = FEATURE_STEP/2; y < canvas.rows; y += FEATURE_STEP ) {
        for( x = FEATURE_STEP/2; x < canvas.cols; x += FEATURE_STEP ) {
            CvPoint2D32f fxy = CV_MAT_ELEM( flowMat, CvPoint2D32f, y, x);
            line( canvas, 
				  Point(x,y), 
				  Point(cvRound(x+fxy.x), cvRound(y+fxy.y)), 
				  COLOR_FLOWMAP, 1, 8, 0 );
		}
	}
	
	//CvMat accFlowMat = accFlow;
	
	//std::cout << "dim( accFlow ) = " << accFlow.rows << "x" << accFlow.cols << "\n";
	//for( y = FEATURE_STEP/2; y < canvas.rows; y += FEATURE_STEP ) {
		//for( x = FEATURE_STEP/2; x < canvas.cols; x += FEATURE_STEP ) {
			//CvPoint2D32f fxy = CV_MAT_ELEM( accFlowMat, CvPoint2D32f, y/FEATURE_STEP, x/FEATURE_STEP );
			//line( accCanvas,
				  //Point( x, y ),
				  //Point( cvRound( x + fxy.x ), cvRound( y + fxy.y ) ),
				  //COLOR_FLOWMAP, 1, 8, 0 );
			//std::cout << "(x,y) = " << x << "," << y << std::endl;
		//}
	//}
}
Example #4
    bool Init( const CvMat* f )
    {
        int i,j;
        for( i = num = 0; i < f->rows; i++ )
        {
            for( j = 0; j < f->cols; j++ )
                num += CV_MAT_ELEM(*f,uchar,i,j)!=0;
        }
        if (num<=0) return false;
        mem = (CvHeapElem*)cvAlloc((num+2)*sizeof(CvHeapElem));
        if (mem==NULL) return false;

        head       = mem;
        head->i    = head->j = -1;
        head->prev = NULL;
        head->next = mem+1;
        head->T    = -FLT_MAX;
        empty      = mem+1;
        for (i=1; i<=num; i++) {
            mem[i].prev   = mem+i-1;
            mem[i].next   = mem+i+1;
            mem[i].i      = mem[i].j = -1;
            mem[i].T      = FLT_MAX;
        }
        tail       = mem+i;
        tail->i    = tail->j = -1;
        tail->prev = mem+i-1;
        tail->next = NULL;
        tail->T    = FLT_MAX;
        return true;
    }
ofPoint ofxOpticalFlowFarneback::getVelAtPixel(int x, int y) {
	x = ofClamp(x, 0, flow->width - 1);
	y = ofClamp(y, 0, flow->height - 1);

	ofPoint p;
	CvPoint2D32f fxy = CV_MAT_ELEM(*flow, CvPoint2D32f, y, x);
	p.x = fxy.x;
	p.y = fxy.y;

	return p;
}
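A hedged usage sketch for getVelAtPixel: sample the field on a coarse grid and average the flow magnitude. The names averageFlowMagnitude, flowSolver, w and h are illustrative assumptions, not part of the addon's API; the only real requirement is that the solver has already been updated with the current frame.

float averageFlowMagnitude(ofxOpticalFlowFarneback& flowSolver, int w, int h) {
	float sum = 0.0f;
	int samples = 0;
	for (int y = 0; y < h; y += 8) {
		for (int x = 0; x < w; x += 8) {
			ofPoint v = flowSolver.getVelAtPixel(x, y);   // query is clamped inside the flow field
			sum += sqrtf(v.x * v.x + v.y * v.y);
			samples++;
		}
	}
	return (samples > 0) ? sum / samples : 0.0f;
}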
Example #6
 bool Add(const CvMat* f) {
     int i,j;
     for (i=0; i<f->rows; i++) {
         for (j=0; j<f->cols; j++) {
             if (CV_MAT_ELEM(*f,uchar,i,j)!=0) {
                 if (!Push(i,j,0)) return false;
             }
         }
     }
     return true;
 }
Example #7
    void ReAllocKernel(int  w, int h) {
        int     x, y;
        float   x0 = 0.5f * (w - 1);
        float   y0 = 0.5f * (h - 1);
        assert(w > 0);
        assert(h > 0);
        m_ObjSize = cvSize(w, h);

        if (m_KernelHist) { cvReleaseMat(&m_KernelHist); }
        if (m_KernelMeanShift) { cvReleaseMat(&m_KernelMeanShift); }
        m_KernelHist = cvCreateMat(h, w, DefHistTypeMat);
        m_KernelMeanShift = cvCreateMat(h, w, DefHistTypeMat);

        for (y = 0; y < h; ++y) for (x = 0; x < w; ++x) {
                double r2 = ((x - x0) * (x - x0) / (x0 * x0) + (y - y0) * (y - y0) / (y0 * y0));
//            double r2 = ((x-x0)*(x-x0)+(y-y0)*(y-y0))/((y0*y0)+(x0*x0));
                CV_MAT_ELEM(m_KernelHist[0], DefHistType, y, x) = (DefHistType)GetKernelHist(r2);
                CV_MAT_ELEM(m_KernelMeanShift[0], DefHistType, y, x) = (DefHistType)GetKernelMeanShift(r2);
            }
    }
Example #8
float FastMarching_solve(int i1,int j1,int i2,int j2, const CvMat* f, const CvMat* t)
{
    double sol, a11, a22, m12;
    a11=CV_MAT_ELEM(*t,float,i1,j1);
    a22=CV_MAT_ELEM(*t,float,i2,j2);
    m12=MIN(a11,a22);
    
    if( CV_MAT_ELEM(*f,uchar,i1,j1) != INSIDE )
        if( CV_MAT_ELEM(*f,uchar,i2,j2) != INSIDE )
            if( fabs(a11-a22) >= 1.0 )
                sol = 1+m12;
            else
                sol = (a11+a22+sqrt((double)(2-(a11-a22)*(a11-a22))))*0.5;
        else
            sol = 1+a11;
    else if( CV_MAT_ELEM(*f,uchar,i2,j2) != INSIDE )
        sol = 1+a22;
    else
        sol = 1+m12;
            
    return (float)sol;
}
void drawOptFlowMap(const CvMat* flow, CvMat* cflowmap, int step,
                    double scale, CvScalar color)
{
    int x, y;
    for( y = 0; y < cflowmap->rows; y += step)
        for( x = 0; x < cflowmap->cols; x += step)
        {
            CvPoint2D32f fxy = CV_MAT_ELEM(*flow, CvPoint2D32f, y, x);
            cvLine(cflowmap, cvPoint(x,y), cvPoint(cvRound(x+fxy.x), cvRound(y+fxy.y)),
                 color, 1, 8, 0);
            cvCircle(cflowmap, cvPoint(x,y), 2, color, -1, 8, 0);
        }
}
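A hedged sketch of driving drawOptFlowMap end to end with the C-API Farneback routine. The file names, window name and helper name are made up for illustration; the only real assumptions are two same-sized 8-bit grayscale frames and an OpenCV 2.x build that ships cvCalcOpticalFlowFarneback.

void demoDrawFlow(const char *prevFile, const char *nextFile)   // hypothetical helper
{
    IplImage *prev = cvLoadImage(prevFile, CV_LOAD_IMAGE_GRAYSCALE);
    IplImage *next = cvLoadImage(nextFile, CV_LOAD_IMAGE_GRAYSCALE);
    if (!prev || !next) return;

    CvMat *flow  = cvCreateMat(prev->height, prev->width, CV_32FC2);
    CvMat *cflow = cvCreateMat(prev->height, prev->width, CV_8UC3);

    cvCalcOpticalFlowFarneback(prev, next, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
    cvCvtColor(prev, cflow, CV_GRAY2BGR);          // draw the vectors over the first frame

    drawOptFlowMap(flow, cflow, 16, 1.0, CV_RGB(0, 255, 0));

    cvNamedWindow("flow", CV_WINDOW_AUTOSIZE);
    cvShowImage("flow", cflow);
    cvWaitKey(0);

    cvReleaseMat(&flow);
    cvReleaseMat(&cflow);
    cvReleaseImage(&prev);
    cvReleaseImage(&next);
}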
Example #10
void compose_grabcut_seedmatrix3(CvMat* output, IplImage* ghost ,  IplImage* seeds)
{
  cvSet(output, cvScalar(GCS_PR_BGD), NULL);

  int x, y;
  float  val;
  for( x = 0; x < output->cols; x++ ){
    for( y = 0; y < output->rows; y++ ){      
      val = cvGetReal2D( ghost, y, x);

      CV_MAT_ELEM(*output, uchar, y, x) =
        (val < 100) ? GCS_BGD :
         (val < 150) ? GCS_PR_BGD :
          (val < 200) ? GCS_PR_FGD :
                        GCS_FGD;

      // seeds, usually coming from movement, could also be skin or a combination
      if( seeds && ( (cvGetReal2D( seeds, y, x) > 10) ) )
        CV_MAT_ELEM(*output, uchar, y, x) = (CV_MAT_ELEM(*output, uchar, y, x)== GCS_FGD) ? GCS_FGD : GCS_PR_FGD ;
    }
  }

  
}
void calcKernelEpanechnikov(CvMat* pK)
{    /* Allocate kernel for histogram creation: */
    int     x,y;
    int     w = pK->width;
    int     h = pK->height;
    float   x0 = 0.5f*(w-1);
    float   y0 = 0.5f*(h-1);

    for(y=0; y<h; ++y) for(x=0; x<w; ++x)
    {
//                float   r2 = ((x-x0)*(x-x0)/(x0*x0)+(y-y0)*(y-y0)/(y0*y0));
        float   r2 = ((x-x0)*(x-x0)+(y-y0)*(y-y0))/((x0*x0)+(y0*y0));
        CV_MAT_ELEM(pK[0],DefHistType, y, x) = (DefHistType)((r2<1)?(1-r2):0);
    }
}   /* Allocate kernel for histogram creation. */
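As a hedged usage note: in the blob-tracking sources these snippets appear to come from, DefHistType is a float and DefHistTypeMat is CV_32F, so calcKernelEpanechnikov can be fed a single-channel float matrix directly. A minimal sketch under that assumption (the wrapper name is illustrative only):

void demoEpanechnikovKernel(void)   // hypothetical wrapper for illustration
{
    CvMat *kernel = cvCreateMat(31, 31, CV_32FC1);   // odd size keeps the kernel centred on a pixel
    calcKernelEpanechnikov(kernel);
    // kernel now holds 1-r^2 inside the unit ellipse and 0 outside; the centre weight is 1
    printf("centre weight = %f\n", CV_MAT_ELEM(*kernel, float, 15, 15));
    cvReleaseMat(&kernel);
}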
Example #12
void icvWriteVecSample( FILE* file, CvArr* sample )
{
    CvMat* mat, stub;
    int r, c;
    short tmp;
    uchar chartmp;

    mat = cvGetMat( sample, &stub );
    chartmp = 0;
    fwrite( &chartmp, sizeof( chartmp ), 1, file );
    for( r = 0; r < mat->rows; r++ )
    {
        for( c = 0; c < mat->cols; c++ )
        {
            tmp = (short) (CV_MAT_ELEM( *mat, uchar, r, c ));
            fwrite( &tmp, sizeof( tmp ), 1, file );
        }
    }
}
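A hedged sketch of calling icvWriteVecSample to append a single 24x24 8-bit sample. The .vec header that OpenCV's createsamples tool writes before the samples is deliberately omitted, and the file and wrapper names are made up.

void demoWriteOneSample(void)   // hypothetical wrapper for illustration
{
    CvMat *sample = cvCreateMat(24, 24, CV_8UC1);
    cvSetZero(sample);

    FILE *vec = fopen("samples.vec", "ab");   // .vec header handling not shown here
    if (vec) {
        icvWriteVecSample(vec, sample);       // writes a 0 byte, then rows*cols shorts
        fclose(vec);
    }
    cvReleaseMat(&sample);
}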
float motionfactor(const CvMat* fback_flow_map, CvMat* cflowmap, int step) {
    int x, y;
    float totalx = 0;
    float totaly = 0;
    float dx = 0;
    float dy = 0;
    float overall_total = 0;

    for( y = 0; y < cflowmap->rows; y += step) {
        for( x = 0; x < cflowmap->cols; x += step) {
            CvPoint2D32f fxy = CV_MAT_ELEM(*fback_flow_map, CvPoint2D32f, y, x);

            //if the component is negative (flow values are signed per-pixel velocities),
            //take its absolute value so only magnitudes contribute to the totals.

            if(fxy.x < 0) {
                dx  = (fxy.x * -1);
            }else{
                dx = fxy.x;
            }

            if(fxy.y < 0)
            {
                dy  = (fxy.y * -1);
            }else{
                dy = fxy.y;
            }

            totalx =  totalx+dx;
            totaly = totaly+dy;
        }
    }

    overall_total = totalx + totaly;


    return overall_total;

}
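The value returned by motionfactor is a raw sum of absolute flow components over the sampled grid, so it grows with both frame size and sampling density. A hedged normalising wrapper (the name and approach are illustrative only):

float motionPerSample(const CvMat *fback_flow_map, CvMat *cflowmap, int step) {
    // number of grid samples visited by motionfactor()
    int samplesX = (cflowmap->cols + step - 1) / step;
    int samplesY = (cflowmap->rows + step - 1) / step;
    float total = motionfactor(fback_flow_map, cflowmap, step);
    return total / (float)(samplesX * samplesY);
}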
Example #14
int opticaltri( CvMat * &clean_texture, int verts )
{
	char * im1fname = "conhull-dirty-thresh.jpg";
	char * im2fname = "conhull-clean-thresh.jpg";

	int count = MAX_COUNT;
	char * status;
	
	CvPoint2D32f * source_points;
	CvPoint2D32f * dest_points;
	CvPoint2D32f * delaunay_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));

	// count = opticalflow( im1fname, im2fname, source_points, dest_points, status ); 
	count = findsiftpoints( "conhull-dirty.jpg", "conhull-clean.jpg", source_points, dest_points, status ); 

	IplImage * image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_COLOR);

	CvMemStorage * storage = cvCreateMemStorage(0);
	CvSubdiv2D * delaunay = cvCreateSubdivDelaunay2D( cvRect(0,0,image1->width,image1->height), storage);

	IplImage * image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_COLOR);

	cvSet( image1, cvScalarAll(255) );

	std::map<CvPoint, CvPoint> point_lookup_map;
	std::vector<std::pair<CvPoint, CvPoint> > point_lookup;

	int num_matches = 0;
	int num_out_matches = 0;
	int max_dist = 50;
	int offset = 200;	

	// put corners in the point lookup as going to themselves
	point_lookup_map[cvPoint(0,0)] = cvPoint(0,0);
	point_lookup_map[cvPoint(0,image1->height-1)] = cvPoint(0,image1->height-1);
	point_lookup_map[cvPoint(image1->width-1,0)] = cvPoint(image1->width-1,0);
	point_lookup_map[cvPoint(image1->width-1,image1->height-1)] = cvPoint(image1->width-1,image1->height-1);

	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,0), cvPoint(0,0)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,image1->height-1), cvPoint(0,image1->height-1)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,0), cvPoint(image1->width-1,0)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,image1->height-1), cvPoint(image1->width-1,image1->height-1)));

	printf("Inserting corners...");
	// put corners in the Delaunay subdivision
	for(unsigned int i = 0; i < point_lookup.size(); i++) {
		cvSubdivDelaunay2DInsert( delaunay, cvPointTo32f(point_lookup[i].first) );
	}
	printf("done.\n");

	CvSubdiv2DEdge proxy_edge;
	for(int i = 0; i < count; i++) {
		if(status[i]) {
			CvPoint source = cvPointFrom32f(source_points[i]);
			CvPoint dest = cvPointFrom32f(dest_points[i]);
	
			if((((int)fabs((double)(source.x - dest.x))) > max_dist) ||
				 (((int)fabs((double)(source.y - dest.y))) > max_dist)) {	
				num_out_matches++;
			}
			else if((dest.x >= 0) && (dest.y >= 0) && (dest.x < (image1->width)) && (dest.y < (image1->height))) {
				if(point_lookup_map.find(source) == point_lookup_map.end()) {
					num_matches++;
				
					point_lookup_map[source] = dest;
					point_lookup.push_back(std::pair<CvPoint,CvPoint>(source,dest));
					// delaunay_points[i] = 
					(cvSubdivDelaunay2DInsert( delaunay, cvPointTo32f(source) ))->pt;
					cvSetImageROI( image1, cvRect(source.x-8,source.y-8,8*2,8*2) );
					cvResetImageROI( image2 );
					cvGetRectSubPix( image2, image1, dest_points[i] );
				}
				/*
				cvSet2D( image1, source.y, source.x, cvGet2D( image2, dest.y, dest.x ) );
				cvSet2D( image1, source.y, source.x+1, cvGet2D( image2, dest.y, dest.x+1 ) );
				cvSet2D( image1, source.y, source.x-1, cvGet2D( image2, dest.y, dest.x-1 ) );
				cvSet2D( image1, source.y+1, source.x, cvGet2D( image2, dest.y+1, dest.x ) );
				cvSet2D( image1, source.y-1, source.x, cvGet2D( image2, dest.y-1, dest.x ) );
				cvSet2D( image1, source.y+1, source.x+1, cvGet2D( image2, dest.y+1, dest.x+1 ) );
				cvSet2D( image1, source.y-1, source.x-1, cvGet2D( image2, dest.y-1, dest.x-1 ) );
				cvSet2D( image1, source.y+1, source.x-1, cvGet2D( image2, dest.y+1, dest.x-1 ) );
				cvSet2D( image1, source.y-1, source.x+1, cvGet2D( image2, dest.y-1, dest.x+1 ) );
				*/

				// cvCircle( image1, source, 4, CV_RGB(255,0,0), 2, CV_AA );
				// cvCircle( image2, dest, 4, CV_RGB(255,0,0), 2, CV_AA );
			}

			/*
			cvSetImageROI( image1, cvRect(source.x-offset,source.y-offset,offset*2,offset*2) );
			cvSetImageROI( image2, cvRect(dest.x-offset,dest.y-offset,offset*2,offset*2) );
			cvNamedWindow("image1",0);
			cvNamedWindow("image2",0);
			cvShowImage("image1",image1);
			cvShowImage("image2",image2);
			printf("%d,%d -> %d,%d\n",source.x,source.y,dest.x,dest.y);
			cvWaitKey(0);
			cvDestroyAllWindows();
			*/
		}
	}
	printf("%d %d\n",num_matches,num_out_matches);
	printf("%d lookups\n",point_lookup_map.size());

	cvResetImageROI( image1 );

	cvSaveImage("sparse.jpg", image1);

	cvReleaseImage(&image1);
	image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_COLOR);
	cvSet( image1, cvScalarAll(255) );
	printf("Warping image...");

	CvSeqReader  reader;
	int total = delaunay->edges->total;
	int elem_size = delaunay->edges->elem_size;


	std::vector<Triangle> trivec;
	std::vector<CvMat *> baryinvvec;

	for( int i = 0; i < total*2; i++ ) {
		if((i == 0) || (i == total)) {
			cvStartReadSeq( (CvSeq*)(delaunay->edges), &reader, 0 );
		}
		CvQuadEdge2D* edge = (CvQuadEdge2D*)(reader.ptr);

		if( CV_IS_SET_ELEM( edge ))	{
			CvSubdiv2DEdge curedge = (CvSubdiv2DEdge)edge;
			CvSubdiv2DEdge t = curedge;
			Triangle temptri;
			int count = 0;
			
			// construct a triangle from this edge
			do {
				CvSubdiv2DPoint* pt = cvSubdiv2DEdgeOrg( t );
				if(count < 3) {
					pt->pt.x = pt->pt.x >= image1->width ? image1->width-1 : pt->pt.x;
					pt->pt.y = pt->pt.y >= image1->height ? image1->height-1 : pt->pt.y;
					pt->pt.x = pt->pt.x < 0 ? 0 : pt->pt.x;
					pt->pt.y = pt->pt.y < 0 ? 0 : pt->pt.y;

					temptri.points[count] = cvPointFrom32f( pt->pt );
				}
				else {
					printf("More than 3 edges\n");
				}
				count++;
				if(i < total)
					t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
				else
					t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_RIGHT );
			} while( t != curedge );
			
			// check that triangle is not already in
			if( std::find(trivec.begin(), trivec.end(), temptri) == trivec.end() ) {
				// push triangle in and draw
				trivec.push_back(temptri);
				cvLine( image1, temptri.points[0], temptri.points[1], CV_RGB(255,0,0), 1, CV_AA, 0 );
				cvLine( image1, temptri.points[1], temptri.points[2], CV_RGB(255,0,0), 1, CV_AA, 0 );
				cvLine( image1, temptri.points[2], temptri.points[0], CV_RGB(255,0,0), 1, CV_AA, 0 );

				// compute barycentric computation vector for this triangle
				CvMat * barycen = cvCreateMat( 3, 3, CV_32FC1 );
				CvMat * baryceninv = cvCreateMat( 3, 3, CV_32FC1 );

				barycen->data.fl[3*0+0] = temptri.points[0].x;
				barycen->data.fl[3*0+1] = temptri.points[1].x;
				barycen->data.fl[3*0+2] = temptri.points[2].x;
				barycen->data.fl[3*1+0] = temptri.points[0].y;
				barycen->data.fl[3*1+1] = temptri.points[1].y;
				barycen->data.fl[3*1+2] = temptri.points[2].y;
				barycen->data.fl[3*2+0] = 1;
				barycen->data.fl[3*2+1] = 1;
				barycen->data.fl[3*2+2] = 1;

				cvInvert( barycen, baryceninv, CV_LU );
				baryinvvec.push_back(baryceninv);

				cvReleaseMat( &barycen );
			}
		}

		CV_NEXT_SEQ_ELEM( elem_size, reader );
	}
	printf("%d triangles...", trivec.size());
	cvSaveImage("triangles.jpg", image1);
	
	cvSet( image1, cvScalarAll(255) );
	IplImage * clean_nonthresh = cvLoadImage( "conhull-clean.jpg", CV_LOAD_IMAGE_COLOR );

	// for each triangle
	for(unsigned int i = 0; i < trivec.size(); i++) {
		Triangle curtri = trivec[i];
		CvMat * curpoints = cvCreateMat( 1, 3, CV_32SC2 );
		Triangle target;
		std::map<CvPoint,CvPoint>::iterator piter[3];
		
		printf("Triangle %d / %d\n",i,trivec.size());
		int is_corner = 0;
		for(int j = 0; j < 3; j++) {
			/*
			curpoints->data.i[2*j+0] = curtri.points[j].x;
			curpoints->data.i[2*j+1] = curtri.points[j].y;
			*/
			CV_MAT_ELEM( *curpoints, CvPoint, 0, j ) = curtri.points[j];
			printf("%d,%d\n",curtri.points[j].x,curtri.points[j].y);
	
			if((curtri.points[j] == cvPoint(0,0)) ||  (curtri.points[j] == cvPoint(0,image1->height - 1)) ||(curtri.points[j] == cvPoint(image1->width - 1,0)) ||(curtri.points[j] == cvPoint(image1->width - 1,image1->height - 1))) {
				is_corner++;
			}
			

			for(unsigned int k = 0; k < point_lookup.size(); k++) {
				std::pair<CvPoint,CvPoint> thispair = point_lookup[k];
				if(thispair.first == curtri.points[j]) {
					target.points[j] = thispair.second;
					break;
				}
			}

			/*
			piter[j] = point_lookup_map.find(curtri.points[j]);
			if(piter[j] != point_lookup_map.end() ) {
				target.points[j] = piter[j]->second;
			}
			*/
		}
			
		// if((piter[0] != point_lookup_map.end()) && (piter[1] != point_lookup_map.end()) && (piter[2] != point_lookup_map.end())) {
		if(is_corner < 3) {
			CvMat * newcorners = cvCreateMat( 3, 3, CV_32FC1 );
			newcorners->data.fl[3*0+0] = target.points[0].x;
			newcorners->data.fl[3*0+1] = target.points[1].x;
			newcorners->data.fl[3*0+2] = target.points[2].x;
			newcorners->data.fl[3*1+0] = target.points[0].y;
			newcorners->data.fl[3*1+1] = target.points[1].y;
			newcorners->data.fl[3*1+2] = target.points[2].y;
			newcorners->data.fl[3*2+0] = 1;
			newcorners->data.fl[3*2+1] = 1;
			newcorners->data.fl[3*2+2] = 1;

			CvContour hdr;
			CvSeqBlock blk;
			CvRect trianglebound = cvBoundingRect( cvPointSeqFromMat(CV_SEQ_KIND_CURVE+CV_SEQ_FLAG_CLOSED, curpoints, &hdr, &blk), 1 );
			printf("Bounding box: %d,%d,%d,%d\n",trianglebound.x,trianglebound.y,trianglebound.width,trianglebound.height);
			for(int y = trianglebound.y; (y < (trianglebound.y + trianglebound.height)) && ( y < image1->height); y++) {
				for(int x = trianglebound.x; (x < (trianglebound.x + trianglebound.width)) && (x < image1->width); x++) {
					// check to see if we're inside this triangle
					/*
					CvPoint v0 = cvPoint( curtri.points[2].x - curtri.points[0].x, curtri.points[2].y - curtri.points[0].y );
					CvPoint v1 = cvPoint( curtri.points[1].x - curtri.points[0].x, curtri.points[1].y - curtri.points[0].y );
					CvPoint v2 = cvPoint( x - curtri.points[0].x, y - curtri.points[0].y );
					
					int dot00 = v0.x * v0.x + v0.y * v0. y;
					int dot01 = v0.x * v1.x + v0.y * v1. y;
					int dot02 = v0.x * v2.x + v0.y * v2. y;
					int dot11 = v1.x * v1.x + v1.y * v1. y;
					int dot12 = v1.x * v2.x + v1.y * v2. y;

					double invDenom = 1.0 / (double)(dot00 * dot11 - dot01 * dot01);
					double u = (double)(dot11 * dot02 - dot01 * dot12) * invDenom;
					double v = (double)(dot00 * dot12 - dot01 * dot02) * invDenom;
					*/

					CvMat * curp = cvCreateMat(3, 1, CV_32FC1);
					CvMat * result = cvCreateMat(3, 1, CV_32FC1);
					curp->data.fl[0] = x;
					curp->data.fl[1] = y;
					curp->data.fl[2] = 1;
					cvMatMul( baryinvvec[i], curp, result );
					// double u = result->data.fl[0]/result->data.fl[2];
					// double v = result->data.fl[1]/result->data.fl[2];

					/*
					if((i == 3019) && (y == 1329) && (x > 2505) && (x < 2584)) {
						printf("Range %d: %f, %f, %f\t%f, %f, %f\n",x,result->data.fl[0],result->data.fl[1],result->data.fl[2],
								sourcepoint->data.fl[0],sourcepoint->data.fl[1],sourcepoint->data.fl[2]);
					}
					*/

					if( (result->data.fl[0] > MIN_VAL) && (result->data.fl[1] > MIN_VAL) && (result->data.fl[2] > MIN_VAL) && (fabs(1.0 - (result->data.fl[0]+result->data.fl[1]+result->data.fl[2])) <= 0.01) ) {
					// if((u > 0) || (v > 0) /*&& ((u +v) < 1)*/ )
						// printf("Barycentric: %f %f %f\n", result->data.fl[0], result->data.fl[1], result->data.fl[2]);
						// this point is inside this triangle
						// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
						//	trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);
						
						CvMat * sourcepoint = cvCreateMat(3, 1, CV_32FC1);
						cvMatMul( newcorners, result, sourcepoint );	
					
						double sourcex = sourcepoint->data.fl[0]/*/sourcepoint->data.fl[2]*/;
						double sourcey = sourcepoint->data.fl[1]/*/sourcepoint->data.fl[2]*/;
						if((sourcex >= 0) && (sourcey >= 0) && (sourcex < (image1->width)) && (sourcey < (image1->height))) {
							// printf("%d,%d %d,%d\n",x,y,(int)sourcex,(int)sourcey);
							cvSet2D( image1, y, x, cvGet2D( clean_nonthresh, (int)sourcey, (int)sourcex ) );
						}
	
						
						// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
						//		trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);

						cvReleaseMat( &sourcepoint );
					}
					cvReleaseMat( &result );
					cvReleaseMat( &curp );
				}
			}
			
			for(int k = 0; k < verts; k++) {
				double x = clean_texture->data.fl[2*k+0];
				double y = clean_texture->data.fl[2*k+1];
				
				// check to see if we're inside this triangle
				CvMat * curp = cvCreateMat(3, 1, CV_32FC1);
				CvMat * result = cvCreateMat(3, 1, CV_32FC1);
				curp->data.fl[0] = x;
				curp->data.fl[1] = y;
				curp->data.fl[2] = 1;
				cvMatMul( baryinvvec[i], curp, result );
			
				if( (result->data.fl[0] > MIN_VAL) && (result->data.fl[1] > MIN_VAL) && (result->data.fl[2] > MIN_VAL) && (fabs(1.0 - (result->data.fl[0]+result->data.fl[1]+result->data.fl[2])) <= 0.01) ) {
					
					CvMat * sourcepoint = cvCreateMat(3, 1, CV_32FC1);
					cvMatMul( newcorners, result, sourcepoint );	
				
					double sourcex = sourcepoint->data.fl[0]/*/sourcepoint->data.fl[2]*/;
					double sourcey = sourcepoint->data.fl[1]/*/sourcepoint->data.fl[2]*/;
					if((sourcex >= 0) && (sourcey >= 0) && (sourcex < (image1->width)) && (sourcey < (image1->height))) {
						clean_texture->data.fl[2*k+0] = sourcex;
						clean_texture->data.fl[2*k+1] = sourcey;
						// cvSet2D( image1, y, x, cvGet2D( clean_nonthresh, (int)sourcey, (int)sourcex ) );
					}

					cvReleaseMat( &sourcepoint );
				}
				cvReleaseMat( &result );
				cvReleaseMat( &curp );
			}
			cvReleaseMat( &newcorners );
		}
		cvReleaseMat( &curpoints );
	}

	cvReleaseImage( &clean_nonthresh );

	printf("done.\n");

	cvSaveImage("fullwarp.jpg", image1);

	printf("Drawing subdivisions on warped image...");
	draw_subdiv( image1, delaunay, NULL, NULL, 0, NULL );
	// draw_subdiv( image1, delaunay, delaunay_points, source_points, count, status );
	printf("done.\n");
	
	cvSaveImage("edgeswarp.jpg", image1);

	cvReleaseImage(&image2);

	image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_COLOR);
	// cvCreateImage( cvGetSize(image2), IPL_DEPTH_8U, 3 );

	// cvCalcSubdivVoronoi2D( delaunay );
	printf("Drawing subdivisions on unwarped image...");
	// draw_subdiv( image2, delaunay, delaunay_points, dest_points, count, status );
	// draw_subdiv( image2, delaunay, NULL, NULL, 0, NULL );
	printf("done.\n");

	cvSaveImage("edges.jpg",image2);

	cvReleaseImage(&image1);
	cvFree(&source_points);
	cvFree(&dest_points);
	cvFree(&status);
	cvReleaseMemStorage(&storage);
	cvFree(&delaunay_points);

	cvReleaseImage(&image2);

	return 0;
}
void mcvGetIPM(const CvMat* inImage, CvMat* outImage,
               IPMInfo *ipmInfo, const CameraInfo *cameraInfo,
               list<CvPoint> *outPoints)
{
    //check input images types
    //CvMat inMat, outMat;
    //cvGetMat(inImage, &inMat);
    //cvGetMat(outImage, &outMat);
    //cout << CV_MAT_TYPE(inImage->type) << " " << CV_MAT_TYPE(FLOAT_MAT_TYPE) <<  " " << CV_MAT_TYPE(INT_MAT_TYPE)<<"\n";
    if (!(CV_ARE_TYPES_EQ(inImage, outImage) &&
          (CV_MAT_TYPE(inImage->type)==CV_MAT_TYPE(FLOAT_MAT_TYPE) ||
           (CV_MAT_TYPE(inImage->type)==CV_MAT_TYPE(INT_MAT_TYPE)))))
    {
        if(CV_ARE_TYPES_EQ(inImage, outImage)){
            cerr << "Types are equal: " << CV_MAT_TYPE(inImage->type) << "\n";
        }else{
            cerr << "Types are NOT equal: " << CV_MAT_TYPE(inImage->type) << "\n";
        }


        cerr << "Unsupported image types in mcvGetIPM";
        exit(1);
    }

    //get size of input image
    FLOAT u, v;
    v = inImage->height;
    u = inImage->width;

    //get the vanishing point
    FLOAT_POINT2D vp;
    vp = mcvGetVanishingPoint(cameraInfo);
    vp.y = MAX(0, vp.y);
    //vp.y = 30;

    //get extent of the image in the xfyf plane
    FLOAT_MAT_ELEM_TYPE eps = ipmInfo->vpPortion * v;//VP_PORTION*v;
    ipmInfo->ipmLeft = MAX(0, ipmInfo->ipmLeft);
    ipmInfo->ipmRight = MIN(u-1, ipmInfo->ipmRight);
    ipmInfo->ipmTop = MAX(vp.y+eps, ipmInfo->ipmTop);
    ipmInfo->ipmBottom = MIN(v-1, ipmInfo->ipmBottom);
    FLOAT_MAT_ELEM_TYPE uvLimitsp[] = {vp.x,
                                       ipmInfo->ipmRight, ipmInfo->ipmLeft, vp.x,
                                       ipmInfo->ipmTop, ipmInfo->ipmTop,   ipmInfo->ipmTop,  ipmInfo->ipmBottom};
    //{vp.x, u, 0, vp.x,
    //vp.y+eps, vp.y+eps, vp.y+eps, v};
    CvMat uvLimits = cvMat(2, 4, FLOAT_MAT_TYPE, uvLimitsp);

    //get these points on the ground plane
    CvMat * xyLimitsp = cvCreateMat(2, 4, FLOAT_MAT_TYPE);
    CvMat xyLimits = *xyLimitsp;
    mcvTransformImage2Ground(&uvLimits, &xyLimits,cameraInfo);


    //SHOW_MAT(xyLimitsp, "xyLImits");

    //get extent on the ground plane
    CvMat row1, row2;
    cvGetRow(&xyLimits, &row1, 0);
    cvGetRow(&xyLimits, &row2, 1);
    double xfMax, xfMin, yfMax, yfMin;
    cvMinMaxLoc(&row1, (double*)&xfMin, (double*)&xfMax, 0, 0, 0);
    cvMinMaxLoc(&row2, (double*)&yfMin, (double*)&yfMax, 0, 0, 0);

    INT outRow = outImage->height;
    INT outCol = outImage->width;

    FLOAT_MAT_ELEM_TYPE stepRow = (yfMax-yfMin)/outRow;
    FLOAT_MAT_ELEM_TYPE stepCol = (xfMax-xfMin)/outCol;

    //construct the grid to sample
    CvMat *xyGrid = cvCreateMat(2, outRow*outCol, FLOAT_MAT_TYPE);
    INT i, j;
    FLOAT_MAT_ELEM_TYPE x, y;
    //fill it with x-y values on the ground plane in world frame
    for (i=0, y=yfMax-.5*stepRow; i<outRow; i++, y-=stepRow)
        for (j=0, x=xfMin+.5*stepCol; j<outCol; j++, x+=stepCol)
        {
            CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 0, i*outCol+j) = x;
            CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 1, i*outCol+j) = y;
        }
    //get their pixel values in image frame
    CvMat *uvGrid = cvCreateMat(2, outRow*outCol, FLOAT_MAT_TYPE);
    mcvTransformGround2Image(xyGrid, uvGrid, cameraInfo);
    //now loop and find the nearest pixel value for each position
    //that's inside the image, otherwise put it zero

    //generic loop to work for both float and int matrix types

    if (CV_MAT_TYPE(inImage->type)==FLOAT_MAT_TYPE)
    {
        //test<int>();
        //MCV_GET_IPM(FLOAT_MAT_ELEM_TYPE)
        interpolation<FLOAT_MAT_ELEM_TYPE>(inImage, outImage, uvGrid,outCol, outRow, ipmInfo);
    }
    else
    {
        //test<double>();
        //MCV_GET_IPM(INT_MAT_ELEM_TYPE)
        interpolation<INT_MAT_ELEM_TYPE>(inImage, outImage, uvGrid,outCol, outRow, ipmInfo);
    }
    //return the ipm info
    ipmInfo->xLimits[0] = CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 0, 0);
    ipmInfo->xLimits[1] =
            CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 0, (outRow-1)*outCol+outCol-1);
    ipmInfo->yLimits[1] = CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 1, 0);
    ipmInfo->yLimits[0] =
            CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 1, (outRow-1)*outCol+outCol-1);
    ipmInfo->xScale = 1/stepCol;
    ipmInfo->yScale = 1/stepRow;
    ipmInfo->width = outCol;
    ipmInfo->height = outRow;

    //clean
    cvReleaseMat(&xyLimitsp);
    cvReleaseMat(&xyGrid);
    cvReleaseMat(&uvGrid);
}
template <class T>
void interpolation(const CvMat *inImage, CvMat *outImage, CvMat *uvGrid, int outCol, int outRow, const IPMInfo *ipmInfo, list<CvPoint>* outPoints){
    FLOAT_MAT_ELEM_TYPE ui, vi;
    //get mean of the input image
    CvScalar means = cvAvg(inImage);
    double mean = means.val[0];
    for (int i=0; i<outRow; i++)
        for (int j=0; j<outCol; j++)
        {
            /*get pixel coordinates*/

            ui = CV_MAT_ELEM(*uvGrid, FLOAT_MAT_ELEM_TYPE, 0, i*outCol+j);
            vi = CV_MAT_ELEM(*uvGrid, FLOAT_MAT_ELEM_TYPE, 1, i*outCol+j);

            if(i*outCol+j >  uvGrid->cols || i*outCol+j < 0)
            {
                cout << "PROBLEM 1" <<endl;
            }


            if(isnan(ui)){
                ui = 0;
            }
            if(isnan(vi)){
                vi=0;
            }

            /*check if out-of-bounds*/
            /*if (ui<0 || ui>u-1 || vi<0 || vi>v-1)*/
            if (ui < ipmInfo->ipmLeft || ui > ipmInfo->ipmRight ||
                    vi < ipmInfo->ipmTop || vi > ipmInfo->ipmBottom)
            {

                CV_MAT_ELEM(*outImage, T, i, j) = (T)mean;
            }
            /*not out of bounds, then get nearest neighbor*/
            else
            {

                /*Bilinear interpolation*/
                if (ipmInfo->ipmInterpolation == 0)
                {

                    int x1 = int(ui), x2 = int(ui+1);
                    int y1 = int(vi), y2 = int(vi+1);
                    float x = ui - x1, y = vi - y1;
                    float val = CV_MAT_ELEM(*inImage, T, y1, x1) * (1-x) * (1-y) +
                            CV_MAT_ELEM(*inImage, T, y1, x2) * x * (1-y) +
                            CV_MAT_ELEM(*inImage, T, y2, x1) * (1-x) * y +
                            CV_MAT_ELEM(*inImage, T, y2, x2) * x * y;
                    CV_MAT_ELEM(*outImage, T, i, j) =  (T)val;

                }
                /*nearest-neighbor interpolation*/
                else
                {
                    if(vi+.5 < inImage->rows && ui +.5 < inImage->cols)
                    {
                        CV_MAT_ELEM(*outImage, T, i, j) =
                                CV_MAT_ELEM(*inImage, T, int(vi+.5), int(ui+.5));
                    }
                    else
                        cout << "vi+.5 " << (vi+.5) << " inImage->rows " << inImage->rows << " ui +.5 "
                             << (ui +.5) << " inImage->cols " << inImage->cols <<endl;


                }
            }
            if (outPoints &&
                    (ui<ipmInfo->ipmLeft+10 || ui>ipmInfo->ipmRight-10 ||
                     vi<ipmInfo->ipmTop || vi>ipmInfo->ipmBottom-2) )
                outPoints->push_back(cvPoint(j, i));
        }
}
Example #17
void ProjectionModel::calculateProjection()
{
    if(intrinsic_matrix != 0 && distortion_coeffs != 0)
    {
        int corner_count = Chessboard::board_total;

        cvCvtColor(sourceImg, gray_image, CV_RGB2GRAY);

        int found = cvFindChessboardCorners(gray_image,
                                            board_sz,
                                            corners,
                                            &corner_count,
                                            CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
        if(!found)
        {
            return;
        }

        cvFindCornerSubPix(gray_image,
                           corners,
                           corner_count,
                           cvSize(11,11),
                           cvSize(-1,-1),
                           cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1));

        objPts[0].x = 0;
        objPts[0].y = 0;
        objPts[1].x = Chessboard::board_w - 1;
        objPts[1].y = 0;
        objPts[2].x = 0;
        objPts[2].y = Chessboard::board_h - 1;
        objPts[3].x = Chessboard::board_w - 1;
        objPts[3].y = Chessboard::board_h - 1;
        imgPts[3] = corners[0];
        imgPts[2] = corners[Chessboard::board_w - 1];
        imgPts[1] = corners[(Chessboard::board_h - 1) * Chessboard::board_w];
        imgPts[0] = corners[(Chessboard::board_h - 1) * Chessboard::board_w + Chessboard::board_w - 1];

        cvGetPerspectiveTransform(objPts, imgPts, H);

        for(int i = 0; i < 4; ++i)
        {
            CV_MAT_ELEM(*image_points, CvPoint2D32f, i, 0) = imgPts[i];
            CV_MAT_ELEM(*object_points, CvPoint3D32f, i, 0) = cvPoint3D32f(objPts[i].x, objPts[i].y, 0);
        }

        cvFindExtrinsicCameraParams2(object_points,
                                     image_points,
                                     intrinsic_matrix,
                                     distortion_coeffs,
                                     rotation_vector,
                                     translation_vector);

        cvRodrigues2(rotation_vector, rotation_matrix);

        for(int f = 0; f < 3; f++)
        {
            for(int c = 0; c < 3; c++)
            {
                fullMatrix[c * 4 + f] = rotation_matrix->data.fl[f * 3 + c];   //transposed
            }
        }

        fullMatrix[3] = 0.0;
        fullMatrix[7] = 0.0;
        fullMatrix[11] = 0.0;
        fullMatrix[12] = translation_vector->data.fl[0];
        fullMatrix[13] = translation_vector->data.fl[1];
        fullMatrix[14] = translation_vector->data.fl[2];
        fullMatrix[15] = 1.0;
    }
}
Example #18
void deblur_image(int image_num, int n, IplImage *result, IplImage *result_luck)
{
	cvSetZero(result);
	cvSetZero(result_luck);
	IplImage *trans[MAX_IMAGE];// the transformed results?
	IplImage *trans_luck[MAX_IMAGE];
	IplImage *blur[MAX_IMAGE];
	for (int i = 0; i < image_num; ++i)
	{
		trans[i] = cvCreateImage(image_size, IPL_DEPTH_8U, 3);
		cvWarpPerspective(images[i], trans[i], hom[i][n], CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
		// perspective-warp the image to obtain the transformed frame trans[i]
		trans_luck[i] = cvCreateImage(image_size, IPL_DEPTH_32F, 4);
		cvWarpPerspective(images_luck[i], trans_luck[i], hom[i][n], CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
		// images_luck holds the per-pixel luckiness score of each frame
		blur[i] = cvCreateImage(image_size, IPL_DEPTH_8U, 3);
		blur_function(trans[i], blur[i], hom[n-1][n], hom[n][n+1]);
	}
	
	
	for (int i = 0; i < image_num; ++i)
	{
		char wname[16];
		sprintf(wname, "Homography%d", i);
		cvNamedWindow(wname, CV_WINDOW_AUTOSIZE);
		cvMoveWindow(wname, i*50, i*50);
		cvShowImage(wname, trans[i]);
		sprintf(wname, "Blurred%d", i);
		cvNamedWindow(wname, CV_WINDOW_AUTOSIZE);
		cvMoveWindow(wname, i*50+100, i*50);
		cvShowImage(wname, blur[i]);
	}
	cvWaitKey(0);
	cvDestroyAllWindows();
	
	int grid_r = (image_size.height-PATCH_SIZE/2-1) / (PATCH_SIZE/2);
	int grid_c = (image_size.width-PATCH_SIZE/2-1) / (PATCH_SIZE/2);
	if (grid_r > 0 && grid_c > 0)
	{
		CvMat *patch = cvCreateMat(grid_r, grid_c, CV_64FC4);
		for (int i = 0; i < grid_r; ++i)
		{
			int y = (i+1)*(PATCH_SIZE/2);
			
			int t1 = clock();
			for (int j = 0; j < grid_c; ++j)
			{
				CvScalar res;
				int x = (j+1)*(PATCH_SIZE/2);
				
				
				if (deblur_patch(blur, trans_luck, image_num, n, x, y, &res) != 0)
				{
					printf("deblur_patch: %d:%d,%d failed.\n", n, x, y);
					res.val[0] = n;
					res.val[1] = x;
					res.val[2] = y;
					res.val[3] = 0;
					//copy_pixel(result, x, y, images[n], x, y);
				}
				
				/*
				res.val[0] = 2;
				res.val[1] = x;
				res.val[2] = y;
				res.val[3] = 100000;*/
				
				res.val[3] = exp(-res.val[3]/(2*SIGMA_W*SIGMA_W));
				
				CV_MAT_ELEM(*patch, CvScalar, i, j) = res;
			}
			int t2 = clock();
			
			printf("y:%d/%d  %d ms\n", y, image_size.height, (t2-t1)*1000/CLOCKS_PER_SEC);
		}
		
		cvNamedWindow("origin", CV_WINDOW_AUTOSIZE);
		cvShowImage("origin", images[n]);
		cvNamedWindow("result", CV_WINDOW_AUTOSIZE);
		
		// central region
		for (int i = 1; i < grid_r; ++i)
		{
			int miny = i*(PATCH_SIZE/2);
			for (int j = 1; j < grid_c; ++j)
			{
				CvScalar pres1 = CV_MAT_ELEM(*patch, CvScalar, i-1, j-1);
				CvScalar pres2 = CV_MAT_ELEM(*patch, CvScalar, i-1, j);
				CvScalar pres3 = CV_MAT_ELEM(*patch, CvScalar, i, j-1);
				CvScalar pres4 = CV_MAT_ELEM(*patch, CvScalar, i, j);
				int minx = j*(PATCH_SIZE/2);
				for (int y = 0; y < PATCH_SIZE/2; ++y)
					for (int x = 0; x < PATCH_SIZE/2; ++x)
					{
						CvScalar v[4];
						v[0] = cvGet2D(trans[(int)pres1.val[0]], (int)pres1.val[2]+y, (int)pres1.val[1]+x);
						v[1] = cvGet2D(trans[(int)pres2.val[0]], (int)pres2.val[2]+y, (int)pres2.val[1]+x-PATCH_SIZE/2);
						v[2] = cvGet2D(trans[(int)pres3.val[0]], (int)pres3.val[2]+y-PATCH_SIZE/2, (int)pres3.val[1]+x);
						v[3] = cvGet2D(trans[(int)pres4.val[0]], (int)pres4.val[2]+y-PATCH_SIZE/2, (int)pres4.val[1]+x-PATCH_SIZE/2);
						double w[4] = {pres1.val[3], pres2.val[3], pres3.val[3], pres4.val[3]};
						
						CvScalar pv = weighted_average(v, w, 4);
						cvSet2D(result, y+miny, x+minx, pv);
						
						/*
						printf("p %d %d\n", y+miny, x+minx);
						for (int a = 0; a < 4; ++a)
						{
							printf("v%d: %f %f %f w %g\n", a, v[a].val[0], v[a].val[1], v[a].val[2], w[a]);
						}
						printf("pv: %f %f %f\n", pv.val[0], pv.val[1], pv.val[2]);
						*/
					}
			}
			cvShowImage("result", result);
			cvWaitKey(20);
		}
		
		// special cases along the four borders
		for (int y = 0; y < PATCH_SIZE/2; ++y)
			for (int x = 0; x < PATCH_SIZE/2; ++x)
			{
				CvScalar pres = CV_MAT_ELEM(*patch, CvScalar, 0, 0);
				CvScalar pv = cvGet2D(trans[(int)pres.val[0]], (int)pres.val[2]+y-PATCH_SIZE/2, (int)pres.val[1]+x-PATCH_SIZE/2);
				cvSet2D(result, y, x, pv);
				
				pres = CV_MAT_ELEM(*patch, CvScalar, 0, grid_c-1);
				pv = cvGet2D(trans[(int)pres.val[0]], (int)pres.val[2]+y-PATCH_SIZE/2, (int)pres.val[1]+x);
				cvSet2D(result, y, grid_c*(PATCH_SIZE/2)+x, pv);
				
				pres = CV_MAT_ELEM(*patch, CvScalar, grid_r-1, 0);
				pv = cvGet2D(trans[(int)pres.val[0]], (int)pres.val[2]+y, (int)pres.val[1]+x-PATCH_SIZE/2);
				cvSet2D(result, grid_r*(PATCH_SIZE/2)+y, x, pv);
				
				pres = CV_MAT_ELEM(*patch, CvScalar, grid_r-1, grid_c-1);
				pv = cvGet2D(trans[(int)pres.val[0]], (int)pres.val[2]+y, (int)pres.val[1]+x);
				cvSet2D(result, grid_r*(PATCH_SIZE/2)+y, grid_c*(PATCH_SIZE/2)+x, pv);
			}
		for (int j = 1; j < grid_c; ++j)
		{
			CvScalar pres1 = CV_MAT_ELEM(*patch, CvScalar, 0, j-1);
			CvScalar pres2 = CV_MAT_ELEM(*patch, CvScalar, 0, j);
			CvScalar pres3 = CV_MAT_ELEM(*patch, CvScalar, grid_r-1, j-1);
			CvScalar pres4 = CV_MAT_ELEM(*patch, CvScalar, grid_r-1, j);
			int minx = j*(PATCH_SIZE/2);
			for (int y = 0; y < PATCH_SIZE/2; ++y)
				for (int x = 0; x < PATCH_SIZE/2; ++x)
				{
					CvScalar v[2];
					v[0] = cvGet2D(trans[(int)pres1.val[0]], (int)pres1.val[2]+y-PATCH_SIZE/2, (int)pres1.val[1]+x);
					v[1] = cvGet2D(trans[(int)pres2.val[0]], (int)pres2.val[2]+y-PATCH_SIZE/2, (int)pres2.val[1]+x-PATCH_SIZE/2);
					double w[2] = {pres1.val[3], pres2.val[3]};
					CvScalar pv = weighted_average(v, w, 2);
					cvSet2D(result, y, minx+x, pv);
					
					v[0] = cvGet2D(trans[(int)pres3.val[0]], (int)pres3.val[2]+y, (int)pres3.val[1]+x);
					v[1] = cvGet2D(trans[(int)pres4.val[0]], (int)pres4.val[2]+y, (int)pres4.val[1]+x-PATCH_SIZE/2);
					w[0] = pres3.val[3];
					w[1] = pres4.val[3];
					pv = weighted_average(v, w, 2);
					cvSet2D(result, grid_r*(PATCH_SIZE/2)+y, minx+x, pv);
				}
		}
		for (int i = 1; i < grid_r; ++i)
		{
			CvScalar pres1 = CV_MAT_ELEM(*patch, CvScalar, i-1, 0);
			CvScalar pres2 = CV_MAT_ELEM(*patch, CvScalar, i, 0);
			CvScalar pres3 = CV_MAT_ELEM(*patch, CvScalar, i-1, grid_c-1);
			CvScalar pres4 = CV_MAT_ELEM(*patch, CvScalar, i, grid_c-1);
			int miny = i*(PATCH_SIZE/2);
			for (int y = 0; y < PATCH_SIZE/2; ++y)
				for (int x = 0; x < PATCH_SIZE/2; ++x)
				{
					CvScalar v[2];
					v[0] = cvGet2D(trans[(int)pres1.val[0]], (int)pres1.val[2]+y, (int)pres1.val[1]+x-PATCH_SIZE/2);
					v[1] = cvGet2D(trans[(int)pres2.val[0]], (int)pres2.val[2]+y-PATCH_SIZE/2, (int)pres2.val[1]+x-PATCH_SIZE/2);
					double w[2] = {pres1.val[3], pres2.val[3]};
					CvScalar pv = weighted_average(v, w, 2);
					cvSet2D(result, miny+y, x, pv);
					
					v[0] = cvGet2D(trans[(int)pres3.val[0]], (int)pres3.val[2]+y, (int)pres3.val[1]+x);
					v[1] = cvGet2D(trans[(int)pres4.val[0]], (int)pres4.val[2]+y-PATCH_SIZE/2, (int)pres4.val[1]+x);
					w[0] = pres3.val[3];
					w[1] = pres4.val[3];
					pv = weighted_average(v, w, 2);
					cvSet2D(result, miny+y, grid_c*(PATCH_SIZE/2)+x, pv);
				}
		}
		cvShowImage("result", result);
		
		/*
		IplImage *res_diff = cvCreateImage(image_size, IPL_DEPTH_8U, 3);
		cvAbsDiff(result, images[n], res_diff);
		cvNamedWindow("diff", CV_WINDOW_AUTOSIZE);
		cvShowImage("diff", res_diff);*/
	
		char name[16];
		sprintf(name, "result%d.png", n);
		cvSaveImage(name, result, NULL);
		sprintf(name, "origin%d.png", n);
		cvSaveImage(name, images[n], NULL);
		
		cvReleaseMat(&patch);
	}
	
	for (int i = 0; i < image_num; ++i)
	{
		cvReleaseImage(&trans[i]);
		cvReleaseImage(&trans_luck[i]);
		cvReleaseImage(&blur[i]);
	}
}
Example #19
int main(int argc, char * argv[])
{
	if(argc < 2) {
		fprintf(stderr, "%s image1 image2\n", argv[0]);
		return 1;
	}

	char * im1fname = argv[1];
	char * im2fname = argv[2];

	IplImage * image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_GRAYSCALE);

	IplImage * eigenvalues = cvCreateImage(cvGetSize(image1), 32, 1);
	IplImage * temp = cvCreateImage(cvGetSize(image1), 32, 1);

	int count = MAX_COUNT;
	double quality = 0.5;
	// double min_distance = 2;
	double min_distance = 50;
	int block_size = 7;
	int use_harris = 0;
	int win_size = 10;
	int flags = 0;

	CvPoint2D32f * source_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));
	CvPoint2D32f * dest_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));
	CvPoint2D32f * delaunay_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));

	cvGoodFeaturesToTrack( image1, eigenvalues, temp, source_points, &count,
			quality, min_distance, 0, block_size, use_harris, 0.04 );

	printf("%d features\n",count);

	setbuf(stdout, NULL);

	printf("Finding corner subpix...");
	cvFindCornerSubPix( image1, source_points, count,
			cvSize(win_size,win_size), cvSize(-1,-1),
			cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
	printf("done.\n");

	cvReleaseImage(&eigenvalues);
	cvReleaseImage(&temp);

	IplImage * image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_GRAYSCALE);

	char * status = (char*)cvAlloc(sizeof(char)*MAX_COUNT);

	IplImage * pyramid = cvCreateImage( cvGetSize(image1), IPL_DEPTH_8U, 1 );
	IplImage * second_pyramid = cvCreateImage( cvGetSize(image2), IPL_DEPTH_8U, 1 );

	printf("Computing optical flow...");	
	cvCalcOpticalFlowPyrLK(image1, image2, pyramid, second_pyramid, source_points,
		dest_points, count, cvSize(win_size,win_size), 4, status, 0,
		cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03),
		flags);
	printf("done.\n");

	int num_matches = 0;
	int num_out_matches = 0;
	int max_dist = 30;
	int offset = 200;
	
	CvMemStorage * storage = cvCreateMemStorage(0);
	CvSubdiv2D * delaunay = cvCreateSubdivDelaunay2D( cvRect(0,0,image1->width,image1->height), storage);

	cvReleaseImage(&image1);
	cvReleaseImage(&image2);
	
	image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_COLOR);
	image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_COLOR);

	cvSet( image1, cvScalarAll(255) );

	std::map<CvPoint, CvPoint> point_lookup_map;
	std::vector<std::pair<CvPoint, CvPoint> > point_lookup;

	// put corners in the point lookup as going to themselves
	point_lookup_map[cvPoint(0,0)] = cvPoint(0,0);
	point_lookup_map[cvPoint(0,image1->height-1)] = cvPoint(0,image1->height-1);
	point_lookup_map[cvPoint(image1->width-1,0)] = cvPoint(image1->width-1,0);
	point_lookup_map[cvPoint(image1->width-1,image1->height-1)] = cvPoint(image1->width-1,image1->height-1);

	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,0), cvPoint(0,0)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,image1->height-1), cvPoint(0,image1->height-1)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,0), cvPoint(image1->width-1,0)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,image1->height-1), cvPoint(image1->width-1,image1->height-1)));

	printf("Inserting corners...");
	// put corners in the Delaunay subdivision
	for(unsigned int i = 0; i < point_lookup.size(); i++) {
		cvSubdivDelaunay2DInsert( delaunay, cvPointTo32f(point_lookup[i].first) );
	}
	printf("done.\n");

	CvSubdiv2DEdge proxy_edge;
	for(int i = 0; i < count; i++) {
		if(status[i]) {
			CvPoint source = cvPointFrom32f(source_points[i]);
			CvPoint dest = cvPointFrom32f(dest_points[i]);
	
			if((((int)fabs((double)(source.x - dest.x))) > max_dist) ||
				 (((int)fabs((double)(source.y - dest.y))) > max_dist)) {	
				num_out_matches++;
			}
			else if((dest.x >= 0) && (dest.y >= 0) && (dest.x < (image1->width)) && (dest.y < (image1->height))) {
				if(point_lookup_map.find(source) == point_lookup_map.end()) {
					num_matches++;
				
					point_lookup_map[source] = dest;
					point_lookup.push_back(std::pair<CvPoint,CvPoint>(source,dest));
					delaunay_points[i] = (cvSubdivDelaunay2DInsert( delaunay, cvPointTo32f(source) ))->pt;
					cvSetImageROI( image1, cvRect(source.x-8,source.y-8,8*2,8*2) );
					cvResetImageROI( image2 );
					cvGetRectSubPix( image2, image1, dest_points[i] );
				}
				/*
				cvSet2D( image1, source.y, source.x, cvGet2D( image2, dest.y, dest.x ) );
				cvSet2D( image1, source.y, source.x+1, cvGet2D( image2, dest.y, dest.x+1 ) );
				cvSet2D( image1, source.y, source.x-1, cvGet2D( image2, dest.y, dest.x-1 ) );
				cvSet2D( image1, source.y+1, source.x, cvGet2D( image2, dest.y+1, dest.x ) );
				cvSet2D( image1, source.y-1, source.x, cvGet2D( image2, dest.y-1, dest.x ) );
				cvSet2D( image1, source.y+1, source.x+1, cvGet2D( image2, dest.y+1, dest.x+1 ) );
				cvSet2D( image1, source.y-1, source.x-1, cvGet2D( image2, dest.y-1, dest.x-1 ) );
				cvSet2D( image1, source.y+1, source.x-1, cvGet2D( image2, dest.y+1, dest.x-1 ) );
				cvSet2D( image1, source.y-1, source.x+1, cvGet2D( image2, dest.y-1, dest.x+1 ) );
				*/

				// cvCircle( image1, source, 4, CV_RGB(255,0,0), 2, CV_AA );
				// cvCircle( image2, dest, 4, CV_RGB(255,0,0), 2, CV_AA );
			}

			/*
			cvSetImageROI( image1, cvRect(source.x-offset,source.y-offset,offset*2,offset*2) );
			cvSetImageROI( image2, cvRect(dest.x-offset,dest.y-offset,offset*2,offset*2) );
			cvNamedWindow("image1",0);
			cvNamedWindow("image2",0);
			cvShowImage("image1",image1);
			cvShowImage("image2",image2);
			printf("%d,%d -> %d,%d\n",source.x,source.y,dest.x,dest.y);
			cvWaitKey(0);
			cvDestroyAllWindows();
			*/
		}
	}
	printf("%d %d\n",num_matches,num_out_matches);
	printf("%d lookups\n",point_lookup_map.size());

	cvResetImageROI( image1 );

	cvSaveImage("sparse.jpg", image1);

	cvReleaseImage(&image1);
	image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_COLOR);
	cvSet( image1, cvScalarAll(255) );
	printf("Warping image...");

	CvSeqReader  reader;
	int total = delaunay->edges->total;
	int elem_size = delaunay->edges->elem_size;

	cvStartReadSeq( (CvSeq*)(delaunay->edges), &reader, 0 );

	std::vector<Triangle> trivec;
	std::vector<CvMat *> baryinvvec;

	for( int i = 0; i < total; i++ ) {
		CvQuadEdge2D* edge = (CvQuadEdge2D*)(reader.ptr);

		if( CV_IS_SET_ELEM( edge ))	{
			CvSubdiv2DEdge curedge = (CvSubdiv2DEdge)edge;
			CvSubdiv2DEdge t = curedge;
			Triangle temptri;
			int count = 0;
			
			// construct a triangle from this edge
			do {
				CvSubdiv2DPoint* pt = cvSubdiv2DEdgeOrg( t );
				if(count < 3) {
					pt->pt.x = pt->pt.x >= image1->width ? image1->width-1 : pt->pt.x;
					pt->pt.y = pt->pt.y >= image1->height ? image1->height-1 : pt->pt.y;
					pt->pt.x = pt->pt.x < 0 ? 0 : pt->pt.x;
					pt->pt.y = pt->pt.y < 0 ? 0 : pt->pt.y;

					temptri.points[count] = cvPointFrom32f( pt->pt );
				}
				else {
					printf("More than 3 edges\n");
				}
				count++;
				t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
			} while( t != curedge );
			
			// check that triangle is not already in
			if( std::find(trivec.begin(), trivec.end(), temptri) == trivec.end() ) {
				// push triangle in and draw
				trivec.push_back(temptri);
				cvLine( image1, temptri.points[0], temptri.points[1], CV_RGB(255,0,0), 1, CV_AA, 0 );
				cvLine( image1, temptri.points[1], temptri.points[2], CV_RGB(255,0,0), 1, CV_AA, 0 );
				cvLine( image1, temptri.points[2], temptri.points[0], CV_RGB(255,0,0), 1, CV_AA, 0 );

				// compute barycentric computation vector for this triangle
				CvMat * barycen = cvCreateMat( 3, 3, CV_32FC1 );
				CvMat * baryceninv = cvCreateMat( 3, 3, CV_32FC1 );

				barycen->data.fl[3*0+0] = temptri.points[0].x;
				barycen->data.fl[3*0+1] = temptri.points[1].x;
				barycen->data.fl[3*0+2] = temptri.points[2].x;
				barycen->data.fl[3*1+0] = temptri.points[0].y;
				barycen->data.fl[3*1+1] = temptri.points[1].y;
				barycen->data.fl[3*1+2] = temptri.points[2].y;
				barycen->data.fl[3*2+0] = 1;
				barycen->data.fl[3*2+1] = 1;
				barycen->data.fl[3*2+2] = 1;

				cvInvert( barycen, baryceninv, CV_LU );
				baryinvvec.push_back(baryceninv);

				cvReleaseMat( &barycen );
			}
		}

		CV_NEXT_SEQ_ELEM( elem_size, reader );
	}
	printf("%d triangles...", trivec.size());
	cvSaveImage("triangles.jpg", image1);
	
	cvSet( image1, cvScalarAll(255) );
	IplImage * clean_nonthresh = cvLoadImage( "conhull-clean.jpg", CV_LOAD_IMAGE_COLOR );

	// for each triangle
	for(unsigned int i = 0; i < trivec.size(); i++) {
		Triangle curtri = trivec[i];
		CvMat * curpoints = cvCreateMat( 1, 3, CV_32SC2 );
		Triangle target;
		std::map<CvPoint,CvPoint>::iterator piter[3];
		
		printf("Triangle %d / %d\n",i,trivec.size());
		bool is_corner = false;
		for(int j = 0; j < 3; j++) {
			/*
			curpoints->data.i[2*j+0] = curtri.points[j].x;
			curpoints->data.i[2*j+1] = curtri.points[j].y;
			*/
			CV_MAT_ELEM( *curpoints, CvPoint, 0, j ) = curtri.points[j];
			printf("%d,%d\n",curtri.points[j].x,curtri.points[j].y);
	
			/*	
			if((curtri.points[j] == cvPoint(0,0)) ||  (curtri.points[j] == cvPoint(0,image1->height)) ||(curtri.points[j] == cvPoint(image1->width,0)) ||(curtri.points[j] == cvPoint(image1->width,image1->height))) {
				is_corner = true;
				break;
			}
			*/

			for(unsigned int k = 0; k < point_lookup.size(); k++) {
				std::pair<CvPoint,CvPoint> thispair = point_lookup[k];
				if(thispair.first == curtri.points[j]) {
					target.points[j] = thispair.second;
					break;
				}
			}

			/*
			piter[j] = point_lookup_map.find(curtri.points[j]);
			if(piter[j] != point_lookup_map.end() ) {
				target.points[j] = piter[j]->second;
			}
			*/
		}
			
		// if((piter[0] != point_lookup_map.end()) && (piter[1] != point_lookup_map.end()) && (piter[2] != point_lookup_map.end())) {
		if(!is_corner) {
			CvMat * newcorners = cvCreateMat( 3, 3, CV_32FC1 );
			newcorners->data.fl[3*0+0] = target.points[0].x;
			newcorners->data.fl[3*0+1] = target.points[1].x;
			newcorners->data.fl[3*0+2] = target.points[2].x;
			newcorners->data.fl[3*1+0] = target.points[0].y;
			newcorners->data.fl[3*1+1] = target.points[1].y;
			newcorners->data.fl[3*1+2] = target.points[2].y;
			newcorners->data.fl[3*2+0] = 1;
			newcorners->data.fl[3*2+1] = 1;
			newcorners->data.fl[3*2+2] = 1;

			CvContour hdr;
			CvSeqBlock blk;
			CvRect trianglebound = cvBoundingRect( cvPointSeqFromMat(CV_SEQ_KIND_CURVE+CV_SEQ_FLAG_CLOSED, curpoints, &hdr, &blk), 1 );
			printf("Bounding box: %d,%d,%d,%d\n",trianglebound.x,trianglebound.y,trianglebound.width,trianglebound.height);
			for(int y = trianglebound.y; (y < (trianglebound.y + trianglebound.height)) && ( y < image1->height); y++) {
				for(int x = trianglebound.x; (x < (trianglebound.x + trianglebound.width)) && (x < image1->width); x++) {
					// check to see if we're inside this triangle
					/*
					CvPoint v0 = cvPoint( curtri.points[2].x - curtri.points[0].x, curtri.points[2].y - curtri.points[0].y );
					CvPoint v1 = cvPoint( curtri.points[1].x - curtri.points[0].x, curtri.points[1].y - curtri.points[0].y );
					CvPoint v2 = cvPoint( x - curtri.points[0].x, y - curtri.points[0].y );
					
					int dot00 = v0.x * v0.x + v0.y * v0. y;
					int dot01 = v0.x * v1.x + v0.y * v1. y;
					int dot02 = v0.x * v2.x + v0.y * v2. y;
					int dot11 = v1.x * v1.x + v1.y * v1. y;
					int dot12 = v1.x * v2.x + v1.y * v2. y;

					double invDenom = 1.0 / (double)(dot00 * dot11 - dot01 * dot01);
					double u = (double)(dot11 * dot02 - dot01 * dot12) * invDenom;
					double v = (double)(dot00 * dot12 - dot01 * dot02) * invDenom;
					*/

					CvMat * curp = cvCreateMat(3, 1, CV_32FC1);
					CvMat * result = cvCreateMat(3, 1, CV_32FC1);
					curp->data.fl[0] = x;
					curp->data.fl[1] = y;
					curp->data.fl[2] = 1;
					cvMatMul( baryinvvec[i], curp, result );
					// double u = result->data.fl[0]/result->data.fl[2];
					// double v = result->data.fl[1]/result->data.fl[2];
			

					if( (result->data.fl[0] > 0) && (result->data.fl[1] > 0) && (fabs(1.0 - (result->data.fl[0]+result->data.fl[1]+result->data.fl[2])) <= 0.01) ) {
					// if((u > 0) || (v > 0) /*&& ((u +v) < 1)*/ ) {
						// printf("Barycentric: %f %f %f\n", result->data.fl[0], result->data.fl[1], result->data.fl[2]);
						// this point is inside this triangle
						// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
						//	trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);

						CvMat * sourcepoint = cvCreateMat(3, 1, CV_32FC1);
						cvMatMul( newcorners, result, sourcepoint );
						double sourcex = sourcepoint->data.fl[0]/*/sourcepoint->data.fl[2]*/;
						double sourcey = sourcepoint->data.fl[1]/*/sourcepoint->data.fl[2]*/;
						if((sourcex >= 0) && (sourcey >= 0) && (sourcex < (image1->width)) && (sourcey < (image1->height))) {
							// printf("%d,%d %d,%d\n",x,y,(int)sourcex,(int)sourcey);
							cvSet2D( image1, y, x, cvGet2D( clean_nonthresh, (int)sourcey, (int)sourcex ) );
						}
	
						/*
						if((i == 143) && (y == 3577) && (x > 2055) && (x < 2087)) {
							printf("%d: %f, %f, %f\t%f, %f, %f\n",x,result->data.fl[0],result->data.fl[1],result->data.fl[2],
									sourcepoint->data.fl[0],sourcepoint->data.fl[1],sourcepoint->data.fl[2]);
						}
						*/
	
						cvReleaseMat( &sourcepoint );
						
						// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
						//		trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);

					}
					cvReleaseMat( &result );
					cvReleaseMat( &curp );
				}
			}
			cvReleaseMat( &newcorners );
		}
		cvReleaseMat( &curpoints );
	}
	/*
	for(int y = 0; y < image1->height; y++) {
		for(int x = 0; x < image1->width; x++) {
			CvMat * curp = cvCreateMat(3, 1, CV_32FC1);
			CvMat * result = cvCreateMat(3, 1, CV_32FC1);
			curp->data.fl[0] = x;
			curp->data.fl[1] = y;
			curp->data.fl[2] = 1;
			for(unsigned int i = 0; i < baryinvvec.size(); i++) {
				cvMatMul( baryinvvec[i], curp, result );
				double u = result->data.fl[0]/result->data.fl[2];
				double v = result->data.fl[1]/result->data.fl[2];
				if((u > 0) && (v > 0) && (u + v < 1)) {
					// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
					//		trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);

					break;
				}
			}
			cvReleaseMat( &result );
			cvReleaseMat( &curp );
		}
	}
	*/

	cvReleaseImage( &clean_nonthresh );

#ifdef OLD_BUSTED
	for(int y = 0; y < image1->height; y++) {
		for(int x = 0; x < image1->width; x++) {
			CvSubdiv2DPointLocation locate_result;
			CvSubdiv2DEdge on_edge;
			CvSubdiv2DPoint * on_vertex;
			CvPoint curpoint = cvPoint( x, y );
			locate_result = cvSubdiv2DLocate( delaunay, cvPointTo32f( curpoint ),
				&on_edge, &on_vertex );
			if( (locate_result != CV_PTLOC_OUTSIDE_RECT) && (locate_result != CV_PTLOC_ERROR) ) {
				if( locate_result == CV_PTLOC_VERTEX ) { // this point is on a vertex
					for(int i = 0; i < count; i++) {
						if(((on_vertex->pt).x == delaunay_points[i].x) && ((on_vertex->pt).y == delaunay_points[i].y)) {
							cvSet2D( image1, y, x, cvGet2D( image2, cvPointFrom32f(dest_points[i]).y, cvPointFrom32f(dest_points[i]).x ) );
							break;
						}
					}
				}
				else if( locate_result == CV_PTLOC_ON_EDGE ) { // this point is on an edge
					CvSubdiv2DPoint* org_pt;
					CvSubdiv2DPoint* dst_pt;
					CvPoint org_pt_warp;
					CvPoint dst_pt_warp;
						
					org_pt = cvSubdiv2DEdgeOrg(on_edge);
					dst_pt = cvSubdiv2DEdgeDst(on_edge);

					for(int i = 0; i < count; i++) {
						if(((org_pt->pt).x == delaunay_points[i].x) && ((org_pt->pt).y == delaunay_points[i].y)) {
							org_pt_warp = cvPointFrom32f(dest_points[i]);
						}
						if(((dst_pt->pt).x == delaunay_points[i].x) && ((dst_pt->pt).y == delaunay_points[i].y)) {
							dst_pt_warp = cvPointFrom32f(dest_points[i]);
						}
					}

					// compute vector length of original edge and current point
					double original_length;
					double cur_length; 
					if( (int)((org_pt->pt).x) == curpoint.x ) { // vertical line
						original_length = fabs((org_pt->pt).y - (dst_pt->pt).y);
						cur_length = fabs((org_pt->pt).y - curpoint.y);
					}
					else if( (int)((org_pt->pt).y) == curpoint.y ) { // horizontal line
						original_length = fabs((org_pt->pt).x - (dst_pt->pt).x);
						cur_length = fabs((org_pt->pt).x - curpoint.x);
					}
					else { // sloped line
				 		original_length = sqrt(pow((org_pt->pt).x - (dst_pt->pt).x, 2.0) + pow((org_pt->pt).y - (dst_pt->pt).y, 2.0));
						cur_length = sqrt(pow((org_pt->pt).x - curpoint.x, 2.0) + pow((org_pt->pt).y - curpoint.y, 2.0));
					}
					// compute ratio of this point on the edge
					double ratio = cur_length / original_length;
					// copy this point from the destination edge
					CvPoint point_in_original;
					int warped_x = (int)(org_pt_warp.x - dst_pt_warp.x);
					int warped_y = (int)(org_pt_warp.y - dst_pt_warp.y);
					if( org_pt_warp.x == curpoint.x ) { // vertical line
						point_in_original.y = (int)(org_pt_warp.y + (ratio * (org_pt_warp.y - dst_pt_warp.y)));
						point_in_original.x = org_pt_warp.x;
					}
					else if(org_pt_warp.y == curpoint.y) { // horizontal line
						point_in_original.x = (int)(org_pt_warp.x + (ratio * (org_pt_warp.x - dst_pt_warp.x)));
						point_in_original.y = org_pt_warp.y;
					}
					else { // sloped line
						double destination_length = sqrt(pow((org_pt_warp).x - (dst_pt_warp).x, 2.0) + pow((org_pt_warp).y - (dst_pt_warp).y, 2.0));
						double scaled_length = ratio * destination_length;
						double dest_angle = atan(fabs( (double)warped_y / (double)warped_x ));
						double xdist = scaled_length * cos(dest_angle);
						double ydist = scaled_length * sin(dest_angle);
						xdist = warped_x > 0 ? xdist : xdist * -1;
						ydist = warped_y > 0 ? ydist : ydist * -1;
						point_in_original.x = (int)( org_pt_warp.x + xdist);
						point_in_original.y = (int)( org_pt_warp.y + ydist);
					}
					
					if((point_in_original.x >= 0) && (point_in_original.y >= 0) && (point_in_original.x < (image1->width)) && (point_in_original.y < (image1->height))) {
						cvSet2D( image1, y, x, cvGet2D( image2, point_in_original.y, point_in_original.x ) );
					}
					else {
						printf("Edge point outside image\n");
					}
					// cvSet2D( image1, y, x, cvGet2D( image2, (int)(org_pt_warp.x + (ratio * (org_pt_warp.x - dst_pt_warp.x))), 
					//			(int)(org_pt_warp.y + (ratio * (org_pt_warp.y - dst_pt_warp.y))) ) );
				}
				else if( locate_result == CV_PTLOC_INSIDE ) { // this point is inside a facet (triangle)
					/*
					printf("Point inside facet: %d, %d\n",curpoint.x,curpoint.y);
					int count = 0;
					CvPoint * origins = (CvPoint*)malloc(sizeof(CvPoint)*3);
					CvSubdiv2DEdge t = on_edge;
					// count number of edges
					do {
						CvSubdiv2DPoint* pt = cvSubdiv2DEdgeOrg( t );
						if(count < 3) {
							origins[count] = cvPoint( cvRound(pt->pt.x), cvRound(pt->pt.y));
							printf("%d,%d\t",origins[count].x,origins[count].y);
						}
						count++;
						t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
					} while(t != on_edge);
					printf("\n");

					free(origins);
					*/
				}
			}
		}
	}
#endif // OLD_BUSTED
	printf("done.\n");

	cvSaveImage("fullwarp.jpg", image1);

	printf("Drawing subdivisions on warped image...");
	draw_subdiv( image1, delaunay, NULL, NULL, 0, NULL );
	// draw_subdiv( image1, delaunay, delaunay_points, source_points, count, status );
	printf("done.\n");
	
	cvSaveImage("edgeswarp.jpg", image1);

	cvReleaseImage(&image2);

	image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_COLOR);
	// cvCreateImage( cvGetSize(image2), IPL_DEPTH_8U, 3 );

	// cvCalcSubdivVoronoi2D( delaunay );
	printf("Drawing subdivisions on unwarped image...");
	draw_subdiv( image2, delaunay, delaunay_points, dest_points, count, status );
	// draw_subdiv( image2, delaunay, NULL, NULL, 0, NULL );
	printf("done.\n");

	cvSaveImage("edges.jpg",image2);

	cvReleaseImage(&image1);
	cvFree(&source_points);
	cvFree(&dest_points);
	cvFree(&status);
	cvReleaseMemStorage(&storage);
	cvFree(&delaunay_points);

	cvReleaseImage(&image2);

	return 0;
}
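/*
 * A note on the warp loop above: baryinvvec[i] appears to hold the inverse of a
 * 3x3 matrix whose columns are the destination triangle's vertices in homogeneous
 * form, so multiplying it by (x, y, 1)^T yields barycentric weights (w0, w1, w2)
 * that sum to 1; applying the same weights to the source-vertex matrix
 * (newcorners) gives the pixel to copy from.  The helper below is a minimal,
 * self-contained sketch of that relationship -- a hypothetical illustration,
 * not part of the program above.
 */
static CvPoint2D32f map_through_triangle( CvPoint2D32f dst[3], CvPoint2D32f src[3],
                                          float x, float y )
{
    /* columns are the triangle vertices in homogeneous coordinates */
    float dstdata[9] = { dst[0].x, dst[1].x, dst[2].x,
                         dst[0].y, dst[1].y, dst[2].y,
                         1.f,      1.f,      1.f };
    float srcdata[9] = { src[0].x, src[1].x, src[2].x,
                         src[0].y, src[1].y, src[2].y,
                         1.f,      1.f,      1.f };
    float invdata[9], wdata[3], pdata[3] = { x, y, 1.f }, sdata[3];
    CvMat dstm = cvMat( 3, 3, CV_32FC1, dstdata );
    CvMat srcm = cvMat( 3, 3, CV_32FC1, srcdata );
    CvMat inv  = cvMat( 3, 3, CV_32FC1, invdata );
    CvMat w    = cvMat( 3, 1, CV_32FC1, wdata );
    CvMat p    = cvMat( 3, 1, CV_32FC1, pdata );
    CvMat s    = cvMat( 3, 1, CV_32FC1, sdata );

    cvInvert( &dstm, &inv, CV_LU );  /* barycentric basis of the destination triangle */
    cvMatMul( &inv, &p, &w );        /* weights (w0, w1, w2); inside the triangle iff all lie in [0,1] */
    cvMatMul( &srcm, &w, &s );       /* the same weights applied to the source triangle */
    return cvPoint2D32f( sdata[0], sdata[1] );
}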
int main(int argc, char *argv[])
{
	if (argc != 6) {
		printf("\nERROR: too few parameters\n");
		help();
		return -1;
	}
	help();
	//INPUT PARAMETERS:
	int board_w = atoi(argv[1]);
	int board_h = atoi(argv[2]);
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	CvMat *intrinsic = (CvMat *) cvLoad(argv[3]);
	CvMat *distortion = (CvMat *) cvLoad(argv[4]);
	IplImage *image = 0, *gray_image = 0;
	if ((image = cvLoadImage(argv[5])) == 0) {
		printf("Error: Couldn't load %s\n", argv[5]);
		return -1;
	}
	gray_image = cvCreateImage(cvGetSize(image), 8, 1);
	cvCvtColor(image, gray_image, CV_BGR2GRAY);

	//UNDISTORT OUR IMAGE
	IplImage *mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	IplImage *mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	cvInitUndistortMap(intrinsic, distortion, mapx, mapy);
	IplImage *t = cvCloneImage(image);
	cvRemap(t, image, mapx, mapy);

	//GET THE CHECKERBOARD ON THE PLANE
	cvNamedWindow("Checkers");
	CvPoint2D32f *corners = new CvPoint2D32f[board_n];
	int corner_count = 0;
	int found = cvFindChessboardCorners(image,
										board_sz,
										corners,
										&corner_count,
										CV_CALIB_CB_ADAPTIVE_THRESH |
										CV_CALIB_CB_FILTER_QUADS);
	if (!found) {
		printf
			("Couldn't aquire checkerboard on %s, only found %d of %d corners\n",
			 argv[5], corner_count, board_n);
		return -1;
	}
	//Get Subpixel accuracy on those corners
	cvFindCornerSubPix(gray_image, corners, corner_count,
					   cvSize(11, 11), cvSize(-1, -1),
					   cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30,
									  0.1));

	//GET THE IMAGE AND OBJECT POINTS:
	//Object points are at (r,c): (0,0), (board_w-1,0), (0,board_h-1), (board_w-1,board_h-1)
	//That means corners are at: corners[r*board_w + c]
	CvPoint2D32f objPts[4], imgPts[4];
	objPts[0].x = 0;
	objPts[0].y = 0;
	objPts[1].x = board_w - 1;
	objPts[1].y = 0;
	objPts[2].x = 0;
	objPts[2].y = board_h - 1;
	objPts[3].x = board_w - 1;
	objPts[3].y = board_h - 1;
	imgPts[0] = corners[0];
	imgPts[1] = corners[board_w - 1];
	imgPts[2] = corners[(board_h - 1) * board_w];
	imgPts[3] = corners[(board_h - 1) * board_w + board_w - 1];

	//DRAW THE POINTS in order: B,G,R,YELLOW
	cvCircle(image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0, 0, 255), 3);
	cvCircle(image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0, 255, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255, 0, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255, 255, 0), 3);

	//DRAW THE FOUND CHECKERBOARD
	cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
	cvShowImage("Checkers", image);

	//FIND THE HOMOGRAPHY
	CvMat *H = cvCreateMat(3, 3, CV_32F);
	CvMat *H_invt = cvCreateMat(3, 3, CV_32F);
	cvGetPerspectiveTransform(objPts, imgPts, H);

	//LET THE USER ADJUST THE Z HEIGHT OF THE VIEW
	float Z = 25;
	int key = 0;
	IplImage *birds_image = cvCloneImage(image);
	cvNamedWindow("Birds_Eye");
	while (key != 27) {			//escape key stops
		CV_MAT_ELEM(*H, float, 2, 2) = Z;
//     cvInvert(H,H_invt); //If you want to invert the homography directly
//     cvWarpPerspective(image,birds_image,H_invt,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS );
		//USE HOMOGRAPHY TO REMAP THE VIEW
		cvWarpPerspective(image, birds_image, H,
						  CV_INTER_LINEAR + CV_WARP_INVERSE_MAP +
						  CV_WARP_FILL_OUTLIERS);
		cvShowImage("Birds_Eye", birds_image);
		key = cvWaitKey();
		if (key == 'u')
			Z += 0.5;
		if (key == 'd')
			Z -= 0.5;
	}

	//SHOW ROTATION AND TRANSLATION VECTORS
	CvMat *image_points = cvCreateMat(4, 1, CV_32FC2);
	CvMat *object_points = cvCreateMat(4, 1, CV_32FC3);
	for (int i = 0; i < 4; ++i) {
		CV_MAT_ELEM(*image_points, CvPoint2D32f, i, 0) = imgPts[i];
		CV_MAT_ELEM(*object_points, CvPoint3D32f, i, 0) =
			cvPoint3D32f(objPts[i].x, objPts[i].y, 0);
	}

	CvMat *RotRodrigues = cvCreateMat(3, 1, CV_32F);
	CvMat *Rot = cvCreateMat(3, 3, CV_32F);
	CvMat *Trans = cvCreateMat(3, 1, CV_32F);
	cvFindExtrinsicCameraParams2(object_points, image_points,
								 intrinsic, distortion, RotRodrigues, Trans);
	cvRodrigues2(RotRodrigues, Rot);

	//SAVE AND EXIT
	cvSave("Rot.xml", Rot);
	cvSave("Trans.xml", Trans);
	cvSave("H.xml", H);
	cvInvert(H, H_invt);
	cvSave("H_invt.xml", H_invt);	//Bottom row of H invert is horizon line
	return 0;
}
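/*
 * A note on the bird's-eye warp above: H maps chessboard-plane coordinates to
 * image pixels, and cvWarpPerspective is called with CV_WARP_INVERSE_MAP, so
 * each output pixel (x, y) is filled from the source location H*(x, y, 1)^T
 * after the perspective divide.  Overwriting H(2,2) with Z rescales that
 * divide, which is what the 'u'/'d' keys exploit to change the apparent
 * viewing height.  The helper below is an illustrative sketch of the per-pixel
 * mapping only (assumptions: H is CV_32F and the divisor w stays positive),
 * not how cvWarpPerspective is implemented internally.
 */
static CvPoint2D32f apply_homography( const CvMat *H, float x, float y )
{
    float u = CV_MAT_ELEM( *H, float, 0, 0 )*x + CV_MAT_ELEM( *H, float, 0, 1 )*y + CV_MAT_ELEM( *H, float, 0, 2 );
    float v = CV_MAT_ELEM( *H, float, 1, 0 )*x + CV_MAT_ELEM( *H, float, 1, 1 )*y + CV_MAT_ELEM( *H, float, 1, 2 );
    float w = CV_MAT_ELEM( *H, float, 2, 0 )*x + CV_MAT_ELEM( *H, float, 2, 1 )*y + CV_MAT_ELEM( *H, float, 2, 2 );
    /* a larger H(2,2) means a larger w, so the same output pixel samples a scaled-down source coordinate */
    return cvPoint2D32f( u / w, v / w );
}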
void airGest::filterFlow( void ) {
	//~ std::cout << "in filterFlow " << ", ";
	float xFlow, yFlow;
					 
	Mat currFlow = Mat( OPTFLW_FRAME_WIDTH/FEATURE_STEP,
						OPTFLW_FRAME_HEIGHT/FEATURE_STEP,
						CV_32FC2 );
	
	vector<Mat> sampledChannels;
	split( currFlow, sampledChannels );				
	
	CvMat flowMat = flowMap;	
	float xMin = CV_MAT_ELEM( flowMat, CvPoint2D32f, FEATURE_STEP/2, FEATURE_STEP/2).x;
	float xMax = CV_MAT_ELEM( flowMat, CvPoint2D32f, FEATURE_STEP/2, FEATURE_STEP/2).x;
	float yMin = CV_MAT_ELEM( flowMat, CvPoint2D32f, FEATURE_STEP/2, FEATURE_STEP/2).y;
	float yMax = CV_MAT_ELEM( flowMat, CvPoint2D32f, FEATURE_STEP/2, FEATURE_STEP/2).y;
	for( int y = FEATURE_STEP/2; y < canvas.rows; y += FEATURE_STEP ) {
        for( int x = FEATURE_STEP/2; x < canvas.cols; x += FEATURE_STEP ) {
            CvPoint2D32f fxy = CV_MAT_ELEM( flowMat, CvPoint2D32f, y, x);
            
            // zero out sub-threshold motion; the (fxy.x != fxy.x) test discards NaN values
            fxy.x = ( ( fxy.x > -2.0 && fxy.x < 2.0 ) || ( fxy.x != fxy.x ) )? 0.00: fxy.x;
            fxy.y = ( ( fxy.y > -2.0 && fxy.y < 2.0 ) || ( fxy.y != fxy.y ) )? 0.00: fxy.y;
            
            sampledChannels[PLANE_XCMP].at<float>( x/FEATURE_STEP, y/FEATURE_STEP ) = fxy.x;
            sampledChannels[PLANE_YCMP].at<float>( x/FEATURE_STEP, y/FEATURE_STEP ) = fxy.y;
            
            if( fxy.x < xMin ) xMin = fxy.x;
            else if( fxy.x > xMax ) xMax = fxy.x;
            
            if( fxy.y < yMin ) yMin = fxy.y;
            else if( fxy.y > yMax ) yMax = fxy.y;
            
		}
	}
	
	float xAbsMax = ( xMax > -xMin )? xMax: -xMin;
	float yAbsMax = ( yMax > -yMin )? yMax: -yMin;
	
	//std::cout << "Filter : xMax/yMax -- ["
	//		  << xMin << ", " << xMax << ", "
	//		  << yMin << ", " << yMax << "] => " 
	//		  << xAbsMax << "," << yAbsMax << "\t\t";
			  
	//std::cout << sampledChannels[PLANE_XCMP] << "\n"
	//		  << sampledChannels[PLANE_YCMP] << "\n";
			  
	vector<Mat> hAccFlow;
	split( accFlow, hAccFlow );
	
	hAccFlow[PLANE_XCMP] += sampledChannels[PLANE_XCMP];
	hAccFlow[PLANE_YCMP] += sampledChannels[PLANE_YCMP];	
	
	flowHistory.push_back( sampledChannels[PLANE_XCMP] );	//first push X
    flowHistory.push_back( sampledChannels[PLANE_YCMP] );	//then push Y
    if( flowHistory.size() > 2 * QUEUESIZE_FLOWHIST ) {
		//now remove the first element
		hAccFlow[PLANE_XCMP] -= flowHistory[0];		//subtract the oldest X plane from accFlow
		hAccFlow[PLANE_YCMP] -= flowHistory[1];		//subtract the oldest Y plane from accFlow
		
		//remove those planes from history
		flowHistory.pop_front();
		flowHistory.pop_front();
	}
	merge( hAccFlow, accFlow );
	//std::cout << "historySize#" << flowHistory.size() << "\n";
	//std::cout << "history = \n" << accFlow;
	
	Mat motionDir = Mat( OPTFLW_FRAME_WIDTH/FEATURE_STEP,
						 OPTFLW_FRAME_HEIGHT/FEATURE_STEP,
						 CV_32FC1 );
						 
	//~ float xMotion[OPTFLW_FRAME_HEIGHT/FEATURE_STEP][OPTFLW_FRAME_WIDTH/FEATURE_STEP];
	
	Mat xMotion( OPTFLW_FRAME_HEIGHT/FEATURE_STEP, OPTFLW_FRAME_WIDTH/FEATURE_STEP, CV_32FC1, Scalar( 0 ));
	xMotion = hAccFlow[PLANE_XCMP];
	for( int y = 0; y < accFlow.rows; y++ ) {
		for( int x = 0; x < accFlow.cols; x++ ) {
			//~ float angle = atan2f( hAccFlow[PLANE_YCMP].at<float>( y, x ),
								  //~ hAccFlow[PLANE_XCMP].at<float>( y, x ) );
			//~ float tmp = hAccFlow[PLANE_XCMP].at<float>( y, x );
			//~ xMotion.at<float>( y, x ) = tmp;
			
			//~ angle = ( angle < 0 )? 2.0*22.0/7.0 + angle: angle;
			//~ if( angle != angle ) angle = 0.00;	//avoid nan
			//~ motionDir.at<float>( y, x ) = angle;
		}
	}
	//~ std::cout << "::calculating decision::" << ", ";
	decision = 0.00;
	for( int y = 0; y < OPTFLW_FRAME_HEIGHT/FEATURE_STEP; y++ ) {
		for( int x = 0; x < OPTFLW_FRAME_WIDTH/FEATURE_STEP; x++ ) {
			// decision += xMotionWeights[y][x] * xMotion[y][x];
			//~ decision += (*xMotionWeights[y*OPTFLW_FRAME_WIDTH/FEATURE_STEP+x]) * xMotion.at<float>(y,x);//[y][x];
			decision += *(xMotionWeights+y*OPTFLW_FRAME_WIDTH/FEATURE_STEP+x) * xMotion.at<float>(y,x);//[y][x];
			// std::cout << xMotion[y][x] << ", ";
		}
		//std::cout << "\n";
	}
	
	std::cout << "decision = " << decision << "\t";
	
	decision = ( decision < -50.00 ) ? -1.00: 
			   ( decision > +50.00 ) ? +1.00: 0.00;
	
	std::cout << "actual decision = " << decision << "\t";;
	
	Mat sampledVector = Mat( 1, motionDir.rows * motionDir.cols, CV_32FC1 );
	for( int rowId = 0; rowId < motionDir.rows; rowId++ ) {
		motionDir.row( rowId ).copyTo( sampledVector.colRange( rowId*motionDir.cols, rowId*motionDir.cols + motionDir.cols ) );
	}
	//std::cout << "Sampled vector : " << sampledVector << "\n";
}
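/*
 * filterFlow() above maintains a sliding-window sum of the sampled flow planes:
 * each frame the new X and Y planes are pushed onto flowHistory and added into
 * accFlow, and once the history exceeds QUEUESIZE_FLOWHIST pairs the oldest
 * pair is subtracted and popped, so accFlow always equals the sum over the
 * window without re-summing every frame.  Below is a minimal standalone sketch
 * of that accumulator (hypothetical class and names, assuming all planes are
 * CV_32FC1 and share the same size).
 */
#include <deque>
#include <opencv2/core/core.hpp>

class SlidingFlowSum {
public:
    SlidingFlowSum( int windowSize, cv::Size planeSize )
        : window( windowSize ),
          sum( cv::Mat::zeros( planeSize, CV_32FC1 ) ) {}

    // Add the newest plane; once the window is full, evict the oldest one.
    void push( const cv::Mat &plane ) {
        sum += plane;
        history.push_back( plane.clone() );   // clone so later writes to 'plane' don't corrupt the sum
        if( (int)history.size() > window ) {
            sum -= history.front();
            history.pop_front();
        }
    }

    const cv::Mat &accumulated() const { return sum; }

private:
    int window;
    cv::Mat sum;
    std::deque<cv::Mat> history;
};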
Exemple #22
0
static void
cvTsCalcHist( IplImage** _images, CvHistogram* hist, IplImage* _mask, int* channels )
{
    int x, y, k, cdims;
    union
    {
        float* fl;
        uchar* ptr;
    }
    plane[CV_MAX_DIM];
    int nch[CV_MAX_DIM];
    int dims[CV_MAX_DIM];
    int uniform = CV_IS_UNIFORM_HIST(hist);
    CvSize img_size = cvGetSize(_images[0]);
    CvMat images[CV_MAX_DIM], mask = cvMat(1,1,CV_8U);
    int img_depth = _images[0]->depth;

    cdims = cvGetDims( hist->bins, dims );

    cvZero( hist->bins );

    for( k = 0; k < cdims; k++ )
    {
        cvGetMat( _images[k], &images[k] );
        nch[k] = _images[k]->nChannels;
    }

    if( _mask )
        cvGetMat( _mask, &mask );

    for( y = 0; y < img_size.height; y++ )
    {
        const uchar* mptr = _mask ? &CV_MAT_ELEM(mask, uchar, y, 0 ) : 0;

        if( img_depth == IPL_DEPTH_8U )
            for( k = 0; k < cdims; k++ )
                plane[k].ptr = &CV_MAT_ELEM(images[k], uchar, y, 0 ) + channels[k];
        else
            for( k = 0; k < cdims; k++ )
                plane[k].fl = &CV_MAT_ELEM(images[k], float, y, 0 ) + channels[k];

        for( x = 0; x < img_size.width; x++ )
        {
            float val[CV_MAX_DIM];
            int idx[CV_MAX_DIM];
            
            if( mptr && !mptr[x] )
                continue;
            if( img_depth == IPL_DEPTH_8U )
                for( k = 0; k < cdims; k++ )
                    val[k] = plane[k].ptr[x*nch[k]];
            else
                for( k = 0; k < cdims; k++ )
                    val[k] = plane[k].fl[x*nch[k]];

            idx[cdims-1] = -1;

            if( uniform )
            {
                for( k = 0; k < cdims; k++ )
                {
                    double v = val[k], lo = hist->thresh[k][0], hi = hist->thresh[k][1];
                    idx[k] = cvFloor((v - lo)*dims[k]/(hi - lo));
                    if( idx[k] < 0 || idx[k] >= dims[k] )
                        break;
                }
            }
            else
            {
                for( k = 0; k < cdims; k++ )
                {
                    float v = val[k];
                    float* t = hist->thresh2[k];
                    int j, n = dims[k];

                    for( j = 0; j <= n; j++ )
                        if( v < t[j] )
                            break;
                    if( j <= 0 || j > n )
                        break;
                    idx[k] = j-1;
                }
            }

            if( k < cdims )
                continue;

            (*(float*)cvPtrND( hist->bins, idx ))++;
        }
    }
}
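/*
 * In the uniform branch above, the bin index along each dimension is a plain
 * linear rescaling of the value into [0, dims): idx = floor((v - lo) * dims /
 * (hi - lo)), with anything outside the [lo, hi) range rejected.  The helper
 * below is a small standalone restatement of that formula (hypothetical
 * function, using the same lo/hi convention as hist->thresh).
 */
static int uniform_bin_index( double v, double lo, double hi, int bins )
{
    int idx = cvFloor( (v - lo)*bins/(hi - lo) );
    /* -1 signals that the value falls outside the histogram range */
    return ( idx < 0 || idx >= bins ) ? -1 : idx;
}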