Example #1
void AffineTransformPatch(IplImage* src, IplImage* dst, CvAffinePose pose)
{
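	// Warp src's ROI by the pose: rotate by phi, scale by (lambda1, lambda2),
	// rotate by (theta - phi), then write the central half of the result into dst.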
	CvRect src_large_roi = cvGetImageROI(src);

	IplImage* temp = cvCreateImage(cvSize(src_large_roi.width, src_large_roi.height), IPL_DEPTH_32F, src->nChannels);
	cvSetZero(temp);
	IplImage* temp2 = cvCloneImage(temp);
	CvMat* rotation_phi = cvCreateMat(2, 3, CV_32FC1);

	CvSize new_size = cvSize(cvRound(temp->width*pose.lambda1), cvRound(temp->height*pose.lambda2));
	IplImage* temp3 = cvCreateImage(new_size, IPL_DEPTH_32F, src->nChannels);

	cvConvertScale(src, temp);
	cvResetImageROI(temp);


	cv2DRotationMatrix(cvPoint2D32f(temp->width/2, temp->height/2), pose.phi, 1.0, rotation_phi);
	cvWarpAffine(temp, temp2, rotation_phi);

	cvSetZero(temp);

	cvResize(temp2, temp3);

	cv2DRotationMatrix(cvPoint2D32f(temp3->width/2, temp3->height/2), pose.theta - pose.phi, 1.0, rotation_phi);
	cvWarpAffine(temp3, temp, rotation_phi);

	cvSetImageROI(temp, cvRect(temp->width/2 - src_large_roi.width/4, temp->height/2 - src_large_roi.height/4,
		src_large_roi.width/2, src_large_roi.height/2));
	cvConvertScale(temp, dst);
	cvReleaseMat(&rotation_phi);

	cvReleaseImage(&temp3);
	cvReleaseImage(&temp2);
	cvReleaseImage(&temp);
}
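A minimal usage sketch for AffineTransformPatch, assuming CvAffinePose carries the phi/theta/lambda1/lambda2 fields used above; the file name and pose values are illustrative. Note that dst must be half the size of src's ROI, matching the final cvSetImageROI above.

CvAffinePose pose;
pose.phi = 30.0f;      // first rotation, in degrees
pose.theta = 10.0f;    // net rotation, in degrees
pose.lambda1 = 0.8f;   // horizontal scale
pose.lambda2 = 1.2f;   // vertical scale

IplImage* patch = cvLoadImage("patch.png", CV_LOAD_IMAGE_GRAYSCALE);
IplImage* warped = cvCreateImage(cvSize(patch->width/2, patch->height/2),
                                 patch->depth, patch->nChannels);
AffineTransformPatch(patch, warped, pose);
cvReleaseImage(&warped);
cvReleaseImage(&patch);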
Example #2
void the_project::project_init()
{
	car_of_pro = new the_car();


	//camera  480*640

	for_cam = cvCreateCameraCapture(1);
	for_video = cvCreateFileCapture("test.avi");
	image_size = cvSize((int)cvGetCaptureProperty(for_cam, CV_CAP_PROP_FRAME_WIDTH),
	                    (int)cvGetCaptureProperty(for_cam, CV_CAP_PROP_FRAME_HEIGHT));
	wr1 = cvCreateVideoWriter("record_ori.avi",CV_FOURCC('X','V','I','D') ,15,image_size);
	wr2 = cvCreateVideoWriter("record_cha.avi",CV_FOURCC('X','V','I','D') ,15,image_size);

	newpoints[0]=cvPoint2D32f(0,0);
	newpoints[1]=cvPoint2D32f(0,image_size.height);
	newpoints[2]=cvPoint2D32f(image_size.width,image_size.height);
	newpoints[3]=cvPoint2D32f(image_size.width,0);

	red_min=200;
	rg_max=100;
	rb_max=100;
	green_min=200;
	gb_max=100;
	gr_max=100;

}
Example #3
void GenerateAffineTransformFromPose(CvSize size, CvAffinePose pose, CvMat* transform)
{
	CvMat* temp = cvCreateMat(3, 3, CV_32FC1);
	CvMat* final = cvCreateMat(3, 3, CV_32FC1);
	cvmSet(temp, 2, 0, 0.0f);
	cvmSet(temp, 2, 1, 0.0f);
	cvmSet(temp, 2, 2, 1.0f);

	CvMat rotation;
	cvGetSubRect(temp, &rotation, cvRect(0, 0, 3, 2));

	cv2DRotationMatrix(cvPoint2D32f(size.width/2, size.height/2), pose.phi, 1.0, &rotation);
	cvCopy(temp, final);

	cvmSet(temp, 0, 0, pose.lambda1);
	cvmSet(temp, 0, 1, 0.0f);
	cvmSet(temp, 1, 0, 0.0f);
	cvmSet(temp, 1, 1, pose.lambda2);
	cvmSet(temp, 0, 2, size.width/2*(1 - pose.lambda1));
	cvmSet(temp, 1, 2, size.height/2*(1 - pose.lambda2));
	cvMatMul(temp, final, final);

	cv2DRotationMatrix(cvPoint2D32f(size.width/2, size.height/2), pose.theta - pose.phi, 1.0, &rotation);
	cvMatMul(temp, final, final);

	cvGetSubRect(final, &rotation, cvRect(0, 0, 3, 2));
	cvCopy(&rotation, transform);

	cvReleaseMat(&temp);
	cvReleaseMat(&final);
}
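A hedged sketch of applying the generated 2x3 matrix, assuming src and dst are same-size IplImages and pose is a CvAffinePose prepared by the caller:

CvMat* transform = cvCreateMat(2, 3, CV_32FC1);
GenerateAffineTransformFromPose(cvGetSize(src), pose, transform);
cvWarpAffine(src, dst, transform);
cvReleaseMat(&transform);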
int ofxSurfObjCorners(IpPairVec & matches,const ofPoint src_crn[4],ofPoint dst_crn[4]) {
    double h[9];
    CvMat _h = cvMat(3,3,CV_64F,h);
    vector<CvPoint2D32f> pt1,pt2;
    CvMat _pt1,_pt2;

    int n = (int)(matches.size());
    if(n<4)return 0;

    pt1.resize(n);
    pt2.resize(n);

    for(int i=0; i<n; i++) {
        pt1[i] = cvPoint2D32f(matches[i].second.x,matches[i].second.y);
        pt2[i] = cvPoint2D32f(matches[i].first.x,matches[i].first.y);
    }
    _pt1 = cvMat(1,n,CV_32FC2,&pt1[0]);
    _pt2 = cvMat(1,n,CV_32FC2,&pt2[0]);

    // Find the homography between the two point sets and map the corners.
    if(!cvFindHomography(&_pt1,&_pt2,&_h,CV_RANSAC,5))return 0;
    for(int i=0;i<4;i++){
        double x = (double)src_crn[i].x;
        double y = (double)src_crn[i].y;
        double Z = 1./(h[6]*x + h[7]*y + h[8]);
        double X = (h[0]*x + h[1]*y + h[2])*Z;
        double Y = (h[3]*x + h[4]*y + h[5])*Z;
        dst_crn[i].set(cvRound(X),cvRound(Y));
    }

    return 1;
}
void SimpleImageProjector::project(IplImage* dst, A4PreciseDetectedRecord dstRecord)
{
	CvPoint2D32f dstCorners[4];
	dstCorners[0] = cvPoint2D32f(dstRecord.UL.x, dstRecord.UL.y);
	dstCorners[1] = cvPoint2D32f(dstRecord.UR.x, dstRecord.UR.y);
	dstCorners[2] = cvPoint2D32f(dstRecord.DL.x, dstRecord.DL.y);
	dstCorners[3] = cvPoint2D32f(dstRecord.DR.x, dstRecord.DR.y);
	// Map the stored source corners onto the detected corners, then warp.
	cvGetPerspectiveTransform(corners, dstCorners, transformMat);
	cvWarpPerspective(projection, dst, transformMat, CV_INTER_LINEAR);
}
 void cameraInfoCb(const sensor_msgs::CameraInfoConstPtr& msg){
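     // K is the row-major 3x3 intrinsic matrix: fx = K[0], fy = K[4],
     // cx = K[2], cy = K[5].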
     cameraInfo.focalLength = cvPoint2D32f(msg->K.elems[0], msg->K.elems[4]);
     cameraInfo.imageHeight = msg->height;
     cameraInfo.imageWidth = msg->width;
     cameraInfo.pitch  =   (1.0)*CV_PI/180;   // 1 degree, in radians
     cameraInfo.yaw = 0 ;
     cameraInfo.opticalCenter = cvPoint2D32f(msg->K.elems[2], msg->K.elems[5]);
     cameraInfo.cameraHeight = 1430;
     //camera_sub_.shutdown();
 }
Example #7
void Lines::Line::drawInfiniteLine(IplImage* img, CvScalar color)
{

	CvPoint pt1 = cvPointFrom32f(cvPoint2D32f(-1000, -1000*Slope + Intercept));
	CvPoint pt2 = cvPointFrom32f(cvPoint2D32f(1000, 1000*Slope + Intercept));
	if (isVertical == false)
		cvLine(img, pt1, pt2, color, 1, 8, 0);
	else
		cvLine(img, cvPoint(point1.x, 0), cvPoint(point1.x, 1000), color, 1, 8, 0);
}
SimpleImageProjector::SimpleImageProjector(char* pathToProjection)
{
	projection = cvLoadImage(pathToProjection);
	if (projection == nullptr) {
		throw std::runtime_error("Cannot load projection image");
	}
	transformMat = cvCreateMat(3, 3, CV_32FC1);
	corners[0] = cvPoint2D32f(0, 0);
	corners[1] = cvPoint2D32f(projection->width-1, 0);
	corners[2] = cvPoint2D32f(0, projection->height-1);
	corners[3] = cvPoint2D32f(projection->width-1, projection->height-1);
}
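A brief usage sketch combining the constructor with project() above; the detector call and file name are hypothetical stand-ins for whatever fills A4PreciseDetectedRecord:

SimpleImageProjector projector("overlay.png");
A4PreciseDetectedRecord record = detector.getRecord();  // hypothetical detector
projector.project(frame, record);                       // frame: IplImage* to draw into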
Example #9
CV_IMPL void
cvInitSubdivDelaunay2D( CvSubdiv2D * subdiv, CvRect rect )
{
    float big_coord = 3.f * MAX( rect.width, rect.height );
    CvPoint2D32f ppA, ppB, ppC;
    CvSubdiv2DPoint *pA, *pB, *pC;
    CvSubdiv2DEdge edge_AB, edge_BC, edge_CA;
    float rx = (float) rect.x;
    float ry = (float) rect.y;

    CV_FUNCNAME( "cvSubdivDelaunay2DInit" );

    __BEGIN__;

    if( !subdiv )
        CV_ERROR( CV_StsNullPtr, "" );

    cvClearSet( (CvSet *) (subdiv->edges) );
    cvClearSet( (CvSet *) subdiv );

    subdiv->quad_edges = 0;
    subdiv->recent_edge = 0;
    subdiv->is_geometry_valid = 0;

    subdiv->topleft = cvPoint2D32f( rx, ry );
    subdiv->bottomright = cvPoint2D32f( rx + rect.width, ry + rect.height );

    /* Three auxiliary points forming a triangle large enough to contain the
       whole rect; every subsequently inserted point lies inside it. */
    ppA = cvPoint2D32f( rx + big_coord, ry );
    ppB = cvPoint2D32f( rx, ry + big_coord );
    ppC = cvPoint2D32f( rx - big_coord, ry - big_coord );

    pA = cvSubdiv2DAddPoint( subdiv, ppA, 0 );
    pB = cvSubdiv2DAddPoint( subdiv, ppB, 0 );
    pC = cvSubdiv2DAddPoint( subdiv, ppC, 0 );

    edge_AB = cvSubdiv2DMakeEdge( subdiv );
    edge_BC = cvSubdiv2DMakeEdge( subdiv );
    edge_CA = cvSubdiv2DMakeEdge( subdiv );

    cvSubdiv2DSetEdgePoints( edge_AB, pA, pB );
    cvSubdiv2DSetEdgePoints( edge_BC, pB, pC );
    cvSubdiv2DSetEdgePoints( edge_CA, pC, pA );

    cvSubdiv2DSplice( edge_AB, cvSubdiv2DSymEdge( edge_CA ));
    cvSubdiv2DSplice( edge_BC, cvSubdiv2DSymEdge( edge_AB ));
    cvSubdiv2DSplice( edge_CA, cvSubdiv2DSymEdge( edge_BC ));

    subdiv->recent_edge = edge_AB;

    
    __END__;
}
Example #10
void StereoDisplay::Draw()												// Draw Our Scene
{  

    IplImage* camera_image;
    GLfloat z=-20.0;
    if( show_right_ == TRUE)
      camera_image = camera1_->QueryFrame();
    else
      camera_image = camera0_->QueryFrame();

    show_right_ = !show_right_;

    glLoadIdentity();										// Reset The Modelview Matrix
    glBegin(GL_QUADS);										// Begin drawing the image texture
	   // Front Face
	   glTexCoord2f(1.0f, 1.0f); glVertex3f( 11.0f,  8.3f, z);
	   glTexCoord2f(0.0f, 1.0f); glVertex3f(-11.0f,  8.3f, z);
	   glTexCoord2f(0.0f, 0.0f); glVertex3f(-11.0f, -8.3f, z);
	   glTexCoord2f(1.0f, 0.0f); glVertex3f( 11.0f, -8.3f, z);
    glEnd();												// Done drawing texture
	 
    glFlush ();													// Flush The GL Rendering Pipeline
    
    if( true == recording_  )
    {
        cvLogPolar( camera_image, logpolarframe_, 
                    cvPoint2D32f(camera_image->width/2,camera_image->height/2), 
                    40, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS);
        cvWriteFrame( writer_, logpolarframe_);
    }
   
}
Example #11
void SCSM::TransformPoint(int angle, float scale, CvPoint orig_point,
    CvPoint &transform_point, CvPoint2D32f &transform_vector) {
    double origvec_x = static_cast<double>(orig_point.x - fixedCenter.x);
    double origvec_y = static_cast<double>(orig_point.y - fixedCenter.y);
    TransformVector(angle, scale, cvPoint2D32f(origvec_x, origvec_y),
        transform_point, transform_vector);
}
Example #12
void CalcFourierDescriptorCoeff(CvSeq* seq_pts, int n_fourier,CvSeq* seq_fourier)
{
	int count = seq_pts->total;
	double *coeff_cos, *coeff_sin;
	coeff_cos = (double*)malloc(count*sizeof(double));
	coeff_sin = (double*)malloc(count*sizeof(double));
	int i;
	for(i = 0; i < count; i++)
	{
		coeff_sin[i] = sin(2*i*CV_PI/count);
		coeff_cos[i] = cos(2*i*CV_PI/count);
	}

	cvClearSeq(seq_fourier);
	for(int u = 0; u < n_fourier; u++)
	{
		CvPoint2D32f point_coeff = cvPoint2D32f(0, 0);
		for(i = 0; i < count; i += 4)	// note: samples every 4th contour point, presumably for speed
		{
			CvPoint* pt = (CvPoint*)cvGetSeqElem(seq_pts, i);
			point_coeff.x += (float)(pt->x*coeff_cos[(i*u)%count] + pt->y*coeff_sin[(i*u)%count]);
			point_coeff.y += (float)(pt->y*coeff_cos[(i*u)%count] - pt->x*coeff_sin[(i*u)%count]);
		}
		//point_coeff.x/=count;
		//point_coeff.y/=count;
		cvSeqPush(seq_fourier, &point_coeff);
	} 
	free(coeff_cos);
	free(coeff_sin);
}
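A usage sketch under assumed inputs (binary_img is a pre-thresholded single-channel image); note that cvFindContours modifies its input, and the output sequence element type matches the CvPoint2D32f coefficients pushed above:

CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* contour = NULL;
cvFindContours(binary_img, storage, &contour, sizeof(CvContour),
               CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
CvSeq* fourier = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint2D32f), storage);
if (contour && contour->total > 0)
    CalcFourierDescriptorCoeff(contour, 32, fourier);  // keep the first 32 coefficients
cvReleaseMemStorage(&storage);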
Example #13
void moCalibrationModule::triangulate() {
	// We first triangulate all the surfacePoints.
	// Afterwards, in transform mode, when a new touch occurs we can
	// simply look up the triangle in which the touch was performed
	// and get the barycentric parameters of the touch in that triangle.
	// We then use these to compute the on-screen coordinate of the touch.
	moPointList screenPoints  = this->property("screenPoints").asPointList();
	moPointList surfacePoints = this->property("surfacePoints").asPointList();
	assert(screenPoints.size() == surfacePoints.size());

	this->delaunayToScreen.clear();
    this->subdiv = cvCreateSubdivDelaunay2D(this->rect, this->storage);

	// Add all the surface points we collected to the subdivision and use the
	// delaunayToScreen map to associate each with its corresponding screen point.
	moPointList::iterator it, its;
	for(it = surfacePoints.begin(), its = screenPoints.begin(); it != surfacePoints.end();  it++, its++) {
		CvPoint2D32f fp = cvPoint2D32f(it->x, it->y);
		CvSubdiv2DPoint *delaunayPoint = cvSubdivDelaunay2DInsert(subdiv, fp);
		this->delaunayToScreen[delaunayPoint] = (*its);
	}

	this->retriangulate = false;
	this->notifyGui();
}
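A sketch of the transform-mode lookup described in the comment above, using cvSubdiv2DLocate; touch is an assumed input point, and the barycentric interpolation is left as a comment:

CvSubdiv2DEdge edge = 0;
CvSubdiv2DPoint* vertex = NULL;
CvSubdiv2DPointLocation loc =
    cvSubdiv2DLocate(subdiv, cvPoint2D32f(touch.x, touch.y), &edge, &vertex);
if (loc == CV_PTLOC_INSIDE || loc == CV_PTLOC_ON_EDGE) {
    // Walk the enclosing triangle's vertices via cvSubdiv2DEdgeOrg/Dst and
    // cvSubdiv2DGetEdge, map each through delaunayToScreen, and interpolate
    // the screen coordinate with the touch's barycentric weights.
}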
Example #14
/*
 * Return center point of rectangle.
 */
VALUE
rb_center(VALUE self)
{
  CvRect *rect = CVRECT(self);  
  return cCvPoint2D32f::new_object(cvPoint2D32f((float)rect->x + (float)rect->width / 2.0,
                                                (float)rect->y + (float)rect->height / 2.0));
}
Example #15
int main(int argc, const char * argv[]) {
    CvCapture* capture = cvCreateFileCapture( argv[1] );
    if (!capture) return -1;
    
    IplImage* bgr_frame = cvQueryFrame( capture );
    double fps = cvGetCaptureProperty( capture ,  CV_CAP_PROP_FPS );
    CvSize size = cvSize(
        (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH),
        (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT)
    );
    CvVideoWriter* writer = cvCreateVideoWriter( argv[2], CV_FOURCC('M', 'J', 'P', 'G'), fps, size);
    IplImage* logpolar_frame = cvCreateImage(size, IPL_DEPTH_8U, 3);
    
    while ( (bgr_frame = cvQueryFrame(capture)) != NULL ) {
        cvLogPolar(bgr_frame, logpolar_frame,
                   cvPoint2D32f(bgr_frame->width/2, bgr_frame->height/2),
                   40,
                   CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS );
        cvWriteFrame(writer, logpolar_frame);
    }
    cvReleaseVideoWriter( &writer );
    cvReleaseImage( &logpolar_frame );
    cvReleaseCapture( &capture );
    return 0;
}
void com_update()
{
	float thresholdy, track_rate = 0.2;
	int through;
	cp0c = cpc;
	thresholdy = boundh/2-r1;
	if(com_change_goal && thresholdy>bpc.y) {
		com_change_goal = 0;
		
		thresholdy = bpc.y;
		
		if(bv.y>-150) {
			gpc.x = bpc.x;
			gpc.y = bpc.y;
		}
		else {
			gpc.y = r1+rand()%((int)thresholdy-r1);
			gpc.x = bpc.x + bv.x*(bpc.y-gpc.y)/(-bv.y);
			through = gpc.x/boundw;
			gpc.x = Abs( (through+through%2)*boundw-gpc.x );
			gpc.y -= 15;
		}
	}
	else if(bpc.y>boundh/2 && bv.y>=0) {
		gpc = cvPoint2D32f(boundw/2, criticr);
	}
	cpc.x += track_rate*(gpc.x-cpc.x);
	cpc.y += track_rate*(gpc.y-cpc.y);
}
void setPoints() {
    perspectivecount = 0;
    cvSetMouseCallback("monitor", perspective_mouse);
    while (perspectivecount <= 3) {
        int temp = perspectivecount;
        cvWaitKey(30);
        if (temp < perspectivecount) {
            originalPoints[perspectivecount-1] = cvPoint2D32f(perspectiveX, perspectiveY);
        }
    }
    CvPoint2D32f temp;
    for (int i = 0 ; i < 3 ; ++i) {
        for (int j = 0 ; j < 3-i; ++j) {
            if (originalPoints[j].y > originalPoints[j+1].y) {
                temp = originalPoints[j];
                originalPoints[j]=originalPoints[j+1];
                originalPoints[j+1]=temp;
            }
        }
    }
    if (originalPoints[0].x > originalPoints[1].x) {
        temp = originalPoints[0];
        originalPoints[0]=originalPoints[1];
        originalPoints[1]=temp;
    }
    if (originalPoints[2].x > originalPoints[3].x) {
        temp = originalPoints[2];
        originalPoints[2]=originalPoints[3];
        originalPoints[3]=temp;
    }
}
void easymouse(int event, int x, int y, int flag, void *imgv)
{
	// record mouse position
	mp = cvPoint2D32f(x, y);
	// while mouse click
	if(event==CV_EVENT_LBUTTONDOWN) {
		if(!pauseflag) {
			if(sqr(pbuttonp.x-x)+sqr(pbuttonp.y-y)<sqr(r1+buttonr)) {
				pauseflag = 1;
			}
		}
		else {
			if(mp.x>rbutton1.x && mp.x<rbutton3.x) {
				if(mp.y>rbutton1.y && mp.y<rbutton3.y) {
					restartflag = 1;
					pauseflag   = 0;
				}
				else if(mp.y>ebutton1.y && mp.y<ebutton3.y) {
					escflag   = 1;
					pauseflag = 0;
				}
				else if(mp.y>sbutton1.y && mp.y<sbutton3.y) {
					pauseflag = 0;
				}
			}
		}
	}
}
Example #19
int main( int argc, char* argv[] ) {

    // Choose a negative floating point number.  Round it, take its absolute
    // value, and then take its ceiling and floor.
    double a = -1.23;
    printf( "CV_IABS(cvRound(a)) = %d\n", CV_IABS(cvRound(a)) ); // CV_IABS is integer-only
    printf( "cvRound(a) = %d\n", cvRound(a) );
    printf( "cvCeil(a) = %d\n", cvCeil(a) );
    printf( "cvFloor(a) = %d\n", cvFloor(a) );


    // Generate some random numbers.
    CvRNG rngState = cvRNG(-1);
    for (int i = 0; i < 10; i++) {
        printf( "%u %f\n", cvRandInt( &rngState ),
                           cvRandReal( &rngState ) );
    }

    // Create a floating point CvPoint2D32f and convert it to an integer
    // CvPoint.
    CvPoint2D32f point_float1 = cvPoint2D32f(1.0, 2.0);
    CvPoint point_int1 = cvPointFrom32f( point_float1 );

    // Convert a CvPoint to a CvPoint2D32f.
    CvPoint point_int2 = cvPoint(3, 4);
    CvPoint2D32f point_float2 = cvPointTo32f( point_int2 );

    return 0;
}
Example #20
int main( int argc, char** argv )
{
    IplImage* img;
    if( argc < 2 )
    {
        fprintf( stderr, "Usage: %s <image_file>\n", argv[0] );
        exit( 1 );
    }
    img = cvLoadImage( argv[1] );
    CvMat* affine = cvCreateMat( 2, 3, CV_64FC1 );
    cvCreateAffine( affine, cvRect32f( 0, 0, 1/4.0, 1/4.0, 45 ), cvPoint2D32f( 0, 0 ) );
    IplImage* transformed;

    transformed = cvCreateAffineImage( img, affine, CV_AFFINE_SAME );
    cvNamedWindow( "hoge" );
    cvShowImage( "hoge", transformed );
    cvWaitKey( 0 );
    cvReleaseImage( &transformed );

    CvPoint origin;
    transformed = cvCreateAffineImage( img, affine, CV_AFFINE_FULL, &origin );
    cvNamedWindow( "hoge" );
    cvShowImage( "hoge", transformed );
    printf( "origin.x = %d origin.y = %d\n", origin.x, origin.y );
    cvWaitKey( 0 );
    cvReleaseImage( &transformed );

    IplImage *mask = cvCreateAffineMask( img, affine, CV_AFFINE_FULL );
    cvNamedWindow( "hoge" );
    //cvShowImage( "hoge", mask ); // 1 is 1/255...
    //cvWaitKey( 0 );
    cvReleaseImage( &mask );

    cvReleaseMat( &affine );
    cvReleaseImage( &img );
    return 0;
}
Example #21
//! Find homography between matched points and translate src_corners to dst_corners
int translateCorners(IpPairVec &matches, const CvPoint src_corners[4], CvPoint dst_corners[4])
{
#ifndef LINUX
  double h[9];
  cv::Mat _h = cv::Mat(3, 3, CV_64F, h);
  std::vector<CvPoint2D32f> pt1, pt2;
  cv::Mat _pt1, _pt2;
  
  int n = (int)matches.size();
  if( n < 4 ) return 0;

  // Set vectors to correct size
  pt1.resize(n);
  pt2.resize(n);

  // Copy Ipoints from match vector into cvPoint vectors
  for(int i = 0; i < n; i++ )
  {
    pt1[i] = cvPoint2D32f(matches[i].second.x, matches[i].second.y);
    pt2[i] = cvPoint2D32f(matches[i].first.x, matches[i].first.y);
  }
  _pt1 = cv::Mat(1, n, CV_32FC2, &pt1[0] );
  _pt2 = cv::Mat(1, n, CV_32FC2, &pt2[0] );

  // Find the homography (transformation) between the two sets of points
  cv::Mat hMat = cv::findHomography(_pt1, _pt2, CV_RANSAC, 5.0);
  if(hMat.empty())
    return 0;
  hMat.copyTo(_h);  // copy the 3x3 result into h[] for the corner loop below

  // Translate src_corners to dst_corners using homography
  for(int i = 0; i < 4; i++ )
  {
    double x = src_corners[i].x, y = src_corners[i].y;
    double Z = 1./(h[6]*x + h[7]*y + h[8]);
    double X = (h[0]*x + h[1]*y + h[2])*Z;
    double Y = (h[3]*x + h[4]*y + h[5])*Z;
    dst_corners[i] = cvPoint(cvRound(X), cvRound(Y));
  }
#endif
  return 1;
}
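A usage sketch in the OpenSURF style: map a reference image's outline into the scene and draw it (frame, w, and h are illustrative):

CvPoint src_corners[4] = { cvPoint(0, 0), cvPoint(w, 0),
                           cvPoint(w, h), cvPoint(0, h) };
CvPoint dst_corners[4];
if (translateCorners(matches, src_corners, dst_corners))
  for (int i = 0; i < 4; i++)
    cvLine(frame, dst_corners[i], dst_corners[(i + 1) % 4],
           cvScalar(255, 255, 255), 2);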
Example #22
void run(void)
{
    char win[] = "source";
    int i;
    CvRect rect = { 0, 0, 600, 600 };
    CvMemStorage* storage;
    CvSubdiv2D* subdiv;
    IplImage* img;
    CvScalar active_facet_color, delaunay_color, voronoi_color, bkgnd_color;

    active_facet_color = CV_RGB( 255, 0, 0 );
    delaunay_color  = CV_RGB( 0,0,0);
    voronoi_color = CV_RGB(0, 180, 0);
    bkgnd_color = CV_RGB(255,255,255);

    img = cvCreateImage( cvSize(rect.width,rect.height), 8, 3 );
    cvSet( img, bkgnd_color, 0 );

    cvNamedWindow( win, 1 );

    storage = cvCreateMemStorage(0);
    subdiv = init_delaunay( storage, rect );

    printf("Delaunay triangulation will be build now interactively.\n"
           "To stop the process, press any key\n\n");

    for( i = 0; i < 200; i++ )
    {
        CvPoint2D32f fp = cvPoint2D32f( (float)(rand()%(rect.width-10)+5),
                                        (float)(rand()%(rect.height-10)+5));

        locate_point( subdiv, fp, img, active_facet_color );
        cvShowImage( win, img );

        if( cvWaitKey( 100 ) >= 0 )
            break;

        cvSubdivDelaunay2DInsert( subdiv, fp );
        cvCalcSubdivVoronoi2D( subdiv );
        cvSet( img, bkgnd_color, 0 );
        draw_subdiv( img, subdiv, delaunay_color, voronoi_color );
        cvShowImage( win, img );

        if( cvWaitKey( 100 ) >= 0 )
            break;
    }

    cvSet( img, bkgnd_color, 0 );
    paint_voronoi( subdiv, img );
    cvShowImage( win, img );

    cvWaitKey(0);

    cvReleaseMemStorage( &storage );
    cvReleaseImage(&img);
    cvDestroyWindow( win );
}
Example #23
void COpenCVMFCView::OnWarpAffine()
{
	// TODO: Add your command handler code here

	CvPoint2D32f srcTri[3], dstTri[3];
	CvMat* rot_mat  = cvCreateMat(2,3,CV_32FC1);
	CvMat* warp_mat = cvCreateMat(2,3,CV_32FC1);
	IplImage *src=0, *dst=0;

	src = cvCloneImage(workImg);
	cvFlip(src);
	dst = cvCloneImage(src);
	dst->origin = src->origin;
	cvZero(dst);

	//COMPUTE WARP MATRIX
	srcTri[0].x = 0;                          //src Top left
	srcTri[0].y = 0;
	srcTri[1].x = (float) src->width - 1;     //src Top right
	srcTri[1].y = 0;
	srcTri[2].x = 0;                          //src Bottom left
	srcTri[2].y = (float) src->height - 1;
	//- - - - - - - - - - - - - - -//
	dstTri[0].x = (float)(src->width*0.0);    //dst Top left
	dstTri[0].y = (float)(src->height*0.33);
	dstTri[1].x = (float)(src->width*0.85);   //dst Top right
	dstTri[1].y = (float)(src->height*0.25);
	dstTri[2].x = (float)(src->width*0.15);   //dst Bottom left
	dstTri[2].y = (float)(src->height*0.7);
	cvGetAffineTransform(srcTri,dstTri,warp_mat);
	cvWarpAffine(src,dst,warp_mat);
	cvCopy(dst,src);

	//COMPUTE ROTATION MATRIX
	CvPoint2D32f center = cvPoint2D32f(src->width/2,src->height/2);
	double angle = -50.0;
	double scale = 0.6;
	cv2DRotationMatrix(center,angle,scale,rot_mat);
	cvWarpAffine(src,dst,rot_mat);

	//DO THE TRANSFORM:
	cvNamedWindow( "Affine_Transform", 1 );
	cvShowImage( "Affine_Transform", dst );

	m_ImageType = -3;

	cvWaitKey();

	cvDestroyWindow( "Affine_Transform" );
	cvReleaseImage(&src);
	cvReleaseImage(&dst);
	cvReleaseMat(&rot_mat);
	cvReleaseMat(&warp_mat);

	m_ImageType=imageType(workImg);
}
Example #24
void cbMouse(int evt, int x, int y, int flags, void* param)
{
	if (evt == CV_EVENT_LBUTTONDOWN){  // single click
	//if (CV_EVENT_FLAG_LBUTTON&flags){ // drag
		fprintf(stderr, "(%d,%d)\n", x, y);
		cvCircle((IplImage*)((stMouseParam*)param)->img, cvPoint(x,y),1,CV_BLACK,-1);
		((stMouseParam*)param)->pt = cvPoint2D32f(x,y);

	}
}
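A hedged sketch of wiring up this callback; stMouseParam is assumed to expose the img and pt members used above:

stMouseParam mouseParam;
mouseParam.img = img;  // IplImage* to draw on
cvNamedWindow("canvas", 1);
cvSetMouseCallback("canvas", cbMouse, &mouseParam);
cvShowImage("canvas", img);
cvWaitKey(0);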
Example #25
/*
 * call-seq:
 *   gravity_center -> cvpoint2d32f
 *
 * Return gravity center.
 */
VALUE
rb_gravity_center(VALUE self)
{
  CvMoments *moments = CVMOMENTS(self);
  double
    m00 = cvGetSpatialMoment(moments, 0, 0),
    m10 = cvGetSpatialMoment(moments, 1, 0),
    m01 = cvGetSpatialMoment(moments, 0, 1);
  return cCvPoint2D32f::new_object(cvPoint2D32f(m10 / m00, m01 / m00));
}
int Simulator::computeObservations(Mocap_object* mo, Camera* cam,  bool show_image){


  char img_name[100];
  if (show_image){
    sprintf(img_name,"Projection_%i", cam->id);
    cvNamedWindow(img_name,1);
  }

  cam->obs.clear();

  Eigen::Affine3f c2w = cam->pose.inverse();

  for (uint i=0; i<mo->points.size(); ++i){
    if (!mo->point_valid[i]) continue;
    Eigen::Vector3f  c = mo->points[i];

    c = c2w*c;

//    ROS_INFO("pt in cam frame: %f %f %f", c.x(), c.y(), c.z());

    float x_px = c.x()/c.z()*cam->f_x+cam->c_x;
    float y_px = c.y()/c.z()*cam->f_y+cam->c_y;

    // point behind camera or not within field of view
    if (c.z() < 0 || x_px < 0 || y_px < 0 || x_px >= cam->c_width || y_px >= cam->c_height){
      continue;
    }

    cam->obs[i] = cvPoint2D32f(x_px,y_px);

  }

  if (show_image) {
    // print on image
    cvSet(img, cvScalar(0,0,0));

    for (Observations::iterator it = cam->obs.begin(); it!=cam->obs.end(); ++it){
      cvCircle(img, cvPoint(it->second.x,it->second.y),3, CV_RGB(0,255,0),2);
    }

//    for (uint i=0; i<prj.size(); ++i){
//      float x = prj[i].x; float y = prj[i].y;
//      float x_ = prj[(i+1)%prj.size()].x; float y_ = prj[(i+1)%prj.size()].y;
//      cvLine(img,cvPoint(x,y),cvPoint(x_,y_), CV_RGB(255,0,0),2);
//    }
    cvShowImage(img_name, img);
    cvWaitKey(5);
  }


  return cam->obs.size();

}
Example #27
void FrameProcessor::processStereoFrame(const Mat & frameL, const Mat & frameR, Mat & pointCloud){   

	Mat disparityMap, disparityMapNormalized;
    Mat frameTransposedL, frameTransposedR, frameRemappedL, frameRemappedR, frameGrayscaleL, frameGrayscaleR;     

    // Compute 2x3 rotation matrices about each frame's center (90 degrees, unit scale)
    Point2f centerL( frameL.cols/2.0f, frameL.rows/2.0f );
    Mat rotMatL = getRotationMatrix2D( centerL, 90, 1 );

    Point2f centerR( frameR.cols/2.0f, frameR.rows/2.0f );
    Mat rotMatR = getRotationMatrix2D( centerR, 90, 1 );

    warpAffine(frameL, frameTransposedL, rotMatL, frameL.size() );
    warpAffine(frameR, frameTransposedR, rotMatR, frameR.size() );

    // Alternative: plain transpose instead of rotation
    //transpose(frameL, frameTransposedL);
    //transpose(frameR, frameTransposedR);

    remap(frameTransposedL, frameRemappedL, rmap[0][0], rmap[0][1], CV_INTER_LINEAR);
    remap(frameTransposedR, frameRemappedR, rmap[1][0], rmap[1][1], CV_INTER_LINEAR);

    //imshow("LiveFeedL",frameTransposedL);
   //imshow("LiveFeedR",frameTransposedR);

    cvtColor(frameRemappedL, frameGrayscaleL, CV_RGB2GRAY);
    cvtColor(frameRemappedR, frameGrayscaleR, CV_RGB2GRAY);


    BlockMatcher( frameGrayscaleL, frameGrayscaleR, disparityMap, CV_32F);
    normalize(disparityMap, disparityMapNormalized, 0, 255, CV_MINMAX, CV_8U);

    imshow("Disparity", disparityMapNormalized);

    reprojectImageTo3D( disparityMap, pointCloud, Q, false);
}
Example #28
CHandPoint CTransformImage::findFinger()
{
	findCenter();

	if(!m_transImage)
		return CHandPoint();

	int width   = m_transImage->width;
	int height  = 180;
	int moveX   = 0,     moveY  = height;
	BOOL bClick = FALSE, bWheel = FALSE;
	unsigned char ch;
	for(int y = m_center.y; y < height; ++y)
	{
		for(int x = m_center.x-100; x < m_center.x+50; ++x)
		{
			if(x < 0 || x >= width || y < 0 || y >= height)
				continue;

			ch = (unsigned char)m_transImage->imageData[y*m_transImage->widthStep + x];	// rows may be padded, so index by widthStep
			if(ch == 255)
			{
				moveX = x, moveY = y;
				if(x < m_center.x-50)
					bClick = TRUE;
				break;
			}

			// 			CvBox2D box;
			// 			box.center = cvPoint2D32f(x, y);
			// 			box.size   = cvSize2D32f(2, 2);
			// 			box.angle  = 90;
			// 			cvEllipseBox(m_image, box, CV_RGB(0,255,255), 1);
		}

		if(moveY != y)
			break;
	}

	// Keep the previous point to prevent the coordinates from jittering slightly
	if(abs(m_pastPt.x-moveX) < 2 || abs(m_pastPt.y-moveY) < 2)
		moveX = m_pastPt.x, moveY = m_pastPt.y;

	m_pastPt.x = moveX, m_pastPt.y = moveY;

	CvBox2D box;
	box.center = cvPoint2D32f(moveX, moveY);
	box.size   = cvSize2D32f(2, 2);
	box.angle  = 90;
	cvEllipseBox(m_image, box, CV_RGB(0,255,0), 1);

	return CHandPoint(moveX, height-moveY, bClick, bWheel);
}
Example #29
/****************************************
 * returns an align face in an image
 * returned img : 128x128 (Gray-scale)
 ****************************************/
Mat 
FaceVue::align_Face(const Mat &frame, const CvRect &faceROI)
{
	Mat warp_dst = Mat::zeros (128, 128, CV_8UC1);
	is_aligned = false;
	Mat input;
	cv::cvtColor (frame, input, CV_RGB2GRAY);

	Point2f* pt1 = new Point2f[3];
	Point2f* pt2 = new Point2f[3];
	pt2[0] = cvPoint2D32f(27,46);
	pt2[1] = cvPoint2D32f(101,46);
	pt2[2] = cvPoint2D32f(64,101);

	pt1[0] = cvPoint2D32f(target_Face->right_eye_x - faceROI.x,target_Face->right_eye_y - faceROI.y);
	pt1[1] = cvPoint2D32f(target_Face->left_eye_x - faceROI.x,target_Face->left_eye_y - faceROI.y);
	pt1[2] = cvPoint2D32f(target_Face->mouth_x - faceROI.x,target_Face->mouth_y - faceROI.y);

	if(pt1[0].x >= 0 && pt1[0].y>=0 &&
			pt1[1].x >= 0 && pt1[1].y>=0 &&
			pt1[2].x >= 0 && pt1[2].y>=0 &&
			pt1[0].x < frame.size().width && pt1[0].y < frame.size().height &&
			pt1[1].x < frame.size().width && pt1[1].y < frame.size().height&&
			pt1[2].x < frame.size().width && pt1[2].y < frame.size().height)
	{
		is_aligned = true;
		Mat warp_mat = cv::getAffineTransform (pt1, pt2);
		Mat face = input (faceROI);
		cv::warpAffine(face, warp_dst, warp_mat, warp_dst.size());
	}

	delete[] pt1;
	delete[] pt2;
	return warp_dst;
}
Example #30
void HarrisBuffer::OpticalFlowFromLK()
{
  //cvCalcOpticalFlowLK(prevgray, gray, cvSize(15,15), OFx, OFy);
  //cvCalcOpticalFlowHS(prevgray, gray, 0, OFx, OFy, 0.1, cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS,100,1e5));

  float subf=5;
  int xsz=gray->width, ysz=gray->height;
  int pxn=int(xsz/subf), pyn=int(ysz/subf);
  CvPoint2D32f *p1 = new CvPoint2D32f[pxn*pyn];
  CvPoint2D32f *p2 = new CvPoint2D32f[pxn*pyn];
  for (int i=0; i<pyn; i++)
    for (int j=0; j<pxn; j++){
      p1[i*pxn+j]=cvPoint2D32f(j*subf,i*subf);
      p2[i*pxn+j]=cvPoint2D32f(j*subf,i*subf);
    }

    char *sts = new char[pxn*pyn];
    CvTermCriteria termination = cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 100, 1e5);
    cvCalcOpticalFlowPyrLK(prevgray, gray, NULL, NULL,
                           p1, p2, pxn*pyn, cvSize(10,10),
                           3, sts, NULL, termination, CV_LKFLOW_INITIAL_GUESSES);

    IplImage* OFxsub = cvCreateImage(cvSize(pxn,pyn), IMGTYPE, 1);
    IplImage* OFysub = cvCreateImage(cvSize(pxn,pyn), IMGTYPE, 1);
    IMG_ELEM_TYPE *ptrOFxsub = (IMG_ELEM_TYPE*)cvPtr2D(OFxsub, 0, 0);
    IMG_ELEM_TYPE *ptrOFysub = (IMG_ELEM_TYPE*)cvPtr2D(OFysub, 0, 0);
    // Write the per-grid-point displacements into two sub-sampled flow images
    for (int i=0; i<pyn; i++)
      for (int j=0; j<pxn; j++){
        ptrOFxsub[i*pxn+j] = p2[i*pxn+j].x - p1[i*pxn+j].x;
        ptrOFysub[i*pxn+j] = p2[i*pxn+j].y - p1[i*pxn+j].y;
      }
    cvResize(OFxsub, OFx, CV_INTER_NN);
    cvResize(OFysub, OFy, CV_INTER_NN);
    cvReleaseImage(&OFxsub);
    cvReleaseImage(&OFysub);
    delete[] p1;
    delete[] p2;
    delete[] sts;
}