Example #1
  void cvRenderTracks(CvTracks const tracks, IplImage *imgSource, IplImage *imgDest, unsigned short mode, CvFont *font)
  {
    CV_FUNCNAME("cvRenderTracks");
    __CV_BEGIN__;

    CV_ASSERT(imgDest&&(imgDest->depth==IPL_DEPTH_8U)&&(imgDest->nChannels==3));

    if ((mode&CV_TRACK_RENDER_ID)&&(!font))
    {
      if (!defaultFont)
      {
	font = defaultFont = new CvFont;
	cvInitFont(font, CV_FONT_HERSHEY_DUPLEX, 0.5, 0.5, 0, 1);
	// Other fonts:
	//   CV_FONT_HERSHEY_SIMPLEX, CV_FONT_HERSHEY_PLAIN,
	//   CV_FONT_HERSHEY_DUPLEX, CV_FONT_HERSHEY_COMPLEX,
	//   CV_FONT_HERSHEY_TRIPLEX, CV_FONT_HERSHEY_COMPLEX_SMALL,
	//   CV_FONT_HERSHEY_SCRIPT_SIMPLEX, CV_FONT_HERSHEY_SCRIPT_COMPLEX
      }
      else
	font = defaultFont;
    }

    if (mode)
    {
      for (CvTracks::const_iterator it=tracks.begin(); it!=tracks.end(); ++it)
      {
	if (mode&CV_TRACK_RENDER_ID)
	  if (!it->second->inactive)
	  {
	    stringstream buffer;
	    buffer << it->first;
	    cvPutText(imgDest, buffer.str().c_str(), cvPoint((int)it->second->centroid.x, (int)it->second->centroid.y), font, CV_RGB(0.,255.,0.));
	  }

	if (mode&CV_TRACK_RENDER_BOUNDING_BOX)
	  if (it->second->inactive)
	    cvRectangle(imgDest, cvPoint(it->second->minx, it->second->miny), cvPoint(it->second->maxx-1, it->second->maxy-1), CV_RGB(0., 0., 50.));
	  else
	    cvRectangle(imgDest, cvPoint(it->second->minx, it->second->miny), cvPoint(it->second->maxx-1, it->second->maxy-1), CV_RGB(0., 0., 255.));

	if (mode&CV_TRACK_RENDER_TO_LOG)
	{
	  clog << "Track " << it->second->id << endl;
	  if (it->second->inactive)
	    clog << " - Inactive for " << it->second->inactive << " frames" << endl;
	  else
	    clog << " - Associated with blob " << it->second->label << endl;
	  clog << " - Lifetime " << it->second->lifetime << endl;
	  clog << " - Active " << it->second->active << endl;
	  clog << " - Bounding box: (" << it->second->minx << ", " << it->second->miny << ") - (" << it->second->maxx << ", " << it->second->maxy << ")" << endl;
	  clog << " - Centroid: (" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
	  clog << endl;
	}

	if (mode&CV_TRACK_RENDER_TO_STD)
	{
	  cout << "Track " << it->second->id << endl;
	  if (it->second->inactive)
	    cout << " - Inactive for " << it->second->inactive << " frames" << endl;
	  else
	    cout << " - Associated with blobs " << it->second->label << endl;
	  cout << " - Lifetime " << it->second->lifetime << endl;
	  cout << " - Active " << it->second->active << endl;
	  cout << " - Bounding box: (" << it->second->minx << ", " << it->second->miny << ") - (" << it->second->maxx << ", " << it->second->maxy << ")" << endl;
	  cout << " - Centroid: (" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
	  cout << endl;
	}
      }
    }

    __CV_END__;
  }
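Distilled from the example above: cvRenderTracks initialises a single shared CvFont lazily and reuses it for every track label. A minimal sketch of that pattern (the labelFont helper is illustrative, not part of cvBlob):

  static CvFont *defaultFont = NULL;

  static CvFont* labelFont()
  {
    if (!defaultFont)
    {
      // One-time setup; cvInitFont fills the struct in place.
      defaultFont = new CvFont;
      cvInitFont(defaultFont, CV_FONT_HERSHEY_DUPLEX, 0.5, 0.5, 0, 1);
    }
    return defaultFont;
  }

  // Usage: cvPutText(imgDest, "42", cvPoint(x, y), labelFont(), CV_RGB(0, 255, 0));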
Example #2
// Runs the dot detector and sends detected dots to the server on the given port. TODO Implement headless; needs more config options and/or possibly a config file first
int run( const char *serverAddress, const int serverPort, char headless ) {
    char calibrate_exposure = 0, show = ~0, flip = 0, vflip = 0, done = 0, warp = 0; //"Boolean" values used in this loop
    char noiceReduction = 2; //Small counter, so char is still ok.
    int i, sockfd; //Generic counter and socket file descriptor
    int dp = 0, minDist = 29, param1 = 0, param2 = 5; // Configuration variables for circle detection 
    int minDotRadius = 1;
    int detected_dots; //Detected dot counter
    int returnValue = EXIT_SUCCESS;
    int captureControl; //File descriptor for low-level camera controls
    int currentExposure = 150;
    int maxExposure = 1250; //Maximum exposure supported by the camera TODO Get this from the actual camera
    Color min = { 0, 70, 0, 0 }; //Minimum color to detect
    Color max = { 255, 255, 255, 0 }; //Maximum color to detect
    CvScalar colorWhite = cvScalar( WHITE ); //Color to draw detected dots on black and white surface
    BoundingBox DD_mask; //The box indicating what should and what should not be considered for dot search
    BoundingBox DD_transform; //The box indicating the plane we are looking at( and as such is the plane we would transform from )
    BoundingBox DD_transform_to; //The plane we are transforming to
    CvCapture *capture = NULL; //The camera
    CvMemStorage *storage; //Low level memory area used for dynamic structures in OpenCV
    CvSeq *seq; //Sequence to store detected dots in
    IplImage *grabbedImage = NULL; //Raw image from camera( plus some overlay in the end )
    IplImage *imgThreshold = NULL; //Image with detected dots
    IplImage *mask = NULL; //Mask to be able to remove uninteresting areas
    IplImage *coloredMask = NULL; //Mask to be able to indicate above mask on output image
    CvFont font; //Font for drawing text on images
    SendQueue *queue; //Head of the linked list that is the send queue
    char strbuf[255]; //Generic buffer for text formatting( with sprintf())
    struct timeval oldTime, time, diff; //Structs for measuring FPS
    float lastKnownFPS = 0; //Calculated FPS
    CvMat* pointRealMat = cvCreateMat( 1,1,CV_32FC2 ); //Single point matrix for point transformation
    CvMat* pointTransMat = cvCreateMat( 1,1,CV_32FC2 ); //Single point matrix for point transformation
    CvMat* transMat = cvCreateMat( 3,3,CV_32FC1 ); //Translation matrix for transforming input to a straight rectangle
    ClickParams clickParams = { TOP_LEFT, NULL, &DD_transform_to, transMat }; //Struct holding data needed by mouse-click callback function

    // Set up network
    sockfd = initNetwork( serverAddress, serverPort );
    if( sockfd == -1 ) {
        fprintf( stderr, "ERROR: initNetwork returned -1\n");
        return EXIT_FAILURE;
    }
    queue = initSendQueue();

    if( openCamera( &capture, &captureControl ) == 0 ) {
        fprintf( stderr, "ERROR: capture is NULL \n" );
        return EXIT_FAILURE;
    }

    if( ( disableAutoExposure( captureControl ) ) == -1 ) {
        fprintf( stderr, "ERROR: Cannot disable auto exposure \n" );
        //return EXIT_FAILURE;
    }

    if( ( updateAbsoluteExposure( captureControl, currentExposure ) ) == 0 ) {
        fprintf( stderr, "ERROR: Cannot set exposure\n");
    }

    // Create a window in which the captured images will be presented
    cvNamedWindow( imagewindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create a window to hold the configuration sliders and the detection frame TODO This is kind of a hack. Make a better solution
    cvNamedWindow( configwindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create a window to hold the transformed image. Handy to see how the dots are translated, but not needed for functionality
    if( warp ) cvNamedWindow( warpwindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create sliders to adjust the lower color boundary
    cvCreateTrackbar( red_lable  , configwindowname, &min.red,   255, NULL );
    cvCreateTrackbar( green_lable, configwindowname, &min.green, 255, NULL );
    cvCreateTrackbar( blue_lable , configwindowname, &min.blue,  255, NULL );

    //Create sliders for the contour based dot detection
    cvCreateTrackbar( min_area_lable, configwindowname, &minDotRadius,255, NULL );

    /* Slider for manual exposure setting */
    cvCreateTrackbar( exposure_lable, configwindowname, &currentExposure, maxExposure, NULL );

    //Create the memory storage
    storage = cvCreateMemStorage( 0 );

    // void cvInitFont( font, font_face, hscale, vscale, shear=0, thickness=1, line_type=8 )
    cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, 1, 1, 0, 1, 8 );

    // Grab an initial image to be able to fetch image size before the main loop.
    grabbedImage = cvQueryFrame( capture );

    //Move the two windows so both are visible at the same time
    cvMoveWindow( imagewindowname, 0, 10 );
    cvMoveWindow( configwindowname, grabbedImage->width+2, 10 );

    //TODO Move these three inits to a function
    // Set masking defaults TODO load from file? Specify file for this file loading?
    DD_mask.topLeft.x = 0;  
    DD_mask.topLeft.y = 0;

    DD_mask.topRight.x = grabbedImage->width-1;
    DD_mask.topRight.y = 0;

    DD_mask.bottomLeft.x = 0;
    DD_mask.bottomLeft.y = grabbedImage->height-1;

    DD_mask.bottomRight.x = grabbedImage->width-1;
    DD_mask.bottomRight.y = grabbedImage->height-1;

    // Set transformation defaults TODO load from file? Specify file for this file loading?
    DD_transform.topLeft.x = 0;  
    DD_transform.topLeft.y = 0;

    DD_transform.topRight.x = grabbedImage->width-1;
    DD_transform.topRight.y = 0;

    DD_transform.bottomLeft.x = 0;
    DD_transform.bottomLeft.y = grabbedImage->height-1;

    DD_transform.bottomRight.x = grabbedImage->width-1;
    DD_transform.bottomRight.y = grabbedImage->height-1;

    // Set the transformation destination
    DD_transform_to.topLeft.x = 0;  
    DD_transform_to.topLeft.y = 0;

    DD_transform_to.topRight.x = grabbedImage->width-1;
    DD_transform_to.topRight.y = 0;

    DD_transform_to.bottomLeft.x = 0;
    DD_transform_to.bottomLeft.y = grabbedImage->height-1;

    DD_transform_to.bottomRight.x = grabbedImage->width-1;
    DD_transform_to.bottomRight.y = grabbedImage->height-1;

    calculateTransformationMatrix( &DD_transform, &DD_transform_to, transMat );

    // Set callback function for mouse clicks
    cvSetMouseCallback( imagewindowname, calibrateClick, ( void* ) &clickParams );

    gettimeofday( &oldTime, NULL );

    // Main loop. Grabs an image from the camera, detects dots, sends them, draws them on the images, and shows the result to the user
    while( !done ) {

        //PROFILING_PRO_STAMP(); //Uncomment this and the one in the end of the while-loop, and comment all other PROFILING_* to profile main-loop

        // ------ Common actions
        cvClearMemStorage( storage );
        detected_dots = 0;

        //Grab a frame from the camera
        PROFILING_PRO_STAMP();
        grabbedImage = cvQueryFrame( capture );
        PROFILING_POST_STAMP( "cvQueryFrame");

        if( grabbedImage == NULL ) {
            fprintf( stderr, "ERROR: frame is null...\n" );
            getchar();
            returnValue = EXIT_FAILURE;
            break;
        }

        //Flip images to act as a mirror. 
        if( show && flip ) {
            cvFlip( grabbedImage, grabbedImage, 1 );
        }
        if( show && vflip ) {
            cvFlip( grabbedImage, grabbedImage, 0 );
        }

        // ------ State based actions
        switch( state ) {
            case GRAB_DOTS:

                //Create detection image
                imgThreshold = cvCreateImage( cvGetSize( grabbedImage ), 8, 1 );
                cvInRangeS( grabbedImage, cvScalar( DD_COLOR( min )), cvScalar( DD_COLOR( max )), imgThreshold );

                //Mask away anything not in our calibration area
                mask = cvCreateImage( cvGetSize( grabbedImage ), 8, 1 );
                cvZero( mask );
                cvFillConvexPoly( mask, ( CvPoint* ) &DD_mask, 4, cvScalar( WHITE ), 1, 0 );
                cvAnd( imgThreshold, mask, imgThreshold, NULL );

                // Invert mask, increase the number of channels in it and overlay on grabbedImage //TODO Tint the mask red before overlaying
                cvNot( mask, mask );
                coloredMask = cvCreateImage( cvGetSize( grabbedImage ), grabbedImage->depth, grabbedImage->nChannels );
                cvCvtColor( mask, coloredMask, CV_GRAY2BGR );
                cvAddWeighted( grabbedImage, 0.95, coloredMask, 0.05, 0.0, grabbedImage );


                // Reduce noise. 
                // Erode is kind of floor() of pixels, dilate is kind of ceil()
                // I'm not sure which gives the best result.
                switch( noiceReduction ) {
                    case 0: break; //No noise reduction at all
                    case 1: cvErode( imgThreshold, imgThreshold, NULL, 2 ); break;
                    case 2: cvDilate( imgThreshold, imgThreshold, NULL, 2 ); break;
                }

                // Warp the warp-image. We are reusing the coloredMask variable to save some space
                PROFILING_PRO_STAMP();
                if( show && warp ) cvWarpPerspective( grabbedImage, coloredMask, transMat, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ));
                PROFILING_POST_STAMP( "Warping perspective" );


                // Find all dots in the image
                PROFILING_PRO_STAMP();

                // Clear old data from seq
                seq = 0;

                // Find the dots
                cvFindContours(
                        imgThreshold,
                        storage,
                        &seq,
                        sizeof( CvContour ),
                        CV_RETR_LIST,
                        CV_CHAIN_APPROX_SIMPLE,
                        cvPoint( 0,0 )
                        );
                // cvFindContours destroys the original image, so we wipe it here
                // and then repaint the detected dots later
                cvZero( imgThreshold );

                PROFILING_POST_STAMP( "Dot detection" );

                //Process all detected dots
                PROFILING_PRO_STAMP();
                for( ; seq != 0; seq = seq->h_next ) {

                    // Calculate radius of the detected contour
                    CvRect rect =( ( CvContour * )seq )->rect;
                    float relCenterX = rect.width / 2;
                    float relCenterY = rect.height / 2;

                    // Make sure the dot is big enough
                    if( relCenterX < minDotRadius || relCenterY < minDotRadius ) {
                        continue;
                    }

                    // Note that we have found another dot
                    ++detected_dots;

                    // Transform the detected dot according to transformation matrix.
                    float absCenter[] = { rect.x + relCenterX, rect.y + relCenterY };
                    pointRealMat->data.fl = absCenter;
                    cvPerspectiveTransform( pointRealMat, pointTransMat, transMat );

                    // Draw the detected contour back to imgThreshold
                    // Draw the detected dot both to real image and to warped( if warp is active )
                    if( show ) {
                        cvDrawContours( imgThreshold, seq, colorWhite, colorWhite, -1, CV_FILLED, 8, cvPoint( 0,0 ) );
                        drawCircle( absCenter[0], absCenter[1], ( relCenterX + relCenterY ) / 2, grabbedImage );
                        if( warp ) {
                            drawCircle( pointTransMat->data.fl[0], pointTransMat->data.fl[1], ( relCenterX + relCenterY ) / 2, coloredMask );
                        }
                    }

                    // Add detected dot to the send queue
                    addPointToSendQueue( pointTransMat->data.fl, queue ); 
                }

                PROFILING_POST_STAMP("Painting dots");

                //Calculate framerate
                gettimeofday( &time, NULL );
                timeval_subtract( &diff, &time, &oldTime );
                lastKnownFPS = lastKnownFPS * 0.7 + ( 1000000.0 / diff.tv_usec ) * 0.3; //We naïvely assume we have more than 1 fps
                oldTime = time;

                //Send the dots detected this frame to the server
                PROFILING_PRO_STAMP();
                sendQueue( sockfd, queue );
                clearSendQueue( queue );
                PROFILING_POST_STAMP( "Sending dots" );

                /* If calibrating, do the calibration */
                if( calibrate_exposure ) {
                    int ret;
                    ret = calibrateExposureLow( captureControl, detected_dots, &currentExposure, DD_MAX_EXPOSURE, lastKnownFPS );
                    switch( ret ) {
                        case 0: // We are done. Let's leave calibration mode
                            calibrate_exposure = 0;
                            printf( "done\n" );
                            break;

                        case -1: // We hit the upper limit with no detected dots
                            fprintf( stderr, "Reached upper limit (%d). Aborting!\n", DD_MAX_EXPOSURE );
                            calibrate_exposure = 0;
                            break;

                        case -2: // We hit the lower limit with more than one dot detected
                            fprintf( stderr, "Too bright. More then one dot found even with minimal exposure. Aborting!\n");
                            calibrate_exposure = 0;
                            break;

                        case -3: //No conclusive results.
                            fprintf( stderr, "No conclusive results. Giving up\n" );
                            calibrate_exposure = 0;
                            break;
                    }
                }

                break; //End of GRAB_DOTS

            case SELECT_TRANSFORM:
                //Falling through here. Poor man's multi-case clause. Not putting this in default as we might
                //want to do different things in these two some day.
            case SELECT_MASK:
                snprintf( strbuf, sizeof( strbuf ), "Select %s point", pointTranslationTable[clickParams.currentPoint]);
                cvDisplayOverlay( imagewindowname, strbuf, 5 );
                break; //End of SELECT_MASK and SELECT_TRANSFORM
        }

        // Paint the corners of the detecting area and the calibration area
        paintOverlayPoints( grabbedImage, &DD_transform );

        //Print some statistics to the image
        if( show ) {
            snprintf( strbuf, sizeof( strbuf ), "Dots: %i", detected_dots ); //Print number of detected dots to the screen
            cvPutText( grabbedImage, strbuf, cvPoint( 10, 20 ), &font, cvScalar( WHITE ));
            snprintf( strbuf, sizeof( strbuf ), "FPS: %.1f", lastKnownFPS );
            cvPutText( grabbedImage, strbuf, cvPoint( 10, 40 ), &font, cvScalar( WHITE ));
            cvCircle( grabbedImage, cvPoint( 15, 55 ), minDotRadius, cvScalar( min.blue, min.green, min.red, min.alpha ), -1, 8, 0 ); // Colors given in order BGR-A, Blue, Green, Red, Alpha
        }

        //Show images 
        PROFILING_PRO_STAMP();
        if( show ) {
            cvShowImage( configwindowname, imgThreshold );
            cvShowImage( imagewindowname, grabbedImage );
            if( warp ) cvShowImage( warpwindowname, coloredMask );
        }
        PROFILING_POST_STAMP("Showing images");

        //Release the temporary images
        cvReleaseImage( &imgThreshold );
        cvReleaseImage( &mask );
        cvReleaseImage( &coloredMask );

        /* Update exposure if needed */
        updateAbsoluteExposure( captureControl, currentExposure );
        cvSetTrackbarPos( exposure_lable, configwindowname, currentExposure );

        //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7( linux version ),
        //remove higher bits using AND operator
        i = ( cvWaitKey( 10 ) & 0xff );
        switch( i ) {
            case 'g': 
                makeCalibrate( &DD_transform, &DD_transform_to, transMat, capture, captureControl, 20 );
                updateAbsoluteExposure( captureControl, currentExposure+1 );
                break;

            case 'e': 
                toggleCalibrationMode( &calibrate_exposure, &currentExposure );
                break; /* Toggles calibration mode */

            case 'c':
                openCamera( &capture, &captureControl );
                break;

            case 's': 
                show = ~show;
                break; //Toggles updating of the image. Can be useful for performance on slower machines... Or as a frame freeze

            case 'm': 
                state = SELECT_MASK;
                clickParams.currentPoint = TOP_LEFT;
                clickParams.DD_box = &DD_mask;
                break; //Starts selection of masking area. Will return to dot detection once all four points are set

            case 't':
                state = SELECT_TRANSFORM;
                clickParams.currentPoint = TOP_LEFT;
                clickParams.DD_box = &DD_transform;
                break; //Starts selection of the transformation area. Returns to dot detection when done.

            case 'f':
                flip = ~flip;
                break; //Toggles horizontal flipping of the image
            case 'v':
                vflip = ~vflip;
                break; //Toggles vertical flipping of the image

            case 'w':
                warp = ~warp;
                toggleWarpOutput( warp );
                break; //Toggles showing the warped image

            case 'n':
                noiceReduction = ( noiceReduction + 1 ) % 3;
                break; //Cycles the noise reduction algorithm

            case 'q': //falling through here to quit

            case  27: 
                done = 1;
                break; //ESC. Kills the whole thing( in a nice and controlled manner )
        }
        fflush( stdout ); //Make sure everything in the buffer is printed before we go on

        //PROFILING_POST_STAMP("Main loop");
    } //End of main while-loop

    // Release the capture device and do some housekeeping
    cvReleaseImage( &grabbedImage );
    cvReleaseCapture( &capture );
    cvReleaseMemStorage( &storage );
    cvDestroyWindow( imagewindowname );
    cvDestroyWindow( configwindowname );
    if( warp ) cvDestroyWindow( warpwindowname ); //If no warp, it is already destroyed
    destroySendQueue( queue );
    close( sockfd );
    close( captureControl );
    return returnValue;
}
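The FPS figure in run() is an exponential moving average. A minimal sketch of that update as a stand-alone helper, with the 0.7/0.3 weights taken from the loop above (the helper name is illustrative, and it assumes the frame took less than a second, i.e. diff.tv_sec == 0):

#include <sys/time.h>

/* Blend 70% of the previous estimate with 30% of the instantaneous rate. */
static float smoothFPS( float lastKnownFPS, const struct timeval *diff ) {
    return lastKnownFPS * 0.7f + ( 1000000.0f / diff->tv_usec ) * 0.3f;
}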
Example #3
//Function that ties all the modules together
//Modify your own code based on this one
 int RunBlobTrackingAuto2323(CvCapture* pCap, CvBlobTrackerAuto* pTracker, char* fgavi_name , char* btavi_name )
{
	int                     OneFrameProcess = 0;
	int                     key;
	int                     FrameNum = 0;
	CvVideoWriter*          pFGAvi = NULL;
	CvVideoWriter*          pBTAvi = NULL;

	/* Main loop: */
	/* When OneFrameProcess is 1, cvWaitKey(0) blocks until a key is pressed; when it is 0, cvWaitKey(1) waits 1 ms and returns the pressed key, or -1 on timeout. */
	for (FrameNum = 0; pCap && (key = cvWaitKey(OneFrameProcess ? 0 : 1)) != 27; //pressing ESC ends the whole program
		FrameNum++)
	{   /* Main loop: */ // The program's main loop; when it terminates, the program ends.
		IplImage*   pImg = NULL;
		IplImage*   pMask = NULL;

		if (key != -1)
		{
			OneFrameProcess = 1;
			if (key == 'r')OneFrameProcess = 0;
		}

		pImg = cvQueryFrame(pCap); //read a video frame
		if (pImg == NULL) break;


		/* Process: */
		pTracker->Process(pImg, pMask); //process the image; this call runs the whole processing pipeline

		if (fgavi_name) //a filename was given for saving the foreground (FG)
		if (pTracker->GetFGMask()) //if the FG mask exists, save the foreground and draw the blobs
		{   /* Debug FG: */
			IplImage*           pFG = pTracker->GetFGMask(); //get the FG mask
			CvSize              S = cvSize(pFG->width, pFG->height);
			static IplImage*    pI = NULL;

			if (pI == NULL)pI = cvCreateImage(S, pFG->depth, 3);
			cvCvtColor(pFG, pI, CV_GRAY2BGR);

			if (fgavi_name) //save the foreground to video
			{   /* Save fg to avi file: */
				if (pFGAvi == NULL)
				{
					pFGAvi = cvCreateVideoWriter(
						fgavi_name,
						CV_FOURCC('x', 'v', 'i', 'd'),
						25,
						S);
				}
				cvWriteFrame(pFGAvi, pI); //write one frame
			}

			//draw an ellipse for each blob
			if (pTracker->GetBlobNum() > 0) //pTracker found at least one blob
			{   /* Draw detected blobs: */
				int i;
				for (i = pTracker->GetBlobNum(); i > 0; i--)
				{
					CvBlob* pB = pTracker->GetBlob(i - 1); //get blob i-1
					CvPoint p = cvPointFrom32f(CV_BLOB_CENTER(pB)); //blob center
					//this macro is just a cast; see the next line:
					//#define CV_BLOB_CENTER(pB) cvPoint2D32f(((CvBlob*)(pB))->x,((CvBlob*)(pB))->y)
					CvSize  s = cvSize(MAX(1, cvRound(CV_BLOB_RX(pB))), MAX(1, cvRound(CV_BLOB_RY(pB))));
					//get the blob's width and height via the macros
					int c = cvRound(255 * pTracker->GetState(CV_BLOB_ID(pB)));
					cvEllipse(pI, //draw an ellipse for the blob on the image
						p,
						s,
						0, 0, 360,
						CV_RGB(c, 255 - c, 0), cvRound(1 + (3 * c) / 255));
				}   /* Next blob: */;
			}
			cvNamedWindow("FG", 0);
			cvShowImage("FG", pI);
		}   /* Debug FG. */ //when saving results: save the foreground and draw the blobs


		//write the id next to each found blob on the original image
		/* Draw debug info: */
		if (pImg) //the original image of each frame
		{   /* Draw all information about test sequence: */
			char        str[1024];
			int         line_type = CV_AA;   // Change it to 8 to see non-antialiased graphics.
			CvFont      font;
			int         i;
			IplImage*   pI = cvCloneImage(pImg);

			cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 0.7, 0.7, 0, 1, line_type);

			for (i = pTracker->GetBlobNum(); i > 0; i--)
			{
				CvSize  TextSize;
				CvBlob* pB = pTracker->GetBlob(i - 1);
				CvPoint p = cvPoint(cvRound(pB->x * 256), cvRound(pB->y * 256));
				CvSize  s = cvSize(MAX(1, cvRound(CV_BLOB_RX(pB) * 256)), MAX(1, cvRound(CV_BLOB_RY(pB) * 256)));
				int c = cvRound(255 * pTracker->GetState(CV_BLOB_ID(pB)));


				//draw the blob on the original image
				cvEllipse(pI,
					p,
					s,
					0, 0, 360,
					CV_RGB(c, 255 - c, 0), cvRound(1 + (3 * c) / 255), CV_AA, 8);


				//the code below writes the id next to the found blob
				p.x >>= 8;
				p.y >>= 8;
				s.width >>= 8;
				s.height >>= 8;
				sprintf(str, "%03d", CV_BLOB_ID(pB));
				cvGetTextSize(str, &font, &TextSize, NULL);
				p.y -= s.height;
				cvPutText(pI, str, p, &font, CV_RGB(0, 255, 255));
				{
					const char* pS = pTracker->GetStateDesc(CV_BLOB_ID(pB));

					if (pS)
					{
						char* pStr = MY_STRDUP(pS);
						char* pStrFree = pStr;

						while (pStr && strlen(pStr) > 0)
						{
							char* str_next = strchr(pStr, '\n');

							if (str_next)
							{
								str_next[0] = 0;
								str_next++;
							}

							p.y += TextSize.height + 1;
							cvPutText(pI, pStr, p, &font, CV_RGB(0, 255, 255));
							pStr = str_next;
						}
						free(pStrFree);
					}
				}

			}   /* Next blob. */;

			cvNamedWindow("Tracking", 0);
			cvShowImage("Tracking", pI);

			if (btavi_name && pI) //if this frame exists and a filename was passed in (e.g. btavi_name = "1.avi"), save the image
			{   /* Save to avi file: */
				CvSize      S = cvSize(pI->width, pI->height);
				if (pBTAvi == NULL)
				{
					pBTAvi = cvCreateVideoWriter(
						btavi_name,
						CV_FOURCC('x', 'v', 'i', 'd'),
						25,
						S);
				}
				cvWriteFrame(pBTAvi, pI);
			}

			cvReleaseImage(&pI);
		}   /* Draw all information about test sequence. */
	}   /*  Main loop. */

	if (pFGAvi)cvReleaseVideoWriter(&pFGAvi);
	if (pBTAvi)cvReleaseVideoWriter(&pBTAvi);
	return 0;
}   /* RunBlobTrackingAuto */
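The strdup/strchr loop above, which prints a multi-line state description one text row at a time, is reusable on its own. A minimal sketch of it as a helper (the name and signature are illustrative; strdup stands in for MY_STRDUP):

#include <string.h>
#include <stdlib.h>

/* Draw a possibly multi-line string, advancing one text row per '\n'. */
static void putMultilineText(IplImage* pI, const char* pS, CvPoint p,
	CvFont* font, CvScalar color)
{
	CvSize TextSize;
	char* pStr = strdup(pS); //the loop below mutates the buffer
	char* pStrFree = pStr;

	cvGetTextSize(pS, font, &TextSize, NULL);
	while (pStr && strlen(pStr) > 0)
	{
		char* str_next = strchr(pStr, '\n');
		if (str_next)
		{
			str_next[0] = 0;
			str_next++;
		}
		p.y += TextSize.height + 1;
		cvPutText(pI, pStr, p, font, color);
		pStr = str_next;
	}
	free(pStrFree);
}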
Example #4
void MyVideo::drawText(IplImage* p)
{
    CvFont* f = (CvFont*)malloc(sizeof(CvFont));
    cvInitFont(f, CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, CV_AA);
    cvPutText(p, "3130000059 WangYi", cvPoint(20,100), f, CV_RGB(255,0,0));
}
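Since the font is only needed for the duration of the call, a stack-allocated CvFont avoids the heap allocation entirely. A sketch of the same routine under that assumption:

void MyVideo::drawText(IplImage* p)
{
    CvFont f; //lives on the stack; nothing to allocate or free
    cvInitFont(&f, CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, CV_AA);
    cvPutText(p, "3130000059 WangYi", cvPoint(20,100), &f, CV_RGB(255,0,0));
}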
Example #5
int main(int argc, char *argv[ ]){

	RASPIVID_CONFIG * config = (RASPIVID_CONFIG*)malloc(sizeof(RASPIVID_CONFIG));
	
	config->width=320;
	config->height=240;
	config->bitrate=0;	// zero: leave as default
	config->framerate=0;
	config->monochrome=0;

	int opt;

	while ((opt = getopt(argc, argv, "lxm")) != -1)
	{
		switch (opt)
		{
			case 'l':					// large
				config->width = 640;
				config->height = 480;
				break;
			case 'x':	   				// extra large
				config->width = 960;
				config->height = 720;
				break;
			case 'm':					// monochrome
				config->monochrome = 1;
				break;
			default:
				fprintf(stderr, "Usage: %s [-x] [-l] [-m] \n", argv[0], opt);
				fprintf(stderr, "-l: Large mode\n");
				fprintf(stderr, "-x: Extra large mode\n");
				fprintf(stderr, "-l: Monochrome mode\n");
				exit(EXIT_FAILURE);
		}
	}

	/*
	Could also use hard coded defaults method: raspiCamCvCreateCameraCapture(0)
	*/
    RaspiCamCvCapture * capture = (RaspiCamCvCapture *) raspiCamCvCreateCameraCapture2(0, config); 
	free(config);
	
	CvFont font;
	double hScale=0.4;
	double vScale=0.4;
	int    lineWidth=1;

	cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, hScale, vScale, 0, lineWidth, 8);

	cvNamedWindow("RaspiCamTest", 1);
	int exit =0;
	do {
		IplImage* image = raspiCamCvQueryFrame(capture);
		
		char text[200];
		sprintf(
			text
			, "w=%.0f h=%.0f fps=%.0f bitrate=%.0f monochrome=%.0f"
			, raspiCamCvGetCaptureProperty(capture, RPI_CAP_PROP_FRAME_WIDTH)
			, raspiCamCvGetCaptureProperty(capture, RPI_CAP_PROP_FRAME_HEIGHT)
			, raspiCamCvGetCaptureProperty(capture, RPI_CAP_PROP_FPS)
			, raspiCamCvGetCaptureProperty(capture, RPI_CAP_PROP_BITRATE)
			, raspiCamCvGetCaptureProperty(capture, RPI_CAP_PROP_MONOCHROME)
		);
		cvPutText (image, text, cvPoint(05, 40), &font, cvScalar(255, 255, 0, 0));
		
		sprintf(text, "Press ESC to exit");
		cvPutText (image, text, cvPoint(05, 80), &font, cvScalar(255, 255, 0, 0));
		
		cvShowImage("RaspiCamTest", image);
		
		char key = cvWaitKey(10);
		
		switch(key)	
		{
			case 27:		// Esc to exit
				exit = 1;
				break;
			case 60:		// < (less than)
				raspiCamCvSetCaptureProperty(capture, RPI_CAP_PROP_FPS, 25);	// Currently NOOP
				break;
			case 62:		// > (greater than)
				raspiCamCvSetCaptureProperty(capture, RPI_CAP_PROP_FPS, 30);	// Currently NOOP
				break;
		}
		
	} while (!exit);

	cvDestroyWindow("RaspiCamTest");
	raspiCamCvReleaseCapture(&capture);
	return 0;
}
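Unlike run() in Example #2, this loop uses the frame returned by raspiCamCvQueryFrame without a NULL check. A defensive variant of the start of the loop body might look like this (a sketch, mirroring the check Example #2 performs):

		IplImage* image = raspiCamCvQueryFrame(capture);
		if (image == NULL) { //camera dropped a frame or disconnected
			fprintf(stderr, "ERROR: frame is null...\n");
			break;
		}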
Example #6
    /*************************************************************************
    Process
        Process the frames in a video one by one.
            1) FG detection
            2) Blob Detection
            3) Blob Tracking and Association
            4) Blob Post Processing
            5) Blob Analysis
            6) Store the results
    Exceptions
        None
    *************************************************************************/
    void Camera::Process(const int startFrameIndex, const int endFrameIndex)
    {
        ASSERT_TRUE ( m_initializied );
        ASSERT_TRUE ( m_pTracker != NULL );

        InitializeDisplayWindows( );

        LOG_CONSOLE( "Start processing " + m_videoFileName );

        int key, oneFrameProcess=0, frameNum; 
        for ( frameNum = 1; 
             m_videoCap.grab() &&
            ( key = cvWaitKey( oneFrameProcess ? 0 : 1 ) ) != 27 &&
            ( frameNum <=  endFrameIndex || endFrameIndex < 0 );
            frameNum++ )
        {
            if ( frameNum >= startFrameIndex )
            {
                std::cout << "frameNum:  " << frameNum << '\r';

                // get the video frame
                m_videoCap.retrieve( m_originalFrameMat );

                // downscale the image if required
                if ( m_downScaleImage )
                {
                    cv::resize( m_originalFrameMat, m_frame,  m_frame.size() );
                }
                else
                {
                    m_frame = m_originalFrameMat;
                }

                m_frameIpl = m_frame; 

                if ( key != -1 )
                {
                    oneFrameProcess = ( key == 'r' ) ? 0 : 1;
                }

                // Process the current frame
                m_pTracker->Process( &m_frameIpl, m_pFGMaskIpl);
                m_fgMask        = m_pTracker->GetFGMask();


                // Process the current video frame using the blob tracker
                IplImage fgMaskIpl = m_fgMask;


                // Save Blob Information in a file
                for( int i = m_pTracker->GetBlobNum(); i> 0; i-- )
                {
                    CvBlob* pBlob = m_pTracker->GetBlob(i-1);

                    ASSERT_TRUE( pBlob != NULL );

                    // Save blob record
                    SaveBlobRecord( pBlob, frameNum );
                }

                if ( m_displayIntermediateResult || m_saveIntermediateResult )
                {
                    char tempString[128];
                    std::string textMessage;
                    //display intermediate result if necessary
                    CvFont    font; 
                    CvSize  TextSize;
                    cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, 0.7, 0.7, 0, 1, CV_AA );

                    sprintf(tempString,"frame # %d", frameNum);
                    textMessage = tempString;
                    cv::putText( m_originalFrameMat, textMessage, cv::Point(10,20), CV_FONT_HERSHEY_PLAIN, 1, cv::Scalar(0,255,255));
                    cv::putText( m_fgMask, textMessage, cv::Point(10,20), CV_FONT_HERSHEY_PLAIN, 1, cv::Scalar(0,255,255));
                    cv::putText( m_frame, textMessage, cv::Point(10,20), CV_FONT_HERSHEY_PLAIN, 1, cv::Scalar(0,255,255));

                    //draw blobs, if any, as green ellipses with the blob id displayed next to them
                    int c = 0; // 0: green; 255: red
                    for ( int i = m_pTracker->GetBlobNum(); i > 0; i-- )
                    {
                        CvBlob* pBlob = m_pTracker->GetBlob(i-1);

                        ASSERT_TRUE( pBlob != NULL );

                        cv::Point blobCorner( cvRound( pBlob->x * 256 ), cvRound( pBlob->y * 256 ) );

                        CvSize  blobSize = cvSize( MAX( 1, cvRound( CV_BLOB_RX(pBlob) * 256 ) ), 
                                                   MAX( 1, cvRound( CV_BLOB_RY(pBlob) * 256 ) ) );

                        cv::Scalar boundingBoxColor( c, 255-c, 0 );

                        if ( m_pTracker->GetState( CV_BLOB_ID( pBlob ) ) != 0 )
                        {
                            boundingBoxColor = cv::Scalar( 255-c, c, 0 );
                        }

                        cv::ellipse( m_frame, 
                                    cv::RotatedRect( cv::Point2f( pBlob->x, pBlob->y ), cv::Size2f( pBlob->w, pBlob->h ), 0 ),
                                    cv::Scalar( c, 255-c, 0 ) );
                        blobCorner.x >>= 8;      
                        blobCorner.y >>= 8;
                        
                        blobSize.width >>= 8;
                        blobSize.height >>= 8;
                        blobCorner.y -= blobSize.height;

                        sprintf( tempString, "BlobId=%03d", CV_BLOB_ID(pBlob) );
                        cvGetTextSize( tempString, &font, &TextSize, NULL );
                        
                        cv::putText( m_frame,
                                     std::string( tempString ),
                                     blobCorner,
                                     CV_FONT_HERSHEY_PLAIN,
                                     1,
                                     cv::Scalar( 255, 255, 0, 0 ) );
                    }
                }

                if ( m_displayIntermediateResult )
                {
                    cv::imshow(m_videoFileName+"_FGMask", m_fgMask);
                    cv::imshow(m_videoFileName+"_Tracking", m_frame);
                }

                if ( m_saveIntermediateResult )
                {
                    cv::Mat tmpFrame;
                    cv::cvtColor( m_fgMask, tmpFrame, CV_GRAY2BGR );
                    *m_pFGAvi << tmpFrame;             
                    *m_pBTAvi << m_frame;
                }
            }
Example #7
File: model.c Project: devaib/cnn
int main(void){

    // minimum size (pixels) of the detection object (multiple of 12)
    const int MinFaceSize = 72;

    // thresholds
    const float Threshold_12Layer = .5;
    const float Threshold_24Layer = .01;
    const float Threshold_48Layer = -.01;
    const float Threshold_12CalibrationLayer = .1;
    const float Threshold_24CalibrationLayer = .1;
    const float Threshold_48CalibrationLayer = .1;
    const float Threshold_12NMS = .3f;
    const float Threshold_24NMS = .3f;
    const float Threshold_48NMS = 1.0f;

    // detection windows
    struct Windows window[500];

    // loop counter
    int i, j, k;
    int row, col;
    int counter = 0;    // detection window counter

    // image information
    int height, width, step, channels;
    uchar *data, *data24, *data48;

    // size, x, y for calibration
    float *out_12c, *out_24c, *out_48c;     // vector carrying s,x,y
    float s, x, y;
    int cali_x, cali_y, cali_w, cali_h;

    // scores of the 12 layer
    float res_12Layer;
    float res_24Layer;
    float res_48Layer;

    // window sliding stride
    const int Stride = 4;

    // image pyramid rate
    int pyr_rate = MinFaceSize / 12;

    // image pyramid stopping flag
    bool flagStop = false;

    // file path
    char file[150];
    strcpy(file, FILE_PATH);
    strcat(file, TEST_IMAGE);

    // alloc memory for 12x12 image
    float **img = malloc(12 * sizeof(float*));
    for (i = 0; i < 12; i++){
        img[i] = malloc(12 * sizeof(float));
    }

    // alloc memory for 24x24 image
    float **img24 = malloc(24 * sizeof(float*));
    for (i = 0; i < 24; i++){
        img24[i] = malloc(24 * sizeof(float));
    }

    // alloc memory for 48x48 image
    float **img48 = malloc(48 * sizeof(float*));
    for (i = 0; i < 48; i++){
        img48[i] = malloc(48 * sizeof(float));
    }

    // for printing scores
    CvFont font;
    cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.3, 0.3, 0, 1, 8);
    char word[8]; //room for scores like "-0.01" plus the terminator

    // load image
    IplImage *srcImg, *dstImg;
    srcImg = cvLoadImage(file, CV_LOAD_IMAGE_GRAYSCALE);

    if (!srcImg){
        printf("Could not load image file: %s\n", file);
        exit(1);
    }

    // original size of the image
    const int WIDTH = srcImg->width;
    const int HEIGHT = srcImg->height;

    IplImage *originImg = cvCloneImage(srcImg);
    IplImage *originImg1 = cvCloneImage(srcImg);
    IplImage *originImg2 = cvCloneImage(srcImg);
    IplImage *originImg3 = cvCloneImage(srcImg);
    IplImage *input24Img = cvCreateImage(cvSize(24, 24), IPL_DEPTH_8U, 1);
    IplImage *input48Img = cvCreateImage(cvSize(48, 48), IPL_DEPTH_8U, 1);


    // image pyramid loop starts
    while (!flagStop){
        counter = 0;

        // image pyramid down
        dstImg = doPyrDown(srcImg, pyr_rate);


        // get the image data
        width = dstImg -> width;
        height = dstImg -> height;
        step = dstImg -> widthStep;
        channels = dstImg -> nChannels;
        data = (uchar*)dstImg -> imageData;

        IplImage *detectedImg_12Layer = cvCloneImage(dstImg);

        // window sliding loop starts
        for (row = 0; row + 12 <= height; row += Stride){
            for (col = 0; col + 12 <= width; col += Stride){
                // 12 layer, 12 calibration, NMS
                preprocess(img, data, row, col, step, channels, 12);

                res_12Layer = Layer12(img, 12, 12, channels);

                // 12 layer passed
                if (res_12Layer > Threshold_12Layer){
                    // 12 calibration layer
                    out_12c = CaliLayer12(img, 12, 12, channels, Threshold_12CalibrationLayer);

                    s = out_12c[0]; x = out_12c[1]; y = out_12c[2];
                    free(out_12c);      // memory allocated in CaliLayer12

                    // ignore returned NaN values (comparisons involving NaN are always false)
                    if (s != s || x != x || y != y) continue;

                    // calibration
                    cali_x = col * pyr_rate - x * 12 * pyr_rate / s;
                    cali_y = row * pyr_rate - y * 12 * pyr_rate / s;
                    cali_w = 12 * pyr_rate / s;
                    cali_h = 12 * pyr_rate / s;

                    // make sure the calibrated window is not beyond the image boundary
                    if (cali_x >= WIDTH || cali_y >= HEIGHT) continue;

                    cali_x = max(cali_x, 0);
                    cali_y = max(cali_y, 0);
                    cali_w = min(cali_w, WIDTH - cali_x);
                    cali_h = min(cali_h, HEIGHT - cali_y);

                    window[counter].x1 = cali_x;                    // x1
                    window[counter].y1 = cali_y;                    // y1
                    window[counter].x2 = cali_x + cali_w;           // x2
                    window[counter].y2 = cali_y + cali_h;           // y2
                    window[counter].score = res_12Layer;            // 12 layer score
                    window[counter].iou = 0.0;                      // iou ratio
                    window[counter].dropped= false;                 // if it's dropped
                    counter++;
                    // end of 12 layer, 12 calibration


                }
            }
        }
        // window sliding loop ends

        // sort the detection windows by score in descending order
        mergeSort(window, 0, counter);

        // display sorted windows surviving 12 layer
        cvNamedWindow("12 layer", CV_WINDOW_AUTOSIZE);
        for (i = 0; i < counter; i++){
            cvRectangle(originImg, cvPoint(window[i].x1, window[i].y1), cvPoint(window[i].x2, window[i].y2), cvScalar(255,0,0,0), 2, 4, 0);
            // printf("[#%d] x1: %d, y1: %d, x2: %d, y2: %d, score: %f, iou: %f, dropped: %s\n", i, window[i].x1, window[i].y1, window[i].x2, window[i].y2, window[i].score, window[i].iou, window[i].dropped ? "true" : "false");

            if (window[i].dropped == false){
                sprintf(word, "%.2f", window[i].score);
                cvPutText(originImg, word, cvPoint(window[i].x1, window[i].y1), &font, cvScalar(255, 255, 255, 0));
            }
        }
        cvShowImage("12 layer", originImg);
        cvMoveWindow("12 layer", 10, 10);

        printf("12 layer: x1: %d, y1: %d, x2: %d, y2: %d\n", window[15].x1, window[15].y1, window[15].x2, window[15].y2);

        // NMS after 12 calibration
        nms(window, counter, Threshold_12NMS);

        // display sorted windows surviving 12 layer
        cvNamedWindow("12 layer after NMS", CV_WINDOW_AUTOSIZE);
        for (i = 0; i < counter; i++){
            if (window[i].dropped == false){
                cvRectangle(originImg1, cvPoint(window[i].x1, window[i].y1), cvPoint(window[i].x2, window[i].y2), cvScalar(255,0,0,0), 2, 4, 0);

                sprintf(word, "%.2f", window[i].score);
                cvPutText(originImg1, word, cvPoint(window[i].x1, window[i].y1), &font, cvScalar(255, 255, 255, 0));
            }
        }
        cvShowImage("12 layer after NMS", originImg1);
        cvMoveWindow("12 layer after NMS", 500, 10);

        // 24 layer, 24 calibration, NMS
        for (i = 0; i< counter; i++){
            if (window[i].dropped == true) continue;

            cvSetImageROI(srcImg, cvRect(window[i].x1, window[i].y1, window[i].x2 - window[i].x1, window[i].y2 - window[i].y1));
            cvResize(srcImg, input24Img, CV_INTER_AREA);

            data24 = (uchar*) input24Img->imageData;

            preprocess(img24, data24, 0, 0, input24Img->widthStep, input24Img->nChannels, 24);
            res_24Layer = Layer24(img24, 24, 24, input24Img->nChannels);

            // 24 layer passed
            if (res_24Layer > Threshold_24Layer){
                // 24 calibration
                out_24c = CaliLayer24(img24, 24, 24, input24Img->nChannels, Threshold_24CalibrationLayer);
                s = out_24c[0];
                x = out_24c[1];
                y = out_24c[2];
                free(out_24c);

                cali_x = window[i].x1 - x * (window[i].x2 - window[i].x1) / s;
                cali_y = window[i].y1 - y * (window[i].y2 - window[i].y1) / s;
                cali_w = (window[i].x2 - window[i].x1) / s;
                cali_h = (window[i].y2 - window[i].y1) / s;

                // make sure the calibrated window is not beyond the image boundary
                if (cali_x >= WIDTH || cali_y >= HEIGHT) continue;

                cali_x = max(cali_x, 0);
                cali_y = max(cali_y, 0);
                cali_w = min(cali_w, WIDTH - cali_x);
                cali_h = min(cali_h, HEIGHT - cali_y);

                window[i].x1 = cali_x;                    // x1
                window[i].y1 = cali_y;                    // y1
                window[i].x2 = cali_x + cali_w;           // x2
                window[i].y2 = cali_y + cali_h;           // y2
                window[i].score = res_24Layer;            // 24 layer score
                window[i].iou = 0.0;                      // iou ratio
                window[i].dropped= false;                 // if it's dropped

            }
            else
            {
                window[i].dropped = true;
            }

            cvResetImageROI(srcImg);
        }

        printf("24 layer: x1: %d, y1: %d, x2: %d, y2: %d\n", window[15].x1, window[15].y1, window[15].x2, window[15].y2);

        // NMS after 24 calibration
        nms(window, counter, Threshold_24NMS);

        // display sorted windows surviving 24 layer
        cvNamedWindow("24 layer", CV_WINDOW_AUTOSIZE);
        for (i = 0; i < counter; i++){
            if (window[i].dropped == false){
                cvRectangle(originImg2, cvPoint(window[i].x1, window[i].y1), cvPoint(window[i].x2, window[i].y2), cvScalar(255,0,0,0), 2, 4, 0);

                sprintf(word, "%.2f", window[i].score);
                cvPutText(originImg2, word, cvPoint(window[i].x1, window[i].y1), &font, cvScalar(255, 255, 255, 0));
            }
        }
        cvShowImage("24 layer", originImg2);
        cvMoveWindow("24 layer", 10, 400);
        // end of 24 layer, 24 calibration, NMS


        // 48 layer, 48 calibration, NMS
        for (i = 0; i< counter; i++){
            if (window[i].dropped == true) continue;

            cvSetImageROI(srcImg, cvRect(window[i].x1, window[i].y1, window[i].x2 - window[i].x1, window[i].y2 - window[i].y1));
            cvResize(srcImg, input48Img, CV_INTER_AREA);

            data48 = (uchar*) input48Img->imageData;

            preprocess(img48, data48, 0, 0, input48Img->widthStep, input48Img->nChannels, 48);
            res_48Layer = Layer48(img48, 48, 48, input48Img->nChannels);

            // 48 layer passed
            if (res_48Layer > Threshold_48Layer){
                // 48 calibration
                out_48c = CaliLayer48(img48, 48, 48, input48Img->nChannels, Threshold_48CalibrationLayer);

                s = out_48c[0];
                x = out_48c[1];
                y = out_48c[2];
                free(out_48c);

                cali_x = window[i].x1 - x * (window[i].x2 - window[i].x1) / s;
                cali_y = window[i].y1 - y * (window[i].y2 - window[i].y1) / s;
                cali_w = (window[i].x2 - window[i].x1) / s;
                cali_h = (window[i].y2 - window[i].y1) / s;

                // make sure the calibrated window is not beyond the image boundary
                if (cali_x >= WIDTH || cali_y >= HEIGHT) { window[i].dropped = true; continue; }

                cali_x = max(cali_x, 0);
                cali_y = max(cali_y, 0);
                cali_w = min(cali_w, WIDTH - cali_x);
                cali_h = min(cali_h, HEIGHT - cali_y);

                window[i].x1 = cali_x;                    // x1
                window[i].y1 = cali_y;                    // y1
                window[i].x2 = cali_x + cali_w;           // x2
                window[i].y2 = cali_y + cali_h;           // y2
                window[i].score = res_48Layer;            // 48 layer score
                window[i].iou = 0.0;                      // iou ratio
                window[i].dropped= false;                 // if it's dropped
            }
            else
            {
                window[i].dropped = true;
            }

            cvResetImageROI(srcImg);
        }

        // NMS after 48 calibration
        nms(window, counter, Threshold_48NMS);

        // display sorted windows surviving 48 layer
        cvNamedWindow("48 layer", CV_WINDOW_AUTOSIZE);
        for (i = 0; i < counter; i++){
            if (window[i].dropped == false){
                cvRectangle(originImg3, cvPoint(window[i].x1, window[i].y1), cvPoint(window[i].x2, window[i].y2), cvScalar(255,0,0,0), 2, 4, 0);

                sprintf(word, "%.2f", window[i].score);
                cvPutText(originImg3, word, cvPoint(window[i].x1, window[i].y1), &font, cvScalar(255, 255, 255, 0));
            }
        }
        cvShowImage("48 layer", originImg3);
        cvMoveWindow("48 layer", 500, 400);
        // end of 48 layer, 48 calibration, NMS

        printf("48 layer: x1: %d, y1: %d, x2: %d, y2: %d, dropped: %s\n", window[15].x1, window[15].y1, window[15].x2, window[15].y2, window[15].dropped?"true":"false");







        cvWaitKey(0);
        cvDestroyWindow("12 layer");
        cvDestroyWindow("12 layer after NMS");
        cvDestroyWindow("24 layer");
        cvDestroyWindow("48 layer");

        pyr_rate *= 2;
        if (dstImg->height / 2 < 12) flagStop = true;
    }
    // image pyramid loop ends

    freeArray(img, 12);
    freeArray(img24, 24);
    freeArray(img48, 48);

    return 0;
}
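The calibrate-and-clamp arithmetic above appears nearly verbatim after each of the 12-, 24- and 48-net stages. A sketch of it factored into one helper (the name is illustrative; struct Windows, max and min are the ones used in model.c):

// Shift a window by the calibration output (s, x, y) and clamp it to the
// image. Returns 0 if the shifted window starts outside the image.
static int calibrateWindow(struct Windows *w, float s, float x, float y,
                           int imgWidth, int imgHeight){
    int cali_w = (w->x2 - w->x1) / s;
    int cali_h = (w->y2 - w->y1) / s;
    int cali_x = w->x1 - x * (w->x2 - w->x1) / s;
    int cali_y = w->y1 - y * (w->y2 - w->y1) / s;

    if (cali_x >= imgWidth || cali_y >= imgHeight) return 0;

    cali_x = max(cali_x, 0);
    cali_y = max(cali_y, 0);
    cali_w = min(cali_w, imgWidth - cali_x);
    cali_h = min(cali_h, imgHeight - cali_y);

    w->x1 = cali_x;
    w->y1 = cali_y;
    w->x2 = cali_x + cali_w;
    w->y2 = cali_y + cali_h;
    return 1;
}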
Example #8
int main(int argc, char* argv[]) {
    CvMemStorage *contStorage = cvCreateMemStorage(0);
    CvSeq *contours;
    CvTreeNodeIterator polyIterator;

    CvMemStorage *mallet_storage;
	CvSeq *mallet_circles = 0;
	float *mallet_p;
	int mi;

    int found = 0;
    int i;
    CvPoint poly_point;
	int fps=30;

	int npts[2] = { 4, 12 };
	CvPoint **pts;

	pts = (CvPoint **) cvAlloc (sizeof (CvPoint *) * 2);
	pts[0] = (CvPoint *) cvAlloc (sizeof (CvPoint) * 4);
	pts[1] = (CvPoint *) cvAlloc (sizeof (CvPoint) * 12);
	pts[0][0] = cvPoint(0,0);
	pts[0][1] = cvPoint(160,0);
	pts[0][2] = cvPoint(320,240);
	pts[0][3] = cvPoint(0,240);
	pts[1][0] = cvPoint(39,17);
	pts[1][1] = cvPoint(126,15);
	pts[1][2] = cvPoint(147,26);
	pts[1][3] = cvPoint(160,77);
	pts[1][4] = cvPoint(160,164);
	pts[1][5] = cvPoint(145,224);
	pts[1][6] = cvPoint(125,233);
	pts[1][7] = cvPoint(39,233);
	pts[1][8] = cvPoint(15,217);
	pts[1][9] = cvPoint(0,133);
	pts[1][10] = cvPoint(0,115);
	pts[1][11] = cvPoint(17,28);

	// polyline approximation
    CvMemStorage *polyStorage = cvCreateMemStorage(0);
    CvSeq *polys, *poly;

	// OpenCV variables
	CvFont font;

    printf("start!\n");

	//pwm initialize
	if(gpioInitialise() < 0) return -1;
	//pigpio CW/CCW pin setup
	//X:18, Y1:14, Y2:15
	gpioSetMode(18, PI_OUTPUT);
	gpioSetMode(14, PI_OUTPUT);
	gpioSetMode(15, PI_OUTPUT);
	//pigpio pulse setup
	//X:25, Y1:23, Y2:24
	gpioSetMode(25, PI_OUTPUT);
	gpioSetMode(23, PI_OUTPUT);
	gpioSetMode(24, PI_OUTPUT);
	//limit-switch setup
	gpioSetMode(5, PI_INPUT);
	gpioWrite(5, 0);
	gpioSetMode(6, PI_INPUT);
	gpioWrite(6, 0);
	gpioSetMode(7, PI_INPUT);
	gpioWrite(7, 0);
	gpioSetMode(8, PI_INPUT);
	gpioWrite(8, 0);
	gpioSetMode(13, PI_INPUT);
	gpioSetMode(19, PI_INPUT);
	gpioSetMode(26, PI_INPUT);
	gpioSetMode(21, PI_INPUT);

	CvCapture* capture_robot_side = cvCaptureFromCAM(0);
	CvCapture* capture_human_side = cvCaptureFromCAM(1);
    if(capture_robot_side == NULL){
		std::cout << "Robot Side Camera Capture FAILED" << std::endl;
		return -1;
	 }
	if(capture_human_side ==NULL){
		std::cout << "Human Side Camera Capture FAILED" << std::endl;
		return -1;
	}

	// set the capture size
    cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH);
	cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT);
	//set the fps
	cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FPS,fps);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FPS,fps);

	// create windows for displaying the images
	//cvNamedWindow("Previous Image", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("Now Image", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("pack", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("mallet", CV_WINDOW_AUTOSIZE);
	cvNamedWindow ("Poly", CV_WINDOW_AUTOSIZE);

	//Create trackbar to change brightness
	int iSliderValue1 = 50;
	cvCreateTrackbar("Brightness", "Now Image", &iSliderValue1, 100);
	//Create trackbar to change contrast
	int iSliderValue2 = 50;
	cvCreateTrackbar("Contrast", "Now Image", &iSliderValue2, 100);
	//pack threshold 0, 50, 120, 220, 100, 220
	int iSliderValuePack1 = 54; //80;
	cvCreateTrackbar("minH", "pack", &iSliderValuePack1, 255);
	int iSliderValuePack2 = 84;//106;
	cvCreateTrackbar("maxH", "pack", &iSliderValuePack2, 255);
	int iSliderValuePack3 = 100;//219;
	cvCreateTrackbar("minS", "pack", &iSliderValuePack3, 255);
	int iSliderValuePack4 = 255;//175;
	cvCreateTrackbar("maxS", "pack", &iSliderValuePack4, 255);
	int iSliderValuePack5 = 0;//29;
	cvCreateTrackbar("minV", "pack", &iSliderValuePack5, 255);
	int iSliderValuePack6 = 255;//203;
	cvCreateTrackbar("maxV", "pack", &iSliderValuePack6, 255);
	//mallet threshold 0, 255, 100, 255, 140, 200
	int iSliderValuemallet1 = 107;
	cvCreateTrackbar("minH", "mallet", &iSliderValuemallet1, 255);
	int iSliderValuemallet2 = 115;
	cvCreateTrackbar("maxH", "mallet", &iSliderValuemallet2, 255);
	int iSliderValuemallet3 = 218;//140
	cvCreateTrackbar("minS", "mallet", &iSliderValuemallet3, 255);
	int iSliderValuemallet4 = 255;
	cvCreateTrackbar("maxS", "mallet", &iSliderValuemallet4, 255);
	int iSliderValuemallet5 = 0;
	cvCreateTrackbar("minV", "mallet", &iSliderValuemallet5, 255);
	int iSliderValuemallet6 = 255;
	cvCreateTrackbar("maxV", "mallet", &iSliderValuemallet6, 255);

	// declare the image pointers
	IplImage* img_robot_side = cvQueryFrame(capture_robot_side);
	IplImage* img_human_side = cvQueryFrame(capture_human_side);
	IplImage* img_all_round = cvCreateImage(cvSize(CAM_PIX_WIDTH, CAM_PIX_2HEIGHT), IPL_DEPTH_8U, 3);
	IplImage* tracking_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* img_all_round2  = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* show_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);

	cv::Mat mat_frame1;
	cv::Mat mat_frame2;
	cv::Mat dst_img_v;
	cv::Mat dst_bright_cont;
	int iBrightness  = iSliderValue1 - 50;
	double dContrast = iSliderValue2 / 50.0;
	IplImage* dst_img_frame = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* grayscale_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 1);
	IplImage* poly_tmp = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 1);
	IplImage* poly_dst = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 3);
	IplImage* poly_gray = cvCreateImage( cvGetSize(img_all_round),IPL_DEPTH_8U,1);

	int rotate_times = 0;
	//IplImage* -> Mat
	mat_frame1 = cv::cvarrToMat(img_robot_side);
	mat_frame2 = cv::cvarrToMat(img_human_side);
	//Flip vertically and horizontally. In the production environment, flip mat_frame1
	cv::flip(mat_frame1, mat_frame1, 0); //flip around the horizontal axis (vertical flip)
	cv::flip(mat_frame1, mat_frame1, 1); //flip around the vertical axis (horizontal flip)
	vconcat(mat_frame2, mat_frame1, dst_img_v);

	dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //convert the concatenated image
	//image dilation and erosion
//	cv::Mat close_img;
//	cv::Mat element(3,3,CV_8U, cv::Scalar::all(255));
//	cv::morphologyEx(dst_img_v, close_img, cv::MORPH_CLOSE, element, cv::Point(-1,-1), 3);
//	cv::imshow("morphologyEx", dst_img_v);
//	dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //convert the concatenated image

	//Convert the brightness-adjusted result (Mat -> IplImage*) and hand it over, then release it.
	*img_all_round = dst_bright_cont;

	cv_ColorExtraction(img_all_round, dst_img_frame, CV_BGR2HSV, 0, 11, 180, 255, 0, 255);

	cvCvtColor(dst_img_frame, grayscale_img, CV_BGR2GRAY);
	cv_Labelling(grayscale_img, tracking_img);

	cvCvtColor(tracking_img, poly_gray, CV_BGR2GRAY);

	cvCopy( poly_gray, poly_tmp);
	cvCvtColor( poly_gray, poly_dst, CV_GRAY2BGR);

	//image dilation and erosion
	//cvMorphologyEx(tracking_img, tracking_img,)

	// contour extraction
	found = cvFindContours( poly_tmp, contStorage, &contours, sizeof( CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

	// polyline approximation
	polys = cvApproxPoly( contours, sizeof( CvContour), polyStorage, CV_POLY_APPROX_DP, 8, 10);

	cvInitTreeNodeIterator( &polyIterator, ( void*)polys, 10);
	poly = (CvSeq *)cvNextTreeNode( &polyIterator);
	printf("sort before by X\n");
	for( i=0; i<poly->total; i++)
	{
		poly_point = *( CvPoint*)cvGetSeqElem( poly, i);
		cvCircle( poly_dst, poly_point, 1, CV_RGB(255, 0 , 255), -1);
		cvCircle( poly_dst, poly_point, 8, CV_RGB(255, 0 , 255));
		std::cout << "x:" << poly_point.x << ", y:" << poly_point.y  << std::endl;
	}
	printf("Poly FindTotal:%d\n",poly->total);

	//Decide the frame coordinates
	//upper left,  wall side: upper_left_f
	//upper left,  goal side: upper_left_g
	//upper right, wall side: upper_right_f
	//upper right, goal side: upper_right_g
	//lower left,  wall side: lower_left_f
	//lower left,  goal side: lower_left_g
	//lower right, wall side: lower_right_f
	//lower right, goal side: lower_right_g
	CvPoint upper_left_f, upper_left_g, upper_right_f, upper_right_g,
			lower_left_f, lower_left_g, lower_right_f, lower_right_g,
			robot_goal_left, robot_goal_right;

	CvPoint frame_points[8];
//	if(poly->total == 8){
//		for( i=0; i<8; i++){
//			poly_point = *( CvPoint*)cvGetSeqElem( poly, i);
//			frame_points[i] = poly_point;
//		}
//		qsort(frame_points, 8, sizeof(CvPoint), compare_cvpoint);
//		printf("sort after by X\n");
//		for( i=0; i<8; i++){
//			std::cout << "x:" << frame_points[i].x << ", y:" << frame_points[i].y  << std::endl;
//		}
//		if(frame_points[0].y < frame_points[1].y){
//			upper_left_f = frame_points[0];
//			lower_left_f = frame_points[1];
//		}
//		else{
//			upper_left_f = frame_points[1];
//			lower_left_f = frame_points[0];
//		}
//		if(frame_points[2].y < frame_points[3].y){
//			upper_left_g = frame_points[2];
//			lower_left_g = frame_points[3];
//		}
//		else{
//			upper_left_g = frame_points[3];
//			lower_left_g = frame_points[2];
//		}
//		if(frame_points[4].y < frame_points[5].y){
//			upper_right_g = frame_points[4];
//			lower_right_g = frame_points[5];
//		}
//		else{
//			upper_right_g = frame_points[5];
//			lower_right_g = frame_points[4];
//		}
//		if(frame_points[6].y < frame_points[7].y){
//			upper_right_f = frame_points[6];
//			lower_right_f = frame_points[7];
//		}
//		else{
//			upper_right_f = frame_points[7];
//			lower_right_f = frame_points[6];
//		}
//	}
//	else{
		printf("Frame is not 8 Point\n");
		upper_left_f = cvPoint(26, 29);
		upper_right_f =  cvPoint(136, 29);
		lower_left_f = cvPoint(26, 220);
		lower_right_f =  cvPoint(136, 220);

		upper_left_g = cvPoint(38, 22);
		upper_right_g = cvPoint(125, 22);
		lower_left_g =  cvPoint(38, 226);
		lower_right_g = cvPoint(125, 226);

		robot_goal_left = cvPoint(60, 226);
		robot_goal_right = cvPoint(93, 226);

//		cvCopy(img_all_round, show_img);
//		cvLine(show_img, upper_left_f, upper_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, lower_left_f, lower_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, upper_right_f, lower_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, upper_left_f, lower_left_f, CV_RGB( 255, 255, 0 ));
//
//		cvLine(show_img, upper_left_g, upper_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, lower_left_g, lower_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, upper_right_g, lower_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, upper_left_g, lower_left_g, CV_RGB( 0, 255, 0 ));

		//while(1){
			//cvShowImage("Now Image", show_img);
			//cvShowImage ("Poly", poly_dst);
			//if(cv::waitKey(1) >= 0) {
				//break;
			//}
		//}
		//return -1;
//	}
	printf("upper_left_fX:%d, Y:%d\n",upper_left_f.x, upper_left_f.y);
	printf("upper_left_gX:%d, Y:%d\n",upper_left_g.x, upper_left_g.y);
	printf("upper_right_fX:%d,Y:%d\n", upper_right_f.x, upper_right_f.y);
	printf("upper_right_gX:%d, Y:%d\n" , upper_right_g.x, upper_right_g.y);
	printf("lower_left_fX:%d, Y:%d\n", lower_left_f.x, lower_left_f.y);
	printf("lower_left_gX:%d, Y:%d\n", lower_left_g.x, lower_left_g.y);
	printf("lower_right_fX:%d, Y:%d\n", lower_right_f.x, lower_right_f.y);
	printf("lower_right_gX:%d, Y:%d\n", lower_right_g.x, lower_right_g.y);
	printf("robot_goal_left:%d, Y:%d\n", robot_goal_left.x, robot_goal_left.y);
	printf("robot_goal_right:%d, Y:%d\n", robot_goal_right.x, robot_goal_right.y);

    cvReleaseImage(&dst_img_frame);
    cvReleaseImage(&grayscale_img);
    cvReleaseImage(&poly_tmp);
    cvReleaseImage(&poly_gray);

    cvReleaseMemStorage(&contStorage);
    cvReleaseMemStorage(&polyStorage);
	//return 1;
	// Init font
	cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 0.4,0.4,0,1);
	bool is_pushed_decision_button = 1; //to be replaced by the signal from the other Raspberry Pi

	while(1){
		//Start when the decision button is pressed
		if(gpioRead(8)==0 && is_pushed_decision_button==1){
			cvCopy(img_all_round, img_all_round2);
			cvCopy(img_all_round, show_img);
			img_robot_side = cvQueryFrame(capture_robot_side);
			img_human_side = cvQueryFrame(capture_human_side);
			//IplImage* -> Mat
			mat_frame1 = cv::cvarrToMat(img_robot_side);
			mat_frame2 = cv::cvarrToMat(img_human_side);
			//Flip vertically and horizontally. In production, flip mat_frame1.
			cv::flip(mat_frame1, mat_frame1, 0); //flip around the horizontal axis (vertical flip)
			cv::flip(mat_frame1, mat_frame1, 1); //flip around the vertical axis (horizontal flip)
			vconcat(mat_frame2, mat_frame1, dst_img_v);

			iBrightness  = iSliderValue1 - 50;
			dContrast = iSliderValue2 / 50.0;
			dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //convert the concatenated image
			//Convert the brightness-adjusted result (Mat -> IplImage*) and hand it over; release afterwards.
			*img_all_round = dst_bright_cont;
			mat_frame1.release();
			mat_frame2.release();
			dst_img_v.release();

			cvFillPoly(img_all_round, pts, npts, 2, CV_RGB(0, 0, 0));

			IplImage* dst_img_mallet = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
			IplImage* dst_img_pack = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
			IplImage* dst_img2_mallet = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3);
			IplImage* dst_img2_pack = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3);

			cv_ColorExtraction(img_all_round, dst_img_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6);
			cv_ColorExtraction(img_all_round, dst_img_mallet, CV_BGR2HSV, iSliderValuemallet1, iSliderValuemallet2, iSliderValuemallet3, iSliderValuemallet4, iSliderValuemallet5, iSliderValuemallet6);
			cv_ColorExtraction(img_all_round2, dst_img2_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6);

			//CvMoments moment_mallet;
			CvMoments moment_pack;
			CvMoments moment_mallet;
			CvMoments moment2_pack;
			//cvSetImageCOI(dst_img_mallet, 1);
			cvSetImageCOI(dst_img_pack, 1);
			cvSetImageCOI(dst_img_mallet, 1);
			cvSetImageCOI(dst_img2_pack, 1);

			//cvMoments(dst_img_mallet, &moment_mallet, 0);
			cvMoments(dst_img_pack, &moment_pack, 0);
			cvMoments(dst_img_mallet, &moment_mallet, 0);
			cvMoments(dst_img2_pack, &moment2_pack, 0);

			//Coordinate calculation
			double m00_before = cvGetSpatialMoment(&moment2_pack, 0, 0);
			double m10_before = cvGetSpatialMoment(&moment2_pack, 1, 0);
			double m01_before = cvGetSpatialMoment(&moment2_pack, 0, 1);
			double m00_after = cvGetSpatialMoment(&moment_pack, 0, 0);
			double m10_after = cvGetSpatialMoment(&moment_pack, 1, 0);
			double m01_after = cvGetSpatialMoment(&moment_pack, 0, 1);
			double gX_before = m10_before/m00_before;
			double gY_before = m01_before/m00_before;
			double gX_after = m10_after/m00_after;
			double gY_after = m01_after/m00_after;
			double m00_mallet = cvGetSpatialMoment(&moment_mallet, 0, 0);
			double m10_mallet = cvGetSpatialMoment(&moment_mallet, 1, 0);
			double m01_mallet = cvGetSpatialMoment(&moment_mallet, 0, 1);
			double gX_now_mallet = m10_mallet/m00_mallet;
			double gY_now_mallet = m01_mallet/m00_mallet;

			int target_direction = -1; //target direction: clockwise = 1, counterclockwise = 0
			//draw the circles at 1/10 of the frame height
			cvCircle(show_img, cvPoint(gX_before, gY_before), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0);
			cvCircle(show_img, cvPoint(gX_now_mallet, gY_now_mallet), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0);
			cvLine(show_img, cvPoint(gX_before, gY_before), cvPoint(gX_after, gY_after), cvScalar(0,255,0), 2);
			cvLine(show_img, robot_goal_left, robot_goal_right, cvScalar(0,255,255), 2);
			printf("gX_after: %f\n",gX_after);
			printf("gY_after: %f\n",gY_after);
			printf("gX_before: %f\n",gX_before);
			printf("gY_before: %f\n",gY_before);
			printf("gX_now_mallet: %f\n",gX_now_mallet);
			printf("gY_now_mallet: %f\n",gY_now_mallet);
			int target_destanceY = CAM_PIX_2HEIGHT - 30; //keep the Y coordinate fixed: this is the defense line
			//The puck moves in a straight line, so use a linear function to predict its subsequent path.
			double a_inclination;
			double b_intercept;

			int closest_frequency;

			int target_coordinateX;
			int origin_coordinateY;
			int target_coordinateY;

			double center_line = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
			int left_frame = (upper_left_f.x + lower_left_f.x)/2;
			int right_frame = (upper_right_f.x + lower_right_f.x)/2;

			double y_line = (upper_left_f.y + lower_right_f.y)/3;
			double waiting_position = (robot_goal_left.x + lower_left_g.x) / 2;
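			//Sketch (assumption, not in the original excerpt): the a_inclination /
			//b_intercept declared above would realize the linear prediction, e.g.
			//  a_inclination = (gX_after - gX_before) / (gY_after - gY_before);
			//  b_intercept = gX_after - a_inclination * gY_after;
			//  target_coordinateX = (int)(a_inclination * target_destanceY + b_intercept);
			//guarding against gY_after == gY_before to avoid division by zero.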

			if(gY_after - gY_before < -1){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 600);
				target_coordinateX = waiting_position;
				if(waiting_position + 5 < gX_now_mallet){
					target_direction = 0;//counterclockwise
				}
				else if(gX_now_mallet < waiting_position - 5){
					target_direction = 1;//clockwise
				}
			}
			/*else if(robot_goal_right.x < gX_now_mallet){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1000);
				target_direction = 0;//counterclockwise
			}
			else if(gX_now_mallet < robot_goal_left.x){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1000);
				target_direction = 1;//clockwise
			}*/
			else if(y_line < gY_after && y_line > gY_before){
				clock_t start = clock();
				clock_t end;
				end = start + 0.5 * (target_coordinateX - robot_goal_left.x) / 10;
				target_direction = 1;
				gpioPWM(25, 128);
				gpioWrite(18, target_direction);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				while(clock() < end);//busy-wait until the time arrives
			}
			else{
				gpioPWM(25, 0);
				closest_frequency = gpioSetPWMfrequency(25, 0);
			}



			if(target_direction != -1){
				gpioWrite(18, target_direction);
			}
			//Draw the defense line
			cvLine(show_img, cvPoint(CAM_PIX_WIDTH, target_destanceY), cvPoint(0, target_destanceY), cvScalar(255,255,0), 2);
			//Draw the mallet's motion
			cvLine(show_img, cvPoint((int)gX_now_mallet, (int)gY_now_mallet), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);

			/*

			int amount_movement = target_coordinateX - gX_now_mallet;

			//when a limit switch triggers, set target_direction accordingly
//			if(gpioRead(6) == 1){//X-axis right
//				gpioPWM(25, 128);
//				closest_frequency = gpioSetPWMfrequency(25, 1500);
//				target_direction = 0;//counterclockwise
//				printf("X-axis right limit! counterclockwise\n");
//			}
//			else
			if(gpioRead(26) == 1){//X-axis left
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				target_direction = 1;//clockwise
				printf("X-axis left limit! clockwise\n");
			}
			else if(gpioRead(5) == 1){//Y-axis upper right
				gpioPWM(23, 128);
				gpioSetPWMfrequency(23, 1500);
				gpioWrite(14, 0);
				printf("Y-axis upper-right limit! clockwise\n");
			}
			else if(gpioRead(13) == 1){//Y-axis lower right
				gpioPWM(23, 128);
				gpioSetPWMfrequency(23, 1500);
				gpioWrite(14, 1);
				printf("Y-axis lower-right limit! counterclockwise\n");
			}
			else if(gpioRead(19) == 1){//Y-axis lower left
				gpioPWM(24, 128);
				gpioSetPWMfrequency(24, 1500);
				gpioWrite(15, 0);
				printf("Y-axis lower-left limit! clockwise\n");
			}

			else if(gpioRead(21) == 1){//Y-axis upper left
				gpioPWM(24, 0);
				gpioSetPWMfrequency(24, 1500);
				gpioWrite(15, 1);
				printf("Y-axis upper-left limit! counterclockwise\n");
			}
			else{
				//to keep the Y axis fixed
				gpioSetPWMfrequency(23, 0);
				gpioSetPWMfrequency(24, 0);

				if(amount_movement > 0){
					target_direction = 1;//clockwise
				}
				else if(amount_movement < 0){
					target_direction = 0;//counterclockwise
				}
			}
			if(target_direction != -1){
				gpioWrite(18, target_direction);
			}
			else{
				gpioPWM(24, 0);
				gpioSetPWMfrequency(24, 0);
			}
			printf("setting_frequency: %d\n", closest_frequency);*/

			// Display the images in their named windows
			//cvShowImage("Previous Image", img_all_round2);
			cvShowImage("Now Image", show_img);
			cvShowImage("pack", dst_img_pack);
			cvShowImage("mallet", dst_img_mallet);
			cvShowImage ("Poly", poly_dst);

			cvReleaseImage (&dst_img_mallet);
			cvReleaseImage (&dst_img_pack);
			cvReleaseImage (&dst_img2_mallet);
			cvReleaseImage (&dst_img2_pack);

			if(cv::waitKey(1) >= 0) {
				break;
			}
		}
		else{ //when a reset signal arrives
			is_pushed_decision_button = 0;
		}
    }

    gpioTerminate();

    cvDestroyAllWindows();

	//Clean up used CvCapture*
	cvReleaseCapture(&capture_robot_side);
	cvReleaseCapture(&capture_human_side);
    //Clean up used images
	cvReleaseImage(&poly_dst);
	cvReleaseImage(&tracking_img);
    cvReleaseImage(&img_all_round);
    cvReleaseImage(&img_human_side);
    cvReleaseImage(&img_all_round2);
    cvReleaseImage(&show_img);
    cvReleaseImage(&img_robot_side);
    cvFree(&pts[0]);
	cvFree(&pts[1]);
	cvFree(pts);

    return 0;
}
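
Note: the mallet/puck positions above come from spatial moments of a colour-extracted image. A minimal self-contained sketch of that pattern (maskCentroid is a hypothetical helper, not part of the original example):

static CvPoint2D32f maskCentroid(IplImage *mask) //single-channel mask
{
	CvMoments m;
	cvMoments(mask, &m, 0);
	double m00 = cvGetSpatialMoment(&m, 0, 0); //zeroth moment = mask area
	if (m00 == 0.0)
		return cvPoint2D32f(-1.f, -1.f); //empty mask: no centroid
	return cvPoint2D32f((float)(cvGetSpatialMoment(&m, 1, 0) / m00),
	                    (float)(cvGetSpatialMoment(&m, 0, 1) / m00));
}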
Пример #9
0
void display()	{
	Hi=cvQueryFrame (hit);	
	hi=Hi;
	//cvFlip(Hi,hi,1);
	/*cvCreateTrackbar(     "huehigher",	"blobbed lappy", &hh,180,harshit);
	cvCreateTrackbar(     "huelower",	"blobbed lappy", &hl,180,harshit);
	cvCreateTrackbar(     "saturationhigher",	"blobbed lappy", &sh,255,harshit);
	cvCreateTrackbar(     "saturationlower",	"blobbed lappy", &sl,255,harshit);
	cvCreateTrackbar(     "valuehiger",	"blobbed lappy",&vh,255,harshit);
	cvCreateTrackbar(     "valuelower",	"blobbed lappy",&vl,255,harshit);
	*/
	blobbing(hi,"binary lappy","blobbed lappy",0);	
	glClear (GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);

	float greaterx,greatery, greaterz , lesserx, lessery, lesserz;
	if (x<X)	{
		greaterx=x;
		greatery=y;
		greaterz=counthand1;
		lesserx=X;
		lessery=Y;
		lesserz=counthand2;
	}
	else	{
		greaterx=X;
		greatery=Y;
		greaterz=counthand2;
		lesserx=x;
		lessery=y;
		lesserz=counthand1;
	}
	
	GLfloat y1=-4 + (-1*(greaterx-320)*1.2/80);
	GLfloat y2= -4+(-1*(lesserx-320)*1.2/80);
	GLfloat sum=y1+y2,diff=y1-y2;

	GLfloat Centre1[]= {6-1*greaterz/scalingfactor,y1 , -1*(greatery-240)*1.2/60 };
	GLfloat Centre2[]= {6-1* lesserz/scalingfactor, y2, -1*(lessery-240)*1.2/60 };
	
	if (y1>-2)
		Centre1[1]=-2;
	if (y1<-6)
		Centre1[1]=-6;
	if(y2>-2)
		Centre2[1]=-2;
	if(y2<-6)
		Centre2[1]=-6;
	me.hand1next= Sphere( Centre1, .5);
	me.hand2next= Sphere( Centre2, .5);
	


	// Moving the computer's hands...


	//to pull the computer's hands suddenly backwards
	if (inmoveornot)
	{//inmoveornot=1;
	if (q1>x_initpos_hand1)
	 {	q1=q1-compspeed;
	
	 }
	else
	{	q1=x_initpos_hand1;
		
		handonedone=0;
		if (jumphandsup1==1)
		{
			jumphandsup1=0;
			tempcomp1[2]=tempcomp1[2]+.15;
		}
		else 
		{
			jumphandsup1=1;
			tempcomp1[2]=tempcomp1[2]-.15;
		}
	}
	comp.hand1next=Sphere(tempcomp1,0.5);
	tempcomp1[0]=q1;
	tempcomp1[1]=((q1-x_initpos_hand1)*1.0)/(xrandom_hand1-x_initpos_hand1)*(yrandom_hand1-y_initpos_hand1)+y_initpos_hand1;
	tempcomp1[2]=((q1-x_initpos_hand1)*1.0)/(xrandom_hand1-x_initpos_hand1)*(zrandom_hand1-z_initpos_hand1)+z_initpos_hand1;


	if (q2>x_initpos_hand2)
	 {	q2=q2-compspeed;
	 }
	else
	{	q2=x_initpos_hand2;
		handtwodone=0;
		if (jumphandsup2==1)
		{
			jumphandsup2=0;
			tempcomp2[2]=tempcomp2[2]+.15;
		}
		else 
		{
			jumphandsup2=1;
			tempcomp2[2]=tempcomp2[2]-.15;
		}
	}
	comp.hand2next=Sphere(tempcomp2,0.5);
	tempcomp2[0]=q2;
	tempcomp2[1]=((q2-x_initpos_hand2)*1.0)/(xrandom_hand2-x_initpos_hand2)*(yrandom_hand2-y_initpos_hand2)+y_initpos_hand2;
	tempcomp2[2]=((q2-x_initpos_hand2)*1.0)/(xrandom_hand2-x_initpos_hand2)*(zrandom_hand2-z_initpos_hand2)+z_initpos_hand2;
	if (handonedone==0 && handtwodone==0)
		{   
			inmoveornot=0;
			handonedone=handtwodone=1;
		}
	
	

	}
	else
	{
	if (count1==1)  {	 
		srand((unsigned)time (NULL));
		xrandom_hand1 =rand()%2+5;
		yrandom_hand1 =rand()%1-3.5; //rand()%1 is always 0, so this is just -3.5
		zrandom_hand1 =rand()%1;     //likewise always 0
		count1 =0;
	}
	
	tempcomp1[0]=q1;
	tempcomp1[1]=((q1-x_initpos_hand1)*1.0)/(xrandom_hand1-x_initpos_hand1)*(yrandom_hand1-y_initpos_hand1)+y_initpos_hand1;
	tempcomp1[2]=((q1-x_initpos_hand1)*1.0)/(xrandom_hand1-x_initpos_hand1)*(zrandom_hand1-z_initpos_hand1)+z_initpos_hand1;
			
	if (q1>=abs(xrandom_hand1-x_initpos_hand1)||decreasestarthand1) {						
		q1=q1-compspeed;
		decreasestarthand1=1;
	}
	else {
		q1=compspeed+q1; 
	};
					
	if (q1<=0) {
		q1=0;
		decreasestarthand1=0;
		count1 =1;
	}
	
	comp.hand1next=Sphere(tempcomp1,0.5);
	



	if (count2==1) {
		 srand((unsigned)(time (NULL)*13));
		 xrandom_hand2 =-1*rand()%2-5;
		 yrandom_hand2 =rand()%1-4.5; //rand()%1 is always 0, so this is just -4.5
		 zrandom_hand2 =rand()%1;     //likewise always 0
		  count2 =0;
	}
	tempcomp2[0]=q2;
	tempcomp2[1]=((q2-x_initpos_hand2)*1.0)/(xrandom_hand2-x_initpos_hand2)*(yrandom_hand2-y_initpos_hand2)+y_initpos_hand2;
	tempcomp2[2]=((q2-x_initpos_hand2)*1.0)/(xrandom_hand2-x_initpos_hand2)*(zrandom_hand2-z_initpos_hand2)+z_initpos_hand2;
			
	if (q2>=abs(xrandom_hand2-x_initpos_hand2)||decreasestarthand2) {
		q2=q2-compspeed;
		 decreasestarthand2=1;
	}
	else {
		q2=q2+compspeed; 
	};
					
	if (q2<=0) {
		q2=0;
		decreasestarthand2=0;
		count2 =1;
	}
	
	comp.hand2next=Sphere(tempcomp2,0.5);
	}

	if (countforcolor>1)				//the screen colour stays red or cyan for a few iterations after a hit
	{
	countforcolor=0;
	increasingcountforcolor=0;
	mehit=0;
	comphit=0;
	nohit=1;
	}

	if (increasingcountforcolor==1)		//increases the no. of iterations
	{countforcolor++;
	}

	//In the else branches below, the colors are decided in the event of a collision
	//and increasingcountforcolor is initialised.

	//Determines collision of the hands with hands+bodies...
	if(isCollission(me.hand1next,me,comp) == false )
    {
		me.hand1=me.hand1next;
	}
	else {
	mehit=0;
	 nohit=0;
	 comphit=1;
	 increasingcountforcolor=1;
	 //printf("yeah the comp has been hit\n");
	}
	
	if(isCollission(me.hand2next,me,comp) == false )
	{
		me.hand2=me.hand2next;	
	}
	else
	{mehit=0;
	 nohit=0;
	 comphit=1;
	 increasingcountforcolor=1;
	 //printf("yeah the comp has been hit\n");
	}
	
	if(isCollission(comp.hand1next,comp,me) == false ) 
	{
		comp.hand1=comp.hand1next;
	}
	else
	{mehit=1;
	 nohit=0;
	 comphit=0;
	 increasingcountforcolor=1;
	 //printf("you have been hit\n");
	}
	if(isCollission( comp.hand2next,comp,me) == false ) 
	{
		comp.hand2=comp.hand2next;
	}
	else
	{mehit=1;
	nohit=0;
	comphit=0;
	increasingcountforcolor=1;
	//printf("sorry,sorry , sorry you have been hit\n");
	}

	init();
 	//displaying the hands and bodies...
	//me.dispPlayer();  we don't intend to display me's body
	headcolor=1;comphandcolor=0;myhandcolor=0;
	comp.dispPlayer();

	headcolor=0;comphandcolor=0;myhandcolor=1;
	me.hand1.dispSphere();
	me.hand2.dispSphere();

	headcolor=0;comphandcolor=1;myhandcolor=0;
	comp.hand1.dispSphere();
	comp.hand2.dispSphere();
	glLoadIdentity ();													
	gluLookAt (xpos, ypos, zpos, xdir, ydir, zdir, 0.0, 0,1);
	glutSwapBuffers();	
	glFlush();
	
	for(int i=0;i<score->height;i++){
		for(int j=0;j<score->width;j++){
			for(int k=0;k<score->nChannels;k++){
				score->imageData[i * score->widthStep +j  * score->nChannels +k]=0;	
			}
		}
	}
	sprintf(&stringme[11],"%d",me.score);
	sprintf(&stringcomp[9],"%d",comp.score);
	CvFont font;
	cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX,1,1);
	cvPutText(score,stringme,cvPoint(50,50),&font,cvScalar(255,0,0));
	cvPutText(score,stringcomp, cvPoint(370,50),&font,cvScalar(0,0,255));
	
	cvShowImage("Score",score);
	if (me.score>=1000)	{
		char stringmeend[22]="YOU WIN";							//deciding who wins
		for(int i=0;i<score->height;i++){
		for(int j=0;j<score->width;j++){
			for(int k=0;k<score->nChannels;k++){
				score->imageData[i * score->widthStep +j  * score->nChannels +k]=0;	
				}
			}
		}
		cvPutText(score,stringmeend,cvPoint(50,50),&font,cvScalar(255,0,0));
		cvShowImage("Score",score);
		cvWaitKey();
		exit (0);
	}
	if (comp.score>=1000) {
		char stringcompend[29]="YOU LOSE.";					//decides who loses
		for(int i=0;i<score->height;i++){
		for(int j=0;j<score->width;j++){
		for(int k=0;k<score->nChannels;k++){
				score->imageData[i * score->widthStep +j  * score->nChannels +k]=0;	
				}
			}
		}	
		cvPutText(score,stringcompend, cvPoint(370,50),&font,cvScalar(0,0,255));
		cvWaitKey();
		exit (0);
		}
	
}																																				
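
Note: the nested loops that zero score->imageData (repeated several times in this example) are equivalent to one call; a sketch, assuming score is a valid IplImage*:

cvZero(score); //writes 0 into every channel of every pixel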
Пример #10
0
void todecidescalingfactor() 
{	CvCapture *hiscaling=cvCreateCameraCapture(0);
	for (int w=0;w<400000000;w++); //crude busy-wait so the camera can warm up
	int getoutoflooptime=0;
	
	score=cvCreateImage(cvSize(600,120),IPL_DEPTH_8U,3);

	cvNamedWindow("Score",CV_WINDOW_AUTOSIZE);				//to create the window displaying our original instructions to the player for scalingfactor....
	cvMoveWindow("Score",300,650);
	for(int i=0;i<score->height;i++){
		for(int j=0;j<score->width;j++){
			for(int k=0;k<score->nChannels;k++){
				score->imageData[i * score->widthStep +j  * score->nChannels +k]=0;	
				}
			}
		}
	CvFont font;
	cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX,.4,.5);
	char stringmeend[]="The comp will wait for 8 sec so that you can set your hands in the forward position";
	cvPutText(score,stringmeend, cvPoint(0,40),&font,cvScalar(255,255,0));
	cvShowImage("Score",score);

	cvWaitKey(2000);
	
	while(1)
	{	if (getoutoflooptime>65)							//approx. equivalent to 8 seconds 
			break;
		cvWaitKey(10);
		imgscaling=cvQueryFrame(hiscaling);
		cvMoveWindow("harshits",280,140);
		cvShowImage("harshits",imgscaling);
		if (!imgscaling)
			break;
		getoutoflooptime++;
		
	}
	givedepth(imgscaling);
	getoutoflooptime=0;
	float forward=counthand1+counthand2;
	

	cvNamedWindow("Score",CV_WINDOW_AUTOSIZE);				//to create the window displaying our original instructions to the player for scalingfactor....
	cvMoveWindow("Score",300,650);
	for(int i=0;i<score->height;i++){
		for(int j=0;j<score->width;j++){
			for(int k=0;k<score->nChannels;k++){
				score->imageData[i * score->widthStep +j  * score->nChannels +k]=0;	
				}
			}
		}
	cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX,.4,.5);
	char stringmeend2[]="The comp will wait for 8 sec so that you can set your hands in the backward position";
	cvPutText(score,stringmeend2, cvPoint(0,40),&font,cvScalar(0,255,255));
	cvShowImage("Score",score);
	
	while(1)
	{	if (getoutoflooptime>65)
			break;
		cvWaitKey(10);
		imgscaling=cvQueryFrame(hiscaling);
		cvMoveWindow("harshits",280,140);
		cvShowImage("harshits",imgscaling);
		getoutoflooptime++;
	}
	givedepth(imgscaling);
	cvWaitKey(2000);
	cvDestroyWindow("harshits");
	float backward=counthand1+counthand2;
	printf("the forward is %d\n ",forward);
	printf("the backward is %d\n ",backward);
	scalingfactor=(forward-backward)/(2*8);
	printf("The scaling facator is =%f\n\n\n\n",scalingfactor);
}
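
Note: the ~8-second waits above are approximated by counting loop iterations, so the real duration depends on per-iteration cost. A sketch of an explicit-duration variant using OpenCV's tick counter (an alternative, not the original code):

double t0 = (double)cvGetTickCount();
while (((double)cvGetTickCount() - t0) / (cvGetTickFrequency() * 1e6) < 8.0) {
	cvWaitKey(10);                        //keep the GUI responsive
	imgscaling = cvQueryFrame(hiscaling); //keep grabbing frames
	if (imgscaling)
		cvShowImage("harshits", imgscaling);
}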
Пример #11
0
void CGUIFaceInVideoView::OnDraw(CDC* pDC)
{ 
	

	CGUIFaceInVideoDoc* pDoc = GetDocument();    

	ASSERT_VALID(pDoc);
	if (pDoc == NULL)
		return;

	CIplImage *pImgIn;
	CvPoint *pPointL, *pPointR;
	int nNameTag, nBestMatch; 
	char strMessage[200];
	

	CPerceptualVisionSystem *pPVS = pDoc->getPVS();
	
	bool bRes = pPVS->initializeNextFrame();
	if (bRes == false)
 		return;		// if you want to show it without processing, change this.

	sprintf(strMessage, "%i msec", pPVS->m_tBetweenFrames);
	OnDraw_LogWindow(pDC, strMessage, set_window(0,0), 0);


	pDoc->m_gui.check_start = pDoc->m_gui.check_start; // no-op self-assignment

	if (pDoc->m_gui.check_start == true)
	{
		int z;
		for (z=0; z < pPVS->m_pianoPlaying.m_nNumHands; z++)
			pPVS->m_pianoPlaying.m_Hand[z].destroy();

		pPVS->m_pianoPlaying.m_nNumHands = 0;

	}


	////////////AVI RECORDING//////////////////
	if (pDoc->m_recordCheck == true)  // For saving video file made of video frames
	{
		pPVS->m_imgIn.draw(pDC->m_hDC, set_window(0,3), false, false);
		OnDraw_LogWindow(pDC, "Start recording..", set_window(0,3), 1);

		pPVS->m_bRecording = true;
		if (pPVS->m_bRecordingStarted == false) 
		{

				// open a dialog box.
			TCHAR szFilter[] = _T ( "Movie Files (*.avi)" );
			CFileDialog *dlg = new CFileDialog( FALSE, _T("avi"), _T("*.avi"), OFN_ENABLESIZING | OFN_OVERWRITEPROMPT, szFilter, (CWnd*)this);
			char  strAviFullName[200], strAviLocalName[200];


			if(dlg->DoModal() == IDOK)
				strcpy(strAviFullName, dlg->GetPathName());
			delete dlg;

			pPVS->m_pCvVideoWriter = NULL;
			pPVS->m_pCvVideoWriter = cvCreateVideoWriter(strAviFullName, 
			// Uncompressed
			CV_FOURCC('D','I','B',' '), 10.0, cvSize(160,120) );

			pPVS->m_bRecordingStarted = true;// Open AVI file
		}

		//Add frame to AVI
		if (pPVS->m_pCvVideoWriter) 
			cvWriteFrame( pPVS->m_pCvVideoWriter, pPVS->m_imgIn.getIplImage() );

		return; 
	}
	else
	{
		pPVS->m_bRecording = false;
		if (pPVS->m_bRecordingStarted == true) 
		{
			pPVS->m_bRecordingStarted = false; // Close AVI
			if (pPVS->m_pCvVideoWriter)
				cvReleaseVideoWriter( &pPVS->m_pCvVideoWriter );
		}
	}
	////////////////END AVI RECORDING////////////////
	


///////////////////////
//	return;   //////// debuging.....
///////////////////////


/***************************************************************************************/
/*** Grab data from MIDI **************************************************/
/***************************************************************************************/

	int q, keyValue, octave;
	bool keyChange[200]; // bKeyChanged[nTotalPianoKeys]	//To indicate Changes only
								//For printing Purposes
	if (m_bMidiCheck==false)
	{
		m_Midi->clearAllEvents();
	}
	//DO MIDI INFORMATION
	if (USING_MIDI == 1 && m_bMidiCheck==true)
	{
		//Get MIDI Events
		m_Midi->getAllEvents();
		//Update User Selected Offsets
		pPVS->m_pianoPlaying.m_Piano.UpdateOctave(m_nKeyOffset, m_nOctave);

	
		
		//No Changes to Start
		for (q=0; q < 200; q++)
			keyChange[q] = false;
		
		//Go Through All MIDI events
		for (q=0; q < m_Midi->numNotes; q++)
		{
			//If Key Has Been Pressed, set it as True
			if (keyDown[int(m_Midi->noteArray[q])] == true)
			{
				keyDown[int(m_Midi->noteArray[q])] = false;
				keyChange[int(m_Midi->noteArray[q])] = true;	//Indicate Change
			}
			//Vice Versa
			else
			{
				keyDown[int(m_Midi->noteArray[q])] = true;
				keyChange[int(m_Midi->noteArray[q])] = true;	//Indicate Change
			}
		}

	}//END MIDI


/***************************************************************************************/
/*** Grab data from video camera / AVI **************************************************/
/***************************************************************************************/


	sprintf(strMessage, "%i: %i (%i) msec        ", pPVS->m_nFrame, pPVS->m_tBetweenFrames, pPVS->m_tBetweenFramesNow);		
//	OnDraw_LogWindow(pDC, strMessage, set_window(0,2), 1);
		

	//Motion Channel Calculations
	pPVS->m_chanMotion.updateImages(&pPVS->m_imgIn);
	pPVS->m_chanMotion.compute_dI();
	pPVS->m_chanMotion.compute_FG(3, 0);


	//////////////////////////PIANO PLAYING SECTION////////////////////////////
	
	//Update Images
	pPVS->m_pianoPlaying.updateImages(&pPVS->m_imgIn);

	//Do Background Initialization && Piano Key (width) Calibration
	if (runOnce == 0)
	{
		pPVS->m_pianoPlaying.setBackgroundImage(&pPVS->m_imgIn);
		runOnce++;
	}

	//Image Adjustment, based on Piano Orientation and Positioning
	pPVS->m_pianoPlaying.AdjustImages();

	//Update Control Values from GUI
	UpdateControlValues(&(pPVS->m_pianoPlaying));

	//Main Piano Playing Function
	pPVS->m_pianoPlaying.createHandsImage();
	m_bDetectHands = false;	//Reset Hand Detecting Flag

/*
// no Filter2D

	CIplImage img1; // , img2; 
	img1.initializeAs(&pPVS->m_imgBW);
//	img2.initializeAs(&pPVS->m_imgBW);

	IplImage *ipl;

	ipl = cvCreateImage( cvGetSize(pPVS->m_imgBW.getIplImage()), IPL_DEPTH_16S , 1 );

	cvAnd(pPVS->m_imgBW.getIplImage(), pPVS->m_pianoPlaying.m_imb_handsImage.getIplImage(), img1.getIplImage());
	cvSobel(img1.getIplImage(), ipl, 1,0,3);

	cvConvert (&ipl, img1.getIplImage());
	img1.draw(pDC->m_hDC, set_window(0,3));
	img1.destroy();

	cvReleaseImage(&ipl);
//	img2.destroy();

*/


#if 1  //<< Clustering


		CIplImage imc, imcClusters;
		imc.initializeAs(&pPVS->m_imgIn);
		imcClusters.initializeAs(&pPVS->m_imgBW);

        cvZero( imc.getIplImage() );

		pImgIn = & pPVS->m_pianoPlaying.m_imb_handsImage; // m_chanColour.m_imbSkinYCrCb; // ;
		CIplImage *pImgOut = & imcClusters;
        cvZero( pImgOut->getIplImage());

//		pImgIn->draw(pDC->m_hDC, set_window(0,3));

		int nClustersDetected, nMaxClusters;

// a)
		PVI_BLOB *blobs = new PVI_BLOB[5];
		nMaxClusters = 4;
		nClustersDetected = detectClusters_wKmeans( pImgIn, pImgOut, nMaxClusters, &nClustersDetected, &blobs);		
		delete[] blobs;



		colourizeClusters(*pImgOut, imc, nClustersDetected);
		imc.draw(pDC->m_hDC, set_window(0,1));
		sprintf(strMessage, "%i hands", nClustersDetected);
		OnDraw_LogWindow(pDC,strMessage, set_window(0,1), 0);
		
/*


//		nClustersDetected = clusterize_wKmeans( pImgIn, pImgOut, 2, 1, &imc); 
//		imc.draw(pDC->m_hDC, set_window(1,iActiveCamera+1));

// b)
		int nXclose=4, nYclose=4,  nCosine=0;
		nClustersDetected = detectClusters_wProximity( pImgIn, pImgOut, nXclose, nYclose,  nCosine, &imc);
		imc.draw(pDC->m_hDC, set_window(4,0));
		sprintf(strMessage, "%i", nClustersDetected);
		OnDraw_LogWindow(pDC,strMessage, set_window(4,0), 0);

		imc.destroy();
		imcClusters.destroy();

//		if (nClustersDetected > pPVS->m_pianoPlaying.m_nNumHands)
//			m_bDetectHands = true;

  */

#endif //>> Clustering



/***************************************************************************************/
/*** Use MIDI data do detect hand/finger **************************************************/
/***************************************************************************************/

		CvFont cvFont; 
		cvInitFont( &cvFont, CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0.0, 2 );
                        				
		CvPoint ptBtmLeft;



// To do :  midi for hand blob detection

	//DO MIDI INFORMATION
	if (USING_MIDI == 1 && m_bMidiCheck==true)
	{

		char keyPressString[200];	//Printable String
		int i=0;					//Print Counter

		numStrings=0;
	
		//FOR each possible MIDI Key
		for (q=0; q < 200; q++)
		{
			//Calculate Key and Octave Values
			keyValue = (q) % 12;
			octave = (q - keyValue)/12;

			//IF Key is ON
			if (keyDown[q] == true)
			{
				//Get Bounding Box of Key
				CvRect myRect = pPVS->m_pianoPlaying.m_Piano.ReturnKeyBounds(keyValue, octave);

				//Select Hand that this Key intersects with
				int selectedHand = pPVS->m_pianoPlaying.SelectHand(&myRect);

				//Highlight Selected Key
				pPVS->m_pianoPlaying.DrawBox(pPVS->m_pianoPlaying.m_imc_displayImage.getIplImage(), myRect.x, myRect.y, myRect.x + myRect.width, myRect.y + myRect.height, 3); 



				//Put Information to String
				if (selectedHand != -1)
				{
					int selectedFinger = pPVS->m_pianoPlaying.m_Hand[selectedHand].SelectFinger(&myRect);

					if (selectedFinger == -1)
						sprintf(keyPressString, "MIDI: %d -> Octave: %d, Key: %d -> Hand: %d", q, octave, keyValue, selectedHand);
					else
					{
						sprintf(keyPressString, "MIDI: %d -> Octave: %d, Key: %d -> Hand: %d, Finger: %d", q, octave, keyValue, selectedHand, selectedFinger+1);
						sprintf(strMessage, "%i", selectedFinger);
						ptBtmLeft = cvPoint(myRect.x, myRect.y-5);
						cvPutText( pPVS->m_pianoPlaying.m_imc_displayImage.getIplImage(), strMessage, ptBtmLeft, &cvFont, CV_RGB(0, 200, 200));
					}
				}
				else
					sprintf(keyPressString, "MIDI: %d -> Octave: %d, Key: %d", q, octave, keyValue);
					
				//IF Key Was JUST Pressed
				if (keyChange[q] == true)
				{
					//Write Information to File
					fprintf(pPVS->m_pianoPlaying.fDataFile, "%s\n", keyPressString);
				}

				//Add String To List
				if (numStrings < 200)
				{
					sprintf(MIDIString[numStrings], "%s", keyPressString); 
					numStrings++;
				}

			}//END IF
			//IF Key is OFF
			else
			{
				//Put Information to String
				sprintf(keyPressString, "Key Value: %d  -> Octave: %d, Key: %d - RELEASED", q, octave, keyValue);
				
				//IF Key Was JUST Released
				if (keyChange[q] == true)
					//Write Information to File
					fprintf(pPVS->m_pianoPlaying.fDataFile, "%s\n", keyPressString);

			}//END ELSE
		}//END FOR Each Key
	}//END MIDI
	



/***************************************************************************************/
/*** Draw all    **************************************************/
/***************************************************************************************/


	//DRAW IMAGES
	pPVS->m_pianoPlaying.m_imc_displayImage.draw(pDC->m_hDC, set_window(1, 0));
	pPVS->m_pianoPlaying.m_imc_backgroundImage.draw(pDC->m_hDC, set_window(2,0));
//	pPVS->m_pianoPlaying.m_imb_handsImage.draw(pDC->m_hDC, set_window(3,0));
//	pPVS->m_pianoPlaying.m_imb_edgeDetectedImage.draw(pDC->m_hDC, set_window(4,0));

	

/*	pPVS->m_pianoPlaying.m_imb_fingerDetectedImage.
		draw(pDC->m_hDC, set_window(1,1));	
	pPVS->m_pianoPlaying.m_imc_displayImage. 
		draw(pDC->m_hDC, set_window(2,1));
	pPVS->m_pianoPlaying.m_imc_backgroundImage. 
		draw(pDC->m_hDC, set_window(3,1));
*/
	pPVS->m_pianoPlaying.m_Piano.boxes.draw(pDC->m_hDC, set_window(1, 1));
	pPVS->m_pianoPlaying.m_Piano.polarizedImage.draw(pDC->m_hDC, set_window(2, 1));
	//pPVS->m_pianoPlaying.m_ORIMAGE.draw(pDC->m_hDC, set_window(3, 1));
	//pPVS->m_pianoPlaying.m_Hand[0].m_edgeImage.draw(pDC->m_hDC, set_window(3, 1));

	
	int f, h;
	if (pPVS->m_pianoPlaying.m_nNumHands > 0)
	{
		h=0;
		if (pPVS->m_pianoPlaying.m_Hand[h].ready == true)
		{
	//		pPVS->m_pianoPlaying.m_Hand[h].m_fingImg[f].draw(pDC->m_hDC, set_window(f, h+2));
			
			for (f=0; f < 5; f++)
			{
			
				pPVS->m_pianoPlaying.m_Hand[h].m_fingImg[f].draw(pDC->m_hDC, set_window(f, h+2));
				//cvSet( (pPVS->m_imgOut.getIplImage()), CV_RGB(0, 100*h, f*50), 
				//		(pPVS->m_pianoPlaying.m_Hand[h].m_fingImg[f].getIplImage()));
			}
		}
	}


	
	char clearString[150];
	int i;
	for (i=0; i < 150; i++)
		clearString[i] = ' ';
	//////////////////////////END ARJUN////////////////////////////

//
//	pPVS->m_imgIn.draw(pDC->m_hDC, set_window(0,0), false, false);
	pPVS->m_imgOut.draw(pDC->m_hDC, set_window(0,0), false, false);



	nNameTag = pDoc->m_gui.slider4;//-1;

// ************************************************************************************************

	if (!pDoc->m_gui.check_start)  // It has to be checked to start the video processing
		return;
	

// ************************************************************************************************

	pDoc->m_bWorkWithFiles = false;
		
	if (m_bTraceMode == true && pDoc->m_gui.check2)  // if scenario, wait to continue			
		if (MessageBox("Proceed to next face?",NULL,MB_OKCANCEL) != IDOK)
			exit(1);	




}
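
Note: the AVI-recording branch above reduces to the usual writer life cycle. A minimal sketch (the file name, FPS, frame size and someFrame are placeholders):

CvVideoWriter *w = cvCreateVideoWriter("out.avi", CV_FOURCC('D','I','B',' '), //uncompressed
                                       10.0, cvSize(160, 120), 1 /*isColor*/);
if (w) {
	cvWriteFrame(w, someFrame);  //one call per frame; someFrame is a hypothetical IplImage*
	cvReleaseVideoWriter(&w);    //closes and finalizes the AVI
}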
Пример #12
0
/*************************************************
  Main body of vision-server
    Captures camera data, runs image processing, and waits for socket connections
************************************************/
int main (int argc, char **argv){
  CvSize size;
  int step;
  CvCapture *cap;
  IplImage *capture_image;
  IplImage *frame_image;
  IplImage *processed_image;
  IplImage *grayImage; 
  IplImage *binaryImage;
  unsigned char* binarydata;

  CvFont font;
  char text[50];
  char hostname[30];
  int s, i, port = 9000;
  pthread_t tid;

  /*** Socket communication setup (start) ***/
  for (i=1;i<argc;i++){
    if (strcmp("-port", argv[i]) == 0) {
      port=atoi(argv[++i]);
    }}
  gethostname(hostname, sizeof(hostname));
  s = init_socket_server(hostname, &port);
  fprintf(stderr, "hostname %s\n", hostname);
  for (i=0; i< MAX_SOCKET ; i++) sockets[i].type=0;
  //accept connections in a thread
  fprintf(stderr, "Waiting connection...\n");
  pthread_create(&tid, NULL, acceptor, (void *)s);
  /*** Socket communication setup (end) ***/

  /** Prepare the semaphores ***/
  raw_semaphore = semget((key_t)1111, 1, 0666|IPC_CREAT);
  if(raw_semaphore == -1){
    perror("semget failure");
    exit(EXIT_FAILURE);
  }
  process_semaphore = semget((key_t)1111, 1, 0666|IPC_CREAT);
  if(process_semaphore == -1){
    perror("semget failure");
    exit(EXIT_FAILURE);
  }
  union semun semunion;
  semunion.val = 0;  //initial semaphore value
  if(semctl(raw_semaphore, 0, SETVAL, semunion) == -1){
    perror("semctl(init) failure");
    exit(EXIT_FAILURE);
  }
  if(semctl(process_semaphore, 0, SETVAL, semunion) == -1){
    perror("semctl(init) failure");
    exit(EXIT_FAILURE);
  }
  /** Semaphore setup (end) ***/

  /** Camera and image-capture setup (start) ***/
  //camera initialization 
  if((cap = cvCreateCameraCapture(-1))==NULL){
    printf("Couldn't find any camera.\n");
    return -1;
  }
  capture_image = cvQueryFrame(cap);
  width = capture_image->width;
  height = capture_image->height;
  fprintf(stderr, "height %d, width %d\n", height, width);
  fprintf(stderr, "process height %d, process width %d\n", process_height, process_width);
  /** Camera and image-capture setup (end) ***/

  /** Prepare image processing (red extraction) ***/
  //font setup (omitting this crashes with a segfault)
  float hscale = 1.0f;
  float vscale = 1.0f;
  float italicscale = 0.0f;
  int thickness = 3;
  cvInitFont(&font, CV_FONT_HERSHEY_COMPLEX, hscale, vscale, italicscale, thickness, CV_AA);
  //end of font setup
  // Set threshold
  rgb_thre[0] = R_MIN_THRE;
  rgb_thre[1] = R_MAX_THRE;
  rgb_thre[2] = G_MIN_THRE;
  rgb_thre[3] = G_MAX_THRE;
  rgb_thre[4] = B_MIN_THRE;
  rgb_thre[5] = B_MAX_THRE;


  //allocate the image buffers used for processing
  frame_image = cvCreateImage(cvSize(process_width, process_height), IPL_DEPTH_8U, 3);
  processed_image = cvCreateImage(cvSize(process_width, process_height), IPL_DEPTH_8U, 3);
  /** Image-processing (red extraction) setup (end) ***/

  
  /**** Binarization for computing blob areas ***/
  grayImage = cvCreateImage(cvGetSize(frame_image), IPL_DEPTH_8U, 1);
  binaryImage = cvCreateImage(cvGetSize(frame_image), IPL_DEPTH_8U, 1);
  
  //Labeling init
  label_buf = (int*)malloc(sizeof(int)*frame_image->width*frame_image->height);

  /**** main loop ****/
  while(1){
    CvPoint centroid;
    //capture a camera frame
    capture_image = cvQueryFrame(cap);
    if (capture_image==NULL) {
      fprintf(stderr, "capture_image is %p\n", capture_image);
      continue;
    }
    cvResize(capture_image, frame_image, CV_INTER_LINEAR);

    //process the camera image
    maskRGB(frame_image, processed_image, rgb_thre);          //red-color extraction
    // Binarize
    myBinarize(processed_image, grayImage, binaryImage);
    cvDilate(binaryImage, grayImage, NULL, 10); //dilation
    cvErode(grayImage, binaryImage, NULL, 15);  //erosion
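    //Dilating 10 times then eroding 15 acts like a morphological closing
    //(fills small holes) plus 5 net erosions that shrink the blobs before labeling.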
    // Labeling
    cvGetRawData(binaryImage, &binarydata, &step, &size);
    labeling(binarydata, frame_image->height, frame_image->width, label_buf, step);
    label_num = labeling_result(&linfo, label_buf, frame_image->height, frame_image->width);
    //draw the processing results
    {
      int i,n;
      n=25;
      //fprintf(stderr, "num is %d\n", label_num);
      for(i=0; i<label_num; i++){
        //fprintf(stderr, "area %d, x %d y %d\n", linfo[i].area, (int)linfo[i].xpos, (int)linfo[i].ypos);
        centroid.x = (int) linfo[i].xpos;
        centroid.y = (int) linfo[i].ypos;
        drawCross(processed_image, &centroid, CV_RGB(0, 255, 0));                                 //draw a cross mark
        sprintf(text, "X: %d Y: %d AREA: %d", centroid.x, centroid.y, linfo[i].area);             //write the values
        cvPutText(processed_image, text, cvPoint(n, (height-n*(i+1))), &font, CV_RGB(0, 255, 0));
      }
    }
    // image -> rawdata
    sema_wait(raw_semaphore);
    cvGetRawData(frame_image, &rawdata, &step, &size);
    
    // process image -> process data
    sema_wait(process_semaphore);
    cvGetRawData(processed_image, &processdata, &step, &size);

    //sleep
    usleep(30000);
  }
  //release the capture object
  cvReleaseCapture(&cap);
  return 0;
}
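
Note: on Linux, <sys/sem.h> does not declare union semun; the caller must define it, so the server above relies on a definition like this sketch appearing before main:

union semun {
	int val;                //value for SETVAL
	struct semid_ds *buf;   //buffer for IPC_STAT, IPC_SET
	unsigned short *array;  //array for GETALL, SETALL
};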
Пример #13
0
static void showImage()
{
    IplImage* image_clone = cvCloneImage(image);
    char distance_string[32];
    CvFont dfont;
    float hscale      = 0.7f;
    float vscale      = 0.7f;
    float italicscale = 0.0f;
    int  thickness    = 1;

    std::string objectLabel;
    CvFont      dfont_label;
    float       hscale_label = 0.5f;
    float       vscale_label = 0.5f;
    CvSize      text_size;
    int         baseline     = 0;

    cvInitFont(&dfont_label, CV_FONT_HERSHEY_COMPLEX, hscale_label, vscale_label, italicscale, thickness, CV_AA);
    objectLabel = car_fused_objects.type;
    cvGetTextSize(objectLabel.data(),
                  &dfont_label,
                  &text_size,
                  &baseline);

    /*
     * Plot obstacle frame
     */
    showRects(image_clone,
              car_fused_objects.obj,
              ratio,
              cvScalar(255.0,255.0,0.0));
    showRects(image_clone,
              pedestrian_fused_objects.obj,
              ratio,
              cvScalar(0.0,255.0,0.0));


    /*
     * Plot car distance data on image
     */
    for (unsigned int i = 0; i < car_fused_objects.obj.size(); i++) {
      if(!isNearlyNODATA(car_fused_objects.obj.at(i).range)) {
          int rect_x      = car_fused_objects.obj.at(i).rect.x;
          int rect_y      = car_fused_objects.obj.at(i).rect.y;
          int rect_width  = car_fused_objects.obj.at(i).rect.width;
          int rect_height = car_fused_objects.obj.at(i).rect.height;
          float range     = car_fused_objects.obj.at(i).range;

          /* put label */
          CvPoint labelOrg = cvPoint(rect_x - OBJ_RECT_THICKNESS,
                                     rect_y - baseline - OBJ_RECT_THICKNESS);
          cvRectangle(image_clone,
                      cvPoint(labelOrg.x + 0, labelOrg.y + baseline),
                      cvPoint(labelOrg.x + text_size.width, labelOrg.y - text_size.height),
                      CV_RGB(0, 0, 0), // label background color is black
                      -1, 8, 0
                      );
          cvPutText(image_clone,
                    objectLabel.data(),
                    labelOrg,
                    &dfont_label,
                    CV_RGB(255, 255, 255) // label text color is white
                    );

          /* put distance data */
            cvRectangle(image_clone,
                        cv::Point(rect_x + (rect_width/2) - (((int)log10(range/100)+1) * 5 + 45),
                                  rect_y + rect_height + 5),
                        cv::Point(rect_x + (rect_width/2) + (((int)log10(range/100)+1) * 8 + 38),
                                  rect_y + rect_height + 30),
                        cv::Scalar(255,255,255), -1);
            cvInitFont (&dfont, CV_FONT_HERSHEY_COMPLEX , hscale, vscale, italicscale, thickness, CV_AA);
            sprintf(distance_string, "%.2f m", range / 100); //unit of length is meter
            cvPutText(image_clone,
                      distance_string,
                      cvPoint(rect_x + (rect_width/2) - (((int)log10(range/100)+1) * 5 + 40),
                              rect_y + rect_height + 25),
                      &dfont,
                      CV_RGB(255, 0, 0));
        }
    }

    objectLabel = pedestrian_fused_objects.type;
    cvGetTextSize(objectLabel.data(),
                  &dfont_label,
                  &text_size,
                  &baseline);

    /*
     * Plot pedestrian distance data on image
     */
    for (unsigned int i = 0; i < pedestrian_fused_objects.obj.size(); i++) {
      if(!isNearlyNODATA(pedestrian_fused_objects.obj.at(i).range)) {
          int rect_x      = pedestrian_fused_objects.obj.at(i).rect.x;
          int rect_y      = pedestrian_fused_objects.obj.at(i).rect.y;
          int rect_width  = pedestrian_fused_objects.obj.at(i).rect.width;
          int rect_height = pedestrian_fused_objects.obj.at(i).rect.height;
          float range     = pedestrian_fused_objects.obj.at(i).range;

          /* put label */
          CvPoint labelOrg = cvPoint(rect_x - OBJ_RECT_THICKNESS,
                                     rect_y - baseline - OBJ_RECT_THICKNESS);
          cvRectangle(image_clone,
                      cvPoint(labelOrg.x + 0, labelOrg.y + baseline),
                      cvPoint(labelOrg.x + text_size.width, labelOrg.y - text_size.height),
                      CV_RGB(0, 0, 0), // label background color is black
                      -1, 8, 0
                      );
          cvPutText(image_clone,
                    objectLabel.data(),
                    labelOrg,
                    &dfont_label,
                    CV_RGB(255, 255, 255) // label text color is white
                    );

          /* put distance data */
            cvRectangle(image_clone,
                        cv::Point(rect_x + (rect_width/2) - (((int)log10(range/100)+1) * 5 + 45),
                                  rect_y + rect_height + 5),
                        cv::Point(rect_x + (rect_width/2) + (((int)log10(range/100)+1) * 8 + 38),
                                  rect_y + rect_height + 30),
                        cv::Scalar(255,255,255), -1);
            cvInitFont (&dfont, CV_FONT_HERSHEY_COMPLEX , hscale, vscale, italicscale, thickness, CV_AA);
            sprintf(distance_string, "%.2f m", range / 100); //unit of length is meter
            cvPutText(image_clone,
                      distance_string,
                      cvPoint(rect_x + (rect_width/2) - (((int)log10(range/100)+1) * 5 + 40),
                              rect_y + rect_height + 25),
                      &dfont,
                      CV_RGB(255, 0, 0));
        }
    }

    /*
     * Show image
     */
    if (cvGetWindowHandle(window_name.c_str()) != NULL) // Guard not to write destroyed window by using close button on the window
      {
        cvShowImage(window_name.c_str(), image_clone);
        cvWaitKey(2);
      }
    cvReleaseImage(&image_clone);
}
int main(int argc, char **argv) {
	int i,index=0;
	int width_img=0;	// Frame width
	int height_img=0;	// Frame height
	double fps=0.0;		// FPS (Frames Per Second)
	int frame=0;		// Frame number (index)
	int msec;
	int total_frames=0;	// Total frames
	int marked_frames=0;	// Marked frame
	int *check_frames;			// Contains indices of marked frames
	int *list_of_frames;		// List of frames
	double *ecr;
	IplImage *previous_frame;	// Previous frame
	IplImage *current_frame;	// Current frame
	IplImage *bgr_frame;	// Frame
	IplImage *new_frame;	// Frame
	CvCapture* capture=0;	// Capture struct
	CvSize size;		// Size of frame (width x height)
	FILE *fp;		// TXT file pointer
	clock_t start, stop, diff; // Timer
	
	// Text variables
	CvScalar red = CV_RGB(255,0,0);
	CvFont font1;
	int thickness = 2;
	char text1[20] = "0"; // frame number
	char text2[20] = "0"; // frame msec position
	double hscale = 1.0;
	double vscale = 1.0;
	double shear = 0.0;

	// Check if the user gave arguments
	if(argc != 4) {
		fprintf(stderr, "\nUSAGE: %s <input_video_file> <output_video_file> <output_TXT_file>\n", argv[0]);
		return EXIT_FAILURE;
	}

	
	
	/**** STAGE 1: PROCESS FRAMES ****/
	
	capture = cvCreateFileCapture(argv[1]);	// Open video file to start capture
	if(!capture) {
		printf("Error opening video file! (cvCreateFileCapture)\n");
		return EXIT_FAILURE;
	}

	fp=fopen(argv[3],"w");		// Open file to write stats
	if(fp == NULL) {
		printf("Error opening file! (fopen)\n");
		return EXIT_FAILURE;
	}

	fps = cvGetCaptureProperty(capture,CV_CAP_PROP_FPS);				// Get FPS
	width_img = cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH);		// Get frame width
	height_img = cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT);		// Get frame height
	total_frames = cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_COUNT);		// Get total frames
	size = cvSize(width_img,height_img);						// Get size of frames

	check_frames = (int *)malloc(sizeof(*check_frames) * total_frames);
	list_of_frames = (int *)malloc(sizeof(*list_of_frames) * total_frames);
	ecr = (double *)malloc(sizeof(*ecr) * total_frames);
	if (check_frames == NULL || list_of_frames == NULL || ecr == NULL) {
		printf("Error allocating memory!\n");
		return EXIT_FAILURE;
	}

	// Initialize arrays
	for(i=0;i<total_frames;i++) {
		ecr[i]=0.0;
		check_frames[i]=0;
		list_of_frames[i]=0;
	}
	
	cvInitFont(&font1,CV_FONT_HERSHEY_SIMPLEX,hscale,vscale,shear,thickness,CV_AA);
	
	CvPoint pt1 = cvPoint(5,30);
	CvPoint pt2 = cvPoint(5,70);
	
	fprintf(fp,"Filename\t:\t%s\n\nFrame width\t:\t%d\nFrame height\t:\t%d\nFPS\t\t:\t%f\nTotal frames\t:\t%d\n\n\n\n",argv[1],width_img,height_img,fps,total_frames);
	printf("Filename\t:\t%s\n\nFrame width\t:\t%d\nFrame height\t:\t%d\nFPS\t\t:\t%f\nTotal frames\t:\t%d\n\n\n\n",argv[1],width_img,height_img,fps,total_frames);
	
	printf("Start processing frames...\n\n");
	
	start = clock(); // Start timer
	
	bgr_frame=cvQueryFrame(capture);												// Grab first frame
	previous_frame = cvCreateImage(size, bgr_frame->depth, bgr_frame->nChannels);	// Create the previous frame
	current_frame = cvCreateImage(size, bgr_frame->depth, bgr_frame->nChannels);	// Create the current frame
	cvCopy(bgr_frame,previous_frame,NULL);											// Save the copy
	
	// Grab frames from the video until NULL
	while((bgr_frame=cvQueryFrame(capture)) != NULL) {
		/* When entering this loop, we have already grabbed a frame
		 * so the frame counter starts from 2
		 */
		frame = cvGetCaptureProperty(capture,CV_CAP_PROP_POS_FRAMES);					// Get the current frame number
		
		cvCopy(bgr_frame,current_frame,NULL);											// Save the copy
		
		/**** START PROCESSING ****/
		ecrdiff_v2(current_frame, previous_frame, size, frame, fp, &index);
		/**** END PROCESSING ****/

		cvCopy(bgr_frame,previous_frame,NULL);	// Save the copy
		
		if(index==1) {
			check_frames[frame]=1;	// It means that the specific frame is marked
		}
		
		printf("Processing frame %d...\r",frame);
		fflush(stdout);
	}
	
	cvReleaseImage(&bgr_frame);			// Release bgr_frame
	cvReleaseImage(&previous_frame);	// Release previous_frame
	cvReleaseImage(&current_frame);		// Release current_frame
	cvReleaseCapture(&capture);			// Release capture
	
	stop = clock();			// Stop timer
	diff = stop - start;	// Get difference between start time and current time;
	printf("\n\nTotal time processing frames : %f minutes\t%f seconds\n", (((float)diff)/CLOCKS_PER_SEC)/60, ((float)diff)/CLOCKS_PER_SEC);
	printf("Processing completed!\n");
	
	fprintf(fp,"\n\n\n\nMarked frames\n\n");
	printf("\n\n\n\nMarked frames\n\n");

	for(i=0;i<total_frames;i++)	{
		if(check_frames[i]==1) {
			list_of_frames[i]=i;
			fprintf(fp,"frame %d\n",i);	// Write to file only marked frames
			printf("frame %d\n",i);	// Write to file only marked frames
			marked_frames++;
		}
	}

	fprintf(fp,"\n\nTotal marked frames\t:\t%d\n",marked_frames);
	printf("\n\nTotal marked frames\t:\t%d\n\n",marked_frames);

	//If there are no marked frames, exit
	if(marked_frames == 0) {
		return EXIT_SUCCESS;
	}
	
	
	
	/**** STAGE 2: WRITE VIDEO ****/
	
	capture = cvCreateFileCapture(argv[1]);	// Re-Open video file to start capture
	if(!capture) {
		printf("Error opening video file! (cvCreateFileCapture)\n");
		return EXIT_FAILURE;
	}
	
	CvVideoWriter *writer = cvCreateVideoWriter(argv[2],CV_FOURCC('F','M','P','4'),fps,size,1);
	
	printf("Start writing frames...\n\n");
	
	start = clock(); // Start timer

	bgr_frame = cvQueryFrame(capture);	// Retrieve frame
	new_frame = cvCreateImage(size, bgr_frame->depth, bgr_frame->nChannels);	// Create the new frame
	
	do {
		frame = cvGetCaptureProperty(capture,CV_CAP_PROP_POS_FRAMES);	// Get the current frame number
		msec = cvGetCaptureProperty(capture,CV_CAP_PROP_POS_MSEC);
		msec=msec/1000;
		
		// If the index number of the current frame is equal to the frame we want, then write it to the stream.
		if(frame == list_of_frames[frame]) {
			cvCopy(bgr_frame,new_frame,NULL);	// Save the copy
			
			sprintf(text1,"%d frame",frame); // int to char via sprintf()
			cvPutText(new_frame,text1,pt1,&font1,red); // frame number

			sprintf(text2,"%d sec",msec); // int to char via sprintf()
			cvPutText(new_frame,text2,pt2,&font1,red); // frame msec position
			
			cvWriteFrame(writer, new_frame);	// Write frame to video stream
		} else {
			cvWriteFrame(writer, new_frame);	// Write frame to video stream
		}
		
		printf("Writing frame %d...\r",frame);
		fflush(stdout); // For '/r' to work we have to flush the output stream
	} while((bgr_frame=cvQueryFrame(capture)) != NULL);
		
	stop = clock(); 		// Stop timer
	diff = stop - start;	// Get difference between start time and current time;
	printf("\n\nTotal time writing frames : %f minutes\t%f seconds\n", (((float)diff)/CLOCKS_PER_SEC)/60, ((float)diff)/CLOCKS_PER_SEC);
	printf("Writing completed!\n\n");

	fclose(fp);					// Close file pointer
	free(list_of_frames);		// Free list_of_frames
	free(check_frames);			// Free check_frames
	free(ecr);					// Free ecr
	cvReleaseImage(&bgr_frame);	// Release bgr_frame
	cvReleaseImage(&new_frame);	// Release new_frame
	cvReleaseCapture(&capture);	// Release capture

	return EXIT_SUCCESS;
}
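
Note: ecrdiff_v2 is not shown in this snippet; assuming it follows the standard edge change ratio used for cut detection, the per-pair quantity is ECR(n) = max(X_in / E_n, X_out / E_(n-1)), sketched here on binary edge images:

static double edgeChangeRatio(IplImage *edgesPrev, IplImage *edgesCur)
{
	int ePrev = cvCountNonZero(edgesPrev);
	int eCur  = cvCountNonZero(edgesCur);
	if (ePrev == 0 || eCur == 0)
		return 0.0; //no edges: treat as no change
	IplImage *tmp = cvCreateImage(cvGetSize(edgesCur), IPL_DEPTH_8U, 1);
	cvSub(edgesCur, edgesPrev, tmp);                  //edges present now, absent before
	double in  = (double)cvCountNonZero(tmp) / eCur;
	cvSub(edgesPrev, edgesCur, tmp);                  //edges gone since the previous frame
	double out = (double)cvCountNonZero(tmp) / ePrev;
	cvReleaseImage(&tmp);
	return in > out ? in : out;                       //max of entering/exiting ratios
}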
Пример #15
0
void haarwrapper_drawtext(IplImage *frame, struct bbox_int* pos, CvScalar colour, char* text)
{
  CvFont font1;
  cvInitFont( &font1, CV_FONT_HERSHEY_SIMPLEX, 0.4f, 0.4f, 0.0f, 1, 8 );
  cvPutText( frame, text , cvPoint( pos->x, pos->y ), &font1, colour );
}
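
A hypothetical call site for the helper above (the bbox_int layout is assumed; only the x and y fields are used):

struct bbox_int box;
box.x = 40; box.y = 60; //top-left corner of a detection, in pixels
haarwrapper_drawtext(frame, &box, CV_RGB(0, 255, 0), (char*)"face");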
Пример #16
0
void moFiducialTrackerModule::applyFilter() {
	IplImage* src = static_cast<IplImage*>(this->input->getData());
	fiducials_data_t *fids = static_cast<fiducials_data_t*>(this->internal);
	moDataGenericContainer *fiducial;
	FiducialX *fdx;
	int fid_count, valid_fiducials = 0;
	bool do_image = this->output->getObserverCount() > 0;
	CvSize size = cvGetSize(src);

	CvFont font, font2;
	cvInitFont(&font, CV_FONT_HERSHEY_DUPLEX, 1.0, 1.0, 0, 2);
	cvInitFont(&font2, CV_FONT_HERSHEY_PLAIN, 1.0, 1.0, 0, 1);

	assert( src != NULL );
	assert( fids != NULL );
	assert( src->imageData != NULL );

	if ( src->nChannels != 1 ) {
		this->setError("FiducialTracker input image must be a single channel binary image.");
		this->stop();
		return;
	}


	// prepare image if we have listener on output
	if ( do_image )
		cvSet(this->output_buffer, CV_RGB(0, 0, 0));

	// libfidtrack
	step_segmenter(&fids->segmenter, (const unsigned char*)src->imageData);
	fid_count = find_fiducialsX(fids->fiducials, MAX_FIDUCIALS,
			&fids->fidtrackerx, &fids->segmenter, src->width, src->height);

	// prepare to refill fiducials
	this->clearFiducials();

	for ( int i = 0; i < fid_count; i++ ) {
		fdx = &fids->fiducials[i];

		// invalid id (INVALID_FIDUCIAL_ID)
		if ( fdx->id < 0 )
			continue;

		// got a valid fiducial ! process...
		valid_fiducials++;

		LOGM(MO_DEBUG) << "fid:" << i << " id=" << fdx->id << " pos=" \
			<< fdx->x << "," << fdx->y << " angle=" << fdx->angle;

		fiducial = new moDataGenericContainer();
		fiducial->properties["type"] = new moProperty("fiducial");
		fiducial->properties["id"] = new moProperty(fdx->id);
		fiducial->properties["x"] = new moProperty(fdx->x / size.width);
		fiducial->properties["y"] = new moProperty(fdx->y / size.height);
		fiducial->properties["angle"] = new moProperty(fdx->angle);
		fiducial->properties["leaf_size"] = new moProperty(fdx->leaf_size);
		fiducial->properties["root_size"] = new moProperty(fdx->root_size);
		this->fiducials.push_back(fiducial);

		// draw on output image
		if ( do_image ) {
			std::ostringstream oss;
			oss << fdx->id;
			cvPutText(this->output_buffer, oss.str().c_str(),
				cvPoint(fdx->x, fdx->y - 20), &font, cvScalar(20, 255, 20));

			oss.str("");
			oss << "angle:" << int(fdx->angle * 180 / 3.14159265);
			cvPutText(this->output_buffer, oss.str().c_str(),
				cvPoint(fdx->x - 30, fdx->y), &font2, cvScalar(20, 255, 20));

			oss.str("");
			oss << "l/r:" << fdx->leaf_size << "/" << fdx->root_size;
			cvPutText(this->output_buffer, oss.str().c_str(),
				cvPoint(fdx->x - 50, fdx->y + 20), &font2, cvScalar(20, 255, 20));

		}
	}

	LOGM(MO_DEBUG) << "-> Found " << valid_fiducials << " fiducials";
	this->output_data->push(&this->fiducials);
}
Пример #17
0
  void process_image()
  {

    //    std::cout << "Checking publish count: " << image_in->publish_count << std::endl;

    //    image_in->lock_atom();

    if (image_in->publish_count > 0) {

      cvSetData(cvimage_in, codec_in->get_raster(), 3*704);
      cvConvertImage(cvimage_in, cvimage_bgr, CV_CVTIMG_SWAP_RB);

      //      image_in->unlock_atom();

      CvSize board_sz = cvSize(12, 12);
      CvPoint2D32f* corners = new CvPoint2D32f[12*12];
      int corner_count = 0;
    
      //This function has a memory leak in the current version of opencv!
      int found = cvFindChessboardCorners(cvimage_bgr, board_sz, corners, &corner_count, 
      					  CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);



      IplImage* gray = cvCreateImage(cvSize(cvimage_bgr->width, cvimage_bgr->height), IPL_DEPTH_8U, 1);
      cvCvtColor(cvimage_bgr, gray, CV_BGR2GRAY);
      cvFindCornerSubPix(gray, corners, corner_count, 
      			 cvSize(5, 5), cvSize(-1, -1),
      			 cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.01f ));
      cvReleaseImage(&gray);


      if (take_pic && corner_count == 144) {
	std::stringstream ss;
	img_cnt++;
	ss << dir_name << "/Image" << img_cnt << ".jpg";
	//	std::ofstream imgfile(ss.str().c_str());
	//	imgfile.write((char*)image_in->jpeg_buffer, image_in->compressed_size);
	//	imgfile.close();

	cvSaveImage(ss.str().c_str(), cvimage_bgr);
	
	ss.str("");
	ss << dir_name << "/Position" << img_cnt << ".txt";

	std::ofstream posfile(ss.str().c_str());
	observe->lock_atom();
	posfile << "P: " << observe->pan_val << std::endl
		<< "T: " << observe->tilt_val << std::endl
		<< "Z: " << observe->lens_zoom_val << std::endl
		<< "F: " << observe->lens_focus_val;
	observe->unlock_atom();

	posfile.close();

	take_pic = false;
      }

      float maxdiff = 0;

      for(int c=0; c<12*12; c++) {
	float diff = sqrt( pow(corners[c].x - last_corners[c].x, 2.0) + 
		     pow(corners[c].y - last_corners[c].y, 2.0));
	last_corners[c].x = corners[c].x;
	last_corners[c].y = corners[c].y;

	if (diff > maxdiff) {
	  maxdiff = diff;
	}
      }

      printf("Max diff: %g\n", maxdiff);


      cvDrawChessboardCorners(cvimage_bgr, board_sz, corners, corner_count, found);

      if (undistort) {
	cvUndistort2(cvimage_bgr, cvimage_undistort, intrinsic_matrix, distortion_coeffs);
      } else {
	cvCopy(cvimage_bgr, cvimage_undistort);
      }

      CvFont font;
      cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.8, 0.8, 0, 2);    
      std::stringstream ss;

      observe->lock_atom();
      ss << "P: " << observe->pan_val;
      ss << " T: " << observe->tilt_val;
      ss << " Z: " << observe->lens_zoom_val;
      ss << " F: " << observe->lens_focus_val;
      observe->unlock_atom();
      cvPutText(cvimage_undistort, ss.str().c_str(), cvPoint(15,30), &font, CV_RGB(255,0,0));

      ss.str("");

      ss << "Found " << corner_count << " corners";
      if (centering) {
	ss << " -- Autocentering";
      }
      cvPutText(cvimage_undistort, ss.str().c_str(), cvPoint(15,60), &font, CV_RGB(255,0,0));

      image_out->width = 704;
      image_out->height = 480;
      image_out->compression = "raw";
      image_out->colorspace = "rgb24";

      //      codec_out->realloc_raster_if_needed();
      cvSetData(cvimage_out, codec_out->get_raster(), 3*image_out->width);      
      cvConvertImage(cvimage_undistort, cvimage_out, CV_CVTIMG_SWAP_RB);

      codec_out->set_flow_data();

      image_out->publish();


      CvPoint2D32f COM = cvPoint2D32f(0,0);
    
      if (centering && corner_count > 20) {
	// average the corner positions to get the board's centroid:
	for (int i = 0; i < corner_count; i++) {
	  COM.x += corners[i].x / corner_count;
	  COM.y += corners[i].y / corner_count;
	}
      
	// If the centroid strays more than 10 px from the approximate image
	// center (354, 240) of the 704x480 frame, command a proportional
	// pan/tilt correction.
	if ( (fabs(COM.x - 354.0) > 10) || (fabs(COM.y - 240.0) > 10) ) {
	  float rel_pan, rel_tilt;

	  rel_pan = (COM.x - 354.0) * .001;   // .001 is the proportional gain
	  rel_tilt = -(COM.y - 240.0) * .001; // image y grows downward

	  control->pan_val = rel_pan;      
	  control->pan_rel = true;
	  control->pan_valid = true;

	  control->tilt_val = rel_tilt;
	  control->tilt_rel = true;
	  control->tilt_valid = true;

	  control->publish();
	}

      }

      delete[] corners;
      
    } else {
      //      image_in->unlock_atom();
    }
  }
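
The centering branch in process_image() is a simple proportional controller: the corner centroid's offset from the (approximate) image center, scaled by a fixed gain, becomes a relative pan/tilt command. A standalone sketch of that step, reusing the 354/240 center, 10 px deadband, and 0.001 gain from the listing (the function name is illustrative):

#include <math.h>
#include <opencv/cv.h>

// Returns 1 and fills rel_pan/rel_tilt when a correction is needed.
static int centering_step(CvPoint2D32f com, float *rel_pan, float *rel_tilt)
{
  const float cx = 354.0f, cy = 240.0f; /* approximate center of the 704x480 frame */
  const float deadband = 10.0f;         /* ignore offsets smaller than this */
  const float gain = 0.001f;            /* proportional gain */

  if (fabs(com.x - cx) <= deadband && fabs(com.y - cy) <= deadband)
    return 0;
  *rel_pan  =  (com.x - cx) * gain;
  *rel_tilt = -(com.y - cy) * gain; /* image y grows downward */
  return 1;
}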
Example #18
static void process_image_common(IplImage *frame)
{
  CvFont font;
  cvInitFont(&font, CV_FONT_VECTOR0, 0.25f, 0.25f); // CV_FONT_VECTOR0 is a legacy alias for CV_FONT_HERSHEY_SIMPLEX

  CvSize video_size;
#if defined(USE_POSIX_SHARED_MEMORY)
  video_size.height = *shrd_ptr_height;
  video_size.width  = *shrd_ptr_width;
#else
  // XXX These parameters should be set as ROS parameters
  video_size.height = frame->height;
  video_size.width  = frame->width;
#endif
  CvSize    frame_size = cvSize(video_size.width, video_size.height/2);
  IplImage *temp_frame = cvCreateImage(frame_size, IPL_DEPTH_8U, 3);
  IplImage *gray       = cvCreateImage(frame_size, IPL_DEPTH_8U, 1);
  IplImage *edges      = cvCreateImage(frame_size, IPL_DEPTH_8U, 1);
  IplImage *half_frame = cvCreateImage(cvSize(video_size.width/2, video_size.height/2), IPL_DEPTH_8U, 3);

  CvMemStorage *houghStorage = cvCreateMemStorage(0);

  cvPyrDown(frame, half_frame, CV_GAUSSIAN_5x5); // Reduce the image by 2

  /* we're interested only in the road below the horizon, so crop off the top portion of the image */
  crop(frame, temp_frame, cvRect(0, frame_size.height, frame_size.width, frame_size.height));
  cvCvtColor(temp_frame, gray, CV_BGR2GRAY); // convert to grayscale

  /* Perform a Gaussian blur & detect edges */
  // smooth the image more strongly than the original program did
  cvSmooth(gray, gray, CV_GAUSSIAN, 15, 15);
  cvCanny(gray, edges, CANNY_MIN_TRESHOLD, CANNY_MAX_TRESHOLD);

  /* do Hough transform to find lanes */
  double rho = 1;
  double theta = CV_PI/180;
  CvSeq *lines = cvHoughLines2(edges, houghStorage, CV_HOUGH_PROBABILISTIC,
                               rho, theta, HOUGH_TRESHOLD, HOUGH_MIN_LINE_LENGTH, HOUGH_MAX_LINE_GAP);

  processLanes(lines, edges, temp_frame, frame);

#ifdef SHOW_DETAIL
  /* show middle line */
  cvLine(temp_frame, cvPoint(frame_size.width/2, 0),
         cvPoint(frame_size.width/2, frame_size.height), CV_RGB(255, 255, 0), 1);

  // cvShowImage("Gray", gray);
  // cvShowImage("Edges", edges);
  // cvShowImage("Color", temp_frame);
  // cvShowImage("temp_frame", temp_frame);
  // cvShowImage("frame", frame);
#endif

#if defined(USE_POSIX_SHARED_MEMORY)
  setImage_toSHM(frame);
#endif

#ifdef SHOW_DETAIL
  // cvMoveWindow("Gray", 0, 0);
  // cvMoveWindow("Edges", 0, frame_size.height+25);
  // cvMoveWindow("Color", 0, 2*(frame_size.height+25));
#endif

  cvReleaseMemStorage(&houghStorage);
  cvReleaseImage(&gray);
  cvReleaseImage(&edges);
  cvReleaseImage(&temp_frame);
  cvReleaseImage(&half_frame);
}
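
Example #18 depends on two helpers whose bodies are not shown: crop(), which copies a sub-rectangle of the source frame, and processLanes(), which consumes the segments returned by cvHoughLines2. Hedged sketches under those assumptions follow (with CV_HOUGH_PROBABILISTIC each sequence element is a pair of CvPoint endpoints; the drawing color and thickness are illustrative):

#include <opencv/cv.h>

// plausible ROI-copy implementation of the crop() helper
static void crop(IplImage *src, IplImage *dst, CvRect rect)
{
  cvSetImageROI(src, rect);  // restrict src to the requested rectangle
  cvCopy(src, dst, NULL);    // dst must already have rect's dimensions
  cvResetImageROI(src);
}

// skeleton of a processLanes()-style loop over the Hough segments
static void draw_segments(CvSeq *lines, IplImage *out)
{
  int i;
  for (i = 0; i < lines->total; i++) {
    CvPoint *line = (CvPoint *)cvGetSeqElem(lines, i);
    cvLine(out, line[0], line[1], CV_RGB(255, 0, 0), 2, 8, 0);
  }
}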
Example #19
int _tmain(int argc, _TCHAR* argv[])
{
	/*

	Initial parameter setup. This code only needs
	to be run once.

	TVAInitParams params;
	memcpy(&params.Camera, &g_camera, sizeof(TVACamera));
	params.NumZones = 0;
	params.EventSens = 0.5;
	params.EventTimeSens = 1000;
	SaveInitParams("params.xml", &params);
	*/
	// initialize the observation zones
	for (int i = 0; i < C_MAX_OBJECTS; i++)
	{
		g_contours[i].IsRect = false;
		g_contours[i].NumPoints = C_MAX_POINTS;
		g_contours[i].Points = (TVAPoint*)malloc(C_MAX_POINTS*sizeof(TVAPoint));
	}
	
	cvInitFont(&g_font, CV_FONT_HERSHEY_PLAIN, 1, 1);

	CvCapture* capture = NULL;
	if (argc < 2)
		capture = cvCaptureFromCAM(0);
	else
		capture = cvCaptureFromFile(argv[1]); // assumes an ANSI build where _TCHAR is char; cvCaptureFromFile takes char*
	

	if (capture == NULL)
	{
		printf("%s\n", "Cannot open camera.");
		return -1;
	}

    double fps = cvGetCaptureProperty ( // get the frame rate (may be 0 for live cameras)
        capture,
        CV_CAP_PROP_FPS
    );

    CvSize size = cvSize( // get the frame dimensions
       (int)cvGetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH),
       (int)cvGetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT)
    );
	g_mask = cvCreateImage(size, IPL_DEPTH_8U, 1);
	CvVideoWriter* writer = NULL;
	cvNamedWindow(_MODULE_);
	cvSetMouseCallback(_MODULE_, on_mouse);

	/*
		Frame capture and processing loop.
	*/
	for (;;) 
	{
		IplImage* frame = cvQueryFrame(capture);
		if (!frame)
			break;
		/*
		 draw the rectangles
		*/
		for (int i = 0; i < g_rects_count; i++)
		{
			CvPoint p1 = cvPoint(g_rects[i].x, g_rects[i].y);
			CvPoint p2 = cvPoint(p1.x + g_rects[i].width, p1.y + g_rects[i].height);
			
			cvRectangle(frame, p1, p2, CV_RGB(255,0,0));
		}

		/*
			Draw the observation zones.
		*/
		for (int i = 0; i < g_contours_count; i++)
		{
			if (g_contours[i].NumPoints > 0)
			{
				for (int j = 1; j < g_contours[i].NumPoints; j++)
				{
					CvPoint p1 = cvPoint((int)g_contours[i].Points[j-1].X, (int)g_contours[i].Points[j-1].Y);
					CvPoint p2 = cvPoint((int)g_contours[i].Points[j].X, (int)g_contours[i].Points[j].Y);
					cvLine(frame, p1,p2, CV_RGB(255,0,0));
				}
				CvPoint p1 = cvPoint((int)g_contours[i].Points[g_contours[i].NumPoints-1].X, (int)g_contours[i].Points[g_contours[i].NumPoints-1].Y);
				CvPoint p2 = cvPoint((int)g_contours[i].Points[0].X, (int)g_contours[i].Points[0].Y);
				cvLine(frame, p1,p2, CV_RGB(255,0,0));			
			}
		}

		/*
			Display the resulting image in the window.
		*/
		ProcessFrame(frame);
		if (g_grid_visible)
			DrawGrid(frame);
		DrawStatus(frame);
		cvShowImage(_MODULE_, frame);
		/*
			Record the frame
		*/
		if (g_record_video)
			cvWriteFrame( writer, frame );

		/*
			Handle keyboard input
		*/
		bool state = g_set_rects || g_set_contours || g_set_zones;
		int c = cvWaitKey(10);
		if ((char)c == 27)
			break;

		if ((char)c == 's')
		{
			cvSaveImage("out.png", frame);
		}
		else if ((char)c == 'l')
		{
			if (!state)
				LoadRects(size.width, size.height);
		}
		else if ((char)c == 'g')
		{
			if (!state)
				LoadContours(size.width, size.height);
		}
		else if ((char)c == 'k')
		{
			if (!state)
				LoadZones(size.width, size.height);
		}
		else if ((char)c == 'r')
		{
			if (g_record_video)
			{
			    // stop recording to disk
				cvReleaseVideoWriter( &writer );
				writer = NULL;
				g_record_video = false;
				printf("Stop recording.\n");
			}
			else
			{
				// open the output file for writing and bind
				// the writer variable to it
				writer = cvCreateVideoWriter("out.avi",CV_FOURCC('D','I','V','X'), fps, size );

				if (writer == NULL)
				{
					printf("%s\n", "Cannot create writer.");
				}
				else
				{
					g_record_video = true;
					printf("Start recording.\n");

				}
			}
		}
		else if ((char)c == 't')
		{
			if (g_set_rects)
			{
				SaveRects(size.width, size.height);
				if (!g_set_zones)
				{
					g_rects_count = 0;
					ClearMask(g_mask);
				}
				g_set_rects = false;
			}
			else if (!g_set_contours)
			{
				g_set_rects = true;
			}
		}
		else if ((char)c == 'c')
		{
			
			if (g_set_contours)
			{
				SaveContours(size.width, size.height);		
				if (!g_set_zones)
				{
					g_contours_count = 0;
					ClearMask(g_mask);
				}
				g_set_contours = false;
				g_open_contour = false;
			}
			else if (!g_set_rects)
			{
				g_set_contours = true;
			}
		}
		else if ((char)c == 'z')
		{
			if (g_set_zones)
			{
				SaveZones(size.width, size.height);
				g_set_zones = false;
				g_contours_count = 0;
				g_rects_count = 0;
				ClearMask(g_mask);
			}
			else if (!g_set_rects && !g_set_contours)
			{
				g_set_zones = true;
			}
		}
		else if ((char)c == 'w')
		{
			g_grid_visible = !g_grid_visible;
		}
	}

	cvReleaseVideoWriter( &writer );
	cvDestroyWindow(_MODULE_);
	cvReleaseCapture(&capture);
	cvReleaseImage(&g_mask);
	// free memory
	for (int i = 0; i < C_MAX_OBJECTS; i++)
	{
		free(g_contours[i].Points);
	}

	return 0;
}
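
Example #19 likewise calls helpers that are not listed (ClearMask, DrawGrid, DrawStatus, the Load*/Save* routines). Minimal sketches of the two simplest follow, assuming the zone mask is reset by zero-filling and the grid is a fixed-pitch overlay; the 32 px pitch and gray color are illustrative:

#include <opencv/cv.h>

// assumed implementation: reset the zone mask
static void ClearMask(IplImage *mask)
{
	cvZero(mask);
}

// assumed implementation: fixed-pitch overlay grid
static void DrawGrid(IplImage *frame)
{
	const int step = 32; // grid pitch in pixels
	int x, y;
	for (x = step; x < frame->width; x += step)
		cvLine(frame, cvPoint(x, 0), cvPoint(x, frame->height), CV_RGB(128, 128, 128), 1, 8, 0);
	for (y = step; y < frame->height; y += step)
		cvLine(frame, cvPoint(0, y), cvPoint(frame->width, y), CV_RGB(128, 128, 128), 1, 8, 0);
}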