Example #1
CV_IMPL CvSeq*
cvConvexHull2( const CvArr* array, void* hull_storage,
               int orientation, int return_points )
{
    union { CvContour* c; CvSeq* s; } hull;
    hull.s = 0;

    CvMat* mat = 0;
    CvContour contour_header;
    CvSeq hull_header;
    CvSeqBlock block, hullblock;
    CvSeq* ptseq = 0;
    CvSeq* hullseq = 0;

    if( CV_IS_SEQ( array ))
    {
        ptseq = (CvSeq*)array;
        if( !CV_IS_SEQ_POINT_SET( ptseq ))
            CV_Error( CV_StsBadArg, "Unsupported sequence type" );
        if( hull_storage == 0 )
            hull_storage = ptseq->storage;
    }
    else
    {
        ptseq = cvPointSeqFromMat( CV_SEQ_KIND_GENERIC, array, &contour_header, &block );
    }

    if( CV_IS_STORAGE( hull_storage ))
    {
        if( return_points )
        {
            hullseq = cvCreateSeq(CV_SEQ_KIND_CURVE|CV_SEQ_ELTYPE(ptseq)|
                                  CV_SEQ_FLAG_CLOSED|CV_SEQ_FLAG_CONVEX,
                                  sizeof(CvContour), sizeof(CvPoint),(CvMemStorage*)hull_storage );
        }
        else
        {
            hullseq = cvCreateSeq(
                                  CV_SEQ_KIND_CURVE|CV_SEQ_ELTYPE_PPOINT|
                                  CV_SEQ_FLAG_CLOSED|CV_SEQ_FLAG_CONVEX,
                                  sizeof(CvContour), sizeof(CvPoint*), (CvMemStorage*)hull_storage );
        }
    }
    else
    {
        if( !CV_IS_MAT( hull_storage ))
            CV_Error(CV_StsBadArg, "Destination must be valid memory storage or matrix");

        mat = (CvMat*)hull_storage;

        if( (mat->cols != 1 && mat->rows != 1) || !CV_IS_MAT_CONT(mat->type))
            CV_Error( CV_StsBadArg,
                     "The hull matrix should be continuous and have a single row or a single column" );

        if( mat->cols + mat->rows - 1 < ptseq->total )
            CV_Error( CV_StsBadSize, "The hull matrix might not be large enough to fit the hull" );

        if( CV_MAT_TYPE(mat->type) != CV_SEQ_ELTYPE(ptseq) &&
           CV_MAT_TYPE(mat->type) != CV_32SC1 )
            CV_Error( CV_StsUnsupportedFormat,
                     "The hull matrix must have the same type as the input or CV_32SC1 (integers)" );

        hullseq = cvMakeSeqHeaderForArray(
                                          CV_SEQ_KIND_CURVE|CV_MAT_TYPE(mat->type)|CV_SEQ_FLAG_CLOSED,
                                          sizeof(hull_header), CV_ELEM_SIZE(mat->type), mat->data.ptr,
                                          mat->cols + mat->rows - 1, &hull_header, &hullblock );
        cvClearSeq( hullseq );
    }

    int hulltype = CV_SEQ_ELTYPE(hullseq);
    int total = ptseq->total;
    if( total == 0 )
    {
        if( mat )
            CV_Error( CV_StsBadSize,
                     "Point sequence cannot be empty if the output is a matrix" );
        return hull.s;
    }

    cv::AutoBuffer<double> _ptbuf;
    cv::Mat h0;
    cv::convexHull(cv::cvarrToMat(ptseq, false, false, 0, &_ptbuf), h0,
                   orientation == CV_CLOCKWISE, CV_MAT_CN(hulltype) == 2);


    if( hulltype == CV_SEQ_ELTYPE_PPOINT )
    {
        const int* idx = h0.ptr<int>();
        int ctotal = (int)h0.total();
        for( int i = 0; i < ctotal; i++ )
        {
            void* ptr = cvGetSeqElem(ptseq, idx[i]);
            cvSeqPush( hullseq, &ptr );
        }
    }
    else
        cvSeqPushMulti(hullseq, h0.ptr(), (int)h0.total());

    if( mat )
    {
        if( mat->rows > mat->cols )
            mat->rows = hullseq->total;
        else
            mat->cols = hullseq->total;
    }
    else
    {
        hull.s = hullseq;
        hull.c->rect = cvBoundingRect( ptseq,
                                       ptseq->header_size < (int)sizeof(CvContour) ||
                                       &ptseq->flags == &contour_header.flags );
    }

    return hull.s;
}
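A minimal usage sketch for cvConvexHull2 above, with a small hard-coded point set and a matrix destination (all values illustrative). With return_points = 0 and a CV_32SC1 destination, the function stores hull vertex indices and shrinks the matrix header to the hull size:

// Hedged usage sketch (illustrative values): pass an array of points as a
// 1x5 CV_32SC2 matrix and collect hull vertex indices in a CV_32SC1 matrix.
CvPoint pts[] = { {0,0}, {10,0}, {10,10}, {0,10}, {5,5} };
CvMat point_mat = cvMat( 1, 5, CV_32SC2, pts );
int hull_buf[5];
CvMat hull_mat = cvMat( 1, 5, CV_32SC1, hull_buf );
cvConvexHull2( &point_mat, &hull_mat, CV_CLOCKWISE, 0 );
// hull_mat.cols now holds the number of hull vertices;
// each entry of hull_buf is an index into pts.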
Example #2
/* ///////////////////// chess_corner_test ///////////////////////// */
void CV_ChessboardDetectorTest::run( int start_from )
{
    int code = CvTS::OK;

#ifndef WRITE_POINTS    
    const double rough_success_error_level = 2.5;
    const double precise_success_error_level = 0.2;
    double err = 0, max_rough_error = 0, max_precise_error = 0;
#endif

    /* test parameters */
    char   filepath[1000];
    char   filename[1000];

    CvMat*  _u = 0;
    CvMat*  _v = 0;
    CvPoint2D32f* u;
    CvPoint2D32f* v;

    IplImage* img = 0;
    IplImage* gray = 0;
    IplImage* thresh = 0;

    int  idx, max_idx;
    int  progress = 0;

    sprintf( filepath, "%scameracalibration/", ts->get_data_path() );
    sprintf( filename, "%schessboard_list.dat", filepath );
    CvFileStorage* fs = cvOpenFileStorage( filename, 0, CV_STORAGE_READ );
    CvFileNode* board_list = fs ? cvGetFileNodeByName( fs, 0, "boards" ) : 0;

    if( !fs || !board_list || !CV_NODE_IS_SEQ(board_list->tag) ||
        board_list->data.seq->total % 2 != 0 )
    {
        ts->printf( CvTS::LOG, "chessboard_list.dat cannot be read or is not valid" );
        code = CvTS::FAIL_MISSING_TEST_DATA;
        goto _exit_;
    }

    max_idx = board_list->data.seq->total/2;

    for( idx = start_from; idx < max_idx; idx++ )
    {
        int etalon_count = -1;
        int count = 0;
        CvSize etalon_size = { -1, -1 };
        int j, result;
        
        ts->update_context( this, idx-1, true );

        /* read the image */
        sprintf( filename, "%s%s", filepath,
            cvReadString((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*2),"dummy.txt"));
    
        img = cvLoadImage( filename );
        
        if( !img )
        {
            ts->printf( CvTS::LOG, "one of chessboard images can't be read: %s", filename );
            if( max_idx == 1 )
            {
                code = CvTS::FAIL_MISSING_TEST_DATA;
                goto _exit_;
            }
            continue;
        }

        gray = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );
        thresh = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );
        cvCvtColor( img, gray, CV_BGR2GRAY );
 
        sprintf( filename, "%s%s", filepath,
            cvReadString((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*2+1),"dummy.txt"));

        _u = (CvMat*)cvLoad( filename );

        if( _u == 0 )
        {
            if( idx == 0 )
                ts->printf( CvTS::LOG, "one of chessboard corner files can't be read: %s", filename ); 
            if( max_idx == 1 )
            {
                code = CvTS::FAIL_MISSING_TEST_DATA;
                goto _exit_;
            }
            continue;
        }

        etalon_size.width = _u->cols;
        etalon_size.height = _u->rows;
        etalon_count = etalon_size.width*etalon_size.height;

        /* allocate additional buffers */
        _v = cvCloneMat( _u );
        count = etalon_count;

        u = (CvPoint2D32f*)_u->data.fl;
        v = (CvPoint2D32f*)_v->data.fl;

        OPENCV_CALL( result = cvFindChessBoardCornerGuesses(
                     gray, thresh, 0, etalon_size, v, &count ));

        //show_points( gray, 0, etalon_count, v, count, etalon_size, result );
        if( !result || count != etalon_count )
        {
            ts->printf( CvTS::LOG, "chess board is not found" );
            code = CvTS::FAIL_INVALID_OUTPUT;
            goto _exit_;
        }

#ifndef WRITE_POINTS
        err = 0;
        for( j = 0; j < etalon_count; j++ )
        {
            double dx = fabs( v[j].x - u[j].x );
            double dy = fabs( v[j].y - u[j].y );

            dx = MAX( dx, dy );
            if( dx > err )
            {
                err = dx;
                if( err > rough_success_error_level )
                {
                    ts->printf( CvTS::LOG, "bad accuracy of corner guesses" );
                    code = CvTS::FAIL_BAD_ACCURACY;
                    goto _exit_;
                }
            }
        }
        max_rough_error = MAX( max_rough_error, err );
#endif
        OPENCV_CALL( cvFindCornerSubPix( gray, v, count, cvSize( 5, 5 ), cvSize(-1,-1),
                            cvTermCriteria(CV_TERMCRIT_EPS|CV_TERMCRIT_ITER,30,0.1)));
        //show_points( gray, u + 1, etalon_count, v, count );

#ifndef WRITE_POINTS
        err = 0;
        for( j = 0; j < etalon_count; j++ )
        {
            double dx = fabs( v[j].x - u[j].x );
            double dy = fabs( v[j].y - u[j].y );

            dx = MAX( dx, dy );
            if( dx > err )
            {
                err = dx;
                if( err > precise_success_error_level )
                {
                    ts->printf( CvTS::LOG, "bad accuracy of adjusted corners" ); 
                    code = CvTS::FAIL_BAD_ACCURACY;
                    goto _exit_;
                }
            }
        }
        max_precise_error = MAX( max_precise_error, err );
#else
        cvSave( filename, _v );
#endif
        cvReleaseMat( &_u );
        cvReleaseMat( &_v );
        cvReleaseImage( &img );
        cvReleaseImage( &gray );
        cvReleaseImage( &thresh );
        progress = update_progress( progress, idx-1, max_idx, 0 );
    }

_exit_:

    /* release occupied memory */
    cvReleaseMat( &_u );
    cvReleaseMat( &_v );
    cvReleaseFileStorage( &fs );
    cvReleaseImage( &img );
    cvReleaseImage( &gray );
    cvReleaseImage( &thresh );

    if( code < 0 )
        ts->set_failed_test_info( code );
}
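Both accuracy checks in the test above compute the same quantity: the largest per-corner Chebyshev deviation between the reference corners u and the detected corners v. The metric as a standalone sketch (the helper name is illustrative):

// Sketch of the error metric used above: the maximum over all corners of
// max(|dx|, |dy|), i.e. the largest Chebyshev distance from the reference.
static double max_corner_error( const CvPoint2D32f* u, const CvPoint2D32f* v, int n )
{
    double err = 0;
    for( int j = 0; j < n; j++ )
    {
        double dx = fabs( v[j].x - u[j].x );
        double dy = fabs( v[j].y - u[j].y );
        double d = MAX( dx, dy );
        if( d > err )
            err = d;
    }
    return err;
}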
/* ///////////////////// chessboard_detector_timing_test ///////////////////////// */
void CV_ChessboardDetectorTimingTest::run( int start_from )
{
    int code = cvtest::TS::OK;

    /* test parameters */
    char   filepath[1000];
    char   filename[1000];

    CvMat*  _v = 0;
    CvPoint2D32f* v;

    IplImage* img = 0;
    IplImage* gray = 0;
    IplImage* thresh = 0;

    int  idx, max_idx;
    int  progress = 0;

    sprintf( filepath, "%scameracalibration/", ts->get_data_path().c_str() );
    sprintf( filename, "%schessboard_timing_list.dat", filepath );
    printf("Reading file %s\n", filename);
    CvFileStorage* fs = cvOpenFileStorage( filename, 0, CV_STORAGE_READ );
    CvFileNode* board_list = fs ? cvGetFileNodeByName( fs, 0, "boards" ) : 0;

    if( !fs || !board_list || !CV_NODE_IS_SEQ(board_list->tag) ||
            board_list->data.seq->total % 4 != 0 )
    {
        ts->printf( cvtest::TS::LOG, "chessboard_timing_list.dat cannot be read or is not valid" );
        code = cvtest::TS::FAIL_MISSING_TEST_DATA;
        goto _exit_;
    }

    max_idx = board_list->data.seq->total/4;

    for( idx = start_from; idx < max_idx; idx++ )
    {
        int count0 = -1;
        int count = 0;
        CvSize pattern_size;
        int result, result1 = 0;

        const char* imgname = cvReadString((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*4), "dummy.txt");
        int is_chessboard = cvReadInt((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*4+1), 0);
        pattern_size.width = cvReadInt((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*4 + 2), -1);
        pattern_size.height = cvReadInt((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*4 + 3), -1);

        ts->update_context( this, idx-1, true );

        /* read the image */
        sprintf( filename, "%s%s", filepath, imgname );

        img = cvLoadImage( filename );

        if( !img )
        {
            ts->printf( cvtest::TS::LOG, "one of chessboard images can't be read: %s\n", filename );
            if( max_idx == 1 )
            {
                code = cvtest::TS::FAIL_MISSING_TEST_DATA;
                goto _exit_;
            }
            continue;
        }

        ts->printf(cvtest::TS::LOG, "%s: chessboard %d:\n", imgname, is_chessboard);

        gray = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );
        thresh = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );
        cvCvtColor( img, gray, CV_BGR2GRAY );


        count0 = pattern_size.width*pattern_size.height;

        /* allocate additional buffers */
        _v = cvCreateMat(1, count0, CV_32FC2);
        count = count0;

        v = (CvPoint2D32f*)_v->data.fl;

        int64 _time0 = cvGetTickCount();
        result = cvCheckChessboard(gray, pattern_size);
        int64 _time01 = cvGetTickCount();

        OPENCV_CALL( result1 = cvFindChessboardCorners(
                                   gray, pattern_size, v, &count, 15 ));
        int64 _time1 = cvGetTickCount();

        if( result != is_chessboard )
        {
            ts->printf( cvtest::TS::LOG, "Error: chessboard was %sdetected in the image %s\n",
                        result ? "" : "not ", imgname );
            code = cvtest::TS::FAIL_INVALID_OUTPUT;
            goto _exit_;
        }
        if(result != result1)
        {
            ts->printf( cvtest::TS::LOG, "Warning: results differ cvCheckChessboard %d, cvFindChessboardCorners %d\n",
                        result, result1);
        }

        int num_pixels = gray->width*gray->height;
        float check_chessboard_time = float(_time01 - _time0)/(float)cvGetTickFrequency(); // in us
        ts->printf(cvtest::TS::LOG, "    cvCheckChessboard time s: %f, us per pixel: %f\n",
                   check_chessboard_time*1e-6, check_chessboard_time/num_pixels);

        float find_chessboard_time = float(_time1 - _time01)/(float)cvGetTickFrequency();
        ts->printf(cvtest::TS::LOG, "    cvFindChessboard time s: %f, us per pixel: %f\n",
                   find_chessboard_time*1e-6, find_chessboard_time/num_pixels);

        cvReleaseMat( &_v );
        cvReleaseImage( &img );
        cvReleaseImage( &gray );
        cvReleaseImage( &thresh );
        progress = update_progress( progress, idx-1, max_idx, 0 );
    }

_exit_:

    /* release occupied memory */
    cvReleaseMat( &_v );
    cvReleaseFileStorage( &fs );
    cvReleaseImage( &img );
    cvReleaseImage( &gray );
    cvReleaseImage( &thresh );

    if( code < 0 )
        ts->set_failed_test_info( code );
}
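The timing pattern used above, isolated into a sketch: in the C API, cvGetTickFrequency() returns ticks per microsecond, so dividing a tick-count difference by it yields microseconds. Variable names reuse gray and pattern_size from the loop above:

// Timing sketch: bracket the call with cvGetTickCount() and divide the
// difference by cvGetTickFrequency() (ticks per microsecond) to get us.
int64 t0 = cvGetTickCount();
int found = cvCheckChessboard( gray, pattern_size );
int64 t1 = cvGetTickCount();
double us = (double)(t1 - t0) / cvGetTickFrequency();
printf( "cvCheckChessboard: %.1f us (%s)\n", us, found ? "found" : "not found" );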
Example #4
// ######################################################################
Image<PixRGB<byte> > FaceDetector::findFaces(Image<PixRGB<byte> > ima)
{
  Image<PixRGB<byte> > res;
  IplImage *img = img2ipl(ima);


  int scale = 1;

  // Create a string that contains the cascade name
  const char* cascade_name =
    "haarcascade_frontalface_alt.xml";

  // Load a HaarClassifierCascde
  CvHaarClassifierCascade* cascade =
    (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );

  // Check whether the cascade has loaded successfully;
  // otherwise report an error and quit
  if( !cascade )
    LFATAL("ERROR: Could not load classifier cascade");

  // Create a new image based on the input image
  IplImage* temp =
    cvCreateImage( cvSize(img->width/scale,img->height/scale), 8, 3 );

  // Create two points to represent the face locations
  CvPoint pt1, pt2;
  int i;

  // Clear the memory storage which was used before
  CvMemStorage* storage = 0;
  storage = cvCreateMemStorage(0);
  cvClearMemStorage( storage );

  std::vector<Rectangle> tempResults;

  // If the cascade loaded successfully, detect the faces:
  if( cascade )
    {

      // There can be more than one face in an image.
      // So create a growable sequence of faces.
      // Detect the objects and store them in the sequence
      CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,
                                          1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
                                          cvSize(40, 40) );

      // Loop the number of faces found.
      for( i = 0; i < (faces ? faces->total : 0); i++ )
        {
          // Create a new rectangle for drawing the face
          CvRect* r = (CvRect*)cvGetSeqElem( faces, i );

          // Find the dimensions of the face,and scale it if necessary
          pt1.x = r->x*scale;
          pt2.x = (r->x+r->width)*scale;
          pt1.y = r->y*scale;
          pt2.y = (r->y+r->height)*scale;

          // Draw the rectangle in the input image
          cvRectangle( img, pt1, pt2, CV_RGB(255,0,0), 3, 8, 0 );

          // store rectangle to publish later
          tempResults.push_back
            (Rectangle::tlbrO(pt1.y,pt1.x,pt2.y,pt2.x));
        }
    }

  // Release the temp image created.
  cvReleaseImage( &temp );

  // storing data
  its_Curr_Res_mutex.lock();
  itsCurrentFacesFound.clear();
  itsCurrentFacesFound = tempResults;
  // for (uint j = 0; j < tempResults.size(); j++)
  //   {
  //     //      itsCurrentFacesFound
  //   }
  its_Curr_Res_mutex.unlock();

  res = ipl2rgb(img);
  return res;
}
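findFaces above reloads the cascade from disk and allocates fresh memory storage on every call, and releases neither. A hedged sketch of the usual caching pattern (the static handles, function name, and cascade path are illustrative):

// Sketch: load the cascade and create the storage once, then only clear
// the storage per frame instead of reallocating it.
static CvHaarClassifierCascade* s_cascade = 0;
static CvMemStorage* s_storage = 0;

CvSeq* detectOnce( IplImage* img )
{
  if( !s_cascade )
    s_cascade = (CvHaarClassifierCascade*)cvLoad( "haarcascade_frontalface_alt.xml", 0, 0, 0 );
  if( !s_storage )
    s_storage = cvCreateMemStorage(0);
  if( !s_cascade )
    return 0;                       // cascade file missing
  cvClearMemStorage( s_storage );   // reuse the block, don't leak it
  return cvHaarDetectObjects( img, s_cascade, s_storage,
                              1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
                              cvSize(40, 40) );
}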
Example #5
/****************************************************************************
 * Filter: Check for faces and raises an event when one is found.
 ****************************************************************************
 * p_pic: A picture_t with its p_data_orig member set to an array of
 * IplImages (one image for each picture_t plane).
 ****************************************************************************/
static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    IplImage** p_img = NULL;
    int i_planes = 0;
    CvPoint pt1, pt2;
    int i, scale = 1;
 
    if ((!p_pic) )
    {
        msg_Err( p_filter, "no image array" );
        return NULL;
    }
    if (!(p_pic->p_data_orig))
    {
        msg_Err( p_filter, "no image array" );
        return NULL;
    }
    //(hack) cast the picture_t to array of IplImage*
    p_img = (IplImage**) p_pic->p_data_orig;
    i_planes = p_pic->i_planes;

    //check the image array for validity
    if ((!p_img[0]))    //1st plane is 'I' i.e. greyscale
    {
        msg_Err( p_filter, "no image" );
        return NULL;
    }
    if ((p_pic->format.i_chroma != VLC_CODEC_I420))
    {
        msg_Err( p_filter, "wrong chroma - use I420" );
        return NULL;
    }
    if (i_planes<1)
    {
        msg_Err( p_filter, "no image planes" );
        return NULL;
    }

    //perform face detection
    cvClearMemStorage(p_filter->p_sys->p_storage);
    CvSeq* faces = NULL;
    if( p_filter->p_sys->p_cascade )
    {
        //we should make some of these params config variables
        faces = cvHaarDetectObjects( p_img[0], p_filter->p_sys->p_cascade,
            p_filter->p_sys->p_storage, 1.15, 5, CV_HAAR_DO_CANNY_PRUNING,
                                            cvSize(20, 20) );
        //create the video_filter_region_info_t struct
        CvRect* r;
        if (faces && (faces->total > 0))
        {
            //msg_Dbg( p_filter, "Found %d face(s)", faces->total );
            free( p_filter->p_sys->event_info.p_region );
            p_filter->p_sys->event_info.p_region = NULL;
            if( NULL == ( p_filter->p_sys->event_info.p_region =
                  (video_filter_region_info_t *)malloc(faces->total*sizeof(video_filter_region_info_t))))
            {
                return NULL;
            }
            memset(p_filter->p_sys->event_info.p_region, 0, faces->total*sizeof(video_filter_region_info_t));
            p_filter->p_sys->event_info.i_region_size = faces->total;
        }

        //populate the video_filter_region_info_t struct
        for( i = 0; i < (faces ? faces->total : 0); i++ )
        {
            r = (CvRect*)cvGetSeqElem( faces, i );
            pt1.x = r->x*scale;
            pt2.x = (r->x+r->width)*scale;
            pt1.y = r->y*scale;
            pt2.y = (r->y+r->height)*scale;
            cvRectangle( p_img[0], pt1, pt2, CV_RGB(0,0,0), 3, 8, 0 );

            *(CvRect*)(&(p_filter->p_sys->event_info.p_region[i])) = *r;
            p_filter->p_sys->event_info.p_region[i].i_id = p_filter->p_sys->i_id++;
            p_filter->p_sys->event_info.p_region[i].p_description = "Face Detected";
        }

        if (faces && (faces->total > 0))    //raise the video filter event
            var_TriggerCallback( p_filter->p_libvlc, VIDEO_FILTER_EVENT_VARIABLE );
    }
    else
        msg_Err( p_filter, "No cascade - is opencv-haarcascade-file valid?" );

    return p_pic;
}
void Determine::JudgementMouse(CvSeq* circles){
	switch (circles->total){
			case 0:
				OneFinger++;
				//for mouse drag judgement; clear the variables after use
				if(OneFinger==3){
					MouseMoveFlag=0;
					if(LeftButton){
					mouse_event(MOUSEEVENTF_LEFTUP,0,0,0,0);
					LeftButton=false;
					}
					OneFinger=0;
				}
				//left click (or double click) when the finger was down only briefly
				if(Temp==1 || Temp==2){
					mouse.LeftClick();
					MouseMoveFlag=1800;//for mouse drag judgement 
				}
	            Initial();
				break;
			case 1:
				if(CaseFlag!=1)
					Initial();
				CaseFlag=1;
				for(int i=0;i<circles->total;i++){
					float* p=(float*)cvGetSeqElem(circles,0);
					X1[0]=cvRound(p[0]);
					X1[1]=cvRound(p[1]);
				}
				Temp++;
				if(Temp==1){
					CooX=X1[0];
					CooY=X1[1];
				}
				MouseMoveFlag++;
				if(MouseMoveFlag==1801){
					mouse_event(MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0);//press left button and not release
					LeftButton=true;
					GetCursorPos(&pt);//get current mouse pointer location
			     	SetCursorPos(pt.x+X1[0]-CooX,pt.y+CooY-X1[1]);//set mouse pointer to a new coordinate
				    CooX=X1[0];
			    	CooY=X1[1];//replace previous location with current one
					MouseMoveFlag=1800;
			    	break;
				}
				else{
					GetCursorPos(&pt);
				    SetCursorPos(pt.x+X1[0]-CooX,pt.y+CooY-X1[1]);
			    	CooX=X1[0];
		    		CooY=X1[1];
					MouseMoveFlag=0;
			    	break;
				}
			case 2:
				if(CaseFlag!=2)
					Initial();
				CaseFlag=2;
				for(int i=0;i<circles->total;i++){
					float* p=(float*)cvGetSeqElem(circles,i);
					X2[i][0]=cvRound(p[0]);
					X2[i][1]=cvRound(p[1]);
				}
				Temp++;
				//first frame: record the distance between the two points and the angle relative to the X axis
				if(Temp==1){
					InitialDistance=(double)(pow((X2[0][0]-X2[1][0]),2)+pow((X2[0][1]-X2[1][1]),2));
					InitialAngle=(double)(X2[0][1]-X2[1][1])/(double)(X2[0][0]-X2[1][0]);//slope of the line through the two points (floating point, to avoid integer division)
					InitialAngle=atan(InitialAngle);//angle
					InitialDistance=sqrt(InitialDistance);//distance
				}
				else{
					double ProcessedDistance=(double)(pow((X2[0][0]-X2[1][0]),2)+pow((X2[0][1]-X2[1][1]),2));
					//calculate the distance between the two points in the following frame
					ProcessedDistance=sqrt(ProcessedDistance);
					ProcessedAngle=(double)(X2[0][1]-X2[1][1])/(double)(X2[0][0]-X2[1][0]);//slope
					ProcessedAngle=atan(ProcessedAngle);//angle
					//if the angle decreased by more than pi/20, rotate clockwise
					if((InitialAngle-ProcessedAngle)>(3.1415926/20)){
						gesture.RotateClockWise();
						InitialAngle=ProcessedAngle;
						break;
					}
					//if the angle increased by more than pi/20, rotate counter-clockwise
					if((ProcessedAngle-InitialAngle)>(3.1415926/20)){
						gesture.RotateAntiClockWise();
						InitialAngle=ProcessedAngle;
						break;
					}
					//picture zoom out
					//calculate the distance; the following code runs only when the change exceeds a threshold
					if((ProcessedDistance+80)<InitialDistance){
						gesture.ZoomIn();
			    		InitialDistance=ProcessedDistance;
				    	break;
					}
					//picture zoom in
					if((ProcessedDistance-80)>InitialDistance){
						gesture.ZoomOut();
					    InitialDistance=ProcessedDistance;
					    break;
					}
					//right click when the two points stay nearly still for several frames
					if(fabs(ProcessedDistance-InitialDistance)<10 && Temp==6){
						mouse.RightClick();
						break;
					}
				}
				break;
			case 3:
				if(CaseFlag!=3)
					Initial();//clear all used variable when switch among different fingers action
				CaseFlag=3;
				ProcessedDistance=0;
				for(int i=0;i<circles->total;i++){
					float* p = (float*)cvGetSeqElem(circles,i);//get each point's coordinate
					ProcessedDistance=ProcessedDistance+cvRound(p[0]);//sum the three fingers' X axis values
				}
				Temp++;
				if (Temp==1){
					InitialDistance=ProcessedDistance;//initial
				}
				else{
					Judge=WindowsJudge();//determine what kind of window is active
					if(Judge==2){//if browser
						if((ProcessedDistance-180)>InitialDistance){//tag forward
							gesture.TagForward();
						    InitialDistance=ProcessedDistance;//replace initial sum with current sum of x axis
						    break;
						}
	                 	if((ProcessedDistance+180)<InitialDistance){//tag back
					    	gesture.TagBack();
							InitialDistance=ProcessedDistance;
					    	break;
						}
					}
					//if current window is picture viewer
					else if(Judge==1){
						if((ProcessedDistance-180)>InitialDistance){//change to next picture
							gesture.NextPicture();
						    InitialDistance=ProcessedDistance;
						    break;
						}
	                 	if((ProcessedDistance+180)<InitialDistance){//change to previous picture
							gesture.PreviousPicture();
							InitialDistance=ProcessedDistance;
					    	break;
						}
					}
				}
				break;
			case 4:
				if(CaseFlag!=4)
					Initial();
				CaseFlag=4;
				ProcessedDistance=0;
				for(int i=0;i<circles->total;i++){
					float* p = (float*)cvGetSeqElem(circles,i);
					ProcessedDistance=ProcessedDistance+cvRound(p[1]);//sum up four fingers' Y axis value
				}
				Temp++;
				if (Temp==1){
					InitialDistance=ProcessedDistance;//initial
				}
				else{
					//movement must exceed a certain distance, bottom to top
					if((ProcessedDistance-160)>InitialDistance){
						gesture.UndoMinimizeAll();//call the corresponding module
						InitialDistance=ProcessedDistance;
						break;
					}
					//move from top to bottom
					if((ProcessedDistance+160)<InitialDistance){
						InitialDistance=ProcessedDistance;
						gesture.MinimizeAll();
						break;
					}
				}
				break;
			default:
				Initial();//actions with more than 4 fingers are not supported
				break;
		}		
}
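The two-finger case above derives rotation and pinch gestures from the angle and distance between two tracked points. A sketch of the underlying geometry in floating point, using coordinate differences and atan2 so that vertical lines and integer division are not a problem (names are illustrative):

// Sketch: angle and separation of two tracked points, in floating point.
// atan2 covers the vertical case that atan(dy/dx) cannot.
static void two_finger_state( int x0, int y0, int x1, int y1,
                              double* angle, double* distance )
{
	double dx = (double)(x1 - x0);
	double dy = (double)(y1 - y0);
	*angle    = atan2( dy, dx );        // radians, relative to the X axis
	*distance = sqrt( dx*dx + dy*dy );  // finger separation
}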
Example #7
// parameters:
//  img - input video frame
//  dst - resultant motion picture
//  args - optional parameters
void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvMerge( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    printf("Nonzero count %d\n", cvCountNonZero(mask));

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {

        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                                      cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
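A minimal capture loop driving update_mhi above, in the style of the original motempl sample; the camera index and window name are illustrative, and the globals used by the function (buf, mhi, orient, segmask, mask, storage, last, N, MHI_DURATION) are assumed to be declared alongside it:

// Sketch of a driver loop for update_mhi().
CvCapture* capture = cvCaptureFromCAM(0);
IplImage* motion = 0;
if( capture )
{
    cvNamedWindow( "Motion", 1 );
    for(;;)
    {
        IplImage* image = cvQueryFrame( capture );
        if( !image )
            break;
        if( !motion )
        {
            motion = cvCreateImage( cvSize(image->width, image->height), 8, 3 );
            cvZero( motion );
            motion->origin = image->origin;
        }
        update_mhi( image, motion, 30 );   // diff_threshold = 30
        cvShowImage( "Motion", motion );
        if( cvWaitKey(10) >= 0 )
            break;
    }
    cvReleaseCapture( &capture );
    cvDestroyWindow( "Motion" );
}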
Example #8
//////////////////////////////////
// main()
//
int _tmain(int argc, _TCHAR* argv[])
{
//	try_conv();
	if( !initAll() ) 
		exitProgram(-1);

	// Capture and display video frames until a face
	// is detected
	int frame_count = 0;
	while( (char)27!=cvWaitKey(1) )
	{
		//Retrieve next image and 
		// Look for a face in the next video frame
		
		//read into pfd_pVideoFrameCopy
		if (!captureVideoFrame()){
			if (frame_count==0)
				throw exception("Failed before reading anything");
			break; //end of video..
		}
		++frame_count;

		CvSeq* pSeq = 0;
		detectFaces(pfd_pVideoFrameCopy,&pSeq);
		
		//Do some filtration of pSeq into pSeqOut, based on history etc,
		//update data structures (history, face threads, etc.)
		list<Face> & faces_in_this_frame = FdProcessFaces(pfd_pVideoFrameCopy,pSeq);

		//== draw rectangle for each detected face ==
		if (!faces_in_this_frame.empty()){	//faces detected (??)
			int i = 0;
			for(list<Face>::iterator face_itr = faces_in_this_frame.begin(); face_itr != faces_in_this_frame.end(); ++face_itr)
			{
				CvPoint pt1 = cvPoint(face_itr->x,face_itr->y);
				CvPoint pt2 = cvPoint(face_itr->x + face_itr->width,face_itr->y + face_itr->height);
				if (face_itr->frame_id == frame_count) //detected for this frame
					cvRectangle( pfd_pVideoFrameCopy, pt1, pt2, colorArr[i++%3],3,8,0);
				else //from a previous frame
					cvRectangle( pfd_pVideoFrameCopy, pt1, pt2, colorArr[i++%3],1,4,0);
			}
		}else{ //no faces detected
			Sleep(100);
		}

		cvShowImage( DISPLAY_WINDOW, pfd_pVideoFrameCopy );
		cvReleaseImage(&pfd_pVideoFrameCopy);
	
	} //end input while
	cout << "==========================================================" << endl;
	cout << "========== Input finished ================================" << endl;
	cout << "==========================================================" << endl << endl;
	
	cout << "Press a key to continue with history playback" <<endl;
	char cc = fgetc(stdin);


	cout << "==========================================================" << endl;
	cout << "==== Playback history + rectangles +                 =====" << endl;
	cout << "==== create output video(s)						  =====" << endl;
	cout << "==========================================================" << endl << endl;
	list<FDHistoryEntry> & pHistory = FdGetHistorySeq();
	
	//== VIDEO WRITER START =====================
	int isColor = 1;
	int fps     = 12;//30;//25;  // or 30
	int frameW  = 640; // 744 for firewire cameras
	int frameH  = 480; // 480 for firewire cameras
	CvVideoWriter * playbackVidWriter=cvCreateVideoWriter((OUTPUT_PLAYBACK_VIDEOS_DIR + "\\playback.avi").c_str(),
								PFD_VIDEO_OUTPUT_FORMAT,
							   fps,cvSize(frameW,frameH),isColor);
	CvVideoWriter *  croppedVidWriter = 0;
	if (!playbackVidWriter) {
		cerr << "can't create vid writer" << endl;
		exitProgram(-1);
	}
	bool wasWrittenToVideo = false;
	//== VIDEO WRITER END =====================

	int index = 0;
	// play recorded sequence----------------------------
	// i.e. just what's in the history
	int playback_counter = 0;

	cout << "start finding consensus rect " << endl;
	//find min max
	bool found =false;
	int min_x = INT_MAX,//pFaceRect->x,
		max_x = 0,//pFaceRect->x+pFaceRect->width,
		min_y = INT_MAX,//pFaceRect->y,
		max_y = 0;//pFaceRect->y+pFaceRect->height;
	for (list<FDHistoryEntry>::iterator itr = pHistory.begin() ; itr != pHistory.end(); ++itr)
	{
		CvSeq* pFacesSeq = itr->pFacesSeq;
		assert(pFacesSeq);
		//TODO Might want to convert to Face here
		CvRect * pFaceRect = (CvRect*)cvGetSeqElem(pFacesSeq, 0); //works only on first rec series
		if (pFaceRect){
			found = true;
			if (pFaceRect->x < min_x) min_x = pFaceRect->x;
			if (pFaceRect->x+pFaceRect->width > max_x) max_x = pFaceRect->x + pFaceRect->width;
			
			if (pFaceRect->y < min_y) min_y = pFaceRect->y;
			if (pFaceRect->y+pFaceRect->height > max_y) max_y =  pFaceRect->y+pFaceRect->height;
		}
	}
	//assert(found); //some rect in history..
	CvRect consensus_rect;
	consensus_rect.x = min_x;
	consensus_rect.y = min_y;
	consensus_rect.width  = max_x - min_x;
	consensus_rect.height = max_y - min_y;

	Sleep(3000); //just to make sure that pruneHistory isn't modifying..
	cout << "start playback loop " << endl;
	int k = 0;
	for (list<FDHistoryEntry>::iterator itr = pHistory.begin() ; itr != pHistory.end(); ++itr)
	{
		cout << ++k << endl;
		//cvResetImageROI(history_itr->pFrame);  //now reset by FDFaceThread
		pfd_pVideoFrameCopy = cvCreateImage( cvGetSize(itr->pFrame ), 8, 3 ); //TODO query image for its properties
		cvCopy( itr->pFrame , pfd_pVideoFrameCopy, 0 );
		CvSeq* pFacesSeq = itr->pFacesSeq;
#ifndef NO_RECTS_ON_PLAYBACK
		for(int i = 0 ;i < pFacesSeq->total ;i++){				
			Face * pFaceRect = (Face*)cvGetSeqElem(pFacesSeq, i);
			assert(pFaceRect != NULL);
			CvPoint pt1 = cvPoint(pFaceRect->x,pFaceRect->y);
			CvPoint pt2 = cvPoint(pFaceRect->x + pFaceRect->width,pFaceRect->y + pFaceRect->height);
			if (itr->frame_id == pFaceRect->frame_id)
				cvRectangle( pfd_pVideoFrameCopy, pt1, pt2,	 colorArr[i%3],3,8,0);
			else
				cvRectangle( pfd_pVideoFrameCopy, pt1, pt2, colorArr[i%3],1,4,0);
		}
#endif
		if (pFacesSeq->total > 0) 
		{	
			assert(found);
			//write 1st sequence if exists to cropped vid
			if (!croppedVidWriter)
				croppedVidWriter=cvCreateVideoWriter((OUTPUT_PLAYBACK_VIDEOS_DIR + "\\cropped_playback.avi").c_str(),
									PFD_VIDEO_OUTPUT_FORMAT,
	 						   fps,cvSize(max_x-min_x,max_y-min_y),isColor);
			assert(croppedVidWriter);


			cvResetImageROI(pfd_pVideoFrameCopy);
			cvSetImageROI(pfd_pVideoFrameCopy,consensus_rect);
			//write cropped image to video file
			IplImage *croppedImg = cvCreateImage(cvGetSize(pfd_pVideoFrameCopy),
								   pfd_pVideoFrameCopy->depth,
								   pfd_pVideoFrameCopy->nChannels);	
			assert(croppedImg);
			cvCopy(pfd_pVideoFrameCopy, croppedImg, NULL);
			assert(croppedVidWriter);
			cvWriteFrame(croppedVidWriter,croppedImg);
			cvReleaseImage(&croppedImg);
		}

		cvShowImage( DISPLAY_WINDOW, pfd_pVideoFrameCopy );
		cvResetImageROI(pfd_pVideoFrameCopy); //CROP_PLAYBACK_FACE
		cvWriteFrame(playbackVidWriter,pfd_pVideoFrameCopy);
		if( (char)27==cvWaitKey(1) ) break;//exitProgram(0);
		Sleep(50);	
		++playback_counter;	
	}

	
	cvReleaseVideoWriter(&playbackVidWriter);
	cvReleaseVideoWriter(&croppedVidWriter);
	exitProgram(0);
	//-----------------------------------------------------------
	//-----------------------------------------------------------
	//-----------------------------------------------------------
}
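The consensus rectangle computed above is the bounding union of the first face rectangle in each history entry. The same reduction as a standalone helper (the name is illustrative):

// Sketch: grow an accumulator rectangle so that it also covers r.
// Folding this over all history rects reproduces consensus_rect above.
static void union_rect( CvRect* acc, const CvRect* r )
{
	int x2 = MAX( acc->x + acc->width,  r->x + r->width );
	int y2 = MAX( acc->y + acc->height, r->y + r->height );
	acc->x = MIN( acc->x, r->x );
	acc->y = MIN( acc->y, r->y );
	acc->width  = x2 - acc->x;
	acc->height = y2 - acc->y;
}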
Example #9
// Function to detect and draw any faces that are present in an image
bool face_detect( IplImage* srcImg, std::vector<CvRect> *vectFaces, CvHaarClassifierCascade* cascade, bool detect_only)
{

    // Create memory for calculations
    static CvMemStorage* storage = 0;

    // Create two points to represent the face locations
    CvPoint pt1, pt2;
    int i;

    if( !cascade )
    {
        MY_LOG("%s: faceCascade is NULL", __FILE__);
        return false;
    }

    // Allocate the memory storage on first use, then clear it for this call
    // (re-creating it on every call would leak the previous block)
    if( !storage )
        storage = cvCreateMemStorage(0);
    cvClearMemStorage( storage );

    int max_width = 0;
    // If the cascade loaded successfully, look for the biggest face:
    if( cascade )
    {

        // There can be more than one face in an image. So create a growable sequence of faces.
        // Detect the objects and store them in the sequence

        DURATION_START;
        CvSeq* faces = cvHaarDetectObjects( srcImg, cascade, storage,
                1.1, 2, CV_HAAR_DO_CANNY_PRUNING
                , cvSize(20, 20) 
                );
        DURATION_STOP("cvHaarDetectObjects()");

        // Loop the number of faces found.
        for( i = 0, max_width=0; i < faces->total; i++ )
        {
            // Create a new rectangle for the face
            CvRect* r = (CvRect*)cvGetSeqElem( faces, i );

            if(vectFaces != NULL) {
                vectFaces -> push_back(*r);
            }

            MY_LOG("%s: found face <%d,%d> with %dx%d\n", __func__, r->x, r->y, r->width, r->height);

            if(!detect_only) {
                // Draw the rectangle around this face in the input image
                // (pt1/pt2 are only set below for the widest face, so draw from r directly)
                cvRectangle( srcImg, cvPoint(r->x, r->y),
                             cvPoint(r->x + r->width, r->y + r->height),
                             CV_RGB(255,0,0), 3, 8, 0 );
            }

            if(max_width < r->width) {

                pt1.x = r->x;
                pt2.x = (r->x + r->width);
                pt1.y = r->y;
                pt2.y = (r->y + r->height);

                max_width = r->width;

            }
        }

        if(max_width <= 4) {
            return false;
        }

        //printf("%s: (%d,%d), (%d,%d) -> (%d * %d)\n", __func__, pt1.x, pt1.y, pt2.x, pt2.y, (pt2.x - pt1.x) , (pt2.y - pt1.y));

        //cvSetImageROI(srcImg, cvRect(pt1.x, pt1.y, pt2.x - pt1.x, pt2.y - pt1.y));
        //// __android_log_print(ANDROID_LOG_DEBUG, "run to here ", "func:%s, line:%d", __func__,__LINE__);
        //// printf("%s: srcImg ROI: (%d * %d)\n",__func__, cvGetImageROI(srcImg).width, cvGetImageROI(srcImg).height );
        //IplImage *tmpImg2 = cvCreateImage( cvSize(cvGetImageROI(srcImg).width, cvGetImageROI(srcImg).height), IPL_DEPTH_8U, 1);
        //IplImage *tmpImg = srcImg;
        ////color depth
        //if(srcImg->nChannels != 1)  {
        //    cvCvtColor(srcImg, tmpImg2, CV_BGR2GRAY);
        //    tmpImg = tmpImg2;
        //}

        ////resize
        //*dstImg = cvCreateImage(cvSize(FACE_SIZE, FACE_SIZE), IPL_DEPTH_8U, 1);
        //cvResize(tmpImg, *dstImg, CV_INTER_CUBIC);

        ////__android_log_print(ANDROID_LOG_DEBUG, "run to here ", "func:%s, line:%d", __func__,__LINE__);
        //cvResetImageROI(srcImg);

        //cvReleaseImage(&tmpImg2);
        ////__android_log_print(ANDROID_LOG_DEBUG, "run to here ", "func:%s, line:%d", __func__,__LINE__);
        
        return true;
    }

    return false;
}
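A hedged usage sketch for face_detect above; the image path and cascade file name are illustrative:

// Sketch: run face_detect() in detect-only mode and print what it found.
CvHaarClassifierCascade* cascade =
    (CvHaarClassifierCascade*)cvLoad( "haarcascade_frontalface_alt.xml", 0, 0, 0 );
IplImage* img = cvLoadImage( "input.jpg" );
std::vector<CvRect> faces;
if( cascade && img && face_detect( img, &faces, cascade, true ) )
{
    for( size_t k = 0; k < faces.size(); k++ )
        printf( "face %d: (%d,%d) %dx%d\n", (int)k,
                faces[k].x, faces[k].y, faces[k].width, faces[k].height );
}
cvReleaseImage( &img );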
void CvArrTest::print_time( int test_case_idx, double time_clocks )
{
    int in_type = -1, out_type = -1;
    CvSize size = { -1, -1 };
    const CvFileNode* size_node = find_timing_param( "size" );
    char str[1024], *ptr = str;
    int len;
    bool have_mask;
    double cpe;

    if( size_node )
    {
        if( !CV_NODE_IS_SEQ(size_node->tag) )
        {
            size.width = cvReadInt(size_node,-1);
            size.height = 1;
        }
        else
        {
            size.width = cvReadInt((const CvFileNode*)cvGetSeqElem(size_node->data.seq,0),-1);
            size.height = cvReadInt((const CvFileNode*)cvGetSeqElem(size_node->data.seq,1),-1);
        }
    }

    if( test_array[INPUT].size() )
    {
        in_type = CV_MAT_TYPE(test_mat[INPUT][0].type);
        if( size.width == -1 )
            size = cvGetMatSize(&test_mat[INPUT][0]);
    }

    if( test_array[OUTPUT].size() )
    {
        out_type = CV_MAT_TYPE(test_mat[OUTPUT][0].type);
        if( in_type < 0 )
            in_type = out_type;
        if( size.width == -1 )
            size = cvGetMatSize(&test_mat[OUTPUT][0]);
    }

    if( out_type < 0 && test_array[INPUT_OUTPUT].size() )
    {
        out_type = CV_MAT_TYPE(test_mat[INPUT_OUTPUT][0].type);
        if( in_type < 0 )
            in_type = out_type;
        if( size.width == -1 )
            size = cvGetMatSize(&test_mat[INPUT_OUTPUT][0]);
    }

    have_mask = test_array[MASK].size() > 0 && test_array[MASK][0] != 0;

    if( in_type < 0 && out_type < 0 )
        return;

    if( out_type < 0 )
        out_type = in_type;

    ptr = strchr( (char*)tested_functions, ',' );
    if( ptr )
    {
        len = (int)(ptr - tested_functions);
        strncpy( str, tested_functions, len );
    }
    else
    {
        len = (int)strlen( tested_functions );
        strcpy( str, tested_functions );
    }
    ptr = str + len;
    *ptr = '\0';
    if( have_mask )
    {
        sprintf( ptr, "(Mask)" );
        ptr += strlen(ptr);
    }
    *ptr++ = ',';
    sprintf( ptr, "%s", cvTsGetTypeName(in_type) );
    ptr += strlen(ptr);

    if( CV_MAT_DEPTH(out_type) != CV_MAT_DEPTH(in_type) )
    {
        sprintf( ptr, "%s", cvTsGetTypeName(out_type) );
        ptr += strlen(ptr);
    }
    *ptr++ = ',';

    sprintf( ptr, "C%d", CV_MAT_CN(in_type) );
    ptr += strlen(ptr);

    if( CV_MAT_CN(out_type) != CV_MAT_CN(in_type) )
    {
        sprintf( ptr, "C%d", CV_MAT_CN(out_type) );
        ptr += strlen(ptr);
    }
    *ptr++ = ',';

    sprintf( ptr, "%dx%d,", size.width, size.height );
    ptr += strlen(ptr);

    print_timing_params( test_case_idx, ptr );
    ptr += strlen(ptr);
    cpe = time_clocks / ((double)size.width * size.height);
    if( cpe >= 100 )
        sprintf( ptr, "%.0f,", cpe );
    else
        sprintf( ptr, "%.1f,", cpe );
    ptr += strlen(ptr);
    sprintf( ptr, "%g", time_clocks/cvGetTickFrequency() );

    ts->printf( CvTS::CSV, "%s\n", str );
}
Example #11
int MakeFeatureInMem(
		IplImage* 	RGBA,
		IplImage* 	depth,
		IplImage*	mask,
		FEATURE* 	feature){
	if (RGBA==NULL){
		fprintf(stderr, "image file is required to create feature set!");
		return 1;
	}

	IplImage *hsv_img = NULL, *h = NULL, *s = NULL, *v = NULL;
	if (HUELBP_ON){
		// convert to hsv image
		hsv_img = cvCreateImage( cvGetSize(RGBA), IPL_DEPTH_8U, 3);
		cvCvtColor(RGBA, hsv_img, CV_RGB2HSV);

		h = cvCreateImage( cvGetSize(hsv_img), IPL_DEPTH_8U, 1 );
		s = cvCreateImage( cvGetSize(hsv_img), IPL_DEPTH_8U, 1 );
		v = cvCreateImage( cvGetSize(hsv_img), IPL_DEPTH_8U, 1 );

		// Split image onto the color planes
		cvSplit( hsv_img, h, s, v, NULL );
	}
	// convert to grayscale-image
	IplImage* gray_img = RGBA;
	if (RGBA->nChannels > 1) {
		gray_img = cvCreateImage(cvGetSize(RGBA), IPL_DEPTH_8U, 1 );
		cvCvtColor( RGBA, gray_img, CV_RGB2GRAY );
	}
//	cvEqualizeHist(gray_img,gray_img);

	feature->grid_x 	= GRID_X;
	feature->grid_y 	= GRID_Y;
	feature->radius 	= RADIUS;
	feature->neighbors	= NEIGHBORS;

	int numPatterns = UNIFORM_ON? (NEIGHBORS+2) : pow(2.0, NEIGHBORS);
	//detect faces
	CvSeq* faces;
	int retCode = DetectFaces(gray_img, depth, &faces, FOREGRND_ON);
	if (retCode){//no faces found
		feature->histogram 		= NULL;
		feature->hue_histogram 	= NULL;
		feature->num_faces 		= 0;
		return 0;
	}else{
		//calculate features
		feature->num_faces 		= faces->total;
		feature->histogram 		= (CvMat**) malloc(faces->total*sizeof(CvMat*));
		feature->hue_histogram 	= (CvMat**) malloc(faces->total*sizeof(CvMat*));
		for(int i = 0; i < faces->total; i++ )
		{
			// Create a new rectangle for drawing the face
			CvRect* r = (CvRect*)cvGetSeqElem( faces, i ); // Find the dimensions of the face, and scale it if necessary
			IplImage* face_img = CreateSubImg(gray_img, *r);
			IplImage* lbp_img =  CalcLBP(face_img, RADIUS, NEIGHBORS, UNIFORM_ON);

			if (lbp_img==NULL){
				fprintf(stderr, "failed to create lbp image!\n");
				return 1;
			}
			feature->histogram[i] = CalcSpatialHistogram(lbp_img, numPatterns, GRID_X, GRID_Y);
			if (feature->histogram[i]==NULL){
				fprintf(stderr, "failed to create spatial histogram!\n");
				return 2;
			}
			cvReleaseImage(&face_img);
			cvReleaseImage(&lbp_img);

			if (HUELBP_ON){
				// Create a hue face image
				IplImage* hue_face_img = CreateSubImg(h, *r);

				//Create Hue LBP
				IplImage* hue_lbp = CalcLBP(hue_face_img, RADIUS, NEIGHBORS, UNIFORM_ON);
				if (hue_lbp==NULL){
					fprintf(stderr, "failed to create hue-lbp image!\n");
					return 1;
				}
				//Create Hue Spatial Histogram
				feature->hue_histogram[i] = CalcSpatialHistogram(hue_lbp, numPatterns, GRID_X, GRID_Y);
				if (feature->hue_histogram[i]==NULL){
					fprintf(stderr, "failed to create hue spatial histogram!\n");
					return 2;
				}

				cvReleaseImage(&hue_face_img);
				cvReleaseImage(&hue_lbp);
			}


		}
	}
	if (HUELBP_ON){
		cvReleaseImage(&hsv_img);
		cvReleaseImage(&h);
		cvReleaseImage(&s);
		cvReleaseImage(&v);
	}
	if (RGBA->nChannels > 1) {
		cvReleaseImage(&gray_img);
	}
	return 0;

}
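MakeFeatureInMem above mallocs two arrays of CvMat* per feature and fills them with histograms. A sketch of the matching cleanup, using the field names from the code (the helper name is illustrative, and hue histograms are assumed to exist only when HUELBP_ON is set, as above):

// Sketch: release everything MakeFeatureInMem allocated into a FEATURE.
static void ReleaseFeature( FEATURE* feature )
{
	for (int i = 0; i < feature->num_faces; i++){
		if (feature->histogram)
			cvReleaseMat(&feature->histogram[i]);
		if (HUELBP_ON && feature->hue_histogram)
			cvReleaseMat(&feature->hue_histogram[i]);
	}
	free(feature->histogram);
	feature->histogram = NULL;
	free(feature->hue_histogram);
	feature->hue_histogram = NULL;
	feature->num_faces = 0;
}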
Example #12
//-------DETECT FACE OF GADHIJI ---------------
void detectAllFaces( IplImage *img )
{
	// -------THIS STRUCTURE GIVES DETAILS OF THE DETECTED FACE----------- 
	GandhitplMatch detectedimg[10];
	//cvNamedWindow("displayface",1);
    int k;
 	int cnt=0;
	 CvRect *r1;
	 CvScalar s;
	 int fl=0;
    /* detect faces */
    CvSeq *faces = cvHaarDetectObjects(
            img,
            cascade,
            storage,
            1.1,
            1,
            0 /*CV_HAAR_DO_CANNY_PRUNING*/,
            cvSize( 30, 30 ) );
	globalmaximum=-999;
	globalmaxindex=-1;
    /* for each face found, draw a red box */
    for( k = 0 ; k < ( faces ? faces->total : 0 ) ; k++ ) {
			r1 = ( CvRect* )cvGetSeqElem( faces, k );	
		if((r1->height<100)&&(r1->width<100))
		 {
				fl=1;
				detectedimg[k].faceimg=cvCreateImage(cvGetSize(img),8,3);
				cvCopyImage(img,detectedimg[k].faceimg);
				detectedimg[k].height=r1->height;
				detectedimg[k].width=r1->width;
				detectedimg[k].x=r1->x;
				detectedimg[k].y=r1->y;
				
			//	cvShowImage("displayface",detectedimg[k].faceimg);
				//cvWaitKey(100);
	
				
			/*cvRectangle( img,
						 cvPoint( r1->x, r1->y ),
						 cvPoint( r1->x + r1->width, r1->y + r1->height ),
						 CV_RGB( 255, 0, 0 ), 1, 8, 0 ); */
				//printf("facedetection called");
//				printf("width= %d height= %d",detectedimg[k].faceimg->width,detectedimg[k].faceimg->height);
			for(int i=0;i<img->height;i++)
			{
				for(int j=0;j<img->width;j++)
				{
					//black out every pixel outside the detected face rectangle
					if((j<r1->x || j>r1->x + r1->width)  || (i<r1->y || i>r1->y + r1->height))
					{
						s = cvGet2D(detectedimg[k].faceimg, i, j);
						s.val[0]=0.0;
						s.val[1]=0.0;
						s.val[2]=0.0;
						cvSet2D(detectedimg[k].faceimg, i, j, s );
					}
				}
			}

	//cvShowImage("displayface",detectedimg[k].faceimg);
	//cvWaitKey(10);
		




//			printf("width %d height  %d\n",r1->width,r1->height);
		}
		//-------SEND THE DETECTED FACE TO MATCH WITH FACE OF GANDHIJI---------
		gandhijitplMatch(detectedimg[k],k);	
 
	}
		//------KEEP THE MATCHED IMAGE WHOSE MATCH IS GREATER THAN 0.62-----------
	if(faces->total>0 && globalmaximum>0.62)
	{
		GlobalGandhiji[globalcnt].faceimg=detectedimg[globalmaxindex].faceimg;
		GlobalGandhiji[globalcnt].x=detectedimg[globalmaxindex].x;
		GlobalGandhiji[globalcnt].y=detectedimg[globalmaxindex].y;
		GlobalGandhiji[globalcnt].width=detectedimg[globalmaxindex].width;
		GlobalGandhiji[globalcnt].height=detectedimg[globalmaxindex].height;
		GlobalGandhiji[globalcnt].matchval=globalmaximum;

	}
	else
	{
		GlobalGandhiji[globalcnt].matchval=-1;//TO ELIMINATE THE IMAGES

	}
	globalcnt++;
    /* display video */
  //  cvShowImage( "video", img );
	//cvWaitKey(100);
}
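The per-pixel cvGet2D/cvSet2D loop above blacks out everything outside the face rectangle one scalar at a time. A sketch of the same effect using whole-image operations, which avoids the per-pixel call overhead (the helper name is illustrative; dst is assumed to have the same size and type as src):

// Sketch: zero the destination, then copy only the face rectangle back,
// instead of visiting every pixel with cvGet2D/cvSet2D.
static void keep_only_rect( IplImage* src, IplImage* dst, CvRect r )
{
	cvZero( dst );
	cvSetImageROI( src, r );
	cvSetImageROI( dst, r );
	cvCopy( src, dst, NULL );
	cvResetImageROI( src );
	cvResetImageROI( dst );
}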
Example #13
//--------------------------------HOUGHLINES---------------------------------
void sendtoHoughLines(IplImage * img)
{


	IplImage* src = cvCreateImage( cvGetSize(img), 8, 1 );
	cvCvtColor(img, src, CV_RGB2GRAY);

	//-------------CREATING AN IMAGE NAMED dst TO HOLD THE CANNY EDGE RESULT--------------------
	IplImage* dst = cvCreateImage( cvGetSize(src), 8, 1 );

	//---------------CREATING color_dst IMAGE FOR DISPLAYING THE DETECTED LINES---------------------------
	IplImage *color_dst = cvCreateImage( cvGetSize(src), 8, 3 );

	//---------------CREATING A STORAGE POOL FOR THE LINES AND THE LINE SEQUENCE POINTER-------------
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* lines = 0;
	int i;

	//---------------APPLYING THE CANNY FUNCTION ON SRC AND STORING THE EDGE RESULTS IN DST-------------
	cvCanny( src, dst, 30, 90, 3 );

	cvDilate( dst, dst, 0, 1 );

	//--------------CONVERTING THE CANNY RESULT DST INTO RGB, STORING IT IN color_dst--------------

	cvCvtColor( dst, color_dst, CV_GRAY2BGR );


	/*
	//-----hough lines algo--------
	lines = cvHoughLines2( dst, storage,
					  CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, 30, 30, 0 );

			//---------------ACCESING THE POINTS OF THE LINES AND DRAWING THOSE LINES ON THE GRAY TO RGB CONVERTED IMAGE NAMED color_dst-------------- 
	for( i = 0; i < lines->total; i++ )
	{
	CvPoint* line =
	(CvPoint*)cvGetSeqElem(lines,i);
	cvLine( color_dst, line[0], line[1],

		   CV_RGB(255,0,0), 3, 8 );
	//printf("\n i = %d x1 = %d y1 = %d x2 = %d y2 = %d ",i,line[0].x,line[0].y,line[1].x,line[1].y);


	}
	*/


	lines = cvHoughLines2( dst, storage,
			CV_HOUGH_STANDARD, 1, CV_PI/180, 30, 0, 0 );
	for( i = 0; i < MIN(lines->total,100); i++ )
	{
		float* line = (float*)cvGetSeqElem(lines,i);
		float rho = line[0];
		float theta = line[1];

		printf("theta = %f",(theta*180/3.142));
		CvPoint pt1, pt2;
		double a = cos(theta), b = sin(theta);
		double x0 = a*rho, y0 = b*rho;
		printf("a= %f  b=%f  x0=%f  y0=%f rho=%f\n", a,b,x0,y0,rho);
		pt1.x = cvRound(x0 + 1000*(-b));
		pt1.y = cvRound(y0 + 1000*(a));
		pt2.x = cvRound(x0 - 1000*(-b));
		pt2.y = cvRound(y0 - 1000*(a));
		printf("    x1 = %d, y1 = %d",pt1.x,pt1.y);
		printf("    x2 = %d, y2 = %d\n\n",pt2.x,pt2.y);

		//if((theta*180/3.142) < 100 && (theta*180/3.142) > 79 )
		cvLine( color_dst, pt1, pt2, CV_RGB(255,0,0), 3, 8 );
	}
	cvNamedWindow("HoughLinesShow",1);
	cvShowImage("HoughLinesShow",color_dst);
	cvWaitKey(1000);




}
    /****************************************************************
    Tracker::InitializeWithFace
        Initialize the tracker with opencv's face detector.
    Exceptions:
        None
    ****************************************************************/
    bool    Tracker::InitializeWithFace( TrackerParameters* params, Matrixu& frame )
    {
        const int minsz = 20;

        //Get the name of the haar-cascade .xml file
        const char* cascade_name = HAAR_CASCADE_FILE_NAME;
        ASSERT_TRUE( cascade_name != NULL );

        //Load the cascade
        if ( Tracker::s_faceCascade == NULL )
        {
            Tracker::s_faceCascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
        }

        frame.createIpl();
        IplImage* img    = frame.getIpl();
        ASSERT_TRUE( img != NULL );

        //convert to grayscale
        IplImage* gray    = cvCreateImage( cvSize(img->width, img->height), IPL_DEPTH_8U, 1 );
        ASSERT_TRUE( gray != NULL );
        cvCvtColor(img, gray, CV_BGR2GRAY );

        frame.freeIpl();

        //histogram equalization
        cvEqualizeHist( gray, gray );

        //memory storage
        CvMemStorage* storage = cvCreateMemStorage(0);
        cvClearMemStorage(storage);

        //call opencv's haar feature based detector
        CvSeq* faces = cvHaarDetectObjects( gray,
                                            Tracker::s_faceCascade,
                                            storage,
                                            1.05,
                                            3,
                                            CV_HAAR_DO_CANNY_PRUNING, 
                                            cvSize( minsz, minsz ) );

        int index = faces->total-1;
        CvRect* r = (CvRect*)cvGetSeqElem( faces, index );
        if ( r == NULL )
        {
            return false;
        }

        //skip detections that are too small or extend outside the frame;
        //stop at index 0, since cvGetSeqElem interprets negative indices
        //as counting from the end of the sequence
        while ( r && (r->width<minsz || r->height < minsz || (r->y+r->height+10)>frame.rows() || (r->x+r->width)>frame.cols() ||
            r->y<0 || r->x<0) )
        {
            r = ( index > 0 ) ? (CvRect*)cvGetSeqElem( faces, --index ) : NULL;
        }
        if ( r == NULL )
        {
            return false;
        }

        //set the params
        params->m_initState.resize(4);
        params->m_initState[0]    = (float)r->x;
        params->m_initState[1]    = (float)r->y;
        params->m_initState[2]    = (float)r->width;
        params->m_initState[3]    = (float)r->height+10;

        return true;
    }
Example #15
const CvPoint2D32f* locate_puzzle(IplImage *in, IplImage **annotated) {
    IplImage *grid_image = _grid(in);

    *annotated = cvCloneImage(in);

    // find lines using Hough transform
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* lines = 0;

    double distance_resolution = 1;
    double angle_resolution    = CV_PI / 60;
    int threshold              = 60;
    int minimum_line_length    = in->width / 2;
    int maximum_join_gap       = in->width / 10;
    lines = cvHoughLines2(grid_image, storage, CV_HOUGH_PROBABILISTIC,  distance_resolution, angle_resolution, threshold, minimum_line_length, maximum_join_gap);

    cvCvtColor(grid_image, *annotated, CV_GRAY2RGB);

    cvReleaseImage(&grid_image);

    double most_horizontal = INFINITY;
    for (int i = 0; i < lines->total; ++i) {
        CvPoint *line = (CvPoint*)cvGetSeqElem(lines,i);
        double dx     = abs(line[1].x - line[0].x);
        double dy     = abs(line[1].y - line[0].y);

        double slope = INFINITY;
        if (dx != 0) {
            slope = dy / dx;
        }
        if (slope != INFINITY) {
            if (slope < most_horizontal) {
                //printf("most horizontal seen: %0.2f\n", slope);
                most_horizontal = slope;
            }
        }
    }

    int top    = -1;
    int left   = -1;
    int bottom = -1;
    int right  = -1;
    for (int i = 0; i < lines->total; i++) {
        CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
        double dx     = abs(line[1].x - line[0].x);
        double dy     = abs(line[1].y - line[0].y);
        double slope  = INFINITY;
        if (dx) {
            slope = dy / dx;
        }

        cvLine(*annotated, line[0], line[1], CV_RGB(255, 0, 0), 1, 8, 0);
        if (fabs(slope - most_horizontal) <= 1) {
            if ((top == -1) || (line[1].y < ((CvPoint*)cvGetSeqElem(lines,top))[1].y)) {
                top = i;
            }
            if ((bottom == -1) || (line[1].y > ((CvPoint*)cvGetSeqElem(lines,bottom))[1].y)) {
                bottom = i;
            }
        } else {
            if ((left == -1) || (line[1].x < ((CvPoint*)cvGetSeqElem(lines,left))[1].x)) {
                left = i;
            }
            if ((right == -1) || (line[1].x > ((CvPoint*)cvGetSeqElem(lines,right))[1].x)) {
                right = i;
            }
        }
    }
    //printf("number of lines: %d\n", lines->total);
    if ((top == -1) || (left == -1) || (bottom == -1) || (right == -1)) {
        return NULL;
    }

    CvPoint *top_line    = (CvPoint*)cvGetSeqElem(lines,top);
    cvLine(*annotated, top_line[0], top_line[1], CV_RGB(0, 0, 255), 6, 8, 0);

    CvPoint *bottom_line = (CvPoint*)cvGetSeqElem(lines,bottom);
    cvLine(*annotated, bottom_line[0], bottom_line[1], CV_RGB(0, 255, 255), 6, 8, 0);

    CvPoint *left_line   = (CvPoint*)cvGetSeqElem(lines,left);
    cvLine(*annotated, left_line[0], left_line[1], CV_RGB(0, 255, 0), 6, 8, 0);

    CvPoint *right_line  = (CvPoint*)cvGetSeqElem(lines,right);
    cvLine(*annotated, right_line[0], right_line[1], CV_RGB(255, 255, 0), 6, 8, 0);

    CvPoint2D32f *coordinates;
    coordinates = malloc(sizeof(CvPoint2D32f) * 4);

    // top left
    intersect(top_line, left_line, &(coordinates[0]));
    cvLine(*annotated, cvPointFrom32f(coordinates[0]), cvPointFrom32f(coordinates[0]), CV_RGB(255, 255, 0), 10, 8, 0);

    //printf("top_left: %.0f, %.0f\n", coordinates[0].x, coordinates[0].y);

    // top right
    intersect(top_line, right_line, &(coordinates[1]));
    cvLine(*annotated, cvPointFrom32f(coordinates[1]), cvPointFrom32f(coordinates[1]), CV_RGB(255, 255, 0), 10, 8, 0);

    //printf("top_right: %.0f, %.0f\n", coordinates[1].x, coordinates[1].y);

    // bottom right
    intersect(bottom_line, right_line, &(coordinates[2]));
    cvLine(*annotated, cvPointFrom32f(coordinates[2]), cvPointFrom32f(coordinates[2]), CV_RGB(255, 255, 0), 10, 8, 0);

    //printf("bottom_right: %.0f, %.0f\n", coordinates[2].x, coordinates[2].y);

    // bottom left
    intersect(bottom_line, left_line, &(coordinates[3]));
    cvLine(*annotated, cvPointFrom32f(coordinates[3]), cvPointFrom32f(coordinates[3]), CV_RGB(255, 255, 0), 10, 8, 0);

    //printf("bottom_left: %.0f, %.0f\n", coordinates[3].x, coordinates[3].y);

    return coordinates;
}
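/* Note: locate_puzzle() above calls an intersect() helper that is not part
   of this excerpt. A minimal sketch, assuming each CvPoint[2] argument
   describes an infinite line through two points and the output is their
   intersection (the name and signature are inferred from the call sites): */
static int intersect(const CvPoint *a, const CvPoint *b, CvPoint2D32f *out) {
    /* write each line as A*x + B*y = C and solve the 2x2 system */
    double a1 = a[1].y - a[0].y, b1 = a[0].x - a[1].x;
    double c1 = a1 * a[0].x + b1 * a[0].y;
    double a2 = b[1].y - b[0].y, b2 = b[0].x - b[1].x;
    double c2 = a2 * b[0].x + b2 * b[0].y;
    double det = a1 * b2 - a2 * b1;
    if (det == 0) return 0;                    /* parallel lines: no intersection */
    out->x = (float)((b2 * c1 - b1 * c2) / det);
    out->y = (float)((a1 * c2 - a2 * c1) / det);
    return 1;
}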
Example #16
0
int main(int argc, char* argv[])
{
 
    // Default capture size - 640x480
    CvSize size = cvSize(640,480);
 
    // Open capture device. 0 is /dev/video0, 1 is /dev/video1, etc.
    CvCapture* capture = cvCaptureFromCAM( 0 );
    if( !capture )
    {
            fprintf( stderr, "ERROR: capture is NULL \n" );
            getchar();
            return -1;
    }
 
    // Create a window in which the captured images will be presented
    cvNamedWindow( "Camera", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "HSV", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "EdgeDetection", CV_WINDOW_AUTOSIZE );
 
    // HSV threshold bounds for the ball, tunable at runtime via the key bindings below
    int hl = 60, hu = 75, sl = 255, su = 256, vl = 170, vu = 256;
    
 
    IplImage *  hsv_frame    = cvCreateImage(size, IPL_DEPTH_8U, 3);
    IplImage*  thresholded   = cvCreateImage(size, IPL_DEPTH_8U, 1);
 
    while( 1 )
    {
        // Get one frame
        IplImage* frame = cvQueryFrame( capture );
        if( !frame )
        {
                fprintf( stderr, "ERROR: frame is null...\n" );
                getchar();
                break;
        }
 
        CvScalar hsv_min = cvScalar(hl, sl, vl, 0);
        CvScalar hsv_max = cvScalar(hu, su, vu, 0);

        // Convert the color space to HSV; it is much easier to filter colors in HSV.
        // Note: cvQueryFrame() returns BGR frames, so CV_BGR2HSV would match OpenCV's
        // usual hue convention; CV_RGB2HSV (as written here) swaps the R and B channels.
        cvCvtColor(frame, hsv_frame, CV_RGB2HSV);
        // Filter out colors which are out of range.
        cvInRangeS(hsv_frame, hsv_min, hsv_max, thresholded);
 
        // Memory for hough circles
        CvMemStorage* storage = cvCreateMemStorage(0);
        // hough detector works better with some smoothing of the image
        cvDilate(thresholded, thresholded, NULL, 3);
        cvSmooth( thresholded, thresholded, CV_GAUSSIAN, 9, 9 );
        
        cvErode(thresholded, thresholded, NULL, 3);
        
        CvSeq* circles = cvHoughCircles(thresholded, storage, CV_HOUGH_GRADIENT, 1,
                                        thresholded->height/4, 400, 10, 20, 400);
 
        int maxRadius = 0;
        int x = 0, y = 0;
        bool found = false;
        
        for (int i = 0; i < circles->total; i++)
        {
            float* p = (float*)cvGetSeqElem( circles, i );
            if (p[2] > maxRadius){
                maxRadius = p[2];
                x = p[0];
                y = p[1];
                found = true;
            }
        }

        if (found){
            //printf("Ball! x=%f y=%f r=%f\n\r",p[0],p[1],p[2] );
            cvCircle( frame, cvPoint(cvRound(x),cvRound(y)),
                                    3, CV_RGB(0,255,0), -1, 8, 0 );
            cvCircle( frame, cvPoint(cvRound(x),cvRound(y)),
                                    cvRound(maxRadius), CV_RGB(255,0,0), 3, 8, 0 );
        }
 
        cvShowImage( "Camera", frame ); // Original stream with detected ball overlay
        cvShowImage( "HSV", hsv_frame); // Original stream in the HSV color space
        cvShowImage( "After Color Filtering", thresholded ); // The stream after color filtering
 
        cvReleaseMemStorage(&storage);
 
        // Do not release the frame!
 
        // Poll the keyboard; the keys handled below adjust the HSV threshold bounds
        int key = cvWaitKey(10);
                
                

        switch(key){
            case 'q' : hu += 5; break;
            case 'Q' : hu -= 5; break;
                    
            case 'a' : hl -= 5; break;
            case 'A' : hl += 5; break;
                    
            case 'w' : su += 5; break;
            case 'W' : su -= 5; break;
                    
            case 's' : sl -= 5; break;
            case 'S' : sl += 5; break;
                    
            case 'e' : vu += 5; break;
            case 'E' : vu -= 5; break;
                    
            case 'd' : vl -= 5; break;
            case 'D' : vl += 5; break;
        }

        if (key != -1){
            printf("H: %i, S: %i, V: %i\nH: %i, S: %i, V: %i\n\n", hu, su, vu, hl, sl, vl);
        }
    }
 
     // Release the capture device housekeeping
     cvReleaseCapture( &capture );
     cvDestroyWindow( "mywindow" );
     return 0;
}
Example #17
0
void Determine::FunctionSwitch(CvSeq* circles){
	/*
	KeyboardClick SwitchCase=1;
	MouseMove     SwitchCase=2;
	Combination   SwitchCase=3;
	RightClick    SwitchCase=4;
	LeftClick     SwitchCase=5;
	*/
	for(int i=0;i<circles->total;i++){
		float* p=(float*)cvGetSeqElem(circles,i);//index with i: check every detected circle
		if(cvRound(p[0])<70 || cvRound(p[0])>590 ||cvRound(p[1])<60||cvRound(p[1])>430){
			JudgementMouse(circles);
			SwitchTemp=0;
			SwitchCase=0;
			return;
		}
	}
	SwitchTemp++;
	if(SwitchTemp==1){
		circlesTemp=cvCloneSeq(circles,NULL);
	}
	switch (circles->total){
		case 0:
			MouseMoveToLeftClick++;
			keyboard.OutputKey(circles);
			JudgementMouse(circles);
			SwitchTemp=0;
			SwitchCase=0;
			if(MouseMoveToLeftClick>=10){
				MouseMoveToLeftClick=0;
				SingleClick=false;
			}
			break;
		case 1:
			int PointTemp[2];
			int Point[2];
			if(SwitchCase==2){
					std::cout<<"come SwitchCase2"<<std::endl;
				JudgementMouse(circles);
				break;
			}
			if(SingleClick==true && MouseMoveToLeftClick!=0){
				std::cout<<"come LeftClick"<<std::endl;
				JudgementMouse(circles);
				MouseMoveToLeftClick=0;
				break;
			}
			if(circlesTemp->total==1 && circles->total==1){
				float* p=(float*)cvGetSeqElem(circlesTemp,0);
				PointTemp[0]=cvRound(p[0]);
				PointTemp[1]=cvRound(p[1]);
			    float* q=(float*)cvGetSeqElem(circles,0);
				Point[0]=cvRound(q[0]);
				Point[1]=cvRound(q[1]);
				//compare only when both frames actually held one circle, so Point/PointTemp are set
				if(abs(Point[0]-PointTemp[0])<10 && abs(Point[1]-PointTemp[1])<10){
					std::cout<<"come keyboard"<<std::endl;
					SwitchCase=1;
					keyboard.OutputKey(circles);
				}else{
					std::cout<<"come mouse"<<std::endl;
					SwitchCase=2;
					SingleClick=true;
					JudgementMouse(circles);
				}
			}
			break;
		case 2:
			//check the previous state first (mirrors Recognition::FunctionSwitch below)
			if(SwitchCase==3){
				keyboard.OutputKey(circles);
				break;
			}else if (SwitchCase==4){
				JudgementMouse(circles);
				break;
			}
			if(circlesTemp->total==1&& circles->total==2){
				keyboard.OutputKey(circles);
				SwitchCase=3;
			}else{
				JudgementMouse(circles);
				SwitchCase=4;
			}
			break;
		default:
			JudgementMouse(circles);
		    SwitchTemp=0;
			SwitchCase=0;
			break;
	}
	circlesTemp=cvCloneSeq(circles,NULL);
}
Example #18
0
void face_detect_crop(IplImage * input,IplImage * output)
{

    IplImage * img;
    img = cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,1);
    cvCvtColor(input,img,CV_RGB2GRAY);//convert input to grayscale and store in img
    int face_origin_x,face_origin_y,width,height;//variables to crop the face (unused here)


    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 ); //load the face detection cascade
    storage = cvCreateMemStorage(0);
    int scale = 1;
    CvPoint pt1,pt2;
    int face_number;

    CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,1.1, 2, CV_HAAR_DO_CANNY_PRUNING,cvSize(40, 40) );
    for( face_number = 0; face_number < (faces ? faces->total : 0); face_number++ )
    {
        CvRect* r = (CvRect*)cvGetSeqElem( faces, face_number );

        //Specifies the points for rectangle.
        /* pt1_____________

           |              |

           |              |

           |              |

           |_____________pt2 */
        pt1.x = r->x*scale;
        pt2.x = (r->x+r->width)*scale;
        pt1.y = r->y*scale;
        pt2.y = (r->y+r->height)*scale;
        cvRectangle( input, pt1, pt2, CV_RGB(255,255,255), 1, 8, 0 );
        CvRect rs=*r;
        //cvNamedWindow("i-O", 1);
        //cvShowImage("i-O",input);
        //cvWaitKey(0);
        cvSetImageROI(img,rs);
    }
    IplImage * frame;
    CvSize s1= {48,48};
    frame=cvCreateImage(s1,IPL_DEPTH_8U,1);

    cvResize(img,frame);
    cvCvtColor(frame,output,CV_GRAY2RGB);

    CvPoint pt;
    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name_eye, 0, 0, 0 ); //load the eye detection cascade
    CvSeq* faces1 = cvHaarDetectObjects( input, cascade, storage,1.1, 2, CV_HAAR_DO_CANNY_PRUNING,cvSize(40, 40) );
    for( face_number = 0; face_number < (faces1 ? faces1->total : 0); face_number++ )
    {
        CvRect* r = (CvRect*)cvGetSeqElem( faces1, face_number );
        pt.x = (r->x*scale);
        pt2.x = ((r->x+r->width)*scale);
        pt.y = (r->y*scale);
        pt2.y = ((r->y+r->height)*scale);
        cvRectangle( input, pt, pt2, CV_RGB(0,255,255), 1, 8, 0 );
    }



}
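/* Note: face_detect_crop() above relies on globals declared elsewhere in its
   project. A minimal sketch of plausible declarations (the cascade file
   names are assumptions): */
static const char* cascade_name     = "haarcascade_frontalface_alt.xml";
static const char* cascade_name_eye = "haarcascade_eye.xml";
static CvHaarClassifierCascade* cascade = 0;
static CvMemStorage* storage = 0;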
Example #19
0
/*
 * Finds the Worm's Head and Tail.
 * Requires Worm->Boundary
 *
 */
int GivenBoundaryFindWormHeadTail(WormAnalysisData* Worm, WormAnalysisParam* Params) {
	if (Worm->Boundary->total < 2*Params->NumSegments) {
		printf("Error in GivenBoundaryFindWormHeadTail(). The Boundary has too few points.");
		return -1;
	}

	/*** Clear Out Scratch Storage ***/
	cvClearMemStorage(Worm->MemScratchStorage);

	/* **********************************************************************/
	/*  Express the Boundary in the form of a series of vectors connecting 	*/
	/*  two pixels a Delta pixels apart.									*/
	/* **********************************************************************/

	/* Create a sequence to store all of the dot products along the boundary.
	 */
	CvSeq* DotProds= cvCreateSeq(CV_32SC1,sizeof(CvSeq),sizeof(int),Worm->MemScratchStorage);

	/* Create a sequence to store all of the cross products along the boundary.
	 */
	CvSeq* CrossProds= cvCreateSeq(CV_32SC1,sizeof(CvSeq),sizeof(int),Worm->MemScratchStorage);
	
	//Reader objects for walking the boundary (the loop below actually indexes with cvGetSeqElem)
	CvSeqReader ForeReader; //ForeReader reads Delta pixels ahead
	CvSeqReader Reader; 	//Reader reads the current pixel
	CvSeqReader BackReader; //BackReader reads Delta pixels behind


	/**** Local Variables ***/
	int i;
	CvPoint* Pt;
	CvPoint* AheadPt;
	CvPoint* BehindPt;
	CvPoint AheadVec;
	CvPoint BehindVec;
	int TotalBPts = Worm->Boundary->total;

	/*** Initializing Read & Write Apparatus ***/
	int AheadPtr=0;
	int BehindPtr=0;
	int Ptr=0;
	int* DotProdPtr;
	int* CrossProdPtr;
	int DotProdVal;
	int CrossProdVal;


	/*
	 * Loop through all the boundary and compute the dot products between the ForeVec and BackVec.
	 *
	 * Note: ForeVec and BackVec have the same "handedness" along the boundary.
	 */
	//printf ("total boundary elements = %d\n", TotalBPts); //debug MHG 10/19/09
	for (i = 0; i < TotalBPts; i++) {
		AheadPtr = (i+Params->LengthScale)%TotalBPts;
		BehindPtr = (i+TotalBPts-Params->LengthScale)%TotalBPts;
		Ptr = (i)%TotalBPts;

		//printf("AheadPtr=%d, BehindPtr=%d,Ptr=%d\n", AheadPtr,BehindPtr,Ptr);


		AheadPt = (CvPoint*) cvGetSeqElem(Worm->Boundary,AheadPtr);
		Pt = (CvPoint*) cvGetSeqElem(Worm->Boundary,Ptr);
		BehindPt=(CvPoint*) cvGetSeqElem(Worm->Boundary,BehindPtr);


		/** Compute the Forward Vector **/
		AheadVec = cvPoint((AheadPt->x) - (Pt->x), (AheadPt->y)
				- (Pt->y));

		/** Compute the Rear Vector **/
		BehindVec= cvPoint((Pt->x) - (BehindPt->x), (Pt->y)
				- (BehindPt->y));

		/** Store the Dot Product in our Mat **/
		DotProdVal=PointDot(&AheadVec,&BehindVec);
		cvSeqPush(DotProds,&DotProdVal); //<--- ANDY CONTINUE HERE!
		
		/** Store the Cross Product in our Mat **/
		CrossProdVal=PointCross(&AheadVec,&BehindVec);
		cvSeqPush(CrossProds,&CrossProdVal);

	//	printf("i= %d, DotProdVal=%d\n", i, DotProdVal);
	//	cvWaitKey(0);

	}


	/* **********************************************************************/
	/*  Find the Tail 													 	*/
	/*  Take dot product of neighboring vectors. Tail is location of		*/
	/*	 smallest dot product												*/
	/* **********************************************************************/


	/*
	 * Now Let's loop through the entire boundary to find the tail, which will be the curviest point.
	 */
	float MostCurvy = 1000; //start high; we search for the smallest dot product
	float CurrentCurviness; //Metric of CurrentCurviness. In this case the dot product.
	int MostCurvyIndex = 0;
	int TailIndex;

	for (i = 0; i < TotalBPts; i++) {
		DotProdPtr = (int*) cvGetSeqElem(DotProds,i);
		CrossProdPtr = (int*) cvGetSeqElem(CrossProds,i);
		if (*DotProdPtr < MostCurvy && *CrossProdPtr > 0) { //If this location is curvier than the previous MostCurvy location
			MostCurvy = *DotProdPtr; //replace the MostCurvy point
			MostCurvyIndex = i;
		}
	}

	//Set the tail to be the point on the boundary that is most curvy.
	Worm->Tail = (CvPoint*) cvGetSeqElem(Worm->Boundary, MostCurvyIndex);
	Worm->TailIndex=MostCurvyIndex;

	/* **********************************************************************/
	/*  Find the Head 													 	*/
	/* 	Excluding the neighborhood of the Tail, the head is the location of */
	/*	 the smallest dot product											*/
	/* **********************************************************************/

	float SecondMostCurvy = 1000;
	int DistBetPtsOnBound;
	DistBetPtsOnBound = 0;

	/* Set the fallback head location to be halfway away from the tail along the boundary. 	*/
	/* That way, if for some reason there is no reasonable head found, the default 			*/
	/* will at least be a pretty good guess.												*/
	int SecondMostCurvyIndex = (Worm->TailIndex+ TotalBPts/2)%TotalBPts;
	
	

	for (i = 0; i < TotalBPts; i++) {
		DotProdPtr =(int*) cvGetSeqElem(DotProds,i);
		CrossProdPtr=(int*) cvGetSeqElem(CrossProds,i);
		DistBetPtsOnBound = DistBetPtsOnCircBound(TotalBPts, i, MostCurvyIndex);
		//If we are at least a 1/4 of the total boundary away from the most curvy point.
		if (DistBetPtsOnBound > (TotalBPts / 4)) {
			//If this location is curvier than the previous SecondMostCurvy location & is not an invagination
			if (*DotProdPtr< SecondMostCurvy && *CrossProdPtr > 0) {
				SecondMostCurvy = *DotProdPtr; //replace the MostCurvy point
				SecondMostCurvyIndex = i;
			}
		}
	}

	Worm->Head = (CvPoint*) cvGetSeqElem(Worm->Boundary,
			SecondMostCurvyIndex);  

	Worm->HeadIndex = SecondMostCurvyIndex;
	cvClearMemStorage(Worm->MemScratchStorage);
	return 0;
}
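/* Note: GivenBoundaryFindWormHeadTail() above calls PointDot(), PointCross()
   and DistBetPtsOnCircBound(), which are not part of this excerpt. Minimal
   sketches, assuming 2D dot/cross products and the shortest index distance
   around a closed boundary: */
int PointDot(const CvPoint* a, const CvPoint* b) {
	return a->x * b->x + a->y * b->y;          /* 2D dot product */
}
int PointCross(const CvPoint* a, const CvPoint* b) {
	return a->x * b->y - a->y * b->x;          /* z-component of the 2D cross product */
}
int DistBetPtsOnCircBound(int total, int i, int j) {
	int d = abs(i - j);                        /* index distance along the ring */
	return d > total / 2 ? total - d : d;      /* take the shorter way around */
}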
Example #20
0
File: map.cpp Project: Metalab/CGSG
const PolygonList& ViennaMap::loadFragment(int fragX, int fragY) {
  //TODO: keep track of the images that are being loaded so we don't issue
  //two load requests for the same picture. this will be important if 
  //we are used in a multi-threaded environment
  IplImage *img = getImage(fragX, fragY);

  if (SDL_mutexP(cvRessourcesGuard) == -1)
    throw "could not acquire cvRessourcesGuard";

  //get one color channel and set white to zero
  cvSplit(img, tempBinarizedImage, NULL, NULL, NULL);
  cvThreshold(tempBinarizedImage, tempBinarizedImage, 250, 255, CV_THRESH_TOZERO_INV);
  
  //find polygons
  CvSeq *contours, *polys;
  cvFindContours(tempBinarizedImage, cvMemStorage, &contours, sizeof(CvContour),
                  CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
  polys = cvApproxPoly(contours, sizeof(CvContour), cvMemStorage, CV_POLY_APPROX_DP, 1, 1);

  //create MapFragment
  MapFragment *frag = new MapFragment();

  //read polygons
  for (; polys; polys = polys->h_next) {
    if (polys->total < 3) continue;
    Polygon* polygon = new Polygon(polys->total); 
    bool incomplete = false;

    CvPoint *point;

    for (int i=0; i < polys->total; i++) {
      point = (CvPoint*)cvGetSeqElem(polys, i);
      int x, y;
      x = point->x + fragX*fragmentImageWidth;
      y = point->y + fragY*fragmentImageHeight;
      (*polygon)[i].x = x;
      (*polygon)[i].y = y;

      if (x == 1 || y == 1 || x == fragmentImageWidth-2 || y == fragmentImageHeight-2)
        incomplete = true;
    }

    if (!incomplete)
      frag->polygons.push_back(polygon);
    else
      frag->incompletePolygons.push_back(polygon);
  }

  //clean up
  cvClearMemStorage(cvMemStorage);
  cvReleaseImage(&img);

  if (SDL_mutexV(cvRessourcesGuard) == -1)
    throw "could not release cvRessourcesGuard";

  if (SDL_mutexP(fragmentGuard) == -1)
    throw "could not acquire fragmentGuard";

  //TODO: tryCompletePolygons
  //throw "not implemented";
  
  //add map fragment to list
  fragments.push_back(frag);

  if (SDL_mutexV(fragmentGuard) == -1)
    throw "could not release fragmentGuard";

  return frag->polygons;
}
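/* Note: ViennaMap::loadFragment() above fills project-specific Polygon,
   PolygonList and MapFragment types that this excerpt does not show. A
   minimal sketch of shapes consistent with how they are used (names and
   layout are assumptions): */
#include <vector>
struct Polygon {
    std::vector<CvPoint> pts;
    explicit Polygon(int n) : pts(n) {}        // size fixed at construction
    CvPoint& operator[](int i) { return pts[i]; }
};
typedef std::vector<Polygon*> PolygonList;
struct MapFragment {
    PolygonList polygons;                      // polygons fully inside the fragment
    PolygonList incompletePolygons;            // polygons touching the fragment border
};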
Example #21
0
void eyesDetector::runEyesDetector(IplImage * input,IplImage * fullImage,CvPoint LT)

{

    bothEyesDetected=0;
    //static int countR;
    //static CvPoint leftEyeP,rightEyeP;
    eyesInformation.LE =cvPoint(0,0);
    eyesInformation.RE =cvPoint(0,0);
    eyesInformation.Length =0;
    if (input==0)
        return;

    double scale=1;
    int j=0;
    //  //printf("%e SCALE \n\n",scale);

    IplImage *gray = cvCreateImage( cvSize(input->width,input->height/(2)), 8, 1 );
    IplImage *gray_fullimage = cvCreateImage( cvSize(fullImage->width,fullImage->height), 8, 1 );

    IplImage *gray_scale = cvCreateImage( cvSize(input->width/scale,input->height/(2*scale)), 8, 1 );

    cvSetImageROI(input,cvRect(0,(input->height)/(8),input->width,(input->height)/(2)));
    cvCvtColor( input, gray, CV_BGR2GRAY );
    cvResetImageROI(input);


    cvCvtColor( fullImage, gray_fullimage, CV_BGR2GRAY );
    cvResize(gray,gray_scale,CV_INTER_LINEAR);


    cvClearMemStorage( storage );
    CvSeq* nested_objects = cvHaarDetectObjects(gray_scale, nested_cascade, storage,1.4, 2, 0,cvSize(0,0) );
    int count=nested_objects ? nested_objects->total : 0;
    if (count==0)
    {
        nested_objects = cvHaarDetectObjects( gray_scale, nested_cascade_2, storage,1.4, 2, 0,cvSize(0,0) );
    }
    int leftT=0,rightT=0;
    count=nested_objects ? nested_objects->total : 0;
    //int Flag=0;
    if (count>1)
    {

        for ( j = 0; j < (nested_objects ? nested_objects->total : 0); j++ )
        {
            CvPoint center;
            CvRect* nr = (CvRect*)cvGetSeqElem( nested_objects, j );
            center.x = cvRound((LT.x+ (nr->x + nr->width*0.5)*scale));
            center.y = cvRound((LT.y + (input->height)/8 + (nr->y + nr->height*0.5)*scale));

            if ((center.x-4)>0 && (center.x-4)<(IMAGE_WIDTH-8) && (center.y-4)>0  && (center.y-4)<(IMAGE_HEIGHT-8))
            {
                cvSetImageROI(gray_fullimage,cvRect(center.x-4,center.y-4,8,8));
                IplImage* eyeDetect = cvCreateImage(cvSize(8,8),8,1);
                cvResize( gray_fullimage,eyeDetect, CV_INTER_LINEAR ) ;
                cvResetImageROI(gray_fullimage);

                double xCordinate=(center.x-4+CenterofMass(eyeDetect,0));
                double yCordinate=(center.y-4+CenterofMass(eyeDetect,1));

                cvReleaseImage( &eyeDetect );
                if (center.x<cvRound((LT.x + input->width*0.5)))
                {
                    eyesInformation.LE.x=xCordinate;
                    eyesInformation.LE.y=yCordinate;

                    //cvCircle( fullImage, cvPoint(eyesInformation.LE.x,eyesInformation.LE.y), 4, CV_RGB(128,128,128), 1, 8, 0 );
                    leftT=1;
                }
                else
                {

                    eyesInformation.RE.x=xCordinate;
                    eyesInformation.RE.y=yCordinate;
                    //cvCircle( fullImage, cvPoint(eyesInformation.RE.x,eyesInformation.RE.y), 4, CV_RGB(128,128,128), 1, 8, 0 );

                    rightT=1;
                }

            }
        }





        if (leftT==1 && rightT==1)
        {
            eyesInformation.Length=sqrt(pow(eyesInformation.RE.y-eyesInformation.LE.y,2)+ pow(eyesInformation.RE.x-eyesInformation.LE.x,2));
            bothEyesDetected=1;
        }

    }
    cvReleaseImage(&gray_fullimage);
    cvReleaseImage(&gray);
    cvReleaseImage(&gray_scale);
    cvClearMemStorage( storage );
}
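/* Note: runEyesDetector() above calls a CenterofMass() helper that is not
   part of this excerpt. A minimal sketch, assuming it returns the
   intensity-weighted centroid of an 8-bit single-channel patch along one
   axis (0 = x, 1 = y): */
double CenterofMass(IplImage* img, int axis) {
    double sum = 0, weighted = 0;
    for (int y = 0; y < img->height; y++) {
        const uchar* row = (const uchar*)(img->imageData + y * img->widthStep);
        for (int x = 0; x < img->width; x++) {
            sum      += row[x];
            weighted += row[x] * (axis == 0 ? x : y);
        }
    }
    return sum > 0 ? weighted / sum : 0;       /* all-black patch: fall back to 0 */
}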
Example #22
0
//automatically switches between mouse-gesture and keyboard handling
void Recognition::FunctionSwitch(CvSeq* TouchBlobList){
	/*
	Single key                 SwitchCase=1;
	MouseMove                  SwitchCase=2;
	Combination                SwitchCase=3;
	TwoFingersMouse            SwitchCase=4;
	ThreeFingerMouse           SwitchCase=5;
	ThreeKeysCombination       SwitchCase=6;
	*/
	//an action with fewer fingers can escalate to one with more fingers, but cannot jump back
	for(int i=0;i<TouchBlobList->total;i++){
		float* p=(float*)cvGetSeqElem(TouchBlobList,i);
		if(cvRound(p[0])<65 || cvRound(p[0])>590 ||cvRound(p[1])<35||cvRound(p[1])>410){
			MouseGestureSwitch(TouchBlobList);
			SwitchCounter=0;
			SwitchCase=0;
			return;
		}
	}
	SwitchCounter++;//counts how many frames have arrived
	if(SwitchCounter==1){//initialize when first frame comes
		PreviousTouchBlobList=cvCloneSeq(TouchBlobList,NULL);
	}
	switch (TouchBlobList->total){//To switch among different numbers of circles
		case 0://zero fingers: pass to every handler so each takes its corresponding action
			gesture.estimation(TouchBlobList);
			MouseMoveToLeftClick++;
			keyboard.OutputKey(TouchBlobList);
			MouseGestureSwitch(TouchBlobList);
			SwitchCounter=0;//clear
			SwitchCase=0;//clear
			if(MouseMoveToLeftClick>5){//limit the time allowed between the two taps of a left click
				MouseMoveToLeftClick=0;
				SingleClick=false;//forget last click,begin a new one
			}
			keyboard.UpKeys();
			break;
		case 1:
			gesture.estimation(TouchBlobList);//for the cross gesture that closes a window
			POINT PreviousPoint;//the previous point
			POINT Point;//the current point
			if(SwitchCase==2){//if previous action is mouse movement action
				MouseGestureSwitch(TouchBlobList);
				break;
			}else if(SingleClick==true && MouseMoveToLeftClick!=0){
				MouseGestureSwitch(TouchBlobList);
				MouseMoveToLeftClick=0;
				keyboard.ClearVirtualKey();
				break;
				//if none of these matches, evaluate for next use
			}else if(PreviousTouchBlobList->total==1 && TouchBlobList->total==1){
				float* p=(float*)cvGetSeqElem(PreviousTouchBlobList,0);
				PreviousPoint.x=cvRound(p[0]);
				PreviousPoint.y=cvRound(p[1]);
			    float* q=(float*)cvGetSeqElem(TouchBlobList,0);
				Point.x=cvRound(q[0]);
				Point.y=cvRound(q[1]);
			}
			//
			if(SwitchCase==0 || SwitchCase==1){
				if(abs(Point.x-PreviousPoint.x)<10 && abs(Point.y-PreviousPoint.y)<10){
					SwitchCase=1;
					keyboard.OutputKey(TouchBlobList);
					break;
				}else if((abs(Point.x-PreviousPoint.x)>10 || abs(Point.y-PreviousPoint.y)>10)){
					SwitchCase=2;
					SingleClick=true;
					keyboard.ClearVirtualKey();
					MouseGestureSwitch(TouchBlobList);
					break;
				}
			}
			break;
		case 2:
			if(SwitchCase==3){
	    		keyboard.OutputKey(TouchBlobList);
				break;
			}else if (SwitchCase==4){
				MouseGestureSwitch(TouchBlobList);
				break;
			}
			//in case previous action is not zero finger or one finger
			if(SwitchCase==0 || SwitchCase==1){
				if(PreviousTouchBlobList->total==1 && TouchBlobList->total==2){
					keyboard.OutputKey(TouchBlobList);
					SwitchCase=3;
					break;
				}else{
					MouseGestureSwitch(TouchBlobList);
					SwitchCase=4;
					break;
				}
			}
			break;
			//three fingers action
		case 3:
			if(SwitchCase==6){
				keyboard.OutputKey(TouchBlobList);
				break;
			}else if (SwitchCase==5){
				MouseGestureSwitch(TouchBlobList);
				break;
			}
			//
			if(SwitchCase==0 || SwitchCase==3 || SwitchCase==4){
				if(PreviousTouchBlobList->total==2&& TouchBlobList->total==3){
					keyboard.OutputKey(TouchBlobList);
					SwitchCase=6;
				}else{
					MouseGestureSwitch(TouchBlobList);
					SwitchCase=5;
				}
			}
			break;
		default://more than three fingers is always a mouse action
			MouseGestureSwitch(TouchBlobList);//only mouse & gesture have four-finger actions
		    SwitchCounter=0;//clear for next use
			SwitchCase=0;//clear for next use
			break;
	}
	PreviousTouchBlobList=cvCloneSeq(TouchBlobList,NULL);//copy current TouchBlobList to previous one
}
Example #23
0
int main(int argc, char* argv[]) {
    CvMemStorage *contStorage = cvCreateMemStorage(0);
    CvSeq *contours;
    CvTreeNodeIterator polyIterator;

    CvMemStorage *mallet_storage;
	CvSeq *mallet_circles = 0;
	float *mallet_p;
	int mi;

    int found = 0;
    int i;
    CvPoint poly_point;
	int fps=30;

	int npts[2] = { 4, 12 };
	CvPoint **pts;

	pts = (CvPoint **) cvAlloc (sizeof (CvPoint *) * 2);
	pts[0] = (CvPoint *) cvAlloc (sizeof (CvPoint) * 4);
	pts[1] = (CvPoint *) cvAlloc (sizeof (CvPoint) * 12);
	pts[0][0] = cvPoint(0,0);
	pts[0][1] = cvPoint(160,0);
	pts[0][2] = cvPoint(320,240);
	pts[0][3] = cvPoint(0,240);
	pts[1][0] = cvPoint(39,17);
	pts[1][1] = cvPoint(126,15);
	pts[1][2] = cvPoint(147,26);
	pts[1][3] = cvPoint(160,77);
	pts[1][4] = cvPoint(160,164);
	pts[1][5] = cvPoint(145,224);
	pts[1][6] = cvPoint(125,233);
	pts[1][7] = cvPoint(39,233);
	pts[1][8] = cvPoint(15,217);
	pts[1][9] = cvPoint(0,133);
	pts[1][10] = cvPoint(0,115);
	pts[1][11] = cvPoint(17,28);

	// polyline approximation
    CvMemStorage *polyStorage = cvCreateMemStorage(0);
    CvSeq *polys, *poly;

	// OpenCV variables
	CvFont font;

    printf("start!\n");

	//pwm initialize
	if(gpioInitialise() < 0) return -1;
	//pigpio CW/CCW pin setup
	//X:18, Y1:14, Y2:15
	gpioSetMode(18, PI_OUTPUT);
	gpioSetMode(14, PI_OUTPUT);
	gpioSetMode(15, PI_OUTPUT);
	//pigpio pulse setup
	//X:25, Y1:23, Y2:24
	gpioSetMode(25, PI_OUTPUT);
	gpioSetMode(23, PI_OUTPUT);
	gpioSetMode(24, PI_OUTPUT);
	//limit-switch setup
	gpioSetMode(5, PI_INPUT);
	gpioWrite(5, 0);
	gpioSetMode(6, PI_INPUT);
	gpioWrite(6, 0);
	gpioSetMode(7, PI_INPUT);
	gpioWrite(7, 0);
	gpioSetMode(8, PI_INPUT);
	gpioWrite(8, 0);
	gpioSetMode(13, PI_INPUT);
	gpioSetMode(19, PI_INPUT);
	gpioSetMode(26, PI_INPUT);
	gpioSetMode(21, PI_INPUT);

	CvCapture* capture_robot_side = cvCaptureFromCAM(0);
	CvCapture* capture_human_side = cvCaptureFromCAM(1);
    if(capture_robot_side == NULL){
		std::cout << "Robot Side Camera Capture FAILED" << std::endl;
		return -1;
	 }
	if(capture_human_side ==NULL){
		std::cout << "Human Side Camera Capture FAILED" << std::endl;
		return -1;
	}

	// capture size settings
    cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH);
	cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT);
	//fps settings
	cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FPS,fps);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FPS,fps);

	// create windows for displaying the images
	//cvNamedWindow("Previous Image", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("Now Image", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("pack", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("mallet", CV_WINDOW_AUTOSIZE);
	cvNamedWindow ("Poly", CV_WINDOW_AUTOSIZE);

	//Create trackbar to change brightness
	int iSliderValue1 = 50;
	cvCreateTrackbar("Brightness", "Now Image", &iSliderValue1, 100);
	//Create trackbar to change contrast
	int iSliderValue2 = 50;
	cvCreateTrackbar("Contrast", "Now Image", &iSliderValue2, 100);
	//pack threshold 0, 50, 120, 220, 100, 220
	int iSliderValuePack1 = 54; //80;
	cvCreateTrackbar("minH", "pack", &iSliderValuePack1, 255);
	int iSliderValuePack2 = 84;//106;
	cvCreateTrackbar("maxH", "pack", &iSliderValuePack2, 255);
	int iSliderValuePack3 = 100;//219;
	cvCreateTrackbar("minS", "pack", &iSliderValuePack3, 255);
	int iSliderValuePack4 = 255;//175;
	cvCreateTrackbar("maxS", "pack", &iSliderValuePack4, 255);
	int iSliderValuePack5 = 0;//29;
	cvCreateTrackbar("minV", "pack", &iSliderValuePack5, 255);
	int iSliderValuePack6 = 255;//203;
	cvCreateTrackbar("maxV", "pack", &iSliderValuePack6, 255);
	//mallet threshold 0, 255, 100, 255, 140, 200
	int iSliderValuemallet1 = 107;
	cvCreateTrackbar("minH", "mallet", &iSliderValuemallet1, 255);
	int iSliderValuemallet2 = 115;
	cvCreateTrackbar("maxH", "mallet", &iSliderValuemallet2, 255);
	int iSliderValuemallet3 = 218;//140
	cvCreateTrackbar("minS", "mallet", &iSliderValuemallet3, 255);
	int iSliderValuemallet4 = 255;
	cvCreateTrackbar("maxS", "mallet", &iSliderValuemallet4, 255);
	int iSliderValuemallet5 = 0;
	cvCreateTrackbar("minV", "mallet", &iSliderValuemallet5, 255);
	int iSliderValuemallet6 = 255;
	cvCreateTrackbar("maxV", "mallet", &iSliderValuemallet6, 255);

	// declare the image pointers
	IplImage* img_robot_side = cvQueryFrame(capture_robot_side);
	IplImage* img_human_side = cvQueryFrame(capture_human_side);
	IplImage* img_all_round = cvCreateImage(cvSize(CAM_PIX_WIDTH, CAM_PIX_2HEIGHT), IPL_DEPTH_8U, 3);
	IplImage* tracking_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* img_all_round2  = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* show_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);

	cv::Mat mat_frame1;
	cv::Mat mat_frame2;
	cv::Mat dst_img_v;
	cv::Mat dst_bright_cont;
	int iBrightness  = iSliderValue1 - 50;
	double dContrast = iSliderValue2 / 50.0;
	IplImage* dst_img_frame = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* grayscale_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 1);
	IplImage* poly_tmp = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 1);
	IplImage* poly_dst = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 3);
	IplImage* poly_gray = cvCreateImage( cvGetSize(img_all_round),IPL_DEPTH_8U,1);

	int rotate_times = 0;
	//IplImage* -> Mat
	mat_frame1 = cv::cvarrToMat(img_robot_side);
	mat_frame2 = cv::cvarrToMat(img_human_side);
	// flip vertically and horizontally; in production, mat_frame1 is the one to flip
	cv::flip(mat_frame1, mat_frame1, 0); // flip around the horizontal axis (vertical flip)
	cv::flip(mat_frame1, mat_frame1, 1); // flip around the vertical axis (horizontal flip)
	vconcat(mat_frame2, mat_frame1, dst_img_v);

	dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); // adjust contrast/brightness of the concatenated image
	// image dilation and erosion (disabled)
//	cv::Mat close_img;
//	cv::Mat element(3,3,CV_8U, cv::Scalar::all(255));
//	cv::morphologyEx(dst_img_v, close_img, cv::MORPH_CLOSE, element, cv::Point(-1,-1), 3);
//	cv::imshow("morphologyEx", dst_img_v);
//	dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); // adjust contrast/brightness of the concatenated image

	// hand the brightness-adjusted result over as an IplImage* (Mat -> IplImage*), then release the Mats
	*img_all_round = dst_bright_cont;

	cv_ColorExtraction(img_all_round, dst_img_frame, CV_BGR2HSV, 0, 11, 180, 255, 0, 255);

	cvCvtColor(dst_img_frame, grayscale_img, CV_BGR2GRAY);
	cv_Labelling(grayscale_img, tracking_img);

	cvCvtColor(tracking_img, poly_gray, CV_BGR2GRAY);

	cvCopy( poly_gray, poly_tmp);
	cvCvtColor( poly_gray, poly_dst, CV_GRAY2BGR);

	// image dilation and erosion
	//cvMorphologyEx(tracking_img, tracking_img,)

	// contour extraction
	found = cvFindContours( poly_tmp, contStorage, &contours, sizeof( CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

	// polyline approximation
	polys = cvApproxPoly( contours, sizeof( CvContour), polyStorage, CV_POLY_APPROX_DP, 8, 10);

	cvInitTreeNodeIterator( &polyIterator, ( void*)polys, 10);
	poly = (CvSeq *)cvNextTreeNode( &polyIterator);
	printf("sort before by X\n");
	for( i=0; i<poly->total; i++)
	{
		poly_point = *( CvPoint*)cvGetSeqElem( poly, i);
		cvCircle( poly_dst, poly_point, 1, CV_RGB(255, 0 , 255), -1);
		cvCircle( poly_dst, poly_point, 8, CV_RGB(255, 0 , 255));
		std::cout << "x:" << poly_point.x << ", y:" << poly_point.y  << std::endl;
	}
	printf("Poly FindTotal:%d\n",poly->total);

	// determine the frame coordinates
	// upper left,  wall side: upper_left_f
	// upper left,  goal side: upper_left_g
	// upper right, wall side: upper_right_f
	// upper right, goal side: upper_right_g
	// lower left,  wall side: lower_left_f
	// lower left,  goal side: lower_left_g
	// lower right, wall side: lower_right_f
	// lower right, goal side: lower_right_g
	CvPoint upper_left_f, upper_left_g, upper_right_f, upper_right_g,
			lower_left_f, lower_left_g, lower_right_f, lower_right_g,
			robot_goal_left, robot_goal_right;

	CvPoint frame_points[8];
//	if(poly->total == 8){
//		for( i=0; i<8; i++){
//			poly_point = *( CvPoint*)cvGetSeqElem( poly, i);
//			frame_points[i] = poly_point;
//		}
//		qsort(frame_points, 8, sizeof(CvPoint), compare_cvpoint);
//		printf("sort after by X\n");
//		for( i=0; i<8; i++){
//			std::cout << "x:" << frame_points[i].x << ", y:" << frame_points[i].y  << std::endl;
//		}
//		if(frame_points[0].y < frame_points[1].y){
//			upper_left_f = frame_points[0];
//			lower_left_f = frame_points[1];
//		}
//		else{
//			upper_left_f = frame_points[1];
//			lower_left_f = frame_points[0];
//		}
//		if(frame_points[2].y < frame_points[3].y){
//			upper_left_g = frame_points[2];
//			lower_left_g = frame_points[3];
//		}
//		else{
//			upper_left_g = frame_points[3];
//			lower_left_g = frame_points[2];
//		}
//		if(frame_points[4].y < frame_points[5].y){
//			upper_right_g = frame_points[4];
//			lower_right_g = frame_points[5];
//		}
//		else{
//			upper_right_g = frame_points[5];
//			lower_right_g = frame_points[4];
//		}
//		if(frame_points[6].y < frame_points[7].y){
//			upper_right_f = frame_points[6];
//			lower_right_f = frame_points[7];
//		}
//		else{
//			upper_right_f = frame_points[7];
//			lower_right_f = frame_points[6];
//		}
//	}
//	else{
		printf("Frame is not 8 Point\n");
		upper_left_f = cvPoint(26, 29);
		upper_right_f =  cvPoint(136, 29);
		lower_left_f = cvPoint(26, 220);
		lower_right_f =  cvPoint(136, 220);

		upper_left_g = cvPoint(38, 22);
		upper_right_g = cvPoint(125, 22);
		lower_left_g =  cvPoint(38, 226);
		lower_right_g = cvPoint(125, 226);

		robot_goal_left = cvPoint(60, 226);
		robot_goal_right = cvPoint(93, 226);

//		cvCopy(img_all_round, show_img);
//		cvLine(show_img, upper_left_f, upper_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, lower_left_f, lower_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, upper_right_f, lower_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, upper_left_f, lower_left_f, CV_RGB( 255, 255, 0 ));
//
//		cvLine(show_img, upper_left_g, upper_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, lower_left_g, lower_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, upper_right_g, lower_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, upper_left_g, lower_left_g, CV_RGB( 0, 255, 0 ));

		//while(1){
			//cvShowImage("Now Image", show_img);
			//cvShowImage ("Poly", poly_dst);
			//if(cv::waitKey(1) >= 0) {
				//break;
			//}
		//}
		//return -1;
//	}
	printf("upper_left_fX:%d, Y:%d\n",upper_left_f.x, upper_left_f.y);
	printf("upper_left_gX:%d, Y:%d\n",upper_left_g.x, upper_left_g.y);
	printf("upper_right_fX:%d,Y:%d\n", upper_right_f.x, upper_right_f.y);
	printf("upper_right_gX:%d, Y:%d\n" , upper_right_g.x, upper_right_g.y);
	printf("lower_left_fX:%d, Y:%d\n", lower_left_f.x, lower_left_f.y);
	printf("lower_left_gX:%d, Y:%d\n", lower_left_g.x, lower_left_g.y);
	printf("lower_right_fX:%d, Y:%d\n", lower_right_f.x, lower_right_f.y);
	printf("lower_right_gX:%d, Y:%d\n", lower_right_g.x, lower_right_g.y);
	printf("robot_goal_left:%d, Y:%d\n", robot_goal_left.x, robot_goal_left.y);
	printf("robot_goal_right:%d, Y:%d\n", robot_goal_right.x, robot_goal_right.y);

    cvReleaseImage(&dst_img_frame);
    cvReleaseImage(&grayscale_img);
    cvReleaseImage(&poly_tmp);
    cvReleaseImage(&poly_gray);

    cvReleaseMemStorage(&contStorage);
    cvReleaseMemStorage(&polyStorage);
	//return 1;
	// Init font
	cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 0.4,0.4,0,1);
	bool is_pushed_decision_button = 1;// to be driven by the signal from the other Raspberry Pi

	while(1){
		// start when the decision button is pressed
		if(gpioRead(8)==0 && is_pushed_decision_button==1){
			cvCopy(img_all_round, img_all_round2);
			cvCopy(img_all_round, show_img);
			img_robot_side = cvQueryFrame(capture_robot_side);
			img_human_side = cvQueryFrame(capture_human_side);
			//IplImage* -> Mat
			mat_frame1 = cv::cvarrToMat(img_robot_side);
			mat_frame2 = cv::cvarrToMat(img_human_side);
			// flip vertically and horizontally; in production, mat_frame1 is the one to flip
			cv::flip(mat_frame1, mat_frame1, 0); // flip around the horizontal axis (vertical flip)
			cv::flip(mat_frame1, mat_frame1, 1); // flip around the vertical axis (horizontal flip)
			vconcat(mat_frame2, mat_frame1, dst_img_v);

			iBrightness  = iSliderValue1 - 50;
			dContrast = iSliderValue2 / 50.0;
			dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); // adjust contrast/brightness of the concatenated image
			// hand the brightness-adjusted result over as an IplImage* (Mat -> IplImage*), then release the Mats
			*img_all_round = dst_bright_cont;
			mat_frame1.release();
			mat_frame2.release();
			dst_img_v.release();

			cvFillPoly(img_all_round, pts, npts, 2, CV_RGB(0, 0, 0));

			IplImage* dst_img_mallet = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
			IplImage* dst_img_pack = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
			IplImage* dst_img2_mallet = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3);
			IplImage* dst_img2_pack = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3);

			cv_ColorExtraction(img_all_round, dst_img_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6);
			cv_ColorExtraction(img_all_round, dst_img_mallet, CV_BGR2HSV, iSliderValuemallet1, iSliderValuemallet2, iSliderValuemallet3, iSliderValuemallet4, iSliderValuemallet5, iSliderValuemallet6);
			cv_ColorExtraction(img_all_round2, dst_img2_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6);

			//CvMoments moment_mallet;
			CvMoments moment_pack;
			CvMoments moment_mallet;
			CvMoments moment2_pack;
			//cvSetImageCOI(dst_img_mallet, 1);
			cvSetImageCOI(dst_img_pack, 1);
			cvSetImageCOI(dst_img_mallet, 1);
			cvSetImageCOI(dst_img2_pack, 1);

			//cvMoments(dst_img_mallet, &moment_mallet, 0);
			cvMoments(dst_img_pack, &moment_pack, 0);
			cvMoments(dst_img_mallet, &moment_mallet, 0);
			cvMoments(dst_img2_pack, &moment2_pack, 0);

			// coordinate calculation
			double m00_before = cvGetSpatialMoment(&moment2_pack, 0, 0);
			double m10_before = cvGetSpatialMoment(&moment2_pack, 1, 0);
			double m01_before = cvGetSpatialMoment(&moment2_pack, 0, 1);
			double m00_after = cvGetSpatialMoment(&moment_pack, 0, 0);
			double m10_after = cvGetSpatialMoment(&moment_pack, 1, 0);
			double m01_after = cvGetSpatialMoment(&moment_pack, 0, 1);
			double gX_before = m10_before/m00_before;
			double gY_before = m01_before/m00_before;
			double gX_after = m10_after/m00_after;
			double gY_after = m01_after/m00_after;
			double m00_mallet = cvGetSpatialMoment(&moment_mallet, 0, 0);
			double m10_mallet = cvGetSpatialMoment(&moment_mallet, 1, 0);
			double m01_mallet = cvGetSpatialMoment(&moment_mallet, 0, 1);
			double gX_now_mallet = m10_mallet/m00_mallet;
			double gY_now_mallet = m01_mallet/m00_mallet;

			int target_direction = -1; // target direction: clockwise=1, counterclockwise=0
			// draw the circles at 1/10 of the frame height
			cvCircle(show_img, cvPoint(gX_before, gY_before), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0);
			cvCircle(show_img, cvPoint(gX_now_mallet, gY_now_mallet), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0);
			cvLine(show_img, cvPoint(gX_before, gY_before), cvPoint(gX_after, gY_after), cvScalar(0,255,0), 2);
			cvLine(show_img, robot_goal_left, robot_goal_right, cvScalar(0,255,255), 2);
			printf("gX_after: %f\n",gX_after);
			printf("gY_after: %f\n",gY_after);
			printf("gX_before: %f\n",gX_before);
			printf("gY_before: %f\n",gY_before);
			printf("gX_now_mallet: %f\n",gX_now_mallet);
			printf("gY_now_mallet: %f\n",gY_now_mallet);
			int target_destanceY = CAM_PIX_2HEIGHT - 30; // keep the Y distance fixed: this is the defense line
			// the puck travels in a straight line, so predict its trajectory with a linear (y = ax + b) model
			double a_inclination;
			double b_intercept;

			int closest_frequency;

			int target_coordinateX;
			int origin_coordinateY;
			int target_coordinateY;

			double center_line = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
			int left_frame = (upper_left_f.x + lower_left_f.x)/2;
			int right_frame = (upper_right_f.x + lower_right_f.x)/2;

			double y_line = (upper_left_f.y + lower_right_f.y)/3;
			double waiting_position = (robot_goal_left.x + lower_left_g.x) / 2;

			if(gY_after - gY_before < -1){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 600);
				target_coordinateX = waiting_position;
				if(waiting_position + 5 < gX_now_mallet){
					target_direction = 0;// counterclockwise
				}
				else if(gX_now_mallet < waiting_position - 5){
					target_direction = 1;// clockwise
				}
			}
			/*else if(robot_goal_right.x < gX_now_mallet){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1000);
				target_direction = 0;// counterclockwise
			}
			else if(gX_now_mallet < robot_goal_left.x){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1000);
				target_direction = 1;// clockwise
			}*/
			else if(y_line < gY_after && y_line > gY_before){
				clock_t start = clock();
				clock_t end;
				end = start + 0.5 * (target_coordinateX - robot_goal_left.x) / 10;
				target_direction = 1;
				gpioPWM(25, 128);
				gpioWrite(18, target_direction);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				while(clock() < end);// busy-wait until the deadline
			}
			else{
				gpioPWM(25, 0);
				closest_frequency = gpioSetPWMfrequency(25, 0);
			}



			if(target_direction != -1){
				gpioWrite(18, target_direction);
			}
			// draw the defense line
			cvLine(show_img, cvPoint(CAM_PIX_WIDTH, target_destanceY), cvPoint(0, target_destanceY), cvScalar(255,255,0), 2);
			// draw the mallet's movement
			cvLine(show_img, cvPoint((int)gX_now_mallet, (int)gY_now_mallet), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);

			/*

			int amount_movement = target_coordinateX - gX_now_mallet;

			//when a limit switch fires, set target_direction accordingly
//			if(gpioRead(6) == 1){//X-axis right
//				gpioPWM(25, 128);
//				closest_frequency = gpioSetPWMfrequency(25, 1500);
//				target_direction = 0;// counterclockwise
//				printf("X-axis right limit! counterclockwise\n");
//			}
//			else
			if(gpioRead(26) == 1){//X-axis left
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				target_direction = 1;// clockwise
				printf("X-axis left limit! clockwise\n");
			}
			else if(gpioRead(5) == 1){//Y-axis upper right
				gpioPWM(23, 128);
				gpioSetPWMfrequency(23, 1500);
				gpioWrite(14, 0);
				printf("Y-axis upper-right limit! clockwise\n");
			}
			else if(gpioRead(13) == 1){//Y-axis lower right
				gpioPWM(23, 128);
				gpioSetPWMfrequency(23, 1500);
				gpioWrite(14, 1);
				printf("Y-axis lower-right limit! counterclockwise\n");
			}
			else if(gpioRead(19) == 1){//Y-axis lower left
				gpioPWM(24, 128);
				gpioSetPWMfrequency(24, 1500);
				gpioWrite(15, 0);
				printf("Y-axis lower-left limit! clockwise\n");
			}

			else if(gpioRead(21) == 1){//Y-axis upper left
				gpioPWM(24, 0);
				gpioSetPWMfrequency(24, 1500);
				gpioWrite(15, 1);
				printf("Y-axis upper-left limit! counterclockwise\n");
			}
			else{
				// because the Y axis is held fixed
				gpioSetPWMfrequency(23, 0);
				gpioSetPWMfrequency(24, 0);

				if(amount_movement > 0){
					target_direction = 1;// clockwise
				}
				else if(amount_movement < 0){
					target_direction = 0;// counterclockwise
				}
			}
			if(target_direction != -1){
				gpioWrite(18, target_direction);
			}
			else{
				gpioPWM(24, 0);
				gpioSetPWMfrequency(24, 0);
			}
			printf("setting_frequency: %d\n", closest_frequency);*/

			// display the images in their windows
			//cvShowImage("Previous Image", img_all_round2);
			cvShowImage("Now Image", show_img);
			cvShowImage("pack", dst_img_pack);
			cvShowImage("mallet", dst_img_mallet);
			cvShowImage ("Poly", poly_dst);

			cvReleaseImage (&dst_img_mallet);
			cvReleaseImage (&dst_img_pack);
			cvReleaseImage (&dst_img2_mallet);
			cvReleaseImage (&dst_img2_pack);

			if(cv::waitKey(1) >= 0) {
				break;
			}
		}
		else{ // a reset signal arrived
			is_pushed_decision_button = 0;
		}
    }

    gpioTerminate();

    cvDestroyAllWindows();

	//Clean up used CvCapture*
	cvReleaseCapture(&capture_robot_side);
	cvReleaseCapture(&capture_human_side);
    //Clean up used images
	cvReleaseImage(&poly_dst);
	cvReleaseImage(&tracking_img);
    cvReleaseImage(&img_all_round);
    cvReleaseImage(&img_human_side);
    cvReleaseImage(&img_all_round2);
    cvReleaseImage(&show_img);
    cvReleaseImage(&img_robot_side);
    cvFree(&pts[0]);
	cvFree(&pts[1]);
	cvFree(pts);

    return 0;
}
Example #24
0
//switch between mouse and gesture
void Recognition::MouseGestureSwitch(CvSeq* TouchBlobList){
	switch (TouchBlobList->total){
		//initial state, button release, left click; two left clicks in a row make a double click
			case 0:
				OneFinger++;// a counter
				//for mouse drag,clear variables after being used
				if(OneFinger==3){
					MouseDragFlag=0;
					if(LeftButton){
					mouse_event(MOUSEEVENTF_LEFTUP,0,0,0,0);//release left button
					LeftButton=false;//flag, left button not pressed
					}
					OneFinger=0;
				}
				//a short one-finger touch just ended: issue a left click (two in quick succession make a double click)
				if(Counter==1 || Counter==2){
					mouse.LeftClick();
					MouseDragFlag=FlagForMouseDrag;//for mouse drag Recognition 
				}
	            Initial();
				break;
				//mouse drag,mouse move
			case 1:
				//if switch from another case,first initialize variables
				if(CaseFlag!=1)
					Initial();
				CaseFlag=1;//set CaseFlag equal to 1 
				for(int i=0;i<TouchBlobList->total;i++){
					float* p=(float*)cvGetSeqElem(TouchBlobList,0);
					PointForCaseOne.x=cvRound(p[0]);
					PointForCaseOne.y=cvRound(p[1]);
				}//evaluate
				Counter++;
				//when first point comes,cursor location is current point location
				if(Counter==1){
					CursorInitialPosition.x=PointForCaseOne.x;
					CursorInitialPosition.y=PointForCaseOne.y;
				}
				//this is for mouse drag
				MouseDragFlag++;
				if(MouseDragFlag==(FlagForMouseDrag+1)){
					mouse_event(MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0);//press left button and not release
					LeftButton=true;
					GetCursorPos(&pt);//get current mouse pointer location
					//set the cursor location according to the point's relative movement
					SetCursorPos(pt.x+PointForCaseOne.x-CursorInitialPosition.x,pt.y+CursorInitialPosition.y-PointForCaseOne.y); 
					CursorInitialPosition.x=PointForCaseOne.x;
					CursorInitialPosition.y=PointForCaseOne.y;//replace previous location with current one
					MouseDragFlag=FlagForMouseDrag;
			    	break;
				}
				else{
					//mouse movement: set the cursor location according to the point's relative movement
					GetCursorPos(&pt);
					SetCursorPos(pt.x+PointForCaseOne.x-CursorInitialPosition.x,pt.y+CursorInitialPosition.y-PointForCaseOne.y);
					CursorInitialPosition.x=PointForCaseOne.x;
					CursorInitialPosition.y=PointForCaseOne.y;
					MouseDragFlag=0;
			    	break;
				}
				//right click,clockwise,anticlockwise,zoom in,zoom out
			case 2:
				if(CaseFlag!=2)//if switch from another case,first initialize variables
					Initial();
				CaseFlag=2;
				for(int i=0;i<TouchBlobList->total;i++){
					float* p=(float*)cvGetSeqElem(TouchBlobList,i);
					PointForCaseTwo[i].x=cvRound(p[0]);
					PointForCaseTwo[i].y=cvRound(p[1]);
				}//evaluate
				Counter++;
				//on the first frame, record the distance between the two points and their angle relative to the X axis
				if(Counter==1){
					InitialDistance=(double)(pow(double(PointForCaseTwo[0].x-PointForCaseTwo[1].x),2)+pow(double(PointForCaseTwo[0].y-PointForCaseTwo[1].y),2));
					InitialAngle=double(PointForCaseTwo[0].y+PointForCaseTwo[1].y)/(PointForCaseTwo[0].x+PointForCaseTwo[1].x);//cast avoids integer division
					InitialAngle=atan(InitialAngle);//angle
					InitialDistance=sqrt(InitialDistance);//distance
				}
				else{
					double ProcessedDistance=(double)(pow(double(PointForCaseTwo[0].x-PointForCaseTwo[1].x),2)+pow(double(PointForCaseTwo[0].y-PointForCaseTwo[1].y),2));
					//calculate the distance of two points in following frame
					ProcessedDistance=sqrt(ProcessedDistance);
					ProcessedAngle=double(PointForCaseTwo[0].y+PointForCaseTwo[1].y)/(PointForCaseTwo[0].x+PointForCaseTwo[1].x);//cast avoids integer division
					ProcessedAngle=atan(ProcessedAngle);//angle
					//calculate two angles in vector,only beyond a certain angle will work
					if((InitialAngle-ProcessedAngle)>(3.1415926/40)){
						gesture.RotateClockWise();
						InitialAngle=ProcessedAngle;
						break;
					}else if((ProcessedAngle-InitialAngle)>(3.1415926/40)){//calculate two angles in vector,picture clockwise
						gesture.RotateAntiClockWise();
						InitialAngle=ProcessedAngle;
						break;
					}else if((ProcessedDistance+160)<InitialDistance){//fingers pinched together
					//only react when the distance changes by more than a certain threshold
						gesture.ZoomIn();
			    		InitialDistance=ProcessedDistance;
						break;
					}else if((ProcessedDistance-160)>InitialDistance){//fingers spread apart
						gesture.ZoomOut();
					    InitialDistance=ProcessedDistance;
						break;
					}else if(fabs(ProcessedDistance-InitialDistance)<5 && Counter==6){//a steady two-finger hold triggers a right click
						mouse.RightClick();
						break;
					}
				}
				break;
				//previous or next picture; browser tab back and forward
			case 3:
				if(CaseFlag!=3)
					Initial();//clear all used variable when switch among different fingers action
				CaseFlag=3;
				ProcessedDistance=0;
				for(int i=0;i<TouchBlobList->total;i++){
					float* p = (float*)cvGetSeqElem(TouchBlobList,i);//get each point's coordinate
					ProcessedDistance=ProcessedDistance+cvRound(p[0]);//sum the three fingers' X axis values
				}
				Counter++;
				if (Counter==1){
					InitialDistance=ProcessedDistance;//initial
				}
				else{
					WindowType=WindowsRecognition();//recognize what kind of window is active
					if(WindowType==2){//if browser
						if((ProcessedDistance-180)>InitialDistance){//tag forward
							gesture.TagForward();
						    InitialDistance=ProcessedDistance;//replace initial sum with current sum of x axis
						    break;
						}
	                 	if((ProcessedDistance+180)<InitialDistance){//tag back
					    	gesture.TagBack();
							InitialDistance=ProcessedDistance;
					    	break;
						}
					}
					//if current window is picture viewer
					else if(WindowType==1){
						if((ProcessedDistance-180)>InitialDistance){//change to next picture
							gesture.NextPicture();
						    InitialDistance=ProcessedDistance;
						    break;
						}
	                 	if((ProcessedDistance+180)<InitialDistance){//change to previous picture
							gesture.PreviousPicture();
							InitialDistance=ProcessedDistance;
					    	break;
						}
					}
				}
				break;
			case 4:
				//if jumping from another case, first initialize
				if(CaseFlag!=4)
					Initial();
				CaseFlag=4;//set CaseFlag equal to 4
				ProcessedDistance=0;
				for(int i=0;i<TouchBlobList->total;i++){
					float* p = (float*)cvGetSeqElem(TouchBlobList,i);
					ProcessedDistance=ProcessedDistance+cvRound(p[1]);//sum up four fingers' Y axis value
				}
				Counter++;
				if (Counter==1){
					InitialDistance=ProcessedDistance;//initialize at the beginning
				}
				else{
					//the movement from bottom to top must exceed a certain distance
					if((ProcessedDistance-160)>InitialDistance){
						gesture.UndoMinimizeAll();//call certain function
						InitialDistance=ProcessedDistance;
						break;
					}
					//move from top to bottom
					if((ProcessedDistance+160)<InitialDistance){
						InitialDistance=ProcessedDistance;
						gesture.MinimizeAll();//call certain function
						break;
					}
				}
				break;
			//do not support more than 4 fingers' actions
			default:
				Initial();
				break;
		}		
}
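/* Note: the two Recognition methods above use class members that this excerpt
   does not show. A minimal sketch of plausible declarations, inferred entirely
   from usage (all names, types, and collaborator classes are assumptions): */
class Recognition {
public:
	void FunctionSwitch(CvSeq* TouchBlobList);
	void MouseGestureSwitch(CvSeq* TouchBlobList);
private:
	int SwitchCounter, SwitchCase, Counter, CaseFlag, WindowType;
	int OneFinger, MouseDragFlag, FlagForMouseDrag, MouseMoveToLeftClick;
	bool SingleClick, LeftButton;
	double InitialDistance, ProcessedDistance, InitialAngle, ProcessedAngle;
	POINT pt, CursorInitialPosition, PointForCaseOne, PointForCaseTwo[2];
	CvSeq* PreviousTouchBlobList;
	Keyboard keyboard;                 // assumed collaborator types
	Mouse mouse;
	Gesture gesture;
	void Initial();                    // resets the per-gesture state
	int WindowsRecognition();          // classifies the foreground window
};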
Example #25
0
//void processLanes(CvSeq *lines, IplImage* edges, IplImage *temp_frame)
static void processLanes(CvSeq *lines, IplImage* edges, IplImage *temp_frame, IplImage *org_frame)
{
  /* classify lines to left/right side */
  std::vector<Lane> left, right;

  for (int i=0; i<lines->total; i++)
    {
      CvPoint *line = (CvPoint *)cvGetSeqElem(lines, i);
      int dx = line[1].x - line[0].x;
      int dy = line[1].y - line[0].y;
      float angle = atan2f(dy, dx) * 180/CV_PI;

      if (fabs(angle) <= LINE_REJECT_DEGREES) // reject near horizontal lines
        {
          continue;
        }

      /* assume that vanishing point is close to the image horizontal center
         calculate line parameters: y = kx + b; */
      dx = (dx == 0) ? 1 : dx;  // prevent DIV/0!
      float k = dy/(float)dx;
      float b = line[0].y - k*line[0].x;

      /* assign lane's side based by its midpoint position */
      int midx = (line[0].x + line[1].x) / 2;
      if (midx < temp_frame->width/2)
        {
          left.push_back(Lane(line[0], line[1], angle, k, b));
        }
      else if (midx > temp_frame->width/2)
        {
          right.push_back(Lane(line[0], line[1], angle, k, b));
        }
    }

  /* show Hough lines */
  int org_offset = temp_frame->height;
  for (std::size_t i = 0; i < right.size(); ++i)
    {
      CvPoint org_p0 = right[i].p0;
      org_p0.y += org_offset;
      CvPoint org_p1 = right[i].p1;
      org_p1.y += org_offset;
#ifdef USE_POSIX_SHARED_MEMORY
#ifdef SHOW_DETAIL
      cvLine(temp_frame, right[i].p0, right[i].p1, BLUE, 2);
#endif
      cvLine(org_frame, org_p0, org_p1, BLUE, 2);
#endif
    }
  for (std::size_t i = 0; i < left.size(); ++i)
    {
      CvPoint org_p0 = left[i].p0;
      org_p0.y += org_offset;
      CvPoint org_p1 = left[i].p1;
      org_p1.y += org_offset;
#ifdef USE_POSIX_SHARED_MEMORY
#ifdef SHOW_DETAIL
      cvLine(temp_frame, left[i].p0, left[i].p1, RED, 2);
#endif
      cvLine(org_frame, org_p0, org_p1, RED, 2);
#endif
    }

  processSide(left, edges, false);
  processSide(right, edges, true);

  /* show computed lanes */
  int x = temp_frame->width * 0.55f;
  int x2 = temp_frame->width;
#if defined(USE_POSIX_SHARED_MEMORY)
#ifdef SHOW_DETAIL
  cvLine(temp_frame, cvPoint(x, laneR.k.get()*x + laneR.b.get()),
         cvPoint(x2, laneR.k.get()*x2 + laneR.b.get()), PURPLE, 2);
#endif

  cvLine(org_frame, cvPoint(x, laneR.k.get()*x + laneR.b.get() + org_offset),
         cvPoint(x2, laneR.k.get()*x2 + laneR.b.get() + org_offset), PURPLE, 2);
#else
  lane_detector::ImageLaneObjects lane_msg;
  lane_msg.lane_r_x1 = x;
  lane_msg.lane_r_y1 = laneR.k.get()*x + laneR.b.get() + org_offset;
  lane_msg.lane_r_x2 = x2;
  lane_msg.lane_r_y2 = laneR.k.get()*x2 + laneR.b.get() + org_offset;
#endif

  x = temp_frame->width * 0;
  x2 = temp_frame->width * 0.45f;
#if defined(USE_POSIX_SHARED_MEMORY)
#ifdef SHOW_DETAIL
  cvLine(temp_frame, cvPoint(x, laneL.k.get()*x + laneL.b.get()),
         cvPoint(x2, laneL.k.get()*x2 + laneL.b.get()), PURPLE, 2);
#endif

  cvLine(org_frame, cvPoint(x, laneL.k.get()*x + laneL.b.get() + org_offset),
         cvPoint(x2, laneL.k.get()*x2 + laneL.b.get() + org_offset), PURPLE, 2);
#else
  lane_msg.lane_l_x1 = x;
  lane_msg.lane_l_y1 = laneL.k.get()*x + laneL.b.get() + org_offset;
  lane_msg.lane_l_x2 = x2;
  lane_msg.lane_l_y2 = laneL.k.get()*x2 + laneL.b.get() + org_offset;

  image_lane_objects.publish(lane_msg);
#endif
  // cvLine(org_frame, cvPoint(lane_msg.lane_l_x1, lane_msg.lane_l_y1), cvPoint(lane_msg.lane_l_x2, lane_msg.lane_l_y2), RED, 5);
  // cvLine(org_frame, cvPoint(lane_msg.lane_r_x1, lane_msg.lane_r_y1), cvPoint(lane_msg.lane_r_x2, lane_msg.lane_r_y2), RED, 5);
}
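/* Note: processLanes() above relies on a Lane record plus laneL/laneR models
   defined elsewhere; laneR.k.get() suggests that there k and b live in a
   smoothing helper with a get() accessor. A minimal sketch of the record as
   used locally (names are assumptions): */
struct Lane {
	Lane(CvPoint a, CvPoint b, float ang, float kl, float bl)
		: p0(a), p1(b), angle(ang), k(kl), b(bl) {}
	CvPoint p0, p1;    // segment endpoints
	float angle;       // orientation in degrees
	float k, b;        // line model y = k*x + b
};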
Example #26
0
// video-device and display-device initialization and preview (with device status checking) --------------------------------
int video_fb_init_preview()
{
	// serial-port-related variables -------------
	char buff[512];
	int nread=0;
	int FrameDone=0;// end-of-frame flag
	int FrameCount=0;// records the frame length
	int j=0;
	int key=0;// on/off flag
	int stat=0;// video-device status flag
	//-------------------------------------------
	
	int numBufs;

	//--------------------------------------------
	//SDL yuv
	SDL_Surface      *pscreen;
	SDL_Overlay      *overlay;
	SDL_Rect         drect;
	SDL_Event        sdlevent;
	SDL_mutex        *affmutex;
	unsigned char    *p = NULL;
	unsigned char    frmrate;
	unsigned int     currtime;
	unsigned int     lasttime;
	char* status = NULL;

	//SDL RGB
	unsigned int     rmask;
	unsigned int     gmask;
	unsigned int     bmask;
	unsigned int     amask;	
	int              bpp;
	int 		 pitch;
	int 		 pixels_num;
	unsigned char    *pixels;
	unsigned char    *p_RGB = NULL;	
	SDL_Surface      *pscreen_RGB;
	SDL_Surface      *display_RGB;
	printf("USB Camera Test\n");

	video_fd = open("/dev/video0", O_RDWR, 0);// open the camera device in blocking mode
	if (video_fd<0)
	{
		printf("open error\n");
		return  1;
	}

	/************* query the driver for the device's supported video formats: start *************/
	struct v4l2_fmtdesc fmt0;
	int ret0;
	memset(&fmt0,0,sizeof(fmt0));
	fmt0.index = 0;
	fmt0.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
	while((ret0 = ioctl(video_fd,VIDIOC_ENUM_FMT,&fmt0)) == 0)//parenthesized so ret0 receives the ioctl return value, not the comparison result
	{
		fmt0.index++;
		printf("%d> pixelformat =%c%c%c%c,description =%s\n",fmt0.index,fmt0.pixelformat&0xff,(fmt0.pixelformat>>8)&0xff,(fmt0.pixelformat>>16)&0xff,(fmt0.pixelformat>>24)&0xff,fmt0.description);
	}
	/**************************END***************************/
	
	//---------------------set the capture format----------------//
	struct v4l2_format fmt;	
	memset( &fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	//stream type, always V4L2_BUF_TYPE_VIDEO_CAPTURE for capture
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;//source format: MJPEG, YUV 4:2:2 or RGB
	fmt.fmt.pix.width = 640;//frame width
	fmt.fmt.pix.height = 480;//frame height
	//fmt.fmt.pix.field=V4L2_FIELD_INTERLACED;
	//fmt.fmt.pix.colorspace=8;
	//printf("color: %d \n",fmt.fmt.pix.colorspace);
	if (ioctl(video_fd, VIDIOC_S_FMT, &fmt) < 0)//apply the format
	{
		printf("set format failed\n");
		return 2;
	}
	//-------------------------------------------------------//
	
	//+++++++++++++++++++++++++++++++++++++++++++++++++++++++
	//if(SDL_Init(SDL_INIT_VIDEO) < 0)
	//{
	//	printf("SDL Init failed.\n");
	//	exit(1);
	//}
	
	//SDL setup: YUV output
	/*
 	pscreen = SDL_SetVideoMode(fmt.fmt.pix.width, fmt.fmt.pix.height,0,SDL_VIDEO_Flags);
	overlay = SDL_CreateYUVOverlay(fmt.fmt.pix.width, fmt.fmt.pix.height,SDL_YUY2_OVERLAY,pscreen);
	p = (unsigned char *)overlay->pixels[0];
	drect.x = 0;
	drect.y = 0;
	drect.w = pscreen->w;
	drect.h = pscreen->h;
	*/

	//SDL setup: RGB output
	//pscreen = SDL_SetVideoMode(fmt.fmt.pix.width, fmt.fmt.pix.height, 24, SDL_SWSURFACE | SDL_DOUBLEBUF);
	rmask = 0x000000ff;
	gmask = 0x0000ff00;
	bmask = 0x00ff0000;
	amask = 0x00000000;
	bpp   = 24;
	pitch = fmt.fmt.pix.width*3;
	pixels_num = fmt.fmt.pix.width*fmt.fmt.pix.height*3;
	pixels = (unsigned char *)malloc(pixels_num);
	memset(pixels, 0, pixels_num);
	p_RGB = (unsigned char *)pixels;
	//pscreen_RGB = SDL_CreateRGBSurfaceFrom(pixels, fmt.fmt.pix.width, fmt.fmt.pix.height, bpp, pitch, rmask, gmask, bmask, amask);

	
	//lasttime = SDL_GetTicks();
	//affmutex = SDL_CreateMutex();
	//SDL setup end
	
	//OpenCV setup
	CvMemStorage*  storage = cvCreateMemStorage(0);
	IplImage*      img     = cvCreateImageHeader(cvSize(fmt.fmt.pix.width,fmt.fmt.pix.height), IPL_DEPTH_8U, 3);//header only; pixel data is attached later via cvSetData()
	IplImage*      imggray = cvCreateImage(cvSize(fmt.fmt.pix.width,fmt.fmt.pix.height), IPL_DEPTH_8U, 1);//image with its own pixel data
	cvNamedWindow("image", 1);

	unsigned char *pRGB = NULL;
	pRGB = (unsigned char *)calloc(1,fmt.fmt.pix.width*fmt.fmt.pix.height*3*sizeof(unsigned char));
	//load the face cascade once here; the original reloaded it on every frame inside the loop, which leaks memory and is very slow
	CvHaarClassifierCascade *cascade = (CvHaarClassifierCascade*)cvLoad("/usr/share/opencv-2.4.6.1/data/haarcascades/haarcascade_frontalface_alt2.xml", 0, 0, 0);
	//OpenCV setup end

	//------------------------request frame buffers---------------------//
	struct v4l2_requestbuffers req;
	memset(&req, 0, sizeof (req));
	req.count = 3;	//number of buffers, i.e. how many frames can be held
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	//stream type, always V4L2_BUF_TYPE_VIDEO_CAPTURE
	req.memory = V4L2_MEMORY_MMAP;	//memory type: V4L2_MEMORY_MMAP or V4L2_MEMORY_USERPTR
	if (ioctl(video_fd, VIDIOC_REQBUFS, &req) == -1)//apply the request
	{
		perror("request buffer error \n");
		return 2;
	}
	//-------------------------------------------------------//
	
	//--------map the buffers obtained via VIDIOC_REQBUFS into user space-------------//
	buffers = calloc(req.count, sizeof(VideoBuffer));	
	//printf("sizeof(VideoBuffer) is %d\n", sizeof(VideoBuffer));
	struct v4l2_buffer buf;
	for (numBufs = 0; numBufs < req.count; numBufs++)
	{
		memset( &buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	
		//memory type: V4L2_MEMORY_MMAP (memory mapping) or V4L2_MEMORY_USERPTR (user pointer)
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = numBufs;
		if (ioctl(video_fd, VIDIOC_QUERYBUF, &buf) < 0)//query the buffer's length and offset
		{
			printf("VIDIOC_QUERYBUF error\n");
			return 2;
		}
		//printf("buf len is %d\n", sizeof(buf));
		buffers[numBufs].length = buf.length;
		buffers[numBufs].offset = (size_t) buf.m.offset;
		//use mmap to map the requested buffer into this application's address space------
		buffers[numBufs].start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
			MAP_SHARED, video_fd, buf.m.offset);	
		if (buffers[numBufs].start == MAP_FAILED)
		{
			perror("buffers error\n");
			return 2;
		}
		if (ioctl(video_fd, VIDIOC_QBUF, &buf) < 0)//enqueue the buffer
		{
			printf("VIDIOC_QBUF error\n");
			return 2;
		}

	}
	//-------------------------------------------------------//
	
	//----------------------start streaming----------------------//
	enum v4l2_buf_type type;
	type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(video_fd, VIDIOC_STREAMON, &type) < 0)
	{
		printf("VIDIOC_STREAMON error\n");
		return 2;
	}
	//-------------------------------------------------------//
	
	//---------------------read back the negotiated format---------------------//	
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;				
	if (ioctl(video_fd, VIDIOC_G_FMT, &fmt) < 0)	
	{
		printf("get format failed\n");
		return 2 ;
	}
	else
	{
		printf("Picture:Width = %d   Height = %d\n", fmt.fmt.pix.width, fmt.fmt.pix.height);
		
	}
	//-------------------------------------------------------//
	int i=0;	
	//framebuffer-related and otherwise unused variables---------------------------
	/*FILE * fd_y_file = 0;
	int a=0;
	int k = 0;
	int i=0;
	//set up the framebuffer device------------------------------------
	struct jpeg_decompress_struct cinfo;
	struct jpeg_error_mgr jerr;
	FILE *infile;//handle of the JPEG file
	unsigned char *buffer;
	char *fb_device;
	unsigned int x;
	unsigned int y;
	//open the framebuffer device------------------------------------------------
	if ((fb = open("/dev/fb0", O_RDWR)) < 0)
	{
		perror(__func__);
		return 2;
	}

	//query the framebuffer state-----------------------------------------
	fb_set(fb);//set the display memory parameters	
	fb_stat(fb);//get width, height and bit depth from the framebuffer driver
	
	printf("frame buffer: %dx%d,  %dbpp, 0x%xbyte= %d,graylevels= %d \n", 
		fbdev.fb_width, fbdev.fb_height, fbdev.fb_bpp, fbdev.fb_size, fbdev.fb_size,fbdev.fb_gray);

	//map the framebuffer address into user space----------------------------------
	fbdev.fb_mem = mmap (NULL, fbdev.fb_size, PROT_READ|PROT_WRITE,MAP_SHARED,fb,0);
	fbdev.fb = fb;
	*/
		
	//preview the captured frames (a capture-to-file feature could be added if needed)-------------------
	while (sdl_quit)
	{
		
		fd_set fds;//fd set, to use the select() mechanism
		struct timeval tv;
		int ret1;
		
		FD_ZERO(&fds);//clear the fd set
		FD_SET(video_fd,&fds);//add the video device's fd to the set
		
		//wait with a timeout; this could also block indefinitely-------------------------------
		tv.tv_sec =2;
		tv.tv_usec=0;
		//wait for the video device to become ready--------------------------------------
		ret1=select(video_fd+1,&fds,NULL,NULL,&tv);
		if(-1==ret1)
		{
			if(EINTR==errno)
				continue;
			printf("select error. \n");
			exit(EXIT_FAILURE);
		}
		if(0==ret1)
		{
			printf("select timeout. \n");
			continue;
		}		
		while(sdl_quit)		
		{
					 
			//check for quit events
			while(SDL_PollEvent(&sdlevent))
			{
				if(sdlevent.type == SDL_QUIT)
				{
					sdl_quit = 0;
					break;
				}
			}
			currtime = SDL_GetTicks();
			if(currtime - lasttime >0)
				frmrate = 1000/(currtime-lasttime);
			lasttime = currtime;

			//fetch a frame that is already prepared in the FIFO-----------------------		
			memset(&buf ,0,sizeof(buf));
			buf.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
			buf.memory=V4L2_MEMORY_MMAP;
			//dequeue the prepared buffer--------------------------------------------
			ret1=ioctl (video_fd,VIDIOC_DQBUF,&buf);
			if(ret1!=0)
			{					
				printf("Lost the video \n");					
			}	
	
			//user-space address of the current frame, used for format conversion------------------
			unsigned char *ptcur=buffers[buf.index].start;
			//++++++++++++++++++++++++++++++++++++++++
			//algorithm area
			//+++++++++++++++++++++++++++++++++++++++++
			//grayscale conversion
			/*
			unsigned char *pgray = NULL;
			pgray = (unsigned char *)calloc(1,fmt.fmt.pix.width*fmt.fmt.pix.height*2*sizeof(unsigned char));//sized generously to avoid segfaults
			yuv2gray(ptcur,pgray,fmt.fmt.pix.width, fmt.fmt.pix.height);
			*/

			//YUV to RGB (24-bit) conversion
			YUYVToRGB888(ptcur, pRGB, fmt.fmt.pix.width, fmt.fmt.pix.height);
			
			//OpenCV face detection (the cascade is loaded once in the setup section above)
			cvSetData(img, pRGB, fmt.fmt.pix.width*3);//attach the pRGB buffer to img
			cvCvtColor(img, imggray, CV_RGB2GRAY);//grayscale copy of img into imggray for the detector
			cvClearMemStorage(storage);
			cvEqualizeHist(imggray, imggray);
			CvSeq* objects = cvHaarDetectObjects(imggray, cascade, storage, 1.1, 2, 0, cvSize(30,30), cvSize(0,0));//max size (0,0) means unbounded; the original passed (30,30) twice, restricting detection to a single scale
			
			//OpenCV: mark the detected faces
			CvScalar colors[] = {{{255,0,0}},{{0,0,0}}};
			int faces=0;
			for(faces=0; faces < (objects ? objects->total:0); faces++)
			{
				CvRect* r = (CvRect *)cvGetSeqElem(objects,faces);
				cvRectangle(img, cvPoint(r->x, r->y), cvPoint(r->x+r->width, r->y+r->height),colors[0],2,8,0 );//draw a box on the original image
			}
			

			//adjust the OpenCV img pixel data
			/*CvScalar s;
			int imgi=0,imgj=0,sdlcount=0;
			for(imgi=0;imgi<img->height;imgi++)
			{
				for(imgj=0; imgj<img->width; imgj++)
				{
					s=cvGet2D(img,imgi,imgj);
					pRGB[sdlcount++]=0xff;//s.val[0];//B
					pRGB[sdlcount++]=0xff;//s.val[1];//G
					pRGB[sdlcount++]=0xff;//s.val[2];//R
					//cvSet2D(img,imgi,imgj,s);
				}
			}
			*/
			//OpenCV: show the image	
			cvShowImage("image", img);
			char c = cvWaitKey(1);
			printf("%d\n",c);
			if(c==27)
				sdl_quit=0;
			
			
			//load YUV into SDL
			/*
			SDL_LockYUVOverlay(overlay);
			memcpy(p, pgray,pscreen->w*(pscreen->h)*2);
			SDL_UnlockYUVOverlay(overlay);
			SDL_DisplayYUVOverlay(overlay, &drect);
			*/

			//load RGB into SDL
			//memcpy(pixels, pRGB, pscreen_RGB->w*(pscreen_RGB->h)*3);
			//SDL_BlitSurface(pscreen_RGB, NULL, display_RGB, NULL);
			//SDL_Flip(display_RGB);

			//frame rate statistics
			//status = (char *)calloc(1,20*sizeof(char));
			//sprintf(status, "Fps:%d",frmrate);
			//SDL_WM_SetCaption(status, NULL);
			//SDL_Delay(10);
			//re-enqueue the buffer we are done with--------------------------------------------
			ret1=ioctl (video_fd,VIDIOC_QBUF,&buf);
			if(ret1!=0)
			{					
				printf("Lost the video \n");					
			}
			
		}	
	}	

	//fb_munmap(fbdev.fb_mem, fbdev.fb_size);	//release the framebuffer mapping
	//close(fb);//close the framebuffer device
	for(i=0;i<req.count;i++)
	{
		if(-1==munmap(buffers[i].start,buffers[i].length))
			printf("munmap error:%d \n",i);
	}

	cvDestroyWindow("image");
	close(video_fd);
	if(affmutex)
		SDL_DestroyMutex(affmutex);//never created above (creation is commented out), so guard the destroy
	//SDL_FreeYUVOverlay(overlay);
	cvReleaseImageHeader(&img);//header only: the pixels belong to pRGB, so don't let OpenCV free them
	cvReleaseImage(&imggray);
	cvReleaseHaarClassifierCascade(&cascade);
	free(pRGB);
	free(pixels);
	free(status);
	free(buffers);
	SDL_Quit();
	return 0;

}
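Note: YUYVToRGB888() is used above but not defined in this snippet. A minimal sketch of what such a helper typically looks like (one plausible implementation, not necessarily the original): each 4-byte YUYV macropixel carries two pixels that share U and V, converted here with integer BT.601 coefficients.

/* Minimal sketch of a YUYV (YUY2) -> packed RGB888 converter. */
static unsigned char clamp_u8(int v)
{
	return (unsigned char)(v < 0 ? 0 : (v > 255 ? 255 : v));
}

void YUYVToRGB888(const unsigned char *yuyv, unsigned char *rgb,
                  int width, int height)
{
	int i, j;
	for (i = 0; i < width * height / 2; i++) {	/* two pixels per macropixel */
		int y0 = yuyv[4*i + 0], u = yuyv[4*i + 1] - 128;
		int y1 = yuyv[4*i + 2], v = yuyv[4*i + 3] - 128;
		for (j = 0; j < 2; j++) {
			int y = (j == 0) ? y0 : y1;
			/* integer BT.601: R=Y+1.402V  G=Y-0.344U-0.714V  B=Y+1.772U */
			rgb[6*i + 3*j + 0] = clamp_u8(y + ((359 * v) >> 8));
			rgb[6*i + 3*j + 1] = clamp_u8(y - ((88 * u + 183 * v) >> 8));
			rgb[6*i + 3*j + 2] = clamp_u8(y + ((454 * u) >> 8));
		}
	}
}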
Example #27
0
int ofxCvHaarFinder::findHaarObjects(const ofxCvGrayscaleImage& input,
	int x, int y, int w, int h,
	int minWidth, int minHeight) {

	int nHaarResults = 0;

	if (cascade) {
		if (!blobs.empty())
			blobs.clear();

		// we make a copy of the input image here
		// because we need to equalize it.

		if (img.width == input.width && img.height == input.height) {
			img = input;
		} else {
			img.clear();
			img.allocate(input.width, input.height);
			img = input;
		}

		img.setROI(x, y, w, h);
		cvEqualizeHist(img.getCvImage(), img.getCvImage());
		CvMemStorage* storage = cvCreateMemStorage();

		/*
		Alternative modes:

		CV_HAAR_DO_CANNY_PRUNING
		Regions without edges are ignored.

		CV_HAAR_SCALE_IMAGE
		Scale the image rather than the detector
		(sometimes yields speed increases).

		CV_HAAR_FIND_BIGGEST_OBJECT
		Only return the largest result.

		CV_HAAR_DO_ROUGH_SEARCH
		When BIGGEST_OBJECT is enabled, stop at
		the first scale for which multiple results
		are found.
		*/

		CvSeq* haarResults = cvHaarDetectObjects(
				img.getCvImage(), cascade, storage, scaleHaar, neighbors, CV_HAAR_DO_CANNY_PRUNING,
				cvSize(minWidth, minHeight));

		nHaarResults = haarResults->total;

		for (int i = 0; i < nHaarResults; i++ ) {
			printf("%i objects\n", i);
			
			ofxCvBlob blob;

			CvRect* r = (CvRect*) cvGetSeqElem(haarResults, i);

			float area = r->width * r->height;
			float length = (r->width * 2) + (r->height * 2);
			float centerx	= (r->x) + (r->width / 2.0);
			float centery	= (r->y) + (r->height / 2.0);

			blob.area = fabs(area);
			blob.hole = false;	// width*height is never negative, so this was always false
			blob.length	= length;
			blob.boundingRect.x = r->x + x;
			blob.boundingRect.y = r->y + y;
			blob.boundingRect.width = r->width;
			blob.boundingRect.height = r->height;
			blob.centroid.x = centerx;
			blob.centroid.y = centery;
			blob.pts.push_back(ofPoint(r->x, r->y));
			blob.pts.push_back(ofPoint(r->x + r->width, r->y));
			blob.pts.push_back(ofPoint(r->x + r->width, r->y + r->height));
			blob.pts.push_back(ofPoint(r->x, r->y + r->height));

			blobs.push_back(blob);
		}

		cvReleaseMemStorage(&storage);
	}

	return nHaarResults;
}
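Note on the flag list in the comment block above: the CV_HAAR_* flags can be OR-ed together. A minimal sketch (an assumed helper, not part of ofxCvHaarFinder) showing CV_HAAR_FIND_BIGGEST_OBJECT combined with CV_HAAR_DO_ROUGH_SEARCH, which makes the detector return at most one rectangle:

#include <opencv/cv.h>

// Returns the single biggest detection, or NULL if none; the returned
// CvRect lives in `storage`, which the caller owns and clears.
static CvRect* detectBiggest(IplImage* gray, CvHaarClassifierCascade* cascade,
                             CvMemStorage* storage, double scale, int neighbors)
{
	CvSeq* res = cvHaarDetectObjects(
		gray, cascade, storage, scale, neighbors,
		CV_HAAR_FIND_BIGGEST_OBJECT | CV_HAAR_DO_ROUGH_SEARCH,
		cvSize(30, 30), cvSize(0, 0));
	return (res && res->total > 0) ? (CvRect*)cvGetSeqElem(res, 0) : NULL;
}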
Example #28
0
void surf_match(IplImage* object_color, IplImage* object, IplImage* image,const CvSeq *objectKeypoints,const CvSeq *imageKeypoints,const CvSeq * objectDescriptors,const CvSeq * imageDescriptors, CvPoint val[4])
{
    cvNamedWindow("Object", 0);
    cvNamedWindow("Object Correspond", 0);

    static CvScalar colors[] = 
    {
        {{0,0,255}},
        {{0,128,255}},
        {{0,255,255}},
        {{0,255,0}},
        {{255,128,0}},
        {{255,255,0}},
        {{255,0,0}},
        {{255,0,255}},
        {{255,255,255}}
    };

    int i;

	CvPoint src_corners[4] = {{0,0}, {object->width,0}, {object->width, object->height}, {0, object->height}};
    CvPoint dst_corners[4];
    IplImage* correspond = cvCreateImage( cvSize(image->width, object->height+image->height), 8, 1 );
    cvSetImageROI( correspond, cvRect( 0, 0, object->width, object->height ) );
    cvCopy( object, correspond );
    cvSetImageROI( correspond, cvRect( 0, object->height, correspond->width, correspond->height ) );
    cvCopy( image, correspond );
    cvResetImageROI( correspond );

#ifdef USE_FLANN
    printf("Using approximate nearest neighbor search\n");
#endif

    if( locatePlanarObject( objectKeypoints, objectDescriptors, imageKeypoints,
        imageDescriptors, src_corners, dst_corners ))
    {
        for( i = 0; i < 4; i++ )
        {
            CvPoint r1 = dst_corners[i%4];
            CvPoint r2 = dst_corners[(i+1)%4];
            cvLine( correspond, cvPoint(r1.x, r1.y+object->height ),
                cvPoint(r2.x, r2.y+object->height ), colors[8] );
        }
    }
    vector<int> ptpairs;
#ifdef USE_FLANN
    flannFindPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
#else
    findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
#endif
    for( i = 0; i < (int)ptpairs.size(); i += 2 )
    {
        CvSURFPoint* r1 = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, ptpairs[i] );
        CvSURFPoint* r2 = (CvSURFPoint*)cvGetSeqElem( imageKeypoints, ptpairs[i+1] );
        cvLine( correspond, cvPointFrom32f(r1->pt),
            cvPoint(cvRound(r2->pt.x), cvRound(r2->pt.y+object->height)), colors[8] );
    }

    cvShowImage( "Object Correspond", correspond );
    for( i = 0; i < objectKeypoints->total; i++ )
    {
        CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, i );
        CvPoint center;
        int radius;
        center.x = cvRound(r->pt.x);
        center.y = cvRound(r->pt.y);
        radius = cvRound(r->size*1.2/9.*2);
        cvCircle( object_color, center, radius, colors[0], 1, 8, 0 );
    }
    cvShowImage( "Object", object_color );

    cvWaitKey(0);

    cvDestroyWindow("Object");
    cvDestroyWindow("Object Correspond");// the original also destroyed "Object SURF", a window never created here

	//CvPoint val[4];
	for(int k=0;k<4;k++)
	{
//		printf("%d %d \n", dst_corners[k].x, dst_corners[k].y);
		val[k] = dst_corners[k]; 
	}

}
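Note: surf_match() expects keypoints and descriptors computed elsewhere. A minimal sketch of preparing them with the legacy C API (the hessian threshold 500 and extended=1 mirror OpenCV's find_obj.cpp sample; the actual caller may use different values, and the header that declares cvExtractSURF varies across OpenCV versions):

#include <opencv/cv.h>

void extract_surf_features(IplImage* gray, CvMemStorage* storage,
                           CvSeq** keypoints, CvSeq** descriptors)
{
	/* cvSURFParams(hessianThreshold, extended): extended=1 gives
	 * 128-dimensional descriptors instead of 64 */
	CvSURFParams params = cvSURFParams(500, 1);
	cvExtractSURF(gray, 0, keypoints, descriptors, storage, params, 0);
}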
Example #29
0
int main(int argc, char* argv[])
{
	IplImage* color = cvLoadImage("E:\\pic_skindetect\\clothtest\\2.jpg", 1);
	IplImage* gray = cvCreateImage(cvGetSize(color), 8, 1);
	IplImage* show = cvCreateImage(cvGetSize(color), 8, 1);
	cvZero(show);
	int i = 0;

	cvCvtColor(color, gray, CV_BGR2GRAY);// cvLoadImage returns BGR, so use CV_BGR2GRAY
	//cvThreshold(gray, gray, 100, 255, CV_THRESH_BINARY_INV);
	cvCanny(gray, gray, 50, 150, 3); 
	CvMemStorage * storage = cvCreateMemStorage(0);
	CvSeq* contours;
	CvSeq* seq_fourier = cvCreateSeq(CV_SEQ_KIND_GENERIC|CV_32FC2, sizeof(CvContour),sizeof(CvPoint2D32f), storage);// elements are CvPoint2D32f, so the element-type flag is CV_32FC2
	cvFindContours(gray, storage, &contours, sizeof(CvContour), CV_RETR_TREE);

	CvSeq* mostContours = contours;
	/*for(; contours; contours = contours->h_next)
	{
		if (mostContours->total < contours->total)
		{
			mostContours = contours;
		}
	}*/

	int t = 0;
	for(; contours; contours = contours->h_next)
	{
	//contours = mostContours;
		++t;
		printf("%d\n", contours->total);
		cvDrawContours(color, contours, CV_RGB(255,0,0), CV_RGB(255,0,0), 1, 3);
		CalcFourierDescriptorCoeff(contours, 2000, seq_fourier);
		CalcBoundary(seq_fourier, contours->total, contours);

		for(int i = 0; i < contours->total; i++)
		{
			CvPoint* pt=(CvPoint*)cvGetSeqElem(contours, i);
			if(pt->x >= 0 && pt->x < show->width && pt->y >= 0 && pt->y < show->height)
			{
				((uchar*)(show->imageData+pt->y*show->widthStep))[pt->x] = 255;
			}
		}

		/*for(i = 0; i < contours->total; i++)
		{
			CvPoint* pt=(CvPoint*)cvGetSeqElem(contours, i);
			printf("%d, %d, %d\n", pt->x, pt->y, i);
		}*/
/*
		for(i = 0; i < seq_fourier->total; i++)
		{
			CvPoint2D32f* pt=(CvPoint2D32f*)cvGetSeqElem(seq_fourier, i);
			printf("%f, %f, %d\n", pt->x, pt->y, i);
		}*/
	}
	printf("t=%d\n", t);

	cvNamedWindow("color", 0);
	cvShowImage("color",color);
	//cvWaitKey(0);

	cvNamedWindow("gray", 0);
	cvShowImage("gray", gray);
	//cvWaitKey(0);

	cvNamedWindow("reconstructed", 0);
	cvShowImage("reconstructed", show);
	cvWaitKey(0);
	cvReleaseMemStorage(&storage);
	cvReleaseImage(&color);
	cvReleaseImage(&gray);
	cvReleaseImage(&show);
	cvDestroyAllWindows();
	return 0;
}
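Note: CalcFourierDescriptorCoeff() and CalcBoundary() are not defined in this snippet. Conceptually they transform the contour, treated as a complex signal z(n) = x(n) + j*y(n), into DFT coefficients and back, so that keeping only the low-order coefficients reconstructs a smoothed boundary (drawn into `show` above). A minimal sketch of the forward step under that assumption:

#include <math.h>
#include <opencv/cv.h>

/* k-th Fourier descriptor of a closed boundary of n points:
 * a(k) = (1/n) * sum_i z(i) * exp(-j*2*pi*k*i/n), with z(i) = x(i) + j*y(i) */
void fourier_coeff(const CvPoint* pts, int n, int k, double* re, double* im)
{
	double sr = 0.0, si = 0.0;
	int i;
	for (i = 0; i < n; i++) {
		double a = -2.0 * CV_PI * k * i / n;
		sr += pts[i].x * cos(a) - pts[i].y * sin(a);
		si += pts[i].x * sin(a) + pts[i].y * cos(a);
	}
	*re = sr / n;
	*im = si / n;
}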
Example #30
0
bool Robot::adjustWorldCoordinate(IplImage* image, double coordAdjustRate)
{
    IplImage *img;
    IplImage* src1;// the original allocated an image here and then overwrote the pointer in both branches below, leaking it
    if(image->nChannels==3)
    {
        IplImage *hsv_img = get_hsv(image);
        img=worldMap.getField(hsv_img);
        cvReleaseImage(&hsv_img);
        src1=img;
    }
    else
    {
        img=image;
        src1=img;
        //cvCvtColor(img, src1, CV_BGR2GRAY);
    }
		if( img != 0 )
		{
			IplImage* dst = cvCreateImage( cvGetSize(img), 8, 1 );
			IplImage* color_dst = cvCreateImage( cvGetSize(img), 8, 3 );
			CvMemStorage* storage = cvCreateMemStorage(0);
			CvSeq* ls = 0;
			int i;
			cvCanny( src1, dst, 50, 200, 3 );

			cvCvtColor( dst, color_dst, CV_GRAY2BGR );

			ls = cvHoughLines2( dst, storage, CV_HOUGH_PROBABILISTIC, 2, CV_PI/90, 20, 5, 30 );
			//ls = cvHoughLines2( dst, storage, CV_HOUGH_PROBABILISTIC, 5, CV_PI/30, 10, 20, 5 );
			vector<myLine> tmplines;
			for( i = 0; i < ls->total; i++ )
			{
				CvPoint* tmpl = (CvPoint*)cvGetSeqElem(ls,i);
				cvLine( color_dst, tmpl[0], tmpl[1], CV_RGB(255,0,0), 1, 8 );

                cv::Point2f tmpp[2];
                cv::Point2f scrPos(tmpl[0].x,tmpl[0].y);
                cv::Point2f roboPos=worldMap.coord_screen2robot(scrPos,true);
                cv::Point2f worldPos=worldMap.coord_robot2world(roboPos);
                tmpp[0]=worldPos;
                scrPos=cv::Point2f(tmpl[1].x,tmpl[1].y);
                roboPos=worldMap.coord_screen2robot(scrPos,true);
                worldPos=worldMap.coord_robot2world(roboPos);
                tmpp[1]=worldPos;
                myLine templ(tmpp[0],tmpp[1]);
                if(templ.l>LINE_LENGTH_LBOUND)
                    tmplines.push_back(templ);
//				//printf("length=%f angle=%f\n",sqrt(float((tmpl[1].y-tmpl[0].y)*(tmpl[1].y-tmpl[0].y));
				//	+float((tmpl[1].x-tmpl[0].x)*(tmpl[1].x-tmpl[0].x)))
				//	,atan2(float(tmpl[1].y-tmpl[0].y),float(tmpl[1].x-tmpl[0].x)));
			}
			//printf("\n");
			cvNamedWindow( "Source", 1 );
			cvShowImage( "Source", img );

			cvNamedWindow( "Hough", 1 );
			cvShowImage( "Hough", color_dst );

			cvWaitKey(10);
			cvReleaseImage(&dst);
			if(src1 != image)
				cvReleaseImage(&src1);// src1 aliases img; never free the caller's image
			cvReleaseImage(&color_dst);
			cvReleaseMemStorage(&storage);
			if(coordAdjustRate==0)
			{
                for(i=0;i<tmplines.size();++i)
                {
                    lines.push_back(tmplines[i]);
                }
			}
			else if(coordAdjustRate==2)
			{
                for(i=0;i<tmplines.size();++i)
                {
                    lines.push_back(tmplines[i]);
                }
                //vector<double> oris;
                vector<int> lineNums;
                vector<double> lineValues;
                int groupId=0;
			    for(i=0;i<lines.size();++i)
			    {
			        bool classified=false;
			        int j;
			        for(j=0;j<i;++j)
			        {
			            double angle=lines[i].theta-lines[j].theta+CV_PI/4.0;   //add 45 degrees so the angles of interest
                                                                                //fall in the middle of a 90-degree phase
			            if(angle<0)
                            angle+=CV_PI*2.0;
			            int phase=(int)(angle/(CV_PI/2.0));
			            double angle90=angle-CV_PI/2.0*(double)phase;
			            phase%=2;
			            if(fabs(angle90-CV_PI/4.0)<CV_PI/60.0)//subtract the added 45 degrees; fabs(), since plain abs() truncates doubles to int
			            {
			                lines[i].clsId=lines[j].clsId/2*2+phase;
			                ++lineNums[lines[i].clsId];
			                lineValues[lines[i].clsId]+=lines[i].l;
			                classified=true;
			                break;
			            }
			        }
			        if(classified==false)
			        {
			            lines[i].clsId=groupId;
                        lineNums.push_back(1);
                        lineNums.push_back(0);
                        lineValues.push_back(lines[i].l);
                        lineValues.push_back(0);
			            groupId+=2;
			        }
			    }
			    int maxValueGroup=0;
			    double maxValue=0;
			    for(i=0;i<lineNums.size();i+=2)
			    {
			        if(lineValues[i]+lineValues[i+1]>maxValue)
			        {
			            maxValue=lineValues[i]+lineValues[i+1];
			            maxValueGroup=i;
			        }
			    }
			    maxValueGroup/=2;
			    double sumAngle=0;
			    double sumL=0;
			    for(i=0;i<lines.size();++i)
			    {
			        if(lines[i].clsId/2==maxValueGroup)
			        {
			            double angle=lines[i].theta+CV_PI/4.0;//same strategy: add 45 degrees
			            if(angle<0)
                            angle+=CV_PI*2.0;
			            double angle90=angle-CV_PI/2.0*(double)((int)(angle/(CV_PI/2.0)));
			            sumAngle+=(angle90-CV_PI/4.0)*lines[i].l;//subtract the 45 degrees again
			            sumL+=lines[i].l;
			        }
			    }
			    if(sumL==0)
			    {
                    //printf("false 2 sumL=0\n");
			        return false;
			    }
			    mainAngle=sumAngle/sumL;
			    mainGroupId=maxValueGroup;
			    //printf("mainAngle=%f mainGroupId=%d\n",mainAngle,mainGroupId);
			}
			else if(coordAdjustRate==1)
            {
                CvRect bBox=worldMap.getMap_bbox();
                    //printf("in func param=1\n");
                    //printf("tmplines.size=%d\n",tmplines.size());
                for(i=0;i<tmplines.size();++i)
                {
                    cv::Point2f imgPos=world2image(tmplines[i].p[0]);
                    if(!(imgPos.x>bBox.x-BBOX_DELTA && imgPos.x<bBox.x+bBox.width+BBOX_DELTA && imgPos.y>bBox.y-BBOX_DELTA && imgPos.y<bBox.y+bBox.height+BBOX_DELTA))
                        continue;
			        bool classified=false;
			        double minAngle=CV_PI;
			        int minAnglePhase=0;
			        int bestJ=-1;
			        int j;
			        for(j=0;j<lines.size();++j)
			        {
			            if(lines[j].clsId/2!=mainGroupId)
                            continue;
			            double angle=tmplines[i].theta-lines[j].theta+CV_PI/4.0;   //add 45 degrees so the angles of interest
                                                                                //fall in the middle of a 90-degree phase
			            if(angle<0)
                            angle+=CV_PI*2.0;
			            int phase=(int)(angle/(CV_PI/2.0));
			            double angle90=angle-CV_PI/2.0*(double)phase;
			            phase%=2;
			            if(fabs(angle90-CV_PI/4.0)<minAngle)//subtract the added 45 degrees
			            {
			                minAngle=fabs(angle90-CV_PI/4.0);
			                bestJ=j;
                                minAnglePhase=phase;
			            }
			        }
			        if(bestJ>-1)
			        {
			            //if(minAngle<CV_PI/6.0)
                        tmplines[i].clsId=mainGroupId*2+minAnglePhase;
                        classified=true;
                        //printf("nearest main ori found. angle diff=%f\n",minAngle);
			        }
			    }
			    double sumAngle=0;
			    double sumL=0;
			    for(i=0;i<tmplines.size();++i)
			    {
			        if(tmplines[i].clsId/2==mainGroupId)
			        {
                    //printf("comparing with a main line..i=%d\n",i);
			            double angle=tmplines[i].theta+CV_PI/4.0;//same strategy: add 45 degrees
			            if(angle<0)
                            angle+=CV_PI*2.0;
			            double angle90=angle-CV_PI/2.0*double((int)(angle/(CV_PI/2.0)));
			            sumAngle+=angle90*tmplines[i].l;//keep the 45-degree shift here; it is removed after the loop
			            sumL+=tmplines[i].l;
			        }
			    }
			    if(sumL<LINE_LENGTH_SUM_LBOUND)
			    {
                    //printf("false sumL=%f<%d\n",sumL,LINE_LENGTH_SUM_LBOUND);
			        return false;
			    }
			    double curAngle=sumAngle/sumL-CV_PI/4.0;//subtract the 45 degrees
			    ori+=curAngle-mainAngle;
                    //printf("true oriChange=%f\n",curAngle-mainAngle);
            }
		}

    return true;
}
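Note: the shift-by-45-degrees folding appears three times above. The idea: grid lines whose angles differ by a multiple of 90 degrees should compare equal, so the angle is shifted by pi/4 (moving the values of interest away from the phase boundary), folded into a 90-degree phase, and the pi/4 is subtracted again. A minimal sketch of the same logic as one helper (names are illustrative, not from the original code):

#include <opencv/cv.h>   // for CV_PI

struct Folded { double dev; int phase; };

// Fold an angle (radians) onto the nearest multiple of 90 degrees:
// dev is the signed deviation, phase (0/1) tells which of the two
// perpendicular line families the angle belongs to.
static Folded fold90(double theta)
{
	double angle = theta + CV_PI / 4.0;        // shift so values of interest sit mid-phase
	if (angle < 0)
		angle += CV_PI * 2.0;
	int phase = (int)(angle / (CV_PI / 2.0));  // 90-degree phase index
	double angle90 = angle - CV_PI / 2.0 * (double)phase;
	Folded f;
	f.dev = angle90 - CV_PI / 4.0;             // undo the 45-degree shift
	f.phase = phase % 2;                       // two perpendicular families
	return f;
}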