Code Example #1
File: OpenCVImage.cpp Project: JasonHagler/tortuga
void OpenCVImage::setPixelFormat(Image::PixelFormat format)
{
    assert(format != PF_START && "No format specified");
    assert(format != PF_END   && "No format specified");
    assert(m_own && "Must have ownership of the image to change its format");

    /**
     * Lookup table for conversion codes
     * Invalid conversions labeled with -1
     * Conversions to the same colorspace labeled with -2
     * Conversions on a sentinel value are ALWAYS invalid
     * Macros are defined in cv.h
     *
     * Table is constructed so the row is the current colorspace,
     * column is the colorspace to convert to.
     */
    static const int lookupTable[11][11] = {
        /* Sentinel value */
        {-1,         -1,         -1,-1,         -1,        -1,        -1,        -1, -1, -1, -1},
        /* RGB */
        {-1,         -2, CV_RGB2BGR,-1,CV_RGB2GRAY,CV_RGB2HSV,CV_RGB2Luv, RGB2LCHUV, -1, -1, -1},
        /* BGR */
        {-1, CV_BGR2RGB,         -2,-1,CV_BGR2GRAY,CV_BGR2HSV,CV_BGR2Luv,        -1, -1, -1, -1},
        /* YUV */
        {-1,         -1,         -1,-2,         -1,        -1,        -1,        -1, -1, -1, -1},
        /* Grayscale */
        {-1,CV_GRAY2RGB,CV_GRAY2BGR,-1,         -2,        -1,        -1,        -1, -1, -1, -1},
        /* HSV */ 
        {-1, CV_HSV2RGB, CV_HSV2BGR,-1,         -1,        -2,        -1,        -1, -1, -1, -1},
        /* CIELUV */
        {-1, CV_Luv2RGB, CV_Luv2BGR,-1,         -1,        -1,        -2,        -1, -1, -1, -1},
        /* CIELCh_uv */
        {-1,         -1,         -1,-1,         -1,        -1,        -1,        -2, -1, -1, -1},
        /* CIELAB */
        {-1,         -1,         -1,-1,         -1,        -1,        -1,        -1, -2, -1, -1},
        /* CIELCh_ab */
        {-1,         -1,         -1,-1,         -1,        -1,        -1,        -1, -1, -2, -1},
        /* Sentinel value */
        {-1,         -1,         -1,-1,         -1,        -1,        -1,        -1, -1, -1, -2}
    };

    int code = lookupTable[m_fmt][format];
    if(code == RGB2LCHUV) {
        LCHConverter::convert(this);
        m_fmt = Image::PF_LCHUV_8;
    } else if (code == -1) {
        throw ImageConversionException(m_fmt, format);
    } else if (code != -2) {
        // If the number of channels or depth change, we need a new image
        int depth = getDepth();
        int channels = getNumChannels();

        int newDepth = getFormatDepth(format);
        int newChannels = getFormatNumChannels(format);

        if (depth != newDepth || channels != newChannels) {
            // Create a new image with the new depth/channels
            IplImage* newImg = cvCreateImage(cvGetSize(m_img),
                                             newDepth, newChannels);
            cvCvtColor(m_img, newImg, code);

            // Delete old image data
            if (m_data) {
                cvReleaseImageHeader(&m_img);
                delete[] m_data;
                m_data = NULL;
            } else {
                cvReleaseImage(&m_img);
            }

            // Assign modified image as current image
            m_img = newImg;
        } else {
            cvCvtColor(m_img, m_img, code);
        }

        // Change the format flag
        m_fmt = format;
    }
}
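The reallocation branch above exists because cvCvtColor cannot change an image's channel count or depth in place. A minimal standalone sketch of that path, using only the OpenCV 1.x C API (the 640x480 size is a placeholder):

IplImage* src  = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3); /* 3-channel BGR */
IplImage* gray = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);   /* 1-channel destination */
cvCvtColor(src, gray, CV_BGR2GRAY); /* channel count changes, so a separate image is required */
/* same-channel conversions such as CV_BGR2HSV may reuse the source buffer, as above */
cvReleaseImage(&src);
cvReleaseImage(&gray);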
Code Example #2
int main( int argc, char** argv )
{
	int wcam = -1;
	CvCapture* capture = 0;
	IplImage *frame = 0;
	IplImage *frame_resized = 0;
	IplImage *frame_resized2 = 0;
	
    CvSize min_window_size, max_window_size;
    min_window_size.width = 40;
    min_window_size.height = 40;
    max_window_size.width = 0;
    max_window_size.height = 0;
    
	cvNamedWindow(win_face, 1);
    
	// Load the file with the information on what to find
	cascade = (CvHaarClassifierCascade*)cvLoad(file_xml, 0, 0, 0);
    
	// Allocate the memory for processing the data
	storage = cvCreateMemStorage(0);
	
	/*if(!(capture = cvCaptureFromCAM(wcam)))
	{
		printf("Impossibile aprire la webcam.\n");
		return -1;
	}*/
    
    CvSize window_size = cvSize(640, 480);
    frame = cvLoadImage("/Users/Gabriele/Desktop/jobs.jpeg");
    frame_resized = cvCreateImage(window_size, frame->depth, 3);
    frame_resized2 = cvCreateImage(window_size, frame->depth, 3);
    cvResize(frame, frame_resized);
    
    CLODEnvironmentData* data = clodInitEnvironment(0);
    clifInitBuffers(data->clif, frame_resized->width, frame_resized->height, frame_resized->widthStep, 3);
    clodInitBuffers(data, &window_size);
    
    ElapseTime t;
    
    /* Test grayscale */    
    IplImage* grayscale = cvCreateImage(cvSize(frame_resized->width, frame_resized->height), IPL_DEPTH_8U, 1);
    cvCvtColor(frame_resized, grayscale, CV_BGR2GRAY);
    
    CvMat* sim = cvCreateMat(frame_resized->height + 1, frame_resized->width + 1, CV_32SC1);
    CvMat* sqim = cvCreateMat(frame_resized->height + 1, frame_resized->width + 1, CV_64FC1);
    cvIntegral(grayscale, sim, sqim);
    double temp = sqim->data.db[2000];
    
    CLIFIntegralResult r = clifIntegral(frame_resized, data->clif, CL_TRUE);
    cl_ulong temp2 = ((unsigned long*)r.square_image->data.db)[2000];
    
    cvCopyImage(frame_resized, frame_resized2);
    t.start();
    find_faces_rect_opencv(frame_resized2, min_window_size, max_window_size);
    printf("OpenCV: %8.4f ms\n", t.get());
    cvShowImage("Sample OpenCV", frame_resized2);
        
    cvCopyImage(frame_resized, frame_resized2);
    t.start();
    find_faces_rect_opencl(frame_resized2, data, min_window_size, max_window_size, CLOD_PER_STAGE_ITERATIONS | CLOD_PRECOMPUTE_FEATURES, CL_FALSE);
    printf("OpenCL (optimized): %8.4f ms\n", t.get());
    cvShowImage("Sample OpenCL (optimized)", frame_resized2);
    cvCopyImage(frame_resized, frame_resized2);
    t.start();
    find_faces_rect_opencl(frame_resized2, data, min_window_size, max_window_size, CLOD_PRECOMPUTE_FEATURES, CL_TRUE);
    printf("                    %8.4f ms (block)\n", t.get());
    cvShowImage("Sample OpenCL (optimized, block)", frame_resized2);
    
    cvCopyImage(frame_resized, frame_resized2);
    t.start();
    find_faces_rect_opencl(frame_resized2, data, min_window_size, max_window_size, CLOD_PRECOMPUTE_FEATURES | CLOD_PER_STAGE_ITERATIONS, CL_FALSE);
    printf("OpenCL (per-stage): %8.4f ms\n", t.get());
    cvShowImage("Sample OpenCL (per-stage)", frame_resized2);
    cvCopyImage(frame_resized, frame_resized2);
    t.start();
    find_faces_rect_opencl(frame_resized2, data, min_window_size, max_window_size, CLOD_PRECOMPUTE_FEATURES | CLOD_PER_STAGE_ITERATIONS, CL_TRUE);
    printf("                    %8.4f ms (block)\n", t.get());
    cvShowImage("Sample OpenCL (per-stage, block)", frame_resized2);
    
    //frame_resized->imageData =
    //printf("OpenCL (per-stage, optimized): %8.4f ms\n", t.get());
    //cvShowImage("Sample OpenCL (per-stage, optimized)", frame2);
    
    //detect_faces(frame, &data);
    /*
	while(1)
	{
		// Look for the faces
        
        
        char fps[1024] = { 0 };
        sprintf(fps, "FPS: %4.2f", (1000.0) / (double)(end - start));
        CvFont font;
        cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 1.0f, 1.0f);
        CvPoint point;
        point.x = 10;
        point.y = frame->height - 10;
        CvScalar scalar = { 255,255,255,1 };
        cvPutText(frame, fps, point, &font, scalar);
		cvShowImage(win_face, frame);
		
		frame = cvQueryFrame(capture);
        
		if( (cvWaitKey(10) & 255) == 27 ) break;
	}
    */
    clodReleaseBuffers(data);
    clodReleaseEnvironment(data);
    free(data);
    
	cvReleaseImage(&frame);
	cvReleaseImage(&frame_resized);
	cvReleaseCapture(&capture);
	cvDestroyWindow(win_face);
    
	return 0;
}
Code Example #3
File: CCVEffect.cpp Project: zphilip/VirtualTS
void CHandDrawEffect::DrawEdge(IplImage* image, IplImage* image2, IplImage* base, int plane)
{
	CvSeq* contourSeq0 = NULL;

	int height    = image->height;
	int width     = image->width;
	int step      = image->widthStep;
	int channels  = image->nChannels;
	uchar* data   = (uchar*)image->imageData;

	if(plane < 3) {
		cvCvtColor(image, hsv, CV_BGR2HSV);				// use this HSV plane as the source for the line drawing
		for(int i = 0; i < height * width; i++)
			grayImage->imageData[i] = hsv->imageData[i * 3 + plane];
	} else {
		cvCvtColor(image, grayImage, CV_BGR2GRAY);		// make a grayscale image as the source for the line drawing
	}

	IplImage* target = base;					// target image to draw into

	for(int x = 20; x < 240; x += Y) {
		cvThreshold(grayImage, binaryImage, x, 255, CV_THRESH_BINARY);	// binarize at threshold x and extract the contours
		contourSeq0 = 0;
		cvFindContours(binaryImage, memStorage0, &contourSeq0, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0, 0)); // contour search

		if(lineNoise > 0) { // discontinuous-line case
			for(; contourSeq0 != 0; contourSeq0 = contourSeq0->h_next) {
				CvPoint *p;
				if(contourSeq0->total < X * 5) continue;		// discard small contours (smaller than a pentagon)

				int index = 0;
				for(int i = 0; i < contourSeq0->total; i += X) {
					p = CV_GET_SEQ_ELEM(CvPoint, contourSeq0, i);				// register the point's position and color
					CvScalar color = GetColor(image2, p->x, p->y);
					MulScaler(color, DARK);										// adjust the brightness
					color.val[3] = CheckPoint(image, p->x, p->y, lineNoise);	// judge from neighboring pixels whether this point is valid, and store the flag in [3]
					SetPoint(index, p, color);									// save into pointTable
					index++;
					if(index > MAX_POINT) {
					//	printf("INDEX ERROR\n"); 
						index = 0;
					}
				}
				// Runs of five or fewer consecutive valid points are invalidated (pending: speed-up)
				for(int i = 0; i < index; i++) {
					int p1 = i;
					int p2, p3, p4, p0;
					if(pointTable[p1].color.val[3]) {
						p2 = (p1 + 1) % index;
						p3 = (p1 + 2) % index;
						p4 = (p1 + 3) % index;
						p0 = (p1 - 1 + index) % index;
						if(pointTable[p0].color.val[3]) continue;
						if(!pointTable[p2].color.val[3] ||
							!pointTable[p3].color.val[3] ||
							!pointTable[p4].color.val[3]) {						
							pointTable[p1].color.val[3] = 0;
						}
					}
				}
				// Draw the connected valid points
				for(int i = 0; i < index; i++) {
					int p1 = i;
					int p2 = (i + 1) % index;	// if (p2==index) p2 = 0;
					if(pointTable[p1].color.val[3] && pointTable[p2].color.val[3]) {
						CvScalar c = pointTable[p1].color;
						MulScaler(c, DARK);
						cvLine(target, pointTable[p1].p, pointTable[p2].p, c, lineWidth, CV_AA);
					}
				}
			}
		} else {
			// Case: draw everything
			for(; contourSeq0 != 0; contourSeq0 = contourSeq0->h_next) {

				CvPoint *p1 = 0;
				CvPoint *p2;

				if(contourSeq0->total < X * 5) continue;		

				for(int i = 0; i < contourSeq0->total; i += X) {
					p1 = CV_GET_SEQ_ELEM(CvPoint, contourSeq0, (i) % contourSeq0->total); // start point
					p2 = CV_GET_SEQ_ELEM(CvPoint, contourSeq0, (i + X + Z) % contourSeq0->total); // end point
					CvScalar color = GetColor(image, p1->x, p1->y);
					MulScaler(color, DARK);
					cvLine(target, *p1, *p2, color, lineWidth, CV_AA);
				}
			}
		}
	}
	cvClearMemStorage(memStorage0);
}
Code Example #4
File: achesscorners.cpp Project: JackJone/opencv
/* ///////////////////// chess_corner_test ///////////////////////// */
void CV_ChessboardDetectorTest::run( int start_from )
{
    int code = CvTS::OK;

#ifndef WRITE_POINTS    
    const double rough_success_error_level = 2.5;
    const double precise_success_error_level = 0.2;
    double err = 0, max_rough_error = 0, max_precise_error = 0;
#endif

    /* test parameters */
    char   filepath[1000];
    char   filename[1000];

    CvMat*  _u = 0;
    CvMat*  _v = 0;
    CvPoint2D32f* u;
    CvPoint2D32f* v;

    IplImage* img = 0;
    IplImage* gray = 0;
    IplImage* thresh = 0;

    int  idx, max_idx;
    int  progress = 0;

    sprintf( filepath, "%scameracalibration/", ts->get_data_path() );
    sprintf( filename, "%schessboard_list.dat", filepath );
    CvFileStorage* fs = cvOpenFileStorage( filename, 0, CV_STORAGE_READ );
    CvFileNode* board_list = fs ? cvGetFileNodeByName( fs, 0, "boards" ) : 0;

    if( !fs || !board_list || !CV_NODE_IS_SEQ(board_list->tag) ||
        board_list->data.seq->total % 2 != 0 )
    {
        ts->printf( CvTS::LOG, "chessboard_list.dat can not be readed or is not valid" );
        code = CvTS::FAIL_MISSING_TEST_DATA;
        goto _exit_;
    }

    max_idx = board_list->data.seq->total/2;

    for( idx = start_from; idx < max_idx; idx++ )
    {
        int etalon_count = -1;
        int count = 0;
        CvSize etalon_size = { -1, -1 };
        int j, result;
        
        ts->update_context( this, idx-1, true );

        /* read the image */
        sprintf( filename, "%s%s", filepath,
            cvReadString((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*2),"dummy.txt"));
    
        img = cvLoadImage( filename );
        
        if( !img )
        {
            ts->printf( CvTS::LOG, "one of chessboard images can't be read: %s", filename );
            if( max_idx == 1 )
            {
                code = CvTS::FAIL_MISSING_TEST_DATA;
                goto _exit_;
            }
            continue;
        }

        gray = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );
        thresh = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );
        cvCvtColor( img, gray, CV_BGR2GRAY );
 
        sprintf( filename, "%s%s", filepath,
            cvReadString((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*2+1),"dummy.txt"));

        _u = (CvMat*)cvLoad( filename );

        if( _u == 0 )
        {
            if( idx == 0 )
                ts->printf( CvTS::LOG, "one of chessboard corner files can't be read: %s", filename ); 
            if( max_idx == 1 )
            {
                code = CvTS::FAIL_MISSING_TEST_DATA;
                goto _exit_;
            }
            continue;
        }

        etalon_size.width = _u->cols;
        etalon_size.height = _u->rows;
        etalon_count = etalon_size.width*etalon_size.height;

        /* allocate additional buffers */
        _v = cvCloneMat( _u );
        count = etalon_count;

        u = (CvPoint2D32f*)_u->data.fl;
        v = (CvPoint2D32f*)_v->data.fl;

        OPENCV_CALL( result = cvFindChessBoardCornerGuesses(
                     gray, thresh, 0, etalon_size, v, &count ));

        //show_points( gray, 0, etalon_count, v, count, etalon_size, result );
        if( !result || count != etalon_count )
        {
            ts->printf( CvTS::LOG, "chess board is not found" );
            code = CvTS::FAIL_INVALID_OUTPUT;
            goto _exit_;
        }

#ifndef WRITE_POINTS
        err = 0;
        for( j = 0; j < etalon_count; j++ )
        {
            double dx = fabs( v[j].x - u[j].x );
            double dy = fabs( v[j].y - u[j].y );

            dx = MAX( dx, dy );
            if( dx > err )
            {
                err = dx;
                if( err > rough_success_error_level )
                {
                    ts->printf( CvTS::LOG, "bad accuracy of corner guesses" );
                    code = CvTS::FAIL_BAD_ACCURACY;
                    goto _exit_;
                }
            }
        }
        max_rough_error = MAX( max_rough_error, err );
#endif
        OPENCV_CALL( cvFindCornerSubPix( gray, v, count, cvSize( 5, 5 ), cvSize(-1,-1),
                            cvTermCriteria(CV_TERMCRIT_EPS|CV_TERMCRIT_ITER,30,0.1)));
        //show_points( gray, u + 1, etalon_count, v, count );

#ifndef WRITE_POINTS
        err = 0;
        for( j = 0; j < etalon_count; j++ )
        {
            double dx = fabs( v[j].x - u[j].x );
            double dy = fabs( v[j].y - u[j].y );

            dx = MAX( dx, dy );
            if( dx > err )
            {
                err = dx;
                if( err > precise_success_error_level )
                {
                    ts->printf( CvTS::LOG, "bad accuracy of adjusted corners" ); 
                    code = CvTS::FAIL_BAD_ACCURACY;
                    goto _exit_;
                }
            }
        }
        max_precise_error = MAX( max_precise_error, err );
#else
        cvSave( filename, _v );
#endif
        cvReleaseMat( &_u );
        cvReleaseMat( &_v );
        cvReleaseImage( &img );
        cvReleaseImage( &gray );
        cvReleaseImage( &thresh );
        progress = update_progress( progress, idx-1, max_idx, 0 );
    }

_exit_:

    /* release occupied memory */
    cvReleaseMat( &_u );
    cvReleaseMat( &_v );
    cvReleaseFileStorage( &fs );
    cvReleaseImage( &img );
    cvReleaseImage( &gray );
    cvReleaseImage( &thresh );

    if( code < 0 )
        ts->set_failed_test_info( code );
}
Code Example #5
File: latentsvmdetector.cpp Project: 2693/opencv
/*
// find rectangular regions in the given image that are likely
// to contain objects and corresponding confidence levels
//
// API
// CvSeq* cvLatentSvmDetectObjects(const IplImage* image,
//                                  CvLatentSvmDetector* detector,
//                                  CvMemStorage* storage,
//                                  float overlap_threshold = 0.5f,
//                                  int numThreads = -1);
// INPUT
// image                - image to detect objects in
// detector             - Latent SVM detector in internal representation
// storage              - memory storage to store the resultant sequence
//                          of the object candidate rectangles
// overlap_threshold    - threshold for the non-maximum suppression algorithm [here will be the reference to original paper]
// OUTPUT
// sequence of detected objects (bounding boxes and confidence levels stored in CvObjectDetection structures)
*/
CvSeq* cvLatentSvmDetectObjects(IplImage* image,
                                CvLatentSvmDetector* detector,
                                CvMemStorage* storage,
                                float overlap_threshold, int numThreads)
{
    CvLSVMFeaturePyramid *H = 0;
    CvPoint *points = 0, *oppPoints = 0;
    int kPoints = 0;
    float *score = 0;
    unsigned int maxXBorder = 0, maxYBorder = 0;
    int numBoxesOut = 0;
    CvPoint *pointsOut = 0;
    CvPoint *oppPointsOut = 0;
    float *scoreOut = 0;
    CvSeq* result_seq = 0;
    int error = 0;

    if(image->nChannels == 3)
        cvCvtColor(image, image, CV_BGR2RGB);

    // Getting maximum filter dimensions
    getMaxFilterDims((const CvLSVMFilterObject**)(detector->filters), detector->num_components,
                     detector->num_part_filters, &maxXBorder, &maxYBorder);
    // Create feature pyramid with nullable border
    H = createFeaturePyramidWithBorder(image, maxXBorder, maxYBorder);
    // Search object
    error = searchObjectThresholdSomeComponents(H, (const CvLSVMFilterObject**)(detector->filters),
        detector->num_components, detector->num_part_filters, detector->b, detector->score_threshold,
        &points, &oppPoints, &score, &kPoints, numThreads);
    if (error != LATENT_SVM_OK)
    {
        return NULL;
    }
    // Clipping boxes
    clippingBoxes(image->width, image->height, points, kPoints);
    clippingBoxes(image->width, image->height, oppPoints, kPoints);
    // NMS procedure
    nonMaximumSuppression(kPoints, points, oppPoints, score, overlap_threshold,
                &numBoxesOut, &pointsOut, &oppPointsOut, &scoreOut);

    result_seq = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvObjectDetection), storage );

    for (int i = 0; i < numBoxesOut; i++)
    {
        CvObjectDetection detection = {{0, 0, 0, 0}, 0};
        detection.score = scoreOut[i];
        CvRect bounding_box = {0, 0, 0, 0};
        bounding_box.x = pointsOut[i].x;
        bounding_box.y = pointsOut[i].y;
        bounding_box.width = oppPointsOut[i].x - pointsOut[i].x;
        bounding_box.height = oppPointsOut[i].y - pointsOut[i].y;
        detection.rect = bounding_box;
        cvSeqPush(result_seq, &detection);
    }

    if(image->nChannels == 3)
        cvCvtColor(image, image, CV_RGB2BGR);

    freeFeaturePyramidObject(&H);
    free(points);
    free(oppPoints);
    free(score);

    return result_seq;
}
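A hedged usage sketch for the function above. The model and image paths are placeholders; cvLoadLatentSvmDetector and cvReleaseLatentSvmDetector come from the same latentsvm module, and CvObjectDetection's rect/score fields are exactly the ones filled in by the loop above:

IplImage* img = cvLoadImage("image.jpg", CV_LOAD_IMAGE_COLOR);
CvLatentSvmDetector* detector = cvLoadLatentSvmDetector("model.xml");
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* detections = cvLatentSvmDetectObjects(img, detector, storage, 0.5f, -1);
for (int i = 0; i < detections->total; i++) {
    CvObjectDetection d = *(CvObjectDetection*)cvGetSeqElem(detections, i);
    printf("box (%d,%d) %dx%d score %f\n",
           d.rect.x, d.rect.y, d.rect.width, d.rect.height, d.score);
}
cvReleaseMemStorage(&storage);
cvReleaseLatentSvmDetector(&detector);
cvReleaseImage(&img);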
Code Example #6
File: lane_detector.cpp Project: 794523332/Autoware
static void process_image_common(IplImage *frame)
{
  CvFont font;
  cvInitFont(&font, CV_FONT_VECTOR0, 0.25f, 0.25f);

  CvSize video_size;
#if defined(USE_POSIX_SHARED_MEMORY)
  video_size.height = *shrd_ptr_height;
  video_size.width  = *shrd_ptr_width;
#else
  // XXX These parameters should be set as ROS parameters
  video_size.height = frame->height;
  video_size.width  = frame->width;
#endif
  CvSize    frame_size = cvSize(video_size.width, video_size.height/2);
  IplImage *temp_frame = cvCreateImage(frame_size, IPL_DEPTH_8U, 3);
  IplImage *gray       = cvCreateImage(frame_size, IPL_DEPTH_8U, 1);
  IplImage *edges      = cvCreateImage(frame_size, IPL_DEPTH_8U, 1);
  IplImage *half_frame = cvCreateImage(cvSize(video_size.width/2, video_size.height/2), IPL_DEPTH_8U, 3);

  CvMemStorage *houghStorage = cvCreateMemStorage(0);

  cvPyrDown(frame, half_frame, CV_GAUSSIAN_5x5); // Reduce the image by 2

  /* we're interested only in the road below the horizon, so crop the top portion of the image off */
  crop(frame, temp_frame, cvRect(0, frame_size.height, frame_size.width, frame_size.height));
  cvCvtColor(temp_frame, gray, CV_BGR2GRAY); // convert to grayscale

  /* Perform a Gaussian blur & detect edges */
  // smooth the image more strongly than the original program
  cvSmooth(gray, gray, CV_GAUSSIAN, 15, 15);
  cvCanny(gray, edges, CANNY_MIN_TRESHOLD, CANNY_MAX_TRESHOLD);

  /* do Hough transform to find lanes */
  double rho = 1;
  double theta = CV_PI/180;
  CvSeq *lines = cvHoughLines2(edges, houghStorage, CV_HOUGH_PROBABILISTIC,
                               rho, theta, HOUGH_TRESHOLD, HOUGH_MIN_LINE_LENGTH, HOUGH_MAX_LINE_GAP);

  processLanes(lines, edges, temp_frame, frame);

#ifdef SHOW_DETAIL
  /* show middle line */
  cvLine(temp_frame, cvPoint(frame_size.width/2, 0),
         cvPoint(frame_size.width/2, frame_size.height), CV_RGB(255, 255, 0), 1);

  // cvShowImage("Gray", gray);
  // cvShowImage("Edges", edges);
  // cvShowImage("Color", temp_frame);
  // cvShowImage("temp_frame", temp_frame);
  // cvShowImage("frame", frame);
#endif

#if defined(USE_POSIX_SHARED_MEMORY)
  setImage_toSHM(frame);
#endif

#ifdef SHOW_DETAIL
  // cvMoveWindow("Gray", 0, 0);
  // cvMoveWindow("Edges", 0, frame_size.height+25);
  // cvMoveWindow("Color", 0, 2*(frame_size.height+25));
#endif

  cvReleaseMemStorage(&houghStorage);
  cvReleaseImage(&gray);
  cvReleaseImage(&edges);
  cvReleaseImage(&temp_frame);
  cvReleaseImage(&half_frame);
}
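processLanes is defined elsewhere, but the probabilistic Hough output above has a documented layout: each sequence element is a pair of CvPoint endpoints. A minimal sketch of consuming it, reusing the local names from the function above:

for (int i = 0; i < lines->total; i++) {
    CvPoint* line = (CvPoint*)cvGetSeqElem(lines, i); /* line[0], line[1] are the segment endpoints */
    cvLine(temp_frame, line[0], line[1], CV_RGB(255, 0, 0), 2, 8, 0);
}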
Code Example #7
File: ImgProducer.cpp Project: elie-moussy/LibPF
void ImgProducer::calcGRAY()
{
  cvCvtColor(imgSRC,img[idGRAY],CV_BGR2GRAY);
  imgOK[idGRAY] = 1;
}
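cvCvtColor requires the destination to be pre-allocated with the correct channel count, so img[idGRAY] is assumed to have been created elsewhere, roughly as in this sketch (member names taken from the usage above):

/* assumed one-time allocation of the single-channel grayscale slot */
img[idGRAY] = cvCreateImage(cvGetSize(imgSRC), IPL_DEPTH_8U, 1);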
Code Example #8
File: main.c Project: Zazcallabah/dotdetector
// Runs the dot detector and sends detected dots to the server on the given port. TODO Implement headless; needs more config options and/or possibly a config file first though
int run( const char *serverAddress, const int serverPort, char headless ) {
    char calibrate_exposure = 0, show = ~0, flip = 0, vflip = 0, done = 0, warp = 0; //"Boolean" values used in this loop
    char noiceReduction = 2; //Small counter, so char is still ok.
    int i, sockfd; //Generic counter and socket file descriptor
    int dp = 0, minDist = 29, param1 = 0, param2 = 5; // Configuration variables for circle detection 
    int minDotRadius = 1;
    int detected_dots; //Detected dot counter
    int returnValue = EXIT_SUCCESS;
    int captureControl; //File descriptor for low-level camera controls
    int currentExposure = 150;
    int maxExposure = 1250; //Maximum exposure supported by the camera TODO Get this from the actual camera
    Color min = { 0, 70, 0, 0 }; //Minimum color to detect
    Color max = { 255, 255, 255, 0 }; //Maximum color to detect
    CvScalar colorWhite = cvScalar( WHITE ); //Color to draw detected dots on black and white surface
    BoundingBox DD_mask; //The box indicating what should and what should not be considered for dot search
    BoundingBox DD_transform; //The box indicating the plane we are looking at( and as such is the plane we would transform from )
    BoundingBox DD_transform_to; //The plane we are transforming to
    CvCapture *capture = NULL; //The camera
    CvMemStorage *storage; //Low level memory area used for dynamic structures in OpenCV
    CvSeq *seq; //Sequence to store detected dots in
    IplImage *grabbedImage = NULL; //Raw image from camera( plus some overlay in the end )
    IplImage *imgThreshold = NULL; //Image with detected dots
    IplImage *mask = NULL; //Mask to be able to remove uninteresting areas
    IplImage *coloredMask = NULL; //Mask to be able to indicate above mask on output image
    CvFont font; //Font for drawing text on images
    SendQueue *queue; //Head of the linked list that is the send queue
    char strbuf[255]; //Generic buffer for text formatting( with sprintf())
    struct timeval oldTime, time, diff; //Structs for measuring FPS
    float lastKnownFPS = 0; //Calculated FPS
    CvMat* pointRealMat = cvCreateMat( 1,1,CV_32FC2 ); //Single point matrix for point transformation
    CvMat* pointTransMat = cvCreateMat( 1,1,CV_32FC2 ); //Single point matrix for point transformation
    CvMat* transMat = cvCreateMat( 3,3,CV_32FC1 ); //Translation matrix for transforming input to a straight rectangle
    ClickParams clickParams = { TOP_LEFT, NULL, &DD_transform_to, transMat }; //Struct holding data needed by mouse-click callback function

    // Set up network
    sockfd = initNetwork( serverAddress, serverPort );
    if( sockfd == -1 ) {
        fprintf( stderr, "ERROR: initNetwork returned -1\n");
        return EXIT_FAILURE;
    }
    queue = initSendQueue();

    if( openCamera( &capture, &captureControl ) == 0 ) {
        fprintf( stderr, "ERROR: capture is NULL \n" );
        return EXIT_FAILURE;
    }

    if( ( disableAutoExposure( captureControl ) ) == -1 ) {
        fprintf( stderr, "ERROR: Cannot disable auto exposure \n" );
        //return EXIT_FAILURE;
    }

    if( ( updateAbsoluteExposure( captureControl, currentExposure ) ) == 0 ) {
        fprintf( stderr, "ERROR: Cannot set exposure\n");
    }

    // Create a window in which the captured images will be presented
    cvNamedWindow( imagewindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create a window to hold the configuration sliders and the detection frame TODO This is kind of a hack. Make a better solution
    cvNamedWindow( configwindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create a window to hold the transformed image. Handy to see how the dots are translated, but not needed for functionality
    if( warp ) cvNamedWindow( warpwindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create sliders to adjust the lower color boundary
    cvCreateTrackbar( red_lable  , configwindowname, &min.red,   255, NULL );
    cvCreateTrackbar( green_lable, configwindowname, &min.green, 255, NULL );
    cvCreateTrackbar( blue_lable , configwindowname, &min.blue,  255, NULL );

    //Create sliders for the contour based dot detection
    cvCreateTrackbar( min_area_lable, configwindowname, &minDotRadius,255, NULL );

    /* Slider for manual exposure setting */
    cvCreateTrackbar( exposure_lable, configwindowname, &currentExposure, maxExposure, NULL );

    //Create the memory storage
    storage = cvCreateMemStorage( 0 );

    // void cvInitFont( font, font_face, hscale, vscale, shear=0, thickness=1, line_type=8 )
    cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, 1, 1, 0, 1, 8 );

    // Grab an initial image to be able to fetch image size before the main loop.
    grabbedImage = cvQueryFrame( capture );

    //Move the two windows so both are visible at the same time
    cvMoveWindow( imagewindowname, 0, 10 );
    cvMoveWindow( configwindowname, grabbedImage->width+2, 10 );

    //TODO Move these three inits to a function
    // Set masking defaults TODO load from file? Specify file for this file loading?
    DD_mask.topLeft.x = 0;  
    DD_mask.topLeft.y = 0;

    DD_mask.topRight.x = grabbedImage->width-1;
    DD_mask.topRight.y = 0;

    DD_mask.bottomLeft.x = 0;
    DD_mask.bottomLeft.y = grabbedImage->height-1;

    DD_mask.bottomRight.x = grabbedImage->width-1;
    DD_mask.bottomRight.y = grabbedImage->height-1;

    // Set transformation defaults TODO load from file? Specify file for this file loading?
    DD_transform.topLeft.x = 0;  
    DD_transform.topLeft.y = 0;

    DD_transform.topRight.x = grabbedImage->width-1;
    DD_transform.topRight.y = 0;

    DD_transform.bottomLeft.x = 0;
    DD_transform.bottomLeft.y = grabbedImage->height-1;

    DD_transform.bottomRight.x = grabbedImage->width-1;
    DD_transform.bottomRight.y = grabbedImage->height-1;

    // Set the transformation destination
    DD_transform_to.topLeft.x = 0;  
    DD_transform_to.topLeft.y = 0;

    DD_transform_to.topRight.x = grabbedImage->width-1;
    DD_transform_to.topRight.y = 0;

    DD_transform_to.bottomLeft.x = 0;
    DD_transform_to.bottomLeft.y = grabbedImage->height-1;

    DD_transform_to.bottomRight.x = grabbedImage->width-1;
    DD_transform_to.bottomRight.y = grabbedImage->height-1;

    calculateTransformationMatrix( &DD_transform, &DD_transform_to, transMat );

    // Set callback function for mouse clicks
    cvSetMouseCallback( imagewindowname, calibrateClick, ( void* ) &clickParams );

    gettimeofday( &oldTime, NULL );

    // Main loop. Grabs an image from the camera, detects dots, sends them, draws them on the images, and shows the result to the user
    while( !done ) {

        //PROFILING_PRO_STAMP(); //Uncomment this and the one in the end of the while-loop, and comment all other PROFILING_* to profile main-loop

        // ------ Common actions
        cvClearMemStorage( storage );
        detected_dots = 0;

        //Grab a frame from the camera
        PROFILING_PRO_STAMP();
        grabbedImage = cvQueryFrame( capture );
        PROFILING_POST_STAMP( "cvQueryFrame");

        if( grabbedImage == NULL ) {
            fprintf( stderr, "ERROR: frame is null...\n" );
            getchar();
            returnValue = EXIT_FAILURE;
            break;
        }

        //Flip images to act as a mirror. 
        if( show && flip ) {
            cvFlip( grabbedImage, grabbedImage, 1 );
        }
        if( show && vflip ) {
            cvFlip( grabbedImage, grabbedImage, 0 );
        }

        // ------ State based actions
        switch( state ) {
            case GRAB_DOTS:

                //Create detection image
                imgThreshold = cvCreateImage( cvGetSize( grabbedImage ), 8, 1 );
                cvInRangeS( grabbedImage, cvScalar( DD_COLOR( min )), cvScalar( DD_COLOR( max )), imgThreshold );

                //Mask away anything not in our calibration area
                mask = cvCreateImage( cvGetSize( grabbedImage ), 8, 1 );
                cvZero( mask );
                cvFillConvexPoly( mask, ( CvPoint* ) &DD_mask, 4, cvScalar( WHITE ), 1, 0 );
                cvAnd( imgThreshold, mask, imgThreshold, NULL );

                // Invert mask, increase the number of channels in it and overlay on grabbedImage //TODO Tint the mask red before overlaying
                cvNot( mask, mask );
                coloredMask = cvCreateImage( cvGetSize( grabbedImage ), grabbedImage->depth, grabbedImage->nChannels );
                cvCvtColor( mask, coloredMask, CV_GRAY2BGR );
                cvAddWeighted( grabbedImage, 0.95, coloredMask, 0.05, 0.0, grabbedImage );


                // Reduce noise. 
                // Erode is kind of floor() of pixels, dilate is kind of ceil()
                // I'm not sure which gives the best result.
                switch( noiceReduction ) {
                    case 0: break; //No noise reduction at all
                    case 1: cvErode( imgThreshold, imgThreshold, NULL, 2 ); break;
                    case 2: cvDilate( imgThreshold, imgThreshold, NULL, 2 ); break;
                }

                // Warp the warp-image. We are reusing the coloredMask variable to save some space
                PROFILING_PRO_STAMP();
                if( show && warp ) cvWarpPerspective( grabbedImage, coloredMask, transMat, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ));
                PROFILING_POST_STAMP( "Warping perspective" );


                // Find all dots in the image
                PROFILING_PRO_STAMP();

                // Clear old data from seq
                seq = 0;

                // Find the dots
                cvFindContours(
                        imgThreshold,
                        storage,
                        &seq,
                        sizeof( CvContour ),
                        CV_RETR_LIST,
                        CV_CHAIN_APPROX_SIMPLE,
                        cvPoint( 0,0 )
                        );
                // cvFindContours destroys the original image, so we wipe it here
                // and repaint the detected dots later
                cvZero( imgThreshold );

                PROFILING_POST_STAMP( "Dot detection" );

                //Process all detected dots
                PROFILING_PRO_STAMP();
                for( ; seq != 0; seq = seq->h_next ) {

                    // Calculate radius of the detected contour
                    CvRect rect =( ( CvContour * )seq )->rect;
                    float relCenterX = rect.width / 2;
                    float relCenterY = rect.height / 2;

                    // Make sure the dot is big enough
                    if( relCenterX < minDotRadius || relCenterY < minDotRadius ) {
                        continue;
                    }

                    // Note that we have found another dot
                    ++detected_dots;

                    // Transform the detected dot according to transformation matrix.
                    float absCenter[] = { rect.x + relCenterX, rect.y + relCenterY };
                    pointRealMat->data.fl = absCenter;
                    cvPerspectiveTransform( pointRealMat, pointTransMat, transMat );

                    // Draw the detected contour back to imgThreshold
                    // Draw the detected dot both to real image and to warped( if warp is active )
                    if( show ) {
                        cvDrawContours( imgThreshold, seq, colorWhite, colorWhite, -1, CV_FILLED, 8, cvPoint( 0,0 ) );
                        drawCircle( absCenter[0], absCenter[1], ( relCenterX + relCenterY ) / 2, grabbedImage );
                        if( warp ) {
                            drawCircle( pointTransMat->data.fl[0], pointTransMat->data.fl[1], ( relCenterX + relCenterY ) / 2, coloredMask );
                        }
                    }

                    // Add detected dot to to send queue
                    addPointToSendQueue( pointTransMat->data.fl, queue ); 
                }

                PROFILING_POST_STAMP("Painting dots");

                //Calculate framerate
                gettimeofday( &time, NULL );
                timeval_subtract( &diff, &time, &oldTime );
                lastKnownFPS = lastKnownFPS * 0.7 + ( 1000000.0 / diff.tv_usec ) * 0.3; //We naïvely assume we have more than 1 fps
                oldTime = time;

                //Send the dots detected this frame to the server
                PROFILING_PRO_STAMP();
                sendQueue( sockfd, queue );
                clearSendQueue( queue );
                PROFILING_POST_STAMP( "Sending dots" );

                /* If calibrating, do the calibration */
                if( calibrate_exposure ) {
                    int ret;
                    ret = calibrateExposureLow( captureControl, detected_dots, &currentExposure, DD_MAX_EXPOSURE, lastKnownFPS );
                    switch( ret ) {
                        case 0: // We are done. Let's leave calibration mode
                            calibrate_exposure = 0;
                            printf( "done\n" );
                            break;

                        case -1: // We hit the upper limit with no detected dots
                            fprintf( stderr, "Reached upper limit (%d). Aborting!\n", DD_MAX_EXPOSURE );
                            calibrate_exposure = 0;
                            break;

                        case -2: // We hit the lower limit with more than one dot detected
                            fprintf( stderr, "Too bright. More than one dot found even with minimal exposure. Aborting!\n");
                            calibrate_exposure = 0;
                            break;

                        case -3: //No conclusive results.
                            fprintf( stderr, "No conclusive results. Giving up\n" );
                            calibrate_exposure = 0;
                            break;
                    }
                }

                break; //End of GRAB_DOTS

            case SELECT_TRANSFORM:
                //Falling through here. Poor man's multi-case clause. Not putting this in default as we might
                //want to do different things in these two some day.
            case SELECT_MASK:
                snprintf( strbuf, sizeof( strbuf ), "Select %s point", pointTranslationTable[clickParams.currentPoint]);
                cvDisplayOverlay( imagewindowname, strbuf, 5 );
                break; //End of SELECT_MASK and SELECT_TRANSFORM
        }

        // Paint the corners of the detecting area and the calibration area
        paintOverlayPoints( grabbedImage, &DD_transform );

        //Print some statistics to the image
        if( show ) {
            snprintf( strbuf, sizeof( strbuf ), "Dots: %i", detected_dots ); //Print number of detected dots to the screen
            cvPutText( grabbedImage, strbuf, cvPoint( 10, 20 ), &font, cvScalar( WHITE ));
            snprintf( strbuf, sizeof( strbuf ), "FPS: %.1f", lastKnownFPS );
            cvPutText( grabbedImage, strbuf, cvPoint( 10, 40 ), &font, cvScalar( WHITE ));
            cvCircle( grabbedImage, cvPoint( 15, 55 ), minDotRadius, cvScalar( min.blue, min.green, min.red, min.alpha ), -1, 8, 0 ); // Colors given in order BGR-A, Blue, Green, Red, Alpha
        }

        //Show images 
        PROFILING_PRO_STAMP();
        if( show ) {
            cvShowImage( configwindowname, imgThreshold );
            cvShowImage( imagewindowname, grabbedImage );
            if( warp ) cvShowImage( warpwindowname, coloredMask );
        }
        PROFILING_POST_STAMP("Showing images");

        //Release the temporary images
        cvReleaseImage( &imgThreshold );
        cvReleaseImage( &mask );
        cvReleaseImage( &coloredMask );

        /* Update exposure if needed */
        updateAbsoluteExposure( captureControl, currentExposure );
        cvSetTrackbarPos( exposure_lable, configwindowname, currentExposure );

        //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7( linux version ),
        //remove higher bits using AND operator
        i = ( cvWaitKey( 10 ) & 0xff );
        switch( i ) {
            case 'g': 
                makeCalibrate( &DD_transform, &DD_transform_to, transMat, capture, captureControl, 20 );
                updateAbsoluteExposure( captureControl, currentExposure+1 );
                break;

            case 'e': 
                toggleCalibrationMode( &calibrate_exposure, &currentExposure );
                break; /* Toggles calibration mode */

            case 'c':
                openCamera( &capture, &captureControl );
                break;

            case 's': 
                show = ~show;
                break; //Toggles updating of the image. Can be useful for performance of slower machines... Or as frame freeze

            case 'm': 
                state = SELECT_MASK;
                clickParams.currentPoint = TOP_LEFT;
                clickParams.DD_box = &DD_mask;
                break; //Starts selection of masking area. Will return to dot detection once all four points are set

            case 't':
                state = SELECT_TRANSFORM;
                clickParams.currentPoint = TOP_LEFT;
                clickParams.DD_box = &DD_transform;
                break; //Starts selection of the transformation area. Returns to dot detection when done.

            case 'f':
                flip = ~flip;
                break; //Toggles horizontal flipping of the image
            case 'v':
                vflip = ~vflip;
                break; //Toggles vertical flipping of the image

            case 'w':
                warp = ~warp;
                toggleWarpOutput( warp );
                break; //Toggles showing the warped image

            case 'n':
                noiceReduction = ( noiceReduction + 1 ) % 3;
                break; //Cycles noise reduction algorithm

            case 'q': //falling through here to quit

            case  27: 
                done = 1;
                break; //ESC. Kills the whole thing( in a nice and controlled manner )
        }
        fflush( stdout ); //Make sure everything in the buffer is printed before we go on

        //PROFILING_POST_STAMP("Main loop");
    } //End of main while-loop

    // Release the capture device and do some housekeeping
    cvReleaseImage( &grabbedImage );
    cvReleaseCapture( &capture );
    cvReleaseMemStorage( &storage );
    cvDestroyWindow( imagewindowname );
    cvDestroyWindow( configwindowname );
    if( warp ) cvDestroyWindow( warpwindowname ); //If warp is off, the window was never created
    destroySendQueue( queue );
    close( sockfd );
    close( captureControl );
    return returnValue;
}
Code Example #9
File: eye.cpp Project: RoboCraft/robot_2wd
int calc_hsv_colors(IplImage *frame)
{
    if(!frame)
        return -1;

    IplImage* image=0, *hsv=0, *dst=0, *dst2=0, *color_indexes=0, *dst3=0;

    image = cvCloneImage(frame);
    hsv = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 );
    cvCvtColor( image, hsv, CV_BGR2HSV );

    // for store results
    dst = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 );
    dst2 = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 );
    color_indexes = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 1 ); // store color indexes

    // for storing the RGB colors
    CvScalar rgb_colors[NUM_COLOR_TYPES];

    int i=0, j=0, x=0, y=0;

    // reset colors
    memset(colorCount, 0, sizeof(colorCount));
    for(i=0; i<NUM_COLOR_TYPES; i++) {
        rgb_colors[i] = cvScalarAll(0);
    }

    for (y=0; y<hsv->height; y++) {
        for (x=0; x<hsv->width; x++) {

            // get HSV pixel
            uchar H = CV_PIXEL(uchar, hsv, x, y)[0];	// Hue
            uchar S = CV_PIXEL(uchar, hsv, x, y)[1];	// Saturation
            uchar V = CV_PIXEL(uchar, hsv, x, y)[2];	// Value (Brightness)

            // define pixel color type
            int ctype = getPixelColorType(H, S, V);

            // set values
            CV_PIXEL(uchar, dst, x, y)[0] = cCTHue[ctype];	// Hue
            CV_PIXEL(uchar, dst, x, y)[1] = cCTSat[ctype];	// Saturation
            CV_PIXEL(uchar, dst, x, y)[2] = cCTVal[ctype];	// Value

            // collect RGB
            rgb_colors[ctype].val[0] += CV_PIXEL(uchar, image, x, y)[0]; // B
            rgb_colors[ctype].val[1] += CV_PIXEL(uchar, image, x, y)[1]; // G
            rgb_colors[ctype].val[2] += CV_PIXEL(uchar, image, x, y)[2]; // R

            // store which color type this pixel belongs to
            CV_PIXEL(uchar, color_indexes, x, y)[0] = ctype;

            // and count it :)
            colorCount[ctype]++;
        }
    }

    // average the RGB components
    for(i=0; i<NUM_COLOR_TYPES; i++) {
        if(colorCount[i] == 0) continue; // skip unused color types to avoid dividing by zero
        rgb_colors[i].val[0] /= colorCount[i];
        rgb_colors[i].val[1] /= colorCount[i];
        rgb_colors[i].val[2] /= colorCount[i];
    }

    // now put the array into a vector and sort it :)
    std::vector< std::pair< int, uint > > colors;
    colors.reserve(NUM_COLOR_TYPES);

    for(i=0; i<NUM_COLOR_TYPES; i++){
        std::pair< int, uint > color;
        color.first = i;
        color.second = colorCount[i];
        colors.push_back( color );
    }
    // sort
    std::sort( colors.begin(), colors.end(), colors_sort );

    // for debugging: print the codes, color names and their counts
    for(i=0; i<colors.size(); i++){
        printf("[i] color %d (%s) - %d\n", colors[i].first, sCTypes[colors[i].first], colors[i].second );
    }

    // print the codes of the dominant colors
    printf("[i] color code: \n");
    for(i=0; i<NUM_COLOR_TYPES; i++)
        printf("%02d ", colors[i].first);
    printf("\n");
    printf("[i] color names: \n");
    for(i=0; i<NUM_COLOR_TYPES; i++)
        printf("%s ", sCTypes[colors[i].first]);
    printf("\n");

#if 0
    cvSaveImage("image.bmp", image);
#endif

    cvReleaseImage(&image);
    cvReleaseImage(&hsv);
    cvReleaseImage(&dst);
    cvReleaseImage(&dst2);
    cvReleaseImage(&color_indexes);
    cvReleaseImage(&dst3);

    return 0;
}
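The CV_PIXEL macro used above is not an OpenCV API. A definition consistent with how it is used here (type, image, x, y, yielding a pointer to the pixel's first channel) would be:

/* assumed helper macro, matching the usage above (not part of OpenCV) */
#define CV_PIXEL(type, img, x, y) \
    (((type*)((img)->imageData + (y)*(img)->widthStep)) + (x)*(img)->nChannels)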
Code Example #10
int main(int argc, char* argv[])
{
	IplImage *m_pPreImage = NULL;
	IplImage *m_pGrayImage = NULL;
	IplImage *m_pSmoothImage = NULL;
	IplImage *pPrev = NULL;
	IplImage *pCurr = NULL;
	IplImage *pDest = NULL;
	IplImage *pMask = NULL;
	IplImage *pMaskDest = NULL;
	IplImage *dst = NULL;
	CvMat *pPrevF = NULL;
	CvMat *pCurrF = NULL;
	CvSize imgSize;

    CvCapture *m_pCapture = NULL;
	CvVideoWriter *writer = 0;
	IplConvKernel* element;
	CvSeq* contour = 0;
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvRect r;

	// IplConvKernel* element;

    cvNamedWindow( "VideoDisplay1", 1 );
	cvNamedWindow( "VideoDisplay2", 1 );
	cvNamedWindow( "VideoDisplay3", 1 );
	cvNamedWindow( "VideoDisplay4", 1 );
	
// Capture
	m_pCapture = cvCreateFileCapture("MVI_8833.AVI");
	contour = cvCreateSeq(CV_SEQ_ELTYPE_POINT,sizeof(CvSeq),sizeof(CvPoint),storage);
	

    if( !m_pCapture )
    {
        fprintf(stderr,"Could not initialize capturing! \n");
        return -1;
    }
// Display
    while ( (m_pPreImage = cvQueryFrame(m_pCapture)))
    {	
		imgSize = cvSize(m_pPreImage->width, m_pPreImage->height);
		if(!m_pGrayImage)
			m_pGrayImage = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
		if(!pCurr)
			pCurr = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);	
		if(!m_pSmoothImage)
			m_pSmoothImage = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);

		// image preprocessing
		cvCvtColor(m_pPreImage, m_pGrayImage, CV_BGR2GRAY); // convert to grayscale
		cvSmooth(m_pGrayImage,m_pSmoothImage,CV_GAUSSIAN,3,0,0,0 ); // Gaussian smoothing to suppress noise
		cvEqualizeHist(m_pSmoothImage,pCurr ); // histogram equalization


		 if(!pPrevF)
			pPrevF = cvCreateMat(m_pPreImage->height, m_pGrayImage->width, CV_32FC1); // cvCreateMat takes rows, then cols
		 if(!pCurrF)
			pCurrF = cvCreateMat(m_pPreImage->height, m_pGrayImage->width, CV_32FC1);
		 if(!pPrev)
			pPrev = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
		 if(!pMask)
			pMask = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
		 if(!pMaskDest)
			pMaskDest = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
		 if(!dst)
			dst = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
		 if(!pDest)
			{
				pDest = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
				
			}
	
		cvAbsDiff(pPrev, pCurr, pDest);   // frame difference
		cvCopy(pCurr, pPrev, NULL);  // store the current frame as the previous frame

		
		cvThreshold(pDest, pMask, 80, 255, CV_THRESH_BINARY);     // binarize
		element = cvCreateStructuringElementEx( 9, 9, 3, 3, CV_SHAPE_RECT, NULL);
		cvMorphologyEx( pMask, pMaskDest, NULL, element, CV_MOP_CLOSE, 1); // morphological closing
		
		// find and draw the blob contours
		cvFindContours( pMaskDest, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );

		// draw the smallest rectangle containing each target
		for(;contour;contour=contour->h_next)
		{
			r=((CvContour*)contour)->rect;
			if(r.height*r.width>100)
			{
				cvRectangle(m_pPreImage,cvPoint(r.x,r.y),cvPoint(r.x+r.width,r.y+r.height),CV_RGB(255,0,0),1,CV_AA,0);
				
			}
		}


		cvShowImage( "VideoDisplay1", m_pPreImage );
		cvShowImage( "VideoDisplay2", pMask);
		cvShowImage( "VideoDisplay3", pMaskDest );
		cvShowImage( "VideoDisplay4", pPrev );

		if(cvWaitKey(50)>0)
			return 0;
	}

	// Release
    cvReleaseImage( &m_pPreImage );
	cvReleaseImage( &m_pGrayImage );
	cvReleaseImage( &m_pSmoothImage );
	cvReleaseImage( &pCurr );
	cvReleaseImage( &pDest );
	cvReleaseImage( &pMask );
	cvReleaseImage( &pMaskDest );
	cvReleaseImage( &dst );
	cvReleaseMemStorage( &storage );
    cvDestroyWindow("VideoDisplay1");
	cvDestroyWindow("VideoDisplay2");
	cvDestroyWindow("VideoDisplay3");
	cvDestroyWindow("VideoDisplay4");
	cvReleaseStructuringElement( &element ); 

	return 0;
}
Code Example #11
int main()
{
    int c; // to store the ASCII value of the key pressed
    int i,j,h,s,v;
    CvCapture *capture=cvCreateCameraCapture(0); // initiate camera
    // because the frame is updated repeatedly, we don't need to wait for the camera: for some ms the
    // frame will be black, then as the camera starts the frame will update and show the image
    IplImage *frame;
    
    IplImage* outputred;
    IplImage* outputone;
    IplImage* outputtwo;
    // IplImage* outputblue;
    IplImage* outputwhite;
    // IplImage* outputorange;

    uchar *ptemp;
    // uchar *poutputorange;
    uchar *poutputred;
    uchar *poutputwhite;
    // uchar *poutputblue;
    uchar *poutputone;
    uchar *poutputtwo;
   
   
    if(capture!=NULL)
    {
        frame=cvQueryFrame(capture); // take the current camera image and give it to the frame pointer
        cvNamedWindow("img");
        while(1)
        {
            cvShowImage("img",frame);
            frame=cvQueryFrame(capture);
            temp=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,3);
            cvCvtColor(frame,temp,CV_BGR2HSV);
            // frame rate time period (if not given, the system will hang, since processing is very fast)
            // cvNamedWindow("output",1);
            // cvShowImage("output",temp);
            cvSetMouseCallback("img", my_mouse_callback, NULL);
            c=cvWaitKey(1);
            outputred=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
            outputone=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
            outputtwo=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
            // outputblue=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
            outputwhite=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
            // outputorange=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);

            cvCvtColor(frame,temp,CV_BGR2HSV);
            ptemp = (uchar*)temp->imageData;
            poutputone = (uchar*)outputone->imageData;
            poutputtwo = (uchar*)outputtwo->imageData;
            // poutputblue = (uchar*)outputblue->imageData;
            poutputwhite = (uchar*)outputwhite->imageData;
            poutputred = (uchar*)outputred->imageData;
            // poutputorange = (uchar*)outputorange->imageData;
    
            for(i=0; i<frame->height; i++)
                for(j=0; j<frame->width; j++)
                {
                    h=ptemp[i*temp->widthStep + j*temp->nChannels+0];
                    s=ptemp[i*temp->widthStep + j*temp->nChannels+1];
                    v=ptemp[i*temp->widthStep + j*temp->nChannels+2];

                    if((h>=157&&h<=178)&&s>=110&&s<=255&&v>=90) // red
                        poutputred[i*outputred->widthStep + j]=255;
                    else
                        poutputred[i*outputred->widthStep + j]=0;

                    if((h==0&&s==0&&v<150&&v>9)||(h>=25&&h<=110&&s>=20&&s<=200&&v>=13&&v<=120)) // one
                        poutputone[i*outputone->widthStep + j]=255;
                    else
                        poutputone[i*outputone->widthStep + j]=0;

                    /* if((h>=145&&h<=160)&&s>=175&&s<=255&&v>=80&&v<=150) // one
                           poutputone[i*outputone->widthStep + j]=255;
                       else
                           poutputone[i*outputone->widthStep + j]=0;
                    */

                    if(h>=110&&h<=153&&s>=90&&v>=7&&v<=150) // two
                        poutputtwo[i*outputtwo->widthStep + j]=255;
                    else
                        poutputtwo[i*outputtwo->widthStep + j]=0;

                    if((h==0&&s==0&&v>=250)||((h>=0&&h<=30)&&s>=50&&s<=200&&v>=110)) // white
                        poutputwhite[i*outputwhite->widthStep + j]=255;
                    else
                        poutputwhite[i*outputwhite->widthStep + j]=0;
                }
     
    //cvNamedWindow("output",1);
    cvNamedWindow("outputred",1);
    cvNamedWindow("outputone",1);
    cvNamedWindow("outputtwo",1);
  //  cvNamedWindow("outputblue",1);
    cvNamedWindow("outputwhite",1);
    //cvNamedWindow("outputorange",1);

    //cvShowImage("output",temp);
    cvShowImage("outputred",outputred);
    cvShowImage("outputone",outputone);
    cvShowImage("outputtwo",outputtwo);
   // cvShowImage("outputblue",outputblue);
    cvShowImage("outputwhite",outputwhite);
   // cvShowImage("outputorange",outputorange);
    cvWaitKey(1);
 /*   imgOutred=cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,3);
    labelImgred=cvCreateImage(cvGetSize(input),IPL_DEPTH_LABEL,1);
    CvBlobs blobsred;
    cvLabel(outputred, labelImgred, blobsred);
    cvRenderBlobs(labelImgred, blobsred, input, imgOutred);
    cvFilterByArea(blobsred,PIXEL_MIN,PIXEL_MAX);

    imgOutone=cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,3);
    labelImgone=cvCreateImage(cvGetSize(input),IPL_DEPTH_LABEL,1);
    CvBlobs blobsone;
    cvLabel(outputone, labelImgone, blobsone);
    cvRenderBlobs(labelImgone, blobsone, input, imgOutone);
    cvFilterByArea(blobsone,PIXEL_MIN,PIXEL_MAX);

    imgOuttwo=cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,3);
    labelImgtwo=cvCreateImage(cvGetSize(input),IPL_DEPTH_LABEL,1);
    CvBlobs blobstwo;
    cvLabel(outputtwo, labelImgtwo, blobstwo);
    cvRenderBlobs(labelImgtwo, blobstwo, input, imgOuttwo);
    cvFilterByArea(blobstwo,PIXEL_MIN,PIXEL_MAX);

    imgOutblue=cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,3);
    labelImgblue=cvCreateImage(cvGetSize(input),IPL_DEPTH_LABEL,1);
    CvBlobs blobsblue;
    cvLabel(outputblue, labelImgblue, blobsblue);
    cvRenderBlobs(labelImgblue, blobsblue, input, imgOutblue);
    cvFilterByArea(blobsblue,PIXEL_MIN,PIXEL_MAX);
    
    imgOutwhite=cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,3);
    labelImgwhite=cvCreateImage(cvGetSize(input),IPL_DEPTH_LABEL,1);
    CvBlobs blobswhite;
    cvLabel(outputwhite, labelImgwhite, blobswhite);
    cvRenderBlobs(labelImgwhite, blobswhite, input, imgOutwhite);
    cvFilterByArea(blobswhite,PIXEL_MINWHITE,PIXEL_MAX);
    
   */ 
    
    
    
    
            cvReleaseImage( &outputred );
            cvReleaseImage( &outputone );
            cvReleaseImage( &outputtwo );
            // cvReleaseImage( &outputblue );
            cvReleaseImage( &outputwhite );
            // cvReleaseImage( &outputorange );
            cvReleaseImage( &temp );
            // cvDestroyWindow( "output" );
            if(c>0&&c<255)
            {
                cvDestroyWindow( "img" );
                cvReleaseImage( &frame );
                cvReleaseCapture(&capture);
                // cvDestroyWindow( "outputred" );
                // cvDestroyWindow( "output" );
                // cvDestroyWindow( "outputone" );
                // cvDestroyWindow( "outputtwo" );
                // cvDestroyWindow( "outputblue" );
                // cvDestroyWindow( "outputwhite" );
                // cvDestroyWindow( "outputorange" );

                // these were already released above; cvReleaseImage() sets the pointer
                // to NULL, so releasing again here is a harmless no-op
                cvReleaseImage( &outputred );
                cvReleaseImage( &outputone );
                cvReleaseImage( &outputtwo );
                // cvReleaseImage( &outputblue );
                cvReleaseImage( &outputwhite );
                // cvReleaseImage( &outputorange );

                return 0;
            }
        }
    }
}
Code Example #12
File: kenken.c Project: dlowe/kenken
static IplImage *_threshold(IplImage *in) {
    IplImage *img = cvCreateImage(cvGetSize(in), 8, 1);

    // convert to grayscale
    cvCvtColor(in, img, CV_BGR2GRAY);

    // compute the mean intensity. This is used to adjust constant_reduction value below.
    long total = 0;
    for (int x = 0; x < img->width; ++x) {
        for (int y = 0; y < img->height; ++y) {
            CvScalar s = cvGet2D(img, y, x);
            total += s.val[0];
        }
    }
    int mean_intensity = (int)(total / (img->width * img->height));

    // apply thresholding (converts it to a binary image)
    // block_size observations: higher values do better for images with variable lighting
    //   (e.g. shadows).
    // may eventually need to parameterize this, to some extent, because the different callers
    //   seem to do better with different values (e.g. contour location is better with smaller
    //   numbers, but cage location is better with larger...) but for now it has been possible
    //   to settle on a value which works pretty well for most cases.
    int block_size = (int)(img->width / 9);
    if ((block_size % 2) == 0) {
        // must be odd
        block_size += 1;
    }
    // constant_reduction observations: magic, but adapting this value to the mean intensity of the
    //   image as a whole seems to help.
    int constant_reduction = (int)(mean_intensity / 3.6 + 0.5);

    IplImage *threshold_image = cvCreateImage(cvGetSize(img), 8, 1);
    cvAdaptiveThreshold(img, threshold_image, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY_INV,
        block_size, constant_reduction);
    cvReleaseImage(&img);

    // try to get rid of "noise" spots.
    int min_blob_size = 2;
    for (int x = 0; x < threshold_image->width; ++x) {
        for (int y = 0; y < threshold_image->height; ++y) {
            CvScalar s = cvGet2D(threshold_image, y, x);
            int ink_neighbors = 0;
            if (s.val[0] == 255) {
                for (int dx = -1; dx <= 1; ++dx) {
                    if ((x + dx >= 0) && (x + dx < threshold_image->width)) {
                        for (int dy = -1; dy <= 1; ++dy) {
                            if ((y + dy >= 0) && (y + dy < threshold_image->height)) {
                                if (! ((dy == 0) && (dx == 0))) {
                                    CvScalar m = cvGet2D(threshold_image, y + dy, x + dx);
                                    if (m.val[0] == 255) {
                                        ++ink_neighbors;
                                        if (ink_neighbors > min_blob_size) {
                                            break;
                                        }
                                    }
                                }
                            }
                        }
                        if (ink_neighbors > min_blob_size) {
                            break;
                        }
                    }
                }
                if (ink_neighbors <= min_blob_size) {
                    s.val[0] = 0;
                    cvSet2D(threshold_image, y, x, s);
                }
            }
        }
    }

    return threshold_image;
}
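A hedged usage sketch for _threshold above; the driver name and output path are illustrative, and it assumes a 3-channel BGR image on disk, since the function begins with a CV_BGR2GRAY conversion:

#include <cv.h>
#include <highgui.h>

static IplImage *_threshold(IplImage *in); /* defined above in kenken.c */

int threshold_demo(const char *path)
{
    IplImage *in = cvLoadImage(path, CV_LOAD_IMAGE_COLOR); /* 3-channel BGR */
    if (!in)
        return -1;

    IplImage *bin = _threshold(in);   /* adaptive threshold + despeckle */
    cvSaveImage("threshold.png", bin, 0);

    cvReleaseImage(&bin);
    cvReleaseImage(&in);
    return 0;
}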
Code example #13
0
File: kenken.c Project: dlowe/kenken
const CvPoint2D32f* locate_puzzle(IplImage *in, IplImage **annotated) {
    IplImage *grid_image = _grid(in);

    *annotated = cvCloneImage(in);

    // find lines using Hough transform
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* lines = 0;

    double distance_resolution = 1;
    double angle_resolution    = CV_PI / 60;
    int threshold              = 60;
    int minimum_line_length    = in->width / 2;
    int maximum_join_gap       = in->width / 10;
    lines = cvHoughLines2(grid_image, storage, CV_HOUGH_PROBABILISTIC,  distance_resolution, angle_resolution, threshold, minimum_line_length, maximum_join_gap);

    cvCvtColor(grid_image, *annotated, CV_GRAY2RGB);

    cvReleaseImage(&grid_image);

    double most_horizontal = INFINITY;
    for (int i = 0; i < lines->total; ++i) {
        CvPoint *line = (CvPoint*)cvGetSeqElem(lines,i);
        double dx     = abs(line[1].x - line[0].x);
        double dy     = abs(line[1].y - line[0].y);

        double slope = INFINITY;
        if (dx != 0) {
            slope = dy / dx;
        }
        if (slope != INFINITY) {
            if (slope < most_horizontal) {
                //printf("most horizontal seen: %0.2f\n", slope);
                most_horizontal = slope;
            }
        }
    }

    int top    = -1;
    int left   = -1;
    int bottom = -1;
    int right  = -1;
    for (int i = 0; i < lines->total; i++) {
        CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
        double dx     = abs(line[1].x - line[0].x);
        double dy     = abs(line[1].y - line[0].y);
        double slope  = INFINITY;
        if (dx) {
            slope = dy / dx;
        }

        cvLine(*annotated, line[0], line[1], CV_RGB(255, 0, 0), 1, 8, 0);
        if (fabs(slope - most_horizontal) <= 1) { // fabs: slope is a double; int abs() would truncate the difference
            if ((top == -1) || (line[1].y < ((CvPoint*)cvGetSeqElem(lines,top))[1].y)) {
                top = i;
            }
            if ((bottom == -1) || (line[1].y > ((CvPoint*)cvGetSeqElem(lines,bottom))[1].y)) {
                bottom = i;
            }
        } else {
            if ((left == -1) || (line[1].x < ((CvPoint*)cvGetSeqElem(lines,left))[1].x)) {
                left = i;
            }
            if ((right == -1) || (line[1].x > ((CvPoint*)cvGetSeqElem(lines,right))[1].x)) {
                right = i;
            }
        }
    }
    //printf("number of lines: %d\n", lines->total);
    if ((top == -1) || (left == -1) || (bottom == -1) || (right == -1)) {
        return NULL;
    }

    CvPoint *top_line    = (CvPoint*)cvGetSeqElem(lines,top);
    cvLine(*annotated, top_line[0], top_line[1], CV_RGB(0, 0, 255), 6, 8, 0);

    CvPoint *bottom_line = (CvPoint*)cvGetSeqElem(lines,bottom);
    cvLine(*annotated, bottom_line[0], bottom_line[1], CV_RGB(0, 255, 255), 6, 8, 0);

    CvPoint *left_line   = (CvPoint*)cvGetSeqElem(lines,left);
    cvLine(*annotated, left_line[0], left_line[1], CV_RGB(0, 255, 0), 6, 8, 0);

    CvPoint *right_line  = (CvPoint*)cvGetSeqElem(lines,right);
    cvLine(*annotated, right_line[0], right_line[1], CV_RGB(255, 255, 0), 6, 8, 0);

    CvPoint2D32f *coordinates;
    coordinates = malloc(sizeof(CvPoint2D32f) * 4);

    // top left
    intersect(top_line, left_line, &(coordinates[0]));
    cvLine(*annotated, cvPointFrom32f(coordinates[0]), cvPointFrom32f(coordinates[0]), CV_RGB(255, 255, 0), 10, 8, 0);

    //printf("top_left: %.0f, %.0f\n", coordinates[0].x, coordinates[0].y);

    // top right
    intersect(top_line, right_line, &(coordinates[1]));
    cvLine(*annotated, cvPointFrom32f(coordinates[1]), cvPointFrom32f(coordinates[1]), CV_RGB(255, 255, 0), 10, 8, 0);

    //printf("top_right: %.0f, %.0f\n", coordinates[1].x, coordinates[1].y);

    // bottom right
    intersect(bottom_line, right_line, &(coordinates[2]));
    cvLine(*annotated, cvPointFrom32f(coordinates[2]), cvPointFrom32f(coordinates[2]), CV_RGB(255, 255, 0), 10, 8, 0);

    //printf("bottom_right: %.0f, %.0f\n", coordinates[2].x, coordinates[2].y);

    // bottom left
    intersect(bottom_line, left_line, &(coordinates[3]));
    cvLine(*annotated, cvPointFrom32f(coordinates[3]), cvPointFrom32f(coordinates[3]), CV_RGB(255, 255, 0), 10, 8, 0);

    //printf("bottom_left: %.0f, %.0f\n", coordinates[3].x, coordinates[3].y);

    return coordinates;
}
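The corners come back ordered top-left, top-right, bottom-right, bottom-left, which is exactly the input cvGetPerspectiveTransform needs for rectification. A sketch of that follow-up step — the 600×600 target size and the helper name are arbitrary choices here, not part of kenken.c:

#include <cv.h>
#include <highgui.h>

IplImage *rectify_puzzle(IplImage *in)
{
    IplImage *annotated = NULL;
    const CvPoint2D32f *corners = locate_puzzle(in, &annotated);
    if (!corners) {
        cvReleaseImage(&annotated);
        return NULL;
    }

    /* map the detected quad onto an axis-aligned square */
    CvPoint2D32f dst[4] = { {0, 0}, {600, 0}, {600, 600}, {0, 600} };
    CvMat *H = cvCreateMat(3, 3, CV_32FC1);
    cvGetPerspectiveTransform(corners, dst, H);

    IplImage *out = cvCreateImage(cvSize(600, 600), in->depth, in->nChannels);
    cvWarpPerspective(in, out, H, CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, cvScalarAll(0));

    cvReleaseMat(&H);
    cvReleaseImage(&annotated);
    free((void *)corners);   /* locate_puzzle allocated this with malloc */
    return out;
}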
Code example #14
0
void basicOCR::printCvSeq(CvSeq* seq, IplImage* imgSrc, IplImage* img_gray, CvMemStorage* storage)
{
	CvSeq* si = seq;
	CvRect rcFirst = findFirstChar(seq, 0);
	if (rcFirst.x == 0)
	{
		printf("No words found...\n");
		return;
	}
	else
		printf("\nOCR of text:\n");
	CvRect rcNewFirst = rcFirst;
	cvDrawRect(imgSrc, cvPoint(rcFirst.x, rcFirst.y), cvPoint(rcFirst.x + rcFirst.width, rcFirst.y + rcFirst.height), CV_RGB(0, 0, 0));
	int printX = rcFirst.x - 1;
	int printY = rcFirst.y - 1;

	int idx = 0;
	char szName[56] = {0};
	int tempCount=0;

	while (true)
	{
		CvRect rc = findPrintRect(seq, printX, printY, rcFirst);
		cvDrawRect(imgSrc, cvPoint(rc.x, rc.y), cvPoint(rc.x + rc.width, rc.y + rc.height), CV_RGB(0, 0, 0));
		// deal with degenerate rectangles (disabled)
		/*if (rc.width <= 1 && rc.height <= 1)
		{
		continue;
		}*/

		if (printX < rc.x)
		{
			if ((rc.x - printX) >= (rcFirst.width / 2))
				printf(" ");
			printX = rc.x;
			//cvDrawRect(imgSrc, cvPoint(rc.x, rc.y), cvPoint(rc.x + rc.width, rc.y + rc.height), CV_RGB(255, 0, 0));
			IplImage* imgNo = cvCreateImage(cvSize(rc.width, rc.height), IPL_DEPTH_8U, 3);
			cvSetImageROI(imgSrc, rc);
			cvCopyImage(imgSrc, imgNo);
			cvResetImageROI(imgSrc);
			sprintf(szName, "wnd_%d", idx++);
			// show the split character image (or not)
			cvNamedWindow(szName);
			cvShowImage(szName, imgNo);
			IplImage* imgDst = cvCreateImage(cvSize(rc.width, rc.height),IPL_DEPTH_8U,1);
			cvCvtColor(imgNo, imgDst, CV_RGB2GRAY);
			printf("%c", (char)classify(imgDst, 0));
			cvReleaseImage(&imgNo);
		}
		else if (printX == rc.x && printX < imgSrc->width)
		{
			printX += rc.width;
		}
		else
		{
			printf("\n");
			printY = rcNewFirst.y + rcNewFirst.height;
			rcNewFirst = findFirstChar(seq, printY);
			if (rcNewFirst.x == 0)
				break;
			cvDrawRect(imgSrc, cvPoint(rcNewFirst.x, rcNewFirst.y), cvPoint(rcNewFirst.x + rcNewFirst.width, rcNewFirst.y + rcNewFirst.height), CV_RGB(0, 0, 0));
			printX = rcNewFirst.x - 1;
			printY = rcNewFirst.y - 1;
		}
	}
	cvNamedWindow("src");
	cvShowImage("src", imgSrc);
	cvWaitKey(0);
	cvReleaseMemStorage(&storage);
	cvReleaseImage(&imgSrc);
	cvReleaseImage(&img_gray);
	cvDestroyAllWindows();

}
Code example #15
0
File: trainner.cpp Project: B12040331/HandGesture
trainner::trainner() {

	/*
	 * reading images, convert them to binary,extract features
	 */
	FileReader reader("pathsF.txt");
	FileWriter writer("out.txt");
	string line = "";
	IplImage* img;
	IplImage* gray_im;
	IplImage* gray_img;
	CvSeq* contour; //pointer to a contour.
	CvMemStorage* space = cvCreateMemStorage(0);
	CvBox2D hand_boundary;
	CvSeq* largest_contour = NULL; //initialize: it is tested below even when no contour was found
	int i = 0;
	int all_time[dataset_size];
	while ((line = reader.readFile()) != "") {
		std::clock_t start;

		start = std::clock();

		//load the img
		img = cvLoadImage(line.c_str());
		cvSmooth(img, img, CV_GAUSSIAN, 5, 5);
		gray_im = cvCloneImage(img);
		cvCvtColor(img, gray_im, CV_BGR2YCrCb);
		gray_img = cvCreateImage(cvGetSize(gray_im), 8, 1);
		cvInRangeS(gray_im, cvScalar(0, 131, 80), cvScalar(255, 185, 135),
				gray_img);
		cvSmooth(gray_img, gray_img, CV_MEDIAN, 5, 5);

		hand = cvCloneImage(gray_img);
		//finding all contours in the image
		cvFindContours(gray_img, space, &contour, sizeof(CvContour),
				CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
		//iterate through each contour
		double max_area = 0;
		double area = 0;
		//finding largest contour
		while (contour) {
			area = cvContourArea(contour);
			if (area > max_area) {
				max_area = area;
				largest_contour = contour;
			}
			contour = contour->h_next;
		}

		if (largest_contour && largest_contour->total > 0) {
			hand_boundary = cvMinAreaRect2(largest_contour);

			float max = hand_boundary.size.width;
			if (max < hand_boundary.size.height)
				max = hand_boundary.size.height;
			//copy the hand in its own image
			CvRect rounded =
					cvRect(hand_boundary.center.x - (max / 2) - 25,
							hand_boundary.center.y - (max / 2) - 25, max + 50,
							max + 50);
			cvSetImageROI(hand, rounded);
			hand = cvCloneImage(hand);
//			cvShowImage("image", hand);
			cvWaitKey(0);
			cvReleaseImage(&gray_img);
			cvClearSeq(largest_contour);
			//			string bin = extract_feature();
			//write to file
			//			writer.writeFile(bin);
			extract_feature(i);

		} else {
			for (int j = 0; j < number_of_features; j++)
				data[i][j] = 0.0;
		}
		int timm = (std::clock() - start) / (double) (CLOCKS_PER_SEC / 1000);
		all_time[i] = timm;
		i++;

	}
	int sum = 0;

	for (int i = 0; i < dataset_size; i++) {
		sum += all_time[i];
	}
	sum = sum / dataset_size;
	cout << sum << endl;
	// (reader is destroyed automatically at end of scope; the explicit
	// reader.~FileReader() call here would have run the destructor twice)

	// now train the classifier
	train();

	//print features
	for (int i = 0; i < dataset_size; i++) {
		ostringstream oss;
		if (i < 11)
			oss << "up";
		else if (i < 30)
			oss << "open";
		else if (i < 60)
			oss << "capture";
		else if (i < 83)
			oss << "call";
		else if (i < 101)
			oss << "left";
		else if (i < 125)
			oss << "right";
		else if (i < 136)
			oss << "closed";
		else if (i < 149)
			oss << "start";
		else if (i < 159)
			oss << "Lup";
		else if (i < 173)
			oss << "Lopen";
		else if (i < 190)
			oss << "Lcapture";
		else if (i < 197)
			oss << "Lcall";
		oss << ",";
		for (int j = 0; j < number_of_features; j++) {
			if (data[i][j] == 0.0)
				oss << "0";
			else
				oss << "1";
			oss << ",";
		}
		string name = oss.str();
		writer.writeFile(name);
	}
	// (writer is likewise destroyed automatically at end of scope)
}
Code example #16
0
File: motempl.c Project: ericperko/cwrucam
// parameters:
//  img - input video frame
//  dst - resultant motion picture
//  args - optional parameters
void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvMerge( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    printf("Nonzero count %d\n", cvCountNonZero(mask));

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {

        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                                      cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
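A minimal driver loop for update_mhi above, in the spirit of the original OpenCV motempl demo; the camera index, window name, and diff threshold of 30 are assumptions:

#include <cv.h>
#include <highgui.h>

int motempl_demo(void)
{
    CvCapture *capture = cvCaptureFromCAM(0);
    if (!capture)
        return -1;

    cvNamedWindow("Motion", 1);
    IplImage *motion = NULL;

    for (;;) {
        IplImage *frame = cvQueryFrame(capture);
        if (!frame)
            break;
        if (!motion) {
            /* dst must be 3-channel 8U: update_mhi merges the MHI into channel 0 */
            motion = cvCreateImage(cvSize(frame->width, frame->height), 8, 3);
            cvZero(motion);
            motion->origin = frame->origin;
        }
        update_mhi(frame, motion, 30);   /* diff_threshold = 30, as in the demo */
        cvShowImage("Motion", motion);
        if (cvWaitKey(10) >= 0)
            break;
    }
    cvReleaseCapture(&capture);
    cvDestroyWindow("Motion");
    return 0;
}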
Code example #17
0
File: Blink.cpp Project: sreedal/GC
void Blink::Detect(IplImage* newFrame)
{
	CvMemStorage* storage=cvCreateMemStorage(0);
	CvSeq* contour;
	CvPoint start,end;
	if(curr==NULL){
		curr=cvCreateImage(cvSize(240,180),8,1);//cvGetSize(newFrame)
		tmp=cvCreateImage(cvGetSize(newFrame),8,1);
		cvCvtColor(newFrame,tmp,CV_BGR2GRAY);
		cvResize(tmp,curr,1);
		return;
	}
	if(prev==NULL){
		prev=cvCreateImage(cvSize(240,180),8,1);//cvGetSize(newFrame)
	}
	// shift the previous frame out and bring the new one in (the original did this
	// twice on the second call, which left prev and curr identical)
	cvCopy(curr,prev,NULL);
	cvCvtColor(newFrame,tmp,CV_BGR2GRAY);
	cvResize(tmp,curr,1);
	//Find Connected Components in the difference image
	assert(curr && prev);
	IplImage* diff=cvCreateImage(cvGetSize(curr),8,1);
	cvZero(diff);
	if(leftEye && rightEye){
		start.x=leftEye->x-BORDER;
		start.y=leftEye->y-BORDER;
		end.x=rightEye->x+rightEye->width+BORDER;
		end.y=rightEye->y+rightEye->height+BORDER;
		cvSetImageROI(curr,cvRect(start.x,start.y,end.x-start.x,end.y-start.y));
		cvSetImageROI(prev,cvRect(start.x,start.y,end.x-start.x,end.y-start.y));
		cvSetImageROI(diff,cvRect(start.x,start.y,end.x-start.x,end.y-start.y));
	}
	cvSub(curr,prev,diff,NULL);
	cvThreshold(diff, diff, 5, 255, CV_THRESH_BINARY);
	cvMorphologyEx(diff, diff, NULL, kernel, CV_MOP_OPEN, 1);
	if(leftEye && rightEye){
		cvResetImageROI(curr);
		cvResetImageROI(prev);
		cvResetImageROI(diff);
	}
	cvShowImage(ONE,diff);
	cvWaitKey(1);
	int nc=cvFindContours(diff,storage,&contour,sizeof(CvContour),CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE,cvPoint(0,0));
	cvClearMemStorage(storage);
	cvReleaseImage(&diff);
	//Check if the components found is a eye pair
	if(nc!=2 || contour==0){
		//Detection Failed try tracking if eyepair from previous image exist
		return;
	}
	CvRect l,r;
	l=cvBoundingRect(contour,1);
	contour=contour->h_next;
	r=cvBoundingRect(contour,1);
	if(abs(l.width-r.width)>5 || abs(l.height-r.height)>5 || abs(l.y-r.y)>5 || (abs(l.x-r.x)/l.width)<2 || (abs(l.x-r.x)/l.width)>5 ){ // separation test presumably compares the two x-coordinates (the original read l.x-l.y)
		//Detection Failed
		return;
	}
	//Detected good -> setup variables for future use
	// NOTE: l and r are stack locals, so leftEye/rightEye dangle once this function
	// returns; they should really be copied by value into persistent storage.
	leftEye=&l;
	rightEye=&r;
	if(!leftEyeTracker) leftEyeTracker=new TemplateTracker(1.5,0.1);
	if(!rightEyeTracker) rightEyeTracker=new TemplateTracker(1.5,0.1);
	leftEyeTracker->StartTracking(newFrame,leftEye);
	rightEyeTracker->StartTracking(newFrame,rightEye);
}
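The eye-pair test above packs several geometric checks into one condition. A sketch of the same checks as a standalone predicate, under the assumption that the separation test was meant to compare the two x-coordinates (see the note on the fixed condition above):

#include <cstdlib>   // abs
#include <cv.h>

// Hypothetical helper: true when two candidate rectangles plausibly form an eye pair.
static bool isEyePair(const CvRect &l, const CvRect &r)
{
    if (abs(l.width  - r.width)  > 5) return false;  // similar size
    if (abs(l.height - r.height) > 5) return false;
    if (abs(l.y      - r.y)      > 5) return false;  // roughly level
    int separation = abs(l.x - r.x);                 // assumed: horizontal eye distance
    return separation >= 2 * l.width && separation <= 5 * l.width;
}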
Code example #18
0
int main()
{
	bool salir=FALSE;   // loop-exit flag ("salir" = quit)

do
{
	IplImage *im;
	char eleccion;
	bool j=TRUE;

	//Panel

	printf("Elija la imagen que quiere cargar\n");
	printf("Imagenes del programa:\n\n"
		   "A=2_bolas\n"
		   "B=3_bolas\n"
		   "C=4_bolas\n"
		   "D=6_bolas\n"
		   "E=bola_azul\n"
		   "F=bola_roja\n"
		   "G=bolas_cortadas\n"
		   "H=bola_amarilla_blanca\n"
		   "I=bola_amarilla_blanca_+intensidad\n"
		   "J=bola_amarilla1\n"
		   "K=bolas_cortadas_+intensidad\n"
		   "L=bolas_juntas\n"
		   "M=cambio_angulo_iluminacion\n"
		   "N=bolas_pegadas_1\n"
		   "O=bolas_pegadas_2\n"
		   "P=bolas_pegadas_3\n"
		   "Q=bolas_pegadas_4\n"
		   "R=bolas_pegadas_4_+intensidad\n"
		   "S=bolas_pegadas_rotas\n"
		   "T=bolas_pegadas_rotas_2\n"
		   

		   );

	printf("X=SALIR\n\n");

while(j==TRUE)
{
	

	
	scanf("%c",&eleccion);
	
	

	switch(eleccion)
	{
	case 'A':{   char NombreImagen[]="2_bolas.jpg"; im=cvLoadImage(NombreImagen, -1); j=FALSE;}
		    break;

	case 'B':  {char NombreImagen[]="3_bolas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'C': { char NombreImagen[]="4_bolas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'D':  { char NombreImagen[]="6_bolas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'E':  { char NombreImagen[]="bola_azul.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;
		   
	case 'F':  {char NombreImagen[]="bola_roja.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'G':  {char NombreImagen[]="bolas_cortadas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'H':  {char NombreImagen[]="bola_amarilla_blanca.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'I': { char NombreImagen[]="bola_amarilla_blanca_+intensidad.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'J': { char NombreImagen[]="bola_amarilla1.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'K':  { char NombreImagen[]="bolas_cortadas_+intensidad.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'L': { char NombreImagen[]="bolas_juntas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'M':  {char NombreImagen[]="cambio_angulo_iluminacion.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'N':  {char NombreImagen[]="bolas_pegadas_1.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'O':  {char NombreImagen[]="bolas_pegadas_2.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'P':  {char NombreImagen[]="bolas_pegadas_3.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'Q':  {char NombreImagen[]="bolas_pegadas_4.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'R':  {char NombreImagen[]="bolas_pegadas_4_+intensidad.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'S':  {char NombreImagen[]="bolas_pegadas_rotas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'T':  {char NombreImagen[]="bolas_pegadas_rotas_2.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'X':  {salir=TRUE; return 0;}
		    break;


	default:{ printf("Eleccion incorrecta, vuelva a elegir una opcion\n"); j=TRUE; }
	}

}

//--------------------------------------------------------------------------------------------------------------------------------------------------------------------------

//OBTAIN A BINARY IMAGE WITH ONLY THE BLUE BALLS AND ANOTHER WITH ONLY THE RED ONES

	
	
	IplImage *Imagen_RGB;

	IplImage *Imagen_umbr;
	IplImage *Imagen_umbr_2;

	CvSize Dimensiones;

	//thresholds for the S and H images. MinMax() is not used in this step because
	//fixed thresholds turned out to work better here
	int umbral1=150;
	int umbral2=100;

	//convert from BGR to RGB

	Dimensiones= cvGetSize(im);

	Imagen_RGB=cvCreateImage(Dimensiones,IPL_DEPTH_8U,3);
	cvCvtColor(im,Imagen_RGB,CV_BGR2RGB);

IplImage *ImagenHSV;
IplImage *ImagenH,*ImagenS,*ImagenV;

//convert from RGB to HSV

ImagenHSV=cvCreateImage(Dimensiones,IPL_DEPTH_8U,3);

cvCvtColor(Imagen_RGB,ImagenHSV,CV_RGB2HSV);

//Extract the three components of the HSV image: H, S and V

ImagenH=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1);
ImagenS=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1);
ImagenV=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1);
cvSplit(ImagenHSV,ImagenH,ImagenS,ImagenV,0);

//binary images for thresholding S and H

Imagen_umbr=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1);
Imagen_umbr_2=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1);

//thresholding
cvThreshold(ImagenS,Imagen_umbr,umbral1,255,CV_THRESH_BINARY);

cvThreshold(ImagenH,Imagen_umbr_2,umbral2,255,CV_THRESH_BINARY_INV);



//Split the image into its R, G and B planes

IplImage *ImagenR=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1);
IplImage *ImagenG=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1);
IplImage *ImagenB=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1);

cvSplit(Imagen_RGB,ImagenR,ImagenG,ImagenB,0);




//From here on, a series of morphological transformations is applied in order to
//separate the blue balls and the red balls into their own binary images.

    //create the structuring element

	 IplConvKernel* element = 0;
     const int element_shape =CV_SHAPE_ELLIPSE;
	 int pos=1;

     element= cvCreateStructuringElementEx(pos*2+1,pos*2+1,pos,pos, element_shape,0);
	
	


    IplImage * temp= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1);
    IplImage *temp2=cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1);
	IplImage *resta=cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1);

	//this yields all the balls, binarized

	cvMorphologyEx(Imagen_umbr,temp,temp, NULL,CV_MOP_TOPHAT,2);     //top-hat: detects only the shadows of the balls (the lighting comes from above)
	//cvMorphologyEx(Imagen_umbr,temp,temp, NULL,CV_MOP_BLACKHAT,2); //could be applied instead if the shadows formed on the opposite side
	cvAbsDiff (Imagen_umbr, temp ,temp); //subtract: original - top-hat
	cvMorphologyEx(temp,temp,temp, NULL,CV_MOP_CLOSE,6); //apply closing

	//this yields the blue balls, binarized

	cvMorphologyEx(Imagen_umbr_2,temp2,temp2, NULL,CV_MOP_TOPHAT,1);     //top-hat
	//cvMorphologyEx(Imagen_umbr,temp,temp, NULL,CV_MOP_BLACKHAT,2);
	cvAbsDiff (Imagen_umbr_2, temp2 ,temp2); //subtract: original - top-hat
	cvMorphologyEx(temp2,temp2,temp2, NULL,CV_MOP_CLOSE,6); //apply closing

	//Dilate and erode the same number of times so the balls stay at roughly the same size.
	//Doing it many times (15) removes as many defects as possible caused by shadows and
	//lighting contrast
	cvDilate(temp2,temp2,element,15);

	cvErode(temp2,temp2,element,15);

	cvAbsDiff (temp2, temp ,resta); //subtract the all-balls image minus the blue-balls image, dilate and erode many times,
	//and finally only the red balls remain
	cvDilate(resta,resta,element,15);//dilate

	cvErode(resta,resta,element,15);//erode

    //Some unwanted contour may still remain in the binary images. Since the same morphological
	//transformations are applied to both binary images, both contain the same defect, so an image
	//with only the defects is computed and then subtracted from both images.


IplImage * temp3= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1);
IplImage * temp4= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1);
IplImage * Im_defectos_comunes= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1);
IplImage * Im_bolas_azules= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1);
IplImage * Im_bolas_rojas= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1);


cvThreshold(temp2,temp3,umbral2,255,CV_THRESH_BINARY_INV);//invert the blue-balls image (temp2)

cvThreshold(resta,temp4,umbral2,255,CV_THRESH_BINARY_INV);//invert the red-balls image (resta)

cvAnd(temp3,temp4,Im_defectos_comunes,NULL);//AND the two images; the result contains only the defects common to both

cvAbsDiff (temp2,Im_defectos_comunes,Im_bolas_azules);//subtract the defects from the blue balls

cvAbsDiff (resta, Im_defectos_comunes ,Im_bolas_rojas);//subtract the defects from the red balls

//Now there is one binary image with only the blue balls and another with only the red ones.
//-------------------------------------------------------------------------------------------------------------------------------------------------------------------------



//COMPUTE THE HISTOGRAM OF THE G IMAGE

//New image on which to draw the histogram
IplImage *histImage;
//Histogram variables
int hist_size=256;
int NivelGris;
float NumPixels;
//Histogram structure to store the information
CvHistogram *hist;

//New image on which to draw the histogram
histImage = cvCreateImage(cvSize(256,256), 8, 1);
//Histogram structure to store the information
hist = cvCreateHist(1, &hist_size, CV_HIST_ARRAY,NULL, 1);
//compute the histogram. The G plane is used because it has more contrast than the
//grayscale image, although grayscale would also work
cvCalcHist(&ImagenG,hist,0,NULL);
cvSetZero(histImage);

long Histograma[256];

//draw the histogram
for(NivelGris=0;NivelGris<hist_size;++NivelGris)
{
NumPixels=cvQueryHistValue_1D(hist,NivelGris)/15;
cvLine(histImage,cvPoint(NivelGris,256),cvPoint(NivelGris,256-NumPixels),CV_RGB(255,255,255),1,8,0);

Histograma[NivelGris]=NumPixels;//store the pixel count for each gray level in an array
}

cvReleaseHist(&hist);
cvSaveImage("Histograma.jpg",histImage,0);
      
//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------


//THRESHOLDING OF THE G IMAGE

IplImage *imagen_bin;
CvMemStorage *Memoria;
CvSeq *Contorno, *Primer_Contorno;
int Nc;
//imagen=cvLoadImage("herramientas.tif",CV_LOAD_IMAGE_GRAYSCALE);
imagen_bin=cvCreateImage(cvGetSize(ImagenG),8,1);
//imagen_color=cvCreateImage(cvGetSize(ImagenG),8,3);

//threshold ImagenG
int umbral;

umbral=MinMax(Histograma);

//For some images, because of extra lighting or other objects entering the scene (such as
//a hand), the histogram's Gaussians get very close together or only one of them appears.
//In that case MinMax() computes too high a threshold and the contours of some balls are
//missed, so a maximum threshold is enforced

if(umbral>100)
{
	umbral=100;
}

cvLine(histImage,cvPoint(umbral,256),cvPoint(umbral,0),CV_RGB(255,255,255),1,8,0);//draw the threshold on the histogram

cvThreshold(ImagenG,imagen_bin,umbral,255,CV_THRESH_BINARY_INV);//binarize the G image

cvMorphologyEx(imagen_bin,imagen_bin,imagen_bin, NULL,CV_MOP_CLOSE,6);//apply closing to remove the contrast changes inside
//the balls caused by light reflections

	
 



//---------------------------------------------------------------------------------------------------------------------------------------------------------------------


// COMPUTATION OF CONTOURS, AREAS, PERIMETERS, BOXES AND BOX CENTERS IN THE G IMAGE.

 IplConvKernel* element_2 = 0;
     const int element_shape_2 =CV_SHAPE_ELLIPSE;
	 int pos_2=1;
  
     element_2= cvCreateStructuringElementEx(pos_2*2+1,pos_2*2+1,pos_2,pos_2, element_shape_2,0);


Memoria=cvCreateMemStorage();
bool k=FALSE;
int n=0;
bool pelotas_juntas=FALSE;
int i;
double *perimetro;
double *area;
CvBox2D *BoundBox;

 CvPoint *centro;

int bolas_rotas_azules=0;
int bolas_rotas_rojas=0;

CvScalar s3;






     Nc=cvFindContours(imagen_bin,Memoria,&Primer_Contorno,sizeof(CvContour),CV_RETR_EXTERNAL);
   


     perimetro=(double*)malloc(Nc*sizeof(double));
     area=(double*)malloc(Nc*sizeof(double));
     BoundBox=(CvBox2D*)malloc(Nc*sizeof(CvBox2D));
	 centro=(CvPoint*)malloc(Nc*sizeof(CvPoint));
	
	
     for(i=0,Contorno=Primer_Contorno;Contorno!=NULL;Contorno=Contorno->h_next,++i)
     {
	      area[i]=cvContourArea(Contorno,CV_WHOLE_SEQ);
	      perimetro[i]=cvArcLength(Contorno,CV_WHOLE_SEQ,1);
	      BoundBox[i]=cvMinAreaRect2(Contorno,NULL);
		  

     }

	  for(i=0;i<Nc;++i)
      {

		  centro[i] = cvPoint( BoundBox[i].center.x,BoundBox[i].center.y);
      }

//----------------------------------------------------------------------------------------------------------------------------------------------------------------------------	  

//DETECT BROKEN BALLS
	  
	  
	  
	  
IplImage * inv_bolas_azules, *inv_bolas_rojas;
CvMemStorage *storage_2;
CvMemStorage *storage_3;
CvSeq *Contorno_2, *Primer_Contorno_2;
CvSeq *Contorno_3, *Primer_Contorno_3;

int Nc_2;
int Nc_3;
double *area_2;
double *area_3;
CvBox2D *BoundBox_2;
CvBox2D *BoundBox_3;
CvPoint *centro_2;
CvPoint *centro_3;



inv_bolas_azules=cvCreateImage(cvGetSize(Im_bolas_azules),8,1);

inv_bolas_rojas=cvCreateImage(cvGetSize(Im_bolas_rojas),8,1);

cvThreshold(Im_bolas_azules,inv_bolas_azules,128,255,CV_THRESH_BINARY_INV);
cvThreshold(Im_bolas_rojas,inv_bolas_rojas,128,255,CV_THRESH_BINARY_INV);

storage_2=cvCreateMemStorage();
storage_3=cvCreateMemStorage();


//detect the broken blue balls
Nc_2=cvFindContours(inv_bolas_azules,storage_2,&Primer_Contorno_2,sizeof(CvContour),CV_RETR_EXTERNAL); //find contours in the binary image containing
                                                                                                       //only the blue balls



     
     area_2=(double*)malloc(Nc_2*sizeof(double));//size of the area vector

     BoundBox_2=(CvBox2D*)malloc(Nc_2*sizeof(CvBox2D));//size of the BoundBox_2 vector

	 centro_2=(CvPoint*)malloc(Nc_2*sizeof(CvPoint));//size of the centro_2 vector

     for(i=0,Contorno_2=Primer_Contorno_2;Contorno_2!=NULL;Contorno_2=Contorno_2->h_next,++i)
     {
	      area_2[i]=cvContourArea(Contorno_2,CV_WHOLE_SEQ);//compute the area of each contour

	      BoundBox_2[i]=cvMinAreaRect2(Contorno_2,NULL);//compute the bounding box of each contour
	 }

	  for(i=0;i<Nc_2;++i)
      {
		  centro_2[i] = cvPoint( BoundBox_2[i].center.x,BoundBox_2[i].center.y);//compute the center of each box (was BoundBox[i], a copy-paste slip)
      }

	 
	  
	  
	 
	  
	 //For each contour, an area smaller than 2500 means the ball is broken

	  for(i=0;i<Nc_2;++i)
	  {
		  if(area_2[i]<2500)
		  {
			  bolas_rotas_azules++;
			  DibujarBox2D(im,BoundBox_2[i]);
			  printf("Bola  rota azul en:\n x=%d\n y=%d\n",centro_2[i].y,centro_2[i].x); //use centro_2 (the broken-ball centers), not centro
		  }
	  }


	//Detect the broken red balls

	// Same procedure as for the broken blue balls, but finding contours in the binary image containing only the red balls
	  Nc_3=cvFindContours(inv_bolas_rojas,storage_3,&Primer_Contorno_3,sizeof(CvContour),CV_RETR_EXTERNAL);



     
     area_3=(double*)malloc(Nc_3*sizeof(double));
     
     BoundBox_3=(CvBox2D*)malloc(Nc_3*sizeof(CvBox2D));
	 
	  centro_3=(CvPoint*)malloc(Nc_3*sizeof(CvPoint));//size for Nc_3 centers (was Nc)
	 
	 
	

     for(i=0,Contorno_3=Primer_Contorno_3;Contorno_3!=NULL;Contorno_3=Contorno_3->h_next,++i)
     {
	      area_3[i]=cvContourArea(Contorno_3,CV_WHOLE_SEQ);
	      
	      
	      BoundBox_3[i]=cvMinAreaRect2(Contorno_3,NULL);
		  
		  
		  
	 }

	  for(i=0;i<Nc_3;++i)
      {
		  centro_3[i] = cvPoint( BoundBox_3[i].center.x,BoundBox_3[i].center.y);//was BoundBox[i], a copy-paste slip
      }
	  
	  
	  
	 
	  for(i=0;i<Nc_3;++i)
	  {
		  if(area_3[i]<2000)
		  {
			  bolas_rotas_rojas++;
			  DibujarBox2D(im,BoundBox_3[i]);
			  printf("Bola  rota roja en:\n x=%d\n y=%d\n",centro_3[i].y,centro_3[i].x); //use centro_3, not centro
		  }
	  }

	  
	  
	  
//---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
//CASE OF TOUCHING BALLS

// When two or more balls are touching, the program finds a single contour whose area spans all
//of the touching balls. To solve this the contour perimeter is used: a perimeter threshold is
//chosen at which all balls are guaranteed to be separated, and while any perimeter exceeds that
//threshold the image is eroded until every perimeter falls below it.

// To detect touching balls, check whether any contour has an area larger than one ball's.
	 for(i=0;i<Nc;++i)
	 {
		 if(area[i]>4000)//if some contour's area is larger than a single ball's
		 {
			 k=TRUE;
			 pelotas_juntas=TRUE;
		 }

		 
	 }

while(k==TRUE)// This loop is entered if an area larger than one ball's was found
{

		 k=FALSE;
		 Nc=cvFindContours(imagen_bin,Memoria,&Primer_Contorno,sizeof(CvContour),CV_RETR_EXTERNAL);
  


     perimetro=(double*)malloc(Nc*sizeof(double));
     area=(double*)malloc(Nc*sizeof(double));
     
     BoundBox=(CvBox2D*)malloc(Nc*sizeof(CvBox2D));
	
	 centro=(CvPoint*)malloc(Nc*sizeof(CvPoint));
	
	 

     for(i=0,Contorno=Primer_Contorno;Contorno!=NULL;Contorno=Contorno->h_next,++i)
     {
	      area[i]=cvContourArea(Contorno,CV_WHOLE_SEQ);
	      perimetro[i]=cvArcLength(Contorno,CV_WHOLE_SEQ,1);
	     
	      BoundBox[i]=cvMinAreaRect2(Contorno,NULL);
		  
		 
		 
     }

	 for(i=0;i<Nc;++i)
      {
	       centro[i] = cvPoint( BoundBox[i].center.x,BoundBox[i].center.y);
      }


	

	 for(i=0;i<Nc;++i)
	 {
		 

		 if(perimetro[i]>100)
		 {
			 k=TRUE;
			 cvErode(imagen_bin,imagen_bin,element_2,1);
		 }

		
	 }



}



    

//------------------------------------------------------------------------------------------------------------------------------------------------------------


//COUNT THE NUMBER OF BALLS OF EACH COLOR AND THEIR POSITIONS




int bolas_azules=0;
int bolas_rojas=0;

int mano=0;
double radio=0.0;
CvScalar s;
CvScalar s2;


//Distinguish the balls in the case where none are touching
if( pelotas_juntas==FALSE)
{

    //Blue balls
    for(i=0;i<Nc;++i)//loop over all contours
       {

           s=cvGet2D(Im_bolas_azules,centro[i].y,centro[i].x);//take each center and check the pixel color in the blue-balls image
		   if(s.val[0]==0)// if it is 0, there may be a blue ball or a broken blue ball
		     {
		          if(area[i]>2000 && area[i]<4000)//blue ball
		            {
			             bolas_azules++;


			             radio=sqrt(area[i]/3.14);

			             cvCircle(
                                  im,
                                  centro[i],
                                  cvRound( radio ),
                                  CV_RGB(0x00,0xff,0xff));

			                      printf("Bola azul en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x);
		             }
		     }
}


			   


	//Red balls
     for(i=0;i<Nc;++i)//loop over all contours
       {

	       s2=cvGet2D(Im_bolas_rojas,centro[i].y,centro[i].x);//take the center and check the pixel color in the red-balls image

	       if(s2.val[0]==0)// if it is 0, there may be a red ball or a broken red ball
		     {
		         if(area[i]>2000 && area[i]<4000)//red ball
		           {
			            bolas_rojas++;

			            radio=sqrt(area[i]/3.14);

			            cvCircle(
                                  im,
                                  centro[i],
                                  cvRound( radio ),
                                  CV_RGB(0xff,0x00,0x00));

			                      printf("Bola roja en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x);
		           }
		   
		   
		     }
        }


		



}

if( pelotas_juntas==TRUE)
{
    float radio=30;//Since touching balls are separated by eroding the image, the real areas of the
	              //balls are no longer available, so an approximate radius is stipulated.

	//Blue balls
    for(i=0;i<Nc;++i)
       {

          s=cvGet2D(Im_bolas_azules,centro[i].y,centro[i].x);//take each center and check the pixel color in the blue-balls image
		  if(s.val[0]==0)// if it is 0, there is a blue ball. Broken balls cannot occur here: erosion only keeps contours
			             //with a perimeter larger than one ball's, and a broken ball's perimeter is always smaller
		   {
		   

			   cvCircle(
                        im,
                        centro[i],
                        cvRound( radio ),
                        CV_RGB(0x00,0xff,0xff));

			   bolas_azules++;

			   printf("Bola azul en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x);





		   }
		   
		   
		

	
       }






	//Red balls
    for(i=0;i<Nc;++i)//loop over all contours
       {

	       s2=cvGet2D(Im_bolas_rojas,centro[i].y,centro[i].x);//take the center and check the pixel color in the red-balls image

	       if(s2.val[0]==0)// if it is 0, there is a red ball
		     {
		   
			     cvCircle(
                          im,
                          centro[i],
                          cvRound( radio ),
                          CV_RGB(0xff,0x00,0x00));

			     bolas_rojas++;

			     printf("Bola roja en:\n x=%d\n y=%d\n",centro[i].y,centro[i].x);
		     }
		   
		   
		
	   }

}



printf("bolas azules:%d\n",bolas_azules);
printf("bolas rotas azules:%d\n", bolas_rotas_azules);
printf("bolas rojas:%d\n",bolas_rojas);
printf("bolas rotas rojas:%d\n\n",bolas_rotas_rojas);


printf("ORDENAR AL ROBOT\n\n\n");
if(bolas_rotas_azules>0)
{
	printf("METER BOLAS AZULES DEFECTUOSAS EN CAJA DE BOLAS AZULES DEFECTUOSAS\n\n");
}

if(bolas_rotas_rojas>0)
{
	printf("METER BOLAS ROJAS DEFECTUOSAS EN CAJA DE BOLAS ROJAS DEFECTUOSAS\n\n");
}

if(bolas_azules>0 || bolas_rojas>0)
{
	printf("EMPAQUETAR BOLAS\n\n");
}



//----------------------------------------------------------------------------------------------------------------------------------------------------------------------





cvWaitKey(0);

//--------------------------------------------------------------------------------------------------------------------------------------------------------------------
//DISPLAY

cvNamedWindow("Original", CV_WINDOW_AUTOSIZE);
cvShowImage("Original", im );

//cvNamedWindow("imagen_bin", CV_WINDOW_AUTOSIZE);
//cvShowImage("imagen_bin", imagen_bin );

//Show the red, green and blue color planes

//cvNamedWindow("R", CV_WINDOW_AUTOSIZE);
//cvShowImage("R",ImagenR);

//cvNamedWindow("G", CV_WINDOW_AUTOSIZE);
//cvShowImage("G",inv_bolas_azules);

//cvNamedWindow("B", CV_WINDOW_AUTOSIZE);
//cvShowImage("B",inv_bolas_rojas);

cvNamedWindow("bolas_azules", CV_WINDOW_AUTOSIZE);
cvShowImage("bolas_azules",Im_bolas_azules);

cvNamedWindow("bolas_rojas", CV_WINDOW_AUTOSIZE);
cvShowImage("bolas_rojas",Im_bolas_rojas);

//Show the image

cvNamedWindow("Histograma de G", CV_WINDOW_AUTOSIZE);
cvShowImage("Histograma de G", histImage );

cvWaitKey(0);

//---------------------------------------------------------------------------------------------------------------------------------------------------------------

//FREE MEMORY
cvDestroyAllWindows();

cvReleaseImage(&ImagenR);
cvReleaseImage(&ImagenG);
cvReleaseImage(&ImagenB);
cvReleaseImage(&imagen_bin);
cvReleaseImage(&histImage);
cvReleaseImage(&im);
cvReleaseImage(&Imagen_RGB);
cvReleaseImage(&Imagen_umbr);
cvReleaseImage(&Imagen_umbr_2);
cvReleaseImage(&ImagenHSV);
cvReleaseImage(&ImagenH);
cvReleaseImage(&ImagenS);
cvReleaseImage(&ImagenV);
cvReleaseImage(&temp);
cvReleaseImage(&temp2);
cvReleaseImage(&temp3);
cvReleaseImage(&temp4);
cvReleaseImage(&Im_defectos_comunes);
cvReleaseImage(&Im_bolas_azules);
cvReleaseImage(&Im_bolas_rojas);
cvReleaseImage(&inv_bolas_rojas);
cvReleaseImage(&inv_bolas_azules);

cvReleaseMemStorage(&Memoria);
cvReleaseMemStorage(&storage_2);
cvReleaseMemStorage(&storage_3);
cvReleaseStructuringElement(&element);
cvReleaseStructuringElement(&element_2);

//free the arrays allocated with malloc on each iteration (the original leaked them)
free(perimetro);
free(area);
free(BoundBox);
free(centro);
free(area_2);
free(BoundBox_2);
free(centro_2);
free(area_3);
free(BoundBox_3);
free(centro_3);

}while(salir==FALSE);

return 0;


}
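The program above relies on MinMax(Histograma) to pick the threshold in the valley between the two histogram peaks, but that function is not shown. A hypothetical reconstruction of such a valley threshold — an assumption about its behavior, not the original implementation:

#include <stdlib.h>   // abs

// Hypothetical MinMax(): locate the two dominant peaks of a 256-bin histogram
// and return the gray level of the valley between them.
int MinMax(long Histograma[256])
{
    int p1 = 0, p2 = 0;
    for (int i = 0; i < 256; ++i)   // first peak: global maximum
        if (Histograma[i] > Histograma[p1]) p1 = i;
    for (int i = 0; i < 256; ++i)   // second peak: maximum weighted by distance to p1
        if (Histograma[i] * (long)abs(i - p1) > Histograma[p2] * (long)abs(p2 - p1)) p2 = i;

    int lo = p1 < p2 ? p1 : p2, hi = p1 < p2 ? p2 : p1;
    int valley = lo;
    for (int i = lo; i <= hi; ++i)  // valley: minimum between the two peaks
        if (Histograma[i] < Histograma[valley]) valley = i;
    return valley;
}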
Code example #19
0
File: rtbivroi.c Project: Ur-Ideas-LabS/hamuju
int main(int argc, char*argv[]) {

	int device;
	cvNamedWindow(camwindow,CV_WINDOW_AUTOSIZE);
	CvCapture* capture;
	CvMat* intrinsic ;
	CvMat* distortion;
	if (argc == 4 && strcmp(argv[1],"-nocalib") == 0){ // test argc first so argv[1] is never read when absent
		MODE = 1;
		H = (CvMat*)cvLoad(argv[2],NULL,NULL,NULL);
		device = atoi(argv[3]);
		capture = cvCaptureFromCAM( device) ; 
		Z=28;
			printf("\nUsage:\nReset: 'r'\nCrop-ROI: 'c'\nZoom: 'u' +/- 'd'\nSave: 's'\n Quit: 'q' | ESC key\n");
	}
	else if (argc == 7 && strcmp(argv[1],"-calib") == 0) {
		MODE = 2;
		board_w = atoi(argv[2]);
		board_h = atoi(argv[3]);
		intrinsic = (CvMat*)cvLoad(argv[4],NULL,NULL,NULL);  
		distortion = (CvMat*)cvLoad(argv[5],NULL,NULL,NULL);
		device = atoi(argv[6]);
		capture = cvCaptureFromCAM( device) ;
			printf("\nUsage:\nZoom: 'u' +/- 'd'\nBird-I-View: 't'\n Quit: 'q' | ESC key\n");
	}else {
		printf("Error:Wrong numbers of input parameters\n");
		printf("* if -option == -nocalib then only first 2 parameters are required \
 			 	\n Homography matrix \
 			  	\n usb-camera device driver \
 			* if -option == -calib then only 5 addition parameter are required \
 				\n  #inner checkerboard corners generally it is 7x7  \
 				\n  Intrinsic (xml) from Camera Calibration \
 				\n  Distortion (xml) from Camera Calibration \
 				\n  usb-camera device driver\n");
		return -1;		
	}	
	
	if (capture == NULL ){
		perror("\nFailure to access camera device\n");
		return -1;
	}
	

	CvSize board_sz = cvSize( board_w, board_h );
	int board_n = board_w*board_h;
	int frame=0, found = 0,corner_count = 0;
	CvPoint2D32f corners[board_n];

	cvNamedWindow(BVwindow, CV_WINDOW_AUTOSIZE);	
	cvSetMouseCallback(BVwindow, on_mouse, 0);
	CvMat stub;
	IplImage *image = cvQueryFrame( capture ); 
	IplImage *gray_image = cvCreateImage(cvGetSize(image),8,1);//subpixel
	frame++;

	//Bird Eye View with ROI
	birdsview_image =  cvCreateImage( cvGetSize(image), image->depth,3  );
	Mbvimg = cvGetMat(birdsview_image,&stub,NULL,0);				
	while((MODE == 1 )){
		// Capture bird's view image every 10 frames
		if (frame % board_dt == 0) {
			cvWarpPerspective( 
				image,
				birdsview_image,
				H,
				CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS |CV_WARP_INVERSE_MAP  ,
				cvScalarAll(0) );
			cvShowImage( BVwindow, birdsview_image );
			updateImage();
			frame=1;	
		}

		char key = (char) cvWaitKey(2);
		switch( (char) key )
		{
			case 'r':
				reset();
				if (frame % board_dt != 0) // sychronized updated
					updateImage();
				break;
			case 'c':
				BirdEyeROI();
				break;
			case 'u':
				Z+=0.5;
				CV_MAT_ELEM(*H,float,2,2) = Z;
				printf("\n%f",Z);
				break;
			case 'd':
				Z-=0.5;
				CV_MAT_ELEM(*H,float,2,2) = Z;
				printf("\n%f",Z);
				break;
			case 's':
				cvSaveImage("birdviewROIimg.bmp",birdsview_image,0);
				printf("\nImage Saved! Name: birdviewROIimg\n");
				break;
			case 27:
			case 'q':
				return 0;
				break;
		}				
		cvShowImage(camwindow, image); //overlay points in web cam stream window
		image = cvQueryFrame(capture); //Get next image	
		frame++;
	}
	//Bird Eye View to extract Homography matrix			
	while((MODE == 2 )){

		//Skip every board_dt frames to allow user to move chessboard
		if (frame % board_dt == 0) {
			found = cvFindChessboardCorners(image,board_sz,corners,&corner_count,CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
			if (found){
				cvCvtColor( image,gray_image,CV_BGR2GRAY ); 
				cvFindCornerSubPix( gray_image,corners,corner_count,cvSize(11,11),cvSize(-1,-1),cvTermCriteria( CV_TERMCRIT_EPS |CV_TERMCRIT_ITER,30,0.1));
				frame=1;
			}				
		}
	
		char key = (char)cvWaitKey(2);
        	switch (key)
        	{
			case 'u':
				Z +=0.5;
				printf("\n%f",Z);            			
				break;        		
			case 'd':
            			Z -=0.5;
				printf("\n%f",Z);
            			break;
        		case 't':
            			BirdsIview(intrinsic,distortion,cvCloneImage(image),corners);
	    			break;
        		case 27:
        		case 'q':
				if (H != NULL){
					cvSave("H.xml",H,NULL,NULL,cvAttrList(0,0));
					printf("\nHomography Saved! Name: H.xml\n");
				}
            			return 0;
            			break;	
        	}
		cvDrawChessboardCorners(image, board_sz, corners, corner_count,found);
		cvShowImage(camwindow, image); //overlay points in web cam stream window
		
		frame++;
		image = cvQueryFrame(capture); //Get next image			
	}

	return 0;

}
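Writing Z into H(2,2) scales the homography's denominator, which acts as a zoom for the bird's-eye warp. A standalone sketch of a single warp under that scheme, assuming a homography saved as H.xml with 32-bit float elements, as in the program above; the function name and output path are illustrative:

#include <cv.h>
#include <highgui.h>

int birdseye_once(const char *h_path, const char *img_path, float Z)
{
    CvMat *H = (CvMat *)cvLoad(h_path, NULL, NULL, NULL);
    IplImage *img = cvLoadImage(img_path, CV_LOAD_IMAGE_COLOR);
    if (!H || !img)
        return -1;

    CV_MAT_ELEM(*H, float, 2, 2) = Z;   /* larger Z -> more zoomed-out bird's-eye view */

    IplImage *bird = cvCreateImage(cvGetSize(img), img->depth, 3);
    cvWarpPerspective(img, bird, H,
                      CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS | CV_WARP_INVERSE_MAP,
                      cvScalarAll(0));
    cvSaveImage("birdseye.png", bird, 0);

    cvReleaseImage(&bird);
    cvReleaseImage(&img);
    cvReleaseMat(&H);
    return 0;
}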
Code example #20
0
File: main.cpp Project: trevyn/opencvtest4
int main( int argc, char** argv )
{
    CvSize imgSize;                 
    imgSize.width = 320; 
    imgSize.height = 240; 
	
	int key= -1; 
	
	// set up opencv capture objects

    CvCapture* capture= cvCaptureFromCAM(0); 
	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 320);
	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 240);
	
    CvCapture* capture2= cvCaptureFromCAM(1); 
	cvSetCaptureProperty(capture2, CV_CAP_PROP_FRAME_WIDTH, 320);
	cvSetCaptureProperty(capture2, CV_CAP_PROP_FRAME_HEIGHT, 240);

    CvCapture* capture3= cvCaptureFromCAM(2); 
	cvSetCaptureProperty(capture3, CV_CAP_PROP_FRAME_WIDTH, 320);
	cvSetCaptureProperty(capture3, CV_CAP_PROP_FRAME_HEIGHT, 240);

    
	// allocate image storage (other createimage specifiers: IPL_DEPTH_32F, IPL_DEPTH_8U)
	
    IplImage* colourImage  = cvCloneImage(cvQueryFrame(capture)); 
    IplImage* greyImage    = cvCreateImage(cvGetSize(colourImage), IPL_DEPTH_8U, 1); 
    IplImage* hannImage    = cvCloneImage(greyImage); 
	IplImage *poc= cvCreateImage( cvSize( greyImage->width, kFFTStoreSize ), IPL_DEPTH_64F, 1 );
	IplImage *pocdisp= cvCreateImage( cvSize( greyImage->width, kFFTStoreSize ), IPL_DEPTH_8U, 1 );
	
	// set up opencv windows
	
    cvNamedWindow("hannImage", 1);
    cvNamedWindow("greyImage", 1); 
    cvNamedWindow("greyImage2", 1); 
    cvNamedWindow("greyImage3", 1); 
    cvNamedWindow("poc", 1);
	cvMoveWindow("greyImage", 40, 0);
	cvMoveWindow("hannImage", 40, 270);
	cvMoveWindow("poc", 365, 0);
	cvMoveWindow("greyImage2", 40, 540);
	cvMoveWindow("greyImage3", 365, 540);
	
	// set up storage for fftw
	
	fftw_complex *fftwSingleRow = ( fftw_complex* )fftw_malloc( sizeof( fftw_complex ) * kFFTWidth * 1 );
	fftw_complex *fftwSingleRow2 = ( fftw_complex* )fftw_malloc( sizeof( fftw_complex ) * kFFTWidth * 1 );
	fftw_complex *fftwStore = ( fftw_complex* )fftw_malloc( sizeof( fftw_complex ) * kFFTWidth * kFFTStoreSize );
		
	// loop
	
    while(key != 'q') 
	{ 

		//		double t = (double)cvGetTickCount();
		//		printf( "%g ms: start.\n", (cvGetTickCount() - t)/((double)cvGetTickFrequency()*1000.));

		// capture a frame, convert to greyscale, and show it
		
		cvCopyImage(cvQueryFrame(capture), colourImage);  // cvCopy because both are allocated already!
		cvCvtColor(colourImage,greyImage,CV_BGR2GRAY); 
		cvShowImage("greyImage",greyImage); 

        cvCopyImage(cvQueryFrame(capture2), colourImage);  // cvCopy because both are allocated already!
		cvCvtColor(colourImage,greyImage,CV_BGR2GRAY); 
		cvShowImage("greyImage2",greyImage); 

        cvCopyImage(cvQueryFrame(capture3), colourImage);  // cvCopy because both are allocated already!
		cvCvtColor(colourImage,greyImage,CV_BGR2GRAY); 
		cvShowImage("greyImage3",greyImage);

        
        key = cvWaitKey(3);

		// project and calculate hann window
		
		int i, j, k;
		uchar 	*inData= ( uchar* ) greyImage->imageData;
		uchar 	*hannImageData= ( uchar* ) hannImage->imageData;
		unsigned long acc;
		
		for( j = 0 ; j < greyImage->width ; j++) {
			
			// sum input column
			
			acc= 0;
			for( i = 0; i < greyImage->height ; i++ ) {
				acc+= inData[i * greyImage->widthStep + j];
			}
			
			// hann window and output (the window coefficient depends only on the
			// column j, so it is hoisted out of the row loop)
			
			double hannMultiplier = 0.5 * (1 - cos(2*3.14159*j/(greyImage->width-1)));  // hann window coefficient
			for( i = 0; i < greyImage->height ; i++ ) {
				hannImageData[i * hannImage->widthStep + j]=  hannMultiplier * (acc/greyImage->height);
			}
			
		}

		cvShowImage("hannImage",hannImage); 

		// set up forward FFT into store plan
		
		fftw_plan fft_plan = fftw_plan_dft_2d( 1 , kFFTWidth, fftwSingleRow, &(fftwStore[kFFTWidth * pocline]), FFTW_FORWARD,  FFTW_ESTIMATE );
				
		// load data for fftw
		
		for( int j = 0 ; j < kFFTWidth ; j++) {
			fftwSingleRow[j][0] = ( double )hannImageData[j];
			fftwSingleRow[j][1] = 0.0;
		}
		
		// run and release plan
		
		fftw_execute( fft_plan );
		fftw_destroy_plan( fft_plan );

		// compare pocline against ALL OTHER IN STORE

		for( int j = 0 ; j < kFFTStoreSize ; j++) {
			
			fftw_complex *img1= &(fftwStore[kFFTWidth * pocline]);
			fftw_complex *img2= &(fftwStore[kFFTWidth * j]);
			
			// obtain the cross power spectrum
			for( int i = 0; i < kFFTWidth ; i++ ) {
				
				// complex multiply complex img2 by complex conjugate of complex img1
				
				fftwSingleRow[i][0] = ( img2[i][0] * img1[i][0] ) - ( img2[i][1] * ( -img1[i][1] ) );
				fftwSingleRow[i][1] = ( img2[i][0] * ( -img1[i][1] ) ) + ( img2[i][1] * img1[i][0] );
				
				// set tmp to (real) absolute value of complex number res[i]
				
				double tmp = sqrt( pow( fftwSingleRow[i][0], 2.0 ) + pow( fftwSingleRow[i][1], 2.0 ) );
				
				// complex divide res[i] by (real) absolute value of res[i]
				// (this is the normalization step)
				
				if(tmp == 0) {
					fftwSingleRow[i][0]= 0;
					fftwSingleRow[i][1]= 0;
				}
				else {
					fftwSingleRow[i][0] /= tmp;
					fftwSingleRow[i][1] /= tmp;
				}
			}
				
			// run inverse
			
			fft_plan = fftw_plan_dft_2d( 1 , kFFTWidth, fftwSingleRow, fftwSingleRow2, FFTW_BACKWARD,  FFTW_ESTIMATE );
			fftw_execute(fft_plan);
			fftw_destroy_plan( fft_plan );

			// normalize and copy to result image

			double 	*poc_data = ( double* )poc->imageData;
			
			for( int k = 0 ; k < kFFTWidth ; k++ ) {
				poc_data[k+(j*kFFTWidth)] = (fftwSingleRow2[k][0] / ( double )kFFTWidth);
			}
				
			
		}
		
		
		

		// inc pocline
		
		pocline++;
		if(pocline == kFFTStoreSize-1)
			pocline= 0;
		
		
		// display??
		

//		for(int i = 0 ; i < kFFTWidth ; i++ ) {
//			poc_data[i+(pocline*kFFTWidth)] = (fftwStore[(kFFTWidth * pocline)+i])[1];
//		}
		
		// find the maximum value and its location
		CvPoint minloc, maxloc;
		double  minval, maxval;
		cvMinMaxLoc( poc, &minval, &maxval, &minloc, &maxloc, 0 );
		
		// print it
//		printf( "Maxval at (%d, %d) = %2.4f\n", maxloc.x, maxloc.y, maxval );
		
//        cvConvertScale(dft_re,dft_orig,255,0); //255.0*(max-min),0);

        
        
		cvCvtScale(poc, pocdisp, (1.0/(maxval/2))*255, 0);
		
		cvShowImage("poc",pocdisp);
		
		
		// set up fftw plans
//		fftw_plan fft_plan = fftw_plan_dft_2d( 1 , kFFTWidth, img2, img2, FFTW_FORWARD,  FFTW_ESTIMATE );
//		fftw_plan ifft_plan = fftw_plan_dft_2d( 1 , kFFTWidth, res,  res,  FFTW_BACKWARD, FFTW_ESTIMATE );
		
		
		
		// TODO FROM HERE
		
		/*
		
		if(key == 'r') {
			cvReleaseImage(&ref);
			ref= cvCloneImage(testOutImage);
			cvShowImage("ref",ref); 
		}
		
		
		
		{  // try phase correlating full img
			
			tpl= cvCloneImage(testOutImage);
			//				ref= cvCloneImage(testOutImage);
//				cvShowImage("tpl",tpl); 
//				cvShowImage("ref",ref); 
			
			
			if(ref == 0)
				continue;
			
			if( ( tpl->width != ref->width ) || ( tpl->height != ref->height ) ) {
				fprintf( stderr, "Both images must have equal width and height!\n" );
				continue
				;
			}
			
			// get phase correlation of input images
			
			phase_correlation( ref, tpl, poc );
			
			// find the maximum value and its location
			CvPoint minloc, maxloc;
			double  minval, maxval;
			cvMinMaxLoc( poc, &minval, &maxval, &minloc, &maxloc, 0 );
			
			// print it
			printf( "Maxval at (%d, %d) = %2.4f\n", maxloc.x, maxloc.y, maxval );
			
			cvCvtScale(poc, pocdisp, 1.0/(maxval/2), 0);
			
			cvShowImage("poc",pocdisp);
			
			cvReleaseImage(&tpl);
		
			
		}*/

//			cvReleaseImage(&ref);
//			ref= cvCloneImage(testOutImage);

//			printf( "%g ms: done.\n", (cvGetTickCount() - t)/((double)cvGetTickFrequency()*1000.));
			

	} 
	
	// release fftw buffers and opencv resources (the original leaked most of these)
	fftw_free(fftwSingleRow);
	fftw_free(fftwSingleRow2);
	fftw_free(fftwStore);
	cvReleaseImage(&poc);
	cvReleaseImage(&pocdisp);
	cvReleaseImage(&hannImage);
	cvReleaseImage(&greyImage);
	cvReleaseImage(&colourImage);
	cvReleaseCapture(&capture);
	cvReleaseCapture(&capture2);
	cvReleaseCapture(&capture3);
	
	return 0;
}
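The inner loop above forms the normalized cross-power spectrum R = (F2 · conj(F1)) / |F2 · conj(F1)| and inverse-transforms it; the location of the strongest real peak is the circular shift between the two signals. A self-contained 1-D sketch of the same idea, assuming FFTW3 and equal-length inputs (the function name is illustrative):

#include <math.h>
#include <fftw3.h>

/* Returns the index of the phase-correlation peak between two real signals
   of length n; that index (mod n) is the circular shift from a to b. */
int phase_correlate_1d(const double *a, const double *b, int n)
{
    fftw_complex *A = (fftw_complex *)fftw_malloc(sizeof(fftw_complex) * n);
    fftw_complex *B = (fftw_complex *)fftw_malloc(sizeof(fftw_complex) * n);
    fftw_complex *R = (fftw_complex *)fftw_malloc(sizeof(fftw_complex) * n);

    for (int i = 0; i < n; ++i) {
        A[i][0] = a[i]; A[i][1] = 0.0;
        B[i][0] = b[i]; B[i][1] = 0.0;
    }
    fftw_plan pa = fftw_plan_dft_1d(n, A, A, FFTW_FORWARD, FFTW_ESTIMATE);
    fftw_plan pb = fftw_plan_dft_1d(n, B, B, FFTW_FORWARD, FFTW_ESTIMATE);
    fftw_execute(pa); fftw_execute(pb);

    for (int i = 0; i < n; ++i) {             /* R = B * conj(A), normalized to unit magnitude */
        double re = B[i][0] * A[i][0] + B[i][1] * A[i][1];
        double im = B[i][1] * A[i][0] - B[i][0] * A[i][1];
        double mag = sqrt(re * re + im * im);
        R[i][0] = mag > 0 ? re / mag : 0;
        R[i][1] = mag > 0 ? im / mag : 0;
    }
    fftw_plan pr = fftw_plan_dft_1d(n, R, R, FFTW_BACKWARD, FFTW_ESTIMATE);
    fftw_execute(pr);

    int peak = 0;                              /* highest real response = shift */
    for (int i = 1; i < n; ++i)
        if (R[i][0] > R[peak][0]) peak = i;

    fftw_destroy_plan(pa); fftw_destroy_plan(pb); fftw_destroy_plan(pr);
    fftw_free(A); fftw_free(B); fftw_free(R);
    return peak;
}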
Code example #21
0
File: Invade.cpp Project: VincentYYF/RtspPlayer
// 参数: 
// img - 输入视频帧 // dst - 检测结果 
void Invade::update_mhi(IplImage* img, IplImage* dst, int diff_threshold)
{
	double timestamp = clock() / 100.; // get current time in seconds 时间戳 
	CvSize size = cvSize(img->width, img->height); // get current frame size,得到当前帧的尺寸 
	int i, idx1, idx2;
	IplImage* silh;
	IplImage* pyr = cvCreateImage(cvSize((size.width & -2) / 2, (size.height & -2) / 2), 8, 1);
	CvMemStorage *stor;
	CvSeq *cont;

	/*先进行数据的初始化*/
	if (!mhi || mhi->width != size.width || mhi->height != size.height)
	{
		if (buf == 0) //若尚没有初始化则分配内存给他 
		{
			buf = (IplImage**)malloc(N*sizeof(buf[0]));
			memset(buf, 0, N*sizeof(buf[0]));
		}

		for (i = 0; i < N; i++)
		{
			cvReleaseImage(&buf[i]);
			buf[i] = cvCreateImage(size, IPL_DEPTH_8U, 1);
			cvZero(buf[i]);// clear Buffer Frame at the beginning 
		}
		cvReleaseImage(&mhi);
		mhi = cvCreateImage(size, IPL_DEPTH_32F, 1);
		cvZero(mhi); // clear MHI at the beginning 
	} // end of if(mhi) 

	/* convert the frame to grayscale and store it in the last buffer slot */
	cvCvtColor(img, buf[last], CV_BGR2GRAY); // convert frame to grayscale 

	/* set the frame indices */
	idx1 = last;
	idx2 = (last + 1) % N; // index of (last - (N-1))th frame 
	last = idx2;

	// compute the frame difference 
	silh = buf[idx2]; // the difference image points at buf[idx2] 
	cvAbsDiff(buf[idx1], buf[idx2], silh); // get difference between frames 

	// binarize the difference image 
	cvThreshold(silh, silh, 50, 255, CV_THRESH_BINARY); // threshold it 

	// drop stale motion to update the motion history image
	cvUpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION); // update MHI 

	cvConvert(mhi, dst); // convert mhi to dst (dst = mhi) 

	// median filter to remove small noise 
	cvSmooth(dst, dst, CV_MEDIAN, 3, 0, 0, 0);

	cvPyrDown(dst, pyr, CV_GAUSSIAN_5x5); // downsample to suppress noise; result is a quarter of the original 
	cvDilate(pyr, pyr, 0, 1); // dilate to close discontinuous holes in the target 
	cvPyrUp(pyr, dst, CV_GAUSSIAN_5x5); // upsample back to the original size 

	// the following section finds the contours 
	// Create dynamic structure and sequence. 
	stor = cvCreateMemStorage(0);
	cont = cvCreateSeq(CV_SEQ_ELTYPE_POINT, sizeof(CvSeq), sizeof(CvPoint), stor);

	// find all contours 
	cvFindContours(dst, stor, &cont, sizeof(CvContour),
		CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

	// draw the contours directly using the rectangle stored in the contour 
	for (; cont; cont = cont->h_next)
	{
		CvRect r = ((CvContour*)cont)->rect;
		if (r.height * r.width > CONTOUR_MAX_AERA) // discard rectangles with small area 
		{
			cvRectangle(img, cvPoint(r.x, r.y),
				cvPoint(r.x + r.width, r.y + r.height),
				CV_RGB(255, 0, 0), 1, CV_AA, 0);
		}
	}
	// free memory 
	cvReleaseMemStorage(&stor);
	cvReleaseImage(&pyr);
}
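A minimal sketch of how update_mhi above might be driven from a capture loop (an assumption, not part of Invade.cpp; it presumes an Invade instance named invade and the globals mhi/buf/N/last used above):

// Hypothetical driver loop (sketch): feed camera frames to Invade::update_mhi
// and display the motion mask it produces.
CvCapture* cap = cvCaptureFromCAM(0);
IplImage* motion = 0;
Invade invade; // assumed default-constructible
while (cvWaitKey(10) < 0) {
	IplImage* frame = cvQueryFrame(cap);
	if (!frame) break;
	if (!motion)
		motion = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1);
	invade.update_mhi(frame, motion, 30); // 30 = diff threshold (assumed value)
	cvShowImage("motion", motion);
}
cvReleaseImage(&motion);
cvReleaseCapture(&cap);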
コード例 #22
0
ファイル: eyesDetector.cpp プロジェクト: polyu/camera-single
void eyesDetector::runEyesDetector(IplImage * input,IplImage * fullImage,CvPoint LT)

{

    bothEyesDetected=0;
    //static int countR;
    //static CvPoint leftEyeP,rightEyeP;
    eyesInformation.LE =cvPoint(0,0);
    eyesInformation.RE =cvPoint(0,0);
    eyesInformation.Length =0;
    if (input==0)
        return;

    double scale=1,j=0;
    //  //printf("%e SCALE \n\n",scale);

    IplImage *gray = cvCreateImage( cvSize(input->width,input->height/(2)), 8, 1 );
    IplImage *gray_fullimage = cvCreateImage( cvSize(fullImage->width,fullImage->height), 8, 1 );

    IplImage *gray_scale = cvCreateImage( cvSize(input->width/scale,input->height/(2*scale)), 8, 1 );

    cvSetImageROI(input,cvRect(0,(input->height)/(8),input->width,(input->height)/(2)));
    cvCvtColor( input, gray, CV_BGR2GRAY );
    cvResetImageROI(input);


    cvCvtColor( fullImage, gray_fullimage, CV_BGR2GRAY );
    cvResize(gray,gray_scale,CV_INTER_LINEAR);


    cvClearMemStorage( storage );
    CvSeq* nested_objects = cvHaarDetectObjects(gray_scale, nested_cascade, storage,1.4, 2, 0,cvSize(0,0) );
    int count=nested_objects ? nested_objects->total : 0;
    if (count==0)
    {
        nested_objects = cvHaarDetectObjects( gray_scale, nested_cascade_2, storage,1.4, 2, 0,cvSize(0,0) );
    }
    int leftT=0,rightT=0;
    count=nested_objects ? nested_objects->total : 0;
    //int Flag=0;
    if (count>1)
    {

        for ( j = 0; j < (nested_objects ? nested_objects->total : 0); j++ )
        {
            CvPoint center;
            CvRect* nr = (CvRect*)cvGetSeqElem( nested_objects, j );
            center.x = cvRound((LT.x+ (nr->x + nr->width*0.5)*scale));
            center.y = cvRound((LT.y + (input->height)/8 + (nr->y + nr->height*0.5)*scale));

            if ((center.x-4)>0 && (center.x-4)<(IMAGE_WIDTH-8) && (center.y-4)>0  && (center.y-4)<(IMAGE_HEIGHT-8))
            {
                cvSetImageROI(gray_fullimage,cvRect(center.x-4,center.y-4,8,8));
                IplImage* eyeDetect = cvCreateImage(cvSize(8,8),8,1);
                cvResize( gray_fullimage,eyeDetect, CV_INTER_LINEAR ) ;
                cvResetImageROI(gray_fullimage);

                double xCordinate=(center.x-4+CenterofMass(eyeDetect,0));
                double yCordinate=(center.y-4+CenterofMass(eyeDetect,1));

                cvReleaseImage( &eyeDetect );
                if (center.x<cvRound((LT.x + input->width*0.5)))
                {
                    eyesInformation.LE.x=xCordinate;
                    eyesInformation.LE.y=yCordinate;

                    //cvCircle( fullImage, cvPoint(eyesInformation.LE.x,eyesInformation.LE.y), 4, CV_RGB(128,128,128), 1, 8, 0 );
                    leftT=1;
                }
                else
                {

                    eyesInformation.RE.x=xCordinate;
                    eyesInformation.RE.y=yCordinate;
                    //cvCircle( fullImage, cvPoint(eyesInformation.RE.x,eyesInformation.RE.y), 4, CV_RGB(128,128,128), 1, 8, 0 );

                    rightT=1;
                }

            }
        }





        if (leftT==1 && rightT==1)
        {
            eyesInformation.Length=sqrt(pow(eyesInformation.RE.y-eyesInformation.LE.y,2)+ pow(eyesInformation.RE.x-eyesInformation.LE.x,2));
            bothEyesDetected=1;
        }

    }
    cvReleaseImage(&gray_fullimage);
    cvReleaseImage(&gray);
    cvReleaseImage(&gray_scale);
    cvClearMemStorage( storage );
}
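CenterofMass is called above but not shown in this listing; a plausible sketch, assuming it returns the brightness-weighted centroid of the 8x8 eye patch along one axis (0 = x, 1 = y):

// Hypothetical sketch of CenterofMass (an assumption, not the project's code).
static double CenterofMass(IplImage* patch, int axis)
{
    double sum = 0, weighted = 0;
    for (int y = 0; y < patch->height; y++) {
        for (int x = 0; x < patch->width; x++) {
            double v = (unsigned char)patch->imageData[y * patch->widthStep + x];
            sum += v;
            weighted += v * (axis == 0 ? x : y);
        }
    }
    return sum > 0 ? weighted / sum : 0; // centroid coordinate inside the patch
}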
コード例 #23
0
ファイル: trackcolour.cpp プロジェクト: tsandmann/ctbot-misc
int main() {
	raspicam::RaspiCam_Cv Camera; // Camera Object
	cv::Mat frame;

	// Set camera params
	Camera.set(CV_CAP_PROP_FORMAT, CV_8UC3); // For color
	Camera.set(CV_CAP_PROP_FRAME_WIDTH, 640);
	Camera.set(CV_CAP_PROP_FRAME_HEIGHT, 480);

	// Open camera
    std::cout << "Opening camera...\n";
    if (! Camera.open()) {
    	std::cerr << "Error opening camera!\n";
    	return -1;
    }


	// The two windows we'll be using
  cvNamedWindow("video");
	cvNamedWindow("thresh");

	cvMoveWindow("video", 0, 0);
	cvMoveWindow("thresh", 240, 0);

	int thresh_h[] {0, 18};
	int thresh_s[] {160, 255};
	int thresh_v[] {144, 255};
	const int max_thresh(255);

	cv::createTrackbar(" H min:", "thresh", &thresh_h[0], max_thresh, nullptr);
	cv::createTrackbar(" H max:", "thresh", &thresh_h[1], max_thresh, nullptr);
	cv::createTrackbar(" S min:", "thresh", &thresh_s[0], max_thresh, nullptr);
	cv::createTrackbar(" S max:", "thresh", &thresh_s[1], max_thresh, nullptr);
	cv::createTrackbar(" V min:", "thresh", &thresh_v[0], max_thresh, nullptr);
	cv::createTrackbar(" V max:", "thresh", &thresh_v[1], max_thresh, nullptr);

	// This image holds the "scribble" data, the tracked positions of the ball
	IplImage* imgScribble = NULL;

	cv::Mat frame_mat;

	while (true) {
		if (! Camera.grab()) {
			break;
		}
		Camera.retrieve(frame_mat);

		// Will hold a frame captured from the camera
		IplImage frame = frame_mat;

		// If this is the first frame, we need to initialize it
		if (imgScribble == NULL) {
			imgScribble = cvCreateImage(cvGetSize(&frame), 8, 3);
		}

		// Holds the yellow thresholded image (yellow = white, rest = black)
		IplImage* imgYellowThresh = GetThresholdedImage(&frame, thresh_h, thresh_s, thresh_v);

		// Calculate the moments to estimate the position of the ball
		CvMoments moments;
		cvMoments(imgYellowThresh, &moments, 1);

		// The actual moment values
		double moment10 = cvGetSpatialMoment(&moments, 1, 0);
		double moment01 = cvGetSpatialMoment(&moments, 0, 1);
		double area = cvGetCentralMoment(&moments, 0, 0);

		// Holding the last and current ball positions
		static int posX = 0;
		static int posY = 0;

		int lastX = posX;
		int lastY = posY;

		posX = moment10 / area;
		posY = moment01 / area;

		// Print it out for debugging purposes
		printf("position (%d,%d)\n", posX, posY);

		// We want to draw a line only if it's a valid position
		if (lastX > 0 && lastY > 0 && posX > 0 && posY > 0) {
			// Draw a yellow line from the previous point to the current point
			cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,255,255), 5);
		}

		// Add the scribbling image and the frame... and we get a combination of the two
		cvAdd(&frame, imgScribble, &frame);
		cvCvtColor(&frame, &frame, CV_BGR2RGB);
		cvShowImage("video", &frame);
//	cvShowImage("video", imgScribble);
		cvShowImage("thresh", imgYellowThresh);

		// Release the per-frame threshold image so it doesn't leak each iteration
		cvReleaseImage(&imgYellowThresh);

		// Wait for a keypress
		int c = cvWaitKey(10);
		if (c != -1) {
			// If pressed, break out of the loop
			break;
		}
	}

  return 0;
}
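GetThresholdedImage is referenced above but not included in this listing; a minimal sketch consistent with the call site, assuming the three int[2] arrays carry {min, max} for H, S and V:

// Hypothetical sketch of GetThresholdedImage (an assumption, not the original):
// convert to HSV and keep the pixels inside the trackbar-defined ranges.
IplImage* GetThresholdedImage(IplImage* img, int* h, int* s, int* v)
{
	IplImage* imgHSV = cvCreateImage(cvGetSize(img), 8, 3);
	cvCvtColor(img, imgHSV, CV_BGR2HSV);
	IplImage* imgThresh = cvCreateImage(cvGetSize(img), 8, 1);
	cvInRangeS(imgHSV, cvScalar(h[0], s[0], v[0], 0),
	           cvScalar(h[1], s[1], v[1], 0), imgThresh);
	cvReleaseImage(&imgHSV);
	return imgThresh; // the caller is expected to release this
}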
コード例 #24
0
int main(int argc, char ** argv)
{
	IplImage* image = cvLoadImage("1.png");
	IplImage* gray = cvCreateImage(cvGetSize(image), 8, 1);
	IplImage* bin = cvCreateImage(cvGetSize(image), 8, 1);
	IplImage* contourBeatle = cvCreateImage(cvGetSize(image), 8, 1);
	IplImage* contourScan = cvCreateImage(cvGetSize(image), 8, 1);
	IplImage* contourLane = cvCreateImage(cvGetSize(image), 8, 1);
	IplImage* rgbBeatle = cvCloneImage(image);
	IplImage* rgbScan = cvCloneImage(image);
	IplImage* rgbLane = cvCloneImage(image);
	
	cvCvtColor(image, gray, CV_BGR2GRAY);
	cvThreshold(gray, bin, 0, 255, CV_THRESH_OTSU);
	
	Beatle(bin, contourBeatle);
	Scan(bin, contourScan);
	Lane(bin, contourLane);

	for (int r = 0; r < image->height; ++r)
	{
		for (int c = 0; c < image->width; ++c)
		{
			if (*cvPtr2D(contourBeatle, r, c) == 0)
			{
				*cvPtr2D(rgbBeatle, r, c) = 0; // B
				*(cvPtr2D(rgbBeatle, r, c) + 1) = 0; // G
				*(cvPtr2D(rgbBeatle, r, c) + 2) = 0; // R
			}

			if (*cvPtr2D(contourScan, r, c) == 0)
			{
				*cvPtr2D(rgbScan, r, c) = 0; // B
				*(cvPtr2D(rgbScan, r, c) + 1) = 0; // G
				*(cvPtr2D(rgbScan, r, c) + 2) = 0; // R
			}

			if (*cvPtr2D(contourLane, r, c) == 0)
			{
				*cvPtr2D(rgbLane, r, c) = 0; // B
				*(cvPtr2D(rgbLane, r, c) + 1) = 0; // G
				*(cvPtr2D(rgbLane, r, c) + 2) = 0; // R
			}
		}
	}

	cvShowImage("image", image);
	cvShowImage("gray", gray);
	cvShowImage("bin", bin);
	cvShowImage("contourBeatle", contourBeatle);
	cvShowImage("contourScan", contourScan);
	cvShowImage("contourLane", contourLane);
	cvShowImage("rgbBeatle", rgbBeatle);
	cvShowImage("rgbScan", rgbScan);
	cvShowImage("rgbLane", rgbLane);
	cvSaveImage("im_contourBeatle.bmp", contourBeatle);
	cvSaveImage("im_contourScan.bmp", contourScan);
	cvSaveImage("im_contourLane.bmp", contourLane);
	cvSaveImage("im_rgbBeatle.bmp", rgbBeatle);
	cvSaveImage("im_rgbScan.bmp", rgbScan);
	cvSaveImage("im_rgbLane.bmp", rgbLane);

	while (true)
	{
		int c = cvWaitKey();
		
		if ((char)c == 27)
			break;
	}
	return 0;
}
コード例 #25
0
/* ///////////////////// chess_corner_test ///////////////////////// */
void CV_ChessboardDetectorTimingTest::run( int start_from )
{
    int code = cvtest::TS::OK;

    /* test parameters */
    char   filepath[1000];
    char   filename[1000];

    CvMat*  _v = 0;
    CvPoint2D32f* v;

    IplImage* img = 0;
    IplImage* gray = 0;
    IplImage* thresh = 0;

    int  idx, max_idx;
    int  progress = 0;

    sprintf( filepath, "%scameracalibration/", ts->get_data_path().c_str() );
    sprintf( filename, "%schessboard_timing_list.dat", filepath );
    printf("Reading file %s\n", filename);
    CvFileStorage* fs = cvOpenFileStorage( filename, 0, CV_STORAGE_READ );
    CvFileNode* board_list = fs ? cvGetFileNodeByName( fs, 0, "boards" ) : 0;

    if( !fs || !board_list || !CV_NODE_IS_SEQ(board_list->tag) ||
            board_list->data.seq->total % 4 != 0 )
    {
        ts->printf( cvtest::TS::LOG, "chessboard_timing_list.dat can not be readed or is not valid" );
        code = cvtest::TS::FAIL_MISSING_TEST_DATA;
        goto _exit_;
    }

    max_idx = board_list->data.seq->total/4;

    for( idx = start_from; idx < max_idx; idx++ )
    {
        int count0 = -1;
        int count = 0;
        CvSize pattern_size;
        int result, result1 = 0;

        const char* imgname = cvReadString((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*4), "dummy.txt");
        int is_chessboard = cvReadInt((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*4+1), 0);
        pattern_size.width = cvReadInt((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*4 + 2), -1);
        pattern_size.height = cvReadInt((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*4 + 3), -1);

        ts->update_context( this, idx-1, true );

        /* read the image */
        sprintf( filename, "%s%s", filepath, imgname );

        img = cvLoadImage( filename );

        if( !img )
        {
            ts->printf( cvtest::TS::LOG, "one of chessboard images can't be read: %s\n", filename );
            if( max_idx == 1 )
            {
                code = cvtest::TS::FAIL_MISSING_TEST_DATA;
                goto _exit_;
            }
            continue;
        }

        ts->printf(cvtest::TS::LOG, "%s: chessboard %d:\n", imgname, is_chessboard);

        gray = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );
        thresh = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );
        cvCvtColor( img, gray, CV_BGR2GRAY );


        count0 = pattern_size.width*pattern_size.height;

        /* allocate additional buffers */
        _v = cvCreateMat(1, count0, CV_32FC2);
        count = count0;

        v = (CvPoint2D32f*)_v->data.fl;

        int64 _time0 = cvGetTickCount();
        result = cvCheckChessboard(gray, pattern_size);
        int64 _time01 = cvGetTickCount();

        OPENCV_CALL( result1 = cvFindChessboardCorners(
                                   gray, pattern_size, v, &count, 15 ));
        int64 _time1 = cvGetTickCount();

        if( result != is_chessboard )
        {
            ts->printf( cvtest::TS::LOG, "Error: chessboard was %sdetected in the image %s\n",
                        result ? "" : "not ", imgname );
            code = cvtest::TS::FAIL_INVALID_OUTPUT;
            goto _exit_;
        }
        if(result != result1)
        {
            ts->printf( cvtest::TS::LOG, "Warning: results differ cvCheckChessboard %d, cvFindChessboardCorners %d\n",
                        result, result1);
        }

        int num_pixels = gray->width*gray->height;
        float check_chessboard_time = float(_time01 - _time0)/(float)cvGetTickFrequency(); // in us
        ts->printf(cvtest::TS::LOG, "    cvCheckChessboard time s: %f, us per pixel: %f\n",
                   check_chessboard_time*1e-6, check_chessboard_time/num_pixels);

        float find_chessboard_time = float(_time1 - _time01)/(float)cvGetTickFrequency();
        ts->printf(cvtest::TS::LOG, "    cvFindChessboard time s: %f, us per pixel: %f\n",
                   find_chessboard_time*1e-6, find_chessboard_time/num_pixels);

        cvReleaseMat( &_v );
        cvReleaseImage( &img );
        cvReleaseImage( &gray );
        cvReleaseImage( &thresh );
        progress = update_progress( progress, idx-1, max_idx, 0 );
    }

_exit_:

    /* release occupied memory */
    cvReleaseMat( &_v );
    cvReleaseFileStorage( &fs );
    cvReleaseImage( &img );
    cvReleaseImage( &gray );
    cvReleaseImage( &thresh );

    if( code < 0 )
        ts->set_failed_test_info( code );
}
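A note on the timing idiom above: in the C API, cvGetTickFrequency() returns the number of ticks per microsecond, so a tick delta converts to time as in this illustrative snippet:

// Illustrative only: converting cvGetTickCount() deltas to wall-clock time.
int64 t0 = cvGetTickCount();
// ... code under measurement ...
int64 t1 = cvGetTickCount();
double microseconds = (double)(t1 - t0) / cvGetTickFrequency();
double seconds = microseconds * 1e-6; // matches the "time s" log lines above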
コード例 #26
0
//---------------------------------------------------------------
//[Function ]: cv_ColorExtraction
//[Overview ]: color extraction
//[Arguments]: src_img        = input image (8bit, 3ch)
//      : dst_img        = output image (8bit, 3ch)
//      : code        = color space to use (CV_BGR2HSV, CV_BGR2Lab, etc.)
//      : ch1_lower    = ch1 lower threshold
//      : ch1_upper    = ch1 upper threshold
//      : ch2_lower    = ch2 lower threshold
//      : ch2_upper    = ch2 upper threshold
//      : ch3_lower    = ch3 lower threshold
//      : ch3_upper    = ch3 upper threshold
//[Returns  ]: none
//[Notes    ]: if lower <= upper, the range lower..upper is extracted;
//      : if lower > upper, values <= upper or >= lower are extracted.
//---------------------------------------------------------------
void cv_ColorExtraction(IplImage* src_img, IplImage* dst_img,
                        int code,
                        int ch1_lower, int ch1_upper,
                        int ch2_lower, int ch2_upper,
                        int ch3_lower, int ch3_upper
                       ) {

    int i, k;

    IplImage *Color_img;
    IplImage *ch1_img, *ch2_img, *ch3_img;
    IplImage *Mask_img;

    int lower[3];
    int upper[3];
    int val[3];

    CvMat *lut;

    // color conversion according to 'code'
    Color_img = cvCreateImage(cvGetSize(src_img), src_img->depth, src_img->nChannels);
    cvCvtColor(src_img, Color_img, code);

    // build the 3-channel LUT
    lut    = cvCreateMat(256, 1, CV_8UC3);

    lower[0] = ch1_lower;
    lower[1] = ch2_lower;
    lower[2] = ch3_lower;

    upper[0] = ch1_upper;
    upper[1] = ch2_upper;
    upper[2] = ch3_upper;

    for (i = 0; i < 256; i++) {
        for (k = 0; k < 3; k++) {
            if (lower[k] <= upper[k]) {
                if ((lower[k] <= i) && (i <= upper[k])) {
                    val[k] = 255;
                } else {
                    val[k] = 0;
                }
            } else {
                if ((i <= upper[k]) || (lower[k] <= i)) {
                    val[k] = 255;
                } else {
                    val[k] = 0;
                }
            }
        }
        // set the LUT entry
        cvSet1D(lut, i, cvScalar(val[0], val[1], val[2]));
    }

    // per-channel LUT transform (binarizes each channel)
    cvLUT(Color_img, Color_img, lut);
    cvReleaseMat(&lut);

    // allocate an IplImage for each channel
    ch1_img = cvCreateImage(cvGetSize(Color_img), Color_img->depth, 1);
    ch2_img = cvCreateImage(cvGetSize(Color_img), Color_img->depth, 1);
    ch3_img = cvCreateImage(cvGetSize(Color_img), Color_img->depth, 1);

    // split the per-channel binarized image into its individual channels
    cvSplit(Color_img, ch1_img, ch2_img, ch3_img, NULL);

    // AND all three channels together to build the mask image
    Mask_img = cvCreateImage(cvGetSize(Color_img), Color_img->depth, 1);
    cvAnd(ch1_img, ch2_img, Mask_img);
    cvAnd(Mask_img, ch3_img, Mask_img);

    // copy the masked region of the input (src_img) to the output (dst_img)
    cvZero(dst_img);
    cvCopy(src_img, dst_img, Mask_img);

    // release
    cvReleaseImage(&Color_img);
    cvReleaseImage(&ch1_img);
    cvReleaseImage(&ch2_img);
    cvReleaseImage(&ch3_img);
    cvReleaseImage(&Mask_img);

}
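A small usage sketch for cv_ColorExtraction: extracting red in HSV, where hue wraps around 0 and therefore uses the lower > upper convention from the header comment (image names and threshold values are illustrative):

// Illustrative call: in OpenCV's 8-bit HSV, hue runs 0..180, and red straddles 0,
// so ch1_lower (170) > ch1_upper (10) selects H <= 10 or H >= 170.
IplImage* src = cvLoadImage("input.png");
IplImage* dst = cvCreateImage(cvGetSize(src), 8, 3);
cv_ColorExtraction(src, dst, CV_BGR2HSV,
                   170, 10,   // H: wrap-around range
                   80, 255,   // S
                   50, 255);  // V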
コード例 #27
0
ファイル: main.cpp プロジェクト: kovaloid/university
int WINAPI WinMain(HINSTANCE hThisInstance, HINSTANCE hPrevInstance, LPSTR lpszArgs, int nWinMode)
{
	// variables for storing images
	IplImage *frame = 0, *image = 0, *hsv = 0, *dst = 0, *dst2 = 0, *color_indexes = 0, *dst3 = 0, *image2 = 0, *tmp = 0;
	int key = 0, zx = 0, zy = 0;

	// load a picture from file
	IplImage *menu = cvLoadImage("menu.png");
	// create the project's main window
	cvNamedWindow("Проект OpenCV");
	cvShowImage("Проект OpenCV",menu);
	cvMoveWindow("Проект OpenCV",100,50);

	// grab any connected web camera
    CvCapture *capture = cvCaptureFromCAM(CV_CAP_ANY);

    // frame rate
	double fps = 18;
	// initialize recording to a video file; the 4-character codec code is built with the CV_FOURCC macro
	CvVideoWriter *writer = cvCreateVideoWriter("record.avi", CV_FOURCC('I','Y','U','V'), fps, cvSize(640, 480), 1);

	if (!capture)
		return 0;
	else
	{
		while(key != 27)
		{
			// grab the current frame
			frame = cvQueryFrame(capture);
			// copy it for processing
			image = cvCloneImage(frame);
	
			// zoom
			if(key=='+')
			{
					zx = zx + 4;
					zy = zy + 3;
			}
			if(key=='-')
			{
					zx = zx - 4;
					zy = zy - 3;
			}
			if(zx > 300)
			{
					zx = 300;
					zy = 225;
			}
			if(zx < 0)
			{
					zx = 0;
					zy = 0;
			}

			// set the ROI width and height
			int zwidth = frame->width-2*zx; 
			int zheight = frame->height-2*zy;

			// set the ROI (Region Of Interest)
			cvSetImageROI(frame, cvRect(zx,zy,zwidth,zheight));
			// copy the region of interest into image2
			image2 = cvCloneImage(frame); 
			// create an empty 640x480 image
			tmp = cvCreateImage( cvSize(640, 480), frame->depth, frame->nChannels );
			// place the ROI onto the empty image tmp
			cvResize(image2, tmp, 0);

			// write the frame to the video file
            cvWriteFrame(writer, tmp);

			// reset the ROI
			cvResetImageROI(frame);

			// initialize the font
			CvFont font;
			cvInitFont( &font, CV_FONT_HERSHEY_COMPLEX,1.0, 1.0, 0, 1, CV_AA);
			// draw text onto the image with this font
			cvPutText(tmp, "press '+' to increase", cvPoint(150, 40), &font, CV_RGB(150, 0, 150) );
			cvPutText(tmp, "press '-' to reduce", cvPoint(165, 450), &font, CV_RGB(150, 0, 150) );

			// number of pixels of each color in the image 
			uint colorCount[NUM_COLOR_TYPES] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

			hsv = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 ); 
			cvCvtColor( image, hsv, CV_BGR2HSV );

			// images for the results
			dst = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 ); 
			dst2 = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 );
			color_indexes = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 1 ); // stores the color indices

			// stores the averaged RGB colors
			CvScalar rgb_colors[NUM_COLOR_TYPES];

			int i=0, j=0, x=0, y=0;

			// zero out the colors
			for(i=0; i<NUM_COLOR_TYPES; i++) {
					rgb_colors[i] = cvScalarAll(0);
			}

			for (y=0; y<hsv->height; y++) {
					for (x=0; x<hsv->width; x++) {

							// get the pixel's HSV components
							uchar H = CV_PIXEL(uchar, hsv, x, y)[0];        // Hue
							uchar S = CV_PIXEL(uchar, hsv, x, y)[1];        // Saturation
							uchar V = CV_PIXEL(uchar, hsv, x, y)[2];        // Value (Brightness)

							// determine which color type these values map to
							int ctype = getPixelColorType(H, S, V);

							// set this color in the debug image
							CV_PIXEL(uchar, dst, x, y)[0] = cCTHue[ctype];  // Hue
							CV_PIXEL(uchar, dst, x, y)[1] = cCTSat[ctype];  // Saturation
							CV_PIXEL(uchar, dst, x, y)[2] = cCTVal[ctype];  // Value

							// accumulate the RGB components
							rgb_colors[ctype].val[0] += CV_PIXEL(uchar, image, x, y)[0]; // B
							rgb_colors[ctype].val[1] += CV_PIXEL(uchar, image, x, y)[1]; // G
							rgb_colors[ctype].val[2] += CV_PIXEL(uchar, image, x, y)[2]; // R

							// record which color type this pixel belongs to
							CV_PIXEL(uchar, color_indexes, x, y)[0] = ctype;

							// count it
							colorCount[ctype]++;
					}
			}

			// average the accumulated RGB components
			for(i=0; i<NUM_COLOR_TYPES; i++) {
					if (colorCount[i] == 0) continue; // guard: a color type with no pixels would divide by zero
					rgb_colors[i].val[0] /= colorCount[i];
					rgb_colors[i].val[1] /= colorCount[i];
					rgb_colors[i].val[2] /= colorCount[i];
			}

			// now move the array into a vector and sort it
			std::vector< std::pair< int, uint > > colors;
			colors.reserve(NUM_COLOR_TYPES);

			for(i=0; i<NUM_COLOR_TYPES; i++){
					std::pair< int, uint > color;
					color.first = i;
					color.second = colorCount[i];
					colors.push_back( color );
			}
		
			// sort
			std::sort( colors.begin(), colors.end(), colors_sort );

			// show the colors
			cvZero(dst2);
			int h = dst2->height;
			int w = dst2->width / RECT_COLORS_SIZE;
			for(i=0; i<RECT_COLORS_SIZE; i++ ){
					cvRectangle(dst2, cvPoint(i*w, 0), cvPoint(i*w+w, h), rgb_colors[colors[i].first], -1);
			}

			// show the image rendered in the detected colors
			dst3 = cvCloneImage(image);
			for (y=0; y<dst3->height; y++) {
					for (x=0; x<dst3->width; x++) {
							int color_index = CV_PIXEL(uchar, color_indexes, x, y)[0];

							CV_PIXEL(uchar, dst3, x, y)[0] = rgb_colors[color_index].val[0];
							CV_PIXEL(uchar, dst3, x, y)[1] = rgb_colors[color_index].val[1];
							CV_PIXEL(uchar, dst3, x, y)[2] = rgb_colors[color_index].val[2];
					}
			}

			// convert the debug image back to RGB
			cvCvtColor( dst, dst, CV_HSV2BGR );

			cvSetMouseCallback("Проект OpenCV", ClickOnMenu, (void*) menu);

			if(flag_1 == 1)
			{
				cvNamedWindow("Веб-камера", CV_WINDOW_AUTOSIZE);
				cvShowImage("Веб-камера", image);
			}
			else cvDestroyWindow("Веб-камера");
			if(flag_2 == 1)
			{
				cvNamedWindow("Zoom", CV_WINDOW_AUTOSIZE);
				cvShowImage("Zoom", tmp);
			}
			else cvDestroyWindow("Zoom");
			if(flag_3 == 1)
			{
				cvNamedWindow("Обнаруженные цвета");
				cvShowImage("Обнаруженные цвета", dst2);
			}
			else cvDestroyWindow("Обнаруженные цвета");
			if(flag_4 == 1)
			{
				cvNamedWindow("Изображение в обнаруженных цветах");
				cvShowImage("Изображение в обнаруженных цветах", dst3);
			}
			else cvDestroyWindow("Изображение в обнаруженных цветах");
			if(flag_5 == 1)
			{
				cvNamedWindow("Из HSV в RGB");
				cvShowImage("Из HSV в RGB", dst);
			}
			else cvDestroyWindow("Из HSV в RGB");
	
			// release resources
			cvReleaseImage(&hsv);
			cvReleaseImage(&dst);
			cvReleaseImage(&dst2);
			cvReleaseImage(&color_indexes);
			cvReleaseImage(&dst3);
			cvReleaseImage(&image);
			cvReleaseImage(&image2);
			cvReleaseImage(&tmp);

			if(flag_exit == 1)
			{
				cvReleaseCapture(&capture);
				cvReleaseVideoWriter(&writer); // close the video file
				return 0;
			}

			// exit the loop when ESC is pressed
			key = cvWaitKey(1);
		}

		// release previously initialized variables
		cvReleaseCapture(&capture);
		cvReleaseVideoWriter(&writer);

	}
    return 0;
}
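colors_sort is passed to std::sort above but not shown; a minimal comparator consistent with listing the most frequent colors first (an assumption):

// Hypothetical comparator (assumption): order color types by descending pixel count.
static bool colors_sort(const std::pair<int, uint>& a,
                        const std::pair<int, uint>& b)
{
    return a.second > b.second;
}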
コード例 #28
0
int main(int argc, char* argv[]) {
    CvMemStorage *contStorage = cvCreateMemStorage(0);
    CvSeq *contours;
    CvTreeNodeIterator polyIterator;

    CvMemStorage *mallet_storage;
	CvSeq *mallet_circles = 0;
	float *mallet_p;
	int mi;

    int found = 0;
    int i;
    CvPoint poly_point;
	int fps=30;

	int npts[2] = { 4, 12 };
	CvPoint **pts;

	pts = (CvPoint **) cvAlloc (sizeof (CvPoint *) * 2);
	pts[0] = (CvPoint *) cvAlloc (sizeof (CvPoint) * 4);
	pts[1] = (CvPoint *) cvAlloc (sizeof (CvPoint) * 12);
	pts[0][0] = cvPoint(0,0);
	pts[0][1] = cvPoint(160,0);
	pts[0][2] = cvPoint(320,240);
	pts[0][3] = cvPoint(0,240);
	pts[1][0] = cvPoint(39,17);
	pts[1][1] = cvPoint(126,15);
	pts[1][2] = cvPoint(147,26);
	pts[1][3] = cvPoint(160,77);
	pts[1][4] = cvPoint(160,164);
	pts[1][5] = cvPoint(145,224);
	pts[1][6] = cvPoint(125,233);
	pts[1][7] = cvPoint(39,233);
	pts[1][8] = cvPoint(15,217);
	pts[1][9] = cvPoint(0,133);
	pts[1][10] = cvPoint(0,115);
	pts[1][11] = cvPoint(17,28);

	// polyline approximation
    CvMemStorage *polyStorage = cvCreateMemStorage(0);
    CvSeq *polys, *poly;

	// OpenCV variables
	CvFont font;

    printf("start!\n");

	//pwm initialize
	if(gpioInitialise() < 0) return -1;
	//pigpio CW/CCW pin setup
	//X:18, Y1:14, Y2:15
	gpioSetMode(18, PI_OUTPUT);
	gpioSetMode(14, PI_OUTPUT);
	gpioSetMode(15, PI_OUTPUT);
	//pigpio pulse setup
	//X:25, Y1:23, Y2:24
	gpioSetMode(25, PI_OUTPUT);
	gpioSetMode(23, PI_OUTPUT);
	gpioSetMode(24, PI_OUTPUT);
	//limit-switch setup
	gpioSetMode(5, PI_INPUT);
	gpioWrite(5, 0);
	gpioSetMode(6, PI_INPUT);
	gpioWrite(6, 0);
	gpioSetMode(7, PI_INPUT);
	gpioWrite(7, 0);
	gpioSetMode(8, PI_INPUT);
	gpioWrite(8, 0);
	gpioSetMode(13, PI_INPUT);
	gpioSetMode(19, PI_INPUT);
	gpioSetMode(26, PI_INPUT);
	gpioSetMode(21, PI_INPUT);

	CvCapture* capture_robot_side = cvCaptureFromCAM(0);
	CvCapture* capture_human_side = cvCaptureFromCAM(1);
    if(capture_robot_side == NULL){
		std::cout << "Robot Side Camera Capture FAILED" << std::endl;
		return -1;
	 }
	if(capture_human_side ==NULL){
		std::cout << "Human Side Camera Capture FAILED" << std::endl;
		return -1;
	}

	// set capture size
    cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH);
	cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT);
	// set fps
	cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FPS,fps);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FPS,fps);

	// create windows for displaying the images
	//cvNamedWindow("Previous Image", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("Now Image", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("pack", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("mallet", CV_WINDOW_AUTOSIZE);
	cvNamedWindow ("Poly", CV_WINDOW_AUTOSIZE);

	//Create trackbar to change brightness
	int iSliderValue1 = 50;
	cvCreateTrackbar("Brightness", "Now Image", &iSliderValue1, 100);
	//Create trackbar to change contrast
	int iSliderValue2 = 50;
	cvCreateTrackbar("Contrast", "Now Image", &iSliderValue2, 100);
	//pack threshold 0, 50, 120, 220, 100, 220
	int iSliderValuePack1 = 54; //80;
	cvCreateTrackbar("minH", "pack", &iSliderValuePack1, 255);
	int iSliderValuePack2 = 84;//106;
	cvCreateTrackbar("maxH", "pack", &iSliderValuePack2, 255);
	int iSliderValuePack3 = 100;//219;
	cvCreateTrackbar("minS", "pack", &iSliderValuePack3, 255);
	int iSliderValuePack4 = 255;//175;
	cvCreateTrackbar("maxS", "pack", &iSliderValuePack4, 255);
	int iSliderValuePack5 = 0;//29;
	cvCreateTrackbar("minV", "pack", &iSliderValuePack5, 255);
	int iSliderValuePack6 = 255;//203;
	cvCreateTrackbar("maxV", "pack", &iSliderValuePack6, 255);
	//mallet threshold 0, 255, 100, 255, 140, 200
	int iSliderValuemallet1 = 107;
	cvCreateTrackbar("minH", "mallet", &iSliderValuemallet1, 255);
	int iSliderValuemallet2 = 115;
	cvCreateTrackbar("maxH", "mallet", &iSliderValuemallet2, 255);
	int iSliderValuemallet3 = 218;//140
	cvCreateTrackbar("minS", "mallet", &iSliderValuemallet3, 255);
	int iSliderValuemallet4 = 255;
	cvCreateTrackbar("maxS", "mallet", &iSliderValuemallet4, 255);
	int iSliderValuemallet5 = 0;
	cvCreateTrackbar("minV", "mallet", &iSliderValuemallet5, 255);
	int iSliderValuemallet6 = 255;
	cvCreateTrackbar("maxV", "mallet", &iSliderValuemallet6, 255);

	// declare the image pointers
	IplImage* img_robot_side = cvQueryFrame(capture_robot_side);
	IplImage* img_human_side = cvQueryFrame(capture_human_side);
	IplImage* img_all_round = cvCreateImage(cvSize(CAM_PIX_WIDTH, CAM_PIX_2HEIGHT), IPL_DEPTH_8U, 3);
	IplImage* tracking_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* img_all_round2  = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* show_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);

	cv::Mat mat_frame1;
	cv::Mat mat_frame2;
	cv::Mat dst_img_v;
	cv::Mat dst_bright_cont;
	int iBrightness  = iSliderValue1 - 50;
	double dContrast = iSliderValue2 / 50.0;
	IplImage* dst_img_frame = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* grayscale_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 1);
	IplImage* poly_tmp = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 1);
	IplImage* poly_dst = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 3);
	IplImage* poly_gray = cvCreateImage( cvGetSize(img_all_round),IPL_DEPTH_8U,1);

	int rotate_times = 0;
	//IplImage* -> Mat
	mat_frame1 = cv::cvarrToMat(img_robot_side);
	mat_frame2 = cv::cvarrToMat(img_human_side);
	// flip vertically and horizontally; in production, flip mat_frame1
	cv::flip(mat_frame1, mat_frame1, 0); // flip around the horizontal axis (vertical flip)
	cv::flip(mat_frame1, mat_frame1, 1); // flip around the vertical axis (horizontal flip)
	vconcat(mat_frame2, mat_frame1, dst_img_v);

	dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); // convert the combined image
	// image dilation and erosion
//	cv::Mat close_img;
//	cv::Mat element(3,3,CV_8U, cv::Scalar::all(255));
//	cv::morphologyEx(dst_img_v, close_img, cv::MORPH_CLOSE, element, cv::Point(-1,-1), 3);
//	cv::imshow("morphologyEx", dst_img_v);
//	dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); // convert the combined image

	// convert the brightness-adjusted result (Mat -> IplImage*) and hand it over; release afterwards.
	*img_all_round = dst_bright_cont;

	cv_ColorExtraction(img_all_round, dst_img_frame, CV_BGR2HSV, 0, 11, 180, 255, 0, 255);

	cvCvtColor(dst_img_frame, grayscale_img, CV_BGR2GRAY);
	cv_Labelling(grayscale_img, tracking_img);

	cvCvtColor(tracking_img, poly_gray, CV_BGR2GRAY);

	cvCopy( poly_gray, poly_tmp);
	cvCvtColor( poly_gray, poly_dst, CV_GRAY2BGR);

	// image dilation and erosion
	//cvMorphologyEx(tracking_img, tracking_img,)

	// contour extraction
	found = cvFindContours( poly_tmp, contStorage, &contours, sizeof( CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

	// polyline approximation
	polys = cvApproxPoly( contours, sizeof( CvContour), polyStorage, CV_POLY_APPROX_DP, 8, 10);

	cvInitTreeNodeIterator( &polyIterator, ( void*)polys, 10);
	poly = (CvSeq *)cvNextTreeNode( &polyIterator);
	printf("sort before by X\n");
	for( i=0; i<poly->total; i++)
	{
		poly_point = *( CvPoint*)cvGetSeqElem( poly, i);
		cvCircle( poly_dst, poly_point, 1, CV_RGB(255, 0 , 255), -1);
		cvCircle( poly_dst, poly_point, 8, CV_RGB(255, 0 , 255));
		std::cout << "x:" << poly_point.x << ", y:" << poly_point.y  << std::endl;
	}
	printf("Poly FindTotal:%d\n",poly->total);

	// determine the frame coordinates
	// upper left, wall side:  upper_left_f
	// upper left, goal side:  upper_left_g
	// upper right, wall side: upper_right_f
	// upper right, goal side: upper_right_g
	// lower left, wall side:  lower_left_f
	// lower left, goal side:  lower_left_g
	// lower right, wall side: lower_right_f
	// lower right, goal side: lower_right_g
	CvPoint upper_left_f, upper_left_g, upper_right_f, upper_right_g,
			lower_left_f, lower_left_g, lower_right_f, lower_right_g,
			robot_goal_left, robot_goal_right;

	CvPoint frame_points[8];
//	if(poly->total == 8){
//		for( i=0; i<8; i++){
//			poly_point = *( CvPoint*)cvGetSeqElem( poly, i);
//			frame_points[i] = poly_point;
//		}
//		qsort(frame_points, 8, sizeof(CvPoint), compare_cvpoint);
//		printf("sort after by X\n");
//		for( i=0; i<8; i++){
//			std::cout << "x:" << frame_points[i].x << ", y:" << frame_points[i].y  << std::endl;
//		}
//		if(frame_points[0].y < frame_points[1].y){
//			upper_left_f = frame_points[0];
//			lower_left_f = frame_points[1];
//		}
//		else{
//			upper_left_f = frame_points[1];
//			lower_left_f = frame_points[0];
//		}
//		if(frame_points[2].y < frame_points[3].y){
//			upper_left_g = frame_points[2];
//			lower_left_g = frame_points[3];
//		}
//		else{
//			upper_left_g = frame_points[3];
//			lower_left_g = frame_points[2];
//		}
//		if(frame_points[4].y < frame_points[5].y){
//			upper_right_g = frame_points[4];
//			lower_right_g = frame_points[5];
//		}
//		else{
//			upper_right_g = frame_points[5];
//			lower_right_g = frame_points[4];
//		}
//		if(frame_points[6].y < frame_points[7].y){
//			upper_right_f = frame_points[6];
//			lower_right_f = frame_points[7];
//		}
//		else{
//			upper_right_f = frame_points[7];
//			lower_right_f = frame_points[6];
//		}
//	}
//	else{
		printf("Frame is not 8 Point\n");
		upper_left_f = cvPoint(26, 29);
		upper_right_f =  cvPoint(136, 29);
		lower_left_f = cvPoint(26, 220);
		lower_right_f =  cvPoint(136, 220);

		upper_left_g = cvPoint(38, 22);
		upper_right_g = cvPoint(125, 22);
		lower_left_g =  cvPoint(38, 226);
		lower_right_g = cvPoint(125, 226);

		robot_goal_left = cvPoint(60, 226);
		robot_goal_right = cvPoint(93, 226);

//		cvCopy(img_all_round, show_img);
//		cvLine(show_img, upper_left_f, upper_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, lower_left_f, lower_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, upper_right_f, lower_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, upper_left_f, lower_left_f, CV_RGB( 255, 255, 0 ));
//
//		cvLine(show_img, upper_left_g, upper_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, lower_left_g, lower_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, upper_right_g, lower_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, upper_left_g, lower_left_g, CV_RGB( 0, 255, 0 ));

		//while(1){
			//cvShowImage("Now Image", show_img);
			//cvShowImage ("Poly", poly_dst);
			//if(cv::waitKey(1) >= 0) {
				//break;
			//}
		//}
		//return -1;
//	}
	printf("upper_left_fX:%d, Y:%d\n",upper_left_f.x, upper_left_f.y);
	printf("upper_left_gX:%d, Y:%d\n",upper_left_g.x, upper_left_g.y);
	printf("upper_right_fX:%d,Y:%d\n", upper_right_f.x, upper_right_f.y);
	printf("upper_right_gX:%d, Y:%d\n" , upper_right_g.x, upper_right_g.y);
	printf("lower_left_fX:%d, Y:%d\n", lower_left_f.x, lower_left_f.y);
	printf("lower_left_gX:%d, Y:%d\n", lower_left_g.x, lower_left_g.y);
	printf("lower_right_fX:%d, Y:%d\n", lower_right_f.x, lower_right_f.y);
	printf("lower_right_gX:%d, Y:%d\n", lower_right_g.x, lower_right_g.y);
	printf("robot_goal_left:%d, Y:%d\n", robot_goal_left.x, robot_goal_left.y);
	printf("robot_goal_right:%d, Y:%d\n", robot_goal_right.x, robot_goal_right.y);

    cvReleaseImage(&dst_img_frame);
    cvReleaseImage(&grayscale_img);
    cvReleaseImage(&poly_tmp);
    cvReleaseImage(&poly_gray);

    cvReleaseMemStorage(&contStorage);
    cvReleaseMemStorage(&polyStorage);
	//return 1;
	// Init font
	cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 0.4,0.4,0,1);
	bool is_pushed_decision_button = 1; // to be replaced by the signal from the other Raspberry Pi

	while(1){
		// start when the decision button is pressed
		if(gpioRead(8)==0 && is_pushed_decision_button==1){
			cvCopy(img_all_round, img_all_round2);
			cvCopy(img_all_round, show_img);
			img_robot_side = cvQueryFrame(capture_robot_side);
			img_human_side = cvQueryFrame(capture_human_side);
			//IplImage* -> Mat
			mat_frame1 = cv::cvarrToMat(img_robot_side);
			mat_frame2 = cv::cvarrToMat(img_human_side);
			// flip vertically and horizontally; in production, flip mat_frame1
			cv::flip(mat_frame1, mat_frame1, 0); // flip around the horizontal axis (vertical flip)
			cv::flip(mat_frame1, mat_frame1, 1); // flip around the vertical axis (horizontal flip)
			vconcat(mat_frame2, mat_frame1, dst_img_v);

			iBrightness  = iSliderValue1 - 50;
			dContrast = iSliderValue2 / 50.0;
			dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); // convert the combined image
			// convert the brightness-adjusted result (Mat -> IplImage*) and hand it over; release afterwards.
			*img_all_round = dst_bright_cont;
			mat_frame1.release();
			mat_frame2.release();
			dst_img_v.release();

			cvFillPoly(img_all_round, pts, npts, 2, CV_RGB(0, 0, 0));

			IplImage* dst_img_mallet = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
			IplImage* dst_img_pack = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
			IplImage* dst_img2_mallet = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3);
			IplImage* dst_img2_pack = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3);

			cv_ColorExtraction(img_all_round, dst_img_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6);
			cv_ColorExtraction(img_all_round, dst_img_mallet, CV_BGR2HSV, iSliderValuemallet1, iSliderValuemallet2, iSliderValuemallet3, iSliderValuemallet4, iSliderValuemallet5, iSliderValuemallet6);
			cv_ColorExtraction(img_all_round2, dst_img2_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6);

			//CvMoments moment_mallet;
			CvMoments moment_pack;
			CvMoments moment_mallet;
			CvMoments moment2_pack;
			//cvSetImageCOI(dst_img_mallet, 1);
			cvSetImageCOI(dst_img_pack, 1);
			cvSetImageCOI(dst_img_mallet, 1);
			cvSetImageCOI(dst_img2_pack, 1);

			//cvMoments(dst_img_mallet, &moment_mallet, 0);
			cvMoments(dst_img_pack, &moment_pack, 0);
			cvMoments(dst_img_mallet, &moment_mallet, 0);
			cvMoments(dst_img2_pack, &moment2_pack, 0);

			// coordinate calculation
			double m00_before = cvGetSpatialMoment(&moment2_pack, 0, 0);
			double m10_before = cvGetSpatialMoment(&moment2_pack, 1, 0);
			double m01_before = cvGetSpatialMoment(&moment2_pack, 0, 1);
			double m00_after = cvGetSpatialMoment(&moment_pack, 0, 0);
			double m10_after = cvGetSpatialMoment(&moment_pack, 1, 0);
			double m01_after = cvGetSpatialMoment(&moment_pack, 0, 1);
			double gX_before = m10_before/m00_before;
			double gY_before = m01_before/m00_before;
			double gX_after = m10_after/m00_after;
			double gY_after = m01_after/m00_after;
			double m00_mallet = cvGetSpatialMoment(&moment_mallet, 0, 0);
			double m10_mallet = cvGetSpatialMoment(&moment_mallet, 1, 0);
			double m01_mallet = cvGetSpatialMoment(&moment_mallet, 0, 1);
			double gX_now_mallet = m10_mallet/m00_mallet;
			double gY_now_mallet = m01_mallet/m00_mallet;

			int target_direction = -1; // target rotation direction: clockwise = 1, counterclockwise = 0
			// draw circles at one tenth of the frame height
			cvCircle(show_img, cvPoint(gX_before, gY_before), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0);
			cvCircle(show_img, cvPoint(gX_now_mallet, gY_now_mallet), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0);
			cvLine(show_img, cvPoint(gX_before, gY_before), cvPoint(gX_after, gY_after), cvScalar(0,255,0), 2);
			cvLine(show_img, robot_goal_left, robot_goal_right, cvScalar(0,255,255), 2);
			printf("gX_after: %f\n",gX_after);
			printf("gY_after: %f\n",gY_after);
			printf("gX_before: %f\n",gX_before);
			printf("gY_before: %f\n",gY_before);
			printf("gX_now_mallet: %f\n",gX_now_mallet);
			printf("gY_now_mallet: %f\n",gY_now_mallet);
			int target_destanceY = CAM_PIX_2HEIGHT - 30; // keep the Y distance fixed: the defense line.
			// the puck travels in a straight line, so use a linear function to predict its trajectory.
			double a_inclination;
			double b_intercept;

			int closest_frequency;

			int target_coordinateX = 0; // only assigned in the first branch below but read later, so initialize it
			int origin_coordinateY;
			int target_coordinateY;
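			// Sketch of the linear prediction described above (an assumption;
			// in this excerpt these variables are declared but never computed):
			//   a_inclination = (gY_after - gY_before) / (gX_after - gX_before);
			//   b_intercept   = gY_after - a_inclination * gX_after;
			//   target_coordinateX = (target_destanceY - b_intercept) / a_inclination;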

			double center_line = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
			int left_frame = (upper_left_f.x + lower_left_f.x)/2;
			int right_frame = (upper_right_f.x + lower_right_f.x)/2;

			double y_line = (upper_left_f.y + lower_right_f.y)/3;
			double waiting_position = (robot_goal_left.x + lower_left_g.x) / 2;

			if(gY_after - gY_before < -1){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 600);
				target_coordinateX = waiting_position;
				if(waiting_position + 5 < gX_now_mallet){
					target_direction = 0; // counterclockwise
				}
				else if(gX_now_mallet < waiting_position - 5){
					target_direction = 1; // clockwise
				}
			}
			/*else if(robot_goal_right.x < gX_now_mallet){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1000);
				target_direction = 0; // counterclockwise
			}
			else if(gX_now_mallet < robot_goal_left.x){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1000);
				target_direction = 1; // clockwise
			}*/
			else if(y_line < gY_after && y_line > gY_before){
				clock_t start = clock();
				clock_t end;
				end = start + 0.5 * (target_coordinateX - robot_goal_left.x) / 10;
				target_direction = 1;
				gpioPWM(25, 128);
				gpioWrite(18, target_direction);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				while(clock() < end); // busy-wait until the deadline
			}
			else{
				gpioPWM(25, 0);
				closest_frequency = gpioSetPWMfrequency(25, 0);
			}



			if(target_direction != -1){
				gpioWrite(18, target_direction);
			}
			// draw the defense line
			cvLine(show_img, cvPoint(CAM_PIX_WIDTH, target_destanceY), cvPoint(0, target_destanceY), cvScalar(255,255,0), 2);
			// draw the mallet's movement
			cvLine(show_img, cvPoint((int)gX_now_mallet, (int)gY_now_mallet), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);

			/*

			int amount_movement = target_coordinateX - gX_now_mallet;

			// react to the limit switches and set target_direction
//			if(gpioRead(6) == 1){ // X axis, right
//				gpioPWM(25, 128);
//				closest_frequency = gpioSetPWMfrequency(25, 1500);
//				target_direction = 0; // counterclockwise
//				printf("X axis right limit! counterclockwise\n");
//			}
//			else
			if(gpioRead(26) == 1){ // X axis, left
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				target_direction = 1; // clockwise
				printf("X axis left limit! clockwise\n");
			}
			else if(gpioRead(5) == 1){ // Y axis, upper right
				gpioPWM(23, 128);
				gpioSetPWMfrequency(23, 1500);
				gpioWrite(14, 0);
				printf("Y axis upper-right limit! clockwise\n");
			}
			else if(gpioRead(13) == 1){ // Y axis, lower right
				gpioPWM(23, 128);
				gpioSetPWMfrequency(23, 1500);
				gpioWrite(14, 1);
				printf("Y axis lower-right limit! counterclockwise\n");
			}
			else if(gpioRead(19) == 1){ // Y axis, lower left
				gpioPWM(24, 128);
				gpioSetPWMfrequency(24, 1500);
				gpioWrite(15, 0);
				printf("Y axis lower-left limit! clockwise\n");
			}

			else if(gpioRead(21) == 1){ // Y axis, upper left
				gpioPWM(24, 0);
				gpioSetPWMfrequency(24, 1500);
				gpioWrite(15, 1);
				printf("Y axis upper-left limit! counterclockwise\n");
			}
			else{
				// the Y axis is held fixed
				gpioSetPWMfrequency(23, 0);
				gpioSetPWMfrequency(24, 0);

				if(amount_movement > 0){
					target_direction = 1; // clockwise
				}
				else if(amount_movement < 0){
					target_direction = 0; // counterclockwise
				}
			}
			if(target_direction != -1){
				gpioWrite(18, target_direction);
			}
			else{
				gpioPWM(24, 0);
				gpioSetPWMfrequency(24, 0);
			}
			printf("setting_frequency: %d\n", closest_frequency);*/

			// display the images in the designated windows
			//cvShowImage("Previous Image", img_all_round2);
			cvShowImage("Now Image", show_img);
			cvShowImage("pack", dst_img_pack);
			cvShowImage("mallet", dst_img_mallet);
			cvShowImage ("Poly", poly_dst);

			cvReleaseImage (&dst_img_mallet);
			cvReleaseImage (&dst_img_pack);
			cvReleaseImage (&dst_img2_mallet);
			cvReleaseImage (&dst_img2_pack);

			if(cv::waitKey(1) >= 0) {
				break;
			}
		}
		else{ // a reset signal arrived
			is_pushed_decision_button = 0;
		}
    }

    gpioTerminate();

    cvDestroyAllWindows();

	//Clean up used CvCapture*
	cvReleaseCapture(&capture_robot_side);
	cvReleaseCapture(&capture_human_side);
    //Clean up used images
	cvReleaseImage(&poly_dst);
	cvReleaseImage(&tracking_img);
    cvReleaseImage(&img_all_round);
    cvReleaseImage(&img_human_side);
    cvReleaseImage(&img_all_round2);
    cvReleaseImage(&show_img);
    cvReleaseImage(&img_robot_side);
    cvFree(&pts[0]);
	cvFree(&pts[1]);
	cvFree(pts);

    return 0;
}
コード例 #29
0
ファイル: CCVEffect.cpp プロジェクト: zphilip/VirtualTS
int CHandDrawEffect::DrawHatching(IplImage* frame, IplImage* base)
{
//	IplImage* hsv = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);  
	cvCvtColor(frame, hsv, CV_BGR2HSV);

	int hwidth = hatching[0]->width;
	int hheight = hatching[0]->height;

	int rx = rand() % hwidth;// texture offset
	int ry = rand() % hheight;

	float d = (float)(rand() % 1000) / 1000.0f - 1.5f; // slope
	//d = 0;

	int height    = frame->height;
	int width     = frame->width;


	for(int y = 0; y < height; y++) { 
		for(int x = 0; x < width; x++) {
			int index = x * frame->nChannels + y * frame->widthStep;
			unsigned char v = hsv->imageData[index + 2];

			if(VFlag) {
				float V = v / 256.0f;
				V = sqrt(V) * 256.0f;
				v = (int)V;
			}

			int X = x + rx;
			int Y = y + ry + (int) (x * d);
			while(Y<0) Y += hheight * 100;
			X %= hwidth;
			Y %= hheight;
			float typeV;

			int maxV = PATT_NUM - 1 + patternOffset;
			
			typeV = (float) (255 - v) / (float) (256 / maxV) - patternOffset;
			typeV = maxV - typeV ;

			if(typeV < 0) typeV = 0;
			if(typeV > PATT_NUM-1) typeV = PATT_NUM - 1;

			float vf = 0.0;
			int a = (int) typeV;
			int b = a + 1;
			float w = typeV - a; // weight

			if(hatchingType >= 1) { // Inverse
				a = PATT_NUM-1 - a;
				b = PATT_NUM-1 - b;
			}
			vf += ((float) (unsigned char) hatching[a]->imageData[X*hatching[a]->nChannels + Y * hatching[a]->widthStep]) * (1.0f - w);
			if(b >= 0 && b <= 6){
				vf += ((float) (unsigned char) hatching[b]->imageData[X*hatching[b]->nChannels + Y * hatching[b]->widthStep]) * w;

			}

			if(hatchingType < 2)
				vf = 255 - vf;
			unsigned char vv = hsv->imageData[index + 2];
			if(hatchingType == 2){
				//int V = vv;V*=2;if (V>255)V=255;vv = V; // brighten the base a bit
				vv = (unsigned char) Clip(vv * 2.0f, 0.0f, 255.0f);
				vf = vf * 0.7f + 255.0f * 0.3f; // the lines are too dark, so brighten them a little
			}
			float sf = 1.0f;
			if(hatchingType == 3) { // black and white
				vv = 255;
				sf = 0.0;
				//vf=vf*0.7+255.0*0.3; // the lines are too dark, so brighten them a little
			}
			

			base->imageData[index + 0] = hsv->imageData[index + 0];
			base->imageData[index + 1] = (char) (hsv->imageData[index + 1] * sf);
			base->imageData[index + 2] = (unsigned char) (vv * (float) vf / 255);

		}
	}
	cvCvtColor(base, base, CV_HSV2BGR);

	return(1);
}
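Clip is used above but not defined in this excerpt; a one-line clamp sketch (an assumption):

// Hypothetical sketch of Clip (assumption): clamp v into [lo, hi].
static float Clip(float v, float lo, float hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}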
コード例 #30
0
ファイル: litearry.c プロジェクト: muzilike/roubosys
// initialize the video and display devices and run the preview (with device status checks) --------------------------------
int video_fb_init_preview()
{
	// serial-port-related variables -------------------------------
	char buff[512];
	int nread=0;
	int FrameDone=0; // end-of-frame flag
	int FrameCount=0; // records the frame length
	int j=0;
	int key=0; // toggle flag
	int stat=0; // video device status flag
	//-------------------------------------------
	
	int numBufs;

	//--------------------------------------------
	//SDL yuv
	SDL_Surface      *pscreen;
	SDL_Overlay      *overlay;
	SDL_Rect         drect;
	SDL_Event        sdlevent;
	SDL_mutex        *affmutex = NULL; // initialized so the cleanup below is safe (SDL_CreateMutex is commented out)
	unsigned char    *p = NULL;
	unsigned char    frmrate;
	unsigned int     currtime;
	unsigned int     lasttime = 0;
	char* status = NULL;

	//SDL RGB
	unsigned int     rmask;
	unsigned int     gmask;
	unsigned int     bmask;
	unsigned int     amask;	
	int              bpp;
	int 		 pitch;
	int 		 pixels_num;
	unsigned char    *pixels;
	unsigned char    *p_RGB = NULL;	
	SDL_Surface      *pscreen_RGB;
	SDL_Surface      *display_RGB;
	printf("USB Camera Test\n");

	video_fd = open("/dev/video0", O_RDWR, 0); // open the camera device in blocking mode
	if (video_fd<0)
	{
		printf("open error\n");
		return  1;
	}

	/************* first try to query the driver for the device's video formats: start *************/
	struct v4l2_fmtdesc fmt0;
	int ret0;
	memset(&fmt0,0,sizeof(fmt0));
	fmt0.index = 0;
	fmt0.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
	while((ret0 = ioctl(video_fd,VIDIOC_ENUM_FMT,&fmt0) == 0))
	{
		fmt0.index++;
		printf("%d> pixelformat =%c%c%c%c,description =%s\n",fmt0.index,fmt0.pixelformat&0xff,(fmt0.pixelformat>>8)&0xff,(fmt0.pixelformat>>16)&0xff,(fmt0.pixelformat>>24)&0xff,fmt0.description);
	}
	/**************************END***************************/
	
	//--------------------- set the capture format ----------------//
	struct v4l2_format fmt;	
	memset( &fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	// stream type, always V4L2_BUF_TYPE_VIDEO_CAPTURE
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; // source format: JPEG, YUV 4:2:2 or RGB
	fmt.fmt.pix.width = 640; // set the video width
	fmt.fmt.pix.height = 480; // set the video height
	//fmt.fmt.pix.field=V4L2_FIELD_INTERLACED;
	//fmt.fmt.pix.colorspace=8;
	//printf("color: %d \n",fmt.fmt.pix.colorspace);
	if (ioctl(video_fd, VIDIOC_S_FMT, &fmt) < 0) // apply the configuration
	{
		printf("set format failed\n");
		return 2;
	}
	//-------------------------------------------------------//
	
	//+++++++++++++++++++++++++++++++++++++++++++++++++++++++
	//if(SDL_Init(SDL_INIT_VIDEO) < 0)
	//{
	//	printf("SDL Init failed.\n");
	//	exit(1);
	//}
	
	// SDL setup: YUV output
	/*
 	pscreen = SDL_SetVideoMode(fmt.fmt.pix.width, fmt.fmt.pix.height,0,SDL_VIDEO_Flags);
	overlay = SDL_CreateYUVOverlay(fmt.fmt.pix.width, fmt.fmt.pix.height,SDL_YUY2_OVERLAY,pscreen);
	p = (unsigned char *)overlay->pixels[0];
	drect.x = 0;
	drect.y = 0;
	drect.w = pscreen->w;
	drect.h = pscreen->h;
	*/

	// SDL setup: RGB output
	//pscreen = SDL_SetVideoMode(fmt.fmt.pix.width, fmt.fmt.pix.height, 24, SDL_SWSURFACE | SDL_DOUBLEBUF);
	rmask = 0x000000ff;
	gmask = 0x0000ff00;
	bmask = 0x00ff0000;
	amask = 0x00000000;
	bpp   = 24;
	pitch = fmt.fmt.pix.width*3;
	pixels_num = fmt.fmt.pix.width*fmt.fmt.pix.height*3;
	pixels = (unsigned char *)malloc(pixels_num);
	memset(pixels, 0, pixels_num);
	p_RGB = (unsigned char *)pixels;
	//pscreen_RGB = SDL_CreateRGBSurfaceFrom(pixels, fmt.fmt.pix.width, fmt.fmt.pix.height, bpp, pitch, rmask, gmask, bmask, amask);

	
	//lasttime = SDL_GetTicks();
	//affmutex = SDL_CreateMutex();
	// SDL setup end
	
	// OpenCV setup
	CvMemStorage*  storage = cvCreateMemStorage(0);
	IplImage*      img     = cvCreateImageHeader(cvSize(fmt.fmt.pix.width,fmt.fmt.pix.height), IPL_DEPTH_8U, 3); // image header, no data buffer allocated
	IplImage*      imggray = cvCreateImage(cvSize(fmt.fmt.pix.width,fmt.fmt.pix.height), IPL_DEPTH_8U, 1); // image with its own data buffer
	cvNamedWindow("image", 1);

	unsigned char *pRGB = NULL;
	pRGB = (unsigned char *)calloc(1,fmt.fmt.pix.width*fmt.fmt.pix.height*3*sizeof(unsigned char));
	// OpenCV setup end

	//---------------------- request frame buffers ----------------------//
	struct v4l2_requestbuffers req;
	memset(&req, 0, sizeof (req));
	req.count = 3;	// number of buffers, i.e. how many frames can be stored
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	// stream type, always V4L2_BUF_TYPE_VIDEO_CAPTURE
	req.memory = V4L2_MEMORY_MMAP;	// memory type: V4L2_MEMORY_MMAP or V4L2_MEMORY_USERPTR
	if (ioctl(video_fd, VIDIOC_REQBUFS, &req) == -1) // apply the configuration
	{
		perror("request buffer error \n");
		return 2;
	}
	//-------------------------------------------------------//
	
	//------ map the memory obtained via VIDIOC_REQBUFS into user space ------//
	buffers = calloc(req.count, sizeof(VideoBuffer));	
	//printf("sizeof(VideoBuffer) is %d\n", sizeof(VideoBuffer));
	struct v4l2_buffer buf;
	for (numBufs = 0; numBufs < req.count; numBufs++)
	{
		memset( &buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	
		// memory type: V4L2_MEMORY_MMAP (memory mapped) or V4L2_MEMORY_USERPTR (user pointer)
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = numBufs;
		if (ioctl(video_fd, VIDIOC_QUERYBUF, &buf) < 0) // query the buffer's details
		{
			printf("VIDIOC_QUERYBUF error\n");
			return 2;
		}
		//printf("buf len is %d\n", sizeof(buf));
		buffers[numBufs].length = buf.length;
		buffers[numBufs].offset = (size_t) buf.m.offset;
		// use mmap to map the requested buffer into the application's address space ------
		buffers[numBufs].start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
			MAP_SHARED, video_fd, buf.m.offset);	
		if (buffers[numBufs].start == MAP_FAILED)
		{
			perror("buffers error\n");
			return 2;
		}
		if (ioctl(video_fd, VIDIOC_QBUF, &buf) < 0) // enqueue the buffer
		{
			printf("VIDIOC_QBUF error\n");
			return 2;
		}

	}
	//-------------------------------------------------------//
	
	//---------------------- start video streaming ----------------------//
	enum v4l2_buf_type type;
	type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(video_fd, VIDIOC_STREAMON, &type) < 0)
	{
		printf("VIDIOC_STREAMON error\n");
		return 2;
	}
	//-------------------------------------------------------//
	
	//-------------------- read back the source format --------------------//	
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;				
	if (ioctl(video_fd, VIDIOC_G_FMT, &fmt) < 0)	
	{
		printf("get format failed\n");
		return 2 ;
	}
	else
	{
		printf("Picture:Width = %d   Height = %d\n", fmt.fmt.pix.width, fmt.fmt.pix.height);
		
	}
	//-------------------------------------------------------//
	int i=0;	
	// some framebuffer-related or otherwise unused variables ---------------------------
	/*FILE * fd_y_file = 0;
	int a=0;
	int k = 0;
	int i=0;
	// set up the framebuffer device ------------------------------------
	struct jpeg_decompress_struct cinfo;
	struct jpeg_error_mgr jerr;
	FILE *infile; // handle to the JPEG file
	unsigned char *buffer;
	char *fb_device;
	unsigned int x;
	unsigned int y;
	// open the framebuffer device ------------------------------------------------
	if ((fb = open("/dev/fb0", O_RDWR)) < 0)
	{
		perror(__func__);
		return 2;
	}

	// query the framebuffer status -----------------------------------------
	fb_set(fb); // set the video memory parameters	
	fb_stat(fb); // get width, height and bit depth from the framebuffer driver
	
	printf("frame buffer: %dx%d,  %dbpp, 0x%xbyte= %d,graylevels= %d \n", 
		fbdev.fb_width, fbdev.fb_height, fbdev.fb_bpp, fbdev.fb_size, fbdev.fb_size,fbdev.fb_gray);

	// map the framebuffer into user space ----------------------------------
	fbdev.fb_mem = mmap (NULL, fbdev.fb_size, PROT_READ|PROT_WRITE,MAP_SHARED,fb,0);
	fbdev.fb = fb;
	*/
		
	// preview the captured images (a capture-to-file feature could be added if needed) -------------------
	while (sdl_quit)
	{
		
		fd_set fds; // fd set, for use with the select mechanism
		struct timeval tv;
		int ret1;
		
		FD_ZERO(&fds); // clear the fd set
		FD_SET(video_fd,&fds); // add the video device fd to the set
		
		// wait with a timeout; could also block indefinitely -------------------------------
		tv.tv_sec =2;
		tv.tv_usec=0;
		// wait until the video device is ready --------------------------------------
		ret1=select(video_fd+1,&fds,NULL,NULL,&tv);
		if(-1==ret1)
		{
			if(EINTR==errno)
				continue;
			printf("select error. \n");
			exit(EXIT_FAILURE);
		}
		if(0==ret1)
		{
			printf("select timeout. \n");
			continue;
		}		
		while(sdl_quit)		
		{
					 
			// check for quit events
			while(SDL_PollEvent(&sdlevent))
			{
				if(sdlevent.type == SDL_QUIT)
				{
					sdl_quit = 0;
					break;
				}
			}
			currtime = SDL_GetTicks();
			if(currtime - lasttime >0)
				frmrate = 1000/(currtime-lasttime);
			lasttime = currtime;

			// fetch a frame that is already prepared in the FIFO -----------------------		
			memset(&buf ,0,sizeof(buf));
			buf.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
			buf.memory=V4L2_MEMORY_MMAP;
			// dequeue the ready buffer --------------------------------------------
			ret1=ioctl (video_fd,VIDIOC_DQBUF,&buf);
			if(ret1!=0)
			{					
				printf("Lost the video \n");					
			}	
	
			// get the user-space base address of the current frame, for format conversion ------------------
			unsigned char *ptcur=buffers[buf.index].start;
			//++++++++++++++++++++++++++++++++++++++++
			// algorithm area
			//+++++++++++++++++++++++++++++++++++++++++
			// grayscale conversion
			/*
			unsigned char *pgray = NULL;
			pgray = (unsigned char *)calloc(1,fmt.fmt.pix.width*fmt.fmt.pix.height*2*sizeof(unsigned char)); // allocate to avoid a segfault
			yuv2gray(ptcur,pgray,fmt.fmt.pix.width, fmt.fmt.pix.height);
			*/

			// YUV to RGB (24-bit) conversion
			YUYVToRGB888(ptcur, pRGB, fmt.fmt.pix.width, fmt.fmt.pix.height);
			
			// OpenCV face detection
			cvSetData(img, pRGB, fmt.fmt.pix.width*3); // attach the pRGB data to img
			cvCvtColor(img, imggray, CV_RGB2GRAY); // grayscale img into imggray for the detector
			CvHaarClassifierCascade *cascade=(CvHaarClassifierCascade*)cvLoad("/usr/share/opencv-2.4.6.1/data/haarcascades/haarcascade_frontalface_alt2.xml", storage,0,0);
			cvClearMemStorage(storage);
			cvEqualizeHist(imggray, imggray);
			CvSeq* objects = cvHaarDetectObjects(imggray, cascade, storage, 1.1, 2, 0, cvSize(30,30),cvSize(30,30));
			
			// OpenCV: mark the detected faces
			CvScalar colors[] = {{{255,0,0}},{{0,0,0}}};
			int faces=0;
			for(faces=0; faces < (objects ? objects->total:0); faces++)
			{
				CvRect* r = (CvRect *)cvGetSeqElem(objects,faces);
				cvRectangle(img, cvPoint(r->x, r->y), cvPoint(r->x+r->width, r->y+r->height),colors[0],2,8,0 ); // draw a box on the original image
			}
			

			// tweak the OpenCV img pixel data
			/*CvScalar s;
			int imgi=0,imgj=0,sdlcount=0;
			for(imgi=0;imgi<img->height;imgi++)
			{
				for(imgj=0; imgj<img->width; imgj++)
				{
					s=cvGet2D(img,imgi,imgj);
					pRGB[sdlcount++]=0xff;//s.val[0];//B
					pRGB[sdlcount++]=0xff;//s.val[1];//G
					pRGB[sdlcount++]=0xff;//s.val[2];//R
					//cvSet2D(img,imgi,imgj,s);
				}
			}
			*/
			// OpenCV: display the image	
			cvShowImage("image", img);
			char c = cvWaitKey(1);
			printf("%d\n",c);
			if(c==27)
				sdl_quit=0;
			
			
			// load the YUV frame into SDL
			/*
			SDL_LockYUVOverlay(overlay);
			memcpy(p, pgray,pscreen->w*(pscreen->h)*2);
			SDL_UnlockYUVOverlay(overlay);
			SDL_DisplayYUVOverlay(overlay, &drect);
			*/

			// load the RGB frame into SDL
			//memcpy(pixels, pRGB, pscreen_RGB->w*(pscreen_RGB->h)*3);
			//SDL_BlitSurface(pscreen_RGB, NULL, display_RGB, NULL);
			//SDL_Flip(display_RGB);

			// frame-rate statistics
			//status = (char *)calloc(1,20*sizeof(char));
			//sprintf(status, "Fps:%d",frmrate);
			//SDL_WM_SetCaption(status, NULL);
			//SDL_Delay(10);
			// re-enqueue the used buffer --------------------------------------------
			ret1=ioctl (video_fd,VIDIOC_QBUF,&buf);
			if(ret1!=0)
			{					
				printf("Lost the video \n");					
			}
			
		}	
	}	

	//fb_munmap(fbdev.fb_mem, fbdev.fb_size);	// release the framebuffer mapping
	//close(fb); // close the framebuffer device
	for(i=0;i<req.count;i++)
	{
		if(-1==munmap(buffers[i].start,buffers[i].length))
			printf("munmap error:%d \n",i);
	}

	cvDestroyWindow("image");
	close(video_fd);					
	SDL_DestroyMutex(affmutex);
	//SDL_FreeYUVOverlay(overlay);
	cvReleaseImage(&img);
	cvReleaseImage(&imggray);
	free(status);
	free(buffers);
	//free(pRGB);
	SDL_Quit();
	return 0;

}
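YUYVToRGB888 is called above but not shown; a compact sketch of the standard YUYV (YUY2) to packed RGB conversion, assuming the same (src, dst, width, height) signature and full-range BT.601 coefficients, writing bytes in R,G,B order to match the CV_RGB2GRAY call above:

/* Hypothetical sketch of YUYVToRGB888 (an assumption, not the original). */
static unsigned char clamp_u8(int v) { return v < 0 ? 0 : (v > 255 ? 255 : v); }

void YUYVToRGB888(const unsigned char* src, unsigned char* dst, int width, int height)
{
	int i, j, pairs = width * height / 2;
	for (i = 0; i < pairs; i++, src += 4) {       /* each group: Y0 U Y1 V */
		int u = src[1] - 128, v = src[3] - 128;
		for (j = 0; j < 2; j++) {
			int y = src[j * 2];
			*dst++ = clamp_u8(y + ((359 * v) >> 8));          /* R */
			*dst++ = clamp_u8(y - ((88 * u + 183 * v) >> 8)); /* G */
			*dst++ = clamp_u8(y + ((454 * u) >> 8));          /* B */
		}
	}
}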