Example #1
void Hand_recognition::Detect_Skin(IplImage *src, IplImage *dst){

	cvCvtColor(src, img_YCrCb, CV_BGR2YCrCb);
	cvCvtColor(src, img_HSV, CV_BGR2HSV);

	cvZero(dst);

	for(int i = 0; i < dst->height; i++){
		for(int j = 0; j < dst->width; j++){

			B = (unsigned char)src->imageData[(j * 3) + i * src->widthStep];
			G = (unsigned char)src->imageData[(j * 3) + i * src->widthStep + 1];
			R = (unsigned char)src->imageData[(j * 3) + i * src->widthStep + 2];

			bool a = R1(R,G,B);

			if(a){
				H = (unsigned char)img_HSV->imageData[(j * 3) + i * img_HSV->widthStep];
				S = (unsigned char)img_HSV->imageData[(j * 3) + i * img_HSV->widthStep + 1];
				V = (unsigned char)img_HSV->imageData[(j * 3) + i * img_HSV->widthStep + 2];

				bool c = R3(H,S,V);

				if(c){
					Y = (unsigned char)img_YCrCb->imageData[(j * 3) + i * img_YCrCb->widthStep];
					Cr = (unsigned char)img_YCrCb->imageData[(j * 3) + i * img_YCrCb->widthStep + 1];
					Cb = (unsigned char)img_YCrCb->imageData[(j * 3) + i * img_YCrCb->widthStep + 2];

					bool b = R2(Y,Cr,Cb);

					if(b)
						dst->imageData[j + i * dst->widthStep] = (unsigned char) 255;
				}

			}
		}
	}

	cvErode(dst, dst, 0, MOP_NUM);
	cvDilate(dst, dst, 0, MOP_NUM);
}
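
The helper predicates R1, R2 and R3 are not shown in this example; they test the RGB, YCrCb and HSV pixel against per-colour-space skin thresholds. A minimal sketch of R1, assuming the widely cited explicit RGB daylight skin rule (the original's exact thresholds are unknown, and R2/R3 follow the same pattern in their own colour spaces):

#include <algorithm>
#include <cstdlib>

// Hypothetical R1: explicit RGB skin rule for daylight illumination.
static bool R1(int R, int G, int B)
{
	int maxc = std::max(R, std::max(G, B));
	int minc = std::min(R, std::min(G, B));
	return R > 95 && G > 40 && B > 20 &&
	       (maxc - minc) > 15 &&
	       std::abs(R - G) > 15 && R > G && R > B;
}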
Example #2
void GripPipeline::Process(cv::Mat & source0)
{
	cv::Mat cvResizeSrc = source0;
	cv::Size cvResizeDsize(0, 0);
	double cvResizeFx = 0.75;  // default Double
	double cvResizeFy = 0.75;  // default Double
	int cvResizeInterpolation = cv::INTER_LINEAR;
	cvResize(cvResizeSrc, cvResizeDsize, cvResizeFx, cvResizeFy, cvResizeInterpolation, this->cvResizeOutput);

	cv::Mat hsvThresholdInput = cvResizeOutput;

	double hsvThresholdHue[] = {69,180};
	double hsvThresholdSaturation[] = {172,255};
	double hsvThresholdValue[] = {112,255};
	hsvThreshold(hsvThresholdInput, hsvThresholdHue, hsvThresholdSaturation, hsvThresholdValue, this->hsvThresholdOutput);

	cv::Mat findContoursInput = hsvThresholdOutput;
	source0 = hsvThresholdOutput;

	// note: leftover from the generated pipeline; findContours() is never
	// called here, so findContoursOutput (and this copy) remains empty
	std::vector<std::vector<cv::Point> > filterContoursContours = findContoursOutput;

	cv::Mat cvErodeSrc = hsvThresholdOutput;
	cv::Mat cvErodeKernel;
	cv::Point cvErodeAnchor(-1, -1);
	double cvErodeIterations = 1;
	int cvErodeBordertype = cv::BORDER_CONSTANT;
	cv::Scalar cvErodeBordervalue(-1);
	cvErode(cvErodeSrc, cvErodeKernel, cvErodeAnchor, cvErodeIterations, cvErodeBordertype, cvErodeBordervalue, this->cvErodeOutput);

	cv::Mat findLinesInput = cvErodeOutput;
	findLines(findLinesInput, this->findLinesOutput);

//	print the lines
//	GripPipeline::printLines(source0, findLinesOutput);

	// find center
	GripPipeline::findCenter(source0, findLinesOutput);

	// find distance
	GripPipeline::findDistance(source0, findLinesOutput, difference);
}
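
The hsvThreshold helper called above is a GRIP-generated private method whose body is not shown. A minimal sketch of what such a helper conventionally does, as an assumption (convert to HSV, then keep pixels inside the three [min, max] ranges):

// Sketch of a GRIP-style HSV threshold step; names mirror the call site.
void hsvThreshold(cv::Mat &input, double hue[], double sat[], double val[], cv::Mat &out)
{
	cv::Mat hsv;
	cv::cvtColor(input, hsv, cv::COLOR_BGR2HSV);
	cv::inRange(hsv, cv::Scalar(hue[0], sat[0], val[0]),
	                 cv::Scalar(hue[1], sat[1], val[1]), out);
}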
Example #3
void givedepth(IplImage *localimagergb)
{
	IplImage *localimage = cvCreateImage(cvGetSize(localimagergb), IPL_DEPTH_8U, 3);
	cvCvtColor(localimagergb, localimage, CV_BGR2HSV);
	IplImage *blobbedscaling = cvCreateImage(cvGetSize(localimagergb), IPL_DEPTH_8U, 3);
	uchar *itemp = (uchar *)(localimage->imageData);
	IplImage *binaryscaling = cvCreateImage(cvGetSize(localimagergb), IPL_DEPTH_8U, 1);
	uchar *itemp1 = (uchar *)(binaryscaling->imageData);

	// hi2 is a global frame, assumed to have the same size as the input
	for (int i = 0; i < hi2->height; i++) {
		for (int j = 0; j < hi2->width; j++) {
			uchar h = itemp[i * localimage->widthStep + j * localimage->nChannels];
			uchar s = itemp[i * localimage->widthStep + j * localimage->nChannels + 1];
			uchar v = itemp[i * localimage->widthStep + j * localimage->nChannels + 2];	// previous threshold: 124

			// pixels inside the H-S-V window become black, the rest white
			if (h > hl && h < hh && s > sl && s < sh && v > vl && v < vh)
				itemp1[i * binaryscaling->widthStep + j] = 0;
			else
				itemp1[i * binaryscaling->widthStep + j] = 255;
		}
	}
    cvErode( binaryscaling, binaryscaling, NULL, 4);
	cvDilate(binaryscaling, binaryscaling, NULL, 4);
	CBlobResult  blob;				
	CBlob *currentBlob=NULL;
	blob=CBlobResult(binaryscaling,NULL,255);
	blob.Filter(blob,B_EXCLUDE,CBlobGetArea(),B_LESS,500);
	cvMerge(binaryscaling,binaryscaling,binaryscaling,NULL,blobbedscaling);
	CBlob hand1, hand2;																//two blobs, one for each hand
	blob.GetNthBlob( CBlobGetArea(), 0, hand2 );
	blob.GetNthBlob( CBlobGetArea(), 1, hand1 );
	hand1.FillBlob(blobbedscaling, CV_RGB(0,0,255));									//fill hand one's blob with blue
	hand2.FillBlob(blobbedscaling, CV_RGB(0,255,0));									//fill hand two's blob with green
	coordinates (blobbedscaling,0);
}
Example #4
void clsTracking2D::calcBGsubtraction(IplImage *image, IplImage *foreground, IplImage *debugImage)
{
        if(!BGcolorstarted)
        {
                bgcolorImage = cvCreateImage( cvSize( image->width, image->height ),image->depth, image->nChannels );
                bgsGray = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);
                cvCvtColor(image, bgsGray, CV_BGR2GRAY);
                readyForParticles = true;
                BGcolorstarted = true;
        }
        
        cvCopy(image,bgcolorImage);
        
        if(paramsInt["blur"] > 0)
                cvSmooth(image, bgcolorImage, CV_BLUR, paramsInt["blur"], paramsInt["blur"], 0, 0);
        
        //converting to HSV (disabled: with this line commented out, the
        //min/max H-S-L parameters below are in fact applied to B, G and R)
        // cvCvtColor(bgcolorImage, bgcolorImage, CV_BGR2HSV);
        
        //filtering by color... works well on this video!
        cvInRangeS(bgcolorImage, cvScalar(paramsInt["minH"], paramsInt["minS"], paramsInt["minL"]), cvScalar(paramsInt["maxH"], paramsInt["maxS"], paramsInt["maxL"]), foreground);
        // cv::invert(bgcolorImage,bgcolorImage);
        // cvCvtColor(bgcolorImage, bgsGray, CV_BGR2GRAY);
        // cvThreshold(bgsGray, foreground, paramsInt["minThreshold"], paramsInt["maxThreshold"], CV_THRESH_BINARY );
        
        // cvThreshold(bgsGray, foreground, paramsInt["minThreshold"], paramsInt["maxThreshold"], CV_THRESH_BINARY);
        // cvThreshold(bgsGray, foreground, paramsInt["minThreshold"], paramsInt["maxThreshold"], CV_THRESH_BINARY_INV);
        
        // cvThreshold(foreground, foreground, 5,255, 1 );
        //removing loose points
        cvErode(foreground, foreground, NULL,1);
        
        //augmenting neighbour points
        cvDilate(foreground, foreground, NULL,1);
        
        //calculating edges
        // cvCanny(foreground,foreground,10,100,3);
}
Example #5
/**
* Runs an iteration of the Pipeline and updates outputs.
*
* Sources need to be set before calling this method. 
*
*/
void GripPipeline::process(cv::Mat source0){
	//Step CV_resize0:
	//input
	cv::Mat cvResizeSrc = source0;
	cv::Size cvResizeDsize(0, 0);
	double cvResizeFx = 0.25;  // default Double
	double cvResizeFy = 0.25;  // default Double
    int cvResizeInterpolation = cv::INTER_LINEAR;
	cvResize(cvResizeSrc, cvResizeDsize, cvResizeFx, cvResizeFy, cvResizeInterpolation, this->cvResizeOutput);
	//Step HSV_Threshold0:
	//input
	cv::Mat hsvThresholdInput = cvResizeOutput;
	double hsvThresholdHue[] = {42.086330935251794, 86.7911714770798};
	double hsvThresholdSaturation[] = {32.10431654676259, 207.37691001697794};
	double hsvThresholdValue[] = {91.72661870503596, 255.0};
	hsvThreshold(hsvThresholdInput, hsvThresholdHue, hsvThresholdSaturation, hsvThresholdValue, this->hsvThresholdOutput);
	//Step CV_erode0:
	//input
	cv::Mat cvErodeSrc = hsvThresholdOutput;
	cv::Mat cvErodeKernel;
	cv::Point cvErodeAnchor(-1, -1);
	double cvErodeIterations = 1;  // default Double
    int cvErodeBordertype = cv::BORDER_CONSTANT;
	cv::Scalar cvErodeBordervalue(-1);
	cvErode(cvErodeSrc, cvErodeKernel, cvErodeAnchor, cvErodeIterations, cvErodeBordertype, cvErodeBordervalue, this->cvErodeOutput);
	//Step Mask0:
	//input
	cv::Mat maskInput = cvResizeOutput;
	cv::Mat maskMask = cvErodeOutput;
	mask(maskInput, maskMask, this->maskOutput);
	//Step Find_Blobs0:
	//input
	cv::Mat findBlobsInput = maskOutput;
	double findBlobsMinArea = 0.0;  // default Double
	double findBlobsCircularity[] = {0.0, 1.0};
	bool findBlobsDarkBlobs = true;  // default Boolean
	findBlobs(findBlobsInput, findBlobsMinArea, findBlobsCircularity, findBlobsDarkBlobs, this->findBlobsOutput);
}
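
The mask helper above combines the resized color frame with the eroded binary image. A minimal sketch of such a mask step, assuming the conventional behaviour (keep input pixels wherever the mask is non-zero):

// Sketch of a mask step for a GRIP-style pipeline (assumed behaviour).
void GripPipeline::mask(cv::Mat &input, cv::Mat &maskImg, cv::Mat &output)
{
	output = cv::Mat::zeros(input.size(), input.type());
	input.copyTo(output, maskImg);	// copy only where maskImg != 0
}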
Example #6
int main( int argc, char** argv )
{
	//Check if the user specified an image to process
	if(argc >= 2 )
	{
		char* filename= argv[1];
		//load the image as grayscale
		imagen=cvLoadImage(filename,0);
	}
	else
	{
		printf("Use:\n\t%s image\n",argv[0]);
		return 0;
	}	
//------------------------------------------------------------------------------------
//NUMBER ISOLATION

	//Create needed images
	smooth= cvCreateImage(cvSize(imagen->width, imagen->height), IPL_DEPTH_8U, 1);
	threshold= cvCreateImage(cvSize(imagen->width, imagen->height), IPL_DEPTH_8U, 1);
	open_morf= cvCreateImage(cvSize(imagen->width, imagen->height), IPL_DEPTH_8U, 1);
	cvSmooth(imagen, smooth, CV_GAUSSIAN, 3, 0, 0, 0);
	
	CvScalar avg;
	CvScalar avgStd;
	cvAvgSdv(smooth, &avg, &avgStd, NULL);
	//printf("Avg: %f\nStd: %f\n", avg.val[0], avgStd.val[0]);
	//threshold image
	cvThreshold(smooth, threshold, (int)avg.val[0]+4*(int)(avgStd.val[0]/8), 255, CV_THRESH_BINARY);
	//Morphologic filters: erode then dilate (an opening) to drop small noise
	cvErode(threshold, open_morf, NULL,1); 
	cvDilate(open_morf, open_morf, NULL,1); 
	//save the result if an output path was given (the original passed an
	//uninitialized int as the params argument of cvSaveImage)
	if(argc >= 3)
		cvSaveImage(argv[2], open_morf, 0);
	//Release images
	cvReleaseImage(&imagen);
	cvReleaseImage(&open_morf);
	return 0;
}
Example #7
IplImage* cutterDetect(IplImage *img) {

    // Convert the image into an HSV image
    IplImage* imgHSV = cvCreateImage(cvGetSize(img), 8, 3);
    // Create an image for the output
    IplImage* out = cvCreateImage( cvGetSize(img), IPL_DEPTH_8U, 3 );
    IplImage *temp = cvCreateImage(cvSize(img->width,img->height),8,1);
    IplImage* imgThreshed = cvCreateImage(cvGetSize(img), 8, 1);

    // Perform a Gaussian blur
    cvSmooth( img, out, CV_GAUSSIAN,15, 15);
    cvCvtColor(out, imgHSV, CV_BGR2HSV);
    
    cvInRangeS(imgHSV, BLUE_LOW, BLUE_HIGH, imgThreshed);
   
    cvErode(imgThreshed,temp,NULL,1);
    cvDilate(temp,imgThreshed,NULL,1);
    
    cvReleaseImage(&imgHSV);
    cvReleaseImage(&temp);	
    cvReleaseImage( &out );	
    return imgThreshed;
}
Example #8
static void node_composit_exec_cvErode(void *data, bNode *node, bNodeStack **in, bNodeStack **out)
{
	int iterations;	
	IplImage *src, *dst;
	CompBuf *dst_buf;

	if(in[0]->hasinput==0) return;
	if(out[0]->hasoutput==0) return;

	src= BOCV_IplImage_attach(in[0]->data);
	dst_buf = dupalloc_compbuf(in[0]->data);
	iterations=(int)in[1]->vec[0];
	dst = BOCV_IplImage_attach(dst_buf);

	cvErode(src,dst,0,iterations);

	out[0]->data = dst_buf;

	BOCV_IplImage_detach(src);
	BOCV_IplImage_detach(dst);
}
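
In the C API, passing 0 (NULL) as the structuring element, as this node does, makes cvErode use a 3x3 rectangular kernel anchored at its center. The explicit equivalent of the call above, for reference:

IplConvKernel *kernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL);
cvErode(src, dst, kernel, iterations);      /* same result as cvErode(src, dst, 0, iterations) */
cvReleaseStructuringElement(&kernel);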
Example #9
  int regionTracker::calcCentroidAndArea()
  {
    // input: none
    // output: set centroid and area, CvPoint and int
    // return: 0 on success, -1 if the region is empty
    //
    // Calculate centroid and area of the region

    int areaCount = 0;         // count total pixel of region
    int xSum = 0, ySum = 0;    // sum of x (or y) coordinate of each pixel in the region
    CvScalar current;
    int i, j;
    int iteration = 5;

    // erode 'result' to reduce the arm's influence when calculating the centroid
    cvErode (result, contractedResult, element, iteration);

    for(i=0; i<result->width; i++)
      for(j=0; j<result->height; j++)
	{
	  current = cvGet2D(contractedResult, j, i);
	  if(current.val[0] != 0)
	    {
	      areaCount++;
	      xSum += i;
	      ySum += j;
	    }
	}

    // set result
    if(areaCount == 0) return -1;
    area = areaCount;
    centroid.x = xSum / areaCount;
    centroid.y = ySum / areaCount;

    return 0;
  }
Example #10
int regionDetector::getRegion(IplImage *src, int x, int y, IplImage *dst)
{
  // input: image (depth, intensity and so on), IplImage, 1 channel (8-bit depth recommended)
  //        a coordinate which is contained in the region you want to get, int
  // return: region image in dst, IplImage, binary image,
  //         or 0 if the coordinate is invalid (outside the image, or no region there)
  //
  // Take a (depth, intensity, binary and so on) image and classify it by region.
  // A region is a set of pixels whose values are almost the same as their neighbours'.
  // Return the region which contains pixel (x, y).

  int bitDepth;
  int iteration;

  // prepare images
  original = src;
  result->imageData = dst->imageData;

  // set threshold value.
  bitDepth = src->depth;
  if(bitDepth == IPL_DEPTH_8U)
    threshold = 20;
  else if(bitDepth == IPL_DEPTH_16U)
    threshold = 4000;
  else
    return 0;

  // get region
  traverse(x, y, NONE);

  // noise reduction
  iteration = 2;
  cvErode (dst, dst, element, iteration);
  cvDilate (dst, dst, element, iteration);

  return 0;
}
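
The erode-then-dilate pair with the same element and iteration count, as used here for noise reduction, is a morphological opening. A single-call sketch of the equivalent (for CV_MOP_OPEN the temporary-image argument may be NULL):

cvMorphologyEx(dst, dst, NULL, element, CV_MOP_OPEN, iteration);  /* opening = erode, then dilate */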
Example #11
CV_IMPL void
cvCalcMotionGradient( const CvArr* mhiimg, CvArr* maskimg,
                      CvArr* orientation,
                      double delta1, double delta2,
                      int aperture_size )
{
    cv::Ptr<CvMat> dX_min, dY_max;

    CvMat  mhistub, *mhi = cvGetMat(mhiimg, &mhistub);
    CvMat  maskstub, *mask = cvGetMat(maskimg, &maskstub);
    CvMat  orientstub, *orient = cvGetMat(orientation, &orientstub);
    CvMat  dX_min_row, dY_max_row, orient_row, mask_row;
    CvSize size;
    int x, y;

    float  gradient_epsilon = 1e-4f * aperture_size * aperture_size;
    float  min_delta, max_delta;

    if( !CV_IS_MASK_ARR( mask ))
        CV_Error( CV_StsBadMask, "" );

    if( aperture_size < 3 || aperture_size > 7 || (aperture_size & 1) == 0 )
        CV_Error( CV_StsOutOfRange, "aperture_size must be 3, 5 or 7" );

    if( delta1 <= 0 || delta2 <= 0 )
        CV_Error( CV_StsOutOfRange, "both delta's must be positive" );

    if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 || CV_MAT_TYPE( orient->type ) != CV_32FC1 )
        CV_Error( CV_StsUnsupportedFormat,
        "MHI and orientation must be single-channel floating-point images" );

    if( !CV_ARE_SIZES_EQ( mhi, mask ) || !CV_ARE_SIZES_EQ( orient, mhi ))
        CV_Error( CV_StsUnmatchedSizes, "" );

    if( orient->data.ptr == mhi->data.ptr )
        CV_Error( CV_StsInplaceNotSupported, "orientation image must be different from MHI" );

    if( delta1 > delta2 )
    {
        double t;
        CV_SWAP( delta1, delta2, t );
    }

    size = cvGetMatSize( mhi );
    min_delta = (float)delta1;
    max_delta = (float)delta2;
    dX_min = cvCreateMat( mhi->rows, mhi->cols, CV_32F );
    dY_max = cvCreateMat( mhi->rows, mhi->cols, CV_32F );

    // calc Dx and Dy
    cvSobel( mhi, dX_min, 1, 0, aperture_size );
    cvSobel( mhi, dY_max, 0, 1, aperture_size );
    cvGetRow( dX_min, &dX_min_row, 0 );
    cvGetRow( dY_max, &dY_max_row, 0 );
    cvGetRow( orient, &orient_row, 0 );
    cvGetRow( mask, &mask_row, 0 );

    // calc gradient
    for( y = 0; y < size.height; y++ )
    {
        dX_min_row.data.ptr = dX_min->data.ptr + y*dX_min->step;
        dY_max_row.data.ptr = dY_max->data.ptr + y*dY_max->step;
        orient_row.data.ptr = orient->data.ptr + y*orient->step;
        mask_row.data.ptr = mask->data.ptr + y*mask->step;
        cvCartToPolar( &dX_min_row, &dY_max_row, 0, &orient_row, 1 );

        // make orientation zero where the gradient is very small
        for( x = 0; x < size.width; x++ )
        {
            float dY = dY_max_row.data.fl[x];
            float dX = dX_min_row.data.fl[x];

            if( fabs(dX) < gradient_epsilon && fabs(dY) < gradient_epsilon )
            {
                mask_row.data.ptr[x] = 0;
                orient_row.data.i[x] = 0;
            }
            else
                mask_row.data.ptr[x] = 1;
        }
    }

    cvErode( mhi, dX_min, 0, (aperture_size-1)/2);
    cvDilate( mhi, dY_max, 0, (aperture_size-1)/2);

    // mask off pixels which have little motion difference in their neighborhood
    for( y = 0; y < size.height; y++ )
    {
        dX_min_row.data.ptr = dX_min->data.ptr + y*dX_min->step;
        dY_max_row.data.ptr = dY_max->data.ptr + y*dY_max->step;
        mask_row.data.ptr = mask->data.ptr + y*mask->step;
        orient_row.data.ptr = orient->data.ptr + y*orient->step;
        
        for( x = 0; x < size.width; x++ )
        {
            float d0 = dY_max_row.data.fl[x] - dX_min_row.data.fl[x];

            if( mask_row.data.ptr[x] == 0 || d0 < min_delta || max_delta < d0 )
            {
                mask_row.data.ptr[x] = 0;
                orient_row.data.i[x] = 0;
            }
        }
    }
}
Example #12
static void erode_end_frame_filter(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg)
{
    OCVContext *ocv = ctx->priv;
    DilateContext *dilate = ocv->priv;   /* the erode filter reuses the dilate filter's context */
    cvErode(inimg, outimg, dilate->kernel, dilate->nb_iterations);
}
Example #13
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
CvSeq* findSquares4( IplImage* img, CvMemStorage* &storage,bool isForDay)  
{  

	CvSeq* contours;  

	int i, l, N = 50;
	int thresh1 = 15;
	int thresh2 = isForDay ? 240 : 180;
	CvSize sz = cvSize( img->width & -2, img->height & -2 );  

	IplImage* timg = cvCloneImage( img ); // make a copy of input image  
	IplImage* gray = cvCreateImage( cvGetSize(timg), 8, 1 );   
	//IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );  
	IplImage* tgray;  
	CvSeq* result;  
	double s, t;  
	//	storage=cvCreateMemStorage(0);  
	// create empty sequence that will contain points -  
	// 4 points per square (the square's vertices)  
	CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );  
	// select the maximum ROI in the image  
	// with the width and height divisible by 2  
	cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));  

	// down-scale and upscale the image to filter out the noise  
	//	cvPyrDown( timg, pyr, 7 );  
	//	cvPyrUp( pyr, timg, 7 );  
	tgray = cvCreateImage( sz, 8, 1 );  
	//	cvCvtColor(timg,timg,CV_BGR2HSV);
	//	cvCvtColor(timg,tgray,CV_HSV2GRAY);
	//	cvShowImage("tg",tgray);
	// find squares in every color plane of the image  

	for(int c = 0; c < 3; c++ )  
	{  
	//	cout<<c<<endl;
		// extract the c-th color plane  
		cvSetImageCOI( timg, c+1 );  
		cvCopy( timg, tgray, 0 );  
		//	cvEqualizeHist(tgray,tgray);
		// try several threshold levels  
		for( l = 0; l < N; l++ )  
		{  
			// hack: use Canny instead of zero threshold level.
			// Canny helps to catch squares with gradient shading
			if( l == 0 )  
			{  
				// apply Canny. Take the upper threshold from slider
				// and set the lower to 0 (which forces edge merging)
				cvCanny( tgray, gray, thresh1, thresh2, 3 );     // daytime thresholds
				//		cvCanny( tgray, gray,15, 240, 3 );  
				//		ImagePreprocess::colorEdgeDetect1(img,gray,15,240);
				// dilate canny output to remove potential  
				// holes between edge segments   
				cvDilate( gray, gray, 0, 1 );  
				cvErode( gray, gray, 0, 1 );  

			}  
			else 
			{  
				//apply threshold if l!=0:  
				cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );  
				//		cvShowImage("gray",gray);
			}

			// find contours and store them all as a list  
			cvFindContours( gray, storage, &contours, sizeof(CvContour),  
				CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );  


			// test each contour  
			while( contours )  
			{  
				// approximate contour with accuracy proportional  
				// to the contour perimeter  
				result = cvApproxPoly( contours, sizeof(CvContour), storage,  
					CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );  
				// square contours should have 4 vertices after approximation  
				// relatively large area (to filter out noisy contours)  
				// and be convex.  
				// Note: absolute value of an area is used because  
				// area may be positive or negative - in accordance with the  
				// contour orientation  
				if( result->total == 4 &&  
					fabs(cvContourArea(result,CV_WHOLE_SEQ)) > 4000 &&   
					fabs(cvContourArea(result,CV_WHOLE_SEQ)) < 10000 &&
					cvCheckContourConvexity(result) )  
				{  
					s = 0;  
					for( i = 0; i < 5; i++ )  
					{  
						// find minimum angle between joint  
						// edges (maximum of cosine)  
						if( i >= 2 )
						{
							t = fabs(angle(
								(CvPoint*)cvGetSeqElem( result, i ),
								(CvPoint*)cvGetSeqElem( result, i-2 ),
								(CvPoint*)cvGetSeqElem( result, i-1 )));
							s = s > t ? s : t;
						}
					}  


					// if cosines of all angles are small
					// (all angles are ~90 degrees) then write the quadrangle's
					// vertices to the resultant sequence
					if( s < 0.3 )  
						for( i = 0; i < 4; i++ )  
							cvSeqPush( squares,  
							(CvPoint*)cvGetSeqElem( result, i ));  
				}  



				// take the next contour  
				contours = contours->h_next;  

			}  
		}

	}  


	// release all the temporary images  
	cvReleaseImage( &gray );  
	//	cvReleaseImage( &pyr );  
	cvReleaseImage( &tgray );  
	cvReleaseImage( &timg );  

	return squares;  
}  
Example #14
void get_fish_pos(){
	mod++;
	if(g_msPrm.isDrawing){
		cvCopy(frame,cl_frame,NULL);
		switch(g_mouse){
		case AVG_POS:
			set_avg_pos(cl_frame);
		break;
		case RANGE_POS:
			set_range_pos(cl_frame);
		break;

		default:
		break;
		}
	}
	else{

		frame = cvQueryFrame( g_capture );
//			if(!(++mod&0x3))
//			cvShowImage("Camera",frame);
#if 1
//		if(g_msPrm.box.width != 0){
//			draw_box(frame,g_msPrm.box);
//		}
//		else{
//		}
		cvCircle(frame,
			 fishPos,
			 sqrt(mom.m00)/1,
			 cvScalar(0x00,0x00,0x00,0),1,8,0
			 );
		//if(!(++mod&0x3))
		cvShowImage("Camera",frame);
		if(g_is_range){
			cvSetImageROI(frame,g_range);
			cvSetZero(gr_frame);
			cvSetImageROI(gr_frame,g_range);
		}

		cvSmooth( frame, frame, CV_GAUSSIAN, 3, 3 ,0,0);
		//cvCvtColor(frame,frame,CV_RGB2HSV);
		cvInRangeS(frame,g_hsv_min,g_hsv_max,gr_frame);

		if(g_is_range){
			cvSetImageROI(frame,g_range);
		}
		cvErode(gr_frame,gr_frame,NULL,2);
		cvDilate(gr_frame,gr_frame,NULL,2);
		if(g_is_range){
			cvResetImageROI(frame);
			cvResetImageROI(gr_frame);
		}
		cvMoments(gr_frame,&mom,1);
		if(mom.m00 != 0){	// guard against an empty mask (division by zero)
			fishPos.x = (mom.m10/mom.m00);
			fishPos.y = (mom.m01/mom.m00);
		}

		cvShowImage( "set_HSV", gr_frame );
//			cvErode(gr_frame,gr_frame,NULL,10);
#endif
	}

}
Example #15
static GstFlowReturn gst_gcs_transform_ip(GstBaseTransform * btrans, GstBuffer * gstbuf) 
{
  GstGcs *gcs = GST_GCS (btrans);

  GST_GCS_LOCK (gcs);

  //////////////////////////////////////////////////////////////////////////////
  // get image data from the input, which is RGBA or BGRA
  gcs->pImageRGBA->imageData = (char*)GST_BUFFER_DATA(gstbuf);
  cvSplit(gcs->pImageRGBA,   gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, gcs->pImgChX );
  cvCvtColor(gcs->pImageRGBA,  gcs->pImgRGB, CV_BGRA2BGR);


  //////////////////////////////////////////////////////////////////////////////
  ////////////////////////////////////////////////////////MOTION CUES INTEGR////
  //////////////////////////////////////////////////////////////////////////////

  //////////////////////////////////////////////////////////////////////////////
  // apply step 1. filtering using bilateral filter. Cannot happen in-place => scratch
  cvSmooth(gcs->pImgRGB, gcs->pImgScratch, CV_BILATERAL, 3, 50, 3, 0);
  // create GRAY image
  cvCvtColor(gcs->pImgScratch, gcs->pImgGRAY, CV_BGR2GRAY);

  // Frame-difference the GRAY frame and the previous one;
  // not intuitive: first smooth the frames, then take their difference
  cvCopy( gcs->pImgGRAY,   gcs->pImgGRAY_copy,  NULL);
  cvCopy( gcs->pImgGRAY_1, gcs->pImgGRAY_1copy, NULL);
  get_frame_difference( gcs->pImgGRAY_copy, gcs->pImgGRAY_1copy, gcs->pImgGRAY_diff);
  cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, NULL, 3);
  cvDilate( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, NULL, 3);


  //////////////////////////////////////////////////////////////////////////////
  //////////////////////////////////////////////////////////////////////////////
  // ghost mapping
  gcs->dstTri[0].x = gcs->facepos.x - gcs->facepos.width/2 ;
  gcs->dstTri[0].y = gcs->facepos.y - gcs->facepos.height/2;
  gcs->dstTri[1].x = gcs->facepos.x - gcs->facepos.width/2;
  gcs->dstTri[1].y = gcs->facepos.y + gcs->facepos.height/2;
  gcs->dstTri[2].x = gcs->facepos.x + gcs->facepos.width/2;
  gcs->dstTri[2].y = gcs->facepos.y + gcs->facepos.height/2;

  if( gcs->ghostfilename){
    cvGetAffineTransform( gcs->srcTri, gcs->dstTri, gcs->warp_mat );
    cvWarpAffine( gcs->cvGhostBwResized, gcs->cvGhostBwAffined, gcs->warp_mat );
  }




  //////////////////////////////////////////////////////////////////////////////
  //////////////////////////////////////////////////////////////////////////////
  // GrabCut algorithm preparation and running

  gcs->facepos.x = gcs->facepos.x - gcs->facepos.width/2;
  gcs->facepos.y = gcs->facepos.y - gcs->facepos.height/2;

  // create an IplImage  with the skin colour pixels as 255
  compose_skin_matrix(gcs->pImgRGB, gcs->pImg_skin);
  // And the skin pixels with the movement mask
  cvAnd( gcs->pImg_skin,  gcs->pImgGRAY_diff,  gcs->pImgGRAY_diff);
  //cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5, 5, 3, 3, CV_SHAPE_RECT,NULL), 1);
  // note: these inline-created structuring elements are never released, so they leak once per frame
  cvDilate(gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(7,7, 5,5, CV_SHAPE_RECT,NULL), 2);
  cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5,5, 3,3, CV_SHAPE_RECT,NULL), 2);

  // if the incoming alpha channel is (almost) all 1's, ignore it: protects against upstream elements that provide no real alpha
  if((0.75*(gcs->width * gcs->height) <= cvCountNonZero(gcs->pImgChX)))
    cvZero(gcs->pImgChX);
  // OR the input Alpha
  cvOr( gcs->pImgChX,  gcs->pImgGRAY_diff,  gcs->pImgGRAY_diff);


  //////////////////////////////////////////////////////////////////////////////
  // try to consolidate a single mask from all the sub-patches
  cvDilate(gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(7,7, 5,5, CV_SHAPE_RECT,NULL), 3);
  cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5,5, 3,3, CV_SHAPE_RECT,NULL), 4);

  //////////////////////////////////////////////////////////////////////////////
  // use either Ghost or boxes-model to create a PR foreground starting point in gcs->grabcut_mask
  if( gcs->ghostfilename)
    compose_grabcut_seedmatrix3(gcs->grabcut_mask, gcs->cvGhostBwAffined, gcs->pImgGRAY_diff  );
  else{
    // toss it all to the bbox creation function, together with the face position and size
    compose_grabcut_seedmatrix2(gcs->grabcut_mask, gcs->facepos, gcs->pImgGRAY_diff, gcs->facefound );
  }


  //////////////////////////////////////////////////////////////////////////////
#ifdef KMEANS
  gcs->num_clusters = 18; // keep it even to simplify integer arithmetics
  cvCopy(gcs->pImgRGB, gcs->pImgRGB_kmeans, NULL);
  posterize_image(gcs->pImgRGB_kmeans);
  create_kmeans_clusters(gcs->pImgRGB_kmeans, gcs->kmeans_points, gcs->kmeans_clusters, 
                         gcs->num_clusters, gcs->num_samples);
  adjust_bodybbox_w_clusters(gcs->grabcut_mask, gcs->pImgRGB_kmeans, gcs->num_clusters, gcs->facepos);
#endif //KMEANS


  //////////////////////////////////////////////////////////////////////////////
  if( gcs->debug < 70)
    run_graphcut_iteration( &(gcs->GC), gcs->pImgRGB, gcs->grabcut_mask, &gcs->bbox_prev);



  // get a copy of GRAY for the next iteration
  cvCopy(gcs->pImgGRAY, gcs->pImgGRAY_1, NULL);

  //////////////////////////////////////////////////////////////////////////////
  // if we want to display, just overwrite the output
  if( gcs->display ){
    int outputimage = gcs->debug;
    switch( outputimage ){
    case 1: // output the GRAY difference
      cvCvtColor( gcs->pImgGRAY_diff, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 50:// Ghost remapped
      cvCvtColor( gcs->cvGhostBwAffined, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 51:// Ghost applied
      cvAnd( gcs->cvGhostBwAffined, gcs->pImgGRAY, gcs->pImgGRAY, NULL );
      cvCvtColor( gcs->pImgGRAY, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 60:// Graphcut
      cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL);  // get only FG
      cvConvertScale( gcs->grabcut_mask, gcs->grabcut_mask, 127.0);
      cvCvtColor( gcs->grabcut_mask, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 61:// Graphcut applied on input/output image
      cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL);  // get only FG, PR_FG
      cvConvertScale( gcs->grabcut_mask, gcs->grabcut_mask, 255.0);
      cvAnd( gcs->grabcut_mask,  gcs->pImgGRAY,  gcs->pImgGRAY, NULL);
      cvCvtColor( gcs->pImgGRAY, gcs->pImgRGB, CV_GRAY2BGR );

      cvRectangle(gcs->pImgRGB, cvPoint(gcs->bbox_now.x, gcs->bbox_now.y), 
                  cvPoint(gcs->bbox_now.x + gcs->bbox_now.width, gcs->bbox_now.y+gcs->bbox_now.height),
                  cvScalar(127,0.0), 1, 8, 0 );
     break;
    case 70:// bboxes
      cvZero( gcs->pImgGRAY );
      cvMul( gcs->grabcut_mask,  gcs->grabcut_mask,  gcs->pImgGRAY, 40.0 );
      cvCvtColor( gcs->pImgGRAY, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 71:// bboxes applied on the original image
      cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL);  // get only FG, PR_FG
      cvMul( gcs->grabcut_mask,  gcs->pImgGRAY,  gcs->pImgGRAY, 1.0 );
      cvCvtColor( gcs->pImgGRAY, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 72: // input alpha channel mapped to output
      cvCvtColor( gcs->pImgChX, gcs->pImgRGB, CV_GRAY2BGR );
      break;
#ifdef KMEANS
    case 80:// k-means output
      cvCopy(gcs->pImgRGB_kmeans, gcs->pImgRGB, NULL);
      break;
    case 81:// k-means output filtered with bbox/ghost mask
      cvSplit(gcs->pImgRGB_kmeans, gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, NULL        );
      cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL);  // get FG and PR_FG
      cvConvertScale( gcs->grabcut_mask, gcs->grabcut_mask, 255.0);     // scale any to 255.

      cvAnd( gcs->grabcut_mask,  gcs->pImgCh1,  gcs->pImgCh1, NULL );
      cvAnd( gcs->grabcut_mask,  gcs->pImgCh2,  gcs->pImgCh2, NULL );
      cvAnd( gcs->grabcut_mask,  gcs->pImgCh3,  gcs->pImgCh3, NULL );

      cvMerge(              gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, NULL, gcs->pImgRGB);
      break;
#endif //KMEANS
    default:
      break;
    }
  }

  //////////////////////////////////////////////////////////////////////////////
  // copy anyhow the fg/bg to the alpha channel in the output image alpha ch
  cvSplit(gcs->pImgRGB, gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, NULL        );
  cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL);  // get only FG and possible FG
  cvConvertScale( gcs->grabcut_mask, gcs->grabcut_mask, 255.0);
  gcs->pImgChA->imageData = (char*)gcs->grabcut_mask->data.ptr;

  cvMerge(              gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, gcs->pImgChA, gcs->pImageRGBA);

  gcs->numframes++;

  GST_GCS_UNLOCK (gcs);  
  
  return GST_FLOW_OK;
}
Example #16
void CPianoHand::SearchForHand(CIplImage *image)
{
	int x, y;

	float highMatch = 0;
	int highValX, highValY, highSplitType;
	float currMatch;
	int splitType;

	int i, j;

	CIplImage backupImage;
	backupImage.initialize(IMAGE_WIDTH, IMAGE_HEIGHT, 8);
	backupImage.copy(image);

	for (i=0; i < 10; i++)
	{
						//Top and Bottom   //Left and Right
		for (j=0; j < ((((i*2)+1)*2) + (((i*2)-1)*2)); j++)
		{
			//Loop top row
			for (x=-i; x <= i; x++)
			{
				y = -i;
				currMatch = CheckForHand(image, x, y, &splitType);
				if (currMatch > highMatch)
				{
					highMatch = currMatch;
					highValX = x;
					highValY = y;
					highSplitType = splitType;
			
				}				
			}
			for (x=-i; x <= i; x++)
			{
				y = i;
				currMatch = CheckForHand(image, x, y, &splitType);
				if (currMatch > highMatch)
				{
					highMatch = currMatch;
					highValX = x;
					highValY = y;
					highSplitType = splitType;
				}				
			}
			for (y=-i; y <= i; y++)
			{
				x = -i;
				currMatch = CheckForHand(image, x, y, &splitType);
				if (currMatch > highMatch)
				{
					highMatch = currMatch;
					highValX = x;
					highValY = y;
					highSplitType = splitType;
				}				
			}
			for (y=-i; y <= i; y++)
			{
				x = i;
				currMatch = CheckForHand(image, x, y, &splitType);
				if (currMatch > highMatch)
				{
					highMatch = currMatch;
					highValX = x;
					highValY = y;
					highSplitType = splitType;
				}				
			}
		}
	}

	if (highMatch > 0)
	{
		int x1, y1, x2, y2;
		cvCopy(backupImage.getIplImage(), image->getIplImage(), NULL);
		computeBlob(&backupImage, &backupImage, m_center.x+highValX, m_center.y+highValY, 100, &x1, &y1, &x2, &y2);

		CPianoHand tempHand;

		// note: the original built each hand with 'new' and copied the
		// dereferenced object, leaking one CPianoHand per branch
		if (highSplitType == 0)	//Center reference
			tempHand = CPianoHand(0, x1, y1, x2, y2);
		else if (highSplitType == 1)	//Top-left reference
			tempHand = CPianoHand(0, x1, y1, x1+m_boundingBox.width, y1+m_boundingBox.height);
		else if (highSplitType == 2)	//Bottom-right reference
			tempHand = CPianoHand(0, x2-m_boundingBox.width, y2-m_boundingBox.height, x2, y2);
		else	//Center reference, without much width change
			tempHand = CPianoHand(0, x1, y1, x1+m_boundingBox.width, y2);
		UpdateWithHand(&tempHand);


		//Create Image Hands Mask Image from Bounding Box
		for (x=0; x < IMAGE_WIDTH; x++)
		{
			for (y=0; y < IMAGE_HEIGHT; y++)
			{
				m_handsImage.getIplImage()->imageData[y*IMAGE_WIDTH+x]=0;
				m_traceImage.getIplImage()->imageData[y*IMAGE_WIDTH+x]=0;

				if (x >= tempHand.m_boundingBox.x && x < (tempHand.m_boundingBox.x+tempHand.m_boundingBox.width))
				{
					if (y >= tempHand.m_boundingBox.y && y < (tempHand.m_boundingBox.y+tempHand.m_boundingBox.height))
					{
							m_handsImage.getIplImage()->imageData[y*IMAGE_WIDTH+x] = 
								(unsigned char)image->getIplImage()->imageData[y*IMAGE_WIDTH+x];
						
					}
				}

			}
		}

		CIplImage tempImage;
		tempImage.initialize(IMAGE_WIDTH, IMAGE_HEIGHT, 8);

		cvDilate(m_handsImage.getIplImage(), m_edgeImage.getIplImage(), NULL, 1);
		cvErode(m_edgeImage.getIplImage(), tempImage.getIplImage(), NULL, 1);
		cvCanny(tempImage.getIplImage(), m_edgeImage.getIplImage(), 0, 1, 3);

		
		/*DrawBox(m_imb_edgeDetectedImage.getIplImage(), x1, y1, x2, y2, 1);
		(*numHands)++;*/
	}
}
Example #17
void CMFC_systemServerDlg::Thread_getImage(LPVOID lParam)
{
	CMythreadParam * Thread_Info = (CMythreadParam *)lParam;
	CMFC_systemServerDlg * hWnd = (CMFC_systemServerDlg *)CWnd::FromHandle((HWND)Thread_Info->hWnd);
	
	Kinect2Capture kinect;
	kinect.Open(1, 1, 0);

	kinect.uDepthMax = 2000;
	IplImage* img_get = nullptr;
	while (1)
	{
		img_get = kinect.DepthImage();
		if (img_get != NULL)
		{
			cv::Mat src_img = img_get;
			// set the source and destination quad corners for the transform (top-left, bottom-left, bottom-right, top-right)
			cv::Point2f pts1[4] = { roi.pts_depth[0], roi.pts_depth[1], roi.pts_depth[2], roi.pts_depth[3] };
			cv::Point2f pts2[4] = { roi.pts_to[0], roi.pts_to[1], roi.pts_to[2], roi.pts_to[3] };
			// compute the perspective transform matrix
			cv::Mat perspective_matrix = cv::getPerspectiveTransform(pts1, pts2);
			cv::Mat dst_img;
			// apply the transform
			cv::warpPerspective(src_img, dst_img, perspective_matrix, cvSize(320, 240), cv::INTER_LINEAR);
			
			cvCopy(&(IplImage)dst_img, m_TabPage1.sImage_depth);
			//***** image subtraction *****
			cvAbsDiff(m_TabPage1.sImage_depth, m_TabPage1.sImage_depthGround, m_TabPage1.sImage_depth);
			cvThreshold(m_TabPage1.sImage_depth, m_TabPage1.sImage_depth, 1, 255, CV_THRESH_BINARY);
			//*******************
			//*****erode & dilate*****
			IplConvKernel *pKernel = NULL;
			pKernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL);	// note: recreated every frame and never released
			cvErode(m_TabPage1.sImage_depth, m_TabPage1.sImage_depth, pKernel, 1);
			cvDilate(m_TabPage1.sImage_depth, m_TabPage1.sImage_depth, pKernel, 1);
			//*************************
			hWnd->ShowImage(m_TabPage1.sImage_depth, hWnd->GetDlgItem(IDC_IMAGE_binPickLiveDepth),1);
		}
		cvReleaseImage(&img_get);

		img_get = kinect.RGBAImage();
		if (img_get != NULL)
		{
			cv::Mat src_img = img_get;
			// set the source and destination quad corners for the transform (top-left, bottom-left, bottom-right, top-right)
			cv::Point2f pts1[4] = { roi.pts_color[0], roi.pts_color[1], roi.pts_color[2], roi.pts_color[3] };
			cv::Point2f pts2[4] = { roi.pts_to[0], roi.pts_to[1], roi.pts_to[2], roi.pts_to[3] };
			// compute the perspective transform matrix
			cv::Mat perspective_matrix = cv::getPerspectiveTransform(pts1, pts2);
			cv::Mat dst_img;
			// apply the transform
			cv::warpPerspective(src_img, dst_img, perspective_matrix, cvSize(320, 240), cv::INTER_LINEAR);
			
			cvCopy(&(IplImage)dst_img, m_TabPage1.sImage_live);
			 
			IplImage* image_show = cvCreateImage(cvSize(320, 240), IPL_DEPTH_8U, 3);
			cvCvtColor(m_TabPage1.sImage_live, image_show, CV_BGRA2BGR);
			hWnd->ShowImage(image_show, hWnd->GetDlgItem(IDC_IMAGE_binPickLive));
			cvReleaseImage(&image_show);
		}
		cvReleaseImage(&img_get);
	}
}
Example #18
//--------------------------------------------------------------
void ofApp::update() {
    kinect.update();
    
    if(kinect.getHasNewFrame()){
        grayImage = kinect.getPatchedCvImage(); // get the merged cvImage from the two kinects
        
        // set new background image
        if(bLearnBackground){
            bgImage = grayImage;   // let this frame be the background image from now on
            bLearnBackground = false;
            bBackgroundLearned = true;
        }
        
        // forget background image
        if(bForgetBackground){
            bBackgroundLearned = false;
            bForgetBackground = false;
        }
        // set minimal blob area
        contFinder.setMinArea(minArea);
        
        grayImage.flagImageChanged();
        if(bBackgroundLearned){
            cvAbsDiff(bgImage.getCvImage(), grayImage.getCvImage(), grayDiff.getCvImage());
            cvErode(grayDiff.getCvImage(), grayDiff.getCvImage(), NULL, 2);
            cvDilate(grayDiff.getCvImage(), grayDiff.getCvImage(), NULL, 1);
            // threshold ignoring little differences
            cvThreshold(grayDiff.getCvImage(), grayDiff.getCvImage(), 4, 255, CV_THRESH_BINARY);
            grayDiff.flagImageChanged();
            // update the ofImage to be used as background mask for the blob finder
            grayDiffOfImage.setFromPixels(grayDiff.getPixels(), kinect.width, kinect.height);
            
            // update the cv images
            grayDiffOfImage.flagImageChanged();

            // pass image on to contour finder
            contFinder.findContours(grayDiffOfImage.getCvImage());
        } else {
            contFinder.findContours(grayImage.getCvImage());
        }//backGroundLearned
    }
    
    
    // send a osc message for every blob
    // format /blobs <index> <label> <age> <area> <x> <y>
    ofPoint loc;
    ofRectangle area;
    int label;
    if( contFinder.size() > 0) {
        for(unsigned int i = 0; i<contFinder.size(); ++i) {
            area = ofxCv::toOf(contFinder.getBoundingRect(i));
            if(area.getCenter().y > kinect.height * 0.5){
                ofxOscMessage m;
                m.setAddress("/blobs");
                m.addIntArg( i );                                       // index
                m.addIntArg( (label = contFinder.getLabel(i)) );        // label
                m.addIntArg( contFinder.getTracker().getAge(label) );   // age
                m.addIntArg(( area.width*area.height ));                // area
                loc = ofxCv::toOf(contFinder.getCenter(i));
                m.addIntArg(loc.x);                                     // x
                m.addIntArg(loc.y);                                     // y
                sender.sendMessage(m);
                cout << "message sent with label: " << contFinder.getLabel(i) << endl;
            }
        } //for
    } else {
        ofxOscMessage m;
        m.setAddress("/blobs");
        for(int i = 0; i<6;++i){
            m.addIntArg(0); // send to all poly instances, all info set to zero
        }
        sender.sendMessage(m);
        
    }// if
}
Example #19
void display()
{
	glClearColor(0.0, 0.0, 0.0, 0.0);
    	glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);


/*	glPushMatrix();
	glTranslatef(xavg,yavg,0);
	glutSolidCube(200);
	glPopMatrix();
/*

	glBegin(GL_QUADS);
		glVertex3f(xr,xb,0);	
		glVertex3f(xb,yb,0);	
		glVertex3f(xl,yl,0);
		glVertex3f(xt,yt,0);
	glEnd();
*/
///////////////////////////////////////////////////////////nishanthprakash20///////////////////////////////////////////////////
	captured=cvQueryFrame(video1);
	disp=cvCreateImage(cvGetSize(captured),IPL_DEPTH_8U,3);	
	eroded=cvCreateImage(cvGetSize(captured),IPL_DEPTH_8U,3);	
	dilated=cvCreateImage(cvGetSize(captured),IPL_DEPTH_8U,3);	// note: these three images are reallocated every frame and never released

//	data=cvGet2D(captured,240,320);
//	printf("%f,%f,%f\n",data.val[0],data.val[1],data.val[2]);
	

	thresh1=150;
	thresh2=100;
	thresh3=100;


	for(i=0;i<disp->height;i++)
	for(j=0;j<disp->width;j++)
		{
			data=cvGet2D(captured,i,j);
			
			if(data.val[1]>thresh1&&data.val[2]<thresh2&&data.val[0]<thresh3)
			{	
				cvSet2D(disp,i,j,data);
			}
		}

	cvErode(disp,eroded,NULL,1);
	cvDilate(eroded,dilated,NULL,4);
	for(i=0;i<disp->height;i++)
	for(j=0;j<disp->width;j++)
		{
			data=cvGet2D(dilated,i,j);
			
			if(data.val[1]>thresh1&&data.val[2]<thresh2&&data.val[0]<thresh3)
			{	goto donetop;
				
			}
		}
	donetop:
	xt=j;
	yt=i;
	
	for(i=479;i>0;i--)
	for(j=0;j<disp->width;j++)
		{
			data=cvGet2D(dilated,i,j);
			
			if(data.val[1]>thresh1&&data.val[2]<thresh2&&data.val[0]<thresh3)
			{	goto doneleft;
				
			}
		}
	doneleft:
	xb=j;
	yb=i;
	
	inclination=((float)atan((float)(yt-yb)/(xt-xb))-(float)atan(10.0/21))*180/3.14;	// cast avoids integer division
	if(inclination<0)	inclination+=60;
	printf("%f\n",inclination);
	
	cvNamedWindow("Cap");
	cvShowImage("Cap",dilated);
	cvWaitKey(3);

//*/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    	glColor3f(1.0, 1.0, 1.0);
	glPushMatrix();
	glTranslatef(0,0,-5);
	glRotatef(inclination,0,0,1);

	glScalef(100,100,100);
	glColor3f(0.0f,0.0f,0.0f);
	drawmodel_box();
		glColor3f(1.0f,1.0f,1.0f);
		drawmodel_box2();
		glColor3f(1.0f,1.0f,1.0f);
		drawmodel_box3();
		glColor3f(1.0f,1.0f,1.0f);
		drawmodel_box4();
		glColor3f(0.2f,0.2f,1.0f);
		drawmodel_box5();									//remove this
	//glScalef(0.01,0.01,0.01);
//glTranslatef(0,0,5);
	glPopMatrix();
	
  	glutSwapBuffers();
}
Example #20
ReturnType ErodeDilate::onExecute()
{
	// get the image from the in-port
	opros_any *pData = ImageIn.pop();
	RawImage result;

	if(pData != NULL){
		
		// get the image from the port
		RawImage Image = ImageIn.getContent(*pData);
		RawImageData *RawImage = Image.getImage();

		// get the size of the current image
		m_in_width = RawImage->getWidth();
		m_in_height = RawImage->getHeight();

		// allocate image buffers for the original image
		if(m_orig_img == NULL){
			m_orig_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
		}
		if(m_gray_img == NULL){
			m_gray_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);
		}
		if(m_result_img == NULL){
			m_result_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
		}

		// copy the image data into the buffer with memcpy
		memcpy(m_orig_img->imageData, RawImage->getData(), RawImage->getSize());

		
		if(m_RGB_mode == "Gray"){	// m_RGB_mode: grayscale
			// convert the color image to grayscale
			cvCvtColor( m_orig_img, m_gray_img, CV_RGB2GRAY );

			// expand the gray image (1 channel) back to 3 channels, stored in m_orig_img for the erode/dilate step
			cvMerge(m_gray_img, m_gray_img, m_gray_img, NULL, m_orig_img);
		}

		if(m_Change_mode == "Erode"){	// m_Change_mode: erosion
			cvErode(m_orig_img, m_result_img, NULL, m_Repeat_count);
		}else if(m_Change_mode == "Dilate"){	// m_Change_mode: dilation
			cvDilate(m_orig_img, m_result_img, NULL, m_Repeat_count);
		}else{
			cvCopy(m_orig_img, m_result_img);
		}


		// get the image pointer of the output RawImage
		RawImageData *pimage = result.getImage();
		
		// resize it to the input image's size and channel count
		pimage->resize(m_result_img->width, m_result_img->height, m_result_img->nChannels);
		
		// total size of the image (pixels * channels)
		int size = m_result_img->width * m_result_img->height * m_result_img->nChannels;
		
		// pointer to the output pixel data
		unsigned char *ptrdata = pimage->getData();
		
		// memcpy the current frame image
		memcpy(ptrdata, m_result_img->imageData, size);

		// push to the out-port
		opros_any mdata = result;
		ImageOut.push(result);	// send

		delete pData;
	}

	return OPROS_SUCCESS;
}
Example #21
//---------------------------------------------------------
bool COpenCV_Morphology::On_Execute(void)
{
	int			Type, Shape, Radius, Iterations;
	CSG_Grid	*pInput, *pOutput;

	pInput		= Parameters("INPUT")		->asGrid();
	pOutput		= Parameters("OUTPUT")		->asGrid();
	Type		= Parameters("TYPE")		->asInt();
	Shape		= Parameters("SHAPE")		->asInt();
	Radius		= Parameters("RADIUS")		->asInt();
	Iterations	= Parameters("ITERATIONS")	->asInt();

	//-----------------------------------------------------
	switch( Shape )
	{
	default:
	case 0:	Shape	= CV_SHAPE_ELLIPSE;	break;
	case 1:	Shape	= CV_SHAPE_RECT;	break;
	case 2:	Shape	= CV_SHAPE_CROSS;	break;
	}

	//-----------------------------------------------------
	IplImage	*cv_pInput	= Get_CVImage(pInput);
	IplImage	*cv_pOutput	= Get_CVImage(Get_NX(), Get_NY(), pInput->Get_Type());
	IplImage	*cv_pTmp	= NULL;

	//-----------------------------------------------------
	IplConvKernel	*cv_pElement	= cvCreateStructuringElementEx(Radius * 2 + 1, Radius * 2 + 1, Radius, Radius, Shape, 0);

	switch( Type )
	{
	case 0:	// dilation
		cvDilate		(cv_pInput, cv_pOutput, cv_pElement, Iterations);
		break;

	case 1:	// erosion
		cvErode			(cv_pInput, cv_pOutput, cv_pElement, Iterations);
		break;

	case 2:	// opening
		cvMorphologyEx	(cv_pInput, cv_pOutput, cv_pTmp,
			cv_pElement, CV_MOP_OPEN    , Iterations
		);
		break;

	case 3:	// closing
		cvMorphologyEx	(cv_pInput, cv_pOutput, cv_pTmp,
			cv_pElement, CV_MOP_CLOSE   , Iterations
		);
		break;

	case 4:	// morphological gradient
		cvMorphologyEx	(cv_pInput, cv_pOutput, cv_pTmp	= Get_CVImage(Get_NX(), Get_NY(), pInput->Get_Type()),
			cv_pElement, CV_MOP_GRADIENT, Iterations
		);
		break;

	case 5:	// top hat
		cvMorphologyEx	(cv_pInput, cv_pOutput, cv_pTmp	= Get_CVImage(Get_NX(), Get_NY(), pInput->Get_Type()),
			cv_pElement, CV_MOP_TOPHAT  , Iterations
		);
		break;

	case 6:	// black hat
		cvMorphologyEx	(cv_pInput, cv_pOutput, cv_pTmp	= Get_CVImage(Get_NX(), Get_NY(), pInput->Get_Type()),
			cv_pElement, CV_MOP_BLACKHAT, Iterations
		);
		break;
	}

	cvReleaseStructuringElement(&cv_pElement);

	//-----------------------------------------------------
	Copy_CVImage_To_Grid(pOutput, cv_pOutput);

    cvReleaseImage(&cv_pInput);
    cvReleaseImage(&cv_pOutput);

	if( cv_pTmp )
	{
		cvReleaseImage(&cv_pTmp);
	}

	pOutput->Set_Name(CSG_String::Format(SG_T("%s [%s]"), pInput->Get_Name(), Get_Name().c_str()));

	return( true );
}
Example #22
void CvAdaptiveSkinDetector::process(IplImage *inputBGRImage, IplImage *outputHueMask)
{
    IplImage *src = inputBGRImage;

    int h, v, i, l;
    bool isInit = false;

    nFrameCount++;

    if (imgHueFrame == NULL)
    {
        isInit = true;
        initData(src, nSamplingDivider, nSamplingDivider);
    }

    unsigned char *pShrinked, *pHueFrame, *pMotionFrame, *pLastGrayFrame, *pFilteredFrame, *pGrayFrame;
    pShrinked = (unsigned char *)imgShrinked->imageData;
    pHueFrame = (unsigned char *)imgHueFrame->imageData;
    pMotionFrame = (unsigned char *)imgMotionFrame->imageData;
    pLastGrayFrame = (unsigned char *)imgLastGrayFrame->imageData;
    pFilteredFrame = (unsigned char *)imgFilteredFrame->imageData;
    pGrayFrame = (unsigned char *)imgGrayFrame->imageData;

    if ((src->width != imgHueFrame->width) || (src->height != imgHueFrame->height))
    {
        cvResize(src, imgShrinked);
        cvCvtColor(imgShrinked, imgHSVFrame, CV_BGR2HSV);
    }
    else
    {
        cvCvtColor(src, imgHSVFrame, CV_BGR2HSV);
    }

    cvSplit(imgHSVFrame, imgHueFrame, imgSaturationFrame, imgGrayFrame, 0);

    cvSetZero(imgMotionFrame);
    cvSetZero(imgFilteredFrame);

    l = imgHueFrame->height * imgHueFrame->width;

    for (i = 0; i < l; i++)
    {
        v = (*pGrayFrame);
        if ((v >= GSD_INTENSITY_LT) && (v <= GSD_INTENSITY_UT))
        {
            h = (*pHueFrame);
            if ((h >= GSD_HUE_LT) && (h <= GSD_HUE_UT))
            {
                if ((h >= nSkinHueLowerBound) && (h <= nSkinHueUpperBound))
                    ASD_INTENSITY_SET_PIXEL(pFilteredFrame, h);

                if (ASD_IS_IN_MOTION(pLastGrayFrame, v, 7))
                    ASD_INTENSITY_SET_PIXEL(pMotionFrame, h);
            }
        }
        pShrinked += 3;
        pGrayFrame++;
        pLastGrayFrame++;
        pMotionFrame++;
        pHueFrame++;
        pFilteredFrame++;
    }

    if (isInit)
        cvCalcHist(&imgHueFrame, skinHueHistogram.fHistogram);

    cvCopy(imgGrayFrame, imgLastGrayFrame);

    cvErode(imgMotionFrame, imgTemp);  // eliminate disperse pixels, which occur because of the camera noise
    cvDilate(imgTemp, imgMotionFrame);

    cvCalcHist(&imgMotionFrame, histogramHueMotion.fHistogram);

    skinHueHistogram.mergeWith(&histogramHueMotion, fHistogramMergeFactor);

    skinHueHistogram.findCurveThresholds(nSkinHueLowerBound, nSkinHueUpperBound, 1 - fHuePercentCovered);

    switch (nMorphingMethod)
    {
        case MORPHING_METHOD_ERODE :
            cvErode(imgFilteredFrame, imgTemp);
            cvCopy(imgTemp, imgFilteredFrame);
            break;
        case MORPHING_METHOD_ERODE_ERODE :
            cvErode(imgFilteredFrame, imgTemp);
            cvErode(imgTemp, imgFilteredFrame);
            break;
        case MORPHING_METHOD_ERODE_DILATE :
            cvErode(imgFilteredFrame, imgTemp);
            cvDilate(imgTemp, imgFilteredFrame);
            break;
    }

    if (outputHueMask != NULL)
        cvCopy(imgFilteredFrame, outputHueMask);
};
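
The bare two-argument calls above, such as cvErode (imgMotionFrame, imgTemp), rely on the C API's default arguments (available when the header is compiled as C++): element = NULL, which selects a 3x3 rectangular kernel, and iterations = 1. They are therefore equivalent to:

cvErode (imgMotionFrame, imgTemp, NULL, 1);   /* 3x3 rectangle, one pass */
cvDilate (imgTemp, imgMotionFrame, NULL, 1);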
Example #23
bool findBlueNYelContour(IplImage* img, CvMemStorage* &storage,CvPoint &centre,int color){  //color :  blue==0,  yellow==1
	CvSeq* contours;  
	IplImage* timg = cvCloneImage( img ); // make a copy of input image  
	IplImage* gray = cvCreateImage( cvGetSize(timg), 8, 1 );   
	CvSeq* result;  

	CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );  
	cvNamedWindow("rgbContour",0);

	IplImage* hsv = cvCreateImage( cvGetSize(timg), 8, 3 );   
	cvSmooth(hsv,hsv,2,3);	// note: hsv has not been filled yet; this smooths freshly allocated data
	if(color==0){
		findLP_HSV_BLUE(timg,hsv);
		cvNamedWindow("hsv_license_blue",0);
	}
	else {
		findLP_HSV_YEL(timg,hsv);
			cvNamedWindow("hsv_license_yel",0);
	}
	//	

	cvNamedWindow("侵蚀前",0);
	cvShowImage("侵蚀前",hsv);
	cvErode(hsv,hsv,0,1);
	cvNamedWindow("侵蚀后",0);
	cvShowImage("侵蚀后",hsv);
	cvDilate(hsv,hsv,0,4);
	cvNamedWindow("膨胀后",0);
	cvShowImage("膨胀后",hsv);
	cvCvtColor(hsv,hsv,CV_HSV2RGB);
	

	cvCvtColor(hsv,gray,CV_RGB2GRAY);
	cvThreshold(gray,gray,100,255,0);
	CvContourScanner scanner = NULL;
	scanner = cvStartFindContours(gray,storage,sizeof(CvContour),CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE,cvPoint(0,0));
	//ImagePreprocess::contourFinder(gray,0,hsv_blue,4000,10000);
	// find contours and store them all as a list  
/*	cvFindContours( gray, storage, &contours, sizeof(CvContour),  
		CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );  */
	// test each contour  
	int t=0;
	while ((contours = cvFindNextContour(scanner)) != NULL)
	{
		// approximate contour with accuracy proportional  
		// to the contour perimeter  
		result = cvApproxPoly( contours, sizeof(CvContour), storage,  
			CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.04, 0 );  
		double tempArea = fabs(cvContourArea(result,CV_WHOLE_SEQ));
		double peri=cvContourPerimeter(result);
		CvRect rct=cvBoundingRect(result,1);
		// square contours should have 4 vertices after approximation  
		// relatively large area (to filter out noisy contours)  
		// and be convex.  
		// Note: absolute value of an area is used because  
		// area may be positive or negative - in accordance with the  
		// contour orientation  
		if(tempArea<3500 || tempArea>10000 || 
			result->total < 4 || result->total >10 ||
			peri<340 || peri>500
			|| rct.width/(1.0*rct.height)>3.85 || rct.width/(1.0*rct.height)<2.47 || rct.width<135 || rct.width>175
			){
			cvSubstituteContour(scanner,NULL);
		}
		else{  
			
	//	cout<<"height: "<<rct.height<<" width: "<<rct.width<<" rate: "<<rct.width/(rct.height*1.0)<<endl;
	//			cout<<"edge num: "<<result->total<<endl;
	//			cout<<"area : "<<fabs(cvContourArea(result,CV_WHOLE_SEQ))<<endl;
	//			cout<<"peri : "<<cvContourPerimeter(result)<<endl;
				CvScalar color = CV_RGB( rand()&255, rand()&255, rand()&255 );
	//			cvDrawContours( timg, result, color, color, -1, 3, 8 );
	//			cvDrawContours( hsv, result, color, color, -1, 3, 8 );
				t++;
				//		contour = cvApproxPoly( contour, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 3, 1 );         
				CvMat *region;
				region=(CvMat*)result; 
				CvMoments moments;  
				cvMoments( region, &moments,0 );
				int xc=moments.m10/moments.m00 , yc=moments.m01/moments.m00; 
				//		double angle3=atan(2*moments.mu11/(moments.mu20-moments.mu02))/2;
		//		cout<<"long: "<<longAxis<<"short: "<<shortAxis<<endl;
				centre=cvPoint(xc,yc);
	//			cvCircle( hsv, centre, 3, color, 3, 8, 0 );
	//			cvCircle( timg, centre, 3, color, 3, 8, 0 );
		}
		// take the next contour  
//		contours = contours->h_next;  			
	}
	result = cvEndFindContours(&scanner);
	cvShowImage("rgbContour",timg);
	if(color==0)
		cvShowImage("hsv_license_blue",hsv);
	else
		cvShowImage("hsv_license_yel",hsv);
	cvReleaseImage( &timg );  
	cvReleaseImage( &hsv );  
	cvReleaseImage( &gray );  
	if(0==t){
		return false;
	}
	else
		return true;
	// release all the temporary images  
	//	cvReleaseImage( &gray );  

	//cvReleaseImage( &hsv_blue );  
}  
Example #24
void ofCvImage::erode( int nIterations ) {
	cvErode( cvImage, cvImageTemp, 0, nIterations );
	swapTemp();
}
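
For reference, a sketch of the same operation in the C++ API: an empty cv::Mat selects the default 3x3 rectangular structuring element, and (-1, -1) anchors it at the kernel center.

// Rough equivalent of cvErode(cvImage, cvImageTemp, 0, nIterations):
cv::erode(src, dst, cv::Mat(), cv::Point(-1, -1), nIterations);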
Example #25
std::list<utils::Garbage*> GarbageRecognition::garbageList(IplImage * src, IplImage * model){
	std::list<utils::Garbage*>::iterator it;
	for ( it=garbages.begin() ; it != garbages.end() ; it++ )
		delete *it;
	garbages.clear();

	//cvNamedWindow("output",CV_WINDOW_AUTOSIZE);
	//object model

	//image for the histogram-based filter
	//could be a parameter

	utils::Histogram * h = new Histogram(HIST_H_BINS,HIST_S_BINS);
	CvHistogram * testImageHistogram = h->getHShistogramFromRGB(model);

	//~ int frameWidth=cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH);
	//~ int frameHeight=cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT);



	//gets a frame for setting  image size
	//CvSize srcSize = cvSize(frameWidth,frameHeight);
	CvSize srcSize = cvGetSize(src);

	//images for HSV conversion
	IplImage* hsv = cvCreateImage( srcSize, 8, 3 );
	IplImage* h_plane = cvCreateImage( srcSize, 8, 1 );
	IplImage* s_plane = cvCreateImage( srcSize, 8, 1 );
	IplImage* v_plane = cvCreateImage( srcSize, 8, 1 );



	//Image for thresholding
	IplImage * threshImage=cvCreateImage(srcSize,8,1);

	//image for equalization
	IplImage * equalizedImage=cvCreateImage(srcSize,8,1);

	//image for Morphing operations(Dilate-erode)
	IplImage * morphImage=cvCreateImage(srcSize,8,1);

	//image for image smoothing
	IplImage * smoothImage=cvCreateImage(srcSize,8,1);

	//image for contour-finding operations
	IplImage * contourImage=cvCreateImage(srcSize,8,3);

	int frameCounter=1;
	int cont_index=0;

	//convolution kernel for morph operations
	IplConvKernel* element;

	CvRect boundingRect;

	//contours
	CvSeq * contours;

	//Main processing (originally a per-frame loop; here it runs once per call)


	frameCounter++;

	//convert image to hsv
	cvCvtColor( src, hsv, CV_BGR2HSV );
	cvCvtPixToPlane( hsv, h_plane, s_plane, v_plane, 0 );

	//equalize Saturation Channel image
	cvEqualizeHist(s_plane,equalizedImage);

	//threshold the equalized Saturation channel image
	cvThreshold(equalizedImage,threshImage,THRESHOLD_VALUE,255,
	CV_THRESH_BINARY);

	//apply morphologic operations
	element = cvCreateStructuringElementEx( MORPH_KERNEL_SIZE*2+1,
		MORPH_KERNEL_SIZE*2+1, MORPH_KERNEL_SIZE, MORPH_KERNEL_SIZE,
		CV_SHAPE_RECT, NULL);

	cvDilate(threshImage,morphImage,element,MORPH_DILATE_ITER);
	cvErode(morphImage,morphImage,element,MORPH_ERODE_ITER);
	cvReleaseStructuringElement(&element);	// the kernel is not needed past this point

	//apply smooth gaussian-filter
	cvSmooth(morphImage,smoothImage,CV_GAUSSIAN,3,0,0,0);

	//get all contours
	contours = myFindContours(smoothImage);

	cont_index=0;
	cvCopy(src,contourImage,0);
	


	while(contours!=NULL){
		CvSeq * aContour=getPolygon(contours);
		utils::Contours * ct = new Contours(aContour);

	
	    int	pf = ct->perimeterFilter(MINCONTOUR_PERIMETER,MAXCONTOUR_PERIMETER);

		int raf = ct->rectangularAspectFilter(CONTOUR_RECTANGULAR_MIN_RATIO, CONTOUR_RECTANGULAR_MAX_RATIO);

		// int af = ct->areaFilter(MINCONTOUR_AREA,MAXCONTOUR_AREA);
		int baf = ct->boxAreaFilter(BOXFILTER_TOLERANCE);

        int hmf = ct->histogramMatchingFilter(src,testImageHistogram, HIST_H_BINS,HIST_S_BINS,HIST_MIN);


		//apply filters

		if( pf && raf && baf && hmf	){

				//if passed filters
				ct->printContour(3,cvScalar(127,127,0,0),
					contourImage);
				
				//get contour bounding box
				boundingRect=cvBoundingRect(ct->getContour(),0);
				cvRectangle(contourImage,cvPoint(boundingRect.x,boundingRect.y),
						cvPoint(boundingRect.x+boundingRect.width,
						boundingRect.y+boundingRect.height),
						_GREEN,1,8,0);
				//build garbage List
			
				//printf(" c %d,%d\n",boundingRect.x,boundingRect.y);

				utils::MinimalBoundingRectangle * r = new utils::MinimalBoundingRectangle(boundingRect.x,
					boundingRect.y,boundingRect.width,boundingRect.height);



				utils::Garbage * aGarbage = new utils::Garbage(r);
//				printf("%d , %d - %d , %d\n",boundingRect.x,boundingRect.y,boundingRect.width,boundingRect.height);

				garbages.push_back(aGarbage);


			}

		delete ct;
		cvReleaseMemStorage( &aContour->storage );
		contours=contours->h_next;
		cont_index++;
	}

   // cvShowImage("output",contourImage);
   // cvWaitKey(0);
	delete h;

	cvReleaseHist(&testImageHistogram);
	cvReleaseStructuringElement(&element);	// the kernel was leaked in the original
	//cvReleaseMemStorage( &contours->storage );
	cvReleaseImage(&threshImage);
	cvReleaseImage(&equalizedImage);
	cvReleaseImage(&morphImage);
	cvReleaseImage(&smoothImage);
	cvReleaseImage(&contourImage);

	cvReleaseImage(&hsv);
	cvReleaseImage(&h_plane);
	cvReleaseImage(&s_plane);
	cvReleaseImage(&v_plane);


	return garbages;
}
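The dilate-then-erode pair in garbageList is a morphological closing. Below is a minimal sketch of just that step, with the kernel created and released explicitly; the 5x5 size and 2 iterations are placeholders standing in for MORPH_KERNEL_SIZE and the MORPH_*_ITER constants, which are not shown in the snippet.

#include <opencv/cv.h>

/* Close small gaps in a binary mask: dilate joins nearby blobs, then
   erode restores their approximate size. */
static void closeMask(IplImage* mask)
{
	IplConvKernel* element = cvCreateStructuringElementEx(
		5, 5,		/* kernel width, height */
		2, 2,		/* anchor (center)      */
		CV_SHAPE_RECT, NULL);
	cvDilate(mask, mask, element, 2);
	cvErode(mask, mask, element, 2);
	cvReleaseStructuringElement(&element);	/* avoid leaking the kernel */
}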
Example #26
static GstFlowReturn
gst_skin_detect_transform (GstOpencvVideoFilter * base, GstBuffer * buf,
    IplImage * img, GstBuffer * outbuf, IplImage * outimg)
{
  GstSkinDetect *filter = GST_SKIN_DETECT (base);



  filter->cvRGB->imageData = (char *) img->imageData;
  filter->cvSkin->imageData = (char *) outimg->imageData;

  /* SKIN COLOUR BLOB DETECTION */
  if (HSV == filter->method) {
    cvCvtColor (filter->cvRGB, filter->cvHSV, CV_RGB2HSV);
    cvCvtPixToPlane (filter->cvHSV, filter->cvH, filter->cvS, filter->cvV, 0);  /*  Extract the 3 color components. */

    /*  Detect which pixels in each of the H, S and V channels are probably skin pixels:
       assume skin has a Hue between 10 and 20 (out of 180), Saturation above 48, and Value above 80. */
    cvThreshold (filter->cvH, filter->cvH2, 10, UCHAR_MAX, CV_THRESH_BINARY);   /* (hue > 10) */
    cvThreshold (filter->cvH, filter->cvH, 20, UCHAR_MAX, CV_THRESH_BINARY_INV);        /* (hue < 20) */
    cvThreshold (filter->cvS, filter->cvS, 48, UCHAR_MAX, CV_THRESH_BINARY);    /* (sat > 48) */
    cvThreshold (filter->cvV, filter->cvV, 80, UCHAR_MAX, CV_THRESH_BINARY);    /* (val > 80) */

    /*  erode the HUE to get rid of noise. */
    cvErode (filter->cvH, filter->cvH, NULL, 1);

    /*  Combine all 3 thresholded color components, so that an output pixel will only 
       be white (255) if the H, S and V pixels were also white.
       imageSkin = (hue > 10) ^ (hue < 20) ^ (sat > 48) ^ (val > 80), where   ^ mean pixels-wise AND */
    cvAnd (filter->cvH, filter->cvS, filter->cvSkinPixels1, NULL);
    cvAnd (filter->cvSkinPixels1, filter->cvH2, filter->cvSkinPixels1, NULL);
    cvAnd (filter->cvSkinPixels1, filter->cvV, filter->cvSkinPixels1, NULL);

    cvCvtColor (filter->cvSkinPixels1, filter->cvRGB, CV_GRAY2RGB);
  } else if (RGB == filter->method) {
    cvCvtPixToPlane (filter->cvRGB, filter->cvR, filter->cvG, filter->cvB, 0);  /*  Extract the 3 color components. */
    cvAdd (filter->cvR, filter->cvG, filter->cvAll, NULL);
    cvAdd (filter->cvB, filter->cvAll, filter->cvAll, NULL);    /*  All = R + G + B */
    cvDiv (filter->cvR, filter->cvAll, filter->cvRp, 1.0);      /*  R' = R / ( R + G + B) */
    cvDiv (filter->cvG, filter->cvAll, filter->cvGp, 1.0);      /*  G' = G / ( R + G + B) */

    cvConvertScale (filter->cvR, filter->cvR2, 1.0, 0.0);
    cvCopy (filter->cvGp, filter->cvGp2, NULL);
    cvCopy (filter->cvRp, filter->cvRp2, NULL);

    cvThreshold (filter->cvR2, filter->cvR2, 60, UCHAR_MAX, CV_THRESH_BINARY);  /* (R > 60) */
    cvThreshold (filter->cvRp, filter->cvRp, 0.42, UCHAR_MAX, CV_THRESH_BINARY);        /* (R'> 0.42) */
    cvThreshold (filter->cvRp2, filter->cvRp2, 0.6, UCHAR_MAX, CV_THRESH_BINARY_INV);   /* (R'< 0.6) */
    cvThreshold (filter->cvGp, filter->cvGp, 0.28, UCHAR_MAX, CV_THRESH_BINARY);        /* (G'> 0.28) */
    cvThreshold (filter->cvGp2, filter->cvGp2, 0.4, UCHAR_MAX, CV_THRESH_BINARY_INV);   /* (G'< 0.4) */

    /*  Combine all the thresholded components, so that an output pixel will only
       be white (255) if the R, R' and G' pixels were also white. */

    cvAnd (filter->cvR2, filter->cvRp, filter->cvSkinPixels2, NULL);
    cvAnd (filter->cvRp, filter->cvSkinPixels2, filter->cvSkinPixels2, NULL);
    cvAnd (filter->cvRp2, filter->cvSkinPixels2, filter->cvSkinPixels2, NULL);
    cvAnd (filter->cvGp, filter->cvSkinPixels2, filter->cvSkinPixels2, NULL);
    cvAnd (filter->cvGp2, filter->cvSkinPixels2, filter->cvSkinPixels2, NULL);

    cvConvertScale (filter->cvSkinPixels2, filter->cvdraft, 1.0, 0.0);
    cvCvtColor (filter->cvdraft, filter->cvRGB, CV_GRAY2RGB);
  }

  /* After this we have a RGB Black and white image with the skin, in 
     filter->cvRGB. We can postprocess by applying 1 erode-dilate and 1
     dilate-erode, or alternatively 1 opening-closing all together, with
     the goal of removing small (spurious) skin spots and creating large
     connected areas */
  if (filter->postprocess) {
    cvSplit (filter->cvRGB, filter->cvChA, NULL, NULL, NULL);

    IplConvKernel *se =
        cvCreateStructuringElementEx (3, 3, 1, 1, CV_SHAPE_RECT, NULL);
    cvErode (filter->cvChA, filter->cvChA, se, 1);
    cvDilate (filter->cvChA, filter->cvChA, se, 2);
    cvErode (filter->cvChA, filter->cvChA, se, 1);
    cvReleaseStructuringElement (&se);  /* the original leaked one kernel per call */

    cvCvtColor (filter->cvChA, filter->cvRGB, CV_GRAY2RGB);
  }

  cvCopy (filter->cvRGB, filter->cvSkin, NULL);

  return GST_FLOW_OK;
}
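The HSV branch above builds its mask from four cvThreshold calls plus three cvAnd calls. Below is a sketch of an equivalent single-call version using cvInRangeS with the same bounds (hue roughly 10-20 of 180, saturation above 48, value above 80); the BGR input and the function name are assumptions, not part of the GStreamer element.

#include <opencv/cv.h>

/* One-call HSV skin mask: keep pixels with 10 <= H < 20, S >= 48,
   V >= 80 (cvInRangeS lower bounds are inclusive, upper exclusive,
   so the edges differ from the strict thresholds above by one level). */
static void skinMaskHSV(IplImage* bgr, IplImage* mask /* 8u, 1 channel */)
{
	IplImage* hsv = cvCreateImage(cvGetSize(bgr), IPL_DEPTH_8U, 3);
	cvCvtColor(bgr, hsv, CV_BGR2HSV);
	cvInRangeS(hsv,
	           cvScalar(10, 48, 80, 0),	/* lower H,S,V bound */
	           cvScalar(20, 255, 255, 0),	/* upper H,S,V bound */
	           mask);
	cvReleaseImage(&hsv);
}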
Example #27
// does a fast check if a chessboard is in the input image. This is a workaround to 
// a problem of cvFindChessboardCorners being slow on images with no chessboard
// - src: input image
// - size: chessboard size
// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called, 
// 0 if there is no chessboard, -1 in case of error
int cvCheckChessboard(IplImage* src, CvSize size)
{
    if(src->nChannels > 1)
    {
        cvError(CV_BadNumChannels, "cvCheckChessboard", "supports single-channel images only", 
                __FILE__, __LINE__);
    }
    
    if(src->depth != 8)
    {
        cvError(CV_BadDepth, "cvCheckChessboard", "supports depth=8 images only", 
                __FILE__, __LINE__);
    }
    
    const int erosion_count = 1;
    const float black_level = 20.f;
    const float white_level = 130.f;
    const float black_white_gap = 70.f;
    
#if defined(DEBUG_WINDOWS)
    cvNamedWindow("1", 1);
    cvShowImage("1", src);
    cvWaitKey(0);
#endif //DEBUG_WINDOWS

    CvMemStorage* storage = cvCreateMemStorage();
    
    IplImage* white = cvCloneImage(src);
    IplImage* black = cvCloneImage(src);
        
    cvErode(white, white, NULL, erosion_count);
    cvDilate(black, black, NULL, erosion_count);
    IplImage* thresh = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    
    int result = 0;
    for(float thresh_level = black_level; thresh_level < white_level && !result; thresh_level += 20.0f)
    {
        cvThreshold(white, thresh, thresh_level + black_white_gap, 255, CV_THRESH_BINARY);
        
#if defined(DEBUG_WINDOWS)
        cvShowImage("1", thresh);
        cvWaitKey(0);
#endif //DEBUG_WINDOWS
        
        CvSeq* first = 0;
        std::vector<std::pair<float, int> > quads;
        cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);        
        icvGetQuadrangleHypotheses(first, quads, 1);
        
        cvThreshold(black, thresh, thresh_level, 255, CV_THRESH_BINARY_INV);
        
#if defined(DEBUG_WINDOWS)
        cvShowImage("1", thresh);
        cvWaitKey(0);
#endif //DEBUG_WINDOWS
        
        cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);
        icvGetQuadrangleHypotheses(first, quads, 0);
        
        const size_t min_quads_count = size.width*size.height/2;
        std::sort(quads.begin(), quads.end(), less_pred);
        
        // now check if there are many hypotheses with similar sizes
        // do this by floodfill-style algorithm
        const float size_rel_dev = 0.4f;
        
        for(size_t i = 0; i < quads.size(); i++)
        {
            size_t j = i + 1;
            for(; j < quads.size(); j++)
            {
                if(quads[j].first/quads[i].first > 1.0f + size_rel_dev)
                {
                    break;
                }
            }
            
            if(j + 1 > min_quads_count + i)
            {
                // check the number of black and white squares
                std::vector<int> counts;
                countClasses(quads, i, j, counts);
                const int black_count = cvRound(ceil(size.width/2.0)*ceil(size.height/2.0));
                const int white_count = cvRound(floor(size.width/2.0)*floor(size.height/2.0));
                if(counts[0] < black_count*0.75 ||
                   counts[1] < white_count*0.75)
                {
                    continue;
                }
                result = 1;
                break;
            }
        }
    }
    
    
    cvReleaseImage(&thresh);
    cvReleaseImage(&white);
    cvReleaseImage(&black);
    cvReleaseMemStorage(&storage);
    
    return result;
}
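Intended usage, as the header comment describes: run the cheap check first and only call the expensive corner finder when a board is plausible. A sketch; findBoard and the 9x6 pattern size are placeholders.

#include <opencv/cv.h>

/* Returns nonzero when a full board was found; `gray` must be an
   8-bit single-channel image, as cvCheckChessboard requires. */
static int findBoard(IplImage* gray, CvPoint2D32f* corners, int* cornerCount)
{
	CvSize pattern = cvSize(9, 6);
	if (cvCheckChessboard(gray, pattern) != 1)
		return 0;	/* no plausible board in this frame */
	return cvFindChessboardCorners(gray, pattern, corners, cornerCount,
	                               CV_CALIB_CB_ADAPTIVE_THRESH);
}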
Example #28
int main(int argc, char *argv[])
{
	
	CvCapture *cap = cvCaptureFromCAM(0);
	if(!cap){
		printf("CubieCam : Cannot open cam(0)\n");
		return 0;
	}

	int width = (int)cvGetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH);
	int height = (int)cvGetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT);
	CvSize size = cvSize(width, height);
	pthread_t serverThread;
	pthread_t serialThread;
	time_t t = time(NULL);
	struct tm *tm;

	IplImage *grayImage0 = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage *grayImage1 = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage *diffImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage *frame = NULL;

	int nThreshold = 50;
	char c;
	char *diffData;
	char *tempStr;
	char filename[40];
	char date[30];
	char *buffer;
	int flag = 1;
	int i, d = 9, cnt = 0;
	int maxSize = (diffImage->widthStep) * (diffImage->height);
	float rate = 0;
	port = -1;

	printf("CubieCam : Run ServerThread\n");
	if(pthread_create(&serverThread, NULL, serverTask, NULL))
	{
		printf("CubieCam : Cannot create serverThread\n");
	}

	printf("CubieCam : Run SerialThread\n");
	if(pthread_create(&serialThread, NULL, serialTask, NULL))
	{
		printf("CubieCam : Cannot create serialThread\n");
	}

	printf("Width : %d, Height : %d, Size : %d\n", 
		diffImage->widthStep, diffImage->height, maxSize);

	printf("CubieCam : Start capturing\n");

	while(1)
	{
		frame = cvQueryFrame(cap);
		if(!frame)
		{
			printf("CubieCam : Cannot load camera frame\n");
			break;
		}

		if(flag)
		{
			cvCvtColor(frame, grayImage0, CV_BGR2GRAY);
			cvAbsDiff(grayImage0, grayImage1, diffImage);
			cvThreshold(diffImage, diffImage, nThreshold, 255, CV_THRESH_BINARY);
			cvErode(diffImage, diffImage, NULL, 1);
			flag = 0;
		} else
		{
			cvCvtColor(frame, grayImage1, CV_BGR2GRAY);
			cvAbsDiff(grayImage1, grayImage0, diffImage);
			cvThreshold(diffImage, diffImage, nThreshold, 255, CV_THRESH_BINARY);
			cvErode(diffImage, diffImage, NULL, 1);
			flag = 1;
		}

		diffData = diffImage->imageData;
		for(i = 0; i < maxSize; i++)
		{
			if(diffData[i] != 0)
				cnt++;
		}

		rate = ((float)cnt / (float)maxSize) * 100;
		if(rate > 0.5)
		{
			printf("CubieCam : Ratio %5.2f\n", rate);
			if(d > 5)
			{
				t = time(NULL);
				tm = localtime(&t);
				tempStr = asctime(tm);
				tempStr[strlen(tempStr)-1] = 0;
				sprintf(filename, "./captures/%s.png", tempStr);
				cvSaveImage(filename, frame);
				printf("CubieCam : Capture saved, notification sent\n");
				d = 0;
				if(port > 0)
				{
					sprintf(filename, "Captured/%s", tempStr);
					sendMessage(filename);
				}
			}
		}
	
		cnt = 0;
		if(d <= 5)
		{
			d++;
		}


		if(mode == STREAM_MODE)
		{
			cvReleaseCapture(&cap);
			pid_stream = fork();
			if(pid_stream == 0)
			{
				printf("CubieCam : Create mjpg-streamer process...\n");
				system("/home/cubie/mjpg-streamer/mjpg_streamer -i \"/home/cubie/mjpg-streamer/input_uvc.so -r 320x240 -f 15\" -o \"/home/cubie/mjpg-streamer/output_http.so -w /home/cubie/mjpg-streamer/www\"");
				return -1;
			} else if(pid_stream == -1)
			{
				printf("CubieCam : Cannot create mjpg-streamer process\n");
			} else
			{
				while(mode == STREAM_MODE){}
				kill(pid_stream, SIGTERM);
				kill(pid_stream+1, SIGTERM);
				kill(pid_stream+2, SIGTERM);
				kill(pid_stream+3, SIGTERM);
				waitpid(pid_stream, NULL, 0);
				waitpid(pid_stream+1, NULL, 0);
				waitpid(pid_stream+2, NULL, 0);
				waitpid(pid_stream+3, NULL, 0);
				printf("CubieCam : mjpg-streamer process %d stopped\n", pid_stream);
				sleep(2);
			}
			printf("CubieCam : Resume Monitoring...\n");
			cap = cvCaptureFromCAM(0);
		}


		if((c = cvWaitKey(10)) == 27)
		{
			printf("\nCubieCam : Quit capturing\n");
			break;
		}
	}

	cvReleaseImage(&grayImage1);
	cvReleaseImage(&grayImage0);
	cvReleaseImage(&diffImage);
	cvReleaseCapture(&cap);

	printf("CubieCam : Exit\n");
	return 0;
}
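The motion measure in the loop above (absolute difference, threshold, erode, count changed pixels) can be isolated into one helper; cvCountNonZero replaces the manual byte-counting loop, and width*height is used instead of widthStep*height so row padding is not counted. A sketch under those assumptions, with the same threshold of 50.

#include <opencv/cv.h>

/* Percentage of pixels that changed between two gray frames. */
static float motionRate(IplImage* prev, IplImage* curr, IplImage* diff)
{
	cvAbsDiff(curr, prev, diff);
	cvThreshold(diff, diff, 50, 255, CV_THRESH_BINARY);
	cvErode(diff, diff, NULL, 1);	/* suppress single-pixel noise */
	int changed = cvCountNonZero(diff);
	return 100.0f * (float)changed / (float)(diff->width * diff->height);
}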
Example #29
int snaps_nav(struct snaps_nav_state_struct* nav_state, time_t *ptr_framestamptime){

		//Get frame from camera
	 for (int i=0; i<6; i++){
		img = cvQueryFrame( capture );
		if ( !img ) {
		   fprintf( stderr, "ERROR: frame is null...\n" );
		   getchar();
		   break;
		 }
	 }

	timestamp_frame(ptr_framestamptime);

//Crop Image code
	cvSetImageROI(img,cvRect(1,1,540,380));
	cvCopy(img, Cropped, NULL);

//Change the color format from BGR to HSV
	cvCvtColor(Cropped, imgHSV, CV_BGR2HSV);

//copy original img to be displayed in drawn Targets/DM img
	cvCopy(Cropped, TimgDrawn, NULL );

	cvInRangeS(imgHSV, cvScalar(T_range_low,0,0,0), cvScalar(T_range_high,255,255,0), TargetsFilter);
	cvInRangeS(imgHSV, cvScalar(DM_range_low,0,0,0), cvScalar(DM_range_high,255,255,0), DMFilter);

//Magenta Marker Image Processing
	cvErode(TargetsFilter, TargetsFilter, 0, 1);
	cvDilate(TargetsFilter, TargetsFilter, NULL, 1);						//Dilate image
	cvSmooth(TargetsFilter, TargetsFilter, CV_GAUSSIAN, 3, 0, 0.0, 0.0);  	//Smooth Target image

//Orange Target Image Processing
	cvErode(DMFilter, DMFilter, 0, 1);
	cvDilate(DMFilter, DMFilter, NULL, 1);									//Dilate image
	//cvSmooth(DMFilter, DMFilter, CV_GAUSSIAN, 3, 0, 0.0, 0.0);  			//Smooth DM image

//Show filtered Images
	cvShowImage("TargetsFilter", TargetsFilter);							//Show Targets filter image
	cvShowImage("DMFilter", DMFilter);   									//Show DM filter image

//Perform Canny on Images
	cvCanny(TargetsFilter, TimgCanny, T_canny_low, T_canny_high, 3);  			// Apply canny filter to the targets image
	cvCanny(DMFilter, DMimgCanny, DM_canny_low, DM_canny_high, 3); 				// Apply canny filter to the DM image

	cvShowImage("TCannyImage", TimgCanny);
	cvShowImage("DMCannyImage", DMimgCanny);


// Find and Draw circles for the Targets image
	CvPoint Tpt;

	CvSeq* TimgHCirc = cvHoughCircles(
			TimgCanny, TcircStorage, CV_HOUGH_GRADIENT,					// in, out, method,
			2, 															//precision of the accumulator (2x the input image)
			T_rad_max*4, 												//min dist between circles
			T_tol_max, T_tol_min,										//parm1, parm2
			T_rad_min, T_rad_max); 										//min radius, max radius

		for (int i = 0; i < TimgHCirc->total; i++) {
			float* p = (float*) cvGetSeqElem(TimgHCirc, i);

		// To get the circle coordinates
			CvPoint pt = cvPoint(cvRound(p[0]), cvRound(p[1]));

		// Draw center of circles in green
			cvCircle(TimgDrawn, pt, 1, CV_RGB(0,255,0), -1, 8, 0 );
			cvCircle(TimgDrawn, pt, cvRound(p[2]), CV_RGB(255,255,0), 2, 8, 0); 	// img, center, radius, color, thickness, line type, shift

			Tpt = cvPoint(cvRound(p[0]), cvRound(p[1]));
			if (i == 0){
				printf("Magenta Marker (x,y) - (%d, %d) \n", Tpt.x, Tpt.y);
			}
			else {
				printf("TM - extra circle found; frame unreliable\n");
			}
		} // end of for

	// Find and Draw circles for the DM image
	CvPoint DMpt;

	CvSeq* DMimgHCirc = cvHoughCircles(
			DMimgCanny, DMcircStorage, CV_HOUGH_GRADIENT,				// in, out, method,
			2, 															//precision of the accumulator (2x the input image)
			DM_rad_max*4, 												//min dist between circles
			DM_tol_max, DM_tol_min,										//parm1, parm2
			DM_rad_min, DM_rad_max); 									//min radius, max radius

	for (int i=0; i<DMimgHCirc->total; i++) {
		float* p = (float*) cvGetSeqElem(DMimgHCirc, i);
		CvPoint pt = cvPoint(cvRound(p[0]), cvRound(p[1]));

		// Draw center of circles in green
		cvCircle(TimgDrawn, pt, 1, CV_RGB(255,0,0), -1, 8, 0 );
		cvCircle(TimgDrawn, pt, cvRound(p[2]), CV_RGB(255,127,0), 2, 8, 0); 	// img, center, radius, color, thickness, line type, shift

		if (i == 0){
			DMpt = cvPoint(cvRound(p[0]), cvRound(p[1]));
			printf("Red Marker (x,y) - (%d, %d)\n", DMpt.x, DMpt.y);
		}
		else {
			printf("DM - extra circle found; frame unreliable\n");
		}
	} // end of for

	//Draw line between points
		cvLine(TimgDrawn, Tpt, DMpt, CV_RGB(0,255,0), 1, 8, 0);
		d = sqrt(pow(Tpt.x-DMpt.x, 2)+pow(Tpt.y-DMpt.y, 2));      //distance between points
		printf("Distance between targets %d \n", d);

		//Magenta target coordinates
		int MT_pt_x = Tpt.x;
		int MT_pt_y = Tpt.y;
		//Orange target coordinates
		int OT_pt_x = DMpt.x;
		int OT_pt_y = DMpt.y;

		//Bounding coordinates of the two targets
		int x_min;
		int x_max;
		int y_min;
		int y_max;

		if (MT_pt_x > OT_pt_x){
			x_min = OT_pt_x;
			x_max = MT_pt_x;
		}
		else{
			x_min = MT_pt_x;
			x_max = OT_pt_x;
		}
		//printf("x_min %d \n", x_min);
		//printf("x_max %d \n", x_max);

		if (MT_pt_y > OT_pt_y){
			y_min = OT_pt_y;
			y_max = MT_pt_y;
		}
		else{
			y_min = MT_pt_y;
			y_max = OT_pt_y;
		}
		//printf("y_min %d", y_min);
		//printf("y_max %d", y_max);

		//Center of targets point (CT)
		int CT_pt_x = (((x_max - x_min)/2) + x_min);
		int CT_pt_y = (((y_max - y_min)/2) + y_min);
		printf("Center coordinate (x, y) - (%d, %d) \n", CT_pt_x, CT_pt_y);

		//Draw halfway targets point
		CvPoint CT_pt = cvPoint(CT_pt_x, CT_pt_y);
		cvCircle(img, CT_pt, 2, CV_RGB(255,0,0), -1, 8, 0);

		//Orientation of the orange marker relative to the center point
		int orientation_x = (OT_pt_x - CT_pt_x);
		int orientation_y = (CT_pt_y - OT_pt_y);

		double Theta = (((atan2(orientation_y, orientation_x)) * (180/3.14)) + 360);
		printf("Orientation %f Degrees \n", Theta);

	//cvResetImageROI(img);
	cvShowImage("TDrawnImage", TimgDrawn);
	//cvShowImage("DMDrawnImage", DMimgDrawn);

	//clear memory for target and DM circle finder
	//note: this may not be necessary
	cvClearMemStorage(TcircStorage);
	cvClearMemStorage(DMcircStorage);

	return 0;
}
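The orientation step above adds 360 unconditionally, so Theta comes out in the range 180 to 540 degrees. Below is a sketch of the same computation normalized to [0, 360), with the image y-axis flip kept; headingDeg is a hypothetical helper name.

#include <math.h>
#include <opencv/cv.h>

/* Heading in degrees, [0, 360), from the target midpoint toward the
   orange marker; image y grows downward, hence the sign flip. */
static double headingDeg(CvPoint center, CvPoint marker)
{
	double dx = marker.x - center.x;
	double dy = center.y - marker.y;			/* flip image y-axis    */
	double deg = atan2(dy, dx) * 180.0 / M_PI;	/* result in (-180,180] */
	return fmod(deg + 360.0, 360.0);			/* wrap into [0, 360)   */
}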
Example #30
int main()
{
/*********************************** Parameters used by main ***********************************/
	IplImage * srcImg = NULL;						// each color frame read from the camera
	IplImage * img = NULL;							// grayscale version of each frame
	CvCapture * capture;							// pointer to the CvCapture structure
	CvMemStorage* storage = cvCreateMemStorage(0);	// memory storage for the rectangle sequence
	CvSeq* objects = NULL;							// average rectangles of the detected faces
	double scale_factor = 1.2;						// scale factor of the search window
	int min_neighbors = 3;							// minimum number of neighboring rectangles making up a detection
	int flags = 0;									// operation mode
	CvSize min_size = cvSize(40, 40);				// minimum detection window size
	int i, globalK;
	int hist[256];									// histogram array
	int pixelSum;
	int threshold;									// optimal binarization threshold
	clock_t start, stop;							// timing parameters
	IplImage* faceImg = NULL;						// detected face image
	int temp = 0;									// scratch variable
	int temp1 = 0;									// scratch variable
	int count = 0;									// counting variable
	int flag = 0;									// flag variable
	int * tempPtr = NULL;							// scratch pointer
	CvRect* largestFaceRect;						// largest detected face rectangle
	int * horiProject = NULL;						// horizontal projection result (array pointer)
	int * vertProject = NULL;						// vertical projection result (array pointer)
	int * subhoriProject = NULL;					// horizontal projection result (array pointer)
	int * subvertProject = NULL;					// vertical projection result (array pointer)
	int WIDTH;										// image width
	int HEIGHT;										// image height
	int rEyeCol = 0;								// column of the right eye
	int lEyeCol = 0;								// column of the left eye
	int lEyeRow = 0;								// row of the left eye
	int rEyeRow = 0;								// row of the right eye
	int eyeBrowThreshold;							// threshold separating eyebrow from eye
	uchar* rowPtr = NULL;							// pointer to each image row
	uchar* rowPtrTemp = NULL;						// pointer to each image row, scratch
	IplImage* eyeImg = NULL;						// detected eye image
	CvRect eyeRect;									// cropped eye rectangle
	CvRect eyeRectTemp;								// scratch rectangle
	IplImage* lEyeImg = NULL;						// left-eye image
	IplImage* rEyeImg = NULL;						// right-eye image
	IplImage* lEyeImgNoEyebrow = NULL;				// left-eye image with the eyebrow removed
	IplImage* rEyeImgNoEyebrow = NULL;				// right-eye image with the eyebrow removed
	IplImage* lEyeballImg = NULL;					// final segmented left eye-box image
	IplImage* rEyeballImg = NULL;					// final segmented right eye-box image
	IplImage* lMinEyeballImg = NULL;				// final minimal left eye-box image
	IplImage* rMinEyeballImg = NULL;				// final minimal right eye-box image
	int lMinEyeballBlackPixel;						// black-pixel count of the minimal left eye box
	int rMinEyeballBlackPixel;						// black-pixel count of the minimal right eye box
	double lMinEyeballBlackPixelRate;				// black-pixel ratio of the minimal left eye box
	double rMinEyeballBlackPixelRate;				// black-pixel ratio of the minimal right eye box
	double lMinEyeballRectShape;					// aspect ratio of the minimal left eye rectangle
	double rMinEyeballRectShape;					// aspect ratio of the minimal right eye rectangle
	double lMinEyeballBeta;							// black-pixel ratio of the middle 1/2 of the minimal left eye box
	double rMinEyeballBeta;							// black-pixel ratio of the middle 1/2 of the minimal right eye box
	int lEyeState;									// left eye open (0) / closed (1) state
	int rEyeState;									// right eye open (0) / closed (1) state
	int eyeState;									// combined eye open (0) / closed (1) state
	int eyeCloseNum = 0;							// total closed-eye detections in one detection pass
	int eyeCloseDuration = 0;						// consecutive closed-eye detections in one pass
	int maxEyeCloseDuration = 0;					// maximum consecutive closed-eye detections in one pass
	int failFaceNum = 0;							// total frames without a detected face in one pass
	int failFaceDuration = 0;						// consecutive frames without a detected face
	int maxFailFaceDuration = 0;					// maximum consecutive frames without a detected face
	int fatigueState = 1;							// driver state: fatigued driving (1), normal driving (0)

	/********************* Create the display windows **************************/
	cvNamedWindow("img", CV_WINDOW_AUTOSIZE);		// grayscale source image
	cvNamedWindow("segmented face", 1);				// face with the rough eye strip segmented out
	cvNamedWindow("rough left eye region", 1);		// rough left-eye region
	cvNamedWindow("rough right eye region", 1);		// rough right-eye region
	cvNamedWindow("l_binary");						// binarized rough left-eye region
	cvNamedWindow("r_binary");						// binarized rough right-eye region
	cvNamedWindow("lEyeImgNoEyebrow", 1);			// left-eye image with the eyebrow removed
	cvNamedWindow("rEyeImgNoEyebrow", 1);			// right-eye image with the eyebrow removed
	cvNamedWindow("lEyeCenter", 1);					// left-eye image with the iris center marked
	cvNamedWindow("rEyeCenter", 1);					// right-eye image with the iris center marked
	cvNamedWindow("lEyeballImg", 1);				// left-eye image re-cropped to the central 1/2 of lEyeImgNoEyebrow
	cvNamedWindow("rEyeballImg", 1);				// right-eye image re-cropped to the central 1/2 of rEyeImgNoEyebrow
	cvNamedWindow("lkai", 1);						// left eye after the morphological opening
	cvNamedWindow("rkai", 1);						// right eye after the morphological opening
	cvNamedWindow("lMinEyeballImg", 1);				// left iris image shrunk to its bounding box
	cvNamedWindow("rMinEyeballImg", 1);				// right iris image shrunk to its bounding box
	
	
	capture = cvCreateCameraCapture(0);
	if( capture == NULL )
		return -1;

	for( globalK = 1; globalK <= DETECTTIME; globalK ++ ){
		start = clock();
		srcImg = cvQueryFrame(capture);
		img = cvCreateImage(cvGetSize(srcImg), IPL_DEPTH_8U, 1);
		cvCvtColor(srcImg, img, CV_BGR2GRAY);
		if( !img )
			continue;
		cvShowImage("img", img);
		cvWaitKey(20);

	/************************************* Face detection ****************************************/
		cvClearMemStorage(storage);	// reset the storage block's top to its header, i.e. empty it
		detectFace(
			img,					// grayscale image
			objects,				// output: rectangles of detected faces
			storage,				// memory area holding the rectangles
			scale_factor,			// scale factor of the search window
			min_neighbors,			// minimum number of neighboring rectangles making up a detection
			flags,					// operation mode
			cvSize(20, 20)			// minimum detection window size
		);

		// extract the face region
		if ( !objectsTemp->total ){
			printf("Failed to detect face!\n");		// debug output
			failFaceNum ++;							// count frames without a detected face
			failFaceDuration ++;					// count consecutive frames without a detected face

			// a pass may contain only closed-eye and missed-face frames with no open-eye
			// frame, which would leave maxEyeCloseDuration = 0, so update it here
			(eyeCloseDuration > maxEyeCloseDuration) ? maxEyeCloseDuration = eyeCloseDuration : maxEyeCloseDuration;
			eyeCloseDuration = 0;

			if( globalK == DETECTTIME ){
				// if no frame of the pass contained a face, maxFailFaceDuration must be updated here
				(failFaceDuration > maxFailFaceDuration) ? maxFailFaceDuration = failFaceDuration : maxFailFaceDuration;

				printf("\nFATIGUETHRESHOLD: %d\n", FATIGUETHRESHOLD);
				printf("eyeCloseNum: %d\tmaxEyeCloseDuration: %d\n", eyeCloseNum, maxEyeCloseDuration);
				printf("failFaceNum: %d\tmaxFailFaceDuration: %d\n", failFaceNum, maxFailFaceDuration);

				// classify the fatigue state
				fatigueState = recoFatigueState(FATIGUETHRESHOLD, eyeCloseNum, maxEyeCloseDuration, failFaceNum, maxFailFaceDuration);
				if( fatigueState == 1 )
					printf("The driver is in a fatigued driving state\n\n");
				else if( fatigueState == 0 )
					printf("The driver is in a normal driving state\n\n");

				// reset the counters before the next detection pass
				globalK = 0;
				lEyeState = 1;
				rEyeState = 1;
				eyeState = 1;
				eyeCloseNum = 0;
				eyeCloseDuration = 0;
				maxEyeCloseDuration = 0;
				failFaceNum = 0;
				failFaceDuration = 0;
				maxFailFaceDuration = 0;
				fatigueState = 1;

				cvWaitKey(0);
			}

			continue;
		}
		else{
			// track the longest run of consecutive frames without a detected face
			(failFaceDuration > maxFailFaceDuration) ? maxFailFaceDuration = failFaceDuration : maxFailFaceDuration;
			failFaceDuration = 0;

			// find the largest detected face rectangle
			temp = 0;
			for(i = 0; i < (objectsTemp ? objectsTemp->total : 0); i ++) {
				CvRect* rect = (CvRect*) cvGetSeqElem(objectsTemp, i);
				if ( (rect->height * rect->width) > temp ){
					largestFaceRect = rect;
					temp = rect->height * rect->width;
				}
			}

			// use prior knowledge of face proportions to cut out the rough eye strip
			temp = largestFaceRect->width / 8;
			largestFaceRect->x = largestFaceRect->x + temp;
			largestFaceRect->width = largestFaceRect->width - 3*temp/2;
			largestFaceRect->height = largestFaceRect->height / 2;
			largestFaceRect->y = largestFaceRect->y + largestFaceRect->height / 2;
			largestFaceRect->height = largestFaceRect->height / 2;

			cvSetImageROI(img, *largestFaceRect);		// set ROI to the largest detected face region
			faceImg = cvCreateImage(cvSize(largestFaceRect->width, largestFaceRect->height), IPL_DEPTH_8U, 1);
			cvCopy(img, faceImg, NULL);
			cvResetImageROI(img);						// release the ROI
			cvShowImage("segmented face", faceImg);

			eyeRectTemp = *largestFaceRect;
			// use prior knowledge of face proportions to cut out the rough left-eye region
			largestFaceRect->width /= 2;
			cvSetImageROI(img, *largestFaceRect);		// set ROI to the left half of the eye strip
			lEyeImg = cvCreateImage(cvSize(largestFaceRect->width, largestFaceRect->height), IPL_DEPTH_8U, 1);
			cvCopy(img, lEyeImg, NULL);
			cvResetImageROI(img);						// release the ROI
			cvShowImage("rough left eye region", lEyeImg);

			// use prior knowledge of face proportions to cut out the rough right-eye region
			eyeRectTemp.x += eyeRectTemp.width / 2;
			eyeRectTemp.width /= 2;
			cvSetImageROI(img, eyeRectTemp);			// set ROI to the right half of the eye strip
			rEyeImg = cvCreateImage(cvSize(eyeRectTemp.width, eyeRectTemp.height), IPL_DEPTH_8U, 1);
			cvCopy(img, rEyeImg, NULL);
			cvResetImageROI(img);						// release the ROI
			cvShowImage("rough right eye region", rEyeImg);

		/********************************** Binarization ***********************************/
			// image enhancement: histogram equalization already runs once inside detectFace;
			// a nonlinear point operation can also be tried
			/*** binarize the rough left-eye image ***/
			//lineTrans(lEyeImg, lEyeImg, 1.5, 0);		// linear point operation
			cvSmooth(lEyeImg, lEyeImg, CV_MEDIAN);		// median filter, default 3x3 window
			nonlineTrans(lEyeImg, lEyeImg, 0.8);		// nonlinear point operation
			memset(hist, 0, sizeof(hist));				// zero the histogram array
			histogram(lEyeImg, hist);					// compute the image histogram
			// compute the optimal threshold
			pixelSum = lEyeImg->width * lEyeImg->height;
			threshold = ostuThreshold(hist, pixelSum, 45);
			cvThreshold(lEyeImg, lEyeImg, threshold, 255, CV_THRESH_BINARY);	// binarize the image
			// show the binarized image
			cvShowImage("l_binary",lEyeImg);

			/*** binarize the rough right-eye image ***/
			//lineTrans(rEyeImg, rEyeImg, 1.5, 0);		// linear point operation
			cvSmooth(rEyeImg, rEyeImg, CV_MEDIAN);		// median filter, default 3x3 window
			nonlineTrans(rEyeImg, rEyeImg, 0.8);		// nonlinear point operation
			memset(hist, 0, sizeof(hist));				// zero the histogram array
			histogram(rEyeImg, hist);					// compute the image histogram
			// compute the optimal threshold
			pixelSum = rEyeImg->width * rEyeImg->height;
			threshold = ostuThreshold(hist, pixelSum, 45);
			cvThreshold(rEyeImg, rEyeImg, threshold, 255, CV_THRESH_BINARY);	// binarize the image
			// show the binarized image
			cvShowImage("r_binary",rEyeImg);

		/***************************************** Eye detection ********************************************/
			/** if there is a clearly visible eyebrow region, segment it away **/

			// remove the left eyebrow
			HEIGHT = lEyeImg->height;
			WIDTH = lEyeImg->width;
			// allocate memory, freeing any buffers from the previous iteration first
			// (free(NULL) is a no-op; the original re-malloc'd without ever freeing)
			free(horiProject);
			free(vertProject);
			horiProject = (int*)malloc(HEIGHT * sizeof(int));
			vertProject = (int*)malloc(WIDTH * sizeof(int));
			if( horiProject == NULL || vertProject == NULL ){
				printf("Failed to allocate memory\n");
				cvWaitKey(0);
				return -1;
			}
			// zero the buffers
			for(i = 0; i < HEIGHT; i ++)
				*(horiProject + i) = 0;
			for(i = 0; i < WIDTH; i ++)
				*(vertProject + i) = 0;
			histProject(lEyeImg, horiProject, vertProject);				// compute the projection histograms
			lEyeRow = removeEyebrow(horiProject, WIDTH, HEIGHT, 10);	// find the row separating eyebrow and eye

			// remove the right eyebrow
			HEIGHT = rEyeImg->height;
			WIDTH = rEyeImg->width;
			// re-allocate memory, freeing the previous buffers first
			free(horiProject);
			free(vertProject);
			horiProject = (int*)malloc(HEIGHT * sizeof(int));
			vertProject = (int*)malloc(WIDTH * sizeof(int));
			if( horiProject == NULL || vertProject == NULL ){
				printf("Failed to allocate memory\n");
				cvWaitKey(0);
				return -1;
			}
			// zero the buffers
			for(i = 0; i < HEIGHT; i ++)
				*(horiProject + i) = 0;
			for(i = 0; i < WIDTH; i ++)
				*(vertProject + i) = 0;
			histProject(rEyeImg, horiProject, vertProject);				// compute the projection histograms
			rEyeRow = removeEyebrow(horiProject, WIDTH, HEIGHT, 10);	// find the row separating eyebrow and eye

			// show the rough eye regions with the eyebrows removed
			eyeRect = cvRect(0, lEyeRow, lEyeImg->width, (lEyeImg->height - lEyeRow));		// eyebrow-free socket rectangle inside lEyeImg
			cvSetImageROI(lEyeImg, eyeRect);							// set ROI to the eyebrow-free socket
			lEyeImgNoEyebrow = cvCreateImage(cvSize(eyeRect.width, eyeRect.height), IPL_DEPTH_8U, 1);
			cvCopy(lEyeImg, lEyeImgNoEyebrow, NULL);
			cvShowImage("lEyeImgNoEyebrow", lEyeImgNoEyebrow);

			eyeRectTemp = cvRect(0, rEyeRow, rEyeImg->width, (rEyeImg->height - rEyeRow));	// eyebrow-free socket rectangle inside rEyeImg
			cvSetImageROI(rEyeImg, eyeRectTemp);						// set ROI to the eyebrow-free socket
			rEyeImgNoEyebrow = cvCreateImage(cvSize(eyeRectTemp.width, eyeRectTemp.height), IPL_DEPTH_8U, 1);
			cvCopy(rEyeImg, rEyeImgNoEyebrow, NULL);
			cvShowImage("rEyeImgNoEyebrow", rEyeImgNoEyebrow);

			///////////////// locate the eye-center row and column in the eyebrow-free images ///////////////////
			HEIGHT = lEyeImgNoEyebrow->height;
			WIDTH = lEyeImgNoEyebrow->width;
			// re-allocate memory, freeing the previous buffers first (free(NULL) is a no-op)
			free(subhoriProject);
			free(subvertProject);
			subhoriProject = (int*)malloc(HEIGHT * sizeof(int));
			subvertProject = (int*)malloc(WIDTH * sizeof(int));
			if( subhoriProject == NULL || subvertProject == NULL ){
				printf("Failed to allocate memory\n");
				cvWaitKey(0);
				return -1;
			}
			// zero the buffers
			for(i = 0; i < HEIGHT; i ++)
				*(subhoriProject + i) = 0;
			for(i = 0; i < WIDTH; i ++)
				*(subvertProject + i) = 0;

			histProject(lEyeImgNoEyebrow, subhoriProject, subvertProject);	// re-project the segmented left-eye image
			lEyeRow = getEyePos(subhoriProject, HEIGHT, HEIGHT/5);	// locate the row of the left eye
			lEyeCol = getEyePos(subvertProject, WIDTH, WIDTH/5);	// locate the column of the left eye


			HEIGHT = rEyeImgNoEyebrow->height;
			WIDTH = rEyeImgNoEyebrow->width;
			// re-allocate memory, freeing the previous buffers first
			free(subhoriProject);
			free(subvertProject);
			subhoriProject = (int*)malloc(HEIGHT * sizeof(int));
			subvertProject = (int*)malloc(WIDTH * sizeof(int));
			if( subhoriProject == NULL || subvertProject == NULL ){
				printf("Failed to allocate memory\n");
				cvWaitKey(0);
				return -1;
			}
			// zero the buffers
			for(i = 0; i < HEIGHT; i ++)
				*(subhoriProject + i) = 0;
			for(i = 0; i < WIDTH; i ++)
				*(subvertProject + i) = 0;
			histProject(rEyeImgNoEyebrow, subhoriProject, subvertProject);	// re-project the segmented right-eye image
			rEyeRow = getEyePos(subhoriProject, HEIGHT, HEIGHT/5);	// locate the row of the right eye
			rEyeCol = getEyePos(subvertProject, WIDTH,  WIDTH/5);	// locate the column of the right eye
			/*
			printf("************ image of eyes without eyebrow ***********\n");
			printf("Left eye: width: %d\theight: %d\n", lEyeImgNoEyebrow->width, lEyeImgNoEyebrow->height);
			printf("Right eye: width: %d\theight: %d\n", rEyeImgNoEyebrow->width, rEyeImgNoEyebrow->height);
			printf("Right eye: WIDTH: %d\tHEIGHT: %d\n", WIDTH, HEIGHT);
			printf("Center positions of eyes. lEyeRow: %d lEyeCol: %d\trEyeRow: %d rEyeCol: %d\n\n", lEyeRow, lEyeCol, rEyeRow, rEyeCol);
			*/
			// mark the eye centers
			cvCircle(lEyeImgNoEyebrow, cvPoint(lEyeCol, lEyeRow), 3, CV_RGB(0,0,255), 1, 8, 0);
			cvCircle(rEyeImgNoEyebrow, cvPoint(rEyeCol, rEyeRow), 3, CV_RGB(0,0,255), 1, 8, 0);
			cvShowImage("lEyeCenter", lEyeImgNoEyebrow);
			cvShowImage("rEyeCenter", rEyeImgNoEyebrow);
	

		/********************************** Determine the open/closed eye state ***********************************/

			////////////////// cut out the rough eye socket centered on the located eye center /////////////////
			// left eye socket
			HEIGHT = lEyeImgNoEyebrow->height;
			WIDTH = lEyeImgNoEyebrow->width;
			// compute the rough socket region: eyeRect
			eyeRect = cvRect(0, 0, WIDTH, HEIGHT);
			calEyeSocketRegion(&eyeRect, WIDTH, HEIGHT, lEyeCol, lEyeRow);
			/*
			printf("************lEyeImgNoEyebrow************\n");
			printf("width: %d\theight: %d\n", WIDTH, HEIGHT);
			printf("**********lEyeballRect**********\n");
			printf("eyeRect.x = %d\teyeRect.width = %d\n", eyeRect.x, eyeRect.width);
			printf("eyeRect.y = %d\teyeRect.height = %d\n\n", eyeRect.y, eyeRect.height);
			*/
			cvSetImageROI(lEyeImgNoEyebrow, eyeRect);		// set ROI to the detected socket region
			lEyeballImg = cvCreateImage(cvGetSize(lEyeImgNoEyebrow), IPL_DEPTH_8U, 1);
			cvCopy(lEyeImgNoEyebrow, lEyeballImg, NULL);
			cvResetImageROI(lEyeImgNoEyebrow);
			cvShowImage("lEyeballImg", lEyeballImg);

			// right eye socket
			HEIGHT = rEyeImgNoEyebrow->height;
			WIDTH = rEyeImgNoEyebrow->width;
			// compute the rough socket region: eyeRect
			eyeRect = cvRect(0, 0, WIDTH, HEIGHT);
			calEyeSocketRegion(&eyeRect, WIDTH, HEIGHT, rEyeCol, rEyeRow);
			/*
			printf("************rEyeImgNoEyebrow************\n");
			printf("width: %d\theight: %d\n", WIDTH, HEIGHT);
			printf("**********rEyeballRect**********\n");
			printf("eyeRect.x = %d\teyeRect.width = %d\n", eyeRect.x, eyeRect.width);
			printf("eyeRect.y = %d\teyeRect.height = %d\n\n", eyeRect.y, eyeRect.height);
			*/
			cvSetImageROI(rEyeImgNoEyebrow, eyeRect);		// set ROI to the detected socket region
			rEyeballImg = cvCreateImage(cvGetSize(rEyeImgNoEyebrow), IPL_DEPTH_8U, 1);
			cvCopy(rEyeImgNoEyebrow, rEyeballImg, NULL);
			cvResetImageROI(rEyeImgNoEyebrow);
			cvShowImage("rEyeballImg", rEyeballImg);

			/////////////////////////// morphological opening (erode, then dilate) ///////////////////////////
			cvErode(lEyeballImg, lEyeballImg, NULL, 2);		// erode the image
			cvDilate(lEyeballImg, lEyeballImg, NULL, 2);	// dilate the image
			cvShowImage("lkai", lEyeballImg);

			cvErode(rEyeballImg, rEyeballImg, NULL, 1);		// erode the image
			cvDilate(rEyeballImg, rEyeballImg, NULL, 1);	// dilate the image
			cvShowImage("rkai", rEyeballImg);

			/////////////////// compute the minimal eye rectangles ////////////////////

			/////////////////////////// left eye
			HEIGHT = lEyeballImg->height;
			WIDTH = lEyeballImg->width;

			// re-allocate memory, freeing the previous buffers first
			free(subhoriProject);
			free(subvertProject);
			subhoriProject = (int*)malloc(HEIGHT * sizeof(int));
			subvertProject = (int*)malloc(WIDTH * sizeof(int));
			if( subhoriProject == NULL || subvertProject == NULL ){
				printf("Failed to allocate memory\n");
				cvWaitKey(0);
				return -1;
			}
			// zero the buffers
			for(i = 0; i < HEIGHT; i ++)
				*(subhoriProject + i) = 0;
			for(i = 0; i < WIDTH; i ++)
				*(subvertProject + i) = 0;
			histProject(lEyeballImg, subhoriProject, subvertProject);
			// compute the minimal left-eye rectangle
			eyeRectTemp = cvRect(0, 0 , 1, 1);		// initialize
			getEyeMinRect(&eyeRectTemp, subhoriProject, subvertProject, WIDTH, HEIGHT, 5, 3);
			/*
			printf("eyeRectTemp.y: %d\n", eyeRectTemp.y);
			printf("eyeRectTemp.height: %d\n", eyeRectTemp.height);
			printf("eyeRectTemp.x: %d\n", eyeRectTemp.x);
			printf("eyeRectTemp.width: %d\n", eyeRectTemp.width);
			*/
			// aspect ratio of the minimal left-eye rectangle, used later when judging the eye state
			lMinEyeballRectShape = (double)eyeRectTemp.width / (double)eyeRectTemp.height;
			//printf("\nlMinEyeballRectShape: %f\n", lMinEyeballRectShape);

			cvSetImageROI(lEyeballImg, eyeRectTemp);		// set ROI to the minimal-area eye box
			lMinEyeballImg = cvCreateImage(cvGetSize(lEyeballImg), IPL_DEPTH_8U, 1);
			cvCopy(lEyeballImg, lMinEyeballImg, NULL);
			cvResetImageROI(lEyeballImg);
			cvShowImage("lMinEyeballImg", lMinEyeballImg);

			////////////////////////  count the black pixels of the left eye  /////////////////////
			HEIGHT = lMinEyeballImg->height;
			WIDTH = lMinEyeballImg->width;

			// re-allocate memory, freeing the previous buffers first
			free(subhoriProject);
			free(subvertProject);
			subhoriProject = (int*)malloc(HEIGHT * sizeof(int));
			subvertProject = (int*)malloc(WIDTH * sizeof(int));
			if( subhoriProject == NULL || subvertProject == NULL ){
				printf("Failed to allocate memory\n");
				cvWaitKey(0);
				return -1;
			}
			// zero the buffers
			for(i = 0; i < HEIGHT; i ++)
				*(subhoriProject + i) = 0;
			for(i = 0; i < WIDTH; i ++)
				*(subvertProject + i) = 0;

			histProject(lMinEyeballImg, subhoriProject, subvertProject);

			// count the black pixels in lMinEyeballImg
			temp = 0;	// white-pixel count
			for( i = 0; i < WIDTH; i ++ )
				temp += *(subvertProject + i);
			temp /= 255;
			lMinEyeballBlackPixel = WIDTH * HEIGHT - temp;
			lMinEyeballBlackPixelRate = (double)lMinEyeballBlackPixel / (double)(WIDTH * HEIGHT);
			//printf("WIDTH * HEIGHT: %d\tlMinEyeballBlackSum;%d\n\n", WIDTH * HEIGHT, lMinEyeballBlackPixel);
			//printf("lMinEyeballBlackPixelRate;%f\n\n", lMinEyeballBlackPixelRate);

			// black-pixel ratio inside the middle 1/2 region of lMinEyeballImg
			lMinEyeballBeta = 0;
			lMinEyeballBeta = calMiddleAreaBlackPixRate(subvertProject, &eyeRectTemp, WIDTH, HEIGHT, lEyeCol, lMinEyeballBlackPixel);

			//printf("lMinEyeballBeta; %f\n\n", lMinEyeballBeta);



			//////////////////////////////////// right eye
			HEIGHT = rEyeballImg->height;
			WIDTH = rEyeballImg->width;
			// re-allocate memory, freeing the previous buffers first
			free(subhoriProject);
			free(subvertProject);
			subhoriProject = (int*)malloc(HEIGHT * sizeof(int));
			subvertProject = (int*)malloc(WIDTH * sizeof(int));
			if( subhoriProject == NULL || subvertProject == NULL ){
				printf("Failed to allocate memory\n");
				cvWaitKey(0);
				return -1;
			}
			// zero the buffers
			for(i = 0; i < HEIGHT; i ++)
				*(subhoriProject + i) = 0;
			for(i = 0; i < WIDTH; i ++)
				*(subvertProject + i) = 0;
			histProject(rEyeballImg, subhoriProject, subvertProject);

			// compute the minimal right-eye rectangle
			eyeRectTemp = cvRect(0, 0 , 1, 1);
			getEyeMinRect(&eyeRectTemp, subhoriProject, subvertProject, WIDTH, HEIGHT, 5, 3);

			// aspect ratio of the minimal right-eye rectangle, used later when judging the eye state
			rMinEyeballRectShape = (double)eyeRectTemp.width / (double)eyeRectTemp.height;
			//printf("\nrMinEyeballRectShape: %f\n", rMinEyeballRectShape);

			cvSetImageROI(rEyeballImg, eyeRectTemp);		// set ROI to the minimal-area eye box
			rMinEyeballImg = cvCreateImage(cvGetSize(rEyeballImg), IPL_DEPTH_8U, 1);
			cvCopy(rEyeballImg, rMinEyeballImg, NULL);
			cvResetImageROI(rEyeballImg);
			cvShowImage("rMinEyeballImg", rMinEyeballImg);

			////////////////////////  count the black pixels of the right eye  /////////////////////
			HEIGHT = rMinEyeballImg->height;
			WIDTH = rMinEyeballImg->width;

			// re-allocate memory, freeing the previous buffers first
			free(subhoriProject);
			free(subvertProject);
			subhoriProject = (int*)malloc(HEIGHT * sizeof(int));
			subvertProject = (int*)malloc(WIDTH * sizeof(int));
			if( subhoriProject == NULL || subvertProject == NULL ){
				printf("Failed to allocate memory\n");
				cvWaitKey(0);
				return -1;
			}
			// zero the buffers
			for(i = 0; i < HEIGHT; i ++)
				*(subhoriProject + i) = 0;
			for(i = 0; i < WIDTH; i ++)
				*(subvertProject + i) = 0;
			histProject(rMinEyeballImg, subhoriProject, subvertProject);	// compute the projection histograms

			// count the black pixels in rMinEyeballImg
			temp = 0;	// white-pixel count
			for( i = 0; i < WIDTH; i ++ )
				temp += *(subvertProject + i);
			temp /= 255;
			rMinEyeballBlackPixel = WIDTH * HEIGHT - temp;
			rMinEyeballBlackPixelRate = (double)rMinEyeballBlackPixel / (double)(WIDTH * HEIGHT);
			//printf("WIDTH * HEIGHT: %d\trMinEyeballBlackSum;%d\n\n", WIDTH * HEIGHT, rMinEyeballBlackPixel);
			//printf("rMinEyeballBlackPixelRate; %f\n\n", rMinEyeballBlackPixelRate);

			// black-pixel ratio inside the middle 1/2 region of rMinEyeballImg
			rMinEyeballBeta = 0;
			rMinEyeballBeta = calMiddleAreaBlackPixRate(subvertProject, &eyeRectTemp, WIDTH, HEIGHT, rEyeCol, rMinEyeballBlackPixel);

			//printf("temp:%d\trMinEyeballBeta; %f\n\n", temp, rMinEyeballBeta);

			// judge the open/closed state of the eyes
			lEyeState = 1;		// left-eye state, default closed
			rEyeState = 1;		// right-eye state, default closed
			eyeState = 1;		// combined eye state, default closed
			if( lMinEyeballBlackPixel > 50)
				lEyeState = getEyeState(lMinEyeballRectShape, lMinEyeballBlackPixelRate, lMinEyeballBeta);
			else
				lEyeState = 1;

			if( rMinEyeballBlackPixel > 50)
				rEyeState = getEyeState(rMinEyeballRectShape, rMinEyeballBlackPixelRate, rMinEyeballBeta);
			else
				rEyeState = 1;
			(lEyeState + rEyeState) == 2 ? eyeState = 1 : eyeState=0;

			// count the eye closures
			if( eyeState == 1 ){
				eyeCloseNum ++;					// total number of closed-eye detections
				eyeCloseDuration ++;
				if( globalK == DETECTTIME){
					// the pass may consist entirely of closed-eye frames,
					// with no open-eye or missed-face frame to update the maximum
					(eyeCloseDuration > maxEyeCloseDuration) ? maxEyeCloseDuration = eyeCloseDuration : maxEyeCloseDuration;
					eyeCloseDuration = 0;
				}
			}
			else{
				(eyeCloseDuration > maxEyeCloseDuration) ? maxEyeCloseDuration = eyeCloseDuration : maxEyeCloseDuration;
				eyeCloseDuration = 0;
			}
		} // end of the if that checks whether a face was detected

	/*
		printf("\n************** eye state ***************\n");
		printf("lEyeState: %d\trEyeState: %d\n", lEyeState, rEyeState);
		printf("eyeState: %d\n\n\n\n", eyeState);
	*/

		// timing: duration of one loop iteration
		stop = clock();
		//printf("run time: %f\n", (double)(stop - start) / CLOCKS_PER_SEC);

		printf("eyeState: %d\n", eyeState);

		// reset the loop state and move on to the next detection pass
		if( globalK == DETECTTIME ){
			printf("\nFATIGUETHRESHOLD*****: %d\n", FATIGUETHRESHOLD);
			printf("eyeCloseNum: %d\tmaxEyeCloseDuration: %d\n", eyeCloseNum, maxEyeCloseDuration);
			printf("failFaceNum: %d\tmaxFailFaceDuration: %d\n", failFaceNum, maxFailFaceDuration);

			// classify the fatigue state
			fatigueState = recoFatigueState(FATIGUETHRESHOLD, eyeCloseNum, maxEyeCloseDuration, failFaceNum, maxFailFaceDuration);
			if( fatigueState == 1 )
				printf("The driver is in a fatigued driving state\n\n");
			else if( fatigueState == 0 )
				printf("The driver is in a normal driving state\n\n");

			// reset the counters before the next detection pass
			globalK = 0;
			lEyeState = 1;
			rEyeState = 1;
			eyeState = 1;
			eyeCloseNum = 0;
			eyeCloseDuration = 0;
			maxEyeCloseDuration = 0;
			failFaceNum = 0;
			failFaceDuration = 0;
			maxFailFaceDuration = 0;
			fatigueState = 1;
			char c = cvWaitKey(0);
			if( c == 27 )
				break;
			else
				continue;
		}
	} // end of the detection-pass for loop

	// release resources
	cvDestroyWindow("segmented face");
	cvDestroyWindow("rough left eye region");
	cvDestroyWindow("rough right eye region");
	cvDestroyWindow("l_binary");
	cvDestroyWindow("r_binary");
	cvDestroyWindow("lEyeImgNoEyebrow");
	cvDestroyWindow("rEyeImgNoEyebrow");
	cvDestroyWindow("lEyeCenter");
	cvDestroyWindow("rEyeCenter");
	cvDestroyWindow("lEyeballImg");
	cvDestroyWindow("rEyeballImg");
	cvDestroyWindow("lkai");
	cvDestroyWindow("rkai");
	cvDestroyWindow("lMinEyeballImg");
	cvDestroyWindow("rMinEyeballImg");
	cvReleaseMemStorage(&storage);
	cvReleaseImage(&eyeImg);
	free(horiProject);
	free(vertProject);
	free(subhoriProject);
	free(subvertProject);

	return 0;
}
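Example #30 leans on a histProject() helper that is not included in the snippet. Below is a hypothetical sketch of what it computes, judging from how the callers use the buffers (row and column sums of a 0/255 binary image, written into caller-zeroed arrays); the real signature may differ.

#include <opencv/cv.h>

/* Integral projections of an 8-bit binary image: horiProject[y] gets the
   sum of row y, vertProject[x] the sum of column x. The caller allocates
   and zeroes both arrays, as in the example above. */
static void histProject(const IplImage* bin, int* horiProject, int* vertProject)
{
	for (int y = 0; y < bin->height; y++) {
		const uchar* row = (const uchar*)(bin->imageData + y * bin->widthStep);
		for (int x = 0; x < bin->width; x++) {
			horiProject[y] += row[x];
			vertProject[x] += row[x];
		}
	}
}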