Example #1
int main()
{
	// load the input images
	IplImage* image1 = cvLoadImage(IMAGE_FILE_NAME_1);
	IplImage* image2 = cvLoadImage(IMAGE_FILE_NAME_2);
	IplImage* grayImage1 = cvCreateImage(cvGetSize(image1), IPL_DEPTH_8U, 1);
	IplImage* grayImage2 = cvCreateImage(cvGetSize(image2), IPL_DEPTH_8U, 1);
	IplImage* resultImage = cvCreateImage(cvSize(image1->width*2, image1->height), IPL_DEPTH_8U, 3);

	cvCvtColor(image1, grayImage1, CV_BGR2GRAY);
	cvCvtColor(image2, grayImage2, CV_BGR2GRAY);

	windage::Algorithms::FeatureDetector* detector = new windage::Algorithms::SIFTGPUdetector();
	windage::Algorithms::SearchTree* tree = new windage::Algorithms::FLANNtree();
	windage::Algorithms::HomographyEstimator* estimator = new windage::Algorithms::RANSACestimator();
	windage::Algorithms::OutlierChecker* checker = new windage::Algorithms::OutlierChecker();
	
	std::vector<windage::FeaturePoint>* feature = NULL;
	std::vector<windage::FeaturePoint> feature1;
	std::vector<windage::FeaturePoint> feature2;
	std::vector<windage::FeaturePoint> matching1;
	std::vector<windage::FeaturePoint> matching2;

	tree->SetRatio(0.3);
	estimator->AttatchReferencePoint(&matching1);
	estimator->AttatchScenePoint(&matching2);
	estimator->SetReprojectionError(REPROJECTION_ERRPR);

	checker->AttatchEstimator(estimator);
	checker->SetReprojectionError(REPROJECTION_ERRPR);

	cvNamedWindow("result");


	bool processing = true;
	while(processing)
	{
		feature1.clear();
		feature2.clear();
		matching1.clear();
		matching2.clear();
		
		detector->DoExtractKeypointsDescriptor(grayImage1);
		feature = detector->GetKeypoints();
		for(unsigned int i=0; i<feature->size(); i++)
		{
			feature1.push_back((*feature)[i]);
		}

		detector->DoExtractKeypointsDescriptor(grayImage2);
		feature = detector->GetKeypoints();
		for(unsigned int i=0; i<feature->size(); i++)
		{
			feature2.push_back((*feature)[i]);
		}

		Matching(tree, &feature1, &feature2, &matching1, &matching2);
		estimator->Calculate();
		checker->Calculate();

		cvSetImageROI(resultImage, cvRect(0, 0, image1->width, image1->height));
		cvCopyImage(image1, resultImage);
		cvSetImageROI(resultImage, cvRect(image1->width, 0, image1->width, image1->height));
		cvCopyImage(image2, resultImage);
		cvResetImageROI(resultImage);

		int count = (int)matching1.size();
		for(int i=0; i<count; i++)
		{
			double R = (count - i)/(double)count * 255.0;
			double G = (i+1)/(double)count * 255.0;
			if(matching1[i].IsOutlier() == false)
				cvLine(resultImage, cvPoint((int)matching1[i].GetPoint().x, (int)matching1[i].GetPoint().y), cvPoint(image1->width + (int)matching2[i].GetPoint().x, (int)matching2[i].GetPoint().y), CV_RGB(0, 255, 0));
			else
				cvLine(resultImage, cvPoint((int)matching1[i].GetPoint().x, (int)matching1[i].GetPoint().y), cvPoint(image1->width + (int)matching2[i].GetPoint().x, (int)matching2[i].GetPoint().y), CV_RGB(255, 0, 0));

		}
		
		cvShowImage("result", resultImage);

		char ch = cvWaitKey(1);
		switch(ch)
		{
		case 's':
		case 'S':
			cvSaveImage("FeaturePairMatching.png", resultImage);
			break;
		case 'q':
		case 'Q':
		case 27:
			processing = false;
			break;
		}
	}

	cvDestroyAllWindows();
	return 0;
}
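
The side-by-side canvas above is composed purely with ROI bookkeeping: each cvCopy lands in whichever half the current ROI selects. A minimal sketch of that pattern, assuming two already-loaded, equally sized BGR images named `left` and `right` (hypothetical names, not from the snippet):

	// Compose two images side by side via ROI-directed copies.
	IplImage* canvas = cvCreateImage(cvSize(left->width * 2, left->height), IPL_DEPTH_8U, 3);
	cvSetImageROI(canvas, cvRect(0, 0, left->width, left->height));
	cvCopy(left, canvas, NULL);                      // fills the left half
	cvSetImageROI(canvas, cvRect(left->width, 0, right->width, right->height));
	cvCopy(right, canvas, NULL);                     // fills the right half
	cvResetImageROI(canvas);                         // reset before drawing or showing

Example #2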
void recognize(IplImage* original_images, IplImage* processed_original_images, IplImage** templates)
{
	double matching_result = 0;
	double matching_max = 0;
	double matching_first_max = 0;
	int template_flag = -1;

	IplImage* drawed_original_images = cvCloneImage(original_images);
	//extract the sub images first and do the matching image by image.
	sub_image* sub_images = extract_sub_image(processed_original_images,MIN_SUB_IMAGE_WIDTH,MIN_SUB_IMAGE_HEIGHT);

	int ori_flag = 0;	
	while(sub_images != NULL)
	{
		//different original sub image to be matched.
		printf("ori_flag  = %d\n",ori_flag++);
		for(int i = 0; i < TEMPLATES_NUM; i++)
		{	
			//match the given image with the provided templates.
			
			//extract the template as well, cutting the black edges that might have a negative impact on the matching.
			sub_image* temp_template = extract_sub_image(templates[i],MIN_SUB_IMAGE_WIDTH,MIN_SUB_IMAGE_HEIGHT);

			//match the original sub image with template.
			matching_result = template_original_match(sub_images->image, temp_template->image);
			
			printf("with template %d, result = %f\n",i,matching_result);
			
			//find the maximum
			if(i == 0)
			{
				matching_first_max = matching_result;
				matching_max = matching_result;
			}
			else
			{
				if(matching_result > matching_max)
				{					
					matching_max = matching_result ;
					template_flag = i;
				}
			}
		}
		
		if(matching_first_max == matching_max)
		{
			template_flag = 0;
		}

		//if the object not found or the match result is not reasonable, do nothing.
		if(template_flag == -1 || matching_max < MATCHING_FALIED_THRESHOLD)
		{
			printf("image not matched\n");	
		}
		else
		{
			//draw the matched template on the original image.
			IplImage* drawed_templates_resized = cvCreateImage(cvSize(30,30),processed_original_images->depth,processed_original_images-> nChannels);
			sub_image* min_temp_template = extract_sub_image(templates[template_flag],MIN_SUB_IMAGE_WIDTH,MIN_SUB_IMAGE_HEIGHT);
			cvResize(min_temp_template->image,drawed_templates_resized);
			
			printf("draw left = %d, top = %d\n",sub_images->image_left,sub_images->image_top);
			//draw the template.
			cvSetImageROI(drawed_original_images, cvRect(sub_images->image_left, sub_images->image_top, drawed_templates_resized->width, drawed_templates_resized->height));  
			cvCopy(drawed_templates_resized, drawed_original_images);  
			cvResetImageROI(drawed_original_images); 
		}
		sub_images = sub_images->next_image;
	}
	
	cvShowImage("result",drawed_original_images);
}
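
The draw step above is the inverse of a crop: set the ROI on the destination and copy a same-sized source into it. The pattern in isolation, with `dst`, `patch`, `x`, and `y` standing in for the variables used above:

	// Paste `patch` into `dst` at (x, y); the ROI must match the patch size
	// and lie fully inside `dst`.
	cvSetImageROI(dst, cvRect(x, y, patch->width, patch->height));
	cvCopy(patch, dst, NULL);
	cvResetImageROI(dst);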
Example #3
std::string basicOCR::readText(std::string filesrc)
{

	IplImage* imgSrc = cvLoadImage(filesrc.data(),CV_LOAD_IMAGE_COLOR);
	IplImage* img_gray = cvCreateImage(cvGetSize(imgSrc), IPL_DEPTH_8U, 1);
	IplImage* img_check = cvCreateImage(cvGetSize(imgSrc), IPL_DEPTH_8U, 1);
	//cvSmooth(img_gray,img_gray,CV_GAUSSIAN,5,5);
	//cvCopyImage(imgSrc,img_gray);
	cvCvtColor(imgSrc, img_gray, CV_BGR2GRAY);
	cvCvtColor(imgSrc, img_check, CV_BGR2GRAY);
	cvThreshold(img_check, img_check,160, 255,CV_THRESH_BINARY);
	cvThreshold(img_gray, img_gray,160, 255,CV_THRESH_BINARY_INV);// CV_THRESH_BINARY_INV makes the background black and the characters white, so the outermost contours found are the outlines of the characters
	//cvShowImage("ThresholdImg",img_gray);
	CvSeq* contours = NULL;
	CvMemStorage* storage = cvCreateMemStorage(0); 
	int count = cvFindContours(img_gray, storage, &contours,sizeof(CvContour),CV_RETR_EXTERNAL);
	int idx = 0;
	char szName[56] = {0};
	int tempCount=0;
	LISTRECT allrect;
	LISTRECT line;
	double countH = 0;
	double countW = 0;
	//collect all character bounding boxes, sorted along the X axis
	int buu = 0;
	std::string output = "";
	for (CvSeq* c = contours; c != NULL; c = c->h_next)
	{
		bool isInserted = false;
		CvRect rc =cvBoundingRect(c,0);
		countH += rc.height;
		countW += rc.width;
		for (ILR i = allrect.begin();i!= allrect.end();++i)
		{

			if(rc.x < i->x)
			{
				allrect.insert(i,rc);
				isInserted = true;
				break;
			}
		}
		if (isInserted == false)
		{
			allrect.push_back(rc);
		}
	}

	double avgh = countH/allrect.size();
	double avgw = countW/allrect.size();
	for (line.clear();allrect.size() != 0;line.clear())
	{
		//find the highest char
		ILR i = allrect.begin();
		int miny = i->y;
		for (++i;i != allrect.end(); ++i)
		{
			if (miny > i->y)
			{
				miny = i->y;
			}
		}
		//find first char of line
		for (i = allrect.begin();i->y > (miny+avgh)*1.2 ;++i);
		//cvDrawRect(imgSrc, cvPoint(i->x, i->y), cvPoint(i->x + i->width, i->y + i->height), CV_RGB(255, 0, 0));
		double countY = i->y + avgh;
		int lastXb = i->x;
		int lastXe = i->x+i->width;
		int lastY = i->y + i->height;
		int countX = 0;
		int countSpace = 0;
		//put first char to line list
		line.push_back(*i);
		i = allrect.erase(i);

		for (;i != allrect.end();)
		{
			//find next char
			if(i->y < lastY || i->y < (countY))
			{
				//cvDrawRect(imgSrc, cvPoint(i->x, i->y), cvPoint(i->x + i->width, i->y + i->height), CV_RGB(255, 0, 0));
				countX += i->x - lastXb;
				countSpace += i->x - lastXe;
				//countY += i->y + i->height;
				lastY = i->y + i->height;
				lastXb = i->x;
				lastXe = i->x+i->width;
				line.push_back(*i);
				i = allrect.erase(i);

			}
			else
			{
				++i;
			}
		}

		for (ILR li = line.begin();li != line.end();)
		{
			ILR lasti = li;
			li++;
			if (li == line.end())
			{
				break;
			}
			//cvDrawRect(imgSrc, cvPoint(li->x, li->y), cvPoint(li->x + li->width, li->y + li->height), CV_RGB(255, 0, 0));

			if (((li->height < avgh/2) || (lasti->height < avgh/2))
				&& ((li->x - lasti->x) < (countX/(line.size()-1)/2) 
				|| (li->x+li->width > lasti->x && li->x+li->width < (lasti->x+lasti->width)) 
				|| (li->x > lasti->x && li->x < (lasti->x+lasti->width))))
			{				
				int x = std::min(li->x,lasti->x);
				int y = std::min(li->y, lasti->y);
				int height = (std::max(li->y+li->height, lasti->y+lasti->height) - y);
				int width = (std::max(li->x+li->width, lasti->x+lasti->width) - x);
				CvRect add = {x,y,width,height};
				*lasti = add;
				li = line.erase(li);
				li--;
				//line.insert(li,add);
			}
		}

		for (ILR ci  = line.begin();ci != line.end();ci++)
		{
			int rate = ((double)ci->width/(double)ci->height) /((double)avgw/(double)avgh*2);
			rate++;
			if (rate > 1)
			{
				int x = ci->x;
				int y = ci->y;
				int h = ci->height;
				int w = ci->width;
				ci = line.erase(ci);
				for(int a = rate;a > 0 ;a--)
				{
					CvRect add = {x+w/rate*(rate-a),y,w/rate,h};
					if (ci == line.end())
					{
						line.push_back(add);
						ci = line.end();
					}
					else
					{
						line.insert(ci,add);
					}
				}
				ci--;
			}
		}
		int c = 0;
		i = line.begin();
		IplImage* imgNo = cvCreateImage(cvSize(i->width, i->height), IPL_DEPTH_8U, 1); 
		cvSetImageROI(img_check, *i);
		cvCopyImage(img_check, imgNo);
		cvResetImageROI(img_check); 
		char temp;
		temp = classify(imgNo,0);
		printf("%c",temp);
		c = c*10 + classify(imgNo,0);
		output += temp;
		//cvDrawRect(imgSrc, cvPoint(i->x, i->y), cvPoint(i->x + i->width, i->y + i->height), CV_RGB(255, 0, 0));
		int lastX = i->x+i->width;
		lastY = i->y;

		for (i++;i != line.end(); ++i)
		{
			buu++;
			if (i->x - lastX > (countSpace / (line.size() - 1)))
			{
							/*
				//cvDrawRect(imgSrc, cvPoint(lastX, lastY), cvPoint(lastX + avgw, lastY + avgh), CV_RGB(255, 0, 0));
				CvRect space = {lastX, lastY, avgw, avgh};
				imgNo = cvCreateImage(cvSize(avgw, avgh), IPL_DEPTH_8U, 1); 
				cvSetImageROI(img_check, space);
				cvCopyImage(img_check, imgNo);
				cvResetImageROI(img_check);
				temp = classify(imgNo,0);
				c = c*10 + classify(imgNo,0);
				output += temp;
				imgNo = cvCreateImage(cvSize(i->width, i->height), IPL_DEPTH_8U, 1); 
				cvSetImageROI(img_check, *i);
				cvCopyImage(img_check, imgNo);
				cvResetImageROI(img_check); 
				temp = classify(imgNo,0);
				*/
				printf(" ",temp);
			}
			lastX = i->x+i->width;
			lastY = i->y; 
			imgNo = cvCreateImage(cvSize(i->width, i->height), IPL_DEPTH_8U, 1); 
			cvSetImageROI(img_check, *i);
			cvCopyImage(img_check, imgNo);
			cvResetImageROI(img_check); 
			temp = classify(imgNo,0);
			printf("%c",temp);
			c = c*10 + classify(imgNo,0);
			output += temp;
			//char szName[56] = {0};
			//sprintf(szName, "%d", idx++); 
			//cvNamedWindow(szName); 
			//cvShowImage(szName, imgNo); 
			//cvDrawRect(imgSrc, cvPoint(i->x, i->y), cvPoint(i->x + i->width, i->y + i->height), CV_RGB(255, 0, 0));
		}
		output += "\n";
		printf("\n");
		//printf("%d\n",c);
	}
	printf("轮廓个数:%d",++buu);
	/*
	cvNamedWindow("src"); 
	cvShowImage("src", imgSrc);
	cvWaitKey(0); 
	cvReleaseMemStorage(&storage); 
	cvReleaseImage(&imgSrc); 
	cvReleaseImage(&img_gray); 
	cvDestroyAllWindows(); 
	*/
	return output;                    
}
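Each glyph above is cut out with the same ROI crop; note that the loop allocates a fresh imgNo per character and never releases it, so adding a cvReleaseImage avoids accumulating images. A sketch of the crop step, assuming the binarized page `img_check` and a bounding box `rc` as in the function:

	IplImage* glyph = cvCreateImage(cvSize(rc.width, rc.height), IPL_DEPTH_8U, 1);
	cvSetImageROI(img_check, rc);
	cvCopy(img_check, glyph, NULL);     // copies only the ROI into the matching-sized glyph
	cvResetImageROI(img_check);
	char label = classify(glyph, 0);    // classify() is this project's own recognizer
	cvReleaseImage(&glyph);             // prevents the per-character leak

Example #4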
// TODO: revise later
void FkPaperKeyboard_TypeA::cornerVerification(IplImage* srcImage){
	CvSize size = cvGetSize(srcImage);
	IplImage* eigImage = cvCreateImage(size, IPL_DEPTH_8U,1);
	IplImage* tempImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* grayImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* veriImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* dstImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* mask = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* mask2 = cvCreateImage(size, IPL_DEPTH_8U, 1);
	CvRect rect = cvRect(10, 10, 640 - 20, 480 - 20);

	CvPoint2D32f srcQuad[4], dstQuad[4];
	CvMat* warp_matrix = cvCreateMat(3,3, CV_32FC1);
	CvMat* warp_matrix_invert = cvCreateMat(3,3, CV_32FC1);
	CvMat* result = cvCreateMat(3, 1, CV_32FC1);
	CvMat* dst = cvCreateMat(3, 1,CV_32FC1);

	int keyButtonCornerCount = 316;
	
	cvCvtColor(srcImage, grayImage, CV_BGR2GRAY);
	cvSetImageROI(grayImage, rect);
	cvSetImageROI(mask, rect);
	cvSetImageROI(dstImage, rect);
	cvSetImageROI(mask2, rect);

	// extract only the values within the threshold range (100~255) and store them in the mask
	cvInRangeS(grayImage, cvScalar(100, 100, 100), cvScalar(255, 255, 255), mask);
	cvCopy(mask, mask2);

	//cvShowImage("mask", mask);
	//cvShowImage("mask2", mask2);

	// flood-fill the background, then XOR the filled values (mask) with the extracted values (mask2) to exclude the below-threshold region
	cvFloodFill(mask, cvPoint(10, 10), cvScalar(0, 0, 0));
	cvXor(mask2, mask, dstImage);
	
	//cvShowImage("mask3", mask);
	//cvShowImage("mask4", mask2);
	//cvShowImage("dstImage", dstImage);

	// extract corners from the final processed image (the corners of each key button)
	cvGoodFeaturesToTrack(dstImage, eigImage, tempImage, keyButtonCorner, &keyButtonCornerCount, 0.01, 7, NULL, 7, 0);
	cvFindCornerSubPix (dstImage, keyButtonCorner, keyButtonCornerCount,cvSize (3, 3), cvSize (-1, -1), cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
	
	cvResetImageROI(dstImage);
	for(int i =0 ; i < 316 ; i++){
		keyButtonCorner[i].x += rect.x;
		keyButtonCorner[i].y += rect.y;
	}
	
	initKeyButtonCorner();
	
	srcQuad[CLOCKWISE_1].x = keyButtonCorner[315].x+10;
	srcQuad[CLOCKWISE_1].y = keyButtonCorner[315].y-10;
	srcQuad[CLOCKWISE_5].x = keyButtonCorner[31].x + 10;
	srcQuad[CLOCKWISE_5].y = keyButtonCorner[31].y + 10;
	srcQuad[CLOCKWISE_7].x = keyButtonCorner[0].x - 10;
	srcQuad[CLOCKWISE_7].y = keyButtonCorner[0].y + 10;
	srcQuad[CLOCKWISE_11].x = keyButtonCorner[290].x - 10;
	srcQuad[CLOCKWISE_11].y = keyButtonCorner[290].y - 10;
	dstQuad[CLOCKWISE_1].x = 640;
	dstQuad[CLOCKWISE_1].y = 0;
	dstQuad[CLOCKWISE_5].x = 640;
	dstQuad[CLOCKWISE_5].y = 480;
	dstQuad[CLOCKWISE_7].x = 0;
	dstQuad[CLOCKWISE_7].y = 480;
	dstQuad[CLOCKWISE_11].x = 0;
	dstQuad[CLOCKWISE_11].y = 0;
	cvGetPerspectiveTransform(srcQuad, dstQuad, warp_matrix);
	
	cvWarpPerspective(dstImage, veriImage, warp_matrix);
	detectKeyButtonCorner(veriImage);
	cvInvert(warp_matrix, warp_matrix_invert);
	for(int i = 0 ; i < 316 ; i++){	
		cvmSet(dst, 0, 0, keyButtonCorner[i].x);  
		cvmSet(dst, 1, 0, keyButtonCorner[i].y);
		cvmSet(dst, 2, 0, 1);

		cvMatMul(warp_matrix_invert, dst, result);
		float t = cvmGet(result, 2,0);
		keyButtonCorner[i].x = cvmGet(result, 0,0)/t ;
		keyButtonCorner[i].y = cvmGet(result, 1,0)/t ;
	}
	cvResetImageROI(srcImage);
	cvResetImageROI(mask);
	cvReleaseImage(&eigImage);
	cvReleaseImage(&tempImage);
	cvReleaseImage(&grayImage);
	cvReleaseImage(&veriImage);
	cvReleaseImage(&dstImage);
	cvReleaseImage(&mask);
	cvReleaseImage(&mask2);
	cvReleaseMat(&warp_matrix);
	cvReleaseMat(&warp_matrix_invert);
	cvReleaseMat(&result);
	cvReleaseMat(&dst);	
}
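The loop above maps each corner back through the inverse perspective transform by hand: multiply the 3x3 inverse by the homogeneous point and divide by the third component. The same step in isolation, assuming a 3x3 CV_32FC1 matrix `Hinv` and an input point (px, py) (hypothetical names):

	CvMat* p = cvCreateMat(3, 1, CV_32FC1);
	CvMat* q = cvCreateMat(3, 1, CV_32FC1);
	cvmSet(p, 0, 0, px);
	cvmSet(p, 1, 0, py);
	cvmSet(p, 2, 0, 1.0);              // homogeneous coordinate
	cvMatMul(Hinv, p, q);              // q = Hinv * p
	float w = (float)cvmGet(q, 2, 0);  // perspective divide
	float x = (float)(cvmGet(q, 0, 0) / w);
	float y = (float)(cvmGet(q, 1, 0) / w);
	cvReleaseMat(&p);
	cvReleaseMat(&q);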
Example #5
// the function draws all the squares in the image and crop and save images
void drawSquaresAndCrop(char *pFileName, IplImage* imgFilter, IplImage* img, CvSeq* squares )
{
    CvSeqReader reader;
    IplImage* cpy = cvCloneImage( imgFilter );
	IplImage* cpyc = cvCloneImage( imgFilter );
    int i;
	char sFileNameCroped[255];
    
    // initialize reader of the sequence
    cvStartReadSeq( squares, &reader, 0 );
    
    // read 4 sequence elements at a time (all vertices of a square)
    for(int iCnt=0, i = 0; i < squares->total; i += 4,iCnt++ )
    {
        CvPoint pt[4], *rect = pt;
        int count = 4;
        
        // read 4 vertices
        CV_READ_SEQ_ELEM( pt[0], reader );
        CV_READ_SEQ_ELEM( pt[1], reader );
        CV_READ_SEQ_ELEM( pt[2], reader );
        CV_READ_SEQ_ELEM( pt[3], reader );
        
        // draw the square as a closed polyline 
        cvPolyLine( cpy, &rect, &count, 1, 1, CV_RGB(0,255,0), 3, CV_AA, 0 );

		// Get Area to crop
		CvRect rc = GetRect(pt);
		// Filter the area full image
		if (abs(rc.width-img->width)>POINTS_NEAR ||
			abs(rc.height-img->height)>POINTS_NEAR){

			// Draw area
			CvPoint pt1, pt2;
			pt1.x = rc.x;
			pt1.y = rc.y;
			pt2.x = pt1.x+rc.width;
			pt2.y = pt1.y+rc.height;
			cvRectangle(cpy, pt1, pt2, CV_RGB(0,0,255),2);

			
			// sets the Region of Interest 
			// Note that the rectangle area has to be __INSIDE__ the image 
			cvSetImageROI(cpyc, rc);
			// create destination image 
			// Note that cvGetSize will return the width and the height of ROI 
			IplImage *img1 = cvCreateImage(cvGetSize(cpyc), 
										   cpyc->depth, 
										   cpyc->nChannels);
			 
			// copy subimage 
			cvCopy(cpyc, img1, NULL);		 
			// save file
			char stype[32];
			char sFile[255];
			strcpy(sFile, pFileName);
			strcpy(stype, &(pFileName[strlen(pFileName)-3]));
			sFile[strlen(pFileName)-4] = '\0';
			
			sprintf(sFileNameCroped, "%s_%d.%s", sFile,iCnt,stype);
			cvSaveImage(sFileNameCroped, img1);

			// always reset the Region of Interest on the source image
			cvResetImageROI(cpyc);
			cvReleaseImage(&img1);
		}
	}
    
    // show the resultant image
    cvShowImage( wndname, cpy );
    cvReleaseImage( &cpy );
	cvReleaseImage( &cpyc );
}
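The crop above leans on a legacy-API detail: once an ROI is set, cvGetSize reports the ROI size rather than the full image size, so the destination can be created directly from it. A minimal sketch, assuming a source image `src` and a rectangle `rc` lying inside it (hypothetical names):

	cvSetImageROI(src, rc);
	IplImage* crop = cvCreateImage(cvGetSize(src),   // ROI size, not the full size
	                               src->depth, src->nChannels);
	cvCopy(src, crop, NULL);
	cvResetImageROI(src);                            // reset the source, not the copy
	cvSaveImage("crop.png", crop);
	cvReleaseImage(&crop);

Example #6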
void WindscreenLocator::locateBottomTop()
{
    cvSetImageROI(imgGradH, cvRect(winLeft, 0, winRight - winLeft, imgGradH->height));
    histoStat(imgGradH, NULL, horizon2Y, thresholdGrad, 0);
    cvResetImageROI(imgGradH);

    histoSmooth(horizon2Y, imgGrad->height, 5);
    histoSmooth(horizon2Y, imgGrad->height, 10);
    histoSmooth(horizon2Y, imgGrad->height, 10);

    int yCrest[20];
    int yCrestNr = 20;
    histoCrest(horizon2Y, imgGrad->height, yCrest, yCrestNr);

    int crest = 0;
    bool usePrediction = false;
    winTop = 0;
    winBottom = imgRGB->height;

//	if(plate->isValid() && reviser != NULL){
    if(plate->isValid()
            && (plate->getMiddlePoint().x > winLeft
                    && plate->getMiddlePoint().x < winRight)){
        int plateWidth = plate->getPlateWidth();
        if(reviser && reviser->getPlateWidth() > 0)
            plateWidth = reviser->getPlateWidth();
        CvPoint mid = plate->getMiddlePoint();
        if(winRight - winLeft > 4.5 * plateWidth)
            winBottom = mid.y - 80 / 35.0 * plateWidth;
        else
            winBottom = mid.y - 60.0/35.0 * plateWidth;

//		crest = findLikelytCrest(horizon2Y, imgGrad->height, winBottom, winBottom - plateWidth * 0.3, winBottom + plateWidth * 0.3);
        pdebug("winBottom=%d, crest=%d\n", winBottom, crest);
        if(crest > 0 && crest < imgRGB->height)
            winBottom = crest;
        if(reviser)
            reviser->statLowerBorder(plate, winLeft, winRight, winBottom);
    }else if(reviser != NULL){
        int plateWidth = reviser->getPlateWidth();
        usePrediction = true;
        pdebug("License not Found, Use Prediction.\n");
        winBottom = reviser->predictLowerBorder(plate, winLeft, winRight);
//		crest = findLikelytCrest(horizon2Y, imgGrad->height, winBottom, winBottom - plateWidth * 0.3, winBottom + plateWidth * 0.3);
        crest = reviser->predictLowerBorder(plate, winLeft, winRight);	//
        if(crest == 0){
            crest = imgRGB->height - 1;
            winTop = imgRGB->height * 0.25;
            winBottom = imgRGB->height * 0.9;

            winBottom = std::max(0, winBottom);
            winBottom = std::min(imgRGB->height - 1, winBottom);

            winTop = std::min(winTop, winBottom - 1);
            winTop = std::max(0, winTop);
            return;
        }
        if(crest <= winBottom)
            crest += 0.5 * plateWidth;	// widened tolerance for the prediction
        if(crest > 0 && crest < imgRGB->height)
            winBottom = crest;
    }

    if(!usePrediction){
        if(plate->isValid())
            winTop = winBottom - (winRight - winLeft) * 0.65;
        else
            winTop = 0;
    }else{
//		winTop = winBottom - (winRight - winLeft) * 0.60;	// widened tolerance for the prediction
        winTop = winBottom - (winRight - winLeft) * 0.75;	// widened tolerance for the prediction

    }

    winBottom = std::max(0, winBottom);
    winBottom = std::min(imgRGB->height - 1, winBottom);

    winTop = std::min(winTop, winBottom - 1);
    winTop = std::max(0, winTop);
}
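histoStat, histoSmooth, and histoCrest are project helpers not shown here; from its call signature, histoSmooth plausibly applies a moving-average (box) filter of the given window over the histogram. A hypothetical sketch under that assumption; the real implementation may differ:

    // Hypothetical box smoother matching histoSmooth(hist, len, win)'s signature.
    static void histoSmoothSketch(int* hist, int len, int win)
    {
        int* tmp = (int*)malloc(len * sizeof(int));
        if (tmp == NULL)
            return;
        for (int i = 0; i < len; i++) {
            long sum = 0;
            int n = 0;
            for (int k = -win; k <= win; k++) {       // average over [i-win, i+win]
                int j = i + k;
                if (j >= 0 && j < len) { sum += hist[j]; n++; }
            }
            tmp[i] = (int)(sum / n);
        }
        memcpy(hist, tmp, len * sizeof(int));         // write back in place
        free(tmp);
    }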
Example #7
int main()
{
/*********************************** Parameters used by the main program ***********************************/ 
	IplImage * srcImg = NULL;						// each color source frame read from the camera
	IplImage * img = NULL;							// each grayscale source frame read from the camera
	CvCapture * capture;							// pointer to the CvCapture structure
	CvMemStorage* storage = cvCreateMemStorage(0);	// memory block holding the rectangle sequence
	CvSeq* objects = NULL;							// rectangles of the detected faces
	double scale_factor = 1.2;						// scale factor of the search window
	int min_neighbors = 3;							// minimum number of neighboring rectangles that make up a detection
	int flags = 0;									// operation mode
	CvSize min_size = cvSize(40, 40);				// minimum size of the detection window
	int i, globalK;								
	int hist[256];									// array holding the histogram
	int pixelSum;
	int threshold;									// optimal binarization threshold
	clock_t start, stop;							// timing parameters
	IplImage* faceImg = NULL;						// the detected face image
	int temp = 0;									// temporary variable
	int temp1 = 0;									// temporary variable
	int count = 0;									// counting variable
	int flag = 0;									// flag variable
	int * tempPtr = NULL;							// temporary pointer
	CvRect* largestFaceRect;						// largest detected face rectangle
	int * horiProject = NULL;						// horizontal projection result (array pointer)
	int * vertProject = NULL;						// vertical projection result (array pointer)
	int * subhoriProject = NULL;					// horizontal projection result (array pointer)
	int * subvertProject = NULL;					// vertical projection result (array pointer)
	int WIDTH;										// image width
	int HEIGHT;										// image height
	int rEyeCol = 0;								// column of the right eye
	int lEyeCol = 0;								// column of the left eye
	int lEyeRow = 0;								// row of the left eye
	int rEyeRow = 0;								// row of the right eye
	int eyeBrowThreshold;							// threshold separating the eyebrow from the eye
	uchar* rowPtr = NULL;							// pointer to each image row
	uchar* rowPtrTemp = NULL;						// pointer to each image row, intermediate variable
	IplImage* eyeImg = NULL;						// eye image
	CvRect eyeRect;									// rectangle of the cropped eye region
	CvRect eyeRectTemp;								// temporary rectangle
	IplImage* lEyeImg = NULL;						// left-eye image
	IplImage* rEyeImg = NULL;						// right-eye image
	IplImage* lEyeImgNoEyebrow = NULL;				// left-eye image with the eyebrow removed
	IplImage* rEyeImgNoEyebrow = NULL;				// right-eye image with the eyebrow removed
	IplImage* lEyeballImg = NULL;					// final segmented left eye-socket image
	IplImage* rEyeballImg = NULL;					// final segmented right eye-socket image
	IplImage* lMinEyeballImg = NULL;				// final minimal segmented left eye-socket image
	IplImage* rMinEyeballImg = NULL;				// final minimal segmented right eye-socket image
	int lMinEyeballBlackPixel;						// number of black pixels in the minimal left eye socket
	int rMinEyeballBlackPixel;						// number of black pixels in the minimal right eye socket
	double lMinEyeballBlackPixelRate;				// proportion of black pixels in the minimal left eye socket
	double rMinEyeballBlackPixelRate;				// proportion of black pixels in the minimal right eye socket
	double lMinEyeballRectShape;					// width-to-height ratio of the minimal left eye-socket rectangle
	double rMinEyeballRectShape;					// width-to-height ratio of the minimal right eye-socket rectangle
	double lMinEyeballBeta;							// black-pixel ratio in the middle 1/2 region of the minimal left eye socket
	double rMinEyeballBeta;							// black-pixel ratio in the middle 1/2 region of the minimal right eye socket
	int lEyeState;									// left-eye state: open (0), closed (1)
	int rEyeState;									// right-eye state: open (0), closed (1)
	int eyeState;									// combined eye state: open (0), closed (1)
	int eyeCloseNum = 0;							// total number of closed-eye frames in one detection pass
	int eyeCloseDuration = 0;						// current run of consecutive closed-eye detections in one pass
	int maxEyeCloseDuration = 0;					// longest run of consecutive closed-eye detections in one pass
	int failFaceNum = 0;							// total number of frames with no face detected in one pass
	int failFaceDuration = 0;						// current run of consecutive no-face frames in one pass
	int maxFailFaceDuration = 0;					// longest run of consecutive no-face frames in one pass
	int fatigueState = 1;							// driver state: fatigued driving (1), normal driving (0)

	/********************* Create the display windows **************************/
	cvNamedWindow("img", CV_WINDOW_AUTOSIZE);		// grayscale source image
	cvNamedWindow("segmented face", 1);				// face with the rough eye region segmented out
	cvNamedWindow("rough left eye region", 1);		// rough left-eye region
	cvNamedWindow("rough right eye region", 1);		// rough right-eye region
	cvNamedWindow("l_binary");						// binarized image of the rough left-eye region
	cvNamedWindow("r_binary");						// binarized image of the rough right-eye region
	cvNamedWindow("lEyeImgNoEyebrow", 1);			// left-eye image with the eyebrow region removed
	cvNamedWindow("rEyeImgNoEyebrow", 1);			// right-eye image with the eyebrow region removed
	cvNamedWindow("lEyeCenter", 1);					// left-eye image with the iris center marked
	cvNamedWindow("rEyeCenter", 1);					// right-eye image with the iris center marked
	cvNamedWindow("lEyeballImg", 1);				// left-eye image re-cropped to the 1/2 region of lEyeImgNoEyebrow
	cvNamedWindow("rEyeballImg", 1);				// right-eye image re-cropped to the 1/2 region of rEyeImgNoEyebrow
	cvNamedWindow("lkai", 1);						// left-eye image after the morphological opening
	cvNamedWindow("rkai", 1);						// right-eye image after the morphological opening
	cvNamedWindow("lMinEyeballImg", 1);				// left-iris image shrunk to its bounding region
	cvNamedWindow("rMinEyeballImg", 1);				// right-iris image shrunk to its bounding region
	
	
	capture = cvCreateCameraCapture(0);
	if( capture == NULL )
		return -1;

	for( globalK = 1; globalK <= DETECTTIME; globalK ++ ){
		start = clock();
		srcImg = cvQueryFrame(capture);
		if( !srcImg )
			continue;
		img = cvCreateImage(cvGetSize(srcImg), IPL_DEPTH_8U, 1);
		cvCvtColor(srcImg, img, CV_BGR2GRAY);
		cvShowImage("img", img);
		cvWaitKey(20);

	/************************************* Detect the face ****************************************/
		cvClearMemStorage(storage);	// reset the storage top to the head of the block, i.e. clear the stored contents
		detectFace(
			img,					// grayscale image
			objects,				// output parameter: rectangles of the detected faces
			storage,				// memory area holding the rectangles
			scale_factor,			// scale factor of the search window
			min_neighbors,			// minimum number of neighboring rectangles that make up a detection
			flags,					// operation mode
			cvSize(20, 20)			// minimum size of the detection window
		);

		// extract the face region
		if ( !objects->total ){
			printf("Failed to detect face!\n");		// debug output
			failFaceNum ++;							// count frames with no face detected
			failFaceDuration ++;					// count consecutive frames with no face detected

			// if a pass contains only closed eyes and undetected faces (no open-eye frames), maxEyeCloseDuration would stay 0, so update it here
			(eyeCloseDuration > maxEyeCloseDuration) ? maxEyeCloseDuration = eyeCloseDuration : maxEyeCloseDuration;
			eyeCloseDuration = 0;

			if( globalK == DETECTTIME ){
				// if no face was detected during the entire pass, maxFailFaceDuration must be updated here 
				(failFaceDuration > maxFailFaceDuration) ? maxFailFaceDuration = failFaceDuration : maxFailFaceDuration;

				printf("\nFATIGUETHRESHOLD: %d\n", FATIGUETHRESHOLD);
				printf("eyeCloseNum: %d\tmaxEyeCloseDuration: %d\n", eyeCloseNum, maxEyeCloseDuration);
				printf("failFaceNum: %d\tmaxFailFaceDuration: %d\n", failFaceNum, maxFailFaceDuration);
				
				// classify the fatigue state
				fatigueState = recoFatigueState(FATIGUETHRESHOLD, eyeCloseNum, maxEyeCloseDuration, failFaceNum, maxFailFaceDuration);
				if( fatigueState == 1 )
					printf("The driver is in a fatigued driving state\n\n");
				else if( fatigueState == 0 )
					printf("The driver is in a normal driving state\n\n");

				// zero the variables before entering the next detection pass
				globalK = 0;
				lEyeState = 1;
				rEyeState = 1;
				eyeState = 1;
				eyeCloseNum = 0;
				eyeCloseDuration = 0;
				maxEyeCloseDuration = 0;
				failFaceNum = 0;
				failFaceDuration = 0;
				maxFailFaceDuration = 0;
				fatigueState = 1;

				cvWaitKey(0);
			}

			continue;
		}
		else{
			// track the longest run of consecutive no-face frames
			(failFaceDuration > maxFailFaceDuration) ? maxFailFaceDuration = failFaceDuration : maxFailFaceDuration;
			failFaceDuration = 0;

			// find the largest detected face rectangle
			temp = 0;
			for(i = 0; i < (objects ? objects->total : 0); i ++) {
				CvRect* rect = (CvRect*) cvGetSeqElem(objects, i);
				if ( (rect->height * rect->width) > temp ){
					largestFaceRect = rect;
					temp = rect->height * rect->width;
				}
			}

			// use prior knowledge of face geometry to segment the rough eye region
			temp = largestFaceRect->width / 8;
			largestFaceRect->x = largestFaceRect->x + temp;
			largestFaceRect->width = largestFaceRect->width - 3*temp/2;
			largestFaceRect->height = largestFaceRect->height / 2;
			largestFaceRect->y = largestFaceRect->y + largestFaceRect->height / 2;
			largestFaceRect->height = largestFaceRect->height / 2;

			cvSetImageROI(img, *largestFaceRect);		// set the ROI to the largest detected face region
			faceImg = cvCreateImage(cvSize(largestFaceRect->width, largestFaceRect->height), IPL_DEPTH_8U, 1);
			cvCopy(img, faceImg, NULL);
			cvResetImageROI(img);						// reset the ROI
			cvShowImage("segmented face", faceImg);

			eyeRectTemp = *largestFaceRect;
			// use prior knowledge of face geometry to segment the rough left-eye region
			largestFaceRect->width /= 2;
			cvSetImageROI(img, *largestFaceRect);		// set the ROI to the rough left-eye region
			lEyeImg = cvCreateImage(cvSize(largestFaceRect->width, largestFaceRect->height), IPL_DEPTH_8U, 1);
			cvCopy(img, lEyeImg, NULL);
			cvResetImageROI(img);						// reset the ROI
			cvShowImage("rough left eye region", lEyeImg);

			// use prior knowledge of face geometry to segment the rough right-eye region
 			eyeRectTemp.x += eyeRectTemp.width / 2;
			eyeRectTemp.width /= 2;
			cvSetImageROI(img, eyeRectTemp);		// set the ROI to the rough right-eye region
			rEyeImg = cvCreateImage(cvSize(eyeRectTemp.width, eyeRectTemp.height), IPL_DEPTH_8U, 1);
			cvCopy(img, rEyeImg, NULL);
			cvResetImageROI(img);						// reset the ROI
			cvShowImage("rough right eye region", rEyeImg);

		/********************************** Binarization ***********************************/
			// image enhancement: histogram equalization is already applied once inside detectFace; a nonlinear point operation can also be tried
			/*** binarize the rough left-eye region ***/
			//lineTrans(lEyeImg, lEyeImg, 1.5, 0);		// linear point operation
			cvSmooth(lEyeImg, lEyeImg, CV_MEDIAN);		// median filter, default window size 3*3
			nonlineTrans(lEyeImg, lEyeImg, 0.8);		// nonlinear point operation
			memset(hist, 0, sizeof(hist));				// zero the histogram array
			histogram(lEyeImg, hist);					// compute the image histogram
			// compute the optimal threshold
			pixelSum = lEyeImg->width * lEyeImg->height;
			threshold = ostuThreshold(hist, pixelSum, 45);
			cvThreshold(lEyeImg, lEyeImg, threshold, 255, CV_THRESH_BINARY);// binarize the image
			// show the binarized image
			cvShowImage("l_binary",lEyeImg);

			/*** binarize the rough right-eye region ***/
			//lineTrans(rEyeImg, rEyeImg, 1.5, 0);		// linear point operation
			cvSmooth(rEyeImg, rEyeImg, CV_MEDIAN);		// median filter, default window size 3*3
			nonlineTrans(rEyeImg, rEyeImg, 0.8);		// nonlinear point operation
			memset(hist, 0, sizeof(hist));				// zero the histogram array
			histogram(rEyeImg, hist);					// compute the image histogram
			// compute the optimal threshold
			pixelSum = rEyeImg->width * rEyeImg->height;
			threshold = ostuThreshold(hist, pixelSum, 45);
			cvThreshold(rEyeImg, rEyeImg, threshold, 255, CV_THRESH_BINARY);// binarize the image
			// show the binarized image
			cvShowImage("r_binary",rEyeImg);

		/***************************************** Detect the eyes ********************************************/
			/** if there is a clear eyebrow region, segment it off **/

			// split off the left eyebrow
			HEIGHT = lEyeImg->height;
			WIDTH = lEyeImg->width;
			// allocate memory
			horiProject = (int*)malloc(HEIGHT * sizeof(int));
			vertProject = (int*)malloc(WIDTH * sizeof(int));
			if( horiProject == NULL || vertProject == NULL ){
				printf("Failed to allocate memory\n");
				cvWaitKey(0);
				return -1;
			}
			// zero the memory
			for(i = 0; i < HEIGHT; i ++)
				*(horiProject + i) = 0;
			for(i = 0; i < WIDTH; i ++)
				*(vertProject + i) = 0;
			histProject(lEyeImg, horiProject, vertProject);				// compute the histogram projections
			lEyeRow = removeEyebrow(horiProject, WIDTH, HEIGHT, 10);	// find the row separating the eyebrow from the eye socket

			// split off the right eyebrow
			HEIGHT = rEyeImg->height;
			WIDTH = rEyeImg->width;
			// allocate memory
			horiProject = (int*)malloc(HEIGHT * sizeof(int));
			vertProject = (int*)malloc(WIDTH * sizeof(int));
			if( horiProject == NULL || vertProject == NULL ){
				printf("Failed to allocate memory\n");
				cvWaitKey(0);
				return -1;
			}
			// zero the memory
			for(i = 0; i < HEIGHT; i ++)
				*(horiProject + i) = 0;
			for(i = 0; i < WIDTH; i ++)
				*(vertProject + i) = 0;
			histProject(rEyeImg, horiProject, vertProject);				// compute the histogram projections
			rEyeRow = removeEyebrow(horiProject, WIDTH, HEIGHT, 10);	// find the row separating the eyebrow from the eye socket

			// show the rough eye regions with the eyebrows removed
			eyeRect = cvRect(0, lEyeRow, lEyeImg->width, (lEyeImg->height - lEyeRow));		// rectangle of the eyebrow-free eye socket within lEyeImg
			cvSetImageROI(lEyeImg, eyeRect);							// set the ROI to the eyebrow-free eye socket
			lEyeImgNoEyebrow = cvCreateImage(cvSize(eyeRect.width, eyeRect.height), IPL_DEPTH_8U, 1);
			cvCopy(lEyeImg, lEyeImgNoEyebrow, NULL);
			cvShowImage("lEyeImgNoEyebrow", lEyeImgNoEyebrow);

			eyeRectTemp = cvRect(0, rEyeRow, rEyeImg->width, (rEyeImg->height - rEyeRow));	// rectangle of the eyebrow-free eye socket within rEyeImg
			cvSetImageROI(rEyeImg, eyeRectTemp);						// set the ROI to the eyebrow-free eye socket
			rEyeImgNoEyebrow = cvCreateImage(cvSize(eyeRectTemp.width, eyeRectTemp.height), IPL_DEPTH_8U, 1);
			cvCopy(rEyeImg, rEyeImgNoEyebrow, NULL);
			cvShowImage("rEyeImgNoEyebrow", rEyeImgNoEyebrow);

			///////////////// locate the eye center's row and column in the eyebrow-free images ///////////////////
			HEIGHT = lEyeImgNoEyebrow->height;
			WIDTH = lEyeImgNoEyebrow->width;
			// allocate memory
			subhoriProject = (int*)malloc(HEIGHT * sizeof(int));
			subvertProject = (int*)malloc(WIDTH * sizeof(int));
			if( subhoriProject == NULL || subvertProject == NULL ){
				printf("Failed to allocate memory\n");
				cvWaitKey(0);
				return -1;
			}
			// zero the memory
			for(i = 0; i < HEIGHT; i ++)
				*(subhoriProject + i) = 0;
			for(i = 0; i < WIDTH; i ++)
				*(subvertProject + i) = 0;
	
			histProject(lEyeImgNoEyebrow, subhoriProject, subvertProject);	// recompute the integral projections on the segmented left-eye image
			lEyeRow = getEyePos(subhoriProject, HEIGHT, HEIGHT/5);	// locate the row of the left eye
			lEyeCol = getEyePos(subvertProject, WIDTH, WIDTH/5);	// locate the column of the left eye


			HEIGHT = rEyeImgNoEyebrow->height;
			WIDTH = rEyeImgNoEyebrow->width;
			// allocate memory
			subhoriProject = (int*)malloc(HEIGHT * sizeof(int));
			subvertProject = (int*)malloc(WIDTH * sizeof(int));
			if( subhoriProject == NULL || subvertProject == NULL ){
				printf("Failed to allocate memory\n");
				cvWaitKey(0);
				return -1;
			}
			// zero the memory
			for(i = 0; i < HEIGHT; i ++)
				*(subhoriProject + i) = 0;
			for(i = 0; i < WIDTH; i ++)
				*(subvertProject + i) = 0;
			histProject(rEyeImgNoEyebrow, subhoriProject, subvertProject);	// recompute the integral projections on the segmented right-eye image
			rEyeRow = getEyePos(subhoriProject, HEIGHT, HEIGHT/5);	// locate the row of the right eye
			rEyeCol = getEyePos(subvertProject, WIDTH,  WIDTH/5);	// locate the column of the right eye
			/*
			printf("************ image of eyes without eyebrow ***********\n");
			printf("Left eye: width: %d\theight: %d\n", lEyeImgNoEyebrow->width, lEyeImgNoEyebrow->height);
			printf("Right eye: width: %d\theight: %d\n", rEyeImgNoEyebrow->width, rEyeImgNoEyebrow->height);
			printf("Right eye: WIDTH: %d\tHEIGHT: %d\n", WIDTH, HEIGHT);
			printf("Centers positon of Eyes. lEyeRow: %d lEyeCol: %d\trEyeRow: %d rEyeCol: %d\n\n", lEyeRow, lEyeCol, rEyeRow, rEyeCol);
			*/
			// mark the eye positions
			cvCircle(lEyeImgNoEyebrow, cvPoint(lEyeCol, lEyeRow), 3, CV_RGB(0,0,255), 1, 8, 0);
			cvCircle(rEyeImgNoEyebrow, cvPoint(rEyeCol, rEyeRow), 3, CV_RGB(0,0,255), 1, 8, 0);
			cvShowImage("lEyeCenter", lEyeImgNoEyebrow);
			cvShowImage("rEyeCenter", rEyeImgNoEyebrow);
	

		/********************************** Determine whether the eyes are open or closed ***********************************/
	
			////////////////// segment the rough eye socket centered on the located eye center /////////////////
			// left eye socket
			HEIGHT = lEyeImgNoEyebrow->height;
			WIDTH = lEyeImgNoEyebrow->width;
			// compute the rough eye-socket region: eyeRect
			eyeRect = cvRect(0, 0, WIDTH, HEIGHT);
			calEyeSocketRegion(&eyeRect, WIDTH, HEIGHT, lEyeCol, lEyeRow);
			/*
			printf("************lEyeImgNoEyebrow************\n");
			printf("width: %d\theight: %d\n", WIDTH, HEIGHT);
			printf("**********lEyeballRect**********\n");
			printf("eyeRect.x = %d\teyeRect.width = %d\n", eyeRect.x, eyeRectTemp.width);
			printf("eyeRect.y = %d\teyeRect.height = %d\n\n", eyeRectTemp.y, eyeRectTemp.height);
			*/
			cvSetImageROI(lEyeImgNoEyebrow, eyeRect);		// set the ROI to the detected eye-socket region
			lEyeballImg = cvCreateImage(cvGetSize(lEyeImgNoEyebrow), IPL_DEPTH_8U, 1);
			cvCopy(lEyeImgNoEyebrow, lEyeballImg, NULL);
			cvResetImageROI(lEyeImgNoEyebrow);
			cvShowImage("lEyeballImg", lEyeballImg);

			// right eye socket
			HEIGHT = rEyeImgNoEyebrow->height;
			WIDTH = rEyeImgNoEyebrow->width;
			// compute the rough eye-socket region: eyeRect
			eyeRect = cvRect(0, 0, WIDTH, HEIGHT);
			calEyeSocketRegion(&eyeRect, WIDTH, HEIGHT, rEyeCol, rEyeRow);
			/*
			printf("************rEyeImgNoEyebrow************\n");
			printf("width: %d\theight: %d\n", WIDTH, HEIGHT);
			printf("**********rEyeballRect**********\n");
			printf("eyeRect.x = %d\teyeRect.width = %d\n", eyeRect.x, eyeRect.width);
			printf("eyeRect.y = %d\teyeRect.height = %d\n\n", eyeRect.y, eyeRect.height);
			*/
			cvSetImageROI(rEyeImgNoEyebrow, eyeRect);		// set the ROI to the detected eye-socket region
			rEyeballImg = cvCreateImage(cvGetSize(rEyeImgNoEyebrow), IPL_DEPTH_8U, 1);
			cvCopy(rEyeImgNoEyebrow, rEyeballImg, NULL);
			cvResetImageROI(rEyeImgNoEyebrow);
			cvShowImage("rEyeballImg", rEyeballImg);

			/////////////////////////// morphological opening (erode, then dilate) ///////////////////////////
			cvErode(lEyeballImg, lEyeballImg, NULL, 2);		// erode the image
			cvDilate(lEyeballImg, lEyeballImg, NULL, 2);	// dilate the image
			cvShowImage("lkai", lEyeballImg);

			cvErode(rEyeballImg, rEyeballImg, NULL, 1);		// erode the image
			cvDilate(rEyeballImg, rEyeballImg, NULL, 1);	// dilate the image
			cvShowImage("rkai", rEyeballImg);

			/////////////////// compute the minimal eye bounding rectangles ////////////////////
	
			///////////////////////////left eye
			HEIGHT = lEyeballImg->height;
			WIDTH = lEyeballImg->width;

			// allocate memory
			subhoriProject = (int*)malloc(HEIGHT * sizeof(int));
			subvertProject = (int*)malloc(WIDTH * sizeof(int));
			if( subhoriProject == NULL || subvertProject == NULL ){
				printf("Failed to allocate memory\n");
				cvWaitKey(0);
				return -1;
			}
			// zero the memory
			for(i = 0; i < HEIGHT; i ++)
				*(subhoriProject + i) = 0;
			for(i = 0; i < WIDTH; i ++)
				*(subvertProject + i) = 0;
			histProject(lEyeballImg, subhoriProject, subvertProject);
			// compute the minimal left-eye rectangle
			eyeRectTemp = cvRect(0, 0 , 1, 1);		// initialize
			getEyeMinRect(&eyeRectTemp, subhoriProject, subvertProject, WIDTH, HEIGHT, 5, 3);
			/*
			printf("eyeRectTemp.y: %d\n", eyeRectTemp.y);
			printf("eyeRectTemp.height: %d\n", eyeRectTemp.height);
			printf("eyeRectTemp.x: %d\n", eyeRectTemp.x);
			printf("eyeRectTemp.width: %d\n", eyeRectTemp.width);
			*/
			// compute the aspect ratio of the minimal left-eye rectangle; used later when judging the eye state
			lMinEyeballRectShape = (double)eyeRectTemp.width / (double)eyeRectTemp.height;
			//printf("\nlMinEyeballRectShape: %f\n", lMinEyeballRectShape);

			cvSetImageROI(lEyeballImg, eyeRectTemp);		// set the ROI to the minimal detected eye socket
			lMinEyeballImg = cvCreateImage(cvGetSize(lEyeballImg), IPL_DEPTH_8U, 1);
			cvCopy(lEyeballImg, lMinEyeballImg, NULL);
			cvResetImageROI(lEyeballImg);
			cvShowImage("lMinEyeballImg", lMinEyeballImg);

			////////////////////////  count the black pixels of the left eye  /////////////////////
			HEIGHT = lMinEyeballImg->height;
			WIDTH = lMinEyeballImg->width;

			// allocate memory
			subhoriProject = (int*)malloc(HEIGHT * sizeof(int));
			subvertProject = (int*)malloc(WIDTH * sizeof(int));
			if( subhoriProject == NULL || subvertProject == NULL ){
				printf("Failed to allocate memory\n");
				cvWaitKey(0);
				return -1;
			}
			// zero the memory
			for(i = 0; i < HEIGHT; i ++)
				*(subhoriProject + i) = 0;
			for(i = 0; i < WIDTH; i ++)
				*(subvertProject + i) = 0;

			histProject(lMinEyeballImg, subhoriProject, subvertProject);

			// count the black pixels in lMinEyeballImg
			temp = 0;	// white pixel count
			for( i = 0; i < WIDTH; i ++ )
				temp += *(subvertProject + i);
			temp /= 255;
			lMinEyeballBlackPixel = WIDTH * HEIGHT - temp;
			lMinEyeballBlackPixelRate = (double)lMinEyeballBlackPixel / (double)(WIDTH * HEIGHT);
			//printf("WIDTH * HEIGHT: %d\tlMinEyeballBlackSum;%d\n\n", WIDTH * HEIGHT, lMinEyeballBlackPixel);
			//printf("lMinEyeballBlackPixelRate;%f\n\n", lMinEyeballBlackPixelRate);

			// ratio of black pixels within the middle 1/2 region of lMinEyeballImg
			lMinEyeballBeta = 0;
			lMinEyeballBeta = calMiddleAreaBlackPixRate(subvertProject, &eyeRectTemp, WIDTH, HEIGHT, lEyeCol, lMinEyeballBlackPixel);

			//printf("lMinEyeballBeta; %f\n\n", lMinEyeballBeta);



			//////////////////////////////////// right eye
			HEIGHT = rEyeballImg->height;
			WIDTH = rEyeballImg->width;
			// allocate memory
			subhoriProject = (int*)malloc(HEIGHT * sizeof(int));
			subvertProject = (int*)malloc(WIDTH * sizeof(int));
			if( subhoriProject == NULL || subvertProject == NULL ){
				printf("Failed to allocate memory\n");
				cvWaitKey(0);
				return -1;
			}
			// zero the memory
			for(i = 0; i < HEIGHT; i ++)
				*(subhoriProject + i) = 0;
			for(i = 0; i < WIDTH; i ++)
				*(subvertProject + i) = 0;
			histProject(rEyeballImg, subhoriProject, subvertProject);

			// compute the minimal right-eye rectangle
			eyeRectTemp = cvRect(0, 0 , 1, 1);
			getEyeMinRect(&eyeRectTemp, subhoriProject, subvertProject, WIDTH, HEIGHT, 5, 3);

			// compute the aspect ratio of the minimal right-eye rectangle; used later when judging the eye state
			rMinEyeballRectShape = (double)eyeRectTemp.width / (double)eyeRectTemp.height;
			//printf("\nrMinEyeballRectShape: %f\n", rMinEyeballRectShape);

			cvSetImageROI(rEyeballImg, eyeRectTemp);		// set the ROI to the minimal detected eye socket
			rMinEyeballImg = cvCreateImage(cvGetSize(rEyeballImg), IPL_DEPTH_8U, 1);
			cvCopy(rEyeballImg, rMinEyeballImg, NULL);
			cvResetImageROI(rEyeballImg);
			cvShowImage("rMinEyeballImg", rMinEyeballImg);

			////////////////////////  count the black pixels of the right eye  /////////////////////
			HEIGHT = rMinEyeballImg->height;
			WIDTH = rMinEyeballImg->width;

			// allocate memory
			subhoriProject = (int*)malloc(HEIGHT * sizeof(int));
			subvertProject = (int*)malloc(WIDTH * sizeof(int));
			if( subhoriProject == NULL || subvertProject == NULL ){
				printf("Failed to allocate memory\n");
				cvWaitKey(0);
				return -1;
			}
			// zero the memory
			for(i = 0; i < HEIGHT; i ++)
				*(subhoriProject + i) = 0;
			for(i = 0; i < WIDTH; i ++)
				*(subvertProject + i) = 0;
			histProject(rMinEyeballImg, subhoriProject, subvertProject);// compute the histogram integral projections

			// count the black pixels in rMinEyeballImg
			temp = 0;
			for( i = 0; i < WIDTH; i ++ )
				temp += *(subvertProject + i);
			temp /= 255;
			rMinEyeballBlackPixel = WIDTH * HEIGHT - temp;
			rMinEyeballBlackPixelRate = (double)rMinEyeballBlackPixel / (double)(WIDTH * HEIGHT);
			//printf("WIDTH * HEIGHT: %d\trMinEyeballBlackSum;%d\n\n", WIDTH * HEIGHT, rMinEyeballBlackPixel);
			//printf("rMinEyeballBlackPixelRate; %f\n\n", rMinEyeballBlackPixelRate);

			// ratio of black pixels within the middle 1/2 region of rMinEyeballImg
			rMinEyeballBeta = 0;
			rMinEyeballBeta = calMiddleAreaBlackPixRate(subvertProject, &eyeRectTemp, WIDTH, HEIGHT, rEyeCol, rMinEyeballBlackPixel);

			//printf("temp:%d\trMinEyeballBeta; %f\n\n", temp, rMinEyeballBeta);

			// judge whether the eyes are open or closed
			lEyeState = 1;		// left-eye state, default closed
			rEyeState = 1;		// right-eye state, default closed
			eyeState = 1;		// combined eye state, default closed
			if( lMinEyeballBlackPixel > 50)
				lEyeState = getEyeState(lMinEyeballRectShape, lMinEyeballBlackPixelRate, lMinEyeballBeta);
			else
				lEyeState = 1;

			if( rMinEyeballBlackPixel > 50)
				rEyeState = getEyeState(rMinEyeballRectShape, rMinEyeballBlackPixelRate, rMinEyeballBeta);
			else
				rEyeState = 1;
			(lEyeState + rEyeState) == 2 ? eyeState = 1 : eyeState=0;

			// count eye closures
			if( eyeState == 1 ){
				eyeCloseNum ++;					// accumulate eyeCloseNum, the number of closed-eye frames
				eyeCloseDuration ++;
				if( globalK == DETECTTIME){
					// handles a pass consisting entirely of closed-eye frames, with no open-eye or no-face frames
					(eyeCloseDuration > maxEyeCloseDuration) ? maxEyeCloseDuration = eyeCloseDuration : maxEyeCloseDuration;
					eyeCloseDuration = 0;
				}
			}
			else{
				(eyeCloseDuration > maxEyeCloseDuration) ? maxEyeCloseDuration = eyeCloseDuration : maxEyeCloseDuration;
				eyeCloseDuration = 0;
			}
		} // end of the if/else that checks whether a face was detected

	/*	
		printf("\n************** eye state ***************\n");
		printf("lEyeState: %d\trEyeState: %d\n", lEyeState, rEyeState);
		printf("eyeState: %d\n\n\n\n", eyeState);
	*/

		// timing: duration of one loop iteration
		stop = clock();
		//printf("run time: %f\n", (double)(stop - start) / CLOCKS_PER_SEC);

		printf("eyeState: %d\n", eyeState);

		// reset the loop variables and enter the next detection pass
		if( globalK == DETECTTIME ){
			printf("\nFATIGUETHRESHOLD*****: %d\n", FATIGUETHRESHOLD);
			printf("eyeCloseNum: %d\tmaxEyeCloseDuration: %d\n", eyeCloseNum, maxEyeCloseDuration);
			printf("failFaceNum: %d\tmaxFailFaceDuration: %d\n", failFaceNum, maxFailFaceDuration);

			// classify the fatigue state
			fatigueState = recoFatigueState(FATIGUETHRESHOLD, eyeCloseNum, maxEyeCloseDuration, failFaceNum, maxFailFaceDuration);
			if( fatigueState == 1 )
				printf("The driver is in a fatigued driving state\n\n");
			else if( fatigueState == 0 )
				printf("The driver is in a normal driving state\n\n");

			// zero the variables before entering the next detection pass
			globalK = 0;
			lEyeState = 1;
			rEyeState = 1;
			eyeState = 1;
			eyeCloseNum = 0;
			eyeCloseDuration = 0;
			maxEyeCloseDuration = 0;
			failFaceNum = 0;
			failFaceDuration = 0;
			maxFailFaceDuration = 0;
			fatigueState = 1;
			char c = cvWaitKey(0);
			if( c == 27 )
				break;
			else
				continue;
		}
	} // end of the detection for loop

	// release resources
	cvDestroyWindow("segmented face");
	cvDestroyWindow("rough left eye region");
	cvDestroyWindow("rough right eye region");
	cvDestroyWindow("l_binary");
	cvDestroyWindow("r_binary");
	cvDestroyWindow("lEyeImgNoEyebrow");
	cvDestroyWindow("rEyeImgNoEyebrow");
	cvDestroyWindow("lEyeCenter");
	cvDestroyWindow("rEyeCenter");	
	cvDestroyWindow("lEyeballImg");
	cvDestroyWindow("rEyeballImg");
	cvDestroyWindow("lkai");
	cvDestroyWindow("rkai");
	cvDestroyWindow("lMinEyeballImg");
	cvDestroyWindow("rMinEyeballImg");
	cvReleaseMemStorage(&storage);
	cvReleaseImage(&eyeImg);
	free(horiProject);
	free(vertProject);
	free(subhoriProject);
	free(subvertProject);

	return 0;
}
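The projection buffers in the loop above are repeatedly malloc'ed and zeroed by hand, and each reallocation leaks the previous block. calloc collapses the allocate-and-zero pair; a sketch of the same step, freeing the buffers once they are consumed:

	// calloc returns zero-initialized memory, replacing malloc plus the zeroing loops.
	int* horiProject = (int*)calloc(HEIGHT, sizeof(int));
	int* vertProject = (int*)calloc(WIDTH, sizeof(int));
	if( horiProject == NULL || vertProject == NULL ){
		printf("Failed to allocate memory\n");
		return -1;
	}
	histProject(lEyeImg, horiProject, vertProject);	// compute the projections as above
	/* ... use the projections ... */
	free(horiProject);	// free before the next pass to avoid the leak
	free(vertProject);

Example #8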
//=========================================
CvRect camKalTrack(IplImage* frame, camshift_kalman_tracker& camKalTrk) {
//=========================================
	if (!frame) {
		printf("Input frame empty!\n");
		return camKalTrk.trackWindow;
	}

	cvCopy(frame, camKalTrk.image, 0);
	cvCvtColor(camKalTrk.image, camKalTrk.hsv, CV_BGR2HSV); // BGR to HSV

	if (camKalTrk.trackObject) {
		int _vmin = vmin, _vmax = vmax;
		cvInRangeS(camKalTrk.hsv, cvScalar(0, smin, MIN(_vmin,_vmax), 0), cvScalar(180, 256, MAX(_vmin,_vmax), 0), camKalTrk.mask); // MASK
		cvSplit(camKalTrk.hsv, camKalTrk.hue, 0, 0, 0); //  HUE
		if (camKalTrk.trackObject < 0) {
			float max_val = 0.f;
			boundaryCheck(camKalTrk.originBox, frame->width, frame->height);
			cvSetImageROI(camKalTrk.hue, camKalTrk.originBox); // for ROI
			cvSetImageROI(camKalTrk.mask, camKalTrk.originBox); // for camKalTrk.mask
			cvCalcHist(&camKalTrk.hue, camKalTrk.hist, 0, camKalTrk.mask); //
			cvGetMinMaxHistValue(camKalTrk.hist, 0, &max_val, 0, 0);
			cvConvertScale(camKalTrk.hist->bins, camKalTrk.hist->bins, max_val ? 255. / max_val : 0., 0); // scale the histogram bins to [0,255]
			cvResetImageROI(camKalTrk.hue); // remove ROI
			cvResetImageROI(camKalTrk.mask);
			camKalTrk.trackWindow = camKalTrk.originBox;
			camKalTrk.trackObject = 1;
			camKalTrk.lastpoint = camKalTrk.predictpoint = cvPoint(camKalTrk.trackWindow.x + camKalTrk.trackWindow.width / 2,
					camKalTrk.trackWindow.y + camKalTrk.trackWindow.height / 2);
			getCurrState(camKalTrk.kalman, camKalTrk.lastpoint, camKalTrk.predictpoint);// initialize with the current state
		}
		// state: (x, y, vx, vy)
		camKalTrk.prediction = cvKalmanPredict(camKalTrk.kalman, 0);// prediction = kalman->state_pre

		camKalTrk.predictpoint = cvPoint(cvRound(camKalTrk.prediction->data.fl[0]), cvRound(camKalTrk.prediction->data.fl[1]));

		camKalTrk.trackWindow = cvRect(camKalTrk.predictpoint.x - camKalTrk.trackWindow.width / 2, camKalTrk.predictpoint.y
				- camKalTrk.trackWindow.height / 2, camKalTrk.trackWindow.width, camKalTrk.trackWindow.height);

		camKalTrk.trackWindow = checkRectBoundary(cvRect(0, 0, frame->width, frame->height), camKalTrk.trackWindow);

		camKalTrk.searchWindow = cvRect(camKalTrk.trackWindow.x - region, camKalTrk.trackWindow.y - region, camKalTrk.trackWindow.width + 2
				* region, camKalTrk.trackWindow.height + 2 * region);

		camKalTrk.searchWindow = checkRectBoundary(cvRect(0, 0, frame->width, frame->height), camKalTrk.searchWindow);

		cvSetImageROI(camKalTrk.hue, camKalTrk.searchWindow);
		cvSetImageROI(camKalTrk.mask, camKalTrk.searchWindow);
		cvSetImageROI(camKalTrk.backproject, camKalTrk.searchWindow);

		cvCalcBackProject( &camKalTrk.hue, camKalTrk.backproject, camKalTrk.hist ); // back project

		cvAnd(camKalTrk.backproject, camKalTrk.mask, camKalTrk.backproject, 0);

		camKalTrk.trackWindow = cvRect(region, region, camKalTrk.trackWindow.width, camKalTrk.trackWindow.height);

		if (camKalTrk.trackWindow.height > 5 && camKalTrk.trackWindow.width > 5) {
			// calling CAMSHIFT
			cvCamShift(camKalTrk.backproject, camKalTrk.trackWindow, cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1),
					&camKalTrk.trackComp, &camKalTrk.trackBox);

			/*cvMeanShift( camKalTrk.backproject, camKalTrk.trackWindow,
			 cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
			 &camKalTrk.trackComp);*/
		}
		else {
			camKalTrk.trackComp.rect.x = 0;
			camKalTrk.trackComp.rect.y = 0;
			camKalTrk.trackComp.rect.width = 0;
			camKalTrk.trackComp.rect.height = 0;
		}

		cvResetImageROI(camKalTrk.hue);
		cvResetImageROI(camKalTrk.mask);
		cvResetImageROI(camKalTrk.backproject);
		camKalTrk.trackWindow = camKalTrk.trackComp.rect;
		camKalTrk.trackWindow = cvRect(camKalTrk.trackWindow.x + camKalTrk.searchWindow.x, camKalTrk.trackWindow.y
				+ camKalTrk.searchWindow.y, camKalTrk.trackWindow.width, camKalTrk.trackWindow.height);

		camKalTrk.measurepoint = cvPoint(camKalTrk.trackWindow.x + camKalTrk.trackWindow.width / 2, camKalTrk.trackWindow.y
				+ camKalTrk.trackWindow.height / 2);
		camKalTrk.realposition->data.fl[0] = camKalTrk.measurepoint.x;
		camKalTrk.realposition->data.fl[1] = camKalTrk.measurepoint.y;
		camKalTrk.realposition->data.fl[2] = camKalTrk.measurepoint.x - camKalTrk.lastpoint.x;
		camKalTrk.realposition->data.fl[3] = camKalTrk.measurepoint.y - camKalTrk.lastpoint.y;
		camKalTrk.lastpoint = camKalTrk.measurepoint;//keep the current real position

		//measurement x,y
		cvMatMulAdd( camKalTrk.kalman->measurement_matrix/*2x4*/, camKalTrk.realposition/*4x1*/,/*measurementstate*/0, camKalTrk.measurement );
		cvKalmanCorrect(camKalTrk.kalman, camKalTrk.measurement);

		cvRectangle(frame, cvPoint(camKalTrk.trackWindow.x, camKalTrk.trackWindow.y), cvPoint(camKalTrk.trackWindow.x
				+ camKalTrk.trackWindow.width, camKalTrk.trackWindow.y + camKalTrk.trackWindow.height), CV_RGB(255,128,0), 4, 8, 0);
	}
	// set new selection if it exists
	if (camKalTrk.selectObject && camKalTrk.selection.width > 0 && camKalTrk.selection.height > 0) {
		cvSetImageROI(camKalTrk.image, camKalTrk.selection);
		cvXorS(camKalTrk.image, cvScalarAll(255), camKalTrk.image, 0);
		cvResetImageROI(camKalTrk.image);
	}

	return camKalTrk.trackWindow;
}
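The tracker above interleaves a CAMSHIFT measurement with a constant-velocity Kalman filter; the camshift_kalman_tracker structure presumably configures the filter in its constructor. The legacy Kalman cycle in isolation, a minimal sketch for a 4-state (x, y, vx, vy) / 2-measurement (x, y) filter, where measured_x and measured_y stand in for the CAMSHIFT output:

	CvKalman* kalman = cvCreateKalman(4, 2, 0);        // state (x,y,vx,vy), measurement (x,y)
	const float A[] = { 1,0,1,0,  0,1,0,1,  0,0,1,0,  0,0,0,1 };
	memcpy(kalman->transition_matrix->data.fl, A, sizeof(A));   // constant-velocity model
	cvSetIdentity(kalman->measurement_matrix, cvRealScalar(1));
	cvSetIdentity(kalman->process_noise_cov, cvRealScalar(1e-4));
	cvSetIdentity(kalman->measurement_noise_cov, cvRealScalar(1e-1));
	cvSetIdentity(kalman->error_cov_post, cvRealScalar(1));
	CvMat* measurement = cvCreateMat(2, 1, CV_32FC1);

	// once per frame:
	const CvMat* prediction = cvKalmanPredict(kalman, 0);   // fills kalman->state_pre
	cvmSet(measurement, 0, 0, measured_x);
	cvmSet(measurement, 1, 0, measured_y);
	cvKalmanCorrect(kalman, measurement);                   // fuses the measurement into state_post

Example #9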
IplImage* detect_and_draw(IplImage* img, double scale = 1.3)
{
	IplImage* img1;
	char * str;
	static CvScalar colors[] = {
		{{0,0,255}}, {{0,128,255}},{{0,255,255}},{{0,255,0}},
		{{255,128,0}},{{255,255,0}},{{255,0,0}}, {{255,0,255}}
	}; //Just some pretty colors to draw with
	// IMAGE PREPARATION:
	//
	IplImage* gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
	IplImage* small_img = cvCreateImage(
	cvSize( cvRound(img->width/scale), cvRound(img->height/scale)), 8, 1);
	cvCvtColor( img, gray, CV_BGR2GRAY );
	cvResize( gray, small_img, CV_INTER_LINEAR );
	cvEqualizeHist( small_img, small_img );
	// DETECT OBJECTS IF ANY
	//
	cvClearMemStorage( storage );
	fprintf(stderr,"size: %d %d\n",cvGetSize(small_img).width,cvGetSize(small_img).height);
	CvSeq* objects = cvHaarDetectObjects(
										small_img,
										cascade,
										storage,
										1.1,
										2,
										0 ,
										cvSize(35, 35)
										);
	// TAKE THE FIRST FOUND OBJECT, CROP IT, AND DRAW A BOX AROUND IT
	//
	// for(int i = 0; i<(objects ? objects->total : 0); i++ )
	fprintf(stderr,"size: %d %d\n",cvGetSize(small_img).width,cvGetSize(small_img).height);
	if( 0<(objects ? objects->total : 0))
	{

		CvRect* r = (CvRect*)cvGetSeqElem( objects, 0 );

		cvSetImageROI(img,*r);

		img1=cvCreateImage(cvSize(r->width,r->height),img->depth,img->nChannels);

		cvCopy(img,img1,NULL);
		cvRectangle(
					img,
					cvPoint(r->x,r->y),
					cvPoint(r->x+r->width,r->y+r->height),
					colors[0]
		);
		cvResetImageROI(img);
		HAND=1;

	}
	else
	{
		HAND=0;		
		img1=cvCreateImage(cvSize(100,100),img->depth,img->nChannels);
	}


	cvReleaseImage( &gray);
	cvReleaseImage( &small_img );
	return img1;
}
Example #10
void GetFrame(void *data,int *pW,int *pH,int *pBpp,int *lvlStat,
int *lvlstatR,int *lvlstatG,int *lvlstatB)
{
    int ret = 0;
    
    *pW = qhyusb->QCam.cameraW;
    *pH = qhyusb->QCam.cameraH;
    *pBpp = qhyusb->QCam.transferBit;

    switch(qhyusb->QCam.CAMERA)
    {
        case DEVICETYPE_QHY5LII:
        case DEVICETYPE_QHY5II:
        {
            while((ret != (qhyusb->QCam.cameraW * qhyusb->QCam.cameraH + 5)) && (qhyusb->liveabort == 0))
            {
                ret = qhyusb->qhyccd_readUSB2B(qhyusb->QCam.ccd_handle,(unsigned char *)data,qhyusb->QCam.cameraW * qhyusb->QCam.cameraH + 5,1,&qhyusb->QCam.pos);
                #ifdef QHYCCD_DEBUG
                printf("%d\n",ret);
                #endif
            }

	    if(qhyusb->QCam.transferBit == 16 && qhyusb->QCam.CAMERA == DEVICETYPE_QHY5LII)
	    {
                q5lii->SWIFT_MSBLSBQHY5LII((unsigned char *)data);
	    }
		
	    IplImage *cvImg, *cropImg;
	    cvImg = cvCreateImage(cvSize(qhyusb->QCam.ImgX, qhyusb->QCam.ImgY), qhyusb->QCam.transferBit, 1);
	    cropImg = cvCreateImage(cvSize(qhyusb->QCam.ShowImgX, qhyusb->QCam.ShowImgY), qhyusb->QCam.transferBit, 1);
	    cvImg->imageData = (char *)data;
	    cvSetImageROI(cvImg, cvRect(qhyusb->QCam.ShowImgX_Start, qhyusb->QCam.ShowImgY_Start, qhyusb->QCam.ShowImgX,qhyusb->QCam.ShowImgY));
	    cvCopy(cvImg, cropImg, NULL);
	    cvResetImageROI(cvImg);
	    memcpy(data,cropImg->imageData,cropImg->imageSize);
	    cvReleaseImage(&cvImg);
	    cvReleaseImage(&cropImg);
            break;
        }
        case DEVICETYPE_QHY16000:
        {
            qhyusb->qhyccd_readUSB2B(qhyusb->QCam.ccd_handle,(unsigned char *)data,qhyusb->QCam.cameraW * qhyusb->QCam.cameraH,1,&qhyusb->QCam.pos);
            break;
        }
        case DEVICETYPE_QHY9:
        case DEVICETYPE_IC8300:
        case DEVICETYPE_QHY11:
        case DEVICETYPE_QHY21:
        case DEVICETYPE_QHY22:
        case DEVICETYPE_QHY23:
        case DEVICETYPE_QHY6:
        {
            qhyusb->qhyccd_readUSB2B(qhyusb->QCam.ccd_handle,(unsigned char *)data,qhyusb->QCam.P_Size,qhyusb->QCam.Total_P,&qhyusb->QCam.pos);
            
            if(qhyusb->QCam.CAMERA == DEVICETYPE_IC8300 || qhyusb->QCam.CAMERA == DEVICETYPE_QHY22 || qhyusb->QCam.CAMERA == DEVICETYPE_QHY21 ||
               qhyusb->QCam.CAMERA == DEVICETYPE_QHY23)
            {
                if(qhyusb->ccdreg.VBIN == 1)
                {
                    ic8300->ConvertIC8300DataBIN11((unsigned char *)data,qhyusb->QCam.cameraW,qhyusb->QCam.cameraH,qhyusb->ccdreg.TopSkipPix);
                }
                else if(qhyusb->ccdreg.VBIN == 2)
                {
                    ic8300->ConvertIC8300DataBIN22((unsigned char *)data,qhyusb->QCam.cameraW,qhyusb->QCam.cameraH,qhyusb->ccdreg.TopSkipPix);
                }
                else if(qhyusb->ccdreg.VBIN == 4)
                {
                    ic8300->ConvertIC8300DataBIN44((unsigned char *)data,qhyusb->QCam.cameraW,qhyusb->QCam.cameraH,qhyusb->ccdreg.TopSkipPix);
                }
            }
            else if(qhyusb->QCam.CAMERA == DEVICETYPE_QHY6)
            {
                if(qhyusb->ccdreg.VBIN == 1)
                {
                    qhy6->ConvertQHY6PRODataBIN11((unsigned char *)data);
                }
                else if(qhyusb->ccdreg.VBIN == 2)
                {
                    qhy6->ConvertQHY6PRODataBIN22((unsigned char *)data);
                }
            }
            break;
        } 
    }
    
    if(qhyusb->QCam.bin == 22)
    {
	Bin2x2((unsigned char *)data,qhyusb->QCam.cameraW,qhyusb->QCam.cameraH);
	*pW /= 2;
	*pH /= 2;
    }
}
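GetFrame points cvImg->imageData at the caller's buffer instead of copying; the header-only variant of that idea is cvCreateImageHeader plus cvSetData, sketched below with hypothetical w, h, x0, y0, cw, ch, and crop variables. Releasing only the header leaves the caller's buffer untouched:

    // Wrap an existing 8-bit single-channel pixel buffer without copying it.
    IplImage* view = cvCreateImageHeader(cvSize(w, h), IPL_DEPTH_8U, 1);
    cvSetData(view, data, w);                  // step = bytes per row (8-bit, 1 channel)
    cvSetImageROI(view, cvRect(x0, y0, cw, ch));
    cvCopy(view, crop, NULL);                  // crop must already be cw x ch
    cvResetImageROI(view);
    cvReleaseImageHeader(&view);               // frees the header only, not the data buffer

Example #11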
ReturnType HandsMotionTracking::onExecute()
{
	// acquire the image from the in-port
	opros_any *pData = ImageIn.pop();
	RawImage result;

	// output data
	std::vector<PositionDataType> data;


	if(pData != NULL){
		
		// get the image from the port
		RawImage Image = ImageIn.getContent(*pData);
		RawImageData *RawImage = Image.getImage();


		// get the size of the current frame
		m_in_width = RawImage->getWidth();
		m_in_height = RawImage->getHeight();

		// release previously allocated images first
		if(m_image_buff			!= NULL)
			cvReleaseImage(&m_image_buff);
		if(m_image_dest         != NULL)
			cvReleaseImage(&m_image_dest);
		if(m_image_dest2        != NULL)
			cvReleaseImage(&m_image_dest2);

		if(m_image_th			!= NULL)
			cvReleaseImage(&m_image_th);
		if(m_image_th2			!= NULL)
			cvReleaseImage(&m_image_th2);

		// allocate image memory
        m_image_buff   = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);// original image
		m_image_dest   = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
		m_image_dest2  = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);

		m_image_th     = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);// region-extraction image
		m_image_th2    = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);// region-extraction image
		
		
		if(!video_flag)
		{
			std::string cpath = getProperty("opros.component.dir");
			std::string file = getProperty("VideoFile");
			if (file == "") file = "sample.avi";

			std::string path = cpath + file;

			m_video	= NULL;
			m_video = cvCreateFileCapture(path.c_str()); // video
			video_flag = true;// prevents the video from restarting each time
			
		}

		// copy the frame data into our buffer via memcpy
		memcpy(m_image_buff->imageData, RawImage->getData(), RawImage->getSize());

		// for display output
		cvCopy(m_image_buff, m_image_dest, 0);

		// images for color-channel separation
		IplImage* m_image_YCrCb = cvCreateImage(cvGetSize(m_image_buff), IPL_DEPTH_8U, 3);
		IplImage* m_Y  = cvCreateImage(cvGetSize(m_image_buff), IPL_DEPTH_8U, 1);
		IplImage* m_Cr = cvCreateImage(cvGetSize(m_image_buff), IPL_DEPTH_8U, 1);
		IplImage* m_Cb = cvCreateImage(cvGetSize(m_image_buff), IPL_DEPTH_8U, 1);

		cvCvtColor(m_image_buff, m_image_YCrCb, CV_RGB2YCrCb);   // RGB -> YCrCb conversion

		cvSplit(m_image_YCrCb, m_Y, m_Cr, m_Cb, NULL);   // split the channels

		// variables holding the pixel values of the region to extract
		unsigned char m_Cr_val = 0;			
		unsigned char m_Cb_val = 0;

		// 살색추출
		for(int i=0;i<m_image_buff->height;i++)            
		{
			for(int j=0;j<m_image_buff->width;j++)
			{  
				// Read the Cr and Cb values
				m_Cr_val = (unsigned char)m_Cr->imageData[i*m_Cr->widthStep+j];
				m_Cb_val = (unsigned char)m_Cb->imageData[i*m_Cb->widthStep+j];

				// Check whether the pixel falls inside the skin-color range
				if( (77 <= m_Cr_val) && (m_Cr_val <= 127) && (133 <= m_Cb_val) && (m_Cb_val <= 173) )
				{
					// Skin pixels become white
					m_image_buff->imageData[i*m_image_buff->widthStep+j*3+0] = (unsigned char)255; 
					m_image_buff->imageData[i*m_image_buff->widthStep+j*3+1] = (unsigned char)255;
					m_image_buff->imageData[i*m_image_buff->widthStep+j*3+2] = (unsigned char)255; 
				}
				else
				{ 
					// Everything else becomes black
					m_image_buff->imageData[i*m_image_buff->widthStep+j*3+0]= 0;
					m_image_buff->imageData[i*m_image_buff->widthStep+j*3+1]= 0;
					m_image_buff->imageData[i*m_image_buff->widthStep+j*3+2]= 0;
				}
			}
		}
		
		// Binarize the skin-extracted image
		cvCvtColor(m_image_buff, m_image_th, CV_RGB2GRAY); 

		// Morphological operations to remove noise
		cvDilate (m_image_th, m_image_th, NULL, 2);	// dilation
		cvErode  (m_image_th, m_image_th, NULL, 2);	// erosion

		// Initialize variables and image memory
		int temp_num = 0;
		int StartX , StartY, EndX , EndY;
		int nNumber = 0;
		m_nThreshold	= 100;

		if( m_rec_out != NULL )
		{
			delete [] m_rec_out;

			m_rec_out	= NULL;
			m_nBlobs_out	= _DEF_MAX_BLOBS;
		}
		else
		{
			m_rec_out	= NULL;
			m_nBlobs_out	= _DEF_MAX_BLOBS;
		}
	
		if( m_image_th2 != NULL )	
			cvReleaseImage( &m_image_th2 );

		// Create a separate image for labeling
		m_image_th2			= cvCloneImage( m_image_th );

		// Store the size of the image to be labeled
		int nWidth	= m_image_th2->width;
		int nHeight = m_image_th2->height;

		// Allocate a buffer of the same size as the image
		unsigned char* tmpBuf = new unsigned char [nWidth * nHeight];

		for(int j=0; j<nHeight ;j++)	
			for(int i=0; i<nWidth ;i++)	
				// visit every pixel
				tmpBuf[j*nWidth+i] = (unsigned char)m_image_th2->imageData[j*m_image_th2->widthStep+i];
		
////// Initialize the visit points used for labeling

		m_vPoint_out = new Visited [nWidth * nHeight];
		
		for(int nY = 0; nY < nHeight; nY++)
		{
			for(int nX = 0; nX < nWidth; nX++)
			{
				m_vPoint_out[nY * nWidth + nX].bVisitedFlag		= FALSE;
				m_vPoint_out[nY * nWidth + nX].ptReturnPoint.x	= nX;
				m_vPoint_out[nY * nWidth + nX].ptReturnPoint.y	= nY;
			}
		}

////// Perform the labeling
		for(int nY = 0; nY < nHeight; nY++)
		{
			for(int nX = 0; nX < nWidth; nX++)
			{
				if(tmpBuf[nY * nWidth + nX] == 255)		// Is this a new component?, 255 == Object
				{
					temp_num++;

					tmpBuf[nY * nWidth + nX] = temp_num;
					
					StartX = nX, StartY = nY, EndX = nX, EndY= nY;

					__NRFIndNeighbor(tmpBuf, nWidth, nHeight, nX, nY, &StartX, &StartY, &EndX, &EndY, m_vPoint_out);

					if(__Area(tmpBuf, StartX, StartY, EndX, EndY, nWidth, temp_num) < m_nThreshold)
					{
		 				for(int k = StartY; k <= EndY; k++)
						{
							for(int l = StartX; l <= EndX; l++)
							{
								if(tmpBuf[k * nWidth + l] == temp_num)
									tmpBuf[k * nWidth + l] = 0;
							}
						}
						--temp_num;

						if(temp_num > 250)
							temp_num = 0;
					}
				}
			}
		}
		// Free the visit-point memory
		delete [] m_vPoint_out;

		// Keep the result
		nNumber = temp_num;
		
		// Allocate rects for the number of labels
		if( nNumber != _DEF_MAX_BLOBS )		
			m_rec_out = new CvRect [nNumber];
	
		// Build the rects
		if( nNumber != 0 )	
			DetectLabelingRegion(nNumber, tmpBuf, nWidth, nHeight, m_rec_out);

		for(int j=0; j<nHeight; j++)
			for(int i=0; i<nWidth ; i++)
				m_image_th2->imageData[j*m_image_th2->widthStep+i] = tmpBuf[j*nWidth+i];

		delete [] tmpBuf;

		// Keep the label count
		m_nBlobs_out = nNumber;
	
		// Filter the labeled regions
		int nMaxWidth	= m_in_width  * 9 / 10;	   // drop labels wider than 90% of the image width
		int nMaxHeight	= m_in_height * 9 / 10;	   // drop labels taller than 90% of the image height

		// Set the minimum and maximum region sizes (these depend on the frame size)
		_BlobSmallSizeConstraint( 5, 150, m_rec_out, &m_nBlobs_out);
		_BlobBigSizeConstraint(nMaxWidth, nMaxHeight, m_rec_out, &m_nBlobs_out);

		// Allocate the storages used below
		storage1 = cvCreateMemStorage(0);
		storage2 = cvCreateMemStorage(0);

		// Initialize variables
		CvPoint point;
		CvSeq* seq[10];
		CvSeq* hull;
		CvPoint end_pt;
		CvPoint center;

		// Initialize the outgoing data
		outData[0].x = 0, outData[0].y  = 0;
		outData[1].x = 0, outData[1].y  = 0;
		outData[2].x = 0, outData[2].y  = 0;

		int num = 0;
		int temp_x = 0;
		int temp_y = 0;
		int rect = 0;
		
		// Clamp so we do not exceed the prepared sequence array;
		// for now, process only a single region..
		if(m_nBlobs_out > 1)
		{
			m_nBlobs_out = 1;
		}

		// Start processing inside each labeled region 
		for( int i=0; i <  m_nBlobs_out; i++ )
		{
			// Two corner points for drawing the rectangle
			CvPoint	pt1 = cvPoint(	m_rec_out[i].x, m_rec_out[i].y );
			CvPoint pt2 = cvPoint(	pt1.x + m_rec_out[i].width,pt1.y + m_rec_out[i].height );

			// Set the color
			CvScalar color	= cvScalar( 0, 0, 255 );

			// Draw the label rectangle (debugging)
			//cvDrawRect( m_image_dest, pt1, pt2, color);
			
			// Create the images that will hold the label region
			temp_mask = cvCreateImage(cvSize(m_rec_out[i].width, m_rec_out[i].height),8,1);
			temp_mask2 = cvCreateImage(cvSize(m_rec_out[i].width, m_rec_out[i].height),8,1);
			
			// Set the ROI
			cvSetImageROI(m_image_th, m_rec_out[i]);
				
			// Copy out the ROI
			cvCopy(m_image_th, temp_mask, 0);

			// Reset the ROI
			cvResetImageROI(m_image_th);

			
			// Create a sequence for processing objects inside the ROI
			seq[i] = cvCreateSeq(CV_SEQ_KIND_GENERIC | CV_32SC2,sizeof(CvContour),sizeof(CvPoint), storage1);
			
			// Push every white pixel of the extracted ROI image into the sequence
			for(int j =0; j < temp_mask ->height ; j++)
			{
				for(int k = 0; k < temp_mask ->width; k++)
				{				
					if((unsigned char)temp_mask->imageData[j*temp_mask->widthStep+k] == 255)
					{
						point.x = k;		// x of the white pixel
						point.y = j;		// y of the white pixel
						cvSeqPush(seq[i], &point);	// push the coordinate into the sequence
						temp_x += point.x; // accumulate x
						temp_y += point.y; // accumulate y
						num++;             // count the pixels

					}	
				}
			} 
			
			// Reset the points
			point.x				= 0;
			point.y				= 0;
			end_pt.x			= 0;
			end_pt.y			= 0;
			center.x			= 0;
			center.y			= 0;
			
			CvPoint dist_pt;			// hull point farthest from the center
			double fMaxDist		= 0;    // maximum distance from the center
			double fDist		= 0;	// used for the distance computation
		
	
			// Find the center: the mean of the pixel coordinates
			if(num != 0)
			{
				center.x			= (int)temp_x/num; // mean x
				center.y			= (int)temp_y/num; // mean y
			}

			// Set the ROI
			cvSetImageROI(m_image_dest, m_rec_out[i]);

///////// Draw the convex hull ////////
			if(seq[i]->total !=0)
			{	
				// Compute the convex hull
				hull = cvConvexHull2(seq[i], 0, CV_COUNTER_CLOCKWISE, 0);	
				point = **CV_GET_SEQ_ELEM(CvPoint*, hull,hull->total-1);

				// Walk the hull points
				for(int x = 0; x < hull->total; x++)
				{
					CvPoint hull_pt = **CV_GET_SEQ_ELEM(CvPoint*, hull,x);

					// Draw a hull line (debugging)
					//cvLine(m_image_dest, point, hull_pt, CV_RGB(255, 255, 0 ),2, 8);
					point = hull_pt;

					// Track the maximum distance from the center
					dist_pt =  **CV_GET_SEQ_ELEM(CvPoint*, hull,x);

					fDist = sqrt((double)((center.x - dist_pt.x) * (center.x - dist_pt.x) 
						+ (center.y - dist_pt.y) * (center.y - dist_pt.y)));

					if(fDist > fMaxDist)
					{
						max_pt = dist_pt;
						fMaxDist = fDist;
					}
				}
			}
			

			// Draw the center point
			cvCircle(m_image_dest,center,5, CV_RGB(0,0,255), 5);

			// Store the center point for output
			outData[0].x = center.x;
			outData[0].y = center.y;
	
			
//////// Build the mask ///////

			// Create the mask image drawn around the center point
			circle_mask = cvCreateImage(cvGetSize(temp_mask), 8, 1);
			
			// Black background
			cvSetZero(circle_mask);
			
			// White circle drawn on the binary image, to be combined with the hand image
			int radi = (int)m_rec_out[i].height/2.9; // circle size tuned by hand..

			// Draw the white circle and white rectangle that make up the mask image
			cvCircle(circle_mask, center, radi, CV_RGB(255,255,255),CV_FILLED);
			cvDrawRect(circle_mask, cvPoint(center.x - radi, center.y),cvPoint(center.x + radi, pt2.y),
				 CV_RGB(255,255,255),CV_FILLED);

			// Subtract the mask to keep only the fingertips
			cvSub(temp_mask, circle_mask, temp_mask, 0);

	
/////// Label inside the ROI to extract the fingertips //////

			// Initialize variables and image memory
			int temp_num_in = 0;
			int StartX_in , StartY_in, EndX_in , EndY_in;
			int nNumber_in = 0;
			m_nThreshold_in	= 10;

			if( m_rec_in != NULL )
			{
				delete [] m_rec_in;

				m_rec_in	= NULL;
				m_nBlobs_in	= _DEF_MAX_BLOBS;
			}
			else
			{
				m_rec_in	= NULL;
				m_nBlobs_in	= _DEF_MAX_BLOBS;
			}

			if( temp_mask2 != NULL )	
				cvReleaseImage( &temp_mask2 );

			temp_mask2			= cvCloneImage( temp_mask );	

			// Store the size of the incoming image
			int nWidth	= temp_mask2->width;
			int nHeight = temp_mask2->height;
		
			// Allocate a buffer of the same size as the image
			unsigned char* tmpBuf_in = new unsigned char [nWidth * nHeight];

			for(int j=0; j<nHeight ;j++)	
				for(int i=0; i<nWidth ;i++)
					// visit every pixel
					tmpBuf_in[j*nWidth+i] = (unsigned char)temp_mask2->imageData[j*temp_mask2->widthStep+i];
				
	
		/////// Initialize the visit points used for labeling ////////
			
			m_vPoint_in = new Visited [nWidth * nHeight];

			for(int nY = 0; nY < nHeight; nY++)
			{
				for(int nX = 0; nX < nWidth; nX++)
				{
					m_vPoint_in[nY * nWidth + nX].bVisitedFlag		= FALSE;
					m_vPoint_in[nY * nWidth + nX].ptReturnPoint.x	= nX;
					m_vPoint_in[nY * nWidth + nX].ptReturnPoint.y	= nY;
				}
			}

			//// Perform the labeling
			for(int nY = 0; nY < nHeight; nY++)
			{
				for(int nX = 0; nX < nWidth; nX++)
				{
					if(tmpBuf_in[nY * nWidth + nX] == 255)		// Is this a new component?, 255 == Object
					{
						temp_num_in++;

						tmpBuf_in[nY * nWidth + nX] = temp_num_in;
						
						StartX_in = nX, StartY_in = nY, EndX_in = nX, EndY_in= nY;

						__NRFIndNeighbor(tmpBuf_in, nWidth, nHeight, nX, nY, 
							&StartX_in, &StartY_in, &EndX_in, &EndY_in,m_vPoint_in);

						if(__Area(tmpBuf_in, StartX_in, StartY_in, EndX_in, EndY_in, nWidth, temp_num_in) < m_nThreshold_in)
						{
		 					for(int k = StartY_in; k <= EndY_in; k++)
							{
								for(int l = StartX_in; l <= EndX_in; l++)
								{
									if(tmpBuf_in[k * nWidth + l] == temp_num_in)
										tmpBuf_in[k * nWidth + l] = 0;
								}
							}
							--temp_num_in;

							if(temp_num_in > 250)
								temp_num_in = 0;
						}
					}
				}
			}
			// Free the visit-point memory
			delete [] m_vPoint_in;

			// Keep the label count
			nNumber_in = temp_num_in;

			if( nNumber_in != _DEF_MAX_BLOBS )		
				m_rec_in = new CvRect [nNumber_in];

			if( nNumber_in != 0 )	
				DetectLabelingRegion(nNumber_in, tmpBuf_in, nWidth, nHeight, m_rec_in);

			for(int j=0; j<nHeight; j++)
				for(int i=0; i<nWidth ; i++)
					temp_mask2->imageData[j*temp_mask2->widthStep+i] = tmpBuf_in[j*nWidth+i];

			delete [] tmpBuf_in;

			m_nBlobs_in = nNumber_in;

			// Set the minimum and maximum region sizes
			_BlobSmallSizeConstraint( 5, 5, m_rec_in, &m_nBlobs_in);
			_BlobBigSizeConstraint( temp_mask2->width, temp_mask2->height,m_rec_in, &m_nBlobs_in);

			// Declarations and initialization
			CvPoint center_in;
			CvPoint point_in;
			
			point_in.x = 0;
			point_in.y = 0;
			center_in.x = 0;
			center_in.y = 0;
			CvSeq* seq_in[20];

			// Clamp so we do not exceed the prepared sequence array
			if(m_nBlobs_in > 20)
			{
				m_nBlobs_in =20;
			}

			for( int ni =0; ni <  m_nBlobs_in; ni++ )
			{		
				// Two corner points for drawing the rectangle
				CvPoint	pt1 = cvPoint(	m_rec_in[ni].x, m_rec_in[ni].y );
				CvPoint pt2 = cvPoint(	pt1.x + m_rec_in[ni].width,pt1.y + m_rec_in[ni].height );

				// Set the color
				CvScalar color	= cvScalar( 255,0 , 255 );
				
				// Draw the label rectangle (debugging)
				//cvDrawRect( m_image_dest, pt1, pt2, color);
				
				// Allocate the fingertip mask to be processed
				in_mask = cvCreateImage(cvSize(m_rec_in[ni].width, m_rec_in[ni].height),8,1);

				// Set the ROI
				cvSetImageROI(temp_mask, m_rec_in[ni]);
				
				// Copy the needed region
				cvCopy(temp_mask, in_mask, 0);

				// Reset the ROI
				cvResetImageROI(temp_mask);

				// Create a sequence for processing objects inside the ROI
				seq_in[ni] = cvCreateSeq(CV_SEQ_KIND_GENERIC | CV_32SC2,sizeof(CvContour),sizeof(CvPoint), storage2);

				// Initialize
				int temp_x_in = 0;
				int temp_y_in = 0;	
				int num_in = 0;
				
				// Push every white pixel of the extracted ROI image into the sequence
				for(int j =0; j < in_mask ->height ; j++)
				{
					for(int k = 0; k < in_mask ->width; k++)
					{				
						if((unsigned char)in_mask->imageData[j*in_mask->widthStep+k] == 255)
						{
							point_in.x = k;		// x of the white pixel
							point_in.y = j;		// y of the white pixel
							cvSeqPush(seq_in[ni], &point_in);	// push the coordinate into the sequence
							temp_x_in += point_in.x; // accumulate x
							temp_y_in += point_in.y; // accumulate y
							num_in++;             // count the pixels

						}

					}
				}
				
				// Initialize
				max_pt_in.x = 0;
				max_pt_in.y = 0;					
				double fMaxDist_in	= 0;
				double fDist_in	= 0;
			
				// Find the center: the mean of the pixel coordinates
				if(num_in != 0)
				{
					center_in.x			= (int)temp_x_in/num_in + pt1.x; // mean x
					center_in.y			= (int)temp_y_in/num_in + pt1.y; // mean y
					
				}

				// For now, handle only the case of exactly two endpoints..
				if(m_nBlobs_in == 2)  
				{	
					// Initialize
					finger_pt[ni].x = 0;
					finger_pt[ni].y = 0;

					if(seq_in[ni]->total !=0)
					{	
						// Compute the convex hull: the coordinates of the contour
						CvSeq* hull_in = cvConvexHull2(seq_in[ni], 0, CV_COUNTER_CLOCKWISE, 0);	
						//point_in = **CV_GET_SEQ_ELEM(CvPoint*, hull_in,hull_in->total-1);


						// Walk the hull points
						for(int nx = 0; nx < hull_in->total; nx++)
						{
							CvPoint hull_pt_in = **CV_GET_SEQ_ELEM(CvPoint*, hull_in,nx);
							hull_pt_in.x = hull_pt_in.x  + pt1.x;
							hull_pt_in.y = hull_pt_in.y + pt1.y;

							// Distance between the center and this hull point
							fDist_in = sqrt((double)((center.x - hull_pt_in.x) * (center.x - hull_pt_in.x) 
								+ (center.y - hull_pt_in.y) * (center.y - hull_pt_in.y)));

							// Keep the point with the larger distance
							if(fDist_in > fMaxDist_in)
							{
								max_pt_in = hull_pt_in;
								fMaxDist_in = fDist_in;
								
							}
						}
					}				
				
					// Store the farthest point
					finger_pt[ni].x = max_pt_in.x ;
					finger_pt[ni].y = max_pt_in.y ;
						
					// Correct back to full-image coordinates once the ROI is released
					finger_pt[ni].x = finger_pt[ni].x + m_rec_out[i].x;
					finger_pt[ni].y = finger_pt[ni].y + m_rec_out[i].y;		
				}
void FindArenaObjects(IplImage* Image, CvFont Font, _ArenaObject *pRamp, _ArenaObject* pPlatform, _ArenaObject* pRightPit, _ArenaObject* pLeftPit, _Robot* pRobot)
{
	IplImage* ImageCopy = cvCloneImage(Image);
	IplImage* ImageCopy2 = cvCloneImage(Image);
	SelectionNumber = 0;
	Select_Object = 0;
	int PrevSelectionNumber = -1;

	cvNamedWindow("Arena");
	cvShowImage("Arena", ImageCopy);
	cvSetMouseCallback("Arena", OnMouse);	

	while(SelectionNumber < 6 && cvWaitKey(10) != 27)
	{
		if(SelectionNumber - PrevSelectionNumber > 0)
		{
			PrevSelectionNumber = SelectionNumber;
			cvCopyImage(Image, ImageCopy);
			switch(SelectionNumber)
			{
			case 0:
				cvPutText(ImageCopy, "Select Temp Ramp", cvPoint(0, 20), &Font, cvScalarAll(255));
				break;
			case 1:
				if(pRamp)
				{
					pRamp->BoundingRect = Selection;
					pRamp->Center = cvPoint(pRamp->BoundingRect.x + pRamp->BoundingRect.width/2, pRamp->BoundingRect.y + pRamp->BoundingRect.height/2);
				}
				cvPutText(ImageCopy, "Select Temp Platform", cvPoint(0, 20), &Font, cvScalarAll(255));
				break;
			case 2:
				if(pPlatform)
				{
					pPlatform->BoundingRect = Selection;
					pPlatform->Center = cvPoint(pPlatform->BoundingRect.x + pPlatform->BoundingRect.width/2, pPlatform->BoundingRect.y + pPlatform->BoundingRect.height/2);
				}
				cvPutText(ImageCopy, "Select Right Pit", cvPoint(0, 20), &Font, cvScalarAll(255));
				break;
			case 3:
				if(pRightPit)
				{
					pRightPit->BoundingRect = Selection;
					pRightPit->Center = cvPoint(pRightPit->BoundingRect.x + pRightPit->BoundingRect.width/2, pRightPit->BoundingRect.y + pRightPit->BoundingRect.height/2);
				}
				cvPutText(ImageCopy, "Select Left Pit", cvPoint(0, 20), &Font, cvScalarAll(255));
				break;
			case 4:
				if(pLeftPit)
				{
					pLeftPit->BoundingRect = Selection;
					pLeftPit->Center = cvPoint(pLeftPit->BoundingRect.x + pLeftPit->BoundingRect.width/2, pLeftPit->BoundingRect.y + pLeftPit->BoundingRect.height/2);
				}
				cvPutText(ImageCopy, "Select Robot", cvPoint(0, 20), &Font, cvScalarAll(255));
				break;
			case 5:
				if(pRobot)
				{
					pRobot->BoundingRect = Selection;
				}
				cvPutText(ImageCopy, "Select Robot Patch", cvPoint(0, 20), &Font, cvScalarAll(255));
				break;
			case 6:
				if(pRobot)
				{
					pRobot->Patch = Selection;
					pRobot->PatchCenter = cvPoint(pRobot->Patch.x + pRobot->Patch.width/2, pRobot->Patch.y + pRobot->Patch.height/2);
					pRobot->Updated = 1;
				}
				cvPutText(ImageCopy, "Press Escape to Continue...", cvPoint(0, 20), &Font, cvScalarAll(255));
				break;
			default:
				break;
			}
			cvShowImage("Arena", ImageCopy);
		}
		if(Select_Object && Selection.width > 0 && Selection.height > 0 )
        {
			cvCopyImage(ImageCopy, ImageCopy2);
            cvSetImageROI(ImageCopy2, Selection);
            
			cvXorS(ImageCopy2, cvScalarAll(255), ImageCopy2);
            
			cvResetImageROI(ImageCopy2);
			cvShowImage("Arena", ImageCopy2);
        }
	}
	cvReleaseImage(&ImageCopy);
	cvReleaseImage(&ImageCopy2);
	cvDestroyWindow("Arena");
}
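// FindArenaObjects depends on an OnMouse callback (not shown here) that
// maintains the Selection rectangle and the Select_Object / SelectionNumber
// globals. A hypothetical sketch of such a callback, assuming exactly those
// globals, might look like this:
static CvPoint SelectionOrigin;

static void OnMouse(int event, int x, int y, int flags, void* param)
{
	if(Select_Object)
	{
		// Grow the selection rectangle from the drag origin.
		Selection.x = MIN(x, SelectionOrigin.x);
		Selection.y = MIN(y, SelectionOrigin.y);
		Selection.width = abs(x - SelectionOrigin.x);
		Selection.height = abs(y - SelectionOrigin.y);
	}

	switch(event)
	{
	case CV_EVENT_LBUTTONDOWN:
		SelectionOrigin = cvPoint(x, y);
		Selection = cvRect(x, y, 0, 0);
		Select_Object = 1;
		break;
	case CV_EVENT_LBUTTONUP:
		Select_Object = 0;
		if(Selection.width > 0 && Selection.height > 0)
			SelectionNumber++;	// advance to the next arena object
		break;
	}
}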
double catcierge_haar_matcher_match(void *octx,
		IplImage *img, match_result_t *result, int save_steps)
{
	catcierge_haar_matcher_t *ctx = (catcierge_haar_matcher_t *)octx;
	catcierge_haar_matcher_args_t *args = ctx->args;
	double ret = HAAR_SUCCESS_NO_HEAD;
	IplImage *img_eq = NULL;
	IplImage *img_gray = NULL;
	IplImage *tmp = NULL;
	IplImage *thr_img = NULL;
	CvSize max_size;
	CvSize min_size;
	int cat_head_found = 0;
	assert(ctx);
	assert(ctx->args);
	assert(result);

	min_size.width = args->min_width;
	min_size.height = args->min_height;
	max_size.width = 0;
	max_size.height = 0;
	result->step_img_count = 0;
	result->description[0] = '\0';

	// Make gray scale if needed.
	if (img->nChannels != 1)
	{
		tmp = cvCreateImage(cvGetSize(img), 8, 1);
		cvCvtColor(img, tmp, CV_BGR2GRAY);
		img_gray = tmp;
	}
	else
	{
		img_gray = img;
	}

	if (result->direction)
	{
		result->direction = MATCH_DIR_UNKNOWN;
	}

	// Equalize histogram.
	if (args->eq_histogram)
	{
		img_eq = cvCreateImage(cvGetSize(img), 8, 1);
		cvEqualizeHist(img_gray, img_eq);
	}
	else
	{
		img_eq = img_gray;
	}

	catcierge_haar_matcher_save_step_image(ctx,
		img_eq, result, "gray", "Grayscale original", save_steps);

	result->rect_count = MAX_MATCH_RECTS;

	if (cv2CascadeClassifier_detectMultiScale(ctx->cascade,
			img_eq, result->match_rects, &result->rect_count,
			1.1, 3, CV_HAAR_SCALE_IMAGE, &min_size, &max_size))
	{
		ret = -1.0;
		goto fail;
	}

	if (ctx->super.debug) printf("Rect count: %d\n", (int)result->rect_count);

	cat_head_found = (result->rect_count > 0);

	// Even if we don't find a cat head, we count it as a success.
	// Only when prey is found do we consider it a fail,
	// unless args->no_match_is_fail is set.
	if (args->no_match_is_fail)
	{
		// Any return value above 0.0 is considered
		// a success. Just so we can distinguish the types of successes.
		ret = cat_head_found ?
			HAAR_SUCCESS_NO_HEAD_IS_FAIL : HAAR_FAIL;
	}

	if (cat_head_found)
	{
		int inverted; 
		int flags;
		CvRect roi;
		find_prey_f find_prey = NULL;

		// Only use the lower part of the region of interest,
		// and extend it somewhat towards the "outside" for a better result.
		// (We only use the first haar cascade match.)
		roi = result->match_rects[0];

		// If we're saving steps, include the original haar cascade
		// match rectangle image.
		if (save_steps)
		{
			cvSetImageROI(img_eq, roi);

			catcierge_haar_matcher_save_step_image(ctx,
				img_eq, result, "haar_roi", "Haar match", save_steps);
		}

		catcierge_haar_matcher_calculate_roi(ctx, &roi);
		cvSetImageROI(img_eq, roi);

		catcierge_haar_matcher_save_step_image(ctx,
				img_eq, result, "roi", "Cropped region of interest", save_steps);

		if (args->prey_method == PREY_METHOD_ADAPTIVE)
		{
			inverted = 1;
			flags = CV_THRESH_BINARY_INV | CV_THRESH_OTSU;
			find_prey = catcierge_haar_matcher_find_prey_adaptive;
		}
		else
		{
			inverted = 0;
			flags = CV_THRESH_BINARY | CV_THRESH_OTSU;
			find_prey = catcierge_haar_matcher_find_prey;
		}

		// Both "find prey" and "guess direction" needs
		// a thresholded image, so perform it before calling those.
		thr_img = cvCreateImage(cvGetSize(img_eq), 8, 1);
		cvThreshold(img_eq, thr_img, 0, 255, flags);
		if (ctx->super.debug) cvShowImage("Haar image binary", thr_img);

		catcierge_haar_matcher_save_step_image(ctx,
			thr_img, result, "thresh", "Global thresholded binary image", save_steps);

		result->direction = catcierge_haar_guess_direction(ctx, thr_img, inverted);
		if (ctx->super.debug) printf("Direction: %s\n", catcierge_get_direction_str(result->direction));

		// Don't bother looking for prey when the cat is going outside.
		if ((result->direction) == MATCH_DIR_OUT)
		{
			if (ctx->super.debug) printf("Skipping prey detection!\n");
			snprintf(result->description, sizeof(result->description) - 1,
				"Skipped prey detection when going out");
			goto done;
		}

		// Note that thr_img will be modified.
		if (find_prey(ctx, img_eq, thr_img, result, save_steps))
		{
			if (ctx->super.debug) printf("Found prey!\n");
			ret = HAAR_FAIL;

			snprintf(result->description, sizeof(result->description) - 1,
				"Prey detected");
		}
		else
		{

			ret = HAAR_SUCCESS;
			snprintf(result->description, sizeof(result->description) - 1,
				"No prey detected");
		}
	}
	else
	{
		snprintf(result->description, sizeof(result->description) - 1,
			"%sNo cat head detected",
			(ret == HAAR_SUCCESS_NO_HEAD_IS_FAIL) ? "Fail ": "");
	}

done:
fail:
	cvResetImageROI(img);

	if (args->eq_histogram)
	{
		cvReleaseImage(&img_eq);
	}

	if (tmp)
	{
		cvReleaseImage(&tmp);
	}

	if (thr_img)
	{
		cvReleaseImage(&thr_img);
	}

	result->result = ret;
	result->success = (result->result > 0.0);

	return ret;
}
int catcierge_haar_matcher_find_prey_adaptive(catcierge_haar_matcher_t *ctx,
											IplImage *img, IplImage *inv_thr_img,
											match_result_t *result, int save_steps)
{
	IplImage *inv_adpthr_img = NULL;
	IplImage *inv_combined = NULL;
	IplImage *open_combined = NULL;
	IplImage *dilate_combined = NULL;
	CvSeq *contours = NULL;
	size_t contour_count = 0;
	CvSize img_size;
	assert(ctx);
	assert(img);
	assert(ctx->args);

	img_size = cvGetSize(img);

	// We expect to be given an inverted global thresholded image (inv_thr_img)
	// that contains the rough cat profile.

	// Do an inverted adaptive threshold of the original image as well.
	// This brings out small details such as a mouse tail that fades
	// into the background during a global threshold.
	inv_adpthr_img = cvCreateImage(img_size, 8, 1);
	cvAdaptiveThreshold(img, inv_adpthr_img, 255,
		CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY_INV, 11, 5);
	catcierge_haar_matcher_save_step_image(ctx,
		inv_adpthr_img, result, "adp_thresh", "Inverted adaptive threshold", save_steps);

	// Now we can combine the two thresholded images into one.
	inv_combined = cvCreateImage(img_size, 8, 1);
	cvAdd(inv_thr_img, inv_adpthr_img, inv_combined, NULL);
	catcierge_haar_matcher_save_step_image(ctx,
		inv_combined, result, "inv_combined", "Combined global and adaptive threshold", save_steps);

	// Get rid of noise from the adaptive threshold.
	open_combined = cvCreateImage(img_size, 8, 1);
	cvMorphologyEx(inv_combined, open_combined, NULL, ctx->kernel2x2, CV_MOP_OPEN, 2);
	catcierge_haar_matcher_save_step_image(ctx,
		open_combined, result, "opened", "Opened image", save_steps);

	dilate_combined = cvCreateImage(img_size, 8, 1);
	cvDilate(open_combined, dilate_combined, ctx->kernel3x3, 3);
	catcierge_haar_matcher_save_step_image(ctx,
		dilate_combined, result, "dilated", "Dilated image", save_steps);

	// Invert back the result so the background is white again.
	cvNot(dilate_combined, dilate_combined);
	catcierge_haar_matcher_save_step_image(ctx,
		dilate_combined, result, "combined", "Combined binary image", save_steps);

	cvFindContours(dilate_combined, ctx->storage, &contours,
		sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0, 0));

	// If we get more than one contour, we count it as prey.
	contour_count = catcierge_haar_matcher_count_contours(ctx, contours);

	if (save_steps)
	{
		IplImage *img_contour = cvCloneImage(img);
		IplImage *img_final_color = NULL;
		CvScalar color;

		cvDrawContours(img_contour, contours, cvScalarAll(255), cvScalarAll(0), 1, 1, 8, cvPoint(0, 0));
		catcierge_haar_matcher_save_step_image(ctx,
			img_contour, result, "contours", "Background contours", save_steps);

		// Draw a final color combined image with the Haar detection + contour.
		cvResetImageROI(img_contour);

		img_final_color =  cvCreateImage(cvGetSize(img_contour), 8, 3);

		cvCvtColor(img_contour, img_final_color, CV_GRAY2BGR);
		color = (contour_count > 1) ? CV_RGB(255, 0, 0) : CV_RGB(0, 255, 0);
		cvRectangleR(img_final_color, result->match_rects[0], color, 2, 8, 0);

		catcierge_haar_matcher_save_step_image(ctx,
			img_final_color, result, "final", "Final image", save_steps);

		cvReleaseImage(&img_contour);
		cvReleaseImage(&img_final_color);
	}

	cvReleaseImage(&inv_adpthr_img);
	cvReleaseImage(&inv_combined);
	cvReleaseImage(&open_combined);
	cvReleaseImage(&dilate_combined);

	return (contour_count > 1);
}
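// catcierge_haar_matcher_count_contours is not included in this excerpt.
// Judging from its use above ("more than 1 contour" means prey), it
// presumably walks the contour list and skips tiny noise contours. A
// hypothetical sketch under that assumption:
static size_t catcierge_count_contours_sketch(CvSeq *contours, double min_area)
{
	size_t count = 0;
	CvSeq *c;

	for (c = contours; c != NULL; c = c->h_next)
	{
		// Ignore specks below the minimum area so noise is not counted as prey.
		if (fabs(cvContourArea(c, CV_WHOLE_SEQ, 0)) >= min_area)
			count++;
	}

	return count;
}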
int WindscreenLocator::locateLeftRight()
{
    CvRect roiRect = cvRect(0, imgGradH->height / 3, imgGradH->width, imgGradH->height * 0.9 - imgGradH->height/3);

    thresholdGrad = max(averageGrad, 1.0 ) * 2.5;

    cvSetImageROI(imgGradV, roiRect);
    histoStat(imgGradV, vertical2X, NULL, thresholdGrad, 0);
    cvResetImageROI(imgGradV);

    cvSetImageROI(imgGradH, roiRect);
    histoStat(imgGradH, horizon2X, NULL, thresholdGrad, 0);
    cvResetImageROI(imgGradH);


    int margin = 0.04 * imgGrad->width;
    memset(vertical2X, 0, sizeof(double) * margin);
    memset(vertical2X + imgGrad->width - margin, 0, sizeof(double) * margin);
    histoSmooth(vertical2X, imgGrad->width, 5);
    histoSmooth(vertical2X, imgGrad->width, 10);
    histoSmooth(vertical2X, imgGrad->width, 20);

    histoSmooth(horizon2X, imgGrad->width, 5);
    histoSmooth(horizon2X, imgGrad->width, 10);
    histoSmooth(horizon2X, imgGrad->width, 40);

    // Compute the left and right boundaries of the vehicle
    int topList[10];
    int topNr = 3;
    histoCrest(vertical2X, imgGrad->width, topList, topNr);

    if(!plate->isValid()){
        if(topNr >= 2){
            int a = min(topList[0], topList[1]);
            int b = max(topList[0], topList[1]);
            pdebug("horizon=%f\n", horizon2X[(b+a) / 2]);
            pdebug("bike Width = %d, lpWIdth=%f\n", b - a, plate->defaultWidth *2);
            double f = isDay(imgGray) ? 0.09 : 0.07;
            if(horizon2X[(b+a) / 2] > imgRGB->height * (0.9 - 1.0 / 3) * f){

            }else if((double)(b - a) < plate->defaultWidth * 2){
                if(b - a < plate->defaultWidth * 1.5){
                    int ext = -(b - a - plate->defaultWidth * 1.5) / 2;
                    a = max((a - ext), 0);
                    b = min((b + ext), (imgGrad->width - 1));
                }
                if(existCrest(horizon2X, imgGrad->width, a, b, 0.9) != 0){
                    return -1;
                }
            }else if((double)(b - a) < plate->defaultWidth * 3.2 && b < imgRGB->width * 0.7){
                if(existCrest(horizon2X, imgGrad->width, a, b, 0.8) != 0){
                    CvRect rect;
                    double shrunkRatio = imgRGB->width / double(imgOrigin->width);
                    rect.x = a;
                    rect.width = b - a;
                    rect.y = imgRGB->height * 0.25;
                    rect.height = imgRGB->height * (0.3 - 0.25);
                    rect = rectScale(rect, 1 / shrunkRatio, cvSize(imgOrigin->width, imgOrigin->height));
                    vector<FaceInfo> faceVec;
                    faceDetector->detectTriWheelMotor(imgOrigin, rect, plate->defaultWidth, faceVec);
                    if(faceVec.size() < 1){
                        return -1;
                    }
                    bool isTriWheel = true;
                    for(size_t i = 0; i < faceVec.size(); i++){
                        int mid = faceVec[i].getMiddlePoint().x;
                        if(mid < rect.x + 0.4 * rect.width || mid > rect.x + 0.6 * rect.width){
                            isTriWheel = false;
                            pdebug("face pos=%d %d\n", faceVec[i].getMiddlePoint().x, faceVec[i].getMiddlePoint().y);
                            break;
                        }
                    }
                    if(isTriWheel)
                        return -1;
                }
            }
        }
    }

    int intervalA;
    int intervalB;

    histoSegment(horizon2X, imgGrad->width, histoSegmentRatio(averageGrad), intervalA, intervalB);
    judgeVerticalBorder(imgGrad->width, topList, topNr, intervalA, intervalB, winLeft, winRight);
    return 0;
}
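// histoSmooth is applied above in several passes with growing window sizes,
// but its definition is not part of this excerpt. A plausible sketch is a
// simple in-place moving-average (boxcar) smoother over the 1-D histogram:
static void histoSmooth_sketch(double* histo, int len, int radius)
{
    double* tmp = (double*)malloc(sizeof(double) * len);
    if(!tmp)
        return;

    for(int i = 0; i < len; i++){
        double sum = 0;
        int n = 0;
        // Average the bins inside [i - radius, i + radius], clipped to range.
        for(int j = i - radius; j <= i + radius; j++){
            if(j >= 0 && j < len){
                sum += histo[j];
                n++;
            }
        }
        tmp[i] = sum / n;
    }

    memcpy(histo, tmp, sizeof(double) * len);
    free(tmp);
}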
Exemple #16
0
int main(int argc,char ** argv){

	int key;
	int w,h;
	IplImage* img,*img1;
	CvCapture* capture = NULL;
	CvCapture* cap2=NULL;
	CvRect  pr;
	CvRect * r;
	w=150;
	h=100;
	int start=100;
	if(NULL==(capture = cvCaptureFromCAM(0))){
		printf("\nError on cvCaptureFromCAM");
		return -1;
	}
	
	/*if(NULL==(cap2 = cvCaptureFromAVI("../TestingSData/infile.ogv"))){
		printf("\nError on cvCaptureFromCAM");
		return -1;
	}*/
	cvNamedWindow("Capture", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("Captured",CV_WINDOW_AUTOSIZE);
	cvMoveWindow("Capture", 550, 250);
	cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH,640);
	cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT,480);
	for(;;){
		if(NULL==(img=cvQueryFrame(capture))){
			printf("\nError on cvQueryFrame");
		break;
		}
		pr=cvRect(start,start,w,h); // cvRect takes (x, y, width, height)
		r=&pr;
		

		img1=cvCreateImage(cvSize(r->width,r->height),img->depth,img->nChannels);
		cvSetImageROI(img,*r);
		cvCopy(img,img1,NULL);
		cvResetImageROI(img);
		
		cvRectangle(
					img,
					cvPoint(r->x,r->y),
					cvPoint(r->x+r->width,r->y+r->height),
					colors[0]
		);
		cvShowImage("Capture",img);
		cvShowImage("Captured",img1);

		key = cvWaitKey(100);

		if((key&0x7f)==27)
			break;
	}
	fprintf(stderr,"successful exit\n");

	IplImage* skin=cvCreateImage( cvGetSize(img1), img1->depth,3);
	cvCvtColor(img1,skin,CV_BGR2HSV);
	
	for(;;){
		if(NULL==(img=cvQueryFrame(capture))){
			printf("\nError on cvQueryFrame");
		break;
		}
		pr=cvRect(start,start,w,h); // cvRect takes (x, y, width, height)
		r=&pr;
		

		img1=cvCreateImage(cvSize(r->width,r->height),img->depth,img->nChannels);
		cvSetImageROI(img,*r);
		cvCopy(img,img1,NULL);
		cvResetImageROI(img);
		
		cvRectangle(
					img,
					cvPoint(r->x,r->y),
					cvPoint(r->x+r->width,r->y+r->height),
					colors[0]
		);
		cvShowImage("Capture",img);
		cvShowImage("Captured",img1);

		key = cvWaitKey(100);

		if((key&0x7f)==27)
			break;
	}
	fprintf(stderr,"successful exit\n");

	IplImage* background=cvCreateImage( cvGetSize(img1), img1->depth,3);
	cvCvtColor(img1,background,CV_BGR2HSV);
	IplImage* h_plane = cvCreateImage( cvGetSize(img1), 8, 1 );
	IplImage* s_plane = cvCreateImage( cvGetSize(img1), 8, 1 );
	IplImage* s2_plane = cvCreateImage( cvGetSize(background), 8, 1 );
	IplImage* h2_plane = cvCreateImage( cvGetSize(background), 8, 1 );
	IplImage* v_plane = cvCreateImage( cvGetSize(img1), 8, 1 );
	//IplImage* planes[] = { h_plane, s_plane };
	//IplImage* planes2[] = { h2_plane, s2_plane };
	cvCvtPixToPlane( skin, h_plane, s_plane, v_plane, 0 );
	cvCvtPixToPlane( background, h2_plane, s2_plane, v_plane, 0 );

	CvHistogram *hist_skins,*hist_skinh, *hist_backs,*hist_backh;
	int h_bins = 181, s_bins = 256;

	{
		int hist_size[] = { h_bins, s_bins };
		float h_ranges[] = { 0, 180 }; // hue is [0,180]
		float s_ranges[] = { 0, 255 };
		float* ranges[] = { h_ranges, s_ranges };
		hist_skinh = cvCreateHist(
				1,
				&hist_size[0],
				CV_HIST_ARRAY,
				&ranges[0],
				1
			);
		hist_skins = cvCreateHist(
				1,
				&hist_size[1],
				CV_HIST_ARRAY,
				&ranges[1],
				1
			);
		hist_backh = cvCreateHist(
				1,
				&hist_size[0],
				CV_HIST_ARRAY,
				&ranges[0],
				1
			);
		hist_backs = cvCreateHist(
				1,
				&hist_size[1],
				CV_HIST_ARRAY,
				&ranges[1],
				1
			);
	}	
	cvCalcHist( &h_plane, hist_skinh, 0, 0 );
	cvCalcHist( &s_plane, hist_skins, 0, 0 );
	cvCalcHist( &h2_plane, hist_backh, 0, 0 );
	cvCalcHist( &s2_plane, hist_backs, 0, 0 );

	float min[4],max[4];
	int min_indx[4],max_indx[4];
	/*//cvNormalizeHist( hist_skinh, 1.0 );
	cvGetMinMaxHistValue( hist_skinh, &min[0], &max[0], &min_indx[0], &max_indx[0] );
	//cvNormalizeHist( hist_skins, 1.0 );
	cvGetMinMaxHistValue( hist_skins, &min[1], &max[1], &min_indx[1], &max_indx[1] );
	//cvNormalizeHist( hist_backh, 1.0 );
	cvGetMinMaxHistValue( hist_backh, &min[2], &max[2], &min_indx[2], &max_indx[2] );
	//cvNormalizeHist( hist_backs, 1.0 );
	cvGetMinMaxHistValue( hist_backs, &min[3], &max[3], &min_indx[3], &max_indx[3] );
	*/
	int hskin[2];
	int hback[2];
	int sskin[2];
	int sback[2];

	range_finder(hskin,0, 180,hist_skinh,w*h,0.0001);
	range_finder(sskin,0, 255,hist_skins,w*h,0.01);
	range_finder(hback,0, 180,hist_backh,w*h,0.01);
	range_finder(sback,0, 255,hist_backs,w*h,0.01);

	fprintf(stderr,"hskin min=%d max=%d\n",hskin[0],hskin[1]);
	fprintf(stderr,"sskin min=%d max=%d\n",sskin[0],sskin[1]);
	fprintf(stderr,"hback min=%d max=%d\n",hback[0],hback[1]);
	fprintf(stderr,"sback min=%d max=%d\n",sback[0],sback[1]);

	int hrange[2],srange[2];

	decision(hrange,hskin[0], hskin[1],hback[0],hback[1]);
	decision(srange,sskin[0], sskin[1],sback[0],sback[1]);
	//decide on thresholds
	
	fprintf(stderr,"hmin=%d hmax=%d\n",hrange[0],hrange[1]);
	fprintf(stderr,"smin=%d smax=%d\n",srange[0],srange[1]);

	
	for(;;){
		if(NULL==(img=cvQueryFrame(capture))){
			printf("\nError on cvQueryFrame");
		break;
		}
		
// h-0 179 s-61 227
		cvReleaseImage(&img1);
		img1=binary_threshold_hsl(img,0,179,srange[0],srange[1]);

		cvShowImage("Capture",img);
		cvShowImage("Captured",img1);

		key = cvWaitKey(50);

		if((key&0x7f)==27)
			break;
	}

		cvReleaseImage(&img1);
		char * filename = "../TestingSData/11-1.png";
		img = cvLoadImage( filename );
		img1=binary_threshold_hsl(img,0,179,61,227);
		IplImage* img2=cvCreateImage(cvSize(img1->width,img1->height),img1->depth,img1->nChannels);

		IplConvKernel * myKernel= cvCreateStructuringElementEx(8,8,0,0,CV_SHAPE_RECT,NULL);
	// the (0,0) anchor puts the structuring element's origin at its top-left corner
		cvDilate(img1,img2,myKernel,1); 

		cvShowImage("Capture",img);
		cvShowImage("Captured",img1);
		cvNamedWindow("dilated",CV_WINDOW_AUTOSIZE);
		cvShowImage("dilated",img2);

		key = cvWaitKey(0);

	cvReleaseCapture(&capture);
	cvDestroyWindow("Capture");

	cvReleaseImage( &img );

	exit(0);
	return 0;
}
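// range_finder and decision are not part of this example. From the call
// sites above, range_finder apparently returns, through out[0] and out[1],
// the [min, max] bin interval that remains after trimming a given fraction
// of the total pixel mass from each tail of the histogram. A hypothetical
// sketch:
void range_finder_sketch(int out[2], int lo, int hi,
		CvHistogram* hist, int total, double frac)
{
	double limit = frac * total;
	double acc = 0;
	int i;

	// Walk up from the low end until the accumulated tail exceeds the limit.
	for(i = lo; i <= hi; i++){
		acc += cvQueryHistValue_1D(hist, i);
		if(acc > limit)
			break;
	}
	out[0] = i;

	// Walk down from the high end the same way.
	acc = 0;
	for(i = hi; i >= lo; i--){
		acc += cvQueryHistValue_1D(hist, i);
		if(acc > limit)
			break;
	}
	out[1] = i;
}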
void WindscreenLocator::locateBottomTop2()
{
    int margin = (winRight - winLeft) * 0.22;
    int height = winBottom - winTop;

    cvSetImageROI(imgRGB, cvRect(winLeft + margin, winTop, winRight - winLeft - margin * 2, height));

    double* vertical2Y = (double*)malloc(sizeof(double) * height);
    if(!vertical2Y){
        fprintf(stderr, "malloc failed\n");
        fflush(stderr);
        exit(-1);
    }
    memset(vertical2Y, 0, sizeof(double) * height);
    IplImage* ver;
    IplImage* hor;
    IplImage* sla;
    IplImage* res;
    diagonal_origin(imgRGB, hor, ver, sla, res);

    double thres = statThreshold(ver);
    histoStat(ver, NULL, vertical2Y, thres * 0.65, 0);
    cvResetImageROI(imgRGB);

    histoSmooth(vertical2Y, height, 5);
    histoSmooth(vertical2Y, height, 10);
    histoSmooth(vertical2Y, height, 20);

    int intervalA = 0;
    int intervalB = height - 1;
    histoSegment(vertical2Y, height, 0.5, intervalA, intervalB, 0.7);
    vertical2Y[intervalA] = 100;
    vertical2Y[intervalB] = 80;

    cvReleaseImage(&ver);
    cvReleaseImage(&hor);
    cvReleaseImage(&sla);
    cvReleaseImage(&res);
    free(vertical2Y);
    if((plate->isValid() && (plate->getMiddlePoint().x > winLeft
                    && plate->getMiddlePoint().x < winRight))
            || reviser != NULL){
        int plateWidth = 35;
        if(plate->isValid())
            plateWidth= plate->getPlateWidth();
        if(reviser && reviser->getPlateWidth() > 0)
            plateWidth = reviser->getPlateWidth();
        if(winRight - winLeft < 4.5 * plateWidth){
            if(intervalA != 0 && intervalB - intervalA > plateWidth * 0.65){
                pdebug("[%d %d]\n", intervalA, intervalB);
                winBottom = intervalB + winTop;
                winTop = intervalA + winTop;
//				int newHeight = intervalB - intervalA;
                int w = winRight - winLeft;
                winBottom += w * 0.05;
                winTop -= w * 0.14;
            }
        }
    }
    winBottom = std::max(0, winBottom);
    winBottom = std::min(imgRGB->height - 1, winBottom);

    winTop = std::min(winTop, winBottom - 1);
    winTop = std::max(0, winTop);
}
static GstFlowReturn
kms_crowd_detector_transform_frame_ip (GstVideoFilter * filter,
    GstVideoFrame * frame)
{
  KmsCrowdDetector *crowddetector = KMS_CROWD_DETECTOR (filter);
  GstMapInfo info;

  kms_crowd_detector_initialize_images (crowddetector, frame);
  if ((crowddetector->priv->num_rois == 0)
      && (crowddetector->priv->rois != NULL)) {
    kms_crowd_detector_extract_rois (crowddetector);
  }
  if (crowddetector->priv->pixels_rois_counted == TRUE &&
      crowddetector->priv->actual_image != NULL) {
    kms_crowd_detector_count_num_pixels_rois (crowddetector);
    crowddetector->priv->pixels_rois_counted = FALSE;
  }
  gst_buffer_map (frame->buffer, &info, GST_MAP_READ);
  crowddetector->priv->actual_image->imageData = (char *) info.data;

  IplImage *frame_actual_gray =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 1);

  cvZero (frame_actual_gray);

  IplImage *actual_lbp =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 1);

  cvZero (actual_lbp);

  IplImage *lbp_temporal_result =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 1);

  cvZero (lbp_temporal_result);

  IplImage *add_lbps_result =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 1);

  cvZero (add_lbps_result);

  IplImage *lbps_alpha_result_rgb =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 3);

  cvSet (lbps_alpha_result_rgb, CV_RGB (0, 0, 0), 0);

  IplImage *actual_image_masked =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height), IPL_DEPTH_8U, 1);

  cvZero (actual_image_masked);

  IplImage *substract_background_to_actual =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 1);

  cvZero (substract_background_to_actual);

  IplImage *low_speed_map =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 1);

  cvZero (low_speed_map);

  IplImage *high_speed_map =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 1);

  cvZero (high_speed_map);

  IplImage *actual_motion =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 3);

  cvSet (actual_motion, CV_RGB (0, 0, 0), 0);

  IplImage *binary_actual_motion =
      cvCreateImage (cvSize (crowddetector->priv->actual_image->width,
          crowddetector->priv->actual_image->height),
      IPL_DEPTH_8U, 1);

  cvZero (binary_actual_motion);

  uint8_t *low_speed_pointer;
  uint8_t *low_speed_pointer_aux;
  uint8_t *high_speed_pointer;
  uint8_t *high_speed_pointer_aux;
  uint8_t *actual_motion_pointer;
  uint8_t *actual_motion_pointer_aux;
  uint8_t *binary_actual_motion_pointer;
  uint8_t *binary_actual_motion_pointer_aux;

  int w, h;

  if (crowddetector->priv->num_rois != 0) {
    cvFillPoly (actual_image_masked, crowddetector->priv->curves,
        crowddetector->priv->n_points, crowddetector->priv->num_rois,
        cvScalar (255, 255, 255, 0), CV_AA, 0);
  }
  cvCvtColor (crowddetector->priv->actual_image, frame_actual_gray,
      CV_BGR2GRAY);
  kms_crowd_detector_mask_image (frame_actual_gray, actual_image_masked, 0);

  if (crowddetector->priv->background == NULL) {
    /* First frame: seed the background model. */
    crowddetector->priv->background = cvCloneImage (frame_actual_gray);
  } else {
    cvAddWeighted (crowddetector->priv->background, BACKGROUND_ADD_RATIO,
        frame_actual_gray, 1 - BACKGROUND_ADD_RATIO, 0,
        crowddetector->priv->background);
  }

  kms_crowd_detector_compute_temporal_lbp (frame_actual_gray, actual_lbp,
      actual_lbp, FALSE);
  kms_crowd_detector_compute_temporal_lbp (frame_actual_gray,
      lbp_temporal_result, crowddetector->priv->frame_previous_gray, TRUE);
  cvAddWeighted (crowddetector->priv->previous_lbp, LBPS_ADD_RATIO, actual_lbp,
      (1 - LBPS_ADD_RATIO), 0, add_lbps_result);
  cvSub (crowddetector->priv->previous_lbp, actual_lbp, add_lbps_result, 0);
  cvThreshold (add_lbps_result, add_lbps_result, 70.0, 255.0, CV_THRESH_OTSU);
  cvNot (add_lbps_result, add_lbps_result);
  cvErode (add_lbps_result, add_lbps_result, 0, 4);
  cvDilate (add_lbps_result, add_lbps_result, 0, 11);
  cvErode (add_lbps_result, add_lbps_result, 0, 3);
  cvCvtColor (add_lbps_result, lbps_alpha_result_rgb, CV_GRAY2BGR);
  cvCopy (actual_lbp, crowddetector->priv->previous_lbp, 0);
  cvCopy (frame_actual_gray, crowddetector->priv->frame_previous_gray, 0);

  if (crowddetector->priv->acumulated_lbp == NULL) {
    /* First frame: seed the accumulator with the current result. */
    crowddetector->priv->acumulated_lbp = cvCloneImage (add_lbps_result);
  } else {
    cvAddWeighted (crowddetector->priv->acumulated_lbp, TEMPORAL_LBPS_ADD_RATIO,
        add_lbps_result, 1 - TEMPORAL_LBPS_ADD_RATIO, 0,
        crowddetector->priv->acumulated_lbp);
  }

  cvThreshold (crowddetector->priv->acumulated_lbp, high_speed_map,
      150.0, 255.0, CV_THRESH_BINARY);
  cvSmooth (high_speed_map, high_speed_map, CV_MEDIAN, 3, 0, 0.0, 0.0);
  kms_crowd_detector_substract_background (frame_actual_gray,
      crowddetector->priv->background, substract_background_to_actual);
  cvThreshold (substract_background_to_actual, substract_background_to_actual,
      70.0, 255.0, CV_THRESH_OTSU);

  cvCanny (substract_background_to_actual,
      substract_background_to_actual, 70.0, 150.0, 3);

  if (crowddetector->priv->acumulated_edges == NULL) {
    /* First frame: seed the accumulator with the current edges. */
    crowddetector->priv->acumulated_edges =
        cvCloneImage (substract_background_to_actual);
  } else {
    cvAddWeighted (crowddetector->priv->acumulated_edges, EDGES_ADD_RATIO,
        substract_background_to_actual, 1 - EDGES_ADD_RATIO, 0,
        crowddetector->priv->acumulated_edges);
  }

  kms_crowd_detector_process_edges_image (crowddetector, low_speed_map, 3);
  cvErode (low_speed_map, low_speed_map, 0, 1);

  low_speed_pointer = (uint8_t *) low_speed_map->imageData;
  high_speed_pointer = (uint8_t *) high_speed_map->imageData;
  actual_motion_pointer = (uint8_t *) actual_motion->imageData;
  binary_actual_motion_pointer = (uint8_t *) binary_actual_motion->imageData;

  for (h = 0; h < low_speed_map->height; h++) {
    low_speed_pointer_aux = low_speed_pointer;
    high_speed_pointer_aux = high_speed_pointer;
    actual_motion_pointer_aux = actual_motion_pointer;
    binary_actual_motion_pointer_aux = binary_actual_motion_pointer;
    for (w = 0; w < low_speed_map->width; w++) {
      if (*high_speed_pointer_aux == 0) {
        actual_motion_pointer_aux[0] = 255;
        binary_actual_motion_pointer_aux[0] = 255;
      }
      if (*low_speed_pointer_aux == 255) {
        *actual_motion_pointer_aux = 0;
        actual_motion_pointer_aux[2] = 255;
        binary_actual_motion_pointer_aux[0] = 255;
      } else if (*high_speed_pointer_aux == 0) {
        actual_motion_pointer_aux[0] = 255;
      }
      low_speed_pointer_aux++;
      high_speed_pointer_aux++;
      actual_motion_pointer_aux = actual_motion_pointer_aux + 3;
      binary_actual_motion_pointer_aux++;
    }
    low_speed_pointer += low_speed_map->widthStep;
    high_speed_pointer += high_speed_map->widthStep;
    actual_motion_pointer += actual_motion->widthStep;
    binary_actual_motion_pointer += binary_actual_motion->widthStep;
  }

  int curve;

  for (curve = 0; curve < crowddetector->priv->num_rois; curve++) {

    if (crowddetector->priv->rois_data[curve].send_optical_flow_event == TRUE) {

      CvRect container =
          kms_crowd_detector_get_square_roi_contaniner (crowddetector, curve);

      cvSetImageROI (crowddetector->priv->actual_image, container);
      cvSetImageROI (crowddetector->priv->previous_image, container);
      cvSetImageROI (actual_motion, container);

      kms_crowd_detector_compute_optical_flow (crowddetector,
          binary_actual_motion, container, curve);

      cvResetImageROI (crowddetector->priv->actual_image);
      cvResetImageROI (crowddetector->priv->previous_image);
    }
  }

  {
    uint8_t *orig_row_pointer =
        (uint8_t *) crowddetector->priv->actual_image->imageData;
    uint8_t *overlay_row_pointer = (uint8_t *) actual_motion->imageData;

    for (h = 0; h < crowddetector->priv->actual_image->height; h++) {
      uint8_t *orig_column_pointer = orig_row_pointer;
      uint8_t *overlay_column_pointer = overlay_row_pointer;

      for (w = 0; w < crowddetector->priv->actual_image->width; w++) {
        int c;

        for (c = 0; c < crowddetector->priv->actual_image->nChannels; c++) {
          if (overlay_column_pointer[c] != 0) {
            orig_column_pointer[c] = overlay_column_pointer[c];
          }
        }

        orig_column_pointer += crowddetector->priv->actual_image->nChannels;
        overlay_column_pointer += actual_motion->nChannels;
      }
      orig_row_pointer += crowddetector->priv->actual_image->widthStep;
      overlay_row_pointer += actual_motion->widthStep;
    }
  }

  if (crowddetector->priv->num_rois != 0) {
    cvPolyLine (crowddetector->priv->actual_image, crowddetector->priv->curves,
        crowddetector->priv->n_points, crowddetector->priv->num_rois, 1,
        cvScalar (255, 255, 255, 0), 1, 8, 0);
  }

  cvNot (high_speed_map, high_speed_map);
  kms_crowd_detector_roi_analysis (crowddetector, low_speed_map,
      high_speed_map);

  cvReleaseImage (&frame_actual_gray);
  cvReleaseImage (&actual_lbp);
  cvReleaseImage (&lbp_temporal_result);
  cvReleaseImage (&add_lbps_result);
  cvReleaseImage (&lbps_alpha_result_rgb);
  cvReleaseImage (&actual_image_masked);
  cvReleaseImage (&substract_background_to_actual);
  cvReleaseImage (&low_speed_map);
  cvReleaseImage (&high_speed_map);
  cvReleaseImage (&actual_motion);
  cvReleaseImage (&binary_actual_motion);

  gst_buffer_unmap (frame->buffer, &info);

  return GST_FLOW_OK;
}
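/* The kms_crowd_detector_compute_temporal_lbp helper used above is defined
 * elsewhere in the element. For reference, a hypothetical sketch of a plain
 * 3x3 local binary pattern pass over an 8-bit gray image (eight
 * neighbor-vs-center comparisons packed into one byte) looks like this: */
static void
lbp_sketch (IplImage * gray, IplImage * lbp)
{
  static const int dx[8] = { -1, 0, 1, 1, 1, 0, -1, -1 };
  static const int dy[8] = { -1, -1, -1, 0, 1, 1, 1, 0 };
  int x, y, k;

  cvZero (lbp);

  for (y = 1; y < gray->height - 1; y++) {
    for (x = 1; x < gray->width - 1; x++) {
      uint8_t center =
          ((uint8_t *) gray->imageData)[y * gray->widthStep + x];
      uint8_t code = 0;

      /* Pack the eight neighbor comparisons into one byte. */
      for (k = 0; k < 8; k++) {
        uint8_t neighbor = ((uint8_t *) gray->imageData)
            [(y + dy[k]) * gray->widthStep + (x + dx[k])];
        code |= (neighbor >= center) << k;
      }
      ((uint8_t *) lbp->imageData)[y * lbp->widthStep + x] = code;
    }
  }
}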
Exemple #19
0
feature::vec2DFloat feature::calFeatureVec(IplImage* eachImage, bool debug)
{
    auto imgVec = analyse::process(eachImage, debug);

    //Prepare the images: whole-cell BGR and gray, a gray copy for contour analysis, and nucleus BGR and gray
    IplImage* wholeCell = imgVec[0];
    IplImage* wholeCellb = cvCreateImage(cvGetSize(wholeCell), 8, 1);
    IplImage* wholeCell4Contour= cvCreateImage(cvGetSize(wholeCell), 8, 1);
    IplImage* nuclei = imgVec[1];
    IplImage* nucleib = cvCreateImage(cvGetSize(nuclei), 8, 1);

    cvCvtColor(wholeCell, wholeCellb, CV_BGR2GRAY);
    cvCopy(wholeCellb, wholeCell4Contour);
    cvCvtColor(nuclei, nucleib, CV_BGR2GRAY);

//    analyse::showImg(wholeCell, "wholeCell");
//    analyse::showImg(nuclei, "nuclei");
//    cvWaitKey(0);


    //Return value: an n x dimension feature matrix
    feature::vec2DFloat features;

    //Run contour analysis on the whole-cell gray image to find each cell, set an ROI on it, then keep processing
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* contours = 0;
    cvFindContours(wholeCell4Contour, storage, &contours, sizeof(CvContour), CV_RETR_LIST);
    if (contours) {
        CvSeq* c = contours;
        //int i = 0;
        for (; c != NULL; c = c->h_next) {
            if (cvContourArea(c) < 2000) continue;
            //qDebug("i = %d, area = %f, perimeter = %f", ++i, cvContourArea(c), cvArcLength(c));

            //Set up the ROIs
            CvRect rect = cvBoundingRect(c,0);
            cvSetImageROI(wholeCell, rect);
            cvSetImageROI(wholeCellb, rect);
            cvSetImageROI(nuclei, rect);
            cvSetImageROI(nucleib, rect);
            //Allocate the temporary images and copy the ROI contents
            IplImage* temp1 = cvCreateImage(cvGetSize(nuclei), 8, 3);//nucleus BGR
            IplImage* temp2 = cvCreateImage(cvGetSize(nucleib), 8, 1);//nucleus gray
            IplImage* temp22 = cvCreateImage(cvGetSize(nucleib), 8, 1);//nucleus gray
            IplImage* temp3 = cvCreateImage(cvGetSize(wholeCell), 8, 3);//whole-cell BGR
            IplImage* temp4 = cvCreateImage(cvGetSize(wholeCellb), 8, 1);//whole-cell gray
            IplImage* temp5 = cvCreateImage(cvGetSize(wholeCell), 8, 3);//cytoplasm BGR
            cvCopy(nuclei, temp1);
            cvCopy(nucleib, temp2);
            cvCopy(nucleib, temp22);
            cvCopy(wholeCell, temp3);
            cvCopy(wholeCellb, temp4);
            //Cytoplasm image (whole cell minus nucleus)
            cvSub(temp3, temp1, temp5);

            //Reset the ROIs
            cvResetImageROI(wholeCell);
            cvResetImageROI(wholeCellb);
            cvResetImageROI(nuclei);
            cvResetImageROI(nucleib);

            //Extract the features from the tempX images
            auto t1 = feature::meanRgb(temp1);
            auto t3 = feature::meanRgb(temp3);
            auto t5 = feature::meanRgb(temp5);

            auto t2 = feature::getPAR(temp2, 2);//4-d
            auto t4 = feature::getPAR(temp4, 4);//3-d
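            //NOTE: both operands of t6 below are t2.at(1), so t6 always
            //evaluates to {0, 1}; judging from the cytoplasm-area check
            //further down, two different area values were probably intended.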
            auto t6 = vector<float> ({t2.at(1) - t2.at(1), t2.at(1) / t2.at(1)});
            auto t22 = feature::getLBP(temp22);

            //A nucleus area of 0 means the ROI was set up wrong; skip this cell and discard its features
            if (t2.at(1) == 0) {
                continue;
            }

//Debug mode: display the images
            if (debug == true)  {
                analyse::showImg(temp1, "Nuclei-bgr");
                analyse::showImg(temp3, "Cell-bgr");
                analyse::showImg(temp5, "Cryto-bgr");
                cvWaitKey(waitTime * 2);
             }

            //If the cytoplasm area is 0 there is no cytoplasm, so the BGR components need separate handling
            vector<float> eachFeature;
            auto insertItr = std::back_inserter(eachFeature);
            move(t2.begin(), t2.end(), insertItr);
            move(t1.begin(), t1.end(), insertItr);
            move(t4.begin(), t4.end(), insertItr);
            move(t3.begin(), t3.end(), insertItr);

            if (t6.at(0) == 0) {
                eachFeature.push_back(0);
                eachFeature.push_back(0);
                eachFeature.push_back(0);
            }
            else {
                move(t5.begin(), t5.end(), insertItr);
//                eachFeature << t2.join("'") << t1.join("'") << t4.join("'") << t3.join("'") << t5.join("'") << t6.join("'") << t22.join("'");
            }

            move(t6.begin(), t6.end(), insertItr);
            move(t22.begin(), t22.end(), insertItr);

            //Append this cell's features to the image-level feature set
            features.push_back(eachFeature);

//            analyse::showImg(temp1, "temp1");
//            analyse::showImg(temp3, "temp3");
//            if (cvWaitKey(0)== 27) break;


            cvReleaseImage(&temp1);
            cvReleaseImage(&temp2);
            cvReleaseImage(&temp22);
            cvReleaseImage(&temp3);
            cvReleaseImage(&temp4);
            cvReleaseImage(&temp5);
        }
    }
    cvReleaseMemStorage(&storage);
    cvReleaseImage(&wholeCell);
    cvReleaseImage(&wholeCellb);
    cvReleaseImage(&wholeCell4Contour);
    cvReleaseImage(&nuclei);
    cvReleaseImage(&nucleib);

    return features;

}
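//feature::meanRgb is one of the helpers assumed above. A hypothetical
//sketch returning the per-channel mean of a BGR image as {B, G, R}; the
//real helper may instead mask out the black background before averaging:
vector<float> meanRgb_sketch(IplImage* bgr)
{
    CvScalar mean = cvAvg(bgr, NULL);   //mean of each channel over the whole image
    return vector<float> { (float)mean.val[0],    //B
                           (float)mean.val[1],    //G
                           (float)mean.val[2] };  //R
}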
/* 
 * Performs the face detection
 */
static GstFlowReturn
gst_face_detect_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
    IplImage * img)
{
  GstFaceDetect *filter = GST_FACE_DETECT (base);

  if (filter->cvFaceDetect) {
    GstMessage *msg = NULL;
    GstStructure *s;
    GValue facelist = { 0 };
    GValue facedata = { 0 };
    CvSeq *faces;
    CvSeq *mouth = NULL, *nose = NULL, *eyes = NULL;
    gint i;
    gboolean do_display = FALSE;

    if (filter->display) {
      if (gst_buffer_is_writable (buf)) {
        do_display = TRUE;
      } else {
        GST_LOG_OBJECT (filter, "Buffer is not writable, not drawing faces.");
      }
    }

    cvCvtColor (img, filter->cvGray, CV_RGB2GRAY);
    cvClearMemStorage (filter->cvStorage);

    faces = gst_face_detect_run_detector (filter, filter->cvFaceDetect,
        filter->min_size_width, filter->min_size_height);

    msg = gst_face_detect_message_new (filter, buf);
    g_value_init (&facelist, GST_TYPE_LIST);

    for (i = 0; i < (faces ? faces->total : 0); i++) {
      CvRect *r = (CvRect *) cvGetSeqElem (faces, i);
      guint mw = filter->min_size_width / 8;
      guint mh = filter->min_size_height / 8;
      guint rnx = 0, rny = 0, rnw, rnh;
      guint rmx = 0, rmy = 0, rmw, rmh;
      guint rex = 0, rey = 0, rew, reh;
      gboolean have_nose, have_mouth, have_eyes;

      /* detect face features */

      if (filter->cvNoseDetect) {
        rnx = r->x + r->width / 4;
        rny = r->y + r->height / 4;
        rnw = r->width / 2;
        rnh = r->height / 2;
        cvSetImageROI (filter->cvGray, cvRect (rnx, rny, rnw, rnh));
        nose =
            gst_face_detect_run_detector (filter, filter->cvNoseDetect, mw, mh);
        have_nose = (nose && nose->total);
        cvResetImageROI (filter->cvGray);
      } else {
        have_nose = FALSE;
      }

      if (filter->cvMouthDetect) {
        rmx = r->x;
        rmy = r->y + r->height / 2;
        rmw = r->width;
        rmh = r->height / 2;
        cvSetImageROI (filter->cvGray, cvRect (rmx, rmy, rmw, rmh));
        mouth =
            gst_face_detect_run_detector (filter, filter->cvMouthDetect, mw,
            mh);
        have_mouth = (mouth && mouth->total);
        cvResetImageROI (filter->cvGray);
      } else {
        have_mouth = FALSE;
      }

      if (filter->cvEyesDetect) {
        rex = r->x;
        rey = r->y;
        rew = r->width;
        reh = r->height / 2;
        cvSetImageROI (filter->cvGray, cvRect (rex, rey, rew, reh));
        eyes =
            gst_face_detect_run_detector (filter, filter->cvEyesDetect, mw, mh);
        have_eyes = (eyes && eyes->total);
        cvResetImageROI (filter->cvGray);
      } else {
        have_eyes = FALSE;
      }

      GST_LOG_OBJECT (filter,
          "%2d/%2d: x,y = %4u,%4u: w.h = %4u,%4u : features(e,n,m) = %d,%d,%d",
          i, faces->total, r->x, r->y, r->width, r->height,
          have_eyes, have_nose, have_mouth);

      s = gst_structure_new ("face",
          "x", G_TYPE_UINT, r->x,
          "y", G_TYPE_UINT, r->y,
          "width", G_TYPE_UINT, r->width,
          "height", G_TYPE_UINT, r->height, NULL);
      if (have_nose) {
        CvRect *sr = (CvRect *) cvGetSeqElem (nose, 0);
        GST_LOG_OBJECT (filter, "nose/%d: x,y = %4u,%4u: w.h = %4u,%4u",
            nose->total, rnx + sr->x, rny + sr->y, sr->width, sr->height);
        gst_structure_set (s,
            "nose->x", G_TYPE_UINT, rnx + sr->x,
            "nose->y", G_TYPE_UINT, rny + sr->y,
            "nose->width", G_TYPE_UINT, sr->width,
            "nose->height", G_TYPE_UINT, sr->height, NULL);
      }
      if (have_mouth) {
        CvRect *sr = (CvRect *) cvGetSeqElem (mouth, 0);
        GST_LOG_OBJECT (filter, "mouth/%d: x,y = %4u,%4u: w.h = %4u,%4u",
            mouth->total, rmx + sr->x, rmy + sr->y, sr->width, sr->height);
        gst_structure_set (s,
            "mouth->x", G_TYPE_UINT, rmx + sr->x,
            "mouth->y", G_TYPE_UINT, rmy + sr->y,
            "mouth->width", G_TYPE_UINT, sr->width,
            "mouth->height", G_TYPE_UINT, sr->height, NULL);
      }
      if (have_eyes) {
        CvRect *sr = (CvRect *) cvGetSeqElem (eyes, 0);
        GST_LOG_OBJECT (filter, "eyes/%d: x,y = %4u,%4u: w.h = %4u,%4u",
            eyes->total, rex + sr->x, rey + sr->y, sr->width, sr->height);
        gst_structure_set (s,
            "eyes->x", G_TYPE_UINT, rex + sr->x,
            "eyes->y", G_TYPE_UINT, rey + sr->y,
            "eyes->width", G_TYPE_UINT, sr->width,
            "eyes->height", G_TYPE_UINT, sr->height, NULL);
      }

      g_value_init (&facedata, GST_TYPE_STRUCTURE);
      g_value_take_boxed (&facedata, s);
      gst_value_list_append_value (&facelist, &facedata);
      g_value_unset (&facedata);
      s = NULL;

      if (do_display) {
        CvPoint center;
        CvSize axes;
        gdouble w, h;
        gint cb = 255 - ((i & 3) << 7);
        gint cg = 255 - ((i & 12) << 5);
        gint cr = 255 - ((i & 48) << 3);

        w = r->width / 2;
        h = r->height / 2;
        center.x = cvRound ((r->x + w));
        center.y = cvRound ((r->y + h));
        axes.width = w;
        axes.height = h * 1.25; /* tweak for face form */
        cvEllipse (img, center, axes, 0.0, 0.0, 360.0, CV_RGB (cr, cg, cb),
            3, 8, 0);

        if (have_nose) {
          CvRect *sr = (CvRect *) cvGetSeqElem (nose, 0);

          w = sr->width / 2;
          h = sr->height / 2;
          center.x = cvRound ((rnx + sr->x + w));
          center.y = cvRound ((rny + sr->y + h));
          axes.width = w;
          axes.height = h * 1.25;       /* tweak for nose form */
          cvEllipse (img, center, axes, 0.0, 0.0, 360.0, CV_RGB (cr, cg, cb),
              1, 8, 0);
        }
        if (have_mouth) {
          CvRect *sr = (CvRect *) cvGetSeqElem (mouth, 0);

          w = sr->width / 2;
          h = sr->height / 2;
          center.x = cvRound ((rmx + sr->x + w));
          center.y = cvRound ((rmy + sr->y + h));
          axes.width = w * 1.5; /* tweak for mouth form */
          axes.height = h;
          cvEllipse (img, center, axes, 0.0, 0.0, 360.0, CV_RGB (cr, cg, cb),
              1, 8, 0);
        }
        if (have_eyes) {
          CvRect *sr = (CvRect *) cvGetSeqElem (eyes, 0);

          w = sr->width / 2;
          h = sr->height / 2;
          center.x = cvRound ((rex + sr->x + w));
          center.y = cvRound ((rey + sr->y + h));
          axes.width = w * 1.5; /* tweak for eyes form */
          axes.height = h;
          cvEllipse (img, center, axes, 0.0, 0.0, 360.0, CV_RGB (cr, cg, cb),
              1, 8, 0);
        }
      }
    }

    gst_structure_set_value ((GstStructure *) gst_message_get_structure (msg),
        "faces", &facelist);
    g_value_unset (&facelist);
    gst_element_post_message (GST_ELEMENT (filter), msg);
  }

  return GST_FLOW_OK;
}
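The element above follows the classic two-stage Haar pattern: detect faces on the full frame, then run the eye/nose/mouth cascades only inside an ROI cut from each face rectangle. A minimal sketch of that pattern in the plain OpenCV C API (2.2-era cvHaarDetectObjects signature; the cascade paths and input file are placeholders):

#include <stdio.h>
#include <opencv/cv.h>
#include <opencv/highgui.h>

int main(void)
{
    IplImage *gray = cvLoadImage("face.png", CV_LOAD_IMAGE_GRAYSCALE);
    CvMemStorage *storage = cvCreateMemStorage(0);
    CvHaarClassifierCascade *faceCascade =
        (CvHaarClassifierCascade *) cvLoad("haarcascade_frontalface_default.xml", 0, 0, 0);
    CvHaarClassifierCascade *eyesCascade =
        (CvHaarClassifierCascade *) cvLoad("haarcascade_eye.xml", 0, 0, 0);
    CvSeq *faces;
    int i;

    if (!gray || !faceCascade || !eyesCascade)
        return -1;

    faces = cvHaarDetectObjects(gray, faceCascade, storage, 1.1, 3,
                                CV_HAAR_DO_CANNY_PRUNING, cvSize(30, 30), cvSize(0, 0));
    for (i = 0; i < (faces ? faces->total : 0); i++) {
        CvRect *r = (CvRect *) cvGetSeqElem(faces, i);
        CvSeq *eyes;

        /* eyes live in the upper half of the face, as in the element above */
        cvSetImageROI(gray, cvRect(r->x, r->y, r->width, r->height / 2));
        eyes = cvHaarDetectObjects(gray, eyesCascade, storage, 1.1, 3, 0,
                                   cvSize(10, 10), cvSize(0, 0));
        printf("face %d: %d eye candidate(s)\n", i, eyes ? eyes->total : 0);
        cvResetImageROI(gray);
    }
    cvReleaseMemStorage(&storage);
    cvReleaseImage(&gray);
    return 0;
}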
void BleImageProcessThread::run()
{
    CvSize dstSize;
    dstSize.width = m_width;
    dstSize.height = m_height;
    IplImage* dstImg = cvCreateImage(dstSize, IPL_DEPTH_8U, 3);
    cvZero(dstImg);

    QRect dstRect(0, 0, m_width, m_height);

    while (!m_stop) {
        QElapsedTimer elapsedTimer;
        elapsedTimer.start();

        m_updateMutex.lock();
        for (int i = 0; i < m_sources.size(); ++i) {
            SourcePair & pair = m_sources[i];

            BleImage bleImage = pair.source->getImage();
            if (bleImage.dataSize <= 0) continue;

            IplImage *cvImage = cvCreateImageHeader(cvSize(bleImage.width, bleImage.height), 8, 3);
            cvImage->imageData = bleImage.data;
            cvImage->imageDataOrigin = bleImage.data;

            // if the image size is not equal to the area size,
            // then resize it.
            IplImage *resizedImage = cvImage;
            if (cvImage->width != pair.rect.width() ||
                    cvImage->height != pair.rect.height())
            {
                resizedImage = cvCreateImage(cvSize(pair.rect.width(), pair.rect.height()), cvImage->depth, cvImage->nChannels);
                cvResize(cvImage, resizedImage, CV_INTER_LINEAR);
            }

            if (bleImage.format != BleImage_Format_BGR24) {
                cvConvertImage(resizedImage, resizedImage, CV_CVTIMG_SWAP_RB);
            }

            // get the intersection of dst rect
            if (!dstRect.intersects(pair.rect)) continue;

            QRect intersectionRect_1 = dstRect.intersected(pair.rect);
            QRect intersectionRect_2 = pair.rect.intersected(dstRect);

            // intersectionRect_2 should relative to pair.rect
            intersectionRect_2.moveTopLeft(QPoint(intersectionRect_2.x() - pair.rect.x(),
                                       intersectionRect_2.y() - pair.rect.y()));

            cvSetImageROI(dstImg, QRect2CvRect(intersectionRect_1));
            cvSetImageROI(resizedImage, QRect2CvRect(intersectionRect_2));

            cvCopy(resizedImage, dstImg);

            cvResetImageROI(dstImg);
            cvResetImageROI(resizedImage);

            // release resizedImage only if it was allocated by cvCreateImage above
            if (resizedImage != cvImage) {
                cvReleaseImage(&resizedImage);
            }
            cvReleaseImageHeader(&cvImage);
        }
        m_updateMutex.unlock();

        m_modifyOutputMutex.lock();

        // if the output queue is roughly 1 s behind, drop this frame.
        if (m_outputQueue.size() > 20) {
            log_trace("queue has too many images, maybe your encoder is too slow!");
            goto end;
        }

        // keep these locals in their own scope so the goto above can skip them legally
        if (true) {
            // wrap the composed frame into a BleImage
            BleImage *be = new BleImage;
            be->width = dstImg->width;
            be->height = dstImg->height;

            be->data = new char[dstImg->imageSize];
            memcpy(be->data, dstImg->imageData, dstImg->imageSize);

            be->dataSize = dstImg->imageSize;

            be->format = BleImage_Format_BGR24;

            m_timestampBuilder.setVideoCaptureInternal(m_internal);
            be->pts = m_timestampBuilder.addVideoFrame();

            m_outputQueue.enqueue(be);
        }

end:
        m_modifyOutputMutex.unlock();

        int elapsedMs = elapsedTimer.elapsed();
        int needSleepMs = m_internal - elapsedMs;
        if (needSleepMs < 0) {
            needSleepMs = 0;
        }
        msleep(needSleepMs);

        // reset bg image to black
        cvZero(dstImg);
    }

    log_trace("BleImageProcessThread exit normally.");
}
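QRect2CvRect() is defined elsewhere in the project; assuming the Qt rect is already in pixel coordinates, the obvious mapping would be:

// Plausible sketch of the QRect2CvRect helper used above (an assumption,
// not the project's actual definition).
static inline CvRect QRect2CvRect(const QRect &r)
{
    return cvRect(r.x(), r.y(), r.width(), r.height());
}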
/**
 * Locate the user's eye with template matching
 *
 * @param	IplImage* img     the source image
 * @param	IplImage* tpl     the eye template
 * @param	CvRect*   window  search within this window,
 *                            will be updated with the recent search window
 * @param	CvRect*   eye     output parameter, will contain the current
 *                            location of user's eye
 * @return	int               '1' if found, '0' otherwise
 */
int
locate_eye(IplImage* img, IplImage* tpl, CvRect* window, CvRect* eye)
{
	IplImage*	tm;
	CvRect		win;
	CvPoint		minloc, maxloc, point;
	double		minval, maxval;
	int			w, h;

	/* get the centroid of the eye */
	point = cvPoint(
		eye->x + eye->width / 2,
		eye->y + eye->height / 2
	);

	/* set up the search window;
	   adjust the predefined WIN_WIDTH and WIN_HEIGHT above
	   to suit your needs */
	win = cvRect(
		point.x - WIN_WIDTH / 2,
		point.y - WIN_HEIGHT / 2,
		WIN_WIDTH,
		WIN_HEIGHT
	);

	/* make sure that the search window is still within the frame */
	if (win.x < 0)
		win.x = 0;
	if (win.y < 0)
		win.y = 0;
	if (win.x + win.width > img->width)
		win.x = img->width - win.width;
	if (win.y + win.height > img->height)
		win.y = img->height - win.height;

	/* create new image for template matching result where: 
	   width  = W - w + 1, and
	   height = H - h + 1 */
	w  = win.width  - tpl->width  + 1;
	h  = win.height - tpl->height + 1;
	tm = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 1);

	/* apply the search window */
	cvSetImageROI(img, win);

	/* template matching */
	cvMatchTemplate(img, tpl, tm, CV_TM_SQDIFF_NORMED);
	cvMinMaxLoc(tm, &minval, &maxval, &minloc, &maxloc, 0);

	/* release things */
	cvResetImageROI(img);
	cvReleaseImage(&tm);

	/* only good matches */
	if (minval > TM_THRESHOLD)
		return 0;

	/* return the search window */
	*window = win;

	/* return eye location */
	*eye = cvRect(
		win.x + minloc.x,
		win.y + minloc.y,
		TPL_WIDTH,
		TPL_HEIGHT
	);

	return 1;
}
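A hedged sketch of how locate_eye() might be driven from a capture loop; the window name, the template file, and the initial eye guess are placeholders, and TPL_WIDTH/TPL_HEIGHT are the constants assumed above:

CvCapture *cap = cvCaptureFromCAM(0);
IplImage  *tpl = cvLoadImage("eye_template.png", CV_LOAD_IMAGE_GRAYSCALE);
IplImage  *frame, *gray = NULL;
CvRect     win, eye = cvRect(100, 100, TPL_WIDTH, TPL_HEIGHT); /* initial guess */

cvNamedWindow("eye tracker", 1);
while ((frame = cvQueryFrame(cap)) != NULL) {
	if (gray == NULL)
		gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
	cvCvtColor(frame, gray, CV_BGR2GRAY);

	/* draw the match only when the normalized SQDIFF score passes TM_THRESHOLD */
	if (locate_eye(gray, tpl, &win, &eye))
		cvRectangle(frame, cvPoint(eye.x, eye.y),
		            cvPoint(eye.x + eye.width, eye.y + eye.height),
		            CV_RGB(0, 255, 0), 1, 8, 0);

	cvShowImage("eye tracker", frame);
	if (cvWaitKey(10) == 27)
		break;
}
cvReleaseImage(&tpl);
cvReleaseImage(&gray);
cvReleaseCapture(&cap);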
void MapMaker::image_callback(const sensor_msgs::ImageConstPtr& msg) {
//  printf("callback called\n");
  try
  {
    // if you want to work with color images, change from mono8 to bgr8
    if (input_image == NULL) {
      input_image = cvCloneImage(bridge.imgMsgToCv(msg, "mono8"));
      rotationImage = cvCloneImage(input_image);
      // printf("cloned image\n");
    }
    else {
      cvCopy(bridge.imgMsgToCv(msg, "mono8"), input_image);
      // printf("copied image\n");
    }
  }
  catch (sensor_msgs::CvBridgeException& e)
  {
    ROS_ERROR("Could not convert from '%s' to 'mono8'.", msg->encoding.c_str());
    return;
  }
	
  if (input_image != NULL) {
    //get tf transform here and put in map
    ros::Time acquisition_time = msg->header.stamp;
    geometry_msgs::PoseStamped basePose;
    geometry_msgs::PoseStamped mapPose;
    basePose.pose.orientation.w=1.0;
    ros::Duration timeout(3);
    basePose.header.frame_id="/base_link";
    mapPose.header.frame_id="/map";
    try {
      tf_listener_.waitForTransform("/base_link", "/map", acquisition_time, timeout);
       
      tf_listener_.transformPose("/map", acquisition_time,basePose,"/base_link",mapPose);
	    
	    printf("pose #%d %f %f %f\n",pic_number,mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation));
	    
	    
	    /*
	    char buffer [50];
	    sprintf (buffer, "/tmp/test%02d.jpg", pic_number);
			if(!cvSaveImage(buffer,input_image,0)) printf("Could not save: %s\n",buffer);
			else printf("picture taken!!!\n");
	    pic_number++;
	    */
	    
	    cv::Point_<double> center;
      center.x=input_image->width/2;
      center.y=input_image->height/2;
      double translation_arr[2][3];
      CvMat translation;
      cvInitMatHeader(&translation, 2, 3, CV_64F, translation_arr);
      
      cvSetZero(&translation);
      cv2DRotationMatrix(center, (tf::getYaw(mapPose.pose.orientation)*180/3.14159) -90,1.0,&translation);
      cvSetZero(rotationImage);
      cvWarpAffine(input_image,rotationImage,&translation,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS,cvScalarAll(0));
      
      
      CvRect roi;
      roi.width=rotationImage->width;
      roi.height=rotationImage->height;
      
      if(init_zero_x==0){
        init_zero_x=(int)(mapPose.pose.position.x*(1.0/map_meters_per_pixel));
        init_zero_y=(int)(mapPose.pose.position.y*(-1.0/map_meters_per_pixel));
      }
      
      roi.x=(int)(mapPose.pose.position.x*(1.0/map_meters_per_pixel))-init_zero_x+map_zero_x-roi.width/2;
      roi.y=(int)(mapPose.pose.position.y*(-1.0/map_meters_per_pixel))-init_zero_y+map_zero_y-roi.height/2;
      
      printf("x %d, y %d, rot %f\n",roi.x,roi.y, (tf::getYaw(mapPose.pose.orientation)*180/3.14159) -90);
      
      cvSetImageROI(map,roi);
      
      cvMax(map,rotationImage,map);
      
      cvResetImageROI(map);
	    cvShowImage("map image",map);	    
    }
    catch (tf::TransformException& ex) {
      ROS_WARN("[map_maker] TF exception:\n%s", ex.what());
      printf("[map_maker] TF exception:\n%s", ex.what());
      return;
    }
    catch(...){
      printf("opencv shit itself cause our roi is bad\n");
    }
  }
}
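Stripped of the ROS plumbing, the core of the callback is the rotate-about-center warp applied before pasting into the map. A reduced sketch (src, dst, and angle_deg are assumed declared, with both images the same size and type):

CvPoint2D32f center = cvPoint2D32f(src->width / 2.0f, src->height / 2.0f);
float m[6];
CvMat rot = cvMat(2, 3, CV_32F, m);

cv2DRotationMatrix(center, angle_deg, 1.0, &rot); /* angle in degrees, counter-clockwise */
cvWarpAffine(src, dst, &rot, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));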
Example #24
0
int fbtrack(IplImage *imgI, IplImage *imgJ, float *bb, float *bbnew,
            float *scaleshift)
{
    char level = 5;
    int numAdd = 50;

    // find good points
    const int margin = 5;
    CvRect rect = cvRect(bb[0],bb[1],bb[2]-bb[0]+1,bb[3]-bb[1]+1);
    cvSetImageROI(imgI, rect);
    IplImage *eig_image = cvCreateImage(cvGetSize(imgI), 32, 1);
    IplImage *temp_image = cvCreateImage(cvGetSize(imgI), 32, 1);
    CvPoint2D32f corners [numAdd];
    cvGoodFeaturesToTrack(imgI, eig_image, temp_image, corners, &numAdd, 0.01, 0, NULL, 2, 0, 0.04);
    cvReleaseImage(&(eig_image));
    cvReleaseImage(&(temp_image));
    cvResetImageROI(imgI);
    //printf("%d - number of features\n", numAdd);
    if (numAdd > 50) {
      numAdd = 50;
    }

    int numM = (int) sqrt(100 - numAdd);
    int numN = (int) sqrt(100 - numAdd);
    const int nPoints = numM * numN + numAdd;
    const int sizePointsArray = nPoints * 2;


    float fb[nPoints];
    float ncc[nPoints];
    char status[nPoints];

    float pt[sizePointsArray];
    float ptTracked[sizePointsArray];
    int nlkPoints;
    CvPoint2D32f *startPoints;
    CvPoint2D32f *targetPoints;
    float *fbLkCleaned;
    float *nccLkCleaned;
    int i, M;
    int nRealPoints;
    float medFb;
    float medNcc;
    int nAfterFbUsage;
    getFilledBBPoints(bb, numM, numN, margin, pt);
    //getFilledBBPoints(bb, numM, numN, 5, &ptTracked);

    //show good points
    //IplImage *tmp_show = cvCreateImage(cvGetSize(imgI), imgI->depth, imgI->nChannels);
    //cvCopy(imgI, tmp_show, NULL);
    //for(i = numN+numM; i < numN+numM+numAdd; i++) {
    //    cvCircle(tmp_show, CvPoint{bb[0]+corners[i-(numN+numM)].x, bb[1]+corners[i-(numN+numM)].y}, 2, CvScalar{0,0,255}, 1, 8, 0);
    //}
    //cvRectangle(tmp_show, CvPoint{bb[0],bb[1]},CvPoint{bb[2],bb[3]},CvScalar{0,0,255},1,8,0);
    //cvShowImage("name",tmp_show);

    //copy good points
    for(i = numN*numM; i < numN*numM+numAdd; i++)
    {
            pt[2*i + 0] = (int)(corners[i-(numN*numM)].x+bb[0]);
            pt[2*i + 1] = (int)(corners[i-(numN*numM)].y+bb[1]);
    }

    memcpy(ptTracked, pt, sizeof(float) * sizePointsArray);

    initImgs();
    trackLK(imgI, imgJ, pt, nPoints, ptTracked, nPoints, level, fb, ncc, status);
    initImgs();
    //  char* status = *statusP;
    nlkPoints = 0;

    for(i = 0; i < nPoints; i++)
    {
        nlkPoints += status[i];
    }

    startPoints = (CvPoint2D32f *) malloc(nlkPoints * sizeof(CvPoint2D32f));
    targetPoints = (CvPoint2D32f *) malloc(nlkPoints * sizeof(CvPoint2D32f));
    fbLkCleaned = (float *) malloc(nlkPoints * sizeof(float));
    nccLkCleaned = (float *) malloc(nlkPoints * sizeof(float));

    M = 2;
    nRealPoints = 0;

    for(i = 0; i < nPoints; i++)
    {
        //TODO:handle Missing Points
        //or status[i]==0
        if(ptTracked[M * i] == -1)
        {
        }
        else
        {
            startPoints[nRealPoints].x = pt[2 * i];
            startPoints[nRealPoints].y = pt[2 * i + 1];
            targetPoints[nRealPoints].x = ptTracked[2 * i];
            targetPoints[nRealPoints].y = ptTracked[2 * i + 1];
            fbLkCleaned[nRealPoints] = fb[i];
            nccLkCleaned[nRealPoints] = ncc[i];
            nRealPoints++;
        }
    }

    //assert nRealPoints==nlkPoints
    medFb = getMedian(fbLkCleaned, nlkPoints);
    medNcc = getMedian(nccLkCleaned, nlkPoints);
    /*  printf("medianfb: %f\nmedianncc: %f\n", medFb, medNcc);
     printf("Number of points after lk: %d\n", nlkPoints);*/
    nAfterFbUsage = 0;

    for(i = 0; i < nlkPoints; i++)
    {
        if((fbLkCleaned[i] <= medFb) && (nccLkCleaned[i] >= medNcc))
        {
            startPoints[nAfterFbUsage] = startPoints[i];
            targetPoints[nAfterFbUsage] = targetPoints[i];
            nAfterFbUsage++;
        }
    }

    /*printf("Number of points after fb correction: %d\n", nAfterFbUsage);*/
    //  showIplImage(IMGS[1]);
    // show "OpticalFlow" fb filtered.
    //  drawLinesCvPoint2D32f(imgI, startPoints, nRealPoints, targetPoints,
    //      nRealPoints);
    //  showIplImage(imgI);

    predictbb(bb, startPoints, targetPoints, nAfterFbUsage, bbnew, scaleshift);
    /*printf("bbnew: %f,%f,%f,%f\n", bbnew[0], bbnew[1], bbnew[2], bbnew[3]);
     printf("relative scale: %f \n", scaleshift[0]);*/
    //show picture with tracked bb
    //  drawRectFromBB(imgJ, bbnew);
    //  showIplImage(imgJ);
    free(startPoints);
    free(targetPoints);
    free(fbLkCleaned);
    free(nccLkCleaned);

    if(medFb > 10) return 0;
    else return 1;

}
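getMedian() comes from elsewhere in the tracker. Note that fbtrack() keeps indexing fbLkCleaned/nccLkCleaned (paired with startPoints/targetPoints) after taking the medians, so the helper must not reorder the caller's arrays; a copy-then-sort sketch (an assumption, not the original source):

#include <stdlib.h>
#include <string.h>

static int cmpFloat(const void *a, const void *b)
{
    float d = *(const float *)a - *(const float *)b;
    return (d > 0) - (d < 0);
}

float getMedian(float *arr, int n)
{
    float *tmp = (float *) malloc(n * sizeof(float));
    float med;

    memcpy(tmp, arr, n * sizeof(float)); /* sort a copy, leave arr untouched */
    qsort(tmp, n, sizeof(float), cmpFloat);
    med = (n % 2) ? tmp[n / 2] : 0.5f * (tmp[n / 2 - 1] + tmp[n / 2]);
    free(tmp);
    return med;
}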
//extract the sub image from the whole image. 
//The first node of a linkedlist with images and coordinates in the original image is returned.
sub_image* extract_sub_image(IplImage* original_image, int min_width, int min_height)
{
	int left = 0;
	int top = 0;
	int right = 0;
	int bottom = 0;
	int temp_x = 0;
	int temp_y = 0;

	int contour_flag = 0;
	int flag = 0;


	sub_image* temp_result = (sub_image*)malloc(sizeof(sub_image));
	sub_image* result = NULL;
	sub_image* prev = NULL;

	IplImage* temp_original_image = cvCloneImage(original_image);
	IplImage* temp = cvCloneImage(original_image);

	//search the connected components(here search the connected area)
	CvSeq* contours = connected_components(temp_original_image, temp);

	for (CvSeq* c=contours ;c!=NULL; c=c->h_next)
	{
		for (int i = 0; i < c->total; i ++)
		{
			CvPoint* p = (CvPoint*)cvGetSeqElem(c, i);
			temp_x = p->x;
			temp_y = p->y;
			//printf("x = %d, y = %d\n",temp_x,temp_y);

			//grow the bounding box of this contour
			if(i == 0)
			{
				left = temp_x;
				right = temp_x;
				top = temp_y;
				bottom = temp_y;
			}
			else
			{
				if(temp_x <= left) left = temp_x;
				if(temp_x >= right) right = temp_x;
				if(temp_y <= top) top = temp_y;
				if(temp_y >= bottom) bottom = temp_y;
			}
		}

		//Not a valid area: min_width and min_height estimate the smallest sub-image scale worth keeping.
		if(right - left < min_width || bottom - top < min_height)
		{
			continue;
		}
		else
		{
			//printf("find subimage %d\n",flag++);
			cvSetImageROI(temp_original_image, cvRect(left, top, right - left, bottom - top));

			if(contour_flag++ == 0)
			{
				result = temp_result;
			}
			
			temp_result->image = cvCreateImage(cvSize(right - left, buttom - top),temp_original_image->depth,temp_original_image-> nChannels);
			
			temp_result->image_left = left;
			temp_result->image_top = top;
			temp_result->image_right = right;
			temp_result->image_buttom = buttom;
			
			//copy the region of interest (the sub image) to the destination image.
			cvCopy(temp_original_image, temp_result->image, NULL);
			
			//cvShowImage(string[flag++],temp_result->image);

			//organize the linkedlist.
			temp_result->next_image =  (sub_image*)malloc(sizeof(sub_image));
			prev = temp_result;
			temp_result = temp_result->next_image;

			cvResetImageROI(temp_original_image);
		}
	}
	
	//drop the extra tail node allocated in the last iteration; if nothing
	//qualified, prev is still NULL and the single spare node must be freed.
	if(prev != NULL)
	{
		free(prev->next_image);
		prev->next_image = NULL;
	}
	else
		free(temp_result);
	cvReleaseImage(&temp_original_image);
	cvReleaseImage(&temp);
	//the head node of the linkedlist is returned (NULL if no sub image was found).
	return result;
}
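A hedged usage sketch: walk the list returned by extract_sub_image() and release every node and image (MIN_SUB_IMAGE_WIDTH/MIN_SUB_IMAGE_HEIGHT are the constants used by recognize(); the field names follow the code above):

void process_and_free_sub_images(IplImage* processed)
{
	sub_image* node = extract_sub_image(processed, MIN_SUB_IMAGE_WIDTH, MIN_SUB_IMAGE_HEIGHT);
	while (node != NULL)
	{
		sub_image* next = node->next_image;
		printf("sub image at (%d,%d)-(%d,%d)\n",
			node->image_left, node->image_top,
			node->image_right, node->image_buttom);
		cvReleaseImage(&node->image);
		free(node);
		node = next;
	}
}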
Example #26
0
//--------------------------------------------------------------------------------
void ofxCvImage::resetROI() {
    cvResetImageROI( cvImage );
    cvResetImageROI( cvImageTemp );
}
Example #27
0
void CvFaceElement::FindContours(IplImage* img, IplImage* thresh, int nLayers, int dMinSize)
{
    CvSeq* seq;
    CvRect roi = m_rROI;
    Extend(roi, 1);
    cvSetImageROI(img, roi);
    cvSetImageROI(thresh, roi);
    // layers
    int colors[MAX_LAYERS] = {0};
    int iMinLevel = 0, iMaxLevel = 255;
    float step, power;
    ThresholdingParam(img, nLayers / 2, iMinLevel, iMaxLevel, step, power, 4);
    int iMinLevelPrev = iMinLevel;
    int iMaxLevelPrev = iMinLevel;
    if (m_trPrev.iColor != 0)
    {
        iMinLevelPrev = m_trPrev.iColor - nLayers / 2;
        iMaxLevelPrev = m_trPrev.iColor + nLayers / 2;
    }
    if (iMinLevelPrev < iMinLevel)
    {
        iMaxLevelPrev += iMinLevel - iMinLevelPrev;
        iMinLevelPrev = iMinLevel;
    }
    if (iMaxLevelPrev > iMaxLevel)
    {
        iMinLevelPrev -= iMaxLevelPrev - iMaxLevel;
        if (iMinLevelPrev < iMinLevel)
            iMinLevelPrev = iMinLevel;
        iMaxLevelPrev = iMaxLevel;
    }
    int n = nLayers;
    n -= (iMaxLevelPrev - iMinLevelPrev + 1) / 2;
    step = float(iMinLevelPrev - iMinLevel + iMaxLevel - iMaxLevelPrev) / float(n);
    int j = 0;
    float level;
    for (level = (float)iMinLevel; level < iMinLevelPrev && j < nLayers; level += step, j++)
        colors[j] = int(level + 0.5);
    for (level = (float)iMinLevelPrev; level < iMaxLevelPrev && j < nLayers; level += 2.0, j++)
        colors[j] = int(level + 0.5);
    for (level = (float)iMaxLevelPrev; level < iMaxLevel && j < nLayers; level += step, j++)
        colors[j] = int(level + 0.5);
    //
    for (int i = 0; i < nLayers; i++)
    {
        cvThreshold(img, thresh, colors[i], 255.0, CV_THRESH_BINARY);
        if (cvFindContours(thresh, m_mstgRects, &seq, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE))
        {
            CvTrackingRect cr;
            for (CvSeq* external = seq; external; external = external->h_next)
            {
                cr.r = cvContourBoundingRect(external);
                Move(cr.r, roi.x, roi.y);
                if (RectInRect(cr.r, m_rROI) && cr.r.width > dMinSize  && cr.r.height > dMinSize)
                {
                    cr.ptCenter = Center(cr.r);
                    cr.iColor = colors[i];
                    cvSeqPush(m_seqRects, &cr);
                }
                for (CvSeq* internal = external->v_next; internal; internal = internal->h_next)
                {
                    cr.r = cvContourBoundingRect(internal);
                    Move(cr.r, roi.x, roi.y);
                    if (RectInRect(cr.r, m_rROI) && cr.r.width > dMinSize  && cr.r.height > dMinSize)
                    {
                        cr.ptCenter = Center(cr.r);
                        cr.iColor = colors[i];
                        cvSeqPush(m_seqRects, &cr);
                    }
                }
            }
            cvClearSeq(seq);
        }
    }
    cvResetImageROI(img);
    cvResetImageROI(thresh);
}//void CvFaceElement::FindContours(IplImage* img, IplImage* thresh, int nLayers)
Example #28
0
//--------------------------------------------------------------------------------
void  ofxCvImage::resetImageROI( IplImage* img ) {
    cvResetImageROI(img);
}
Example #29
0
// parameters:
//  img - input video frame
//  dst - resultant motion picture
//  args - optional parameters
void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvMerge( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {

        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
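update_mhi() relies on file-scope state (buf, last, mhi, orient, segmask, mask, storage, and the N/MHI_DURATION constants) declared above it in the original motempl sample; a capture loop along the lines of that sample would drive it like this:

int main(int argc, char **argv)
{
    CvCapture *capture = (argc > 1) ? cvCaptureFromFile(argv[1]) : cvCaptureFromCAM(0);
    IplImage *motion = NULL, *image;

    if (!capture)
        return -1;

    cvNamedWindow("Motion", 1);
    for (;;) {
        image = cvQueryFrame(capture);
        if (!image)
            break;
        if (!motion) {
            motion = cvCreateImage(cvSize(image->width, image->height), 8, 3);
            cvZero(motion);
            motion->origin = image->origin;
        }
        update_mhi(image, motion, 30); /* diff_threshold = 30 */
        cvShowImage("Motion", motion);
        if (cvWaitKey(10) >= 0)
            break;
    }
    cvReleaseCapture(&capture);
    cvDestroyWindow("Motion");
    return 0;
}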
void
MotionCapture::HUD(char* title, int nArgs, ...) {

    // img - Used for getting the arguments
    IplImage *img;

    // DispImage - the image in which input images are to be copied
    IplImage *DispImage;

    int size;
    int i;
    int m, n;
    int x, y;

    // w - Maximum number of images in a row
    // h - Maximum number of images in a column
    int w, h;

    // scale - How much we have to resize the image
    float scale;
    int max;

    // If the number of arguments is less than 1 or greater than 12,
    // return without displaying anything
    if(nArgs <= 0) {
        printf("Number of arguments too small....\n");
        return;
    }
    else if(nArgs > 12) {
        printf("Number of arguments too large....\n");
        return;
    }
    // Determine the size of the image,
    // and the number of rows/cols
    // from number of arguments
    else if (nArgs == 1) {
        w = h = 1;
        size = 300;
    }
    else if (nArgs == 2) {
        w = 2; h = 1;
        size = 300;
    }
    else if (nArgs == 3 || nArgs == 4) {
        w = 2; h = 2;
        size = 350;
    }
    else if (nArgs == 5 || nArgs == 6) {
        w = 3; h = 2;
        size = 200;
    }
    else if (nArgs == 7 || nArgs == 8) {
        w = 4; h = 2;
        size = 200;
    }
    else {
        w = 4; h = 3;
        size = 150;
    }

    // Create a new 3 channel image
    DispImage = cvCreateImage( cvSize( 50 + size*w, 60 + size*h), 8, 3 );

    // Used to get the arguments passed
    va_list args;
    va_start(args, nArgs);

    // Loop for nArgs number of arguments
    for (i = 0, m = 20, n = 20; i < nArgs; i++, m += (20 + size)) {

        // Get the Pointer to the IplImage
        img = va_arg(args, IplImage*);

        // Check whether it is NULL or not
        // If it is NULL, release the image, and return
        if(img == 0) {
            printf("Invalid arguments");
            cvReleaseImage(&DispImage);
            return;
        }

        // Find the width and height of the image
        x = img->width;
        y = img->height;

        // Find whether height or width is greater in order to resize the image
        max = (x > y)? x: y;

        // Find the scaling factor to resize the image
        scale = (float) ( (float) max / size );

        // Used to Align the images
        if( i % w == 0 && m!= 20) {
            m = 20;
            n+= 20 + size;
        }

        // Set the image ROI to display the current image
        cvSetImageROI(DispImage, cvRect(m, n, (int)( x/scale ), (int)( y/scale )));

        // Resize the input image and copy it into the Single Big Image
        cvResize( img, DispImage );

        // Reset the ROI in order to display the next image
        cvResetImageROI(DispImage);
    }

    // Create a new window, and show the Single Big Image
    cvNamedWindow( title, 1 );
    cvShowImage( title, DispImage);

    //cvWaitKey();
    //cvDestroyWindow(title);

    // End the number of arguments
    va_end(args);

    // Release the Image Memory (cvShowImage keeps an internal copy, so releasing here is safe)
    cvReleaseImage(&DispImage);
}
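Typical use is to tile a few intermediate frames into one debug window; a hypothetical call (the MotionCapture instance and the frame pointers are placeholders):

// Show three processing stages side by side in a single "Debug" window.
capture.HUD("Debug", 3, frame, grayFrame, diffFrame);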