コード例 #1
0
ファイル: head_analyzer.cpp プロジェクト: AIRLab-POLIMI/e2
//==============================================================================
//==============================================================================
bool detectFace()
{
	// Detect the user's head/face blob in the IR frame and publish the result
	// through the module-level globals (headBlob, faceBlob, eyesAreaBlob,
	// userHeadRatio, userHeadPitch, headDataMessageReady, userDistanceReady).
	// Returns true when a valid face was found and measured.

	//Variables
	int biggerContourIdx = 0;
	int contourArea = -1;			// area of the biggest contour seen so far (-1 = none yet)

	Mat binaryFrame;
	Mat binaryFrameCopy;

	vector<vector<Point> > contours;
	vector<Vec4i> hierarchy;

	Rect headROI;
	Rect faceROI;
	Rect eyesROI;

	// The binarization threshold and the search window are tuned per distance
	// band: a wider window and a "near" threshold close to the sensor, a
	// slightly smaller window and a "far" threshold otherwise.
	if(userDistance < 750)
	{
		//Face area into frameIR
		headROI = cvRect(	(headPosition.x - BLOB_HEAD_WIDTH/2),  //X
							headPosition.y,						   //Y
							BLOB_HEAD_WIDTH, BLOB_HEAD_HEIGHT);
		//Binarize the IR image by simple thresholding
		frameIR.copyTo(binaryFrame);
		binaryFrame = binaryFrame > THRESHOLD_HEAD_NEAR;
	}
	else
	{
		//Face area into frameIR (smaller window when far away)
		headROI = cvRect(	headPosition.x - (BLOB_HEAD_WIDTH-10)/2,  //X
							headPosition.y,							  //Y
							BLOB_HEAD_WIDTH-10, BLOB_HEAD_HEIGHT-20);
		frameIR.copyTo(binaryFrame);
		binaryFrame = binaryFrame > THRESHOLD_HEAD_FAR;
	}

	//Check out-of-frame error: clamp the ROI so it stays inside the frame
	if(headROI.x < 0)
		headROI.x = 0;
	if(headROI.x > (FRAME_WIDTH - headROI.width))
		headROI.x = FRAME_WIDTH - headROI.width;

	if(headROI.y < 0)
		headROI.y = 0;
	if(headROI.y > (FRAME_HEIGHT - headROI.height))
		headROI.y = FRAME_HEIGHT - (headROI.height+10);

	//Keep an untouched copy: findContours modifies its input image
	binaryFrame.copyTo(binaryFrameCopy);
	Mat headBinaryFrame (binaryFrame, headROI);

	//OpenCV find-contours algorithm on the head sub-image
	findContours(headBinaryFrame, contours, hierarchy, CV_RETR_CCOMP,
				CV_CHAIN_APPROX_SIMPLE, cvPoint(headROI.x, headROI.y));

	//Walk the top-level contours (hierarchy[i][0] = next sibling) and keep
	//the biggest one. BUGFIX: guard against an empty result — indexing
	//hierarchy[0] on an empty vector is undefined behaviour.
	if(!contours.empty())
	{
		for (int i = 0; i >= 0; i = hierarchy[i][0])
		{
			headROI = boundingRect(contours[i]);

			//Get the biggest area
			int temp = headROI.width * headROI.height;
			if(temp > contourArea)
			{
				contourArea = temp;
				biggerContourIdx = i;
			}
		}
	}

	//Save head dimensions
	if(contourArea > 0)
	{
		headROI = boundingRect(contours[biggerContourIdx]);
		headBlob = Blob(cvPoint(headROI.x, headROI.y), headROI.height, headROI.width);

		//Reject detections too close to the frame border — the measurement
		//is not reliable there (magic bounds are empirical).
		if(headBlob.getPt1().x < 150 || headBlob.getPt2().x > 600 ||
		   headBlob.getPt1().y < 20  || headBlob.getPt2().y > 360)
		{
			userDistanceReady = false;
			return false;
		}

		//Define the eyes area: a horizontal band in the upper part of the head ROI
		eyesROI = cvRect(headROI.x, (headROI.y + headROI.height/8 + 15),
						 headROI.width, 3*headROI.height/8);

		//Shrink headROI width by running findContours on the eyes sub-image
		//(use the untouched copy; the first findContours consumed binaryFrame)
		Mat faceBinaryFrame (binaryFrameCopy, eyesROI);

		//Find face contours
		contours.clear();
		hierarchy.clear();
		findContours(faceBinaryFrame, contours, hierarchy, CV_RETR_CCOMP,
					CV_CHAIN_APPROX_SIMPLE, cvPoint(eyesROI.x, eyesROI.y));

		//Filter contours and get the biggest one (same empty-guard as above)
		biggerContourIdx = 0;
		contourArea = -1;
		if(!contours.empty())
		{
			for (int i = 0; i >= 0; i = hierarchy[i][0])
			{
				faceROI = boundingRect(contours[i]);

				//Get the biggest area
				int temp = faceROI.width * faceROI.height;
				if(temp > contourArea)
				{
					contourArea = temp;
					biggerContourIdx = i;
				}
			}
		}

		//Save face dimensions
		if(contourArea > 0)
		{
			faceROI = boundingRect(contours[biggerContourIdx]);
			//NOTE(review): only the x/width come from the face contour; the
			//y/height are deliberately(?) taken from headROI — confirm intent.
			faceBlob = Blob(cvPoint(faceROI.x, headROI.y), headROI.height, faceROI.width);

			eyesAreaBlob = 	Blob( cvPoint((faceROI.x), (eyesROI.y-5)),	//Pt1
								  cvPoint((faceROI.x+faceROI.width),eyesROI.y+eyesROI.height));	//Pt2

			//Draw face blob and eye area
			rectangle(frameDrawn, faceBlob.getRect(), CV_RGB(0,255,0), 2, 8, 0);
			rectangle(frameDrawn, eyesAreaBlob.getRect(), CV_RGB(255,0,0), 2, 8, 0);

			//Width/height ratio of the face; near-square blobs are rejected
			userHeadRatio = (float)faceBlob.getWidth() / (float)faceBlob.getHeight();
			if (userHeadRatio > 0.9) {userHeadRatio = -1.0; }

			//Vertical offset of the face top w.r.t. the tracked head position
			userHeadPitch = faceBlob.getPt1().y - headPosition.y;
			headDataMessageReady = true;
			return true;
		}
	}
	return false;
}
コード例 #2
0
ファイル: head_analyzer.cpp プロジェクト: AIRLab-POLIMI/e2
//==============================================================================
//==============================================================================
void detectEyes()
{
	// Locate the left/right eye blobs inside eyesAreaBlob (computed by
	// detectFace) and update the module-level eye measurements:
	// leftEye/rightEye, userEyeLratio/userEyeRratio, userHeadRoll,
	// eyesDataMessageReady.

	//Variables
	Mat binaryFrame;
	Mat contoursFrame;
	Mat temp1;

	vector<vector<Point> > contours;
	vector<Vec4i> hierarchy;

	vector<Rect> leftBlobs;
	vector<Rect> rightBlobs;
	vector<Rect> leftCandidatesEye;
	vector<Rect> rightCandidatesEye;

	Blob candidatedBlob;
	Rect aBlob;

	unsigned int blobSize = 0;
	float blobRatio = 0.0f;

	bool isLeft = false;
	bool isRight = false;

	//Work on a copy of the IR frame
	frameIR.copyTo(binaryFrame);

	//Cut binaryFrame to obtain the eyes-area sub-image
	Mat temp2 (binaryFrame, eyesAreaBlob.getRect());

	//Distance handler: blob-size limits and the adaptiveThreshold block size
	//are empirical per distance band.
	if (userDistance < 700)
	{
		//Define blobs dimension
		MIN_EYE_BLOB_SIZE = 30;
		MAX_EYE_BLOB_SIZE = 300;

		//Get binary image and optimize it for blob analysis
		adaptiveThreshold (temp2, temp1, 255,
						   ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, 89, 0); //AirLab 125
		erode(temp1,contoursFrame, Mat());
	}
	else if ((userDistance >= 700)&&(userDistance < 760))
	{
		//Define blobs dimension
		MIN_EYE_BLOB_SIZE = 40;
		MAX_EYE_BLOB_SIZE = 300;

		//Get binary image and optimize it for blob analysis
		adaptiveThreshold (temp2, temp1, 255,
						   ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, 91, 0); //AirLab 125
		erode(temp1,contoursFrame, Mat());
	}
	else
	{
		//Define blobs dimension
		MIN_EYE_BLOB_SIZE = 35;
		MAX_EYE_BLOB_SIZE = 300;

		//Get binary image and optimize it for blob analysis
		adaptiveThreshold (temp2, temp1, 255,
							ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, 75, 0); //Airlab 111
		erode(temp1,contoursFrame, Mat());
	}

	//Find eye-blob candidates
	findContours(contoursFrame, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE , eyesAreaBlob.getPt1());

	//Filter contours and keep the plausible ones.
	//BUGFIX: guard against an empty result — indexing hierarchy[0] on an
	//empty vector is undefined behaviour.
	if(!contours.empty())
	{
		for(int i = 0; i >= 0 ; i = hierarchy[i][0] )
		{
			if ((int)contours[i].size() > 4)
			{
				aBlob = boundingRect(contours[i]);
				if(eyesFilteringEnable)
				{
					//Data for filtering on blob dimensions.
					blobSize = aBlob.width * aBlob.height;
					//BUGFIX: was integer division, which truncated the ratio
					//to a whole number before the float comparison below.
					blobRatio = (float)aBlob.width / (float)aBlob.height;
					//Save blob as candidate
					candidatedBlob = Blob(cvPoint(aBlob.x, aBlob.y), aBlob.height, aBlob.width);

					if (((blobSize > MIN_EYE_BLOB_SIZE) && (blobSize < MAX_EYE_BLOB_SIZE)) && (blobRatio > BLOB_EYE_RATIO))
					{
						//Distance of the blob center from the left/right edge
						//of eyesAreaBlob decides which side it belongs to
						unsigned int distDX = eyesAreaBlob.getPt2().x - candidatedBlob.getCenter().x;
						unsigned int distSX = candidatedBlob.getCenter().x - eyesAreaBlob.getPt1().x;

						if( distDX >= distSX )  //SX
							leftBlobs.push_back(aBlob);
						else
							rightBlobs.push_back(aBlob);
					}
				}
			}
		}
	}

	//LEFT BLOBS: keep at most MAX_EYE_CANDIDATE_SIZE candidates, preferring
	//blobs closest (in y) to the bottom of the eyes area
	if(leftBlobs.size() >= MAX_EYE_CANDIDATE_SIZE)
	{
		for(int i = 0; i < MAX_EYE_CANDIDATE_SIZE; i ++)
		{
			int k = getMinDistanceRect_y(leftBlobs, eyesAreaBlob.getPt2().y);
			leftCandidatesEye.push_back(leftBlobs[k]);
			leftBlobs.erase(leftBlobs.begin() + k);
		}
	}
	else
	{
		for(size_t i = 0; i < leftBlobs.size(); i ++)
			leftCandidatesEye.push_back(leftBlobs[i]);
	}

	//RIGHT BLOBS: same selection as above
	if(rightBlobs.size() >= MAX_EYE_CANDIDATE_SIZE)
	{
		for(int i = 0; i < MAX_EYE_CANDIDATE_SIZE; i ++)
		{
			int k = getMinDistanceRect_y(rightBlobs, eyesAreaBlob.getPt2().y);
			rightCandidatesEye.push_back(rightBlobs[k]);
			rightBlobs.erase(rightBlobs.begin() + k);
		}
	}
	else
	{
		for(size_t i = 0; i < rightBlobs.size(); i ++)
			rightCandidatesEye.push_back(rightBlobs[i]);
	}

	//Draw all eye candidates
	for(size_t i = 0; i < leftCandidatesEye.size(); i ++)
		rectangle(frameDrawn, leftCandidatesEye[i], CV_RGB(255,0,0), 1,8,0);
	for(size_t i = 0; i < rightCandidatesEye.size(); i ++)
		rectangle(frameDrawn, rightCandidatesEye[i], CV_RGB(0,255,0), 1,8,0);

	//Final filtering: accept a side only when exactly one candidate remains
	if(leftCandidatesEye.size() == 1)
	{
		isLeft = true;
		leftEye = leftCandidatesEye[0];
	}
	if(rightCandidatesEye.size() == 1)
	{
		isRight = true;
		rightEye = rightCandidatesEye[0];
	}

	if(isLeft)
	{
		rectangle(frameDrawn, leftEye, CV_RGB(0,0,255), 2,8,0);
		userEyeLratio = (float)leftEye.height / (float)leftEye.width;
		userHeadRoll = leftEye.y + leftEye.height/2;	//provisional: left-eye center y
	}
	else
		userEyeLratio = -1.0;

	if(isRight)
	{
		rectangle(frameDrawn, rightEye, CV_RGB(0,0,255), 2,8,0);
		userEyeRratio = (float)rightEye.height / (float)rightEye.width;
		//roll = y difference between the two eye centers
		userHeadRoll = userHeadRoll - (rightEye.y + rightEye.height/2);
	}
	else
		userEyeRratio = -1.0;

	//Roll is meaningful only when both eyes were found
	if(!isLeft || !isRight)
		userHeadRoll = 0;

	eyesDataMessageReady = true;
}