Example 1
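detectEyes() binarizes the eyes area of the Kinect IR frame with an adaptive threshold tuned to the user's distance, extracts candidate eye blobs through contour analysis, and derives per-eye aspect ratios and a head-roll estimate from the surviving left/right candidates.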
//==============================================================================
//==============================================================================
void detectEyes()
{
	//Variables
	Mat binaryFrame;
	Mat contoursFrame;
	Mat temp1;

	vector<vector<Point> > contours;
	vector<Vec4i> hierarchy;
	
	vector<Rect> leftBlobs;
	vector<Rect> rightBlobs;
	vector<Rect> leftCandidatesEye;
	vector<Rect> rightCandidatesEye;
	
	Blob candidatedBlob;
	Rect aBlob;
	
	unsigned int blobSize = 0;
	float blobRatio = 0.0;
	
	bool isLeft = false;
	bool isRight = false;
	
	//The Kinect IR image is already single-channel; copy it and cut the eyes area below
	//cvtColor(frameIR, binaryFrame, CV_BGR2GRAY);
	frameIR.copyTo(binaryFrame);
	
	//Cut binaryFrame to obtain the eyesArea sub-image
	Mat temp2 (binaryFrame, eyesAreaBlob.getRect());	
			
	//Distance handler: tune blob-size limits and the adaptive-threshold block size
	//to the user's distance from the sensor
	if (userDistance < 700)
	{	
		//Define blobs dimension
		MIN_EYE_BLOB_SIZE = 30;
		MAX_EYE_BLOB_SIZE = 300;
		
		//Get binary image and optimize it for blob analysis
		adaptiveThreshold (temp2, temp1, 255, 
						   ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, 89, 0); //AirLab 125
		erode(temp1,contoursFrame, Mat());
	}
	else if ((userDistance >= 700)&&(userDistance < 760))
	{
		//Define blobs dimension
		MIN_EYE_BLOB_SIZE = 40;
		MAX_EYE_BLOB_SIZE = 300;
		
		//Get binary image and optimize it for blob analysis
		adaptiveThreshold (temp2, temp1, 255, 
						   ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, 91, 0); //AirLab 125
		erode(temp1,contoursFrame, Mat());

		//imshow("Binary Eyes Image", temp1);
		//imshow("Eroded Eyes Image", contoursFrame); 
	}
	else
	{
		//Define blobs dimension
		MIN_EYE_BLOB_SIZE = 35;
		MAX_EYE_BLOB_SIZE = 300;
		
		//Get binary image and optimize it for blob analysis
		adaptiveThreshold (temp2, temp1, 255, 
							ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, 75, 0); //Airlab 111
		erode(temp1,contoursFrame, Mat());
	}
	
	//Find eyesBlob
	//-----TRY TO USE CANNY FIRST-------//
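	//Contours are found in the cropped sub-image; the offset parameter shifts
	//the resulting points back into full-frame coordinates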
	findContours(contoursFrame, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE , eyesAreaBlob.getPt1());
	
	//Filter contours and get the best ones
	//Walk the top-level contours via hierarchy[i][0] (next sibling); the
	//empty() guard avoids out-of-bounds access when no contour is found
	for(int i = 0; i >= 0 && !hierarchy.empty(); i = hierarchy[i][0])
	{
		if ((int)contours[i].size() > 4)
		{
			aBlob = boundingRect(contours[i]);
			if(eyesFilteringEnable)
			{
				//Data for filtering on blob dimensions
				blobSize = aBlob.width * aBlob.height;
				blobRatio = (float)aBlob.width / (float)aBlob.height;
				//Save blob into vector of candidated blobs
				candidatedBlob = Blob(cvPoint(aBlob.x, aBlob.y), aBlob.height, aBlob.width);
				
				if (((blobSize > MIN_EYE_BLOB_SIZE) && (blobSize < MAX_EYE_BLOB_SIZE)) && (blobRatio > BLOB_EYE_RATIO))
				{
					//Get distance between blob center and left/right edge of eyesAreaBlob
					unsigned int distDX = eyesAreaBlob.getPt2().x - candidatedBlob.getCenter().x;
					unsigned int distSX = candidatedBlob.getCenter().x - eyesAreaBlob.getPt1().x;

					//Put blobs into vector
					if( distDX >= distSX )  //blob lies in the left half (SX)
					{
						leftBlobs.push_back(aBlob);
					}
					else
					{
						rightBlobs.push_back(aBlob);
					}
				}
			}
		}
	}
					
	//LEFT BLOBS 
	if(leftBlobs.size() >= MAX_EYE_CANDIDATE_SIZE)
	{
		for(int i = 0; i < MAX_EYE_CANDIDATE_SIZE; i ++)
		{
			int k = getMinDistanceRect_y(leftBlobs, eyesAreaBlob.getPt2().y);
			leftCandidatesEye.push_back(leftBlobs[k]);
			leftBlobs.erase(leftBlobs.begin() + k);
		}
	}
	else
	{
		for(size_t i = 0; i < leftBlobs.size(); i++)
			leftCandidatesEye.push_back(leftBlobs[i]);
	}

	//RIGHT BLOBS
	if(rightBlobs.size() >= MAX_EYE_CANDIDATE_SIZE)
	{
		for(int i = 0; i < MAX_EYE_CANDIDATE_SIZE; i ++)
		{					
			int k = getMinDistanceRect_y(rightBlobs, eyesAreaBlob.getPt2().y);
			rightCandidatesEye.push_back(rightBlobs[k]);
			rightBlobs.erase(rightBlobs.begin() + k);
		}
	}
	else
	{
		for(size_t i = 0; i < rightBlobs.size(); i++)
			rightCandidatesEye.push_back(rightBlobs[i]);
	}
	
	
	//Draw all eyes candidates
	for(size_t i = 0; i < leftCandidatesEye.size(); i++)
		rectangle(frameDrawn, leftCandidatesEye[i], CV_RGB(255,0,0), 1, 8, 0);
	for(size_t i = 0; i < rightCandidatesEye.size(); i++)
		rectangle(frameDrawn, rightCandidatesEye[i], CV_RGB(0,255,0), 1, 8, 0);
	
	
	/*
	 * Final filtering
	 */
	
	if(leftCandidatesEye.size() == 1)
	{
		isLeft = true;
		leftEye = leftCandidatesEye[0];
	}
	if(rightCandidatesEye.size() == 1)
	{
		isRight = true;
		rightEye = rightCandidatesEye[0];
	}
	
	if(isLeft)
	{
		//circle(frameDrawn, candidatedBlob.getCenter(), 2, CV_RGB(255,255,0), 2, 8, 0);
		rectangle(frameDrawn, leftEye, CV_RGB(0,0,255), 2,8,0);
		userEyeLratio = (float)leftEye.height / (float)leftEye.width;
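		//Store the left eye center's y; the right-eye branch below subtracts its own center to estimate roll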
		userHeadRoll = leftEye.y + leftEye.height/2;
	}
	else
		userEyeLratio = -1.0;
		
	if(isRight)
	{	
		//circle(frameDrawn, candidatedBlob.getCenter(), 2, CV_RGB(255,255,0), 2, 8, 0);
		rectangle(frameDrawn, rightEye, CV_RGB(0,0,255), 2,8,0);
		userEyeRratio = (float)rightEye.height / (float)rightEye.width;
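		//Roll estimate: vertical offset between the left and right eye centers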
		userHeadRoll = userHeadRoll - (rightEye.y + rightEye.height/2);
	}	
	else
		userEyeRratio = -1.0;
		
	if(!isLeft || !isRight)
		userHeadRoll = 0;
	
	eyesDataMessageReady = true;
}
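The helper getMinDistanceRect_y() is not part of this listing. A minimal sketch consistent with how it is called above (returning the index of the blob whose vertical center is closest to a reference y coordinate), assuming that is indeed its contract, could look like this:

//Hypothetical sketch of the external helper used above; the original
//implementation may differ. Requires <climits> and <cstdlib>.
int getMinDistanceRect_y(const vector<Rect>& blobs, int refY)
{
	int bestIdx = 0;
	int bestDist = INT_MAX;
	for (size_t i = 0; i < blobs.size(); i++)
	{
		//Distance between the blob's vertical center and the reference y
		int centerY = blobs[i].y + blobs[i].height / 2;
		int dist = abs(centerY - refY);
		if (dist < bestDist)
		{
			bestDist = dist;
			bestIdx = (int)i;
		}
	}
	return bestIdx;
}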
Example 2
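detectFace() thresholds the IR frame around the tracked head position, keeps the biggest contour as the head blob, refines the width on the eyes sub-area, and publishes the face and eyes-area blobs together with a head width/height ratio and a pitch estimate.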
//==============================================================================
//==============================================================================
bool detectFace()
{
	//Variables 
	int biggerContourIdx = 0;
	int contourArea = -1;
	
	Mat binaryFrame;
	Mat binaryFrameCopy;
	
	vector<vector<Point> > contours;
	vector<Vec4i> hierarchy;
	
	Rect headROI;
	Rect faceROI;
	Blob faceBlobempirical;
	Rect eyesROI;
	
	if(userDistance < 750)
	{
		//Face area into frameIR 
		headROI = cvRect(	(headPosition.x - BLOB_HEAD_WIDTH/2),  //X
							headPosition.y, 					   //Y
							BLOB_HEAD_WIDTH, BLOB_HEAD_HEIGHT);
		//Convert IRImage into gray image and then into binary one
		//cvtColor(frameIR, binaryFrame, CV_BGR2GRAY);
		frameIR.copyTo(binaryFrame);
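		//The comparison operator returns a binary mask (255 where the pixel is brighter than the threshold)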
		binaryFrame = binaryFrame > THRESHOLD_HEAD_NEAR;
	}
	else
	{
		//Face area into frameIR 
		headROI = cvRect(	headPosition.x - (BLOB_HEAD_WIDTH-10)/2,  //X
							headPosition.y,							  //Y
							BLOB_HEAD_WIDTH-10, BLOB_HEAD_HEIGHT-20);
		//Convert IRImage into gray image and then into binary one
		//cvtColor(frameIR, binaryFrame, CV_BGR2GRAY);
		frameIR.copyTo(binaryFrame);
		binaryFrame = binaryFrame > THRESHOLD_HEAD_FAR;
	}

	//Check out-of-image errors and clamp the head ROI to the frame
	if(headROI.x < 0)
		headROI.x = 0;
	if(headROI.x > ( FRAME_WIDTH - headROI.width))
		headROI.x = FRAME_WIDTH - headROI.width;
	
	if(headROI.y < 0)
		headROI.y = 0;
	if(headROI.y > (FRAME_HEIGHT - headROI.height))
		headROI.y = FRAME_HEIGHT - (headROI.height+10);
	
	//Define a sub-image for head detection algorithm
	binaryFrame.copyTo(binaryFrameCopy);
	Mat headBinaryFrame (binaryFrame, headROI);
	
	//OpenCV find contours algorithm
	findContours(headBinaryFrame, contours, hierarchy, CV_RETR_CCOMP, 
				CV_CHAIN_APPROX_SIMPLE, cvPoint(headROI.x, headROI.y));
	
	//Filter contours and get the biggest one
	for (int i = 0; i >= 0 && !hierarchy.empty(); i = hierarchy[i][0])
	{
		//Draw contours
		//Scalar color = CV_RGB(rand()&255,rand()&255,rand()&255);
		//drawContours(frameDrawn, contours, i, color, CV_FILLED,8, hierarchy);
			
		headROI = boundingRect(contours[i]);
		
		//Get the biggest area
		int temp = headROI.width * headROI.height;
		if(temp > contourArea)
		{
			contourArea = temp;
			biggerContourIdx = i;
		}
	}

	//Save head dimensions
	if(contourArea > 0)
	{
		headROI = boundingRect(contours[biggerContourIdx]);
		headBlob = Blob(cvPoint(headROI.x, headROI.y), headROI.height, headROI.width);
		
		//imshow("BinaryFrame", binaryFrame);
		//rectangle(frameDrawn, headROI, CV_RGB(0,255,0), 2, 8, 0);

		//Reject detections that get too close to the frame borders
		if(headBlob.getPt1().x < 150 || headBlob.getPt2().x > 600 ||
		   headBlob.getPt1().y < 20  || headBlob.getPt2().y > 360)
		{
			userDistanceReady = false;
			return false;
		}


		//Define eyes area
		eyesROI = cvRect(headROI.x, (headROI.y + headROI.height/8 + 15),
						 headROI.width, 3*headROI.height/8);			
		
		//Shrink headROI width with findContours algorithm applied on eyesArea sub-image
		//Define a sub-image for face detection algorithm
		Mat faceBinaryFrame (binaryFrameCopy, eyesROI);

		//Find face contours
		contours.clear();
		hierarchy.clear();
		findContours(faceBinaryFrame, contours, hierarchy, CV_RETR_CCOMP, 
					CV_CHAIN_APPROX_SIMPLE, cvPoint(eyesROI.x, eyesROI.y));
		
		//Filter contours and get the biggest one
		biggerContourIdx = 0;
		contourArea = -1;
		for (int i = 0; i >= 0 && !hierarchy.empty(); i = hierarchy[i][0])
		{
			faceROI = boundingRect(contours[i]);
			
			//Get the biggest area
			int temp = faceROI.width * faceROI.height;
			if(temp > contourArea)
			{
				contourArea = temp;
				biggerContourIdx = i;
			}
		}
		
		//Save face dimensions
		if(contourArea > 0)
		{
			faceROI = boundingRect(contours[biggerContourIdx]);
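			//Face blob: x/width from the refined face contour, y/height from the head contour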
			faceBlob = Blob(cvPoint(faceROI.x, headROI.y), headROI.height, faceROI.width);
			
			//faceBlobempirical = Blob(cvPoint(faceROI.x, headROI.y), headROI.height, (headROI.height/4)*3);
			//rectangle(frameDrawn, faceBlobempirical.getRect(), CV_RGB(0,0,255), 2, 8, 0);
			
			eyesAreaBlob = 	Blob( cvPoint((faceROI.x), (eyesROI.y-5)),	//Pt1
								  cvPoint((faceROI.x+faceROI.width),eyesROI.y+eyesROI.height));	//Pt2
			
			//Draw face blob and eyes area
			rectangle(frameDrawn, faceBlob.getRect(), CV_RGB(0,255,0), 2, 8, 0);
			rectangle(frameDrawn, eyesAreaBlob.getRect(), CV_RGB(255,0,0), 2, 8, 0);
			
			//Save the face width/height ratio; near-square blobs are flagged as invalid
			userHeadRatio = (float)faceBlob.getWidth() / (float)faceBlob.getHeight();
			if (userHeadRatio > 0.9)
				userHeadRatio = -1.0;
			
			userHeadPitch = faceBlob.getPt1().y - headPosition.y;
			headDataMessageReady = true;
			return true;
		}
	}
	return false;
}
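All three examples rely on a Blob helper class whose definition is not included here. A minimal sketch, inferred only from the constructors and accessors used in these listings, might be:

//Hypothetical minimal Blob class matching the usage above; the original
//definition is external to these examples and may differ.
class Blob
{
public:
	Blob() {}
	//Construct from top-left corner plus height and width
	Blob(CvPoint pt, int height, int width)
		: rect(pt.x, pt.y, width, height) {}
	//Construct from top-left (pt1) and bottom-right (pt2) corners
	Blob(CvPoint pt1, CvPoint pt2)
		: rect(pt1.x, pt1.y, pt2.x - pt1.x, pt2.y - pt1.y) {}

	CvPoint getPt1() const { return cvPoint(rect.x, rect.y); }
	CvPoint getPt2() const { return cvPoint(rect.x + rect.width, rect.y + rect.height); }
	CvPoint getCenter() const { return cvPoint(rect.x + rect.width/2, rect.y + rect.height/2); }
	Rect getRect() const { return rect; }
	int getWidth() const { return rect.width; }
	int getHeight() const { return rect.height; }

private:
	Rect rect;
};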
Example 3
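detectMovements() tracks Shi-Tomasi features from the previous IR frame into the current one with pyramidal Lucas-Kanade optical flow, summarizes the global displacement with a median arrow, and accumulates meaningful arrows into a movement that is interpolated and classified by a KNN classifier.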
//==============================================================================
//==============================================================================
void detectMovements()
{
	Mat frameIR_1C, prevFrameIR_1C;
	
	//prevFrameIR is already a single-channel IR image; just copy it
	//cvtColor(prevFrameIR, prevFrameIR_1C, CV_BGR2GRAY);
	prevFrameIR.copyTo(prevFrameIR_1C);
	
	//frameIR is already a single-channel IR image; just copy it
	//cvtColor(frameIR, frameIR_1C, CV_BGR2GRAY);
	frameIR.copyTo(frameIR_1C);
	
	//-----	SHI & TOMASI ALGORITHM	-----
	
	//Features detected in prevFrameIR_1C
	vector<Point2f> prevFrameIRFeatures;
	//Mask restricting feature detection to the eyes area (shrunk by 10 px per side)
	Mat mask;
	CvPoint pt1Mask, pt2Mask;
	
	CvSize dim = cv::Size(frameIR.cols, frameIR.rows);
	mask = cv::Mat::zeros(dim, CV_8U);
	pt1Mask.x = eyesAreaBlob.getPt1().x + 10;
	pt1Mask.y = eyesAreaBlob.getPt1().y;
	pt2Mask.x = eyesAreaBlob.getPt2().x - 10;
	pt2Mask.y = eyesAreaBlob.getPt2().y;

	cv::rectangle(mask, pt1Mask, pt2Mask, CV_RGB(255, 255, 255), CV_FILLED, 8, 0);
	
	//draw mask dimensions
	//cv::rectangle(frameDrawn, pt1Mask, pt2Mask, CV_RGB(0, 0, 255), 2, 8, 0);
	
	//Computation of prevFrameIR_1C features
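	//(up to FEATURES_NUMBER corners, quality level 0.2, min distance 0.1 px, 3x3 block, Harris disabled)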
	goodFeaturesToTrack(prevFrameIR_1C, prevFrameIRFeatures, FEATURES_NUMBER, .2, .1, mask, 3, false, 0.04);
	
	//-----	LUCAS AND KANADE ALGORITHM	-----
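	//Pyramidal LK tracks the Shi-Tomasi features into the new frame using a
	//15x15 search window and 3 pyramid levels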
	
	//Tracked feature positions in frameIR_1C, with per-feature status and error
	vector<Point2f> frameIRFeatures;
	vector<uchar> foundFeatures;
	vector<float> featuresError;
	
	TermCriteria terminationCriteria = TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .03);
	
	calcOpticalFlowPyrLK(prevFrameIR_1C, frameIR_1C, 
						 prevFrameIRFeatures, frameIRFeatures,
						 foundFeatures, featuresError,
						 cv::Size(15,15), 3, terminationCriteria, 0.5, 0);
	
	//----- PRINT ALL GOOD FEATURES FOUND -----
	/*for(int i=0; i< FEATURES_NUMBER; i++)
	{
		pointer arrow;
		arrow.pt1.x = prevFrameIRFeatures[i].x;
		arrow.pt1.y = prevFrameIRFeatures[i].y;
		arrow.pt2.x = frameIRFeatures[i].x;
		arrow.pt2.y = frameIRFeatures[i].y;
		
		double angle = atan2((double) arrow.pt1.y - arrow.pt2.y, (double) arrow.pt1.x - arrow.pt2.x);
		double hypotenuse = sqrt(pow((arrow.pt1.y - arrow.pt2.y),2) + pow((arrow.pt1.x - arrow.pt2.x),2));
		
		drawArrow(arrow, hypotenuse, angle, CV_RGB(0,255,0), 1);
	}*/
	
	//----- MEDIAN COMPUTATION -----
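	//The median of the feature displacements is robust to outlier tracks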
	pointer median;
	median.pt1 = medianComputation(prevFrameIRFeatures);
	median.pt2 = medianComputation(frameIRFeatures);
	
	//----- MOVEMENTS DATA COMPUTATION -----
	double medianHypotenuse = sqrt(pow((median.pt1.y - median.pt2.y),2) + pow((median.pt1.x - median.pt2.x),2));
	double medianAngle = atan2((double)(median.pt1.y - median.pt2.y), (double)(median.pt1.x - median.pt2.x));
	
	//----- PRINT MOVEMENT FOUND -----
	pointer moveArrow;
	
	//Print arrow upon the face
	moveArrow.pt1.x = faceBlob.getPt1().x + (((faceBlob.getPt2().x)-(faceBlob.getPt1().x))/2);
	moveArrow.pt1.y = faceBlob.getPt1().y;
	
	//Considering only meaningful arrows
	if(((unsigned int)medianHypotenuse > 10) && ((unsigned int)medianHypotenuse < 100))
	{
		//reset count of holes during movements
		countHolesMove = 0;

		//draw arrow
		drawArrow(moveArrow, medianHypotenuse, medianAngle, CV_RGB(255,0,0), 2);
		
		//Accumulate the movement intensity (sum of hypotenuses, averaged later)
		moveIntensity = moveIntensity + medianHypotenuse;
		
		//save data of arrow
		//ROS_INFO("Angle: %f, AngleClass:%d", medianAngle, defineAngleClass(medianAngle));
		Point2DSample moveSample = Point2DSample(defineAngleClass(medianAngle), 1, 1.0, 'U');		
		move.insert(moveSample);
		
	}
	else if(countHolesMove < MAX_NO_MOVES)
	{
		countHolesMove++;
	}
	else
	{
		vector<Point2DSample> interpolatedMoves;
		
		//Keep only recorded movements of meaningful length, then interpolate them
		if ((move.size() >= MIN_MOVES) && (move.size() <= MAX_MOVES))
		{
			moveIntensity = moveIntensity / move.size();
			
			//ROS_INFO(" --- MOVE DONE!!!---Intensity average: %f", moveIntensity);
			interpolatedMoves = move.interpolate(INTERPOLATION_FACTOR, HERMITE_INTERPOLATION);
			//ROS_INFO("Ready to Classify A!");

			//Store the interpolated samples into the movement trajectory
			for(int j = 0; j < INTERPOLATION_FACTOR; j++)
			{
				Point2DSample sample = Point2DSample(interpolatedMoves[j].getX(), j, 1.0, 'U');
				movement.insert(sample);
			}
			
			//ROS_INFO("Ready to Classify B!");
			
			//----ACTIVATE THIS PART FOR ENABLING CLASSIFICATION ----//
			vector<Point2DSample> temp = movement.getSamples();
			//Get movement classification using the KNN classifier with the radius algorithm
			userMoveClass = knn->getClassification(temp, knnScores, knnClass, KNN_WITH_RADIUS, 2.0, WITHOUT_LOG);	

			//Intensity check: a very intense denying move ('D') is re-classified
			//as 'L' or 'R' according to the stronger KNN score
			if(moveIntensity > DENYING_INTESITY && userMoveClass == 'D')
			{
				int scoreL = 0, scoreR = 0;
				for (size_t i = 0; i < knnClass.size(); i++)
				{
					if(knnClass[i] == 'L')
						scoreL = knnScores[i];
					else if(knnClass[i] == 'R')
						scoreR = knnScores[i];
				}
				//Compare once, after both scores have been collected
				if (scoreL >= scoreR)
					userMoveClass = 'L';
				else
					userMoveClass = 'R';
			}
			
			moveDataMessageReady = true;
			
			/*//----ACTIVATE THIS PART FOR CREATING DATASET ----//
			movement.save(movesClassifier, SAVE_AS_DOUBLE, true, true);*/
			
			//reset
			moveIntensity = 0.0;
			movement.clear();
		}
		move.clear();
	}
}
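The helpers medianComputation(), drawArrow() and the pointer struct are external to this listing. As an illustration, a component-wise median over the tracked points, assuming that is what medianComputation() computes, could be sketched as follows:

//Hypothetical sketch of the external helper; the original may differ.
//Requires <algorithm>.
CvPoint medianComputation(vector<Point2f> points)
{
	if (points.empty())
		return cvPoint(0, 0);

	//Collect and sort the x and y components independently
	vector<float> xs, ys;
	for (size_t i = 0; i < points.size(); i++)
	{
		xs.push_back(points[i].x);
		ys.push_back(points[i].y);
	}
	sort(xs.begin(), xs.end());
	sort(ys.begin(), ys.end());

	//Component-wise median (middle element of each sorted component)
	return cvPoint((int)xs[xs.size() / 2], (int)ys[ys.size() / 2]);
}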