Example #1
    bool Machinery::detect(ImageVector & images, JSON & data)
    {
        // -------------
        // Detect motion

        return detectMotion(images, data);
    }
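
For context, here is a minimal sketch of what a frame-differencing motion test behind detect could look like, written against plain OpenCV Mat frames. The real detectMotion operates on the library's ImageVector and JSON types, whose internals are not shown in this example, so the signature, the threshold values, and the countNonZero criterion below are all assumptions.

    #include <opencv2/opencv.hpp>

    // Hypothetical sketch only: the actual detectMotion works on ImageVector/JSON.
    bool detectMotionSketch(const cv::Mat &prev, const cv::Mat &curr,
                            double pixelThreshold = 30.0, int minChangedPixels = 500)
    {
        cv::Mat grayPrev, grayCurr, diff, mask;
        cv::cvtColor(prev, grayPrev, cv::COLOR_BGR2GRAY);
        cv::cvtColor(curr, grayCurr, cv::COLOR_BGR2GRAY);
        cv::absdiff(grayPrev, grayCurr, diff);                 // per-pixel change
        cv::threshold(diff, mask, pixelThreshold, 255, cv::THRESH_BINARY);
        return cv::countNonZero(mask) >= minChangedPixels;     // enough pixels changed?
    }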
Example #2
/*
* processFrame
*
* creates a threshold image of the frame, splits the thresholded image into left
* and right halves, and then tracks the configured color in each half, setting
* the left and right paddle positions accordingly
*
* preconditions:	frame must be a valid Mat object representing a single frame
*					from a VideoCapture object
* postconditions:	sets the left and right paddles according to the color detected
*					in the left and right halves of the frame, respectively
*/
void ColorPaddleDetector::processFrame(Mat &frame)
{
	flip(frame, frame, 1);

	Mat thres;
	createThresholdImg(frame, thres);

	// split the threshold image into left and right halves for separate color
	// detection on each side of the frame
	int x = thres.cols / 2;
	int y = thres.rows;
	Mat thresholdLeft(thres, Rect(0, 0, x, y));
	Mat thresholdRight(thres, Rect(x, 0, x, y));

	// search for the tracked color in each half and set the corresponding paddle
	detectMotion(thresholdLeft, frame, IS_RED);
	detectMotion(thresholdRight, frame, IS_BLUE);
}
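
createThresholdImg is not shown above; a plausible sketch, assuming HSV in-range color segmentation, follows. The function name suffix, the hue/saturation/value bounds, and the morphological cleanup are assumptions, not the project's actual values.

// Hypothetical sketch of createThresholdImg, assuming HSV color segmentation.
void createThresholdImgSketch(const cv::Mat &frame, cv::Mat &thres)
{
	cv::Mat hsv;
	cv::cvtColor(frame, hsv, cv::COLOR_BGR2HSV);
	// keep only pixels that fall inside the tracked color's HSV range
	// (placeholder bounds shown; roughly a red hue band)
	cv::inRange(hsv, cv::Scalar(0, 120, 70), cv::Scalar(10, 255, 255), thres);
	// remove speckle noise before the halves are searched
	cv::erode(thres, thres, cv::Mat());
	cv::dilate(thres, thres, cv::Mat());
}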
Example #3
/*
* processFrame
*
* uses sequential images to detect motion in the left and right halves of the frame.
*
* preconditions:	frame must be a valid Mat object representing a single frame
*					from a VideoCapture object
* postconditions:	sets left and right paddles according to motion detected in the
*					left and right halves of the frame, respectively
*/
void MotionPaddleDetector::processFrame(Mat& frame) {
	Mat frame2, gray, gray2, thres, diff;

	// use sequential images (frame and frame2) for motion detection

	// read in frame and convert to grayscale
	m_vid->read(frame);
	flip(frame, frame, 1);
	cvtColor(frame, gray, COLOR_BGR2GRAY);

	// read in frame2 and convert to grayscale
	m_vid->read(frame2);
	flip(frame2, frame2, 1);
	cvtColor(frame2, gray2, COLOR_BGR2GRAY);

	// create the difference image of frame and frame2 after both have been
	// converted to grayscale
	absdiff(gray, gray2, diff);

	// threshold difference
	threshold(diff, thres, THRESHOLD_SENSITIVITY, 255, THRESH_BINARY);

	// blur the image; the output will be an intensity image
	blur(thres, thres, cv::Size(BLUR_SIZE, BLUR_SIZE));

	// threshold intensity image to get binary image (after blurring)
	threshold(thres, thres, THRESHOLD_SENSITIVITY, 255, THRESH_BINARY);

	// split threshold (now binary image) into left and right halves
	int x = thres.cols / 2;
	int y = thres.rows;
	Mat thresholdLeft(thres, Rect(0, 0, x, y));
	Mat thresholdRight(thres, Rect(x, 0, x, y));

	// detect motion in each half of the binary image
	detectMotion(thresholdLeft, frame, IS_RED);
	detectMotion(thresholdRight, frame, IS_BLUE);
}
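
The shared detectMotion(half, frame, color) step is also not shown. One plausible way to turn a binary half-image into a paddle position is to take the centroid of its active pixels with image moments; the sketch below is an assumption about that step, not the project's implementation.

// Hypothetical sketch: centroid row of the white pixels in one half-image,
// which would then drive the corresponding paddle. Returns -1 if the half
// contains no active pixels.
int paddleRowSketch(const cv::Mat &binaryHalf)
{
	cv::Moments m = cv::moments(binaryHalf, /*binaryImage=*/true);
	if (m.m00 <= 0)
		return -1;                              // nothing detected in this half
	return static_cast<int>(m.m01 / m.m00);     // centroid y = paddle position
}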
Example #4
void LKTracker::ShowMotion(cv::Mat& image)
{
	MotionVector::iterator iter;

	// detectMotion returns 1 for rightward motion and 0 for leftward motion
	int motionR, motionL, motionLast, motionLongestL, motionLongestR;
	double motionSumL, motionSumR;

	for(iter = this->regions.begin(); iter != this->regions.end(); ++iter)
	{
		Motion *motion = (*iter);

		motionR = 0;
		motionL = 0;
		motionLast = 0;
		motionLongestR = 0;
		motionLongestL = 0;
		motionSumL = 0;
		motionSumR = 0;

		cv::Rect origin = motion->getRect();

		cv::rectangle(image, origin, CV_RGB(255, 0, 255), 2);

		for(int i = 0; i < motion->getVx().rows; i++)
		{
			for(int j = 0; j < motion->getVx().cols; j++)
			{
				double x_component = motion->getVx().at<double>(i,j);
				double y_component = motion->getVy().at<double>(i,j);

				//std::cout << "vx " << x_component << " vy " << y_component << std::endl;

				cv::Point p1 = cv::Point(j+origin.x, i+origin.y);
				cv::Point p2 = cv::Point(j+x_component+origin.x, i + y_component+origin.y);

				// filter by displacement magnitude
				
				if(cv::norm(p1-p2) >= magnitude_treshold)
				{
					// draw the flow vector: start point, line, and end point
					cv::circle(image, p1, 4, cv::Scalar(0, 255, 0), 2, 8);
					cv::line(image, p1, p2, CV_RGB(255, 0, 0), 2);
					cv::circle(image, p2, 1, cv::Scalar(0, 255, 0), 2, 8);
				}

				// ignore very small displacements; they are most likely noise
				if(cv::norm(p1-p2) < 5)
				{
					continue;
				}

				int direction = detectMotion(p1, p2);
				if(direction == 1) {
					motionSumR += std::abs(x_component);
					motionR++;
				} else if(direction == 0) {
					motionSumL += std::abs(x_component);
					motionL++;
				}

				/*
				// detect gestures
				if(detectMotion(p1, p2) == 1 && motionLast == 1)
				{
					++motionR;
					motionLast = 1;
				}
				else if (detectMotion(p1, p2) == 1 && motionLast == 0)
				{
					motionR = 1;
					motionLast = 1;

					// check if last sequence of LEFTS- 0 was longest one
					if (motionLongestL < motionL)
					{
						motionLongestL = motionL;
					}
				}
				else if(detectMotion(p1, p2) == 0 && motionLast == 0)
				{
					++motionL;
					motionLast = 0;
				}
				else if(detectMotion(p1, p2) == 0 && motionLast == 1)
				{
					motionL = 1;
					motionLast = 0;

					// check if last sequence of RIGHTS- 1 was longest one
					if (motionLongestR < motionR)
					{
						motionLongestR = motionR;
					}
				}
				*/
				
			}
		}

		// print detected motion
		/*if (motionLongestR>motionLongestL)
			std::cout << "RIGHT" << std::endl;
		else if (motionLongestR<motionLongestL)
			std::cout << "LEFT" << std::endl;

			*/
		// average horizontal motion per direction; guard against division by zero
		double av_motion_l = (motionL > 0) ? motionSumL / motionL : 0.0;
		double av_motion_r = (motionR > 0) ? motionSumR / motionR : 0.0;

		cv::Point p(origin.x+50, origin.y+50);
		if (av_motion_r>av_motion_l) {
			cv::putText(image, "R", p, cv::FONT_HERSHEY_SIMPLEX, 1.0, cv::Scalar(0,255,0), 3);
		}
		else if (av_motion_r<av_motion_l) {
			cv::putText(image, "L", p, cv::FONT_HERSHEY_SIMPLEX, 1.0, cv::Scalar(0,255,0), 3);
		}
	}
}
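
ShowMotion consumes per-region flow fields through Motion::getVx()/getVy(), but the code that fills them is not part of this example. Although the LKTracker name suggests Lucas-Kanade, the sketch below uses OpenCV's dense Farneback flow, since it is available out of the box; treat it as an assumed stand-in for however the tracker actually computes Vx and Vy.

#include <opencv2/opencv.hpp>

// Hypothetical sketch: produce dense per-pixel flow and split it into the
// CV_64F Vx/Vy matrices that ShowMotion reads with at<double>().
void computeFlowSketch(const cv::Mat &prevGray, const cv::Mat &currGray,
                       cv::Mat &vx, cv::Mat &vy)
{
	cv::Mat flow;                                 // CV_32FC2: (dx, dy) per pixel
	cv::calcOpticalFlowFarneback(prevGray, currGray, flow,
	                             0.5, 3, 15, 3, 5, 1.2, 0);
	cv::Mat channels[2];
	cv::split(flow, channels);                    // separate x and y components
	channels[0].convertTo(vx, CV_64F);
	channels[1].convertTo(vy, CV_64F);
}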