Code Example #1
void ForegroundDetector::nextIteration(const Mat &img)
{
    if(bgImg.empty())
    {
        return;
    }

    Mat absImg = Mat(img.rows, img.cols, img.type());
    Mat threshImg = Mat(img.rows, img.cols, img.type());

    absdiff(bgImg, img, absImg);
    threshold(absImg, threshImg, fgThreshold, 255, CV_THRESH_BINARY);

    IplImage im = (IplImage)threshImg;
    CBlobResult blobs = CBlobResult(&im, NULL, 0);

    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minBlobSize);

    vector<Rect>* fgList = detectionResult->fgList;
    fgList->clear();

    for(int i = 0; i < blobs.GetNumBlobs(); i++)
    {
        CBlob *blob = blobs.GetBlob(i);
        CvRect rect = blob->GetBoundingBox();
        fgList->push_back(rect);
    }

}
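
The members this method uses are declared elsewhere in the project. As context, a minimal sketch of the surrounding class, with every name inferred from the snippet (the DetectionResult type in particular is hypothetical):

class ForegroundDetector
{
public:
    void nextIteration(const Mat &img);

private:
    Mat bgImg;                        // background frame to diff against
    double fgThreshold;               // absdiff level above which a pixel counts as foreground
    double minBlobSize;               // blobs with a smaller area are filtered out
    DetectionResult *detectionResult; // hypothetical holder of fgList (a vector<Rect>*)
};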
Code Example #2
File: App.cpp Project: nebogeo/magicsquares
void App::Update(Image &camera)
{
    /*camera=camera.Scale(camera.m_Image->width/2,
                        camera.m_Image->height/2);
    */
    //cvFlip(camera.m_Image, NULL, 0);

	///////////////////////////////////
	// dispatch from input

	int key=cvWaitKey(10);

//    usleep(500);

	static int t=150;
    static bool viewthresh=false;
    static bool off=false;
    static int spirit=0;
    static int crop_x=0;
    static int crop_y=0;
    static int crop_w=camera.m_Image->width;
    static int crop_h=camera.m_Image->height;

	switch (key)
	{
    case 't': viewthresh=!viewthresh; break;
    case 'q': t--; break;
    case 'w': t++; break;
    case 'e': t-=20; break;
    case 'r': t+=20; break;
    case 'o': off=!off; break;
    case 'p': spirit++; break;
    case 'z': crop_x+=10; break;
    case 'x': crop_x-=10; break;
    case 'c': crop_y+=10; break;
    case 'v': crop_y-=10; break;
    case 'b': crop_w+=10; break;
    case 'n': crop_w-=10; break;
    case 'm': crop_h+=10; break;
    case ',': crop_h-=10; break;
	}

    if (crop_x<0) crop_x=0;
    if (crop_x>=camera.m_Image->width) crop_x=camera.m_Image->width-1;
    if (crop_y<0) crop_y=0;
    if (crop_y>=camera.m_Image->height) crop_y=camera.m_Image->height-1;
    if (crop_w+crop_x>camera.m_Image->width)
    {
        crop_w=camera.m_Image->width-crop_x;
    }
    if (crop_h+crop_y>camera.m_Image->height)
    {
        crop_h=camera.m_Image->height-crop_y;
    }

    if (off)
    {
        sleep(1);
        cerr<<"off..."<<endl;
        return;
    }

    Image thresh=camera.RGB2GRAY().SubImage(crop_x,crop_y,crop_w,crop_h);
    cvThreshold(thresh.m_Image,thresh.m_Image,t,255,CV_THRESH_BINARY);
    // copy the threshold into a colour image
    Image tofill=thresh.GRAY2RGB();
    // seed the flood fill from the centre of the cropped image (tofill is crop_w x crop_h)
    cvFloodFill(tofill.m_Image,cvPoint(crop_w/2,crop_h/2),
                CV_RGB(0,255,0),cvScalar(0),cvScalar(255));

    CBlobResult blobs;
    blobs = CBlobResult( thresh.m_Image, NULL, 255 );
    // exclude the ones smaller than param2 value
    blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 10);

    CBlob *currentBlob;
    Image *out=NULL;

    if (key=='s')
    {
        // add the alpha channel
        Image src=camera.SubImage(crop_x,crop_y,crop_w,crop_h);
        out = new Image(src.m_Image->width,
                        src.m_Image->height, 8, 4);

        for(int y=0; y<src.m_Image->height; y++)
        {

            for(int x=0; x<src.m_Image->width; x++)
            {
                CvScalar col = cvGet2D(src.m_Image,y,x);
                CvScalar alpha = cvGet2D(tofill.m_Image,y,x);
                if (alpha.val[0]==0 &&
                    alpha.val[1]==255 &&
                    alpha.val[2]==0)
                    col.val[3]=0;
                else
                    col.val[3]=255;
                cvSet2D(out->m_Image,y,x,col);
            }
        }
    }

    if (key=='s')
    {
        cerr<<"deleting old images in islands/"<<endl;
        int r=system("rm islands/*");
    }

    list<CvRect> allrects;

    for (int i = 0; i < blobs.GetNumBlobs(); i++ )
    {
        currentBlob = blobs.GetBlob(i);
        allrects.push_back(currentBlob->GetBoundingBox());
    }

    list<CvRect> filteredrects=allrects;

    /* for (list<CvRect>::iterator i=allrects.begin();
         i!=allrects.end(); ++i)
    {
        bool in=false;
        for (list<CvRect>::iterator j=allrects.begin();
             j!=allrects.end(); ++j)
        {
            if (Inside(*i,*j)) in=true;
        }
        if (!in) filteredrects.push_back(*i);
        }*/

    unsigned int instance = rand();

    unsigned int count=0;
    for (list<CvRect>::iterator i=filteredrects.begin();
         i!=filteredrects.end(); ++i)
    {
        CvRect rect = *i;

        if (key=='s')
        {
            Image island = out->SubImage(rect.x,rect.y,
                                         rect.width,rect.height);

            char buf[256];
            sprintf(buf,"islands/island-%d-%d-%d.png",count,
                    rect.x+rect.width/2,
                    rect.y+rect.height/2);
            cerr<<"saving "<<buf<<endl;
            island.Save(buf);

            sprintf(buf,"dump/island-%d-%d-%d-%d.png",
                    instance,
                    count,
                    rect.x+rect.width/2,
                    rect.y+rect.height/2);
            cerr<<"saving "<<buf<<endl;
            island.Save(buf);

        }
        else
        {
            cvRectangle(camera.m_Image,
                        cvPoint(crop_x+rect.x,crop_y+rect.y),
                        cvPoint(crop_x+rect.x+rect.width,
                                crop_y+rect.y+rect.height),
                        colors[1]);
        }
        count++;
    }

    if (key=='s')
    {
        cerr<<"copying images to server"<<endl;
        //int r=system("scp -r islands [email protected]:/home/garden/GerminationX/oak/");
        string path("/home/dave/code/lirec/scenarios/GerminationX/oak/public/");
        path+=string(spirits[spirit%3]);
        string command=string("rm ")+path+string("/*.*");
        int r=system(command.c_str());
        string command2=string("cp islands/* ")+path;
        r=system(command2.c_str());
        //cerr<<"finished copying...("<<r<<")"<<endl;
    }

    if (viewthresh) camera=tofill;

    char buf[256];
    sprintf(buf,"spirit: %s thresh: %d", spirits[spirit%3], t);
    cvPutText(camera.m_Image, buf, cvPoint(10,20),
              &m_Font, colors[0]);

    cvRectangle(camera.m_Image,
                cvPoint(crop_x,crop_y),
                cvPoint(crop_x+crop_w,crop_y+crop_h),
                colors[2]);

    if (out!=NULL) delete out;
}
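
The commented-out containment filter above calls an Inside(CvRect, CvRect) helper that is not part of the snippet. A plausible sketch of what it tests (hypothetical; note the commented loop would also compare each rect with itself, so a real implementation would need an identity check as well):

// Returns true if rect a lies entirely inside rect b (hypothetical helper).
bool Inside(CvRect a, CvRect b)
{
    return a.x >= b.x && a.y >= b.y &&
           a.x + a.width  <= b.x + b.width &&
           a.y + a.height <= b.y + b.height;
}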
Code Example #3
int main(int argc, char *argv[])
{
    CvCapture* capture = cvCreateFileCapture( "recording_01.avi");



    handOrientation rightOrientationLast = NONE, leftOrientationLast = NONE;
    handOrientation rightOrientationCur = NONE, leftOrientationCur = NONE;


	//cvNamedWindow("Input Image", CV_WINDOW_AUTOSIZE);
	//cvNamedWindow("Skin Pixels", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("Skin Blobs", CV_WINDOW_AUTOSIZE);

    while(1){
        Mat imageBGR = cvQueryFrame(capture);
        if(imageBGR.empty())break;
        //imshow("Input Image", imageBGR);

        // Convert the image to HSV colors.
        Mat imageHSV = Mat(imageBGR.size(), CV_8UC3);	// Full HSV color image.
        cvtColor(imageBGR, imageHSV, CV_BGR2HSV);				// Convert from a BGR to an HSV image.

        std::vector<Mat> channels(3);
        split(imageHSV, channels);

        Mat planeH = channels[0];
        Mat planeS = channels[1];
        Mat planeV = channels[2];


        // Detect which pixels in each of the H, S and V channels are probably skin pixels.
        threshold(channels[0], channels[0], 150, UCHAR_MAX, CV_THRESH_BINARY_INV);//18
        threshold(channels[1], channels[1], 60, UCHAR_MAX, CV_THRESH_BINARY);//50
        threshold(channels[2], channels[2], 170, UCHAR_MAX, CV_THRESH_BINARY);//80


        // Combine all 3 thresholded color components, so that an output pixel will only
        // be white if the H, S and V pixels were also white.
        Mat imageSkinPixels = Mat( imageBGR.size(), CV_8UC1);	// Greyscale output image.
        bitwise_and(channels[0], channels[1], imageSkinPixels);				// imageSkin = H {BITWISE_AND} S.
        bitwise_and(imageSkinPixels, channels[2], imageSkinPixels);	// imageSkin = H {BITWISE_AND} S {BITWISE_AND} V.

        // Show the output image on the screen.

        //imshow("Skin Pixels", imageSkinPixels);


        IplImage ipl_imageSkinPixels = imageSkinPixels;

        // Find blobs in the image.
        CBlobResult blobs;

        blobs = CBlobResult(&ipl_imageSkinPixels, NULL, 0);	// Use a black background color.

        // Ignore the blobs whose area is less than minArea.

        blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minBlobArea);

        srand (time(NULL));

        // Show the large blobs.
        IplImage* imageSkinBlobs = cvCreateImage(imageBGR.size(), 8, 3);	// Colored output image.
        for (int i = 0; i < blobs.GetNumBlobs(); i++) {
            CBlob *currentBlob = blobs.GetBlob(i);
            currentBlob->FillBlob(imageSkinBlobs, CV_RGB(rand()%255,rand()%255,rand()%255));	// Draw the large blobs as white.

             cvDrawRect(imageSkinBlobs,
                  cvPoint(currentBlob->GetBoundingBox().x,currentBlob->GetBoundingBox().y),
                  cvPoint(currentBlob->GetBoundingBox().x + currentBlob->GetBoundingBox().width,currentBlob->GetBoundingBox().y + currentBlob->GetBoundingBox().height),
                  cvScalar(0,0,255),
                  2);//Draw Bounding Boxes

        }

        cvShowImage("Skin Blobs", imageSkinBlobs);

        //Gestures

        //std::cout << "Number of Blobs: "<< blobs.GetNumBlobs() <<endl;

        if(blobs.GetNumBlobs() == 0){
            //picture empty
        }else if(blobs.GetNumBlobs() == 1) {
            //head detected
        }else if(blobs.GetNumBlobs() == 2 || blobs.GetNumBlobs() == 3){
            //head + one hand || head + two hands
            CvRect rect[3];
            int indexHead = -1, indexHandLeft = -1, indexHandRight = -1;


            //Get Bounding Boxes
            for(int i = 0; i< blobs.GetNumBlobs(); i++){
                rect[i] = blobs.GetBlob(i)->GetBoundingBox();
            }

            //Detect Head and Hand indexes
            if(blobs.GetNumBlobs() == 2){
                int indexHand = -1;
                if(getCenterPoint(rect[0]).y < getCenterPoint(rect[1]).y){
                    indexHead = 0;
                    indexHand = 1;
                }else{
                    indexHead = 1;
                    indexHand = 0;
                }

                if(getHandside(rect[indexHead], rect[indexHand]) == LEFT){
                    indexHandLeft = indexHand;
                    indexHandRight = -1;
                }else{
                    // right hand
                    indexHandLeft = -1;
                    indexHandRight = indexHand;
                }

            }else{
                //two hands
                int indexHand1 = -1;
                int indexHand2 = -1;
                if(getCenterPoint(rect[0]).y < getCenterPoint(rect[1]).y && getCenterPoint(rect[0]).y < getCenterPoint(rect[2]).y){
                    indexHead = 0;
                    indexHand1 = 1;
                    indexHand2 = 2;
                }else if(getCenterPoint(rect[1]).y < getCenterPoint(rect[0]).y && getCenterPoint(rect[1]).y < getCenterPoint(rect[2]).y){
                    indexHead = 1;
                    indexHand1 = 0;
                    indexHand2 = 2;
                }else{
                    indexHead = 2;
                    indexHand1 = 0;
                    indexHand2 = 1;
                }

                if(getHandside(rect[indexHead], rect[indexHand1]) == LEFT){
                    indexHandLeft = indexHand1;
                    indexHandRight = indexHand2;
                }else{
                    indexHandLeft = indexHand2;
                    indexHandRight = indexHand1;
                }
            }

            // follow the right hand
            if(indexHandRight != -1) {
                //std::cout << "right hand deteced" <<endl;
                if(isMoving(handRight)) {
                    std::cout << "hand moving" <<endl;
                    handRight.centerPrev = handRight.centerCurr;
                    handRight.centerCurr = getCenterPoint(rect[indexHandRight]);
                } else {
                    std::cout << "hand not moving" <<endl;
                    if(handRight.centerInit.y != 0 && abs(handRight.centerInit.y - handRight.centerCurr.y) > 20) {
                        if(handRight.centerInit.y < handRight.centerCurr.y) {
                            // hand moved down
                            std::cout << "                           hand moved down" <<endl;
                        } else {
                            // hand moved up
                            std::cout << "                           hand moved up" <<endl;
                        }
                    }
                    handRight.centerInit = getCenterPoint(rect[indexHandRight]);
                    handRight.centerPrev = handRight.centerCurr;
                    handRight.centerCurr = getCenterPoint(rect[indexHandRight]);
                }
            }

            //Get Orientations from Hand rects
            leftOrientationCur = (indexHandLeft != -1)?getOrientationOfRect(rect[indexHandLeft]):NONE;
            rightOrientationCur = (indexHandRight != -1)?getOrientationOfRect(rect[indexHandRight]):NONE;

            //Check Change of Left hand
            /*switch(detectHandStateChange(leftOrientationLast, leftOrientationCur)){
                case PORTRAIT_TO_LANDSCAPE:
                    handleGestures(LEFT_FLIP_DOWN);
                    break;
                case LANDSCAPE_TO_PORTRAIT:
                    handleGestures(LEFT_FLIP_UP);
                    break;
                case NOCHANGE:
                default:
                    break;
            }

            //Check Change of Right hand
            switch(detectHandStateChange(rightOrientationLast, rightOrientationCur)){
                case PORTRAIT_TO_LANDSCAPE:
                    handleGestures(RIGHT_FLIP_DOWN);
                    break;
                case LANDSCAPE_TO_PORTRAIT:
                    handleGestures(RIGHT_FLIP_UP);
                    break;
                case NOCHANGE:
                default:
                    break;
            }*/


        }else if(blobs.GetNumBlobs() > 3){
            //too much information
            cout<<"too much information"<<endl;
        }

        leftOrientationLast = leftOrientationCur;
        rightOrientationLast = rightOrientationCur;



        // Free all the resources.
        /*cvReleaseImage( &imageBGR );
        cvReleaseImage( &imageHSV );
        cvReleaseImage( &planeH );
        cvReleaseImage( &planeS );
        cvReleaseImage( &planeV );
        cvReleaseImage( &imageSkinPixels );
        cvReleaseImage( &imageSkinBlobs );*/

        //if ESC is pressed then exit loop
        if( (cvWaitKey(33) & 0xFF) == 27 ) break;
	}
	cvWaitKey(0);
	cvReleaseCapture( &capture );

	return 0;
}
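
getCenterPoint, getHandside, and isMoving come from elsewhere in this project. Minimal sketches consistent with how the snippet calls them (the handside enum and Hand struct shapes are assumptions, and the 5 px motion threshold is a guess):

// Center of a bounding box (sketch).
CvPoint getCenterPoint(CvRect rect)
{
    return cvPoint(rect.x + rect.width / 2, rect.y + rect.height / 2);
}

// Classify a hand blob relative to the head blob (sketch; assumes
// enum handside { LEFT, RIGHT } exists in the real project).
handside getHandside(CvRect head, CvRect hand)
{
    return (getCenterPoint(hand).x < getCenterPoint(head).x) ? LEFT : RIGHT;
}

// A hand is "moving" if its center shifted since the last frame (sketch).
bool isMoving(const Hand &hand)
{
    return abs(hand.centerCurr.x - hand.centerPrev.x) > 5 ||
           abs(hand.centerCurr.y - hand.centerPrev.y) > 5; // 5 px is a guessed threshold
}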
Code Example #4
int main(){

	Scalar robotColor = CV_RGB(255, 0, 0);
	Scalar rightColor = CV_RGB(0, 255, 0);
	Scalar leftColor = CV_RGB(0, 0, 255);
	Scalar robotColor_2 = CV_RGB(0, 255, 255);
	Scalar rightColor_2 = CV_RGB(255, 0, 255);
	Scalar leftColor_2 = CV_RGB(255, 255, 0);

	int lowH = 0;
	int highH = 14;
	int top_cut = 120;
	int bot_cut = 70;
	int lowV = 200;
	int type = 0;
	int ticks = 0;
	int nb_errors = 0;
	int len = 150;
	int trace = 25;
	int sensitivity = 100;
	int area = 3000;
	int flip = 0; //set to 0 if no flips are needed, 1 for y axis, 2 for x axis and 3 for both

	namedWindow("My Window", 1);
	createTrackbar("lowH", "My Window", &lowH, 180);
	createTrackbar("highH", "My Window", &highH, 180);
	createTrackbar("top_cut", "My Window", &top_cut, 255);
	createTrackbar("bot_cut", "My Window", &bot_cut, 255);
	createTrackbar("lowV", "My Window", &lowV, 255);
	createTrackbar("LEN", "My Window", &len, 300);
	createTrackbar("TRACE", "My Window", &trace, 100);
	createTrackbar("SENSITIVITY", "My Window", &sensitivity, 200);
	createTrackbar("AREA", "My Window", &area, 7000);
	createTrackbar("FLIP", "My Window", &flip, 3);
	moveWindow("My Window", 0, 0);

	namedWindow("kalman", 1);
	moveWindow("kalman", 500, 0);
	namedWindow("Blobs Image", 1);
	moveWindow("Blobs Image", 500, 300);
	namedWindow("frame", 1);
	moveWindow("frame", 0, 500);
	namedWindow("test", WINDOW_AUTOSIZE);
	moveWindow("test", 0, 500);
	namedWindow("white", WINDOW_AUTOSIZE);
	moveWindow("white", 0, 500);

	//file of video input
	string filename = "testVideo_5.webm";
	ofstream logs;
	ofstream stats;
	stats.open("stats.txt");
	logs.open("logs.csv");
	logs << "Left_x,Left_y,Left_holds,Right_x,Right_y,Right_holds,confirmed" << endl;

	Point center_window = Point(WIDTH/2, (HEIGHT - top_cut - bot_cut)/2);
	Point center_left = Point(WIDTH/4, .5*max(10, HEIGHT - top_cut - bot_cut));
	Point center_right = Point(3*WIDTH/4, .5*max(10, HEIGHT - top_cut - bot_cut));


	// initialize the kalman filters
	KalmanFilter KF_left(4, 2, 0);
	KalmanFilter KF_right(4, 2, 0);

	Mat_<float> measurement_left(2,1); measurement_left.setTo(Scalar(0));
	Mat_<float> measurement_right(2,1); measurement_right.setTo(Scalar(0));

	initialize_kalman(&KF_left, center_left);
	initialize_kalman(&KF_right, center_right);

	VideoCapture cap(0);

  // VideoCapture cap(filename);

	Mat kf_img(HEIGHT - top_cut - bot_cut, WIDTH, CV_8UC3);
	vector<Point> mousev_left,kalmanv_left;
	mousev_left.clear();
	kalmanv_left.clear();
	vector<Point> mousev_right,kalmanv_right;
	mousev_right.clear();
	kalmanv_right.clear();

	int counter = 0;
	int nb_confirmed = 0;
	int nb_total = 0;
	double average_left = 0;
	double average_right = 0;
	double error_left = 0;
	double error_right = 0;
	double prev_dist = norm(center_left - center_right);
	double new_dist = prev_dist;
	bool left_valid = false;
	bool right_valid = true;
	Mat temp = Mat::zeros(100,400, CV_8UC3);
	putText(temp, "Press any key to start", Point(50,50), FONT_HERSHEY_SIMPLEX, .5, Scalar(255,255,255));
	putText(temp, "and ESC to end", Point(50, 75), FONT_HERSHEY_SIMPLEX, .5, Scalar(255,255,255));
	imshow("Blobs Image", temp);


	waitKey(-1);
	int key = 0;
	bool eof = false;

	for(;;){

		Mat frame;
		Mat prediction_left = KF_left.predict();
		Point new_left(prediction_left.at<float>(0), prediction_left.at<float>(1));
		measurement_left(0) = center_left.x;
		measurement_left(1) = center_left.y;

		Mat estimated_left = KF_left.correct(measurement_left);

		Point statePt_left(estimated_left.at<float>(0),estimated_left.at<float>(1));
		Point measPt_left(measurement_left(0),measurement_left(1));

		Mat prediction_right = KF_right.predict();
		Point new_right(prediction_right.at<float>(0), prediction_right.at<float>(1));
		measurement_right(0) = center_right.x;
		measurement_right(1) = center_right.y;

		Mat estimated_right = KF_right.correct(measurement_right);

		Point statePt_right(estimated_right.at<float>(0),estimated_right.at<float>(1));
		Point measPt_right(measurement_right(0),measurement_right(1));

		ticks ++;
		error_left = norm(statePt_left - measPt_left);
		average_left = ((average_left * (ticks - 1)) + error_left) / ticks;
		error_right = norm(statePt_right - measPt_right);
		average_right = ((average_right * (ticks - 1)) + error_right) / ticks;

		imshow("kalman", kf_img);
		// waitKey(-1);
		kf_img = Scalar::all(0);
		mousev_left.push_back(measPt_left);
		kalmanv_left.push_back(statePt_left);

		circle(kf_img, statePt_left, 1,  Scalar(255,255,255), -1);
		circle(kf_img, measPt_left, 1, Scalar(0,0,255), -1);
		int nb_mousev_left = mousev_left.size() - 1;
		int nb_kalmanv_left = kalmanv_left.size() - 1;
		int nb_mousev_right = mousev_right.size() - 1;
		int nb_kalmanv_right = kalmanv_right.size() - 1;

		for(int i = max(0, nb_mousev_left - trace); i< nb_mousev_left; i++){
			line(kf_img, mousev_left[i], mousev_left[i+1], Scalar(255,255,0), 1);
		}
		for(int i = max(0, nb_kalmanv_left - trace); i< nb_kalmanv_left; i++){
			line(kf_img, kalmanv_left[i], kalmanv_left[i+1], Scalar(0,0,255), 1);
		}

		mousev_right.push_back(measPt_right);
		kalmanv_right.push_back(statePt_right);

		circle(kf_img, statePt_right, 1,  Scalar(255,255,255), -1);
		circle(kf_img, measPt_right, 1, Scalar(0,0,255), -1);

		for(int i = max(0, nb_mousev_right - trace); i< nb_mousev_right; i++){
			line(kf_img, mousev_right[i], mousev_right[i+1], Scalar(0,255,0), 1);
		}
		for(int i = max(0, nb_kalmanv_right - trace); i< nb_kalmanv_right; i++){
			line(kf_img, kalmanv_right[i], kalmanv_right[i+1], Scalar(255,0,255), 1);
		}


		Rect border(0, top_cut, WIDTH, max(10, HEIGHT - top_cut - bot_cut));
		cap >> frame;

		if(!frame.empty()){

			Mat image;
			int flip_type = 1;
			switch (flip) {
				case 0: break;
				case 1:	break;
				case 2: flip_type = 0;
				break;
				case 3: flip_type = -1;
				break;
			}
			if(flip) cv::flip(frame, frame, flip_type);

			resize(frame, frame, Size(WIDTH, HEIGHT));
			image = frame(border);
			imshow("frame", image);

			//performs the skin detection
			Mat converted_skin;
			cvtColor(image, converted_skin, CV_BGR2HSV);

			Mat skin_masked;
			inRange(converted_skin, Scalar(min(lowH, highH), 48, 80),Scalar(max(lowH, highH), 255, 255), skin_masked);
			imshow("test", skin_masked);

			//performs the robot detection
			Mat converted_white, white_masked, lights_masked;
			cvtColor(image, converted_white, CV_BGR2GRAY);
			inRange(converted_skin, Scalar(0, 0, 245), Scalar(180, 255, 255), lights_masked);
			threshold(converted_white, white_masked, lowV, 255, type);
			bitwise_or(white_masked, lights_masked, white_masked);
			imshow("white", white_masked);


			Mat copy(converted_skin.size(), converted_skin.type());// = converted.clone();

			//detects hands as blobs
			CBlobResult blobs;
			blobs = CBlobResult(skin_masked, Mat(), NUMCORES);	// multi-core blob extraction
			int numBlobs = blobs.GetNumBlobs();
			if(0 == numBlobs){
				cout << "can't find blobs!" << endl;
				continue;
			}

			// detects robot as a blob
			CBlobResult robot_blobs;
			robot_blobs = CBlobResult(white_masked, Mat(), NUMCORES);
			if(0 == robot_blobs.GetNumBlobs()){
				cout << "can't find robot_blobs!" << endl;
				continue;
			}

			CBlob *curblob;
			CBlob* blob_1;
			CBlob* blob_2;
			CBlob* leftBlob;
			CBlob* rightBlob;
			CBlob* robotBlob;


			copy.setTo(Vec3b(0,0,0));

			// chooses the two largest blobs for the hands
			Point center_1, center_2;
			int max_1 = 0;
			int max_2 = 0;
			int maxArea_1 = 0;
			int maxArea_2 = 0;
			for(int i=0;i<numBlobs;i++){
				int area = blobs.GetBlob(i)->Area();
				if(area > maxArea_1){
					maxArea_2 = maxArea_1;
					maxArea_1 = area;
					max_2 = max_1;
					max_1 = i;
				} else if(area > maxArea_2){
					maxArea_2 = area;
					max_2 = i;
				}
			}
			int i_1 = max_1;
			int i_2 = max_2;
			double area_left, area_right;
			Rect rect_1;
			Rect rect_2;

			//determines which hand is left/right
			blob_1 = blobs.GetBlob(i_1);
			blob_2 = blobs.GetBlob(i_2);
			center_1 = blob_1->getCenter();
			center_2 = blob_2->getCenter();
			bool left_is_1 = (center_1.x < center_2.x)? true : false;
			leftBlob = (left_is_1)? blob_1 : blob_2;
			rightBlob = (left_is_1)? blob_2 : blob_1;
			center_left = leftBlob->getCenter();
			center_right = rightBlob->getCenter();

			//determine the number of valid hands
			//validity is decided by whether or not the hand followed a logical movement,
			//and if the area of the blob is large enough to be accepted
			int valids = 0;
			rect_1 = leftBlob->GetBoundingBox();
			rectangle(copy, rect_1.tl(), rect_1.br(), leftColor_2, 5);
			error_left = norm(statePt_left - center_left);
			area_left = leftBlob->Area();
			left_valid = error_left < sensitivity && area_left > area;
			if(left_valid){
				leftBlob->FillBlob(copy,leftColor, true);
				valids ++;
			}
			circle(copy, center_left, 5, leftColor_2, -1);


			rect_2 = rightBlob->GetBoundingBox();
			rectangle(copy, rect_2.tl(), rect_2.br(), rightColor_2, 5);
			error_right = norm(statePt_right - center_right);
			area_right = rightBlob->Area();
			right_valid = error_right < sensitivity && area_right > area;
			if(right_valid){
				rightBlob->FillBlob(copy,rightColor, true);
				valids ++;
			}
			circle(copy, center_right, 5, rightColor_2, -1);


			//finds the blob representing the robot
			//we could add a restriction to only choose a blob between the two hands
			//in terms of x-coordinate
			//a Kalman check can easily be done for the robot
			Point robot_center;
			maxArea_1 = 0;
			max_1 = 0;
			numBlobs = robot_blobs.GetNumBlobs();
			if(0 < numBlobs){
				for(int i=0;i<numBlobs;i++){
					curblob = robot_blobs.GetBlob(i);
					robot_center = curblob->getCenter();
					double dist_1 = norm(center_1 - robot_center);
					double dist_2 = norm(center_2 - robot_center);
					if(dist_1 < len || dist_2 < len){
						double area = robot_blobs.GetBlob(i)->Area();
						if(area > maxArea_1){
							max_1 = i;
							maxArea_1 = area;
						}
					}
				}
				int i_3 = max_1;
				curblob = robot_blobs.GetBlob(i_3);
				curblob->FillBlob(copy,robotColor, true);
				robot_center = curblob->getCenter();
				circle(copy, robot_center, 5, robotColor_2, -1);
				Rect rect_3 = curblob->GetBoundingBox();
				rectangle(copy, rect_3.tl(), rect_3.br(), robotColor_2, 5);

				// determines which hand is controlling the robot
				// by checking the position of the 3 blobs
				// an additional check could be done by verifying if
				//the center of the robot is moving in the same direction
				//as the center of the hand moving it
				bool is_left = false;
				bool is_right = false;
				bool confirmed = false;

				double dist_left = norm(center_left - robot_center);
				double dist_right = norm(center_right - robot_center);
				double dist_both = norm(center_left - center_right);

				Point robot_tl = rect_3.tl();
				Point robot_br = rect_3.br();

				int left_count = 0;
				int right_count = 0;

				if(rect_1.contains(robot_tl)) left_count++;
				if(rect_1.contains(robot_br)) left_count++;
				if(rect_1.contains(robot_center)) left_count++;
				if(rect_2.contains(robot_tl)) right_count++;
				if(rect_2.contains(robot_br)) right_count++;
				if(rect_2.contains(robot_center)) right_count++;


				switch(valids){
					case 0: break;
					case 1:{
						int area_sum = area_left + area_right;
						if(dist_left > 2* dist_right || dist_right > 2*dist_left){
							if(area_sum > 2 * area && (area_left > 2*area_right || area_right > 2*area_left) &&
							((left_valid && left_count > 0)||(right_valid && right_count > 0))){
								is_left = true;
								is_right = true;
								if(left_count > 2 || right_count > 2) confirmed = true;
							}
						}
						if(left_valid && left_count > 1) {
							is_left = true;
							if(left_count > 2) confirmed = true;
						}
						if(right_valid && right_count > 1) {
							is_right = true;
							if(right_count > 2) confirmed = true;
						}

						//if just one hand is on screen
						if(area_right < area/2){
							if(center_left.x > robot_center.x){
								is_left = true;
							} else{
								is_right = true;
							}
						} else if (area_left < area/2){
							if(center_right.x < robot_center.x){
								is_right = true;
							} else{
								is_left = true;
							}
						}
						break;}
					case 2:{
						int countSum = left_count + right_count;

						switch (countSum) {
							case 3:{
								switch (left_count) {
									case 3:
										is_left = true;
										confirmed = true;
										break;
									case 2:
									case 1:
										is_left = true;
										is_right = true;
										confirmed = true;
										break;
									case 0:
										is_right = true;
										confirmed = true;
										break;
								}
								break;
							}
							case 2:{
								switch (left_count) {
									case 2:
										is_left = true;
										confirmed = true;
										break;
									case 1:
										is_left = true;
										is_right = true;
										break;
									case 0:
										is_right = true;
										confirmed = true;
										break;
								}
								break;
							}
							case 1:{
								switch (left_count) {
									case 1:
										is_left = true;
										break;
									case 0:
										is_right = true;
										break;
								}
								break;
							}
							case 0:
								break;
						}

						break;
					}
				}

				// scan the robot's bounding box for the hand fill colours
				// (left hand filled blue, right hand filled green)
				bool found = false;
				for(int i = robot_tl.x; i < robot_br.x && !found; i++){
					for(int j = robot_tl.y; j < robot_br.y && !found; j++){
						int color1 = 0; int color2 = 255;
						Vec3b colour = copy.at<Vec3b>(Point(i, j));
						if(colour[1] == color1 && colour[0] == color2){
							found = true;
							is_left = true;
						}
						if(colour[1] == color2 && colour[0] == color1){
							found = true;
							is_right = true;
						}
					}
				}
				if (found) confirmed = true;

				if(!is_left && !is_right){
					cout << "-- none!";
					if(left_count == 0 && right_count == 0) confirmed = true;
				} else if(is_left && is_right){
					cout << "-- both!";
				} else {
					if (is_left){
						cout << " -- left!";
					} else {
						cout << " -- right!";
					}
				}



imshow("kalman", kf_img);
// up till here

				if(confirmed){
					nb_confirmed ++;
					cout << " -- confirmed" << endl;
				} else {
					cout << endl;
				}
				csv(&logs, center_left.x, center_left.y, is_left, center_right.x, center_right.y, is_right, confirmed);
			}
			nb_total ++;

			//displayOverlay("Blobs Image","Multi Thread");
			new_dist = norm(center_left - center_right);
			// don't throw errors in the first 10 frames
			if(ticks > 10){
				if(error_left > 20 && error_right > 20 /*&& new_dist < prev_dist*/){
					circle(copy, Point(WIDTH/2, HEIGHT/2), 100, Scalar(0, 0, 255), 30);
					nb_errors ++;
				}
			}

			prev_dist = new_dist;

			imshow("Blobs Image",copy);

			key = waitKey(10);
		} else{
			eof = true;
		}

		if(27 == key || 1048603 == key || eof){
			double kalman_error_percentage = (nb_errors*100.0)/ticks;
			double confirm_percentage = (nb_confirmed*100.0/nb_total);
			stats << "kalman error frequency: " << kalman_error_percentage << "\%" << endl;
			stats << "confirmed: " << confirm_percentage << "\%" << endl;

			logs.close();
			stats.close();
			return 0;
		}

	}
}
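
initialize_kalman and csv are project helpers the snippet assumes. KalmanFilter KF(4, 2, 0) implies a four-dimensional state (x, y, vx, vy) observed through two-dimensional position measurements, so a constant-velocity setup like the sketch below is plausible; the noise magnitudes are guesses, and the csv signature is inferred from the call site and the header written to logs.csv:

// Sketch: constant-velocity Kalman filter, state = [x, y, vx, vy].
void initialize_kalman(KalmanFilter *kf, Point center)
{
	kf->transitionMatrix = (Mat_<float>(4, 4) <<
		1, 0, 1, 0,
		0, 1, 0, 1,
		0, 0, 1, 0,
		0, 0, 0, 1);
	setIdentity(kf->measurementMatrix);
	setIdentity(kf->processNoiseCov, Scalar::all(1e-4));     // guessed magnitude
	setIdentity(kf->measurementNoiseCov, Scalar::all(1e-1)); // guessed magnitude
	setIdentity(kf->errorCovPost, Scalar::all(0.1));
	kf->statePost = (Mat_<float>(4, 1) << center.x, center.y, 0, 0);
}

// Sketch: one CSV row per frame, matching the header written to logs.csv.
void csv(ofstream *logs, int lx, int ly, bool l_holds,
         int rx, int ry, bool r_holds, bool confirmed)
{
	*logs << lx << "," << ly << "," << l_holds << ","
	      << rx << "," << ry << "," << r_holds << "," << confirmed << endl;
}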
Code Example #5
/*
arg1: Width of each frame
arg2: Height of each frame
arg3: Target frames per second of the program
arg4: Maximum number of blobs to track. Each blob MAY correspond to a person in front of the camera
*/
int main(int argc, char* argv[])
{
    if (argc < 5)
    {
        cout << "Too few arguments to the program. Exiting...\n";
        return 0;
    }

    int width, height, fps, numberOfBlobs;
    try
    {
        //Read the arguments (std::stoi throws on malformed input, unlike atoi, so the catch below is reachable)
        width = std::stoi(argv[1]);
        height = std::stoi(argv[2]);
        fps = std::stoi(argv[3]);
        numberOfBlobs = std::stoi(argv[4]);
        //Done reading arguments
    }
    catch(...)
    {
        cout << "One or more arguments are invalid!. Exiting...\n";
        return 0;
    }


    /*
    int width = 320;
    int height = 240;
    int fps = 10;
    int numberOfBlobs = 2;
    */

    tempImageV4L = cvCreateImage(cvSize(width, height), 8, 3);
    frameNumber = 0;

    //Beginning initialising cameras
    rightCamera = new Camera("/dev/video0", width, height, fps);
    leftCamera = new Camera("/dev/video1", width, height, fps);
	//leftCamera = rightCamera; //If only one camera is available, uncomment this line and comment the line above this.
    //Done initialising cameras

    //Waste some frames so as to get the cameras running in full flow
    WasteNFrames(10);

    //Beginning capturing background
    backImageRight = GetNextCameraShot(rightCamera);
    backImageLeft = GetNextCameraShot(leftCamera);
    frameNumber++;
    cvtColor(backImageRight, backImageRight, CV_BGR2HSV);
    cvtColor(backImageLeft, backImageLeft, CV_BGR2HSV);
    //Done capturing background

    //General Stuff
    Mat motionImageRight(backImageRight.rows, backImageRight.cols, CV_8UC1);
    Mat motionImageLeft(backImageLeft.rows, backImageLeft.cols, CV_8UC1);
    Mat HSVImageRight, HSVImageLeft;
    Mat displayImageRight, displayImageLeft;
    //End of General Stuff


    while (1) //The infinite loop
    {
        //Beginning getting camera shots
        rightImage = GetNextCameraShot(rightCamera);
        leftImage = GetNextCameraShot(leftCamera);
        frameNumber++;
        //Done getting camera shots


        //Beginning getting motion images
        HSVImageRight = rightImage.clone();
        cvtColor(HSVImageRight, HSVImageRight, CV_BGR2HSV);
        CompareWithBackground(HSVImageRight, backImageRight, motionImageRight);
        medianBlur(motionImageRight, motionImageRight, 3);

        HSVImageLeft = leftImage.clone();
        cvtColor(HSVImageLeft, HSVImageLeft, CV_BGR2HSV);
        CompareWithBackground(HSVImageLeft, backImageLeft, motionImageLeft);
        medianBlur(motionImageLeft, motionImageLeft, 3);
        //Ended getting motion images

        cout << "\nFor frame #" << frameNumber << " :\n";

        //Beginning Getting Blobs
        IplImage  imageblobPixels = motionImageRight;
        CBlobResult blobs;
        blobs = CBlobResult(&imageblobPixels, NULL, 0);	// Use a black background color.
        int minArea = max(1, cvRound(100.0 * (width / 640.0) * (width / 640.0)));	// scale a 100 px threshold (for 640-wide frames) to the current width
        blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minArea);
        int foundBlobs = blobs.GetNumBlobs();
        //Ended Getting Blobs

        cout << "Found " << foundBlobs << " motion blobs\n";

        //Creating copies of original images for modifying and displaying
        displayImageRight = rightImage.clone();
        displayImageLeft = leftImage.clone();
        //Done creating copies

        //Cycling through the blobs
        for (int blobIndex = 0; blobIndex < blobs.GetNumBlobs() && blobIndex < numberOfBlobs; blobIndex++)
        {
            cout << "Blob #" << blobIndex << " : ";

            //Getting blob details
            CBlob * blob = blobs.GetBlob(blobIndex);
            int x = blob->GetBoundingBox().x;
            int y = blob->GetBoundingBox().y;
            int w = blob->GetBoundingBox().width;
            int h = blob->GetBoundingBox().height;
            //Done getting blob details

            int sep = 0;

            //The point for which we want to find depth
            PixPoint inP = {x + w/2, y + h/2}, oP = {0, 0};
            cout << "inPoint = {" << inP.x << ", " << inP.y << "} ";

            //Initialising the rectangle in which the corresponding point is likely to be
            Rectangle rect;
            rect.location.x = -1;
            rect.location.y = inP.y - 5;
            rect.size.x = rightImage.cols;
            rect.size.y = 11;
            //Done initialising the target rectangle

            //Find the corresponding point and calculate the separation
            oP = PointCorresponder::correspondPoint(rightImage, leftImage, inP, rect, motionImageLeft);
            sep = inP.x - oP.x;
            cout << "foundPoint = {" << oP.x << ", " << oP.y << "} ";

            //Just for visual presentation
            DrawRect(displayImageRight, x, y, w, h);
            cv::circle(displayImageRight, Point(inP.x, inP.y), 10, Scalar(0), 3);
            cv::circle(displayImageLeft, Point(oP.x, oP.y), 10, Scalar(0), 3);
            //Done decoration

            //The thing we were looking for... how can we forget to print this? :P
            cout << "seperation = " << sep << "\n";
        }

        //Show the windows
        cv::namedWindow("RIGHT");
        cv::namedWindow("thresh");
        cv::namedWindow("LEFT");
        imshow("LEFT", displayImageLeft);
        imshow("RIGHT", displayImageRight);
        imshow("thresh", motionImageRight);
        //End of code for showing windows

        //The loop terminating condition
        if (waitKey(27) >= 0) break;
    }

    //Mission Successful!! :D :)
    return 0;
}
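
GetNextCameraShot, WasteNFrames, CompareWithBackground, DrawRect, and PointCorresponder come from elsewhere in the project. The background comparison is presumably a per-pixel difference of the HSV frame against the stored background, thresholded into the binary motion mask the blob detector consumes; a sketch under that assumption (the threshold of 30 is a guess):

// Sketch: binary motion mask from HSV background differencing.
void CompareWithBackground(const Mat &frame, const Mat &background, Mat &motionImage)
{
    Mat diff;
    absdiff(frame, background, diff);       // per-channel |frame - background|
    vector<Mat> ch;
    split(diff, ch);
    Mat score = ch[0] + ch[1] + ch[2];      // aggregate the channel differences (saturating)
    threshold(score, motionImage, 30, 255, CV_THRESH_BINARY); // guessed threshold
}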
Code Example #6
File: markers.cpp Project: kenkeiter/interface
/* Find the center of a given blob. */
CvPoint MarkerCapture::blob_center(CBlob blob){
    CvPoint point;
    point.x = blob.GetBoundingBox().x + (blob.GetBoundingBox().width / 2);
    point.y = blob.GetBoundingBox().y + (blob.GetBoundingBox().height / 2);
    return point;
}
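
A usage sketch (hypothetical surrounding code): collecting the center of every blob in a CBlobResult through this helper.

// Hypothetical usage, given a CBlobResult 'blobs' and a MarkerCapture 'cap':
std::vector<CvPoint> centers;
for (int i = 0; i < blobs.GetNumBlobs(); i++)
{
    centers.push_back(cap.blob_center(*blobs.GetBlob(i)));
}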
Code Example #7
float thresholdSegmentation(Rect r, ntk::RGBDImage* current_frame, Mat& dst){
	Mat depth = current_frame->depth();
	Rect& rr = r;
	Mat depthROI = depth(rr), maskROI;
	Mat& rDepthROI = depthROI, &rMaskROI = maskROI;
	double var = 0.3;

	// maskROI for nonZero values in the Face Region
	inRange(depthROI, Scalar::all(0.001), Scalar::all(255), maskROI);
	// Mean depth of Face Region
	Scalar mFace = cv::mean(rDepthROI, rMaskROI);
	//mFace[0]  = mFace[0] - mFace[0] * var;
	inRange(depthROI, Scalar::all(0.001), mFace, maskROI);
	mFace = cv::mean(rDepthROI, rMaskROI);
	//inRange(depthROI, Scalar::all(0.001), mFace, maskROI);
	//mFace = cv::mean(rDepthROI, rMaskROI);
	


	
	// Mask for nearer than the mean of face.
	inRange(depth, Scalar::all(0.001), mFace, dst);
	Mat rgbImage = current_frame->rgb();
	Mat outFrame = Mat::zeros(rgbImage.rows, rgbImage.cols, rgbImage.type());	// black canvas, same type as the RGB frame
	rgbImage.copyTo(outFrame, dst);
	Mat outFrameROI;
	outFrameROI = outFrame(rr);
	//cvCopy(&rgbImage, &outFrame, &dst);
	//rgbImageROI = rgbImageROI(rr);
	
	imshow("ROI", outFrameROI);
	//imshow("thresholdSeg", dst);

	// For debug of cvblobslib
	// Display the color image	

	//imshow("faceRIO", maskROI);
	imshow("faceRIO", outFrameROI);
	bool iswrite;
	const int nchannel = 1;
	vector<Rect> faces;
	//iswrite = imwrite("faceROI.png", maskROI);
	iswrite = imwrite("faceROI.png", outFrameROI);
	//iswrite = cvSaveImage("faceROI.jpeg", pOutFrame, &nchannel);

	// ---- blob segmentation on maskROI by using cvblobslib ----
	// ---		Third Trial	---
	//visualizeBlobs("faceROI.png", "faceROI");




	// ---		First Trial Not Successful		---
	//Mat maskROIThr=cvCreateMat(maskROI.rows, maskROI.cols, CV_8UC1);	
	//maskROIThr = maskROI;
	//IplImage imgMaskROIThr = maskROIThr;
	//IplImage* pImgMaskROIThr = &imgMaskROIThr;
	//cvThreshold(pImgMaskROIThr, pImgMaskROIThr, 0.1, 255, CV_THRESH_BINARY_INV);

	// ---		Second Trial	---
	IplImage* original = cvLoadImage("faceROI.png", 0);
	IplImage* originalThr = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U, 1);
	IplImage* displayBiggestBlob = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U, 3);
	CBlobResult blobs;
	CBlob biggestBlob;
	//IplImage source = maskROIThr;	IplImage* pSource = &source;
	//blobs = CBlobResult(
	cvThreshold(original, originalThr, 0.1, 255, CV_THRESH_BINARY_INV);
	blobs =  CBlobResult( originalThr, NULL, 255);
	printf("%d blobs \n", blobs.GetNumBlobs());
	blobs.GetNthBlob(CBlobGetArea(), 0, biggestBlob);
	biggestBlob.FillBlob(displayBiggestBlob, CV_RGB(255, 0, 0));

	// Drawing the ellipse and Rect on the blob
	Mat mat(displayBiggestBlob);

	cv::RotatedRect blobEllipseContour;
	cv::Rect blobRectContour;
	//RotatedRect blobEllipseContour;
	blobEllipseContour = biggestBlob.GetEllipse();
	blobRectContour = biggestBlob.GetBoundingBox();
	//cv::ellipse(
	cv::ellipse(mat, blobEllipseContour, cv::Scalar(0,255, 0), 3, CV_AA);
	cv::rectangle(mat, blobRectContour, cv::Scalar(255, 0, 0), 3, CV_AA);
	//cv::ellipse(mat, blobEllipseContour);
	float headOrientation = blobEllipseContour.angle;
	if (headOrientation <= 180)
		headOrientation = headOrientation - 90;
	else
		headOrientation = headOrientation - 270;
	cv::putText(mat,
			cv::format("%f degree", headOrientation),
			Point(10,20), 0, 0.5, Scalar(255,0,0,255));

	cv::imshow("faceROI", mat);
	return(headOrientation);
}
Code Example #8
File: blob1.cpp Project: akashwar/Eye-NAB
 int main()  
 {  
     CBlobResult blobs;    
     CBlob *currentBlob;   
     CvPoint pt1, pt2;  
     CvRect cvRect;  
     int key = 0;  
     IplImage* frame = 0;  
   
     // Initialize capturing live feed from video file or camera  
     CvCapture* capture = cvCaptureFromFile( "MOV.MPG" );  
   
     // Can't get device? Complain and quit
     if( !capture )
     {
         printf( "Could not initialize capturing...\n" );
         return -1;
     }

     // Get the frames per second; fall back to a default if the file
     // doesn't report one, so 2000 / fps below can't divide by zero
     int fps = ( int )cvGetCaptureProperty( capture,
                                            CV_CAP_PROP_FPS );
     if( fps <= 0 )
         fps = 25;
   
     // Windows used to display input video with bounding rectangles  
     // and the thresholded video  
     cvNamedWindow( "video" );  
     cvNamedWindow( "thresh" );        
   
     // An infinite loop  
     while( key != 'x' ) 
     { 
         // If we couldn't grab a frame... quit  
         if( !( frame = cvQueryFrame( capture ) ) )  
             break;        
   
         // Get object's thresholded image (blue = white, rest = black)  
         IplImage* imgThresh = GetThresholdedImageHSV( frame );        
   
         // Detect the white blobs from the black background  
         blobs = CBlobResult( imgThresh, NULL, 0 );    
   
         // Exclude white blobs smaller than the given value (10)    
         // The bigger the last parameter, the bigger the blobs need    
         // to be for inclusion    
         blobs.Filter( blobs,  
                       B_EXCLUDE,  
                       CBlobGetArea(),  
                       B_LESS,  
                       10 );           
   
         // Attach a bounding rectangle for each blob discovered  
         int num_blobs = blobs.GetNumBlobs();  
   
         for ( int i = 0; i < num_blobs; i++ )    
         {                 
             currentBlob = blobs.GetBlob( i );               
             cvRect = currentBlob->GetBoundingBox();  
   
             pt1.x = cvRect.x;  
             pt1.y = cvRect.y;  
             pt2.x = cvRect.x + cvRect.width;  
             pt2.y = cvRect.y + cvRect.height;  
   
             // Attach bounding rect to blob in original video input
             cvRectangle( frame,  
                          pt1,   
                          pt2,  
                          cvScalar(0, 0, 0, 0),  
                          1,  
                          8,  
                          0 );  
         }  
   
         // Add the black and white and original images  
         cvShowImage( "thresh", imgThresh );  
         cvShowImage( "video", frame );  
   
         // Optional - used to slow up the display of frames  
         key = cvWaitKey( 2000 / fps );  
   
         // Prevent memory leaks by releasing thresholded image  
         cvReleaseImage( &imgThresh );        
     }  
   
     // We're through with using camera.   
     cvReleaseCapture( &capture );  
   
     return 0;  
 }
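
GetThresholdedImageHSV is defined elsewhere; per the comment it maps blue pixels to white and everything else to black. A minimal sketch in the same C API (the exact HSV bounds for "blue" are guesses):

 // Sketch: binary mask of blue regions in a BGR frame.
 IplImage* GetThresholdedImageHSV( IplImage* frame )
 {
     IplImage* imgHSV = cvCreateImage( cvGetSize( frame ), 8, 3 );
     cvCvtColor( frame, imgHSV, CV_BGR2HSV );
     IplImage* imgThresh = cvCreateImage( cvGetSize( frame ), 8, 1 );
     // Hue roughly 100-130 covers blue in OpenCV's 0-179 hue range (guessed bounds).
     cvInRangeS( imgHSV, cvScalar( 100, 100, 100 ), cvScalar( 130, 255, 255 ), imgThresh );
     cvReleaseImage( &imgHSV );
     return imgThresh; // the caller releases it, as the loop above does
 }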