Example #1
CBlob getNearestBlob(CBlobResult blobs, coord coordinate){
	
	int tot = blobs.GetNumBlobs();
	CBlob Blob;
	float distance[10]; // 10 is the maximum number of blobs we expect to find in a video frame
	float minimum;
	
	coord tempCoord;

	//This loop computes the Euclidean distance between the given coordinate and every captured blob and fills the distances array.
	for (int i=0; i<tot && i<10; i++){
		Blob = blobs.GetBlob(i);
		tempCoord.set( (int) Blob.MaxX(), (int) Blob.MinX(), (int) Blob.MaxY(), (int) Blob.MinY());
		distance[i] = sqrt((double)(tempCoord.cX - coordinate.cX)*(tempCoord.cX - coordinate.cX) + (tempCoord.cY - coordinate.cY)*(tempCoord.cY - coordinate.cY));
	}

	int minDistanceId=0;
	
	//This loop picks the smallest of all the computed distances
	for (int j=0; j<tot && j<10; j++){
		minimum = min( distance[j], distance[minDistanceId]);
		if ( distance[j] == minimum ) minDistanceId = j;
	}

	//Once the minimum distance is found, return the corresponding blob
	Blob = blobs.GetBlob( minDistanceId );
	return Blob;

}
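A minimal sketch of the same nearest-blob search without the fixed-size distance array (which overflows past 10 blobs), assuming the same CBlob, CBlobResult, and coord types used above; the function name is hypothetical:

#include <cfloat>  // DBL_MAX
#include <cmath>   // sqrt

//Hypothetical variant of getNearestBlob that tracks the best candidate
//directly instead of storing every distance.
CBlob getNearestBlobUnbounded(CBlobResult blobs, coord coordinate){
	int bestId = 0;
	double bestDist = DBL_MAX;
	coord tempCoord;
	for (int i=0; i<blobs.GetNumBlobs(); i++){
		CBlob b = blobs.GetBlob(i);
		tempCoord.set( (int) b.MaxX(), (int) b.MinX(), (int) b.MaxY(), (int) b.MinY());
		double dx = tempCoord.cX - coordinate.cX;
		double dy = tempCoord.cY - coordinate.cY;
		double d = sqrt(dx*dx + dy*dy);
		if (d < bestDist){ bestDist = d; bestId = i; }
	}
	return blobs.GetBlob(bestId);
}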
Example #2
void drawInitialBlobs(IplImage * tmp_frame, CBlobResult blobs){

	
	coord drawCoord;

	for (int i=0; i<blobs.GetNumBlobs();i++){
		
		//!Creating the coordinate struct
		drawCoord.set( (int) blobs.GetBlob(i).MaxX(), (int) blobs.GetBlob(i).MinX(), (int) blobs.GetBlob(i).MaxY(), (int) blobs.GetBlob(i).MinY());

		drawBlob(tmp_frame, drawCoord, 255, 255, 0);
	}
}
Example #3
void ForegroundDetector::nextIteration(const Mat &img)
{
    if(bgImg.empty())
    {
        return;
    }

    Mat absImg = Mat(img.rows, img.cols, img.type());
    Mat threshImg = Mat(img.rows, img.cols, img.type());

    absdiff(bgImg, img, absImg);
    threshold(absImg, threshImg, fgThreshold, 255, CV_THRESH_BINARY);

    IplImage im = (IplImage)threshImg;
    CBlobResult blobs = CBlobResult(&im, NULL, 0);

    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minBlobSize);

    vector<Rect>* fgList = detectionResult->fgList;
    fgList->clear();

    for(int i = 0; i < blobs.GetNumBlobs(); i++)
    {
        CBlob *blob = blobs.GetBlob(i);
        CvRect rect = blob->GetBoundingBox();
        fgList->push_back(rect);
    }

}
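nextIteration assumes bgImg was captured beforehand; a minimal sketch of such a setter, assuming bgImg is a Mat member of ForegroundDetector (the helper itself is hypothetical):

void ForegroundDetector::setBackground(const Mat &img) // hypothetical helper
{
    img.copyTo(bgImg); // deep copy so later frames don't mutate the background
}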
Example #4
void ScheinrieseApp::findBlobs() {
    CBlobResult blobs;
    int i;
    CBlob *currentBlob;
    IplImage *original, *originalThr;

    // load an image and threshold it
    original = cvLoadImage("pic1.png", 0);
    originalThr = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U, 1);
    cvThreshold( original, originalThr, 100, 255, CV_THRESH_BINARY );

    // find non-white blobs in thresholded image
    blobs = CBlobResult( originalThr, NULL, 255 );
    // exclude the ones smaller than param2 value
    blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, param2 );

    // get mean gray color of biggest blob
    CBlob biggestBlob;
    CBlobGetMean getMeanColor( original );
    double meanGray;

    blobs.GetNth( CBlobGetArea(), 0, biggestBlob );
    meanGray = getMeanColor( biggestBlob );

    // display filtered blobs
    cvMerge( originalThr, originalThr, originalThr, NULL, displayedImage );

    for (i = 0; i < blobs.GetNumBlobs(); i++ )
    {
        currentBlob = blobs.GetBlob(i);
        currentBlob->FillBlob( displayedImage, CV_RGB(255,0,0));
    }
}
Example #5
vector<Bubble> OMRSheet::getBubbles(int xi1, int yi1, int xi2, int yi2){
    vector <Bubble> bubbles;
    cout << "Bubble area " << bubbleArea << endl;
    int minArea = bubbleArea/2, maxArea = (int)(bubbleArea*1.5);
    CBlobResult blobs = ImageUtils::findBlobs(rawSheet, minArea, maxArea, cvRect(xi1, yi1, xi2-xi1, yi2-yi1));
    for (int i = 0; i < blobs.GetNumBlobs(); i++ )
    {
        CvRect rect = blobs.GetBlob(i)->GetBoundingBox();
        Point centroid = ImageUtils::findCentroid(rawSheet, &rect);
        Point centroidMM((centroid.x() - x1)/15, (centroid.y() - y1)/15);
        Bubble bubble(blobs.GetBlob(i), &centroidMM, &centroid);
        bubbles.push_back(bubble);
        
    }
    
    return bubbles;
}
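ImageUtils::findBlobs is not shown; one plausible sketch, assuming the sheet is available as an IplImage* and that cvBlobsLib honors the image ROI, restricts detection to the given rectangle and keeps blobs within the area band:

//A plausible sketch of the helper assumed above; the parameter types are
//assumptions, and blob coordinates come back relative to the ROI.
CBlobResult ImageUtils::findBlobs(IplImage* img, int minArea, int maxArea, CvRect roi)
{
    cvSetImageROI(img, roi);              // limit detection to the given region
    CBlobResult blobs(img, NULL, 0);      // black background
    cvResetImageROI(img);
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minArea);
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER, maxArea);
    return blobs;
}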
Example #6
CBlobResult computeWhiteMaskOtsu(Mat& imgRGBin, Mat& imgHSVIn, CBlobResult& blobs, int limitRGB, int limitHSV, double RGBratio, double HSVratio, int bmin, int bmax, int i){
	waitKey(30);
	Mat BGRbands[3];  
	split(imgRGBin,BGRbands);
	Mat imgHSV;
	cvtColor(imgHSVIn,imgHSV,CV_BGR2HSV);
	Mat HSVbands[3];  
	split(imgHSV,HSVbands);
	Mat maskHSV, maskRGB, maskT;

	int otsuTRGB = getThreshVal_Otsu_8u(BGRbands[2]);
	do{
		threshold(BGRbands[2],maskRGB,otsuTRGB,255,THRESH_BINARY);
		otsuTRGB++;
	}while(countNonZero(maskRGB)>(RGBratio*limitRGB) && otsuTRGB<=255);
	int otsuTHSV = getThreshVal_Otsu_8u(HSVbands[1]);
	do{	
		threshold(HSVbands[1],maskHSV,otsuTHSV,255,THRESH_BINARY_INV);
		otsuTHSV--;
	}while(countNonZero(maskHSV)>(HSVratio*limitHSV) && otsuTHSV>=0);
	bitwise_or(maskHSV,maskRGB,maskT);
	int blobSizeBefore = blobs.GetNumBlobs();
    blobs = blobs + CBlobResult( maskT ,Mat(),8);
	blobs.Filter( blobs, B_EXCLUDE, CBlobGetLength(), B_GREATER, bmax );
	blobs.Filter( blobs, B_EXCLUDE, CBlobGetLength(), B_LESS, bmin );
	int blobSizeAfter = blobs.GetNumBlobs();
	Mat newMask(maskT.size(),maskT.type());
    newMask.setTo(0);
    for(;i<blobs.GetNumBlobs();i++){
		double area = blobs.GetBlob(i)->Area();
		if(area < 5000 && area > 400)
			blobs.GetBlob(i)->FillBlob(newMask,CV_RGB(255,255,255),0,0,true);
    }
	if(countNonZero(maskRGB)>400 && countNonZero(maskHSV)>400 && blobSizeBefore!=blobSizeAfter){
		vector<Mat> BGRbands;  split(imgRGBin,BGRbands);
		Mat maskedRGB = applyMaskBandByBand(newMask,BGRbands);
		bitwise_not(newMask,newMask);
		split(imgHSVIn,BGRbands);
		Mat maskedHSV = applyMaskBandByBand(newMask,BGRbands);
		blobs = computeWhiteMaskOtsu(maskedRGB, maskedHSV, blobs, countNonZero(maskRGB),countNonZero(maskHSV),RGBratio, HSVratio, bmin, bmax, i-1);
	}		
	return blobs;
}
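The getThreshVal_Otsu_8u helper is not shown; if it simply mirrors OpenCV's built-in Otsu, a sketch could be:

// A minimal sketch, assuming getThreshVal_Otsu_8u just wraps OpenCV's Otsu:
// with THRESH_OTSU set, threshold() ignores the passed threshold value and
// returns the one it computed from the image histogram.
int getThreshVal_Otsu_8u(const Mat& src)
{
	Mat dummy;
	return (int) threshold(src, dummy, 0, 255, THRESH_BINARY | THRESH_OTSU);
}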
Example #7
File: main.cpp Project: pratikac/freekick
void extractBall()
{
    imgTransform(BALL_HUE_U, BALL_HUE_L, BALL_SAT_U, BALL_SAT_L, VAL_U, VAL_L);

	blobRes = CBlobResult(dst, NULL, 0);
	blobRes.Filter( blobRes, B_EXCLUDE, CBlobGetArea(), B_LESS, BLOB_SIZE_MIN );// keep blobs larger than BLOB_SIZE_MIN
	numOfBlobs = blobRes.GetNumBlobs(); cout << numOfBlobs << endl;
	blobRes.Filter( blobRes, B_EXCLUDE, CBlobGetArea(), B_GREATER, BALL_SIZE_MAX );// keep blobs smaller than BALL_SIZE_MAX
	numOfBlobs = blobRes.GetNumBlobs(); cout << numOfBlobs << endl;
	blobRes.Filter( blobRes, B_INCLUDE, CBlobGetCompactness(), B_GREATER, BALL_COMPACTNESS );// keep blobs greater than BALL_COMPACTNESS
	numOfBlobs = blobRes.GetNumBlobs(); cout << numOfBlobs << endl;

	for(int i=0; i<numOfBlobs; i++)
		blobs[i] = blobRes.GetBlob(i);
};
Example #8
File: main.cpp Project: pratikac/freekick
void extractBots()
{
    //RED TEAM
    imgTransform(TEAM_R_HUE_U, TEAM_R_HUE_L, TEAM_R_SAT_U, TEAM_R_SAT_L, VAL_U, VAL_L);
	blobRes = CBlobResult(dst, NULL, 0);
	blobRes.Filter( blobRes, B_EXCLUDE, CBlobGetArea(), B_LESS, BLOB_SIZE_MIN );// keep blobs larger than BLOB_SIZE_MIN
	numOfBlobs = blobRes.GetNumBlobs(); cout << numOfBlobs << endl;
    if(numOfBlobs == 2)
    {
        for (int i=0; i<numOfBlobs; i++)
            blobs[i] = blobRes.GetBlob(i);
    }
}


void printBlobs()
{

	CBlobGetXCenter getXC;
	CBlobGetYCenter getYC;
    CBlobGetArea    getArea;
    CBlobGetCompactness getCompactness;


	printf("-----Printng Blobs------\n");
	for(int i=0; i<numOfBlobs; i++)
	{
		printf("%d\t(%3.2f,%3.2f),%3.2f %3.2f\n", i, getXC(blobs[i]), getYC(blobs[i]), getArea(blobs[i]), getCompactness(blobs[i]));		
	}
	printf("\n");

	cvNamedWindow("old", 1);
	cvNamedWindow("new", 1);
	cvMoveWindow("old", 0,0);
	cvMoveWindow("new", 0,400);

	cvShowImage("old", img);
	cvShowImage("new", dst);
	cvWaitKey();

};
Example #9
// threshold trackbar callback
void on_trackbar( int dummy )
{
	if(!originalThr)
	{
		originalThr = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U,1);
	}

	if(!displayedImage)
	{
		displayedImage = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U,3);
	}
	
	// threshold input image
	cvThreshold( original, originalThr, param1, 255, CV_THRESH_BINARY );

	// get blobs and filter them using its area
	CBlobResult blobs;
	int i;
	CBlob *currentBlob;

	// find blobs in image
	blobs = CBlobResult( originalThr, NULL, 255 );
	blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, param2 );

	// display filtered blobs
	cvMerge( originalThr, originalThr, originalThr, NULL, displayedImage );

	for (i = 0; i < blobs.GetNumBlobs(); i++ )
	{
		currentBlob = blobs.GetBlob(i);
		currentBlob->FillBlob( displayedImage, CV_RGB(255,0,0));
	}
	 
    cvShowImage( wndname, displayedImage );
	
}
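For context, a minimal sketch of how this callback might be wired up, assuming the globals used above (original, wndname, param1, param2) are defined elsewhere; the trackbar labels and ranges here are assumptions:

cvNamedWindow( wndname, 1 );
cvCreateTrackbar( "Threshold", wndname, &param1, 255, on_trackbar );
cvCreateTrackbar( "Min area",  wndname, &param2, 5000, on_trackbar );
on_trackbar(0); // force an initial render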
Example #10
int main(int argc, char *argv[])
{
    CvCapture* capture = cvCreateFileCapture( "recording_01.avi");



    handOrientation rightOrientationLast = NONE, leftOrientationLast = NONE;
    handOrientation rightOrientationCur = NONE, leftOrientationCur = NONE;


	//cvNamedWindow("Input Image", CV_WINDOW_AUTOSIZE);
	//cvNamedWindow("Skin Pixels", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("Skin Blobs", CV_WINDOW_AUTOSIZE);

    while(1){
        Mat imageBGR = cvQueryFrame(capture);
        if(imageBGR.empty())break;
        //imshow("Input Image", imageBGR);

        // Convert the image to HSV colors.
        Mat imageHSV = Mat(imageBGR.size(), CV_8UC3);	// Full HSV color image.
        cvtColor(imageBGR, imageHSV, CV_BGR2HSV);				// Convert from a BGR to an HSV image.

        std::vector<Mat> channels(3);
        split(imageHSV, channels);

        Mat planeH = channels[0];
        Mat planeS = channels[1];
        Mat planeV = channels[2];


        // Detect which pixels in each of the H, S and V channels are probably skin pixels.
        threshold(channels[0], channels[0], 150, UCHAR_MAX, CV_THRESH_BINARY_INV);//18
        threshold(channels[1], channels[1], 60, UCHAR_MAX, CV_THRESH_BINARY);//50
        threshold(channels[2], channels[2], 170, UCHAR_MAX, CV_THRESH_BINARY);//80


        // Combine all 3 thresholded color components, so that an output pixel will only
        // be white if the H, S and V pixels were also white.
        Mat imageSkinPixels = Mat( imageBGR.size(), CV_8UC1);	// Greyscale output image.
        bitwise_and(channels[0], channels[1], imageSkinPixels);				// imageSkin = H {BITWISE_AND} S.
        bitwise_and(imageSkinPixels, channels[2], imageSkinPixels);	// imageSkin = H {BITWISE_AND} S {BITWISE_AND} V.

        // Show the output image on the screen.

        //imshow("Skin Pixels", imageSkinPixels);


        IplImage ipl_imageSkinPixels = imageSkinPixels;

        // Find blobs in the image.
        CBlobResult blobs;

        blobs = CBlobResult(&ipl_imageSkinPixels, NULL, 0);	// Use a black background color.

        // Ignore the blobs whose area is less than minArea.

        blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minBlobArea);

        srand (time(NULL));

        // Show the large blobs.
        IplImage* imageSkinBlobs = cvCreateImage(imageBGR.size(), 8, 3);	//Colored Output//,1); Greyscale output image.
        for (int i = 0; i < blobs.GetNumBlobs(); i++) {
            CBlob *currentBlob = blobs.GetBlob(i);
            currentBlob->FillBlob(imageSkinBlobs, CV_RGB(rand()%255,rand()%255,rand()%255));	// Fill each large blob with a random color.

             cvDrawRect(imageSkinBlobs,
                  cvPoint(currentBlob->GetBoundingBox().x,currentBlob->GetBoundingBox().y),
                  cvPoint(currentBlob->GetBoundingBox().x + currentBlob->GetBoundingBox().width,currentBlob->GetBoundingBox().y + currentBlob->GetBoundingBox().height),
                  cvScalar(0,0,255),
                  2);//Draw Bounding Boxes

        }

        cvShowImage("Skin Blobs", imageSkinBlobs);

        //Gestures

        //std::cout << "Number of Blobs: "<< blobs.GetNumBlobs() <<endl;

        if(blobs.GetNumBlobs() == 0){
            //picture empty
        }else if(blobs.GetNumBlobs() == 1) {
            //head detected
        }else if(blobs.GetNumBlobs() == 2 || blobs.GetNumBlobs() == 3){
            //head + one hand || head + two hands
            CvRect rect[3];
            int indexHead = -1, indexHandLeft = -1, indexHandRight = -1;


            //Get Bounding Boxes
            for(int i = 0; i< blobs.GetNumBlobs(); i++){
                rect[i] = blobs.GetBlob(i)->GetBoundingBox();
            }

            //Detect Head and Hand indexes
            if(blobs.GetNumBlobs() == 2){
                int indexHand = -1;
                if(getCenterPoint(rect[0]).y < getCenterPoint(rect[1]).y){
                    indexHead = 0;
                    indexHand = 1;
                }else{
                    indexHead = 1;
                    indexHand = 0;
                }

                if(getHandside(rect[indexHead], rect[indexHand]) == LEFT){
                    indexHandLeft = 1;
                    indexHandRight = -1;
                }else{
                    // right hand
                    indexHandLeft = -1;
                    indexHandRight = 1;
                }

            }else{
                //two hands
                int indexHand1 = -1;
                int indexHand2 = -1;
                if(getCenterPoint(rect[0]).y < getCenterPoint(rect[1]).y && getCenterPoint(rect[0]).y < getCenterPoint(rect[2]).y){
                    indexHead = 0;
                    indexHand1 = 1;
                    indexHand2 = 2;
                }else if(getCenterPoint(rect[1]).y < getCenterPoint(rect[0]).y && getCenterPoint(rect[1]).y < getCenterPoint(rect[2]).y){
                    indexHead = 1;
                    indexHand1 = 0;
                    indexHand2 = 2;
                }else{
                    indexHead = 2;
                    indexHand1 = 0;
                    indexHand2 = 1;
                }

                if(getHandside(rect[indexHead], rect[indexHand1]) == LEFT){
                    indexHandLeft = indexHand1;
                    indexHandRight = indexHand2;
                }else{
                    indexHandLeft = indexHand2;
                    indexHandRight = indexHand1;
                }
            }

            // follow the right hand
            if(indexHandRight > 0) {
                //std::cout << "right hand deteced" <<endl;
                if(isMoving(handRight)) {
                    std::cout << "hand moving" <<endl;
                    handRight.centerPrev = handRight.centerCurr;
                    handRight.centerCurr = getCenterPoint(rect[indexHandRight]);
                } else {
                    std::cout << "hand not moving" <<endl;
                    if(handRight.centerInit.y != 0 && abs(handRight.centerInit.y - handRight.centerCurr.y) > 20) {
                        if(handRight.centerInit.y < handRight.centerCurr.y) {
                            // hand moved down
                            std::cout << "                           hand moved down" <<endl;
                        } else {
                            // hand moved up
                            std::cout << "                           hand moved up" <<endl;
                        }
                    }
                    handRight.centerInit = getCenterPoint(rect[indexHandRight]);
                    handRight.centerPrev = handRight.centerCurr;
                    handRight.centerCurr = getCenterPoint(rect[indexHandRight]);
                }
            }

            //Get Orientations from Hand rects
            leftOrientationCur = (indexHandLeft != -1)?getOrientationOfRect(rect[indexHandLeft]):NONE;
            rightOrientationCur = (indexHandRight != -1)?getOrientationOfRect(rect[indexHandRight]):NONE;

            //Check Change of Left hand
            /*switch(detectHandStateChange(leftOrientationLast, leftOrientationCur)){
                case PORTRAIT_TO_LANDSCAPE:
                    handleGestures(LEFT_FLIP_DOWN);
                    break;
                case LANDSCAPE_TO_PORTRAIT:
                    handleGestures(LEFT_FLIP_UP);
                    break;
                case NOCHANGE:
                default:
                    break;
            }

            //Check Change of Right hand
            switch(detectHandStateChange(rightOrientationLast, rightOrientationCur)){
                case PORTRAIT_TO_LANDSCAPE:
                    handleGestures(RIGHT_FLIP_DOWN);
                    break;
                case LANDSCAPE_TO_PORTRAIT:
                    handleGestures(RIGHT_FLIP_UP);
                    break;
                case NOCHANGE:
                default:
                    break;
            }*/


        }else if(blobs.GetNumBlobs() > 3){
            //too much information
            cout<<"too much information"<<endl;
        }

        leftOrientationLast = leftOrientationCur;
        rightOrientationLast = rightOrientationCur;



        // Free all the resources.
        /*cvReleaseImage( &imageBGR );
        cvReleaseImage( &imageHSV );
        cvReleaseImage( &planeH );
        cvReleaseImage( &planeS );
        cvReleaseImage( &planeV );
        cvReleaseImage( &imageSkinPixels );
        cvReleaseImage( &imageSkinBlobs );*/

        //if ESC is pressed then exit loop
        if (cvWaitKey(33) == 27) break;
	}
	cvWaitKey(0);

	return 0;
}
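The gesture logic relies on a getCenterPoint helper that is not shown; a plausible one-liner over a CvRect:

// A plausible sketch of the helper assumed above.
Point getCenterPoint(const CvRect& r)
{
    return Point(r.x + r.width / 2, r.y + r.height / 2);
}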
Example #11
//==============================================================================
void PanTiltCameraClass::blobTracking(IplImage* hsv_mask,
                                      IplImage* pFour,
                                      IplImage* pImg)
{
   //--- Get blobs and filter them using the blob area
   CBlobResult blobs;
   CBlob *currentBlob;
   
   //--- Create a thresholded image and display image --------------------
   //--- Creates binary image
   IplImage* originalThr = cvCreateImage(cvGetSize(hsv_mask), IPL_DEPTH_8U,1);
   
   //--- Create 3-channel image
   IplImage* display = cvCreateImage(cvGetSize(hsv_mask),IPL_DEPTH_8U,3);
   
   //--- Copies the original
   cvMerge( hsv_mask, hsv_mask, hsv_mask, NULL, display );
   
   //--- Makes a copy for processing
   cvCopy(hsv_mask,originalThr);
   
   //--- Find blobs in image ---------------------------------------------
   int blobThreshold = 0;
   bool blobFindMoments = true;
   blobs = CBlobResult( originalThr, originalThr, blobThreshold, blobFindMoments);
   
   //--- filters blobs according to size and radius constraints
   blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, this->minBlobSize );
   
   //--- display filtered blobs ------------------------------------------
   
   //--- copies the original in (for background)
   cvMerge( originalThr, originalThr, originalThr, NULL, display );
   
   CvPoint pts[this->NUMBER_OF_CIRCLES];
   
   //--- This sequence marks all the blobs
   for (int i = 0; i < blobs.GetNumBlobs(); i++ )
   {
      currentBlob = blobs.GetBlob(i);
      currentBlob->FillBlob( display, CV_RGB(0,0,255));				
      
      //--- Get blobs centerpoint
      CvPoint bcg;
      bcg.x = (int)(currentBlob->MinX()+((currentBlob->MaxX()-currentBlob->MinX())/2));
      bcg.y = (int)(currentBlob->MinY()+((currentBlob->MaxY()-currentBlob->MinY())/2));
      
      //--- Print the CG on the picture
      char blobtext[40];
      for(int k=0;k<this->NUMBER_OF_CIRCLES;k++)
      {
         sprintf(blobtext,"%d",k+1);
         TargetReticle(display,&pts[k],blobtext,6,CV_RGB(255,0,0));
      }//for
   }//for each blob
   
   //--- Set the ROI in the pFour image
   cvSetImageROI(pFour,cvRect(pImg->width,pImg->height+80,pImg->width,pImg->height));
   cvCopy(display,pFour);
   
   //Reset region of interest
   cvResetImageROI(display);						
   
   //Clean up
   cvReleaseImage( &originalThr );
   cvReleaseImage( &display);
}
Example #12
int main(){

	Scalar robotColor = CV_RGB(255, 0, 0);
	Scalar rightColor = CV_RGB(0, 255, 0);
	Scalar leftColor = CV_RGB(0, 0, 255);
	Scalar robotColor_2 = CV_RGB(0, 255, 255);
	Scalar rightColor_2 = CV_RGB(255, 0, 255);
	Scalar leftColor_2 = CV_RGB(255, 255, 0);

	int lowH = 0;
	int highH = 14;
	int top_cut = 120;
	int bot_cut = 70;
	int lowV = 200;
	int type = 0;
	int ticks = 0;
	int nb_errors = 0;
	int len = 150;
	int trace = 25;
	int sensitivity = 100;
	int area = 3000;
	int flip = 0; //set to 0 if no flips are needed, 1 for y axis, 2 for x axis and 3 for both

	namedWindow("My Window", 1);
	createTrackbar("lowH", "My Window", &lowH, 180);
	createTrackbar("highH", "My Window", &highH, 180);
	createTrackbar("top_cut", "My Window", &top_cut, 255);
	createTrackbar("bot_cut", "My Window", &bot_cut, 255);
	createTrackbar("lowV", "My Window", &lowV, 255);
	createTrackbar("LEN", "My Window", &len, 300);
	createTrackbar("TRACE", "My Window", &trace, 100);
	createTrackbar("SENSITIVITY", "My Window", &sensitivity, 200);
	createTrackbar("AREA", "My Window", &area, 7000);
	createTrackbar("FLIP", "My Window", &flip, 3);
	moveWindow("My Window", 0, 0);

	namedWindow("kalman", 1);
	moveWindow("kalman", 500, 0);
	namedWindow("Blobs Image", 1);
	moveWindow("Blobs Image", 500, 300);
	namedWindow("frame", 1);
	moveWindow("frame", 0, 500);
	namedWindow("test", WINDOW_AUTOSIZE);
	moveWindow("test", 0, 500);
	namedWindow("white", WINDOW_AUTOSIZE);
	moveWindow("white", 0, 500);

	//file of video input
	string filename = "testVideo_5.webm";
	ofstream logs;
	ofstream stats;
	stats.open("stats.txt");
	logs.open("logs.csv");
	logs << "Left_x,Left_y,Left_holds,Right_x,Right_y,Right_holds,confirmed" << endl;

	Point center_window = Point(WIDTH/2, (HEIGHT - top_cut - bot_cut)/2);
	Point center_left = Point(WIDTH/4, .5*max(10, HEIGHT - top_cut - bot_cut));
	Point center_right = Point(3*WIDTH/4, .5*max(10, HEIGHT - top_cut - bot_cut));


	// initialize the kalman filters
	KalmanFilter KF_left(4, 2, 0);
	KalmanFilter KF_right(4, 2, 0);

	Mat_<float> measurement_left(2,1); measurement_left.setTo(Scalar(0));
	Mat_<float> measurement_right(2,1); measurement_right.setTo(Scalar(0));

	initialize_kalman(&KF_left, center_left);
	initialize_kalman(&KF_right, center_right);

	VideoCapture cap(0);

  // VideoCapture cap(filename);

	Mat kf_img(HEIGHT - top_cut - bot_cut, WIDTH, CV_8UC3);
	vector<Point> mousev_left,kalmanv_left;
	mousev_left.clear();
	kalmanv_left.clear();
	vector<Point> mousev_right,kalmanv_right;
	mousev_right.clear();
	kalmanv_right.clear();

	int counter = 0;
	int nb_confirmed = 0;
	int nb_total = 0;
	double average_left = 0;
	double average_right = 0;
	double error_left = 0;
	double error_right = 0;
	double prev_dist = norm(center_left - center_right);
	double new_dist = prev_dist;
	bool left_valid = false;
	bool right_valid = true;
	Mat temp = Mat::zeros(100,400, CV_8UC3);
	putText(temp, "Press any key to start", Point(50,50), FONT_HERSHEY_SIMPLEX, .5, Scalar(255,255,255));
	putText(temp, "and ESC to end", Point(50, 75), FONT_HERSHEY_SIMPLEX, .5, Scalar(255,255,255));
	imshow("Blobs Image", temp);


	waitKey(-1);
	int key = 0;
	bool eof = false;

	for(;;){

		Mat frame;
		Mat prediction_left = KF_left.predict();
		Point new_left(prediction_left.at<float>(0), prediction_left.at<float>(1));
		measurement_left(0) = center_left.x;
		measurement_left(1) = center_left.y;

		Mat estimated_left = KF_left.correct(measurement_left);

		Point statePt_left(estimated_left.at<float>(0),estimated_left.at<float>(1));
		Point measPt_left(measurement_left(0),measurement_left(1));

		Mat prediction_right = KF_right.predict();
		Point new_right(prediction_right.at<float>(0), prediction_right.at<float>(1));
		measurement_right(0) = center_right.x;
		measurement_right(1) = center_right.y;

		Mat estimated_right = KF_right.correct(measurement_right);

		Point statePt_right(estimated_right.at<float>(0),estimated_right.at<float>(1));
		Point measPt_right(measurement_right(0),measurement_right(1));

		ticks ++;
		error_left = norm(statePt_left - measPt_left);
		average_left = ((average_left * (ticks - 1)) + error_left) / ticks;
		error_right = norm(statePt_right - measPt_right);
		average_right = ((average_right * (ticks - 1)) + error_right) / ticks;

		imshow("kalman", kf_img);
		// waitKey(-1);
		kf_img = Scalar::all(0);
		mousev_left.push_back(measPt_left);
		kalmanv_left.push_back(statePt_left);

		circle(kf_img, statePt_left, 1,  Scalar(255,255,255), -1);
		circle(kf_img, measPt_left, 1, Scalar(0,0,255), -1);
		int nb_mousev_left = mousev_left.size() - 1;
		int nb_kalmanv_left = kalmanv_left.size() - 1;
		int nb_mousev_right = mousev_right.size() - 1;
		int nb_kalmanv_right = kalmanv_right.size() - 1;

		for(int i = max(0, nb_mousev_left - trace); i< nb_mousev_left; i++){
			line(kf_img, mousev_left[i], mousev_left[i+1], Scalar(255,255,0), 1);
		}
		for(int i = max(0, nb_kalmanv_left - trace); i< nb_kalmanv_left; i++){
			line(kf_img, kalmanv_left[i], kalmanv_left[i+1], Scalar(0,0,255), 1);
		}

		mousev_right.push_back(measPt_right);
		kalmanv_right.push_back(statePt_right);

		circle(kf_img, statePt_right, 1,  Scalar(255,255,255), -1);
		circle(kf_img, measPt_right, 1, Scalar(0,0,255), -1);

		for(int i = max(0, nb_mousev_right - trace); i< nb_mousev_right; i++){
			line(kf_img, mousev_right[i], mousev_right[i+1], Scalar(0,255,0), 1);
		}
		for(int i = max(0, nb_kalmanv_right - trace); i< nb_kalmanv_right; i++){
			line(kf_img, kalmanv_right[i], kalmanv_right[i+1], Scalar(255,0,255), 1);
		}


		Rect border(0, top_cut, WIDTH, max(10, HEIGHT - top_cut - bot_cut));
		cap >> frame;

		if(!frame.empty()){

			Mat image;
			int flip_type = 1;
			switch (flip) {
				case 0: break;
				case 1:	break;
				case 2: flip_type = 0;
				break;
				case 3: flip_type = -1;
				break;
			}
			if(flip) cv::flip(frame, frame, flip_type);

			resize(frame, frame, Size(WIDTH, HEIGHT));
			image = frame(border);
			imshow("frame", image);

			//performs the skin detection
			Mat converted_skin;
			cvtColor(image, converted_skin, CV_BGR2HSV);

			Mat skin_masked;
			inRange(converted_skin, Scalar(min(lowH, highH), 48, 80),Scalar(max(lowH, highH), 255, 255), skin_masked);
			imshow("test", skin_masked);

			//performs the robot detection
			Mat converted_white, white_masked, lights_masked;
			cvtColor(image, converted_white, CV_BGR2GRAY);
			inRange(converted_skin, Scalar(0, 0, 245), Scalar(180, 255, 255), lights_masked);
			threshold(converted_white, white_masked, lowV, 255, type);
			bitwise_or(white_masked, lights_masked, white_masked);
			imshow("white", white_masked);


			Mat copy(converted_skin.size(), converted_skin.type());// = converted.clone();

			//detects hands as blobs
			CBlobResult blobs;
			blobs = CBlobResult(skin_masked, Mat(), NUMCORES);
			int numBlobs = blobs.GetNumBlobs();
			if(0 == numBlobs){
				cout << "can't find blobs!" << endl;
				continue;
			}

			// detects robot as a blob
			CBlobResult robot_blobs;
			robot_blobs = CBlobResult(white_masked, Mat(), NUMCORES);
			if(0 == robot_blobs.GetNumBlobs()){
				cout << "can't find robot_blobs!" << endl;
				continue;
			}

			CBlob *curblob;
			CBlob* blob_1;
			CBlob* blob_2;
			CBlob* leftBlob;
			CBlob* rightBlob;
			CBlob* robotBlob;


			copy.setTo(Vec3b(0,0,0));

			// chooses the two largest blobs for the hands
			Point center_1, center_2;
			int max_1 = 0;
			int max_2 = 0;
			int maxArea_1 = 0;
			int maxArea_2 = 0;
			for(int i=0;i<numBlobs;i++){
				int area = blobs.GetBlob(i)->Area();
				if(area > maxArea_1){
					maxArea_2 = maxArea_1;
					maxArea_1 = area;
					max_2 = max_1;
					max_1 = i;
				} else if(area > maxArea_2){
					maxArea_2 = area;
					max_2 = i;
				}
			}
			int i_1 = max_1;
			int i_2 = max_2;
			double area_left, area_right;
			Rect rect_1;
			Rect rect_2;

			//determines which hand is left/right
			blob_1 = blobs.GetBlob(i_1);
			blob_2 = blobs.GetBlob(i_2);
			center_1 = blob_1->getCenter();
			center_2 = blob_2->getCenter();
			bool left_is_1 = (center_1.x < center_2.x)? true : false;
			leftBlob = (left_is_1)? blob_1 : blob_2;
			rightBlob = (left_is_1)? blob_2 : blob_1;
			center_left = leftBlob->getCenter();
			center_right = rightBlob->getCenter();

			//determine the number of valid hands
			//validity is decided by whether or not the hand followed a logical movement,
			//and if the area of the blob is large enough to be accepted
			int valids = 0;
			rect_1 = leftBlob->GetBoundingBox();
			rectangle(copy, rect_1.tl(), rect_1.br(), leftColor_2, 5);
			error_left = norm(statePt_left - center_left);
			area_left = leftBlob->Area();
			left_valid = error_left < sensitivity && area_left > area;
			if(left_valid){
				leftBlob->FillBlob(copy,leftColor, true);
				valids ++;
			}
			circle(copy, center_left, 5, leftColor_2, -1);


			rect_2 = rightBlob->GetBoundingBox();
			rectangle(copy, rect_2.tl(), rect_2.br(), rightColor_2, 5);
			error_right = norm(statePt_right - center_right);
			area_right = rightBlob->Area();
			right_valid = error_right < sensitivity && area_right > area;
			if(right_valid){
				rightBlob->FillBlob(copy,rightColor, true);
				valids ++;
			}
			circle(copy, center_right, 5, rightColor_2, -1);


			//finds the blob representing the robot
			//we could add a restriction to only choose a blob between the two hands
			//in terms of x-coordinate
			//a Kalman check can easily be done for the robot
			Point robot_center;
			maxArea_1 = 0;
			max_1 = 0;
			numBlobs = robot_blobs.GetNumBlobs();
			if(0 < numBlobs){
				for(int i=0;i<numBlobs;i++){
					curblob = robot_blobs.GetBlob(i);
					robot_center = curblob->getCenter();
					double dist_1 = norm(center_1 - robot_center);
					double dist_2 = norm(center_2 - robot_center);
					if(dist_1 < len || dist_2 < len){
						double area = robot_blobs.GetBlob(i)->Area();
						if(area > maxArea_1){
							max_1 = i;
							maxArea_1 = area;
						}
					}
				}
				int i_3 = max_1;
				curblob = robot_blobs.GetBlob(i_3);
				curblob->FillBlob(copy,robotColor, true);
				robot_center = curblob->getCenter();
				circle(copy, robot_center, 5, robotColor_2, -1);
				Rect rect_3 = curblob->GetBoundingBox();
				rectangle(copy, rect_3.tl(), rect_3.br(), robotColor_2, 5);

				// determines which hand is controlling the robot
				// by checking the positions of the 3 blobs
				// an additional check could be done by verifying whether
				// the center of the robot is moving in the same direction
				// as the center of the hand moving it
				bool is_left = false;
				bool is_right = false;
				bool confirmed = false;

				double dist_left = norm(center_left - robot_center);
				double dist_right = norm(center_right - robot_center);
				double dist_both = norm(center_left - center_right);

				Point robot_tl = rect_3.tl();
				Point robot_br = rect_3.br();

				int left_count = 0;
				int right_count = 0;

				if(rect_1.contains(robot_tl)) left_count++;
				if(rect_1.contains(robot_br)) left_count++;
				if(rect_1.contains(robot_center)) left_count++;
				if(rect_2.contains(robot_tl)) right_count++;
				if(rect_2.contains(robot_br)) right_count++;
				if(rect_2.contains(robot_center)) right_count++;


				switch(valids){
					case 0: break;
					case 1:{
						double area_sum = area_left + area_right;
						if(dist_left > 2* dist_right || dist_right > 2*dist_left){
							if(area_sum > 2 * area && (area_left > 2*area_right || area_right > 2*area_left) &&
							((left_valid && left_count > 0)||(right_valid && right_count > 0))){
								is_left = true;
								is_right = true;
								if(left_count > 2 || right_count > 2) confirmed = true;
							}
						}
						if(left_valid && left_count > 1) {
							is_left = true;
							if(left_count > 2) confirmed = true;
						}
						if(right_valid && right_count > 1) {
							is_right = true;
							if(right_count > 2) confirmed = true;
						}

						//if just one hand is on screen
						if(area_right < area/2){
							if(center_left.x > robot_center.x){
								is_left = true;
							} else{
								is_right = true;
							}
						} else if (area_left < area/2){
							if(center_right.x < robot_center.x){
								is_right = true;
							} else{
								is_left = true;
							}
						}
						break;}
						case 2:{
							int moreLeft = left_count - right_count;
							int moreRight = right_count - left_count;
							int countSum = left_count + right_count;

							switch (countSum) {
								case 3:{

									switch (left_count) {
										case 3: is_left = true;
										confirmed = true;
										break;
										case 2:
										case 1: is_left = true;
										is_right = true;
										confirmed= true;
										break;
										case 0: is_right = true;
										confirmed = true;
										break;
									}
								}
								break;
								case 2:{

									switch (left_count) {
										case 2: is_left = true;
										confirmed = true;
										break;
										case 1: is_left = true;
										is_right = true;
										break;
										case 0: is_right = true;
										confirmed = true;
										break;
									}
								}
								break;
								case 1:{

									switch (left_count) {
										case 1: is_left = true;
										break;
										case 0: is_right = true;
										break;
									}
								}
								break;
								case 0:{
									break;
								}
							}


							break;}
						}

				bool found = false;
				for(int i = robot_tl.x; i <= robot_br.x && !found; i++){
					for(int j = robot_tl.y; j <= robot_br.y && !found; j++){
						int color1 = 0; int color2 = 255;
						Vec3b colour = copy.at<Vec3b>(Point(i, j));
						if(colour[1] == color1 && colour[0] == color2){
							found = true;
							is_left = true;
						}
						if(colour[1] == color2 && colour[0] == color1){
							found = true;
							is_right = true;
						}
					}
				}
				if (found) confirmed = true;

				if(!is_left && !is_right){
					cout << "-- none!";
					if(left_count == 0 && right_count == 0) confirmed = true;
				} else if(is_left && is_right){
					cout << "-- both!";
				} else {
					if (is_left){
						cout << " -- left!";
					} else {
						cout << " -- right!";
					}
				}

				imshow("kalman", kf_img);

				if(confirmed){
					nb_confirmed ++;
					cout << " -- confirmed" << endl;
				} else {
					cout << endl;
				}
				csv(&logs, center_left.x, center_left.y, is_left, center_right.x, center_right.y, is_right, confirmed);
			}
			nb_total ++;



					//
					// //displayOverlay("Blobs Image","Multi Thread");
					new_dist = norm(center_left - center_right);
					// don't throw errors in the first 10 frames
					if(ticks > 10){
						if(error_left > 20 && error_right > 20 /*&& new_dist < prev_dist*/){
							circle(copy, Point(WIDTH/2, HEIGHT/2), 100, Scalar(0, 0, 255), 30);
							nb_errors ++;
						}
					}

					prev_dist = new_dist;

					imshow("Blobs Image",copy);


					key = waitKey(10);
		} else{
			eof = true;
		}

		if(27 == key || 1048603 == key || eof){
			double kalman_error_percentage = (nb_errors*100.0)/ticks;
			double confirm_percentage = (nb_confirmed*100.0/nb_total);
			stats << "kalman error frequency: " << kalman_error_percentage << "\%" << endl;
			stats << "confirmed: " << confirm_percentage << "\%" << endl;

			logs.close();
			stats.close();
			return 0;
		}

	}
}
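initialize_kalman is not shown; a plausible sketch, assuming a constant-velocity model over the state (x, y, vx, vy) — the noise magnitudes here are assumptions:

//A plausible sketch of the helper assumed above.
void initialize_kalman(KalmanFilter* KF, Point center)
{
	// Constant-velocity transition: x += vx, y += vy each frame.
	KF->transitionMatrix = (Mat_<float>(4, 4) << 1,0,1,0,  0,1,0,1,  0,0,1,0,  0,0,0,1);
	KF->statePre.at<float>(0) = (float)center.x;
	KF->statePre.at<float>(1) = (float)center.y;
	KF->statePre.at<float>(2) = 0;
	KF->statePre.at<float>(3) = 0;
	setIdentity(KF->measurementMatrix);                          // measure (x, y) only
	setIdentity(KF->processNoiseCov,     Scalar::all(1e-4));
	setIdentity(KF->measurementNoiseCov, Scalar::all(1e-1));
	setIdentity(KF->errorCovPost,        Scalar::all(0.1));
}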
Example #13
/*
arg1: Width of each frame
arg2: Height of each frame
arg3: Target frames per second of the program
arg4: Maximum number of blobs to track. Each blob MAY correspond to a person in front of the camera
*/
int main(int argc, char* argv[])
{
    if (argc < 5)
    {
        cout << "Too few arguments to the program. Exiting...\n";
        return 0;
    }

    int width, height, fps, numberOfBlobs;
    try
    {
        //Read the arguments
        width = atoi(argv[1]);
        height = atoi(argv[2]);
        fps = atoi(argv[3]);
        numberOfBlobs = atoi(argv[4]);
        //Done reading arguments
    }
    catch(...)
    {
        cout << "One or more arguments are invalid!. Exiting...\n";
        return 0;
    }


    /*
    int width = 320;
    int height = 240;
    int fps = 10;
    int numberOfBlobs = 2;
    */

    tempImageV4L = cvCreateImage(cvSize(width, height), 8, 3);
    frameNumber = 0;

    //Beginning initialising cameras
    rightCamera = new Camera("/dev/video0", width, height, fps);
    leftCamera = new Camera("/dev/video1", width, height, fps);
	//leftCamera = rightCamera; //If only one camera is available, uncomment this line and comment the line above this.
    //Done initialising cameras

    //Waste some frames so as to get the cameras running in full flow
    WasteNFrames(10);

    //Beginning capturing background
    backImageRight = GetNextCameraShot(rightCamera);
    backImageLeft = GetNextCameraShot(leftCamera);
    frameNumber++;
    cvtColor(backImageRight, backImageRight, CV_BGR2HSV);
    cvtColor(backImageLeft, backImageLeft, CV_BGR2HSV);
    //Done capturing background

    //General Stuff
    Mat motionImageRight(backImageRight.rows, backImageRight.cols, CV_8UC1);
    Mat motionImageLeft(backImageLeft.rows, backImageLeft.cols, CV_8UC1);
    Mat HSVImageRight, HSVImageLeft;
    Mat displayImageRight, displayImageLeft;
    //End of General Stuff


    while (1) //The infinite loop
    {
        //Beginning getting camera shots
        rightImage = GetNextCameraShot(rightCamera);
        leftImage = GetNextCameraShot(leftCamera);
        frameNumber++;
        //Done getting camera shots


        //Beginning getting motion images
        HSVImageRight = rightImage.clone();
        cvtColor(HSVImageRight, HSVImageRight, CV_BGR2HSV);
        CompareWithBackground(HSVImageRight, backImageRight, motionImageRight);
        medianBlur(motionImageRight, motionImageRight, 3);

        HSVImageLeft = leftImage.clone();
        cvtColor(HSVImageLeft, HSVImageLeft, CV_BGR2HSV);
        CompareWithBackground(HSVImageLeft, backImageLeft, motionImageLeft);
        medianBlur(motionImageLeft, motionImageLeft, 3);
        //Ended getting motion images

        cout << "\nFor frame #" << frameNumber << " :\n";

        //Beginning Getting Blobs
        IplImage  imageblobPixels = motionImageRight;
        CBlobResult blobs;
        blobs = CBlobResult(&imageblobPixels, NULL, 0);	// Use a black background color.
        int minArea = 100 / ((640 / width) * (640 / width));
        blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minArea);
        int foundBlobs = blobs.GetNumBlobs();
        //Ended Getting Blobs

        cout << "Found " << foundBlobs << " motion blobs\n";

        //Creating copies of original images for modifying and displaying
        displayImageRight = rightImage.clone();
        displayImageLeft = leftImage.clone();
        //Done creating copies

        //Cycling through the blobs
        for (int blobIndex = 0; blobIndex < blobs.GetNumBlobs() && blobIndex < numberOfBlobs; blobIndex++)
        {
            cout << "Blob #" << blobIndex << " : ";

            //Getting blob details
            CBlob * blob = blobs.GetBlob(blobIndex);
            int x = blob->GetBoundingBox().x;
            int y = blob->GetBoundingBox().y;
            int w = blob->GetBoundingBox().width;
            int h = blob->GetBoundingBox().height;
            //Done getting blob details

            int sep = 0;

            //The point for which we want to find depth
            PixPoint inP = {x + w/2, y + h/2}, oP = {0, 0};
            cout << "inPoint = {" << inP.x << ", " << inP.y << "} ";

            //Initialising the rectangle in which the corresponding point is likely to lie
            Rectangle rect;
            rect.location.x = -1;
            rect.location.y = inP.y - 5;
            rect.size.x = rightImage.cols;
            rect.size.y = 11;
            //Done initialising the target rectangle

            //Find the corresponding point and calculate the separation
            oP = PointCorresponder::correspondPoint(rightImage, leftImage, inP, rect, motionImageLeft);
            sep = inP.x - oP.x;
            cout << "foundPoint = {" << oP.x << ", " << oP.y << "} ";

            //Just for visual presentation
            DrawRect(displayImageRight, x, y, w, h);
            cv::circle(displayImageRight, Point(inP.x, inP.y), 10, Scalar(0), 3);
            cv::circle(displayImageLeft, Point(oP.x, oP.y), 10, Scalar(0), 3);
            //Done decoration

            //The thing we were looking for... how can we forget to print this? :P
            cout << "seperation = " << sep << "\n";
        }

        //Show the windows
        cv::namedWindow("RIGHT");
        cv::namedWindow("thresh");
        cv::namedWindow("LEFT");
        imshow("LEFT", displayImageLeft);
        imshow("RIGHT", displayImageRight);
        imshow("thresh", motionImageRight);
        //End of code for showing windows

        //The loop terminating condition
        if (waitKey(27) >= 0) break;
    }

    //Mission Successful!! :D :)
    return 0;
}
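CompareWithBackground is not shown; a plausible sketch, assuming it flags pixels whose HSV values differ from the background by more than a fixed threshold (the value 30 here is an assumption):

//A plausible sketch of the motion helper assumed above.
void CompareWithBackground(const Mat& curr, const Mat& back, Mat& motion)
{
    Mat diff;
    absdiff(curr, back, diff);            // per-channel absolute difference
    std::vector<Mat> ch;
    split(diff, ch);
    Mat strongest;
    max(ch[0], ch[1], strongest);         // element-wise max across channels
    max(strongest, ch[2], strongest);
    threshold(strongest, motion, 30, 255, CV_THRESH_BINARY);
}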
Example #14
void BlobDetection::init()
{
    /** Init is called just after construction. */
    try
    {
        initStatusMask();
        // Create a proxy to ALVideoDevice on the robot.
        ALVideoDeviceProxy* camProxy = new ALVideoDeviceProxy(getParentBroker());
        behavoirProxy = new ALBehaviorManagerProxy(getParentBroker());
        ledProxy = new ALLedsProxy(getParentBroker());
        motionProxy = new ALMotionProxy(getParentBroker());

        initLeds();

        // Subscribe a client image requiring 640*480px and RGB colorspace.
        const std::string cameraID = camProxy->subscribeCamera("camera_01", 0, AL::kVGA, AL::kRGBColorSpace , 10);

        // Create a proxy to ALMemoryProxy on the robot.
        ALMemoryProxy fMemoryProxy = ALMemoryProxy(getParentBroker());
        fMemoryProxy.subscribeToEvent("FrontTactilTouched", "BlobDetection","onFrontTactilTouched");
        fMemoryProxy.subscribeToEvent("MiddleTactilTouched", "BlobDetection","onMiddleTactilTouched");

        HandOrientation rightOrientationLast = NONE;
        HandOrientation leftOrientationLast = NONE;
        HandOrientation rightOrientationCur = NONE, leftOrientationCur = NONE;

        // stand up
        behavoirProxy->runBehavior(STAND);

        // RECORDING: prepare video recording
        /*
        int size;
        std::string arvFile = std::string("/home/nao/video");

        streamHeader tmpStreamHeader;
        std::vector<streamHeader> streamHeaderVector;
        ALVideo videoFile;

        tmpStreamHeader.width = 640;
        tmpStreamHeader.height = 480;
        tmpStreamHeader.colorSpace = AL::kRGBColorSpace; // not strictly necessary, since in pyuv you decide which colorspace the video is shown in
        tmpStreamHeader.pixelDepth = 8;
        streamHeaderVector.push_back(tmpStreamHeader);

        std::cout<<"Output arv file properties: "<< streamHeaderVector[0].width <<"x"<< streamHeaderVector[0].height
            <<" Colorspace id:"<< streamHeaderVector[0].colorSpace <<" Pixel depth:"<< streamHeaderVector[0].pixelDepth
            <<std::endl;

        if( !videoFile.recordVideo( arvFile, 0, streamHeaderVector ) ) {
            std::cout<<"Error writing "<< arvFile <<" file."<<std::endl;
            return;
        }
        */

        int j = 0;

        while(1)
        {
            if(touched)
            {
                //j++;
                //Switch LEDs RED OFF, BLUE ON
                if(red_on == 1)
                {
                    ledProxy->off(FACE_LED_RED);
                    red_on = 0;
                }
                if(blue_on == 0)
                {
                    ledProxy->on(FACE_LED_BLUE);
                    blue_on = 1;
                }
                // Fetch the image from the nao camera, we subscribed on. Its in RGB colorspace
                ALImage *img_cam = (ALImage*)camProxy->getImageLocal(cameraID);

                // Create an openCv Mat header to convert the aldebaran AlImage image.
                // To convert the aldebaran image only the data are of it are assigned to the openCv image.
                Mat img_hsv = Mat(Size(img_cam->getWidth(), img_cam->getHeight()), CV_8UC3);
                img_hsv.data = (uchar*) img_cam->getData();

                // Convert the RGB image from the camera to an HSV image
                cvtColor(img_hsv, img_hsv, CV_RGB2HSV);

                // RECORDING: record converted to hsv video
                //videoFile.write((char*) img_hsv.data, size); //video ging hier

                // Get the separate HSV color components of the color input image.
                std::vector<Mat> channels(3);
                split(img_hsv, channels);

                Mat planeH = channels[0];
                Mat planeS = channels[1];
                Mat planeV = channels[2];

                // Detect which pixels in each of the H, S and V channels are probably skin pixels.
                threshold(planeH, planeH, 150, UCHAR_MAX, CV_THRESH_BINARY_INV);//18
                threshold(planeS, planeS, 60, UCHAR_MAX, CV_THRESH_BINARY);//50
                threshold(planeV, planeV, 170, UCHAR_MAX, CV_THRESH_BINARY);//80

                // Combine all 3 thresholded color components, so that an output pixel will only
                // be white if the H, S and V pixels were also white.
                Mat imageSkinPixels = Mat(img_hsv.size(), CV_8UC1);	// Greyscale output image.
                bitwise_and(planeH, planeS, imageSkinPixels);	// imageSkin = H {BITWISE_AND} S.
                bitwise_and(imageSkinPixels, planeV, imageSkinPixels);	// imageSkin = H {BITWISE_AND} S {BITWISE_AND} V.

                // Assign the Mat (C++) to an IplImage (C); this is necessary because the blob detection is written against the old OpenCV C API
                IplImage ipl_imageSkinPixels = imageSkinPixels;

                // RECORDING: record the video using the C container variable
                // RECORDING: store the size (in memory terms) of the image for recording purposes
                //size = img_cam->getSize();
                //videoFile.write((char*) ipl_imageSkinPixels.imageData, size/3);

                // Set up the blob detection.
                CBlobResult blobs;
                blobs.ClearBlobs();
                blobs = CBlobResult(&ipl_imageSkinPixels, NULL, 0);	// Use a black background color.

                // Ignore the blobs whose area is less than minArea.
                blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minBlobArea);

                // ##### Gestures #####
                std::cout << "Number of Blobs: " << blobs.GetNumBlobs() <<endl;
                if(blobs.GetNumBlobs() == 0)
                {
                    //picture empty
                }
                else if(blobs.GetNumBlobs() == 1)
                {
                    //head detected
                    trackHead(getCenterPoint(blobs.GetBlob(0)->GetBoundingBox()).x, getCenterPoint(blobs.GetBlob(0)->GetBoundingBox()).y);

                }
                else if(blobs.GetNumBlobs() == 2 || blobs.GetNumBlobs() == 3)
                {
                    //head + one hand || head + two hands
                    Rect rect[3];
                    int indexHead = -1, indexHandLeft = -1, indexHandRight = -1;

                    //Get Bounding Boxes
                    for(int i = 0; i< blobs.GetNumBlobs(); i++)
                    {
                        rect[i] = blobs.GetBlob(i)->GetBoundingBox();
                    }

                    //Detect Head and Hand indexes
                    if(blobs.GetNumBlobs() == 2)
                    {
                        // head and one hand
                        int indexHand = -1;
                        if(getCenterPoint(rect[0]).y < getCenterPoint(rect[1]).y)
                        {
                            // rect[0] is head
                            indexHead = 0;
                            indexHand = 1;
                        }
                        else
                        {
                            // rect[1] is head
                            indexHead = 1;
                            indexHand = 0;
                        }

                        if(getHandside(rect[indexHead], rect[indexHand]) == LEFT)
                        {
                            // hand is left
                            indexHandLeft = 1;
                            indexHandRight = -1;
                        }
                        else
                        {
                            // hand is right
                            indexHandLeft = -1;
                            indexHandRight = 1;
                        }

                    }
                    else
                    {
                        //two hands
                        int indexHand1 = -1;
                        int indexHand2 = -1;
                        if(getCenterPoint(rect[0]).y < getCenterPoint(rect[1]).y && getCenterPoint(rect[0]).y < getCenterPoint(rect[2]).y)
                        {
                            // rect[0] is head
                            indexHead = 0;
                            indexHand1 = 1;
                            indexHand2 = 2;
                        }
                        else if(getCenterPoint(rect[1]).y < getCenterPoint(rect[0]).y && getCenterPoint(rect[1]).y < getCenterPoint(rect[2]).y)
                        {
                            // rect[1] is head
                            indexHead = 1;
                            indexHand1 = 0;
                            indexHand2 = 2;
                        }
                        else
                        {
                            // rect[2] is head
                            indexHead = 2;
                            indexHand1 = 0;
                            indexHand2 = 1;
                        }

                        if(getHandside(rect[indexHead], rect[indexHand1]) == LEFT)
                        {
                            indexHandLeft = indexHand1;
                            indexHandRight = indexHand2;
                        }
                        else
                        {
                            indexHandLeft = indexHand2;
                            indexHandRight = indexHand1;
                        }
                    }

                    // blobs are detected;
                    // adjust Nao's head toward the detected head blob
                    trackHead(getCenterPoint(rect[indexHead]).x, getCenterPoint(rect[indexHead]).y);

                    //Get Orientations from Hand rects
                    leftOrientationCur = (indexHandLeft != -1)?getOrientationOfRect(rect[indexHandLeft]):NONE;
                    rightOrientationCur = (indexHandRight != -1)?getOrientationOfRect(rect[indexHandRight]):NONE;

                    //Check Change of Left hand
                    switch(detectHandStateChange(leftOrientationLast, leftOrientationCur))
                    {
                    case PORTRAIT_TO_LANDSCAPE:
                        handleGestures(LEFT_FLIP_DOWN);
                        break;
                    case LANDSCAPE_TO_PORTRAIT:
                        handleGestures(LEFT_FLIP_UP);
                        break;
                    case NOCHANGE:
                        // TODO
                    default:
                        break;
                    }

                    //Check Change of Right hand
                    switch(detectHandStateChange(rightOrientationLast, rightOrientationCur))
                    {
                    case PORTRAIT_TO_LANDSCAPE:
                        handleGestures(RIGHT_FLIP_DOWN);
                        break;
                    case LANDSCAPE_TO_PORTRAIT:
                        handleGestures(RIGHT_FLIP_UP);
                        break;
                    case NOCHANGE:
                        //TODO
                    default:
                        break;
                    }
                }
                else if(blobs.GetNumBlobs() > 3)
                {
                    //too much information
                    cout<<"too much information"<<endl;
                }

                leftOrientationLast = leftOrientationCur;
                rightOrientationLast = rightOrientationCur;

                // RECORDING: close the video recorder
                //videoFile.closeVideo();

                // Free all the resources.
                camProxy->releaseImage(cameraID);

                //IplImage* p_iplImage = &ipl_imageSkinPixels;
                //cvReleaseImage(&p_iplImage);

                qi::os::sleep(0.5f);
                //sleep(1);
            }
            else
            {
                //Switch LEDs RED ON, BLUE OFF
                if(red_on == 0)
                {
                    ledProxy->on(FACE_LED_RED);
                    red_on = 1;
                    behavoirProxy->runBehavior(STAND);
                }
                if(blue_on == 1)
                {
                    ledProxy->off(FACE_LED_BLUE);
                    blue_on = 0;
                }
            }
        }
        camProxy->unsubscribe(cameraID);

    }
    catch (const AL::ALError& e)
    {
        std::cerr << "Caught exception: " << e.what() << std::endl;
        return;
    }
    return;
}
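Both blob-gesture examples call a getHandside helper that is not shown; a plausible sketch, assuming a LEFT/RIGHT result based on the hand's center relative to the head's center (whether the camera view is mirrored would flip this, so the convention here is an assumption):

enum Handside { LEFT, RIGHT }; // assumed result type

Handside getHandside(Rect head, Rect hand)
{
    // Hand center left of the head center -> LEFT, otherwise RIGHT.
    return (hand.x + hand.width / 2 < head.x + head.width / 2) ? LEFT : RIGHT;
}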
Example #15
File: RunTracker.cpp Project: elfmedy/2013
int main(int argc, char * argv[])
{
	vector <string> imgNames;
	vector <string> imgNamesMask;
	char strFrame[20];
	readImageSequenceFiles(imgNames, imgNamesMask);

	list<TrackLine> trackLineArr;

	// read the original frame and foreground mask for processing
	// you can modify this to read video by adding a segmentation algorithm
	for(unsigned int i = 40; i < imgNames.size() - 1; i++)
	{ 		
		Mat frame = imread(imgNames[i]);
		Mat grayImg;
		cvtColor(frame, grayImg, CV_RGB2GRAY);

		Mat maskImage = imread(imgNamesMask[i], 0);

		// get blobs and filter them using its area
		// use 'cvblobslib' to get the object blobs
		threshold( maskImage, maskImage, 81, 255, CV_THRESH_BINARY );
		medianBlur(maskImage, maskImage, 3);
			
		IplImage ipl_maskImage = maskImage;
		CBlobResult blobs = CBlobResult( &ipl_maskImage, NULL, 0 );
		blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 30 );	// filter out blobs whose area is smaller than a certain value
		
		list<CBlob *> remBlob;
		for (int k = 0; k < blobs.GetNumBlobs(); k++)
		{
			remBlob.push_back(blobs.GetBlob(k));
		}

		printf("%d\n", trackLineArr.size());
		for (list<TrackLine>::iterator trackIter = trackLineArr.begin(); trackIter != trackLineArr.end(); )
		{
			//kf prediction, get kfRect
			Mat kfPrediction = (trackIter->kf).predict();
			Point kfPrePt((int)(kfPrediction.at<float>(0)), (int)(kfPrediction.at<float>(1)));
			Rect kfRect(kfPrePt.x - (trackIter->box).width / 2, kfPrePt.y - (trackIter->box).height / 2, (trackIter->box).width, (trackIter->box).height);
			//ct prediction, get ctRect
			int ctError = 0;
			Rect ctRect(trackIter->box);
			float score = (trackIter->ct).predicition(grayImg, ctRect);
			rectangle(frame, kfRect, Scalar(0, 200, 0));	//green, kf prediction box
			rectangle(frame, ctRect, Scalar(0, 0, 200));	//red, ct prediction box
			//union of the two predicted rectangles
			//if they share no area, we consider ct wrong, because the Kalman prediction follows physical movement
			float areaScale = (float)(sqrt((kfRect & ctRect).area() *1.0 / kfRect.area()));
			Point movePoint((int)((ctRect.x - kfRect.x) * areaScale), (int)((ctRect.y - kfRect.y) * areaScale));
			Rect unionPreRect = kfRect + movePoint;

			//calc object box
			Rect objRect;
			int j = 0;
			for (list<CBlob *>::iterator blobIter = remBlob.begin(); blobIter != remBlob.end(); )
			{
				Rect detRect((*blobIter)->GetBoundingBox());
				float detArea = (float)((*blobIter)->Area());
				if ((unionPreRect & detRect).area() > 0)
				{
					if (j++ == 0) objRect = detRect;
					else objRect = objRect | detRect;
					blobIter = remBlob.erase(blobIter);
				}
				else blobIter++;
			}

			// recenter the detection box while keeping the predicted box's size
			float objArea = (float)(objRect.area());
			objRect = Rect((int)(objRect.x + objRect.width / 2.0 - unionPreRect.width / 2.0), 
				(int)(objRect.y + objRect.height / 2.0 - unionPreRect.height / 2.0), 
				unionPreRect.width, unionPreRect.height);

			float detAreaScale = (float)(sqrt(objArea * 1.0 / unionPreRect.area()));
			if (detAreaScale > 1.0) detAreaScale = 1.0;
			Point detMovePoint((int)((objRect.x - unionPreRect.x) * detAreaScale), (int)((objRect.y - unionPreRect.y) * detAreaScale));
			Rect unionCorrRect = unionPreRect + detMovePoint;

			// if detect area > 0
			if (objArea > 0)
			{
				trackIter->box = unionCorrRect;
				rectangle(frame, unionCorrRect, Scalar(200,0,0), 1);
				//kf correct
				Mat_<float> measurement(2,1);
				measurement(0) = (float)((trackIter->box).x + (trackIter->box).width / 2.0);
				measurement(1) = (float)((trackIter->box).y + (trackIter->box).height / 2.0);
				(trackIter->kf).correct(measurement);
				//ct update
				(trackIter->ct).update(grayImg, trackIter->box);

				trackIter++;
			}
			// otherwise we assume the tracker missed this frame
			else
			{
				if ((trackIter->miss)++ == 5) trackIter = trackLineArr.erase(trackIter);
				else trackIter++;
			}
		}

		// !!!
		// a crude way to seed a new track: union all remaining blob boxes and expand the result slightly
		// it fails when two objects appear at the same time, since they collapse into a single init box
		// the heuristic has no real justification; a proper detector should supply the init box instead
		// this sample only demonstrates the tracking algorithm; the init-box method is a known weak spot -_-!
		// !!!
		Rect tmprect;
		int u = 0;
		for (list<CBlob *>::iterator blobIter = remBlob.begin(); blobIter != remBlob.end(); blobIter++)
		{
			if (u++ == 0) tmprect = Rect((*blobIter)->GetBoundingBox());
			else tmprect = tmprect | Rect((*blobIter)->GetBoundingBox());
		}
		if (tmprect.area() > 0) tmprect = Rect(tmprect.x - 5, tmprect.y - 8, tmprect.width + 10, tmprect.height + 16);
		if (tmprect.area() > 0 && tmprect.x != 0 && tmprect.y != 0 && (tmprect.x + tmprect.width) != 319 && (tmprect.y + tmprect.height) != 239)
		{
			TrackLine track;
			track.kf.transitionMatrix = *(Mat_<float>(4, 4) << 1,0,1,0,   0,1,0,1,  0,0,1,0,  0,0,0,1);
			track.kf.measurementMatrix = *(Mat_<float>(2, 4) << 1,0,0,0,   0,1,0,0);
			setIdentity(track.kf.processNoiseCov, Scalar::all(1e-4));
			setIdentity(track.kf.measurementNoiseCov, Scalar::all(1e-1));
			setIdentity(track.kf.errorCovPost, Scalar::all(.1));
			// kf init
			track.kf.statePre.at<float>(0) = (float)(tmprect.x + tmprect.width / 2.0);
			track.kf.statePre.at<float>(1) = (float)(tmprect.y + tmprect.height / 2.0);
			track.kf.statePre.at<float>(2) = 0;
			track.kf.statePre.at<float>(3) = 0;
			track.kf.statePost.at<float>(0) = (float)(tmprect.x + tmprect.width / 2.0);
			track.kf.statePost.at<float>(1) = (float)(tmprect.y + tmprect.height / 2.0);
			track.kf.statePost.at<float>(2) = 0;
			track.kf.statePost.at<float>(3) = 0;
			// ct init
			track.ct.init(grayImg, tmprect);

			rectangle(frame, tmprect, Scalar(255, 0, 0), 2, 7);
			track.box = tmprect;

			trackLineArr.push_back(track);
		}

		sprintf(strFrame, "#%d ",i) ;
		putText(frame,strFrame,cvPoint(0,20),2,1,CV_RGB(25,200,25));

		char outstr[20];
		//if (0)
		//if ((i >= 450 && i <= 600) || (i >= 930 && i <= 960) || (i >= 1420 && i <= 1450))
		{
			sprintf(outstr, "output\\%d.png", i);
			string outstring(outstr);
			imwrite(outstring, frame);
			sprintf(outstr, "output\\mask_%d.png", i);
			string outstring2(outstr);
			imwrite(outstring2, maskImage);
		}

		//imshow("ORG", frame);
		//imshow("mask", maskImage);
		//waitKey(1);
	}

	return 0;
}
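
The tracker above fuses a constant-velocity Kalman filter with a compressive-tracking (CT) appearance model. As a minimal, self-contained sketch of just the Kalman half (OpenCV 2.x C++ API; the [x, y, vx, vy] state layout and noise covariances mirror the init code above, while the drifting measurement is invented for illustration):

#include <opencv2/video/tracking.hpp>
#include <opencv2/core/core.hpp>
using namespace cv;

// Constant-velocity Kalman tracker: state = [x, y, vx, vy], measurement = [x, y].
int main()
{
	KalmanFilter kf(4, 2, 0);
	kf.transitionMatrix = (Mat_<float>(4, 4) << 1,0,1,0,  0,1,0,1,  0,0,1,0,  0,0,0,1);
	setIdentity(kf.measurementMatrix);
	setIdentity(kf.processNoiseCov, Scalar::all(1e-4));
	setIdentity(kf.measurementNoiseCov, Scalar::all(1e-1));
	setIdentity(kf.errorCovPost, Scalar::all(0.1));
	kf.statePost = (Mat_<float>(4, 1) << 160, 120, 0, 0);	// hypothetical initial center

	for (int t = 0; t < 10; t++)
	{
		Mat prediction = kf.predict();	// predicted [x, y, vx, vy] before seeing the detection
		Mat_<float> measurement(2, 1);
		measurement(0) = 160 + 2.0f * t;	// fake detection drifting right
		measurement(1) = 120;
		kf.correct(measurement);	// fuse the detection into the state
	}
	return 0;
}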
Example #16
File: locator.cpp Project: ashokzg/cpb
	void locator()
	{
		namedWindow("Tracking");
		int hMin, hMax, sMin, sMax, vMin, vMax,area_min;
		hMin = 0;
		//hMax = 124; // night-time values?
		hMax = 255;
		//sMin = 95;
		sMin = 126;
		sMax = 255;
		//vMin = 139;
		vMin = 173;
		vMax = 255;
		area_min = 100;
		Mat smoothed, hsvImg, t_img;
		createTrackbar("blob min area","Tracking" ,&area_min ,1000);
		createTrackbar("Hue Min", "Tracking", &hMin, 255);
		createTrackbar("Hue Max", "Tracking", &hMax, 255);
		createTrackbar("Sat Min", "Tracking", &sMin, 255);
		createTrackbar("Sat Max", "Tracking", &sMax, 255);
		createTrackbar("Val Min", "Tracking", &vMin, 255);
		createTrackbar("Val MaX", "Tracking", &vMax, 255);
		while(ros::ok())
		{
			Mat source = imageB;
			Mat copy = imageB.clone();
			GaussianBlur(source, smoothed, Size(9,9), 4);
			cvtColor(smoothed, hsvImg, CV_BGR2HSV);
			inRange(hsvImg, Scalar(hMin, sMin, vMin), Scalar(hMax, sMax, vMax), t_img);

			CBlobResult blob;
			IplImage i_img = t_img;
			blob = CBlobResult(&i_img,NULL,0);
			blob.Filter(blob, B_INCLUDE, CBlobGetArea(), B_INSIDE, area_min, blob_area_absolute_max_);
			int num_blobs = blob.GetNumBlobs();	// count after filtering

			std::string reference_frame = "/virtual_table"; // Table frame at ball_radius above the actual table plane

			tf::StampedTransform transform;
			tf_.waitForTransform(reference_frame, model.tfFrame(), ros::Time(0), ros::Duration(0.5));
			tf_.lookupTransform(reference_frame, model.tfFrame(), ros::Time(0), transform);

			for(int i =0;i<num_blobs;i++)
			{
				CBlob* bl = blob.GetBlob(i);
				Point2d uv(CBlobGetXCenter()(*bl), CBlobGetYCenter()(*bl));
				//Use half the blob width as the vertical offset (treat the width as the ball's visible diameter)
				uv.y = bl->MinY() + (bl->MaxX() - bl->MinX()) * 0.5;
				circle(copy,uv,50,Scalar(255,0,0),5);

				cv::Point3d xyz;
				model.projectPixelTo3dRay(uv, xyz);
		
				// Intersect ray with plane in virtual table frame
				//Origin of camera frame wrt virtual table frame
				tf::Point P0 = transform.getOrigin();
				//Point at end of unit ray wrt virtual table frame
				tf::Point P1 = transform * tf::Point(xyz.x, xyz.y, xyz.z);
				// Origin of virtual table frame
				tf::Point V0 = tf::Point(0.0,0.0,0.0);
				// normal to the table plane
				tf::Vector3 n(0, 0, 1);
				// finding scaling value
				double scale = (n.dot(V0-P0))/(n.dot(P1-P0));
				tf::Point ball_pos = P0 + (P1-P0)*scale;
				cout <<ball_pos.x() << " " << ball_pos.y() << " " << ball_pos.z() <<endl;
			}
			imshow(WINDOW, copy);
			waitKey(3);

			imshow("edited", t_img);
			waitKey(3);

			ros::spinOnce();
		}
	}
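
The loop above locates the ball by intersecting the camera ray with the table plane: with ray endpoints P0 and P1 and a plane through V0 with normal n, the scale factor is n·(V0−P0) / n·(P1−P0). A minimal sketch of that computation without the tf dependency (the Vec3 type is a stand-in):

#include <cstdio>

struct Vec3 { double x, y, z; };
static Vec3 sub(Vec3 a, Vec3 b) { return Vec3{a.x - b.x, a.y - b.y, a.z - b.z}; }
static Vec3 add(Vec3 a, Vec3 b) { return Vec3{a.x + b.x, a.y + b.y, a.z + b.z}; }
static Vec3 mul(Vec3 a, double s) { return Vec3{a.x * s, a.y * s, a.z * s}; }
static double dot(Vec3 a, Vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }

int main()
{
	Vec3 P0 = {0.0, 0.0, 1.0};	// camera origin in plane coordinates
	Vec3 P1 = {0.1, 0.2, 0.0};	// point at the end of the unit ray
	Vec3 V0 = {0.0, 0.0, 0.0};	// a point on the plane
	Vec3 n  = {0.0, 0.0, 1.0};	// plane normal

	double scale = dot(n, sub(V0, P0)) / dot(n, sub(P1, P0));	// ray-plane scale factor
	Vec3 hit = add(P0, mul(sub(P1, P0), scale));	// intersection point on the plane
	printf("%f %f %f\n", hit.x, hit.y, hit.z);
	return 0;
}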
Example #17
void
Auvsi_Recognize::extractShape( void )
{
	typedef cv::Vec<T, 1> VT;

	// Reduce input to two colors
	cv::Mat reducedColors = doClustering<T>( _image, 2 );	
	cv::Mat grayScaled, binary;

	// Make output grayscale
	grayScaled = convertToGray( reducedColors );
	//cv::cvtColor( reducedColors, grayScaled, CV_RGB2GRAY );

	// Make binary
	double min, max;
	cv::minMaxLoc( grayScaled, &min, &max );
	cv::threshold( grayScaled, binary, min, 1.0, cv::THRESH_BINARY );	

	// ensure that the background is black and the shape is white
	if( binary.at<VT>(0, 0)[0] > 0.0f )
		cv::threshold( grayScaled, binary, min, 1.0, cv::THRESH_BINARY_INV );

	binary.convertTo( binary, CV_8U, 255.0f );

	// Fill in all black regions smaller than largest black region with white
	CBlobResult blobs;
	CBlob * currentBlob;
	IplImage binaryIpl = binary;
	blobs = CBlobResult( &binaryIpl, NULL, 255 );
	
	// Get area of biggest blob
	CBlob biggestBlob;
	blobs.GetNthBlob( CBlobGetArea(), 0, biggestBlob );

	// Remove all blobs of smaller area
	blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, biggestBlob.Area() );

	for (int i = 0; i < blobs.GetNumBlobs(); i++ )
	{
    	currentBlob = blobs.GetBlob(i);
		currentBlob->FillBlob( &binaryIpl, cvScalar(255));
	}
	


	// Fill in all small white regions black 
	blobs = CBlobResult( &binaryIpl, NULL, 0 );
	blobs.GetNthBlob( CBlobGetArea(), 0, biggestBlob );

	blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, biggestBlob.Area() );

	for (int i = 0; i < blobs.GetNumBlobs(); i++ )
	{
    	currentBlob = blobs.GetBlob(i);
		currentBlob->FillBlob( &binaryIpl, cvScalar(0));
	}

	binary = cv::Scalar(0);
	biggestBlob.FillBlob( &binaryIpl, cvScalar(255));

	_shape = binary;
} 
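
extractShape() keeps the dominant region by filtering cvblobslib blobs twice. On OpenCV 3.x or newer, the same "keep only the largest component" step can be written with cv::connectedComponentsWithStats — a sketch under that version assumption:

#include <opencv2/imgproc.hpp>
#include <opencv2/core.hpp>
using namespace cv;

// Keep only the largest white component of a binary CV_8U image.
Mat keepLargestComponent(const Mat &binary)
{
	Mat labels, stats, centroids;
	int n = connectedComponentsWithStats(binary, labels, stats, centroids, 8, CV_32S);

	int best = -1, bestArea = 0;
	for (int i = 1; i < n; i++)	// label 0 is the background
	{
		int area = stats.at<int>(i, CC_STAT_AREA);
		if (area > bestArea) { bestArea = area; best = i; }
	}
	Mat out = Mat::zeros(binary.size(), CV_8U);
	if (best >= 0)
		out.setTo(255, labels == best);	// mask of the biggest component
	return out;
}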
Example #18
File: blob1.cpp Project: akashwar/Eye-NAB
 int main()  
 {  
     CBlobResult blobs;    
     CBlob *currentBlob;   
     CvPoint pt1, pt2;  
     CvRect cvRect;  
     int key = 0;  
     IplImage* frame = 0;  
   
     // Initialize capturing live feed from video file or camera  
     CvCapture* capture = cvCaptureFromFile( "MOV.MPG" );  
   
     // Can't get device? Complain and quit (check before querying it)  
     if( !capture )  
     {  
         printf( "Could not initialize capturing...\n" );  
         return -1;  
     }  
   
     // Get the frames per second  
     int fps = ( int )cvGetCaptureProperty( capture,  
                                            CV_CAP_PROP_FPS );    
   
     // Windows used to display input video with bounding rectangles  
     // and the thresholded video  
     cvNamedWindow( "video" );  
     cvNamedWindow( "thresh" );        
   
     // An infinite loop  
     while( key != 'x' ) 
     { 
         // If we couldn't grab a frame... quit  
         if( !( frame = cvQueryFrame( capture ) ) )  
             break;        
   
         // Get object's thresholded image (blue = white, rest = black)  
         IplImage* imgThresh = GetThresholdedImageHSV( frame );        
   
         // Detect the white blobs from the black background  
         blobs = CBlobResult( imgThresh, NULL, 0 );    
   
         // Exclude white blobs smaller than the given value (10)    
         // The bigger the last parameter, the bigger the blobs need    
         // to be for inclusion    
         blobs.Filter( blobs,  
                       B_EXCLUDE,  
                       CBlobGetArea(),  
                       B_LESS,  
                       10 );           
   
         // Attach a bounding rectangle for each blob discovered  
         int num_blobs = blobs.GetNumBlobs();  
   
         for ( int i = 0; i < num_blobs; i++ )    
         {                 
             currentBlob = blobs.GetBlob( i );               
             cvRect = currentBlob->GetBoundingBox();  
   
             pt1.x = cvRect.x;  
             pt1.y = cvRect.y;  
             pt2.x = cvRect.x + cvRect.width;  
             pt2.y = cvRect.y + cvRect.height;  
   
             // Attach bounding rect to blob in original video input  
             cvRectangle( frame,  
                          pt1,   
                          pt2,  
                          cvScalar(0, 0, 0, 0),  
                          1,  
                          8,  
                          0 );  
         }  
   
         // Add the black and white and original images  
         cvShowImage( "thresh", imgThresh );  
         cvShowImage( "video", frame );  
   
         // Optional - used to slow up the display of frames  
         key = cvWaitKey( 2000 / fps );  
   
         // Prevent memory leaks by releasing thresholded image  
         cvReleaseImage( &imgThresh );        
     }  
   
     // We're through with using camera.   
     cvReleaseCapture( &capture );  
   
     return 0;  
 }  
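
GetThresholdedImageHSV() is called above but not defined in this listing. A plausible implementation is sketched below; the HSV bounds are hypothetical stand-ins for whatever "blue" range the original used:

// Hypothetical sketch: convert to HSV and keep pixels inside an assumed blue range.
IplImage* GetThresholdedImageHSV( IplImage* img )
{
	IplImage* imgHSV = cvCreateImage( cvGetSize( img ), 8, 3 );
	cvCvtColor( img, imgHSV, CV_BGR2HSV );

	IplImage* imgThresh = cvCreateImage( cvGetSize( img ), 8, 1 );
	cvInRangeS( imgHSV,
	            cvScalar( 100, 100, 100 ),	// lower H,S,V bound (assumed)
	            cvScalar( 130, 255, 255 ),	// upper H,S,V bound (assumed)
	            imgThresh );

	cvReleaseImage( &imgHSV );
	return imgThresh;	// caller releases it, as main() above does
}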
Example #19
	bool findBiggestBlobImage(IplImage* img, int color, IplImage* &output)
	{
		CBlobResult blobs;
		CBlob *currentBlob;

		blobs = CBlobResult( img, NULL, 0 );
		blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, m_minBlobSize );

		double biggestArea = m_minBlobSize;
		int biggestBlob = -1;

		for (int i = 0; i < blobs.GetNumBlobs(); i++ )
		{
			currentBlob = blobs.GetBlob(i);
			double blobArea = currentBlob->Area();
			if(blobArea > biggestArea) 
			{
				biggestBlob = i;
				biggestArea = blobArea;
			}
		}

		if(biggestBlob >= 0)
		{
			int x = (int) blobs.GetBlob(biggestBlob)->MinX();
			int y = (int) blobs.GetBlob(biggestBlob)->MinY();
			int width= (int) blobs.GetBlob(biggestBlob)->MaxX()-x;
			int height= (int) blobs.GetBlob(biggestBlob)->MaxY()-y;

			IplImage* temp = cvCreateImage(cvGetSize(img),IPL_DEPTH_8U, 1);
			IplImage* temp2 = cvCreateImage(cvSize(width, height),IPL_DEPTH_8U, 1);
			IplImage* result = cvCreateImage(cvSize(width, height),IPL_DEPTH_8U, 1);

			cvZero(temp);	// cvCreateImage leaves the pixels uninitialized
			blobs.GetBlob(biggestBlob)->FillBlob(temp,cvScalar(255),x,y);

			cvSetImageROI(temp, cvRect(x, y, width, height));

			cvCopy(temp,temp2);

			// relabel the blob pixels; IplImage rows are padded, so index via widthStep
			for (int r = 0; r < height; r++)
			{
				uchar* tempData = (uchar *)(temp2->imageData + r * temp2->widthStep);
				uchar* resultData = (uchar *)(result->imageData + r * result->widthStep);
				for (int c = 0; c < width; c++)
				{
					if (tempData[c] == 255) resultData[c] = color;
					else	resultData[c] = 0;
				}
			}

			cvResize(result, output);

			cvReleaseImage(&temp);
			cvReleaseImage(&temp2);
			cvReleaseImage(&result);

			return true;
		}
		else
			return false;
	}
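
A hypothetical call site for findBiggestBlobImage(), assuming the method is in scope and the caller owns a pre-allocated single-channel output (the file names and the 64x64 size are invented):

// Extract the biggest blob of a binary mask into a 64x64 label image.
IplImage* mask   = cvLoadImage( "mask.png", CV_LOAD_IMAGE_GRAYSCALE );
IplImage* output = cvCreateImage( cvSize( 64, 64 ), IPL_DEPTH_8U, 1 );

if ( findBiggestBlobImage( mask, 255, output ) )
	cvSaveImage( "biggest_blob.png", output );	// output holds the blob, relabeled and resized

cvReleaseImage( &output );
cvReleaseImage( &mask );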
Example #20
void iptask::markerDetect(void)
{
     IplImage * frame,*img_hsv,*img_proc,* new1;
     CvMemStorage * storage = cvCreateMemStorage(0);
     ros::NodeHandle n;
     ros::Publisher marker = n.advertise<ikat_ip_data::ip_marker_data>("marker_data",3);
     ros::Rate looprate(5);
     int count = 0;
     CvSeq * contours,*final_contour;
     int total_con;
     double maxarea;
     marker_data * Data =(marker_data *)malloc(sizeof(marker_data));
     CBlobResult blobs;
     CBlob * currentblob;
     CvPoint2D32f vertices[4];
     //CvCapture * img_video=cvCaptureFromAVI("downward-pipe-15_56_17.avi");
     frame=cvQueryFrame(img);
     cvNamedWindow("Image Actual");
     cvNamedWindow("final Image");
     img_hsv=cvCreateImage(cvGetSize(frame),8,3);
     img_proc=cvCreateImage(cvGetSize(frame),8,1);
     new1=cvCreateImage(cvGetSize(frame),8,1);
     while(ros::ok())
     {
         ikat_ip_data::ip_marker_data msg;
         IplImage * img_con=cvCreateImage(cvGetSize(frame),8,1);
         frame=cvQueryFrame(img);
         if(!frame)
                 break;
         cvShowImage("Image Actual",frame);
         cvCvtColor(frame,img_hsv,CV_RGB2HSV);
         cvInRangeS(img_hsv,cvScalar(100,100,100),cvScalar(120,170,255),img_proc);
         cvSmooth(img_proc,img_proc,CV_GAUSSIAN,11,11);
         cvErode(img_proc,img_proc);
         blobs=CBlobResult(img_proc,NULL,0);
         blobs.Filter(blobs,B_EXCLUDE,CBlobGetArea(),B_LESS,75);
         for (int i = 0; i < blobs.GetNumBlobs(); i++ )
         {
                 currentblob = blobs.GetBlob(i);
                 currentblob->FillBlob(img_proc,cvScalar(255));
         }
         cvCanny(img_proc,img_proc,10,200);
         total_con=cvFindContours(img_proc,storage,&contours,sizeof(CvContour),CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
         if(total_con == 0 || !contours || contours->total == 0)
         {
             cvReleaseImage(&img_con);	// avoid leaking the per-frame image
             continue;
         }
         final_contour=cvApproxPoly(contours,sizeof(CvContour),storage,CV_POLY_APPROX_DP,1,1);
         maxarea=0;
         cvZero(img_con);
         CvBox2D rect;
         while(final_contour)
         {
              rect=cvMinAreaRect2(final_contour, storage);
              if(rect.size.height*rect.size.width>maxarea)
              {
                  Data->center.x=rect.center.x;
                  Data->center.y=rect.center.y;
                  Data->size.x=rect.size.width;
                  Data->size.y=rect.size.height;
                  Data->angle=rect.angle;
                  maxarea=rect.size.height*rect.size.width;
                  msg.Marker_data[0]=Data->center.x;
                  msg.Marker_data[1]=Data->center.y;
                  msg.Marker_data[2]=Data->angle;
              }
              final_contour=final_contour->h_next;
         }
         cvBoxPoints(rect,vertices);
         cvLine(frame,cvPointFrom32f(vertices[0]),cvPointFrom32f(vertices[1]),cvScalarAll(255),2);
         cvLine(frame,cvPointFrom32f(vertices[1]),cvPointFrom32f(vertices[2]),cvScalarAll(255),2);
         cvLine(frame,cvPointFrom32f(vertices[2]),cvPointFrom32f(vertices[3]),cvScalarAll(255),2);
         cvLine(frame,cvPointFrom32f(vertices[3]),cvPointFrom32f(vertices[0]),cvScalarAll(255),2);
         ROS_INFO("center x :[%f]",msg.Marker_data[0]);
         ROS_INFO("center y :[%f]",msg.Marker_data[1]);
         ROS_INFO("angle : [%f]",msg.Marker_data[2]);
         marker.publish(msg);
         cvShowImage("final Image",frame);
         char c=cvWaitKey(33);
         if  (c==27)
         break;
         ros::spinOnce();
         ++count;
         looprate.sleep();
         cvReleaseImage(&img_con);	// release the per-frame image
     }
     cvDestroyWindow("Image Actual");
     cvDestroyWindow("final Image");
     free(Data);
}
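
markerDetect() derives the marker pose from cvMinAreaRect2() over each approximated contour. With the C++ API, the same measurement and drawing reduce to a few lines (a sketch, assuming OpenCV 2.x or newer):

#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
using namespace cv;

// Minimum-area rectangle around a contour: center/size/angle, as cvMinAreaRect2 above.
RotatedRect markerBox(const std::vector<Point> &contour)
{
	return minAreaRect(contour);
}

// Draw its four corners, the C++ counterpart of cvBoxPoints() + cvLine().
void drawBox(Mat &frame, const RotatedRect &box)
{
	Point2f v[4];
	box.points(v);
	for (int i = 0; i < 4; i++)
		line(frame, v[i], v[(i + 1) % 4], Scalar::all(255), 2);
}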
Example #21
// starts the auto targeting sequence
void MainWindow::on_startstopbutton_clicked()
{
    shootingstopped=false;
    QImage* currimage=getQImage();
    n=currimage->width();
    k=currimage->height();
    QImage converted = currimage->convertToFormat(QImage::Format_RGB32);	// keep the converted image alive; taking the address of a temporary is undefined behavior
    IplImage* curriplimage = Qimage2IplImage(&converted);
    IplImage* threshedimage=threshimage(curriplimage);
    CBlobResult blobs;
    CBlob* currentblob;
    blobs=CBlobResult(threshedimage,NULL,0);
    blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 150 );
    int j=blobs.GetNumBlobs();
    if(j==0)
    {
        QMessageBox::information(this,"No Targets","No Targets Found!");
        cvReleaseImage(&threshedimage);
        cvReleaseImage(&curriplimage);
        return;
    }
    CBlobGetXCenter XCenter;
    CBlobGetYCenter YCenter;
    for(int i=0;i<blobs.GetNumBlobs();i++)
    {
      tmptargetcenter=new targetcenter;
      currentblob=blobs.GetBlob(i);
      tmptargetcenter->x=XCenter(*currentblob);
      tmptargetcenter->y=YCenter(*currentblob);
      getangles(tmptargetcenter);
      targets.append(tmptargetcenter);
    }
    checkformissiles();
    ui->targetcountdisplay->display(targets.size());
    setupautobuttons();
    qApp->processEvents();
    ui->timeNumber->display(0);
    timeshooting.start(100);
    turr->initAngle();
    if(shootingstopped)
    {
        timeshooting.stop();
        targets.clear();
        return;
    }
    foreach(targetcenter* target,targets)
    {
        checkformissiles();
        qApp->processEvents();
        turr->setAngle(target->beta,target->betav);
        ui->shotcountdisplay->display(turr->currentmissilecount());
        if(shootingstopped)
        {
            timeshooting.stop();
            targets.clear();
            delete target;
            return;
        }
        ui->targetcountdisplay->display(ui->targetcountdisplay->value()-1);
        qApp->processEvents();
        delete target;
    }
}
Example #22
void App::Update(Image &camera)
{
    /*camera=camera.Scale(camera.m_Image->width/2,
                        camera.m_Image->height/2);
    */
    //cvFlip(camera.m_Image, NULL, 0);

	///////////////////////////////////
	// dispatch from input

	int key=cvWaitKey(10);

//    usleep(500);

	static int t=150;
    static bool viewthresh=false;
    static bool off=false;
    static int spirit=0;
    static int crop_x=0;
    static int crop_y=0;
    static int crop_w=camera.m_Image->width;
    static int crop_h=camera.m_Image->height;

	switch (key)
	{
    case 't': viewthresh=!viewthresh; break;
    case 'q': t--; break;
    case 'w': t++; break;
    case 'e': t-=20; break;
    case 'r': t+=20; break;
    case 'o': off=!off; break;
    case 'p': spirit++; break;
    case 'z': crop_x+=10; break;
    case 'x': crop_x-=10; break;
    case 'c': crop_y+=10; break;
    case 'v': crop_y-=10; break;
    case 'b': crop_w+=10; break;
    case 'n': crop_w-=10; break;
    case 'm': crop_h+=10; break;
    case ',': crop_h-=10; break;
	}

    if (crop_x<0) crop_x=0;
    if (crop_x>=camera.m_Image->width) crop_x=camera.m_Image->width-1;
    if (crop_y<0) crop_y=0;
    if (crop_y>=camera.m_Image->height) crop_y=camera.m_Image->height-1;
    if (crop_w+crop_x>camera.m_Image->width)
    {
        crop_w=camera.m_Image->width-crop_x;
    }
    if (crop_h+crop_y>camera.m_Image->height)
    {
        crop_h=camera.m_Image->height-crop_y;
    }

    if (off)
    {
        sleep(1);
        cerr<<"off..."<<endl;
        return;
    }

    Image thresh=camera.RGB2GRAY().SubImage(crop_x,crop_y,crop_w,crop_h);
    cvThreshold(thresh.m_Image,thresh.m_Image,t,255,CV_THRESH_BINARY);
    // copy the threshold into a colour image
    Image tofill=thresh.GRAY2RGB();
    cvFloodFill(tofill.m_Image,cvPoint(camera.m_Image->width/2,
                                       camera.m_Image->height/2),
                CV_RGB(0,255,0),cvScalar(0),cvScalar(255));

    CBlobResult blobs;
    blobs = CBlobResult( thresh.m_Image, NULL, 255 );
    // exclude the ones smaller than param2 value
    blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 10);

    CBlob *currentBlob;
    Image *out=NULL;

    if (key=='s')
    {
        // add the alpha channel
        Image src=camera.SubImage(crop_x,crop_y,crop_w,crop_h);
        out = new Image(src.m_Image->width,
                        src.m_Image->height, 8, 4);

        for(int y=0; y<src.m_Image->height; y++)
        {

            for(int x=0; x<src.m_Image->width; x++)
            {
                CvScalar col = cvGet2D(src.m_Image,y,x);
                CvScalar alpha = cvGet2D(tofill.m_Image,y,x);
                if (alpha.val[0]==0 &&
                    alpha.val[1]==255 &&
                    alpha.val[2]==0)
                    col.val[3]=0;
                else
                    col.val[3]=255;
                cvSet2D(out->m_Image,y,x,col);
            }
        }
    }

    if (key=='s')
    {
        cerr<<"deleting old images in islands/"<<endl;
        int r=system("rm islands/*");
    }

    list<CvRect> allrects;

    for (int i = 0; i < blobs.GetNumBlobs(); i++ )
    {
        currentBlob = blobs.GetBlob(i);
        allrects.push_back(currentBlob->GetBoundingBox());
    }

    list<CvRect> filteredrects=allrects;

    /* for (list<CvRect>::iterator i=allrects.begin();
         i!=allrects.end(); ++i)
    {
        bool in=false;
        for (list<CvRect>::iterator j=allrects.begin();
             j!=allrects.end(); ++j)
        {
            if (Inside(*i,*j)) in=true;
        }
        if (!in) filteredrects.push_back(*i);
        }*/

    unsigned int instance = rand();

    unsigned int count=0;
    for (list<CvRect>::iterator i=filteredrects.begin();
         i!=filteredrects.end(); ++i)
    {
        CvRect rect = *i;

        if (key=='s')
        {
            Image island = out->SubImage(rect.x,rect.y,
                                         rect.width,rect.height);

            char buf[256];
            sprintf(buf,"islands/island-%d-%d-%d.png",count,
                    rect.x+rect.width/2,
                    rect.y+rect.height/2);
            cerr<<"saving "<<buf<<endl;
            island.Save(buf);

            sprintf(buf,"dump/island-%d-%d-%d-%d.png",
                    instance,
                    count,
                    rect.x+rect.width/2,
                    rect.y+rect.height/2);
            cerr<<"saving "<<buf<<endl;
            island.Save(buf);

        }
        else
        {
            cvRectangle(camera.m_Image,
                        cvPoint(crop_x+rect.x,crop_y+rect.y),
                        cvPoint(crop_x+rect.x+rect.width,
                                crop_y+rect.y+rect.height),
                        colors[1]);
        }
        count++;
    }

    if (key=='s')
    {
        cerr<<"copying images to server"<<endl;
        //int r=system("scp -r islands [email protected]:/home/garden/GerminationX/oak/");
        string path("/home/dave/code/lirec/scenarios/GerminationX/oak/public/");
        path+=string(spirits[spirit%3]);
        string command=string("rm ")+path+string("/*.*");
        int r=system(command.c_str());
        string command2=string("cp islands/* ")+path;
        r=system(command2.c_str());
        //cerr<<"finished copying...("<<r<<")"<<endl;
    }

    if (viewthresh) camera=tofill;

    char buf[256];
    sprintf(buf,"spirit: %s thresh: %d", spirits[spirit%3], t);
    cvPutText(camera.m_Image, buf, cvPoint(10,20),
              &m_Font, colors[0]);

    cvRectangle(camera.m_Image,
                cvPoint(crop_x,crop_y),
                cvPoint(crop_x+crop_w,crop_y+crop_h),
                colors[2]);

    if (out!=NULL) delete out;
}
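
The commented-out rectangle filter in Update() calls an Inside(a, b) predicate that is not part of this listing. A sketch of the presumed semantics (a lies entirely within b, excluding the identical rectangle so a blob does not filter itself):

#include <opencv2/core/core_c.h>

// Hypothetical helper: true if rect a is strictly contained in rect b.
bool Inside(const CvRect &a, const CvRect &b)
{
	bool identical = a.x == b.x && a.y == b.y &&
	                 a.width == b.width && a.height == b.height;
	return !identical &&
	       a.x >= b.x && a.y >= b.y &&
	       a.x + a.width  <= b.x + b.width &&
	       a.y + a.height <= b.y + b.height;
}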
Example #23
SHModel* ShapeModel( CvCapture* g_capture,StaticBGModel* BGModel , BGModelParams* BGParams){

	int num_frames = 0;
	int total_blobs=0;
	float Sumatorio = 0;
	float SumatorioDes = 0;
	IplImage* frame = NULL;

	STFrame* frameData = NULL;
	SHModel* Shape = NULL;

	CBlobResult blobs;
	CBlob *currentBlob;

	IplImage* ImGris = cvCreateImage(cvGetSize( BGModel->Imed ), 8, 1 );
	IplImage* Imblob = cvCreateImage(cvGetSize( BGModel->Imed ), 8, 3 );
	IplImage* lastBG = cvCreateImage( cvGetSize( BGModel->Imed ),8, 1 );
	IplImage* lastIdes = cvCreateImage( cvGetSize( BGModel->Imed ), IPL_DEPTH_32F, 1);
	cvZero(Imblob);
	// Initialize the shape model structure

	Shape = ( SHModel *) malloc( sizeof( SHModel));
	if ( !Shape ) {error(4);return 0;}
	Shape->FlyAreaDes = 0;
	Shape->FlyAreaMedia=0;
	//Set the values of the areas vector to 0


	//BLOB EXTRACTION AND COMPUTATION OF MEDIAN/MEAN AND STANDARD DEVIATION OVER ALL FRAMES
	cvSetCaptureProperty( g_capture,1,BGParams->initDelay ); // set the start position
	while( num_frames < ShParams->FramesTraining ){
		frame = cvQueryFrame( g_capture );
		if ( !frame ) {
			error(2);
			break;
		}
		if ( (cvWaitKey(10) & 255) == 27 ) break;

		ImPreProcess( frame, ImGris, BGModel->ImFMask, 0, BGModel->DataFROI);

		// Load the background data
		if(!frameData ) { //on the first iteration, initialize the dynamic model from the static one
			// Initialize the data structure for the new frame
			frameData = InitNewFrameData( frame );
			cvCopy(  BGModel->Imed,frameData->BGModel);
			cvSet(frameData->IDesvf, cvScalar(1));
			cvCopy(  BGModel->Imed,lastBG);
		}
		else{	// load the most recent background parameters.
			cvCopy( lastBG, frameData->BGModel);
			cvCopy( lastIdes,frameData->IDesvf );
		}
	//	obtain the FG mask and the list of data for its blobs.
		//// BACKGROUND UPDATE
		// update the background
		// set the parameters

		UpdateBGModel( ImGris,frameData->BGModel,frameData->IDesvf, BGParams, BGModel->DataFROI, BGModel->ImFMask );
		/////// BACKGROUND DIFFERENCE. Obtain the foreground mask
		BackgroundDifference( ImGris, frameData->BGModel,frameData->IDesvf, frameData->FG ,BGParams, BGModel->DataFROI);

		// save these images to seed the next frame
		cvCopy( frameData->BGModel, lastBG);
		cvCopy(  frameData->IDesvf,lastIdes);

		//Get the blobs and exclude those whose size is of no interest
//		cvSetImageROI(  frameData->FG , BGModel->DataFROI);

		blobs = CBlobResult( frameData->FG, NULL, 100, true );
		blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(),B_GREATER,100);
		blobs.Filter( blobs, B_EXCLUDE, CBlobGetPerimeter(),B_GREATER,1000);

		int j = blobs.GetNumBlobs();	//number of blobs found in this frame

		total_blobs=total_blobs+j; // running count of blobs found across all frames

		//Walk blob by blob and collect the AREA characteristics of each one

		for (int i = 0; i < blobs.GetNumBlobs(); i++ ){ //for 1

			currentBlob = blobs.GetBlob(i);

			if(ShParams->SHOW_DATA_AREAS) {
				//printf("Area blob %d = %f ",i,currentBlob->area);
			}
			//Accumulate sums to estimate the mean of the areas

			Sumatorio = Sumatorio + currentBlob->area;
			SumatorioDes = SumatorioDes + currentBlob->area*currentBlob->area;

			muestrearAreas( currentBlob->area);
			currentBlob->FillBlob( Imblob, CV_RGB(255,0,0));

		}//end of for 1

		Shape->FlyAreaMedia = Sumatorio / total_blobs;
		Shape->FlyAreaDes = (SumatorioDes / total_blobs) - Shape->FlyAreaMedia*Shape->FlyAreaMedia;

		num_frames += 1;
//		cvResetImageROI(frameData->FG);
		DraWWindow(Imblob, frameData, BGModel, SHOW_SHAPE_MODELING, COMPLETO);
		DraWWindow(Imblob, frameData, BGModel, SHAPE,SIMPLE );

	}
	desvanecer( NULL, 20);
	Shape->FlyAreaDes = sqrt(abs(Shape->FlyAreaDes) ) ;
	if( Shape->FlyAreaDes == 0){
		printf("warning: zero area deviation\n");	// degenerate case
	}

	//Report the mean and deviation over all frames

	if(ShParams->SHOW_DATA_AREAS )
		printf("\n MEAN AREA: %f \t AREA STD DEV: %f",Shape->FlyAreaMedia,Shape->FlyAreaDes);

	free( ShParams);
	liberarSTFrame( frameData );
	cvReleaseImage( &ImGris);
	cvReleaseImage( &Imblob);
	cvReleaseImage( &lastIdes);
	cvReleaseImage( &lastBG);

	return Shape;

}//end of ShapeModel
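
ShapeModel() estimates the variance as E[a²] − E[a]², which can cancel catastrophically when areas are large. Welford's online update is a numerically safer drop-in for the two accumulators (a sketch, not part of the original code):

#include <cmath>

// Welford's online mean/variance over a stream of blob areas.
struct RunningStats
{
	long long n = 0;
	double mean = 0.0, m2 = 0.0;

	void push(double a)
	{
		n++;
		double delta = a - mean;
		mean += delta / n;
		m2 += delta * (a - mean);	// uses the updated mean
	}
	double stddev() const { return n > 1 ? std::sqrt(m2 / n) : 0.0; }
};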
Example #24
void detect2(Mat img, vector<Mat>& regionsOfInterest,vector<Blob>& blobs){
/*	Mat blurred; 
	GaussianBlur(img, blurred, Size(), _SharpSigma, _SharpSigma);
	Mat lowContrastMask = abs(img - blurred) < _SharpThreshold;
	Mat sharpened = img*(1+_SharpAmount) + blurred*(-_SharpAmount);
	img.copyTo(sharpened, lowContrastMask);
	sharpened.copyTo(img);*/
	/************* INITIALIZATION **********/
	Mat gray; 
	Mat out = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat masked = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat morph = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat bwmorph = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat cont = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat maskHSV = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat whiteMaskMasked = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat whiteMaskOrig = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat Bands[3];
	Mat noBackMask = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat kernelEr = getStructuringElement(MORPH_ELLIPSE,Size(5,5));
	Mat thMasked; Mat thOrig; Mat bwOrig; Mat bwNoBackMask;
	Mat kernelOp = getStructuringElement(MORPH_ELLIPSE,Size(13,13));
	vector<Mat> BGRbands;  split(img,BGRbands);
	vector< vector<Point> > contours;
	/***************************************/
	/*cvtColor(img,gray,CV_BGR2GRAY);
	gray = (gray!=0);
	imshow("gray",gray);*/
	/*Shadow and background removal*/
//	masked = applyMaskBandByBand(maskHSV,BGRbands); split(masked,BGRbands);
	
	/*Remove the background and threshold so that only what is white stands out*/
	noBackMask = backgroundRemoval(img);
	masked = applyMaskBandByBand(noBackMask,BGRbands);
/*
	whiteMaskOrig = computeWhiteMaskLight(img);
	whiteMaskOrig = whiteMaskOrig + computeWhiteMaskShadow(img);

	whiteMaskMasked = computeWhiteMaskLight(masked);
	whiteMaskMasked = whiteMaskMasked + computeWhiteMaskShadow(masked);
*/
	CBlobResult blobsRs;
	blobsRs = computeWhiteMaskOtsu(img, img, blobsRs, img.rows*img.cols, img.rows*img.cols, 0.8, 0.8, 30, 200, 0);
	
	//Mat newimg(img.size(),img.type());
    whiteMaskOrig.setTo(0);
    for(int i=0;i<blobsRs.GetNumBlobs();i++){
			 blobsRs.GetBlob(i)->FillBlob(whiteMaskOrig,CV_RGB(255,255,255),0,0,true);
    }

	threshold(masked,whiteMaskMasked,0,255,THRESH_BINARY);
	cvtColor(whiteMaskMasked,whiteMaskMasked,CV_BGR2GRAY);
	bitwise_or(whiteMaskMasked,whiteMaskOrig,thOrig);
	masked = applyMaskBandByBand(thOrig,BGRbands);
#if DO_MORPH
	/*Morphological operations to fill holes and smooth jagged borders*/
	dilate(masked,morph,kernelEr);
	erode(morph,morph,kernelEr);
	
	erode(morph,morph,kernelOp);
	dilate(morph,morph,kernelOp);
#else
	morph = masked;
#endif
	/*Find connected components and remove them based on area*/
	cvtColor(morph,bwmorph,CV_BGR2GRAY);
	findContours(bwmorph, contours, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
	vector<double> areas = computeArea(contours);
	for(int j = areas.size()-1; j>=0; j--){
		if(areas.at(j)>MAX_AREA || areas.at(j)<MIN_AREA )
			contours.erase(contours.begin()+j);
	}

	/*Compute bounding rectangles from the image of connected components of interest*/
	 vector<Rect> boundRect( contours.size() );
	 vector<vector<Point> > contours_poly( contours.size() );
	 vector<Point2f>center( contours.size() ); 
	 vector<float>radius( contours.size() );
	 /*Build the final image and extract the regions of interest*/
	for (int idx = 0; idx < contours.size(); idx++){
		Blob b; b.originalImage = &img;
		Scalar color(255);
		approxPolyDP( Mat(contours[idx]), contours_poly[idx], 3, true );
		boundRect[idx] = boundingRect( Mat(contours_poly[idx]) );
		
		minEnclosingCircle( (Mat)contours_poly[idx], center[idx], radius[idx] );
	//	Rect tmpRect(center[idx].x-boundRect[idx].width/2,center[idx].y-boundRect[idx].height/2,boundRect[idx].width,boundRect[idx].height);
		Rect tmpRect(center[idx].x-radius[idx],center[idx].y-radius[idx],radius[idx]*2,radius[idx]*2);
		//Rect tmpRect = boundRect[idx];
		Rect toPrint; 
		tmpRect += Size(tmpRect.width*RECT_AUGMENT ,tmpRect.height*RECT_AUGMENT);			  // grow the box by RECT_AUGMENT
		tmpRect -= Point((tmpRect.width*RECT_AUGMENT)/2 , (tmpRect.height*RECT_AUGMENT)/2 ); // re-center the rectangle
		
		drawContours(cont, contours, idx, color, CV_FILLED, 8);
		if(tmpRect.x>0 && tmpRect.y>0 && tmpRect.x+tmpRect.width < morph.cols && tmpRect.y+tmpRect.height < morph.rows){ //if the enlarged rectangle
																														// does NOT fall outside the image, accept it
			regionsOfInterest.push_back(masked(tmpRect));
			b.cuttedWithBack = img(tmpRect);
			b.cuttedImages = masked(tmpRect);
			b.blobsImage = cont(tmpRect);
			b.rectangles = tmpRect;
			toPrint = tmpRect;
		}
		else{
			toPrint = boundRect[idx];
			regionsOfInterest.push_back(masked(boundRect[idx]));
			b.cuttedImages = masked(boundRect[idx]);
			b.cuttedWithBack = img(boundRect[idx]);
			b.rectangles = boundRect[idx];
			b.blobsImage = cont(boundRect[idx]);
		}
		Point centroid = computeCentroid(contours[idx]);
		b.centroid = centroid;
		b.area = contourArea(contours[idx]);
		b.distance = HEIGH - centroid.y;
		
		/*rectangle( cont, toPrint.tl(), toPrint.br(), color, 2, 8, 0 );
		circle( cont, center[idx], (int)radius[idx], color, 2, 8, 0 );*/
		blobs.push_back(b);
	}
	
	//out = out+cont;
	bitwise_xor(out,cont,out);
	
	/*imshow("img",img);
	imshow("out",out);
	waitKey(0);*/
}
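
The block commented out at the top of detect2() is an unsharp-masking pass. Extracted into a standalone routine it looks like this (a sketch; sigma, amount, and threshold correspond to the _Sharp* tunables assumed by the original):

#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;

// Unsharp masking: sharpen img in place, leaving low-contrast pixels untouched.
void unsharpMask(Mat &img, double sigma, double amount, double threshold)
{
	Mat blurred;
	GaussianBlur(img, blurred, Size(), sigma, sigma);
	Mat lowContrastMask = abs(img - blurred) < threshold;	// flat regions to preserve
	Mat sharpened = img * (1 + amount) + blurred * (-amount);
	img.copyTo(sharpened, lowContrastMask);	// restore the flat regions
	sharpened.copyTo(img);
}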
Example #25
double findShadow(IplImage *l_img, int hue,int sat,int val,int threshold, double blobLowLimit,double blobHighLimit){
	// Input: HSV value of the color blob being sought, an acceptable threshold around that color, and the min and max blob sizes of interest.
	// Output: the number of blobs found; blob centers and labels are stored in the xCent/yCent/valCent arrays.
	// Image variables
	IplImage* local_copy = cvCloneImage(l_img);
	IplImage* imageSmooth = cvCreateImage( cvGetSize(l_img),8,3);//Gaussian-filtered image
	IplImage* imageSuperSmooth = cvCreateImage( cvGetSize(l_img),8,3);//heavily Gaussian-filtered image
	IplImage* imageHSV = cvCreateImage( cvGetSize(l_img),8,3); //HSV image
	IplImage* i1 = cvCreateImage( cvGetSize(l_img),8,1);//desired color filtered image
	IplImage* i2 = cvCreateImage( cvGetSize(l_img),8,1);//desired color filtered image
	IplImage* i_ts = cvCreateImage( cvGetSize(l_img),8,1);//desired color filtered image
	IplImage* planeH = cvCreateImage(cvGetSize(l_img),8,1); //Hue
	IplImage* planeS = cvCreateImage(cvGetSize(l_img),8,1); //Saturation
	IplImage* planeV = cvCreateImage(cvGetSize(l_img),8,1); //Brightness
	IplImage* planeSmoothV = cvCreateImage(cvGetSize(l_img),8,1); //Brightness
	IplImage* imageSmoothHSV = cvCreateImage( cvGetSize(l_img),8,3); //HSV image
	IplImage* obsdetmask = cvCreateImage( cvGetSize(l_img),8,1); //Obs det mask
	IplImage* obsdetmask_dil = cvCreateImage( cvGetSize(l_img),8,1); //Obs det mask
	IplImage* obsdetmask_b = cvCreateImage( cvGetSize(l_img),8,1); //Obs det mask
	IplImage* obsdetmask_bdil = cvCreateImage( cvGetSize(l_img),8,1); //Obs det mask
	//Blob variables
	CBlobResult mask_bls;
	CBlob	mask_bl;
	CBlobResult blobs;
	CBlob blob;
	CBlobResult blobs1;
	CBlob blob1;
	CBlobGetXCenter getXCenter;
	CBlobGetYCenter getYCenter;
	//Output Variable
	//Gaussian filter
	cvSmooth(l_img,imageSmooth,CV_GAUSSIAN,13,13,0,0);
	cvSmooth(l_img,imageSuperSmooth,CV_GAUSSIAN,41,41,0,0);
	//cvShowImage("View2a",imageSmooth);
	
	
	
	//Convert BGR to HSV
	cvCvtColor(imageSmooth,imageHSV,CV_BGR2HSV);
	cvCvtColor(imageSuperSmooth,imageSmoothHSV,CV_BGR2HSV);
	cvCvtPixToPlane(imageSuperSmooth,NULL,NULL,planeSmoothV,0);
	cvCvtPixToPlane(imageHSV, planeH,planeS,planeV,0);//Extract the 3 color components
	cvSetImageROI(imageHSV,cvRect(0,imageHSV->height/3,imageHSV->width,imageHSV->height*2/3));
	IplImage* planeH1 = cvCreateImage(cvGetSize(imageHSV),8,1); //Hue
	IplImage* planeS1 = cvCreateImage(cvGetSize(imageHSV),8,1); //Saturation
	IplImage* planeV1 = cvCreateImage(cvGetSize(imageHSV),8,1); //Brightness
	cvCvtPixToPlane(imageHSV, planeH1,planeS1,planeV1,0);//Extract the 3 color components
	cvResetImageROI(imageHSV);
	
	
	cvShowImage("Dark_Value",planeV);
	cvShowImage("Dark_Sat",planeS);
	cvShowImage("Dark_Hue",planeH);
	cvSet(obsdetmask, cvScalar(0,0,0));
	cv::waitKey(3);
	
	
	int maxDark = 0;
	int minDark = 255;
	int minDarknessValue=0;
	int maxDarknessValue = 0;
	int midDarknessValue = 0;
	//Filter image for desired Color, output image with only desired color highlighted remaining
	for( int y = 0; y < planeH1->height; y++ ){
		unsigned char* h = &CV_IMAGE_ELEM( planeH1, unsigned char, y, 0 );
		unsigned char* s = &CV_IMAGE_ELEM( planeS1, unsigned char, y, 0 );
		unsigned char* v = &CV_IMAGE_ELEM( planeV1, unsigned char, y, 0 );
		for( int x = 0; x < planeH1->width*planeH1->nChannels; x += planeH1->nChannels ){
		  //if(x<5){ROS_INFO("hsv[x] is %d,%d,%d",h[x],v[x],x]);}
			//int f= HSV_filter(h[x],s[x],v[x],threshold,minDarknessValue,maxDarknessValue,midDarknessValue,hue,sat,val);
			int diff = abs((h[x]-hue));
			if(((diff < threshold)||(v[x]<MIN_BRIGHT)||(s[x]<MIN_SAT)))
			{ 
			  ((uchar *)(obsdetmask->imageData + (y+planeH->height-planeH1->height)*obsdetmask->widthStep))[x]=255;
			   if(v[x]<minDark)
			   {minDark=v[x];}
			    if(v[x]>maxDark)
			    {maxDark=v[x];}
			}
			else
			{
			  ((uchar *)(obsdetmask->imageData + (y+planeH->height-planeH1->height)*obsdetmask->widthStep))[x]=0;
			}
		}
	}
	cvDilate(obsdetmask,obsdetmask_dil,NULL,1);
	cvShowImage("Dark_ObsDetPre",obsdetmask_dil);
	mask_bls = CBlobResult(obsdetmask_dil,NULL,0);
	mask_bls.Filter(mask_bls,B_EXCLUDE,CBlobGetArea(),B_LESS,MASK_MIN_BLOB); // Filter Blobs with min and max size
	mask_bls.GetNthBlob( CBlobGetArea(), 0, mask_bl );
	cvSet(obsdetmask_b, cvScalar(0,0,0));
	mask_bl.FillBlob(obsdetmask_b,CV_RGB(255,255,255));
	cvDilate(obsdetmask_b,obsdetmask_bdil,NULL,5);
	cvShowImage("Dark_ObsDet",obsdetmask_bdil);
	cvWaitKey(3);
	minDarknessValue=((maxDark-minDark)*LOW_PERCENT)+minDark;
	if(minDarknessValue<VALUE_LOW_LIM){minDarknessValue=VALUE_LOW_LIM;}
	maxDarknessValue=(maxDark)-((maxDark-minDark)*HIGH_PERCENT);
	midDarknessValue = .5*(minDarknessValue+maxDarknessValue);
	ROS_INFO("minDark = %d, maxDark = %d, minDV = %d, maxDV = %d",minDark,maxDark,minDarknessValue,maxDarknessValue);
	for( int y = 0; y < planeH->height; y++ ){
		unsigned char* h = &CV_IMAGE_ELEM( planeH, unsigned char, y, 0 );
		unsigned char* s = &CV_IMAGE_ELEM( planeS, unsigned char, y, 0 );
		unsigned char* v = &CV_IMAGE_ELEM( planeV, unsigned char, y, 0 );
		unsigned char* m = &CV_IMAGE_ELEM( obsdetmask_bdil, unsigned char, y, 0 );
		for( int x = 0; x < planeH->width*planeH->nChannels; x += planeH->nChannels ){
		  //if(x<5){ROS_INFO("hsv[x] is %d,%d,%d",h[x],v[x],x]);}
			 int f = HSV_filter(h[x],s[x],v[x],m[x],threshold,minDarknessValue,maxDarknessValue,midDarknessValue,hue,sat,val);
			 if((f==0))//Non-floor
			 {
				 ((uchar *)(i1->imageData + y*i1->widthStep))[x]=0;
				 ((uchar *)(i_ts->imageData + y*i_ts->widthStep))[x]=0;
				 ((uchar *)(i2->imageData + y*i2->widthStep))[x]=0;
			 }
			 else if(f==1)	//dark
			 {
				((uchar *)(i1->imageData + y*i1->widthStep))[x]=255;
				((uchar *)(i_ts->imageData + y*i_ts->widthStep))[x]=64;
				((uchar *)(i2->imageData + y*i2->widthStep))[x]=0;
			 }
			 else if(f==2)
			 {
				((uchar *)(i_ts->imageData + y*i_ts->widthStep))[x]=128;
				((uchar *)(i1->imageData + y*i1->widthStep))[x]=0;
				((uchar *)(i2->imageData + y*i2->widthStep))[x]=0;
			 }
			 else if(f==3)
			 {
				((uchar *)(i_ts->imageData + y*i_ts->widthStep))[x]=196;
				((uchar *)(i1->imageData + y*i1->widthStep))[x]=0;
				((uchar *)(i2->imageData + y*i2->widthStep))[x]=0;
			   
			 }
			 else if(f==4)	//bright
			 {
				 ((uchar *)(i_ts->imageData + y*i_ts->widthStep))[x]=255;
				 ((uchar *)(i1->imageData + y*i1->widthStep))[x]=0;
				 ((uchar *)(i2->imageData + y*i2->widthStep))[x]=255;
			 }else{	
			   
			 }
		}
	}

	
	cvShowImage("Dark_Triscale",i_ts);
	cvWaitKey(3);
	//Blob stuff
	blobs = CBlobResult(i1,NULL,0);   //Get blobs of image
	blobs1 =CBlobResult(i2,NULL,0);
	blobs.Filter(blobs,B_INCLUDE,CBlobGetArea(),B_INSIDE,blobLowLimit,blobHighLimit);  // Filter Blobs with min and max size
	blobs1.Filter(blobs1,B_INCLUDE,CBlobGetArea(),B_INSIDE,blobLowLimit,blobHighLimit);
	//Set up data array
	xCent = new int[blobs.GetNumBlobs()+blobs1.GetNumBlobs()];
	yCent = new int[blobs.GetNumBlobs()+blobs1.GetNumBlobs()];
	valCent = new int[blobs.GetNumBlobs()+blobs1.GetNumBlobs()];
	
	ROS_INFO("size:%d  ",blobs.GetNumBlobs()+blobs1.GetNumBlobs());
	double data;
	if(maxDark>190)
	{
	 data=blobs.GetNumBlobs()+blobs1.GetNumBlobs();	// total number of blobs found
	//ROS_INFO("Blobs gotten.");
	cvWaitKey(3);
	for (int i = 0; i < blobs.GetNumBlobs(); i++ )
	{ // Get Blob Data 
	    blob = blobs.GetBlob(i);//cycle through each blob
		xCent[i]= getXCenter(blob); //X center
		yCent[i]= getYCenter(blob); //Y center
		valCent[i]= 1; //label: dark blob
		//debug
		blob.FillBlob(local_copy, cvScalar(255, 0, 0)); // visual marker for testing
      }    
      //ROS_INFO("loop 1 done.");
      cvWaitKey(3);
      for (int i = 0; i < blobs1.GetNumBlobs(); i++ )
      { // Get Blob Data 
	      blob = blobs1.GetBlob(i);//cycle through each blob
		xCent[blobs.GetNumBlobs()+i]= getXCenter(blob); //X center
		yCent[blobs.GetNumBlobs()+i]= getYCenter(blob); //Y center
		valCent[blobs.GetNumBlobs()+i]= -1; //label: bright blob

		  //debug
		  blob.FillBlob(local_copy, cvScalar(0, 255, 0)); // visual marker for testing
      }    
	  
	}else{
    //
	data=blobs.GetNumBlobs();	// total number of blobs found
	//ROS_INFO("Blobs gotten.");
	cvWaitKey(3);
	for (int i = 0; i < blobs.GetNumBlobs(); i++ )
	{ // Get Blob Data 
	    blob = blobs.GetBlob(i);//cycle through each blob
		xCent[i]= getXCenter(blob); //X center
		yCent[i]= getYCenter(blob); //Y center
		valCent[i]= 1; //label: dark blob
		//debug
		blob.FillBlob(local_copy, cvScalar(255, 0, 0)); // visual marker for testing
      }    
   
   
      }
   cvShowImage("Dark_Detected",local_copy);
    //cv::imshow("View",cv_ptr->image);
    cv::waitKey(3);
    
    
      cvReleaseImage(&local_copy);
      cvReleaseImage(&imageSmooth);
      cvReleaseImage(&imageSuperSmooth);
      cvReleaseImage(&imageHSV);
      cvReleaseImage(&i1);
      cvReleaseImage(&i2);
      cvReleaseImage(&planeSmoothV);
      cvReleaseImage(&imageSmoothHSV);
      cvReleaseImage(&i_ts);
      cvReleaseImage(&planeH);
      cvReleaseImage(&planeS);
      cvReleaseImage(&planeV);
      cvReleaseImage(&planeH1);
      cvReleaseImage(&planeS1);
      cvReleaseImage(&planeV1);
      cvReleaseImage(&obsdetmask);
      cvReleaseImage(&obsdetmask_dil);
      cvReleaseImage(&obsdetmask_b);
      cvReleaseImage(&obsdetmask_bdil);
      return data; //return the blob count
}
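
findShadow() scans the masked V plane pixel by pixel to find minDark/maxDark before deriving its cutoffs. With the C++ API, the same statistics come from a masked cv::minMaxLoc (a sketch under that assumption; lowPercent/highPercent play the roles of LOW_PERCENT/HIGH_PERCENT):

#include <opencv2/core/core.hpp>
using namespace cv;

// Derive the low/high darkness cutoffs from a brightness plane, restricted to a mask.
void darknessCutoffs(const Mat &planeV, const Mat &mask,
                     double lowPercent, double highPercent,
                     int &minDV, int &maxDV)
{
	double minDark, maxDark;
	minMaxLoc(planeV, &minDark, &maxDark, 0, 0, mask);	// stats only where mask != 0
	minDV = (int)((maxDark - minDark) * lowPercent + minDark);
	maxDV = (int)(maxDark - (maxDark - minDark) * highPercent);
}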
Example #26
void
Auvsi_Recognize::extractLetter( void )
{
	typedef cv::Vec<unsigned char, 1> VT_binary;
	#ifdef TWO_CHANNEL
		typedef cv::Vec<T, 2> VT;
	#else
		typedef cv::Vec<T, 3> VT;
	#endif
	typedef cv::Vec<int, 1> IT;
	
	// Erode input slightly
	cv::Mat input;
	cv::erode( _shape, input, cv::Mat() );

	// Remove any small white blobs left over
	CBlobResult blobs;
	CBlob * currentBlob;
	CBlob biggestBlob;
	IplImage binaryIpl = input;

	blobs = CBlobResult( &binaryIpl, NULL, 0 );
	blobs.GetNthBlob( CBlobGetArea(), 0, biggestBlob );

	blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, biggestBlob.Area() );

	for (int i = 0; i < blobs.GetNumBlobs(); i++ )
	{
    	currentBlob = blobs.GetBlob(i);
		currentBlob->FillBlob( &binaryIpl, cvScalar(0));
	}

	// Perform k-means on this region only
	int areaLetter = (int)biggestBlob.Area();
	cv::Mat kMeansInput = cv::Mat( areaLetter, 1, _image.type() );

	// Discard if we couldn't extract a letter
	if( areaLetter <= 0 )
	{
		_letter = cv::Mat( _shape );
		_letter = cv::Scalar(0);
		return;
	}

	cv::MatIterator_<VT_binary> binaryIterator = input.begin<VT_binary>();
	cv::MatIterator_<VT_binary> binaryEnd = input.end<VT_binary>();
	cv::MatIterator_<VT> kMeansIterator = kMeansInput.begin<VT>();

	for( ; binaryIterator != binaryEnd; ++binaryIterator )
	{
		if( (*binaryIterator)[0] > 0 )
		{
			(*kMeansIterator) = _image.at<VT>( binaryIterator.pos() );
			++kMeansIterator;
		}
	}

	// Get k-means labels
	cv::Mat labels = doClustering<T>( kMeansInput, 2, false );	
	int numZeros = areaLetter - cv::countNonZero( labels );
	bool useZeros = numZeros < cv::countNonZero( labels );

	// Reshape into original form
	_letter = cv::Mat( _shape.size(), _shape.type() );
	_letter = cv::Scalar(0);

	binaryIterator = input.begin<VT_binary>();
	binaryEnd = input.end<VT_binary>();
	cv::MatIterator_<IT> labelsIterator = labels.begin<IT>();

	for( int index = 0; binaryIterator != binaryEnd; ++binaryIterator )
	{
		if( (*binaryIterator)[0] > 0 )
		{
			// Whichever label was the minority, we make that value white and all other values black
			unsigned char value = (*labelsIterator)[0];

			if( useZeros )
				value = value ? 0 : 255;
			else
				value = value ? 255 : 0;

			_letter.at<VT_binary>( binaryIterator.pos() ) = VT_binary( value );
			++labelsIterator;
		}
	}
}
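
extractLetter() clusters only the pixels under the shape mask via doClustering(), which is not shown here. A two-cluster split of sampled pixels with cv::kmeans would look roughly like this (a sketch, not the original helper):

#include <opencv2/core/core.hpp>
using namespace cv;

// Cluster N sampled pixels (one per row) into two groups; returns one int label per row.
Mat twoMeansLabels(const Mat &samplesIn)
{
	Mat samples;
	samplesIn.reshape(1, samplesIn.rows).convertTo(samples, CV_32F);	// kmeans wants CV_32F rows

	Mat labels, centers;
	kmeans(samples, 2, labels,
	       TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 10, 1.0),
	       3, KMEANS_PP_CENTERS, centers);
	return labels;	// 0 or 1 per sample, as consumed by the loop above
}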
Example #27
int Frame::findCandidates(Mat prev, Mat next) {
	Mat diff1, diff2;
	Mat gray_diff1, gray_diff2;
	Mat andMaskGray, andMaskHSV, andMaskBGR1, andMaskBGR2, andMaskBGR;
	Mat colourFiltered1;
		
	absdiff(prev, curr, diff1);
	absdiff(curr, next, diff2);

 	cvtColor(diff1, gray_diff1, CV_BGR2GRAY);
	cvtColor(diff2, gray_diff2, CV_BGR2GRAY);

	bitwise_and(gray_diff1, gray_diff2, andMaskGray);
	
	GaussianBlur( andMaskGray, andMaskGray, cv::Size(9, 9), 2, 2 );
	//morphologyEx(andMaskGray, andMaskGray, MORPH_CLOSE, getStructuringElement(MORPH_RECT, cv::Size(11, 11)));
	//morphologyEx(andMaskGray, andMaskGray, MORPH_OPEN, getStructuringElement(MORPH_RECT, cv::Size(5, 5)));
	//dilate(andMaskGray, andMaskGray, Mat());
	//GaussianBlur( andMaskGray, andMaskGray, cv::Size(9, 9), 2, 2 );

	threshold(andMaskGray, andMaskGray, 4, 255, THRESH_BINARY);
	GaussianBlur( andMaskGray, andMaskGray, cv::Size(9, 9), 2, 2 );
	threshold(andMaskGray, andMaskGray, 4, 255, THRESH_BINARY);
	//GaussianBlur( andMaskGray, andMaskGray, cv::Size(9, 9), 2, 2 );
	//threshold(andMaskGray, andMaskGray, 5, 255, THRESH_BINARY);

	// colour
	//cvtColor(curr, andMaskHSV, CV_BGR2HSV);
	//Scalar min(0.11*256, 0.4*256, 0.5*256, 0);
	//Scalar max(0.15*256, 0.7*256, 1*256, 0);
	//inRange(andMaskHSV, min, max, andMaskBGR);
	//Scalar min(140, 150, 130, 0);
	//Scalar max(200, 255, 180, 0);
	Scalar min1(90, 200, 160, 0);
	Scalar max1(175, 255, 220, 0);
	inRange(curr, min1, max1, andMaskBGR1);
	Scalar min2(28, 90, 35, 0);
	Scalar max2(100, 120, 120, 0);
	inRange(curr, min2, max2, andMaskBGR2);
	bitwise_or(andMaskBGR1, andMaskBGR2, andMaskBGR);
	GaussianBlur( andMaskBGR, andMaskBGR, cv::Size(9, 9), 2, 2 );
	morphologyEx(andMaskBGR, andMaskBGR, MORPH_CLOSE, getStructuringElement(MORPH_RECT, cv::Size(11, 11)));
	morphologyEx(andMaskBGR, andMaskBGR, MORPH_OPEN, getStructuringElement(MORPH_RECT, cv::Size(5, 5)));

	threshold(andMaskBGR, andMaskBGR, 1, 255, THRESH_BINARY);
	bitwise_and(andMaskBGR, andMaskGray, colourFiltered1);
	
	/*vector<Vec3f> circles;
	HoughCircles(gray, circles, CV_HOUGH_GRADIENT, 2, colourFiltered1->rows/4, 200, 100 );
	for( size_t i = 0; i < circles.size(); i++ )
	{
		Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
		int radius = cvRound(circles[i][2]);
		// draw the circle center
		circle( img, center, 3, Scalar(0,255,0), -1, 8, 0 );
		// draw the circle outline
		circle( img, center, radius, Scalar(0,0,255), 3, 8, 0 );
	}*/

	//Mat tmp;
	//cvtColor(andMaskHSV, tmp, CV_HSV2BGR);
	//imshow("bgr", tmp);
	//imshow("hsv", andMaskHSV);
	//imshow("curr", curr);
	//imshow("andmask", andMaskGray);
	//imshow("colours", andMaskBGR);
	//imshow("thresholded", colourFiltered1);
	//waitKey(0);
	IplImage blobimage = andMaskGray;

	CBlobResult results = CBlobResult(&blobimage, NULL, 0);
	results.Filter(results, B_EXCLUDE, CBlobGetArea(), B_GREATER, 500);
	results.Filter(results, B_EXCLUDE, CBlobGetArea(), B_LESS, 200);

	CBlob *blob;
				 
	//Blob numbering
	int i;
	for(i=0; i<results.GetNumBlobs(); i++) {
		blob = results.GetBlob(i);
					
		//if((blob->MaxY() - blob->MinY()) < 1*(blob->MaxX() - blob->MinX())) {
			candidates.push_back(*blob);
		//}
	}
	return i;
}
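
findCandidates() is built around a classic three-frame difference: motion must appear in both |prev−curr| and |curr−next|. Distilled to its core (a sketch; the blur kernel and threshold are placeholders in the spirit of the code above):

#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;

// Three-frame difference: keep pixels that moved in both consecutive frame pairs.
Mat motionMask(const Mat &prev, const Mat &curr, const Mat &next, int thresh = 4)
{
	Mat d1, d2, g1, g2, mask;
	absdiff(prev, curr, d1);
	absdiff(curr, next, d2);
	cvtColor(d1, g1, CV_BGR2GRAY);
	cvtColor(d2, g2, CV_BGR2GRAY);
	bitwise_and(g1, g2, mask);	// motion must show up in both diffs
	GaussianBlur(mask, mask, Size(9, 9), 2, 2);
	threshold(mask, mask, thresh, 255, THRESH_BINARY);
	return mask;
}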