CBlobResult getBlobs(IplImage* tmp_frame, IplImage* binFore)
{
    //IplImage* binFore = cvCreateImage(cvGetSize(tmp_frame), IPL_DEPTH_8U, 1);
    //get the binary foreground object
    //cvSub(getBinaryImage(tmp_frame), binBack, binFore, NULL);
    //if(!cvSaveImage("binFore.jpg", binFore)) printf("Could not save the background image\n");

    //! Start extracting blobs
    CBlobResult blobs;
    //! get the blobs from the image, with no mask, using a threshold of 10
    blobs = CBlobResult(binFore, NULL, 10, true);

    //! Create a file with all the found blobs
    blobs.PrintBlobs("blobs.txt");

    //! discard the blobs with an area smaller than 40 pixels
    blobs.Filter(blobs, B_INCLUDE, CBlobGetArea(), B_GREATER, 40);

    //! These two filters work around a library bug where a single blob covering
    //! the whole image is returned: discard anything whose area or perimeter is
    //! nearly as large as the frame itself.
    blobs.Filter(blobs, B_INCLUDE, CBlobGetArea(), B_LESS,
                 (tmp_frame->height) * (tmp_frame->width) * 0.8);
    blobs.Filter(blobs, B_INCLUDE, CBlobGetPerimeter(), B_LESS,
                 ((tmp_frame->height) + (tmp_frame->width)) * 2 * 0.8);

    //! Create a file with the filtered results
    blobs.PrintBlobs("filteredBlobs.txt");

    return blobs;
}
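// The two chained area filters in getBlobs() can also be written as a single
// Filter call using the B_INSIDE condition, the same form findShadow() below
// uses with explicit low/high limits. A minimal sketch, assuming the same
// cvBlobsLib API as above:
CBlobResult getMidSizedBlobs(IplImage* binFore, double minArea, double maxArea)
{
    CBlobResult blobs(binFore, NULL, 10, true); // same constructor arguments as getBlobs()
    // keep only blobs whose area lies in [minArea, maxArea]
    blobs.Filter(blobs, B_INCLUDE, CBlobGetArea(), B_INSIDE, minArea, maxArea);
    return blobs;
}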
void extractBall()
{
    imgTransform(BALL_HUE_U, BALL_HUE_L, BALL_SAT_U, BALL_SAT_L, VAL_U, VAL_L);

    blobRes = CBlobResult(dst, NULL, 0);
    // keep blobs larger than BLOB_SIZE_MIN
    blobRes.Filter(blobRes, B_EXCLUDE, CBlobGetArea(), B_LESS, BLOB_SIZE_MIN);
    numOfBlobs = blobRes.GetNumBlobs();
    cout << numOfBlobs << endl;

    // keep blobs smaller than BALL_SIZE_MAX
    blobRes.Filter(blobRes, B_EXCLUDE, CBlobGetArea(), B_GREATER, BALL_SIZE_MAX);
    numOfBlobs = blobRes.GetNumBlobs();
    cout << numOfBlobs << endl;

    // keep blobs with compactness greater than BALL_COMPACTNESS
    blobRes.Filter(blobRes, B_INCLUDE, CBlobGetCompactness(), B_GREATER, BALL_COMPACTNESS);
    numOfBlobs = blobRes.GetNumBlobs();
    cout << numOfBlobs << endl;

    for (int i = 0; i < numOfBlobs; i++)
        blobs[i] = blobRes.GetBlob(i);
}
void ScheinrieseApp::findBlobs()
{
    CBlobResult blobs;
    CBlob *currentBlob;
    IplImage *original, *originalThr;

    // load an image and threshold it
    // (the destination image must be allocated before cvThreshold is called)
    original = cvLoadImage("pic1.png", 0);
    originalThr = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U, 1);
    cvThreshold(original, originalThr, 100, 255, CV_THRESH_BINARY);

    // find non-white blobs in the thresholded image
    blobs = CBlobResult(originalThr, NULL, 255);
    // exclude the ones smaller than param2
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, param2);

    // get the mean gray level of the biggest blob
    CBlob biggestBlob;
    CBlobGetMean getMeanColor(original);
    double meanGray;
    blobs.GetNthBlob(CBlobGetArea(), 0, biggestBlob);
    meanGray = getMeanColor(biggestBlob);

    // display the filtered blobs
    cvMerge(originalThr, originalThr, originalThr, NULL, displayedImage);
    for (int i = 0; i < blobs.GetNumBlobs(); i++) {
        currentBlob = blobs.GetBlob(i);
        currentBlob->FillBlob(displayedImage, CV_RGB(255, 0, 0));
    }
}
/* Detect blobs larger than min_size in a given IplImage. */
CBlobResult MarkerCapture::detect_blobs(IplImage *img, int min_size = 10)
{
    // find white blobs in the thresholded image
    CBlobResult blobs = CBlobResult(img, NULL, 0);
    // exclude the ones smaller than min_size
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, min_size);
    return blobs;
}
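// A hypothetical caller for detect_blobs(), assuming a MarkerCapture instance
// `capture` and a thresholded single-channel image `binaryImg` produced
// earlier in the pipeline (both names are illustrative):
CBlobResult blobs = capture.detect_blobs(binaryImg, 25);
for (int i = 0; i < blobs.GetNumBlobs(); i++) {
    CvRect box = blobs.GetBlob(i)->GetBoundingBox();
    printf("blob %d: %dx%d at (%d,%d)\n", i, box.width, box.height, box.x, box.y);
}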
void ForegroundDetector::nextIteration(const Mat &img)
{
    if (bgImg.empty()) {
        return;
    }

    // Mat takes (rows, cols, type)
    Mat absImg = Mat(img.rows, img.cols, img.type());
    Mat threshImg = Mat(img.rows, img.cols, img.type());

    absdiff(bgImg, img, absImg);
    threshold(absImg, threshImg, fgThreshold, 255, CV_THRESH_BINARY);

    IplImage im = (IplImage)threshImg;
    CBlobResult blobs = CBlobResult(&im, NULL, 0);
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minBlobSize);

    vector<Rect>* fgList = detectionResult->fgList;
    fgList->clear();

    for (int i = 0; i < blobs.GetNumBlobs(); i++) {
        CBlob *blob = blobs.GetBlob(i);
        CvRect rect = blob->GetBoundingBox();
        fgList->push_back(rect);
    }
}
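// For comparison, the filter-by-area step above can be done without cvBlobsLib
// on newer OpenCV builds. A sketch using cv::connectedComponentsWithStats
// (OpenCV 3+, so an assumption relative to the C-API code in this collection):
#include <opencv2/imgproc.hpp>
#include <vector>

void collectForeground(const cv::Mat& threshImg, int minBlobSize,
                       std::vector<cv::Rect>& fgList)
{
    cv::Mat labels, stats, centroids;
    int n = cv::connectedComponentsWithStats(threshImg, labels, stats, centroids);
    fgList.clear();
    for (int i = 1; i < n; i++) { // label 0 is the background
        if (stats.at<int>(i, cv::CC_STAT_AREA) >= minBlobSize)
            fgList.push_back(cv::Rect(stats.at<int>(i, cv::CC_STAT_LEFT),
                                      stats.at<int>(i, cv::CC_STAT_TOP),
                                      stats.at<int>(i, cv::CC_STAT_WIDTH),
                                      stats.at<int>(i, cv::CC_STAT_HEIGHT)));
    }
}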
CBlobResult computeWhiteMaskOtsu(Mat& imgRGBin, Mat& imgHSVIn, CBlobResult& blobs,
                                 int limitRGB, int limitHSV, double RGBratio, double HSVratio,
                                 int bmin, int bmax, int i)
{
    waitKey(30);
    Mat BGRbands[3];
    split(imgRGBin, BGRbands);
    Mat imgHSV;
    cvtColor(imgHSVIn, imgHSV, CV_BGR2HSV);
    Mat HSVbands[3];
    split(imgHSV, HSVbands);
    Mat maskHSV, maskRGB, maskT;

    // raise the Otsu threshold on the red channel until the mask is small enough
    int otsuTRGB = getThreshVal_Otsu_8u(BGRbands[2]);
    do {
        threshold(BGRbands[2], maskRGB, otsuTRGB, 255, THRESH_BINARY);
        otsuTRGB++;
    } while (countNonZero(maskRGB) > (RGBratio * limitRGB) && otsuTRGB <= 255);

    // lower the Otsu threshold on the saturation channel likewise
    int otsuTHSV = getThreshVal_Otsu_8u(HSVbands[1]);
    do {
        threshold(HSVbands[1], maskHSV, otsuTHSV, 255, THRESH_BINARY_INV);
        otsuTHSV--;
    } while (countNonZero(maskHSV) > (HSVratio * limitHSV) && otsuTHSV >= 0); // 0.1

    bitwise_or(maskHSV, maskRGB, maskT);

    int blobSizeBefore = blobs.GetNumBlobs();
    blobs = blobs + CBlobResult(maskT, Mat(), 8);
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetLength(), B_GREATER, bmax);
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetLength(), B_LESS, bmin);
    int blobSizeAfter = blobs.GetNumBlobs();

    Mat newMask(maskT.size(), maskT.type());
    newMask.setTo(0);
    // i indexes only the blobs added by this call
    for (; i < blobs.GetNumBlobs(); i++) {
        double area = blobs.GetBlob(i)->Area();
        if (area < 5000 && area > 400)
            blobs.GetBlob(i)->FillBlob(newMask, CV_RGB(255, 255, 255), 0, 0, true);
    }

    // recurse on the unmasked remainder while both masks keep finding pixels
    if (countNonZero(maskRGB) > 400 && countNonZero(maskHSV) > 400 && blobSizeBefore != blobSizeAfter) {
        vector<Mat> BGRbands;
        split(imgRGBin, BGRbands);
        Mat maskedRGB = applyMaskBandByBand(newMask, BGRbands);
        bitwise_not(newMask, newMask);
        split(imgHSVIn, BGRbands);
        Mat maskedHSV = applyMaskBandByBand(newMask, BGRbands);
        blobs = computeWhiteMaskOtsu(maskedRGB, maskedHSV, blobs,
                                     countNonZero(maskRGB), countNonZero(maskHSV),
                                     RGBratio, HSVratio, bmin, bmax, i - 1);
    }
    return blobs;
}
void extractBots()
{
    // RED TEAM
    imgTransform(TEAM_R_HUE_U, TEAM_R_HUE_L, TEAM_R_SAT_U, TEAM_R_SAT_L, VAL_U, VAL_L);

    blobRes = CBlobResult(dst, NULL, 0);
    // keep blobs larger than BLOB_SIZE_MIN
    blobRes.Filter(blobRes, B_EXCLUDE, CBlobGetArea(), B_LESS, BLOB_SIZE_MIN);
    numOfBlobs = blobRes.GetNumBlobs();
    cout << numOfBlobs << endl;

    if (numOfBlobs == 2) {
        for (int i = 0; i < numOfBlobs; i++)
            blobs[i] = blobRes.GetBlob(i);
    }
}

void printBlobs()
{
    CBlobGetXCenter getXC;
    CBlobGetYCenter getYC;
    CBlobGetArea getArea;
    CBlobGetCompactness getCompactness;

    printf("-----Printing Blobs------\n");
    for (int i = 0; i < numOfBlobs; i++) {
        printf("%d\t(%3.2f,%3.2f),%3.2f %3.2f\n", i,
               getXC(*blobs[i]), getYC(*blobs[i]),
               getArea(*blobs[i]), getCompactness(*blobs[i]));
    }
    printf("\n");

    cvNamedWindow("old", 1);
    cvNamedWindow("new", 1);
    cvMoveWindow("old", 0, 0);
    cvMoveWindow("new", 0, 400);
    cvShowImage("old", img);
    cvShowImage("new", dst);
    cvWaitKey();
}
void givedepth(IplImage *localimagergb)
{
    IplImage *localimage = cvCreateImage(cvGetSize(localimagergb), IPL_DEPTH_8U, 3);
    cvCvtColor(localimagergb, localimage, CV_BGR2HSV);
    IplImage *blobbedscaling = cvCreateImage(cvGetSize(localimagergb), IPL_DEPTH_8U, 3);
    uchar *itemp = (uchar *)(localimage->imageData);
    IplImage *binaryscaling = cvCreateImage(cvGetSize(localimagergb), IPL_DEPTH_8U, 1);
    uchar *itemp1 = (uchar *)(binaryscaling->imageData);

    for (int i = 0; i < localimage->height; i++) {
        for (int j = 0; j < localimage->width; j++) {
            if (itemp[i * localimage->widthStep + j * localimage->nChannels]     < hh &&
                itemp[i * localimage->widthStep + j * localimage->nChannels]     > hl &&
                itemp[i * localimage->widthStep + j * localimage->nChannels + 1] < sh &&
                itemp[i * localimage->widthStep + j * localimage->nChannels + 1] > sl &&
                itemp[i * localimage->widthStep + j * localimage->nChannels + 2] < vh &&
                itemp[i * localimage->widthStep + j * localimage->nChannels + 2] > vl) // previously 124
            {
                itemp1[i * binaryscaling->widthStep + j] = 0; // in-range (dark) regions black, rest white
            }
            else
                itemp1[i * binaryscaling->widthStep + j] = 255;
        }
    }

    cvErode(binaryscaling, binaryscaling, NULL, 4);
    cvDilate(binaryscaling, binaryscaling, NULL, 4);

    CBlobResult blob;
    blob = CBlobResult(binaryscaling, NULL, 255);
    blob.Filter(blob, B_EXCLUDE, CBlobGetArea(), B_LESS, 500);
    cvMerge(binaryscaling, binaryscaling, binaryscaling, NULL, blobbedscaling);

    CBlob hand1, hand2; // two blobs, one for each hand
    blob.GetNthBlob(CBlobGetArea(), 0, hand2);
    blob.GetNthBlob(CBlobGetArea(), 1, hand1);
    hand1.FillBlob(blobbedscaling, CV_RGB(0, 0, 255)); // fill the blob of hand one with blue
    hand2.FillBlob(blobbedscaling, CV_RGB(0, 255, 0)); // fill the blob of hand two with green
    coordinates(blobbedscaling, 0);
}
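// The per-pixel HSV band test in givedepth() can be written more compactly
// with cvInRangeS, as the capture loops further down already do. A sketch,
// assuming the same hl/hh, sl/sh, vl/vh globals; note that cvInRangeS uses
// inclusive bounds where the loop above uses strict inequalities, and its
// in-range pixels come out white, so invert to match the original's
// "in-range = black" convention:
cvInRangeS(localimage, cvScalar(hl, sl, vl), cvScalar(hh, sh, vh), binaryscaling);
cvNot(binaryscaling, binaryscaling);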
// threshold trackbar callback
void on_trackbar(int dummy)
{
    if (!originalThr) {
        originalThr = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U, 1);
    }
    if (!displayedImage) {
        displayedImage = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U, 3);
    }

    // threshold the input image
    cvThreshold(original, originalThr, param1, 255, CV_THRESH_BINARY);

    // get blobs and filter them by area
    CBlobResult blobs;
    CBlob *currentBlob;

    // find blobs in the image
    blobs = CBlobResult(originalThr, NULL, 255);
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, param2);

    // display the filtered blobs
    cvMerge(originalThr, originalThr, originalThr, NULL, displayedImage);
    for (int i = 0; i < blobs.GetNumBlobs(); i++) {
        currentBlob = blobs.GetBlob(i);
        currentBlob->FillBlob(displayedImage, CV_RGB(255, 0, 0));
    }
    cvShowImage(wndname, displayedImage);
}
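// For context, a callback like on_trackbar() is typically registered once at
// startup. A sketch, assuming param1 is the int global the slider drives and
// wndname names an existing window (both appear in the snippet above):
cvNamedWindow(wndname, CV_WINDOW_AUTOSIZE);
cvCreateTrackbar("Threshold", wndname, &param1, 255, on_trackbar);
on_trackbar(param1); // draw the initial state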
void BlobDetection::init()
{
    /** Init is called just after construction. */
    try {
        initStatusMask();

        // Create proxies to ALVideoDevice and friends on the robot.
        ALVideoDeviceProxy* camProxy = new ALVideoDeviceProxy(getParentBroker());
        behavoirProxy = new ALBehaviorManagerProxy(getParentBroker());
        ledProxy = new ALLedsProxy(getParentBroker());
        motionProxy = new ALMotionProxy(getParentBroker());
        initLeds();

        // Subscribe a client image requiring 640x480px and RGB colorspace.
        const std::string cameraID = camProxy->subscribeCamera("camera_01", 0, AL::kVGA, AL::kRGBColorSpace, 10);

        // Create a proxy to ALMemory on the robot and subscribe to the touch events.
        ALMemoryProxy fMemoryProxy = ALMemoryProxy(getParentBroker());
        fMemoryProxy.subscribeToEvent("FrontTactilTouched", "BlobDetection", "onFrontTactilTouched");
        fMemoryProxy.subscribeToEvent("MiddleTactilTouched", "BlobDetection", "onMiddleTactilTouched");

        HandOrientation rightOrientationLast = NONE, leftOrientationLast = NONE;
        HandOrientation rightOrientationCur = NONE, leftOrientationCur = NONE;

        // stand up
        behavoirProxy->runBehavior(STAND);

        // RECORDING: prepare video recording
        /*
        int size;
        std::string arvFile = std::string("/home/nao/video");
        streamHeader tmpStreamHeader;
        std::vector<streamHeader> streamHeaderVector;
        ALVideo videoFile;
        tmpStreamHeader.width = 640;
        tmpStreamHeader.height = 480;
        tmpStreamHeader.colorSpace = AL::kRGBColorSpace; // not strictly necessary: pyuv lets you pick the colorspace the video is shown in
        tmpStreamHeader.pixelDepth = 8;
        streamHeaderVector.push_back(tmpStreamHeader);
        std::cout << "Output arv file properties: " << streamHeaderVector[0].width << "x" << streamHeaderVector[0].height
                  << " Colorspace id:" << streamHeaderVector[0].colorSpace << " Pixel depth:" << streamHeaderVector[0].pixelDepth << std::endl;
        if (!videoFile.recordVideo(arvFile, 0, streamHeaderVector)) {
            std::cout << "Error writing " << arvFile << " file." << std::endl;
            return;
        }
        */

        while (1) {
            if (touched) {
                // Switch LEDs: RED OFF, BLUE ON
                if (red_on == 1) {
                    ledProxy->off(FACE_LED_RED);
                    red_on = 0;
                }
                if (blue_on == 0) {
                    ledProxy->on(FACE_LED_BLUE);
                    blue_on = 1;
                }

                // Fetch the RGB image from the Nao camera we subscribed to.
                ALImage *img_cam = (ALImage*)camProxy->getImageLocal(cameraID);

                // Create an OpenCV Mat header to wrap the Aldebaran ALImage;
                // only the data pointer is assigned, nothing is copied.
                Mat img_hsv = Mat(Size(img_cam->getWidth(), img_cam->getHeight()), CV_8UC3);
                img_hsv.data = (uchar*) img_cam->getData();

                // Convert the RGB image from the camera to an HSV image
                cvtColor(img_hsv, img_hsv, CV_RGB2HSV);

                // RECORDING: record the HSV-converted video
                //videoFile.write((char*) img_hsv.data, size);

                // Get the separate HSV color components of the color input image.
                std::vector<Mat> channels(3);
                split(img_hsv, channels);
                Mat planeH = channels[0];
                Mat planeS = channels[1];
                Mat planeV = channels[2];

                // Detect which pixels in each of the H, S and V channels are probably skin pixels.
                threshold(planeH, planeH, 150, UCHAR_MAX, CV_THRESH_BINARY_INV); //18
                threshold(planeS, planeS, 60, UCHAR_MAX, CV_THRESH_BINARY);      //50
                threshold(planeV, planeV, 170, UCHAR_MAX, CV_THRESH_BINARY);     //80

                // Combine all 3 thresholded color components, so that an output pixel will only
                // be white if the H, S and V pixels were also white.
                Mat imageSkinPixels = Mat(img_hsv.size(), CV_8UC3);    // Greyscale output image.
                bitwise_and(planeH, planeS, imageSkinPixels);          // imageSkin = H AND S.
                bitwise_and(imageSkinPixels, planeV, imageSkinPixels); // imageSkin = H AND S AND V.

                // Assign the Mat (C++) to an IplImage (C); necessary because
                // the blob detection is written against the old OpenCV C API.
                IplImage ipl_imageSkinPixels = imageSkinPixels;

                // RECORDING: record the video using the C container variable
                //size = img_cam->getSize();
                //videoFile.write((char*) ipl_imageSkinPixels.imageData, size/3);

                // Set up the blob detection.
                CBlobResult blobs;
                blobs.ClearBlobs();
                blobs = CBlobResult(&ipl_imageSkinPixels, NULL, 0); // Use a black background color.
                // Ignore the blobs whose area is less than minArea.
                blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minBlobArea);

                // ##### Gestures #####
                std::cout << "Number of Blobs: " << blobs.GetNumBlobs() << endl;
                if (blobs.GetNumBlobs() == 0) {
                    // picture empty
                }
                else if (blobs.GetNumBlobs() == 1) {
                    // head detected
                    trackHead(getCenterPoint(blobs.GetBlob(0)->GetBoundingBox()).x,
                              getCenterPoint(blobs.GetBlob(0)->GetBoundingBox()).y);
                }
                else if (blobs.GetNumBlobs() == 2 || blobs.GetNumBlobs() == 3) {
                    // head + one hand || head + two hands
                    Rect rect[3];
                    int indexHead = -1, indexHandLeft = -1, indexHandRight = -1;

                    // Get the bounding boxes
                    for (int i = 0; i < blobs.GetNumBlobs(); i++) {
                        rect[i] = blobs.GetBlob(i)->GetBoundingBox();
                    }

                    // Detect the head and hand indexes
                    if (blobs.GetNumBlobs() == 2) {
                        // head and one hand
                        int indexHand = -1;
                        if (getCenterPoint(rect[0]).y < getCenterPoint(rect[1]).y) {
                            indexHead = 0; indexHand = 1;   // rect[0] is the head
                        } else {
                            indexHead = 1; indexHand = 0;   // rect[1] is the head
                        }
                        if (getHandside(rect[indexHead], rect[indexHand]) == LEFT) {
                            indexHandLeft = 1;  indexHandRight = -1;  // hand is left
                        } else {
                            indexHandLeft = -1; indexHandRight = 1;   // hand is right
                        }
                    } else {
                        // two hands
                        int indexHand1 = -1;
                        int indexHand2 = -1;
                        if (getCenterPoint(rect[0]).y < getCenterPoint(rect[1]).y &&
                            getCenterPoint(rect[0]).y < getCenterPoint(rect[2]).y) {
                            indexHead = 0; indexHand1 = 1; indexHand2 = 2;   // rect[0] is the head
                        } else if (getCenterPoint(rect[1]).y < getCenterPoint(rect[0]).y &&
                                   getCenterPoint(rect[1]).y < getCenterPoint(rect[2]).y) {
                            indexHead = 1; indexHand1 = 0; indexHand2 = 2;   // rect[1] is the head
                        } else {
                            indexHead = 2; indexHand1 = 0; indexHand2 = 1;   // rect[2] is the head
                        }
                        if (getHandside(rect[indexHead], rect[indexHand1]) == LEFT) {
                            indexHandLeft = indexHand1; indexHandRight = indexHand2;
                        } else {
                            indexHandLeft = indexHand2; indexHandRight = indexHand1;
                        }
                    }

                    // Blobs are detected; adjust Nao's head towards the detected head blob.
                    trackHead(getCenterPoint(rect[indexHead]).x, getCenterPoint(rect[indexHead]).y);

                    // Get the orientations from the hand rects
                    leftOrientationCur  = (indexHandLeft  != -1) ? getOrientationOfRect(rect[indexHandLeft])  : NONE;
                    rightOrientationCur = (indexHandRight != -1) ? getOrientationOfRect(rect[indexHandRight]) : NONE;

                    // Check change of the left hand
                    switch (detectHandStateChange(leftOrientationLast, leftOrientationCur)) {
                        case PORTRAIT_TO_LANDSCAPE: handleGestures(LEFT_FLIP_DOWN); break;
                        case LANDSCAPE_TO_PORTRAIT: handleGestures(LEFT_FLIP_UP);   break;
                        case NOCHANGE: // TODO
                        default: break;
                    }
                    // Check change of the right hand
                    switch (detectHandStateChange(rightOrientationLast, rightOrientationCur)) {
                        case PORTRAIT_TO_LANDSCAPE: handleGestures(RIGHT_FLIP_DOWN); break;
                        case LANDSCAPE_TO_PORTRAIT: handleGestures(RIGHT_FLIP_UP);   break;
                        case NOCHANGE: // TODO
                        default: break;
                    }
                }
                else if (blobs.GetNumBlobs() > 3) {
                    // too much information
                    cout << "too much information" << endl;
                }

                leftOrientationLast = leftOrientationCur;
                rightOrientationLast = rightOrientationCur;

                // RECORDING: close the video recorder
                //videoFile.closeVideo();

                // Free all the resources.
                camProxy->releaseImage(cameraID);
                qi::os::sleep(0.5f);
            } else {
                // Switch LEDs: RED ON, BLUE OFF
                if (red_on == 0) {
                    ledProxy->on(FACE_LED_RED);
                    red_on = 1;
                    behavoirProxy->runBehavior(STAND);
                }
                if (blue_on == 1) {
                    ledProxy->off(FACE_LED_BLUE);
                    blue_on = 0;
                }
            }
        }
        camProxy->unsubscribe(cameraID);
    }
    catch (const AL::ALError& e) {
        std::cerr << "Caught exception: " << e.what() << std::endl;
        return;
    }
}
double findShadow(IplImage *l_img, int hue, int sat, int val, int threshold,
                  double blobLowLimit, double blobHighLimit)
{
    // Input: the HSV value of the color blob being sought, an acceptable
    // threshold around that color, and the min and max blob sizes of interest.
    // Output: pointer to a data array of size [#ofblobs*3+1];
    // data = [number of blobs, area1, x of center1, y of center1,
    //         area2, x of center2, y of center2, ..., areaN, x of centerN, y of centerN]

    // Image variables
    IplImage* local_copy = cvCloneImage(l_img);
    IplImage* imageSmooth = cvCreateImage(cvGetSize(l_img), 8, 3);      // Gaussian-filtered image
    IplImage* imageSuperSmooth = cvCreateImage(cvGetSize(l_img), 8, 3); // heavily Gaussian-filtered image
    IplImage* imageHSV = cvCreateImage(cvGetSize(l_img), 8, 3);         // HSV image
    IplImage* i1 = cvCreateImage(cvGetSize(l_img), 8, 1);               // desired-color-filtered image (dark)
    IplImage* i2 = cvCreateImage(cvGetSize(l_img), 8, 1);               // desired-color-filtered image (bright)
    IplImage* i_ts = cvCreateImage(cvGetSize(l_img), 8, 1);             // tri-scale image
    IplImage* planeH = cvCreateImage(cvGetSize(l_img), 8, 1);           // Hue
    IplImage* planeS = cvCreateImage(cvGetSize(l_img), 8, 1);           // Saturation
    IplImage* planeV = cvCreateImage(cvGetSize(l_img), 8, 1);           // Brightness
    IplImage* planeSmoothV = cvCreateImage(cvGetSize(l_img), 8, 1);     // Brightness (smoothed)
    IplImage* imageSmoothHSV = cvCreateImage(cvGetSize(l_img), 8, 3);   // HSV image (smoothed)
    IplImage* obsdetmask = cvCreateImage(cvGetSize(l_img), 8, 1);       // obstacle detection mask
    IplImage* obsdetmask_dil = cvCreateImage(cvGetSize(l_img), 8, 1);
    IplImage* obsdetmask_b = cvCreateImage(cvGetSize(l_img), 8, 1);
    IplImage* obsdetmask_bdil = cvCreateImage(cvGetSize(l_img), 8, 1);

    // Blob variables
    CBlobResult mask_bls;
    CBlob mask_bl;
    CBlobResult blobs;
    CBlob blob;
    CBlobResult blobs1;
    CBlob blob1;
    CBlobGetXCenter getXCenter;
    CBlobGetYCenter getYCenter;

    // Gaussian filter
    cvSmooth(l_img, imageSmooth, CV_GAUSSIAN, 13, 13, 0, 0);
    cvSmooth(l_img, imageSuperSmooth, CV_GAUSSIAN, 41, 41, 0, 0);
    //cvShowImage("View2a", imageSmooth);

    // Convert RGB to HSV
    cvCvtColor(imageSmooth, imageHSV, CV_BGR2HSV);
    cvCvtColor(imageSuperSmooth, imageSmoothHSV, CV_BGR2HSV);
    cvCvtPixToPlane(imageSuperSmooth, NULL, NULL, planeSmoothV, 0);
    cvCvtPixToPlane(imageHSV, planeH, planeS, planeV, 0); // extract the 3 color components

    // Work on the lower two thirds of the image
    cvSetImageROI(imageHSV, cvRect(0, imageHSV->height/3, imageHSV->width, imageHSV->height*2/3));
    IplImage* planeH1 = cvCreateImage(cvGetSize(imageHSV), 8, 1); // Hue
    IplImage* planeS1 = cvCreateImage(cvGetSize(imageHSV), 8, 1); // Saturation
    IplImage* planeV1 = cvCreateImage(cvGetSize(imageHSV), 8, 1); // Brightness
    cvCvtPixToPlane(imageHSV, planeH1, planeS1, planeV1, 0);
    cvResetImageROI(imageHSV);

    cvShowImage("Dark_Value", planeV);
    cvShowImage("Dark_Sat", planeS);
    cvShowImage("Dark_Hue", planeH);
    cvSet(obsdetmask, cvScalar(0, 0, 0));
    cv::waitKey(3);

    int maxDark = 0;
    int minDark = 255;
    int minDarknessValue = 0;
    int maxDarknessValue = 0;
    int midDarknessValue = 0;

    // Filter the image for the desired color: build a mask of candidate pixels
    // and record the darkest and brightest values seen inside it.
    for (int y = 0; y < planeH1->height; y++) {
        unsigned char* h = &CV_IMAGE_ELEM(planeH1, unsigned char, y, 0);
        unsigned char* s = &CV_IMAGE_ELEM(planeS1, unsigned char, y, 0);
        unsigned char* v = &CV_IMAGE_ELEM(planeV1, unsigned char, y, 0);
        for (int x = 0; x < planeH1->width * planeH1->nChannels; x += planeH1->nChannels) {
            int diff = abs(h[x] - hue);
            if ((diff < threshold) || (v[x] < MIN_BRIGHT) || (s[x] < MIN_SAT)) {
                ((uchar *)(obsdetmask->imageData + (y + planeH->height - planeH1->height) * obsdetmask->widthStep))[x] = 255;
                if (v[x] < minDark) { minDark = v[x]; }
                if (v[x] > maxDark) { maxDark = v[x]; }
            } else {
                ((uchar *)(obsdetmask->imageData + (y + planeH->height - planeH1->height) * obsdetmask->widthStep))[x] = 0;
            }
        }
    }

    cvDilate(obsdetmask, obsdetmask_dil, NULL, 1);
    cvShowImage("Dark_ObsDetPre", obsdetmask_dil);

    // Keep only the biggest mask blob
    mask_bls = CBlobResult(obsdetmask_dil, NULL, 0);
    mask_bls.Filter(mask_bls, B_EXCLUDE, CBlobGetArea(), B_LESS, MASK_MIN_BLOB); // drop blobs under the minimum size
    mask_bls.GetNthBlob(CBlobGetArea(), 0, mask_bl);
    cvSet(obsdetmask_b, cvScalar(0, 0, 0));
    mask_bl.FillBlob(obsdetmask_b, CV_RGB(255, 255, 255));
    cvDilate(obsdetmask_b, obsdetmask_bdil, NULL, 5);
    cvShowImage("Dark_ObsDet", obsdetmask_bdil);
    cvWaitKey(3);

    // Derive the dark/bright cut points from the observed value range
    minDarknessValue = ((maxDark - minDark) * LOW_PERCENT) + minDark;
    if (minDarknessValue < VALUE_LOW_LIM) { minDarknessValue = VALUE_LOW_LIM; }
    maxDarknessValue = maxDark - ((maxDark - minDark) * HIGH_PERCENT);
    midDarknessValue = .5 * (minDarknessValue + maxDarknessValue);
    ROS_INFO("minDark = %d, maxDark = %d, minDV = %d, maxDV = %d", minDark, maxDark, minDarknessValue, maxDarknessValue);

    // Classify every pixel into non-floor / dark / mid / bright classes
    for (int y = 0; y < planeH->height; y++) {
        unsigned char* h = &CV_IMAGE_ELEM(planeH, unsigned char, y, 0);
        unsigned char* s = &CV_IMAGE_ELEM(planeS, unsigned char, y, 0);
        unsigned char* v = &CV_IMAGE_ELEM(planeV, unsigned char, y, 0);
        unsigned char* m = &CV_IMAGE_ELEM(obsdetmask_bdil, unsigned char, y, 0);
        for (int x = 0; x < planeH->width * planeH->nChannels; x += planeH->nChannels) {
            int f = HSV_filter(h[x], s[x], v[x], m[x], threshold,
                               minDarknessValue, maxDarknessValue, midDarknessValue, hue, sat, val);
            if (f == 0) {        // non-floor
                ((uchar *)(i1->imageData + y * i1->widthStep))[x] = 0;
                ((uchar *)(i_ts->imageData + y * i_ts->widthStep))[x] = 0;
                ((uchar *)(i2->imageData + y * i2->widthStep))[x] = 0;
            } else if (f == 1) { // dark
                ((uchar *)(i1->imageData + y * i1->widthStep))[x] = 255;
                ((uchar *)(i_ts->imageData + y * i_ts->widthStep))[x] = 64;
                ((uchar *)(i2->imageData + y * i2->widthStep))[x] = 0;
            } else if (f == 2) {
                ((uchar *)(i_ts->imageData + y * i_ts->widthStep))[x] = 128;
                ((uchar *)(i1->imageData + y * i1->widthStep))[x] = 0;
                ((uchar *)(i2->imageData + y * i2->widthStep))[x] = 0;
            } else if (f == 3) {
                ((uchar *)(i_ts->imageData + y * i_ts->widthStep))[x] = 196;
                ((uchar *)(i1->imageData + y * i1->widthStep))[x] = 0;
                ((uchar *)(i2->imageData + y * i2->widthStep))[x] = 0;
            } else if (f == 4) { // bright
                ((uchar *)(i_ts->imageData + y * i_ts->widthStep))[x] = 255;
                ((uchar *)(i1->imageData + y * i1->widthStep))[x] = 0;
                ((uchar *)(i2->imageData + y * i2->widthStep))[x] = 255;
            }
        }
    }
    cvShowImage("Dark_Triscale", i_ts);
    cvWaitKey(3);

    // Blob extraction on the dark and bright masks
    blobs = CBlobResult(i1, NULL, 0);
    blobs1 = CBlobResult(i2, NULL, 0);
    blobs.Filter(blobs, B_INCLUDE, CBlobGetArea(), B_INSIDE, blobLowLimit, blobHighLimit);   // keep blobs between the min and max size
    blobs1.Filter(blobs1, B_INCLUDE, CBlobGetArea(), B_INSIDE, blobLowLimit, blobHighLimit);

    // Set up the output arrays
    xCent = new int[blobs.GetNumBlobs() + blobs1.GetNumBlobs()];
    yCent = new int[blobs.GetNumBlobs() + blobs1.GetNumBlobs()];
    valCent = new int[blobs.GetNumBlobs() + blobs1.GetNumBlobs()];
    ROS_INFO("size:%d ", blobs.GetNumBlobs() + blobs1.GetNumBlobs());

    double data;
    if (maxDark > 190) {
        data = blobs.GetNumBlobs() + blobs1.GetNumBlobs(); // total number of blobs
        cvWaitKey(3);
        for (int i = 0; i < blobs.GetNumBlobs(); i++) {
            blob = blobs.GetBlob(i); // cycle through each dark blob
            xCent[i] = getXCenter(blob);
            yCent[i] = getYCenter(blob);
            valCent[i] = 1;
            blob.FillBlob(local_copy, cvScalar(255, 0, 0)); // visual marker on the image, useful for testing
        }
        cvWaitKey(3);
        for (int i = 0; i < blobs1.GetNumBlobs(); i++) {
            blob = blobs1.GetBlob(i); // cycle through each bright blob
            xCent[blobs.GetNumBlobs() + i] = getXCenter(blob);
            yCent[blobs.GetNumBlobs() + i] = getYCenter(blob);
            valCent[blobs.GetNumBlobs() + i] = -1;
            blob.FillBlob(local_copy, cvScalar(0, 255, 0)); // visual marker on the image, useful for testing
        }
    } else {
        data = blobs.GetNumBlobs(); // total number of blobs
        cvWaitKey(3);
        for (int i = 0; i < blobs.GetNumBlobs(); i++) {
            blob = blobs.GetBlob(i); // cycle through each dark blob
            xCent[i] = getXCenter(blob);
            yCent[i] = getYCenter(blob);
            valCent[i] = 1;
            blob.FillBlob(local_copy, cvScalar(255, 0, 0)); // visual marker on the image, useful for testing
        }
    }

    cvShowImage("Dark_Detected", local_copy);
    //cv::imshow("View", cv_ptr->image);
    cv::waitKey(3);

    cvReleaseImage(&local_copy);
    cvReleaseImage(&imageSmooth);
    cvReleaseImage(&imageSuperSmooth);
    cvReleaseImage(&imageHSV);
    cvReleaseImage(&i1);
    cvReleaseImage(&i2);
    cvReleaseImage(&planeSmoothV);
    cvReleaseImage(&imageSmoothHSV);
    cvReleaseImage(&i_ts);
    cvReleaseImage(&planeH);
    cvReleaseImage(&planeS);
    cvReleaseImage(&planeV);
    cvReleaseImage(&planeH1);
    cvReleaseImage(&planeS1);
    cvReleaseImage(&planeV1);
    cvReleaseImage(&obsdetmask);
    cvReleaseImage(&obsdetmask_dil);
    cvReleaseImage(&obsdetmask_b);
    cvReleaseImage(&obsdetmask_bdil);

    return data; // number of blobs found
}
// starts the auto targeting sequence
void MainWindow::on_startstopbutton_clicked()
{
    shootingstopped = false;
    QImage* currimage = getQImage();
    n = currimage->width();
    k = currimage->height();

    // convertToFormat returns a temporary, so store it before taking its address
    QImage converted = currimage->convertToFormat(QImage::Format_RGB32);
    IplImage* curriplimage = Qimage2IplImage(&converted);
    IplImage* threshedimage = threshimage(curriplimage);

    CBlobResult blobs;
    CBlob* currentblob;
    blobs = CBlobResult(threshedimage, NULL, 0);
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 150);

    int j = blobs.GetNumBlobs();
    if (j == 0) {
        QMessageBox::information(this, "No Targets", "No Targets Found!");
        cvReleaseImage(&threshedimage);
        cvReleaseImage(&curriplimage);
        return;
    }

    CBlobGetXCenter XCenter;
    CBlobGetYCenter YCenter;
    for (int i = 0; i < blobs.GetNumBlobs(); i++) {
        tmptargetcenter = new targetcenter;
        currentblob = blobs.GetBlob(i);
        tmptargetcenter->x = XCenter(*currentblob);
        tmptargetcenter->y = YCenter(*currentblob);
        getangles(tmptargetcenter);
        targets.append(tmptargetcenter);
    }

    checkformissiles();
    ui->targetcountdisplay->display(targets.size());
    setupautobuttons();
    qApp->processEvents();
    ui->timeNumber->display(0);
    timeshooting.start(100);
    turr->initAngle();

    if (shootingstopped) {
        timeshooting.stop();
        targets.clear();
        return;
    }

    foreach (targetcenter* target, targets) {
        checkformissiles();
        qApp->processEvents();
        turr->setAngle(target->beta, target->betav);
        ui->shotcountdisplay->display(turr->currentmissilecount());
        if (shootingstopped) {
            timeshooting.stop();
            targets.clear();
            delete target;
            return;
        }
        ui->targetcountdisplay->display(ui->targetcountdisplay->value() - 1);
        qApp->processEvents();
        delete target;
    }
}
//==============================================================================
void PanTiltCameraClass::blobTracking(IplImage* hsv_mask, IplImage* pFour, IplImage* pImg)
{
    //--- Get blobs and filter them using the blob area
    CBlobResult blobs;
    CBlob *currentBlob;

    //--- Create a thresholded image and a display image
    IplImage* originalThr = cvCreateImage(cvGetSize(hsv_mask), IPL_DEPTH_8U, 1); // binary image
    IplImage* display = cvCreateImage(cvGetSize(hsv_mask), IPL_DEPTH_8U, 3);     // 3-channel image

    //--- Copy the original
    cvMerge(hsv_mask, hsv_mask, hsv_mask, NULL, display);
    //--- Make a copy for processing
    cvCopy(hsv_mask, originalThr);

    //--- Find blobs in the image
    int blobThreshold = 0;
    bool blobFindMoments = true;
    blobs = CBlobResult(originalThr, originalThr, blobThreshold, blobFindMoments);

    //--- Filter blobs according to the size constraint
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, this->minBlobSize);

    //--- Display the filtered blobs: copy the original in (for the background)
    cvMerge(originalThr, originalThr, originalThr, NULL, display);

    CvPoint pts[this->NUMBER_OF_CIRCLES];

    //--- Mark all the blobs
    for (int i = 0; i < blobs.GetNumBlobs(); i++) {
        currentBlob = blobs.GetBlob(i);
        currentBlob->FillBlob(display, CV_RGB(0, 0, 255));

        //--- Get the blob's center point
        CvPoint bcg;
        bcg.x = (int)(currentBlob->MinX() + ((currentBlob->MaxX() - currentBlob->MinX()) / 2));
        bcg.y = (int)(currentBlob->MinY() + ((currentBlob->MaxY() - currentBlob->MinY()) / 2));

        //--- Print the CG on the picture
        char blobtext[40];
        for (int k = 0; k < this->NUMBER_OF_CIRCLES; k++) {
            sprintf(blobtext, "%d", k + 1);
            TargetReticle(display, &pts[k], blobtext, 6, CV_RGB(255, 0, 0));
        }
    } // for each blob

    //--- Set the ROI in the pFour image and paste the display into it
    cvSetImageROI(pFour, cvRect(pImg->width, pImg->height + 80, pImg->width, pImg->height));
    cvCopy(display, pFour);
    cvResetImageROI(pFour); // reset the region of interest (it was set on pFour)

    // Clean up
    cvReleaseImage(&originalThr);
    cvReleaseImage(&display);
}
void iptask::markerDetect(void)
{
    IplImage *frame, *img_hsv, *img_proc, *new1;
    CvMemStorage * storage = cvCreateMemStorage(0);
    ros::NodeHandle n;
    ros::Publisher marker = n.advertise<ikat_ip_data::ip_marker_data>("marker_data", 3);
    ros::Rate looprate(5);
    int count = 0;
    CvSeq *contours, *final_contour;
    int total_con;
    double maxarea;
    marker_data * Data = (marker_data *)malloc(sizeof(marker_data));
    CBlobResult blobs;
    CBlob * currentblob;
    CvPoint2D32f vertices[4];
    //CvCapture * img_video = cvCaptureFromAVI("downward-pipe-15_56_17.avi");

    frame = cvQueryFrame(img);
    cvNamedWindow("Image Actual");
    cvNamedWindow("final Image");
    img_hsv = cvCreateImage(cvGetSize(frame), 8, 3);
    img_proc = cvCreateImage(cvGetSize(frame), 8, 1);
    new1 = cvCreateImage(cvGetSize(frame), 8, 1);

    while (ros::ok()) {
        ikat_ip_data::ip_marker_data msg;
        IplImage * img_con = cvCreateImage(cvGetSize(frame), 8, 1);
        frame = cvQueryFrame(img);
        if (!frame)
            break;
        cvShowImage("Image Actual", frame);

        // segment the marker color in HSV space
        cvCvtColor(frame, img_hsv, CV_RGB2HSV);
        cvInRangeS(img_hsv, cvScalar(100, 100, 100), cvScalar(120, 170, 255), img_proc);
        cvSmooth(img_proc, img_proc, CV_GAUSSIAN, 11, 11);
        cvErode(img_proc, img_proc);

        // keep only sizeable blobs and fill them solid
        blobs = CBlobResult(img_proc, NULL, 0);
        blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 75);
        for (int i = 0; i < blobs.GetNumBlobs(); i++) {
            currentblob = blobs.GetBlob(i);
            currentblob->FillBlob(img_proc, cvScalar(255));
        }

        // find the largest-area bounding box among the contours
        cvCanny(img_proc, img_proc, 10, 200);
        total_con = cvFindContours(img_proc, storage, &contours, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
        if (!contours || contours->total == 0) {
            cvReleaseImage(&img_con);
            continue;
        }
        final_contour = cvApproxPoly(contours, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 1, 1);
        maxarea = 0;
        cvZero(img_con);
        CvBox2D rect;
        while (final_contour) {
            rect = cvMinAreaRect2(final_contour, storage);
            if (rect.size.height * rect.size.width > maxarea) {
                Data->center.x = rect.center.x;
                Data->center.y = rect.center.y;
                Data->size.x = rect.size.width;
                Data->size.y = rect.size.height;
                Data->angle = rect.angle;
                maxarea = rect.size.height * rect.size.width;
                msg.Marker_data[0] = Data->center.x;
                msg.Marker_data[1] = Data->center.y;
                msg.Marker_data[2] = Data->angle;
            }
            final_contour = final_contour->h_next;
        }

        // draw the winning box
        cvBoxPoints(rect, vertices);
        cvLine(frame, cvPointFrom32f(vertices[0]), cvPointFrom32f(vertices[1]), cvScalarAll(255), 2);
        cvLine(frame, cvPointFrom32f(vertices[1]), cvPointFrom32f(vertices[2]), cvScalarAll(255), 2);
        cvLine(frame, cvPointFrom32f(vertices[2]), cvPointFrom32f(vertices[3]), cvScalarAll(255), 2);
        cvLine(frame, cvPointFrom32f(vertices[3]), cvPointFrom32f(vertices[0]), cvScalarAll(255), 2);

        ROS_INFO("center x :[%f]", msg.Marker_data[0]);
        ROS_INFO("center y :[%f]", msg.Marker_data[1]);
        ROS_INFO("angle : [%f]", msg.Marker_data[2]);
        marker.publish(msg);

        cvShowImage("final Image", frame);
        char c = cvWaitKey(33);
        cvReleaseImage(&img_con); // release the per-iteration image to avoid a leak
        if (c == 27)
            break;
        ros::spinOnce();
        ++count;
        looprate.sleep();
    }
    cvDestroyWindow("Image Actual");
    cvDestroyWindow("final Image");
    free(Data);
}
int main(int argc, char * argv[])
{
    vector<string> imgNames;
    vector<string> imgNamesMask;
    char strFrame[20];
    readImageSequenceFiles(imgNames, imgNamesMask);
    list<TrackLine> trackLineArr;

    // Read each original frame and its foreground mask.
    // You can adapt this to video input by adding a segmentation algorithm.
    for (unsigned int i = 40; i < imgNames.size() - 1; i++) {
        Mat frame = imread(imgNames[i]);
        Mat grayImg;
        cvtColor(frame, grayImg, CV_RGB2GRAY);
        Mat maskImage = imread(imgNamesMask[i], 0);

        // get blobs from the mask with 'cvblobslib' and filter them by area
        threshold(maskImage, maskImage, 81, 255, CV_THRESH_BINARY);
        medianBlur(maskImage, maskImage, 3);
        IplImage ipl_maskImage = maskImage;
        CBlobResult blobs = CBlobResult(&ipl_maskImage, NULL, 0);
        blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 30); // drop blobs smaller than a certain area

        list<CBlob *> remBlob;
        for (int k = 0; k < blobs.GetNumBlobs(); k++) {
            remBlob.push_back(blobs.GetBlob(k));
        }
        printf("%d\n", (int)trackLineArr.size());

        for (list<TrackLine>::iterator trackIter = trackLineArr.begin(); trackIter != trackLineArr.end(); ) {
            // Kalman prediction, giving kfRect
            Mat kfPrediction = (trackIter->kf).predict();
            Point kfPrePt((int)(kfPrediction.at<float>(0)), (int)(kfPrediction.at<float>(1)));
            Rect kfRect(kfPrePt.x - (trackIter->box).width / 2, kfPrePt.y - (trackIter->box).height / 2,
                        (trackIter->box).width, (trackIter->box).height);

            // compressive-tracking prediction, giving ctRect
            Rect ctRect(trackIter->box);
            float score = (trackIter->ct).predicition(grayImg, ctRect);
            rectangle(frame, kfRect, Scalar(0, 200, 0)); // green: Kalman prediction box
            rectangle(frame, ctRect, Scalar(0, 0, 200)); // red: CT prediction box

            // Merge the two predicted rectangles. If they share no area we assume the
            // CT tracker is wrong, since the Kalman filter models the physical movement.
            float areaScale = (float)(sqrt((kfRect & ctRect).area() * 1.0 / kfRect.area()));
            Point movePoint((int)((ctRect.x - kfRect.x) * areaScale), (int)((ctRect.y - kfRect.y) * areaScale));
            Rect unionPreRect = kfRect + movePoint;

            // compute the object box from the detection blobs overlapping the prediction
            Rect objRect;
            int j = 0;
            for (list<CBlob *>::iterator blobIter = remBlob.begin(); blobIter != remBlob.end(); ) {
                Rect detRect((*blobIter)->GetBoundingBox());
                float detArea = (float)((*blobIter)->Area());
                if ((unionPreRect & detRect).area() > 0) {
                    if (j++ == 0) objRect = detRect;
                    else objRect = objRect | detRect;
                    blobIter = remBlob.erase(blobIter);
                } else
                    blobIter++;
            }

            // re-center the detection box while keeping the predicted size
            float objArea = (float)(objRect.area());
            objRect = Rect((int)(objRect.x + objRect.width / 2.0 - unionPreRect.width / 2.0),
                           (int)(objRect.y + objRect.height / 2.0 - unionPreRect.height / 2.0),
                           unionPreRect.width, unionPreRect.height);
            float detAreaScale = (float)(sqrt(objArea * 1.0 / unionPreRect.area()));
            if (detAreaScale > 1.0)
                detAreaScale = 1.0;
            Point detMovePoint((int)((objRect.x - unionPreRect.x) * detAreaScale),
                               (int)((objRect.y - unionPreRect.y) * detAreaScale));
            Rect unionCorrRect = unionPreRect + detMovePoint;

            if (objArea > 0) {
                // detection found: correct both trackers
                trackIter->box = unionCorrRect;
                rectangle(frame, unionCorrRect, Scalar(200, 0, 0), 1);

                // Kalman correction
                Mat_<float> measurement(2, 1);
                measurement(0) = (float)((trackIter->box).x + (trackIter->box).width / 2.0);
                measurement(1) = (float)((trackIter->box).y + (trackIter->box).height / 2.0);
                (trackIter->kf).correct(measurement);

                // CT update
                (trackIter->ct).update(grayImg, trackIter->box);
                trackIter++;
            } else {
                // otherwise we assume the track was missed; drop it after 5 misses
                if ((trackIter->miss)++ == 5)
                    trackIter = trackLineArr.erase(trackIter);
                else
                    trackIter++;
            }
        }

        // !!!
        // A simple way to get an initial box for a new track: union all remaining
        // detection boxes and expand the result. This is not a good idea when two
        // objects appear at the same time, since they produce a single init box,
        // so consider replacing it with a proper initialization method.
        // !!!
        Rect tmprect;
        int u = 0;
        for (list<CBlob *>::iterator blobIter = remBlob.begin(); blobIter != remBlob.end(); blobIter++) {
            if (u++ == 0)
                tmprect = Rect((*blobIter)->GetBoundingBox());
            else
                tmprect = tmprect | Rect((*blobIter)->GetBoundingBox());
        }
        if (tmprect.area() > 0)
            tmprect = Rect(tmprect.x - 5, tmprect.y - 8, tmprect.width + 10, tmprect.height + 16);

        if (tmprect.area() > 0 && tmprect.x != 0 && tmprect.y != 0 &&
            (tmprect.x + tmprect.width) != 319 && (tmprect.y + tmprect.height) != 239) {
            TrackLine track;
            track.kf.transitionMatrix = *(Mat_<float>(4, 4) << 1,0,1,0, 0,1,0,1, 0,0,1,0, 0,0,0,1);
            track.kf.measurementMatrix = *(Mat_<float>(2, 4) << 1,0,0,0, 0,1,0,0);
            setIdentity(track.kf.processNoiseCov, Scalar::all(1e-4));
            setIdentity(track.kf.measurementNoiseCov, Scalar::all(1e-1));
            setIdentity(track.kf.errorCovPost, Scalar::all(.1));

            // Kalman init
            track.kf.statePre.at<float>(0) = (float)(tmprect.x + tmprect.width / 2.0);
            track.kf.statePre.at<float>(1) = (float)(tmprect.y + tmprect.height / 2.0);
            track.kf.statePre.at<float>(2) = 0;
            track.kf.statePre.at<float>(3) = 0;
            track.kf.statePost.at<float>(0) = (float)(tmprect.x + tmprect.width / 2.0);
            track.kf.statePost.at<float>(1) = (float)(tmprect.y + tmprect.height / 2.0);
            track.kf.statePost.at<float>(2) = 0;
            track.kf.statePost.at<float>(3) = 0;

            // CT init
            track.ct.init(grayImg, tmprect);

            rectangle(frame, tmprect, Scalar(255, 0, 0), 2, 7);
            track.box = tmprect;
            trackLineArr.push_back(track);
        }

        sprintf(strFrame, "#%d ", i);
        putText(frame, strFrame, cvPoint(0, 20), 2, 1, CV_RGB(25, 200, 25));

        char outstr[20];
        //if ((i >= 450 && i <= 600) || (i >= 930 && i <= 960) || (i >= 1420 && i <= 1450))
        {
            sprintf(outstr, "output\\%d.png", i);
            string outstring(outstr);
            imwrite(outstring, frame);
            sprintf(outstr, "output\\mask_%d.png", i);
            string outstring2(outstr);
            imwrite(outstring2, maskImage);
        }
        //imshow("ORG", frame);
        //imshow("mask", maskImage);
        //waitKey(1);
    }
    return 0;
}
int main()
{
    CvPoint pt1, pt2;
    CvCapture* capture = cvCaptureFromCAM(CV_CAP_ANY);
    if (!capture) {
        fprintf(stderr, "ERROR: capture is NULL \n");
        getchar();
        return -1;
    }
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 144);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 216);

    // Create a window in which the captured images will be presented
    cvNamedWindow("mywindow", CV_WINDOW_AUTOSIZE);

    // Show the image captured from the camera in the window and repeat
    while (1) {
        // Get one frame
        IplImage* frame = cvQueryFrame(capture);
        if (!frame) {
            fprintf(stderr, "ERROR: frame is null...\n");
            getchar();
            break;
        }

        int modfheight = frame->height;
        int modfwidth = frame->width;

        // create a modified frame with 1/4th the original size
        IplImage* modframe = cvCreateImage(cvSize((int)(modfwidth/4), (int)(modfheight/4)), frame->depth, frame->nChannels); // cvCreateImage(size, depth, channels)
        cvResize(frame, modframe, CV_INTER_LINEAR);

        // create an HSV (Hue, Saturation, Value) frame
        IplImage* hsvframe = cvCreateImage(cvGetSize(modframe), 8, 3);
        cvCvtColor(modframe, hsvframe, CV_BGR2HSV); // cvCvtColor(input, output, method)

        // create a frame within the threshold
        IplImage* threshframe = cvCreateImage(cvGetSize(hsvframe), 8, 1);
        cvInRangeS(hsvframe, cvScalar(30, 25, 150), cvScalar(60, 60, 220), threshframe); // cvInRangeS(input, cvScalar(min), cvScalar(max), output)

        // create a dilated image
        IplImage* dilframe = cvCreateImage(cvGetSize(threshframe), 8, 1);
        cvDilate(threshframe, dilframe, NULL, 2); // cvDilate(input, output, mask, iterations)

        CBlobResult blobs;
        blobs = CBlobResult(dilframe, NULL, 0); // CBlobResult(input, mask, threshold): finds all white regions
        blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 50); // drop blobs whose area is under 50 pixels

        CBlob biggestblob;
        blobs.GetNthBlob(CBlobGetArea(), 0, biggestblob); // GetNthBlob(criteria, rank, output): rank 0 is the largest by area

        // get the corner points of the bounding rectangle, scaled back to the full-size frame
        pt1.x = biggestblob.MinX() * 4;
        pt1.y = biggestblob.MinY() * 4;
        pt2.x = biggestblob.MaxX() * 4;
        pt2.y = biggestblob.MaxY() * 4;
        cvRectangle(frame, pt1, pt2, cvScalar(255, 0, 0), 1, 8, 0); // draw a rectangle around the biggest blob

        cvShowImage("mywindow", frame); // show the output image
        // Do not release the frame! (it is owned by the capture)
        // ...but do release the per-iteration images to avoid leaks
        cvReleaseImage(&modframe);
        cvReleaseImage(&hsvframe);
        cvReleaseImage(&threshframe);
        cvReleaseImage(&dilframe);

        // If ESC key pressed, Key=0x10001B under OpenCV 0.9.7 (Linux version),
        // remove higher bits using the AND operator
        if ((cvWaitKey(10) & 255) == 27)
            break;
    }

    // Release the capture device housekeeping
    cvReleaseCapture(&capture);
    cvDestroyWindow("mywindow");
    return 0;
}
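// As the comments above note, GetNthBlob with the CBlobGetArea criterion ranks
// blobs by that property, with rank 0 the largest; givedepth() earlier uses
// ranks 0 and 1 the same way to pick out two hands. A short sketch of grabbing
// the two largest blobs from an existing CBlobResult:
CBlob largest, second;
blobs.GetNthBlob(CBlobGetArea(), 0, largest);
blobs.GetNthBlob(CBlobGetArea(), 1, second);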
int main()
{
    CBlobResult blobs;
    CBlob *currentBlob;
    CvPoint pt1, pt2;
    CvRect cvRect;
    int key = 0;
    IplImage* frame = 0;

    // Initialize capturing live feed from a video file or camera
    CvCapture* capture = cvCaptureFromFile("MOV.MPG");

    // Can't get device? Complain and quit
    if (!capture) {
        printf("Could not initialize capturing...\n");
        return -1;
    }

    // Get the frames per second (only valid once the capture is open)
    int fps = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);

    // Windows used to display the input video with bounding rectangles
    // and the thresholded video
    cvNamedWindow("video");
    cvNamedWindow("thresh");

    // An infinite loop
    while (key != 'x') {
        // If we couldn't grab a frame... quit
        if (!(frame = cvQueryFrame(capture)))
            break;

        // Get the object's thresholded image (blue = white, rest = black)
        IplImage* imgThresh = GetThresholdedImageHSV(frame);

        // Detect the white blobs from the black background
        blobs = CBlobResult(imgThresh, NULL, 0);

        // Exclude white blobs smaller than the given value (10);
        // the bigger the last parameter, the bigger the blobs need
        // to be for inclusion
        blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 10);

        // Attach a bounding rectangle for each blob discovered
        int num_blobs = blobs.GetNumBlobs();
        for (int i = 0; i < num_blobs; i++) {
            currentBlob = blobs.GetBlob(i);
            cvRect = currentBlob->GetBoundingBox();
            pt1.x = cvRect.x;
            pt1.y = cvRect.y;
            pt2.x = cvRect.x + cvRect.width;
            pt2.y = cvRect.y + cvRect.height;
            // Attach a bounding rect to the blob in the original video input
            cvRectangle(frame, pt1, pt2, cvScalar(0, 0, 0, 0), 1, 8, 0);
        }

        // Show the black-and-white and original images
        cvShowImage("thresh", imgThresh);
        cvShowImage("video", frame);

        // Optional - used to slow down the display of frames
        key = cvWaitKey(2000 / fps);

        // Prevent memory leaks by releasing the thresholded image
        cvReleaseImage(&imgThresh);
    }

    // We're through with using the capture.
    cvReleaseCapture(&capture);
    return 0;
}
/*
  arg1: Width of each frame
  arg2: Height of each frame
  arg3: Target frames per second of the program
  arg4: Maximum number of blobs to track. Each blob MAY correspond to a person in front of the camera
*/
int main(int argc, char* argv[])
{
    if (argc < 5) {
        cout << "Too few arguments to the program. Exiting...\n";
        return 0;
    }

    int width, height, fps, numberOfBlobs;
    try {
        // Read the arguments
        width = atoi(argv[1]);
        height = atoi(argv[2]);
        fps = atoi(argv[3]);
        numberOfBlobs = atoi(argv[4]);
        // Done reading arguments
    } catch (...) {
        cout << "One or more arguments are invalid! Exiting...\n";
        return 0;
    }

    /*
    int width = 320;
    int height = 240;
    int fps = 10;
    int numberOfBlobs = 2;
    */

    tempImageV4L = cvCreateImage(cvSize(width, height), 8, 3);
    frameNumber = 0;

    // Initialise the cameras
    rightCamera = new Camera("/dev/video0", width, height, fps);
    leftCamera = new Camera("/dev/video1", width, height, fps);
    //leftCamera = rightCamera; // If only one camera is available, uncomment this line and comment out the line above.

    // Waste some frames so the cameras reach a steady state
    WasteNFrames(10);

    // Capture the background
    backImageRight = GetNextCameraShot(rightCamera);
    backImageLeft = GetNextCameraShot(leftCamera);
    frameNumber++;
    cvtColor(backImageRight, backImageRight, CV_BGR2HSV);
    cvtColor(backImageLeft, backImageLeft, CV_BGR2HSV);
    // Done capturing the background

    // General stuff
    Mat motionImageRight(backImageRight.rows, backImageRight.cols, CV_8UC1);
    Mat motionImageLeft(backImageLeft.rows, backImageLeft.cols, CV_8UC1);
    Mat HSVImageRight, HSVImageLeft;
    Mat displayImageRight, displayImageLeft;

    while (1) // The infinite loop
    {
        // Get the camera shots
        rightImage = GetNextCameraShot(rightCamera);
        leftImage = GetNextCameraShot(leftCamera);
        frameNumber++;

        // Compute the motion images
        HSVImageRight = rightImage.clone();
        cvtColor(HSVImageRight, HSVImageRight, CV_BGR2HSV);
        CompareWithBackground(HSVImageRight, backImageRight, motionImageRight);
        medianBlur(motionImageRight, motionImageRight, 3);

        HSVImageLeft = leftImage.clone();
        cvtColor(HSVImageLeft, HSVImageLeft, CV_BGR2HSV);
        CompareWithBackground(HSVImageLeft, backImageLeft, motionImageLeft);
        medianBlur(motionImageLeft, motionImageLeft, 3);
        // Done computing the motion images

        cout << "\nFor frame #" << frameNumber << " :\n";

        // Get the blobs
        IplImage imageblobPixels = motionImageRight;
        CBlobResult blobs;
        blobs = CBlobResult(&imageblobPixels, NULL, 0); // Use a black background color.
        int minArea = 100 / ((640 / width) * (640 / width));
        blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minArea);
        int foundBlobs = blobs.GetNumBlobs();
        // Done getting the blobs

        cout << "Found " << foundBlobs << " motion blobs\n";

        // Create copies of the original images for annotating and displaying
        displayImageRight = rightImage.clone();
        displayImageLeft = leftImage.clone();

        // Cycle through the blobs
        for (int blobIndex = 0; blobIndex < blobs.GetNumBlobs() && blobIndex < numberOfBlobs; blobIndex++) {
            cout << "Blob #" << blobIndex << " : ";

            // Get the blob details
            CBlob * blob = blobs.GetBlob(blobIndex);
            int x = blob->GetBoundingBox().x;
            int y = blob->GetBoundingBox().y;
            int w = blob->GetBoundingBox().width;
            int h = blob->GetBoundingBox().height;

            int sep = 0;

            // The point for which we want to find depth
            PixPoint inP = {x + w/2, y + h/2}, oP = {0, 0};
            cout << "inPoint = {" << inP.x << ", " << inP.y << "} ";

            // Initialise the rectangle in which the corresponding point probably lies
            Rectangle rect;
            rect.location.x = -1;
            rect.location.y = inP.y - 5;
            rect.size.x = rightImage.cols;
            rect.size.y = 11;

            // Find the corresponding point and calculate the separation
            oP = PointCorresponder::correspondPoint(rightImage, leftImage, inP, rect, motionImageLeft);
            sep = inP.x - oP.x;
            cout << "foundPoint = {" << oP.x << ", " << oP.y << "} ";

            // Just for visual presentation
            DrawRect(displayImageRight, x, y, w, h);
            cv::circle(displayImageRight, Point(inP.x, inP.y), 10, Scalar(0), 3);
            cv::circle(displayImageLeft, Point(oP.x, oP.y), 10, Scalar(0), 3);

            // The value we were looking for
            cout << "separation = " << sep << "\n";
        }

        // Show the windows
        cv::namedWindow("RIGHT");
        cv::namedWindow("thresh");
        cv::namedWindow("LEFT");
        imshow("LEFT", displayImageLeft);
        imshow("RIGHT", displayImageRight);
        imshow("thresh", motionImageRight);

        // The loop terminating condition
        if (waitKey(27) >= 0)
            break;
    }
    return 0;
}
bool findBiggestBlobImage(IplImage* img, int color, IplImage* &output)
{
    CBlobResult blobs;
    CBlob *currentBlob;
    blobs = CBlobResult(img, NULL, 0);
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, m_minBlobSize);

    double biggestArea = m_minBlobSize;
    int biggestBlob = -1;
    for (int i = 0; i < blobs.GetNumBlobs(); i++) {
        currentBlob = blobs.GetBlob(i);
        double blobArea = currentBlob->Area();
        if (blobArea > biggestArea) {
            biggestBlob = i;
            biggestArea = blobArea;
        }
    }

    if (biggestBlob >= 0) {
        int x = (int) blobs.GetBlob(biggestBlob)->MinX();
        int y = (int) blobs.GetBlob(biggestBlob)->MinY();
        int width  = (int) blobs.GetBlob(biggestBlob)->MaxX() - x;
        int height = (int) blobs.GetBlob(biggestBlob)->MaxY() - y;

        IplImage* temp = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
        IplImage* temp2 = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
        IplImage* result = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);

        cvZero(temp); // start from an empty canvas before filling the blob
        blobs.GetBlob(biggestBlob)->FillBlob(temp, cvScalar(255), x, y);
        cvSetImageROI(temp, cvRect(x, y, width, height));
        cvCopy(temp, temp2);

        // recolor the filled blob pixels with the requested gray level
        // (raw indexing assumes no row padding in temp2/result)
        uchar* tempData = (uchar *)(temp2->imageData);
        uchar* resultData = (uchar *)(result->imageData);
        for (int j = 0; j < width * height; j++) {
            if (tempData[j] == 255)
                resultData[j] = color;
            else
                resultData[j] = 0;
        }

        cvResize(result, output);
        cvReleaseImage(&temp);
        cvReleaseImage(&temp2);
        cvReleaseImage(&result);
        return true;
    }
    else
        return false;
}
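// A hypothetical caller for findBiggestBlobImage(). The output image must be
// allocated by the caller, since the function finishes with cvResize into it;
// the 64x64 size and gray level 200 here are illustrative only:
IplImage* out = cvCreateImage(cvSize(64, 64), IPL_DEPTH_8U, 1);
if (findBiggestBlobImage(mask, 200, out))
    cvShowImage("biggest blob", out);
cvReleaseImage(&out);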
int main()
{
    CvPoint pt1, pt2;
    CvPoint cir_center;
    CvPoint frame_center;
    CvPoint A, B, C, D;
    CvPoint temp;
    double angle, spinsize;
    int cir_radius = 1;
    int frame_width = 160, frame_height = 120;

    CvCapture* capture = cvCaptureFromCAM(CV_CAP_ANY);
    if (!capture) {
        fprintf(stderr, "ERROR: capture is NULL \n");
        getchar();
        return -1;
    }
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, frame_width); // 160x120
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, frame_height);
    //cvSetCaptureProperty(capture, CV_CAP_PROP_FPS, 10);
    //cvSetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES, 5);

    // Create a window in which the captured images will be presented
    cvNamedWindow("mywindow", CV_WINDOW_AUTOSIZE);

    // Show the image captured from the camera in the window and repeat
    while (1) {
        // Get one frame
        IplImage* frame = cvQueryFrame(capture);
        if (!frame) {
            fprintf(stderr, "ERROR: frame is null...\n");
            getchar();
            break;
        }

        int modfheight = frame->height;
        int modfwidth = frame->width;

        // create a modified frame with 1/4th the original size
        IplImage* modframe = cvCreateImage(cvSize((int)(modfwidth/4), (int)(modfheight/4)), frame->depth, frame->nChannels);
        cvResize(frame, modframe, CV_INTER_LINEAR);

        // create an HSV (Hue, Saturation, Value) frame
        IplImage* hsvframe = cvCreateImage(cvGetSize(modframe), 8, 3);
        cvCvtColor(modframe, hsvframe, CV_BGR2HSV);

        // create a frame within the threshold
        IplImage* threshframe = cvCreateImage(cvGetSize(hsvframe), 8, 1);
        cvInRangeS(hsvframe, cvScalar(15, 100, 100), cvScalar(60, 220, 220), threshframe);

        // create a dilated image
        IplImage* dilframe = cvCreateImage(cvGetSize(threshframe), 8, 1);
        cvDilate(threshframe, dilframe, NULL, 2);

        CBlobResult blobs;
        blobs = CBlobResult(dilframe, NULL, 0); // finds all white regions
        blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 50); // drop blobs under 50 pixels

        CBlob biggestblob;
        blobs.GetNthBlob(CBlobGetArea(), 0, biggestblob); // rank 0 is the largest blob by area

        // corner points of the bounding rectangle, scaled back to the full-size frame
        pt1.x = biggestblob.MinX() * 4;
        pt1.y = biggestblob.MinY() * 4;
        pt2.x = biggestblob.MaxX() * 4;
        pt2.y = biggestblob.MaxY() * 4;
        cir_center.x = (pt1.x + pt2.x) / 2;
        cir_center.y = (pt1.y + pt2.y) / 2;
        frame_center.x = frame_width / 2;
        frame_center.y = frame_height / 2;
        A.x = frame_center.x - 4; A.y = frame_center.y;
        B.x = frame_center.x + 4; B.y = frame_center.y;
        C.x = frame_center.x;     C.y = frame_center.y - 4;
        D.x = frame_center.x;     D.y = frame_center.y + 4;

        cvRectangle(frame, pt1, pt2, cvScalar(255, 0, 0), 1, 8, 0); // rectangle around the biggest blob
        cvCircle(frame, cir_center, cir_radius, cvScalar(0, 255, 255), 1, 8, 0); // center point of the rectangle
        cvLine(frame, A, B, cvScalar(255, 0, 255), 2, 8, 0);
        cvLine(frame, C, D, cvScalar(255, 0, 255), 2, 8, 0);

        // draw an arrow from the frame center towards the blob center
        if (cir_center.x != 0 && cir_center.y != 0) {
            spinsize = sqrt((cir_center.x - frame_center.x) * (cir_center.x - frame_center.x)
                          + (cir_center.y - frame_center.y) * (cir_center.y - frame_center.y));
            angle = atan2((double)cir_center.y - frame_center.y, (double)cir_center.x - frame_center.x);

            temp.x = (int)(frame_center.x + spinsize / 5 * cos(angle + 3.1416 / 4));
            temp.y = (int)(frame_center.y + spinsize / 5 * sin(angle + 3.1416 / 4));
            cvLine(frame, temp, frame_center, cvScalar(0, 255, 0), 1, 8, 0);

            temp.x = (int)(frame_center.x + spinsize / 5 * cos(angle - 3.1416 / 4));
            temp.y = (int)(frame_center.y + spinsize / 5 * sin(angle - 3.1416 / 4));
            cvLine(frame, temp, frame_center, cvScalar(0, 255, 0), 1, 8, 0);

            cvLine(frame, cir_center, frame_center, cvScalar(0, 255, 0), 1, 8, 0);
            //cvCircle(frame, frame_center, cir_radius, cvScalar(0,255,255), 2, 8, 0);
        }

        cvShowImage("mywindow", frame); // show the output image
        // Do not release the frame!

        // If ESC key pressed, Key=0x10001B under OpenCV 0.9.7 (Linux version),
        // remove higher bits using the AND operator
        if ((cvWaitKey(10) & 255) == 27)
            break;
    }

    // Release the capture device housekeeping
    cvReleaseCapture(&capture);
    cvDestroyWindow("mywindow");
    return 0;
}
void Auvsi_Recognize::extractShape(void)
{
    typedef cv::Vec<T, 1> VT;

    // Reduce the input to two colors
    cv::Mat reducedColors = doClustering<T>(_image, 2);
    cv::Mat grayScaled, binary;

    // Make the output grayscale
    grayScaled = convertToGray(reducedColors);
    //cv::cvtColor(reducedColors, grayScaled, CV_RGB2GRAY);

    // Make it binary
    double min, max;
    cv::minMaxLoc(grayScaled, &min, &max);
    cv::threshold(grayScaled, binary, min, 1.0, cv::THRESH_BINARY);

    // Ensure that the background is black and the shape white
    if (binary.at<VT>(0, 0)[0] > 0.0f)
        cv::threshold(grayScaled, binary, min, 1.0, cv::THRESH_BINARY_INV);
    binary.convertTo(binary, CV_8U, 255.0f);

    // Fill in all black regions smaller than the largest black region with white
    CBlobResult blobs;
    CBlob * currentBlob;
    IplImage binaryIpl = binary;
    blobs = CBlobResult(&binaryIpl, NULL, 255);

    // Get the area of the biggest blob
    CBlob biggestBlob;
    blobs.GetNthBlob(CBlobGetArea(), 0, biggestBlob);

    // Remove all blobs of smaller area
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, biggestBlob.Area());
    for (int i = 0; i < blobs.GetNumBlobs(); i++) {
        currentBlob = blobs.GetBlob(i);
        currentBlob->FillBlob(&binaryIpl, cvScalar(255));
    }

    // Fill in all small white regions with black
    blobs = CBlobResult(&binaryIpl, NULL, 0);
    blobs.GetNthBlob(CBlobGetArea(), 0, biggestBlob);
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, biggestBlob.Area());
    for (int i = 0; i < blobs.GetNumBlobs(); i++) {
        currentBlob = blobs.GetBlob(i);
        currentBlob->FillBlob(&binaryIpl, cvScalar(0));
    }

    // Keep only the biggest blob
    binary = cv::Scalar(0);
    biggestBlob.FillBlob(&binaryIpl, cvScalar(255));

    _shape = binary;
}
int main(int argc, char *argv[])
{
    CvCapture* capture = cvCreateFileCapture( "recording_01.avi" );

    handOrientation rightOrientationLast = NONE, leftOrientationLast = NONE;
    handOrientation rightOrientationCur = NONE, leftOrientationCur = NONE;

    cvNamedWindow("Skin Blobs", CV_WINDOW_AUTOSIZE);

    while(1)
    {
        Mat imageBGR = cvQueryFrame(capture);
        if(imageBGR.empty()) break;

        // Convert the image to HSV colors.
        Mat imageHSV = Mat(imageBGR.size(), CV_8UC3);
        cvtColor(imageBGR, imageHSV, CV_BGR2HSV);

        std::vector<Mat> channels(3);
        split(imageHSV, channels);
        Mat planeH = channels[0];
        Mat planeS = channels[1];
        Mat planeV = channels[2];

        // Detect which pixels in each of the H, S and V channels are probably skin pixels.
        threshold(channels[0], channels[0], 150, UCHAR_MAX, CV_THRESH_BINARY_INV); // alternative: 18
        threshold(channels[1], channels[1], 60, UCHAR_MAX, CV_THRESH_BINARY);      // alternative: 50
        threshold(channels[2], channels[2], 170, UCHAR_MAX, CV_THRESH_BINARY);     // alternative: 80

        // Combine all 3 thresholded color components, so that an output pixel will only
        // be white if the H, S and V pixels were also white.
        Mat imageSkinPixels = Mat(imageBGR.size(), CV_8UC1); // Greyscale output image.
        bitwise_and(channels[0], channels[1], imageSkinPixels);     // imageSkin = H AND S.
        bitwise_and(imageSkinPixels, channels[2], imageSkinPixels); // imageSkin = H AND S AND V.

        IplImage ipl_imageSkinPixels = imageSkinPixels;

        // Find blobs in the image.
        CBlobResult blobs;
        blobs = CBlobResult(&ipl_imageSkinPixels, NULL, 0); // Use a black background color.

        // Ignore the blobs whose area is less than minArea.
        blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minBlobArea);

        srand(time(NULL));

        // Show the large blobs, each filled with a random color and framed in red.
        IplImage* imageSkinBlobs = cvCreateImage(imageBGR.size(), 8, 3); // Colored output.
        for(int i = 0; i < blobs.GetNumBlobs(); i++)
        {
            CBlob *currentBlob = blobs.GetBlob(i);
            currentBlob->FillBlob(imageSkinBlobs, CV_RGB(rand()%255, rand()%255, rand()%255));
            cvDrawRect(imageSkinBlobs,
                cvPoint(currentBlob->GetBoundingBox().x,
                        currentBlob->GetBoundingBox().y),
                cvPoint(currentBlob->GetBoundingBox().x + currentBlob->GetBoundingBox().width,
                        currentBlob->GetBoundingBox().y + currentBlob->GetBoundingBox().height),
                cvScalar(0,0,255), 2); // draw the bounding boxes
        }
        cvShowImage("Skin Blobs", imageSkinBlobs);

        // Gestures
        if(blobs.GetNumBlobs() == 0){
            // picture empty
        }else if(blobs.GetNumBlobs() == 1){
            // head detected
        }else if(blobs.GetNumBlobs() == 2 || blobs.GetNumBlobs() == 3){
            // head + one hand || head + two hands
            CvRect rect[3];
            int indexHead = -1, indexHandLeft = -1, indexHandRight = -1;

            // Get the bounding boxes
            for(int i = 0; i < blobs.GetNumBlobs(); i++){
                rect[i] = blobs.GetBlob(i)->GetBoundingBox();
            }

            // Detect head and hand indexes
            if(blobs.GetNumBlobs() == 2){
                // One hand: the head is the blob whose center is higher up (smaller y).
                int indexHand = -1;
                if(getCenterPoint(rect[0]).y < getCenterPoint(rect[1]).y){
                    indexHead = 0; indexHand = 1;
                }else{
                    indexHead = 1; indexHand = 0;
                }
                if(getHandside(rect[indexHead], rect[indexHand]) == LEFT){
                    indexHandLeft = indexHand;
                    indexHandRight = -1;
                }else{ // right hand
                    indexHandLeft = -1;
                    indexHandRight = indexHand;
                }
            }else{
                // Two hands: the head is the blob whose center is the highest of the three.
                int indexHand1 = -1;
                int indexHand2 = -1;
                if(getCenterPoint(rect[0]).y < getCenterPoint(rect[1]).y &&
                   getCenterPoint(rect[0]).y < getCenterPoint(rect[2]).y){
                    indexHead = 0; indexHand1 = 1; indexHand2 = 2;
                }else if(getCenterPoint(rect[1]).y < getCenterPoint(rect[0]).y &&
                         getCenterPoint(rect[1]).y < getCenterPoint(rect[2]).y){
                    indexHead = 1; indexHand1 = 0; indexHand2 = 2;
                }else{
                    indexHead = 2; indexHand1 = 0; indexHand2 = 1;
                }
                if(getHandside(rect[indexHead], rect[indexHand1]) == LEFT){
                    indexHandLeft = indexHand1;
                    indexHandRight = indexHand2;
                }else{
                    indexHandLeft = indexHand2;
                    indexHandRight = indexHand1;
                }
            }

            // Follow the right hand
            if(indexHandRight != -1){
                if(isMoving(handRight)){
                    std::cout << "hand moving" << endl;
                    handRight.centerPrev = handRight.centerCurr;
                    handRight.centerCurr = getCenterPoint(rect[indexHandRight]);
                }else{
                    std::cout << "hand not moving" << endl;
                    if(handRight.centerInit.y != 0 &&
                       abs(handRight.centerInit.y - handRight.centerCurr.y) > 20){
                        if(handRight.centerInit.y < handRight.centerCurr.y){
                            std::cout << " hand moved down" << endl;
                        }else{
                            std::cout << " hand moved up" << endl;
                        }
                    }
                    handRight.centerInit = getCenterPoint(rect[indexHandRight]);
                    handRight.centerPrev = handRight.centerCurr;
                    handRight.centerCurr = getCenterPoint(rect[indexHandRight]);
                }
            }

            // Get the orientations from the hand rects
            leftOrientationCur = (indexHandLeft != -1) ? getOrientationOfRect(rect[indexHandLeft]) : NONE;
            rightOrientationCur = (indexHandRight != -1) ? getOrientationOfRect(rect[indexHandRight]) : NONE;

            /* Check change of the left hand
            switch(detectHandStateChange(leftOrientationLast, leftOrientationCur)){
                case PORTRAIT_TO_LANDSCAPE: handleGestures(LEFT_FLIP_DOWN); break;
                case LANDSCAPE_TO_PORTRAIT: handleGestures(LEFT_FLIP_UP); break;
                case NOCHANGE:
                default: break;
            }
            // Check change of the right hand
            switch(detectHandStateChange(rightOrientationLast, rightOrientationCur)){
                case PORTRAIT_TO_LANDSCAPE: handleGestures(RIGHT_FLIP_DOWN); break;
                case LANDSCAPE_TO_PORTRAIT: handleGestures(RIGHT_FLIP_UP); break;
                case NOCHANGE:
                default: break;
            } */
        }else if(blobs.GetNumBlobs() > 3){
            // too many blobs to interpret
            cout << "too much information" << endl;
        }

        leftOrientationLast = leftOrientationCur;
        rightOrientationLast = rightOrientationCur;

        // Free the per-frame blob image (the Mats release themselves).
        cvReleaseImage(&imageSkinBlobs);

        // If ESC is pressed then exit the loop.
        if((cvWaitKey(33) & 255) == 27) break;
    }
    cvWaitKey(0);
    return 0;
}
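The three per-channel threshold calls in main() implement a box test in HSV space. As a sketch, the same test collapses into a single cv::inRange() call; the bounds below mirror the thresholds above (H <= 150, S > 60, V > 170) and are scene-specific assumptions, and skinMask is a hypothetical helper name.

#include <opencv2/opencv.hpp>

// Return a CV_8UC1 mask: 255 where all three HSV channels pass the box test.
cv::Mat skinMask(const cv::Mat &imageBGR)
{
    cv::Mat hsv, mask;
    cv::cvtColor(imageBGR, hsv, CV_BGR2HSV);
    // inRange is inclusive on both ends, so H in [0,150], S in [61,255], V in [171,255]
    cv::inRange(hsv, cv::Scalar(0, 61, 171), cv::Scalar(150, 255, 255), mask);
    return mask;
}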
void blobbing( IplImage *hi, char * win1, char * win2, int check )
{
    cvCvtColor(hi, hi2, CV_BGR2HSV);
    uchar *itemp = (uchar *)(hi2->imageData);
    uchar *itemp1 = (uchar *)(hitemp->imageData);

    // Binary conversion: pixels inside the HSV range become black, the rest white
    for(int i = 0; i < hi2->height; i++){
        for(int j = 0; j < hi2->width; j++){
            if( (itemp[i*hi2->widthStep + j*hi2->nChannels]   < hh) &&
                (itemp[i*hi2->widthStep + j*hi2->nChannels]   > hl) &&
                (itemp[i*hi2->widthStep + j*hi2->nChannels+1] < sh) &&
                (itemp[i*hi2->widthStep + j*hi2->nChannels+1] > sl) &&
                (itemp[i*hi2->widthStep + j*hi2->nChannels+2] < vh) &&
                (itemp[i*hi2->widthStep + j*hi2->nChannels+2] > vl) ) // previous 124
            {
                itemp1[i*hitemp->widthStep + j] = 0; // dark regions black, rest white
            }
            else
                itemp1[i*hitemp->widthStep + j] = 255;
        }
    }

    cvErode( hitemp, hitemp1, NULL, 3 );
    cvDilate( hitemp1, hitemp1, NULL, 3 );
    hitemp = hitemp1;

    CBlobResult blob;
    blob = CBlobResult( hitemp1, NULL, 255 );
    blob.Filter( blob, B_EXCLUDE, CBlobGetArea(), B_LESS, 500 );
    cvMerge( hitemp1, hitemp1, hitemp1, NULL, out );

    CBlob hand1, hand2; // two blobs, one for each hand
    blob.GetNthBlob( CBlobGetArea(), 0, hand2 );
    blob.GetNthBlob( CBlobGetArea(), 1, hand1 );
    hand1.FillBlob( out, CV_RGB(0,0,255) ); // fill the blob of hand one with blue
    hand2.FillBlob( out, CV_RGB(0,255,0) ); // fill the blob of hand two with green

    // Pass the image to coordinates() to find the coordinates of the hands
    coordinates( out, check );

    int greater1, greater2, lesser1, lesser2;
    if( x > X ){
        greater1 = x; greater2 = y;
        lesser1 = X;  lesser2 = Y;
    }else{
        greater1 = X; greater2 = Y;
        lesser1 = x;  lesser2 = y;
    }
    /* cvCircle( hi, cvPoint(greater1,greater2), 10, cvScalar(0,0,255), -1, 8 );
       cvCircle( hi, cvPoint(lesser1,lesser2), 10, cvScalar(0,255,255), -1, 8 ); */

    cvResizeWindow( win2, 280, 280 );
    cvMoveWindow( win2, 0, 0 );
    cvShowImage( win2, out );
    return;
}
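For reference, the hand-rolled double loop in blobbing() can be expressed with cvInRangeS() plus an inversion, since in-range pixels are painted black and everything else white. This is only a sketch against the same globals (hi2, hitemp, hl/hh, sl/sh, vl/vh), not a drop-in replacement.

// The loop uses strict comparisons; cvInRangeS is lower-inclusive, upper-exclusive,
// so shift the lower bounds up by one to match.
cvInRangeS(hi2, cvScalar(hl + 1, sl + 1, vl + 1), cvScalar(hh, sh, vh), hitemp);
cvNot(hitemp, hitemp); // in-range pixels black, everything else white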
void App::Update(Image &camera)
{
    /* camera = camera.Scale(camera.m_Image->width/2, camera.m_Image->height/2); */
    // cvFlip(camera.m_Image, NULL, 0);

    ///////////////////////////////////
    // dispatch from input

    int key = cvWaitKey(10);

    static int t = 150;
    static bool viewthresh = false;
    static bool off = false;
    static int spirit = 0;
    static int crop_x = 0;
    static int crop_y = 0;
    static int crop_w = camera.m_Image->width;
    static int crop_h = camera.m_Image->height;

    switch (key)
    {
        case 't': viewthresh = !viewthresh; break;
        case 'q': t--; break;
        case 'w': t++; break;
        case 'e': t -= 20; break;
        case 'r': t += 20; break;
        case 'o': off = !off; break;
        case 'p': spirit++; break;
        case 'z': crop_x += 10; break;
        case 'x': crop_x -= 10; break;
        case 'c': crop_y += 10; break;
        case 'v': crop_y -= 10; break;
        case 'b': crop_w += 10; break;
        case 'n': crop_w -= 10; break;
        case 'm': crop_h += 10; break;
        case ',': crop_h -= 10; break;
    }

    // Clamp the crop rectangle to the camera image
    if (crop_x < 0) crop_x = 0;
    if (crop_x >= camera.m_Image->width) crop_x = camera.m_Image->width - 1;
    if (crop_y < 0) crop_y = 0;
    if (crop_y >= camera.m_Image->height) crop_y = camera.m_Image->height - 1;
    if (crop_w + crop_x > camera.m_Image->width) crop_w = camera.m_Image->width - crop_x;
    if (crop_h + crop_y > camera.m_Image->height) crop_h = camera.m_Image->height - crop_y;

    if (off)
    {
        sleep(1);
        cerr << "off..." << endl;
        return;
    }

    Image thresh = camera.RGB2GRAY().SubImage(crop_x, crop_y, crop_w, crop_h);
    cvThreshold(thresh.m_Image, thresh.m_Image, t, 255, CV_THRESH_BINARY);

    // copy the threshold into a colour image and flood-fill the background green
    Image tofill = thresh.GRAY2RGB();
    cvFloodFill(tofill.m_Image,
        cvPoint(camera.m_Image->width/2, camera.m_Image->height/2),
        CV_RGB(0,255,0), cvScalar(0), cvScalar(255));

    CBlobResult blobs;
    blobs = CBlobResult(thresh.m_Image, NULL, 255);
    // exclude the blobs smaller than 10 pixels
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 10);

    CBlob *currentBlob;
    Image *out = NULL;

    if (key == 's')
    {
        // add the alpha channel: transparent wherever the flood fill reached
        Image src = camera.SubImage(crop_x, crop_y, crop_w, crop_h);
        out = new Image(src.m_Image->width, src.m_Image->height, 8, 4);
        for (int y = 0; y < src.m_Image->height; y++)
        {
            for (int x = 0; x < src.m_Image->width; x++)
            {
                CvScalar col = cvGet2D(src.m_Image, y, x);
                CvScalar alpha = cvGet2D(tofill.m_Image, y, x);
                if (alpha.val[0] == 0 && alpha.val[1] == 255 && alpha.val[2] == 0)
                    col.val[3] = 0;
                else
                    col.val[3] = 255;
                cvSet2D(out->m_Image, y, x, col);
            }
        }

        cerr << "deleting old images in islands/" << endl;
        int r = system("rm islands/*");
    }

    list<CvRect> allrects;
    for (int i = 0; i < blobs.GetNumBlobs(); i++)
    {
        currentBlob = blobs.GetBlob(i);
        allrects.push_back(currentBlob->GetBoundingBox());
    }

    list<CvRect> filteredrects = allrects;
    /* for (list<CvRect>::iterator i = allrects.begin(); i != allrects.end(); ++i)
    {
        bool in = false;
        for (list<CvRect>::iterator j = allrects.begin(); j != allrects.end(); ++j)
        {
            if (Inside(*i, *j)) in = true;
        }
        if (!in) filteredrects.push_back(*i);
    } */

    unsigned int instance = rand();
    unsigned int count = 0;
    for (list<CvRect>::iterator i = filteredrects.begin(); i != filteredrects.end(); ++i)
    {
        CvRect rect = *i;
        if (key == 's')
        {
            // save each blob's bounding box as its own image, named by centre position
            Image island = out->SubImage(rect.x, rect.y, rect.width, rect.height);
            char buf[256];
            sprintf(buf, "islands/island-%d-%d-%d.png", count,
                rect.x + rect.width/2, rect.y + rect.height/2);
            cerr << "saving " << buf << endl;
            island.Save(buf);
            sprintf(buf, "dump/island-%d-%d-%d-%d.png", instance, count,
                rect.x + rect.width/2, rect.y + rect.height/2);
            cerr << "saving " << buf << endl;
            island.Save(buf);
        }
        else
        {
            cvRectangle(camera.m_Image,
                cvPoint(crop_x + rect.x, crop_y + rect.y),
                cvPoint(crop_x + rect.x + rect.width, crop_y + rect.y + rect.height),
                colors[1]);
        }
        count++;
    }

    if (key == 's')
    {
        cerr << "copying images to server" << endl;
        //int r=system("scp -r islands [email protected]:/home/garden/GerminationX/oak/");
        string path("/home/dave/code/lirec/scenarios/GerminationX/oak/public/");
        path += string(spirits[spirit%3]);
        string command = string("rm ") + path + string("/*.*");
        int r = system(command.c_str());
        string command2 = string("cp islands/* ") + path;
        r = system(command2.c_str());
    }

    if (viewthresh) camera = tofill;

    char buf[256];
    sprintf(buf, "spirit: %s thresh: %d", spirits[spirit%3], t);
    cvPutText(camera.m_Image, buf, cvPoint(10,20), &m_Font, colors[0]);

    cvRectangle(camera.m_Image,
        cvPoint(crop_x, crop_y),
        cvPoint(crop_x + crop_w, crop_y + crop_h), colors[2]);

    if (out != NULL) delete out;
}
SHModel* ShapeModel( CvCapture* g_capture, StaticBGModel* BGModel, BGModelParams* BGParams )
{
    int num_frames = 0;
    int total_blobs = 0;
    float Sumatorio = 0;    // running sum of blob areas
    float SumatorioDes = 0; // running sum of squared blob areas

    IplImage* frame = NULL;
    STFrame* frameData = NULL;
    SHModel* Shape = NULL;
    CBlobResult blobs;
    CBlob *currentBlob;

    IplImage* ImGris = cvCreateImage( cvGetSize( BGModel->Imed ), 8, 1 );
    IplImage* Imblob = cvCreateImage( cvGetSize( BGModel->Imed ), 8, 3 );
    IplImage* lastBG = cvCreateImage( cvGetSize( BGModel->Imed ), 8, 1 );
    IplImage* lastIdes = cvCreateImage( cvGetSize( BGModel->Imed ), IPL_DEPTH_32F, 1 );
    cvZero( Imblob );

    // Initialise the shape model structure
    Shape = (SHModel *) malloc( sizeof( SHModel ) );
    if ( !Shape ) { error(4); return 0; }
    Shape->FlyAreaDes = 0;
    Shape->FlyAreaMedia = 0; // zero the area statistics

    // BLOB EXTRACTION AND MEAN/STANDARD DEVIATION OF AREAS OVER ALL TRAINING FRAMES
    cvSetCaptureProperty( g_capture, CV_CAP_PROP_POS_FRAMES, BGParams->initDelay ); // set the start position

    while( num_frames < ShParams->FramesTraining )
    {
        frame = cvQueryFrame( g_capture );
        if ( !frame ) { error(2); break; }
        if ( (cvWaitKey(10) & 255) == 27 ) break;

        ImPreProcess( frame, ImGris, BGModel->ImFMask, 0, BGModel->DataFROI );

        // Load the background data
        if( !frameData )
        {
            // On the first iteration, initialise the dynamic model from the static one
            frameData = InitNewFrameData( frame );
            cvCopy( BGModel->Imed, frameData->BGModel );
            cvSet( frameData->IDesvf, cvScalar(1) );
            cvCopy( BGModel->Imed, lastBG );
        }
        else
        {
            // Load the background parameters saved from the previous frame
            cvCopy( lastBG, frameData->BGModel );
            cvCopy( lastIdes, frameData->IDesvf );
        }

        // BACKGROUND UPDATE
        UpdateBGModel( ImGris, frameData->BGModel, frameData->IDesvf,
            BGParams, BGModel->DataFROI, BGModel->ImFMask );

        // BACKGROUND DIFFERENCE: obtain the foreground mask
        BackgroundDifference( ImGris, frameData->BGModel, frameData->IDesvf,
            frameData->FG, BGParams, BGModel->DataFROI );

        // Save the images to initialise the next frame
        cvCopy( frameData->BGModel, lastBG );
        cvCopy( frameData->IDesvf, lastIdes );

        // Get the blobs and exclude those whose size is of no interest
        blobs = CBlobResult( frameData->FG, NULL, 100, true );
        blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER, 100 );
        blobs.Filter( blobs, B_EXCLUDE, CBlobGetPerimeter(), B_GREATER, 1000 );

        int j = blobs.GetNumBlobs();   // number of blobs found in this frame
        total_blobs = total_blobs + j; // count the blobs found over all frames

        // Walk blob by blob and accumulate the area statistics of each one
        for (int i = 0; i < blobs.GetNumBlobs(); i++ )
        {
            currentBlob = blobs.GetBlob(i);
            if( ShParams->SHOW_DATA_AREAS ) {
                //printf("Area blob %d = %f ", i, currentBlob->area);
            }
            // Accumulate the sums needed for the mean and variance of the areas
            Sumatorio = Sumatorio + currentBlob->area;
            SumatorioDes = SumatorioDes + currentBlob->area * currentBlob->area;
            muestrearAreas( currentBlob->area );
            currentBlob->FillBlob( Imblob, CV_RGB(255,0,0) );
        }

        Shape->FlyAreaMedia = Sumatorio / total_blobs;
        Shape->FlyAreaDes = (SumatorioDes / total_blobs) - Shape->FlyAreaMedia * Shape->FlyAreaMedia;
        num_frames += 1;

        DraWWindow( Imblob, frameData, BGModel, SHOW_SHAPE_MODELING, COMPLETO );
        DraWWindow( Imblob, frameData, BGModel, SHAPE, SIMPLE );
    }

    desvanecer( NULL, 20 );
    Shape->FlyAreaDes = sqrt( fabs( Shape->FlyAreaDes ) );

    // Show the mean and standard deviation over all frames
    if( ShParams->SHOW_DATA_AREAS )
        printf("\n MEAN OF AREAS: %f \t STD DEV OF AREAS: %f", Shape->FlyAreaMedia, Shape->FlyAreaDes);

    free( ShParams );
    liberarSTFrame( frameData );
    cvReleaseImage( &ImGris );
    cvReleaseImage( &Imblob );
    cvReleaseImage( &lastIdes );
    cvReleaseImage( &lastBG );
    return Shape;
} // end of ShapeModel
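ShapeModel() accumulates the raw sums of areas and squared areas and recovers the spread at the end. This is the usual one-pass variance identity (numerically fragile when areas are large, which is why the code takes the absolute value before the square root):

\sigma^2 \;=\; \frac{1}{N}\sum_{i=1}^{N} a_i^2 \;-\; \left(\frac{1}{N}\sum_{i=1}^{N} a_i\right)^{\!2},
\qquad
\sigma \;=\; \sqrt{\left|\sigma^2\right|}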
// A Simple Camera Capture Framework
int main()
{
    CvCapture* capture = cvCaptureFromCAM( 0 );
    if( !capture ) {
        fprintf( stderr, "ERROR: capture is NULL \n" );
        return -1;
    }

#ifdef HALF_SIZE_CAPTURE
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 352/2);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 288/2);
#endif

    // Create the windows in which the captured images will be presented
    cvNamedWindow( "Source Image Window", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "Back Projected Image", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "Brightness and Contrast Window", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "Blob Output Window", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "Histogram Window", 0 );
    cvNamedWindow( "Rainbow Window", CV_WINDOW_AUTOSIZE );

    // Capture one frame to get the image attributes:
    source_frame = cvQueryFrame( capture );
    if( !source_frame ) {
        fprintf( stderr, "ERROR: frame is null...\n" );
        return -1;
    }

    cvCreateTrackbar("histogram\nnormalization", "Back Projected Image", &normalization_sum, 6000, NULL);
    cvCreateTrackbar("brightness", "Brightness and Contrast Window", &_brightness, 200, NULL);
    cvCreateTrackbar("contrast", "Brightness and Contrast Window", &_contrast, 200, NULL);
    cvCreateTrackbar("threshold", "Blob Output Window", &blob_extraction_threshold, 255, NULL);
    cvCreateTrackbar("min blob size", "Blob Output Window", &min_blob_size, 2000, NULL);
    cvCreateTrackbar("max blob size", "Blob Output Window", &max_blob_size, source_frame->width*source_frame->height/4, NULL);

    inputImage = cvCreateImage(cvGetSize(source_frame), IPL_DEPTH_8U, 1);
    histAdjustedImage = cvCreateImage(cvGetSize(source_frame), IPL_DEPTH_8U, 1);
    outputImage = cvCreateImage(cvGetSize(source_frame), IPL_DEPTH_8U, 3);
    hist_image = cvCreateImage(cvSize(320,200), 8, 1);
    rainbowImage = cvCreateImage(cvGetSize(source_frame), IPL_DEPTH_8U, 3);

    // object that will contain the blobs of inputImage
    CBlobResult blobs;
    CBlob my_enumerated_blob;

    cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, hScale, vScale, 0, lineWidth);

    // Some brightness/contrast stuff:
    bright_cont_image = cvCloneImage(inputImage);
    lut_mat = cvCreateMatHeader( 1, 256, CV_8UC1 );
    cvSetData( lut_mat, lut, 0 );

    while( 1 )
    {
        // Get one frame
        source_frame = cvQueryFrame( capture );
        if( !source_frame ) {
            fprintf( stderr, "ERROR: frame is null...\n" );
            getchar();
            break;
        }
        cvShowImage( "Source Image Window", source_frame );
        // Do not release the frame!

        cvCvtColor(source_frame, inputImage, CV_RGB2GRAY);

        // Histogram stuff
        my_hist = cvCreateHist(1, hist_size_array, CV_HIST_ARRAY, ranges, 1);
        cvCalcHist( &inputImage, my_hist, 0, NULL );
        cvNormalizeHist(my_hist, normalization_sum);

        // NOTE: First argument MUST have an ampersand, or a segmentation fault will result
        cvCalcBackProject(&inputImage, histAdjustedImage, my_hist);

        // Draw the histogram
        int bin_w;
        float max_value = 0;
        cvGetMinMaxHistValue( my_hist, 0, &max_value, 0, 0 );
        cvScale( my_hist->bins, my_hist->bins, ((double)hist_image->height)/max_value, 0 );
        cvSet( hist_image, cvScalarAll(255), 0 );
        bin_w = cvRound((double)hist_image->width/hist_size);
        for(int i = 0; i < hist_size; i++ )
            cvRectangle( hist_image,
                cvPoint(i*bin_w, hist_image->height),
                cvPoint((i+1)*bin_w, hist_image->height - cvRound(cvGetReal1D(my_hist->bins,i))),
                cvScalarAll(0), -1, 8, 0 );
        cvShowImage( "Histogram Window", hist_image );
        cvShowImage( "Back Projected Image", histAdjustedImage );

        // Brightness/contrast loop stuff:
        int brightness = _brightness - 100;
        int contrast = _contrast - 100;

        /*
         * The algorithm is by Werner D. Streidt
         * (http://visca.com/ffactory/archives/5-99/msg00021.html)
         */
        if( contrast > 0 )
        {
            double delta = 127.*contrast/100;
            double a = 255./(255. - delta*2);
            double b = a*(brightness - delta);
            for(int i = 0; i < 256; i++ )
            {
                int v = cvRound(a*i + b);
                if( v < 0 ) v = 0;
                if( v > 255 ) v = 255;
                lut[i] = (uchar)v;
            }
        }
        else
        {
            double delta = -128.*contrast/100;
            double a = (256.-delta*2)/255.;
            double b = a*brightness + delta;
            for(int i = 0; i < 256; i++ )
            {
                int v = cvRound(a*i + b);
                if( v < 0 ) v = 0;
                if( v > 255 ) v = 255;
                lut[i] = (uchar)v;
            }
        }

        cvLUT( inputImage, bright_cont_image, lut_mat );
        cvShowImage( "Brightness and Contrast Window", bright_cont_image );

        // ---------------
        // Blob manipulation code begins here:

        // Extract the blobs using the trackbar threshold
        blobs = CBlobResult( bright_cont_image, NULL, blob_extraction_threshold, true );

        // Keep only the blobs whose area lies between the min and max trackbar sizes
        // ( the criteria to filter can be any class derived from COperadorBlob )
        blobs.Filter( blobs, B_INCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, min_blob_size );
        blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER, max_blob_size );

        // build an output image equal to the input but with 3 channels (to draw the coloured blobs)
        cvMerge( bright_cont_image, bright_cont_image, bright_cont_image, NULL, outputImage );

        // plot the selected blobs in the output image
        for (int i = 0; i < blobs.GetNumBlobs(); i++)
        {
            blobs.GetNthBlob( CBlobGetArea(), i, my_enumerated_blob );
            // Spread the hues over 5/6 of the color wheel (300 degrees)
            my_enumerated_blob.FillBlob( outputImage,
                cv_hsv2rgb((float)i/blobs.GetNumBlobs() * 300, 1, 1) );
        }
        // END blob manipulation code
        // ---------------

        sprintf(str, "Count: %d", blobs.GetNumBlobs());
        cvPutText(outputImage, str, cvPoint(50, 25), &font, cvScalar(255,0,255));
        cvShowImage("Blob Output Window", outputImage);

        /* // Rainbow manipulation (unfinished):
        for (int i = 0; i < CV_CAP_PROP_FRAME_WIDTH; i++)
            for (int j = 0; j < CV_CAP_PROP_FRAME_HEIGHT; j++) {
                ((uchar*)(rainbowImage->imageData + rainbowImage->widthStep * j))[i * 3]     = 30;
                ((uchar*)(rainbowImage->imageData + rainbowImage->widthStep * j))[i * 3 + 1] = 30;
                ((uchar*)(rainbowImage->imageData + rainbowImage->widthStep * j))[i * 3 + 2] = 30;
            }
        cvShowImage("Rainbow Window", rainbowImage);
        */

        // If the ESC key is pressed (Key = 0x10001B under OpenCV 0.9.7, linux version),
        // remove the higher bits using the AND operator
        if( (cvWaitKey(10) & 255) == 27 ) break;
    }

    cvReleaseImage(&inputImage);
    cvReleaseImage(&histAdjustedImage);
    cvReleaseImage(&hist_image);
    cvReleaseImage(&bright_cont_image);
    cvReleaseImage(&outputImage);
    cvReleaseImage(&rainbowImage);

    // Release the capture device housekeeping
    cvReleaseCapture( &capture );
    cvDestroyAllWindows();
    return 0;
}
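The Streidt brightness/contrast mapping in the loop above builds a linear lookup table v(i) = a·i + b, clipped to [0, 255]. Written out, with c the contrast slider and β the brightness slider (both already shifted into [-100, 100]):

c > 0:\quad \delta = \frac{127\,c}{100},\qquad a = \frac{255}{255 - 2\delta},\qquad b = a(\beta - \delta)

c \le 0:\quad \delta = -\frac{128\,c}{100},\qquad a = \frac{256 - 2\delta}{255},\qquad b = a\,\beta + \delta

Each table entry is then v(i) = \min\bigl(255, \max(0, \operatorname{round}(a\,i + b))\bigr), so positive contrast steepens the slope around the midpoint and negative contrast flattens it.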
void locator()
{
    namedWindow("Tracking");

    int hMin, hMax, sMin, sMax, vMin, vMax, area_min;
    hMin = 0;
    hMax = 255; //hMax = 124; // night values
    sMin = 126; //sMin = 95;
    sMax = 255;
    vMin = 173; //vMin = 139;
    vMax = 255;
    area_min = 100;

    Mat smoothed, hsvImg, t_img;

    createTrackbar("blob min area", "Tracking", &area_min, 1000);
    createTrackbar("Hue Min", "Tracking", &hMin, 255);
    createTrackbar("Hue Max", "Tracking", &hMax, 255);
    createTrackbar("Sat Min", "Tracking", &sMin, 255);
    createTrackbar("Sat Max", "Tracking", &sMax, 255);
    createTrackbar("Val Min", "Tracking", &vMin, 255);
    createTrackbar("Val Max", "Tracking", &vMax, 255);

    while(ros::ok())
    {
        Mat source = imageB;
        Mat copy = imageB.clone();

        // Smooth, convert to HSV, and threshold to a binary mask
        GaussianBlur(source, smoothed, Size(9,9), 4);
        cvtColor(smoothed, hsvImg, CV_BGR2HSV);
        inRange(hsvImg, Scalar(hMin, sMin, vMin), Scalar(hMax, sMax, vMax), t_img);

        // Extract blobs and keep those whose area lies inside the configured range
        CBlobResult blob;
        IplImage i_img = t_img;
        blob = CBlobResult(&i_img, NULL, 0);
        int num_blobs = blob.GetNumBlobs();
        blob.Filter(blob, B_INCLUDE, CBlobGetArea(), B_INSIDE, area_min, blob_area_absolute_max_);
        num_blobs = blob.GetNumBlobs();

        // Table frame at ball_radius above the actual table plane
        std::string reference_frame = "/virtual_table";
        tf::StampedTransform transform;
        tf_.waitForTransform(reference_frame, model.tfFrame(), ros::Time(0), ros::Duration(0.5));
        tf_.lookupTransform(reference_frame, model.tfFrame(), ros::Time(0), transform);

        for(int i = 0; i < num_blobs; i++)
        {
            CBlob* bl = blob.GetBlob(i);
            Point2d uv(CBlobGetXCenter()(*bl), CBlobGetYCenter()(*bl));
            // Use the width as the height
            uv.y = bl->MinY() + (bl->MaxX() - bl->MinX()) * 0.5;
            circle(copy, uv, 50, Scalar(255,0,0), 5);

            // Back-project the pixel to a ray in the camera frame
            cv::Point3d xyz;
            model.projectPixelTo3dRay(uv, xyz);

            // Intersect the ray with the plane in the virtual table frame.
            // Origin of the camera frame wrt the virtual table frame
            tf::Point P0 = transform.getOrigin();
            // Point at the end of the unit ray wrt the virtual table frame
            tf::Point P1 = transform * tf::Point(xyz.x, xyz.y, xyz.z);
            // Origin of the virtual table frame
            tf::Point V0 = tf::Point(0.0, 0.0, 0.0);
            // Normal to the table plane
            tf::Vector3 n(0, 0, 1);
            // Scaling value that takes the ray onto the plane
            double scale = (n.dot(V0-P0)) / (n.dot(P1-P0));
            tf::Point ball_pos = P0 + (P1-P0)*scale;

            cout << ball_pos.x() << " " << ball_pos.y() << " " << ball_pos.z() << endl;
        }

        imshow(WINDOW, copy);
        waitKey(3);
        imshow("edited", t_img);
        waitKey(3);
        ros::spinOnce();
    }
}
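The scale factor in locator() is the standard ray-plane intersection. Points on the back-projected ray are P(s) = P_0 + s(P_1 - P_0), and requiring the intersection point to lie on the table plane, \mathbf{n}\cdot(P - V_0) = 0, gives

s \;=\; \frac{\mathbf{n}\cdot(V_0 - P_0)}{\mathbf{n}\cdot(P_1 - P_0)},
\qquad
\text{ball\_pos} \;=\; P_0 + s\,(P_1 - P_0)

which is exactly the expression computed with tf dot products in the loop above.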
void Auvsi_Recognize::extractLetter( void )
{
    typedef cv::Vec<unsigned char, 1> VT_binary;
#ifdef TWO_CHANNEL
    typedef cv::Vec<T, 2> VT;
#else
    typedef cv::Vec<T, 3> VT;
#endif
    typedef cv::Vec<int, 1> IT;

    // Erode input slightly
    cv::Mat input;
    cv::erode( _shape, input, cv::Mat() );

    // Remove any small white blobs left over, keeping only the biggest one
    CBlobResult blobs;
    CBlob * currentBlob;
    CBlob biggestBlob;
    IplImage binaryIpl = input;

    blobs = CBlobResult( &binaryIpl, NULL, 0 );
    blobs.GetNthBlob( CBlobGetArea(), 0, biggestBlob );
    blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, biggestBlob.Area() );
    for (int i = 0; i < blobs.GetNumBlobs(); i++ )
    {
        currentBlob = blobs.GetBlob(i);
        currentBlob->FillBlob( &binaryIpl, cvScalar(0) );
    }

    // Perform k-means on this region only
    int areaLetter = (int)biggestBlob.Area();
    cv::Mat kMeansInput = cv::Mat( areaLetter, 1, _image.type() );

    // Discard if we couldn't extract a letter
    if( areaLetter <= 0 )
    {
        _letter = cv::Mat( _shape );
        _letter = cv::Scalar(0);
        return;
    }

    // Copy the pixels inside the shape into the k-means input
    cv::MatIterator_<VT_binary> binaryIterator = input.begin<VT_binary>();
    cv::MatIterator_<VT_binary> binaryEnd = input.end<VT_binary>();
    cv::MatIterator_<VT> kMeansIterator = kMeansInput.begin<VT>();

    for( ; binaryIterator != binaryEnd; ++binaryIterator )
    {
        if( (*binaryIterator)[0] > 0 )
        {
            (*kMeansIterator) = _image.at<VT>( binaryIterator.pos() );
            ++kMeansIterator;
        }
    }

    // Get the k-means labels and determine which label is the minority
    cv::Mat labels = doClustering<T>( kMeansInput, 2, false );
    int numZeros = areaLetter - cv::countNonZero( labels );
    bool useZeros = numZeros < cv::countNonZero( labels );

    // Reshape into original form
    _letter = cv::Mat( _shape.size(), _shape.type() );
    _letter = cv::Scalar(0);

    binaryIterator = input.begin<VT_binary>();
    binaryEnd = input.end<VT_binary>();
    cv::MatIterator_<IT> labelsIterator = labels.begin<IT>();

    for( int index = 0; binaryIterator != binaryEnd; ++binaryIterator )
    {
        if( (*binaryIterator)[0] > 0 )
        {
            // Whichever label was the minority, we make that value white and all other values black
            unsigned char value = (*labelsIterator)[0];

            if( useZeros )
                value = value ? 0 : 255;
            else
                value = value ? 255 : 0;

            _letter.at<VT_binary>( binaryIterator.pos() ) = VT_binary( value );
            ++labelsIterator;
        }
    }
}
int Frame::findCandidates(Mat prev, Mat next)
{
    Mat diff1, diff2;
    Mat gray_diff1, gray_diff2;
    Mat andMaskGray, andMaskHSV, andMaskBGR1, andMaskBGR2, andMaskBGR;
    Mat colourFiltered1;

    // Double difference: a pixel is a motion candidate only if the current
    // frame differs from both the previous and the next one
    absdiff(prev, curr, diff1);
    absdiff(curr, next, diff2);
    cvtColor(diff1, gray_diff1, CV_BGR2GRAY);
    cvtColor(diff2, gray_diff2, CV_BGR2GRAY);
    bitwise_and(gray_diff1, gray_diff2, andMaskGray);

    // Blur and re-threshold twice to merge fragmented motion regions
    GaussianBlur( andMaskGray, andMaskGray, cv::Size(9, 9), 2, 2 );
    threshold(andMaskGray, andMaskGray, 4, 255, THRESH_BINARY);
    GaussianBlur( andMaskGray, andMaskGray, cv::Size(9, 9), 2, 2 );
    threshold(andMaskGray, andMaskGray, 4, 255, THRESH_BINARY);

    // Colour filter: keep two BGR ranges and merge them
    Scalar min1(90, 200, 160, 0);
    Scalar max1(175, 255, 220, 0);
    inRange(curr, min1, max1, andMaskBGR1);

    Scalar min2(28, 90, 35, 0);
    Scalar max2(100, 120, 120, 0);
    inRange(curr, min2, max2, andMaskBGR2);

    bitwise_or(andMaskBGR1, andMaskBGR2, andMaskBGR);
    GaussianBlur( andMaskBGR, andMaskBGR, cv::Size(9, 9), 2, 2 );
    morphologyEx(andMaskBGR, andMaskBGR, MORPH_CLOSE, getStructuringElement(MORPH_RECT, cv::Size(11, 11)));
    morphologyEx(andMaskBGR, andMaskBGR, MORPH_OPEN, getStructuringElement(MORPH_RECT, cv::Size(5, 5)));
    threshold(andMaskBGR, andMaskBGR, 1, 255, THRESH_BINARY);

    // Combine motion and colour evidence
    bitwise_and(andMaskBGR, andMaskGray, colourFiltered1);

    // Keep blobs with an area between 200 and 500 pixels
    IplImage blobimage = andMaskGray;
    CBlobResult results = CBlobResult(&blobimage, NULL, 0);
    results.Filter(results, B_EXCLUDE, CBlobGetArea(), B_GREATER, 500);
    results.Filter(results, B_EXCLUDE, CBlobGetArea(), B_LESS, 200);

    // Collect the surviving blobs as candidates
    CBlob *blob;
    int i;
    for(i = 0; i < results.GetNumBlobs(); i++)
    {
        blob = results.GetBlob(i);
        candidates.push_back(*blob);
    }
    return i;
}
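The pair of Filter() calls in findCandidates() acts as a band-pass on blob area. In cvBlobsLib builds that expose the two-limit B_INSIDE condition (the one used in locator() earlier), the same band-pass can be written in a single call; a sketch against the same `results` variable, assuming that overload is available:

// Keep only blobs whose area lies inside [200, 500]
results.Filter(results, B_INCLUDE, CBlobGetArea(), B_INSIDE, 200, 500);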