void BackgroundDiff(IplImage *I, IplImage *Imask) {
    cvConvertScale(I, Iscratch, 1, 0);
    cvSplit(Iscratch, Igray1, Igray2, Igray3, 0);
    // Channel 1
    cvInRange(Igray1, Ilow1, Ihi1, Imask);
    // Channel 2
    cvInRange(Igray2, Ilow2, Ihi2, Imaskt);
    cvOr(Imask, Imaskt, Imask);
    // Channel 3
    cvInRange(Igray3, Ilow3, Ihi3, Imaskt);
    cvOr(Imask, Imaskt, Imask);
    // Invert the result so 255 marks foreground (cvSubRS takes a CvScalar)
    cvSubRS(Imask, cvScalar(255), Imask);
}
/**
 * \brief Takes a frame and applies image-processing techniques to filter
 *        out non-laser-line points. Updates images used for runtime display.
 */
int filterFrame() {
    args[0] = frame;
    cvCvtColor(frame, frameHSV, CV_BGR2HSV);         // convert frame from BGR to HSV and place in frameHSV
    cvSplit(frameHSV, hue, saturation, value, NULL); // split frameHSV into its components; we are done with frameHSV
    args[1] = hue;
    args[2] = value;
    cvCopy(saturation, saturation2);                 // make an additional copy of saturation for display
    //args[8] = saturation2;
    //cvShowImage("saturation", saturation2);
    cvSmooth(frame, frameHSV, CV_BLUR, 20, 20);      // smooth frame; frameHSV is reused as scratch here
    //cvShowImage("Smoothed frame", frameHSV);
    cvSplit(frame, blue, green, red, NULL);          // split frame into its BGR components
    cvSplit(frameHSV, blue2, green2, red2, NULL);    // split the smoothed version into its BGR components
    cvMin(blue, green, min_bg);                      // take the per-pixel min of blue and green
    args[3] = min_bg;
    //cvShowImage("minimum of blue and green", min_bg);
    cvSub(red, min_bg, red_last);                    // red minus the min of blue and green
    //cvShowImage("red_last = red - min_bg", red_last);
    cvThreshold(red_last, red_last, thresholdValue, 255, CV_THRESH_BINARY_INV); // threshold red_last
    //cvShowImage("threshold of red_last", red_last);
    args[4] = red_last;
    cvSub(red, red2, deltaRed);                      // original red minus smoothed red
    //cvShowImage("deltaRed = Original red - smooth red", deltaRed);
    cvThreshold(deltaRed, deltaRed, thresholdValue, 255, CV_THRESH_BINARY);
    //cvShowImage("threshold(deltaRed)", deltaRed);
    cvCopy(deltaRed, alpha);
    cvInRangeS(saturation, cvScalar(0), cvScalar(25), saturation); // low saturation in the original frame
    //cvShowImage("Low saturation in original frame", saturation);
    cvInRangeS(hue, cvScalar(49), cvScalar(125), beta);            // mixed hue in the original frame
    //cvShowImage("Mixed hue in original frame", beta);
    cvOr(beta, saturation, beta);                    // beta = low saturation OR mixed hue
    //cvShowImage("beta = Low saturation OR mixed hue", beta);
    cvOr(beta, red_last, beta);                      // beta = beta OR red_last
    //cvShowImage("beta = beta OR red_last", beta);
    //args[5] = alpha;
    args[5] = beta;
    IplConvKernel *mask = cvCreateStructuringElementEx(5, 5, 2, 2, CV_SHAPE_ELLIPSE, NULL);
    cvDilate(saturation2, dialated, mask, 20);       // dilate the original saturation
    //cvShowImage("dilate original saturation", dialated);
    args[6] = dialated;
    cvThreshold(dialated, dialated, 100, 255, CV_THRESH_BINARY);
    cvErode(dialated, eroded, mask, 30);
    args[7] = eroded;
    cvReleaseStructuringElement(&mask);              // release the kernel so it is not leaked every frame
    cvSub(alpha, beta, orig_filter);
    args[8] = orig_filter;
    cvAnd(orig_filter, eroded, zeta);
    args[9] = zeta;
    return 0;
}
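/*
 * The essential step in filterFrame() is the red-dominance test: a red laser
 * line lives where red strongly exceeds both other channels. A minimal
 * self-contained sketch of just that step, assuming an 8-bit BGR input
 * (redDominanceMask and `margin` are illustrative names, not identifiers
 * from the project above):
 */
#include <opencv/cv.h>

void redDominanceMask(const IplImage* bgr, int margin, IplImage* mask) {
    CvSize sz = cvGetSize(bgr);
    IplImage* b = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    IplImage* g = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    IplImage* r = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    IplImage* minBG = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    cvSplit(bgr, b, g, r, NULL);
    cvMin(b, g, minBG);        // min of blue and green
    cvSub(r, minBG, minBG);    // red minus that min (saturates at 0 for 8u)
    cvThreshold(minBG, mask, margin, 255, CV_THRESH_BINARY);
    cvReleaseImage(&b);
    cvReleaseImage(&g);
    cvReleaseImage(&r);
    cvReleaseImage(&minBG);
}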
// Create a mask
void backgroundDiff(IplImage *I, IplImage *Imask) {
    cvCvtScale(I, Iscratch, 1, 0);
    cvSplit(Iscratch, Igray1, Igray2, Igray3, 0);
    // channel 1
    cvInRange(Igray1, Ilow1, Ihi1, Imask);
    // channel 2
    cvInRange(Igray2, Ilow2, Ihi2, Imaskt);
    cvOr(Imask, Imaskt, Imask);
    // channel 3
    cvInRange(Igray3, Ilow3, Ihi3, Imaskt);
    cvOr(Imask, Imaskt, Imask);
}
void CHandDrawEffect::EffectImage(IplImage* back, IplImage* frame, IplImage* alpha, IplImage* mask, IplImage* res) {
    if(drawMode & 0x01) {
        // basic effect
        Posterize(0xD0, frame, imageA);
        // DrawHatching(frame, imageA);
        cvAnd(imageA, mask, imageB);          // cut out the CG part after effect processing

        // outline: dilate the mask and its complement, then XOR
        cvNot(mask, imageA);
        cvDilate(imageA, imageD, 0, 1);
        cvDilate(mask, imageE, 0, 3);
        cvXor(imageE, imageD, mask);

        // update the alpha mask
        cvNot(mask, imageA);
        cvConvertScale(imageA, imageA, 0.5);
        cvOr(alpha, imageA, alpha);

        // colored outline
        cvNot(mask, imageA);
        cvAnd(imageA, imageC, imageA);
        cvOr(imageA, imageB, imageB);

        // scan lines
        cvAnd(imageB, scanningLine, imageB);

        // alpha blend
        AlphaBlend(back, imageB, alpha, res);

        if(0) { // drawMode & 0x02) {
            // DrawEdge(frame, imageB, res, 2);
            cvNot(mask, frame);
            cvDilate(frame, imageA, 0, 1);
            cvDilate(mask, imageB, 0, 3);
            cvXor(imageA, imageB, mask);
            cvAnd(mask, res, res);

            // colored lines
            cvNot(mask, imageA);
            cvAnd(imageA, scanningLine, imageA);
            cvAnd(imageA, imageC, imageA);
            cvOr(res, imageA, res);
        }
    } else if(drawMode & 0x02) {
        // DrawEdge(frame, imageB, res, 2);
    }
}
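/*
 * The "outline" block above uses a small morphology trick: dilating the mask
 * and its complement by different amounts and XOR'ing the results leaves
 * everything except a band straddling the mask edge; one cvNot then isolates
 * the band. A sketch of the band extraction on its own, assuming an 8-bit
 * 0/255 mask (outlineBand is an illustrative name, not from the class above):
 */
#include <opencv/cv.h>

void outlineBand(const IplImage* mask, IplImage* band) {
    IplImage* inv = cvCreateImage(cvGetSize(mask), IPL_DEPTH_8U, 1);
    IplImage* grown = cvCreateImage(cvGetSize(mask), IPL_DEPTH_8U, 1);
    cvNot(mask, inv);
    cvDilate(inv, inv, NULL, 1);     // background grown 1 px into the mask
    cvDilate(mask, grown, NULL, 3);  // mask grown 3 px into the background
    cvXor(grown, inv, band);         // 255 everywhere except a ~4 px band at the edge
    cvNot(band, band);               // keep only the band
    cvReleaseImage(&inv);
    cvReleaseImage(&grown);
}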
// Create a binary 0/255 mask where 255 means foreground pixel
// I      input image, 3 channel, 8u
// Imask  mask image to be created, 1 channel 8u
void ofxBackground::backgroundDiff(IplImage *I, IplImage *Imask) { // mask should be grayscale
    cvCvtScale(I, Iscratch, 1, 0); // to float
    // Channel 1
    cvCvtPixToPlane(Iscratch, Igray1, Igray2, Igray3, 0);
    cvInRange(Igray1, Ilow1, Ihi1, Imask);
    // Channel 2
    cvInRange(Igray2, Ilow2, Ihi2, Imaskt);
    cvOr(Imask, Imaskt, Imask);
    // Channel 3
    cvInRange(Igray3, Ilow3, Ihi3, Imaskt);
    cvOr(Imask, Imaskt, Imask);
    // Finally, invert the results
    cvSubRS(Imask, cvScalar(255), Imask);
}
// Create a binary 0/255 mask where 255 means foreground pixel
// I      input image, 3 channel, 8u
// Imask  mask image to be created, 1 channel 8u
void backgroundDiff(IplImage *I, IplImage *Imask) {
    cvCvtScale(I, Iscratch, 1, 0); // to float
    cvSplit(Iscratch, Igray1, Igray2, Igray3, 0);
    // Channel 1
    cvInRange(Igray1, Ilow1, Ihi1, Imask);
    // Channel 2
    cvInRange(Igray2, Ilow2, Ihi2, Imaskt);
    cvOr(Imask, Imaskt, Imask);
    // Channel 3
    cvInRange(Igray3, Ilow3, Ihi3, Imaskt);
    cvOr(Imask, Imaskt, Imask);
    // Finally, invert the results
    cvSubRS(Imask, cvScalar(255), Imask);
}
void THISCLASS::OnStep() {
    if (! mCore->mDataStructureImageGray.mImage) {
        AddError(wxT("No input image."));
        return;
    }

    // Mask the image
    if (mMaskImage) {
        if ((mCore->mDataStructureImageGray.mImage->width != mMaskImage->width) ||
            (mCore->mDataStructureImageGray.mImage->height != mMaskImage->height)) {
            AddError(wxT("Wrong mask size."));
            return;
        }
        if ((mMode == cMode_WhiteWhite) || (mMode == cMode_BlackWhite)) {
            cvOr(mCore->mDataStructureImageGray.mImage, mMaskImage, mCore->mDataStructureImageGray.mImage);
        } else {
            cvAnd(mCore->mDataStructureImageGray.mImage, mMaskImage, mCore->mDataStructureImageGray.mImage);
        }
    }

    // Set the display
    DisplayEditor de(&mDisplayOutput);
    if (de.IsActive()) {
        de.SetMainImage(mCore->mDataStructureImageGray.mImage);
    }
}
void HandDetect::handDetecting() {
    skinDetect();

    IplImage *tmp = cvCreateImage(cvGetSize(backproject), 8, 1);
    cvZero(tmp);
    if (track_comp.rect.height > 0 && track_comp.rect.width > 0) {
        cvCircle(tmp, handCen, track_box.size.width, CV_RGB(255, 255, 255), -1);
        cvDrawRect(tmp,
                   cvPoint(track_window.x - (int)(track_box.size.width * 0.2), track_window.y - (int)(track_box.size.height * 0.2)),
                   cvPoint(track_window.x + (int)(track_box.size.width * 1.2), track_window.y + track_box.size.height),
                   CV_RGB(255, 255, 255), -1);
    }
    cvAnd(backproject, tmp, backproject, 0);
    cvDilate(backproject, backproject, 0, 1);
    cvErode(backproject, backproject, 0, 1);

    UsingYCbCr();
    cvAnd(gray, tmp, gray, 0);
    cvErode(gray, gray, 0, 1);
    cvDilate(gray, gray, 0, 1);
    // cvShowImage("52", gray);
    cvReleaseImage(&tmp);

    cvOr(gray, backproject, backproject, 0);

    handCen = cvPoint(track_box.center.x, track_box.center.y);
    setRad();
    // cvDrawRect(image, cvPoint(track_window.x, track_window.y), cvPoint(track_window.x+track_window.width, track_window.y+track_window.height), CV_RGB(255, 0, 0));
    cvCircle(image, handCen, 2, CV_RGB(255, 0, 0), 2);
}
void TextLocation::getMaskImgFromRects(const IplImage* src, const vector<CvRect> &rects, IplImage* dst) {
    // use the rects to build a mask for the src image
    cvZero(m_utilityMaskImg);
    for (unsigned int i = 0; i < rects.size(); ++i) {
        for (int row = rects[i].y; row < rects[i].y + rects[i].height; ++row) {
            uchar* pMask = (uchar*)(m_utilityMaskImg->imageData + row * m_utilityMaskImg->widthStep);
            for (int col = rects[i].x; col < rects[i].x + rects[i].width; ++col) {
                pMask[col] = 255;
            }
        }
    }
    cvZero(m_utilityZeroImg);
    // OR src with a zero image, writing only where the mask is set: a masked copy
    cvOr(src, m_utilityZeroImg, dst, m_utilityMaskImg);
}
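/*
 * Painting the rectangles pixel by pixel and then OR'ing src with a zero
 * image under a mask is a roundabout masked copy. cvRectangle with CV_FILLED
 * and cvCopy's mask argument express the same thing directly
 * (maskCopyFromRects is an illustrative stand-in, not the project's code):
 */
#include <opencv/cv.h>
#include <vector>

void maskCopyFromRects(const IplImage* src, const std::vector<CvRect>& rects, IplImage* dst) {
    IplImage* mask = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    cvZero(mask);
    for (size_t i = 0; i < rects.size(); ++i) {
        cvRectangle(mask,
                    cvPoint(rects[i].x, rects[i].y),
                    cvPoint(rects[i].x + rects[i].width - 1, rects[i].y + rects[i].height - 1),
                    cvScalarAll(255), CV_FILLED, 8, 0);
    }
    cvZero(dst);
    cvCopy(src, dst, mask);   // copy src wherever the mask is non-zero
    cvReleaseImage(&mask);
}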
// Extract the color region, then binarize
void ColorTracking::color_config(IplImage* image, std::string config) {
    // bounds of the range(s) to extract
    CvScalar hsv_min, hsv_max, hsv_min2, hsv_max2;

    if (image != NULL) {
        IplImage* m_tem1_img = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1); // range-extraction image
        IplImage* m_tem2_img = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1); // range-extraction image

        // narrow down to the required color range
        if (config == "Red") {
            // red: captured as two ranges because the hue wraps around
            // (8-bit hue only reaches 179, so the 220 bound is effectively 179)
            hsv_min  = cvScalar(0, 85, 100, 0);
            hsv_max  = cvScalar(10, 255, 255, 0);
            hsv_min2 = cvScalar(170, 85, 100, 0);
            hsv_max2 = cvScalar(220, 255, 255, 0);
        } else if (config == "Green") {
            hsv_min = cvScalar(55, 80, 100, 0);
            hsv_max = cvScalar(75, 255, 255, 0);
        } else if (config == "Blue") {
            hsv_min = cvScalar(100, 100, 100, 0);
            hsv_max = cvScalar(130, 255, 200, 0);
        } else if (config == "Yellow") {
            hsv_min = cvScalar(20, 100, 100, 0);
            hsv_max = cvScalar(35, 255, 255, 0);
        }

        if (config == "Red") {
            // red is handled with the two ranges
            cvInRangeS(image, hsv_min, hsv_max, m_tem1_img);
            cvInRangeS(image, hsv_min2, hsv_max2, m_tem2_img);
            // merge the two ranges (union)
            cvOr(m_tem1_img, m_tem2_img, m_gray_img);
        } else {
            // any other color uses its single range directly
            cvInRangeS(image, hsv_min, hsv_max, m_gray_img);
        }

        cvReleaseImage(&m_tem1_img);
        cvReleaseImage(&m_tem2_img);
    }
}
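/*
 * The red case above is the standard trick for hues that straddle the 0/180
 * boundary: threshold the two sub-ranges separately and OR the masks. A
 * reusable sketch, assuming an 8-bit HSV input with hue in 0-179
 * (hueInRangeWrap and its parameters are illustrative names):
 */
#include <opencv/cv.h>

void hueInRangeWrap(const IplImage* hsv, int lo, int hi, int sMin, int vMin, IplImage* mask) {
    if (lo <= hi) {
        // ordinary, non-wrapping interval
        cvInRangeS(hsv, cvScalar(lo, sMin, vMin, 0), cvScalar(hi, 255, 255, 0), mask);
    } else {
        // wrapping interval, e.g. red with lo = 170, hi = 10
        IplImage* tmp = cvCreateImage(cvGetSize(hsv), IPL_DEPTH_8U, 1);
        cvInRangeS(hsv, cvScalar(lo, sMin, vMin, 0), cvScalar(180, 255, 255, 0), mask);
        cvInRangeS(hsv, cvScalar(0, sMin, vMin, 0), cvScalar(hi, 255, 255, 0), tmp);
        cvOr(mask, tmp, mask);   // union of the two hue bands
        cvReleaseImage(&tmp);
    }
}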
// Create a binary 0/255 mask where 255 means foreground pixel.
//
// Parameters:
//   I:     input image, 3 channel, 8u
//   Imask: mask image to be created, 1 channel 8u
//   num:   camera number
void backgroundDiff(IplImage *I, IplImage *Imask, int num) { // mask should be grayscale
    cvCvtScale(I, Iscratch, 1, 0); // to float
    // Channel 1
    cvCvtPixToPlane(Iscratch, Igray1, Igray2, Igray3, 0); // TODO: book uses cvSplit: check!
    cvInRange(Igray1, Ilow1[num], Ihi1[num], Imask);
    // Channel 2
    cvInRange(Igray2, Ilow2[num], Ihi2[num], Imaskt);
    cvOr(Imask, Imaskt, Imask);
    // Channel 3
    cvInRange(Igray3, Ilow3[num], Ihi3[num], Imaskt);
    cvOr(Imask, Imaskt, Imask);
    // Finally, invert the results.
    cvSubRS(Imask, cvScalar(255), Imask);
}
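/*
 * The backgroundDiff variants above all share one structure (it matches the
 * averaging-background example in "Learning OpenCV", which one of the TODOs
 * even references): test each channel against learned low/high bounds, OR
 * the per-channel in-range masks, then invert so 255 marks foreground. A
 * minimal self-contained sketch of the same structure, with scalar bounds
 * standing in for the per-pixel bound images of the originals
 * (backgroundDiffScalar is an illustrative name):
 */
#include <opencv/cv.h>

void backgroundDiffScalar(IplImage* bgr, CvScalar low, CvScalar high, IplImage* fgMask) {
    CvSize sz = cvGetSize(bgr);
    IplImage* ch[3];
    for (int i = 0; i < 3; i++) ch[i] = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    IplImage* t = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    cvSplit(bgr, ch[0], ch[1], ch[2], NULL);
    // a pixel counts as background if it falls inside the bounds on any channel
    cvInRangeS(ch[0], cvScalar(low.val[0]), cvScalar(high.val[0]), fgMask);
    cvInRangeS(ch[1], cvScalar(low.val[1]), cvScalar(high.val[1]), t);
    cvOr(fgMask, t, fgMask);
    cvInRangeS(ch[2], cvScalar(low.val[2]), cvScalar(high.val[2]), t);
    cvOr(fgMask, t, fgMask);
    // invert: 255 now marks foreground
    cvSubRS(fgMask, cvScalar(255), fgMask);
    for (int i = 0; i < 3; i++) cvReleaseImage(&ch[i]);
    cvReleaseImage(&t);
}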
int filterByHSV(IplImage *src, CvScalar minHSV, CvScalar maxHSV, IplImage *dst) {
    IplImage *tmp3d = cvCloneImage(src);
    cvSmooth(tmp3d, tmp3d, CV_GAUSSIAN, 13, 0, 0, 0);
    cvCvtColor(tmp3d, tmp3d, CV_BGR2HSV);

    IplImage *tmp1dH_mask = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage *tmp1dS_mask = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage *tmp1dV_mask = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    cvSplit(tmp3d, tmp1dH_mask, tmp1dS_mask, tmp1dV_mask, NULL);
    cvReleaseImage(&tmp3d); // no longer needed; avoid leaking it

    //printf("\rmin: %03d,%03d,%03d", (int)minHSV.val[0], (int)minHSV.val[1], (int)minHSV.val[2]);
    //printf("\tmax: %03d,%03d,%03d", (int)maxHSV.val[0], (int)maxHSV.val[1], (int)maxHSV.val[2]);

    if (minHSV.val[0] < maxHSV.val[0]) {
        cvInRangeS(tmp1dH_mask, cvScalar(minHSV.val[0], 0, 0), cvScalar(maxHSV.val[0], 0, 0), tmp1dH_mask);
    } else {
        // hue wraps around: combine the two sub-ranges
        IplImage *tmp1d = cvCloneImage(tmp1dH_mask);
        cvInRangeS(tmp1dH_mask, cvScalar(0, 0, 0), cvScalar(maxHSV.val[0], 0, 0), tmp1d);
        cvInRangeS(tmp1dH_mask, cvScalar(minHSV.val[0], 0, 0), cvScalar(255, 0, 0), tmp1dH_mask);
        cvOr(tmp1d, tmp1dH_mask, tmp1dH_mask, NULL);
        cvReleaseImage(&tmp1d);
    }
    cvInRangeS(tmp1dS_mask, cvScalar(minHSV.val[1], 0, 0), cvScalar(maxHSV.val[1], 0, 0), tmp1dS_mask);
    cvInRangeS(tmp1dV_mask, cvScalar(minHSV.val[2], 0, 0), cvScalar(maxHSV.val[2], 0, 0), tmp1dV_mask);

    IplImage *tmp1d_mask = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    cvSet(tmp1d_mask, cvScalarAll(255), NULL);
    cvAnd(tmp1d_mask, tmp1dH_mask, tmp1d_mask, NULL);
    cvAnd(tmp1d_mask, tmp1dS_mask, tmp1d_mask, NULL);
    cvAnd(tmp1d_mask, tmp1dV_mask, tmp1d_mask, NULL);

    cvReleaseImage(&tmp1dH_mask);
    cvReleaseImage(&tmp1dS_mask);
    cvReleaseImage(&tmp1dV_mask);

    cvClose(tmp1d_mask, tmp1d_mask, NULL, 2); // project-local morphological close helper

#define CONTROLS_WIDTHA 640/2
#define CONTROLS_HEIGHTA 480/2
#if 1
    cvNamedWindow(CONTROL_WINDOW "4", 0);
    cvResizeWindow(CONTROL_WINDOW "4", CONTROLS_WIDTHA, CONTROLS_HEIGHTA);
    cvShowImage(CONTROL_WINDOW "4", tmp1d_mask);
#endif

    cvCopy2(src, dst, tmp1d_mask); // project-local masked-copy helper
    cvReleaseImage(&tmp1d_mask);
    return 0;
}
void HandDetect::skinDetect() {
    setImage();
    cvFlip(image, image, 1);

    hsv = cvCreateImage(cvGetSize(image), 8, 3);
    msk = cvCreateImage(cvGetSize(image), 8, 1);
    hue = cvCreateImage(cvGetSize(image), 8, 1);
    backproject1 = cvCreateImage(cvGetSize(image), 8, 1);
    backproject2 = cvCreateImage(cvGetSize(image), 8, 1);

    cvCvtColor(image, hsv, CV_RGB2HSV);
    cvInRangeS(hsv, cvScalar(0, smin, MIN(vmin, vmax), 0), cvScalar(180, 256, MAX(vmin, vmax), 0), msk);
    cvSplit(hsv, hue, 0, 0, 0);

    // back-project both skin histograms and merge the binarized responses
    cvCalcBackProject(&hue, backproject1, hist1);
    cvCalcBackProject(&hue, backproject2, hist2);
    cvThreshold(backproject1, backproject1, 50, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
    cvThreshold(backproject2, backproject2, 50, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
    cvOr(backproject1, backproject2, backproject, 0);
    cvErode(backproject, backproject, 0, 1);
    cvDilate(backproject, backproject, 0, 1);
    cvAnd(backproject, msk, backproject, 0);

    if (track_box.center.x != -1 && track_box.center.y != -1)
        preCen = cvPoint(handCen.x, handCen.y);
    else
        preCen = cvPoint(0, 0);

    cvCamShift(backproject, track_window, cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1), &track_comp, &track_box);
    if (track_comp.rect.height > 0 && track_comp.rect.width > 0) {
        track_window = track_comp.rect;
    } else {
        track_box.center.x = -1;
        track_box.center.y = -1;
    }

    cvReleaseImage(&hsv);
    cvReleaseImage(&msk);
    cvReleaseImage(&hue);
    cvReleaseImage(&backproject1);
    cvReleaseImage(&backproject2);
}
IplImage *contoursGetOutlineMorh(IplImage *src, IplImage *temp, int mask) {
    int radius = 3;
    int cols = radius * 2 + 1;
    int rows = cols;

    IplImage *res;
    IplImage *bin = cvCreateImage(cvGetSize(src), src->depth, 1);
    cvAdaptiveThreshold(src, bin, 255, CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY, 7, 1);

    if (mask == 1) {
        // combine the adaptive threshold with an Otsu mask
        IplImage *otsuMask = cvCreateImage(cvGetSize(src), src->depth, 1);
        res = cvCreateImage(cvGetSize(src), src->depth, 1);
        cvThreshold(src, otsuMask, 0, 255, CV_THRESH_BINARY_INV + CV_THRESH_OTSU);
        cvOr(bin, otsuMask, res, NULL);
        cvReleaseImage(&otsuMask);
        cvReleaseImage(&bin); // bin was merged into res; release it
    } else {
        res = bin;
    }

    // open with a small elliptical kernel to drop speckle
    IplConvKernel *element = cvCreateStructuringElementEx(cols, rows, radius, radius, CV_SHAPE_ELLIPSE, NULL);
    cvMorphologyEx(res, res, temp, element, CV_MOP_OPEN, 1);
    cvReleaseStructuringElement(&element);

    // close with a larger kernel to bridge gaps
    radius = 9; cols = radius * 2 + 1; rows = cols;
    element = cvCreateStructuringElementEx(cols, rows, radius, radius, CV_SHAPE_ELLIPSE, NULL);
    cvMorphologyEx(res, res, temp, element, CV_MOP_CLOSE, 1);
    cvReleaseStructuringElement(&element);

    radius = 7; cols = radius * 2 + 1; rows = cols;
    element = cvCreateStructuringElementEx(cols, rows, radius, radius, CV_SHAPE_ELLIPSE, NULL);
    cvErode(res, res, element, 1);
    cvDilate(res, res, element, 1);

    contoursDrawBorder(res);
    cvReleaseStructuringElement(&element);
    cvReleaseImage(&temp);
    return res;
}
int main(int argc, char** argv) {
    IplImage* frame = cvLoadImage(argv[1], CV_LOAD_IMAGE_UNCHANGED);
    CvSize size = cvSize(frame->width, frame->height);
    IplImage* hsv_frame = cvCreateImage(size, IPL_DEPTH_8U, 3);
    IplImage* thresholded = cvCreateImage(size, IPL_DEPTH_8U, 1);
    IplImage* thresholded2 = cvCreateImage(size, IPL_DEPTH_8U, 1);

    CvScalar hsv_min  = cvScalar(0, 80, 220, 0);
    CvScalar hsv_max  = cvScalar(50, 140, 256, 0);
    CvScalar hsv_min2 = cvScalar(170, 80, 220, 0);
    CvScalar hsv_max2 = cvScalar(256, 140, 256, 0);

    cvNamedWindow("Original", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("HSV", CV_WINDOW_AUTOSIZE);

    int p[3];
    p[0] = CV_IMWRITE_JPEG_QUALITY;
    p[1] = 95;
    p[2] = 0;

    cvCvtColor(frame, hsv_frame, CV_BGR2HSV);
    // to handle color wrap-around, two halves are detected and combined
    cvInRangeS(hsv_frame, hsv_min, hsv_max, thresholded);
    cvInRangeS(hsv_frame, hsv_min2, hsv_max2, thresholded2);
    cvOr(thresholded, thresholded2, thresholded, 0);
    //cvSaveImage("thresholded.jpg", thresholded, p);

    // the Hough detector works better with some smoothing of the image
    cvSmooth(thresholded, thresholded, CV_GAUSSIAN, 9, 9, 0, 0);
    //cvSaveImage("frame.jpg", frame, p);
    cvShowImage("Original", thresholded);
    cvShowImage("HSV", hsv_frame);

    cvWaitKey(0);
    cvDestroyAllWindows();
    cvReleaseImage(&frame);
    cvReleaseImage(&hsv_frame);
    cvReleaseImage(&thresholded);
    cvReleaseImage(&thresholded2);
    return 0;
}
void reShadowing(FrameObject *frame, IplImage *background) {
    IplImage *source = frame->getFrame();
    CvSize size = cvGetSize(source);
    IplImage *ghostMask = cvCreateImage(size, 8, 1);
    cvZero(ghostMask);

    // union of the masks of all detected ghosts
    list<DetectedObject*>::iterator it;
    list<DetectedObject*> det;
    det = frame->getDetectedObject();
    for (it = det.begin(); it != det.end(); ++it) {
        if ((*it)->isGhost) {
            cvOr(ghostMask, (*it)->totalMask, ghostMask);
        }
    }

    // copy source pixels into the background wherever the ghost mask is set
    int channel = source->nChannels;
    uchar* dataSrc = (uchar *)source->imageData;
    int stepSrc = source->widthStep / sizeof(uchar);
    uchar* dataBkg = (uchar *)background->imageData;
    int stepBkg = background->widthStep / sizeof(uchar);
    uchar* dataGhost = (uchar *)ghostMask->imageData;
    int stepGhost = ghostMask->widthStep / sizeof(uchar);

    int i, j, k;
    for (i = 0; i < source->height; i++) {
        for (j = 0; j < source->width; j++) {
            if (dataGhost[i * stepGhost + j] == 255) {
                for (k = 0; k < channel; k++)
                    dataBkg[i * stepBkg + j * channel + k] = dataSrc[i * stepSrc + j * channel + k];
            }
        }
    }

    cvReleaseImage(&ghostMask);
    cvReleaseImage(&source);
}
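/*
 * The per-pixel loop in reShadowing is a masked copy. Assuming totalMask (and
 * therefore ghostMask) holds only 0 or 255, cvCopy's mask argument does the
 * same thing in one call and handles any number of channels
 * (copyWhereMasked is an illustrative name):
 */
#include <opencv/cv.h>

void copyWhereMasked(const IplImage* source, IplImage* background, const IplImage* ghostMask) {
    // copy source pixels into background wherever the 8-bit mask is non-zero
    cvCopy(source, background, ghostMask);
}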
/**************************************
 * Definition: Retrieves new images from the camera, thresholds them,
 *             processes them finding their squares, and updates the
 *             3 open windows
 **************************************/
void Camera::update() {
    // release the old thresholded images
    if (_pinkThresholded != NULL) {
        cvReleaseImage(&_pinkThresholded);
    }
    if (_yellowThresholded != NULL) {
        cvReleaseImage(&_yellowThresholded);
    }

    // get a red and a pink thresholded image and OR them together to
    // obtain an improved pink thresholded image
    IplImage *redThresholded = getThresholdedImage(RED_LOW, RED_HIGH);
    while (redThresholded == NULL) {
        redThresholded = getThresholdedImage(RED_LOW, RED_HIGH);
    }
    _pinkThresholded = getThresholdedImage(PINK_LOW, PINK_HIGH);
    while (_pinkThresholded == NULL) {
        _pinkThresholded = getThresholdedImage(PINK_LOW, PINK_HIGH);
    }
    cvOr(_pinkThresholded, redThresholded, _pinkThresholded);
    cvReleaseImage(&redThresholded); // merged into _pinkThresholded; avoid leaking it

    // get a yellow thresholded image
    _yellowThresholded = getThresholdedImage(YELLOW_LOW, YELLOW_HIGH);
    while (_yellowThresholded == NULL) {
        _yellowThresholded = getThresholdedImage(YELLOW_LOW, YELLOW_HIGH);
    }

    // smooth both thresholded images to create more solid, blobby contours
    cvSmooth(_pinkThresholded, _pinkThresholded, CV_BLUR_NO_SCALE);
    cvSmooth(_yellowThresholded, _yellowThresholded, CV_BLUR_NO_SCALE);

    // find all squares of a given color in each thresholded image
    _pinkSquares = findSquaresOf(COLOR_PINK, DEFAULT_SQUARE_SIZE);
    _yellowSquares = findSquaresOf(COLOR_YELLOW, DEFAULT_SQUARE_SIZE);

    // show the pink thresholded image so we can see what it sees
    cvShowImage("Thresholded", _pinkThresholded);

    // update all open windows
    cvWaitKey(10);
}
//--------------------------------------------------------------
void DepthHoleFiller::fillHolesUsingHistory(ofxCvGrayscaleImage &ofxCv8uC1_Depth) {
    for (int i = 1; i < nDepthHistory; i++) {
        // get all currently non-valid pixels;
        // note that ofxCv8uC1_Depth gets more and more filled with each iteration
        bool bInvert = true;
        int FIRST_VALID_VALUE = 1;
        ofxCv8uC1_DepthInvalid = ofxCv8uC1_Depth;
        ofxCv8uC1_DepthInvalid.threshold(FIRST_VALID_VALUE, bInvert);

        // AND them with the previous frame, to extract those pixels from
        // the previous frame which are invalid in the current one
        cvAnd(ofxCv8uC1_DepthInvalid.getCvImage(), ofxCv8uC1_DepthHistory[i].getCvImage(),
              ofxCv8uC1_DepthInvalid.getCvImage(), NULL);

        // add them in together
        cvOr(ofxCv8uC1_Depth.getCvImage(), ofxCv8uC1_DepthInvalid.getCvImage(),
             ofxCv8uC1_Depth.getCvImage(), NULL);
    }
}
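/*
 * The same fill-from-history pattern without the ofxOpenCv wrappers: invalid
 * pixels are where the current depth is zero, AND selects the history pixels
 * that can patch them, OR merges the patch in. A sketch assuming 8-bit
 * single-channel depth images (fillHolesFrom is an illustrative name):
 */
#include <opencv/cv.h>

void fillHolesFrom(IplImage* depth, const IplImage* history) {
    IplImage* invalid = cvCreateImage(cvGetSize(depth), IPL_DEPTH_8U, 1);
    cvCmpS(depth, 0, invalid, CV_CMP_EQ);  // 255 where the current depth is invalid (zero)
    cvAnd(history, invalid, invalid, NULL); // keep only history pixels that are invalid now
    cvOr(depth, invalid, depth, NULL);      // merge the recovered pixels into the current frame
    cvReleaseImage(&invalid);
}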
int main(int argc, char **argv) {
    if (argc != 3) {
        help();
        return 0;
    }
    char *image1name = argv[1];
    char *image2name = argv[2];

    // create two windows
    cvNamedWindow(WINDOW1NAME, CV_WINDOW_NORMAL);
    cvNamedWindow(WINDOW2NAME, CV_WINDOW_NORMAL);

    // read the two images as grayscale
    pGrayImage1 = cvLoadImage(image1name, CV_LOAD_IMAGE_GRAYSCALE);
    pGrayImage2 = cvLoadImage(image2name, CV_LOAD_IMAGE_GRAYSCALE);

    // show the two gray images
    cvShowImage(WINDOW1NAME, pGrayImage1);
    cvShowImage(WINDOW2NAME, pGrayImage2);
    cvWaitKey(0);

    // show image1 with a track slider
    cvNamedWindow(WINDOW1BINARYNAME, CV_WINDOW_NORMAL);
    cvNamedWindow(WINDOW2BINARYNAME, CV_WINDOW_NORMAL);
    pBinaryImage1 = cvCreateImage(cvGetSize(pGrayImage1), IPL_DEPTH_8U, 1);
    pBinaryImage2 = cvCreateImage(cvGetSize(pGrayImage2), IPL_DEPTH_8U, 1);
    int threshold = 0;
    cvCreateTrackbar(TRACKNAME, WINDOW1BINARYNAME, &threshold, 254, on_trackbar);
    on_trackbar(1);

    int c = cvWaitKey(0);
    printf("%c, %d, %x, %x, %x, %x\n", c, c, c, 'a', 'o', 'x');
    IplImage *pImageXor = cvCreateImage(cvGetSize(pGrayImage1), IPL_DEPTH_8U, 1);
    if (c == 0x100078) {            // 'x': XOR the two binary images
        cvXor(pBinaryImage1, pBinaryImage2, pImageXor, NULL);
        cvNamedWindow(WINDOWXORNAME, CV_WINDOW_NORMAL);
        cvShowImage(WINDOWXORNAME, pImageXor);
    } else if (c == 0x100061) {     // 'a': AND the two binary images
        cvAnd(pBinaryImage1, pBinaryImage2, pImageXor, NULL);
        cvNamedWindow(WINDOWANDNAME, CV_WINDOW_NORMAL);
        cvShowImage(WINDOWANDNAME, pImageXor);
    } else if (c == 0x10006f) {     // 'o': OR the two binary images
        cvOr(pBinaryImage1, pBinaryImage2, pImageXor, NULL);
        cvNamedWindow(WINDOWORNAME, CV_WINDOW_NORMAL);
        cvShowImage(WINDOWORNAME, pImageXor);
    } else {
        goto err_input;
    }

    { // block scope so the goto above does not jump over these initializations
        int s = cvWaitKey(0);
        char *filename = NULL;
        if (s == 0x100073) {        // 's': save the result
            if (c == 0x100078) {
                filename = "xor.jpg";
            } else if (c == 0x10006f) {
                filename = "or.jpg";
            } else if (c == 0x100061) {
                filename = "and.jpg";
            }
            cvSaveImage(filename, pImageXor, 0);
        }
    }

    // xor two images
    /*cvOr(pGrayImage1, pGrayImage2, pImageXor, NULL);
    cvNamedWindow("xor", CV_WINDOW_NORMAL);
    cvShowImage("xor", pImageXor);
    cvWaitKey(0);
    cvReleaseImage(&pImageXor);
    cvDestroyWindow("xor");*/

err_input:
    cvReleaseImage(&pImageXor);
    cvReleaseImage(&pGrayImage1);
    cvReleaseImage(&pGrayImage2);
    cvReleaseImage(&pBinaryImage1);
    cvReleaseImage(&pBinaryImage2);
    cvDestroyWindow(WINDOW1NAME);
    cvDestroyWindow(WINDOW2NAME);
    cvDestroyWindow(WINDOW1BINARYNAME);
    cvDestroyWindow(WINDOW2BINARYNAME);
    return 0;
}
static GstFlowReturn gst_gcs_transform_ip(GstBaseTransform * btrans, GstBuffer * gstbuf) {
    GstGcs *gcs = GST_GCS (btrans);
    GST_GCS_LOCK (gcs);

    //////////////////////////////////////////////////////////////////////////
    // get image data from the input, which is RGBA or BGRA
    gcs->pImageRGBA->imageData = (char*)GST_BUFFER_DATA(gstbuf);
    cvSplit(gcs->pImageRGBA, gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, gcs->pImgChX);
    cvCvtColor(gcs->pImageRGBA, gcs->pImgRGB, CV_BGRA2BGR);

    //////////////////////////////////////////////////////////////////////////
    /////////////////////////////////////////////// MOTION CUES INTEGRATION ///
    //////////////////////////////////////////////////////////////////////////

    // apply step 1: filtering using bilateral filter. Cannot happen in-place => scratch
    cvSmooth(gcs->pImgRGB, gcs->pImgScratch, CV_BILATERAL, 3, 50, 3, 0);
    // create GRAY image
    cvCvtColor(gcs->pImgScratch, gcs->pImgGRAY, CV_BGR2GRAY);

    // frame-difference the GRAY frame and the previous one
    // (the frames are smoothed first, then differenced)
    cvCopy(gcs->pImgGRAY, gcs->pImgGRAY_copy, NULL);
    cvCopy(gcs->pImgGRAY_1, gcs->pImgGRAY_1copy, NULL);
    get_frame_difference(gcs->pImgGRAY_copy, gcs->pImgGRAY_1copy, gcs->pImgGRAY_diff);
    cvErode(gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, NULL, 3);
    cvDilate(gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, NULL, 3);

    //////////////////////////////////////////////////////////////////////////
    // ghost mapping
    gcs->dstTri[0].x = gcs->facepos.x - gcs->facepos.width/2;
    gcs->dstTri[0].y = gcs->facepos.y - gcs->facepos.height/2;
    gcs->dstTri[1].x = gcs->facepos.x - gcs->facepos.width/2;
    gcs->dstTri[1].y = gcs->facepos.y + gcs->facepos.height/2;
    gcs->dstTri[2].x = gcs->facepos.x + gcs->facepos.width/2;
    gcs->dstTri[2].y = gcs->facepos.y + gcs->facepos.height/2;

    if (gcs->ghostfilename) {
        cvGetAffineTransform(gcs->srcTri, gcs->dstTri, gcs->warp_mat);
        cvWarpAffine(gcs->cvGhostBwResized, gcs->cvGhostBwAffined, gcs->warp_mat);
    }

    //////////////////////////////////////////////////////////////////////////
    // GrabCut algorithm preparation and running
    gcs->facepos.x = gcs->facepos.x - gcs->facepos.width/2;
    gcs->facepos.y = gcs->facepos.y - gcs->facepos.height/2;

    // create an IplImage with the skin colour pixels as 255
    compose_skin_matrix(gcs->pImgRGB, gcs->pImg_skin);
    // AND the skin pixels with the movement mask
    cvAnd(gcs->pImg_skin, gcs->pImgGRAY_diff, gcs->pImgGRAY_diff);
    //cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5, 5, 3, 3, CV_SHAPE_RECT,NULL), 1);
    cvDilate(gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(7, 7, 5, 5, CV_SHAPE_RECT, NULL), 2);
    cvErode(gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5, 5, 3, 3, CV_SHAPE_RECT, NULL), 2);

    // if the alpha coming in is all 1's, ignore it: prevents problems when
    // no vibe element ran before us
    if ((0.75 * (gcs->width * gcs->height) <= cvCountNonZero(gcs->pImgChX)))
        cvZero(gcs->pImgChX);
    // OR in the input alpha
    cvOr(gcs->pImgChX, gcs->pImgGRAY_diff, gcs->pImgGRAY_diff);

    //////////////////////////////////////////////////////////////////////////
    // try to consolidate a single mask from all the sub-patches
    cvDilate(gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(7, 7, 5, 5, CV_SHAPE_RECT, NULL), 3);
    cvErode(gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5, 5, 3, 3, CV_SHAPE_RECT, NULL), 4);

    //////////////////////////////////////////////////////////////////////////
    // use either Ghost or boxes-model to create a PR foreground starting point in gcs->grabcut_mask
    if (gcs->ghostfilename) {
        compose_grabcut_seedmatrix3(gcs->grabcut_mask, gcs->cvGhostBwAffined, gcs->pImgGRAY_diff);
    } else {
        // toss it all to the bbox creation function, together with the face position and size
        compose_grabcut_seedmatrix2(gcs->grabcut_mask, gcs->facepos, gcs->pImgGRAY_diff, gcs->facefound);
    }

    //////////////////////////////////////////////////////////////////////////
#ifdef KMEANS
    gcs->num_clusters = 18; // keep it even to simplify integer arithmetics
    cvCopy(gcs->pImgRGB, gcs->pImgRGB_kmeans, NULL);
    posterize_image(gcs->pImgRGB_kmeans);
    create_kmeans_clusters(gcs->pImgRGB_kmeans, gcs->kmeans_points, gcs->kmeans_clusters, gcs->num_clusters, gcs->num_samples);
    adjust_bodybbox_w_clusters(gcs->grabcut_mask, gcs->pImgRGB_kmeans, gcs->num_clusters, gcs->facepos);
#endif //KMEANS

    //////////////////////////////////////////////////////////////////////////
    if (gcs->debug < 70)
        run_graphcut_iteration(&(gcs->GC), gcs->pImgRGB, gcs->grabcut_mask, &gcs->bbox_prev);

    // get a copy of GRAY for the next iteration
    cvCopy(gcs->pImgGRAY, gcs->pImgGRAY_1, NULL);

    //////////////////////////////////////////////////////////////////////////
    // if we want to display, just overwrite the output
    if (gcs->display) {
        int outputimage = gcs->debug;
        switch (outputimage) {
        case 1: // output the GRAY difference
            cvCvtColor(gcs->pImgGRAY_diff, gcs->pImgRGB, CV_GRAY2BGR);
            break;
        case 50: // Ghost remapped
            cvCvtColor(gcs->cvGhostBwAffined, gcs->pImgRGB, CV_GRAY2BGR);
            break;
        case 51: // Ghost applied
            cvAnd(gcs->cvGhostBwAffined, gcs->pImgGRAY, gcs->pImgGRAY, NULL);
            cvCvtColor(gcs->pImgGRAY, gcs->pImgRGB, CV_GRAY2BGR);
            break;
        case 60: // Graphcut
            cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL); // get only FG
            cvConvertScale(gcs->grabcut_mask, gcs->grabcut_mask, 127.0);
            cvCvtColor(gcs->grabcut_mask, gcs->pImgRGB, CV_GRAY2BGR);
            break;
        case 61: // Graphcut applied on input/output image
            cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL); // get only FG, PR_FG
            cvConvertScale(gcs->grabcut_mask, gcs->grabcut_mask, 255.0);
            cvAnd(gcs->grabcut_mask, gcs->pImgGRAY, gcs->pImgGRAY, NULL);
            cvCvtColor(gcs->pImgGRAY, gcs->pImgRGB, CV_GRAY2BGR);
            cvRectangle(gcs->pImgRGB, cvPoint(gcs->bbox_now.x, gcs->bbox_now.y),
                        cvPoint(gcs->bbox_now.x + gcs->bbox_now.width, gcs->bbox_now.y + gcs->bbox_now.height),
                        cvScalar(127, 0.0), 1, 8, 0);
            break;
        case 70: // bboxes
            cvZero(gcs->pImgGRAY);
            cvMul(gcs->grabcut_mask, gcs->grabcut_mask, gcs->pImgGRAY, 40.0);
            cvCvtColor(gcs->pImgGRAY, gcs->pImgRGB, CV_GRAY2BGR);
            break;
        case 71: // bboxes applied on the original image
            cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL); // get only FG, PR_FG
            cvMul(gcs->grabcut_mask, gcs->pImgGRAY, gcs->pImgGRAY, 1.0);
            cvCvtColor(gcs->pImgGRAY, gcs->pImgRGB, CV_GRAY2BGR);
            break;
        case 72: // input alpha channel mapped to output
            cvCvtColor(gcs->pImgChX, gcs->pImgRGB, CV_GRAY2BGR);
            break;
#ifdef KMEANS
        case 80: // k-means output
            cvCopy(gcs->pImgRGB_kmeans, gcs->pImgRGB, NULL);
            break;
        case 81: // k-means output filtered with bbox/ghost mask
            cvSplit(gcs->pImgRGB_kmeans, gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, NULL);
            cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL); // get FG and PR_FG
            cvConvertScale(gcs->grabcut_mask, gcs->grabcut_mask, 255.0); // scale any to 255
            cvAnd(gcs->grabcut_mask, gcs->pImgCh1, gcs->pImgCh1, NULL);
            cvAnd(gcs->grabcut_mask, gcs->pImgCh2, gcs->pImgCh2, NULL);
            cvAnd(gcs->grabcut_mask, gcs->pImgCh3, gcs->pImgCh3, NULL);
            cvMerge(gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, NULL, gcs->pImgRGB);
            break;
#endif //KMEANS
        default:
            break;
        }
    }

    //////////////////////////////////////////////////////////////////////////
    // copy the fg/bg result into the alpha channel of the output image
    cvSplit(gcs->pImgRGB, gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, NULL);
    cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL); // get only FG and possible FG
    cvConvertScale(gcs->grabcut_mask, gcs->grabcut_mask, 255.0);
    gcs->pImgChA->imageData = (char*)gcs->grabcut_mask->data.ptr;
    cvMerge(gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, gcs->pImgChA, gcs->pImageRGBA);

    gcs->numframes++;
    GST_GCS_UNLOCK (gcs);
    return GST_FLOW_OK;
}
void THISCLASS::OnStep() {
    IplImage *inputimage = mCore->mDataStructureImageColor.mImage;
    if (! inputimage) {
        AddError(wxT("Cannot access input image."));
        return;
    }
    if (inputimage->nChannels != 3) {
        AddError(wxT("Input must be a color image (3 channels)."));
    }

    // Create the images needed for the work if necessary
    for (int i = 0; i < 3; i++) {
        if (!tmpImage[i]) tmpImage[i] = cvCreateImage(cvGetSize(inputimage), 8, 1);
    }

    // Do the thresholding: each of the three channels is thresholded
    // separately and the binary results are then combined
    try {
        PrepareOutputImage(inputimage);
        cvSplit(inputimage, tmpImage[0], tmpImage[1], tmpImage[2], NULL);
        for (int i = 0; i < 3; i++) {
            switch (inputimage->channelSeq[i]) {
            case 'B':
                if (mInvertThreshold) {
                    cvThreshold(tmpImage[i], tmpImage[i], mBlueThreshold, 255, CV_THRESH_BINARY_INV);
                } else {
                    cvThreshold(tmpImage[i], tmpImage[i], mBlueThreshold, 255, CV_THRESH_BINARY);
                }
                break;
            case 'G':
                if (mInvertThreshold) {
                    cvThreshold(tmpImage[i], tmpImage[i], mGreenThreshold, 255, CV_THRESH_BINARY_INV);
                } else {
                    cvThreshold(tmpImage[i], tmpImage[i], mGreenThreshold, 255, CV_THRESH_BINARY);
                }
                break;
            case 'R':
                if (mInvertThreshold) {
                    cvThreshold(tmpImage[i], tmpImage[i], mRedThreshold, 255, CV_THRESH_BINARY_INV);
                } else {
                    cvThreshold(tmpImage[i], tmpImage[i], mRedThreshold, 255, CV_THRESH_BINARY);
                }
                break;
            default:
                AddError(wxT("Only Blue, Green and Red channels are accepted for this thresholding method."));
                return;
            }
        }
        if (mOrBool) {
            cvOr(tmpImage[0], tmpImage[1], tmpImage[0]);
            cvOr(tmpImage[0], tmpImage[2], mOutputImage);
        } else {
            cvAnd(tmpImage[0], tmpImage[1], tmpImage[0]);
            cvAnd(tmpImage[0], tmpImage[2], mOutputImage);
        }
        mCore->mDataStructureImageBinary.mImage = mOutputImage;
    } catch (...) {
        AddError(wxT("Thresholding failed."));
        return;
    }

    // Set the display
    DisplayEditor de(&mDisplayOutput);
    if (de.IsActive()) {
        de.SetMainImage(mOutputImage);
    }
}
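/*
 * The core of the step above, per-channel threshold followed by an OR or AND
 * fold, as a compact standalone helper. A sketch assuming an 8-bit BGR input
 * (thresholdCombine and its parameters are illustrative, not from the
 * framework above):
 */
#include <opencv/cv.h>

void thresholdCombine(const IplImage* bgr, CvScalar thresh, bool useOr, IplImage* out) {
    CvSize sz = cvGetSize(bgr);
    IplImage* ch[3];
    for (int i = 0; i < 3; i++) ch[i] = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    cvSplit(bgr, ch[0], ch[1], ch[2], NULL);
    for (int i = 0; i < 3; i++)
        cvThreshold(ch[i], ch[i], thresh.val[i], 255, CV_THRESH_BINARY);
    if (useOr) {            // any channel above its threshold
        cvOr(ch[0], ch[1], ch[0]);
        cvOr(ch[0], ch[2], out);
    } else {                // all channels above their thresholds
        cvAnd(ch[0], ch[1], ch[0]);
        cvAnd(ch[0], ch[2], out);
    }
    for (int i = 0; i < 3; i++) cvReleaseImage(&ch[i]);
}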
/* Main tracking function - gets called by MT_TrackerFrameBase every
 * time step when the application is not paused. */
void DanceTracker::doTracking(IplImage* frame) {
    /* time-keeping, if necessary
     * NOTE this is not necessary for keeping track of frame rate */
    static double t_prev = MT_getTimeSec();
    double t_now = MT_getTimeSec();
    m_dDt = t_now - t_prev;
    t_prev = t_now;

    /* keeping track of the frame number, if necessary */
    m_iFrameCounter++;

    /* This checks every time step to see if the UKF parameters have
       changed and modifies the UKF structures accordingly. This will
       also get called the first time through because the "Prev" values
       get set to zero initially. There may be a more efficient way to
       do this, but because the values need to be embedded into the
       CvMat objects I'm not sure how else to do it. */
    if (m_dSigmaPosition != m_dPrevSigmaPosition ||
        m_dSigmaSpeed != m_dPrevSigmaSpeed ||
        m_dSigmaPositionMeas != m_dPrevSigmaPositionMeas)
    {
        /* these are the diagonal entries of the "Q" matrix, which
           represents the variances of the process noise. They're
           modeled here as being independent and uncorrelated. */
        cvSetReal2D(m_pQ, 0, 0, m_dSigmaPosition*m_dSigmaPosition);
        cvSetReal2D(m_pQ, 1, 1, m_dSigmaPosition*m_dSigmaPosition);
        cvSetReal2D(m_pQ, 2, 2, m_dSigmaSpeed*m_dSigmaSpeed);
        cvSetReal2D(m_pQ, 3, 3, m_dSigmaSpeed*m_dSigmaSpeed);

        /* these are the diagonal entries of the "R" matrix, also
           assumed to be uncorrelated. */
        cvSetReal2D(m_pR, 0, 0, m_dSigmaPositionMeas*m_dSigmaPositionMeas);
        cvSetReal2D(m_pR, 1, 1, m_dSigmaPositionMeas*m_dSigmaPositionMeas);

        /* this step actually copies the Q and R matrices to the UKF
           and makes sure that its internals are properly initialized -
           it's set up to handle the fact that the sizes of these
           matrices could have changed. */
        for (unsigned int i = 0; i < m_iNObj; i++) {
            MT_UKFCopyQR(m_vpUKF[i], m_pQ, m_pR);
        }
    }

    HSVSplit(frame);

    /* band-pass each HSV channel: binary threshold at the low end,
       inverse threshold at the high end, AND the pair together */
    cvThreshold(m_pHFrame, m_pThreshFrame, m_iHThresh_Low, 255, CV_THRESH_BINARY);
    cvThreshold(m_pHFrame, m_pTempFrame1, m_iHThresh_High, 255, CV_THRESH_BINARY_INV);
    cvAnd(m_pThreshFrame, m_pTempFrame1, m_pThreshFrame);

    cvThreshold(m_pSFrame, m_pTempFrame1, m_iSThresh_Low, 255, CV_THRESH_BINARY);
    cvThreshold(m_pSFrame, m_pTempFrame2, m_iSThresh_High, 255, CV_THRESH_BINARY_INV);
    cvAnd(m_pTempFrame1, m_pTempFrame2, m_pTempFrame1);
    cvAnd(m_pThreshFrame, m_pTempFrame1, m_pThreshFrame);

    cvThreshold(m_pVFrame, m_pTempFrame1, m_iVThresh_Low, 255, CV_THRESH_BINARY);
    cvThreshold(m_pVFrame, m_pTempFrame2, m_iVThresh_High, 255, CV_THRESH_BINARY_INV);
    cvAnd(m_pTempFrame1, m_pTempFrame2, m_pTempFrame1);
    cvAnd(m_pThreshFrame, m_pTempFrame1, m_pTempFrame1);

    /* OR in pixels that differ strongly from the background value frame */
    cvSub(BG_frame, m_pVFrame, m_pTempFrame2);
    cvThreshold(m_pTempFrame2, m_pTempFrame2, m_iBGThresh, 255, CV_THRESH_BINARY);
    cvOr(m_pTempFrame1, m_pTempFrame2, m_pThreshFrame);
    cvSmooth(m_pThreshFrame, m_pThreshFrame, CV_MEDIAN, 3);

    if (ROI_frame) {
        cvAnd(m_pThreshFrame, ROI_frame, m_pThreshFrame);
    }

    /* std::vector<YABlob> yblobs = m_YABlobber.FindBlobs(m_pThreshFrame, 5, m_iBlobAreaThreshLow, NO_MAX, m_iBlobAreaThreshHigh);
    int ny = yblobs.size();
    m_vdBlobs_X.resize(ny);
    m_vdBlobs_Y.resize(ny);
    m_vdBlobs_Orientation.resize(ny);
    for(unsigned int i = 0; i < yblobs.size(); i++) {
        m_vdBlobs_X[i] = yblobs[i].COMx;
        m_vdBlobs_Y[i] = yblobs[i].COMy;
        m_vdBlobs_Orientation[i] = 0;
    }*/

    m_vbNoMeasurement.assign(m_iNObj, false);

    Dance_Segmenter segmenter(this);
    segmenter.setDebugFile(stdout);
    segmenter.m_iMinBlobPerimeter = 1;
    segmenter.m_iMinBlobArea = m_iBlobAreaThreshLow;
    segmenter.m_iMaxBlobArea = m_iBlobAreaThreshHigh;
    segmenter.m_dOverlapFactor = m_dOverlapFactor;

    if (m_iFrameCounter <= 1) {
        std::ifstream in_file;
        in_file.open("initials.dat");
        double x, y;
        m_vBlobs.resize(0);
        m_vBlobs = MT_readDSGYABlobsFromFile("initials.dat");
        m_vInitBlobs.resize(0);
        m_viAssignments.resize(0);
        /* m_vBlobs = segmenter.segmentFirstFrame(m_pThreshFrame, m_iNObj); */
    } else {
        MT_writeDSGYABlobsToFile(m_vBlobs, "blobs-in.dat");
        MT_writeDSGYABlobsToFile(m_vPredictedBlobs, "predicted-in.dat");
        bool use_prediction = true;
        m_vBlobs = segmenter.doSegmentation(m_pThreshFrame, use_prediction ? m_vPredictedBlobs : m_vBlobs);
        m_viAssignments = segmenter.getAssignmentVector(&m_iAssignmentRows, &m_iAssignmentCols);
        m_vInitBlobs = segmenter.getInitialBlobs();
    }

    /* prediction is done below - this makes sure the predicted blobs
       are OK no matter what */
    m_vPredictedBlobs = m_vBlobs;

    unsigned int sc = 0;
    bool same_frame = false;
    for (unsigned int i = 0; i < m_vBlobs.size(); i++) {
        if (m_vdBlobs_X[i] == m_vBlobs[i].m_dXCenter) {
            sc++;
        }
        m_vdBlobs_X[i] = m_vBlobs[i].m_dXCenter;
        m_vdBlobs_Y[i] = m_vBlobs[i].m_dYCenter;
        m_vdBlobs_Orientation[i] = m_vBlobs[i].m_dOrientation;
    }
    same_frame = (sc >= m_iNObj - 2);
    if (same_frame) {
        return;
    }

    /* Tracking / UKF / State Estimation
     *
     * Now that we've got the mapping of which measurement goes with
     * which object, we need to feed the measurements into the UKF in
     * order to obtain a state estimate.
     *
     * This is a loop over each object we're tracking. */
    for (unsigned int i = 0; i < m_iNObj; i++) {
        /* we could throw out a measurement and use the blob state as
           an estimate for various reasons. On the first frame we want
           to set the initial state, so we flag the measurement as
           invalid */
        bool invalid_meas = m_vbNoMeasurement[i];
        bool need_state = m_iFrameCounter == 1;

        /* if any state is NaN, reset the UKF.
         * This shouldn't happen anymore, but it's a decent safety
         * check. It could probably be omitted if we want to
         * optimize for speed... */
        if (m_iFrameCounter > 1 && (!CvMatIsOk(m_vpUKF[i]->x) || !CvMatIsOk(m_vpUKF[i]->P))) {
            MT_UKFFree(&(m_vpUKF[i]));
            m_vpUKF[i] = MT_UKFInit(4, 2, 0.1);
            MT_UKFCopyQR(m_vpUKF[i], m_pQ, m_pR);
            need_state = true;
        }

        if (need_state) {
            cvSetReal2D(m_px0, 0, 0, m_vdBlobs_X[i]);
            cvSetReal2D(m_px0, 1, 0, m_vdBlobs_Y[i]);
            cvSetReal2D(m_px0, 2, 0, 0);
            cvSetReal2D(m_px0, 3, 0, 0);
            MT_UKFSetState(m_vpUKF[i], m_px0);
        }

        /* if we're going to accept this measurement */
        if (!invalid_meas) {
            /* UKF prediction step; note we use function pointers to
               the dance_dynamics and dance_measurement functions
               defined above. The final parameter would be for the
               control input vector, which we don't use here, so we
               pass a NULL pointer */
            MT_UKFPredict(m_vpUKF[i], &dance_dynamics, &dance_measurement, NULL);

            /* finally, set the measurement vector z */
            cvSetReal2D(m_pz, 0, 0, m_vdBlobs_X[i]);
            cvSetReal2D(m_pz, 1, 0, m_vdBlobs_Y[i]);
            MT_UKFSetMeasurement(m_vpUKF[i], m_pz);

            /* then do the UKF correction step, which accounts for the
               measurement */
            MT_UKFCorrect(m_vpUKF[i]);
        } else {
            /* use the predicted state */
            CvMat* xp = m_vpUKF[i]->x1;
            MT_UKFSetState(m_vpUKF[i], xp);
        }

        /* then constrain the state if necessary - see function
         * definition above */
        constrain_state(m_vpUKF[i]->x, m_vpUKF[i]->x1, frame);

        /* grab the state estimate and store it in variables that will
           make it convenient to save it to a file. */
        CvMat* x = m_vpUKF[i]->x;
        m_vdTracked_X[i] = cvGetReal2D(x, 0, 0);
        m_vdTracked_Y[i] = cvGetReal2D(x, 1, 0);
        m_vdTracked_Vx[i] = cvGetReal2D(x, 2, 0);
        m_vdTracked_Vy[i] = cvGetReal2D(x, 3, 0);

        /* take the tracked positions as the blob centers */
        m_vBlobs[i].m_dXCenter = m_vdTracked_X[i];
        m_vBlobs[i].m_dYCenter = m_vdTracked_Y[i];

        /* use the predicted state to predict the blob locations */
        CvMat* xp = m_vpUKF[i]->x1;
        m_vPredictedBlobs[i].m_dXCenter = cvGetReal2D(xp, 0, 0);
        m_vPredictedBlobs[i].m_dYCenter = cvGetReal2D(xp, 1, 0);
    }

    MT_writeDSGYABlobsToFile(m_vBlobs, "blobs-out.dat");
    MT_writeDSGYABlobsToFile(m_vPredictedBlobs, "predicted-out.dat");

    /* write data to file */
    writeData();
}
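/*
 * Each HSV channel above is band-passed with a pair of cvThreshold calls
 * joined by cvAnd. On a packed 3-channel HSV image, a single cvInRangeS
 * expresses the same three band-passes at once, up to endpoint conventions
 * (the threshold pair keeps low < x <= high, cvInRangeS keeps
 * low <= x < high). A sketch with illustrative names:
 */
#include <opencv/cv.h>

void hsvBandPass(const IplImage* hsv, int hLo, int hHi, int sLo, int sHi, int vLo, int vHi, IplImage* mask) {
    // one call covers the three per-channel band-passes and the AND fold
    cvInRangeS(hsv,
               cvScalar(hLo, sLo, vLo, 0),
               cvScalar(hHi, sHi, vHi, 0),
               mask);
}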
int main(int argc, char **argv) {
    robot_if_t ri;
    int major, minor, x_dist_diff, square_count, prev_square_area_1 = 0, prev_square_area_2 = 0;
    IplImage *image = NULL, *hsv = NULL, *threshold_1 = NULL, *threshold_2 = NULL, *final_threshold = NULL;
    squares_t *squares = NULL, *biggest_1 = NULL, *biggest_2 = NULL, *pair_square_1, *pair_square_2, *sq_idx;
    bool same_square;
    bool hasPair = 0;
    square_count = 0;

    // Make sure we have a valid command line argument
    if (argc <= 1) {
        printf("Usage: robot_test <address of robot>\n");
        exit(-1);
    }

    ri_api_version(&major, &minor);
    printf("Robot API Test: API Version v%i.%i\n", major, minor);

    // Setup the robot with the address passed in
    if (ri_setup(&ri, argv[1], 0)) {
        printf("Failed to setup the robot!\n");
        exit(-1);
    }

    // Setup the camera
    if (ri_cfg_camera(&ri, RI_CAMERA_DEFAULT_BRIGHTNESS, RI_CAMERA_DEFAULT_CONTRAST, 5, RI_CAMERA_RES_640, RI_CAMERA_QUALITY_LOW)) {
        printf("Failed to configure the camera!\n");
        exit(-1);
    }

    // Create windows to display the output
    //cvNamedWindow("Rovio Camera", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Biggest Square", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Thresholded", CV_WINDOW_AUTOSIZE);

    // Create an image to store the image from the camera
    image = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
    // Create an image to store the HSV version in
    // We configured the camera for 640x480 above, so use that size here
    hsv = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
    // And an image for each thresholded version
    threshold_1 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
    threshold_2 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
    final_threshold = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);

    // Move the head up to the middle position
    ri_move(&ri, RI_HEAD_MIDDLE, RI_FASTEST);

    // Action loop
    do {
        // Update the robot's sensor information
        if (ri_update(&ri) != RI_RESP_SUCCESS) {
            printf("Failed to update sensor information!\n");
            continue;
        }

        // Get the current camera image
        if (ri_get_image(&ri, image) != RI_RESP_SUCCESS) {
            printf("Unable to capture an image!\n");
            continue;
        }
        //cvShowImage("Rovio Camera", image);

        // Convert the image from BGR to HSV
        cvCvtColor(image, hsv, CV_BGR2HSV);

        // Pick out the first range of pink color from the image
        cvInRangeS(hsv, RC_PINK_LOW_1, RC_PINK_HIGH_1, threshold_1);
        // Pick out the second range of pink color from the image
        cvInRangeS(hsv, RC_PINK_LOW_2, RC_PINK_HIGH_2, threshold_2);
        // compute the final threshold image by OR'ing the two ranges
        cvOr(threshold_1, threshold_2, final_threshold, NULL);
        cvShowImage("Thresholded", final_threshold);

        // Find the squares in the image
        squares = ri_find_squares(final_threshold, RI_DEFAULT_SQUARE_SIZE);
        if (squares != NULL) {
            printf("Sorting squares!\n");
            sort_squares(squares);
            printf("Sort Complete!\n");
            printAreas(squares);
            printf("Done printing");

            // find the biggest pair (if it exists)
            sq_idx = squares;
            while (sq_idx != NULL) {
                if (sq_idx->next == NULL) {
                    break;
                } else if (isPair(sq_idx, sq_idx->next, 0.75)) {
                    hasPair = 1;
                    break;
                }
                sq_idx = sq_idx->next;
            }
            printf("Pair ID complete!\n");

            if (hasPair) {
                printf("Pair found.\n");
                //draw_green_X(sq_idx, image);
                //draw_green_X(sq_idx->next, image);
                biggest_1 = sq_idx;
                biggest_2 = sq_idx->next;
            } else {
                printf("Pair not found. Marking largest.\n");
                draw_red_X(squares, image);
                // temporary:
                biggest_1 = squares;
                biggest_2 = squares;
            }
            hasPair = 0;
        } else {
            printf("No squares found.\n");
        }
        hasPair = 0;

        if (biggest_1 != NULL) {
            draw_green_X(biggest_1, image);
            printf("Area 1 = %d", biggest_1->area);
        }

        // we only see the last pair of squares: go straight ahead until IR detection stops the robot
        if (square_count == 3) {
            ri_move(&ri, RI_MOVE_FORWARD, 1);
            if (ri_IR_Detected(&ri)) {
                square_count++;
                printf("Object detected, square_count = %d\n", square_count);
            }
        }
        // once the robot is at the intersection, rotate right until it detects a pair of squares
        else if (square_count == 4) {
            printf("Rotating\n");
            if (biggest_1 != NULL && biggest_2 != NULL && (biggest_1->area - biggest_2->area) < 500) {
                square_count++;
                printf("New Path Found\n");
            }
            ri_move(&ri, RI_TURN_RIGHT, 7);
        } else {
            /*
             * If we only find a single usable largest square:
             *   if the square is on the left of the screen, turn right, strafe right
             *   if the square is on the right of the screen, turn left, strafe left
             */
            if (biggest_1 != NULL && biggest_2 != NULL) {
                draw_red_X(biggest_2, image);
                printf("\tArea 2 = %d\n", biggest_2->area);

                // get the difference in distance between the two biggest squares and the center vertical line
                x_dist_diff = get_square_diffence(biggest_1, biggest_2, image);
                get_diff_in_y(biggest_1, biggest_2);

                // when the camera can't detect a pair of squares, the second biggest
                // square is much smaller than the biggest square
                if ((biggest_1->area - biggest_2->area) > 500) {
                    // if both squares are on the left side of the center line
                    if (biggest_1->center.x < image->width/2 && biggest_2->center.x < image->width/2) {
                        printf("rotate right at speed = 6\n");
                        ri_move(&ri, RI_TURN_RIGHT, 6);
                    }
                    // if both squares are on the right side of the center line
                    else if (biggest_1->center.x > image->width/2 && biggest_2->center.x > image->width/2) {
                        printf("rotate left at speed = 6\n");
                        ri_move(&ri, RI_TURN_LEFT, 6);
                    }
                    // if the center line is in the middle of the two biggest squares
                    else if (biggest_1->center.x < image->width/2 && biggest_2->center.x > image->width/2) {
                        printf("rotate right at speed = 2\n");
                        ri_move(&ri, RI_TURN_RIGHT, 2);
                    } else {
                        printf("rotate left at speed = 2\n");
                        ri_move(&ri, RI_TURN_LEFT, 2);
                    }
                } else {
                    // increment square_count whenever the robot passes a pair of squares
                    if (prev_square_area_1 != 0 && prev_square_area_2 != 0 &&
                        biggest_1->area < prev_square_area_1 && biggest_2->area < prev_square_area_2) {
                        square_count++;
                        printf("square count = %d\n", square_count);
                    }
                    // rotate to the left
                    if (x_dist_diff < -40) {
                        printf("rotate left at speed = 6\n");
                        ri_move(&ri, RI_TURN_LEFT, 6);
                    }
                    // rotate to the right
                    else if (x_dist_diff > 40) {
                        printf("rotate right at speed = 6\n");
                        ri_move(&ri, RI_TURN_RIGHT, 6);
                    }
                    prev_square_area_1 = biggest_1->area;
                    prev_square_area_2 = biggest_2->area;
                }
                ri_move(&ri, RI_MOVE_FORWARD, 5);
            }
            // once the camera can't detect any squares, make the robot go backwards
            else if (biggest_1 == NULL && biggest_2 == NULL) {
                printf("Move Backwards\n");
                ri_move(&ri, RI_MOVE_BACKWARD, 1);
            }
        }

        // display a straight vertical line
        draw_vertical_line(image);
        // Display the image with the drawing on it
        cvShowImage("Biggest Square", image);

        // Update the UI (10ms wait)
        cvWaitKey(10);

        // Release the square data
        while (squares != NULL) {
            sq_idx = squares->next;
            free(squares);
            squares = sq_idx;
        }
        biggest_1 = NULL;
        biggest_2 = NULL;

        // Move forward unless there's something in front of the robot
        /*if(!ri_IR_Detected(&ri))
            ri_move(&ri, RI_MOVE_FORWARD, RI_SLOWEST);*/
        //printf("Loop Complete\n");
        //getc(stdin);
    } while (1);

    // Clean up (although we'll never get here...)
    //cvDestroyWindow("Rovio Camera");
    cvDestroyWindow("Biggest Square");
    cvDestroyWindow("Thresholded");

    // Free the images
    cvReleaseImage(&threshold_1);
    cvReleaseImage(&threshold_2);
    cvReleaseImage(&final_threshold);
    cvReleaseImage(&hsv);
    cvReleaseImage(&image);
    return 0;
}
std::list<Garbage*> GarbageRecognition::garbageList(IplImage * src, IplImage * model) {
    clock_t start = clock();
    std::list<Garbage*> garbageList;
    std::vector<int> centroid(2);

    //~ utils::Histogram * h = new Histogram(HIST_H_BINS,HIST_S_BINS);
    //~ CvHistogram * testImageHistogram = h->getHShistogramFromRGB(model);

    // gets a frame for setting image size
    CvSize srcSize = cvGetSize(src);
    CvRect srcRect = cvRect(0, 0, srcSize.width, srcSize.height);

    // images for HSV conversion
    IplImage* hsv = cvCreateImage(srcSize, 8, 3);
    IplImage* h_plane = cvCreateImage(srcSize, 8, 1);
    IplImage* h_plane2;  // cloned from h_plane below
    IplImage* h_planeV;  //= cvCreateImage( srcSize, 8, 1 );
    IplImage* s_plane = cvCreateImage(srcSize, 8, 1);
    IplImage* v_plane = cvCreateImage(srcSize, 8, 1);

    // images for filtering
    IplImage * andImage = cvCreateImage(srcSize, 8, 1);
    IplImage * andImageV = cvCreateImage(srcSize, 8, 1);
    // images for thresholding
    IplImage * threshImage = cvCreateImage(srcSize, 8, 1);
    IplImage * threshImageV = cvCreateImage(srcSize, 8, 1);
    // images for morphological operations (dilate-erode)
    IplImage * morphImage = cvCreateImage(srcSize, 8, 1);
    IplImage * morphImageV = cvCreateImage(srcSize, 8, 1);
    // image for contour-finding operations
    IplImage * contourImage = cvCreateImage(srcSize, 8, 3);

    clock_t create = clock();
    printf("Time elapsed create Image: %f\n", ((double)create - start) / CLOCKS_PER_SEC);

    int frameCounter = 1;
    int cont_index = 0;

    // convolution kernel for morph operations
    IplConvKernel* element;
    CvRect boundingRect;

    // contours
    CvSeq * contours;
    CvSeq * contoursCopy;

    // convert image to HSV
    cvCvtColor(src, hsv, CV_BGR2HSV);
    clock_t conv = clock();
    printf("Time elapsed create Image - convert image: %f\n", ((double)conv - create) / CLOCKS_PER_SEC);

    cvCvtPixToPlane(hsv, h_plane, s_plane, v_plane, 0);
    h_planeV = cvCloneImage(h_plane);
    h_plane2 = cvCloneImage(h_plane);

    // glasses ("vasos") are captured with two hue ranges,
    // cigarette butts ("colillas") with one
    CvScalar vasosL1 = cvScalar(0, 0, 170);
    CvScalar vasosU1 = cvScalar(20, 255, 255);
    CvScalar vasosL = cvScalar(40, 0, 170);
    CvScalar vasosU = cvScalar(255, 255, 255);
    CvScalar colillasL = cvScalar(20, 60, 0);
    CvScalar colillasU = cvScalar(40, 255, 255);

    clock_t inrange = clock();
    //~ cvInRangeSalt( hsv,vasosL,vasosU, vasosL1, vasosU1,h_plane );
    cvInRangeS(hsv, vasosL1, vasosU1, h_plane);
    cvInRangeS(hsv, vasosL, vasosU, h_plane2);
    cvOr(h_plane, h_plane2, h_plane); // union of the two ranges
    printf("inRange %f\n", ((double)clock() - inrange) / CLOCKS_PER_SEC);
    cvInRangeS(hsv, colillasL, colillasU, h_planeV);
    cvShowImage("inrange vasos", h_plane);
    //~ cvShowImage("inrange colillas",h_planeV);

    //~ for(int x=0;x<srcSize.width;x++){
    //~     for(int y=0;y<srcSize.height;y++){
    //~         uchar * hue=&((uchar*) (h_plane->imageData+h_plane->widthStep*y))[x];
    //~         uchar * hueV=&((uchar*) (h_planeV->imageData+h_plane->widthStep*y))[x];
    //~         uchar * sat=&((uchar*) (s_plane->imageData+s_plane->widthStep*y))[x];
    //~         uchar * val=&((uchar*) (v_plane->imageData+v_plane->widthStep*y))[x];
    //~         if((*val>170) && (( (*hue)<20 || (*hue)>40) ))
    //~             *hue=255;
    //~         else
    //~             *hue=0;
    //~         // filter for cigarette filters
    //~         if((*hueV>20 && *hueV<40 && *sat>60))
    //~             *hueV=255;
    //~         else
    //~             *hueV=0;
    //~     }
    //~ }

    clock_t color = clock();
    printf("Time elapsed create Image - color filter: %f\n", ((double)color - conv) / CLOCKS_PER_SEC);

    //-- first pipeline: apply morphological operations
    element = cvCreateStructuringElementEx(MORPH_KERNEL_SIZE*2+1, MORPH_KERNEL_SIZE*2+1,
                                           MORPH_KERNEL_SIZE, MORPH_KERNEL_SIZE, CV_SHAPE_RECT, NULL);
    cvDilate(h_plane, morphImage, element, MORPH_DILATE_ITER);
    cvErode(morphImage, morphImage, element, MORPH_ERODE_ITER);
    cvThreshold(morphImage, threshImage, 100, 255, CV_THRESH_BINARY);
    clock_t pipe1 = clock();
    printf("Time elapsed color filter - first pipeline: %f\n", ((double)pipe1 - color) / CLOCKS_PER_SEC);
    //-- end first pipeline

    //-- start second pipeline
    cvAnd(h_planeV, v_plane, andImageV);
    // apply morphological operations
    cvDilate(andImageV, morphImageV, element, MORPH_DILATE_ITER);
    cvErode(morphImageV, morphImageV, element, MORPH_ERODE_ITER);
    cvThreshold(morphImageV, threshImageV, 100, 255, CV_THRESH_BINARY);
    //-- end second pipeline
    clock_t pipe2 = clock();
    printf("Time elapsed first pipeline - second pipeline: %f\n", ((double)pipe2 - pipe1) / CLOCKS_PER_SEC);

    // get all contours
    contours = myFindContours(threshImage);
    contoursCopy = contours;
    cont_index = 0;

    // image to draw contours on
    cvCopy(src, contourImage, 0);

    // contours for dishes and glasses
    while (contours != NULL) {
        CvSeq * aContour = getPolygon(contours);
        utils::Contours * ct;
        if (this->window == NULL)
            ct = new Contours(aContour);
        else
            ct = new Contours(aContour, this->window->window);

        if (ct->perimeterFilter(100, 10000) && ct->areaFilter(1000, 100000) && ct->vasoFilter()) {
            // passed the filters for glasses ("vasos"): get the contour bounding box
            boundingRect = cvBoundingRect(ct->getContour(), 0);
            cvRectangle(contourImage, cvPoint(boundingRect.x, boundingRect.y),
                        cvPoint(boundingRect.x + boundingRect.width, boundingRect.y + boundingRect.height),
                        _GREEN, 1, 8, 0);
            ct->printContour(3, cvScalar(127, 127, 0, 0), contourImage);
            centroid = ct->getCentroid();

            // build garbage list
            utils::MinimalBoundingRectangle * r = new utils::MinimalBoundingRectangle(
                boundingRect.x, boundingRect.y, boundingRect.width, boundingRect.height);
            utils::Garbage * aGarbage = new utils::Garbage(r, centroid, ct);
            // benchmark purposes
            aGarbage->isVisualized = true;
            aGarbage->isPredicted = false;
            aGarbage->isFocused = false;
            garbageList.push_back(aGarbage);
        } else if (ct->perimeterFilter(100, 10000) && ct->areaFilter(1000, 100000) && ct->platoFilter()) {
            // passed the filters for dishes ("platos"): get the contour bounding box
            boundingRect = cvBoundingRect(ct->getContour(), 0);
            cvRectangle(contourImage, cvPoint(boundingRect.x, boundingRect.y),
                        cvPoint(boundingRect.x + boundingRect.width, boundingRect.y + boundingRect.height),
                        _GREEN, 1, 8, 0);
            ct->printContour(3, cvScalar(127, 127, 0, 0), contourImage);
            centroid = ct->getCentroid();

            // build garbage list
            utils::MinimalBoundingRectangle * r = new utils::MinimalBoundingRectangle(
                boundingRect.x, boundingRect.y, boundingRect.width, boundingRect.height);
            utils::Garbage * aGarbage = new utils::Garbage(r, centroid, ct);
            // benchmark purposes
            aGarbage->isVisualized = true;
            aGarbage->isPredicted = false;
            aGarbage->isFocused = false;
            garbageList.push_back(aGarbage);
        }
        //delete ct;
        cvReleaseMemStorage(&aContour->storage);
        contours = contours->h_next;
        cont_index++;
    }
    clock_t vasoyplato = clock();
    printf("Time elapsed first pipe2 - vasos y platos: %f\n", ((double)vasoyplato - pipe2) / CLOCKS_PER_SEC);

    // second pipeline: release contour data from the first pass
    if (contoursCopy != NULL)
        cvReleaseMemStorage(&contoursCopy->storage);
    contours = myFindContours(threshImageV);
    contoursCopy = contours;
    cont_index = 0;

    while (contours != NULL) {
        CvSeq * aContour = getPolygon(contours);
        utils::Contours * ct;
        if (this->window == NULL)
            ct = new Contours(aContour);
        else
            ct = new Contours(aContour, this->window->window);

        // apply filters for cigarette butts ("colillas")
        if (ct->perimeterFilter(10, 800) && ct->areaFilter(50, 800) &&
            //ct->rectangularAspectFilter(CONTOUR_RECTANGULAR_MIN_RATIO, CONTOUR_RECTANGULAR_MAX_RATIO) &&
            ct->boxAreaFilter(BOXFILTER_TOLERANCE) &&
            //ct->histogramMatchingFilter(src,testImageHistogram, HIST_H_BINS,HIST_S_BINS,HIST_MIN) &&
            1) {
            // get the contour bounding box
            boundingRect = cvBoundingRect(ct->getContour(), 0);
            cvRectangle(contourImage, cvPoint(boundingRect.x, boundingRect.y),
                        cvPoint(boundingRect.x + boundingRect.width, boundingRect.y + boundingRect.height),
                        _GREEN, 1, 8, 0);
            ct->printContour(3, cvScalar(127, 127, 0, 0), contourImage);
            centroid = ct->getCentroid();

            // build garbage list
            utils::MinimalBoundingRectangle * r = new utils::MinimalBoundingRectangle(
                boundingRect.x, boundingRect.y, boundingRect.width, boundingRect.height);
            utils::Garbage * aGarbage = new utils::Garbage(r, centroid, ct);
            // benchmark purposes
            aGarbage->isVisualized = true;
            aGarbage->isPredicted = false;
            aGarbage->isFocused = false;
            garbageList.push_back(aGarbage);
        }
        delete ct;
        cvReleaseMemStorage(&aContour->storage);
        contours = contours->h_next;
        cont_index++;
    }
    clock_t colillas = clock();
    printf("Time elapsed vasosyplatos - colillas: %f\n", ((double)colillas - vasoyplato) / CLOCKS_PER_SEC);

    // display found contours
    //~ cvShowImage("drawContours",contourImage);

    // release temp images and data
    if (contoursCopy != NULL)
        cvReleaseMemStorage(&contoursCopy->storage);
    cvReleaseStructuringElement(&element);
    cvReleaseImage(&threshImage);
    cvReleaseImage(&threshImageV);
    cvReleaseImage(&morphImage);
    cvReleaseImage(&morphImageV);
    cvReleaseImage(&contourImage);
    cvReleaseImage(&hsv);
    cvReleaseImage(&h_plane);
    cvReleaseImage(&h_plane2);
    cvReleaseImage(&h_planeV);
    cvReleaseImage(&s_plane);
    cvReleaseImage(&v_plane);
    cvReleaseImage(&andImageV);
    cvReleaseImage(&andImage);

    clock_t total = clock();
    printf("total: %f\n", ((double)total - start) / CLOCKS_PER_SEC);
    return garbageList;
}
IplImage * find_macbeth( const char *img ) { IplImage * macbeth_img = cvLoadImage( img, CV_LOAD_IMAGE_ANYCOLOR|CV_LOAD_IMAGE_ANYDEPTH ); IplImage * macbeth_original = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, macbeth_img->nChannels ); cvCopy(macbeth_img, macbeth_original); IplImage * macbeth_split[3]; IplImage * macbeth_split_thresh[3]; for(int i = 0; i < 3; i++) { macbeth_split[i] = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, 1 ); macbeth_split_thresh[i] = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, 1 ); } cvSplit(macbeth_img, macbeth_split[0], macbeth_split[1], macbeth_split[2], NULL); if( macbeth_img ) { int adaptive_method = CV_ADAPTIVE_THRESH_MEAN_C; int threshold_type = CV_THRESH_BINARY_INV; int block_size = cvRound( MIN(macbeth_img->width,macbeth_img->height)*0.02)|1; fprintf(stderr,"Using %d as block size\n", block_size); double offset = 6; // do an adaptive threshold on each channel for(int i = 0; i < 3; i++) { cvAdaptiveThreshold(macbeth_split[i], macbeth_split_thresh[i], 255, adaptive_method, threshold_type, block_size, offset); } IplImage * adaptive = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), IPL_DEPTH_8U, 1 ); // OR the binary threshold results together cvOr(macbeth_split_thresh[0],macbeth_split_thresh[1],adaptive); cvOr(macbeth_split_thresh[2],adaptive,adaptive); for(int i = 0; i < 3; i++) { cvReleaseImage( &(macbeth_split[i]) ); cvReleaseImage( &(macbeth_split_thresh[i]) ); } int element_size = (block_size/10)+2; fprintf(stderr,"Using %d as element size\n", element_size); // do an opening on the threshold image IplConvKernel * element = cvCreateStructuringElementEx(element_size,element_size,element_size/2,element_size/2,CV_SHAPE_RECT); cvMorphologyEx(adaptive,adaptive,NULL,element,CV_MOP_OPEN); cvReleaseStructuringElement(&element); CvMemStorage* storage = cvCreateMemStorage(0); CvSeq* initial_quads = cvCreateSeq( 0, sizeof(*initial_quads), sizeof(void*), storage ); CvSeq* initial_boxes = cvCreateSeq( 0, sizeof(*initial_boxes), sizeof(CvBox2D), storage ); // find contours in the threshold image CvSeq * contours = NULL; cvFindContours(adaptive,storage,&contours); int min_size = (macbeth_img->width*macbeth_img->height)/ (MACBETH_SQUARES*100); if(contours) { int count = 0; for( CvSeq* c = contours; c != NULL; c = c->h_next) { CvRect rect = ((CvContour*)c)->rect; // only interested in contours with these restrictions if(CV_IS_SEQ_HOLE(c) && rect.width*rect.height >= min_size) { // only interested in quad-like contours CvSeq * quad_contour = find_quad(c, storage, min_size); if(quad_contour) { cvSeqPush( initial_quads, &quad_contour ); count++; rect = ((CvContour*)quad_contour)->rect; CvScalar average = contour_average((CvContour*)quad_contour, macbeth_img); CvBox2D box = cvMinAreaRect2(quad_contour,storage); cvSeqPush( initial_boxes, &box ); // fprintf(stderr,"Center: %f %f\n", box.center.x, box.center.y); double min_distance = MAX_RGB_DISTANCE; CvPoint closest_color_idx = cvPoint(-1,-1); for(int y = 0; y < MACBETH_HEIGHT; y++) { for(int x = 0; x < MACBETH_WIDTH; x++) { double distance = euclidean_distance_lab(average,colorchecker_srgb[y][x]); if(distance < min_distance) { closest_color_idx.x = x; closest_color_idx.y = y; min_distance = distance; } } } CvScalar closest_color = colorchecker_srgb[closest_color_idx.y][closest_color_idx.x]; // fprintf(stderr,"Closest color: %f %f %f (%d %d)\n", // closest_color.val[2], // 
                        //     closest_color.val[1],
                        //     closest_color.val[0],
                        //     closest_color_idx.x,
                        //     closest_color_idx.y
                        // );

                        // cvDrawContours(
                        //     macbeth_img,
                        //     quad_contour,
                        //     cvScalar(255,0,0),
                        //     cvScalar(0,0,255),
                        //     0,
                        //     element_size
                        // );
                        // cvCircle(
                        //     macbeth_img,
                        //     cvPointFrom32f(box.center),
                        //     element_size*6,
                        //     cvScalarAll(255),
                        //     -1
                        // );
                        // cvCircle(
                        //     macbeth_img,
                        //     cvPointFrom32f(box.center),
                        //     element_size*6,
                        //     closest_color,
                        //     -1
                        // );
                        // cvCircle(
                        //     macbeth_img,
                        //     cvPointFrom32f(box.center),
                        //     element_size*4,
                        //     average,
                        //     -1
                        // );
                        // CvRect rect = contained_rectangle(box);
                        // cvRectangle(
                        //     macbeth_img,
                        //     cvPoint(rect.x,rect.y),
                        //     cvPoint(rect.x+rect.width, rect.y+rect.height),
                        //     cvScalarAll(0),
                        //     element_size
                        // );
                    }
                }
            }

            ColorChecker found_colorchecker;

            fprintf(stderr,"%d initial quads found", initial_quads->total);
            if(count > MACBETH_SQUARES) {
                fprintf(stderr," (probably a Passport)\n");

                CvMat* points = cvCreateMat( initial_quads->total, 1, CV_32FC2 );
                CvMat* clusters = cvCreateMat( initial_quads->total, 1, CV_32SC1 );

                CvSeq* partitioned_quads[2];
                CvSeq* partitioned_boxes[2];
                for(int i = 0; i < 2; i++) {
                    partitioned_quads[i] = cvCreateSeq( 0, sizeof(**partitioned_quads), sizeof(void*), storage );
                    partitioned_boxes[i] = cvCreateSeq( 0, sizeof(**partitioned_boxes), sizeof(CvBox2D), storage );
                }

                // set up the points sequence for cvKMeans2, using the box centers
                for(int i = 0; i < initial_quads->total; i++) {
                    CvBox2D box = (*(CvBox2D*)cvGetSeqElem(initial_boxes, i));
                    cvSet1D(points, i, cvScalar(box.center.x,box.center.y));
                }

                // partition into two clusters: passport and colorchecker
                cvKMeans2( points, 2, clusters,
                    cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0 ) );

                for(int i = 0; i < initial_quads->total; i++) {
                    CvPoint2D32f pt = ((CvPoint2D32f*)points->data.fl)[i];
                    int cluster_idx = clusters->data.i[i];

                    cvSeqPush( partitioned_quads[cluster_idx], cvGetSeqElem(initial_quads, i) );
                    cvSeqPush( partitioned_boxes[cluster_idx], cvGetSeqElem(initial_boxes, i) );

                    // cvCircle(
                    //     macbeth_img,
                    //     cvPointFrom32f(pt),
                    //     element_size*2,
                    //     cvScalar(255*cluster_idx,0,255-(255*cluster_idx)),
                    //     -1
                    // );
                }

                ColorChecker partitioned_checkers[2];

                // check each of the two partitioned sets for the best colorchecker
                for(int i = 0; i < 2; i++) {
                    partitioned_checkers[i] = find_colorchecker(
                        partitioned_quads[i], partitioned_boxes[i],
                        storage, macbeth_img, macbeth_original);
                }

                // use the colorchecker with the lowest error
                found_colorchecker = partitioned_checkers[0].error < partitioned_checkers[1].error ?
                    partitioned_checkers[0] : partitioned_checkers[1];

                cvReleaseMat( &points );
                cvReleaseMat( &clusters );
            }
            else { // just one colorchecker to test
                fprintf(stderr,"\n");
                found_colorchecker = find_colorchecker(initial_quads, initial_boxes,
                    storage, macbeth_img, macbeth_original);
            }

            // render the found colorchecker
            draw_colorchecker(found_colorchecker.values, found_colorchecker.points,
                macbeth_img, found_colorchecker.size);

            // print out the colorchecker info
            for(int y = 0; y < MACBETH_HEIGHT; y++) {
                for(int x = 0; x < MACBETH_WIDTH; x++) {
                    CvScalar this_value = cvGet2D(found_colorchecker.values,y,x);
                    CvScalar this_point = cvGet2D(found_colorchecker.points,y,x);
                    printf("%.0f,%.0f,%.0f,%.0f,%.0f\n",
                        this_point.val[0],this_point.val[1],
                        this_value.val[2],this_value.val[1],this_value.val[0]);
                }
            }
            printf("%.0f\n%f\n",found_colorchecker.size,found_colorchecker.error);
        }

        cvReleaseMemStorage( &storage );

        if( macbeth_original ) cvReleaseImage( &macbeth_original );
        if( adaptive ) cvReleaseImage( &adaptive );
        return macbeth_img;
    }

    if( macbeth_img ) cvReleaseImage( &macbeth_img );
    return NULL;
}
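/* A hedged sketch of the per-channel thresholding step at the heart of
   find_macbeth() above: adaptively threshold each BGR plane, then cvOr the
   three binary masks so a square edge detected in any single channel
   survives. `src` and the helper name are illustrative; the block size
   follows the same "2% of the short side, forced odd" rule as the code
   above. Assumes an 8-bit 3-channel input and the OpenCV 1.x C API. */
#include <opencv/cv.h>

static IplImage* adaptive_or_channels(IplImage* src)
{
    CvSize sz = cvGetSize(src);
    IplImage* plane  = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    IplImage* thresh = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    IplImage* acc    = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    int block = cvRound(MIN(sz.width, sz.height) * 0.02) | 1;

    cvZero(acc);
    for (int i = 0; i < 3; i++) {
        cvSetImageCOI(src, i + 1);      /* select channel i as the COI */
        cvCopy(src, plane, NULL);       /* extract it into a 1-channel image */
        cvAdaptiveThreshold(plane, thresh, 255, CV_ADAPTIVE_THRESH_MEAN_C,
                            CV_THRESH_BINARY_INV, block, 6);
        cvOr(acc, thresh, acc, NULL);   /* accumulate edges across channels */
    }
    cvSetImageCOI(src, 0);

    cvReleaseImage(&plane);
    cvReleaseImage(&thresh);
    return acc;                         /* caller releases */
}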
IplImage* cvTestSeqQueryFrame(CvTestSeq* pTestSeq)
{
    CvTestSeq_*     pTS = (CvTestSeq_*)pTestSeq;
    CvTestSeqElem*  p = pTS->pElemList;
    IplImage*       pImg = pTS->pImg;
    IplImage*       pImgAdd = cvCloneImage(pTS->pImg);
    IplImage*       pImgAddG = cvCreateImage(cvSize(pImgAdd->width,pImgAdd->height),IPL_DEPTH_8U,1);
    IplImage*       pImgMask = pTS->pImgMask;
    IplImage*       pImgMaskAdd = cvCloneImage(pTS->pImgMask);
    CvMat*          pT = cvCreateMat(2,3,CV_32F);

    if(pTS->CurFrame >= pTS->FrameNum) return NULL;

    cvZero(pImg);
    cvZero(pImgMask);

    for(p=pTS->pElemList; p; p=p->next)
    {
        int         DirectCopy = FALSE;
        int         frame = pTS->CurFrame - p->FrameBegin;
        //float     t = p->FrameNum>1?((float)frame/(p->FrameNum-1)):0;
        CvTSTrans*  pTrans = p->pTrans + frame%p->TransNum;

        assert(pTrans);

        if( p->FrameNum > 0 && (frame < 0 || frame >= p->FrameNum) )
        {   /* Current frame is out of range: */
            //if(p->pAVI)cvReleaseCapture(&p->pAVI);
            p->pAVI = NULL;
            continue;
        }

        cvZero(pImgAdd);
        cvZero(pImgAddG);
        cvZero(pImgMaskAdd);

        if(p->noise_type == CV_NOISE_NONE)
        {   /* Not a noise element: */
            /* Get next frame: */
            icvTestSeqQureyFrameElem(p, frame);
            if(p->pImg == NULL) continue;

#if 1 /* transform using the T field in Trans */
            {   /* Calculate transform matrix: */
                float W = (float)(pImgAdd->width-1);
                float H = (float)(pImgAdd->height-1);
                float W0 = (float)(p->pImg->width-1);
                float H0 = (float)(p->pImg->height-1);
                cvZero(pT);
                {   /* Calculate inverse matrix: */
                    CvMat mat = cvMat(2,3,CV_32F, pTrans->T);
                    mat.width--;
                    pT->width--;
                    cvInvert(&mat, pT);
                    pT->width++;
                }

                CV_MAT_ELEM(pT[0], float, 0, 2) =
                    CV_MAT_ELEM(pT[0], float, 0, 0)*(W0/2-pTrans->T[2])+
                    CV_MAT_ELEM(pT[0], float, 0, 1)*(H0/2-pTrans->T[5]);

                CV_MAT_ELEM(pT[0], float, 1, 2) =
                    CV_MAT_ELEM(pT[0], float, 1, 0)*(W0/2-pTrans->T[2])+
                    CV_MAT_ELEM(pT[0], float, 1, 1)*(H0/2-pTrans->T[5]);

                CV_MAT_ELEM(pT[0], float, 0, 0) *= W0/W;
                CV_MAT_ELEM(pT[0], float, 0, 1) *= H0/H;
                CV_MAT_ELEM(pT[0], float, 1, 0) *= W0/W;
                CV_MAT_ELEM(pT[0], float, 1, 1) *= H0/H;
            }   /* Calculate transform matrix. */
#else
            {   /* Calculate transform matrix: */
                float SX = (float)(p->pImg->width-1)/((pImgAdd->width-1)*pTrans->Scale.x);
                float SY = (float)(p->pImg->height-1)/((pImgAdd->height-1)*pTrans->Scale.y);
                float DX = pTrans->Shift.x;
                float DY = pTrans->Shift.y;
                cvZero(pT);
                ((float*)(pT->data.ptr+pT->step*0))[0]=SX;
                ((float*)(pT->data.ptr+pT->step*1))[1]=SY;
                ((float*)(pT->data.ptr+pT->step*0))[2]=SX*(pImgAdd->width-1)*(0.5f-DX);
                ((float*)(pT->data.ptr+pT->step*1))[2]=SY*(pImgAdd->height-1)*(0.5f-DY);
            }   /* Calculate transform matrix. */
#endif

            {   /* Check for direct copy: */
                DirectCopy = TRUE;
                if( fabs(CV_MAT_ELEM(pT[0],float,0,0)-1) > 0.00001) DirectCopy = FALSE;
                if( fabs(CV_MAT_ELEM(pT[0],float,1,0)) > 0.00001) DirectCopy = FALSE;
                if( fabs(CV_MAT_ELEM(pT[0],float,0,1)) > 0.00001) DirectCopy = FALSE;
                if( fabs(CV_MAT_ELEM(pT[0],float,1,1)-1) > 0.00001) DirectCopy = FALSE;
                if( fabs(CV_MAT_ELEM(pT[0],float,0,2)-(pImg->width-1)*0.5) > 0.5) DirectCopy = FALSE;
                if( fabs(CV_MAT_ELEM(pT[0],float,1,2)-(pImg->height-1)*0.5) > 0.5) DirectCopy = FALSE;
            }

            /* Extract image and mask: */
            if(p->pImg->nChannels == 1)
            {
                if(DirectCopy)
                {
                    cvCvtColor( p->pImg,pImgAdd,CV_GRAY2BGR);
                }
                else
                {
                    cvGetQuadrangleSubPix( p->pImg, pImgAddG, pT);
                    cvCvtColor( pImgAddG,pImgAdd,CV_GRAY2BGR);
                }
            }

            if(p->pImg->nChannels == 3)
            {
                if(DirectCopy)
                    cvCopy(p->pImg, pImgAdd);
                else
                    cvGetQuadrangleSubPix( p->pImg, pImgAdd, pT);
            }

            if(p->pImgMask)
            {
                if(DirectCopy)
                    cvCopy(p->pImgMask, pImgMaskAdd);
                else
                    cvGetQuadrangleSubPix( p->pImgMask, pImgMaskAdd, pT);

                cvThreshold(pImgMaskAdd,pImgMaskAdd,128,255,CV_THRESH_BINARY);
            }

            if(pTrans->C != 1 || pTrans->I != 0)
            {   /* Intensity transformation: */
                cvScale(pImgAdd, pImgAdd, pTrans->C, pTrans->I);
            }   /* Intensity transformation. */

            if(pTrans->GN > 0)
            {   /* Add noise: */
                IplImage* pImgN = cvCloneImage(pImgAdd);
                cvRandSetRange( &p->rnd_state, pTrans->GN, 0, -1 );
                cvRand(&p->rnd_state, pImgN);
                cvAdd(pImgN,pImgAdd,pImgAdd);
                cvReleaseImage(&pImgN);
            }   /* Add noise. */

            if(p->Mask)
            {   /* Update only mask: */
                cvOr(pImgMaskAdd, pImgMask, pImgMask);
            }
            else
            {   /* Add image and mask to the existing main image and mask: */
                if(p->BG)
                {   /* If image is background: */
                    cvCopy( pImgAdd, pImg, NULL);
                }
                else
                {   /* If image is foreground: */
                    cvCopy( pImgAdd, pImg, pImgMaskAdd);
                    if(p->ObjID>=0)
                        cvOr(pImgMaskAdd, pImgMask, pImgMask);
                }
            }   /* Not mask. */
        }   /* Not a noise element. */
        else
        {   /* Process noise video: */
            if( p->noise_type == CV_NOISE_GAUSSIAN ||
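/* A hedged sketch of the additive-noise step in the snippet above. The
   original uses the legacy CvRandState helpers (cvRandSetRange/cvRand); the
   core cvRandArr API shown here is a plainly-swapped-in equivalent with
   explicit Gaussian parameters, not the same distribution configuration as
   the legacy call. Names are illustrative. */
#include <opencv/cv.h>

static void add_gaussian_noise(IplImage* img, double sigma, CvRNG* rng)
{
    IplImage* noise = cvCloneImage(img);

    /* Fill with Gaussian noise: mean 0, standard deviation sigma.
       For 8-bit images the values are clipped to [0,255] on write. */
    cvRandArr(rng, noise, CV_RAND_NORMAL, cvScalarAll(0), cvScalarAll(sigma));

    cvAdd(img, noise, img, NULL);   /* saturating add, as in the snippet */
    cvReleaseImage(&noise);
}

/* Typical use: CvRNG rng = cvRNG(-1); add_gaussian_noise(frame, 10.0, &rng); */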
// Receives the frame and the effect image to blend into it.
// Also receives the scale factor used to resize the effect relative to the
// video frame, plus the x/y offsets for placing the effect image.
void fusionImagenes(IplImage* frame, IplImage* img_effect, CvRect* r,
                    double escalamiento, double despX, double despY){
    IplImage *cloneFrame,*imgObj,*imgObj2,*mascara,*mascara2,*resize_imgEffect,*resize_mascara;

    //Create the mask
    IplImage* mascara_imgEffect = crearMascaraBinarizada(img_effect);
    //cvShowImage("MI MASCARA",mascara_imgEffect);

    //Clone the image captured by the camera
    cloneFrame = cvCloneImage(frame);

    //Initialize the base images for the effect image
    imgObj = cvCreateImage(cvSize(frame->width,frame->height),frame->depth,frame->nChannels);  //image the effect is pasted onto
    imgObj2 = cvCreateImage(cvSize(frame->width,frame->height),frame->depth,frame->nChannels); //the effect, but translated
    //Paint the base images white
    cvSet(imgObj, cvScalar(255,255,255,0),NULL);
    cvSet(imgObj2, cvScalar(255,255,255,0),NULL);

    //Initialize the base images for the mask
    mascara = cvCreateImage(cvSize(frame->width,frame->height),frame->depth,frame->nChannels);  //image the mask is pasted onto
    mascara2 = cvCreateImage(cvSize(frame->width,frame->height),frame->depth,frame->nChannels); //the mask, but translated
    //Paint the base images black
    cvSet(mascara, cvScalar(0,0,0,0),NULL);
    cvSet(mascara2, cvScalar(0,0,0,0),NULL);

    //Compute the new size
    int nuevoWidth = (int)(escalamiento*img_effect->width*r->width);    //new horizontal size for img_effect; kept proportional to r, hence the multiplication
    int nuevoHeight = (int)(escalamiento*img_effect->height*r->height); //new vertical size for img_effect
    //printf("\neffect(%d,%d)-> %d,%d",img_effect->width,img_effect->height,r->width,r->height);
    if(nuevoWidth > frame->width) nuevoWidth = frame->width;     //keep the new horizontal size within the frame
    if(nuevoHeight > frame->height) nuevoHeight = frame->height; //keep the new vertical size within the frame

    //Resize the effect image to the new width/height
    resize_imgEffect = cvCreateImage(cvSize(nuevoWidth,nuevoHeight),img_effect->depth,img_effect->nChannels); //effect at the new size
    cvResize(img_effect,resize_imgEffect,1);
    resize_mascara = cvCreateImage(cvSize(nuevoWidth,nuevoHeight),mascara_imgEffect->depth,mascara_imgEffect->nChannels); //mask at the new size
    cvResize(mascara_imgEffect,resize_mascara,1); //build the mask at the new size
    cvShowImage("imgObj-antes",resize_imgEffect);
    cvShowImage("mascara-antes",resize_mascara);

    //ROI for the effect image
    cvSetImageROI(imgObj, cvRect(0,0,resize_imgEffect->width,resize_imgEffect->height)); //ROI at the origin, sized like the effect image
    cvCopy(resize_imgEffect,imgObj); //paste the object
    cvResetImageROI(imgObj);         //remove the ROI
    cvShowImage("copy",imgObj);

    //ROI for the mask
    cvSetImageROI(mascara, cvRect(0,0,resize_mascara->width,resize_mascara->height));
    cvCopy(resize_mascara,mascara);
    cvResetImageROI(mascara);
    cvShowImage("MASK-ROI",mascara);

    //Compute the displacements
    int dx = r->x+despX; //x position of the face-detection rectangle, plus an offset based on the rectangle's width
    int dy = r->y+despY;
    //printf("\ndespx= %d , despy= %d ----- %d,%d",dx,dy,despX,despY);
    //cvShowImage("imgObj-antes",imgObj);cvShowImage("mascara-antes",mascara);

    //Transformation matrix; only dx,dy matter, for the translation
    CvMat *M = cvCreateMat( 2, 3, CV_32FC1);
    cvmSet(M,0,0,1);  //element (0,0) = 1
    cvmSet(M,0,1,0);  //element (0,1) = 0
    cvmSet(M,1,0,0);  //element (1,0) = 0
    cvmSet(M,1,1,1);  //element (1,1) = 1
    cvmSet(M,0,2,dx); //pixels to shift along the x axis
    cvmSet(M,1,2,dy);
    cvWarpAffine(imgObj, imgObj2, M, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS,cvScalarAll(255)); //apply the transform to get the translated image
    cvWarpAffine(mascara,mascara2, M, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS,cvScalarAll(0));  //same, but for the mask
    cvShowImage("imgObj-despues",imgObj2);
    cvShowImage("mascara-despues",mascara2);

    cvAnd(imgObj2,mascara2,imgObj2,0); //cut the object out using the mask
    cvShowImage("AND imgObj2-mascara2",imgObj2);
    cvNot(mascara2,mascara2);          //build the inverse mask
    cvShowImage("NOT mascara2",mascara2);
    cvShowImage("CLoNE FRAME",cloneFrame);
    cvAnd(cloneFrame,mascara2,cloneFrame,0); //use the inverse mask to clear the area where the object will go
    cvShowImage("AND cloneFrame-mascara2",cloneFrame);
    cvOr(cloneFrame,imgObj2,frame,0);  //merge the two images with OR

    //Free each of the temporary images (and the matrix)
    cvReleaseImage(&cloneFrame);
    cvReleaseImage(&imgObj);
    cvReleaseImage(&imgObj2);
    cvReleaseImage(&mascara_imgEffect);
    cvReleaseImage(&mascara);
    cvReleaseImage(&mascara2);
    cvReleaseImage(&resize_imgEffect);
    cvReleaseImage(&resize_mascara);
    cvReleaseMat(&M);
}
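/* A hedged sketch of the cut-and-paste compositing pattern that
   fusionImagenes() above builds up: given a sprite and its binary mask
   already aligned to the frame, the paste reduces to AND / NOT / AND / OR.
   `frame`, `sprite`, and `mask` are assumed to be same-size 8-bit 3-channel
   images (mask white where the sprite is opaque); names are illustrative. */
#include <opencv/cv.h>

static void paste_with_mask(IplImage* frame, IplImage* sprite, IplImage* mask)
{
    cvAnd(sprite, mask, sprite, NULL); /* keep sprite pixels inside the mask */
    cvNot(mask, mask);                 /* invert: white where background shows */
    cvAnd(frame, mask, frame, NULL);   /* punch a sprite-shaped hole in the frame */
    cvOr(frame, sprite, frame, NULL);  /* drop the sprite into the hole */
}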
// In-place bitwise OR with another Frame. Note that `or` is an alternative
// token for `||` in ISO C++, so this member name only compiles where
// alternative tokens are disabled (e.g. MSVC by default, or g++ with
// -fno-operator-names).
void Frame::or(Frame *B) {
    cvOr(image,B->image,image);
}
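/* The wrapper above relies on cvOr allowing its destination to alias a
   source operand. A minimal standalone check of that in-place behaviour,
   with illustrative names: */
#include <opencv/cv.h>

static void or_inplace_demo(void)
{
    IplImage* a = cvCreateImage(cvSize(4,4), IPL_DEPTH_8U, 1);
    IplImage* b = cvCreateImage(cvSize(4,4), IPL_DEPTH_8U, 1);
    cvSet(a, cvScalarAll(0x0F), NULL);
    cvSet(b, cvScalarAll(0xF0), NULL);
    cvOr(a, b, a, NULL);       /* in place: every pixel of a becomes 0xFF */
    cvReleaseImage(&a);
    cvReleaseImage(&b);
}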
void run()
{
    int key;
    IplImage *mask = cvCreateImage( cvGetSize(img_gui), 8, 1 );
    IplImage *display = cvCreateImage( cvGetSize(img_gui), 8, 3 );
    IplImage *fgcolor = cvCloneImage( img_gui), *bgcolor = cvCloneImage( img_gui);
    cvSet( fgcolor, cvScalar( 255, 0, 0)), cvSet( bgcolor, cvScalar( 0, 255, 255));

    // gui
    int swmin = _swmin;
    int swmax = std::max( std::max( img_gui->width, img_gui->height) / 8, 1);
    int swstep = std::max( std::min( img_gui->width, img_gui->height) / 100, 1);
    int swdefault = std::min( std::max( std::min( img_gui->width, img_gui->height) / 32, swmin), swmax);
    strokewidth = swdefault;

    RenderMsg( display);
    cvNamedWindow( "working space" );
    cvNamedWindow( "trimap" );
    cvShowImage( "working space" , display );
    cvShowImage( "trimap" , usr);
    cvSetMouseCallback( "working space", DrawStroke);
    cvSetMouseCallback( "trimap", DrawStroke);

    while(1)
    {
        key = cvWaitKey(5);
        if(key=='q')
            break;
        else if(key=='w')
        {
            stroketype++;
            stroketype = stroketype % 3;
            printf("%d\n", stroketype);
        }
        else if( key == 'd') // decrease stroke width
            strokewidth = ( strokewidth - swstep < swmin) ? swmin : strokewidth - swstep;
        else if( key == 'a') // increase stroke width
            strokewidth = ( strokewidth + swstep > swmax) ? swmax : strokewidth + swstep;
        else if( key == 'u' && flashOnlyImg_gui!=NULL )
        {
            T += T_step;
            FlashMatting::GenerateTrimap( flashOnlyImg_gui, usr, T);
        }
        else if( key == 'i' && flashOnlyImg_gui!=NULL )
        {
            T -= T_step;
            FlashMatting::GenerateTrimap( flashOnlyImg_gui, usr, T);
        }

        // display
        cvCopy( img_gui, display );
        cvCmpS( usr, _strokeColor[_strokebg], mask, CV_CMP_EQ);
        cvOr( img_gui, bgcolor, display, mask);
        cvCmpS( usr, _strokeColor[_strokefg], mask, CV_CMP_EQ);
        cvOr( img_gui, fgcolor, display, mask);
        cvConvertScale( display, display, 0.7);
        //cvCmpS( usr, _strokeColor[_strokeu], mask, CV_CMP_EQ);
        //cvCopy( img_gui, display, mask);
        RenderMsg( display);
        cvShowImage( "working space", display);
        cvShowImage( "trimap" , usr);
    }

    cvReleaseImage( &mask );
    cvReleaseImage( &fgcolor );
    cvReleaseImage( &bgcolor );
    cvReleaseImage( &display );
    cvDestroyAllWindows();
}
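/* A hedged sketch of the stroke-highlighting idiom in run() above: compare
   a label image against a stroke code to get a mask, then cvOr a flat
   colour over just those pixels. `labels` is an assumed 8-bit 1-channel
   label map and `display` a same-size 3-channel image; the helper name and
   parameters are illustrative. */
#include <opencv/cv.h>

static void highlight_label(IplImage* display, IplImage* labels,
                            double code, CvScalar colour)
{
    IplImage* mask  = cvCreateImage(cvGetSize(labels), IPL_DEPTH_8U, 1);
    IplImage* layer = cvCreateImage(cvGetSize(display), IPL_DEPTH_8U, 3);

    cvSet(layer, colour, NULL);
    cvCmpS(labels, code, mask, CV_CMP_EQ);  /* mask = (labels == code) */
    cvOr(display, layer, display, mask);    /* tint only the masked pixels */

    cvReleaseImage(&mask);
    cvReleaseImage(&layer);
}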
int process_frame(IplImage* frame, IplImage* h_plane, IplImage* v_plane,
                  CvHistogram* hist_h, CvHistogram* hist_v, int Hthresh, int Vthresh)
{
    IplImage *resultF;
    int c;

    if(!frame)
        return -1;

    resultF = cvCreateImage( cvGetSize(frame), frame->depth, frame->nChannels );
    cvCopy(frame,resultF,NULL);
    cvShowImage("Display1", frame );
    //cvShowImage("Display", frame );

    IplImage* result = cvCreateImage( cvGetSize(h_plane), 8, 1 );
    int width = frame->width;
    int height = frame->height;
    int h_bins = H_BINS, v_bins = V_BINS;
    int *h_s = malloc(sizeof(int)*h_bins);
    int *v_s = malloc(sizeof(int)*v_bins);
    int j;

    float pr = (float)Hthresh/100000;
    pr = pr*result->width*result->height;
    for(j = 0 ; j < h_bins ; j++)
    {
        if(cvQueryHistValue_1D(hist_h,j) < pr)
            h_s[j] = 0;   //is obstacle
        else
            h_s[j] = H_MAX;
    }

    pr = (float)Vthresh/1000;
    pr = pr*result->width*result->height;
    for(j = 0 ; j < v_bins ; j++)
    {
        if(cvQueryHistValue_1D(hist_v,j) < pr)
        {
            // printf("%f %f\n",cvQueryHistValue_1D(hist_v,j),pr);
            v_s[j] = 0;
        }
        else
            v_s[j] = V_MAX;
        // printf("%f %d %d\n",cvQueryHistValue_1D(hist_v,j),Vthresh,v_s[j]);
    }
    // getchar();

    int i;
    CvScalar Black, White;
    Black.val[0] = 0;
    White.val[0] = 255;

    cvCopy(v_plane, result,NULL);
    int h_bsize = H_MAX/H_BINS;
    int v_bsize = V_MAX/V_BINS;
    //printf("%d %d\n",h_bsize,v_bsize);

    for(i = 0 ; i < result->height ; i++)
    {
        for(j = 0 ; j < result->width ; j++)
        {
            CvScalar s;
            s = cvGet2D(h_plane ,i ,j);
            if(h_s[(int)s.val[0]/(h_bsize)] != 0 ) //obstacles are white
                cvSet2D(h_plane,i,j,Black);
            else
                cvSet2D(h_plane,i,j,White);

            s = cvGet2D(v_plane ,i ,j);
            if(s.val[0] == 255)
                s.val[0] = 254;
            if(v_s[(int)s.val[0]/(v_bsize)] != 0)
                cvSet2D(v_plane,i,j,Black);
            else
                cvSet2D(v_plane,i,j,White);
        }
    }

    //for(i = 0; i < result->height; i++)
    //{
    //    for(j = 0; j < result->width; j++)
    //    {
    //        if(countArray[i/BLOCK_DIM][j/BLOCK_DIM] <= AMT_BLACK)
    //            cvSet2D(result, i, j, Black);
    //        else
    //            cvSet2D(result, i, j, White);
    //    }
    //}

    cvOr(v_plane,h_plane,result,NULL);
    cvShowImage("Result", result );
    cvShowImage("HDisplay", h_plane );
    cvShowImage("VDisplay", v_plane );

    //UNCOMMENT FOR CONTOUR
    /*
    CvMemStorage* memStorage = cvCreateMemStorage(0);
    CvSeq* contours = 0;
    cvFindContours(result, memStorage, &contours, sizeof(CvContour),
        CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
    CvSeq* co;
    for( co = contours; co != NULL; co = co->h_next)
    {
        cvDrawContours(resultF,co,cvScalarAll(0),cvScalarAll(0),-1,5,8,cvPoint(0,0));
    }
    cvShowImage("FinalDisplay", resultF );
    cvReleaseMemStorage(&memStorage);
    */
    //comment stops here

    cvReleaseImage(&result );
    cvReleaseImage(&resultF );
    free(h_s);
    free(v_s);

    c = cvWaitKey(30);
    if( c == 'q' || c == 'Q' || (c & 0xff) == 27 ) // 27 = Esc; the original tested (c & 100), which can never equal 27
        return -1;

    return 0;
}
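/* A hedged sketch of the bin-threshold classification that process_frame()
   performs above: build a 1-D histogram of a plane, mark bins whose mass
   falls below a fraction of the image area, then binarize the plane by bin
   membership. The helper name and parameters are illustrative; `plane` and
   `dst` are assumed same-size 8-bit 1-channel images. */
#include <opencv/cv.h>

static void classify_by_bin(IplImage* plane, IplImage* dst,
                            int bins, int range_max, double min_fraction)
{
    int sizes[] = { bins };
    float range[] = { 0, (float)range_max };
    float* ranges[] = { range };
    CvHistogram* hist = cvCreateHist(1, sizes, CV_HIST_ARRAY, ranges, 1);
    double min_count = min_fraction * plane->width * plane->height;
    int bsize = range_max / bins;

    cvCalcHist(&plane, hist, 0, NULL);

    for (int y = 0; y < plane->height; y++)
        for (int x = 0; x < plane->width; x++) {
            CvScalar s = cvGet2D(plane, y, x);
            int bin = (int)s.val[0] / bsize;
            if (bin >= bins) bin = bins - 1;   /* clamp, like the 254 trick above */
            double mass = cvQueryHistValue_1D(hist, bin);
            /* rare bins (below the mass floor) are marked white */
            cvSet2D(dst, y, x, cvScalarAll(mass < min_count ? 255 : 0));
        }

    cvReleaseHist(&hist);
}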