// Builds a single-channel image containing the contours found in `img`.
// The input is converted to gray, thresholded at 100, and the resulting
// contours are painted white on a zeroed image.
// Returns a newly allocated 8-bit, 1-channel image; the caller owns it and
// must release it with cvReleaseImage().
IplImage* preprocess(IplImage* img)
{
    // Scratch storage for the contour sequences; released before returning
    // (bug fix: the original leaked this CvMemStorage on every call).
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* contours = 0;

    IplImage* gray = cvCreateImage(cvGetSize(img), 8, 1);
    cvCvtColor(img, gray, CV_BGR2GRAY);
    cvThreshold(gray, gray, 100, 255, CV_THRESH_BINARY);

    // cvFindContours consumes the thresholded image; we redraw on it below.
    cvFindContours(gray, storage, &contours);

    cvZero(gray);
    if (contours) {
        // Paint the contours (up to 100 tree levels) in white.
        cvDrawContours(gray, contours, cvScalarAll(255), cvScalarAll(255), 100);
    }

    // Contour data is no longer needed once drawn.
    cvReleaseMemStorage(&storage);
    return gray;
}
int main(int argc, char **argv) { int thresh = 128; int erode = 0; int dilate = 0; int do_contour = 0; IplImage *image_bw = cvCreateImage(SIZE, 8, 1); IplImage *image_thresh = cvCreateImage(SIZE, 8, 1); IplImage *image_temp = cvCreateImage(SIZE, 8, 1); cvNamedWindow("config", CV_WINDOW_AUTOSIZE); cvCreateTrackbar("threshold", "config", &thresh, 255, NULL); cvCreateTrackbar("erode", "config", &erode, 10, NULL); cvCreateTrackbar("dilate", "config", &dilate, 10, NULL); cvCreateTrackbar("contour", "config", &do_contour, 1, NULL); CvMemStorage *storage = cvCreateMemStorage(); while (cvWaitKey(10) < 0) { IplImage *image = freenect_sync_get_rgb_cv(0); if (!image) { printf("Error: Kinect not connected?\n"); return -1; } cvCvtColor(image, image, CV_RGB2BGR); cvCvtColor(image, image_bw, CV_RGB2GRAY); cvThreshold(image_bw, image_thresh, thresh, 255, CV_THRESH_BINARY); cvErode(image_thresh, image_thresh, NULL, erode); cvDilate(image_thresh, image_thresh, NULL, dilate); if (do_contour) { CvSeq *contours; cvCopy(image_thresh, image_temp); cvFindContours(image_temp, storage, &contours); cvDrawContours(image, contours, CV_RGB(0, 255, 0), CV_RGB(0, 255, 255), 1); } cvShowImage("RGB", image); cvShowImage("BW", image_bw); cvShowImage("THRESH", image_thresh); } return 0; }
static void node_composit_exec_cvDrawContour(void *data, bNode *node, bNodeStack **in, bNodeStack **out) { IplImage *img, *dst, *img1, *img2, *img3,*imgRed, *umbral; CvMemStorage* storage = cvCreateMemStorage(0); CvSeq* contour = 0; //TODO: Use atach buffers if(out[0]->hasoutput==0) return; img=in[0]->data; dst = cvCreateImage( cvGetSize(img), 8, 3 ); img1=cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,1); img2=cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,1); img3=cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,1); imgRed=cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,1); umbral=cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,1); cvSplit(img, img1, img2, imgRed, img3); cvThreshold( umbral,imgRed,210,255, CV_THRESH_BINARY ); cvFindContours( img, storage, &contour, sizeof(CvContour),CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE,cvPoint(0, 0) ); cvZero( dst ); for( ; contour != 0; contour = contour->h_next ) { CvScalar color = CV_RGB( rand()&255, rand()&255, rand()&255 ); /* replace CV_FILLED with 1 to see the outlines */ cvDrawContours( dst, contour, color, color, -1, CV_FILLED, 8, cvPoint(0,0) ); } out[0]->data= dst; /*CvSeq* contour = in[1]->data; if(in[0]->data && in[1]->data){ IplImage* dst = cvCreateImage( cvGetSize(image), 8, 3 ); cvZero(dst); //cvDrawContours( dst, contour, CV_RGB(255,0,0),CV_RGB(0,255,0), -1,3, CV_AA,cvPoint(0,0)); CvSeq* c=contour; for( ; c != 0; c = c->h_next ) { CvScalar color = CV_RGB( rand()&255, rand()&255, rand()&255 ); cvDrawContours( dst, c, color, color, -1, 1, 8 ,cvPoint(0,0)); } out[0]->data= dst; }*/ }
/* Segments a raw foreground mask into cleaned-up regions: the mask is
   denoised with open/close morphology, small-perimeter blobs are dropped,
   and the surviving contours are smoothed (polygon approximation or convex
   hull) and painted back into the mask, filled white.
   Returns the head of the surviving contour list when the caller supplied
   `storage`; with a temporary storage the contours cannot outlive the call,
   so NULL is returned (the painted mask is the result). */
CvSeq*cvSegmentFGMask(CvArr* _mask, int poly1Hull0, float perimScale, CvMemStorage* storage, CvPoint offset)
{
    CvMat mstub, *mask = cvGetMat(_mask, &mstub);
    /* Use the caller's storage if given, otherwise a temporary one that is
       destroyed before returning. */
    CvMemStorage* tempStorage = storage ? storage : cvCreateMemStorage();
    CvSeq *contours, *c;
    int nContours = 0;
    CvContourScanner scanner;

    // clean up raw mask
    cvMorphologyEx(mask, mask, 0, 0, CV_MOP_OPEN, 1);
    cvMorphologyEx(mask, mask, 0, 0, CV_MOP_CLOSE, 1);

    // find contours around only bigger regions
    scanner = cvStartFindContours(mask, tempStorage, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, offset);

    while ((c = cvFindNextContour(scanner)) != 0)
    {
        double len = cvContourPerimeter(c);
        double q = (mask->rows + mask->cols) / perimScale; // calculate perimeter len threshold
        if (len < q) //Get rid of blob if it's perimeter is too small
            cvSubstituteContour(scanner, 0);
        else //Smooth it's edges if it's large enough
        {
            CvSeq* newC;
            if (poly1Hull0) //Polygonal approximation of the segmentation
                newC = cvApproxPoly(c, sizeof(CvContour), tempStorage, CV_POLY_APPROX_DP, 2, 0);
            else //Convex Hull of the segmentation
                newC = cvConvexHull2(c, tempStorage, CV_CLOCKWISE, 1);
            // Replace the raw contour in the scanner's output list.
            cvSubstituteContour(scanner, newC);
            nContours++;
        }
    }
    contours = cvEndFindContours(&scanner);

    // paint the found regions back into the image
    cvZero(mask);
    for (c = contours; c != 0; c = c->h_next)
        cvDrawContours(mask, c, cvScalarAll(255), cvScalarAll(0), -1, CV_FILLED, 8,
                       cvPoint(-offset.x, -offset.y)); // undo the scan offset when painting

    /* Temporary storage: the contours live inside it, so they must not be
       returned after it is released. */
    if (tempStorage != storage)
    {
        cvReleaseMemStorage(&tempStorage);
        contours = 0;
    }

    return contours;
}
// Extracts finger-candidate blobs from the binary input image. Each contour
// becomes a blob if its bounding box passes the aspect-ratio (<= 6) and the
// min/max size filters; accepted blobs are published on output_data and
// outlined on output_buffer.
void moBlobFinderModule::applyFilter(IplImage *src) {
	// Fresh storage for this frame's contours, released at the end.
	// Bug fix: the original called cvCreateMemStorage(0) twice here,
	// leaking the first storage on every frame.
	this->storage = cvCreateMemStorage(0);
	this->clearBlobs();
	cvCopy(src, this->output_buffer);
	CvSeq *contours = 0;
	cvFindContours(this->output_buffer, this->storage, &contours, sizeof(CvContour), CV_RETR_CCOMP);
	cvDrawContours(this->output_buffer, contours, cvScalarAll(255), cvScalarAll(255), 100);
	// Consider each contour a blob and extract the blob infos from it.
	int size;
	double ratio; // Bug fix: was `int`, which truncated the ratio (e.g. 6.9 -> 6 passed the filter).
	int min_size = this->property("min_size").asInteger();
	int max_size = this->property("max_size").asInteger();
	CvSeq *cur_cont = contours;
	while (cur_cont != 0) {
		CvRect rect = cvBoundingRect(cur_cont, 0);
		size = rect.width * rect.height;
		// Check ratio to make sure blob can physically represent a finger
		// magic number 6 is used for now to represent maximum ratio of
		// Length/thickness of finger
		if (rect.width < rect.height) {
			ratio = rect.height / (double)rect.width;
		} else {
			ratio = rect.width / (double)rect.height;
		}
		if ((ratio <= 6) && (size >= min_size) && (size <= max_size)) {
			moDataGenericContainer *blob = new moDataGenericContainer();
			blob->properties["implements"] = new moProperty("pos,size");
			// Normalized blob centre in [0, 1].
			blob->properties["x"] = new moProperty((rect.x + rect.width / 2) / (double) src->width);
			blob->properties["y"] = new moProperty((rect.y + rect.height / 2) / (double) src->height);
			blob->properties["width"] = new moProperty(rect.width);
			blob->properties["height"] = new moProperty(rect.height);
			this->blobs->push_back(blob);
			// Bug fix: the bottom-right corner's y term used rect.x instead
			// of rect.y, drawing a wrong rectangle.
			cvRectangle(this->output_buffer, cvPoint(rect.x, rect.y),
			            cvPoint(rect.x + rect.width, rect.y + rect.height),
			            cvScalar(250, 10, 10), 1);
		}
		cur_cont = cur_cont->h_next;
	}
	cvReleaseMemStorage(&this->storage);
	this->output_data->push(this->blobs);
}
// Demo entry point: renders a synthetic test image, extracts its contour
// tree and displays both the source and the contour rendering.
// (Original comments were GBK-encoded Chinese, mojibake in this file;
// translated to English below.)
int main() {
	const int imgHeight = 500;
	const int imgWidth = 500;
	IplImage* pImgSrc = cvCreateImage(cvSize(imgWidth, imgHeight), IPL_DEPTH_8U, 1); // source image
	IplImage* pImgContour = NULL; // contour image
	CvMemStorage* pMemStorage = cvCreateMemStorage(0); // temporary storage for contour data
	CvSeq* pContour = 0; // holds the extracted contour points
	// Draw the synthetic source picture.
	DrawImage(pImgSrc);
	// Show the source image.
	cvNamedWindow("Source", CV_WINDOW_AUTOSIZE);
	cvShowImage("Source", pImgSrc);
	// Allocate the contour image: 3 channels so contours can be drawn in colour.
	pImgContour = cvCreateImage(cvGetSize(pImgSrc), IPL_DEPTH_8U, 3);
	// Convert the single-channel grayscale image into a 3-channel one.
	//cvCvtColor(pImgSrc, pImgContour, CV_GRAY2BGR);
	cvZero(pImgContour);
	// Find the contours (note: cvFindContours modifies pImgSrc in place).
	cvFindContours(pImgSrc, pMemStorage, &pContour, sizeof(CvContour), CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
	// Draw the contours: outer edges blue, holes red, two tree levels deep.
	cvDrawContours(pImgContour, pContour, CV_RGB(0, 0, 255), CV_RGB(255, 0, 0), 2, 2, 8, cvPoint(0, 0));
	// Show the contour image.
	cvNamedWindow("Contour", CV_WINDOW_AUTOSIZE);
	cvShowImage("Contour", pImgContour);
	cvWaitKey(0);
	cvDestroyWindow("Contour");
	cvDestroyWindow("Source");
	cvReleaseImage(&pImgSrc);
	cvReleaseImage(&pImgContour);
	cvReleaseMemStorage(&pMemStorage);
	return 0;
}
void MeanShift::startTracking(const Image* image, const CvConnectedComp* cComp) { if (!cComp->contour) /* Not really connected component */ return startTracking(image, cComp->rect); Image* mask = new Image(image->size(), UByte, 1); cvDrawContours(mask->cvImage(), cComp->contour, cvScalar(255), cvScalar(255), -1, CV_FILLED, 8); delete m_trackingHist; m_trackingHist = Histogram::createHSHistogram(image, mask); m_lastPostition = cComp->rect; delete mask; }
// Finds the contours in `img`, paints them white on a fresh single-channel
// image, and records the centroid of every sufficiently large contour in
// the global `centers` array; the global `fruitCount` receives the number
// of objects found. Returns the contour image; the caller owns it and must
// release it with cvReleaseImage().
IplImage* contour(IplImage* img)
{
    CvMemStorage* store = cvCreateMemStorage(0);
    IplImage* aux = cvCreateImage(cvGetSize(img), 8, 1);
    CvSeq* contours = 0;

    cvFindContours(img, store, &contours); // finding contours in an image
    cvZero(aux);
    if (contours) {
        cvDrawContours(aux, contours, cvScalarAll(255), cvScalarAll(255), 100);
    }

    // Stack allocation — bug fix: the original malloc'ed a CvMoments and
    // never freed it, leaking one per call.
    CvMoments moments;
    double M00, M01, M10;
    fruitCount = 0;
    // Walk the top-level contours and compute each one's centroid.
    while (contours != NULL)
    {
        if (cvContourArea(contours, CV_WHOLE_SEQ) < 5) // detects only sizable objects
        {
            contours = contours->h_next;
            continue;
        }
        cvMoments(contours, &moments);
        M00 = cvGetSpatialMoment(&moments, 0, 0);
        if (M00 == 0) { // degenerate contour — avoid division by zero
            contours = contours->h_next;
            continue;
        }
        M10 = cvGetSpatialMoment(&moments, 1, 0);
        M01 = cvGetSpatialMoment(&moments, 0, 1);
        centers[fruitCount].x = (int)(M10/M00); // global variable, stores the centre coords of an object
        centers[fruitCount].y = (int)(M01/M00);
        fruitCount++; // important global variable: total no. of objects detected; if zero, no action
        contours = contours->h_next;
    }

    // Bug fix: cvClearMemStorage() only empties the storage — the
    // CvMemStorage block itself was leaked on every call.
    cvReleaseMemStorage(&store);
    return aux;
}
/* * Prints a contour on a dst Image. Used for debugging. * prints text at the side of a contour. * depthLevel sets the level in the contour tree(to include/exclue holes) */ void Contours::printContour(int depthLevel, CvScalar color,IplImage * dst){ CvFont font; int line_type=CV_AA; char * a=(char *) malloc(20); char * b=(char *) malloc(20); char * c=(char *) malloc(20); char * d=(char *) malloc(20); char * e=(char *) malloc(20); cvDrawContours( dst, this->c, CV_RGB(255,0,0), CV_RGB(0,255,0), depthLevel, 3, CV_AA, cvPoint(0,0) ); CvMemStorage* mem = cvCreateMemStorage(0); CvBox2D box=cvMinAreaRect2(this->c,mem); //~ traversePoints(this->c); std::vector<int> centroid=this->getCentroid(); CvPoint pt2=cvPoint(centroid[0]+5,centroid[1]+5); CvPoint pt3=cvPoint(centroid[0]+5,centroid[1]+15); CvPoint pt4=cvPoint(centroid[0]+5,centroid[1]+25); CvPoint pt5=cvPoint(centroid[0]+5,centroid[1]+35); CvPoint pt6=cvPoint(centroid[0]+5,centroid[1]+45); sprintf(a,"per: %g",this->getPerimeter()); sprintf(b,"zone: %d",getPointZone(this->x,this->y)); sprintf(c,"area: %g",this->getArea()); sprintf(d,"ecc: %g",this->getPerimeter()*this->getPerimeter()/this->getArea()); //~ sprintf(d,"boxArea: %g",(double) this->getArea()/(box.size.width*box.size.height)); cvInitFont( &font, CV_FONT_HERSHEY_COMPLEX, 0.5, 0.5, 0.0,0.5, line_type ); cvPutText( dst, a, pt2, &font, CV_RGB(255,255,0)); cvPutText( dst, c, pt3, &font, CV_RGB(255,255,0)); cvPutText( dst, b, pt4, &font, CV_RGB(255,255,0)); cvPutText( dst, d, pt5, &font, CV_RGB(255,255,0)); //~ free(a); cvReleaseMemStorage(&mem); }
int _tmain(int argc, _TCHAR* argv[]) { CvSeq* contours = NULL; CvMemStorage* storage = cvCreateMemStorage(0); IplImage* img = cvLoadImage("answer_reveal.png"); cvNamedWindow("win"); IplImage* grayImg = cvCreateImage(cvGetSize(img), 8, 1); cvCvtColor(img, grayImg, CV_RGB2GRAY); cvThreshold(grayImg, grayImg, 160, 255, CV_THRESH_BINARY); cvFindContours(grayImg, storage, &contours); // cvZero(grayImg); -- if we were displaying the gray image with the contours, in only black and white if (contours) { cvDrawContours(img, contours, cvScalar(255, 0, 0), // ext color (red) cvScalar(0, 255, 0), // hole color (green) 100, // max level of contours to draw 5); // thickness } cvShowImage("win", img); // experiment to read a frame from an image CvCapture* capture = cvCaptureFromFile("C:\\Projects\\meancat\\misc\\100Bot\\1v100_translated.mpeg"); if (capture == NULL) { printf("capture is null"); } else { cvSetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES, 0); IplImage* oneFrame = cvQueryFrame(capture); cvShowImage("win", oneFrame); } cvWaitKey(0); cvReleaseImage(&img); cvReleaseImage(&grayImg); return 0; }
// Finds the connected components (external contours and holes) of `source`
// after binarising it — any non-zero pixel counts as foreground. If
// `result` is non-NULL, every component is painted into it, filled with a
// random colour. Returns the head of the contour list.
// NOTE(review): the returned contours live inside a CvMemStorage allocated
// here that the caller cannot release — confirm whether this per-call leak
// is an accepted trade-off for keeping the sequence alive.
CvSeq* connected_components( IplImage* source, IplImage* result )
{
    IplImage* binary_image = cvCreateImage( cvGetSize(source), 8, 1 );
    cvConvertImage( source, binary_image );
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* contours = 0;
    cvThreshold( binary_image, binary_image, 1, 255, CV_THRESH_BINARY );
    cvFindContours( binary_image, storage, &contours, sizeof(CvContour),
                    CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
    if (result)
    {
        cvZero( result );
        for (CvSeq* contour = contours; contour != 0; contour = contour->h_next)
        {
            CvScalar color = CV_RGB( rand()&255, rand()&255, rand()&255 );
            /* replace CV_FILLED with 1 to see the outlines */
            cvDrawContours( result, contour, color, color, -1, CV_FILLED, 8 );
        }
    }
    // Bug fix: the scratch binary image was leaked on every call. The
    // contour data lives in `storage`, not in this image, so releasing it
    // is safe.
    cvReleaseImage( &binary_image );
    return contours;
}
// Re-renders the template-contour preview: each template contour is drawn,
// scaled to fit, into its own cell of a square grid covering filterImage,
// which is then converted to the UI bitmap.
void ShapeClassifier::UpdateContourImage() {
    cvZero(filterImage);

    // first, determine how many template contours we need to draw by counting the length of the sequence
    int numContours = 0;
    for (CvSeq *contour = templateContours; contour != NULL; contour = contour->h_next) {
        numContours++;
    }

    if (numContours > 0) {
        // Smallest square grid that fits all contours, one per cell.
        int gridSize = (int) ceil(sqrt((double)numContours));
        int gridX = 0;
        int gridY = 0;
        int gridSampleW = FILTERIMAGE_WIDTH / gridSize;
        int gridSampleH = FILTERIMAGE_HEIGHT / gridSize;
        int contourNum = 0;
        for (CvSeq *contour = templateContours; contour != NULL; contour = contour->h_next) {
            // Restrict drawing to this contour's grid cell.
            cvSetImageROI(filterImage, cvRect(gridX*gridSampleW, gridY*gridSampleH, gridSampleW, gridSampleH));
            CvRect bounds = cvBoundingRect(contour, 1);
            // Render the contour into a square scratch image sized to its
            // larger dimension, shifted so it starts at the origin.
            int contourSize = max(bounds.width, bounds.height);
            IplImage *contourImg = cvCreateImage(cvSize(contourSize, contourSize), filterImage->depth, filterImage->nChannels);
            cvZero(contourImg);
            cvDrawContours(contourImg, contour, colorSwatch[contourNum], CV_RGB(255,255,255), 0, 2, CV_AA, cvPoint(-bounds.x, -bounds.y));
            // Scale the scratch image into the current ROI cell.
            cvResize(contourImg, filterImage);
            cvReleaseImage(&contourImg);
            cvResetImageROI(filterImage);
            // Cycle through the colour swatch for the outline colour.
            contourNum = (contourNum+1) % COLOR_SWATCH_SIZE;
            // Advance to the next grid cell (row-major order).
            gridX++;
            if (gridX >= gridSize) {
                gridX = 0;
                gridY++;
            }
        }
    }
    IplToBitmap(filterImage, filterBitmap);
}
void Frame::fill() { IplImage *mor = cvCreateImage(cvGetSize(this->image), 8, 1); CvMemStorage* storage = cvCreateMemStorage(0); CvSeq* contour = 0; if(this->image->nChannels > 1) this->grayScale(); cvFindContours(this->image, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE ); cvZero(mor); for( ; contour != 0; contour = contour->h_next ) { CvScalar color = CV_RGB( 255, 255, 255 ); cvDrawContours( mor, contour, color, color, 0, CV_FILLED, 8 ); } cvConvertImage(mor, this->image, 0); cvClearMemStorage(storage); cvReleaseImage(&mor); }
void on_trackbar(int) { if( g_storage==NULL ) { g_gray = cvCreateImage( cvGetSize(g_image), 8, 1 ); g_storage = cvCreateMemStorage(0); } else { cvClearMemStorage( g_storage ); } CvSeq* contours = 0; cvCvtColor( g_image, g_gray, CV_BGR2GRAY ); cvThreshold( g_gray, g_gray, g_thresh, 255, CV_THRESH_BINARY ); cvFindContours( g_gray, g_storage, &contours ); cvZero( g_gray ); if( contours ) cvDrawContours( g_gray, contours, cvScalarAll(255), cvScalarAll(255), 100 ); cvShowImage( "Contours", g_gray ); }
// Extracts blobs (external contours with bounding-box area >= minAreaFilter)
// from the binary input image. Surviving contours are cached in filtCont and
// counted in numOfFiltCont. When drawBlobs is set, each blob is filled and
// boxed on visualImg.
void BlobDetectionEngine::findBlobs(IplImage *grayImg, bool drawBlobs)
{
	cvFindContours(grayImg, mem, &contours, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
	int i = 0;
	for (ptr = contours; ptr != NULL; ptr = ptr->h_next)
	{
		//Filter small contours
		CvRect rect = cvBoundingRect(ptr);
		if ( rect.height * rect.width < minAreaFilter ){
			continue;
		}
		filtCont[i] = ptr;
		// Bug fix: drawing used to happen unconditionally — the drawBlobs
		// parameter was ignored entirely.
		if (drawBlobs) {
			//CvScalar color = CV_RGB( rand()&255, rand()&255, rand()&255 );
			CvScalar color = CV_RGB( 255, 255, 255 );
			cvDrawContours(visualImg, ptr, color, CV_RGB(0,0,0), 0, CV_FILLED, 8, cvPoint(0,0));
			cvRectangle(visualImg, cvPoint(rect.x +3, rect.y +3), cvPoint(rect.x + rect.width, rect.y + rect.height), color, 1);
			//sprintf(text, "B%d [%d,%d]", i, rect.x, rect.y);
			sprintf(text, "Blob %d", i);
			//cvPutText(visualImg, text, cvPoint(rect.x, rect.y), &font, color);
		}
		i++;
	}
	numOfFiltCont = i;
}
// Finds good features to track (via cvGoodFeaturesToTrack) separately
// inside each contour of the image, using a per-contour mask, and stores
// the resulting points on the ImagePlus object.
void FindFeaturesPlugin::ProcessImage( ImagePlus *img ){
	FetchParams();
	IplImage *orig = img->orig;
	// Lazily allocate the scratch images on first use (sized to the input;
	// assumes all processed frames share that size — TODO confirm).
	if (!gray){
		gray = cvCreateImage( cvGetSize(orig), IPL_DEPTH_8U, 1 );
		cnt_mask = cvCreateImage(cvGetSize(orig), IPL_DEPTH_8U, 1);
		eig = cvCreateImage( cvGetSize(orig), IPL_DEPTH_32F, 1 );
		tempimg = cvCreateImage(cvGetSize(orig), IPL_DEPTH_32F, 1);
	}
	cvCvtColor(orig, gray, CV_BGR2GRAY);
	// Shared output buffer for up to maxCount corner points per contour.
	CvPoint2D32f* feats = (CvPoint2D32f*)malloc(maxCount*sizeof(CvPoint2D32f));
	for(int c=0; c<(int)img->contourArray.size(); c++){
		int count = maxCount; // in/out: capacity in, number found out
		CvSeq *seq = img->contourArray[c];
		cvZero(cnt_mask);
		// Temporarily unlink the contour from its siblings so only this one
		// is drawn into the mask, then restore the link.
		CvSeq *h_next = seq->h_next;
		seq->h_next = NULL;
		cvDrawContours(cnt_mask, seq, CV_RGB(255,255,255), CV_RGB(0,0,0), 1, CV_FILLED, CV_AA, cvPoint(0,0));
		seq->h_next = h_next;
		// Corner detection restricted to this contour's mask.
		cvGoodFeaturesToTrack( gray, eig, tempimg, feats, &count, quality, minDist, cnt_mask, blockSize, method, harrisK );
		img->AddFeats(c, feats, count, clean);
	}
	free(feats);
}
// Extracts and displays the contours of `orig_img` for a positive
// threshold (internally scaled by 5). The contour rendering is shown in a
// "contours only" window and converted to 3 channels into `manipulated_img`.
// NOTE(review): `manipulated_img` is passed by value, so when it arrives
// NULL a local image is created, filled and then released again — the
// caller never receives it; confirm whether a reference/out parameter was
// intended.
void Image_OP::Draw_Contours(int threshold, IplImage * orig_img, IplImage* manipulated_img)
{
	if( threshold > 0)
	{
		this->Reset_Manipulators();
		this->my_pic_manipulators.contour_threshold = true;
		bool already_exists = true;

		// linked lists of memory blocks (for fast allocation or de-allocation)
		CvMemStorage* mem_storage = cvCreateMemStorage(0);
		// found contours are stored in a sequence
		CvSeq* contours = 0;
		// allocates mem for grey-scale image
		IplImage* gray_img = cvCreateImage(cvSize(orig_img->width,orig_img->height), IPL_DEPTH_8U, 1);
		int found_contours =0;

		if (manipulated_img == NULL)
		{
			already_exists = false;
			manipulated_img = cvCreateImage(cvSize(orig_img->width,orig_img->height), IPL_DEPTH_8U, 3);
		}
		cvNamedWindow("contours only");

		// converts frame into grey-scale frame
		cvCvtColor( orig_img, gray_img, CV_RGB2GRAY );

		// extends threshold range
		int g_thresh = threshold *5; // defines a threshold for operations

		// creates binary image (only 0 and 255 as pixel values):
		// CV_THRESH_BINARY => dst = (src > threshold) ? MAX : 0.
		cvThreshold( gray_img, gray_img, g_thresh, 255, CV_THRESH_BINARY );

		// finds contours; return value is the number of found contours
		found_contours = cvFindContours(gray_img,mem_storage, &contours);

		// sets all elements of an array to Null
		cvZero( gray_img );
		if( contours ){
			// draw the found contours (up to 100 tree levels) in white
			cvDrawContours(gray_img,contours,cvScalarAll(255),cvScalarAll(255),100 );
		}
		this->my_manipulation_applied = threshold;
		cvShowImage("contours only", gray_img);

		// turn 1 channel image into 3 channel image (important for CvVideoWriter)
		cvCvtColor( gray_img, manipulated_img, CV_GRAY2RGB );
		// or: cvMerge(gray_img,gray_img,gray_img,NULL,manipulated_img);

		cvReleaseImage(&gray_img);
		cvReleaseMemStorage(&mem_storage);
		// Only release the image if it was created locally above.
		if(already_exists == false)
			cvReleaseImage(&manipulated_img);
	}
}
/**
 * FillBlob
 * Paints this blob, filled with the given colour, onto `imatge`
 * (modifies the input image in place).
 * @param imatge  image to paint on
 * @param color   colour to fill the blob with
 * @param offsetX optional x offset (default 0)
 * @param offsetY optional y offset (default 0)
 */
void CBlob::FillBlob( IplImage *imatge, CvScalar color, int offsetX /*=0*/, int offsetY /*=0*/)
{
	// Draw the blob's external contour, filled, using the same colour for
	// the outer edge and any holes.
	CvSeq* externalPoints = m_externalContour.GetContourPoints();
	cvDrawContours( imatge, externalPoints, color, color, 0, CV_FILLED, 8 );
}
/* Groups the connected components of a foreground mask into clusters (via
   cvSeqPartition with the CompareContour predicate) and emits one CvBlob
   per cluster: position = centroid of the mask pixels inside the cluster's
   united bounding rect, size = 4 standard deviations in x and y. */
void cvFindBlobsByCCClasters(IplImage* pFG, CvBlobSeq* pBlobs, CvMemStorage* storage)
{   /* Create contours: */
    IplImage*       pIB = NULL;
    CvSeq*          cnt = NULL;
    CvSeq*          cnt_list = cvCreateSeq(0,sizeof(CvSeq),sizeof(CvSeq*), storage );
    CvSeq*          clasters = NULL;
    int             claster_cur, claster_num;

    /* Binarise a copy of the mask and extract its external contours. */
    pIB = cvCloneImage(pFG);
    cvThreshold(pIB,pIB,128,255,CV_THRESH_BINARY);
    cvFindContours(pIB,storage, &cnt, sizeof(CvContour), CV_RETR_EXTERNAL);
    cvReleaseImage(&pIB);

    /* Create cnt_list. */
    /* Process each contour: */
    for(; cnt; cnt=cnt->h_next)
    {
        cvSeqPush( cnt_list, &cnt);
    }

    /* Partition the contours into equivalence clusters; `clasters` maps
       each contour index to its cluster id. */
    claster_num = cvSeqPartition( cnt_list, storage, &clasters, CompareContour, NULL );

    for(claster_cur=0; claster_cur<claster_num; ++claster_cur)
    {
        int         cnt_cur;
        CvBlob      NewBlob;
        double      M00,X,Y,XX,YY; /* image moments */
        CvMoments   m;
        CvRect      rect_res = cvRect(-1,-1,-1,-1);
        CvMat       mat;

        /* Union of the bounding rects of all contours in this cluster. */
        for(cnt_cur=0; cnt_cur<clasters->total; ++cnt_cur)
        {
            CvRect  rect;
            CvSeq*  cnt;
            int k = *(int*)cvGetSeqElem( clasters, cnt_cur );
            if(k!=claster_cur) continue;
            cnt = *(CvSeq**)cvGetSeqElem( cnt_list, cnt_cur );
            rect = ((CvContour*)cnt)->rect;

            if(rect_res.height<0)
            {
                rect_res = rect;
            }
            else
            {   /* Unite rects: */
                int x0,x1,y0,y1;
                x0 = MIN(rect_res.x,rect.x);
                y0 = MIN(rect_res.y,rect.y);
                x1 = MAX(rect_res.x+rect_res.width,rect.x+rect.width);
                y1 = MAX(rect_res.y+rect_res.height,rect.y+rect.height);
                rect_res.x = x0;
                rect_res.y = y0;
                rect_res.width = x1-x0;
                rect_res.height = y1-y0;
            }
        }

        if(rect_res.height < 1 || rect_res.width < 1)
        {
            X = 0;
            Y = 0;
            XX = 0;
            YY = 0;
        }
        else
        {   /* Moments of the mask pixels inside the united rect:
               centroid (X,Y) and variances (XX,YY). */
            cvMoments( cvGetSubRect(pFG,&mat,rect_res), &m, 0 );
            M00 = cvGetSpatialMoment( &m, 0, 0 );
            if(M00 <= 0 ) continue; /* empty region — skip this cluster */
            X = cvGetSpatialMoment( &m, 1, 0 )/M00;
            Y = cvGetSpatialMoment( &m, 0, 1 )/M00;
            XX = (cvGetSpatialMoment( &m, 2, 0 )/M00) - X*X;
            YY = (cvGetSpatialMoment( &m, 0, 2 )/M00) - Y*Y;
        }
        /* Blob extent is 4 standard deviations in each direction. */
        NewBlob = cvBlob(rect_res.x+(float)X,rect_res.y+(float)Y,(float)(4*sqrt(XX)),(float)(4*sqrt(YY)));
        pBlobs->AddBlob(&NewBlob);

    }   /* Next cluster. */

    #if 0
    {   // Debug info:
        IplImage* pI = cvCreateImage(cvSize(pFG->width,pFG->height),IPL_DEPTH_8U,3);
        cvZero(pI);
        for(claster_cur=0; claster_cur<claster_num; ++claster_cur)
        {
            int         cnt_cur;
            CvScalar    color = CV_RGB(rand()%256,rand()%256,rand()%256);

            for(cnt_cur=0; cnt_cur<clasters->total; ++cnt_cur)
            {
                CvSeq* cnt;
                int k = *(int*)cvGetSeqElem( clasters, cnt_cur );
                if(k!=claster_cur) continue;
                cnt = *(CvSeq**)cvGetSeqElem( cnt_list, cnt_cur );
                cvDrawContours( pI, cnt, color, color, 0, 1, 8);
            }

            CvBlob* pB = pBlobs->GetBlob(claster_cur);
            int x = cvRound(CV_BLOB_RX(pB)), y = cvRound(CV_BLOB_RY(pB));
            cvEllipse( pI,
                cvPointFrom32f(CV_BLOB_CENTER(pB)),
                cvSize(MAX(1,x), MAX(1,y)),
                0, 0, 360,
                color, 1 );
        }

        cvNamedWindow( "Clusters", 0);
        cvShowImage( "Clusters",pI );
        cvReleaseImage(&pI);
    }   /* Debug info. */
    #endif
}   /* cvFindBlobsByCCClasters */
// JNI entry point: binarises the current source image, extracts its
// contours, draws them in red on the source image, and returns the encoded
// pixel data to Java as a jbooleanArray. Releases all native resources
// (including m_sourceImage) before returning; returns 0 on allocation
// failure. (Original comments were Japanese; translated to English.)
JNIEXPORT jbooleanArray JNICALL Java_org_siprop_opencv_OpenCV_findContours(JNIEnv* env, jobject thiz, jint width, jint height) {
	IplImage *grayImage = cvCreateImage( cvGetSize(m_sourceImage), IPL_DEPTH_8U, 1 );    // grayscale working image
	IplImage *binaryImage = cvCreateImage( cvGetSize(m_sourceImage), IPL_DEPTH_8U, 1 );  // binary working image
	IplImage *contourImage = cvCreateImage( cvGetSize(m_sourceImage), IPL_DEPTH_8U, 3 ); // contour image (allocated but unused; only released below)

	// Convert BGR to grayscale.
	cvCvtColor( m_sourceImage, grayImage, CV_BGR2GRAY );

	// Convert grayscale to binary.
	cvThreshold( grayImage, binaryImage, THRESHOLD, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );

	// Allocate memory for contour extraction.
	CvMemStorage* storage = cvCreateMemStorage( 0 );
	CvSeq* find_contour = 0; // pointer to the extracted contours

	// Find the contours in the binary image; returns how many were found.
	int find_contour_num = cvFindContours(
		binaryImage,          // input image (8-bit single channel)
		storage,              // storage for the extracted contours
		&find_contour,        // pointer to the outermost contour
		sizeof( CvContour ),  // size of the sequence header
		CV_RETR_LIST,         // retrieval mode
		CV_CHAIN_APPROX_NONE, // approximation method
		cvPoint( 0, 0 )       // offset
	);

	// Draw the object contours in red.
	CvScalar red = CV_RGB( 255, 0, 0 );
	cvDrawContours(
		m_sourceImage,        // image to draw on
		find_contour,         // pointer to the first contour
		red,                  // colour of the external contours
		red,                  // colour of the internal contours (holes)
		CONTOUR_MAX_LEVEL,    // maximum level of contours to draw
		LINE_THICKNESS,       // thickness of the contour lines
		LINE_TYPE,            // line type
		cvPoint( 0, 0 )       // offset
	);

	int imageSize;
	CvMat stub, *mat_image;
	int channels, ipl_depth;
	mat_image = cvGetMat( m_sourceImage, &stub );
	channels = CV_MAT_CN( mat_image->type );
	ipl_depth = cvCvToIplDepth(mat_image->type);

	LOGV("Load loadImageBytes.");
	WLNonFileByteStream* strm = new WLNonFileByteStream();
	loadImageBytes(mat_image->data.ptr, mat_image->step, mat_image->width,
		mat_image->height, ipl_depth, channels, strm);
	imageSize = strm->GetSize();

	jbooleanArray res_array = env->NewBooleanArray(imageSize);
	LOGV("Load NewBooleanArray.");
	if (res_array == 0) {
		// Bug fix: release the native resources before bailing out — the
		// original leaked all three images, the storage and the stream here.
		if (m_sourceImage) {
			cvReleaseImage(&m_sourceImage);
			m_sourceImage = 0;
		}
		cvReleaseImage( &binaryImage );
		cvReleaseImage( &grayImage );
		cvReleaseImage( &contourImage );
		cvReleaseMemStorage( &storage );
		strm->Close();
		SAFE_DELETE(strm);
		return 0;
	}
	env->SetBooleanArrayRegion(res_array, 0, imageSize, (jboolean*)strm->GetByte());
	LOGV("Load SetBooleanArrayRegion.");

	LOGV("Release sourceImage");
	if (m_sourceImage) {
		cvReleaseImage(&m_sourceImage);
		m_sourceImage = 0;
	}
	LOGV("Release binaryImage");
	cvReleaseImage( &binaryImage );
	LOGV("Release grayImage");
	cvReleaseImage( &grayImage );
	LOGV("Release contourImage");
	cvReleaseImage( &contourImage );
	LOGV("Release storage");
	cvReleaseMemStorage( &storage );
	LOGV("Delete strm");
	strm->Close();
	SAFE_DELETE(strm);

	return res_array;
}
/* * Prints a contour on a dst Image. */ void Contours::printContour(int depthLevel, CvScalar color,IplImage * dst){ cvDrawContours( dst, this->c, CV_RGB(255,0,0), CV_RGB(0,255,0), depthLevel, 3, CV_AA, cvPoint(0,0) ); }
// -------------------------------------------------------------------------- // main(Number of arguments, Argument values) // Description : This is the entry point of the program. // Return value : SUCCESS:0 ERROR:-1 // -------------------------------------------------------------------------- int main(int argc, char **argv) { // AR.Drone class ARDrone ardrone; // Initialize if (!ardrone.open()) { printf("Failed to initialize.\n"); return -1; } // Kalman filter CvKalman *kalman = cvCreateKalman(4, 2); // Setup cvSetIdentity(kalman->measurement_matrix, cvRealScalar(1.0)); cvSetIdentity(kalman->process_noise_cov, cvRealScalar(1e-5)); cvSetIdentity(kalman->measurement_noise_cov, cvRealScalar(0.1)); cvSetIdentity(kalman->error_cov_post, cvRealScalar(1.0)); // Linear system kalman->DynamMatr[0] = 1.0; kalman->DynamMatr[1] = 0.0; kalman->DynamMatr[2] = 1.0; kalman->DynamMatr[3] = 0.0; kalman->DynamMatr[4] = 0.0; kalman->DynamMatr[5] = 1.0; kalman->DynamMatr[6] = 0.0; kalman->DynamMatr[7] = 1.0; kalman->DynamMatr[8] = 0.0; kalman->DynamMatr[9] = 0.0; kalman->DynamMatr[10] = 1.0; kalman->DynamMatr[11] = 0.0; kalman->DynamMatr[12] = 0.0; kalman->DynamMatr[13] = 0.0; kalman->DynamMatr[14] = 0.0; kalman->DynamMatr[15] = 1.0; // Thresholds int minH = 0, maxH = 255; int minS = 0, maxS = 255; int minV = 0, maxV = 255; // Create a window cvNamedWindow("binalized"); cvCreateTrackbar("H max", "binalized", &maxH, 255); cvCreateTrackbar("H min", "binalized", &minH, 255); cvCreateTrackbar("S max", "binalized", &maxS, 255); cvCreateTrackbar("S min", "binalized", &minS, 255); cvCreateTrackbar("V max", "binalized", &maxV, 255); cvCreateTrackbar("V min", "binalized", &minV, 255); cvResizeWindow("binalized", 0, 0); // Main loop while (1) { // Key input int key = cvWaitKey(1); if (key == 0x1b) break; // Update if (!ardrone.update()) break; // Get an image IplImage *image = ardrone.getImage(); // HSV image IplImage *hsv = cvCloneImage(image); cvCvtColor(image, hsv, CV_RGB2HSV_FULL); // 
Binalized image IplImage *binalized = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1); // Binalize CvScalar lower = cvScalar(minH, minS, minV); CvScalar upper = cvScalar(maxH, maxS, maxV); cvInRangeS(image, lower, upper, binalized); // Show result cvShowImage("binalized", binalized); // De-noising cvMorphologyEx(binalized, binalized, NULL, NULL, CV_MOP_CLOSE); // Detect contours CvSeq *contour = NULL, *maxContour = NULL; CvMemStorage *contourStorage = cvCreateMemStorage(); cvFindContours(binalized, contourStorage, &contour, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE); // Find largest contour double max_area = 0.0; while (contour) { double area = fabs(cvContourArea(contour)); if ( area > max_area) { maxContour = contour; max_area = area; } contour = contour->h_next; } // Object detected if (maxContour) { // Draw a contour cvZero(binalized); cvDrawContours(binalized, maxContour, cvScalarAll(255), cvScalarAll(255), 0, CV_FILLED); // Calculate the moments CvMoments moments; cvMoments(binalized, &moments, 1); int my = (int)(moments.m01/moments.m00); int mx = (int)(moments.m10/moments.m00); // Measurements float m[] = {mx, my}; CvMat measurement = cvMat(2, 1, CV_32FC1, m); // Correct phase const CvMat *correction = cvKalmanCorrect(kalman, &measurement); } // Prediction phase const CvMat *prediction = cvKalmanPredict(kalman); // Display the image cvCircle(image, cvPointFrom32f(cvPoint2D32f(prediction->data.fl[0], prediction->data.fl[1])), 10, CV_RGB(0,255,0)); cvShowImage("camera", image); // Release the memories cvReleaseImage(&hsv); cvReleaseImage(&binalized); cvReleaseMemStorage(&contourStorage); } // Release the kalman filter cvReleaseKalman(&kalman); // See you ardrone.close(); return 0; }
// Function cvUpdateFGDStatModel updates the statistical model and returns the number
// of foreground regions found in the current frame.
// parameters:
//   curr_frame - current frame from video sequence (3-channel, 8-bit)
//   model      - pointer to CvFGDStatModel structure (per-pixel stat tables, masks,
//                background/prev_frame images, contour storage)
//   (unnamed double) - accepted but unused; presumably kept to match the generic
//                      update-callback signature — TODO confirm against the caller.
// Returns: number of foreground regions that survived the size/hole filtering.
static int CV_CDECL
icvUpdateFGDStatModel( IplImage* curr_frame, CvFGDStatModel* model, double )
{
    // Row stride in bytes, shared by all the single-channel masks (Ftd, Fbd, foreground).
    int            mask_step = model->Ftd->widthStep;
    CvSeq         *first_seq = NULL, *prev_seq = NULL, *seq = NULL;
    IplImage*      prev_frame = model->prev_frame;
    int            region_count = 0;    // return value: surviving foreground regions
    int            FG_pixels_count = 0; // total pixels classified as foreground this frame
    // Per-channel match tolerances (0..255 scale) for color (C) and
    // color-cooccurrence (CC) table lookups.
    int            deltaC  = cvRound(model->params.delta * 256 / model->params.Lc);
    int            deltaCC = cvRound(model->params.delta * 256 / model->params.Lcc);
    int            i, j, k, l;

    //clear storages
    cvClearMemStorage(model->storage);
    cvZero(model->foreground);

    // From foreground pixel candidates using image differencing
    // with adaptive thresholding. The algorithm is from:
    //
    //    Thresholding for Change Detection
    //    Paul L. Rosin 1998 6p
    //    http://www.cis.temple.edu/~latecki/Courses/CIS750-03/Papers/thresh-iccv.pdf
    //
    cvChangeDetection( prev_frame, curr_frame, model->Ftd );        // frame-to-frame change mask
    cvChangeDetection( model->background, curr_frame, model->Fbd ); // background-to-frame change mask

    // ---------------- BG/FG classification ----------------
    // Only pixels flagged by either change mask are examined; everything else
    // stays background (foreground image was zeroed above).
    for( i = 0; i < model->Ftd->height; i++ )
    {
        for( j = 0; j < model->Ftd->width; j++ )
        {
            if( ((uchar*)model->Fbd->imageData)[i*mask_step+j] || ((uchar*)model->Ftd->imageData)[i*mask_step+j] )
            {
                float Pb  = 0;  // prior probability the pixel is background
                float Pv  = 0;  // accumulated P(v) over matching table entries
                float Pvb = 0;  // accumulated P(v|b) over matching table entries
                CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;
                CvBGPixelCStatTable* ctable = stat->ctable;    // color table (stationary pixels)
                CvBGPixelCCStatTable* cctable = stat->cctable; // color-cooccurrence table (motion pixels)
                uchar* curr_data = (uchar*)(curr_frame->imageData) + i*curr_frame->widthStep + j*3;
                uchar* prev_data = (uchar*)(prev_frame->imageData) + i*prev_frame->widthStep + j*3;
                int val = 0;    // 1 => classify this pixel as foreground

                // Is it a motion pixel?
                if( ((uchar*)model->Ftd->imageData)[i*mask_step+j] )
                {
                    if( !stat->is_trained_dyn_model ) {
                        // Dynamic model not trained yet: treat every motion pixel as FG.
                        val = 1;
                    } else {
                        // Compare with stored CCt vectors:
                        // NOTE(review): PV_CC(k) is evaluated BEFORE the k bound check;
                        // this assumes the table macro tolerates k == N1cc — confirm
                        // against the table size (N2cc entries) in the model header.
                        for( k = 0; PV_CC(k) > model->params.alpha2 && k < model->params.N1cc; k++ )
                        {
                            // Match the 6-vector (prev BGR, curr BGR) within deltaCC per channel.
                            if ( abs( V_CC(k,0) - prev_data[0]) <= deltaCC &&
                                 abs( V_CC(k,1) - prev_data[1]) <= deltaCC &&
                                 abs( V_CC(k,2) - prev_data[2]) <= deltaCC &&
                                 abs( V_CC(k,3) - curr_data[0]) <= deltaCC &&
                                 abs( V_CC(k,4) - curr_data[1]) <= deltaCC &&
                                 abs( V_CC(k,5) - curr_data[2]) <= deltaCC)
                            {
                                Pv  += PV_CC(k);
                                Pvb += PVB_CC(k);
                            }
                        }
                        Pb = stat->Pbcc;
                        // Bayes decision: FG when 2*P(v|b)*P(b) <= P(v).
                        if( 2 * Pvb * Pb <= Pv ) val = 1;
                    }
                }
                else if( stat->is_trained_st_model )
                {
                    // Stationary pixel: compare with stored Ct vectors:
                    // NOTE(review): same evaluation-order caveat as the CC loop above.
                    for( k = 0; PV_C(k) > model->params.alpha2 && k < model->params.N1c; k++ )
                    {
                        if ( abs( V_C(k,0) - curr_data[0]) <= deltaC &&
                             abs( V_C(k,1) - curr_data[1]) <= deltaC &&
                             abs( V_C(k,2) - curr_data[2]) <= deltaC )
                        {
                            Pv  += PV_C(k);
                            Pvb += PVB_C(k);
                        }
                    }
                    Pb = stat->Pbc;
                    if( 2 * Pvb * Pb <= Pv ) val = 1;
                }

                // Update foreground mask (255 = FG, 0 = BG):
                ((uchar*)model->foreground->imageData)[i*mask_step+j] = (uchar)(val*255);
                FG_pixels_count += val;

            }       // end if( change detection...
        }           // for j...
    }               // for i...
    //end BG/FG classification

    // ---------------- Foreground segmentation ----------------
    // Smooth foreground map:
    if( model->params.perform_morphing ){
        cvMorphologyEx( model->foreground, model->foreground, 0, 0, CV_MOP_OPEN,  model->params.perform_morphing );
        cvMorphologyEx( model->foreground, model->foreground, 0, 0, CV_MOP_CLOSE, model->params.perform_morphing );
    }

    if( model->params.minArea > 0 || model->params.is_obj_without_holes ){

        // Discard under-size foreground regions:
        //
        cvFindContours( model->foreground, model->storage, &first_seq, sizeof(CvContour), CV_RETR_LIST );
        for( seq = first_seq; seq; seq = seq->h_next )
        {
            CvContour* cnt = (CvContour*)seq;
            if( cnt->rect.width * cnt->rect.height < model->params.minArea ||
                (model->params.is_obj_without_holes && CV_IS_SEQ_HOLE(seq)) )
            {
                // Delete under-size contour by unlinking it from the h_prev/h_next list:
                prev_seq = seq->h_prev;
                if( prev_seq )
                {
                    prev_seq->h_next = seq->h_next;
                    if( seq->h_next ) seq->h_next->h_prev = prev_seq;
                }
                else
                {
                    first_seq = seq->h_next;
                    if( seq->h_next ) seq->h_next->h_prev = NULL;
                }
            }
            else
            {
                region_count++;
            }
        }
        model->foreground_regions = first_seq;
        // Re-render the (filtered) regions as the final filled foreground mask.
        cvZero(model->foreground);
        cvDrawContours(model->foreground, first_seq, CV_RGB(0, 0, 255), CV_RGB(0, 0, 255), 10, -1);

    } else {

        model->foreground_regions = NULL;
    }

    // Check ALL BG update condition:
    // if too much of the frame is foreground, assume a global change (e.g. camera
    // move / lighting) and mark every pixel's models as trained so they adapt.
    if( ((float)FG_pixels_count/(model->Ftd->width*model->Ftd->height)) > CV_BGFG_FGD_BG_UPDATE_TRESH )
    {
        for( i = 0; i < model->Ftd->height; i++ )
            for( j = 0; j < model->Ftd->width; j++ )
            {
                CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;
                stat->is_trained_st_model = stat->is_trained_dyn_model = 1;
            }
    }

    // ---------------- Update background model ----------------
    for( i = 0; i < model->Ftd->height; i++ )
    {
        for( j = 0; j < model->Ftd->width; j++ )
        {
            CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;
            CvBGPixelCStatTable* ctable = stat->ctable;
            CvBGPixelCCStatTable* cctable = stat->cctable;
            uchar *curr_data = (uchar*)(curr_frame->imageData)+i*curr_frame->widthStep+j*3;
            uchar *prev_data = (uchar*)(prev_frame->imageData)+i*prev_frame->widthStep+j*3;

            // Handle "motion" pixel (or still-untrained dynamic model):
            if( ((uchar*)model->Ftd->imageData)[i*mask_step+j] || !stat->is_trained_dyn_model )
            {
                // Faster learning rate (alpha3) while the model is untrained.
                float alpha = stat->is_trained_dyn_model ? model->params.alpha2 : model->params.alpha3;
                float diff = 0;
                int dist, min_dist = 2147483647, indx = -1;

                //update Pb
                stat->Pbcc *= (1.f-alpha);
                if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                {
                    stat->Pbcc += alpha;
                }

                // Find best Vi match (smallest accumulated channel distance within deltaCC):
                for(k = 0; PV_CC(k) && k < model->params.N2cc; k++ )
                {
                    // Exponential decay of memory
                    PV_CC(k)  *= (1-alpha);
                    PVB_CC(k) *= (1-alpha);
                    if( PV_CC(k) < MIN_PV )
                    {
                        PV_CC(k) = 0;
                        PVB_CC(k) = 0;
                        continue;
                    }

                    dist = 0;
                    for( l = 0; l < 3; l++ )
                    {
                        int val = abs( V_CC(k,l) - prev_data[l] );
                        if( val > deltaCC ) break;
                        dist += val;
                        val = abs( V_CC(k,l+3) - curr_data[l] );
                        if( val > deltaCC) break;
                        dist += val;
                    }
                    if( l == 3 && dist < min_dist )  // l == 3 means all channels matched
                    {
                        min_dist = dist;
                        indx = k;
                    }
                }

                if( indx < 0 )
                {   // Replace N2th elem in the table by new feature:
                    indx = model->params.N2cc - 1;
                    PV_CC(indx) = alpha;
                    PVB_CC(indx) = alpha;
                    //udate Vt
                    for( l = 0; l < 3; l++ )
                    {
                        V_CC(indx,l) = prev_data[l];
                        V_CC(indx,l+3) = curr_data[l];
                    }
                }
                else
                {   // Update:
                    PV_CC(indx) += alpha;
                    if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                    {
                        PVB_CC(indx) += alpha;
                    }
                }

                //re-sort CCt table by Pv (insert entry indx before the first entry it beats)
                for( k = 0; k < indx; k++ )
                {
                    if( PV_CC(k) <= PV_CC(indx) )
                    {
                        //shift elements
                        CvBGPixelCCStatTable tmp1, tmp2 = cctable[indx];
                        for( l = k; l <= indx; l++ )
                        {
                            tmp1 = cctable[l];
                            cctable[l] = tmp2;
                            tmp2 = tmp1;
                        }
                        break;
                    }
                }

                float sum1=0, sum2=0;
                //check "once-off" changes
                for(k = 0; PV_CC(k) && k < model->params.N1cc; k++ )
                {
                    sum1 += PV_CC(k);
                    sum2 += PVB_CC(k);
                }
                if( sum1 > model->params.T ) stat->is_trained_dyn_model = 1;

                diff = sum1 - stat->Pbcc * sum2;
                // Update stat table:
                if( diff >  model->params.T )
                {
                    //printf("once off change at motion mode\n");
                    //new BG features are discovered
                    for( k = 0; PV_CC(k) && k < model->params.N1cc; k++ )
                    {
                        PVB_CC(k) =
                            (PV_CC(k)-stat->Pbcc*PVB_CC(k))/(1-stat->Pbcc);
                    }
                    assert(stat->Pbcc<=1 && stat->Pbcc>=0);
                }
            }

            // Handle "stationary" pixel:
            if( !((uchar*)model->Ftd->imageData)[i*mask_step+j] )
            {
                float alpha = stat->is_trained_st_model ? model->params.alpha2 : model->params.alpha3;
                float diff = 0;
                int dist, min_dist = 2147483647, indx = -1;

                //update Pb
                stat->Pbc *= (1.f-alpha);
                if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                {
                    stat->Pbc += alpha;
                }

                //find best Vi match
                for( k = 0; k < model->params.N2c; k++ )
                {
                    // Exponential decay of memory
                    PV_C(k) *= (1-alpha);
                    PVB_C(k) *= (1-alpha);
                    if( PV_C(k) < MIN_PV )
                    {
                        PV_C(k) = 0;
                        PVB_C(k) = 0;
                        continue;
                    }

                    dist = 0;
                    for( l = 0; l < 3; l++ )
                    {
                        int val = abs( V_C(k,l) - curr_data[l] );
                        if( val > deltaC ) break;
                        dist += val;
                    }
                    if( l == 3 && dist < min_dist )
                    {
                        min_dist = dist;
                        indx = k;
                    }
                }

                if( indx < 0 )
                {//N2th elem in the table is replaced by a new features
                    indx = model->params.N2c - 1;
                    PV_C(indx) = alpha;
                    PVB_C(indx) = alpha;
                    //udate Vt
                    for( l = 0; l < 3; l++ )
                    {
                        V_C(indx,l) = curr_data[l];
                    }
                } else
                {//update
                    PV_C(indx) += alpha;
                    if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                    {
                        PVB_C(indx) += alpha;
                    }
                }

                //re-sort Ct table by Pv
                for( k = 0; k < indx; k++ )
                {
                    if( PV_C(k) <= PV_C(indx) )
                    {
                        //shift elements
                        CvBGPixelCStatTable tmp1, tmp2 = ctable[indx];
                        for( l = k; l <= indx; l++ )
                        {
                            tmp1 = ctable[l];
                            ctable[l] = tmp2;
                            tmp2 = tmp1;
                        }
                        break;
                    }
                }

                // Check "once-off" changes:
                float sum1=0, sum2=0;
                for( k = 0; PV_C(k) && k < model->params.N1c; k++ )
                {
                    sum1 += PV_C(k);
                    sum2 += PVB_C(k);
                }
                diff = sum1 - stat->Pbc * sum2;
                if( sum1 > model->params.T ) stat->is_trained_st_model = 1;

                // Update stat table:
                if( diff >  model->params.T )
                {
                    //printf("once off change at stat mode\n");
                    //new BG features are discovered
                    for( k = 0; PV_C(k) && k < model->params.N1c; k++ )
                    {
                        PVB_C(k) = (PV_C(k)-stat->Pbc*PVB_C(k))/(1-stat->Pbc);
                    }
                    stat->Pbc = 1 - stat->Pbc;
                }
            }               // if !(change detection) at pixel (i,j)

            // Update the reference BG image (only where the pixel is background):
            if( !((uchar*)model->foreground->imageData)[i*mask_step+j])
            {
                uchar* ptr = ((uchar*)model->background->imageData) + i*model->background->widthStep+j*3;

                if( !((uchar*)model->Ftd->imageData)[i*mask_step+j] &&
                    !((uchar*)model->Fbd->imageData)[i*mask_step+j] )
                {
                    // Apply IIR filter: blend current frame into background at rate alpha1.
                    for( l = 0; l < 3; l++ )
                    {
                        int a = cvRound(ptr[l]*(1 - model->params.alpha1) + model->params.alpha1*curr_data[l]);
                        ptr[l] = (uchar)a;
                        //((uchar*)model->background->imageData)[i*model->background->widthStep+j*3+l]*=(1 - model->params.alpha1);
                        //((uchar*)model->background->imageData)[i*model->background->widthStep+j*3+l] += model->params.alpha1*curr_data[l];
                    }
                }
                else
                {
                    // Background change detected: replace the reference pixel outright.
                    for( l = 0; l < 3; l++ )
                    {
                        //((uchar*)model->background->imageData)[i*model->background->widthStep+j*3+l] = curr_data[l];
                        ptr[l] = curr_data[l];
                    }
                }
            }
        }               // j
    }                   // i

    // Keep previous frame:
    cvCopy( curr_frame, model->prev_frame );

    return region_count;
}
// Runs the OpenCV watershed transform on theBreakingPointsImage's marker mask,
// then splits the source image into per-blob RGBA layers (one layer per contour
// blob found in the watershed result), each with its own mask, fade value and
// fade speed. Finally randomizes the strobe flag and marks the watershed done.
//
// FIX: `storage` was leaked when no contours were found (early return before
// cvReleaseMemStorage). It is now released on that path as well.
void BreakingPointsScreen::doWatershedAndLayers(BreakingPointsImage* theBreakingPointsImage) {
    cvZero( theBreakingPointsImage->marker_mask ); //zero out the mask to start with

    int imageWidth = theBreakingPointsImage->theWatershed.width;
    int imageHeight = theBreakingPointsImage->theWatershed.height;

    // Copy the watershed source into the marker mask via a temporary gray image.
    ofxCvGrayscaleImage cvGrayWater;
    cvGrayWater.allocate(imageWidth, imageHeight);
    cvGrayWater.setFromPixels(theBreakingPointsImage->theWatershed.getPixels(), imageWidth, imageHeight);
    cvCopy(cvGrayWater.getCvImage(), theBreakingPointsImage->marker_mask);

    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* contours = 0;
    CvMat* color_tab;
    int i, j, comp_count = 0;

    cvZero( theBreakingPointsImage->markers );
    cvZero( theBreakingPointsImage->wshed );

    // Each contour becomes one watershed seed region, labelled 1..comp_count.
    cvFindContours( theBreakingPointsImage->marker_mask, storage, &contours, sizeof(CvContour),
                    CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
    for( ; contours != 0; contours = contours->h_next, comp_count++ )
    {
        cvDrawContours( theBreakingPointsImage->markers, contours, cvScalarAll(comp_count+1),
                        cvScalarAll(comp_count+1), -1, -1, 8, cvPoint(0,0) );
    }

    if(comp_count == 0)
    {
        cout << "Can't do watershed with no contours! " << endl;
        cvReleaseMemStorage( &storage ); // FIX: was leaked on this early return
        return;
    }

    // Random (but not too dark / too bright) color per region for visualization.
    color_tab = cvCreateMat( 1, comp_count, CV_8UC3 );
    for( i = 0; i < comp_count; i++ )
    {
        uchar* ptr = color_tab->data.ptr + i*3;
        ptr[0] = (uchar)(cvRandInt(&theBreakingPointsImage->rng)%180 + 50);
        ptr[1] = (uchar)(cvRandInt(&theBreakingPointsImage->rng)%180 + 50);
        ptr[2] = (uchar)(cvRandInt(&theBreakingPointsImage->rng)%180 + 50);
    }

    // double t = (double)cvGetTickCount();
    // NOTE(review): cvWatershed runs on a freshly-allocated (blank) color image,
    // so only the marker seeds drive the segmentation — confirm this is intended.
    ofxCvColorImage cvTempImage;
    cvTempImage.allocate(imageWidth, imageHeight);
    cvWatershed( cvTempImage.getCvImage(), theBreakingPointsImage->markers );
    // t = (double)cvGetTickCount() - t;
    // printf( "exec time = %gms\n", t/(cvGetTickFrequency()*1000.) );

    // Paint the watershed result: -1 labels (boundaries) white, valid labels get
    // their region color, anything unexpected black.
    for( i = 0; i < theBreakingPointsImage->markers->height; i++ )
    {
        for( j = 0; j < theBreakingPointsImage->markers->width; j++ )
        {
            int idx = CV_IMAGE_ELEM( theBreakingPointsImage->markers, int, i, j );
            uchar* dst = &CV_IMAGE_ELEM( theBreakingPointsImage->wshed, uchar, i, j*3 );
            if( idx == -1 )
                dst[0] = dst[1] = dst[2] = (uchar)255;
            else if( idx <= 0 || idx > comp_count )
                dst[0] = dst[1] = dst[2] = (uchar)0; // should not get here
            else
            {
                uchar* ptr = color_tab->data.ptr + (idx-1)*3;
                dst[0] = ptr[0]; dst[1] = ptr[1]; dst[2] = ptr[2];
            }
        }
    }

    cvReleaseMemStorage( &storage );
    cvReleaseMat( &color_tab );

    // Turn the watershed painting into a blob-findable binary image:
    // borders -> dilated -> inverted -> thresholded, so regions become white blobs.
    ofxCvColorImage tempToDrawWith;
    tempToDrawWith.allocate(imageWidth, imageHeight);
    ofxCvGrayscaleImage tempToDrawWithGrey;
    tempToDrawWithGrey.allocate(imageWidth, imageHeight);

    cvCopy(theBreakingPointsImage->wshed, tempToDrawWith.getCvImage() );
    tempToDrawWith.flagImageChanged();
    tempToDrawWithGrey = tempToDrawWith;    //converting automatically i hope
    tempToDrawWithGrey.contrastStretch();   //as much contrast as we can get
    tempToDrawWithGrey.dilate();            //stretch out the white borders
    tempToDrawWithGrey.invert();            //make them black
    tempToDrawWithGrey.threshold(1);        //make all the grey white

    theBreakingPointsImage->contourFinder.findContours(tempToDrawWithGrey, 20,
        0.9f*(float)(imageWidth * imageHeight), 10, true, true);

    int numberOfBlobsFound = theBreakingPointsImage->contourFinder.blobs.size();
    //cout << contourFinder.blobs.size() << " was the number of blobs" << endl;

    if(numberOfBlobsFound > 0)
    {
        theBreakingPointsImage->layers.clear();
        theBreakingPointsImage->layerMasks.clear();
        theBreakingPointsImage->layerFades.clear();
        theBreakingPointsImage->fadeSpeeds.clear();

        theBreakingPointsImage->layers.resize(numberOfBlobsFound);
        theBreakingPointsImage->layerMasks.resize(numberOfBlobsFound);
        theBreakingPointsImage->layerFades.resize(numberOfBlobsFound);
        theBreakingPointsImage->fadeSpeeds.resize(numberOfBlobsFound);

        for(int i=0; i< numberOfBlobsFound; i++)
        {
            theBreakingPointsImage->layers[i].allocate(imageWidth, imageHeight,OF_IMAGE_COLOR_ALPHA);
            theBreakingPointsImage->layerMasks[i].allocate(imageWidth, imageHeight);
            theBreakingPointsImage->layerMasks[i].drawBlobIntoMe(theBreakingPointsImage->contourFinder.blobs[i], 255);
            theBreakingPointsImage->layerMasks[i].flagImageChanged();

            unsigned char * pixelsSrc = theBreakingPointsImage->theImage.getPixels();
            unsigned char * pixelsMask = theBreakingPointsImage->layerMasks[i].getPixels();
            unsigned char * pixelsFinal = new unsigned char[imageWidth*imageHeight*4]; //RGBA so *4
            memset(pixelsFinal,0,imageWidth*imageHeight*4);

            // Copy RGB from the source wherever the mask is white; alpha 255 there, 0 elsewhere.
            // NOTE(review): the mask is an ofxCvGrayscaleImage, yet it is indexed with
            // a 3-byte stride (j*3) — verify getPixels() really returns 3 channels here.
            for( int j = 0; j < imageWidth*imageHeight; j++)
            {
                if( pixelsMask[j*3] == 255 ) //i.e. if the mask is white at this point
                {
                    pixelsFinal[j*4]   = pixelsSrc[ j*3 ];
                    pixelsFinal[j*4+1] = pixelsSrc[ j*3+1 ];
                    pixelsFinal[j*4+2] = pixelsSrc[ j*3+2 ];
                    pixelsFinal[j*4+3] = 255;
                }
            }
            theBreakingPointsImage->layers[i].setFromPixels(pixelsFinal, imageWidth, imageHeight, OF_IMAGE_COLOR_ALPHA);
            delete[] pixelsFinal;

            theBreakingPointsImage->layerFades[i] = 0; //start faded out, nahhhh random, nahh zero
            theBreakingPointsImage->fadeSpeeds[i] = ofRandomuf(); //random 0..1 fade speed
        }
    }

    theBreakingPointsImage->watershedDone = true;

    // Coin flip: half the time this image strobes.
    if(ofRandomuf() > 0.5f)
    {
        theBreakingPointsImage->isStrobe = true;
    }
    else {
        theBreakingPointsImage->isStrobe = false;
    }
}
// Worker thread: opens the AVI associated with this conveyor (testN.avi, where N
// is the conveyor id from the OpData parameter), finds contours in each frame's
// thresholded image, publishes them through pInfo->ppCont (signalled via hEvent),
// and displays the annotated frames until the video ends or a key is pressed.
//
// FIXES:
//  - per-iteration resources (storage, imgNew, imgThr) were leaked when the
//    keypress `break` fired, because cleanup sat after the break; cleanup now
//    always runs before the loop can exit.
//  - `capture` was never released and the named window never destroyed.
unsigned __stdcall FrameCaptureThread( void* Param )
{
    cout << "First thread started!" << endl;
    //----------------------------------------------------------
    OpData* pInfo = (OpData*) Param;
    CvSeq** contour = pInfo->ppCont;    //variable for storing contours
    CvCapture* capture = 0;             //interface for capturing frames of the video/camera
    //----------------------------------------------------------
    string strVid = "test";
    strVid.append( NumberToString( pInfo->nConv ) );
    strVid.append( ".avi" );
    //----------------------------------------------------------
    capture = cvCaptureFromAVI( strVid.c_str() );   //select video based on conveyor id
    //capture = cvCaptureFromAVI( "test.avi" );     //should be selection of file/camera here
    if( !capture )
    {
        cout << "Could not initialize capturing..." << endl;
        return 0;
    }

    cvNamedWindow( strVid.c_str() );

    while( true )
    {
        //----------------------------------------------------------
        IplImage* frame = cvQueryFrame( capture );
        if( !frame )
        {
            break;  // end of video
        }

        // Preprocess frame, creating only black & white image,
        // then force it into a clean binary representation.
        IplImage* imgThr = GetThresholdedImage( frame );
        cvThreshold( imgThr, imgThr, 128, 255, CV_THRESH_BINARY);

        CvMemStorage* storage = cvCreateMemStorage(0);
        // cvFindContours modifies its input, so work on a clone.
        IplImage *imgNew = cvCloneImage( imgThr );

        //find all contours
        cvFindContours( imgNew, storage, contour, sizeof(CvContour),
                        CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );

        // Signal the consumer that *contour points at fresh contours.
        SetEvent( hEvent );

        for( CvSeq* temp = *contour; temp != 0; temp = temp->h_next )
        {
            cvDrawContours( frame, temp,
                            cvScalar( 104, 178, 70 ),
                            cvScalar( 130, 240, 124 ),
                            1, 1, 8 );
        }

        cvShowImage( strVid.c_str(), frame );

        // Wait for a keypress
        int c = cvWaitKey( 300 );

        // Release per-iteration resources BEFORE any break, so a keypress exit
        // does not leak them. NOTE(review): releasing `storage` invalidates the
        // contours *contour points at — the consumer must be done by now (this
        // hazard existed in the original per-iteration release as well).
        cvClearMemStorage( storage );
        cvReleaseMemStorage( &storage );
        cvReleaseImage( &imgNew );
        cvReleaseImage( &imgThr );

        if( c != -1 )
        {
            // If pressed, break out of the loop
            break;
        }
    }

    // FIX: release the capture device and close the window on the way out.
    cvReleaseCapture( &capture );
    cvDestroyWindow( strVid.c_str() );

    return 0;
}
// Extracts a 3-element color feature vector from `image`, using `segmented` to
// locate the object of interest: the largest contour in the segmentation is
// polygon-approximated, filled into a mask, and the mean BGR color of `image`
// under that mask is quantized into 20 bins (0..19) per channel.
// Returns an empty vector when no contour is found.
vector<float> AverageColorFeatureExtractor::extractFeatures(IplImage * image, IplImage * segmented)
{
    g_image = image;

    /* For Debugging Purposes: Show the window with the images in them */
    cvNamedWindow( "Images", 2);
    //cvShowImage("Images", g_image);
    //cvShowImage("Segmentation", segmented);

    /* We'll create some storage structures to store the contours we get later */
    // Resize the segmentation to the working size before contour finding.
    IplImage * sixforty = cvCreateImage( cvGetSize(image), 8 , 1);
    cvResize(segmented, sixforty);

    CvSeq * first_contour = NULL;
    CvMemStorage * g_storage = cvCreateMemStorage();

    /* Perform the contour finding */
    cvFindContours(
        sixforty,
        g_storage,
        &first_contour,
        sizeof(CvContour),
        CV_RETR_LIST
    );

    /* Find the contour with the largest area
       This contour has the highest likelyhood of surrounding the object we care about */
    // "Area" here is the bounding-box area, not the true contour area.
    CvSeq * largest = 0;
    int l_area = 0;
    for(CvSeq * c=first_contour; c!=NULL; c=c->h_next ){
        CvRect rect = cvBoundingRect( c );
        if(rect.width*rect.height > l_area)
        {
            l_area = rect.width*rect.height;
            largest = c;
        }
    }

    /* For Debugging purposes: create image to see resulting contour */
    IplImage * view = cvCreateImage( cvGetSize(sixforty), 8, 3);
    cvZero(view);

    vector<float> features;

    if(largest)
    {
        cvDrawContours(view, largest, cvScalarAll(255), cvScalarAll(255), 0, 2, 8);
        cvShowImage( "View", view);

        /* Polygonal Approximation */
        CvSeq * result;  // Will hold approx
        CvMemStorage * storage = cvCreateMemStorage();

        result = cvApproxPoly(
            largest,
            sizeof(CvContour),
            storage,
            CV_POLY_APPROX_DP,
            cvContourPerimeter(largest)*0.015
        );
        /* The parameter value above (set to perimeter * 0.015) found by experimentation
           The value is smaller than the one used for L shape or the square finder
           Because we wan't some element of noisyness.
           (It determines when the Algorithm stops adding points) */

        /* For Debugging purposes: create image to see resulting contour */
        // Fill the approximated polygon (thickness -1) to build the averaging mask.
        IplImage * mask = cvCreateImage( cvGetSize(sixforty), IPL_DEPTH_8U, 1);
        cvZero(mask);
        cvDrawContours(mask, result, cvScalarAll(255), cvScalarAll(255), 0, -1, 8);

        // Bring the mask back to the original image size before averaging.
        IplImage * sendMask = cvCreateImage (cvGetSize(image), IPL_DEPTH_8U, 1);
        cvResize(mask, sendMask);

        //cvShowImage( "Result", sendMask );
        cout << image->nChannels << " " << image->imageSize << " " << sendMask->imageSize
             << " " << sendMask->depth << endl;

        // Mean color of `image` under the mask (per channel).
        CvScalar avg = cvAvg( image, sendMask );

        //cvWaitKey();

        /* Until we actually can send out a real feature vector: export a dummy */
        //for(int i=0; i<bins; i++)
        //	features.push_back( histogram[i] );

        // Quantize each mean channel into 20 levels: floor(19*v/255) in 0..19.
        features.push_back(floor((19*avg.val[0])/255));
        features.push_back(floor((19*avg.val[1])/255));
        features.push_back(floor((19*avg.val[2])/255));

        // Cleanup the temp files
        cvReleaseImage( &mask );
        cvReleaseImage( &sendMask );
        cvReleaseMemStorage( &storage );
    }

    cvReleaseImage( &view );
    cvReleaseImage( &sixforty );
    cvReleaseMemStorage( &g_storage );

    return features;
}
// Computes N Fourier-descriptor features from the largest external contour of a
// binary-segmentable grayscale image: the contour points are treated as complex
// numbers, transformed with FFTW, and the magnitudes of the N lowest non-DC
// frequency components (scale-normalized by |FD[1]|) are returned in a
// malloc()'d array of N doubles. Returns NULL on failure.
//
// NOTE(review): this function releases `segmented` (the caller's image) on the
// success path — callers must not use it afterwards. Preserved from the original.
//
// FIXES:
//  - `if(c->total>current);` had a stray semicolon: the max-contour search never
//    updated `current`, and `best` always ended up as the LAST contour index.
//  - missing malloc() check.
//  - fftw plan / buffers were never destroyed/freed; `Features` and the buffers
//    leaked on the "contour too small" error path; `storage` was never released.
double * computeFDFeatures(IplImage* segmented, int N)
{
    cvNamedWindow( "Edge",1);
    cvMoveWindow("Capture", 100, 10);

    IplImage* img_edge = cvCreateImage( cvGetSize(segmented), 8, 1 );
    IplImage* img_8uc3 = cvCreateImage( cvGetSize(segmented), 8, 3 );
    cvThreshold( segmented, img_edge, 128, 255, CV_THRESH_BINARY );

    CvMemStorage* storage = cvCreateMemStorage();
    CvSeq* first_contour = NULL;
    int Nc = cvFindContours(
        img_edge,
        storage,
        &first_contour,
        sizeof(CvContour),
        CV_RETR_EXTERNAL  // Try all four values and see what happens
    );
    (void)Nc;

    int i;
    int n=0;
    int best=0;
    int current=0;
    int n2;
    double Scale;
    double * Features;

    Features=(double *)malloc(sizeof(double)*N);
    if( !Features )  // FIX: malloc error checking
    {
        cvReleaseMemStorage( &storage );
        cvReleaseImage( &img_8uc3 );
        cvReleaseImage( &img_edge );
        return NULL;
    }

    fftw_complex *contour;
    fftw_complex *FD;
    fftw_plan plan_forward;

    //printf( "Total Contours Detected: %d\n", Nc );

    // Find the contour with the most points.
    // FIX: the original `if(c->total>current);` was an empty statement, so
    // `best` unconditionally tracked the last contour and `current` stayed 0.
    for( CvSeq* c=first_contour; c!=NULL; c=c->h_next )
    {
        if( c->total > current )
        {
            current = c->total;
            best = n;
        }
        n++;
    }
    //fprintf(stderr,"best is %d\n",best);

    n=0;
    for( CvSeq* c=first_contour; c!=NULL; c=c->h_next )
    {
        if(n==best && c->total >20){
            cvCvtColor( segmented, img_8uc3, CV_GRAY2BGR );
            cvDrawContours(
                img_8uc3,
                c,
                CVX_RED,
                CVX_BLUE,
                1,      // Try different values of max_level, and see what happens
                4,
                4
            );
            //printf("Contour #%d\n", n );
            cvShowImage("Edge", img_8uc3 );
            // cvWaitKey(0);
            // printf("%d elements:\n", c->total );

            // Pack the contour points into a complex sequence: x -> re, y -> im.
            contour=(fftw_complex*) fftw_malloc(sizeof(fftw_complex)*(c->total));
            FD=(fftw_complex*) fftw_malloc(sizeof(fftw_complex)*(c->total));
            for( int i=0; i<c->total; ++i )
            {
                CvPoint* p = CV_GET_SEQ_ELEM( CvPoint, c, i );
                contour[i][0]=p->x;
                contour[i][1]=p->y;
            }

            plan_forward=fftw_plan_dft_1d(c->total,contour,FD,FFTW_FORWARD,FFTW_ESTIMATE);
            fftw_execute(plan_forward); //do fft
            n2=c->total/2;
            (void)n2;

            // Normalize by the magnitude of the first non-DC coefficient for
            // scale invariance.
            Scale=(double)sqrt(pow(FD[1][0],2)+pow(FD[1][1],2));

            if(N+2>=c->total)
            {
                fprintf(stderr,"Contour Is too small");
                // FIX: release everything before the error return (was all leaked).
                fftw_destroy_plan(plan_forward);
                fftw_free(contour);
                fftw_free(FD);
                free(Features);
                cvReleaseMemStorage( &storage );
                cvReleaseImage( &img_8uc3 );
                cvReleaseImage( &img_edge );
                return 0;
            }

            // Positive-frequency components (skipping DC and FD[1]):
            for(i=0;i<N/2;i++)
            {
                //fftshift stuff
                Features[i]=(double)sqrt(pow(FD[i+2][0],2)+pow(FD[i+2][1],2))/Scale;
            }
            // Negative-frequency components, mirrored from the top of the spectrum:
            for(i=0;i<N/2;i++)
            {
                Features[i+N/2]=(double)sqrt(pow(FD[N-1-i][0],2)+pow(FD[N-1-i][1],2))/Scale;
            }
            //cvWaitKey(0);

            // FIX: destroy the plan and free the FFT buffers (were leaked).
            fftw_destroy_plan(plan_forward);
            fftw_free(contour);
            fftw_free(FD);
        }
        n++;
    }
    //try downspampling later
    //printf("Finished all contours.\n");

    cvCvtColor( segmented, img_8uc3, CV_GRAY2BGR );
    cvShowImage( "Edge", img_8uc3 );
    //cvWaitKey(0);
    //cvDestroyWindow( "Edge" );

    cvReleaseImage( &segmented );       // releases the caller's image (see note above)
    cvReleaseImage( &img_8uc3 );
    cvReleaseImage( &img_edge );
    cvReleaseMemStorage( &storage );    // FIX: was never released

    return Features;
}
int Contour_detection( char*filename) { //����IplImageָ�� IplImage* pImg = NULL; IplImage* pContourImg = NULL; CvMemStorage * storage = cvCreateMemStorage(0); CvSeq * contour = 0; int mode = CV_RETR_EXTERNAL; /*if( argc == 3) if(strcmp(argv[2], "all") == 0)*/ mode = CV_RETR_CCOMP; //������������� //�������� cvNamedWindow("src", 1); cvNamedWindow("contour",1); //����ͼ��ǿ��ת��ΪGray if((pImg = cvLoadImage(filename, 0)) != 0 ) { cvShowImage( "src", pImg ); //Ϊ������ʾͼ������ռ� //3ͨ��ͼ���Ա��ò�ɫ��ʾ pContourImg = cvCreateImage(cvGetSize(pImg), IPL_DEPTH_8U, 3); //copy source image and convert it to BGR image cvCvtColor(pImg, pContourImg, CV_GRAY2BGR); //����contour cvFindContours( pImg, storage, &contour, sizeof(CvContour), mode, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0)); } else { //���ٴ��� cvDestroyWindow( "src" ); cvDestroyWindow( "contour" ); cvReleaseMemStorage(&storage); return -1; } //���������� cvDrawContours(pContourImg, contour, CV_RGB(0,0,255), CV_RGB(255, 0, 0), 2, 2, 8, cvPoint(0,0)); //��ʾͼ�� cvShowImage( "contour", pContourImg ); cvWaitKey(0); //���ٴ��� cvDestroyWindow( "src" ); cvDestroyWindow( "contour" ); //�ͷ�ͼ�� cvReleaseImage( &pImg ); cvReleaseImage( &pContourImg ); cvReleaseMemStorage(&storage); return 0; }
int cam() //calling main { int hdims = 16; printf("I am main"); CvCapture* capture = cvCreateCameraCapture(1); //determining usb camera CvHistogram *hist = 0; CvMemStorage* g_storage = NULL; Display *display=construct_display(); int x,y, tmpx=0, tmpy=0, chk=0; IplImage* image=0; IplImage* lastimage1=0; IplImage* lastimage=0; IplImage* diffimage; IplImage* bitimage; IplImage* src=0,*hsv=0,*hue=0,*backproject=0; IplImage* hsv1=0,*hue1=0,*histimg=0,*frame=0,*edge=0; float* hranges; cvNamedWindow( "CA", CV_WINDOW_AUTOSIZE ); //display window 3 //Calculation of Histogram// cvReleaseImage(&src); src= cvLoadImage("images/skin.jpg"); //taking patch while(1) { frame = cvQueryFrame( capture ); //taking frame by frame for image prcessing int j=0; float avgx=0; float avgy=0; if( !frame ) break; //#########################Background Substraction#########################// if(!image) { image=cvCreateImage(cvSize(frame->width,frame->height),frame->depth,1); bitimage=cvCreateImage(cvSize(frame->width,frame->height),frame->depth,1); diffimage=cvCreateImage(cvSize(frame->width,frame->height),frame->depth,1); lastimage=cvCreateImage(cvSize(frame->width,frame->height),frame->depth,1); } cvCvtColor(frame,image,CV_BGR2GRAY); if(!lastimage1) { lastimage1=cvLoadImage("images/img.jpg"); } cvCvtColor(lastimage1,lastimage,CV_BGR2GRAY); cvAbsDiff(image,lastimage,diffimage); cvThreshold(diffimage,bitimage,65,225,CV_THRESH_BINARY); cvInRangeS(bitimage,cvScalar(0),cvScalar(30),bitimage); cvSet(frame,cvScalar(0,0,0),bitimage); cvReleaseImage(&hsv); hsv= cvCreateImage( cvGetSize(src), 8, 3 ); cvReleaseImage(&hue); hue= cvCreateImage( cvGetSize(src), 8, 1); cvCvtColor(src,hsv,CV_BGR2HSV); cvSplit(hsv,hue,0,0,0); float hranges_arr[] = {0,180}; hranges = hranges_arr; hist = cvCreateHist( 1, &hdims, CV_HIST_ARRAY, &hranges, 1 ); cvCalcHist(&hue, hist, 0, 0 ); cvThreshHist( hist, 100 ); //#############################Display histogram##############################// cvReleaseImage(&histimg); histimg = 
cvCreateImage( cvSize(320,200), 8, 3 ); cvZero( histimg ); int bin_w = histimg->width / hdims; //#### Calculating the Probablity of Finding the skin with in-built method ###// if(0) { free (backproject); free (hsv1); free (hue1); } cvReleaseImage(&backproject); backproject= cvCreateImage( cvGetSize(frame), 8, 1 ); cvReleaseImage(&hsv1); hsv1 = cvCreateImage( cvGetSize(frame), 8, 3); cvReleaseImage(&hue1); hue1 = cvCreateImage( cvGetSize(frame), 8, 1); cvCvtColor(frame,hsv1,CV_BGR2HSV); cvSplit(hsv1,hue1,0,0,0); cvCalcBackProject( &hue1, backproject, hist ); cvSmooth(backproject,backproject,CV_GAUSSIAN); cvSmooth(backproject,backproject,CV_MEDIAN); if( g_storage == NULL ) g_storage = cvCreateMemStorage(0); else cvClearMemStorage( g_storage ); CvSeq* contours=0; CvSeq* result =0; cvFindContours(backproject, g_storage, &contours ); if(contours) { result=cvApproxPoly(contours, sizeof(CvContour), g_storage, CV_POLY_APPROX_DP, 7, 1); } cvZero( backproject); for( ; result != 0; result = result->h_next ) { double area = cvContourArea( result ); cvDrawContours( backproject,result, CV_RGB(255,255, 255), CV_RGB(255,0, 255) , -1,CV_FILLED, 8 ); for( int i=1; i<=result-> total; i++ ) { if(i>=1 and abs(area)>300) { CvPoint* p2 = CV_GET_SEQ_ELEM( CvPoint, result, i ); if(1) { avgx=avgx+p2->x; avgy=avgy+p2->y; j=j+1; cvCircle(backproject,cvPoint(p2->x,p2->y ),10, cvScalar(255,255,255)); } } } } cvCircle( backproject, cvPoint(avgx/j, avgy/j ), 40, cvScalar(255,255,255) ); x = ( avgx/j ); y = ( avgy/j ); x=( (x*1240)/640 )-20; y=( (y*840)/480 )-20; if ( (abs(tmpx-x)>6 or abs(tmpy-y)>6 ) and j ) { tmpx = x; tmpy = y; chk=0; } else chk++; mouse_move1( tmpx, tmpy, display ); if ( chk==10 ) { mouse_click( 5, 2, display ); mouse_click( 5, 3, display ); } cvSaveImage( "final.jpg", frame ); cvSaveImage( "final1.jpg", backproject ); cvShowImage( "CA", backproject ); char c = cvWaitKey(33); if( c == 27 ) break; //function break and destroying windows if press <escape> key } cvReleaseCapture( 
&capture ); cvDestroyWindow( "CA" ); }
void moFlatlandColorPairFinderModule::applyFilter(IplImage *src) { ///////////////////////////////////////////////////////////////////////////////////// //Step 1 get gray version of input, retain colored version ///////////////////////////////////////////////////////////////////////////////////// //Step 2 pass gray along normally to contour finder. this->clearBlobs(); //imagePreprocess(src); //cvCopy(src, this->output_buffer); cvCvtColor(src, this->output_buffer, CV_RGB2GRAY); CvSeq *contours = 0; cvFindContours(this->output_buffer, this->storage, &contours, sizeof(CvContour), CV_RETR_CCOMP); cvDrawContours(this->output_buffer, contours, cvScalarAll(255), cvScalarAll(255), 100); //cvCircle(this->output_buffer, /* the dest image */ // cvPoint(110, 60), 35, /* center point and radius */ // cvScalarAll(255), /* the color; red */ // 1, 8, 0); // Consider each contour a blob and extract the blob infos from it. int size; int min_size = this->property("min_size").asInteger(); int max_size = this->property("max_size").asInteger(); CvSeq *cur_cont = contours; ///////////////////////////////////////////////////////////////////////////////////// //Step 3 check window around contour centers and find color //clear the console? //system("cls"); //system("clear"); //clrscr(); //printf("\033[2J"); //std::cout << std::string( 100, '\n' ); std::vector<ColoredPt> cPts; //printf("==================================\n"); int blobi = 0; while (cur_cont != 0) { CvRect rect = cvBoundingRect(cur_cont, 0); size = rect.width * rect.height; //printf(":: %d\n", size); if ((size >= min_size) && (size <= max_size)) { //TODO use a Vector double red = 0; double green = 0; double blue = 0; int blobColor = 0; //in reality, probably could filter heavily and just look at 1 pixel, or at least a very small window // [!!!] 
// NOTE(review): this span is the tail of a larger member function; its opening
// (declarations of `rect`, `src`, `red`/`green`/`blue`, `blobi`, `blobColor`,
// `cPts`, and the contour-walk loop over `cur_cont`) lies above this chunk.
// Only comments have been added/reflowed here; code tokens are unchanged.
//
// Step 3 (continued): accumulate per-channel totals over the blob's bounding
// rect, classify the blob's LED color, and record its centre in cPts.
for (int x = rect.x; x < rect.x + rect.width; x++) {
    for (int y = rect.y; y < rect.y + rect.height; y++) {
        // Read the B, G, R bytes of pixel (x, y) directly from the IplImage
        // buffer: 3 interleaved channels, row stride = widthStep.
        int blueVal = ( ((uchar*)(src->imageData + src->widthStep*y))[x*3+0] );
        int greenVal = ( ((uchar*)(src->imageData + src->widthStep*y))[x*3+1] );
        int redVal = ( ((uchar*)(src->imageData + src->widthStep*y))[x*3+2] );
        // Color normalisation and dark-pixel down-weighting are currently
        // DISABLED (both forced to 1.0); the commented-out expressions show
        // the original intent.
        double colorNorm = 1.0;//sqrt((blueVal*blueVal) + (greenVal*greenVal) + (redVal * redVal));
        //weight dark pixels less
        double weight = 1.0;//(1.0*blueVal + greenVal + redVal) / (1.5 * 255.0);
        if (weight > 1) {
            weight = 1;
        }
        if (colorNorm > 0) {
            red += weight*redVal/colorNorm;
            green += weight*greenVal/colorNorm;
            blue += weight*blueVal/colorNorm;
        }
    }
} //the channel totals
//printf("%d : %f\n%f\n%f\n\n",blobi , red, green, blue);
blobi++;
// NOTE(review): dead code — the winner-takes-all vote below is immediately
// overwritten by matchColor() on the next live statement. TODO: confirm and
// delete one of the two classification paths.
if (red > green && red > blue) {
    blobColor = RED;
}
if (blue > green && blue > red) {
    blobColor = BLUE;
}
if (green > red && green > blue) {
    blobColor = GREEN;
}
blobColor = matchColor(red, green, blue);
// Draw a letter corresponding to the LED color at the blob centre.
// NOTE(review): WHITE is labelled "Y" — presumably yellow/white marker; the
// four branches differ only in the label string and could be table-driven.
CvFont font;
cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, .7f, .7f, 0, 1, CV_AA);
if (blobColor == RED) {
    cvPutText(this->output_buffer, "R", cvPoint(rect.x + rect.width / 2.0, rect.y + rect.height / 2.0), &font, cvScalar(255, 255, 255, 0));
}
else if (blobColor == GREEN) {
    cvPutText(this->output_buffer, "G", cvPoint(rect.x + rect.width / 2.0, rect.y + rect.height / 2.0), &font, cvScalar(255, 255, 255, 0));
}
else if (blobColor == BLUE) {
    cvPutText(this->output_buffer, "B", cvPoint(rect.x + rect.width / 2.0, rect.y + rect.height / 2.0), &font, cvScalar(255, 255, 255, 0));
}
else if (blobColor == WHITE) {
    cvPutText(this->output_buffer, "Y", cvPoint(rect.x + rect.width / 2.0, rect.y + rect.height / 2.0), &font, cvScalar(255, 255, 255, 0));
}
/*moDataGenericContainer *blob = new moDataGenericContainer();
blob->properties["implements"] = new moProperty("pos,size");
blob->properties["x"] = new moProperty((rect.x + rect.width / 2) / (double) src->width );
blob->properties["y"] = new moProperty((rect.y + rect.height / 2) / (double) src->height );
blob->properties["width"] = new moProperty(rect.width);
blob->properties["height"] = new moProperty(rect.height);
blob->properties["color"] = new moProperty(blobColor);
this->blobs->push_back(blob);*/
// Record the blob centre (pixel coordinates — the normalisation by image
// size is commented out) together with its classified color.
struct ColoredPt thisPt;
thisPt.x = (rect.x + rect.width / 2);// / (double) src->width;
thisPt.y = (rect.y + rect.height / 2);// / (double) src->height;
thisPt.color = blobColor;
cPts.push_back(thisPt);
} // closes a scope opened above this chunk (per-blob body)
// Advance to the next contour in the blob list.
cur_cont = cur_cont->h_next;
} // closes the contour-walk loop opened above this chunk
/////////////////////////////////////////////////////////////////////////////////////
//Step 4 iterate over blobs again, to find close pairs
//TODO Currently, this algorithm assumes the best, and does nothing to ensure robustness/smoothness
//e.g. add a distance threshold (would need to be "settable" in a Gui)
int nPlayersFound = 0;
//Init the adjacency list: pairs[i] == -1 means "unpaired",
// pairs[i] == j (>= 0) means i's partner is j, -9999 marks a 'slave' point.
// NOTE(review): `int pairs[MAX_N_LIGHTS]` is a variable-length array — a GCC
// extension, not standard C++. MAX_N_LIGHTS should be const/constexpr.
int MAX_N_LIGHTS = 20; // TODO! more lights may need to be identified for field markers!
int pairs[MAX_N_LIGHTS];
for ( int i = 0; i < MAX_N_LIGHTS; i++ ) {
    pairs[i] = -1;
}
//printf("+++++++++++++++++++++++++++++++++++++++++\n");
// map out closest pairs of lights.
//TODO need to iterate through blobs and throw out obviously non-player-light blobs. (big blobs)
//TODO
//TODO
//TODO
//!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// In more realistic scenarios, an arbitrary number of lights is likely to appear!
// Need to account for this!
//! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! !
// Greedy nearest-neighbour pairing: each not-yet-paired point i claims its
// closest point with index > i. NOTE(review): the inner scan starts at j = i
// and never excludes points already marked -9999, so a slave point can be
// claimed by two different masters — TODO confirm this is acceptable.
for ( int i = 0; i < cPts.size() && i < MAX_N_LIGHTS; i++ ) // dynamically allocate pairs based on number of lights?
{
    if(pairs[i] == -1) {
        // NOTE(review): the original comment claims distances are < 1, but
        // cPts holds pixel coordinates here (normalisation is commented out),
        // so the sentinel merely needs to exceed the frame diagonal — it does.
        double minDist = 1000000;//distances are < 1, so should be OK.
        int closestIdx = -1;
        for ( int j = i; j < cPts.size() && j < MAX_N_LIGHTS; j++ ) {
            if (j != i) {
                double x1 = cPts[i].x;
                double y1 = cPts[i].y;
                double x2 = cPts[j].x;
                double y2 = cPts[j].y;
                // Euclidean distance between candidate pair (i, j).
                double distance = sqrt((x2 - x1)*(x2 - x1) + (y2 - y1)*(y2 - y1));
                if (distance < minDist) {
                    minDist = distance;
                    closestIdx = j;
                }
            }
        }
        if (closestIdx >= 0) {
            pairs[i] = closestIdx;
            pairs[closestIdx] = -9999; //designate as 'slave' point.
            nPlayersFound ++;
            //printf("%d ___ %d\n",i, pairs[i]);
        }
    }
    else {
        // Point already consumed as a slave — nothing to do.
    }
}
//printf("==================================\n");
//for ( int i = 0; i < cPts.size(); i++ )
//{
// printf("%d ___ %d\n", i, pairs[i]);
//}
///////////////////////////////////////
// Clear the player list //////////////
// NOTE(review): this->players appears to own its elements (they are deleted
// here before the list is cleared) — confirm no other consumer retains them.
moDataGenericList::iterator pit;
for ( pit = this->players->begin(); pit != this->players->end(); pit++ ) {
    delete (*pit);
}
this->players->clear();
// look at pair colors and determine player number
for (int i = 0; i < MAX_N_LIGHTS; i++) {
    if (pairs[i] >= 0) {
        //printf("%d ___ %d\n",pairs[i], pairs[i]);
        int color1 = cPts[i].color;
        int color2 = cPts[pairs[i]].color;
        //write a function to choose the player
        int playerIdx = getPlayerIndex(color1, color2);
        std::ostringstream labelStream;
        labelStream << playerIdx;
        /*if ((color1 == 0 && color2 == 2) || (color2 == 0 && color1 == 2)) //red and blue
        {
            label = "1";
        }
        else if ((color1 == 0 && color2 == 1) || (color2 == 0 && color1 == 1)) //red and green
        {
            label = "2";
        }*/
        // Midpoint of the light pair — where the player label is drawn.
        double avX = (cPts[i].x + cPts[pairs[i]].x)/2;
        double avY = (cPts[i].y + cPts[pairs[i]].y)/2;
        CvFont font;
        cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 1.7f, 1.7f, 0, 1, CV_AA);
        cvPutText(this->output_buffer, labelStream.str().c_str(), cvPoint(avX, avY), &font, cvScalar(255, 255, 255, 0));
        /*moDataGenericContainer *player = new moDataGenericContainer();
        player->properties["implements"] = new moProperty("pos");
        player->properties["x"] = new moProperty(avX / src->width);
        player->properties["y"] = new moProperty(avY / src->height);
        player->properties["blob_id"] = new moProperty(playerIdx);
        std::string implements = player->properties["implements"]->asString();
        // Indicate that the blob has been tracked, i.e. blob_id exists.
        implements += ",tracked";
        player->properties["implements"]->set(implements);
        this->players->push_back(player);*/
        //->properties["blob_id"]->set(old_id);
    }
}
//Add in some fake players, so I don't have to have the lights out to test the connection.
// Debug-only: two synthetic players oscillate horizontally (2*3.14 is an
// approximation of 2*pi; period = 200 frames), emitted on even frames only.
double debugX = .5 + .25 * sin(2*3.14 * frameCounter / 200);
if (frameCounter % 2 == 0) {
    moDataGenericContainer *player = new moDataGenericContainer();
    player->properties["implements"] = new moProperty("pos");
    player->properties["x"] = new moProperty(debugX);
    player->properties["y"] = new moProperty(.75);
    player->properties["blob_id"] = new moProperty(0);
    std::string implements = player->properties["implements"]->asString();
    // Indicate that the blob has been tracked, i.e. blob_id exists.
    implements += ",tracked";
    player->properties["implements"]->set(implements);
    // Second fake player mirrors the first about x = 0.5.
    moDataGenericContainer *player2 = new moDataGenericContainer();
    player2->properties["implements"] = new moProperty("pos");
    player2->properties["x"] = new moProperty(1 - debugX);
    player2->properties["y"] = new moProperty(.75);
    player2->properties["blob_id"] = new moProperty(1);
    player2->properties["implements"]->set(implements);
    this->players->push_back(player);
    this->players->push_back(player2);
}
// Advance the debug animation clock (wraps with the sine period above).
frameCounter = (frameCounter + 1) % 200;
// Publish the player list downstream.
this->output_data->push(this->players);
}