// Feed one frame into the codebook background model and, once learning is
// finished, emit the binarized foreground mask.
//
// videoFrame: current BGR frame; converted into the preallocated yuvFrame.
// Uses the member counters nframes / nframesToLearnBG to sequence the
// learn -> clear-stale -> diff phases of the codebook algorithm.
void CodeBook::updateModel(IplImage* videoFrame)
{
    // The codebook model operates in YCrCb (YUV) space, not BGR.
    cvCvtColor(videoFrame, yuvFrame, CV_BGR2YCrCb); // YUV for codebook method

    // This is where we build our background model.
    if (nframes - 1 < nframesToLearnBG)
        cvBGCodeBookUpdate(model, yuvFrame);

    // Learning just ended: prune codebook entries not seen in the last t/2 frames.
    if (nframes - 1 == nframesToLearnBG)
        cvBGCodeBookClearStale(model, model->t / 2);

    if (nframes - 1 >= nframesToLearnBG)
    {
        // Find foreground by codebook method, then binarize and segment it.
        cvBGCodeBookDiff(model, yuvFrame, mask);
        cvThreshold(mask, mask, 1, 255, CV_THRESH_BINARY);
        cvSegmentFGMask(mask);
        // Publish the result (Qt signal emission; 'emit' expands to nothing,
        // so the braces form an ordinary compound statement).
        emit
        {
            output(mask);
            output(mask, videoFrame);
        }
    }
} // FIX: the original was missing this closing brace for the function body.
// Run one frame of codebook background subtraction.
//
// inputMat: incoming frame (assumed 4-channel RGBA — see note below).
// foreMat:  out-parameter; receives a (shallow) view of the foreground mask.
// Returns foreMat. First call allocates the model's working images; later
// calls run the learn / clear-stale / diff phases based on 'nframes'.
Mat CodeBookBackGround::process(Mat inputMat, Mat &foreMat)
{
    // Work on a copy so the caller's matrix is never touched.
    Mat temp = inputMat.clone();
    // FIX: the original took the address of a temporary (&IplImage(temp)),
    // which is undefined behaviour; bind the conversion to a named header.
    // (A Mat cannot be cast directly to IplImage* — it must go through a header.)
    IplImage tempHeader = temp;
    IplImage *tempRGB = &tempHeader;
    IplImage *rawImage = cvCreateImage(cvGetSize(tempRGB), 8, 3);
    // NOTE(review): CV_RGBA2BGR assumes a 4-channel input — confirm with callers.
    cvCvtColor(tempRGB, rawImage, CV_RGBA2BGR);

    if (nframes == 1 && !inputMat.empty())
    {
        // First frame: allocate the codebook working images.
        yuvImage = cvCloneImage(rawImage);
        ImaskCodeBook = cvCreateImage(cvGetSize(yuvImage), IPL_DEPTH_8U, 1);
        ImaskCodeBookCC = cvCreateImage(cvGetSize(yuvImage), IPL_DEPTH_8U, 1);
        cvSet(ImaskCodeBook, cvScalar(0));
        //cvNamedWindow("Raw", 1);
        // cvNamedWindow("ForegroundCodeBook", 1);
        // cvNamedWindow("CodeBook_ConnectComp", 1);
    }
    else
    {
        // The codebook model operates in YCrCb (YUV) space.
        cvCvtColor(rawImage, yuvImage, CV_BGR2YCrCb);

        // This is where we build our background model: the first
        // nframesToLearnBG frames only update the model.
        if (nframes - 1 < nframesToLearnBG)
            cvBGCodeBookUpdate(model, yuvImage);

        // Learning just ended: prune stale entries (time-based, t/2 cutoff).
        if (nframes - 1 == nframesToLearnBG)
            cvBGCodeBookClearStale(model, model->t / 2);

        // Find the foreground, if any.
        if (nframes - 1 >= nframesToLearnBG)
        {
            // Mark pixels that fall outside the learned codebook ranges.
            cvBGCodeBookDiff(model, yuvImage, ImaskCodeBook);
            cvCopy(ImaskCodeBook, ImaskCodeBookCC);
            cvSegmentFGMask(ImaskCodeBookCC); // connected-component segmentation
        }
        //Display
        // cvShowImage("Raw", rawImage);
        // cvShowImage("ForegroundCodeBook", ImaskCodeBook);
        // cvShowImage("CodeBook_ConnectComp", ImaskCodeBookCC);
    }

    // Shallow wrap of the persistent member image (no pixel copy).
    foreMat = ImaskCodeBook;
    ++nframes;
    // FIX: rawImage was allocated on every call and never released (leak).
    // yuvImage above was created with cvCloneImage, so releasing rawImage is safe.
    cvReleaseImage(&rawImage);
    return foreMat;
}
/** Member function to subtract background.
 *
 * rawImage: current BGR frame. First call allocates the codebook working
 * images; subsequent calls learn the model for nframesToLearnBG frames and
 * then produce a segmented foreground mask which is handed to diff_pix().
 * Honors the pause1 / singlestep members for interactive stepping.
 **/
void BackgroundSubtraction::getSubtractedBackground(IplImage *rawImage)
{
    if (!pause1)
        ++nframes;
    if (singlestep)
        pause1 = true;

    /** For first iteration: codebook method allocation **/
    if (nframes == 1)
    {
        std::cout << "I am here" << std::endl;
        yuvImage = cvCloneImage(rawImage);
        ImaskCodeBook = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
        ImaskCodeBookCC = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
        cvSet(ImaskCodeBook, cvScalar(255));
    }

    /** YUV for codebook method **/
    cvCvtColor(rawImage, yuvImage, CV_BGR2YCrCb);

    /** This is where we build our background model: update it for the
        first nframesToLearnBG frames (unless paused). **/
    if (!pause1 && nframes - 1 < nframesToLearnBG)
        cvBGCodeBookUpdate(model, yuvImage);

    /** FIX: clear stale entries with an independent 'if' BEFORE the '>='
        test. The original chained this as 'else if' AFTER the '>=' branch,
        so when nframes-1 == nframesToLearnBG the '>=' branch fired first
        and cvBGCodeBookClearStale was unreachable — stale codebook entries
        were never pruned. **/
    if (nframes - 1 == nframesToLearnBG)
        cvBGCodeBookClearStale(model, model->t / 2);

    /** Find the foreground if any **/
    if (nframes - 1 >= nframesToLearnBG)
    {
        /** Find foreground by codebook method **/
        cvBGCodeBookDiff(model, yuvImage, ImaskCodeBook);
        /** This part just to visualize bounding boxes and centers if desired **/
        cvCopy(ImaskCodeBook, ImaskCodeBookCC);
        cvSegmentFGMask(ImaskCodeBookCC);
        /** Convert the obtained grayscale mask to a colored image. **/
        diff_pix(ImaskCodeBookCC, rawImage);
    }

    /** Display the output image **/
    std::cout << "Showing image" << std::endl;
    cvShowImage("Raw", rawImage);
}
int main(int argc, char* argv[]) { long frameToLearn = 16; cvNamedWindow("result", 1); cvNamedWindow("bg", 1); #if 1 CvBGCodeBookModel* model = cvCreateBGCodeBookModel(); //Set color thresholds to default values model->modMin[0] = 3; model->modMin[1] = model->modMin[2] = 3; model->modMax[0] = 10; model->modMax[1] = model->modMax[2] = 10; model->cbBounds[0] = model->cbBounds[1] = model->cbBounds[2] = 10; CvCapture* capture = cvCreateFileCapture(argv[1]); if(3 == argc) { frameToLearn = atol(argv[2]); } IplImage *rawImage = 0, *yuvImage = 0; //yuvImage is for codebook method IplImage *imaskCodeBook = 0; long frameSeq = 0; while(true) { rawImage = cvQueryFrame(capture); if(!rawImage) { std::cout << "null frame" << std::endl; break; } ++frameSeq; std::cout << frameSeq << std::endl; char fnStr[32]; snprintf(fnStr, sizeof(fnStr), "images/%08ld.jpg", frameSeq); cv::Mat mImg = rawImage; char fmStr[64]; blobs.clear(); if(1 == frameSeq) { yuvImage = cvCloneImage(rawImage); imaskCodeBook = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 ); cvSet(imaskCodeBook,cvScalar(255)); } cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );//YUV For codebook method if(frameSeq < frameToLearn) { // learning cvBGCodeBookUpdate(model, yuvImage); snprintf(fmStr, sizeof(fmStr), "%ld : %d", frameSeq, blobs.size()); cv::putText(mImg, fmStr, cv::Point(10, 40), cv::FONT_HERSHEY_SIMPLEX, 1, CV_RGB(255, 0, 0)); cv::Mat mImg = rawImage; cv::imwrite(fnStr, mImg); } else { if(frameSeq == frameToLearn) cvBGCodeBookClearStale( model, model->t/2 ); cvBGCodeBookDiff( model, yuvImage, imaskCodeBook ); ProcessFrame(imaskCodeBook, rawImage, 20, blob_collector); for(std::vector<CvRect>::const_iterator cr = blobs.begin(); cr != blobs.end(); ++cr) { std::cout << cr->x << ", " << cr->y << ", " << cr->width << ", " << cr->height << std::endl; cv::rectangle(mImg, *cr, CV_RGB(0,255,255)); } snprintf(fmStr, sizeof(fmStr), "%ld : %d", frameSeq, blobs.size()); cv::putText(mImg, fmStr, cv::Point(10, 40), 
cv::FONT_HERSHEY_SIMPLEX, 1, CV_RGB(255, 0, 0)); cv::Mat m = imaskCodeBook; cv::imshow("bg", m); cv::imshow("result", mImg); cv::waitKey(0); //cv::imwrite(fnStr, mImg); } } std::cout << "done" << std::endl; cvReleaseImage(&imaskCodeBook); cvReleaseImage(&rawImage); cvReleaseImage(&yuvImage); cvReleaseCapture(&capture); #else cv::Mat m; m = cv::imread(argv[1]); cv::imshow("result", m); cv::waitKey(0); #endif }
// //USAGE: ch9_background startFrameCollection# endFrameCollection# [movie filename, else from camera] //If from AVI, then optionally add HighAvg, LowAvg, HighCB_Y LowCB_Y HighCB_U LowCB_U HighCB_V LowCB_V // int main(int argc, char** argv) { const char* filename = 0; IplImage* rawImage = 0, *yuvImage = 0; //yuvImage is for codebook method IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0; CvCapture* capture = 0; int c, n, nframes = 0; int nframesToLearnBG = 300; model = cvCreateBGCodeBookModel(); //Set color thresholds to default values model->modMin[0] = 3; model->modMin[1] = model->modMin[2] = 3; model->modMax[0] = 10; model->modMax[1] = model->modMax[2] = 10; model->cbBounds[0] = model->cbBounds[1] = model->cbBounds[2] = 10; bool pause = false; bool singlestep = false; for( n = 1; n < argc; n++ ) { static const char* nframesOpt = "--nframes="; if( strncmp(argv[n], nframesOpt, strlen(nframesOpt))==0 ) { if( sscanf(argv[n] + strlen(nframesOpt), "%d", &nframesToLearnBG) == 0 ) { help(); return -1; } } else filename = argv[n]; } if( !filename ) { printf("Capture from camera\n"); capture = cvCaptureFromCAM( 0 ); } else { printf("Capture from file %s\n",filename); capture = cvCreateFileCapture( filename ); } if( !capture ) { printf( "Can not initialize video capturing\n\n" ); help(); return -1; } //MAIN PROCESSING LOOP: for(;;) { if( !pause ) { rawImage = cvQueryFrame( capture ); ++nframes; if(!rawImage) break; } if( singlestep ) pause = true; //First time: if( nframes == 1 && rawImage ) { // CODEBOOK METHOD ALLOCATION yuvImage = cvCloneImage(rawImage); ImaskCodeBook = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 ); ImaskCodeBookCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 ); cvSet(ImaskCodeBook,cvScalar(255)); cvNamedWindow( "Raw", 1 ); cvNamedWindow( "ForegroundCodeBook",1); cvNamedWindow( "CodeBook_ConnectComp",1); } // If we've got an rawImage and are good to go: if( rawImage ) { cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );//YUV For codebook 
method //This is where we build our background model if( !pause && nframes-1 < nframesToLearnBG ) cvBGCodeBookUpdate( model, yuvImage ); if( nframes-1 == nframesToLearnBG ) cvBGCodeBookClearStale( model, model->t/2 ); //Find the foreground if any if( nframes-1 >= nframesToLearnBG ) { // Find foreground by codebook method cvBGCodeBookDiff( model, yuvImage, ImaskCodeBook ); // This part just to visualize bounding boxes and centers if desired cvCopy(ImaskCodeBook,ImaskCodeBookCC); cvSegmentFGMask( ImaskCodeBookCC ); //bwareaopen_(ImaskCodeBookCC,100); cvShowImage( "CodeBook_ConnectComp",ImaskCodeBookCC); detect(ImaskCodeBookCC,rawImage); } //Display cvShowImage( "Raw", rawImage ); cvShowImage( "ForegroundCodeBook",ImaskCodeBook); } // User input: c = cvWaitKey(10)&0xFF; c = tolower(c); // End processing on ESC, q or Q if(c == 27 || c == 'q') break; //Else check for user input switch( c ) { case 'h': help(); break; case 'p': pause = !pause; break; case 's': singlestep = !singlestep; pause = false; break; case 'r': pause = false; singlestep = false; break; case ' ': cvBGCodeBookClearStale( model, 0 ); nframes = 0; break; //CODEBOOK PARAMS case 'y': case '0': case 'u': case '1': case 'v': case '2': case 'a': case '3': case 'b': ch[0] = c == 'y' || c == '0' || c == 'a' || c == '3'; ch[1] = c == 'u' || c == '1' || c == 'a' || c == '3' || c == 'b'; ch[2] = c == 'v' || c == '2' || c == 'a' || c == '3' || c == 'b'; printf("CodeBook YUV Channels active: %d, %d, %d\n", ch[0], ch[1], ch[2] ); break; case 'i': //modify max classification bounds (max bound goes higher) case 'o': //modify max classification bounds (max bound goes lower) case 'k': //modify min classification bounds (min bound goes lower) case 'l': //modify min classification bounds (min bound goes higher) { uchar* ptr = c == 'i' || c == 'o' ? model->modMax : model->modMin; for(n=0; n<NCHANNELS; n++) { if( ch[n] ) { int v = ptr[n] + (c == 'i' || c == 'l' ? 
1 : -1); ptr[n] = CV_CAST_8U(v); } printf("%d,", ptr[n]); } printf(" CodeBook %s Side\n", c == 'i' || c == 'o' ? "High" : "Low" ); } break; } } cvReleaseCapture( &capture ); cvDestroyWindow( "Raw" ); cvDestroyWindow( "ForegroundCodeBook"); cvDestroyWindow( "CodeBook_ConnectComp"); return 0; }
int main(int argc, char** argv) { CvMemStorage* mstrg = cvCreateMemStorage(); CvSeq* contours = 0; CvSeq* contours2 = 0; const char* filename = 0; IplImage* rawImage = 0, *yuvImage = 0, *borde = 0; //yuvImage is for codebook method IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0; CvCapture* capture = 0; int c, n, nframes = 0; int nframesToLearnBG = 300; model = cvCreateBGCodeBookModel(); //Set color thresholds to default values model->modMin[0] = 3; model->modMin[1] = model->modMin[2] = 3; model->modMax[0] = 10; model->modMax[1] = model->modMax[2] = 10; model->cbBounds[0] = model->cbBounds[1] = model->cbBounds[2] = 10; bool pause = false; bool singlestep = false; printf("Capturando de la camara...\n"); capture = cvCaptureFromCAM( 0 ); if( !capture ) { printf( "No se pudo inicializar la captura de video\n\n" ); return -1; } while (true) { rawImage = cvQueryFrame( capture ); ++nframes; if(!rawImage) break; //First time: if( nframes == 1 && rawImage ) { borde = cvLoadImage("Borde.png",0); // CODEBOOK METHOD ALLOCATION yuvImage = cvCloneImage(rawImage); int w = yuvImage->width; cvSetImageROI(yuvImage, cvRect(w-250,0,250,250)); IplImage *tmp = cvCreateImage(cvGetSize(yuvImage),yuvImage->depth,yuvImage->nChannels); cvCopy(yuvImage, tmp, NULL); cvResetImageROI(yuvImage); yuvImage = cvCloneImage(tmp); ImaskCodeBook = cvCreateImage( cvGetSize(yuvImage), IPL_DEPTH_8U, 1 ); ImaskCodeBookCC = cvCreateImage( cvGetSize(yuvImage), IPL_DEPTH_8U, 1 ); cvSet(ImaskCodeBook,cvScalar(255)); cvNamedWindow("CapturaCam",CV_WINDOW_AUTOSIZE); cvNamedWindow( "ForegroundCodeBook",CV_WINDOW_AUTOSIZE); cvNamedWindow( "CodeBook_ConnectComp",CV_WINDOW_AUTOSIZE); printf (">>Aprendiendo fondo\n"); } // If we've got an rawImage and are good to go: if( rawImage ) { cvFlip(rawImage, NULL, 1); int w = rawImage->width; cvFindContours(borde,mstrg,&contours,sizeof(CvContour),CV_RETR_EXTERNAL); //Dibujar contorno cvLine(rawImage, cv::Point (w-250,0), cv::Point (w-250,250), CV_RGB(255,0,0),1, CV_AA, 0) ; 
cvLine(rawImage, cv::Point (w-250,250), cv::Point (w,250), CV_RGB(255,0,0),1, CV_AA, 0) ; // if(nframes - 1 < nframesToLearnBG) { char buffer [33]; _itoa (nframesToLearnBG - nframes,buffer,10); CvFont font2; cvInitFont(&font2, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 3, CV_AA); cvPutText(rawImage, buffer, cvPoint(50, 50), &font2, cvScalar(0, 0, 255, 0)); } cvSetImageROI(rawImage, cvRect(w-250,0,250,250)); IplImage *temp = cvCreateImage(cvGetSize(rawImage),rawImage->depth,rawImage->nChannels); cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb ); //YUV para el metodo del codebook //Construccion del modelo del fondo if(nframes-1 < nframesToLearnBG ) cvBGCodeBookUpdate( model, yuvImage ); if( nframes-1 == nframesToLearnBG ) { cvBGCodeBookClearStale( model, model->t/2 ); printf (">>Fondo aprendido\n"); } //Se encuentran objetos por el metodo de codebook if( nframes-1 >= nframesToLearnBG ) { cvBGCodeBookDiff( model, yuvImage, ImaskCodeBook ); cvCopy(ImaskCodeBook,ImaskCodeBookCC); cvSegmentFGMask( ImaskCodeBookCC ); cvShowImage( "CodeBook_ConnectComp",ImaskCodeBookCC); //deteccion de imagen detect(ImaskCodeBookCC,rawImage); //base para dibujar la mano if(contours) cvDrawContours(rawImage,contours, cvScalar(255, 0, 0, 0), cvScalarAll(128), 1 ); } //Display cvResetImageROI(rawImage); cvShowImage( "CapturaCam", rawImage ); cvShowImage( "ForegroundCodeBook",ImaskCodeBook); } // User input: c = cvWaitKey(10)&0xFF; c = tolower(c); // End processing on ESC, q or Q if(c == 27 || c == 'q') break; //Else check for user input switch( c ) { case 'c': saveLength = true; break; case ' ': cvBGCodeBookClearStale( model, 0 ); nframes = 0; break; } if (c != 'c') saveLength=false; } cvReleaseCapture( &capture ); cvReleaseMemStorage(&mstrg); cvDestroyWindow( "CapturaCam" ); cvDestroyWindow( "ForegroundCodeBook"); cvDestroyWindow( "CodeBook_ConnectComp"); return 0; }
void clsTracking2D::calcCodeBook(IplImage *image, IplImage *foreground, IplImage *debugImage) { if(!BgCbstarted ) { // usleep(1000); startCodeBook(image); BgCbstarted = true; BgCbNrframes = 0; } cvCopy(image,bgfgImage); if(paramsInt["blur"] > 0) cvSmooth(bgfgImage, bgfgImage, CV_BLUR, paramsInt["blur"], paramsInt["blur"], 0, 0); cvCvtColor( bgfgImage, bgfgImage, CV_BGR2YCrCb ); //POR CORES: cvInRangeS(frame_buffer, cvScalar(minH, minS, minL), cvScalar(maxH, maxS, maxL), frame_threshold); try { if( BgCbNrframes-1 < paramsInt["bgCodeBookNFrames"] ) { cvBGCodeBookUpdate( BGCodeBookModel, bgfgImage ); BgCbNrframes++; DEBUGMSG(("adding frames..%d",BgCbNrframes )); } if( BgCbNrframes-1 == paramsInt["bgCodeBookNFrames"] ) { cvBGCodeBookClearStale( BGCodeBookModel, BGCodeBookModel->t/2 ); BgCbNrframes++; readyForParticles = true; } //Find the foreground if any if( BgCbNrframes-1 >= paramsInt["bgCodeBookNFrames"] ) { // Find foreground by codebook method cvBGCodeBookDiff( BGCodeBookModel, bgfgImage, BgCbImaskCodeBook ); // This part just to visualize bounding boxes and centers if desired cvCopy(BgCbImaskCodeBook,BgCbImaskCodeBookCC); cvSegmentFGMask( BgCbImaskCodeBookCC ); // readyTo2Dtrack = true; } } catch(Exception e) { ERRMSG(("EXCEPTION:: trying to restart the BGFGCodeBook")); BgCbstarted = false; } //process erode dilate //removing loose points if(paramsInt["erosion"] > 0) { cvErode(BgCbImaskCodeBook, BgCbImaskCodeBook, NULL,paramsInt["erosion"] ); } //augmenting neighbour points // cvDilate(BgCbImaskCodeBook, BgCbImaskCodeBook, NULL,1); // cvCanny(BgCbImaskCodeBook,BgCbImaskCodeBook,10,100,3); cvCopy(BgCbImaskCodeBook,foreground); if(debugImage != NULL) cvCopy(BgCbImaskCodeBook,debugImage); }