int main(int argc, char * argv[]){
  VideoCapture capture;
  capture.open(0);
  FileStorage fs;
  //Read options
  read_options(argc,argv,capture,fs);
  //Init camera
  if (!capture.isOpened()) {
    cout << "capture device failed to open!" << endl;
    return 1;
  }
  //Register mouse callback to draw the bounding box
  cvNamedWindow("TLD",CV_WINDOW_AUTOSIZE);
  cvSetMouseCallback( "TLD", mouseHandler, NULL );
  //TLD framework
  TLD tld;
  //Read parameters file
  tld.read(fs.getFirstTopLevelNode());
  Mat frame;
  Mat last_gray;
  Mat first;
  if (fromfile){
    capture >> frame;
    cvtColor(frame, last_gray, CV_RGB2GRAY);
    frame.copyTo(first);
  }else{
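Every variant in this section calls a read_options(argc, argv, capture, fs) helper that is not reproduced here. Below is a minimal sketch of what it might look like, assuming the -p (parameters file), -s (source video) and -b (initial bounding-box file) flags that appear in the hard-coded command strings further down; the globals fromfile and video and the readBB() helper are taken from how the later variants use them, not from code shown here.

// Hedged sketch only: a plausible read_options, assuming the same headers and
// globals (fromfile, video, readBB) as the surrounding file.
void read_options(int argc, char** argv, VideoCapture& capture, FileStorage& fs){
  for (int i = 0; i < argc; i++){
    if (strcmp(argv[i], "-b") == 0 && i + 1 < argc)   // initial bounding-box file
      readBB(argv[i + 1]);
    if (strcmp(argv[i], "-s") == 0 && i + 1 < argc){  // source video file
      video = string(argv[i + 1]);
      capture.open(video);
      fromfile = true;
    }
    if (strcmp(argv[i], "-p") == 0 && i + 1 < argc)   // parameters.yml
      fs.open(argv[i + 1], FileStorage::READ);
  }
}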
int main(int argc, char * argv[]){
  //create a videoInput object
  //videoInput VI;
  //Prints out a list of available devices and returns num of devices found
  //int numDevices = VI.listDevices();
  int device = 0; //this could be any deviceID that shows up in listDevices
  VideoCapture capture;
  //capture.open(0);
  //capture.open("e:\\imgs\\girl\\output.avi");
  FileStorage fs;
  //Read options
  read_options(argc,argv,capture,fs);
  int width, height;
  unsigned char * framePixels;
  //Init camera
  if(!fromfile) {
    if(!capture.isOpened()) {
      cout << "capture device failed to open!" << endl;
      return 1;
    }
    //The requested width and height cannot always be accommodated,
    //so make sure to check the size once the device is set up
    width = 340;
    height = 240;
    int size = width*height;
    framePixels = new unsigned char[size];
  } else if(!capture.isOpened()) {
    cout << "capture device failed to open!" << endl;
    return 1;
  }
  //Register mouse callback to draw the bounding box
  cvNamedWindow("TLD_LOVE",CV_WINDOW_AUTOSIZE);
  cvSetMouseCallback( "TLD_LOVE", mouseHandler, NULL );
  //TLD framework
  TLD tld;
  //Read parameters file
  tld.read(fs.getFirstTopLevelNode());
  Mat frame;
  Mat last_gray;
  Mat first;
  if (fromfile){
    capture >> frame;
    cvtColor(frame, last_gray, CV_RGB2GRAY);
    frame.copyTo(first);
  } else {
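This variant keeps the videoInput calls commented out and sizes framePixels by hand, while the variant further down uses VI.getSize(device) instead. A minimal sketch of how such a buffer is typically filled and wrapped in a cv::Mat, assuming the videoInput getPixels overload that copies into a caller-supplied buffer and a 24-bit RGB frame (so the buffer would need width*height*3 bytes, as in the later variant):

// Hedged sketch: filling framePixels from videoInput and viewing it as a Mat.
// Assumes VI.setupDevice(device, ...) succeeded and the buffer was sized with
// VI.getSize(device), as in the variant near the end of this section.
while (!VI.isFrameNew(device));                  // wait for a fresh frame
VI.getPixels(device, framePixels, false, true);  // copy pixels into our buffer
Mat frame(height, width, CV_8UC3, framePixels);  // wrap the buffer, no copy
cvtColor(frame, last_gray, CV_RGB2GRAY);         // grayscale for the tracker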
//Initialize on the first frame, then process frame by frame
int main(int argc, char * argv[]){
  VideoCapture capture;
  capture.open(0);
  //In OpenCV's C++ interface, imwrite can only store integer data and only in image formats;
  //floating-point data and XML/YAML files are handled through the FileStorage structure.
  FileStorage fs;
  //Read options
  read_options(argc,argv,capture,fs); //parse the command-line arguments
  //Init camera
  if (!capture.isOpened()) {
    cout << "capture device failed to open!" << endl;
    return 1;
  }
  //Register mouse callback to draw the bounding box
  cvNamedWindow("TLD",CV_WINDOW_AUTOSIZE);
  /*
   * void cvSetMouseCallback( const char* window_name, CvMouseCallback on_mouse, void* param=NULL );
   * window_name: name of the window.
   * on_mouse: pointer to the function called on every mouse event in that window.
   *           Its prototype should be void Foo(int event, int x, int y, int flags, void* param);
   */
  cvSetMouseCallback( "TLD", mouseHandler, NULL ); //select the initial target bounding box with the mouse
  //TLD framework
  TLD tld;
  //Read parameters file
  tld.read(fs.getFirstTopLevelNode());
  Mat frame;
  Mat last_gray;
  Mat first;
  if (fromfile){                             //reading from a file
    capture >> frame;                        //grab the current frame
    cvtColor(frame, last_gray, CV_RGB2GRAY); //convert to grayscale
    frame.copyTo(first);                     //keep a copy as the first frame
  }else{
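The comment above spells out the prototype that cvSetMouseCallback expects. A minimal sketch of the mouseHandler that every variant registers, assuming the globals box, drawing_box and gotBB that the later variants reset (e.g. box = Rect(0,0,0,0); gotBB = false;), could look like this:

// Hedged sketch: a plausible mouseHandler matching the prototype above,
// assuming globals `box`, `drawing_box` and `gotBB` used elsewhere in this section.
void mouseHandler(int event, int x, int y, int flags, void* param){
  switch (event){
  case CV_EVENT_MOUSEMOVE:                 // grow the box while dragging
    if (drawing_box){
      box.width  = x - box.x;
      box.height = y - box.y;
    }
    break;
  case CV_EVENT_LBUTTONDOWN:               // start a new box at the click point
    drawing_box = true;
    box = Rect(x, y, 0, 0);
    break;
  case CV_EVENT_LBUTTONUP:                 // finish: normalize and flag completion
    drawing_box = false;
    if (box.width < 0){ box.x += box.width;  box.width  *= -1; }
    if (box.height < 0){ box.y += box.height; box.height *= -1; }
    gotBB = true;
    break;
  }
}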
int main(int argc, char* argv[]) {
  int ret = 0;
  TLD tld;
  ret = tld.read("parameters.dat");
  if(ret < 0){
    cerr << "tld.read error" << endl;
    exit(-1);
  }
  unsigned char* frame;
  frame = new unsigned char[IMAGE_ROWS*IMAGE_COLS];
  fstream videoIn("car.raw", ios::in|ios::binary);
  if(!videoIn){
    cerr << "open video file error" << endl;
    exit(-1);
  }
  videoIn.read((char*)frame, IMAGE_ROWS*IMAGE_COLS);
  BoundingBox box;
  fstream boxfile("box.dat", ios::in);
  if(!boxfile){
    cerr << "open box file error" << endl;
    exit(-1);
  }
  boxfile >> box.x;
  boxfile >> box.y;
  boxfile >> box.width;
  boxfile >> box.height;
  box.width  -= box.x;
  box.height -= box.y;
  box.overlap = 0.0;
  box.sidx = -1;
  tld.init(frame, &box);
  return 0;
}
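This variant reads four numbers from box.dat and then subtracts x and y from the last two, so the file is evidently stored as two corner points rather than as x/y/width/height. A minimal standalone illustration of that conversion, with invented example numbers:

// Hedged illustration: box.dat is assumed to hold "x1 y1 x2 y2"; the numbers are made up.
#include <fstream>
#include <iostream>
int main(){
  std::ifstream boxfile("box.dat");   // e.g. a file containing: 142 125 232 164
  int x1, y1, x2, y2;
  boxfile >> x1 >> y1 >> x2 >> y2;
  int width  = x2 - x1;               // same conversion as in the variant above
  int height = y2 - y1;
  std::cout << x1 << " " << y1 << " " << width << " " << height << std::endl;
  return 0;
}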
int main(int argc, char * argv[]) {
  VideoCapture capture;
  if(argc>1) {
    capture.open(argv[1][0]-'0');
  } else {
    capture.open(0);
  }
  FileStorage fs;
  fs.open("parameters.yml", FileStorage::READ);
  if(!capture.isOpened()) {
    cout << "Cam Invalid" << endl;
    return -1;
  }
  //serial initialize
#ifdef SERIAL_PORT
  Serial serial;
  unsigned char sendBuff[10];
  if(serial.init()!=-1) {
    cout << "serial init success" << endl;
  } else {
    cout << "serial init failed, please check the interface" << endl;
    return -1;
  }
#endif
  //set capture size
  capture.set(CV_CAP_PROP_FRAME_WIDTH,340);
  capture.set(CV_CAP_PROP_FRAME_HEIGHT,240);
  namedWindow("Intelisu",CV_WINDOW_NORMAL); //CV_WINDOW_AUTOSIZE
  setMouseCallback( "Intelisu", mouseHandler, NULL );
  TLD tld;
  tld.read(fs.getFirstTopLevelNode());
  Mat frame;
  Mat last_gray;
  Mat first;
  capture >> frame; //test image acquisition
  cout << frame.size() << endl;
  //acquire the object patch
  // while(!gotBB)
  // {
  //   if (!fromfile){
  //     capture >> frame;
  //   }
  //   else
  //     first.copyTo(frame);
  //   cvtColor(frame, last_gray, CV_RGB2GRAY);
  //   drawBox(frame,box);
  //   putText(frame,"Waiting for the object...",Point(20,20),FONT_HERSHEY_SIMPLEX,0.6,10,2);
  //   //Mat scaleImage;
  //   //resize(frame,scaleImage,Size(2*round(frame.cols),2*round(frame.rows)));
  //   imshow("Intelisu", frame);
  //   if (cvWaitKey(33) == 'q')
  //     return 0;
  // }
  //motion track
  myupdate_mhi myupdate1;
  Rect result;
  Mat motion = Mat::zeros(frame.rows,frame.cols, CV_8UC1);
  for(;;) {
    capture >> frame;
    IplImage pframe(frame);
    IplImage pmotion(motion);
    myupdate1.init( &pframe, &pmotion, 60 );
    if(myupdate1.update_mhi(result)==0) {
      cout << "x:" << result.x+result.width/2 << " " << "y:" << result.y+result.height/2 << endl;
      rectangle(frame,result, Scalar(255,0,0));
      break;
    }
    //resize(frame,frame,Size(3*round(frame.cols),3*round(frame.rows)));
    static unsigned char safeCount=0;
    putText(frame,"Environment Safe",Point(20,20),FONT_HERSHEY_SIMPLEX,0.7,CV_RGB(0,safeCount,0),2);
    safeCount+=16;
    imshow("Intelisu",frame);
    waitKey(10);
  }
  cvtColor(frame, last_gray, CV_RGB2GRAY);
  //waitKey(-1);
  //Remove callback
  cvSetMouseCallback( "Intelisu", NULL, NULL );
  //open the output file
  FILE *bb_file = fopen("bounding_boxes.txt","w");
  //TLD initialization
  //tld.init(last_gray,box,bb_file);
  tld.init(last_gray,result,bb_file);
  //runtime variables
  Mat current_gray;
  BoundingBox pbox;
  vector<Point2f> pts1;
  vector<Point2f> pts2;
  bool status=true;
  int frames = 1;
  int detections = 1;
  while(capture.read(frame)){
    //<<<<<<<<<<<<<<
    double timeMs=0;
    timeMs=getTickCount();
    //get frame
    cvtColor(frame, current_gray, CV_RGB2GRAY);
    //Process Frame
    tld.processFrame(last_gray,current_gray,pts1,pts2,pbox,status,tl,bb_file);
    //Draw Points
    if (status){
      drawPoints(frame,pts1);
      drawPoints(frame,pts2,Scalar(0,255,0));
      drawBox(frame,pbox);
      detections++;
      //drawWarning
      static unsigned char count=0;
      Rect symbol(Point(0,0),Size(frame.cols,frame.rows));
      rectangle(frame,symbol.tl(),symbol.br(),Scalar(0,0,count),15);
      putText(frame,"WARNING",Point(90,120),FONT_HERSHEY_SIMPLEX,1.2,CV_RGB(count,0,0),2);
      count+=16;
      //attach coordinate
      ostringstream printText;
      Rect detdObj;
      detdObj.x=(pbox.x+pbox.width/2)/2;
      detdObj.y=(pbox.y+pbox.height/2)/2;
      printText << "X:" << detdObj.x << " " << "Y:" << detdObj.y << " " << "Area:" << pbox.width*pbox.height;
      putText(frame,printText.str(),Point(20,40),FONT_HERSHEY_SIMPLEX,0.7,CV_RGB(100,255,0),2);
      //send data through serial port
#ifdef SERIAL_PORT
      static int sendRate=0;
      if((sendRate++)%8==0) {
        sendBuff[0]=0xff;
        sendBuff[1]=detdObj.x;
        sendBuff[2]=detdObj.y;
        sendBuff[3]=0xfe;
        serial.send_data_tty(sendBuff,4);
      }
#endif
    }
    else {
      //drawSafe
      static unsigned char safeCount=0;
      putText(frame,"Environment Safe",Point(20,20),FONT_HERSHEY_SIMPLEX,0.7,CV_RGB(0,safeCount,0),2);
      safeCount+=16;
    }
    //Display
    //<<<<<<<<<<<<<<
    //double timeMs=0;
    //timeMs=getTickCount();
    //resize(frame,frame,Size(3*round(frame.cols),3*round(frame.rows)));
    //timeMs=(getTickCount()-timeMs)/getTickFrequency();
    //cout << "<<<<<<<<<< " << "Time: " << timeMs*1000 << " >>>>>>>>>>" << endl;
    //>>>>>>>>>>>>>>
    imshow("Intelisu", frame);
    //Super Display
    // Mat scaleImage;
    // resize(frame,scaleImage,Size(2*round(frame.cols),2*round(frame.rows)));
    // ostringstream printText;
    // printText << "X:" << pbox.x+pbox.width/2 << " " << "Y:" << pbox.y+pbox.height/2;
    // putText(scaleImage,printText.str(),Point(40,40),FONT_HERSHEY_SIMPLEX,1,CV_RGB(100,255,0),2);
    // imshow("Intelisu", scaleImage);
    //swap points and images
    swap(last_gray,current_gray);
    //clear the tracked points
    pts1.clear();
    pts2.clear();
    frames++;
    //printf("Detection rate: %d/%d\n",detections,frames);
    if ((char)waitKey(1) == 'q')
      break;
    timeMs=(getTickCount()-timeMs)/getTickFrequency();
    cout << "<<<<<<<<<< " << "Time: " << timeMs*1000 << " >>>>>>>>>>" << endl;
  }
  fclose(bb_file);
  return 0;
}
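The SERIAL_PORT branch above frames each target coordinate as four bytes, 0xff, x, y, 0xfe, and only sends on every eighth detection. A minimal sketch of how a receiver might parse that frame, assuming the same start/stop bytes; the Serial class and send_data_tty belong to this project and are not reproduced here, and readByte() is a placeholder for whatever UART read the receiver uses:

// Hedged sketch: parsing the 4-byte frame [0xff, x, y, 0xfe] produced above.
#include <cstdint>
bool parseFrame(uint8_t (*readByte)(), uint8_t& x, uint8_t& y){
  if (readByte() != 0xff) return false;  // wait for the start byte
  x = readByte();                        // target centre x (already halved by the sender)
  y = readByte();                        // target centre y
  return readByte() == 0xfe;             // accept only if the stop byte matches
}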
int main(int argc, char * argv[]){
  cvReleaseHaarClassifierCascade(&cascade);
  cascade = (CvHaarClassifierCascade*)cvLoad(cascade_name, 0, 0, 0);
  storage = cvCreateMemStorage(0);
  //NSST init
  HBManager nManager;
  nManager.initHB();
  //create a videoInput object
  videoInput VI;
  //Prints out a list of available devices and returns num of devices found
  int numDevices = VI.listDevices();
  int device = 0; //this could be any deviceID that shows up in listDevices
  VideoCapture capture;
  FileStorage fs;
  //Read options
  read_options(argc, argv, capture, fs);
  int width, height;
  unsigned char * framePixels = NULL;
  //Init camera
  if (!fromfile) {
    if (!VI.setupDevice(device, 320, 240)) {
      cout << "capture device failed to open!" << endl;
      return 1;
    }
    //The requested width and height cannot always be accommodated,
    //so make sure to check the size once the device is set up
    width = VI.getWidth(device);
    height = VI.getHeight(device);
    int size = VI.getSize(device);
    framePixels = new unsigned char[size];
  } else if (!capture.isOpened()) {
    cout << "capture device failed to open!" << endl;
    return 1;
  }
  //Register mouse callback to draw the bounding box
  namedWindow("FG Mask MOG 2");
  cvNamedWindow("TLD", CV_WINDOW_AUTOSIZE);
  cvSetMouseCallback("TLD", mouseHandler, NULL);
  //MOG2
  BackgroundSubtractor *pMOG2 = new BackgroundSubtractorMOG2();
  Mat fgMaskMOG2;
  vector<vector<Point>> contours;
  vector<Vec4i> hierarchy;
  Rect r;
  int centerP = 2;
  //HOG
  Mat ROI;
  //cv::HOGDescriptor hog;  //use the default parameters
  //hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());  //use the pre-trained pedestrian detector
  std::vector<cv::Rect> regions;
  //TLD framework
  TLD tld;
  //Read parameters file
  tld.read(fs.getFirstTopLevelNode());
  Mat frame;
  Mat last_gray;
  Mat first;
  Size size;
  if (fromfile){
    capture >> frame;
    cvtColor(frame, last_gray, CV_RGB2GRAY);
    frame.copyTo(first);
  } else {
    while (!VI.isFrameNew(device));
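This variant declares a BackgroundSubtractorMOG2, a foreground mask, contour containers and a Rect r, but the loop that wires them together is cut off above. A minimal sketch, under the old OpenCV 2.x API that the rest of the file uses, of how such a mask is typically turned into candidate rectangles; the 500-pixel area threshold is an invented example value, not taken from this project:

// Hedged sketch (OpenCV 2.x API): foreground mask -> bounding rectangles.
// `pMOG2`, `fgMaskMOG2`, `contours`, `hierarchy` and `frame` are the variables
// declared above; this would sit inside the capture loop.
(*pMOG2)(frame, fgMaskMOG2);                        // update the background model, get the mask
findContours(fgMaskMOG2.clone(), contours, hierarchy,
             CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
for (size_t i = 0; i < contours.size(); i++){
  if (contourArea(contours[i]) < 500) continue;     // drop small blobs (threshold is illustrative)
  Rect r = boundingRect(contours[i]);               // candidate region for the detector
  rectangle(frame, r, Scalar(0, 255, 0));
}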
int main(int argc, char * argv[]){
  int patcharray[6]={15,20,25,30,35};
  int minwind[3]={5,10,15};
  FILE *pfilezp; //=fopen("Record.txt","w");
  FILE *objectf;
  FILE *tablef;
  FILE *patchf;
  time_t start,end;
  double wholecost;
  struct tm *ptr;
  int retry;
  int startFrame=0;
  bool nopoint=true;      //whether to hide the tracked points
  bool drawDec=false;     //whether to draw the detection boxes
  bool cameraAgain=false;
  bool breaknow=false;    //flag used to break out of the outer loop
  bool play=false;        //whether to switch to play mode
  char *test[]={
    "-p parameters.yml -s car.mpg -b car.txt",
    "-p ../parameters.yml -s ../datasets/01_david/david.mpg -b ../datasets/01_david/init.txt",
    "-p ../parameters.yml -s ../datasets/02_jumping/jumping.mpg -b ../datasets/02_jumping/init.txt",
    "-p ../parameters.yml -s ../datasets/03_pedestrian1/pedestrian1.mpg -b ../datasets/03_pedestrian1/init.txt",
    "-p ../parameters.yml -s ../datasets/04_pedestrian2/pedestrian2.mpg -b ../datasets/04_pedestrian2/init.txt",
    "-p ../parameters.yml -s ../datasets/05_pedestrian3/pedestrian3.mpg -b ../datasets/05_pedestrian3/init.txt",
    "-p ../parameters.yml -s ../datasets/06_car/car.mpg -b ../datasets/06_car/init.txt",
    "-p ../parameters.yml -s ../datasets/07_motocross/motocross.mpg -b ../datasets/07_motocross/init.txt",
    //"-p ../parameters.yml -s ../datasets/08_volkswagen/volkswagen.mpg -b ../datasets/08_volkswagen/init.txt",
    "-p ../parameters.yml -s ../datasets/09_carchase/carchase.mpg -b ../datasets/09_carchase/init.txt",
    "-p ../parameters.yml -s ../datasets/10_panda/panda.mpg -b ../datasets/10_panda/init.txt",
    "-p ../parameters.yml -s ../datasets/11_test/test2.avi"};
  char *testt[]={"-p parameters.yml -im data"};
  //,"-p parameters.yml -s car.mpg -b init1.txt",
  //"-p parameters.yml -s test.avi",
  //"-p parameters.yml -s motocross.mpg -b init2.txt"};
  for(int i=0;i<1;i++){
    for (int flag=0;flag<1;flag++)
    //for (int pi=0;pi<15;pi++)
    {
      RNG RNG( int64 seed=-1 );
      double costsum[7]={0.0,0.0,0.0,0.0,0.0,0.0,0.0};
      if(flag==1)
        int tempp=1;
      isImage=false;
      breaknow=false;
      retry=-1;
      patchf=fopen("patchgpu.txt", "at");
      pfilezp=fopen("Record.txt","at");
      tablef=fopen("tableout.txt","at");
      objectf=fopen("objectf.txt", "at");
      drawing_box = false;
      gotBB = false;
      tl = true;
      rep = false;
      fromfile=false;
      start=time(NULL);
      ptr=localtime(&start);
      printf(asctime(ptr));
      fprintf(pfilezp,asctime(ptr));
      wholecost = (double)getTickCount();
      VideoCapture capture;
      //CvCapture* capture;
      capture.open(1);
      //capture = cvCaptureFromCAM( CV_CAP_ANY);
      FileStorage fs;
      //Read options
      string s = test[flag];
      string del = " ";
      char test2[10][100];
      test2[4][0]='0'; //Oddly, test2 keeps the values from the previous iteration, even though
                       //it is declared inside the loop and should be a fresh local each time.
      vector<string> strs = splitEx(s, del);
      for ( unsigned int i = 0; i < strs.size(); i++)
      {
        // cout << strs[i].c_str() << endl;
        // test2[i]=strs[i].c_str();
        strcpy(test2[i],strs[i].c_str());
        //cout<<test2[i]<<endl;
      }
      //int tp=strs.size();
      char *p[10];
      char **test3;  //not entirely sure what this is doing...
      for(int i=0;i<10;i++)
        p[i]=test2[i];
      test3=p;
      read_options(10,test3,capture,fs);
      // video = string(argv[1]);    //target video; in the experiments these three lines were the actual input
      // capture.open(video);
      // readBB(argv[2]);            //initial target box
      // read_options(argc,argv,capture,fs);
      if(startFrame>0)  //the r key was pressed, so the box has to be re-selected manually
      {
        box = Rect( 0, 0, 0, 0 );
        gotBB=false;
      }
      // read_options(argc,argv,capture,fs);
      //Init camera
      if (!capture.isOpened()&&!isImage)  //the video cannot be opened and it is not an image sequence
      {
        cout << "capture device failed to open!" << endl;
        return 1;
      }
      //Register mouse callback to draw the bounding box
      cvNamedWindow("TLD",CV_WINDOW_AUTOSIZE);
      cvSetMouseCallback("TLD", mouseHandler, NULL);
      //TLD framework
      TLD tld;
      //Read parameters file
      tld.read(fs.getFirstTopLevelNode());
      // tld.patch_size=atoi(argv[3]);
      // tld.min_win=atoi(argv[4]);
      Mat frame;
      Mat last_gray;
      Mat first;
      if(fromCa)
        fromfile=false;
      fromCa=false;
      if (fromfile){
        if(!isImage){
          // capture >> frame;
          totalFrameNumber = capture.get(CV_CAP_PROP_FRAME_COUNT);
          cout << "The video has " << totalFrameNumber << " frames in total" << endl;
          // capture.set( CV_CAP_PROP_POS_FRAMES,0);  //seems to have no effect
          for(int i=0;i<=startFrame;i++){
            capture.read(frame);
          }
          cvtColor(frame, last_gray, CV_RGB2GRAY);
          frame.copyTo(first);
        }
        else{
          totalFrameNumber = listCount;
          cout << "The image sequence has " << listCount << " frames in total" << endl;
          // capture.set( CV_CAP_PROP_POS_FRAMES,0);  //seems to have no effect
          frame=imread(imageList[startFrame+2]);
          cvtColor(frame, last_gray, CV_RGB2GRAY);
          frame.copyTo(first);
        }
      }else{
        capture.set(CV_CAP_PROP_FRAME_WIDTH,340);
        capture.set(CV_CAP_PROP_FRAME_HEIGHT,240);
      }
      ///Initialization
GETBOUNDINGBOX:
      while(!gotBB)
      {
        if (!fromfile){
          capture >> frame;
        }
        else
          first.copyTo(frame);
        cvtColor(frame, last_gray, CV_RGB2GRAY);
        drawBox(frame,box);
        imshow("TLD", frame);
        int cw=cvWaitKey(1);
        if (cw == 'q')
          return 0;
        if(cw=='p')
        {
          play=true;
          box=Rect( 0, 0, 15, 15 );
          break;
        }
      }
      if (min(box.width,box.height)<(int)fs.getFirstTopLevelNode()["min_win"]){
        cout << "Bounding box too small, try again." << endl;
        gotBB = false;
        goto GETBOUNDINGBOX;
      }
      //Remove callback
      cvSetMouseCallback( "TLD", NULL, NULL );
      printf("Initial Bounding Box = x:%d y:%d h:%d w:%d\n",box.x,box.y,box.width,box.height);
      //Output file
      FILE *bb_file = fopen("bounding_boxes.txt","w");
      //fprintf(tablef,"%s\n",test2[3]);
      //TLD initialization
      tld.initNcc();
      tld.init(last_gray,box,bb_file);
      tld.initGpu(last_gray);
      ///Run-time
      Mat current_gray;
      BoundingBox pbox;
      vector<Point2f> pts1;
      vector<Point2f> pts2;
      bool status=true;
      int frames = 1;
      int detections = 1;
      int flagg=startFrame;  //index of the current frame
      // pfilezp=fopen("Record.txt","w");
REPEAT:
      // capture.set( CV_CAP_PROP_POS_FRAMES,startFrame);
      while((!isImage&&capture.read(frame))||(isImage)){
        if(isImage){
          frame=imread(imageList[startFrame++]);
          if(startFrame>listCount-1){
            box=Rect( 0, 0, 0, 0 );
            break;
          }
        }
        flagg++;
        double cost = (double)getTickCount();
        //get frame
        cvtColor(frame, current_gray, CV_RGB2GRAY);
        //Process Frame
        if(!play)
          tld.processFrame(last_gray,current_gray,pts1,pts2,pbox,status,tl,bb_file,tablef,costsum,objectf);
        //Draw Points
        if (status&&!play){
          if(!nopoint){
            drawPoints(frame,pts1);
            drawPoints(frame,pts2,Scalar(0,255,0));
          }
          drawBox(frame,pbox,Scalar(255,255,255),2);
          detections++;
        }
        if(drawDec){
          // for(int j=0;j<tld.dt.bb.size();j++)
          //   drawBox(frame,tld.grid[tld.dt.bb[j]]);
          //dbb is drawn here only to produce the figures for the paper: after the
          //outlier-removal step the detections are stored in dbb.
          for(int j=0;j<tld.dbb.size();j++)
            drawBox(frame,tld.dbb[j]);
        }
        //Display
        imshow("TLD", frame);
        //swap points and images
        swap(last_gray,current_gray);
        pts1.clear();
        pts2.clear();
        frames++;
        if(frames==tld.pausenum)
          system("pause");
        printf("Detection rate: %d/%d\n",detections,frames);
        if(frames==totalFrameNumber)
          tld.islast=true;
        cost=getTickCount()-cost;
        printf("--------------------------------process cost %gms\n", cost*1000/getTickFrequency());
        int c = waitKey(1);
        //int c= 'm';
        if(cameraAgain)
          c='c';  //if r was pressed while in camera mode, control comes back here
        switch(c){
          case 'n':  //move on to the next test video
          {
            // retry==1;
            breaknow=true;
            gotBB=false;
            box = Rect(0, 0, 0, 0);
            break;
          }
          case 'q':{
            tld.endGpu();
            return 0;
          }
          case 'r':  //re-select the target box manually on the current frame
          {
            if(fromfile)
            {
              if(play)
                startFrame=flagg;
              else
                startFrame=flagg-1;
              play=false;
              flag--;
              retry=1;
              breaknow=true;
              break;
            }
            else{  //pressing r in camera mode amounts to reopening camera mode
              cameraAgain=true;
              break;
            }
          }
          case 'x':
          {
            nopoint=!nopoint;
            break;
          }
          case 'd':
          {
            drawDec=!drawDec;
            break;
          }
          case 'p':
          {
            play=!play;
            break;
          }
          case 's':
          {
            //intended to show or hide the console
            //#pragma comment( linker, "/subsystem:\"windows\" /entry:\"mainCRTStartup\"" )
          }
          case 'c':  //switch to camera mode
          {
            cameraAgain=false;
            breaknow=true;
            fromCa=true;  //checked inside read_options: when fromCa is true the file arguments
                          //are ignored, as if only "-p ../parameters.yml" had been passed
            retry=1;
            box=Rect( 0, 0, 0, 0 );
            flag--;  //step the outer loop back one iteration
            break;
          }
        }
        if(breaknow)
          break;
        // if(flagg>=9)
        //   fclose(pfilezp);
      }
      tld.endGpu();
      fprintf(pfilezp,"num=%d %s\n",flag,test2[3]);
      fprintf(pfilezp,"patch_size=%d\n",tld.patch_size);
      fprintf(pfilezp,"USE GPU OR CPU:%s\n",tld.use);
      fprintf(pfilezp,"Initial Bounding Box = x:%d y:%d h:%d w:%d\n",box.x,box.y,box.width,box.height);
      fprintf(pfilezp,"Detection rate: %d/%d\n",detections,frames);
      fprintf(pfilezp,"classifier.pEx: %d classifier.nEx: %d\n",
              tld.classifier.pEx.size(),tld.classifier.nEx.size());
      fclose(bb_file);
      wholecost=(getTickCount()-wholecost);
      end=time(NULL);
      fprintf(pfilezp,"timecost = %gms \n",wholecost*1000/getTickFrequency());
      fprintf(pfilezp,"every frame cost %g ms \n",wholecost*1000/getTickFrequency()/frames);
      fprintf(pfilezp,"%gms %gms %gms\n\n",costsum[0]/frames,costsum[1]/frames,costsum[2]/frames);
      //print, in order: video name / detection rate / patch size / min window / total time per frame /
      //filter1 / filter2 / detect / track / learn / filter1 data-copy time / filter2 data-copy time
      fprintf(patchf,"%s\t %d/%d\t%d\t%d\t%g\t%g\t%g\t%g\t%g\t%g\t%g\t%g\n",
              argv[1],detections,frames,tld.patch_size,tld.min_win,
              wholecost*1000/getTickFrequency()/frames,
              costsum[0]/frames,costsum[1]/frames,costsum[2]/frames,costsum[3]/frames,
              costsum[4]/frames,costsum[5]/frames,costsum[6]/frames);
      //time_t start,end;
      //start=time(NULL);
      ptr=localtime(&start);
      printf(asctime(ptr));
      //fprintf(pfilezp,"timecost2=%f ms\n",difftime(end,start)*1000);
      fclose(pfilezp);
      fclose(tablef);
      fclose(patchf);
      if(retry==1)
      {
        continue;  //do not reset startFrame
      }
      startFrame=0;  //reset startFrame
      if (rep){
        rep = false;
        tl = false;
        fclose(bb_file);
        bb_file = fopen("final_detector.txt","w");
        //capture.set(CV_CAP_PROP_POS_AVI_RATIO,0);
        capture.release();
        capture.open(video);
        goto REPEAT;
      }
    }
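The splitEx(s, del) helper used above to break the hard-coded command strings into argv-style tokens is not shown in this file. A minimal sketch of what it could look like, under the assumption that it splits on the delimiter and drops empty tokens; the real implementation may differ:

// Hedged sketch: a plausible splitEx as used above; not the project's actual helper.
#include <string>
#include <vector>
using namespace std;
vector<string> splitEx(const string& s, const string& del){
  vector<string> out;
  size_t start = 0, pos;
  while ((pos = s.find(del, start)) != string::npos){
    if (pos > start) out.push_back(s.substr(start, pos - start));  // skip empty tokens
    start = pos + del.size();
  }
  if (start < s.size()) out.push_back(s.substr(start));            // trailing token
  return out;
}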