// Track the object through one frame: classify candidate windows inside the
// search radius, move _curState to the highest-scoring window, then update the
// classifier online with fresh positive/negative samples drawn around the new
// location. Returns the classifier response at the chosen location.
//
// Side effects: frame gets an integral image computed and freed; framedisp is
// overwritten with an RGB copy of frame plus the tracked box (and, in debug
// mode, the sampled negative points); _curState[0..1] (x,y) are updated; _cnt
// is incremented.
//
// NOTE(review): the static locals make this function non-reentrant and unsafe
// to share across tracker instances/threads — presumably a deliberate
// allocation-reuse optimization; confirm single-threaded use.
double SimpleTracker::track_frame(Matrixu &frame, Matrixu &framedisp)
{
	// scratch buffers kept static so their capacity is reused across frames
	static SampleSet posx, negx, detectx;
	static vectorf prob;
	static vectori order;  // NOTE(review): never used in this function
	static Matrixu *img;
	double resp;

	// copy a color version into framedisp (this is where we will draw a colored box around the object for output)
	frame.conv2RGB(framedisp);

	img = &frame;
	// integral image is required by the Haar-style features used below;
	// it is released again at the end of this function (FreeII)
	frame.initII();

	// run current clf on search window
	detectx.sampleImage(img,(uint)_curState[0],(uint)_curState[1],(uint)_curState[2],(uint)_curState[3], (float)_trparams._srchwinsz);
	prob = _clf->classify(detectx,_trparams._useLogR);

	/////// DEBUG /////// display actual probability map
	if( _trparams._debugv ){
		Matrixf probimg(frame.rows(),frame.cols());
		for( uint k=0; k<(uint)detectx.size(); k++ )
			probimg(detectx[k]._row, detectx[k]._col) = prob[k];
		probimg.convert2img().display(2,2);
		cvWaitKey(1);
	}

	// find best location: tracker state keeps (x=col, y=row); size stays fixed
	int bestind = max_idx(prob);
	resp=prob[bestind];
	_curState[1] = (float)detectx[bestind]._row;
	_curState[0] = (float)detectx[bestind]._col;

	// train location clf (negx are randomly selected from image, posx is just the current tracker location)
	if( _trparams._negsamplestrat == 0 )
		// strategy 0: negatives drawn uniformly from the whole image
		negx.sampleImage(img, _trparams._negnumtrain, (int)_curState[2], (int)_curState[3]);
	else
		// otherwise: negatives from an annulus around the object —
		// outer radius 1.5x the search window, inner radius just beyond
		// the positive-sample radius so pos/neg sets don't overlap
		negx.sampleImage(img, (int)_curState[0], (int)_curState[1], (int)_curState[2], (int)_curState[3], (1.5f*_trparams._srchwinsz), _trparams._posradtrain+5, _trparams._negnumtrain);

	if( _trparams._posradtrain == 1 )
		// radius 1 degenerates to the single current location
		posx.push_back(img, (int)_curState[0], (int)_curState[1], (int)_curState[2], (int)_curState[3]);
	else
		posx.sampleImage(img, (int)_curState[0], (int)_curState[1], (int)_curState[2], (int)_curState[3], _trparams._posradtrain, 0, _trparams._posmaxtrain);

	// online update of the classifier with this frame's samples
	_clf->update(posx,negx);

	/////// DEBUG /////// display sampled negative points
	if( _trparams._debugv ){
		for( int j=0; j<negx.size(); j++ )
			framedisp.drawEllipse(1,1,(float)negx[j]._col,(float)negx[j]._row,1,255,0,255);
	}

	// clean up: release the integral image and empty (but keep capacity of)
	// the static sample sets for the next frame
	img->FreeII();
	posx.clear(); negx.clear(); detectx.clear();

	// draw a colored box around object
	framedisp.drawRect(_curState[2], _curState[3], _curState[0], _curState[1], 1, 0, _trparams._lineWidth, _trparams._boxcolor[0], _trparams._boxcolor[1], _trparams._boxcolor[2] );

	_cnt++;

	return resp;
}
bool Tracker::initFace(TrackerParams* params, Matrixu &frame) { const char* cascade_name = "haarcascade_frontalface_alt_tree.xml"; const int minsz = 20; if( Tracker::facecascade == NULL ) Tracker::facecascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 ); frame.createIpl(); IplImage *img = frame.getIpl(); IplImage* gray = cvCreateImage( cvSize(img->width, img->height), IPL_DEPTH_8U, 1 ); cvCvtColor(img, gray, CV_BGR2GRAY ); frame.freeIpl(); cvEqualizeHist(gray, gray); CvMemStorage* storage = cvCreateMemStorage(0); cvClearMemStorage(storage); CvSeq* faces = cvHaarDetectObjects(gray, Tracker::facecascade, storage, 1.05, 3, CV_HAAR_DO_CANNY_PRUNING ,cvSize(minsz, minsz)); int index = faces->total-1; CvRect* r = (CvRect*)cvGetSeqElem( faces, index ); while(r && (r->width<minsz || r->height<minsz || (r->y+r->height+10)>frame.rows() || (r->x+r->width)>frame.cols() || r->y<0 || r->x<0)){ r = (CvRect*)cvGetSeqElem( faces, --index); } //if( r == NULL ){ // cout << "ERROR: no face" << endl; // return false; //} //else // cout << "Face Found: " << r->x << " " << r->y << " " << r->width << " " << r->height << endl; if( r==NULL ) return false; //fprintf(stderr,"x=%f y=%f xmax=%f ymax=%f imgw=%f imgh=%f\n",(float)r->x,(float)r->y,(float)r->x+r->width,(float)r->y+r->height,(float)frame.cols(),(float)frame.rows()); params->_initstate.resize(4); params->_initstate[0] = (float)r->x;// - r->width; params->_initstate[1] = (float)r->y;// - r->height; params->_initstate[2] = (float)r->width; params->_initstate[3] = (float)r->height+10; return true; }
/**************************************************************** Tracker::InitializeWithFace Initialize the tracker with opencv's face detector. Exceptions: None ****************************************************************/ bool Tracker::InitializeWithFace( TrackerParameters* params, Matrixu& frame ) { const int minsz = 20; //Get the name of the haar-cascade .xml file const char* cascade_name = HAAR_CASCADE_FILE_NAME; ASSERT_TRUE( cascade_name != NULL ); //Load the cascade if ( Tracker::s_faceCascade == NULL ) { Tracker::s_faceCascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 ); } frame.createIpl(); IplImage* img = frame.getIpl(); ASSERT_TRUE( img != NULL ); //convert to grayscale IplImage* gray = cvCreateImage( cvSize(img->width, img->height), IPL_DEPTH_8U, 1 ); ASSERT_TRUE( gray != NULL ); cvCvtColor(img, gray, CV_BGR2GRAY ); frame.freeIpl(); //histogram equalization cvEqualizeHist( gray, gray ); //memory storage CvMemStorage* storage = cvCreateMemStorage(0); cvClearMemStorage(storage); //call opencv's haar feature based detector CvSeq* faces = cvHaarDetectObjects( gray, Tracker::s_faceCascade, storage, 1.05, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize( minsz, minsz ) ); int index = faces->total-1; CvRect* r = (CvRect*)cvGetSeqElem( faces, index ); if ( r == NULL ) { return false; } while ( r && (r->width<minsz || r->height < minsz || (r->y+r->height+10)>frame.rows() || (r->x+r->width)>frame.cols() || r->y<0 || r->x<0) ) { r = (CvRect*)cvGetSeqElem( faces, --index ); } //set the params params->m_initState.resize(4); params->m_initState[0] = (float)r->x; params->m_initState[1] = (float)r->y; params->m_initState[2] = (float)r->width; params->m_initState[3] = (float)r->height+10; return true; }