// Load SVM models -- Histogram Intersection Kernel SVM trained by libHIK
double UseSVM_CD_FastEvaluationStructure(const char* modelfile, const int m,
                                         const int upper_bound, Array2dC<double>& result)
{
    std::ifstream fs(modelfile, std::fstream::binary);
    if (!fs.is_open())
    {
        std::cout << "SVM model " << modelfile << " cannot be loaded." << std::endl;
        exit(-1);
    }
    // Header
    int rows, cols, type, channels;
    fs.read((char*)&rows, sizeof(int));     // rows
    fs.read((char*)&cols, sizeof(int));     // cols
    fs.read((char*)&type, sizeof(int));     // type
    fs.read((char*)&channels, sizeof(int)); // channels

    // Data
    cv::Mat mat(rows, cols, type);
    fs.read((char*)mat.data, CV_ELEM_SIZE(type) * rows * cols);

    const int num_dim = m;
    result.Create(num_dim, upper_bound);
    for (int i = 0; i < num_dim; i++)
        for (int j = 0; j < upper_bound; j++)
            result.p[i][j] = mat.at<double>(i, j);

    return -0.00455891; // hard-coded constant returned as the rho value
}
// Load SVM models -- linear SVM trained using LIBLINEAR
double UseSVM_CD_FastEvaluationStructure(const char* modelfile, const int m,
                                         Array2dC<double>& result)
{
    std::ifstream in(modelfile);
    if (!in.good())
    {
        std::cout << "SVM model " << modelfile << " cannot be loaded." << std::endl;
        exit(-1);
    }
    std::string buffer;
    std::getline(in, buffer); // first line
    std::getline(in, buffer); // second line
    std::getline(in, buffer); // third line
    in >> buffer;
    assert(buffer == "nr_feature");
    int num_dim;
    in >> num_dim;
    assert(num_dim > 0 && num_dim == m);
    std::getline(in, buffer); // end of line 4
    in >> buffer;
    assert(buffer == "bias");
    int bias;
    in >> bias;
    std::getline(in, buffer); // end of line 5
    in >> buffer;
    assert(buffer == "w");
    std::getline(in, buffer); // end of line 6
    result.Create(1, num_dim);
    for (int i = 0; i < num_dim; i++)
        in >> result.buf[i];
    double rho = 0;
    if (bias >= 0)
        in >> rho; // the weight of the bias term
    in.close();
    return rho;
}
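// ---------------------------------------------------------------------------
// Hypothetical usage sketch (not part of the original code): how the linear
// loader above might be consumed. 'EvaluateLinearModel_Sketch' and its
// parameters are made-up names for illustration, and Array2dC is the
// project's own container (assumed default-constructible). Assuming the
// LIBLINEAR model was trained with bias set to 1, the decision value is the
// dot product of the weights with the feature vector plus the bias weight
// returned by the loader.
static double EvaluateLinearModel_Sketch(const char* modelfile, const double* x, const int m)
{
    Array2dC<double> w;
    const double rho = UseSVM_CD_FastEvaluationStructure(modelfile, m, w);
    double score = rho; // bias contribution (0 if the model has no bias term)
    for (int i = 0; i < m; i++)
        score += w.buf[i] * x[i];
    return score; // classify as the positive class when score > 0
}
// ---------------------------------------------------------------------------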
// Compute the mean vector and (biased, divide-by-n) covariance matrix of the
// rows of 'data'; if subtractMean is false, the raw second moment matrix
// E[x x^T] is returned instead of the covariance.
void ComputeMeanAndVariance(Array2d<double>& data, Array2dC<double>& avg,
                            Array2d<double>& cov, const bool subtractMean)
{
    avg.Create(1, data.ncol);
    avg.Zero();
    cov.Create(data.ncol, data.ncol);
    cov.Zero();
    for (int i = 0; i < data.nrow; i++)
    {
        for (int j = 0; j < data.ncol; j++)
            avg.buf[j] += data.p[i][j];
        for (int j = 0; j < data.ncol; j++)
            for (int k = 0; k < data.ncol; k++)
                cov.p[j][k] += data.p[i][j] * data.p[i][k];
    }
    const double r = 1.0 / data.nrow;
    for (int i = 0; i < data.ncol; i++)
        avg.buf[i] *= r;
    if (subtractMean)
    {
        for (int i = 0; i < data.ncol; i++)
            for (int j = 0; j < data.ncol; j++)
                cov.p[i][j] = cov.p[i][j] * r - avg.buf[i] * avg.buf[j];
    }
    else
    {
        for (int i = 0; i < data.ncol; i++)
            for (int j = 0; j < data.ncol; j++)
                cov.p[i][j] = cov.p[i][j] * r;
    }
}
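// ---------------------------------------------------------------------------
// Tiny worked example (a sketch, not part of the original code; assumes the
// containers are default-constructible): with the two rows (0,2) and (2,0),
// the mean is (1,1), and with subtractMean==true the biased covariance is
// [[1,-1],[-1,1]], since cov = E[x x^T] - mean*mean^T and E[x0*x1] = 0 here.
static void MeanVarianceExample_Sketch()
{
    Array2d<double> data(2, 2);
    data.p[0][0] = 0; data.p[0][1] = 2;
    data.p[1][0] = 2; data.p[1][1] = 0;
    Array2dC<double> avg;
    Array2d<double> cov;
    ComputeMeanAndVariance(data, avg, cov, true);
    // now avg.buf = {1, 1} and cov.p = {{1, -1}, {-1, 1}}
}
// ---------------------------------------------------------------------------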
// Convenience overload: forwards to the Array2d-based version of this
// function and copies the single row of weights into 'result'
double UseSVM_Linear_FastEvaluationStructure(const svm_model& model, const int m,
                                             Array2dC<double>& result)
{
    Array2d<double> eval(1, m);
    result.Create(1, m);
    double rho = UseSVM_Linear_FastEvaluationStructure(model, m, eval, 0);
    std::copy(eval.p[0], eval.p[0] + m, result.buf);
    return rho;
}
// The function that does the real detection
int DetectionScanner::FastScan(IntImage<double>& original, std::vector<CRect>& results,
                               const int stepsize)
{
    if (original.nrow < height + 5 || original.ncol < width + 5)
        return 0;
    const int hd = height / xdiv;
    const int wd = width / ydiv;
    InitImage(original);
    results.clear();
    hist.Create(1, baseflength * (xdiv - EXT) * (ydiv - EXT));

    NodeDetector* node = cascade->nodes[1];
    double** pc = node->classifier.p;
    int oheight = original.nrow, owidth = original.ncol;
    CRect rect;
    while (image.nrow >= height && image.ncol >= width)
    {
        InitIntegralImages(stepsize);
        for (int i = 2; i + height < image.nrow - 2; i += stepsize)
        {
            const double* sp = scores.p[i];
            for (int j = 2; j + width < image.ncol - 2; j += stepsize)
            {
                if (sp[j] <= 0) continue; // pruned using the pre-computed score map
                int* p = hist.buf;
                hist.Zero();
                // accumulate one histogram of 'ct' feature codes per block of the window
                for (int k = 0; k < xdiv - EXT; k++)
                {
                    for (int t = 0; t < ydiv - EXT; t++)
                    {
                        for (int x = i + k*hd + 1; x < i + (k+1+EXT)*hd - 1; x++)
                        {
                            int* ctp = ct.p[x];
                            for (int y = j + t*wd + 1; y < j + (t+1+EXT)*wd - 1; y++)
                                p[ctp[y]]++;
                        }
                        p += baseflength;
                    }
                }
                // second stage: pure table lookups against the node's classifier
                double score = node->thresh;
                for (int k = 0; k < node->classifier.nrow; k++)
                    score += pc[k][hist.buf[k]];
                if (score > 0)
                {
                    // map the window back to the original image's coordinates
                    rect.top    = i * oheight / image.nrow;
                    rect.bottom = (i + height) * oheight / image.nrow;
                    rect.left   = j * owidth / image.ncol;
                    rect.right  = (j + width) * owidth / image.ncol;
                    results.push_back(rect);
                }
            }
        }
        ResizeImage(); // next level of the scale pyramid
    }
    return 0;
}
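// ---------------------------------------------------------------------------
// Distilled view of the per-window test inside FastScan above (a sketch with
// hypothetical names, not part of the original code): once the per-block
// histograms are accumulated into 'hist', the decision is pure table lookup,
// one row of the node's classifier per histogram bin, starting from the
// node's threshold.
static double ScoreWindow_Sketch(NodeDetector* node, const int* hist, const int nbins)
{
    double score = node->thresh;      // start from the node's threshold
    double** pc = node->classifier.p; // pc[k][v]: lookup value for bin k with count v
    for (int k = 0; k < nbins; k++)
        score += pc[k][hist[k]];      // assumes every count is < the table's upper bound
    return score;                     // the window is kept when score > 0
}
// ---------------------------------------------------------------------------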
void DetectionScanner::LoadDetector(std::vector<NodeDetector::NodeType>& types,
                                    std::vector<int>& upper_bounds,
                                    std::vector<std::string>& filenames)
{
    unsigned int depth = types.size();
    assert(depth > 0 && depth == upper_bounds.size() && depth == filenames.size());
    if (cascade)
        delete cascade;
    cascade = new CascadeDetector;
    assert(xdiv > 0 && ydiv > 0);
    for (unsigned int i = 0; i < depth; i++)
        cascade->AddNode(types[i], (xdiv-EXT)*(ydiv-EXT)*baseflength,
                         upper_bounds[i], filenames[i].c_str());
    hist.Create(1, baseflength * (xdiv - EXT) * (ydiv - EXT));
}
// structure for fast average histogram similarity, NOT using codebook,
// for BSVM Crammer-Singer and t=5 (HIK) ONLY
// There are nr_class classifiers, each taking 'm' rows of 'eval'
double UseSVM_CS_Histogram_FastEvaluationStructure(const svm_model& model, const int m,
                                                   const int upper_bound, Array2dC<double>& eval)
{
    assert(model.param.svm_type == 5);
    assert(model.param.kernel_type == 5);
    int nr_class = model.nr_class;

    // Check that the input is valid
    int max_index = -1;
    for (int i = 0; i < model.l; i++)
    {
        svm_node* sv = model.SV[i];
        while (sv->index != -1)
        {
            if (sv->index <= 0 || (m > 0 && sv->index > m))
            {
                std::cout << "Invalid input: one feature index is out of range (<=0 or >m)." << std::endl;
                exit(-1);
            }
            if (sv->index > max_index) max_index = sv->index;
            if (sv->value < 0 || sv->value >= upper_bound)
            {
                std::cout << "Invalid input: one feature value is out of range (<0 or >=upper bound)." << std::endl;
                exit(-1);
            }
            sv++;
        }
    }

    // eval: data structure for fast evaluation of SVMs
    // rows i*m to (i+1)*m-1 form the classifier for class i
    // each row is the lookup table for one dimension of the feature vector,
    // indexed by feature value 0 to (upper_bound - 1)
    if (m > max_index) max_index = m;
    eval.Create(nr_class * max_index, upper_bound);

    // SVs, for sorting then filling the table
    Array2d<int> SVs(model.l, max_index);
    for (int i = 0; i < model.l; i++)
        std::fill(SVs.p[i], SVs.p[i] + max_index, (int)0);
    for (int i = 0; i < model.l; i++)
    {
        svm_node* sv = model.SV[i];
        while (sv->index != -1)
        {
            SVs.p[i][sv->index - 1] = (int)sv->value;
            sv++;
        }
    }

    Array2dC< sort_struct<int> > onecolumn(1, model.l); // data structure for sorting
    Array2dC<double> sv_coef(1, model.l);
    for (int fi = 0; fi < max_index; fi++) // feature index, ranging from 0 to max_index-1
    {
        // first sort a column
        for (int i = 0; i < model.l; i++)
        {
            onecolumn.buf[i].value = SVs.p[i][fi];
            onecolumn.buf[i].id = i;
        }
        std::sort(onecolumn.buf, onecolumn.buf + model.l, Template_Less<int>);
        // Now fill the table
        for (int c = 0; c < nr_class; c++)
        {
            for (int i = 0; i < model.l; i++)
                sv_coef.buf[i] = model.sv_coef[c][i];
            double coef_sum = std::accumulate(sv_coef.buf, sv_coef.buf + model.l, 0.0);
            int curpos = 0;
            double cumsum = 0; // cumulative partial sum
            for (int i = 0; i < upper_bound; i++)
            {
                while (curpos < model.l && onecolumn.buf[curpos].value == i)
                {
                    cumsum += sv_coef.buf[onecolumn.buf[curpos].id] * onecolumn.buf[curpos].value;
                    coef_sum -= sv_coef.buf[onecolumn.buf[curpos].id];
                    curpos++;
                }
                // T(i) = sum over SVs with value <= i of alpha*value, plus i times
                // the total alpha of SVs with value > i
                eval.p[c*m + fi][i] = cumsum + coef_sum * i;
            }
        }
    }
    return 0; // this is not used; it is here for historical reasons
}
// structure for fast average histogram similarity, NOT using codebook,
// to be used with LIBSVM classifiers
// This function should work in all cases (one-class, nu-SVM, C-SVC, binary, multi-class, etc.)
double UseSVM_Histogram_FastEvaluationStructure(const svm_model& model, const int m,
                                                const int upper_bound, Array2dC<double>& eval,
                                                const bool normalize)
{
    int nr_class = model.nr_class;
    int num_classifier = nr_class * (nr_class - 1) / 2; // one-vs-one sub-classifiers
    if (model.param.svm_type == ONE_CLASS || model.param.svm_type == EPSILON_SVR ||
        model.param.svm_type == NU_SVR)
    {
        nr_class = 1;
        num_classifier = 1;
    }

    // 'start[i]': index of where the SVs for class i start
    Array2dC<int> start(1, nr_class);
    start.buf[0] = 0;
    for (int i = 1; i < nr_class; i++)
        start.buf[i] = start.buf[i-1] + model.nSV[i-1];

    // Check that the input is valid
    int max_index = -1;
    for (int i = 0; i < nr_class; i++)
    {
        for (int j = 0; j < model.nSV[i]; j++)
        {
            svm_node* sv = model.SV[start.buf[i] + j];
            while (sv->index != -1)
            {
                if (sv->index <= 0 || (m > 0 && sv->index > m))
                {
                    std::cout << "Invalid input: one feature index is out of range (<=0 or >m)." << std::endl;
                    exit(-1);
                }
                if (sv->index > max_index) max_index = sv->index;
                if (sv->value < 0 || sv->value >= upper_bound)
                {
                    std::cout << "Invalid input: one feature value is out of range (<0 or >=upper bound)." << std::endl;
                    exit(-1);
                }
                sv++;
            }
        }
    }

    // eval: data structure for fast evaluation of SVMs
    // eval[(i*nr_class+j)*m : (i*nr_class+j+1)*m, :] is the sub-classifier of class i versus class j
    // each row is the lookup table for one dimension of the feature vector,
    // indexed by feature value 0 to (upper_bound - 1)
    // refer to our ICCV 2009 paper
    if (m > max_index) max_index = m;
    eval.Create(num_classifier * max_index, upper_bound);

    int pos = 0; // index of the sub-classifier
    for (int i = 0; i < nr_class; i++)
    {
        for (int j = i+1; j < nr_class; j++)
        {
            // sub-classifier for class i versus class j
            int size = model.nSV[i] + model.nSV[j]; // number of SVs in this sub-classifier
            Array2dC<double> sv_coef(1, size);      // prepare \alpha values
            for (int k = 0; k < model.nSV[i]; k++)
                sv_coef.buf[k] = model.sv_coef[j-1][k + start.buf[i]];
            for (int k = 0; k < model.nSV[j]; k++)
                sv_coef.buf[model.nSV[i] + k] = model.sv_coef[i][k + start.buf[j]];
            // normalize these \alpha values and \rho
            double coef_sum = std::accumulate(sv_coef.buf, sv_coef.buf + model.nSV[i], 0.0);
            coef_sum = 1.0 / coef_sum;
            if (normalize)
                for (int k = 0; k < size; k++) sv_coef.buf[k] *= coef_sum;
            if (normalize)
                model.rho[pos] *= coef_sum;

            // SVs of this sub-classifier, for sorting then filling the table
            Array2d<int> SVs(size, max_index);
            for (int k = 0; k < size; k++)
                std::fill(SVs.p[k], SVs.p[k] + max_index, (int)0);
            for (int k = 0; k < model.nSV[i]; k++) // SVs from class i
            {
                svm_node* sv = model.SV[start.buf[i] + k];
                while (sv->index != -1) { SVs.p[k][sv->index - 1] = (int)sv->value; sv++; }
            }
            for (int k = 0; k < model.nSV[j]; k++) // SVs from class j
            {
                svm_node* sv = model.SV[start.buf[j] + k];
                while (sv->index != -1) { SVs.p[k + model.nSV[i]][sv->index - 1] = (int)sv->value; sv++; }
            }

            Array2dC< sort_struct<int> > onecolumn(1, size); // data structure for sorting
            for (int fi = 0; fi < max_index; fi++) // feature index, ranging from 0 to max_index-1
            {
                // first sort a column
                coef_sum = std::accumulate(sv_coef.buf, sv_coef.buf + size, 0.0); // should this be 0?
                for (int k = 0; k < size; k++)
                {
                    onecolumn.buf[k].value = SVs.p[k][fi];
                    onecolumn.buf[k].id = k;
                }
                std::sort(onecolumn.buf, onecolumn.buf + size, Template_Less<int>);
                // Now fill the table
                int curpos = 0;
                double cumsum = 0; // cumulative partial sum
                for (int k = 0; k < upper_bound; k++)
                {
                    while (curpos < size && onecolumn.buf[curpos].value == k)
                    {
                        cumsum += sv_coef.buf[onecolumn.buf[curpos].id] * onecolumn.buf[curpos].value;
                        coef_sum -= sv_coef.buf[onecolumn.buf[curpos].id];
                        curpos++;
                    }
                    eval.p[pos*m + fi][k] = cumsum + coef_sum * k;
                }
            }
            pos++;
        }
    }
    return -model.rho[0]; // this is not used; it is here for historical reasons
}
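// ---------------------------------------------------------------------------
// Hypothetical decision-value sketch (illustration only, not in the original
// code): how the 'eval' table built above is meant to be used. For
// sub-classifier 'pos' and a feature vector already quantized to integers in
// [0, upper_bound), the HIK decision value reduces to m table lookups,
//   f(x) = sum_fi eval[pos*m+fi][x_fi] - rho[pos],
// because each row stores T(v) = sum_{s<=v} alpha*s + v * sum_{s>v} alpha,
// the histogram-intersection response of all SVs at value v.
static double HIKDecision_Sketch(const Array2dC<double>& eval, const double* rho,
                                 const int pos, const int* x, const int m)
{
    double score = 0;
    for (int fi = 0; fi < m; fi++)
        score += eval.p[pos*m + fi][x[fi]]; // one lookup per feature dimension
    return score - rho[pos]; // positive means class i wins in the (i vs j) pair
}
// ---------------------------------------------------------------------------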