Example #1
    virtual void operator()(const cv::BlockedRange& range) const
    {
#ifdef HAVE_TBB
        tbb::spin_mutex::scoped_lock lock;
#endif
        CvSeqReader reader;
        int begin = range.begin();
        int end = range.end();

        int weak_count = end - begin;
        CvDTree* tree;

        // Accumulate the responses of the weak trees in [begin, end) for each
        // of the k output dimensions, then add them to the shared sum[] array.
        for (int i = 0; i < k; ++i)
        {
            float tmp_sum = 0.0f;
            if ((weak[i]) && (weak_count))
            {
                cvStartReadSeq( weak[i], &reader );
                cvSetSeqReaderPos( &reader, begin );
                for (int j = 0; j < weak_count; ++j)
                {
                    CV_READ_SEQ_ELEM( tree, reader );
                    tmp_sum += shrinkage*(float)(tree->predict(sample, missing)->value);
                }
            }
#ifdef HAVE_TBB
            // sum[] is shared between threads, so guard the update with the mutex.
            lock.acquire(SumMutex);
            sum[i] += tmp_sum;
            lock.release();
#else
            sum[i] += tmp_sum;
#endif
        }
    } // Tree_predictor::operator()
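Functors like this one are driven through OpenCV 2.x's internal cv::parallel_for wrapper (the same header that declares cv::BlockedRange), which hands sub-ranges of the weak-tree index range to worker threads when TBB is available. A minimal sketch of a call site, assuming that API; the constructor arguments and n_weak are placeholders, not the class's real signature:

// Hypothetical dispatch: [0, n_weak) indexes the weak trees read by the body above.
Tree_predictor predictor(/* weak, k, shrinkage, sample, missing, sum, ... */);
cv::parallel_for(cv::BlockedRange(0, n_weak), predictor);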
Example #2

void bg_update_parallel::operator()(const cv::BlockedRange& range) const
{
    for (int i = range.begin(); i < range.end(); i++)
    {
        // Map the flat index back to pixel coordinates.
        int y = i / src.cols;
        int x = i % src.cols;

        cv::Vec3b sval, bval1, bval2;
        sval  = src.at<cv::Vec3b>(y, x);
        bval1 = bg1.at<cv::Vec3b>(y, x);
        bval2 = bg2.at<cv::Vec3b>(y, x);
        int ssum, bsum1, bsum2;
        ssum  = sval[0] + sval[1] + sval[2];
        bsum1 = bval1[0] + bval1[1] + bval1[2];
        bsum2 = bval2[0] + bval2[1] + bval2[2];
        double weight = no_touch_fg.at<float>(y, x);
        if (ssum < bsum2)
        {
            // Pixel is darker than the dark background model: update bg2.
            if (weight == 0)
                weight = (1.0 - non_update_mask.at<float>(y, x)) * _learning_rate;
            // std::cout << "weight for dark :" << weight << std::endl;
            bval2 = blend<cv::Vec3b>(bval2, sval, 1.0 - weight, weight);
            bg2.at<cv::Vec3b>(y, x) = bval2;
        }
        else if (ssum > bsum1)
        {
            // Pixel is brighter than the bright background model: update bg1.
            if (weight == 0)
                weight = (1.0 - non_update_mask.at<float>(y, x)) * _learning_rate;
            // std::cout << "weight for light:" << weight << std::endl;
            bval1 = blend<cv::Vec3b>(bval1, sval, 1.0 - weight, weight);
            bg1.at<cv::Vec3b>(y, x) = bval1;
        }
        else
        {
            // Pixel lies between the two models: update both with the slower rate.
            if (weight == 0)
                weight = (1.0 - non_update_mask.at<float>(y, x)) * _learning_rate * _learning_rate2;
            else
                weight *= _learning_rate2;
            // std::cout << "weight for both :" << weight << std::endl;
            bval1 = blend<cv::Vec3b>(sval, bval1, _learning_rate2, 1.f - _learning_rate2);
            bg1.at<cv::Vec3b>(y, x) = bval1;
            bval2 = blend<cv::Vec3b>(sval, bval2, _learning_rate2, 1.f - _learning_rate2);
            bg2.at<cv::Vec3b>(y, x) = bval2;
        }
    }
}
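The blend<> helper used above is not part of this snippet. A minimal sketch of what such a per-channel weighted blend might look like, assuming the two weights roughly sum to one; the real implementation may differ:

// Hypothetical helper: weighted per-channel average of two pixels.
template <typename T>
static T blend(const T& a, const T& b, double wa, double wb)
{
    T out;
    for (int c = 0; c < T::channels; ++c)
        out[c] = cv::saturate_cast<typename T::value_type>(a[c] * wa + b[c] * wb);
    return out;
}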
Example #3

void pVT_best_split_finder::operator() (const cv::BlockedRange &r)
{

  // for each variable, find the best split
  for (int vi = r.begin(); vi != r.end(); ++vi) {
    pVTLogitSplit the_split;
    the_split.reset();
    bool ret;
    ret = tree_->find_best_split_num_var(node_, data_, vi, 
      the_split);

    // update the cb_split (currently best split)
    if (!ret) continue; // nothing found
    if (the_split.expected_gain_ > cb_split_.expected_gain_) {
      cb_split_ = the_split;
    } // if
  } // for vi
}
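Each range chunk accumulates its own cb_split_, so a TBB-style splittable body would normally pair this operator() with a splitting constructor and a join() that keeps the better partial result. That merge step is not shown here; a hypothetical sketch of what it might look like:

// Hypothetical reduction step: keep whichever partial best split has the larger expected gain.
void pVT_best_split_finder::join(const pVT_best_split_finder &rhs)
{
    if (rhs.cb_split_.expected_gain_ > cb_split_.expected_gain_)
        cb_split_ = rhs.cb_split_;
}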
Example #4
  void operator()( const cv::BlockedRange& range ) const
  {
    cv::AutoBuffer<float> buf(buf_sz);
    for(int i = range.begin(); i < range.end(); i += 1 )
    {
        float* neighbor_responses = &buf[0];
        float* dist = neighbor_responses + 1*k;
        Cv32suf* sort_buf = (Cv32suf*)(dist + 1*k);

        pointer->find_neighbors_direct( _samples, k, i, i + 1,
                    neighbor_responses, _neighbors, dist );

        float r = pointer->write_results( k, k1, i, i + 1, neighbor_responses, dist,
                                 _results, _neighbor_responses, _dist, sort_buf );

        if( i == 0 )
            *result = r;
    }
  }
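The pointer arithmetic above implies a scratch layout of k response floats, followed by k distance floats, followed by a Cv32suf sort area, so buf_sz must at least cover that. A sketch of a sufficient sizing under the assumption that write_results needs k sort entries and that Cv32suf occupies the same 4 bytes as a float (both assumptions, since the caller is not shown):

// Hypothetical sizing consistent with the layout used in operator():
//   [0, k)   -> neighbor_responses
//   [k, 2k)  -> dist
//   [2k, 3k) -> sort_buf (Cv32suf reinterpreted over float storage)
int buf_sz = 3 * k;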
Example #5

void pAOSOGrad_best_split_finder::operator() (const cv::BlockedRange &r)
{

  // for each variable, find the best split
  for (int ii = r.begin(); ii != r.end(); ++ii) {
    int vi = this->tree_->sub_fi_[ii];

    pAOSOAutostepGradSplit the_split;
    the_split.reset();
    bool ret;
    ret = tree_->find_best_split_num_var(node_, data_, vi, 
      the_split);

    // update the cb_split (currently best split)
    if (!ret) continue; // nothing found
    if (the_split.expected_gain_ > cb_split_.expected_gain_) {
      cb_split_ = the_split;
    } // if
  } // for vi
}
Example #6
    virtual void operator()(const cv::BlockedRange& range) const
    {
        int begin = range.begin();
        int end = range.end();

        CvMat x;
        CvMat miss;

        for (int i=begin; i<end; ++i)
        {
            // Predict each selected sample row, passing its row of the
            // missing-value mask when one was provided.
            int j = idx ? idx->data.i[i] : i;
            cvGetRow(samples, &x, j);
            if (!missing)
            {
                predictions[i] = gbt->predict_serial(&x,0,0,slice);
            }
            else
            {
                cvGetRow(missing, &miss, j);
                predictions[i] = gbt->predict_serial(&x,&miss,0,slice);
            }
        }
    } // Sample_predictor::operator()
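When OpenCV is built without TBB, the cv::parallel_for that drives bodies like this Sample_predictor degenerates to a single serial call over the whole range, roughly as below (paraphrased from memory of the OpenCV 2.x internal header, so treat it as approximate):

// Non-TBB fallback: the body simply runs once over the full range on the calling thread.
template <typename Body>
static inline void parallel_for(const cv::BlockedRange& range, const Body& body)
{
    body(range);
}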
Example #7
  void operator()( const cv::BlockedRange& range ) const
  {

    int cls = -1;
    int rtype = 0, rstep = 0; 
    int nclasses = cls_labels->cols;
    int _var_count = avg[0]->cols;
    
    if (results)
    {
        rtype = CV_MAT_TYPE(results->type);
        rstep = CV_IS_MAT_CONT(results->type) ? 1 : results->step/CV_ELEM_SIZE(rtype);
    }
    // allocate memory and initialize the headers used in the calculation
    cv::AutoBuffer<double> buffer(nclasses + var_count1);
    CvMat diff = cvMat( 1, var_count1, CV_64FC1, &buffer[0] );
    
    for(int k = range.begin(); k < range.end(); k += 1 )
    {
        int ival;
        double opt = FLT_MAX;

        for(int i = 0; i < nclasses; i++ )
        {

            double cur = c->data.db[i];
            CvMat* u = cov_rotate_mats[i];
            CvMat* w = inv_eigen_values[i];

            const double* avg_data = avg[i]->data.db;
            const float* x = (const float*)(samples->data.ptr + samples->step*k);

            // cov = u w u'  -->  cov^(-1) = u w^(-1) u'
            for(int j = 0; j < _var_count; j++ )
                diff.data.db[j] = avg_data[j] - x[vidx ? vidx[j] : j];

            cvGEMM( &diff, u, 1, 0, 0, &diff, CV_GEMM_B_T );
            for(int j = 0; j < _var_count; j++ )
            {
                double d = diff.data.db[j];
                cur += d*d*w->data.db[j];
            }

            if( cur < opt )
            {
                cls = i;
                opt = cur;
            }
            /* probability = exp( -0.5 * cur ) */
        }

        ival = cls_labels->data.i[cls];
        if( results )
        {
            if( rtype == CV_32SC1 )
                results->data.i[k*rstep] = ival;
            else
                results->data.fl[k*rstep] = (float)ival;
        }
        if( k == 0 )
            *value = (float)ival;
    }
  }
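The loop keeps only the class with the smallest discriminant cur; as the inline comment notes, exp(-0.5 * cur) is proportional to the class likelihood, so normalized per-class probabilities could be recovered as a post-processing step. A sketch, assuming the per-class cur values had been stored in a cur_vals buffer instead of being discarded (that buffer is hypothetical, not in the code above):

// Hypothetical post-processing: turn the per-class discriminants into probabilities.
// For numerical stability one would usually subtract the smallest cur first.
std::vector<double> probs(nclasses);
double norm = 0.0;
for (int i = 0; i < nclasses; i++)
    norm += probs[i] = std::exp(-0.5 * cur_vals[i]);
for (int i = 0; i < nclasses; i++)
    probs[i] /= norm;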
Example #8

void operator()(const cv::BlockedRange& range) const
{
    for (int i = range.begin(); i != range.end(); i++) {
        cam_interface[i]->retrieve(mat_vec[i], 0);
    }
}
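A body this small is typically launched with one index per camera so that frames from several devices are retrieved concurrently. A hypothetical call site, where grabber stands for an instance of the functor class above and cam_interface/mat_vec are its members as shown:

// Hypothetical dispatch: retrieve one frame per camera in parallel.
cv::parallel_for(cv::BlockedRange(0, (int)cam_interface.size()), grabber);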
Example #9
    void operator()(const cv::BlockedRange & range) const
    {  
      const cv::Mat & label = * label_ptr;
      cv::Mat & gaussian_kernel = * gaussian_kernel_ptr;
      vector<cv::Mat> & gradients = * gradients_ptr;
      
      double *oris;
      cv::Mat weights, slice_map, label_exp;
      cv::Mat hist_left  = cv::Mat::zeros(1, num_bins, CV_32FC1);
      cv::Mat hist_right = cv::Mat::zeros(1, num_bins, CV_32FC1);    
      weights = weight_matrix_disc(r);
      slice_map = orientation_slice_map(r, range.end());
      oris = standard_filter_orientations(range.end(), DEG);
      gradients.resize(range.end());
      // One zero-initialized gradient map per requested orientation.
      for (int i = 0; i < range.end(); i++)
          gradients[i] = cv::Mat::zeros(label.rows, label.cols, CV_32FC1);
      cv::copyMakeBorder(label, label_exp, r, r, r, r, cv::BORDER_REFLECT);

      for (int idx = range.begin(); idx < range.end(); idx++)
          for (int i = r; i < label_exp.rows - r; i++)
              for (int j = r; j < label_exp.cols - r; j++)
              {
                  // Build the label histograms of the two half-discs split by the
                  // orientation oris[idx] around pixel (i, j).
                  hist_left.setTo(0.0);
                  hist_right.setTo(0.0);
                  for (int x = -r; x <= r; x++)
                      for (int y = -r; y <= r; y++)
                      {
                          int bin = int(label_exp.at<float>(i + x, j + y));
                          if (slice_map.at<float>(x + r, y + r) > oris[idx] - 180.0 &&
                              slice_map.at<float>(x + r, y + r) <= oris[idx])
                              hist_right.at<float>(0, bin) += double(weights.at<int>(x + r, y + r));
                          else
                              hist_left.at<float>(0, bin) += double(weights.at<int>(x + r, y + r));
                      }

                  // Smooth both histograms before comparing them.
                  convolveDFT(hist_right, gaussian_kernel, hist_right, SAME_SIZE);
                  convolveDFT(hist_left, gaussian_kernel, hist_left, SAME_SIZE);

                  double sum_l = 0.0, sum_r = 0.0;
                  for (int nn = 0; nn < num_bins; nn++)
                  {
                      sum_l += hist_left.at<float>(0, nn);
                      sum_r += hist_right.at<float>(0, nn);
                  }

                  // Chi-square-style distance between the normalized half-disc histograms.
                  double tmp = 0.0, tmp1 = 0.0, tmp2 = 0.0, hist_r, hist_l;
                  for (int nn = 0; nn < num_bins; nn++)
                  {
                      if (sum_r == 0)
                          hist_r = hist_right.at<float>(0, nn);
                      else
                          hist_r = hist_right.at<float>(0, nn) / sum_r;

                      if (sum_l == 0)
                          hist_l = hist_left.at<float>(0, nn);
                      else
                          hist_l = hist_left.at<float>(0, nn) / sum_l;

                      tmp1 = hist_r - hist_l;
                      tmp2 = hist_r + hist_l;
                      if (tmp2 < 0.00001)
                          tmp2 = 1.0;

                      tmp += 4.0 * (tmp1 * tmp1) / tmp2;
                  }
                  gradients[idx].at<float>(i - r, j - r) = tmp;
              }
    }
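The innermost accumulation is a chi-square-style comparison of the two half-disc label histograms: for every bin it adds 4*(R-L)^2/(R+L) on the normalized counts, so a large gradient value means the label distributions on either side of the oriented diameter differ strongly. A small stand-alone helper mirroring that loop (hypothetical, written here only to isolate the distance computation):

// Hypothetical helper: chi-square-style distance between two normalized 1xN float histograms.
static double half_disc_distance(const cv::Mat& hist_l, const cv::Mat& hist_r)
{
    double d = 0.0;
    for (int n = 0; n < hist_l.cols; ++n)
    {
        double l = hist_l.at<float>(0, n), r = hist_r.at<float>(0, n);
        double den = (l + r < 0.00001) ? 1.0 : (l + r);
        d += 4.0 * (r - l) * (r - l) / den;
    }
    return d;
}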