Example #1
void EncoderBoFSoft::encode(const cv::Mat_<float>& descriptors, cv::Mat_<float>& encoded)
{
	int ndata = descriptors.rows;
	int ndim = descriptors.cols;

	if ( ndim != this->_m_nDim)
	{
		throw std::runtime_error("dimension not match when encode");
	}
	
	encoded.create(ndata,this->_m_nCode);
	encoded.setTo(0.0f);

#pragma omp parallel for
	for(int i=0;i<ndata;i++)
	{
		Mat index,dist;
		this->_m_pTree->findNearest(descriptors.row(i),_m_nz,INT_MAX,index,noArray(),dist);

		// Weight each selected codeword inversely to its distance,
		// scaled by the standard deviation of the k nearest distances.
		Scalar mean,std;
		cv::meanStdDev(dist,mean,std);
		cv::divide(std(0),dist,dist);
		
		for(int j=0;j<_m_nz;j++)
		{
			encoded(i,index.at<int>(j)) = dist.at<float>(j);
		}
	}
}
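For reference, the soft-assignment weighting used above (weight = std(dist) / dist over the k nearest codewords) can be reproduced as a standalone sketch with cv::flann; the codebook matrix, k, and the squared-L2 distances FLANN reports are assumptions of this sketch, not part of EncoderBoFSoft.

#include <opencv2/core.hpp>
#include <opencv2/flann.hpp>

// Sketch: soft-assign a single row descriptor to a codebook, mirroring the
// loop body of EncoderBoFSoft::encode above. Note that cv::flann reports
// squared L2 distances, and zero distances are not handled, as above.
static cv::Mat_<float> softAssignRow(const cv::Mat_<float>& codebook,
                                     const cv::Mat_<float>& descriptor, int k)
{
    cv::flann::Index tree(codebook, cv::flann::KDTreeIndexParams(4));
    cv::Mat indices, dists;
    tree.knnSearch(descriptor, indices, dists, k, cv::flann::SearchParams(32));

    cv::Scalar mean, stddev;
    cv::meanStdDev(dists, mean, stddev);

    cv::Mat_<float> code(1, codebook.rows, 0.0f);
    for (int j = 0; j < k; ++j)
        code(0, indices.at<int>(0, j)) =
            static_cast<float>(stddev(0)) / dists.at<float>(0, j);
    return code;
}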
Example #2
template <class T>
void paste_images_gallery(const std::vector<cv::Mat_<T> > & in,
                cv::Mat_<T> & out,
                int gallerycols, T background_color,
                bool draw_borders = false, cv::Scalar border_color = CV_RGB(0,0,0)) {
  if (in.size() == 0) {
    out.create(0, 0);
    return;
  }
  int cols1 = in[0].cols, rows1 = in[0].rows, nimgs = in.size();
  // prepare output
  int galleryrows = std::ceil(1. * nimgs / gallerycols);
  out.create(rows1 * galleryrows, cols1 * gallerycols);
  // printf("nimgs:%i, gallerycols:%i, galleryrows:%i\n", nimgs, gallerycols, galleryrows);
  out.setTo(background_color);
  // paste images
  for (int img_idx = 0; img_idx < nimgs; ++img_idx) {
    int galleryx = img_idx % gallerycols, galleryy = img_idx / gallerycols;
    cv::Rect roi(galleryx * cols1, galleryy * rows1, cols1, rows1);
    // printf("### out:%ix%i, roi %i:'%s'\n", out.cols, out.rows, img_idx, geometry_utils::print_rect(roi).c_str());
    if (cols1 != in[img_idx].cols || rows1 != in[img_idx].rows) {
      printf("Image %i of size (%ix%i), different from (%ix%i), skipping it.\n",
             img_idx, in[img_idx].cols, in[img_idx].rows, cols1, rows1);
      cv::line(out, roi.tl(), roi.br(), border_color, 2);
      cv::line(out, roi.br(), roi.tl(), border_color, 2);
    }
    else
      in[img_idx].copyTo( out(roi) );
    if (draw_borders)
      cv::rectangle(out, roi, border_color, 1);
  } // end loop img_idx
} // end paste_images_gallery<_T>
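A possible driver for the gallery function above, assuming the `template <class T>` declaration added to it and that it is visible from the same translation unit; tile sizes and pixel values are placeholders.

#include <vector>
#include <opencv2/highgui.hpp>

int main()
{
  // Three equally sized grayscale tiles with arbitrary contents.
  std::vector<cv::Mat_<uchar> > tiles;
  tiles.push_back(cv::Mat_<uchar>(60, 80, (uchar)50));
  tiles.push_back(cv::Mat_<uchar>(60, 80, (uchar)128));
  tiles.push_back(cv::Mat_<uchar>(60, 80, (uchar)200));

  cv::Mat_<uchar> gallery;
  // Two columns -> ceil(3 / 2) = 2 gallery rows, one cell left as background.
  paste_images_gallery(tiles, gallery, 2, (uchar)0, true, cv::Scalar::all(255));

  cv::imshow("gallery", gallery);
  cv::waitKey(0);
  return 0;
}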
Example #3
//===========================================================================
void Multi_SVR_patch_expert::Response(const cv::Mat_<float> &area_of_interest, cv::Mat_<float> &response)
{
	
	int response_height = area_of_interest.rows - height + 1;
	int response_width = area_of_interest.cols - width + 1;

	if(response.rows != response_height || response.cols != response_width)
	{
		response.create(response_height, response_width);
	}

	// For the purposes of the experiment, only use the normal-intensity response, for a fair comparison

	if(svr_patch_experts.size() == 1)
	{
		svr_patch_experts[0].Response(area_of_interest, response);		
	}
	else
	{
		// Responses from multiple patch experts; these can be gradients, LBPs, etc.
		response.setTo(1.0);
		
		cv::Mat_<float> modality_resp(response_height, response_width);

		for(size_t i = 0; i < svr_patch_experts.size(); i++)
		{			
			svr_patch_experts[i].Response(area_of_interest, modality_resp);			
			response = response.mul(modality_resp);	
		}	
		
	}

}
Example #4
//===========================================================================
void CCNF_patch_expert::Response(cv::Mat_<float> &area_of_interest, cv::Mat_<float> &response)
{
	
	int response_height = area_of_interest.rows - height + 1;
	int response_width = area_of_interest.cols - width + 1;

	if(response.rows != response_height || response.cols != response_width)
	{
		response.create(response_height, response_width);
	}
		
	response.setTo(0);
	
	// Placeholders for the DFT of the image, the integral image and the squared integral image, so they are not recalculated for every neuron response
	cv::Mat_<double> area_of_interest_dft;
	cv::Mat integral_image, integral_image_sq;
	
	cv::Mat_<float> neuron_response;

	// responses from the neural layers
	for(size_t i = 0; i < neurons.size(); i++)
	{		
		// Do not bother with neuron response if the alpha is tiny and will not contribute much to overall result
		if(neurons[i].alpha > 1e-4)
		{
			neurons[i].Response(area_of_interest, area_of_interest_dft, integral_image, integral_image_sq, neuron_response);
			response = response + neuron_response;						
		}
	}

	int s_to_use = -1;

	// Find the matching sigma (a window size equal to the response height is assumed to exist)
	for(size_t i=0; i < window_sizes.size(); ++i)
	{
		if(window_sizes[i] == response_height)
		{
			// Found the correct sigma
			s_to_use = i;			
			break;
		}
	}

	cv::Mat_<float> resp_vec_f = response.reshape(1, response_height * response_width);

	cv::Mat out = Sigmas[s_to_use] * resp_vec_f;
	
	response = out.reshape(1, response_height);

	// Making sure the response does not have negative numbers
	double min;

	minMaxIdx(response, &min, 0);
	if(min < 0)
	{
		response = response - min;
	}

}
Example #5
void OvershootClusterer::drawTo(cv::Mat_<cv::Vec3b> &out) const
{
    out.setTo(0);
    out.setTo(Scalar(255,255,255), img > 0);

    Mat_<Vec3b>::iterator itOut = out.begin(), itOutEnd = out.end();
    Mat_<unsigned char>::const_iterator it = smallestOvershootVisit.begin(),
                                        itEnd = smallestOvershootVisit.end();
    for (; itOut != itOutEnd && it != itEnd; ++it, ++itOut)
    {
        if ((*it) < CLUSTERER_OVERSHOOTS)
        {
            unsigned char nonRed = (*it)*(255/CLUSTERER_OVERSHOOTS);
            *itOut = Vec3b(nonRed,nonRed,255);
        }
    }
}
Example #6
void SegmenterHumanSimple::getPixelProbability(const cv::Mat& img, cv::Mat_<float>& prob, vector<Rect> faces)
{
	prob.create(img.rows,img.cols);
	prob.setTo(0.3f);
	for(size_t i=0;i<faces.size();i++)
	{
		Rect rect = faces[i];
		rect = UtilsOpencv::ScaleRect(rect,Rect(0,0,img.cols,img.rows),1.2,1.5,1.2,1.2);
		prob(Rect(rect.x,rect.y,rect.width,img.rows-rect.y)) = 0.7f;
	}
	
}
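UtilsOpencv::ScaleRect is not shown, so it is unclear whether the enlarged rectangle can leave the image; the ROI assignment above assumes it cannot. A clamped variant of that assignment, sketched with OpenCV's rectangle intersection (markBody is a hypothetical helper, not part of SegmenterHumanSimple):

#include <opencv2/core.hpp>

// Sketch: mark the body region below a (possibly enlarged) face rectangle,
// clamped to the image bounds so the ROI write cannot go out of range.
static void markBody(cv::Mat_<float>& prob, const cv::Mat& img, const cv::Rect& rect)
{
	cv::Rect body(rect.x, rect.y, rect.width, img.rows - rect.y);
	body &= cv::Rect(0, 0, img.cols, img.rows);   // intersect with the image
	if (body.area() > 0)
		prob(body) = 0.7f;
}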
Example #7
int ExtractSubImageDaisyDescriptors(daisy *desc, cv::Mat_<float> &descOut, float py, float px, float step, int ori, int h, int w, float limH, float limW)
{
	descOut.create(w*h, DAISY_FEATURE_LENGTH);
	descOut.setTo(cv::Scalar(0.0));

	int iy, ix;
	float cy, cx, ssinOri, scosOri;
	ssinOri = step*sin((float)ori/180.0*M_PI);
	scosOri = step*cos((float)ori/180.0*M_PI);
	float ry, rx; // the first position of each row
	ry = py; rx = px;

	for (iy=0; iy<h; ++iy)
	{
		cy = ry; cx = rx;
		float *ptr = (float *)(descOut.ptr(iy*w));

		for (ix=0; ix<w; ++ix)
		{
			// Extract one descriptor at (cy, cx), clamped to the image limits.
			desc->ExtractDescriptorFromNormalizedHistogram(ptr, std::min<double>(limH-1.0, std::max<double>(cy, 0.0)), std::min<double>(limW-1.0, std::max<double>(cx, 0.0)), ori);
			ptr += DAISY_FEATURE_LENGTH;

			// Step along the (possibly rotated) row.
			cy = cy+ssinOri;
			cx = cx+scosOri;
		}
		// Step to the start of the next row, perpendicular to the row direction.
		ry = ry+scosOri;
		rx = rx-ssinOri;
	}
	return 1;
}
Example #8
        KalmanFilter( cv::Point initialPt )
        {
            mKF = cv::KalmanFilter( 4, 2, 0 );
            mKF.transitionMatrix = ( cv::Mat_<float>(4, 4) << 1,0,1,0,   0,1,0,1,  0,0,1,0,  0,0,0,1 );
            mMeasurement = cv::Mat_<float>( 2, 1 );
            mMeasurement.setTo( cv::Scalar( 0 ) );
        
            // init...
            mKF.statePre.at<float>( 0 ) = initialPt.x;
            mKF.statePre.at<float>( 1 ) = initialPt.y;
            mKF.statePre.at<float>( 2 ) = 0;
            mKF.statePre.at<float>( 3 ) = 0;
            setIdentity( mKF.measurementMatrix );
            setIdentity( mKF.processNoiseCov, cv::Scalar::all( 1e-4 ) );
            setIdentity( mKF.measurementNoiseCov, cv::Scalar::all( 1e-1 ) );
            setIdentity( mKF.errorCovPost, cv::Scalar::all( .1 ));
            
            mPrediction = initialPt;

            // prime the filter, otherwise it thinks it's at 0,0
            correct( initialPt );
        }
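The wrapper's correct() method and the per-frame loop are not shown; with a bare cv::KalmanFilter configured like mKF above (constant-velocity model, 4 states, 2 measurements), one tracking step would look roughly like this sketch (trackStep and the measured point are illustrative, not part of the class):

#include <opencv2/video/tracking.hpp>

// Sketch of a single predict/correct cycle with a plain cv::KalmanFilter
// set up like mKF above (state: x, y, vx, vy; measurement: x, y).
static cv::Point2f trackStep(cv::KalmanFilter& kf, const cv::Point& measuredPt)
{
    kf.predict();                                  // advance the a-priori state

    cv::Mat_<float> measurement(2, 1);
    measurement(0) = static_cast<float>(measuredPt.x);
    measurement(1) = static_cast<float>(measuredPt.y);

    cv::Mat estimated = kf.correct(measurement);   // a-posteriori estimate
    return cv::Point2f(estimated.at<float>(0), estimated.at<float>(1));
}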
Example #9
template <typename T>
void generate_gabor_filter(cv::Mat_<T>& image, double peakFreq, double theta, double sigma_x, double sigma_y)
{
    const size_t w = image.size().width;
    const size_t h = image.size().height;
    const double step_u = 1.0 / static_cast<double>(w);
    const double step_v = 1.0 / static_cast<double>(h);
    const double cos_theta = std::cos(theta);
    const double sin_theta = std::sin(theta);

    const double sigmaXSquared = sigma_x*sigma_x;
    const double sigmaYSquared = sigma_y*sigma_y;

    image.setTo(cv::Scalar(0));

    for (int ny = -1; ny <= 1; ny++)
        for (int nx = -1; nx <= 1; nx++)
        {
            double v = ny;
            for (size_t y = 0; y < h; y++)
            {
                double u = nx;
                for (size_t x = 0; x < w; x++)
                {
                    double ur = u * cos_theta - v * sin_theta;
                    double vr = u * sin_theta + v * cos_theta;

                    double tmp = ur-peakFreq;
                    double value = std::exp(-2*M_PI*M_PI * (tmp*tmp*sigmaXSquared + vr*vr*sigmaYSquared));
                    image(y, x) += value;

                    u += step_u;
                }
                v += step_v;
            }
        }
}
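A minimal way to exercise and visualize the generator above, assuming the `template <typename T>` declaration added to it; the image size and filter parameters are arbitrary.

#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>

int main()
{
    cv::Mat_<float> gabor(128, 128);
    // Peak frequency, orientation and bandwidths chosen only for illustration.
    generate_gabor_filter(gabor, /*peakFreq=*/0.15, /*theta=*/CV_PI / 4,
                          /*sigma_x=*/8.0, /*sigma_y=*/8.0);

    cv::Mat_<float> shown;
    cv::normalize(gabor, shown, 0.0, 1.0, cv::NORM_MINMAX);  // rescale for display
    cv::imshow("gabor filter", shown);
    cv::waitKey(0);
    return 0;
}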