Code example #1
File: test_voronoi.cpp  Project: mkgessen/voronoi
template <class T>
void paste_images_gallery(const std::vector<cv::Mat_<T> > & in,
                cv::Mat_<T> & out,
                int gallerycols, T background_color,
                bool draw_borders = false, cv::Scalar border_color = CV_RGB(0,0,0)) {
  if (in.size() == 0) {
    out.create(0, 0);
    return;
  }
  int cols1 = in[0].cols, rows1 = in[0].rows, nimgs = in.size();
  // prepare output
  int galleryrows = std::ceil(1. * nimgs / gallerycols);
  out.create(rows1 * galleryrows, cols1 * gallerycols);
  // printf("nimgs:%i, gallerycols:%i, galleryrows:%i\n", nimgs, gallerycols, galleryrows);
  out.setTo(background_color);
  // paste images
  for (int img_idx = 0; img_idx < nimgs; ++img_idx) {
    int galleryx = img_idx % gallerycols, galleryy = img_idx / gallerycols;
    cv::Rect roi(galleryx * cols1, galleryy * rows1, cols1, rows1);
    // printf("### out:%ix%i, roi %i:'%s'\n", out.cols, out.rows, img_idx, geometry_utils::print_rect(roi).c_str());
    if (cols1 != in[img_idx].cols || rows1 != in[img_idx].rows) {
      printf("Image %i of size (%ix%i), different from (%ix%i), skipping it.\n",
             img_idx, in[img_idx].cols, in[img_idx].rows, cols1, rows1);
      cv::line(out, roi.tl(), roi.br(), border_color, 2);
      cv::line(out, roi.br(), roi.tl(), border_color, 2);
    }
    else
      in[img_idx].copyTo( out(roi) );
    if (draw_borders)
      cv::rectangle(out, roi, border_color, 1);
  } // end loop img_idx
} // end paste_images_gallery<_T>
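A minimal usage sketch for the template above (the tile sizes, pixel values, and output file name are made up for illustration; it assumes the definition above is visible, e.g. via its header):

#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

int main() {
  // three same-sized grayscale tiles of increasing brightness
  std::vector<cv::Mat_<uchar> > tiles;
  for (int i = 0; i < 3; ++i)
    tiles.push_back(cv::Mat_<uchar>(64, 64, (uchar)(64 * (i + 1))));

  // lay the tiles out on a 2-column grid with a white background and black 1px borders
  cv::Mat_<uchar> gallery;
  paste_images_gallery(tiles, gallery, 2, (uchar)255, true, cv::Scalar(0, 0, 0));

  cv::imwrite("gallery.png", gallery);
  return 0;
}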
Code example #2
File: PDM.cpp  Project: ezzaouia/OpenFace
//===========================================================================
// Get the 2D shape (in image space) from global and local parameters
void PDM::CalcShape2D(cv::Mat_<double>& out_shape, const cv::Mat_<double>& params_local, const cv::Vec6d& params_global) const
{

	int n = this->NumberOfPoints();

	double s = params_global[0]; // scaling factor
	double tx = params_global[4]; // x offset
	double ty = params_global[5]; // y offset

	// get the rotation matrix from the euler angles
	cv::Vec3d euler(params_global[1], params_global[2], params_global[3]);
	cv::Matx33d currRot = Euler2RotationMatrix(euler);
	
	// get the 3D shape of the object
	cv::Mat_<double> Shape_3D = mean_shape + princ_comp * params_local;

	// create the 2D shape matrix (if it has not been defined yet)
	if((out_shape.rows != mean_shape.rows) || (out_shape.cols != 1))
	{
		out_shape.create(2*n,1);
	}
	// for every vertex
	for(int i = 0; i < n; i++)
	{
		// Transform this using the weak-perspective mapping to 2D from 3D
		out_shape.at<double>(i  ,0) = s * ( currRot(0,0) * Shape_3D.at<double>(i, 0) + currRot(0,1) * Shape_3D.at<double>(i+n  ,0) + currRot(0,2) * Shape_3D.at<double>(i+n*2,0) ) + tx;
		out_shape.at<double>(i+n,0) = s * ( currRot(1,0) * Shape_3D.at<double>(i, 0) + currRot(1,1) * Shape_3D.at<double>(i+n  ,0) + currRot(1,2) * Shape_3D.at<double>(i+n*2,0) ) + ty;
	}
}
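For reference, the weak-perspective mapping applied to each vertex above can be written as a standalone helper. This is only an illustrative sketch, not part of the OpenFace PDM API:

#include <opencv2/core/core.hpp>

// Rotate a 3D point, keep the first two components, then scale and translate.
cv::Point2d weakPerspectiveProject(const cv::Matx33d& R, const cv::Vec3d& X,
                                   double s, double tx, double ty)
{
    const cv::Vec3d Xr = R * X;            // rotate into the camera orientation
    return cv::Point2d(s * Xr[0] + tx,     // keep x and y, drop depth
                       s * Xr[1] + ty);
}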
Code example #3
// static
cv::Mat_<float> CrossValidator::createCrossValidationMatrix( const vector< const vector<cv::Mat_<float> >* >& rowVectors,
                                                             cv::Mat_<int>& labels)
{
    assert( !rowVectors.empty());
    cv::Mat_<float> vecs;
    labels.create( 0,1);
    for ( int label = 0; label < rowVectors.size(); ++label)
    {
        assert( rowVectors[label] != 0);
        const vector<cv::Mat_<float> >& rvecs = *rowVectors[label];
        assert( !rvecs.empty());
        const int colDim = rvecs[0].cols; // Should be the length of each row vector in this class
        if ( vecs.empty())
            vecs.create(0, colDim);
        assert( colDim == vecs.cols);   // Ensure this class's row vector length matches what's already stored

        for ( int i = 0; i < rvecs.size(); ++i)
        {
            const cv::Mat_<float>& rv = rvecs[i];
            if ( rv.rows != 1 || rv.cols != colDim)
            {
                std::cerr << "ERROR feature vector size: " << rv.size() << std::endl;
                assert( rv.rows == 1 && rv.cols == colDim);
            }   // end if

            vecs.push_back( rv);    // Append the row vector to the bottom of the matrix
            labels.push_back(label);    // Set this vector's class label
        }   // end for
    }   // end for

    labels = labels.t();    // Make row vector
    return vecs;
}   // end createCrossValidationMatrix
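A hypothetical usage sketch for the static method above, assuming the CrossValidator class is available; the feature values are invented:

#include <vector>
#include <opencv2/core/core.hpp>

void buildExample()
{
    // two classes, each contributing one 1x3 feature row vector
    std::vector<cv::Mat_<float> > class0, class1;
    class0.push_back((cv::Mat_<float>(1, 3) << 0.f, 1.f, 2.f));
    class1.push_back((cv::Mat_<float>(1, 3) << 3.f, 4.f, 5.f));

    std::vector<const std::vector<cv::Mat_<float> >* > rowVectors;
    rowVectors.push_back(&class0);
    rowVectors.push_back(&class1);

    cv::Mat_<int> labels;
    cv::Mat_<float> samples = CrossValidator::createCrossValidationMatrix(rowVectors, labels);
    // samples is now 2x3; labels is a 1x2 row vector holding 0 and 1
}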
Code example #4
//===========================================================================
void Multi_SVR_patch_expert::Response(const cv::Mat_<float> &area_of_interest, cv::Mat_<float> &response)
{
	
	int response_height = area_of_interest.rows - height + 1;
	int response_width = area_of_interest.cols - width + 1;

	if(response.rows != response_height || response.cols != response_width)
	{
		response.create(response_height, response_width);
	}

	// For the purposes of the experiment only use the response of normal intensity, for fair comparison

	if(svr_patch_experts.size() == 1)
	{
		svr_patch_experts[0].Response(area_of_interest, response);		
	}
	else
	{
		// responses from multiple patch experts; these can be gradients, LBPs, etc.
		response.setTo(1.0);
		
		cv::Mat_<float> modality_resp(response_height, response_width);

		for(size_t i = 0; i < svr_patch_experts.size(); i++)
		{			
			svr_patch_experts[i].Response(area_of_interest, modality_resp);			
			response = response.mul(modality_resp);	
		}	
		
	}

}
Code example #5
void EncoderBoFSoft::encode(const cv::Mat_<float>& descriptors, cv::Mat_<float>& encoded)
{
	int ndata = descriptors.rows;
	int ndim = descriptors.cols;

	if ( ndim != this->_m_nDim)
	{
		throw std::runtime_error("dimension not match when encode");
	}
	
	encoded.create(ndata,this->_m_nCode);
	encoded.setTo(0.0f);
	//encoded.zeros(ndata,this->_m_nCode);

#pragma omp parallel for
	for(int i=0;i<ndata;i++)
	{
		Mat index,dist;
		this->_m_pTree->findNearest(descriptors.row(i),_m_nz,INT_MAX,index,noArray(),dist);

		Scalar mean,std;
		cv::meanStdDev(dist,mean,std);
		cv::divide(std(0),dist,dist);
		
		for(int j=0;j<_m_nz;j++)
		{
			encoded(i,index.at<int>(j)) = dist.at<float>(j);
		}
	}
}
Code example #6
File: opencv_serialization.hpp  Project: Cerarus/v4r
    void serialize(Archive & ar, ::cv::Mat_<T>& m, const unsigned int version)
    {
      if(Archive::is_loading::value == true)
      {
        int cols, rows;
        size_t elem_size, elem_type;

        ar & cols;
        ar & rows;
        ar & elem_size;
        ar & elem_type;

        m.create(rows, cols);

        size_t data_size = m.cols * m.rows * elem_size;
        ar & boost::serialization::make_array(m.ptr(), data_size);
      }
      else
      {
        size_t elem_size = m.elemSize();
        size_t elem_type = m.type();

        ar & m.cols;
        ar & m.rows;
        ar & elem_size;
        ar & elem_type;

        const size_t data_size = m.cols * m.rows * elem_size;
        ar & boost::serialization::make_array(m.ptr(), data_size);
      }
    }
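A minimal round-trip sketch using the overload above, assuming it is declared in namespace boost::serialization (as non-intrusive Boost.Serialization overloads normally are) and that opencv_serialization.hpp is on the include path; the output file name is arbitrary:

#include <fstream>
#include <boost/archive/binary_oarchive.hpp>
#include <boost/archive/binary_iarchive.hpp>
#include <opencv2/core/core.hpp>
#include "opencv_serialization.hpp"

void roundTrip()
{
    const cv::Mat_<float> m = cv::Mat_<float>::eye(3, 3);

    {
        std::ofstream ofs("mat.bin", std::ios::binary);
        boost::archive::binary_oarchive oa(ofs);
        oa << m;          // writes cols, rows, elem_size, elem_type, then the raw data
    }

    cv::Mat_<float> restored;
    {
        std::ifstream ifs("mat.bin", std::ios::binary);
        boost::archive::binary_iarchive ia(ifs);
        ia >> restored;   // create()s the matrix and reads the raw data back
    }
}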
Code example #7
File: camerageometry.cpp  Project: CURG/v4r_libraries
void CameraGeometry::cameraPose ( cv::Mat_<double> &tvec )
{
    tvec.create ( 3,1 );
    tvec ( 0,0 ) = mInvExt ( 0,3 );
    tvec ( 1,0 ) = mInvExt ( 1,3 );
    tvec ( 2,0 ) = mInvExt ( 2,3 );
}
Code example #8
void DetectorLatentSVMOpencv::detectMultiScale(const cv::Mat& img, std::vector<cv::Rect>& boxes, cv::Mat_<float>& confidence, double scaleFactor
		, double maxSize, double minSize)
{
	double r = 1.0;
	Mat imgBGR = img.clone();
	if (img.cols > this->_m_intMaxSize)
	{
		r = double(_m_intMaxSize) / img.cols;

		cv::resize(imgBGR,imgBGR,Size(),r,r);
	}

	LatentSvmDetector detector(this->_m_vecModelFilenames);

	vector<LatentSvmDetector::ObjectDetection> detections;
	detector.detect(imgBGR,detections);

	vector<float> confs;
	
	for(int i=0;i<(int)detections.size();i++)
	{
		if ( detections[i].score > this->_m_fltThresh)
		{
			Rect rect = detections[i].rect;
			boxes.push_back(Rect(int(rect.x/r),int(rect.y/r),int(rect.width/r),int(rect.height/r)));
			confs.push_back(detections[i].score);
		}
	}

	confidence.create((int)confs.size(),1);
	for(int i=0;i<confs.size();i++)
	{
		confidence(i) = confs[i];
	}
}
Code example #9
void convertQImageToMat(QImage &img_qt, cv::Mat_<cv::Vec3b>& img_cv)
{

	img_cv.create(img_qt.height(), img_qt.width());

	// convertToFormat() returns a new image, so assign it back to guarantee the RGB32 layout assumed below
	img_qt = img_qt.convertToFormat(QImage::Format_RGB32);

	int lineNum = 0;

	int height = img_qt.height();

	int width = img_qt.width();

	uchar *imgBits = img_qt.bits();

	for (int i = 0; i < height; i++)
	{
		lineNum = i* width * 4;
		for (int j = 0; j < width; j++)
		{
			img_cv(i, j)[2] = imgBits[lineNum + j * 4 + 2];
			img_cv(i, j)[1] = imgBits[lineNum + j * 4 + 1];
			img_cv(i, j)[0] = imgBits[lineNum + j * 4 + 0];
		}
	}
}
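A hypothetical usage sketch for the converter above (assumes a Qt build; the file names are placeholders):

#include <QImage>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    QImage qimg("input.png");
    if (qimg.isNull()) return 1;

    cv::Mat_<cv::Vec3b> cvimg;
    convertQImageToMat(qimg, cvimg);   // fills cvimg in OpenCV's BGR channel order

    cv::imwrite("output.png", cvimg);
    return 0;
}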
Code example #10
//===========================================================================
void CCNF_patch_expert::Response(cv::Mat_<float> &area_of_interest, cv::Mat_<float> &response)
{
	
	int response_height = area_of_interest.rows - height + 1;
	int response_width = area_of_interest.cols - width + 1;

	if(response.rows != response_height || response.cols != response_width)
	{
		response.create(response_height, response_width);
	}
		
	response.setTo(0);
	
	// the placeholder for the DFT of the image, the integral image, and squared integral image so they don't get recalculated for every response
	cv::Mat_<double> area_of_interest_dft;
	cv::Mat integral_image, integral_image_sq;
	
	cv::Mat_<float> neuron_response;

	// responses from the neural layers
	for(size_t i = 0; i < neurons.size(); i++)
	{		
		// Do not bother with neuron response if the alpha is tiny and will not contribute much to overall result
		if(neurons[i].alpha > 1e-4)
		{
			neurons[i].Response(area_of_interest, area_of_interest_dft, integral_image, integral_image_sq, neuron_response);
			response = response + neuron_response;						
		}
	}

	int s_to_use = -1;

	// Find the matching sigma
	for(size_t i=0; i < window_sizes.size(); ++i)
	{
		if(window_sizes[i] == response_height)
		{
			// Found the correct sigma
			s_to_use = i;			
			break;
		}
	}

	cv::Mat_<float> resp_vec_f = response.reshape(1, response_height * response_width);

	cv::Mat out = Sigmas[s_to_use] * resp_vec_f;
	
	response = out.reshape(1, response_height);

	// Making sure the response does not have negative numbers
	double min;

	minMaxIdx(response, &min, 0);
	if(min < 0)
	{
		response = response - min;
	}

}
Code example #11
void DistanceTransform<T>::compute(const cv::Mat_<T>& score_in, const PenaltyFunction& fx, const PenaltyFunction& fy, const cv::Point os, cv::Mat_<T>& score_out, cv::Mat_<int>& Ix, cv::Mat_<int>& Iy) const {

	// get the dimensionality of the score
	const size_t M = score_in.rows;
	const size_t N = score_in.cols;

	// allocate the output and working matrices
	score_out.create(cv::Size(M, N));
	Ix.create(cv::Size(N, M));
	Iy.create(cv::Size(M, N));
	cv::Mat_<T> score_tmp(cv::Size(N, M));

	// compute the distance transform across the rows
	for (size_t m = 0; m < M; ++m) {
		computeRow(score_in[m], score_tmp[m], Ix[m], N, fx, os.x);
	}

	// transpose the intermediate matrices
	transpose(score_tmp, score_tmp);

	// compute the distance transform down the columns
	for (size_t n = 0; n < N; ++n) {
		computeRow(score_tmp[n], score_out[n], Iy[n], M, fy, os.y);
	}

	// transpose back to the original layout
	transpose(score_out, score_out);
	transpose(Iy, Iy);

	// get argmins
	cv::Mat_<int> row(cv::Size(N, 1));
	int * const row_ptr = row[0];
	for (size_t m = 0; m < M; ++m) {
		int * const Iy_ptr = Iy[m];
		int * const Ix_ptr = Ix[m];
		for (size_t n = 0; n < N; ++n) {
			row_ptr[n] = Iy_ptr[Ix_ptr[n]];
		}
		for (size_t n = 0; n < N; ++n) {
			Iy_ptr[n] = row_ptr[n];
		}
	}
}
Code example #12
void SegmenterHumanSimple::getPixelProbability(const cv::Mat& img, cv::Mat_<float>& prob, vector<Rect> faces)
{
	prob.create(img.rows,img.cols);
	prob.setTo(0.3f);
	for(int i=0;i<faces.size();i++)
	{
		Rect rect = faces[i];
		rect = UtilsOpencv::ScaleRect(rect,Rect(0,0,img.cols,img.rows),1.2,1.5,1.2,1.2);
		prob(Rect(rect.x,rect.y,rect.width,img.rows-rect.y)) = 0.7f;
	}
	
}
Code example #13
void Multi_SVR_patch_expert::ResponseDepth(const cv::Mat_<float>& area_of_interest, cv::Mat_<float>& response)
{
	int response_height = area_of_interest.rows - height + 1;
	int response_width = area_of_interest.cols - width + 1;

	if(response.rows != response_height || response.cols != response_width)
	{
		response.create(response_height, response_width);
	}
	
	// With depth patch experts only do raw data modality
	svr_patch_experts[0].ResponseDepth(area_of_interest, response);
}
Code example #14
File: camerageometry.cpp  Project: CURG/v4r_libraries
int CameraGeometry::quaterion ( cv::Mat_<double> &quat, bool update )
{
    quat.create ( 4,1 );
    if ( update ) computePoseMatrix4x4 ( mExt );
    double w = mExt ( 0,0 ) + mExt ( 1,1 ) + mExt ( 2,2 ) + 1;
    if ( w < 0.0 ) return 1;
    w = sqrt ( w );
    quat ( 0,0 ) = ( mExt ( 2,1 ) - mExt ( 1,2 ) ) / ( w*2.0 );
    quat ( 1,0 ) = ( mExt ( 0,2 ) - mExt ( 2,0 ) ) / ( w*2.0 );
    quat ( 2,0 ) = ( mExt ( 1,0 ) - mExt ( 0,1 ) ) / ( w*2.0 );
    quat ( 3,0 ) = w / 2.0;
    return 0;
}
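For reference, the same trace-based rotation-matrix-to-quaternion conversion as a standalone sketch (illustration only; the member function above works on the class's own pose matrix mExt):

#include <cmath>
#include <opencv2/core/core.hpp>

// Fills q with (qx, qy, qz, qw); returns false when trace + 1 <= 0,
// where a different branch of the conversion would be needed.
bool rotationToQuaternion(const cv::Matx33d& R, cv::Vec4d& q)
{
    double w = R(0, 0) + R(1, 1) + R(2, 2) + 1.0;   // trace + 1
    if (w <= 0.0) return false;
    w = std::sqrt(w);
    q[0] = (R(2, 1) - R(1, 2)) / (2.0 * w);
    q[1] = (R(0, 2) - R(2, 0)) / (2.0 * w);
    q[2] = (R(1, 0) - R(0, 1)) / (2.0 * w);
    q[3] = w / 2.0;
    return true;
}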
Code example #15
void CostBoxFilter::TheBoxFilterArrayForm( const cv::Mat_<float> &bIn, cv::Mat_<float> &bOut, int radius )
{
	int height, width, iy, ix;
	height = bIn.rows;
	width = bIn.cols;

	bOut.create(height, width);
	cv::Mat_<float> cumHorSum(height, width);
	float *horSum = new float [width];
	for (iy=0; iy<height; ++iy)
	{
		float s = 0.0f;
		for (ix=0; ix<width; ++ix)
		{
			s += bIn[iy][ix];
			horSum[ix] = s;
		}

		for (ix=0; ix<=radius; ++ix)
			cumHorSum[iy][ix] = horSum[ix+radius];

		for (; ix<width-radius; ++ix)
			cumHorSum[iy][ix] = horSum[ix+radius]-horSum[ix-radius-1];

		for (; ix<width; ++ix)
			cumHorSum[iy][ix] = horSum[width-1]-horSum[ix-radius-1];
	}

	float *colSum = new float [height];
	for (ix=0; ix<width; ++ix)
	{
		float s = 0.0f;
		for (iy=0; iy<height; ++iy)
		{
			s += cumHorSum[iy][ix];
			colSum[iy] = s;
		}

		for (iy=0; iy<=radius; ++iy)
			bOut[iy][ix] = colSum[iy+radius];

		for (; iy<height-radius; ++iy)
			bOut[iy][ix] = colSum[iy+radius]-colSum[iy-radius-1];

		for (; iy<height; ++iy)
			bOut[iy][ix] = colSum[height-1]-colSum[iy-radius-1];
	}

	delete [] horSum;
	delete [] colSum;
}
Code example #16
void CascadeTrainer::Classify(const Data& data, cv::Mat_<label_t>& labels) {

    labels.create(data.rows, 1);
    for (int i = 0; i < labels.rows; i++) labels(i, 0) = POSITIVE_LABEL;

    for (int i = 0; i < data.rows; i++) {
        for (uint j = 0; j < stages.size(); j++) {
            Mat_<label_t> labels_inner;
            stages[j]->Classify(data.row(i), labels_inner);

            if (labels_inner(0, 0) == NEGATIVE_LABEL) {
                labels(i, 0) = NEGATIVE_LABEL;
                break;
            }
        }
    }
}
Code example #17
int ExtractSubImageDaisyDescriptors(daisy *desc, cv::Mat_<float> &descOut, float py, float px, float step, int ori, int h, int w, float limH, float limW)
{
	//printf("here 0 %d %d py = %f px = %f\n", w, h, py, px);
	descOut.create(w*h, DAISY_FEATURE_LENGTH);
	//printf("here 0-1\n");
	descOut.setTo(cv::Scalar(0.0));
	//printf("here 0-2\n");
	int iy, ix;
	float cy, cx, ssinOri, scosOri;
	ssinOri = step*sin((float)ori/180.0*M_PI);
	scosOri = step*cos((float)ori/180.0*M_PI);
	float ry, rx; // the first pos of each row
	ry = py; rx = px;
	//printf("here 1\n");
	//float *thor = new float [DAISY_FEATURE_LENGTH];
	for (iy=0; iy<h; ++iy)
	{
		cy = ry; cx = rx;
		float *ptr = (float *)(descOut.ptr(iy*w));
		//printf("here 2 %f %f %f %f\n", cy, cx, ssinOri, scosOri);
		
		for (ix=0; ix<w; ++ix)
		{			
			//memset(thor, 0, sizeof(float)*desc->descriptor_size());
			//desc->get_descriptor(std::min<double>(limH-1.0, std::max<double>(cy, 0.0)), std::min<double>(limW-1.0, std::max<double>(cx, 0.0)), ori, ptr);
			desc->ExtractDescriptorFromNormalizedHistogram(ptr, std::min<double>(limH-1.0, std::max<double>(cy, 0.0)), std::min<double>(limW-1.0, std::max<double>(cx, 0.0)), ori);
			ptr += DAISY_FEATURE_LENGTH;
			//printf("here 2-1\n");
			//memcpy(ptr, thor, sizeof(float)*DAISY_FEATURE_LENGTH);
			//printf("here 2-2\n");
			//printf("here 2-3\n");
			cy = cy+ssinOri;
			cx = cx+scosOri;
		}
		ry = ry+scosOri;
		rx = rx-ssinOri;
		//printf("here 2\n");
	}
	//delete [] thor;
	//printf("here 3\n");
	return 1;
}
Code example #18
File: opticalFLow.cpp  Project: ems0000/spm-bp
void opticalFlow::ReadFlowFile( const char *flowFile, cv::Mat_<cv::Vec2f> &flowVec, int height, int width )
{
	float *fBuffer = new float[height*width*2];
	::ReadFlowFile(fBuffer, flowFile, height, width);
	int iy, ix;
	flowVec.create(height, width);
	for (iy=0; iy<height; ++iy)
	{
		for (ix=0; ix<width; ++ix)
		{
			float tmp0, tmp1;
			tmp0 = fBuffer[iy*2*width+2*ix];
			tmp1 = fBuffer[iy*2*width+2*ix+1];
			flowVec[iy][ix][0] = tmp0;
			flowVec[iy][ix][1] = tmp1;
		}
	}

	delete [] fBuffer;
}
Code example #19
    void TemplateMatchCandidates::weakClassifiersForTemplate(
        const cv::Mat &templ, 
        const cv::Mat &templMask,
        const std::vector< cv::Rect > &rects, 
        cv::Mat_<int> &classifiers, 
        cv::Scalar &mean)
    {
        const int nChannels = templ.channels();
        classifiers.create(nChannels, (int)rects.size());

        // Note we use cv::mean here to make use of mask.
        mean = cv::mean(templ, templMask);

        for (int x = 0; x < (int)rects.size(); ++x) {
            cv::Scalar blockMean = cv::mean(templ(rects[x]), templMask.empty() ? cv::noArray() : templMask(rects[x]));
            
            for (int y = 0; y < nChannels; ++y) {
                classifiers(y, x) = blockMean[y] > mean[y] ? 1 : -1;
            }            
        }
    }
Code example #20
void computeFlowField(const cv::Mat &image1, const cv::Mat &image2, std::unordered_map<std::string, parameter> &parameters,
                         cv::Mat_<cv::Vec2d> &flowfield){

  // convert images into 64 bit floating point images
  cv::Mat i1, i2;
  i1 = image1.clone();
  i1.convertTo(i1, CV_64F);
  i2 = image2.clone();
  i2.convertTo(i2, CV_64F);

  // Blurring
  double sigma = (double)parameters.at("sigma").value/parameters.at("sigma").divfactor;
  cv::GaussianBlur(i1, i1, cv::Size(0,0), sigma, sigma, cv::BORDER_REFLECT);
  cv::GaussianBlur(i2, i2, cv::Size(0,0), sigma, sigma, cv::BORDER_REFLECT);

  // compute Brightness and Gradient Tensors
  double gamma = (double)parameters.at("gamma").value/parameters.at("gamma").divfactor;
  cv::Mat_<cv::Vec6d> t = (1.0 - gamma) * ComputeBrightnessTensor(i1, i2, 1, 1) + gamma * ComputeGradientTensor(i1, i2, 1, 1);

  // create flowfield
  flowfield.create(i1.rows, i1.cols);
  flowfield = cv::Vec2d(0,0);
  cv::copyMakeBorder(flowfield, flowfield, 1, 1, 1 , 1, cv::BORDER_CONSTANT, 0);

  // make sure all parameter exist
  if (parameters.count("maxiter") == 0 || parameters.count("alpha") == 0 ||
      parameters.count("omega") == 0 || parameters.count("gamma") == 0){

    std::cout << "Parameter nicht gefunden" << std::endl;
    std::exit(1);
  }

  // main loop
  for (int i = 0; i < parameters.at("maxiter").value; i++){
    HS_Stepfunction(t, flowfield, parameters);
  }

  flowfield = flowfield(cv::Rect(1,1,image1.cols, image1.rows));
}
Code example #21
void
gen_points_3d(std::vector<Plane>& planes_out, cv::Mat_<unsigned char> &plane_mask, cv::Mat& points3d, cv::Mat& normals,
              int n_planes)
{
    std::vector<Plane> planes;
    for (int i = 0; i < n_planes; i++)
    {
        Plane px;
        for (int j = 0; j < 1; j++)
        {
            px.set_d(rng.uniform(-3.f, -0.5f));
            planes.push_back(px);
        }
    }
    cv::Mat_ < cv::Vec3f > outp(H, W);
    cv::Mat_ < cv::Vec3f > outn(H, W);
    plane_mask.create(H, W);

    // n  ( r - r_0) = 0
    // n * r_0 = d
    //
    // r_0 = (0,0,0)
    // r[0]
    for (int v = 0; v < H; v++)
    {
        for (int u = 0; u < W; u++)
        {
            unsigned int plane_index = (u / float(W)) * planes.size();
            Plane plane = planes[plane_index];
            outp(v, u) = plane.intersection(u, v, Kinv);
            outn(v, u) = plane.n;
            plane_mask(v, u) = plane_index;
        }
    }
    planes_out = planes;
    points3d = outp;
    normals = outn;
}
Code example #22
File: PDM.cpp  Project: ezzaouia/OpenFace
//===========================================================================
// Calculate the PDM's Jacobian over rigid parameters (rotation, translation and scaling); the additional input W represents trust for each of the landmarks and is part of Non-Uniform RLMS
void PDM::ComputeRigidJacobian(const cv::Mat_<float>& p_local, const cv::Vec6d& params_global, cv::Mat_<float> &Jacob, const cv::Mat_<float> W, cv::Mat_<float> &Jacob_t_w)
{
  	
	// number of verts
	int n = this->NumberOfPoints();
  
	Jacob.create(n * 2, 6);

	float X,Y,Z;

	float s = (float)params_global[0];
  	
	cv::Mat_<double> shape_3D_d;
	cv::Mat_<double> p_local_d;
	p_local.convertTo(p_local_d, CV_64F);
	this->CalcShape3D(shape_3D_d, p_local_d);
	
	cv::Mat_<float> shape_3D;
	shape_3D_d.convertTo(shape_3D, CV_32F);

	 // Get the rotation matrix
	cv::Vec3d euler(params_global[1], params_global[2], params_global[3]);
	cv::Matx33d currRot = Euler2RotationMatrix(euler);
	
	float r11 = (float) currRot(0,0);
	float r12 = (float) currRot(0,1);
	float r13 = (float) currRot(0,2);
	float r21 = (float) currRot(1,0);
	float r22 = (float) currRot(1,1);
	float r23 = (float) currRot(1,2);
	float r31 = (float) currRot(2,0);
	float r32 = (float) currRot(2,1);
	float r33 = (float) currRot(2,2);

	cv::MatIterator_<float> Jx = Jacob.begin();
	cv::MatIterator_<float> Jy = Jx + n * 6;

	for(int i = 0; i < n; i++)
	{
    
		X = shape_3D.at<float>(i,0);
		Y = shape_3D.at<float>(i+n,0);
		Z = shape_3D.at<float>(i+n*2,0);    
		
		// The rigid jacobian from the axis angle rotation matrix approximation using small angle assumption (R * R')
		// where R' = [1, -wz, wy
		//             wz, 1, -wx
		//             -wy, wx, 1]
		// And this is derived using the small angle assumption on the axis angle rotation matrix parametrisation

		// scaling term
		*Jx++ =  (X  * r11 + Y * r12 + Z * r13);
		*Jy++ =  (X  * r21 + Y * r22 + Z * r23);
		
		// rotation terms
		*Jx++ = (s * (Y * r13 - Z * r12) );
		*Jy++ = (s * (Y * r23 - Z * r22) );
		*Jx++ = (-s * (X * r13 - Z * r11));
		*Jy++ = (-s * (X * r23 - Z * r21));
		*Jx++ = (s * (X * r12 - Y * r11) );
		*Jy++ = (s * (X * r22 - Y * r21) );
		
		// translation terms
		*Jx++ = 1.0f;
		*Jy++ = 0.0f;
		*Jx++ = 0.0f;
		*Jy++ = 1.0f;

	}

	cv::Mat Jacob_w = cv::Mat::zeros(Jacob.rows, Jacob.cols, Jacob.type());
	
	Jx =  Jacob.begin();
	Jy =  Jx + n*6;

	cv::MatIterator_<float> Jx_w =  Jacob_w.begin<float>();
	cv::MatIterator_<float> Jy_w =  Jx_w + n*6;

	// Iterate over all Jacobian values and multiply them by the weight in diagonal of W
	for(int i = 0; i < n; i++)
	{
		float w_x = W.at<float>(i, i);
		float w_y = W.at<float>(i+n, i+n);

		for(int j = 0; j < Jacob.cols; ++j)
		{
			*Jx_w++ = *Jx++ * w_x;
			*Jy_w++ = *Jy++ * w_y;
		}		
	}

	Jacob_t_w = Jacob_w.t();
}
Code example #23
void CFFilter::GetCrossUsingSlidingWindow(const cv::Mat_<cv::Vec3b> &img, cv::Mat_<cv::Vec4b> &crMap, int maxL, int tau)
{
	if ((img.data == NULL) || img.empty()) return;

	////CalcTime ct;
	//ct.Start();

	int width, height;
	width = img.cols;
	height = img.rows;

	crMap.create(height, width);
	int iy, ix;
#pragma omp parallel for private(iy) schedule(guided, 1)
	for (iy=0; iy<height; ++iy)
	{
		// to determine right most, cr[2]
		cv::Vec3i sumColor;
		int lp, rp;
		rp = 0;
		for (lp=0; lp<width; ++lp)
		{
			int diff = rp-lp;
			if (diff == 0)
			{
				sumColor = img[iy][rp];
				++rp;
				++diff;
			}

			while (diff <= maxL)
			{
				if (rp >= width) break;

				//if (abs(sumColor[0]/diff-img[iy][rp][0])>tau || abs(sumColor[1]/diff-img[iy][rp][1])>tau
				//	|| abs(sumColor[2]/diff-img[iy][rp][2])>tau) break;
				if (abs(sumColor[0]-img[iy][rp][0]*diff)>tau*diff 
					|| abs(sumColor[1]-img[iy][rp][1]*diff)>tau*diff
					|| abs(sumColor[2]-img[iy][rp][2]*diff)>tau*diff) break;
				sumColor += img[iy][rp];
				++diff;
				++rp;
			}
			sumColor -= img[iy][lp];

			crMap[iy][lp][2] = diff-1;
		}

		// to determine left most, cr[0]
		lp = width-1;
		for (rp=width-1; rp>=0; --rp)
		{
			int diff = rp-lp;
			if (diff == 0)
			{
				sumColor = img[iy][lp];
				--lp;
				++diff;
			}
			while (diff <= maxL)
			{
				if (lp < 0) break;
				//if (abs(sumColor[0]/diff-img[iy][lp][0])>tau || abs(sumColor[1]/diff-img[iy][lp][1])>tau
				//	|| abs(sumColor[2]/diff-img[iy][lp][2])>tau) break;
				if (abs(sumColor[0]-img[iy][lp][0]*diff)>tau*diff 
					|| abs(sumColor[1]-img[iy][lp][1]*diff)>tau*diff
					|| abs(sumColor[2]-img[iy][lp][2]*diff)>tau*diff) break;
				sumColor += img[iy][lp];
				++diff;
				--lp;
			}

			sumColor -= img[iy][rp];

			crMap[iy][rp][0] = diff-1;
		}
	}

	// determine up and down arm length
#pragma omp parallel for private(ix) schedule(guided, 1)
	for (ix=0; ix<width; ++ix)
	{
		// to determine down most, cr[3]
		cv::Vec3i sumColor;
		int up, dp;
		dp = 0;
		for (up=0; up<height; ++up)
		{
			int diff = dp-up;
			if (diff == 0)
			{
				sumColor = img[dp][ix];
				++dp;
				++diff;
			}
			while (diff <= maxL)
			{
				if (dp >= height) break;
				//if (abs(sumColor[0]/diff-img[dp][ix][0])>tau || abs(sumColor[1]/diff-img[dp][ix][1])>tau
				//	|| abs(sumColor[2]/diff-img[dp][ix][2])>tau) break;
				if (abs(sumColor[0]-img[dp][ix][0]*diff)>tau*diff 
					|| abs(sumColor[1]-img[dp][ix][1]*diff)>tau*diff
					|| abs(sumColor[2]-img[dp][ix][2]*diff)>tau*diff) break;
				sumColor += img[dp][ix];
				++diff;
				++dp;
			}

			sumColor -= img[up][ix];

			crMap[up][ix][3] = diff-1;
		}

		// to determine up most, cr[1]
		up = height-1;
		for (dp=height-1; dp>=0; --dp)
		{
			int diff = dp-up;
			if (diff == 0)
			{
				sumColor = img[up][ix];
				--up;
				++diff;
			}

			while (diff <= maxL)
			{
				if (up < 0) break;
				//if (abs(sumColor[0]/diff-img[up][ix][0])>tau || abs(sumColor[1]/diff-img[up][ix][1])>tau
				//	|| abs(sumColor[2]/diff-img[up][ix][2])>tau) break;
				if (abs(sumColor[0]-img[up][ix][0]*diff)>tau*diff
					|| abs(sumColor[1]-img[up][ix][1]*diff)>tau*diff
					|| abs(sumColor[2]-img[up][ix][2]*diff)>tau*diff) break;
				sumColor += img[up][ix];
				++diff;
				--up;
			}

			sumColor -= img[dp][ix];

			crMap[dp][ix][1] = diff-1;
		}
	}
	
	//ct.End("SlWin Construct CrossMap");
}
Code example #24
void SpmBowExtractor::extractSpm(const cv::Mat& image, const cv::Mat_<uchar>& mask, cv::Mat_<int>& spm_histogram, feature_type::T feature_types)
{
    const std::size_t histogram_count = BowExtractor::getHistogramCount(pyramid_levels_);
    const cv::Rect roi = cv::Rect(0,0, image.cols, image.rows);

    const std::size_t histo_dim = compute_histogram_length(feature_types, histogram_count);
    spm_histogram.create(1, histo_dim);
    std::size_t last_offset = 0;

    // extract surf
    if (feature_types & feature_type::Surf)
    {
        cv::Mat_<int> surf_histo = getHistogramView(
                spm_histogram,
                0,
                surf_.wordCount(),
                histogram_count);
        extract_feature(surf_extractor_, surf_, image, roi, mask, surf_histo);
        last_offset += surf_histo.total();
    }

    // extract hog
    if (feature_types & feature_type::Hog)
    {
        cv::Mat_<int> hog_histo = getHistogramView(
                spm_histogram,
                last_offset,
                hog_.wordCount(),
                histogram_count);
        extract_feature(hog_extractor_, hog_, image, roi, mask, hog_histo);
        last_offset += hog_histo.total();
    }

    // extract color
    if (feature_types & feature_type::Color)
    {
        cv::Mat_<int> color_histo = getHistogramView(
                spm_histogram,
                last_offset,
                color_.wordCount(),
                histogram_count);
        extract_feature(color_extractor_, color_, image, roi, mask, color_histo);
        last_offset += color_histo.total();
    }

    // extract lbp
    if (feature_types & feature_type::Lbp)
    {
        cv::Mat_<int> lbp_histo = getHistogramView(
                spm_histogram,
                last_offset,
                lbp_.wordCount(),
                histogram_count);
        lbp_.sumPool(
                image,
                roi,
                mask,
                lbp_.wordCount(),
                pyramid_levels_,
                lbp_histo);
        last_offset += lbp_histo.total();
    }

    // extract ssd
    if ((feature_types & feature_type::Ssd) || (feature_types & feature_type::ColorSsd))
    {
        cv::Mat_<int> ssd_histo = getHistogramView(
                spm_histogram,
                last_offset,
                ssd_.wordCount(),
                histogram_count);
        extract_feature(ssd_extractor_, ssd_, image, roi, mask, ssd_histo);
        last_offset += ssd_histo.total();
    }

    // extract holbp
    if (feature_types & feature_type::HoLbp)
    {
        cv::Mat_<int> holbp_histo = getHistogramView(
                spm_histogram,
                last_offset,
                holbp_.wordCount(),
                histogram_count);
        extract_feature(holbp_extractor_, holbp_, image, roi, mask, holbp_histo);
        last_offset += holbp_histo.total();
    }

    // extract dense_surf 
    if (feature_types & feature_type::DenseSurf)
    {
        cv::Mat_<int> dense_surf_histo = getHistogramView(
                spm_histogram,
                last_offset,
                dense_surf_.wordCount(),
                histogram_count);
        extract_feature(dense_surf_extractor_, dense_surf_, image, roi, mask,
												dense_surf_histo);
        last_offset += dense_surf_histo.total();
    }
}
Code example #25
File: PDM.cpp  Project: ezzaouia/OpenFace
//===========================================================================
// Calculate the PDM's Jacobian over all parameters (rigid and non-rigid); the additional input W represents trust for each of the landmarks and is part of Non-Uniform RLMS
void PDM::ComputeJacobian(const cv::Mat_<float>& params_local, const cv::Vec6d& params_global, cv::Mat_<float> &Jacobian, const cv::Mat_<float> W, cv::Mat_<float> &Jacob_t_w)
{ 
	
	// number of vertices
	int n = this->NumberOfPoints();
		
	// number of non-rigid parameters
	int m = this->NumberOfModes();

	Jacobian.create(n * 2, 6 + m);
	
	float X,Y,Z;
	
	float s = (float) params_global[0];
  	
	cv::Mat_<double> shape_3D_d;
	cv::Mat_<double> p_local_d;
	params_local.convertTo(p_local_d, CV_64F);
	this->CalcShape3D(shape_3D_d, p_local_d);
	
	cv::Mat_<float> shape_3D;
	shape_3D_d.convertTo(shape_3D, CV_32F);

	cv::Vec3d euler(params_global[1], params_global[2], params_global[3]);
	cv::Matx33d currRot = Euler2RotationMatrix(euler);
	
	float r11 = (float) currRot(0,0);
	float r12 = (float) currRot(0,1);
	float r13 = (float) currRot(0,2);
	float r21 = (float) currRot(1,0);
	float r22 = (float) currRot(1,1);
	float r23 = (float) currRot(1,2);
	float r31 = (float) currRot(2,0);
	float r32 = (float) currRot(2,1);
	float r33 = (float) currRot(2,2);

	cv::MatIterator_<float> Jx =  Jacobian.begin();
	cv::MatIterator_<float> Jy =  Jx + n * (6 + m);
	cv::MatConstIterator_<double> Vx =  this->princ_comp.begin();
	cv::MatConstIterator_<double> Vy =  Vx + n*m;
	cv::MatConstIterator_<double> Vz =  Vy + n*m;

	for(int i = 0; i < n; i++)
	{
    
		X = shape_3D.at<float>(i,0);
		Y = shape_3D.at<float>(i+n,0);
		Z = shape_3D.at<float>(i+n*2,0);    
    
		// The rigid jacobian from the axis angle rotation matrix approximation using small angle assumption (R * R')
		// where R' = [1, -wz, wy
		//             wz, 1, -wx
		//             -wy, wx, 1]
		// And this is derived using the small angle assumption on the axis angle rotation matrix parametrisation

		// scaling term
		*Jx++ = (X  * r11 + Y * r12 + Z * r13);
		*Jy++ = (X  * r21 + Y * r22 + Z * r23);
		
		// rotation terms
		*Jx++ = (s * (Y * r13 - Z * r12) );
		*Jy++ = (s * (Y * r23 - Z * r22) );
		*Jx++ = (-s * (X * r13 - Z * r11));
		*Jy++ = (-s * (X * r23 - Z * r21));
		*Jx++ = (s * (X * r12 - Y * r11) );
		*Jy++ = (s * (X * r22 - Y * r21) );
		
		// translation terms
		*Jx++ = 1.0f;
		*Jy++ = 0.0f;
		*Jx++ = 0.0f;
		*Jy++ = 1.0f;

		for(int j = 0; j < m; j++,++Vx,++Vy,++Vz)
		{
			// How much the change of the non-rigid parameters (when object is rotated) affect 2D motion
			*Jx++ = (float) ( s*(r11*(*Vx) + r12*(*Vy) + r13*(*Vz)) );
			*Jy++ = (float) ( s*(r21*(*Vx) + r22*(*Vy) + r23*(*Vz)) );
		}
	}	

	// Adding the weights here
	cv::Mat Jacob_w = Jacobian.clone();
	
	if(cv::trace(W)[0] != W.rows) 
	{
		Jx =  Jacobian.begin();
		Jy =  Jx + n*(6+m);

		cv::MatIterator_<float> Jx_w =  Jacob_w.begin<float>();
		cv::MatIterator_<float> Jy_w =  Jx_w + n*(6+m);

		// Iterate over all Jacobian values and multiply them by the weight in diagonal of W
		for(int i = 0; i < n; i++)
		{
			float w_x = W.at<float>(i, i);
			float w_y = W.at<float>(i+n, i+n);

			for(int j = 0; j < Jacobian.cols; ++j)
			{
				*Jx_w++ = *Jx++ * w_x;
				*Jy_w++ = *Jy++ * w_y;
			}
		}
	}
	Jacob_t_w = Jacob_w.t();

}
Code example #26
//===========================================================================
void CCNF_neuron::Response(cv::Mat_<float> &im, cv::Mat_<double> &im_dft, cv::Mat &integral_img, cv::Mat &integral_img_sq, cv::Mat_<float> &resp)
{

	int h = im.rows - weights.rows + 1;
	int w = im.cols - weights.cols + 1;
	
	// the patch area on which we will calculate responses
	cv::Mat_<float> I;

	if(neuron_type == 3)
	{
		// Perform normalisation across the whole patch (ignoring the invalid values indicated by <= 0)

		cv::Scalar mean;
		cv::Scalar std;
		
		// ignore missing values
		cv::Mat_<uchar> mask = im > 0;
		cv::meanStdDev(im, mean, std, mask);

		// if all values the same don't divide by 0
		if(std[0] != 0)
		{
			I = (im - mean[0]) / std[0];
		}
		else
		{
			I = (im - mean[0]);
		}

		I.setTo(0, mask == 0);
	}
	else
	{
		if(neuron_type == 0)
		{
			I = im;
		}
		else
		{
			printf("ERROR(%s,%d): Unsupported patch type %d!\n", __FILE__,__LINE__,neuron_type);
			abort();
		}
	}
  
	if(resp.empty())
	{		
		resp.create(h, w);
	}

	// The response from neuron before activation
	if(neuron_type == 3)
	{
		// In case of depth we use per area, rather than per patch normalisation
		matchTemplate_m(I, im_dft, integral_img, integral_img_sq, weights, weights_dfts, resp, CV_TM_CCOEFF); // the linear multiplication, efficient calc of response
	}
	else
	{
		matchTemplate_m(I, im_dft, integral_img, integral_img_sq, weights, weights_dfts, resp, CV_TM_CCOEFF_NORMED); // the linear multiplication, efficient calc of response
	}

	cv::MatIterator_<float> p = resp.begin();

	cv::MatIterator_<float> q1 = resp.begin(); // response for each pixel
	cv::MatIterator_<float> q2 = resp.end();

	// the logistic function (sigmoid) applied to the response
	while(q1 != q2)
	{
		*p++ = (2 * alpha) * 1.0 /(1.0 + exp( -(*q1++ * norm_weights + bias )));
	}

}
Code example #27
File: PDM.cpp  Project: ezzaouia/OpenFace
//===========================================================================
// Compute the 3D representation of shape (in object space) using the local parameters
void PDM::CalcShape3D(cv::Mat_<double>& out_shape, const cv::Mat_<double>& p_local) const
{
	out_shape.create(mean_shape.rows, mean_shape.cols);
	out_shape = mean_shape + princ_comp*p_local;
}
Code example #28
void SVR_patch_expert::ResponseDepth(const Mat_<float>& area_of_interest, cv::Mat_<double> &response)
{

	// How big the response map will be
	int response_height = area_of_interest.rows - weights.rows + 1;
	int response_width = area_of_interest.cols - weights.cols + 1;
	
	// the patch area on which we will calculate responses
	Mat_<float> normalised_area_of_interest;
  
	if(response.rows != response_height || response.cols != response_width)
	{
		response.create(response_height, response_width);
	}

	if(type == 0)
	{
		// Perform normalisation across whole patch
		cv::Scalar mean;
		cv::Scalar std;
		
		// ignore missing values
		cv::Mat_<uchar> mask = area_of_interest > 0;
		cv::meanStdDev(area_of_interest, mean, std, mask);

		// if all values the same don't divide by 0
		if(std[0] == 0)
		{
			std[0] = 1;
		}

		normalised_area_of_interest = (area_of_interest - mean[0]) / std[0];

		// Set the invalid pixels to 0
		normalised_area_of_interest.setTo(0, mask == 0);
	}
	else
	{
		printf("ERROR(%s,%d): Unsupported patch type %d!\n", __FILE__,__LINE__,type);
		abort();
	}
  
	Mat_<float> svr_response;
		
	// The empty matrix as we don't pass precomputed dft's of image
	Mat_<double> empty_matrix_0(0,0,0.0);
	Mat_<float> empty_matrix_1(0,0,0.0);
	Mat_<float> empty_matrix_2(0,0,0.0);

	// Efficient calc of patch expert response across the area of interest
	matchTemplate_m(normalised_area_of_interest, empty_matrix_0, empty_matrix_1, empty_matrix_2, weights, weights_dfts, svr_response, CV_TM_CCOEFF); 

	response.create(svr_response.size());
	MatIterator_<double> p = response.begin();

	cv::MatIterator_<float> q1 = svr_response.begin(); // response for each pixel
	cv::MatIterator_<float> q2 = svr_response.end();

	while(q1 != q2)
	{
		// the SVR response passed through a logistic regressor
		*p++ = 1.0/(1.0 + exp( -(*q1++ * scaling + bias )));
	}	
}
Code example #29
void CFFilter::FastCLMF0FloatFilterPointer( const cv::Mat_<cv::Vec4b> &crMap, const cv::Mat_<float> &src, cv::Mat_<float> &dst )
{
//	qx_timer tt;
//	tt.start();

	int iy, ix, width, height;
	width = crMap.cols;
	height = crMap.rows;

	cv::Mat_<float> cost = src;

	// first iteration
	cv::Mat_<float> crossHorSum(height, width);
	cv::Mat_<int> sizeHorSum(height, width);
	cv::Mat_<float> crossHorSumTranspose(width, height);
	cv::Mat_<int> sizeHorSumTranspose(width, height);
	cv::Mat_<cv::Vec4b> crMapTranspose(width, height);
	cv::transpose(crMap, crMapTranspose);

	float *horSum = new float [width+1];
	float *rowSizeHorSum = new float [width+1];
	float *verSum = new float [height+1];
	int *colSizeVerSum = new int [height+1];

	float *costPtr = (float *)(cost.ptr(0));
	float *crossHorPtr = (float *)(crossHorSum.ptr(0));
	int *sizeHorPtr = (int *)(sizeHorSum.ptr(0));
	cv::Vec4b *crMapPtr = (cv::Vec4b *)(crMap.ptr(0));

	for (iy=0; iy<height; ++iy)
	{
		float s = 0.0;		
		float *horPtr = horSum;
		*horPtr++ = s;
		for (ix=0; ix<width; ++ix)
		{
			s += *costPtr++;
			*horPtr++ = s;
		}


		for (ix=0; ix<width; ++ix)
		{
			cv::Vec4b cross = *crMapPtr++;
			*crossHorPtr++ = horSum[ix+cross[2]+1]-horSum[ix-cross[0]];
			*sizeHorPtr++ = cross[2]+cross[0]+1;
		}
	}


	cv::transpose(crossHorSum, crossHorSumTranspose);
	cv::transpose(sizeHorSum, sizeHorSumTranspose);
	crossHorPtr = (float *)(crossHorSumTranspose.ptr(0));
	sizeHorPtr = (int *)(sizeHorSumTranspose.ptr(0));

	cv::Mat_<float> crossVerSum(height, width);
	cv::Mat_<int> sizeVerSum(height, width);
	cv::Mat_<float> crossVerSumTranpose(width, height);
	cv::Mat_<int> sizeVerSumTranpose(width, height);
	float *crossVerPtr = (float *)(crossVerSumTranpose.ptr(0));
	int *sizeVerPtr = (int *)(sizeVerSumTranpose.ptr(0));

	crMapPtr = (cv::Vec4b *)(crMapTranspose.ptr(0));
	const int W_FAC = width;
	for (ix=0; ix<width; ++ix)
	{
		float s = 0.0;
		int cs = 0;	
		

		float *verPtr = verSum;
		int *colSizeVerPtr = colSizeVerSum;
		*verPtr++ = s;
		*colSizeVerPtr++ = cs;
		for (iy=0; iy<height; ++iy)
		{
			s += *crossHorPtr++;
			*verPtr++ =s;
			cs += *sizeHorPtr++;
			*colSizeVerPtr++ = cs;
		}


		for (iy=0; iy<height; ++iy)
		{
			cv::Vec4b cross = *crMapPtr++;
			*crossVerPtr++ = verSum[iy+cross[3]+1]-verSum[iy-cross[1]];
			*sizeVerPtr++ = colSizeVerSum[iy+cross[3]+1]-colSizeVerSum[iy-cross[1]];
		}
	}

	cv::transpose(crossVerSumTranpose, crossVerSum);
	cv::transpose(sizeVerSumTranpose, sizeVerSum);

	// second iteration
	crossVerPtr = (float *)(crossVerSum.ptr(0));
	sizeVerPtr = (int *)(sizeVerSum.ptr(0));
	crossHorPtr = (float *)(crossHorSum.ptr(0));
	sizeHorPtr = (int *)(sizeHorSum.ptr(0));
	crMapPtr = (cv::Vec4b *)(crMap.ptr(0));
	for (iy=0; iy<height; ++iy)
	{
		float s = 0.0;		
		int rs = 0;
		float *horPtr = horSum;
		float *rowSizeHorPtr = rowSizeHorSum;
		*horPtr++ = s;
		*rowSizeHorPtr++ = rs;
		for (ix=0; ix<width; ++ix)
		{
			s += *crossVerPtr++;
			*horPtr++ = s;
			rs += *sizeVerPtr++;
			*rowSizeHorPtr++ = rs;
		}
		
		for (ix=0; ix<width; ++ix)
		{
			cv::Vec4b cross = *crMapPtr++;
			*crossHorPtr++ = horSum[ix+cross[2]+1]-horSum[ix-cross[0]];
			*sizeHorPtr++ = rowSizeHorSum[ix+cross[2]+1]-rowSizeHorSum[ix-cross[0]];
		}
	}
	cv::transpose(crossHorSum, crossHorSumTranspose);
	cv::transpose(sizeHorSum, sizeHorSumTranspose);
	crossHorPtr = (float *)(crossHorSumTranspose.ptr(0));
	sizeHorPtr = (int *)(sizeHorSumTranspose.ptr(0));
	crossVerPtr = (float *)(crossVerSumTranpose.ptr(0));
	sizeVerPtr = (int *)(sizeVerSumTranpose.ptr(0));
	crMapPtr = (cv::Vec4b *)(crMapTranspose.ptr(0));
	for (ix=0; ix<width; ++ix)
	{
		float s = 0.0;
		int cs = 0;
		float *verPtr = verSum;
		int *colSizeVerPtr = colSizeVerSum;
		*verPtr++ = s;
		*colSizeVerPtr++ = cs;

		for (iy=0; iy<height; ++iy)
		{
			s += *crossHorPtr++;
			*verPtr++ = s;
			cs += *sizeHorPtr++;
			*colSizeVerPtr++ = cs;
		}

		for (iy=0; iy<height; ++iy)
		{
			cv::Vec4b cross = *crMapPtr++;
			*crossVerPtr++ = verSum[iy+cross[3]+1]-verSum[iy-cross[1]];
			*sizeVerPtr++ = colSizeVerSum[iy+cross[3]+1]-colSizeVerSum[iy-cross[1]];
		}

	}
	//tt.time_display("Smoothing");

	delete [] horSum;
	delete [] rowSizeHorSum;
	delete [] verSum;
	delete [] colSizeVerSum;

	cv::transpose(crossVerSumTranpose, crossVerSum);
	cv::transpose(sizeVerSumTranpose, sizeVerSum);

	dst.create(height, width);
	cv::Mat_<float> tmpDst = dst;
	float *pCrossVerSum = (float *)crossVerSum.ptr(0);
	int *pSizeVerSum = (int *)sizeVerSum.ptr(0);

	float *pTmpDst = (float *)tmpDst.ptr(0);

	for (iy=0; iy<height*width; ++iy)
	{
		(*pTmpDst++) = (*pCrossVerSum++)/(*pSizeVerSum++);
	}
}
Code example #30
void CostBoxFilter::TheBoxFilter( const cv::Mat_<float> &bIn, cv::Mat_<float> &bOut, int radius )
{
	int height, width, iy, ix;
	height = bIn.rows;
	width = bIn.cols;

	bOut.create(height, width);
	cv::Mat_<float> cumHorSum(height, width);
	float *horSum = new float [width];
	for (iy=0; iy<height; ++iy)
	{
		float *bPtr = (float *)(bIn.ptr(iy));
		float *hPtr = horSum;
		float s = 0.0f;
		for (ix=0; ix<width; ++ix)
		{
			//s += bIn[iy][ix];
			//horSum[ix] = s;
			s += *bPtr++;
			*hPtr++ = s;
		}

		float *ptrR = horSum+radius;
		float *dPtr = (float *)(cumHorSum.ptr(iy));
		for (ix=0; ix<=radius; ++ix)
			*dPtr++ = *ptrR++;
		// cumHorSum[iy][ix] = horSum[ix+radius];


		float *ptrL = horSum;
		for (; ix<width-radius; ++ix)
			*dPtr++ = *ptrR++-*ptrL++;
		//cumHorSum[iy][ix] = horSum[ix+radius]-horSum[ix-radius-1];

		--ptrR;
		for (; ix<width; ++ix)
			*dPtr++ = *ptrR-*ptrL++;
		//cumHorSum[iy][ix] = horSum[width-1]-horSum[ix-radius-1];
	}

	const int W_FAC = width;
	float *colSum = new float [height];
	for (ix=0; ix<width; ++ix)
	{
		float s = 0.0f;
		float *cuPtr = (float *)(cumHorSum.ptr(0))+ix;
		float *coPtr = colSum;
		for (iy=0; iy<height; ++iy, cuPtr+=W_FAC)
		{
			//s += cumHorSum[iy][ix];
			//colSum[iy] = s;
			s += *cuPtr;
			*coPtr++ = s;
		}

		float *ptrD = colSum+radius;
		float *dPtr = (float *)(bOut.ptr(0))+ix;

		for (iy=0; iy<=radius; ++iy, dPtr+=W_FAC)
			//bOut[iy][ix] = colSum[iy+radius];
			*dPtr = *ptrD++;

		float *ptrU = colSum;
		for (; iy<height-radius; ++iy, dPtr+=W_FAC)
			//bOut[iy][ix] = colSum[iy+radius]-colSum[iy-radius-1];
			*dPtr = *ptrD++-*ptrU++;

		--ptrD;
		for (; iy<height; ++iy, dPtr+=W_FAC)
			// bOut[iy][ix] = colSum[height-1]-colSum[iy-radius-1];
			*dPtr = *ptrD-*ptrU++;
	}

	delete [] horSum;
	delete [] colSum;
}