Example No. 1
// static
cv::Mat_<float> CrossValidator::createCrossValidationMatrix( const vector< const vector<cv::Mat_<float> >* >& rowVectors,
                                                             cv::Mat_<int>& labels)
{
    assert( !rowVectors.empty());
    cv::Mat_<float> vecs;
    labels.create( 0,1);
    for ( int label = 0; label < rowVectors.size(); ++label)
    {
        assert( rowVectors[label] != 0);
        const vector<cv::Mat_<float> >& rvecs = *rowVectors[label];
        assert( !rvecs.empty());
        const int colDim = rvecs[0].cols; // Should be the length of each row vector in this class
        if ( vecs.empty())
            vecs.create(0, colDim);
        assert( colDim == vecs.cols);   // Ensure this class's row vector length matches what's already stored

        for ( int i = 0; i < rvecs.size(); ++i)
        {
            const cv::Mat_<float>& rv = rvecs[i];
            if ( rv.rows != 1 || rv.cols != colDim)
            {
                std::cerr << "ERROR feature vector size: " << rv.size() << std::endl;
                assert( rv.rows == 1 && rv.cols == colDim);
            }   // end if

            vecs.push_back( rv);    // Append the row vector to the bottom of the matrix
            labels.push_back(label);    // Set this vector's class label
        }   // end for
    }   // end for

    labels = labels.t();    // Make row vector
    return vecs;
}   // end createCrossValidationMatrix
cv::Mat_<cv::Vec3b> getWrongColorSegmentationImage(cv::Mat_<int>& labels, int labelcount)
{
	std::vector<cv::Vec3b> colors;
	colors.reserve(labelcount);

	std::srand(0);

	for(int i = 0; i < labelcount; ++i)
	{
		cv::Vec3b ccolor;
		ccolor[0] = std::rand() % 256;
		ccolor[1] = std::rand() % 256;
		ccolor[2] = std::rand() % 256;
		colors.push_back(ccolor);
	}

	cv::Mat result(labels.size(), CV_8UC3);

	cv::Vec3b *dst_ptr = result.ptr<cv::Vec3b>(0);
	const int *src_ptr = labels.ptr<int>(0);	// assumes labels is continuous in memory (result is freshly allocated and is)

	for(std::size_t i = 0; i < labels.total(); ++i)
	{
		int label = *src_ptr++;
		assert(label < (int)colors.size());
		*dst_ptr++ = colors[label];
	}
	return result;
}
Example No. 3
void writeImageData( ostream& os, const cv::Mat_<cv::Vec3b>& cimg, const cv::Mat_<cv::Vec3f>& points)
{
    const cv::Size imgSz = cimg.size();
    assert( imgSz == points.size());

    os << imgSz.height << " " << imgSz.width << endl;
    for ( int i = 0; i < imgSz.height; ++i)
    {
        const cv::Vec3f* pptr = points.ptr<cv::Vec3f>(i);
        const cv::Vec3b* cptr = cimg.ptr<cv::Vec3b>(i);
        for ( int j = 0; j < imgSz.width; ++j)
        {
            const cv::Vec3f& p = pptr[j];
            const cv::Vec3b& c = cptr[j];

            // Write the x,y,z
            os.write( (char*)&p[0], sizeof(float));
            os.write( (char*)&p[1], sizeof(float));
            os.write( (char*)&p[2], sizeof(float));

            // Write the colour
            os.write( (char*)&c[0], sizeof(byte));
            os.write( (char*)&c[1], sizeof(byte));
            os.write( (char*)&c[2], sizeof(byte));
        }   // end for - columns
    }   // end for - rows

    os << endl;
}   // end writeImageData
  /**
   * \brief Convert an OpenCV collection of points to a pcl::PointCloud; the OpenCV mat should contain NaNs for invalid points.
   * @param points3d opencv matrix of nx1 3 channel points
   * @param cloud output cloud
   * @param rgb the rgb image used to colour the points
   * @param mask optional mask; when non-empty it must be the same size as rgb
   */
  inline void
  cvToCloudXYZRGB(const cv::Mat_<cv::Point3f>& points3d, pcl::PointCloud<pcl::PointXYZRGB>& cloud, const cv::Mat& rgb,
                  const cv::Mat& mask, bool brg = true)
  {
    cloud.clear();
    cv::Mat_<cv::Point3f>::const_iterator point_it = points3d.begin(), point_end = points3d.end();
    cv::Mat_<cv::Vec3b>::const_iterator rgb_it = rgb.begin<cv::Vec3b>();
    cv::Mat_<uchar>::const_iterator mask_it;
    if(!mask.empty())
      mask_it = mask.begin<uchar>();
    for (; point_it != point_end; ++point_it, ++rgb_it)
    {
      if(!mask.empty())
      {
        // read the mask value for this point, then advance the iterator
        const uchar mask_value = *mask_it;
        ++mask_it;
        if (!mask_value)
          continue;
      }

      cv::Point3f p = *point_it;
      if (p.x != p.x && p.y != p.y && p.z != p.z) //throw out NANs
        continue;
      pcl::PointXYZRGB cp;
      cp.x = p.x;
      cp.y = p.y;
      cp.z = p.z;
      cp.r = (*rgb_it)[2]; //expecting in BGR format.
      cp.g = (*rgb_it)[1];
      cp.b = (*rgb_it)[0];
      cloud.push_back(cp);
    }
  }
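The following is a minimal usage sketch for cvToCloudXYZRGB above (the image size, the single valid point, and the helper name cvToCloudXYZRGB_example are illustrative only; it assumes the OpenCV and PCL headers shown):
  #include <limits>
  #include <opencv2/core/core.hpp>
  #include <pcl/point_cloud.h>
  #include <pcl/point_types.h>

  inline void
  cvToCloudXYZRGB_example()
  {
    // invalid points are marked with NaNs, as the conversion expects
    const float nan = std::numeric_limits<float>::quiet_NaN();
    cv::Mat_<cv::Point3f> points3d(480, 640, cv::Point3f(nan, nan, nan));
    points3d(240, 320) = cv::Point3f(0.1f, 0.2f, 1.5f);      // one valid point
    cv::Mat rgb(480, 640, CV_8UC3, cv::Scalar::all(128));    // BGR colour image
    cv::Mat mask;                                             // empty mask -> every point is considered

    pcl::PointCloud<pcl::PointXYZRGB> cloud;
    cvToCloudXYZRGB(points3d, cloud, rgb, mask);              // cloud ends up with the single valid point
  }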
Example No. 5
//===========================================================================
void Multi_SVR_patch_expert::Response(const cv::Mat_<float> &area_of_interest, cv::Mat_<float> &response)
{
	
	int response_height = area_of_interest.rows - height + 1;
	int response_width = area_of_interest.cols - width + 1;

	if(response.rows != response_height || response.cols != response_width)
	{
		response.create(response_height, response_width);
	}

	// For the purposes of the experiment only use the response of normal intensity, for fair comparison

	if(svr_patch_experts.size() == 1)
	{
		svr_patch_experts[0].Response(area_of_interest, response);		
	}
	else
	{
		// responses from multiple patch experts; these can be gradients, LBPs, etc.
		response.setTo(1.0);
		
		cv::Mat_<float> modality_resp(response_height, response_width);

		for(size_t i = 0; i < svr_patch_experts.size(); i++)
		{			
			svr_patch_experts[i].Response(area_of_interest, modality_resp);			
			response = response.mul(modality_resp);	
		}	
		
	}

}
Example No. 6
template<class T>
void paste_images_gallery(const std::vector<cv::Mat_<T> > & in,
                cv::Mat_<T> & out,
                int gallerycols, T background_color,
                bool draw_borders = false, cv::Scalar border_color = CV_RGB(0,0,0)) {
  if (in.size() == 0) {
    out.create(0, 0);
    return;
  }
  int cols1 = in[0].cols, rows1 = in[0].rows, nimgs = in.size();
  // prepare output
  int galleryrows = std::ceil(1. * nimgs / gallerycols);
  out.create(rows1 * galleryrows, cols1 * gallerycols);
  // printf("nimgs:%i, gallerycols:%i, galleryrows:%i\n", nimgs, gallerycols, galleryrows);
  out.setTo(background_color);
  // paste images
  for (int img_idx = 0; img_idx < nimgs; ++img_idx) {
    int galleryx = img_idx % gallerycols, galleryy = img_idx / gallerycols;
    cv::Rect roi(galleryx * cols1, galleryy * rows1, cols1, rows1);
    // printf("### out:%ix%i, roi %i:'%s'\n", out.cols, out.rows, img_idx, geometry_utils::print_rect(roi).c_str());
    if (cols1 != in[img_idx].cols || rows1 != in[img_idx].rows) {
      printf("Image %i of size (%ix%i), different from (%ix%i), skipping it.\n",
             img_idx, in[img_idx].cols, in[img_idx].rows, cols1, rows1);
      cv::line(out, roi.tl(), roi.br(), border_color, 2);
      cv::line(out, roi.br(), roi.tl(), border_color, 2);
    }
    else
      in[img_idx].copyTo( out(roi) );
    if (draw_borders)
      cv::rectangle(out, roi, border_color, 1);
  } // end loop img_idx
} // end paste_images_gallery<_T>
ClusteredLinearRegression::ClusteredLinearRegression(const cv::Mat_<double>& X, const cv::Mat_<double>& Y, int minClusterSize) {
	int N = X.rows;

	vector<cv::Mat_<float> > clusterX, clusterY;
	vector<cv::Mat_<float> > floatCentroids;
	{
		cv::Mat floatX, floatY;
		X.convertTo(floatX, CV_32F);
		Y.convertTo(floatY, CV_32F);
		
		cv::Mat_<float> centroid;
		X.convertTo(centroid, CV_32F);
		cv::reduce(centroid, centroid, 0, CV_REDUCE_AVG);
		partition(floatX, floatY, centroid, minClusterSize, clusterX, clusterY, floatCentroids);
	}

	clusterCentroids.resize(clusterX.size());
	W.resize(clusterX.size());

	for (int i = 0; i < clusterX.size(); ++i) {
		floatCentroids[i].convertTo(clusterCentroids[i], CV_64F);

		cv::Mat_<double> X2;
		clusterX[i].convertTo(X2, CV_64F);
		ml::addBias(X2);
		cv::Mat_<double> Y2;
		clusterY[i].convertTo(Y2, CV_64F);

		W[i] = X2.inv(cv::DECOMP_SVD) * Y2;
	}
}
 template<typename PointT>
 inline void
 cvToCloud(const cv::Mat_<cv::Point3f>& points3d, pcl::PointCloud<PointT>& cloud, const cv::Mat& mask = cv::Mat())
 {
   cloud.clear();
   cloud.width = points3d.size().width;
   cloud.height = points3d.size().height;
   cv::Mat_<cv::Point3f>::const_iterator point_it = points3d.begin(), point_end = points3d.end();
   const bool has_mask = !mask.empty();
   cv::Mat_<uchar>::const_iterator mask_it;
   if (has_mask)
     mask_it = mask.begin<uchar>();
   for (; point_it != point_end; ++point_it, (has_mask ? ++mask_it : mask_it))
   {
     if (has_mask && !*mask_it)
       continue;
     cv::Point3f p = *point_it;
     if (p.x != p.x && p.y != p.y && p.z != p.z) //throw out NANs
       continue;
     PointT cp;
     cp.x = p.x;
     cp.y = p.y;
     cp.z = p.z;
     cloud.push_back(cp);
   }
 }
Example No. 9
    template<class Archive, class T>
    void serialize(Archive & ar, ::cv::Mat_<T>& m, const unsigned int version)
    {
      if(Archive::is_loading::value == true)
      {
        int cols, rows;
        size_t elem_size, elem_type;

        ar & cols;
        ar & rows;
        ar & elem_size;
        ar & elem_type;

        m.create(rows, cols);

        size_t data_size = m.cols * m.rows * elem_size;
        ar & boost::serialization::make_array(m.ptr(), data_size);
      }
      else
      {
        size_t elem_size = m.elemSize();
        size_t elem_type = m.type();

        ar & m.cols;
        ar & m.rows;
        ar & elem_size;
        ar & elem_type;

        const size_t data_size = m.cols * m.rows * elem_size;
        ar & boost::serialization::make_array(m.ptr(), data_size);
      }
    }
cv::Mat test_with_args(const cv::Mat_<float>& in, const int& var1 = 1,
const double& var2 = 10.0, const std::string& name=std::string("test_name")) {
    std::cerr << "in: " << in << std::endl;
    std::cerr << "sz: " << in.size() << std::endl;
    std::cerr << "Returning transpose" << std::endl;
    return in.t();
}
Example No. 11
/** Orthogonal matching pursuit
* x: input signal, N * 1
* D: dictionary, N * M
* L: number of non-zero elements in the output
* coeff: coefficient of each atom in the dictionary, M * 1
*/
void OMP(const cv::Mat_<double>& x, const cv::Mat_<double>& D, int L, cv::Mat_<double>& coeff){
    int dim = x.rows;
    int atom_num = D.cols;
    coeff = Mat::zeros(atom_num, 1, CV_64FC1);
    Mat_<double> residual = x.clone();
    Mat_<double> selected_index(L, 1);
    Mat_<double> a;
    for (int i = 0; i < L; i++){
        cout << "here ok 1" << endl;
        Mat_<double> dot_p = D.t() * residual; 
        Point max_index;
        minMaxLoc(abs(dot_p), NULL, NULL, NULL, &max_index);
        int max_row = max_index.y;
        selected_index(i) = max_row;
        Mat_<double> temp(dim, i + 1);
        for (int j = 0; j < i + 1; j++){
            D.col(selected_index(j)).copyTo(temp.col(j));
        }
        Mat_<double> invert_temp;
        invert(temp, invert_temp, CV_SVD);
        a = invert_temp * x;
        residual = x - temp * a;
    }

    for (int i = 0; i < L; i++){
        coeff(selected_index(i)) = a(i);
    }
} 
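A short driver sketch for the OMP routine above (the dictionary size, the two chosen atoms, and the function name omp_example are illustrative; unit-norm atoms are assumed, as usual for matching pursuit):
#include <opencv2/core/core.hpp>
using namespace cv;

void omp_example(){
    const int N = 8, M = 16;
    Mat_<double> D(N, M);
    randn(D, Scalar(0.0), Scalar(1.0));
    for (int j = 0; j < M; j++){
        normalize(D.col(j), D.col(j));                   // unit-norm atoms
    }
    Mat_<double> x = 0.7 * D.col(3) + 0.2 * D.col(11);   // signal built from two atoms
    Mat_<double> coeff;
    OMP(x, D, 2, coeff);                                 // coeff should be non-zero near rows 3 and 11
}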
Example No. 12
/** get 3D points out of the image */
float matToVec(const cv::Mat_<cv::Vec3f> &src_ref, const cv::Mat_<cv::Vec3f> &src_mod, std::vector<cv::Vec3f>& pts_ref, std::vector<cv::Vec3f>& pts_mod)
{
  pts_ref.clear();
  pts_mod.clear();
  int px_missing = 0;

  cv::MatConstIterator_<cv::Vec3f> it_ref = src_ref.begin();
  cv::MatConstIterator_<cv::Vec3f> it_mod = src_mod.begin();
  for (; it_ref != src_ref.end(); ++it_ref, ++it_mod)
  {
    if (!cv::checkRange(*it_ref))
      continue;

    pts_ref.push_back(*it_ref);
    if (cv::checkRange(*it_mod))
    {
      pts_mod.push_back(*it_mod);
    }
    else
    {
      pts_mod.push_back(cv::Vec3f(0.0f, 0.0f, 0.0f));
      ++px_missing;
    }
  }

  float ratio = 0.0f;
  if ((src_ref.cols > 0) && (src_ref.rows > 0))
    ratio = float(px_missing) / float(src_ref.cols * src_ref.rows);
  return ratio;
}
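A toy call of matToVec above (the 4x4 maps, the single NaN pixel, and the function name matToVec_example are illustrative; NaNs mark pixels missing from the model rendering):
#include <limits>
#include <vector>
#include <opencv2/core/core.hpp>

void matToVec_example()
{
  const float nan = std::numeric_limits<float>::quiet_NaN();
  cv::Mat_<cv::Vec3f> ref(4, 4, cv::Vec3f(0.f, 0.f, 1.f));    // reference points, all valid
  cv::Mat_<cv::Vec3f> mod(4, 4, cv::Vec3f(0.f, 0.f, 1.f));    // model points
  mod(0, 0) = cv::Vec3f(nan, nan, nan);                       // one missing model pixel

  std::vector<cv::Vec3f> pts_ref, pts_mod;
  float missing_ratio = matToVec(ref, mod, pts_ref, pts_mod); // 1/16 in this toy case
  (void)missing_ratio;
}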
int crslic_segmentation::operator()(const cv::Mat& image, cv::Mat_<int>& labels)
{
	float directCliqueCost = 0.3;
	unsigned int const iterations = 3;
	double const diagonalCliqueCost = directCliqueCost / sqrt(2);

	bool isColorImage = (image.channels() == 3);
	std::vector<FeatureType> features;
	if (isColorImage)
		features.push_back(Color);
	else
		features.push_back(Grayvalue);

	features.push_back(Compactness);

	ContourRelaxation<int> crslic_obj(features);
	cv::Mat labels_temp = createBlockInitialization<int>(image.size(), settings.superpixel_size, settings.superpixel_size);

	crslic_obj.setCompactnessData(settings.superpixel_compactness);

	if (isColorImage)
	{
		cv::Mat imageYCrCb;
		cv::cvtColor(image, imageYCrCb, CV_BGR2YCrCb);
		std::vector<cv::Mat> imageYCrCbChannels;
		cv::split(imageYCrCb, imageYCrCbChannels);

		crslic_obj.setColorData(imageYCrCbChannels[0], imageYCrCbChannels[1], imageYCrCbChannels[2]);
	}
	else
		crslic_obj.setGrayvalueData(image.clone());

	crslic_obj.relax(labels_temp, directCliqueCost, diagonalCliqueCost, iterations, labels);
	return 1+*(std::max_element(labels.begin(), labels.end()));
}
Example No. 14
void EncoderBoFSoft::encode(const cv::Mat_<float>& descriptors, cv::Mat_<float>& encoded)
{
	int ndata = descriptors.rows;
	int ndim = descriptors.cols;

	if ( ndim != this->_m_nDim)
	{
		throw std::runtime_error("descriptor dimension does not match when encoding");
	}
	
	encoded.create(ndata,this->_m_nCode);
	encoded.setTo(0.0f);
	//encoded.zeros(ndata,this->_m_nCode);

#pragma omp parallel for
	for(int i=0;i<ndata;i++)
	{
		Mat index,dist;
		this->_m_pTree->findNearest(descriptors.row(i),_m_nz,INT_MAX,index,noArray(),dist);

		Scalar mean,std;
		cv::meanStdDev(dist,mean,std);
		cv::divide(std(0),dist,dist);
		
		for(int j=0;j<_m_nz;j++)
		{
			encoded(i,index.at<int>(j)) = dist.at<float>(j);
		}
	}
}
Example No. 15
void GrayWorldEstimator::preprocessImage(const cv::Mat_<cv::Vec3d>& image, const cv::Mat_<unsigned char>& mask, cv::Mat_<cv::Vec3d> &inputImage, cv::Mat_<unsigned char> &inputMask) const
{
    inputImage = image.clone();
    inputMask = mask.clone();
    if ((image.rows != mask.rows) || (image.cols != mask.cols)) {
        inputMask = cv::Mat_<unsigned char>(inputImage.rows, inputImage.cols, (unsigned char)0);
    }

    Mask::maskSaturatedPixels(inputImage, inputMask, 1);

    cv::Mat_<unsigned char> element = cv::Mat_<unsigned char>::ones(3, 3);
    cv::dilate(inputMask, inputMask, element);

    const int kernelsize = cvRound(m_sigma * 3 * 2 + 1) | 1;
    Mask::maskBorderPixels(inputImage, inputMask, (kernelsize + 1) / 2);

    if (m_sigma > 0) {
        if (m_n == 0) {
            const int kernelsize = cvRound(m_sigma * 3 * 2 + 1) | 1;
            cv::GaussianBlur(inputImage, inputImage, cv::Size(kernelsize, kernelsize), m_sigma, m_sigma);
        } else if (m_n > 0) {
            inputImage = Derivative::normDerivativeFilter(inputImage, m_n, m_sigma);
        }
    }
}
Example No. 16
void CCalibrateKinect::undistortImages ( const std::vector< cv::Mat >& vImages_,  const cv::Mat_<double>& cvmK_, const cv::Mat_<double>& cvmInvK_, const cv::Mat_<double>& cvmDistCoeffs_, std::vector< cv::Mat >* pvUndistorted_ ) const
{
    std::cout << "undistortImages() "<< std::endl << std::flush;
    CHECK ( !vImages_.empty(),      "undistortImages(): input image list cannot be empty.\n" );
    CHECK ( !cvmK_.empty(),         "undistortImages(): K matrix cannot be empty.\n" );
    CHECK ( !cvmInvK_.empty(),      "undistortImages(): inverse of K matrix cannot be empty.\n" );
    CHECK ( !cvmDistCoeffs_.empty(), "undistortImages(): distortion coefficients cannot be empty.\n" );

    cv::Size cvFrameSize = vImages_[0].size(); //x,y;
    pvUndistorted_->clear();

    for ( unsigned int n = 0; n < vImages_.size(); n++ )
    {
        cv::Mat cvUndistorted;
        std::cout << "distort: "<< n << "-th image.\n"<< std::flush;
        undistortImage ( vImages_[n],  cvmK_, cvmInvK_, cvmDistCoeffs_, &cvUndistorted );
        pvUndistorted_->push_back ( cvUndistorted );

        //string strNum = boost::lexical_cast< std::string> ( n );
        //string strRGBUndistortedFileName = "rgbUndistorted" + strNum + ".bmp";
        //cv::imwrite ( strRGBUndistortedFileName, cvUndistorted );
    }

    return;
}
void FaceAnalyser::UpdateRunningMedian(cv::Mat_<unsigned int>& histogram, int& hist_count, cv::Mat_<double>& median, const cv::Mat_<double>& descriptor, bool update, int num_bins, double min_val, double max_val)
{

	double length = max_val - min_val;
	if(length < 0)
		length = -length;

	// The median update
	if(histogram.empty())
	{
		histogram = Mat_<unsigned int>(descriptor.cols, num_bins, (unsigned int)0);
		median = descriptor.clone();
	}

	if(update)
	{
		// Find the bins corresponding to the current descriptor
		Mat_<double> converted_descriptor = (descriptor - min_val)*((double)num_bins)/(length);

		// Capping the top and bottom values
		converted_descriptor.setTo(Scalar(num_bins-1), converted_descriptor > num_bins - 1);
		converted_descriptor.setTo(Scalar(0), converted_descriptor < 0);

		// Only count the median till a certain number of frame seen?
		for(int i = 0; i < histogram.rows; ++i)
		{
			int index = (int)converted_descriptor.at<double>(i);
			histogram.at<unsigned int>(i, index)++;
		}

		// Update the histogram count
		hist_count++;
	}

	if(hist_count == 1)
	{
		median = descriptor.clone();
	}
	else
	{
		// Recompute the median
		int cutoff_point = (hist_count + 1)/2;

		// For each dimension
		for(int i = 0; i < histogram.rows; ++i)
		{
			int cummulative_sum = 0;
			for(int j = 0; j < histogram.cols; ++j)
			{
				cummulative_sum += histogram.at<unsigned int>(i, j);
				if(cummulative_sum > cutoff_point)
				{
					median.at<double>(i) = min_val + j * (length/num_bins) + (0.5*(length)/num_bins);
					break;
				}
			}
		}
	}
}
Example No. 18
//===========================================================================
void CCNF_patch_expert::Response(cv::Mat_<float> &area_of_interest, cv::Mat_<float> &response)
{
	
	int response_height = area_of_interest.rows - height + 1;
	int response_width = area_of_interest.cols - width + 1;

	if(response.rows != response_height || response.cols != response_width)
	{
		response.create(response_height, response_width);
	}
		
	response.setTo(0);
	
	// the placeholder for the DFT of the image, the integral image, and squared integral image so they don't get recalculated for every response
	cv::Mat_<double> area_of_interest_dft;
	cv::Mat integral_image, integral_image_sq;
	
	cv::Mat_<float> neuron_response;

	// responses from the neural layers
	for(size_t i = 0; i < neurons.size(); i++)
	{		
		// Do not bother with neuron response if the alpha is tiny and will not contribute much to overall result
		if(neurons[i].alpha > 1e-4)
		{
			neurons[i].Response(area_of_interest, area_of_interest_dft, integral_image, integral_image_sq, neuron_response);
			response = response + neuron_response;						
		}
	}

	int s_to_use = -1;

	// Find the matching sigma
	for(size_t i=0; i < window_sizes.size(); ++i)
	{
		if(window_sizes[i] == response_height)
		{
			// Found the correct sigma
			s_to_use = i;			
			break;
		}
	}

	cv::Mat_<float> resp_vec_f = response.reshape(1, response_height * response_width);

	cv::Mat out = Sigmas[s_to_use] * resp_vec_f;
	
	response = out.reshape(1, response_height);

	// Making sure the response does not have negative numbers
	double min;

	minMaxIdx(response, &min, 0);
	if(min < 0)
	{
		response = response - min;
	}

}
Example No. 19
EDTFeature::EDTFeature( const cv::Mat_<byte> img, const cv::Size fvDims)
    : RFeatures::FeatureOperator( img.size(), fvDims)
{
    const cv::Mat_<int> sedt = RFeatures::DistanceTransform::calcdt( img);   // Distance map to edges
    cv::Mat fsedt;  // convert to 64 bit float for sqrt
    sedt.convertTo( fsedt, CV_64F);
    cv::sqrt( fsedt, _dtimg);
    cv::integral( _dtimg, _iimg, CV_64F);
}   // end ctor
Example No. 20
cv::Point3f GetPupilPosition(cv::Mat_<double> eyeLdmks3d){
	
	eyeLdmks3d = eyeLdmks3d.t();

	cv::Mat_<double> irisLdmks3d = eyeLdmks3d.rowRange(0,8);

	cv::Point3f p (mean(irisLdmks3d.col(0))[0], mean(irisLdmks3d.col(1))[0], mean(irisLdmks3d.col(2))[0]);
	return p;
}
Example No. 21
template<class T>
void writeMat(cv::Mat_<T>& mat, char* file){
	// text header: rows, cols, type and byte count, followed by the raw data
	ofstream ofile(file, ofstream::out|ofstream::binary);
	ofile<<mat.rows<<" ";
	ofile<<mat.cols<<" ";
	ofile<<mat.type()<<" ";
	ofile<<mat.total()*mat.elemSize();

	ofile.write((char*) mat.data, mat.total()*mat.elemSize());	// assumes mat is continuous
	ofile.close();
}
bool CubicSplineInterpolation::caltridiagonalMatrices( 
    cv::Mat_<double> &input_a, 
    cv::Mat_<double> &input_b, 
    cv::Mat_<double> &input_c,
    cv::Mat_<double> &input_d,
    cv::Mat_<double> &output_x )
{
    int rows = input_a.rows;
    int cols = input_a.cols;

    if ( ( rows == 1 && cols > rows ) || 
        (cols == 1 && rows > cols ) )
    {
        const int count = ( rows > cols ? rows : cols ) - 1;

        output_x = cv::Mat_<double>::zeros(rows, cols);

        cv::Mat_<double> cCopy, dCopy;
        input_c.copyTo(cCopy);
        input_d.copyTo(dCopy);

        if ( input_b(0) != 0 )
        {
            cCopy(0) /= input_b(0);
            dCopy(0) /= input_b(0);
        }
        else
        {
            return false;
        }

        for ( int i=1; i < count; i++ )
        {
            double temp = input_b(i) - input_a(i) * cCopy(i-1);
            if ( temp == 0.0 )
            {
                return false;
            }

            cCopy(i) /= temp;
            dCopy(i) = ( dCopy(i) - dCopy(i-1)*input_a(i) ) / temp;
        }

        output_x(count) = dCopy(count);
        for ( int i=count-2; i > 0; i-- )
        {
            output_x(i) = dCopy(i) - cCopy(i)*output_x(i+1);
        }
        return true;
    }
    else
    {
        return false;
    }
}
Example No. 23
 template<class T>
 void find_zeros(const cv::Mat_<T>& binary, std::vector<cv::Point> &idx) {
     assert(binary.cols > 0 && binary.rows > 0 && binary.channels() == 1);
     const int M = binary.rows;
     const int N = binary.cols;
     for (int m = 0; m < M; ++m) {
         const T* bin_ptr = (T*) binary.ptr(m);
         for (int n = 0; n < N; ++n) {
             if (bin_ptr[n] == 0) idx.push_back(cv::Point(n,m));
         }
     }
 }
Example No. 24
	void AddDescriptor(cv::Mat_<double>& descriptors, cv::Mat_<double> new_descriptor, int curr_frame, int num_frames_to_keep)
	{
		if(descriptors.empty())
		{
			descriptors = Mat_<double>(num_frames_to_keep, new_descriptor.cols, 0.0);
		}

		int row_to_change = curr_frame % num_frames_to_keep;

		new_descriptor.copyTo(descriptors.row(row_to_change));
	}	
Example No. 25
void Tan::preprocessImage(const cv::Mat_<cv::Vec3d>& image, const cv::Mat_<unsigned char>& mask, cv::Mat_<cv::Vec3d> &outputImage, cv::Mat_<unsigned char> &outputMask) const
{
	outputImage = image.clone();
	if ((image.rows != mask.rows) || (image.cols != mask.cols)) {
		std::cerr << "No mask!" << std::endl;
		outputMask = cv::Mat_<unsigned char>::zeros(outputImage.rows, outputImage.cols);
	} else {
		outputMask = mask.clone();
	}
	illumestimators::Mask::maskSaturatedPixels(outputImage, outputMask, config.max_intensity);
	illumestimators::Mask::maskDarkPixels(outputImage, outputMask, config.min_intensity);
}
Example No. 26
void SegmenterHumanSimple::getPixelProbability(const cv::Mat& img, cv::Mat_<float>& prob, vector<Rect> faces)
{
	prob.create(img.rows,img.cols);
	prob.setTo(0.3f);
	for(int i=0;i<faces.size();i++)
	{
		Rect rect = faces[i];
		rect = UtilsOpencv::ScaleRect(rect,Rect(0,0,img.cols,img.rows),1.2,1.5,1.2,1.2);
		prob(Rect(rect.x,rect.y,rect.width,img.rows-rect.y)) = 0.7f;
	}
	
}
Example No. 27
// matrix version
void multi_img::setPixel(unsigned int row, unsigned int col,
						 const cv::Mat_<Value>& values)
{
	assert((int)row < height && (int)col < width);
	assert(values.rows*values.cols == (int)size());
	Pixel &p = pixels[row*width + col];
	p.assign(values.begin(), values.end());

	for (size_t i = 0; i < size(); ++i)
		bands[i](row, col) = p[i];

	dirty(row, col) = 0;
}
Example No. 28
// static
void CrossValidator::splitIntoPositiveAndNegativeClasses( const cv::Mat_<float>& xs, const cv::Mat_<int>& labels,
                                                          vector<cv::Mat_<float> >& pset,
                                                          vector<cv::Mat_<float> >& nset)
{
    const int *labsVec = labels.ptr<int>(0);
    for ( int i = 0; i < xs.rows; ++i)
    {
        assert( labsVec[i] == 0 || labsVec[i] == 1);
        if (labsVec[i] == 1)
            pset.push_back(xs.row(i));
        else if (labsVec[i] == 0)
            nset.push_back(xs.row(i));
    }   // end for
}   // end splitIntoPositiveAndNegativeClasses
Example No. 29
	//=============================================================================
	// Essentially Kabsch's algorithm, but it also allows the two point collections to differ in scale from each other
	cv::Matx22f AlignShapesWithScale(cv::Mat_<float>& src, cv::Mat_<float> dst)
	{
		int n = src.rows;

		// First we mean normalise both src and dst
		float mean_src_x = cv::mean(src.col(0))[0];
		float mean_src_y = cv::mean(src.col(1))[0];

		float mean_dst_x = cv::mean(dst.col(0))[0];
		float mean_dst_y = cv::mean(dst.col(1))[0];

		cv::Mat_<float> src_mean_normed = src.clone();
		src_mean_normed.col(0) = src_mean_normed.col(0) - mean_src_x;
		src_mean_normed.col(1) = src_mean_normed.col(1) - mean_src_y;

		cv::Mat_<float> dst_mean_normed = dst.clone();
		dst_mean_normed.col(0) = dst_mean_normed.col(0) - mean_dst_x;
		dst_mean_normed.col(1) = dst_mean_normed.col(1) - mean_dst_y;

		// Find the scaling factor of each
		cv::Mat src_sq;
		cv::pow(src_mean_normed, 2, src_sq);

		cv::Mat dst_sq;
		cv::pow(dst_mean_normed, 2, dst_sq);

		float s_src = sqrt(cv::sum(src_sq)[0] / n);
		float s_dst = sqrt(cv::sum(dst_sq)[0] / n);

		src_mean_normed = src_mean_normed / s_src;
		dst_mean_normed = dst_mean_normed / s_dst;

		float s = s_dst / s_src;

		// Get the rotation
		cv::Matx22f R = AlignShapesKabsch2D(src_mean_normed, dst_mean_normed);

		cv::Matx22f	A;
		cv::Mat(s * R).copyTo(A);

		cv::Mat_<float> aligned = (cv::Mat(cv::Mat(A) * src.t())).t();
		cv::Mat_<float> offset = dst - aligned;

		float t_x = cv::mean(offset.col(0))[0];
		float t_y = cv::mean(offset.col(1))[0];

		return A;

	}
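A minimal sketch of calling AlignShapesWithScale above (it assumes AlignShapesKabsch2D from the same codebase is available; the triangle coordinates and the name align_shapes_example are illustrative):
	void align_shapes_example()
	{
		cv::Mat_<float> dst = (cv::Mat_<float>(3, 2) << 0.f, 0.f,
		                                                1.f, 0.f,
		                                                0.f, 1.f);
		cv::Mat_<float> src = dst * 2.0f;                  // same triangle, twice the scale
		cv::Matx22f A = AlignShapesWithScale(src, dst);    // scaled rotation taking src towards dst
		cv::Mat_<float> aligned = cv::Mat(cv::Mat(A) * cv::Mat(src).t()).t();  // apply it
	}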
Example No. 30
//===========================================================================
// Clamping the parameter values to be within 3 standard deviations
void PDM::Clamp(cv::Mat_<float>& local_params, cv::Vec6d& params_global, const FaceModelParameters& parameters)
{
	double n_sigmas = 3;
	cv::MatConstIterator_<double> e_it  = this->eigen_values.begin();
	cv::MatIterator_<float> p_it =  local_params.begin();

	double v;

	// go over all parameters
	for(; p_it != local_params.end(); ++p_it, ++e_it)
	{
		// Work out the maximum value
		v = n_sigmas*sqrt(*e_it);

		// if the value is too extreme, clamp it
		if(fabs(*p_it) > v)
		{
			// Dealing with positive and negative cases
			if(*p_it > 0.0)
			{
				*p_it=v;
			}
			else
			{
				*p_it=-v;
			}
		}
	}
	
	// do not let the pose get out of hand
	if(parameters.limit_pose)
	{
		if(params_global[1] > M_PI / 2)
			params_global[1] = M_PI/2;
		if(params_global[1] < -M_PI / 2)
			params_global[1] = -M_PI/2;
		if(params_global[2] > M_PI / 2)
			params_global[2] = M_PI/2;
		if(params_global[2] < -M_PI / 2)
			params_global[2] = -M_PI/2;
		if(params_global[3] > M_PI / 2)
			params_global[3] = M_PI/2;
		if(params_global[3] < -M_PI / 2)
			params_global[3] = -M_PI/2;
	}
	

}
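For reference, the same +/- 3-sigma clamping rule in isolation, on plain matrices (a standalone illustration only; PDM and FaceModelParameters themselves are not reproduced here):
#include <cmath>
#include <opencv2/core/core.hpp>

void clamp_to_three_sigma(cv::Mat_<float>& local_params, const cv::Mat_<double>& eigen_values)
{
	cv::MatConstIterator_<double> e_it = eigen_values.begin();
	for (cv::MatIterator_<float> p_it = local_params.begin(); p_it != local_params.end(); ++p_it, ++e_it)
	{
		const double v = 3.0 * sqrt(*e_it);   // maximum allowed magnitude: 3 standard deviations
		if (*p_it > v)
			*p_it = (float)v;
		else if (*p_it < -v)
			*p_it = (float)-v;
	}
}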