Ejemplo n.º 1
0
/**
 * @brief  Projects descriptors into the fitted (R)PCA subspace.
 *
 * Centers each row of @p descr with the fitted mean, multiplies by the
 * transposed component matrix, and optionally whitens the result by the
 * per-component standard deviation.
 *
 * @param descr  Input row-vector descriptors; descr.cols must match the
 *               dimensionality the model was fitted with.
 * @param out    Receives the transformed (and optionally whitened) data.
 * @throws pl::DimensionalityReductionError if descr's column count does
 *         not match the fitted mean or component matrix.
 */
void RPCA::transform(const cv::Mat1f & descr,
                     cv::Mat1f & out) const
{
    // Validate the input against the fitted model before touching any data.
    if(descr.cols != mean.cols) {
        std::stringstream s;
        s << "Input and mean data mismatch." << std::endl;
        s << "Input: " << descr.size() << " Mean: " << mean.size() << std::endl;
        throw pl::DimensionalityReductionError(s.str(), currentMethod, currentLine);
    } else if(descr.cols != components.cols) {
        std::stringstream s;
        s << "Input and transformation data mismatch." << std::endl;
        s << "Input: " << descr.size() << " Transform: " << components.size() << std::endl;
        throw pl::DimensionalityReductionError(s.str(), currentMethod, currentLine);
    }
    // make data zero-mean (row-wise subtraction of the fitted mean)
    cv::Mat1f X(descr.rows,descr.cols);
#ifdef USE_TBB
        tbb::parallel_for(int32_t(0), X.rows, [&](int32_t i) {
            X.row(i) = descr.row(i) - mean;
        }
        );
#else
    for( int32_t y = 0; y < X.rows; y++){
        X.row(y) = descr.row(y) - mean;
    }
#endif
    // the actual transformation: project onto the principal components
    X = X * components.t();

    // whiten the data: divide each component by its standard deviation
    if ( whiten ) {
        cv::Mat1f var_reg = variance.clone();
        var_reg = var_reg.reshape(1, 1);
        // optional regularization to avoid division by (near-)zero variance
        if ( reg > 0.0 )
            var_reg += reg;
        cv::sqrt(var_reg, var_reg);
#ifdef USE_TBB
        tbb::parallel_for(int32_t(0), X.rows, [&](int32_t i) {
            cv::Mat1f row = X.row(i);
            row /= var_reg;  // element-wise; row is a view, so X is updated in place
        }
        );
#else
        for( int32_t i = 0; i < X.rows; i++ ) {
            cv::Mat1f row = X.row(i);
            row /= var_reg;  // element-wise; row is a view, so X is updated in place
        }
#endif
    }
    out = X;
}
Ejemplo n.º 2
0
/**
 * @brief  Segments objects in a distance image by thresholding and returns
 *         bounding boxes of sufficiently large connected regions.
 *
 * Pixels whose distance lies in [m_min_threshold, m_max_threshold] become
 * foreground (255); the mask is cleaned with a morphological open + close,
 * and bounding rectangles of external contours larger than a minimum area
 * are appended to @p rects.
 *
 * @param distance_image  Input per-pixel distance values.
 * @param mask_image      Output binary mask (resized to match the input).
 * @param rects           Appended with one bounding box per accepted blob.
 */
void ObjectDetector :: detect(const cv::Mat1f& distance_image,
                              cv::Mat1b& mask_image,
                              std::list<cv::Rect>& rects)
{
  if (mask_image.size() != distance_image.size())
    mask_image = cv::Mat1b(distance_image.size());

  // Binarize: foreground where the distance falls inside the configured band.
  for (int r = 0; r < distance_image.rows; ++r)
    for (int c = 0; c < distance_image.cols; ++c)
    {
      if (distance_image(r,c) >= m_min_threshold && distance_image(r,c) <= m_max_threshold)
        mask_image(r,c) = 255;
      else
        mask_image(r,c) = 0;
    }

  // Open removes speckle noise, close fills small holes; the same 3x3
  // rectangular kernel is used for both, so build it once.
  const cv::Mat kernel = getStructuringElement(cv::MORPH_RECT, cv::Size(3,3));
  cv::morphologyEx(mask_image, mask_image, cv::MORPH_OPEN, kernel);
  cv::morphologyEx(mask_image, mask_image, cv::MORPH_CLOSE, kernel);

  std::vector< std::vector<cv::Point> > contours;
  // findContours modifies its input, so work on a copy to keep the mask intact.
  cv::Mat1b contour_image = mask_image.clone();
  cv::findContours(contour_image, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
  for (std::size_t i = 0; i < contours.size(); ++i)  // size_t avoids signed/unsigned mismatch
  {
    cv::Rect rect = cv::boundingRect(cv::Mat(contours[i]));
    // Discard tiny blobs; 300 px^2 is the minimum accepted object area.
    if (rect.area() > 300)
      rects.push_back(rect);
  }
}
Ejemplo n.º 3
0
 // Zeroes out every pixel of `im` where `mask` is 0; a mask without data
 // (empty/unallocated) leaves the image untouched.
 // NOTE(review): `for_all_rc` is a project macro that presumably iterates all
 // (row, column) pairs, binding them as `r` and `c` — confirm its definition.
 void apply_mask(cv::Mat1f& im, const cv::Mat1b& mask)
 {
   // No mask supplied: nothing to do (best-effort, not an error).
   if (!mask.data)
     return;
   // Project assertion macro; fires when the mask does not cover the image.
   ntk_assert(im.size() == mask.size(), "Wrong mask size");
   for_all_rc(im)
     if (mask(r,c) == 0)
       im(r,c) = 0.f;
 }
Ejemplo n.º 4
0
/**
 * @brief	Computes a Colorized Image from a Depth Image within a given range.
 */
/**
 * @brief	Computes a Colorized Image from a Depth Image within a given range.
 *
 * Maps each depth value into one of six smooth color segments (a rainbow-like
 * ramp) spanning [min_val, max_val]. The range is taken from the optional
 * pointers when both are given, otherwise from the image's own min/max.
 *
 * @param depth_im        Input single-channel float depth image.
 * @param color_depth_im  Output CV_8UC3 BGR image (allocated to match).
 * @param i_min_val       Optional lower bound of the depth range.
 * @param i_max_val       Optional upper bound of the depth range.
 */
void NIKinect::compute_color_encoded_depth(const cv::Mat1f& depth_im, cv::Mat& color_depth_im,
                                     double* i_min_val, double* i_max_val){
	double min_val, max_val;
	if (i_min_val && i_max_val)
	{
		min_val = *i_min_val;
		max_val = *i_max_val;
	}
	else
	{
		// Derive the range from the image itself when none is supplied.
		minMaxLoc(depth_im, &min_val, &max_val);
	}
	// BUGFIX: guard against a degenerate (constant or inverted) range. The
	// original divided by (max_val - min_val) unconditionally; for a constant
	// image this is 0/0 -> NaN, and converting NaN to int is undefined behavior.
	if (max_val <= min_val)
		max_val = min_val + 1.0;

	color_depth_im.create(depth_im.size(),CV_8UC3);
	for (int row = 0; row < depth_im.rows; ++row)
	{
		const float* depth_data = depth_im.ptr<float>(row);
		cv::Vec3b* depth_color_data = color_depth_im.ptr<cv::Vec3b>(row);
		for (int c = 0; c < depth_im.cols; ++c)
		{
			// Scale depth into [0, 255*6): six 256-wide color segments.
			int v = 255*6*(depth_data[c]-min_val)/(max_val-min_val);
			if (v < 0) v = 0;
			// BUGFIX: was `char r,g,b;` — that shadowed the row index `r` and,
			// where plain char is signed, assigning 255 is out of range.
			unsigned char r, g, b;
			int lb = v & 0xff;  // position within the current segment
			switch (v / 256) {
			case 0:
				r = 255;	g = 255-lb;	b = 255-lb;
				break;
			case 1:
				r = 255;	g = lb;		b = 0;
				break;
			case 2:
				r = 255-lb;	g = 255;	b = 0;
				break;
			case 3:
				r = 0;		g = 255;	b = lb;
				break;
			case 4:
				r = 0;		g = 255-lb;	b = 255;
				break;
			case 5:
				r = 0;		g = 0;		b = 255-lb;
				break;
			default:
				r = 0;		g = 0;		b = 0;
				break;
			}
			// Depth at (or below) the minimum renders as black.
			if (v == 0){
				r = g = b = 0;
			}
			depth_color_data[c] = cv::Vec3b(b,g,r);  // OpenCV stores BGR
		}
	}
}
Ejemplo n.º 5
0
// ccw rotation (size of return mat is greater than size of parameter img)
void LPIPDetector::getDerivativesAfterRotation(cv::Mat1f &img, double degree, Derivatives &d)
{
	// Rotate the patch counter-clockwise around its center.
	Mat1f rotated;
	Mat rotation = getRotationMatrix2D(Point2f(img.cols/2.f, img.rows/2.f), degree, 1);
	warpAffine(img, rotated, rotation, img.size(), INTER_LINEAR);

	// Crop a one-pixel border from the rotated patch.
	rotated = rotated(Range(1, img.rows-1), Range(1, img.cols-1));

	// Compute the (dx, dy) Sobel derivative of the cropped patch and
	// sample its value at the patch center.
	Mat1f deriv;
	auto centerDerivative = [&](int dx, int dy) -> float {
		Sobel(rotated, deriv, CV_32F, dx, dy, m_sobelSize);
		return deriv[deriv.rows/2][deriv.cols/2];
	};

	d.Rx  = centerDerivative(1, 0);
	d.Ry  = centerDerivative(0, 1);
	d.Rxx = centerDerivative(2, 0);
	d.Ryy = centerDerivative(0, 2);
	d.Rxy = centerDerivative(1, 1);
}