Code Example #1
File: rpca.cpp  Project: s1hofmann/PipeR
void RPCA::fit(const cv::Mat1f & descr)
{
    // make data zero mean
    cv::reduce(descr, mean, 0, CV_REDUCE_AVG);
    cv::Mat1f X(descr.rows, descr.cols);

#ifdef USE_TBB
    tbb::parallel_for(int32_t(0), X.rows, [&](int32_t y) {
        X.row(y) = descr.row(y) - mean;
    }
    );
#else
    for( int32_t y = 0; y < X.rows; y++){
        X.row(y) = descr.row(y) - mean;
    }
#endif

    cv::Mat1f U, S, V;
    // cv::SVD::compute fills V with V transposed, so its rows are the principal axes
    cv::SVD::compute(X, S, U, V);
    // squared singular values divided by the sample count give the per-component variance
    cv::pow(S, 2, variance);
    variance /= X.rows;
    if (n_components <= 0) {
        n_components = X.cols;
    }

    components = V.rowRange(0, n_components);
    if(variance.rows < n_components) {
        whiten = false;
    } else {
        variance = variance.rowRange(0, n_components);
    }

    this->dataLoaded = true;
}
Code Example #2
File: rpca.cpp  Project: s1hofmann/PipeR
void RPCA::transform(const cv::Mat1f & descr,
                     cv::Mat1f & out) const
{
    if(descr.cols != mean.cols) {
        std::stringstream s;
        s << "Input and mean data missmatch." << std::endl;
        s << "Input: " << descr.size() << " Mean: " << mean.size() << std::endl;
        throw pl::DimensionalityReductionError(s.str(), currentMethod, currentLine);
    } else if(descr.cols != components.cols) {
        std::stringstream s;
        s << "Input and transformation data missmatch." << std::endl;
        s << "Input: " << descr.size() << " Transform: " << components.size() << std::endl;
        throw pl::DimensionalityReductionError(s.str(), currentMethod, currentLine);
    }
    // make data zero-mean
    cv::Mat1f X(descr.rows,descr.cols);
#ifdef USE_TBB
    tbb::parallel_for(int32_t(0), X.rows, [&](int32_t i) {
        X.row(i) = descr.row(i) - mean;
    }
    );
#else
    for( int32_t y = 0; y < X.rows; y++){
        X.row(y) = descr.row(y) - mean;
    }
#endif
    // the actual transformation
    X = X * components.t();

    // whiten the data
    if ( whiten ) {
        cv::Mat1f var_reg = variance.clone();
        var_reg = var_reg.reshape(1, 1);
        if ( reg > 0.0 )
            var_reg += reg;
        cv::sqrt(var_reg, var_reg);
#ifdef USE_TBB
        tbb::parallel_for(int32_t(0), X.rows, [&](int32_t i) {
            cv::Mat1f row = X.row(i);
            row /= var_reg;
        }
        );
#else
        for( int32_t i = 0; i < X.rows; i++ ) {
            cv::Mat1f row = X.row(i);
            row /= var_reg;
        }
#endif
    }
    out = X;
}
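
A minimal call sequence for fit() and transform() might look like the following. This is only a sketch: the excerpts above do not show how RPCA is constructed or how n_components, whiten and reg are configured in PipeR, so the default construction and the random input below are assumptions made for illustration.

// Hypothetical usage sketch; RPCA construction/configuration in PipeR may differ.
cv::Mat1f descriptors(100, 64);
cv::randu(descriptors, cv::Scalar(0.0f), cv::Scalar(1.0f)); // random rows purely for illustration

RPCA rpca;                            // assumed to be default-constructible
rpca.fit(descriptors);                // learns mean, components and variance

cv::Mat1f reduced;
rpca.transform(descriptors, reduced); // projects each row onto the learned components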
Code Example #3
void ObjectDetector :: detect(const cv::Mat1f& distance_image,
                              cv::Mat1b& mask_image,
                              std::list<cv::Rect>& rects)
{
  if (mask_image.size() != distance_image.size())
    mask_image = cv::Mat1b(distance_image.size());

  for (int r = 0; r < distance_image.rows; ++r)
    for (int c = 0; c < distance_image.cols; ++c)
    {
      if (distance_image(r,c) >= m_min_threshold && distance_image(r,c) <= m_max_threshold)
        mask_image(r,c) = 255;
      else
        mask_image(r,c) = 0;
    }

  cv::morphologyEx(mask_image, mask_image,
                   cv::MORPH_OPEN,
                   getStructuringElement(cv::MORPH_RECT,
                                         cv::Size(3,3)));
  cv::morphologyEx(mask_image, mask_image,
                   cv::MORPH_CLOSE,
                   getStructuringElement(cv::MORPH_RECT,
                                         cv::Size(3,3)));

  std::vector< std::vector<cv::Point> > contours;
  cv::Mat1b contour_image = mask_image.clone();
  cv::findContours(contour_image, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
  for (int i = 0; i < contours.size(); ++i)
  {
    cv::Rect rect = cv::boundingRect(cv::Mat(contours[i]));
    if (rect.area() > 300)
      rects.push_back(rect);
  }
}
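
The same pipeline (range threshold, morphological open and close, external contours, bounding boxes) can be reproduced with plain OpenCV calls. Below is a standalone sketch in which the arbitrary constants 0.5 and 2.0 stand in for the class members m_min_threshold and m_max_threshold; the 300-pixel area limit is taken from the original.

#include <opencv2/imgproc/imgproc.hpp>
#include <vector>

// Standalone sketch of the detect() pipeline; the range thresholds are arbitrary.
void detect_blobs(const cv::Mat1f& distance_image, std::vector<cv::Rect>& rects)
{
    cv::Mat1b mask;
    cv::inRange(distance_image, 0.5, 2.0, mask);             // keep pixels inside [0.5, 2.0]

    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
    cv::morphologyEx(mask, mask, cv::MORPH_OPEN, kernel);    // remove isolated speckles
    cv::morphologyEx(mask, mask, cv::MORPH_CLOSE, kernel);   // fill small holes

    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(mask.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
    for (size_t i = 0; i < contours.size(); ++i) {
        cv::Rect rect = cv::boundingRect(contours[i]);
        if (rect.area() > 300)
            rects.push_back(rect);
    }
}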
Code Example #4
File: opencv_utils.cpp  Project: gpodevijn/nestk
 void apply_mask(cv::Mat1f& im, const cv::Mat1b& mask)
 {
   if (!mask.data)
     return;
   ntk_assert(im.size() == mask.size(), "Wrong mask size");
   for_all_rc(im)
     if (mask(r,c) == 0)
       im(r,c) = 0.f;
 }
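
A brief usage sketch (the image size and the masked region below are arbitrary):

// Usage sketch: zero out the depth values inside a chosen region.
cv::Mat1f depth(480, 640, 1.0f);              // dummy depth image
cv::Mat1b mask(depth.size(), (uchar)255);     // everything valid ...
mask(cv::Rect(0, 0, 100, 100)).setTo(0);      // ... except the top-left corner
apply_mask(depth, mask);                      // pixels where mask == 0 become 0.f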
Code Example #5
File: NIKinect.cpp  Project: mariojgpinto/NIKinect
/**
 * @brief	Computes a Colorized Image from a Depth Image within a given range.
 */
void NIKinect::compute_color_encoded_depth(const cv::Mat1f& depth_im, cv::Mat& color_depth_im,
                                     double* i_min_val, double* i_max_val){
	double min_val, max_val;
	if (i_min_val && i_max_val)
	{
		min_val = *i_min_val;
		max_val = *i_max_val;
	}
	else
	{
		minMaxLoc(depth_im, &min_val, &max_val);
	}

	color_depth_im.create(depth_im.size(),CV_8UC3);
	for (int r = 0; r < depth_im.rows; ++r)
	{
		const float* depth_data = depth_im.ptr<float>(r);
		cv::Vec3b* depth_color_data = color_depth_im.ptr<cv::Vec3b>(r);
		for (int c = 0; c < depth_im.cols; ++c)
		{
			int v = 255*6*(depth_data[c]-min_val)/(max_val-min_val);
			if (v < 0) v = 0;
			unsigned char r, g, b; // unsigned so values up to 255 fit; note this shadows the outer row index r
			int lb = v & 0xff;
			switch (v / 256) {
			case 0:
				r = 255;	g = 255-lb;	b = 255-lb;
				break;
			case 1:
				r = 255;	g = lb;		b = 0;
				break;
			case 2:
				r = 255-lb;	g = 255;	b = 0;
				break;
			case 3:
				r = 0;		g = 255;	b = lb;
				break;
			case 4:
				r = 0;		g = 255-lb;	b = 255;
				break;
			case 5:
				r = 0;		g = 0;		b = 255-lb;
				break;
			default:
				r = 0;		g = 0;		b = 0;
				break;
			}
			if (v == 0){
				r = g = b = 0;
			}
			depth_color_data[c] = cv::Vec3b(b,g,r);
		}
	}
}
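
A hypothetical call of the function above. The depth data and range are illustrative only, and whether the function can be called statically or needs an NIKinect instance depends on the class declaration, which is not shown here:

// Hypothetical call; assumes the method is accessible as written (the class declaration is not part of this excerpt).
cv::Mat1f depth(480, 640, 1.5f);      // dummy depth image
cv::Mat color;
double min_d = 0.5, max_d = 3.0;      // explicit range mapped onto the color ramp
NIKinect::compute_color_encoded_depth(depth, color, &min_d, &max_d);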
Code Example #6
ImageSaliencyDetector::ImageSaliencyDetector(const cv::Mat1f& src) {
	if (src.empty()) {
		throw std::invalid_argument("ImageSaliencyDetector: Source image cannot be empty!");
	}

	setSourceImage(src);

	mDensityEstimates.resize(mSrcImage.rows);
	for (int i = 0; i < mSrcImage.rows; ++i) {
		mDensityEstimates[i].resize(mSrcImage.cols);
	}
}
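
A brief usage sketch; the source image here is only a placeholder:

cv::Mat1f saliencySource(240, 320, 0.0f);       // placeholder source image
ImageSaliencyDetector detector(saliencySource); // allocates the per-pixel density estimates
// Passing an empty cv::Mat1f() would throw std::invalid_argument.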
Code Example #7
File: lpip_detector.cpp  Project: anygo/uniprojectsdn
// ccw rotation (size of return mat is greater than size of parameter img)
void LPIPDetector::getDerivativesAfterRotation(cv::Mat1f &img, double degree, Derivatives &d)
{			
	Mat1f rotated;
	Mat rotationMatrix = getRotationMatrix2D(Point2f(img.cols/2.f, img.rows/2.f), degree, 1);
	warpAffine(img, rotated, rotationMatrix, img.size(), INTER_LINEAR);

	Mat1f partialDerivation;
	rotated = rotated(Range(1, img.rows-1), Range(1, img.cols-1));

	Sobel(rotated, partialDerivation, CV_32F, 1, 0, m_sobelSize);
	d.Rx = partialDerivation[partialDerivation.rows/2][partialDerivation.cols/2];
	Sobel(rotated, partialDerivation, CV_32F, 0, 1, m_sobelSize);
	d.Ry = partialDerivation[partialDerivation.rows/2][partialDerivation.cols/2];
	Sobel(rotated, partialDerivation, CV_32F, 2, 0, m_sobelSize);
	d.Rxx = partialDerivation[partialDerivation.rows/2][partialDerivation.cols/2];
	Sobel(rotated, partialDerivation, CV_32F, 0, 2, m_sobelSize);
	d.Ryy = partialDerivation[partialDerivation.rows/2][partialDerivation.cols/2];
	Sobel(rotated, partialDerivation, CV_32F, 1, 1, m_sobelSize);
	d.Rxy = partialDerivation[partialDerivation.rows/2][partialDerivation.cols/2];
}
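
The rotate-then-sample pattern used above can be shown in isolation. A sketch with a fixed Sobel aperture of 3 standing in for the member m_sobelSize, returning only the x-derivative at the image center:

#include <opencv2/imgproc/imgproc.hpp>

// Standalone sketch: rotate an image and read the Sobel x-derivative at the center pixel.
float rotated_center_dx(const cv::Mat1f& img, double degree)
{
    cv::Mat1f rotated;
    cv::Mat rot = cv::getRotationMatrix2D(cv::Point2f(img.cols / 2.f, img.rows / 2.f), degree, 1);
    cv::warpAffine(img, rotated, rot, img.size(), cv::INTER_LINEAR);

    cv::Mat1f dx;
    cv::Sobel(rotated, dx, CV_32F, 1, 0, 3);
    return dx(dx.rows / 2, dx.cols / 2);
}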
Code Example #8
File: main.cpp  Project: psk239/Kinect-Mouse
// Note: the default argument cv::Mat1f() binds a temporary to a non-const reference, which is a compiler extension (e.g. MSVC); standard C++ would require a different signature.
std::vector<cv::Point2i> detectFingertips(cv::Mat1f z, float zMin = 0.0f, float zMax = 0.75f, cv::Mat1f& debugFrame = cv::Mat1f()) {
	using namespace cv;
	using namespace std;
	bool debug = !debugFrame.empty();

	vector<Point2i> fingerTips;

	Mat handMask = (z < zMax) & (z > zMin); // pixels within the (zMin, zMax) depth band

	std::vector<std::vector<cv::Point> > contours;
	findContours(handMask.clone(), contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE); // clone because findContours modifies its input image

	if (contours.size()) {
		for (int i=0; i<contours.size(); i++) {
			vector<Point> contour = contours[i];
			Mat contourMat = Mat(contour);
			double area = cv::contourArea(contourMat);

			if (area > 3000)  { // possible hand
				Scalar center = mean(contourMat);
				Point centerPoint = Point(center.val[0], center.val[1]);

				vector<Point> approxCurve;
				cv::approxPolyDP(contourMat, approxCurve, 20, true);

				vector<int> hull;
				cv::convexHull(Mat(approxCurve), hull);

				// find upper and lower bounds of the hand and define cutoff threshold (don't consider lower vertices as fingers)
				int upper = 640, lower = 0;
				for (int j=0; j<hull.size(); j++) {
					int idx = hull[j]; // corner index
					if (approxCurve[idx].y < upper) upper = approxCurve[idx].y;
					if (approxCurve[idx].y > lower) lower = approxCurve[idx].y;
				}
				float cutoff = lower - (lower - upper) * 0.1f;

				// find interior angles of hull corners
				for (int j=0; j<hull.size(); j++) {
					int idx = hull[j]; // corner index
					int pdx = idx == 0 ? approxCurve.size() - 1 : idx - 1; //  predecessor of idx
					int sdx = idx == approxCurve.size() - 1 ? 0 : idx + 1; // successor of idx

					Point v1 = approxCurve[sdx] - approxCurve[idx];
					Point v2 = approxCurve[pdx] - approxCurve[idx];

					float angle = acos( (v1.x*v2.x + v1.y*v2.y) / (norm(v1) * norm(v2)) );

					// low interior angle + within upper 90% of region -> we got a finger
					if (angle < 1 && approxCurve[idx].y < cutoff) {
						int u = approxCurve[idx].x;
						int v = approxCurve[idx].y;

						fingerTips.push_back(Point2i(u,v));
						
						if (debug) {
							cv::circle(debugFrame, approxCurve[idx], 10, Scalar(1), -1);
						}
					}
				}

				if (debug) {
					// draw cutoff threshold
					cv::line(debugFrame, Point(center.val[0]-100, cutoff), Point(center.val[0]+100, cutoff), Scalar(1.0f));

					// draw approxCurve
					for (int j=0; j<approxCurve.size(); j++) {
						cv::circle(debugFrame, approxCurve[j], 10, Scalar(1.0f));
						if (j != 0) {
							cv::line(debugFrame, approxCurve[j], approxCurve[j-1], Scalar(1.0f));
						} else {
							cv::line(debugFrame, approxCurve[0], approxCurve[approxCurve.size()-1], Scalar(1.0f));
						}
					}

					// draw approxCurve hull
					for (int j=0; j<hull.size(); j++) {
						cv::circle(debugFrame, approxCurve[hull[j]], 10, Scalar(1.0f), 3);
						if(j == 0) {
							cv::line(debugFrame, approxCurve[hull[j]], approxCurve[hull[hull.size()-1]], Scalar(1.0f));
						} else {
							cv::line(debugFrame, approxCurve[hull[j]], approxCurve[hull[j-1]], Scalar(1.0f));
						}
					}
				}
			}
		}
	}

	return fingerTips;
}
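
A hypothetical call of detectFingertips(); a real caller would pass a metric depth frame (e.g. from a Kinect) instead of the dummy image used here:

// Hypothetical usage sketch with a dummy depth frame.
cv::Mat1f depth(480, 640, 0.5f);                 // placeholder depth values in meters
cv::Mat1f debug = depth.clone();                 // persistent Mat so the non-const reference binds
std::vector<cv::Point2i> tips = detectFingertips(depth, 0.0f, 0.75f, debug);
// tips holds one point per detected fingertip; debug contains the drawn overlays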