inline void
 cvToCloud(const cv::Mat_<cv::Point3f>& points3d, pcl::PointCloud<PointT>& cloud, const cv::Mat& mask = cv::Mat())
 {
  cloud.clear();
  // push_back() below leaves the cloud unorganized (width = point count,
  // height = 1), since masked or NaN points are skipped.
  cloud.points.reserve(points3d.total());
   cv::Mat_<cv::Point3f>::const_iterator point_it = points3d.begin(), point_end = points3d.end();
   const bool has_mask = !mask.empty();
   cv::Mat_<uchar>::const_iterator mask_it;
   if (has_mask)
     mask_it = mask.begin<uchar>();
   for (; point_it != point_end; ++point_it, (has_mask ? ++mask_it : mask_it))
   {
     if (has_mask && !*mask_it)
       continue;
     cv::Point3f p = *point_it;
    if (p.x != p.x || p.y != p.y || p.z != p.z) // throw out NaNs (NaN != NaN)
       continue;
     PointT cp;
     cp.x = p.x;
     cp.y = p.y;
     cp.z = p.z;
     cloud.push_back(cp);
   }
 }
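A minimal usage sketch for the helper above, assuming PointT is typedef'd to pcl::PointXYZ (the snippet itself does not show that typedef or a template header); the grid and mask values are made up for illustration:

#include <opencv2/core/core.hpp>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

typedef pcl::PointXYZ PointT;  // assumption: a plain XYZ point type

int main()
{
  // Hypothetical 3x4 grid of points, all at z = 1, with one pixel masked out.
  cv::Mat_<cv::Point3f> points3d(3, 4, cv::Point3f(0.f, 0.f, 1.f));
  cv::Mat_<uchar> mask = cv::Mat_<uchar>::ones(3, 4);
  mask(1, 2) = 0;

  pcl::PointCloud<PointT> cloud;
  cvToCloud(points3d, cloud, mask);
  // cloud.size() == 11; the masked pixel was skipped.
  return 0;
}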
Example 2
void writeImageData( ostream& os, const cv::Mat_<cv::Vec3b>& cimg, const cv::Mat_<cv::Vec3f>& points)
{
    const cv::Size imgSz = cimg.size();
    assert( imgSz == points.size());

    os << imgSz.height << " " << imgSz.width << endl;
    for ( int i = 0; i < imgSz.height; ++i)
    {
        const cv::Vec3f* pptr = points.ptr<cv::Vec3f>(i);
        const cv::Vec3b* cptr = cimg.ptr<cv::Vec3b>(i);
        for ( int j = 0; j < imgSz.width; ++j)
        {
            const cv::Vec3f& p = pptr[j];
            const cv::Vec3b& c = cptr[j];

            // Write the x,y,z (three packed floats)
            os.write( reinterpret_cast<const char*>(&p[0]), 3 * sizeof(float));

            // Write the colour (three packed bytes)
            os.write( reinterpret_cast<const char*>(&c[0]), 3 * sizeof(byte));
        }   // end for - columns
    }   // end for - rows

    os << endl;
}   // end writeImageData
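A sketch of a matching reader, assuming the stream was produced by writeImageData above (the endl after the text header leaves a single '\n' before the binary payload):

void readImageData( istream& is, cv::Mat_<cv::Vec3b>& cimg, cv::Mat_<cv::Vec3f>& points)
{
    int h = 0, w = 0;
    is >> h >> w;
    is.get();   // consume the '\n' terminating the text header

    cimg.create(h, w);
    points.create(h, w);
    for ( int i = 0; i < h; ++i)
    {
        cv::Vec3f* pptr = points.ptr<cv::Vec3f>(i);
        cv::Vec3b* cptr = cimg.ptr<cv::Vec3b>(i);
        for ( int j = 0; j < w; ++j)
        {
            is.read( reinterpret_cast<char*>(&pptr[j][0]), 3 * sizeof(float));
            is.read( reinterpret_cast<char*>(&cptr[j][0]), 3);
        }
    }
}   // end readImageData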
Example 3
cv::Mat_<double> subtractPlane(cv::Mat_<double> phase, cv::Mat_<bool> mask){
    cv::Mat_<double> coeff(3,1);
    cv::Mat_<double> X(phase.rows * phase.cols,3);
    cv::Mat_<double> Z(phase.rows * phase.cols,1);
    int ndx = 0;
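    // Gather one least-squares row per masked-in pixel: phase(y,x) = A*x + B*y + C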
    for (int y = 0; y < phase.rows; ++y){
        for (int x = 0; x < phase.cols; ++x){
            if (mask(y,x)){
                Z(ndx) =  phase(y,x);
                X(ndx,0) = x;
                X(ndx,1) = y;
                X(ndx++,2) = 1.;
            }
        }
    }
    // Only the first ndx rows of X and Z were filled, so solve on those alone.
    cv::solve(X.rowRange(0, ndx), Z.rowRange(0, ndx), coeff, cv::DECOMP_SVD);
    // plane model: z = Ax + By + C
    // point-to-plane distance: d = (Ax + By + C - z) / sqrt(A^2 + B^2 + 1)
    qDebug() << "plane coeffs" << coeff(0) << coeff(1) << coeff(2);

    // Zero-initialize so masked-out pixels hold 0 rather than garbage.
    cv::Mat_<double> newPhase = cv::Mat_<double>::zeros(phase.size());
    for (int y = 0; y < phase.rows; ++y){
        for (int x = 0; x  < phase.cols; ++x){
            int b = (int)mask(y,x);
            if (b == 0 ){
                continue;
            }

            double val = x * coeff(0) + y * coeff(1) + coeff(2) - phase(y,x);
            double z = val/sqrt(coeff(0) * coeff(0) + coeff(1) * coeff(1) + 1);
            newPhase(y,x) = z;
        }
    }
    return newPhase;
}
Example 4

cv::Mat ComputeBrightnessTensor(const cv::Mat_<double> &i1, const cv::Mat_<double> &i2, double hx, double hy){

  // compute derivatives
  cv::Mat x, y, t, kernel, middle;
  t = i2 - i1;
  middle = 0.5 * i1 + 0.5 * i2;

  // Fourth-order central difference: f'(x) ~ (f(x-2) - 8f(x-1) + 8f(x+1) - f(x+2)) / (12h)
  kernel = (cv::Mat_<double>(1,5) << 1, -8, 0, 8, -1);
  cv::filter2D(middle, x, -1, kernel * 1.0/(12*hx), cv::Point(-1,-1), 0, cv::BORDER_REFLECT_101);

  kernel = (cv::Mat_<double>(5,1) << 1, -8, 0, 8, -1);
  cv::filter2D(middle, y, -1, kernel * 1.0/(12*hy), cv::Point(-1,-1), 0, cv::BORDER_REFLECT_101);

  // compute tensor
  // channel 0=J11, 1=J22, 2=J33, 3=J12, 4=J13, 5=J23
  cv::Mat_<cv::Vec6d> b(i1.size());
  for (int i = 0; i < i1.rows; i++){
    for (int j = 0; j < i1.cols; j++){
      b(i,j)[0] = x.at<double>(i,j) * x.at<double>(i,j);
      b(i,j)[1] = y.at<double>(i,j) * y.at<double>(i,j);
      b(i,j)[2] = t.at<double>(i,j) * t.at<double>(i,j);
      b(i,j)[3] = x.at<double>(i,j) * y.at<double>(i,j);
      b(i,j)[4] = x.at<double>(i,j) * t.at<double>(i,j);
      b(i,j)[5] = y.at<double>(i,j) * t.at<double>(i,j);

    }
  }

  return b;
}
Example 5
// erode foreground and background regions to increase the size of unknown region
static void erodeFB(cv::Mat_<uchar> &trimap, int r)
{
    int w = trimap.cols;
    int h = trimap.rows;

    cv::Mat_<uchar> foreground(trimap.size(), (uchar)0);
    cv::Mat_<uchar> background(trimap.size(), (uchar)0);

    for (int y = 0; y < h; ++y)
        for (int x = 0; x < w; ++x)
        {
            if (trimap(y, x) == 0)
                background(y, x) = 1;
            else if (trimap(y, x) == 255)
                foreground(y, x) = 1;
        }


    // NOTE: cv::Size(r, r) is the element's full width and height, so r acts
    // as a diameter rather than a radius.
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(r, r));

    cv::erode(background, background, kernel);
    cv::erode(foreground, foreground, kernel);

    for (int y = 0; y < h; ++y)
        for (int x = 0; x < w; ++x)
        {
            if (background(y, x) == 0 && foreground(y, x) == 0)
                trimap(y, x) = 128;
        }
}
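A short usage sketch with a made-up trimap (0 = background, 255 = foreground, 128 = unknown):

    cv::Mat_<uchar> trimap(cv::Size(64, 64), (uchar)255); // all foreground
    trimap(cv::Rect(0, 0, 32, 64)).setTo(0);              // left half background
    erodeFB(trimap, 5);
    // Pixels eroded away from either region are now 128 (unknown), widening
    // the band around the foreground/background seam.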
Example 6

cv::Mat test_with_args(const cv::Mat_<float>& in, const int& var1 = 1,
                       const double& var2 = 10.0, const std::string& name = std::string("test_name")) {
    std::cerr << "in: " << in << std::endl;
    std::cerr << "sz: " << in.size() << std::endl;
    std::cerr << "Returning transpose" << std::endl;
    return in.t();
}
Example 7

WTLF::Cluster::Cluster(const cv::Mat_<float> &img, cv::Point start)
{
	// actually find the cluster
	WTLF::Cluster::FloodFillScratchpad scratch;
	scratch.img = img;
	scratch.marking.create(img.size());
	scratch.marking.setTo(0);
	maxDepth = 0;
	deepest = start;
	offset = start;
	scratch.br = start;
#ifdef FLOOD_FILL_DFS
	scratch.depth = 0;
#endif

	// need to use BFS due to stack overflow!
	// note that the deepest is just the last point visited!
	scratch.q.push(start);
	while (!scratch.q.empty())
	{
		floodFill(scratch.q.front(), &scratch);
		scratch.q.pop();
	}

	mat = scratch.marking(
		Rect(offset, scratch.br));
	deepest -= offset;
}
Example 8

cv::Mat_<cv::Vec3b> getWrongColorSegmentationImage(cv::Mat_<int>& labels, int labelcount)
{
	std::vector<cv::Vec3b> colors;
	colors.reserve(labelcount);

	std::srand(0);

	for(int i = 0; i < labelcount; ++i)
	{
		cv::Vec3b ccolor;
		ccolor[0] = std::rand() % 256;
		ccolor[1] = std::rand() % 256;
		ccolor[2] = std::rand() % 256;
		colors.push_back(ccolor);
	}

	cv::Mat result(labels.size(), CV_8UC3);

	// The raw pointer walk below assumes both matrices are continuous.
	assert(labels.isContinuous() && result.isContinuous());
	cv::Vec3b *dst_ptr = result.ptr<cv::Vec3b>(0);
	int *src_ptr = labels[0];

	for(std::size_t i = 0; i < labels.total(); ++i)
	{
		int label = *src_ptr++;
		assert(label < (int)colors.size());
		*dst_ptr++ = colors[label];
	}
	return result;
}
Example 9
EDTFeature::EDTFeature( const cv::Mat_<byte> img, const cv::Size fvDims)
    : RFeatures::FeatureOperator( img.size(), fvDims)
{
    const cv::Mat_<int> sedt = RFeatures::DistanceTransform::calcdt( img);   // Distance map to edges
    cv::Mat fsedt;  // convert to 64 bit float for sqrt
    sedt.convertTo( fsedt, CV_64F);
    cv::sqrt( fsedt, _dtimg);
    cv::integral( _dtimg, _iimg, CV_64F);
}   // end ctor
Example 10

OvershootClusterer::OvershootClusterer(const cv::Mat_<float> &img)
    : Clusterer(img), smallestOvershootVisit(img.size(), CLUSTERER_OVERSHOOTS),
      currRow(0), currCol(0), debugWindowName(NULL), debugX(-1), debugY(-1),
      temps(CLUSTERER_OVERSHOOTS)
{
    for (int i = 0; i < CLUSTERER_OVERSHOOTS; ++i)
    {
        //temps[i].marking.create(img.size());
        temps[i].emitsClusters = !(i & (i-1));
        // true for i = 0 and powers of two: 0,1,2,4,8,...
    }
}
Example 11
static int openWindow(const cv::Mat_<cv::Vec3b>& image)
{
	char *argv[1] = { 0 };
	int argc = 0;
	QApplication app(argc, argv);
	QWidget *window = new QWidget;
	QVBoxLayout *layout = new QVBoxLayout;

	// adapted from http://www.qtcentre.org/threads/11655-OpenCV-integration
	cv::Mat bgr = image;
	std::vector<cv::Mat> channels;
	cv::split(bgr, channels);  // split the image into its b, g, r channels (OpenCV order)
	cv::Mat alpha = cv::Mat_<uchar>(image.size());
	alpha.setTo(cv::Scalar(255));
	channels.push_back(alpha);
	cv::Mat rgba = cv::Mat_<cv::Vec4b>(image.size());
	cv::merge(channels, rgba);  // append an alpha channel, giving BGRA pixels
	// QImage::Format_RGB32 stores 0xAARRGGBB, i.e. BGRA byte order on
	// little-endian machines, which matches the layout merged above. The
	// IplImage cast originally used here was removed in OpenCV 4, so read
	// the raw bytes straight from the cv::Mat instead.
	QImage qimage(rgba.data, rgba.cols, rgba.rows, static_cast<int>(rgba.step),
		QImage::Format_RGB32); // and convert to a QImage

	QLabel *imageFrame = new QLabel;
	imageFrame->setPixmap(QPixmap::fromImage(qimage, 0));

	QPushButton *quit = new QPushButton(QString("Quit"));
	quit->setGeometry(62, 40, 75, 30);
	quit->setFont(QFont("Times", 18, QFont::Bold));
	window->connect(quit, SIGNAL(clicked()), &app, SLOT(quit()));

	layout->addWidget(new QLabel("top"));
	layout->addWidget(imageFrame);
	layout->addWidget(new QLabel("bottom"));
	layout->addWidget(quit);

	window->setLayout(layout);
	window->setWindowTitle(QString("Qt Image Display"));
	window->show();
	return app.exec();
}
Example 12
void ColorEdge::detectColorEdge(const cv::Mat_<cv::Vec3b> &image, cv::Mat_<uchar> &edge)
{
    // Zero-initialize: the filter_half border is never written in the loops below.
    cv::Mat_<double> edge_map = cv::Mat_<double>::zeros(image.size());
    const int filter_half = static_cast<int>(filter_size_ / 2);

    for(int y = filter_half; y < (edge_map.rows - filter_half); ++y)
    {
        for(int x = filter_half; x < (edge_map.cols - filter_half); ++x)
        {
            cv::Mat_<cv::Vec3b> roi(image, cv::Rect(x - filter_half, y - filter_half, filter_size_, filter_size_));
            edge_map(y, x) = calculateMVD(roi);
        }
    }
    
    edge_map.convertTo(edge, edge.type());
}
Example 13
cv::Mat_<cv::Vec3f> Utility::getChromacityImage(cv::Mat_<cv::Vec3f>& rgbImage) {
	/// Normalize each pixel's (r,g,b) by its L2 norm: c = p / sqrt(r^2 + g^2 + b^2)
	cv::Size imgSize=rgbImage.size();
	cv::Mat_<float> normImage(imgSize);
	normImage.setTo(cv::Scalar(0, 0, 0));
	cv::Mat_<cv::Vec3f> chromacityImage=rgbImage.clone();
	std::vector<cv::Mat_<float> > rgbPlanes;
	cv::split(rgbImage, rgbPlanes);
	cv::Mat_<float> singlePlane=normImage.clone();
	for (int i=0; i<3; i++) {
		cv::pow(rgbPlanes[i], 2.0, singlePlane);
		cv::add(normImage, singlePlane, normImage);
	}
	cv::sqrt(normImage, normImage);
	for (int i=0; i<3; i++) {
		cv::divide(rgbPlanes[i], normImage, rgbPlanes[i]);
	}
	cv::merge(&rgbPlanes[0], 3, chromacityImage);
	return chromacityImage;
}
Example 14
template <typename T>
void generate_gaussian_filter_ocv(cv::Mat_<T>& image, double sigma)
{
    const int w = image.size().width;
    const int h = image.size().height;
    const int wh = w / 2;
    const int hh = h / 2;

    const double s = 1.0 / (sigma*sigma);
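    // The wrap-around indexing below puts the filter's peak at (0,0), i.e. an
    // FFT-shifted layout. Note the exponent uses 1/sigma^2 rather than the
    // usual 1/(2*sigma^2), and no normalization factor is applied.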

    for (int y = -hh; y < hh; y++)
    {
        size_t yy = (y + h) % h;
        for (int x = -wh; x < wh; x++)
        {
            size_t xx = (x + w) % w;
            double fx = x;
            double fy = y;
            image(yy, xx) = std::exp(-(fx*fx + fy*fy) * s);
        }
    }
}
Example 15
cv::Mat ComputeGradientTensor(const cv::Mat_<double> &i1, const cv::Mat_<double> &i2, double hx, double hy){
  // for now give hx and hy as parameters, maybe later calculate them with ROI
  cv::Mat middle, kernel, t, x, y, xx, yy, xy, yx, xt, yt;

  middle = 0.5 * i1 + 0.5 * i2;
  t = i2 - i1;
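  // Same fourth-order central-difference stencils as in ComputeBrightnessTensor,
  // applied twice for the second derivatives xx, yy, xy and once to t for xt, yt.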

  kernel = (cv::Mat_<double>(1,5) << 1, -8, 0, 8, -1);
  cv::filter2D(middle, x, CV_64F, kernel * 1.0/(12*hx), cv::Point(-1,-1), 0, cv::BORDER_REFLECT_101);
  cv::filter2D(x, xx, CV_64F, kernel * 1.0/(12*hx), cv::Point(-1,-1), 0, cv::BORDER_REFLECT_101);
  cv::filter2D(t, xt, CV_64F, kernel * 1.0/(12*hx), cv::Point(-1,-1), 0, cv::BORDER_REFLECT_101);

  kernel = (cv::Mat_<double>(5,1) << 1, -8, 0, 8, -1);
  cv::filter2D(middle, y, CV_64F, kernel * 1.0/(12*hy), cv::Point(-1,-1), 0, cv::BORDER_REFLECT_101);
  cv::filter2D(y, yy, CV_64F, kernel * 1.0/(12*hy), cv::Point(-1,-1), 0, cv::BORDER_REFLECT_101);
  cv::filter2D(x, xy, CV_64F, kernel * 1.0/(12*hy), cv::Point(-1,-1), 0, cv::BORDER_REFLECT_101);
  cv::filter2D(t, yt, CV_64F, kernel * 1.0/(12*hy), cv::Point(-1,-1), 0, cv::BORDER_REFLECT_101);
  
  // The horizontal stencil differentiates along x, so it is scaled by hx.
  kernel = (cv::Mat_<double>(1,5) << 1, -8, 0, 8, -1);
  cv::filter2D(y, yx, CV_64F, kernel * 1.0/(12*hx), cv::Point(-1,-1), 0, cv::BORDER_REFLECT_101);

  xy = 0.5 * xy + 0.5 * yx;

  // channel 0=J11, 1=J22, 2=J33, 3=J12, 4=J13, 5=J23
  cv::Mat_<cv::Vec6d> b(i1.size());
  for (int i = 0; i < b.rows; i++){
    for (int j = 0; j < b.cols; j++){
      b(i,j)[0] = xx.at<double>(i,j) * xx.at<double>(i,j) + xy.at<double>(i,j) * xy.at<double>(i,j);
      b(i,j)[1] = xy.at<double>(i,j) * xy.at<double>(i,j) + yy.at<double>(i,j) * yy.at<double>(i,j);
      b(i,j)[2] = xt.at<double>(i,j) * xt.at<double>(i,j) + yt.at<double>(i,j) * yt.at<double>(i,j);
      b(i,j)[3] = xx.at<double>(i,j) * xy.at<double>(i,j) + xy.at<double>(i,j) * yy.at<double>(i,j);
      b(i,j)[4] = xx.at<double>(i,j) * xt.at<double>(i,j) + xy.at<double>(i,j) * yt.at<double>(i,j);
      b(i,j)[5] = xy.at<double>(i,j) * xt.at<double>(i,j) + yy.at<double>(i,j) * yt.at<double>(i,j);
    }
  }

  return b;
}
Example 16
template <typename T>
void generate_gabor_filter(cv::Mat_<T>& image, double peakFreq, double theta, double sigma_x, double sigma_y)
{
    const size_t w = image.size().width;
    const size_t h = image.size().height;
    const double step_u = 1.0 / static_cast<double>(w);
    const double step_v = 1.0 / static_cast<double>(h);
    const double cos_theta = std::cos(theta);
    const double sin_theta = std::sin(theta);

    const double sigmaXSquared = sigma_x*sigma_x;
    const double sigmaYSquared = sigma_y*sigma_y;

    image.setTo(cv::Scalar(0));
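    // Sum contributions from the 3x3 neighbouring spectral periods (nx, ny),
    // presumably so the periodic frequency-domain response wraps correctly
    // at the borders of the unit frequency square.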

    for (int ny = -1; ny <= 1; ny++)
        for (int nx = -1; nx <= 1; nx++)
        {
            double v = ny;
            for (size_t y = 0; y < h; y++)
            {
                double u = nx;
                for (size_t x = 0; x < w; x++)
                {
                    double ur = u * cos_theta - v * sin_theta;
                    double vr = u * sin_theta + v * cos_theta;

                    double tmp = ur-peakFreq;
                    double value = std::exp(-2*M_PI*M_PI * (tmp*tmp*sigmaXSquared + vr*vr*sigmaYSquared));
                    image(y, x) += value;

                    u += step_u;
                }
                v += step_v;
            }
        }
}
Example 17

template <int degree>
bool GroundPolyFitter<degree>::findLanes(const cv::Mat_<float> &img, Polynomial<degree>& retp)
{
	cv::Size size = img.size();
	if(nrows != size.height || ncols != size.width)
		return false;

	// use RANSAC to fit a polynomial to the ground points

	std::vector<Point2D> bestConsensusSet;
	double bestNormalizedSqError = std::numeric_limits<double>::max(); // requires <limits>
	Polynomial<degree> bestPolynomial;

	// temporary variables for passing data to fitting algorithms
	// nrows * ncols is the maximum number of points that could be fit
	double rowColX[nrows * ncols];
	double rowColY[nrows * ncols];

	CumulativeImageSampler sampler(img);
	std::vector<Point2D> inliers;

	for(int iterations = 0; iterations < RANSAC_NUM_ITERATIONS; iterations++) {

		// first, take an initial random sample to be the first inliers
		inliers.clear();  // each iteration starts from a fresh sample
		RowCol rcs[RANSAC_MIN_SAMPLE];
		sampler.multiSample(rcs, RANSAC_MIN_SAMPLE);
		for(int i = 0; i < RANSAC_MIN_SAMPLE; i++) {
			inliers.push_back(convertPixelToGround(rcs[i]));
		}

		// find a model for the inliers
		rowColVectorToArrays(inliers, rowColX, rowColY);
		Polynomial<degree> inlierPoly = Polynomial<degree>::fitRegressionPoly(
				rowColX, rowColY, RANSAC_MIN_SAMPLE);

		// check the model with unused points to see if those are inliers too
		// (std::set_difference needs sorted input ranges and an inserter that
		// can grow the empty output vector; writing through begin() was UB)
		std::vector<Point2D> remainingPoints;
		std::set_difference(allPixels.begin(), allPixels.end(),
							inliers.begin(), inliers.end(),
							std::back_inserter(remainingPoints));
		
		std::vector<Point2D>::iterator it;
		for(it = remainingPoints.begin(); it < remainingPoints.end(); it++) {
			if(inlierPoly.sqDistance(it->getX(), it->getY()) < RANSAC_SQ_DISTANCE_THRESHOLD)
				inliers.push_back(*it);
		}

		// fit new model with larger inlier set and compare to previous best
		rowColVectorToArrays(inliers, rowColX, rowColY);
		Polynomial<degree> newInlierPoly = Polynomial<degree>::fitRegressionPoly(
				rowColX, rowColY, inliers.size());

		double sumSqError = 0.0;
		for(it = inliers.begin(); it < inliers.end(); it++) {
			sumSqError += newInlierPoly.sqDistance(it->getX(), it->getY());
		}
		double normalizedSqError = sumSqError / inliers.size();

		if(normalizedSqError < bestNormalizedSqError) {
			bestNormalizedSqError = normalizedSqError;
			bestConsensusSet = inliers;
			bestPolynomial = newInlierPoly;
		}

	}

	// return the best model found across all iterations
	retp = bestPolynomial;
	return bestNormalizedSqError != std::numeric_limits<double>::max();
}
Example 18
float singleeyefitter::cvx::histKmeans(const cv::Mat_<float>& hist, int bin_min, int bin_max, int K, float init_centres[], cv::Mat_<uchar>& labels, cv::TermCriteria termCriteria)
{
    using namespace math;

    CV_Assert( (hist.rows == 1 || hist.cols == 1) && K > 0 );

    labels = cv::Mat_<uchar>::zeros(hist.size());
    int nbins = static_cast<int>(hist.total());
    float binWidth = static_cast<float>(bin_max - bin_min)/nbins;
    float binStart = bin_min + binWidth/2;

    cv::Mat_<float> centres(K, 1, init_centres, sizeof(float));

    int iters = 0;
    bool finalRun = false;
    while (true)
    {
        ++iters;
        cv::Mat_<float> old_centres = centres.clone();

        int i_bin;
        cv::Mat_<float>::const_iterator i_hist;
        cv::Mat_<uchar>::iterator i_labels;
        cv::Mat_<float>::iterator i_centres;
        uchar label;

        float sumDist = 0;
        int movedCount = 0;

        // Step 1. Assign each element a label
        for (i_bin = 0, i_labels = labels.begin(), i_hist = hist.begin();
             i_bin < nbins;
             ++i_bin, ++i_labels, ++i_hist)
        {
            float bin_val = binStart + i_bin*binWidth;
            float minDist = sq(bin_val - centres(*i_labels));
            int curLabel = *i_labels;

            for (label = 0; label < K; ++label)
            {
                float dist = sq(bin_val - centres(label));
                if (dist < minDist)
                {
                    minDist = dist;
                    *i_labels = label;
                }
            }

            if (*i_labels != curLabel)
                movedCount++;

            sumDist += (*i_hist) * std::sqrt(minDist);
        }

        if (finalRun)
            return sumDist;

        // Step 2. Recalculate centres as the weighted mean of their assigned
        // bins (zero them first, otherwise the old values leak into the sums)
        centres.setTo(0.0f);
        cv::Mat_<float> counts(K, 1, 0.0f);
        for (i_bin = 0, i_labels = labels.begin(), i_hist = hist.begin();
             i_bin < nbins;
             ++i_bin, ++i_labels, ++i_hist)
        {
            float bin_val = binStart + i_bin*binWidth;

            centres(*i_labels) += (*i_hist) * bin_val;
            counts(*i_labels) += *i_hist;
        }
        for (label = 0; label < K; ++label)
        {
            if (counts(label) == 0)
                return std::numeric_limits<float>::infinity();

            centres(label) /= counts(label);
        }

        // Step 3. Detect termination criteria
        if (movedCount == 0)
            finalRun = true;
        else if ((termCriteria.type & cv::TermCriteria::COUNT) && iters >= termCriteria.maxCount)
            finalRun = true;
        else if (termCriteria.type & cv::TermCriteria::EPS)
        {
            float max_movement = 0;
            for (label = 0; label < K; ++label)
            {
                max_movement = std::max(max_movement, sq(centres(label) - old_centres(label)));
            }
            if (sqrt(max_movement) < termCriteria.epsilon)
                finalRun = true;
        }
    }
    return std::numeric_limits<float>::infinity();
}
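A call sketch with hypothetical inputs: a flat 256-bin histogram quantized into K = 2 clusters seeded at 64 and 192:

    cv::Mat_<float> hist(1, 256, 1.0f);          // hypothetical flat histogram
    float init_centres[2] = { 64.f, 192.f };     // hypothetical seed centres
    cv::Mat_<uchar> labels;
    cv::TermCriteria tc(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 50, 0.1);
    float sumDist = singleeyefitter::cvx::histKmeans(hist, 0, 256, 2,
                                                     init_centres, labels, tc);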
Example 19
View::View( cv::Mat_<cv::Vec3b> img, cv::Mat_<float> rng) : img2d(img), rngImg(rng)
{
    points.create(img.size());
}   // end ctor
Example 20

cv::Mat_<cv::Vec2b> adaptive_window_size(const cv::Mat& image, const cv::Mat_<int>& labels, float threshold, int minsize, int maxsize, const std::vector<disparity_region>& regions, lambda_type func)
{
	assert(minsize > 0);
	assert(maxsize > 0);

	std::vector<short> disparities(regions.size());
	for(std::size_t i = 0; i < regions.size(); ++i)
		disparities[i] = regions[i].disparity;

	cv::Mat_<cv::Vec2b> result = cv::Mat::zeros(labels.size(), CV_8UC2);
	const int onesidesizeMin = (minsize-1)/2;
	const int onesidesizeMax = (maxsize-1)/2;
	#pragma omp parallel for default(none) shared(labels, image, result, disparities, threshold, func, minsize, maxsize)
	for(int y = onesidesizeMin; y < labels.rows - onesidesizeMin; ++y)
	{
		int lastWindowSize = onesidesizeMin*2+1;

		for(int x = onesidesizeMin; x < labels.cols - onesidesizeMin; ++x)
		{
			int clabel = labels(y,x);
			int maxposs = std::min( std::min(labels.cols - x-1, labels.rows - y-1), onesidesizeMax );
			maxposs = std::min( maxposs, std::min( std::min(y-1, x-1), onesidesizeMax ) );

			maxposs = maxposs*2+1;

			int windowsizeX = std::min(lastWindowSize, maxposs);
			int windowsizeY = std::min(lastWindowSize, maxposs);

			bool grow = (lastWindowSize == minsize);

			while(true)
			{
				float measured = func(subwindow(labels, x,y, windowsizeX, windowsizeY), clabel, disparities);
				if(grow)
				{
					if(measured < threshold)
					{
						//shrink each direction separately
						float measured_altY = func(subwindow(labels, x,y, windowsizeX, windowsizeY-2), clabel, disparities);
						float measured_altX = func(subwindow(labels, x,y, windowsizeX-2, windowsizeY), clabel, disparities);

						if(measured_altY > threshold)
							windowsizeY -= 2;
						else if(measured_altX > threshold)
							windowsizeX -= 2;
						else {
							windowsizeX -= 2;
							windowsizeY -= 2;
							break;
						}
					}
					if(windowsizeX < maxposs && windowsizeY < maxposs)
					{
						windowsizeX += 2;
						windowsizeY += 2;
					} else
						break;
				} else {
					if(measured >= threshold)
					{
						grow = true;
						continue;
					}
					if( windowsizeX > minsize && windowsizeY > minsize) {
						windowsizeX -= 2;
						windowsizeY -= 2;
					} else
						break;
				}
			}
			lastWindowSize = std::min(windowsizeX, windowsizeY);
			result(y,x) = cv::Vec2b(std::max(windowsizeY, minsize), std::max(windowsizeX, minsize));
		}
	}

	return result;
}
Example 21
void markersCallback(const markers_msgs::Markers& msg)
{
  // This callback only makes sense if we have at least one marker
  if( msg.num_markers < 1 )
    return;

  /// Before using the markers information, we need to update the estimated
  /// pose considering the amount of time that elapsed since the last update
  /// from odometry.
  /// We will use the linear and angular velocity of the robot for that.

  /// Step 1 - Update particles with robot movement:
  if( odom_first_update == false )
  {
    double dt = msg.header.stamp.toSec() - last_step1_time;
    double distance = odo_lin_vel*dt; // Distance travelled
    double dtheta = odo_ang_vel*dt; // Rotation performed
    ParticleFilterStep1(distance, dtheta);
    last_step1_time = msg.header.stamp.toSec();

    // Store updated odometry pose
    odo_robot_pose.x += distance*cos(odo_robot_pose.theta);
    odo_robot_pose.y += distance*sin(odo_robot_pose.theta);
    odo_robot_pose.theta += dtheta;

  }

  /// Step 2 - Update the particle weights given the sensor model and map
  /// knowledge
#ifdef STEP_UPDATE
  // For now we only perform this step if there was any marker detected.
  // We could use the expected but undetected beacons for each particle,
  // but that would become too expensive.
  //
  // The weight for each particle will be:
  //   w(j) = mean(1/(1+sqrt((x_e(i)-x_r(i))^2+(y_e(i)-y_r(i))^2)*DISTANCE_ERROR_GAIN))
  // where x_e(i)/y_e(i) and x_r(i)/y_r(i) are the expected and obtained
  // x/y world coordinates of the detected marker, respectively. The gain is a
  // constant which can be tuned to weight detection errors more or less heavily.
  //

  // Reset normalization factor
  double norm_factor = 0;
  for(int j = 0; j < NUM_PARTICLES; j++)
  {
    // Compute the weight for each particle
    // For each obtained beacon value
    particles_weight(j,0) = 0;
    for( uint n=0; n < msg.num_markers; n++ )
    {
      // Obtain beacon position in world coordinates
      geometry_msgs::Pose2D particle;
      particle.x = particles_x(j,0);
      particle.y = particles_y(j,0);
      particle.theta = particles_theta(j,0);

      point_2d marker_lpos = {msg.range[n]*cos(msg.bearing[n]),
                              msg.range[n]*sin(msg.bearing[n])};
      point_2d marker_wpos;
      local2World( particle, marker_lpos, &marker_wpos );

      particles_weight(j,0) +=
          1.0/(1+sqrt(pow(markers_wpos[msg.id[n]-1].x-marker_wpos.x,2)
                      +pow(markers_wpos[msg.id[n]-1].y-marker_wpos.y,2))*DISTANCE_ERROR_GAIN);
    }
    // Perform the mean. We summed all elements above and now divide by
    // the number of elements summed.
    particles_weight(j,0) /= msg.num_markers;
    // Update the normalization factor
    norm_factor += particles_weight(j,0);
  }

  // Normalize the weight
  max_weight = 0.0;
  for(int j = 0; j < particles_x.size().height; j++)
  {
    particles_weight(j,0) /= norm_factor;
    // Store the particle with the best weight as our posture estimate
    if( particles_weight(j,0) > max_weight )
    {
      posture_estimate.x = particles_x(j,0);
      posture_estimate.y = particles_y(j,0);
      posture_estimate.theta = particles_theta(j,0);
      // This max_weight is just used for debugging, so that the particles
      // get more distinguishable colors.
      max_weight = particles_weight(j,0);
    }

  }
#endif

  // Show debug information
  showDebugInformation();

  /// Step 3 - Resample
#ifdef STEP_RESAMPLE
  // The resampling step is the exact implementation of the algorithm
  // described in the theoretical classes, i.e., the "Importance resampling
  // algorithm".

  // Save the current values
  // The old particles will be needed in the resampling algorithm
  cv::Mat_<double> old_particles_weight(NUM_PARTICLES, 1, 1.0/NUM_PARTICLES);

  particles_x.copyTo(old_particles_x);
  particles_y.copyTo(old_particles_y);
  particles_theta.copyTo(old_particles_theta);
  particles_weight.copyTo(old_particles_weight);

  double delta = rng.uniform(0.0, 1.0/NUM_PARTICLES);
  double c = old_particles_weight(0,0);
  int i = 0;
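  // Low-variance (systematic) resampling: draw a single random offset delta,
  // then particle j copies the first old particle whose cumulative weight c
  // exceeds u = delta + j/N.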
  for(int j = 0; j < NUM_PARTICLES; j++)
  {
    double u = delta + j/(1.0*NUM_PARTICLES);
    while( u > c )
    {
      i++;
      c += old_particles_weight(i,0);
    }
    particles_x(j,0) = old_particles_x(i,0);
    particles_y(j,0) = old_particles_y(i,0);
    particles_theta(j,0) = old_particles_theta(i,0);
    // The weight is indicative only, it will be recomputed on marker detection
    particles_weight(j,0) = old_particles_weight(i,0);
  }
#endif
  ///
  /// Particle filter steps end here
  ///

  // Save map from time to time
  iteration++;
  if( iteration == DELTA_SAVE )
  {
    cv::imwrite("mapa.png", map);
    iteration = 0;
  }

}