void BlockMatching::setImages(cv::Mat &imageLeft, cv::Mat &imageRight)
{
    this->imageLeft = imageLeft.clone();
    this->imageRight = imageRight.clone();
}
Example #2
void HumanModel::faceOff(const cv::Mat& image, const std::vector<vec2f>& points)
{
	face_texture = image;
	// The model's face mesh is triangulated at load time if it has polygons.

	// The model uses OpenGL coordinates: -Z forward, Y up.
	std::vector<vec3f> verts;
	std::vector<vec2f> face_texcoords;

#pragma warning (disable: 4305)
	// Vertex and corresponding texture coordinates, grabbed by hand in Blender.
	// Note that Blender uses the OpenGL coordinate system; the texture origin is at the bottom left.
	verts.push_back(vec3f(0.03344, 1.65360, 0.14763));     face_texcoords.push_back(vec2f(0.631, 0.571));
	verts.push_back(vec3f(-0.03354, 1.65374, 0.14678));    face_texcoords.push_back(vec2f(0.377, 0.571));
	verts.push_back(vec3f(-0.0000, 1.65530, 0.15450));     face_texcoords.push_back(vec2f(0.504, 0.592));
	verts.push_back(vec3f(0.04550, 1.65460, 0.14315));     face_texcoords.push_back(vec2f(0.667, 0.574));
	verts.push_back(vec3f(0.01932, 1.65027, 0.14915));     face_texcoords.push_back(vec2f(0.589, 0.568));
	verts.push_back(vec3f(-0.01932, 1.65027, 0.14915));    face_texcoords.push_back(vec2f(0.411, 0.568));
	verts.push_back(vec3f(-0.04550, 1.65460, 0.14315));    face_texcoords.push_back(vec2f(0.333, 0.574));
	verts.push_back(vec3f(0.0182, 1.6172, 0.1539));        face_texcoords.push_back(vec2f(0.574, 0.442));
	verts.push_back(vec3f(-0.0182, 1.6172, 0.1539));       face_texcoords.push_back(vec2f(0.426, 0.442));
	verts.push_back(vec3f(0.00000, 1.6145, 0.1730));       face_texcoords.push_back(vec2f(0.503, 0.447));
	verts.push_back(vec3f(0.00907, 1.6037, 0.15788));      face_texcoords.push_back(vec2f(0.532, 0.405));
	verts.push_back(vec3f(-0.00907, 1.6037, 0.15788));     face_texcoords.push_back(vec2f(0.468, 0.405));
	verts.push_back(vec3f(0.0232, 1.5770, 0.15163));       face_texcoords.push_back(vec2f(0.575, 0.305));
	verts.push_back(vec3f(-0.0232, 1.5770, 0.15163));      face_texcoords.push_back(vec2f(0.425, 0.305));
	verts.push_back(vec3f(0.00000, 1.57779, 0.15502));     face_texcoords.push_back(vec2f(0.508, 0.225));
	verts.push_back(vec3f(0.00000, 1.58820, 0.16580));     face_texcoords.push_back(vec2f(0.500, 0.356));
	verts.push_back(vec3f(0.07360, 1.65420, 0.0936));      face_texcoords.push_back(vec2f(0.889, 0.594));
	verts.push_back(vec3f(-0.07360, 1.65420, 0.0936));     face_texcoords.push_back(vec2f(0.111, 0.594));
	verts.push_back(vec3f(0.0555, 1.5669, 0.1042));        face_texcoords.push_back(vec2f(0.824, 0.245));
	verts.push_back(vec3f(-0.0555, 1.5669, 0.1042));       face_texcoords.push_back(vec2f(0.176, 0.245));
	verts.push_back(vec3f(0.0403, 1.6178, 0.1487));        face_texcoords.push_back(vec2f(0.656, 0.434));
	verts.push_back(vec3f(-0.0403, 1.6178, 0.1487));       face_texcoords.push_back(vec2f(0.344, 0.434));
	verts.push_back(vec3f(0.0727, 1.6180, 0.0912));        face_texcoords.push_back(vec2f(0.916, 0.460));
	verts.push_back(vec3f(-0.0727, 1.6180, 0.0912));       face_texcoords.push_back(vec2f(0.094, 0.460));
	verts.push_back(vec3f(0.0674, 1.6900, 0.1101));        face_texcoords.push_back(vec2f(0.804, 0.721));
	verts.push_back(vec3f(-0.0674, 1.6900, 0.1101));       face_texcoords.push_back(vec2f(0.196, 0.721));
	verts.push_back(vec3f(0.0325, 1.67235, 0.15495));      face_texcoords.push_back(vec2f(0.622, 0.648));
	verts.push_back(vec3f(-0.0325, 1.67235, 0.15495));     face_texcoords.push_back(vec2f(0.378, 0.648));
	verts.push_back(vec3f(0.0000, 1.6740, 0.15900));       face_texcoords.push_back(vec2f(0.500, 0.656));
	verts.push_back(vec3f(0.02380, 1.5391, 0.1280));       face_texcoords.push_back(vec2f(0.625, 0.112));
	verts.push_back(vec3f(0.0000, 1.5340, 0.13860));       face_texcoords.push_back(vec2f(0.500, 0.089));
	verts.push_back(vec3f(-0.02380, 1.5391, 0.1280));      face_texcoords.push_back(vec2f(0.375, 0.112));
	verts.push_back(vec3f(0.0436, 1.7156, 0.1360));        face_texcoords.push_back(vec2f(0.662, 0.796));
	verts.push_back(vec3f(0.0000, 1.7248, 0.1462));        face_texcoords.push_back(vec2f(0.500, 0.819));
	verts.push_back(vec3f(-0.0436, 1.7156, 0.1360));       face_texcoords.push_back(vec2f(0.338, 0.796));
#pragma warning (default: 4305)

	assert(points.size() == POINT_MAX);
	assert(verts.size() == POINT_MAX);

	// NOTE: an image (cv::Mat) is top-down on the Y axis, while OpenGL uses a bottom-up Y axis.
	float scale_x = (verts[LEYE_C].x - verts[REYE_C].x)/(points[LEYE_C].x - points[REYE_C].x);
	float scale_y = (verts[EYEBROW_M].y - verts[NOSTRIL_R].y)/(points[EYEBROW_M].y - points[NOSTRIL_R].y);
	std::cout << "face area scaling: (" << scale_x << "," << scale_y << ")\n";
	float ratio = scale_y / scale_x;
	
	// looking for a material named "face"
	auto is_face_material = [](const material_t& material) -> bool { return material.name == "face"; };
	auto it = std::find_if(materials.begin(), materials.end(), is_face_material);
	assert(it !=  materials.end());
	size_t face_material_id = std::distance(materials.begin(), it);
	std::cout << "face material id =" << _ << face_material_id << std::endl;
	
#ifdef DEBUG
	Mat texture = cv::imread("Model_obj/face.png");
	assert(!texture.empty());

	const Size texture_size = texture.size();
#else
	const Size texture_size(1024, 1024);
#endif

	Rect rect_src(Point(0, 0), texture_size);
	const std::vector<vec3i> delaunay = getSubdivTriangle(rect_src, face_texcoords);

#ifdef DEBUG
	const Size image_size = image.size();
	Rect rect_dst(Point(0, 0), image_size);

	Mat image2 = image.clone();
	std::vector<vec2f> points_tl;
	points_tl.reserve(points.size());
	for(const vec2f& point : points)
	{
		vec2f point_tl(point.x, 1.0f - point.y);
		Point pt(cvRound(point_tl.x * image_size.width), cvRound(point_tl.y * image_size.height));
		circle(image2, pt, 3, CV_RGB(0, 255, 0), 1, LINE_8);

		points_tl.push_back(point_tl);
	}

	std::vector<vec2f> face_texcoords_tl;
	face_texcoords_tl.reserve(face_texcoords.size());
	for(const vec2f& texcoord : face_texcoords)
		face_texcoords_tl.push_back(vec2f(texcoord.u, 1.0f - texcoord.v));

	cv::imshow("texture", drawTriangle(texture, face_texcoords_tl, delaunay));
	cv::imshow("image", drawTriangle(image2, points_tl, delaunay));
	imwrite("Output_obj/texture2.png", texture);
	imwrite("Output_obj/image.png", image2);
	cv::waitKey();

#endif // DEBUG

	const float H = 165.937f;  // Cutoff height: the head is above it, the body below.
//	const vec2f head_bl(-10.09333f, 167.03551f), head_tr(10.09333, 187.22217);  // face projects to XOZ plane
//	const vec2f head_bl(-0.36657f, -0.50000f), head_tr(0.36657f, 0.50000f);
	const vec2f head_bl(-0.08885, 1.52904/*, 0.09143*/), head_tr(0.08885, 1.77111/*, 0.09143*/);
	const vec2f head_size = head_tr - head_bl;
	int sum = 0;

	std::vector<vec2f> verts_xy(POINT_MAX);
	for(int i = 0; i < POINT_MAX; ++i)
		verts_xy[i] = vec2f(verts[i].x, verts[i].y) - head_bl;  // relative to the head bottom left. I'm running out of names!

	for(std::pair<std::string, mesh_t>& string_mesh : meshes) 
	{
		mesh_t& mesh = string_mesh.second;
		std::vector<vec3f>& vertices = mesh.vertices;
		std::vector<vec2f>& texcoords= mesh.texcoords;
		const size_t vertex_count = vertices.size();

		// head height remains unchanged, scale head width according to the ratio.
		for(vec3f& vertex : vertices)
		{
//			if(vertex.y > H)
//				vertex.x = (vertex.x - verts[EYES_C].x) * ratio + verts[EYES_C].x;
		}

		if(mesh.material_id != face_material_id)
			continue;

		// Map positions from the source Delaunay mesh to the destination Delaunay mesh.
		// I expected cv::remap could fulfill this job.
		for(size_t j = 0; j < vertex_count; ++j)
		{
			// handle vertex positions and texture coordinates
			vec3f& vertex = vertices[j];
			vec2f& texcoord = texcoords[j];
			vec2f floor(std::floor(texcoord.s), std::floor(texcoord.t));
			vec2f clamp = texcoord - floor;
	
			for(const vec3i& tri : delaunay)
			{
				vec2f st;
				Triangle2 src(face_texcoords[tri.i], face_texcoords[tri.j], face_texcoords[tri.k]);
				if(src.contains(clamp, st))
				{
					Triangle2 dst(points[tri.i], points[tri.j], points[tri.k]);
					texcoord = floor + dst.getPosition(st);  // flip vertical
					break;
				}
			}
/*
			float model_x_m = verts[NOSE_C].x, image_x_m = points[NOSE_C].x;
			int edge[] = { EDGE_CHIN_R, EDGE_LIP_R, EDGE_NOSE_R, EDGE_EYES_R, FOREHEAD_R, EDGE_FRONT_R};
			for(int i = 0; i < sizeof(edge)/sizeof(edge[0]) - 1; ++i)
			{
				if(verts[i].y <= vertex.y && vertex.y < verts[i+1].y)
					vertex.x = model_x_m + (vertex.x - image_x_m) / scale_x;
			}
*/
			vec2f st;
			bool out_of_area = true;
			vec2f point = vec2f(vertex.x, vertex.y) - head_bl;
			for(const vec3i& tri : delaunay)
			{
				Triangle2 src(verts_xy[tri.i], verts_xy[tri.j], verts_xy[tri.k]);
				if(src.contains(point, st))
				{
					// src.A + s * (src.B - src.A) + t * (src.C - src.A);
					Triangle2 dst(face_texcoords[tri.i], face_texcoords[tri.j], face_texcoords[tri.k]);
					vec2f texcoord_map = dst.getPosition(st);
					vec2f vertex_map = texcoord_map * head_size;
//					vertex.x = head_bl.x + vertex_map.width;
//					vertex.y = head_bl.y + vertex_map.height;
					out_of_area = false;
					break;
				}
			}

			// Handle border vertices; we assume the nose is the center of the face when projected onto the XOY plane.
			if(out_of_area)  // choose the nearest one
			{
				auto less_than = [&point](const vec2f& lhs, const vec2f& rhs) -> bool
				{
					vec2f vl = lhs - point, vr = rhs - point;
					return (vl.x*vl.x + vl.y*vl.y) < (vr.x*vr.x + vr.y*vr.y);
				};

				auto it = std::min_element(verts_xy.begin(), verts_xy.end(), less_than);
				size_t index = std::distance(verts_xy.begin(), it);
				vec2f nearest_vertex = *it;

				assert(index != NOSE_C);
				float factor = distance(verts_xy[index], verts_xy[NOSE_C]) / distance(points[index], points[NOSE_C]);
				//assert(0.01f < factor && factor < 100.0f);
				vec2f tmp = factor * (point - verts_xy[index]);
				point = points[index] + tmp;
//				vertex.x = head_bl.x + point.x;
//				vertex.y = head_bl.y + point.y;
			}
		}

		sum += vertices.size();
	}

	std::cout<< "vertex count:" << _ << sum << std::endl;

}
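The two mapping loops above lean on Triangle2::contains and Triangle2::getPosition, which are not listed here. A minimal sketch of such a helper, assuming (as the inline comment "src.A + s * (src.B - src.A) + t * (src.C - src.A)" suggests) that (s, t) are barycentric coordinates relative to vertex A:

// Hypothetical sketch of the Triangle2 helper used above; assumes vec2f has the
// usual arithmetic operators and that P = A + s*(B - A) + t*(C - A).
struct Triangle2
{
	vec2f A, B, C;
	Triangle2(const vec2f& a, const vec2f& b, const vec2f& c): A(a), B(b), C(c) {}

	// Solve P - A = s*(B - A) + t*(C - A) with Cramer's rule.
	bool contains(const vec2f& P, vec2f& st) const
	{
		vec2f v0 = B - A, v1 = C - A, v2 = P - A;
		float det = v0.x*v1.y - v1.x*v0.y;
		if(std::fabs(det) < 1e-12f)
			return false;  // degenerate triangle
		float s = (v2.x*v1.y - v1.x*v2.y) / det;
		float t = (v0.x*v2.y - v2.x*v0.y) / det;
		st = vec2f(s, t);
		return s >= 0 && t >= 0 && s + t <= 1;
	}

	// Map barycentric coordinates back to a position inside this triangle.
	vec2f getPosition(const vec2f& st) const
	{
		return A + st.x*(B - A) + st.y*(C - A);
	}
};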
Example #3
    //! Fits a line or curve via RANSAC; only polynomials of degree 1 or 2 are supported.
    // \termNum:      the number of polynomial terms (2 fits a line, 3 a parabola).
    // \minDataNum:   the minimum number of data points required to fit the model.
    // \closeDataNum: the number of close data values required to assert that the model fits the data well.
    // \iterNum:      the number of iterations performed by the algorithm.
    // \thValue:      a threshold value for determining when a datum fits the model.
    void FittingCurve_RANSAC(const std::vector<cv::Point2d> &pointSet, 
                             const int &termNum, const int &minDataNum, 
                             const int &iterNum, const double &thValue,
                             const int &closeDataNum, cv::Mat &coefs, const cv::Mat &img)
    {
        double startTime = (double)cv::getTickCount();

        std::vector<cv::Point2d> bestPointSet;   // data points from which this model has been estimated
        double bestError = INFINITY;             // the error of this model relative to the data
        double errorSum, minErrorSum;            // track the smallest error sum in case no sampling reaches closeDataNum inliers
        int errorFlag = 0;
        
        cv::Mat maybeModel = cv::Mat::zeros(termNum, 1, CV_64F);
        cv::Mat img2 = img.clone();
        
        for(int iter = 0; iter < iterNum; iter++)
        {
            std::vector<cv::Point2d> tempPointSet = pointSet;
            errorSum = 0;
            
            //! Generate a set of maybe_inliers
            std::vector<cv::Point2d> maybeInliersPointSet;
            uint64 seed = iter + 1; 
            cv::RNG rng(seed);
            for(int i = 0; i < minDataNum; i++)
            {
                int n = rng.uniform(0, (int)tempPointSet.size());  // the upper bound of uniform() is exclusive for the int overload
                maybeInliersPointSet.push_back(tempPointSet.at(n));
                tempPointSet.erase(tempPointSet.begin()+n);
                if (tempPointSet.size() == 0)
                    break;
            }//end for
            
            
            //! Fitting maybe_inliers to model by Least Squares Method
            FittingCurve_LS(maybeInliersPointSet, termNum, maybeModel);
            bestPointSet = maybeInliersPointSet; //Random Sampling Model
#if 0
            for(int i=0; i<bestPointSet.size(); i++)
            {
                cv::circle(img2, bestPointSet.at(i), 1, CV_RGB(125, 125, 0));
            }
            std::vector<cv::Point2d> sampledPoints;
            IPMDrawCurve(maybeModel, img2, sampledPoints, CV_RGB(100, 0, 0) );
            //cv::imshow("RANSAC", img2);
            //cv::waitKey();
#endif
            
            
            //! Unlike the linear model, the non-linear case asks for the distance between a point and a polynomial curve.
            //! Setting the derivative of the squared-distance function to zero, d{l(x)}/d{x} = 0, gives:
            //! 4*a2^2*x^3 + 6*a1*a2*x^2 + 2*(1+2*a2*(a0-y0)+a1^2)*x + 2*(a1*(a0-y0)-x0) = 0
            //! i.e. a cubic equation (polynomial of degree three) to solve.
            //! (The code below approximates this with the vertical residual instead.)
            double a0, a1, a2;
            if (termNum == 2) {
                //!Linear RANSAC
                a0 = maybeModel.at<double>(0,0);
                a1 = maybeModel.at<double>(1,0);
                for (int i = 0; i < tempPointSet.size(); i++) {
                    int x = cvRound(tempPointSet.at(i).x);
                    int y = cvRound(tempPointSet.at(i).y);
                    double error = std::pow(std::abs(a1*x - y + a0)/sqrt(a1*a1+1), 2);
                    errorSum += error;
                    if (error < thValue) {
                        bestPointSet.push_back(tempPointSet.at(i));
                    }
                }//end for
            }
            else if (termNum == 3) {
                //!NonLinear RANSAC
                a0 = maybeModel.at<double>(0,0);
                a1 = maybeModel.at<double>(1,0);
                a2 = maybeModel.at<double>(2,0);
                for (int i = 0; i < tempPointSet.size(); i++) {
                    int x0 = cvRound(tempPointSet.at(i).x);
                    int y0 = cvRound(tempPointSet.at(i).y);
                    double error = std::pow(std::abs(y0 - (a0+(a1+a2*x0)*x0)), 2);
                    errorSum += error;
                    //cout << "Outliers Error: " << error << endl;
                    if (error < thValue) {
                        bestPointSet.push_back(tempPointSet.at(i));
                    }
                }//end for
            }
            else {
                std::cerr << "Only fitting line of degree 1 or 2." << std::endl;
            }

            //! First sampling as initialization
            if(iter == 0) minErrorSum = errorSum;
//            cout << "errorSum: " << errorSum << " minError: " << minErrorSum << endl;
            //! Fallback for the case where no sampling ever yields more than closeDataNum close points
            if(errorSum < minErrorSum)
            {
                minErrorSum = errorSum;
                if(errorFlag == 0)
                    coefs = maybeModel; //! Only takes effect while no sampling has produced a good model yet.
            }
            
            //! this implies that we may have found a good model;
            //! now test how good it is
            if((int)bestPointSet.size() > closeDataNum)
            {
                double thisError = 0;
                //cout << "Error Update!" << endl;
                cv::Mat thisModel = cv::Mat::zeros(termNum, 1, CV_64F);
                FittingCurve_LS(bestPointSet, termNum, thisModel);
                
                if (termNum == 2) {
                    a0 = thisModel.at<double>(0,0);
                    a1 = thisModel.at<double>(1,0);
                    for(int i = 0; i < bestPointSet.size(); i++) 
                    {
                        int x = cvRound(bestPointSet.at(i).x);
                        int y = cvRound(bestPointSet.at(i).y);
                        thisError += pow((a1*x - y + a0)/sqrt(a1*a1+1), 2);
                    }//end for
                }
                else if(termNum == 3) {
                    a0 = thisModel.at<double>(0,0);
                    a1 = thisModel.at<double>(1,0);
                    a2 = thisModel.at<double>(2,0);
                    for(int i = 0; i < bestPointSet.size(); i++) 
                    {
                        int x0 = cvRound(bestPointSet.at(i).x);
                        int y0 = cvRound(bestPointSet.at(i).y);
                        thisError += std::pow(std::abs(y0 - (a0+(a1+a2*x0)*x0)), 2);
                    }//end for
                }
                else {
                    std::cerr << "Only fitting line of degree 1 or 2." << std::endl;
                }
                
                //! NOTE: the error above is a known weak point.
                //! thisError should be replaced by a score that is either the size of the consensus set,
                //! or the robust error norm computed on ALL samples (not just the consensus set).
//                cout << "thisError: " << thisError << " bestError: " << bestError << endl;
                if(thisError < bestError)
                {
                    //! we have found a model which is better than any of the previous ones;
                    //! keep it until a better one is found)
                    //! best_consensus_set := consensus_set;
                    coefs = thisModel; //bestModel := thisModel
                    bestError = thisError;//bestError := thisError
                    errorFlag = 1;
                }//end if

            }//end if
            
            
//            std::vector<cv::Point2d> sampledPoints2;
//            IPMDrawCurve(coefs, img2, sampledPoints2, CV_RGB(0, 255, 0) );
//            cv::imshow("RANSAC", img2);
            
        }//end for iteration
        
        
        double costTime = ((double)cv::getTickCount() - startTime)/cv::getTickFrequency();
        printf("RANSAC Line Fitting Needs %.2f msec about %.2f Hz\n", costTime*1000, 1/costTime);
        
    }//end FittingCurve_RANSAC
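FittingCurve_LS is called above but not listed. A minimal least-squares sketch under the same interface, assuming coefs receives the polynomial coefficients a0..a(termNum-1) in ascending order, as the error formulas above imply:

// Hypothetical sketch of the least-squares fit used above: build the Vandermonde
// system A*c = y and solve its normal equations with cv::solve.
void FittingCurve_LS(const std::vector<cv::Point2d> &points, const int &termNum, cv::Mat &coefs)
{
    const int n = (int)points.size();
    cv::Mat A(n, termNum, CV_64F), y(n, 1, CV_64F);
    for(int i = 0; i < n; i++)
    {
        double xp = 1.0;
        for(int j = 0; j < termNum; j++)
        {
            A.at<double>(i, j) = xp;   // 1, x, x^2, ...
            xp *= points[i].x;
        }
        y.at<double>(i, 0) = points[i].y;
    }
    cv::solve(A, y, coefs, cv::DECOMP_NORMAL | cv::DECOMP_SVD);  // coefs becomes termNum x 1, CV_64F
}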
Example #4
// ----------------------------------------------------------------------------------
void QualityMatcher::matchImagesAsync(cv::Mat imageSrc, cv::Mat imageDst, cv::Mat priorH, MatchingResultCallback cb)
{
  if (m_matchingThread)
  {
    m_matchingThread->join();
  }
  
  // extract one channel for matching -> YUV luma would be better, but the green channel is good enough
  cv::Mat rgbSrc[4];
  cv::Mat rgbDst[4];
  cv::split(imageSrc, rgbSrc);
  cv::split(imageDst, rgbDst);
    
  m_matchingThread.reset(new std::thread(std::bind(&QualityMatcher::doTheMagic, this, rgbSrc[1], rgbDst[1], priorH.clone(), cb)));
}
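A possible call site; the MatchingResultCallback typedef is not shown in this snippet, so the lambda below assumes it is something like std::function<void(const cv::Mat&, bool)>:

// Hypothetical usage; the callback signature is an assumption.
QualityMatcher matcher;
cv::Mat src = cv::imread("src.png");
cv::Mat dst = cv::imread("dst.png");
matcher.matchImagesAsync(src, dst, cv::Mat::eye(3, 3, CV_64F),
                         [](const cv::Mat& H, bool ok)
                         {
                           if(ok) std::cout << "estimated H = " << H << std::endl;
                         });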
Example #5
void LayoutTest::testFeatureCollector(const cv::Mat & src) const {
	
	rdf::Timer dt;

	// parse xml
	PageXmlParser parser;
	parser.read(mConfig.xmlPath());

	// test loading of label lookup
	LabelManager lm = LabelManager::read(mConfig.featureCachePath());
	qInfo().noquote() << lm.toString();

	// compute super pixels
	SuperPixel sp(src);

	if (!sp.compute())
		qCritical() << "could not compute super pixels!";

	// feed the label lookup
	SuperPixelLabeler spl(sp.getMserBlobs(), Rect(src));
	spl.setLabelManager(lm);

	// set the ground truth
	if (parser.page())
		spl.setRootRegion(parser.page()->rootRegion());

	if (!spl.compute())
		qCritical() << "could not compute SuperPixel labeling!";

	SuperPixelFeature spf(src, spl.set());
	if (!spf.compute())
		qCritical() << "could not compute SuperPixel features!";

	FeatureCollectionManager fcm(spf.features(), spf.set());
	fcm.write(spl.config()->featureFilePath());
	
	FeatureCollectionManager testFcm = FeatureCollectionManager::read(spl.config()->featureFilePath());

	for (int idx = 0; idx < testFcm.collection().size(); idx++) {

		if (testFcm.collection()[idx].label() != fcm.collection()[idx].label())
			qWarning() << "wrong labels!" << testFcm.collection()[idx].label() << "vs" << fcm.collection()[idx].label();
		else
			qInfo() << testFcm.collection()[idx].label() << "is fine...";
	}

	// drawing
	cv::Mat rImg = src.clone();

	// save super pixel image
	//rImg = superPixel.drawSuperPixels(rImg);
	//rImg = tabStops.draw(rImg);
	rImg = spl.draw(rImg);
	rImg = spf.draw(rImg);
	QString dstPath = rdf::Utils::instance().createFilePath(mConfig.outputPath(), "-textlines");
	rdf::Image::save(rImg, dstPath);
	qDebug() << "debug image saved: " << dstPath;

	qDebug() << "image path: " << mConfig.imagePath();

}
Example #6
//===========================================================================
void Patch::Init(int t, double a, double b, cv::Mat &W)
{
  assert(W.type() == CV_32F);
  _t = t;
  _a = a;
  _b = b;
  _W = W.clone();
}
Example #7
bool Stabilizer::forward_backward_track(const cv::Mat& frame)
{
    cv::Mat previous_frame_gray;
    cv::cvtColor(prevFrame, previous_frame_gray, cv::COLOR_BGR2GRAY);
  
    cv::goodFeaturesToTrack(previous_frame_gray, previousFeatures, 500, 0.01, 5);

    size_t n = previousFeatures.size();
    CV_Assert(n);
    
    // Compute optical flow in selected points.
    std::vector<cv::Point2f> currentFeatures;
    std::vector<uchar> state;
    std::vector<float> error;
    cv::calcOpticalFlowPyrLK(prevFrame, frame, previousFeatures, currentFeatures, state, error);

    float median_error = median<float>(error);

    std::vector<cv::Point2f> good_points;
    std::vector<cv::Point2f> curr_points;
    for (size_t i = 0; i < n; ++i)
    {
        if (state[i] && (error[i] <= median_error))
        {
            good_points.push_back(previousFeatures[i]);
            curr_points.push_back(currentFeatures[i]);
        }
    }

    size_t s = good_points.size();
    CV_Assert(s == curr_points.size());
    
    //Compute backward optical flow
    std::vector<cv::Point2f> backwardPoints;
    std::vector<uchar> backState;
    std::vector<float> backError;

    cv::calcOpticalFlowPyrLK(frame, prevFrame, curr_points, backwardPoints, backState, backError);
    float median_back_error = median<float>(backError);

    CV_Assert(s == backwardPoints.size());
    std::vector<float> diff(s);

    for (size_t i = 0; i < s; ++i)
    {
        diff[i] = cv::norm(good_points[i] - backwardPoints[i]);
        // diff[i] = (good_points[i].x - backwardPoints[i].x) * (good_points[i].x - backwardPoints[i].x) + (good_points[i].y - backwardPoints[i].y) * (good_points[i].y - backwardPoints[i].y);
    }

    for (int i = s - 1; i >= 0; --i)
    {
        // Drop points whose backward tracking failed, whose backward error is above the median, or which drifted too far.
        if (!backState[i] || (backError[i] > median_back_error) || (diff[i] > 400))
        {
            good_points.erase(good_points.begin() + i);
            curr_points.erase(curr_points.begin() + i);
        }
    }

    s = good_points.size();

    // Find points shift.
    std::vector<float> shifts_x(s);
    std::vector<float> shifts_y(s);
    
    for (size_t i = 0; i < s; ++i)
    {
        shifts_x[i] = curr_points[i].x - good_points[i].x;
        shifts_y[i] = curr_points[i].y - good_points[i].y;
    }
    
    // Find median shift.
    cv::Point2f median_shift(median<float>(shifts_x), median<float>(shifts_y));
    xshift.push_back(median_shift.x);
    yshift.push_back(median_shift.y);

    prevFrame = frame.clone(); 
    return true;
}
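The median<T> helper used above is not part of this snippet. A minimal sketch built on std::nth_element; the vector is taken by value because nth_element partially reorders it:

// Hypothetical sketch of the median helper used above.
template <typename T>
T median(std::vector<T> values)   // by value: std::nth_element reorders its input
{
    CV_Assert(!values.empty());
    size_t mid = values.size() / 2;
    std::nth_element(values.begin(), values.begin() + mid, values.end());
    return values[mid];
}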
Example #8
bool place::reshowPlacement(const std::string &scanName,
                            const std::string &zerosFile,
                            const std::string &doorName,
                            const place::DoorDetector &d,
                            const std::string &preDone) {
  const std::string buildName = scanName.substr(scanName.find("_") - 3, 3);
  const std::string scanNumber = scanName.substr(scanName.find(".") - 3, 3);
  const std::string placementName =
      buildName + "_placement_" + scanNumber + ".dat";

  std::ifstream in(preDone + placementName, std::ios::in | std::ios::binary);
  if (!in.is_open())
    return false;
  if (!FLAGS_reshow)
    return true;

  if (!FLAGS_quietMode)
    std::cout << placementName << std::endl;

  std::vector<cv::Mat> rotatedScans, toTrim;
  std::vector<Eigen::Vector2i> zeroZero;
  place::loadInScans(scanName, zerosFile, toTrim, zeroZero);
  place::trimScans(toTrim, rotatedScans, zeroZero);

  std::vector<std::vector<place::Door>> doors = loadInDoors(doorName, zeroZero);

  int num;
  in.read(reinterpret_cast<char *>(&num), sizeof(num));
  std::vector<place::posInfo> scores(num);
  for (auto &s : scores)
    in.read(reinterpret_cast<char *>(&s), sizeof(place::posInfo));

  int cutOffNum = place::getCutoffIndex(
      placementName, scores, [](const place::posInfo &s) { return s.score; });
  cutOffNum = FLAGS_top > 0 ? FLAGS_top : cutOffNum;

  num = std::min(num, cutOffNum);

  cvNamedWindow("Preview", CV_WINDOW_NORMAL);

  if (!FLAGS_quietMode)
    std::cout << "Showing minima: " << num << std::endl;

  for (int k = 0; k < std::min(num, (int)scores.size());) {
    auto &currentScore = scores[k];

    const cv::Mat &bestScan = rotatedScans[currentScore.rotation];

    const int xOffset = currentScore.x - zeroZero[currentScore.rotation][0];
    const int yOffset = currentScore.y - zeroZero[currentScore.rotation][1];

    cv::Mat_<cv::Vec3b> output = fpColor.clone();

    auto &res = d.getResponse(0);
    for (int i = 0; i < res.outerSize(); ++i)
      for (Eigen::SparseMatrix<char>::InnerIterator it(res, i); it; ++it)
        if (it.value() > 1)
          output(it.row(), it.col()) = cv::Vec3b(0, 255, 0);

    for (int j = 0; j < bestScan.rows; ++j) {
      if (j + yOffset < 0 || j + yOffset >= fpColor.rows)
        continue;
      const uchar *src = bestScan.ptr<uchar>(j);
      for (int i = 0; i < bestScan.cols; ++i) {
        if (i + xOffset < 0 || i + xOffset >= fpColor.cols)
          continue;

        if (src[i] != 255) {
          output(j + yOffset, i + xOffset) = cv::Vec3b(0, 0, 255 - src[i]);
        }
      }
    }

    for (int j = -10; j < 10; ++j)
      for (int i = -10; i < 10; ++i)
        output(j + currentScore.y, i + currentScore.x) = cv::Vec3b(255, 0, 0);

    for (auto &d : doors[currentScore.rotation]) {
      auto color = randomColor();
      for (double x = 0; x < d.w; ++x) {
        Eigen::Vector3i index =
            (d.corner + x * d.xAxis + Eigen::Vector3d(xOffset, yOffset, 0))
                .unaryExpr([](auto v) { return std::round(v); })
                .cast<int>();

        for (int k = -2; k <= 2; ++k) {
          for (int l = -2; l <= 2; ++l) {
            output(index[1] + k, index[0] + l) = color;
          }
        }
      }
    }

    if (!FLAGS_quietMode) {
      std::cout << &currentScore << std::endl;
      std::cout << "% of scan unexplained: "
                << currentScore.scanFP / currentScore.scanPixels
                << "   Index: " << k << std::endl
                << std::endl;
    }

    const int keyCode = cv::rectshow(output);

    if (keyCode == 27) {
      cv::imwrite(preDone + buildName + "_ss_" + scanNumber + ".png", output);
      break;
    } else if (keyCode == 8)
      k = k > 0 ? k - 1 : k;
    else
      ++k;
  }
  return true;
}
Example #9
void OpenCV_Function::filter(){
	g_srcImage=cv::imread("girl-t1.jpg");

	// Mean filter (neighborhood averaging)
	cv::blur(g_srcImage,g_resultImage,cv::Size(2,2)); // larger kernel -> blurrier
	cv::imshow( "blur", g_resultImage );

	// Gaussian filter
	cv::GaussianBlur( g_srcImage, g_resultImage, cv::Size( 99, 99 ), 0, 0 );   // larger kernel -> blurrier; the kernel sizes must be positive and odd
	cv::imshow("GaussianBlur", g_resultImage );

	// Box filter
	cv::boxFilter(g_srcImage,g_resultImage,-1,cv::Size(5,5));
	cv::imshow("boxFilter", g_resultImage );

	// Median filter
	cv::medianBlur(g_srcImage,g_resultImage,9); // the aperture must be an odd number greater than 1
	cv::imshow("medianBlur", g_resultImage );

	// Bilateral filter
	cv::bilateralFilter( g_srcImage, g_resultImage, 25, 25*2, 25/2 );
	cv::imshow("bilateralFilter", g_resultImage );

	// erode: erodes an image with the local-minimum operator over a pixel neighborhood
	cv::Mat element = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(3,3));
	cv::erode(g_srcImage,g_resultImage,element,cv::Point(-1,-1),1);
	cv::imshow("erode", g_resultImage );
	// dilate: dilates an image with the local-maximum operator over a pixel neighborhood
	cv::dilate(g_srcImage,g_resultImage,element);
	cv::imshow("dilate", g_resultImage );

	// Opening: erosion followed by dilation
	//dst=open(src,element)=dilate(erode(src,element));

	// Closing: dilation followed by erosion
	//dst=close(src,element)=erode(dilate(src,element));

	// Morphological gradient: the difference between the dilated and the eroded image
	//dst=morph_grad(src,element)=dilate(src,element)-erode(src,element);

	// Top hat: the difference between the source image and its opening
	//dst=src-open(src,element);

	// Black hat: the difference between the closing and the source image
	//dst=close(src,element)-src;
	cv::morphologyEx(g_srcImage,g_resultImage, cv::MORPH_OPEN, element);
	cv::imshow( "morphologyEx", g_resultImage );

	// Simplest Canny usage: apply it directly to the source image.
	// The smaller of the two thresholds is used for edge linking, the larger one seeds strong edges; a high:low ratio between 2:1 and 3:1 is recommended.
	cv::Mat cannyMat=g_srcImage.clone();
	cv::Canny(cannyMat,cannyMat,3,9);
	cv::imshow( "Canny", cannyMat );
	//----------------------------------------------------------------------------------
	//  2. Advanced Canny usage: convert to grayscale, denoise, run Canny, then use the edges as a mask to copy the source onto the output, giving a colored edge image
	//----------------------------------------------------------------------------------
	cv::Mat dst,edge,gray;
	dst.create( g_srcImage.size(), g_srcImage.type() );   // [1] Create a matrix (dst) with the same type and size as src
	cv::cvtColor(g_srcImage,gray,CV_BGR2GRAY); // [2] Convert the source image to grayscale
	cv::blur( gray, edge, cv::Size(3,3) );  // [3] Denoise first with a 3x3 kernel
	cv::Canny(edge,edge,3,9); // [4] Run Canny on the denoised image
	dst = cv::Scalar::all(0);   // [5] Set all elements of dst to 0
	g_srcImage.copyTo( dst, edge); // [6] Use the Canny edge map as a mask to copy g_srcImage into dst
	cv::imshow( "Canny2", dst );
	
	//----------------------------------------------------------------------------------
	//  Example of calling the Sobel function
	//----------------------------------------------------------------------------------
	cv::Mat grad_x, grad_y;
	cv::Mat abs_grad_x, abs_grad_y;
	// [3] Gradient in the X direction
	cv::Sobel( g_srcImage, grad_x, CV_16S, 1, 0, 3, 1, 1, cv::BORDER_DEFAULT );
	cv::convertScaleAbs( grad_x, abs_grad_x );
	cv::imshow("Sobel X", abs_grad_x);

	// [4] Gradient in the Y direction
	cv::Sobel( g_srcImage, grad_y, CV_16S, 0, 1, 3, 1, 1, cv::BORDER_DEFAULT );
	cv::convertScaleAbs( grad_y, abs_grad_y );
	cv::imshow("Sobel Y", abs_grad_y);

	// [5] Combine the gradients (approximately)
	cv::addWeighted( abs_grad_x, 0.5, abs_grad_y, 0.5, 0, dst );
	cv::imshow("Sobel combined", dst);
  
	//----------------------------------------------------------------------------------
	// The Laplacian is a second-order differential operator in n-dimensional Euclidean space, defined as the divergence div() of the gradient grad(); for a twice-differentiable real function f, the Laplacian of f is defined this way.
	//----------------------------------------------------------------------------------
	cv::Laplacian( g_srcImage, dst, CV_16S, 3, 1, 0, cv::BORDER_DEFAULT );
	cv::convertScaleAbs( dst, abs_grad_y );
	cv::imshow("Laplacian", abs_grad_y);
	

	//----------------------------------------------------------------------------------
	//  Scharr is best thought of as a filter rather than an operator; in OpenCV it mainly exists to complement Sobel-style gradient computation.
	//----------------------------------------------------------------------------------
	// [3] Gradient in the X direction
	cv::Scharr( g_srcImage, grad_x, CV_16S, 1, 0, 1, 0, cv::BORDER_DEFAULT );
	cv::convertScaleAbs( grad_x, abs_grad_x );
	cv::imshow("Scharr X", abs_grad_x);
	// [4] Gradient in the Y direction
	cv::Scharr( g_srcImage, grad_y, CV_16S, 0, 1, 1, 0, cv::BORDER_DEFAULT );
	cv::convertScaleAbs( grad_y, abs_grad_y );
	cv::imshow("Scharr Y", abs_grad_y);

	cv::imshow( WINDOW_NAME1, g_srcImage );
}
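Each of the commented formulas above (opening, closing, gradient, top hat, black hat) maps to one cv::morphologyEx flag; a short sketch using the same globals as the function above:

// The five morphology operations described in the comments, one flag each.
cv::Mat element2 = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(3, 3));
cv::Mat opened, closed, gradient, tophat, blackhat;
cv::morphologyEx(g_srcImage, opened,   cv::MORPH_OPEN,     element2);  // dilate(erode(src))
cv::morphologyEx(g_srcImage, closed,   cv::MORPH_CLOSE,    element2);  // erode(dilate(src))
cv::morphologyEx(g_srcImage, gradient, cv::MORPH_GRADIENT, element2);  // dilate(src) - erode(src)
cv::morphologyEx(g_srcImage, tophat,   cv::MORPH_TOPHAT,   element2);  // src - open(src)
cv::morphologyEx(g_srcImage, blackhat, cv::MORPH_BLACKHAT, element2);  // close(src) - src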
Example #10
scoredContour goal_pipeline(cv::Mat input, bool suppress_output=false) {
        std::vector< std::vector<cv::Point> > contours;

        cv::Mat contourOut = input.clone();

        cv::findContours(contourOut, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
        std::vector<scoredContour> finalscores;

        if(!suppress_output) { std::cout << "Found " << contours.size() << " contours." << std::endl; }

        unsigned int ctr = 0;
        for(std::vector< std::vector<cv::Point> >::iterator i = contours.begin();
            i != contours.end(); ++i) {
                double area = cv::contourArea(*i);
                double perimeter = cv::arcLength(*i, true);
                cv::Rect bounds = cv::boundingRect(*i);

                const double cvarea_target = (80.0/240.0);
                const double asratio_target = (20.0/12.0);
                const double area_threshold = 1000;

                /* Area Thresholding Test
                 * Only accept contours of a certain total size.
                 */

                if(area < area_threshold) {
                    continue;
                }

				if(!suppress_output) {
					std::cout << std::endl;
			        std::cout << "Contour " << ctr << ": " << std::endl;
			        ctr++;
			        std::cout << "Area: "  << area << std::endl;
			        std::cout << "Perimeter: " << perimeter << std::endl;
				}

                /* Coverage Area Test
                 * Compare particle area vs. Bounding Rectangle area.
                 * score = 1 / abs((1/3)- (particle_area / boundrect_area))
                 * Score falls off as the coverage area moves away from 1/3. */
                double cvarea_score = 0;

                double coverage_area = area / bounds.area();
                cvarea_score = scoreDistanceFromTarget(cvarea_target, coverage_area);

                /* Aspect Ratio Test
                 * Computes aspect ratio of detected objects.
                 */

                double tmp = bounds.width;
                double aspect_ratio = tmp / bounds.height;
                double ar_score = scoreDistanceFromTarget(asratio_target, aspect_ratio);

                /* Image Moment Test
                 * Computes image moments and compares it to known values.
                 */

                cv::Moments m = cv::moments(*i);
                double moment_score = scoreDistanceFromTarget(0.28, m.nu02);

                /* Image Orientation Test
                 * Computes the off-axis angle of each contour.
                 */
                // theta = (1/2)atan2(mu11, mu20-mu02) radians
                // theta ranges from -90 degrees to +90 degrees.
                double theta = (atan2(m.mu11,m.mu20-m.mu02) * 90) / pi;
                double angle_score = (90 - fabs(theta))+10;

				if(!suppress_output) {
		            std::cout << "nu-02: " << m.nu02 << std::endl;
		            std::cout << "CVArea: "  <<  coverage_area << std::endl;
		            std::cout << "AsRatio: " << aspect_ratio << std::endl;
		            std::cout << "Orientation: " << theta << std::endl;
				}

                double total_score = (moment_score + cvarea_score + ar_score + angle_score) / 4;

				if(!suppress_output) {
		            std::cout << "CVArea Score: "  <<  cvarea_score << std::endl;
		            std::cout << "AsRatio Score: " << ar_score << std::endl;
		            std::cout << "Moment Score: " << moment_score << std::endl;
		            std::cout << "Angle Score: " << angle_score << std::endl;
		            std::cout << "Total Score: " << total_score << std::endl;
				}

                finalscores.push_back(std::make_pair(total_score, std::move(*i)));
        }

       if(finalscores.size() > 0) {
			std::sort(finalscores.begin(), finalscores.end(), &scoresort);

			return finalscores.back();
		} else {
			return std::make_pair(0.0, std::vector<cv::Point>());	
		}
}
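scoreDistanceFromTarget is used throughout goal_pipeline but not listed. The coverage-area comment ("score = 1 / abs((1/3) - ...)") suggests a reciprocal-distance score; a sketch along those lines, where the cap is purely an assumption:

// Hypothetical scoring helper, following the reciprocal-distance rule quoted in
// the coverage-area comment; the 100.0 cap bounding the score is an assumption.
double scoreDistanceFromTarget(double target, double value)
{
        double d = std::fabs(target - value);
        return std::min(100.0, 1.0 / std::max(d, 0.01));  // larger as value nears target
}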
Example #11
bool pixkit::labeling::twoPass(const cv::Mat &src,cv::Mat &dst,const int offset){

	//////////////////////////////////////////////////////////////////////////
	///// EXCEPTION
	if(src.type()!=CV_8UC1){
		CV_Error(CV_StsBadArg,"[xxx] src's type should be CV_8UC1.");
	}

	// Labels start from 1
	int LableNumber = 1;
	bool **Table = NULL;
	bool *assitT = NULL;
	bool *checkTable = NULL;
	int *refTable = NULL;
	int **result = NULL;
	int *resultT = NULL;

	int C[5],min,temp;
	int W,A,Q,E,S;

	dst = src.clone();
	//Mat convert to [][]
	result = new int *[src.rows];
	resultT = new int [src.rows*src.cols];
	memset(resultT,0,src.rows*src.cols*sizeof(int));
	for(int i=0;i<src.rows;i++,resultT+=src.cols)
		result[i] = resultT;

	for(int i=0;i<src.rows;i++)
	{
		int temp = i*src.cols;
		for(int j=0;j<src.cols;j++)
		{
			result[i][j] = (int)src.data[temp+j];
		}
	}


	// Labels used for normalization
	std::vector<int> ObjectIndex;

	// First pass
	for(int i=0;i<src.rows;i++)
	{
		for(int j=0;j<src.cols;j++)
		{
			C[0] = result[i][j];
			if (C[0] < 128)
				continue;
			/* C[0] >= 128 means a foreground (object) pixel */
			min = src.rows*src.cols;

			if(j-1 <0)
				C[1] = 0;
			else
				C[1] = result[i][j-1];
			if (i-1<0 || j-1 <0)
				C[2] = 0;
			else
				C[2] = result[i-1][j-1];
			if(i-1<0)
				C[3] = 0;
			else
				C[3] = result[i-1][j];
			if (i-1<0 || j+1 >=src.cols)
				C[4] = 0;
			else
				C[4] = result[i-1][j+1];


			if(C[1] ==0 && C[2] ==0 && C[3] ==0 && C[4] ==0)
			{
				C[0] = LableNumber;
				LableNumber++;
			}
			else
			{
				for(int k=1;k<=4;k++)
				{
					if(C[k]<min && C[k] != 0)
						min = C[k];
				}
				C[0] = min;
			}
			result[i][j] = C[0];
		}
	}


	//LableNumber != 1 means foreground objects exist
	//LableNumber == 1 means there is no foreground object
	// Allocate the Table memory dynamically
	if(LableNumber != 1)
	{
		Table = new bool *[LableNumber];
		assitT = new bool [LableNumber*LableNumber];
		memset(assitT,0,LableNumber*LableNumber*sizeof(bool));
		refTable = new int [LableNumber];
		checkTable = new bool [LableNumber];
		for(int i=0;i<LableNumber;i++,assitT+=LableNumber)
		{
			Table[i] = assitT;
			refTable[i] = 0;
			checkTable[i] = 0;
		}
	}

	// Second pass
	for(int i=0;i<src.rows;i++)
	{
		for(int j=0;j<src.cols;j++)
		{	
			S = result[i][j];
			if(S == 0)
				continue;

			if(i-1<0)
				W = 0;
			else
				W = result[i-1][j];
			if(i-1<0 || j-1<0)
				Q = 0;
			else
				Q = result[i-1][j-1];
			if(j-1<0)
				A = 0;
			else
				A = result[i][j-1];
			if (i-1<0 || j+1 >= src.cols)
				E = 0;
			else
				E = result[i-1][j+1];

			/* Fill in the equivalence table */
			Table[S][S] = 1;
			if(W != 0)
			{
				Table[S][W] = 1;
				Table[W][S] = 1;
			}
			if(A!=0)
			{
				Table[S][A] = 1;
				Table[A][S] = 1;
			}
			if(Q!=0)
			{
				Table[S][Q] = 1;
				Table[Q][S] = 1;
			}
			if (E!=0)
			{
				Table[S][E] = 1;
				Table[E][S] = 1;
			}					
		}
	}
	// Equivalence table
	// Label values start from 1, so i and j start from 1
	for(int i=1;i<LableNumber;i++)
	{
		for (int j=1;j<LableNumber;j++)
		{
			if(Table[i][j] ==1)
			{
				for(int ii = 1;ii<LableNumber;ii++)
					if(Table[j][ii] == 1)
					{
						Table[i][ii] = 1;
						Table[ii][i] = 1;
					}
			}
		}
	}

	// Build the ref lookup table
	for(int i=1;i<LableNumber;i++)
	{
		for (int j=1;j<LableNumber;j++)
		{
			if(Table[i][j] != 0)
			{
				refTable[i] = j;
				checkTable[j] = true;
				break;
			}
		}
	}


	//LableNumber != 1 means foreground objects exist
	for(int i=1;i<LableNumber;i++)
	{
		if(checkTable[i] == true)
		{
			ObjectIndex.push_back(i);
		}
	}

	// Refine with the lookup table and normalize the labels at the same time
	int labelValue = 0,labelIndex = 0;
	for(int i=0;i<src.rows;i++)
	{
		for(int j=0;j<src.cols;j++)
		{
			temp = result[i][j];
			if(temp != 0)
			{
				labelValue = refTable[temp];
				for(int k=0;k<ObjectIndex.size();k++)
				{
					if(ObjectIndex[k] == labelValue)
					{
						result[i][j] = k+offset;
						break;
					}
				}
			}
		}
	}

	//[][] convert to Mat
	for(int i=0;i<src.rows;i++)
	{
		int temp = i*src.cols;
		for(int j=0;j<src.cols;j++)
		{
			dst.data[temp+j] = result[i][j];
		}
	}


	//LableNumber != 1 means foreground objects exist
	// Free the dynamically allocated memory
	if(LableNumber != 1)
	{
		delete []Table[0];
		delete []Table;
		delete []refTable;
		delete []checkTable;
	}
	delete []result[0];   // result/resultT are allocated unconditionally, so free them unconditionally
	delete []result;

	return true;
}
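For comparison, OpenCV 3+ ships the same two-pass idea as cv::connectedComponents; a sketch that produces equivalent labels (modulo the offset convention) for the same CV_8UC1 input:

// Built-in alternative to the hand-rolled two-pass labeling above (OpenCV 3+).
cv::Mat binary, labels;
cv::threshold(src, binary, 127, 255, cv::THRESH_BINARY);   // same >= 128 foreground rule
int nLabels = cv::connectedComponents(binary, labels, 8, CV_32S);
// labels is CV_32S with background = 0 and components numbered 1..nLabels-1.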
Example #12
void SolutionViewer::LoadImage(cv::Mat& img)
{
	input_img = img.clone();
	image_loaded_flag = true;
}
Example #13
WeightedGaussian::WeightedGaussian(double weight, const cv::Mat &mean, const cv::Mat &covariance)
    : _Weight(weight), _Mean(mean.clone()), _Covariance(covariance.clone())
{
    
}
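A weighted Gaussian like this is normally evaluated as the weight times the multivariate normal density. A sketch of such an evaluation; the method itself is hypothetical (not part of the class as shown) and assumes _Mean is a d x 1 CV_64F column vector:

// Hypothetical density evaluation for the class above.
double WeightedGaussian::evaluate(const cv::Mat &x) const
{
    int d = _Mean.rows;
    cv::Mat diff = x - _Mean;
    cv::Mat m = diff.t() * _Covariance.inv(cv::DECOMP_SVD) * diff;  // squared Mahalanobis distance
    double norm = std::pow(2.0 * CV_PI, d / 2.0) * std::sqrt(cv::determinant(_Covariance));
    return _Weight * std::exp(-0.5 * m.at<double>(0, 0)) / norm;
}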
Example #14
void FSolver::normalizePoints(const cv::Mat &P, cv::Mat &Q) const
{
  // turn P into homogeneous coords first
  
  if(P.rows == 3) // P is 3xN
  {
    if(P.type() == CV_64F)
      Q = P.clone();
    else
      P.convertTo(Q, CV_64F);
    
    cv::Mat aux = Q.row(0);
    cv::divide(aux, Q.row(2), aux);
    
    aux = Q.row(1);
    cv::divide(aux, Q.row(2), aux);
    
  }
  else if(P.rows == 2) // P is 2xN
  {
    const int N = P.cols;
    
    Q.create(3, N, CV_64F);
    Q.row(2) = 1;
    if(P.type() == CV_64F)
    {
      Q.rowRange(0,2) = P * 1;
    }
    else
    {
      cv::Mat aux = Q.rowRange(0,2);
      P.convertTo(aux, CV_64F);
    }
  }
  else if(P.cols == 3) // P is Nx3
  {
    if(P.type() == CV_64F)
      Q = P.t();
    else
    {
      P.convertTo(Q, CV_64F);
      Q = Q.t();
    } 
    
    cv::Mat aux = Q.row(0);
    cv::divide(aux, Q.row(2), aux);
    
    aux = Q.row(1);
    cv::divide(aux, Q.row(2), aux);
  }
  else if(P.cols == 2) // P is Nx2
  {
    const int N = P.rows;
    
    Q.create(N, 3, CV_64F);
    Q.col(2) = 1;
    cv::Mat aux;
    if(P.type() == CV_64F)
    {
      aux = Q.colRange(0,2);   // first two columns of Q, matching the else branch
      P.copyTo(aux);
    }
    else
    {
      aux = Q.colRange(0,2);
      P.convertTo(aux, CV_64F);
    }
    
    Q = Q.t();
  }  
}
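Whatever the input layout (2xN, 3xN, Nx2 or Nx3), Q comes back as a 3xN CV_64F matrix in homogeneous coordinates. A tiny usage check, assuming FSolver can be default-constructed:

// Minimal usage sketch: four points in Nx2 layout come back as 3x4 homogeneous coords.
cv::Mat P = (cv::Mat_<float>(4, 2) << 10, 20,  30, 40,  50, 60,  70, 80);
cv::Mat Q;
FSolver solver;                       // assumes a default-constructible FSolver
solver.normalizePoints(P, Q);
CV_Assert(Q.rows == 3 && Q.cols == 4 && Q.type() == CV_64F);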
Example #15
bool cv::RGBDOdometry( cv::Mat& Rt, const Mat& initRt,
                       const cv::Mat& image0, const cv::Mat& _depth0, const cv::Mat& validMask0,
                       const cv::Mat& image1, const cv::Mat& _depth1, const cv::Mat& validMask1,
                       const cv::Mat& cameraMatrix, float minDepth, float maxDepth, float maxDepthDiff,
                       const std::vector<int>& iterCounts, const std::vector<float>& minGradientMagnitudes,
                       int transformType )
{
    const int sobelSize = 3;
    const double sobelScale = 1./8;

    Mat depth0 = _depth0.clone(),
        depth1 = _depth1.clone();

    // check RGB-D input data
    CV_Assert( !image0.empty() );
    CV_Assert( image0.type() == CV_8UC1 );
    CV_Assert( depth0.type() == CV_32FC1 && depth0.size() == image0.size() );

    CV_Assert( image1.size() == image0.size() );
    CV_Assert( image1.type() == CV_8UC1 );
    CV_Assert( depth1.type() == CV_32FC1 && depth1.size() == image0.size() );

    // check masks
    CV_Assert( validMask0.empty() || (validMask0.type() == CV_8UC1 && validMask0.size() == image0.size()) );
    CV_Assert( validMask1.empty() || (validMask1.type() == CV_8UC1 && validMask1.size() == image0.size()) );

    // check camera params
    CV_Assert( cameraMatrix.type() == CV_32FC1 && cameraMatrix.size() == Size(3,3) );

    // other checks
    CV_Assert( iterCounts.empty() || minGradientMagnitudes.empty() ||
               minGradientMagnitudes.size() == iterCounts.size() );
    CV_Assert( initRt.empty() || (initRt.type()==CV_64FC1 && initRt.size()==Size(4,4) ) );

    vector<int> defaultIterCounts;
    vector<float> defaultMinGradMagnitudes;
    vector<int> const* iterCountsPtr = &iterCounts;
    vector<float> const* minGradientMagnitudesPtr = &minGradientMagnitudes;

    if( iterCounts.empty() || minGradientMagnitudes.empty() )
    {
        defaultIterCounts.resize(4);
        defaultIterCounts[0] = 7;
        defaultIterCounts[1] = 7;
        defaultIterCounts[2] = 7;
        defaultIterCounts[3] = 10;

        defaultMinGradMagnitudes.resize(4);
        defaultMinGradMagnitudes[0] = 12;
        defaultMinGradMagnitudes[1] = 5;
        defaultMinGradMagnitudes[2] = 3;
        defaultMinGradMagnitudes[3] = 1;

        iterCountsPtr = &defaultIterCounts;
        minGradientMagnitudesPtr = &defaultMinGradMagnitudes;
    }

    preprocessDepth( depth0, depth1, validMask0, validMask1, minDepth, maxDepth );

    vector<Mat> pyramidImage0, pyramidDepth0,
                pyramidImage1, pyramidDepth1, pyramid_dI_dx1, pyramid_dI_dy1, pyramidTexturedMask1,
                pyramidCameraMatrix;
    buildPyramids( image0, image1, depth0, depth1, cameraMatrix, sobelSize, sobelScale, *minGradientMagnitudesPtr,
                   pyramidImage0, pyramidDepth0, pyramidImage1, pyramidDepth1,
                   pyramid_dI_dx1, pyramid_dI_dy1, pyramidTexturedMask1, pyramidCameraMatrix );

    Mat resultRt = initRt.empty() ? Mat::eye(4,4,CV_64FC1) : initRt.clone();
    Mat currRt, ksi;
    for( int level = (int)iterCountsPtr->size() - 1; level >= 0; level-- )
    {
        const Mat& levelCameraMatrix = pyramidCameraMatrix[level];

        const Mat& levelImage0 = pyramidImage0[level];
        const Mat& levelDepth0 = pyramidDepth0[level];
        Mat levelCloud0;
        cvtDepth2Cloud( pyramidDepth0[level], levelCloud0, levelCameraMatrix );

        const Mat& levelImage1 = pyramidImage1[level];
        const Mat& levelDepth1 = pyramidDepth1[level];
        const Mat& level_dI_dx1 = pyramid_dI_dx1[level];
        const Mat& level_dI_dy1 = pyramid_dI_dy1[level];

        CV_Assert( level_dI_dx1.type() == CV_16S );
        CV_Assert( level_dI_dy1.type() == CV_16S );

        const double fx = levelCameraMatrix.at<double>(0,0);
        const double fy = levelCameraMatrix.at<double>(1,1);
        const double determinantThreshold = 1e-6;

        Mat corresps( levelImage0.size(), levelImage0.type() );

        // Run transformation search on current level iteratively.
        for( int iter = 0; iter < (*iterCountsPtr)[level]; iter ++ )
        {
            int correspsCount = computeCorresp( levelCameraMatrix, levelCameraMatrix.inv(), resultRt.inv(DECOMP_SVD),
                                                levelDepth0, levelDepth1, pyramidTexturedMask1[level], maxDepthDiff,
                                                corresps );

            if( correspsCount == 0 )
                break;

            bool solutionExist = computeKsi( transformType,
                                             levelImage0, levelCloud0,
                                             levelImage1, level_dI_dx1, level_dI_dy1,
                                             corresps, correspsCount,
                                             fx, fy, sobelScale, determinantThreshold,
                                             ksi );

            if( !solutionExist )
                break;

            computeProjectiveMatrix( ksi, currRt );

            resultRt = currRt * resultRt;

#if SHOW_DEBUG_IMAGES
            std::cout << "currRt " << currRt << std::endl;
            Mat warpedImage0;
            const Mat distCoeff(1,5,CV_32FC1,Scalar(0));
            warpImage<uchar>( levelImage0, levelDepth0, resultRt, levelCameraMatrix, distCoeff, warpedImage0 );

            imshow( "im0", levelImage0 );
            imshow( "wim0", warpedImage0 );
            imshow( "im1", levelImage1 );
            waitKey();
#endif
        }
    }

    Rt = resultRt;

    return !Rt.empty();
}
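A minimal call sketch: passing empty iterCounts/minGradientMagnitudes triggers the 7/7/7/10 default schedule above. File names, depth data, intrinsics, and the transformType value are placeholders:

// Hypothetical usage of cv::RGBDOdometry; inputs are placeholders.
cv::Mat gray0 = cv::imread("frame0.png", 0);                  // CV_8UC1
cv::Mat gray1 = cv::imread("frame1.png", 0);
cv::Mat depth0(gray0.size(), CV_32FC1, cv::Scalar(1.f));      // real depth (meters) goes here
cv::Mat depth1(gray1.size(), CV_32FC1, cv::Scalar(1.f));
float vals[] = {525.f, 0.f, 319.5f,  0.f, 525.f, 239.5f,  0.f, 0.f, 1.f};
cv::Mat K(3, 3, CV_32FC1, vals);
cv::Mat Rt;
int transformType = 0;  // the meaning of this enum is defined elsewhere in the library
bool ok = cv::RGBDOdometry(Rt, cv::Mat(), gray0, depth0, cv::Mat(),
                           gray1, depth1, cv::Mat(), K,
                           0.f, 4.f, 0.07f,
                           std::vector<int>(), std::vector<float>(), transformType);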
Example #16
/**
 * @author      JIA Pei, YAO Wei
 * @version     2010-05-20
 * @brief       Additive ASM ND Profiles Fitting, for static images, so that we record the whole fitting process
 * @param       iImg            Input - image to be fitted
 * @param       oImages         Output - the fitting process
 * @param       dim             Input - profile dimension, 1, 2, 4 or 8
 * @param       epoch           Input - the iteration epoch
 * @param       pyramidlevel    Input - pyramid level, 1, 2, 3 or 4 at most
 * @note        Refer to "AAM Revisited, page 34, figure 13", particularly, those steps.
*/
float VO_FittingASMNDProfiles::VO_ASMNDProfileFitting(  const cv::Mat& iImg,
                                                        std::vector<cv::Mat>& oImages,
                                                        unsigned int epoch,
                                                        unsigned int pyramidlevel,
                                                        unsigned int dim,
                                                        bool record)
{
double t = (double)cv::getTickCount();

    this->m_iNbOfPyramidLevels = pyramidlevel;
    this->SetProcessingImage(iImg, this->m_VOASMNDProfile);
    this->m_iIteration = 0;

if(record)
{
    cv::Mat temp = iImg.clone();
    VO_Fitting2DSM::VO_DrawMesh(this->m_VOFittingShape, this->m_VOASMNDProfile, temp);
    oImages.push_back(temp);
}

    // Get m_MatModelAlignedShapeParam and m_fScale, m_vRotateAngles, m_MatCenterOfGravity
    this->m_VOASMNDProfile->VO_CalcAllParams4AnyShapeWithConstrain( this->m_VOFittingShape,
                                                                    this->m_MatModelAlignedShapeParam,
                                                                    this->m_fScale,
                                                                    this->m_vRotateAngles,
                                                                    this->m_MatCenterOfGravity);
    this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);
if(record)
{
    cv::Mat temp1 = iImg.clone();
    VO_Fitting2DSM::VO_DrawMesh(this->m_VOFittingShape, this->m_VOASMNDProfile, temp1);
    oImages.push_back(temp1);
}

    // Explained by YAO Wei, 2008-2-9.
    // Scale this->m_VOFittingShape, so face width is a constant StdFaceWidth.
    //this->m_fScale2 = this->m_VOASMNDProfile->m_VOReferenceShape.GetWidth() / this->m_VOFittingShape.GetWidth();
    this->m_fScale2 = this->m_VOASMNDProfile->m_VOReferenceShape.GetCentralizedShapeSize() / this->m_VOFittingShape.GetCentralizedShapeSize();
    this->m_VOFittingShape *= this->m_fScale2;

    int w = (int)(iImg.cols*this->m_fScale2);
    int h = (int)(iImg.rows*this->m_fScale2);
    cv::Mat SearchImage = cv::Mat(cv::Size( w, h ), this->m_ImageProcessing.type(), this->m_ImageProcessing.channels() );

    float PyrScale = pow(2.0f, (float) (this->m_iNbOfPyramidLevels-1.0f) );
    this->m_VOFittingShape /= PyrScale;

    const int nQualifyingDisplacements = (int)(this->m_VOASMNDProfile->m_iNbOfPoints * VO_Fitting2DSM::pClose);

    // for each level in the image pyramid
    for (int iLev = this->m_iNbOfPyramidLevels-1; iLev >= 0; iLev--)
    {
        // Set image roi, instead of cvCreateImage a new image to speed up
        cv::Mat siROI = SearchImage(cv::Rect(0, 0, (int)(w/PyrScale), (int)(h/PyrScale) ) );
        cv::resize(this->m_ImageProcessing, siROI, siROI.size());

if(record)
{
        cv::Mat temp2 = cv::Mat(iImg.size(), iImg.type(), iImg.channels());
        cv::Mat temp2ROI = temp2(cv::Range (0, (int)(iImg.rows/PyrScale) ), cv::Range (0, (int)(iImg.cols/PyrScale) ) );
        cv::resize(iImg, temp2ROI, temp2ROI.size() );
        oImages.push_back(temp2);
        VO_Fitting2DSM::VO_DrawMesh(this->m_VOFittingShape / this->m_fScale2, this->m_VOASMNDProfile, temp2);
}

        this->m_VOEstimatedShape = this->m_VOFittingShape;
        this->PyramidFit(   this->m_VOEstimatedShape,
                            SearchImage,
                            oImages,
                            iLev,
                            VO_Fitting2DSM::pClose,
                            epoch,
                            dim,
                            record);
        this->m_VOFittingShape = this->m_VOEstimatedShape;

        if (iLev != 0)
        {
            PyrScale /= 2.0f;
            this->m_VOFittingShape *= 2.0f;
        }
    }

    // Explained by YAO Wei, 2008-02-09.
    // this->m_fScale2 back to original size
    this->m_VOFittingShape /= this->m_fScale2;

t = ((double)cv::getTickCount() -  t )/  (cv::getTickFrequency()*1000.);
printf("MRASM fitting time cost: %.2f millisec\n", t);

    return t;
}
Example #17
//\fn void ExtrinsicParam::computeRtMatrix(double pan, double tilt, cv::Mat image);
///\brief This function computes the rotation matrix and the translation vector of the extrinsic parameters.
///\param pan Pan value of the camera when the image was taken.
///\param tilt Tilt value of the camera when the image was taken.
///\param image The image from which the rotation matrix and the translation vector should be computed.
void ExtrinsicParam::computeRtMatrix(double pan, double tilt, cv::Mat image)
{
  // Save the pan and tilt values so the extrinsic parameters can be updated when the camera position changes
  this->pan = pan;
  this->tilt = tilt;
  cv::Mat img = image.clone();
  cv::Mat initImg = image.clone();
  int cnt = 0;

  double fx = K(0,0), fy = K(1,1), cx = K(0,2), cy = K(1,2);
  double u, v, x, y;
  double dist_to_point;

  Eigen::MatrixXd Pr, Pi;
  Pr.resize(4,6);
  Pr.setZero();
  Pi.resize(3,6);
  Pi.setZero();

  pp.cnt = 0;

  // Create a window of the scene
  cv::namedWindow("Extrinsic Image",CV_WINDOW_AUTOSIZE);
  cvSetMouseCallback("Extrinsic Image",On_Mouse,0);
  cv::waitKey(10);
  cv::imshow("Extrinsic Image", img);
  cv::waitKey(10);

  // We need 6 points to compute the translation vector and the rotation matrix
  while (pp.cnt <= 6)
    {
      if (cnt > pp.cnt) // Case where we do a right click in order to remove the last point inserted
	{
	  switch (cnt)
	    {
	    case 1:
	      {
		img = image.clone();
		cnt = pp.cnt;
	      }
	      break;
	      
	    case 2:
	      {
		img = image.clone();
		cv::circle(img,pp.p1[0],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,0,0)",pp.p1[0],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cnt = pp.cnt;
	      }
	      break;

	    case 3:
	      {
		img = image.clone();
		cv::circle(img,pp.p1[0],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,0,0)",pp.p1[0],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[1],3,cv::Scalar(255,0,0));
		cv::putText(img,"(1.5,0,0)",pp.p1[1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cnt = pp.cnt;
	      }
	      break;

	    case 4:
	      {
		img = image.clone();
		cv::circle(img,pp.p1[0],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,0,0)",pp.p1[0],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[1],3,cv::Scalar(255,0,0));
		cv::putText(img,"(1.5,0,0)",pp.p1[1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[2],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,1.5,0)",pp.p1[2],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cnt = pp.cnt;
	      }
	      break;

	    case 5:
	      {
		img = image.clone();
		cv::circle(img,pp.p1[0],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,0,0)",pp.p1[0],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[1],3,cv::Scalar(255,0,0));
		cv::putText(img,"(1.5,0,0)",pp.p1[1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[2],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,1.5,0)",pp.p1[2],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[3],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,0,1)",pp.p1[3],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cnt = pp.cnt;
	      }
	      break;

	    case 6:
	      {
		img = image.clone();
		cv::circle(img,pp.p1[0],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,0,0)",pp.p1[0],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[1],3,cv::Scalar(255,0,0));
		cv::putText(img,"(1.5,0,0)",pp.p1[1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[2],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,1.5,0)",pp.p1[2],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[3],3,cv::Scalar(255,0,0));
		cv::putText(img,"(0,0,1)",pp.p1[3],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cv::circle(img,pp.p1[4],3,cv::Scalar(255,0,0));
		cv::putText(img,"(1.5,0,1)",pp.p1[4],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
		cnt = pp.cnt;
	      }
	      break;

	    default:
	      break;
	    }
	  cv::imshow("Extrinsic Image", img);
	  cv::waitKey(10);
	}
      if (pp.clicked) // Case where we do a left click in order to insert a point
	{
	  cv::circle(img,pp.p1[pp.cnt-1],3,cv::Scalar(255,0,0));
	  
	  if (pp.cnt == 1) // First point to insert (0,0,0) (on the mocap basis)
	    {
	      cv::putText(img,"(0,0,0)",pp.p1[pp.cnt-1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
	      // Get the image coordinates
	      u = pp.p1[0].x;
	      v = pp.p1[0].y;
	      Eigen::Vector3d undist;
	      undist(0) = u;
	      undist(1) = v;
	      undist(2) = 1.;
	      
	      // Get the distance between the camera and the 3d point (the scale s)
	      if (cam == 1)
		{
		  dist_to_point = DIST_TO_000_CAM1;
		}
	      else if (cam == 2)
		{
		  dist_to_point= DIST_TO_000_CAM2;
		}
	      Pi(0,0) = u*dist_to_point;
	      Pi(1,0) = v*dist_to_point;
	      Pi(2,0) = dist_to_point;
	      Pr(3,0) = 1.;

	    } else if (pp.cnt == 2) // Second point to insert (1500mm,0,0) (on the mocap basis)
	    {
	      cv::putText(img,"(1.5,0,0)",pp.p1[pp.cnt-1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
	      u = pp.p1[1].x;
	      v = pp.p1[1].y;
	      Eigen::Vector3d undist;
	      undist(0) = u;
	      undist(1) = v;
	      undist(2) = 1.;
	      
	      if (cam == 1)
		{
		  dist_to_point = DIST_TO_0500_CAM1;
		}
	      if (cam == 2)
		{
		  dist_to_point = DIST_TO_0500_CAM2;
		}

	      x = (u)*(dist_to_point);
	      y = (v)*(dist_to_point);

	      Pi(0,1) = x;
	      Pi(1,1) = y;
	      Pi(2,1) = dist_to_point;
	      Pr(0,1) = 1500.;
	      Pr(3,1) = 1.;

	    } else if (pp.cnt == 3) // Third point to insert: (0,1500mm,0) in the mocap frame
	    {
	      cv::putText(img,"(0,1.5,0)",pp.p1[pp.cnt-1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
	      u = pp.p1[2].x;
	      v = pp.p1[2].y;
	      Eigen::Vector3d undist;
	      undist(0) = u;
	      undist(1) = v;
	      undist(2) = 1.;
	      
	      if (cam == 1)
		{
		  dist_to_point = DIST_TO_0050_CAM1;
		}
	      if (cam == 2)
		{
		  dist_to_point = DIST_TO_0050_CAM2;
		}
	      x = (u)*(dist_to_point);
	      y = (v)*(dist_to_point);
	      Pi(0,2) = x;
	      Pi(1,2) = y;
	      Pi(2,2) = dist_to_point;
	      Pr(1,2) = 1500.;
	      Pr(3,2) = 1.;

	    } else if (pp.cnt == 4) // Fourth point to insert: (0,0,1000mm) in the mocap frame
	    {
	      cv::putText(img,"(0,0,1)",pp.p1[pp.cnt-1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
	      u = pp.p1[3].x;
	      v = pp.p1[3].y;
	      Eigen::Vector3d undist;
	      undist(0) = u;
	      undist(1) = v;
	      undist(2) = 1.;
	      
	      if (cam == 1)
		{
		  dist_to_point = DIST_TO_001_CAM1;
		}
	      if (cam == 2)
		{
		  dist_to_point = DIST_TO_001_CAM2;
		}

	      x = (u)*(dist_to_point);
	      y = (v)*(dist_to_point);

	      Pi(0,3) = x;
	      Pi(1,3) = y;
	      Pi(2,3) = dist_to_point;
	      Pr(2,3) = 1000.;
	      Pr(3,3) = 1.;
	    } else if (pp.cnt == 5) // Fifth point to insert: (1500mm,0,1000mm) in the mocap frame
	    {
	      cv::putText(img,"(1.5,0,1)",pp.p1[pp.cnt-1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
	      u = pp.p1[4].x;
	      v = pp.p1[4].y;
	      Eigen::Vector3d undist;
	      undist(0) = u;
	      undist(1) = v;
	      undist(2) = 1.;
	      
	      if (cam == 1)
		{
		  dist_to_point = 4856.439;
		}
	      if (cam == 2)
		{
		  dist_to_point = 6333.64;
		}

	      x = (u)*(dist_to_point);
	      y = (v)*(dist_to_point);

	      Pi(0,4) = x;
	      Pi(1,4) = y;
	      Pi(2,4) = dist_to_point;
	      Pr(0,4) = 1500.;
	      Pr(2,4) = 1000.;
	      Pr(3,4) = 1.;
	    } else if (pp.cnt == 6) // Sixth point to insert: (0,1500mm,1000mm) in the mocap frame
	    {
	      cv::putText(img,"(0,1.5,1)",pp.p1[pp.cnt-1],CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,cv::Scalar(255,0,0));
	      u = pp.p1[5].x;
	      v = pp.p1[5].y;
	      Eigen::Vector3d undist;
	      undist(0) = u;
	      undist(1) = v;
	      undist(2) = 1.;
	      
	      if (cam == 1)
		{
		  dist_to_point = 5142.713;
		}
	      if (cam == 2)
		{
		  dist_to_point = 4389.19;
		}

	      x = (u)*(dist_to_point);
	      y = (v)*(dist_to_point);

	      Pi(0,5) = x;
	      Pi(1,5) = y;
	      Pi(2,5) = dist_to_point;
	      Pr(1,5) = 1500.;
	      Pr(2,5) = 1000.;
	      Pr(3,5) = 1.;
	    }
	  cnt = pp.cnt;
	  pp.clicked = false;
	}
      // Keep showing the image and the modification on it
      cv::imshow("Extrinsic Image", img);
      cv::waitKey(10);
    }

  // Compute the rotation matrix and the translation vector
  Eigen::MatrixXd Prinv, Rt;
  pseudoInverse(Pr,Prinv);
  Rt.noalias() = (K.inverse())*Pi*(Prinv);

  rotation = Rt.block(0,0,3,3);
  translation = Rt.block(0,3,3,1);

  // Save the values in files
  if (cam == 1)
    {
      if (!fprintMatrix(rotation, "rotation_cam1.txt"))
	std::cout << "Error writing in rotation_cam1.txt" << std::endl;
      if (!fprintMatrix(translation, "translation_cam1.txt"))
	std::cout << "Error writing in rotation_cam1.txt" << std::endl;
      if(!fprintPT(pan,tilt, "PT_cam1.txt"))
	std::cout << "Error writing file PT_cam1.txt" << std::endl;
    } else if (cam == 2)
    {
      if (!fprintMatrix(rotation, "rotation_cam2.txt"))
	std::cout << "Error writing in rotation_cam2.txt" << std::endl;
      if (!fprintMatrix(translation, "translation_cam2.txt"))
	std::cout << "Error writing in rotation_cam2.txt" << std::endl;
      if(!fprintPT(pan,tilt, "PT_cam2.txt"))
	std::cout << "Error writing file PT_cam2.txt" << std::endl;
    }

  initial_rotation.noalias() = rotation;
  initial_translation.noalias() = translation;
  rotation_computed = true;

  // Destroy the window
  cv::destroyWindow("Extrinsic Image");
}
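The pseudoInverse helper called above is not part of this snippet. A minimal sketch, assuming the (input, output) signature used in the call and computing the Moore-Penrose pseudo-inverse with Eigen's SVD, could look like this:

#include <Eigen/Dense>

// Hedged sketch: Moore-Penrose pseudo-inverse via SVD. Singular values
// below a tolerance are treated as zero to keep the result numerically stable.
void pseudoInverse(const Eigen::MatrixXd &A, Eigen::MatrixXd &Ainv,
                   double tolerance = 1e-8)
{
    Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
    Eigen::VectorXd sv = svd.singularValues();
    for (int i = 0; i < sv.size(); ++i)
        sv(i) = (sv(i) > tolerance) ? 1.0 / sv(i) : 0.0;
    // A^+ = V * diag(1/s_i) * U^T
    Ainv = svd.matrixV() * sv.asDiagonal() * svd.matrixU().transpose();
}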
bool PatternDetector::findPattern(const cv::Mat& image, PatternTrackingInfo& info)
{
	// Convert input image to gray
    getGray(image, m_grayImg);
	
	// Extract feature points from input gray image
    extractFeatures(m_grayImg, m_queryKeypoints, m_queryDescriptors);
	
	// Get matches with current pattern
    getMatches(m_queryDescriptors, m_matches);

#if _DEBUG
    cv::showAndSave("Raw matches", getMatchesImage(image, m_pattern.frame, m_queryKeypoints, m_pattern.keypoints, m_matches, 100));
#endif

#if _DEBUG
    cv::Mat tmp = image.clone();
#endif

	// Find homography transformation and detect good matches
    bool homographyFound = refineMatchesWithHomography(
        m_queryKeypoints, 
        m_pattern.keypoints, 
        homographyReprojectionThreshold, 
        m_matches, 
        m_roughHomography);

    if (homographyFound)
    {
#if _DEBUG
        cv::showAndSave("Refined matches using RANSAC", getMatchesImage(image, m_pattern.frame, m_queryKeypoints, m_pattern.keypoints, m_matches, 100));
#endif
		// If homography refinement enabled improve found transformation
        if (enableHomographyRefinement)
        {
			// Warp image using found homography
            cv::warpPerspective(m_grayImg, m_warpedImg, m_roughHomography, m_pattern.size, cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);
#if _DEBUG
            cv::showAndSave("Warped image",m_warpedImg);
#endif
            // Get refined matches:
            std::vector<cv::KeyPoint> warpedKeypoints;
            std::vector<cv::DMatch> refinedMatches;

			// Detect features on warped image
            extractFeatures(m_warpedImg, warpedKeypoints, m_queryDescriptors);

			// Match with pattern
            getMatches(m_queryDescriptors, refinedMatches);

			// Estimate new refinement homography
            homographyFound = refineMatchesWithHomography(
                warpedKeypoints, 
                m_pattern.keypoints, 
                homographyReprojectionThreshold, 
                refinedMatches, 
                m_refinedHomography);
#if _DEBUG
            cv::showAndSave("MatchesWithRefinedPose", getMatchesImage(m_warpedImg, m_pattern.grayImg, warpedKeypoints, m_pattern.keypoints, refinedMatches, 100));
#endif
			// Get a result homography as result of matrix product of refined and rough homographies:
            info.homography = m_roughHomography * m_refinedHomography;

            // Transform contour with rough homography
#if _DEBUG
            cv::perspectiveTransform(m_pattern.points2d, info.points2d, m_roughHomography);
            info.draw2dContour(tmp, CV_RGB(0,200,0));
#endif

            // Transform contour with precise homography
            cv::perspectiveTransform(m_pattern.points2d, info.points2d, info.homography);
#if _DEBUG
            info.draw2dContour(tmp, CV_RGB(200,0,0));
#endif
        }
        else
        {
            info.homography = m_roughHomography;

            // Transform contour with rough homography
            cv::perspectiveTransform(m_pattern.points2d, info.points2d, m_roughHomography);
#if _DEBUG
            info.draw2dContour(tmp, CV_RGB(0,200,0));
#endif
        }
    }

#if _DEBUG
    if (1)
    {
        cv::showAndSave("Final matches", getMatchesImage(tmp, m_pattern.frame, m_queryKeypoints, m_pattern.keypoints, m_matches, 100));
    }
    std::cout << "Features:" << std::setw(4) << m_queryKeypoints.size() << " Matches: " << std::setw(4) << m_matches.size() << std::endl;
#endif

    return homographyFound;
}
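refineMatchesWithHomography is a member of PatternDetector whose body is not included here. A plausible sketch, shown as a free function for brevity (the minimum-match threshold of 8 is an assumption): estimate a RANSAC homography with cv::findHomography and keep only the inlier matches.

bool refineMatchesWithHomography(
    const std::vector<cv::KeyPoint>& queryKeypoints,
    const std::vector<cv::KeyPoint>& trainKeypoints,
    float reprojectionThreshold,
    std::vector<cv::DMatch>& matches,
    cv::Mat& homography)
{
    const size_t minMatches = 8; // assumed minimum for a stable estimate
    if (matches.size() < minMatches)
        return false;

    // Gather the matched point pairs.
    std::vector<cv::Point2f> srcPoints(matches.size());
    std::vector<cv::Point2f> dstPoints(matches.size());
    for (size_t i = 0; i < matches.size(); ++i)
    {
        srcPoints[i] = trainKeypoints[matches[i].trainIdx].pt;
        dstPoints[i] = queryKeypoints[matches[i].queryIdx].pt;
    }

    // RANSAC homography; the mask flags the inlier matches.
    std::vector<unsigned char> inliersMask(matches.size());
    homography = cv::findHomography(srcPoints, dstPoints, CV_RANSAC,
                                    reprojectionThreshold, inliersMask);

    std::vector<cv::DMatch> inliers;
    for (size_t i = 0; i < inliersMask.size(); ++i)
        if (inliersMask[i])
            inliers.push_back(matches[i]);

    matches.swap(inliers);
    return matches.size() >= minMatches;
}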
Example #19
0
    void reset_output() const {
#ifdef REAK_HAS_OPENCV
      world_map_output = world_map_image.clone();
#endif
    };
Example #20
0
bool Stabilizer::init( const cv::Mat& frame)
{
    prevFrame = frame.clone();
    return true;
}
Example #21
0
bool Stabilizer::track(const cv::Mat& frame)
{
    cv::Mat previous_frame_gray;
    cv::cvtColor(prevFrame, previous_frame_gray, cv::COLOR_BGR2GRAY);
  
    if (flagUpdateFeatures) {
        cv::goodFeaturesToTrack(previous_frame_gray, previousFeatures, 500, 0.01, 5);
        flagUpdateFeatures = false;
    }

    // Take the feature count only after a possible refresh, so that n
    // matches the vectors filled by the optical flow call below.
    size_t n = previousFeatures.size();
    CV_Assert(n);
    
    // Compute optical flow at the selected points. Track on grayscale
    // images, consistent with the grayscale feature detection above.
    cv::Mat current_frame_gray;
    cv::cvtColor(frame, current_frame_gray, cv::COLOR_BGR2GRAY);

    std::vector<cv::Point2f> currentFeatures;
    std::vector<uchar> state;
    std::vector<float> error;

    cv::calcOpticalFlowPyrLK(previous_frame_gray, current_frame_gray, previousFeatures, currentFeatures, state, error);

    float median_error = median<float>(error);

    std::vector<cv::Point2f> good_points;
    std::vector<cv::Point2f> curr_points;
    for (size_t i = 0; i < n; ++i)
    {
        if (state[i] && (error[i] <= median_error))
        {
            good_points.push_back(previousFeatures[i]);
            curr_points.push_back(currentFeatures[i]);
        }
    }

    size_t s = good_points.size();
    CV_Assert(s == curr_points.size());

    // Find points shift.
    std::vector<float> shifts_x(s);
    std::vector<float> shifts_y(s);
    
    for (size_t i = 0; i < s; ++i)
    {
        shifts_x[i] = curr_points[i].x - good_points[i].x;
        shifts_y[i] = curr_points[i].y - good_points[i].y;
    }
    
    std::sort(shifts_x.begin(), shifts_x.end());
    std::sort(shifts_y.begin(), shifts_y.end());

    // Find the median shift; if no point survived the filtering, request a
    // feature refresh instead of indexing into empty vectors.
    if (s == 0) {
        flagUpdateFeatures = true;
        prevFrame = frame.clone();
        return false;
    }
    cv::Point2f median_shift(shifts_x[s / 2], shifts_y[s / 2]);
    xshift.push_back(median_shift.x);
    yshift.push_back(median_shift.y);

    if (s < 100) {
        previousFeatures.clear();
        previousFeatures = currentFeatures;
        flagUpdateFeatures = true;
    }

    prevFrame = frame.clone(); 

    return true;
}
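The median<float> helper used for the tracking error above is not an OpenCV function. A minimal sketch, assuming it takes the vector by value and returns the middle element:

#include <algorithm>
#include <vector>

// Hedged sketch of the assumed median<T> helper: O(n) selection of the
// middle element (upper median for even-sized input).
template <typename T>
T median(std::vector<T> values)
{
    CV_Assert(!values.empty());
    size_t mid = values.size() / 2;
    std::nth_element(values.begin(), values.begin() + mid, values.end());
    return values[mid];
}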
Example #22
0
// SETTERS #############################################################
void Sakbot::setImage( cv::Mat current )
{
    m_image = current.clone();
}
Example #23
0
 void drawImageWithLock(cv::Mat &disp)
 {
   disp = color.clone();
 }
Example #24
0
bool ddbtc::compress(cv::Mat &src,cv::Mat &dst,short BlockSize){

	if(BlockSize!=8&&BlockSize!=16){	// current version supports only 8 and 16
		return false;
	}
	if(src.type()!=CV_8U){	// should be grayscale image
		return false;
	}

	// = = = = = pre-defined data = = = = = //
	int	&imgWidth=src.cols,&imgHeight=src.rows;
	short			CM_Size=BlockSize;	// class matrix size
	const	short	DM_Size=3;			// diffused weighting size
	const	short	CM8[8][8]={	{42,	47,	46,	45,	16,	13,	11,	2},	
								{61,	57,	53,	8,	27,	22,	9,	50},	
								{63,	58,	0,	15,	26,	31,	40,	30},	
								{10,	4,	17,	21,	3,	44,	18,	6},	
								{14,	24,	25,	7,	5,	48,	52,	39},	
								{20,	28,	23,	32,	38,	51,	54,	60},
								{19,	33,	36,	37,	49,	43,	56,	55},	
								{12,	62,	29,	35,	1,	59,	41,	34}};
	const	float	DW8[3][3]={	{0.271630,	1.000000,	0.271630},	
								{1.000000,	0.000000,	1.000000},	
								{0.271630,	1.000000,	0.271630}};
	const	short	CM16[16][16]={	{6,	7	,20		,10		,53		,55		,66	,	87 ,	137,	142,	143,	144,	172,	122,	175,	164},	
								{3,	9	,23		,50		,60		,51		,65	,	74,		130,	145,	138,	148,	179,	180,	214,	221},	
								{0,	14	,24		,37		,67		,79		,96	,	116,	39,		149,	162,	198,	12,		146,	224,	1},	
								{15,26	,43		,28		,71		,54		,128,	112,	78,		159,	177,	201,	208,	223,	225,	242},	
								{22,4	,48		,32		,94		,98		,80	,	135,	157,	173,	113,	182,	222,	226,	227,	16},	
								{40,85	,72		,83		,104	,117	,163,	133,	168,	184,	200,	219,	244,	237,	183,	21},	
								{47,120	,101	,105	,123	,132	,170,	176,	190,	202,	220,	230,	245,	235,	17,	41},
								{76,73	,127	,109	,97		,134	,178,	181,	206,	196,	229,	231,	246,	19,		42,	49},	
								{103,99	,131	,147	,169	,171	,166,	203,	218,	232,	243,	248,	247,	33,		52,	68},	
								{108,107,	140	,102	,185	,167	,204,	217,	233,	106,	249,	255,	44,		45,		70,	69},	
								{110,141,	88	,75		,192	,205	,195,	234,	241,	250,	254,	38,		46,		77,		5,	100},	
								{111,158,	160	,174	,119	,215	,207,	240,	251,	252,	253,	61,		62,		93,		84,	125},	
								{151,136,	189	,199	,197	,216	,236,	239,	25,		31,		56,		82,		92,		95,		124,	114},	
								{156,188,	191	,209	,213	,228	,238,	29,		36,		59,		64,		91,		118,	139,	115,	155},	
								{187,194,	165	,212	,2		,13		,30	,	35,		58,		63,		90,		86,		152,	129,	154,	161},	
								{193,210,	211	,8		,11		,27		,34	,	57,		18,		89,		81,		121,	126,	153,	150,	186}};
	const	float	DW16[3][3]={{0.305032,	1.000000,	0.305032},	
								{1.000000,	0.000000,	1.000000},	
								{0.305032,	1.000000,	0.305032}};

	// = = = = = give cm and dw data = = = = = //
	std::vector<std::vector<short>>	CM(CM_Size,std::vector<short>(CM_Size,0));
	std::vector<std::vector<float>>	DM(DM_Size,std::vector<float>(DM_Size,0));
	if(BlockSize==8){
		for(int i=0;i<CM_Size;i++){
			for(int j=0;j<CM_Size;j++){
				CM[i][j]=CM8[i][j];
			}
		}
		for(int i=0;i<DM_Size;i++){
			for(int j=0;j<DM_Size;j++){
				DM[i][j]=DW8[i][j];
			}
		}
	}else if(BlockSize==16){
		for(int i=0;i<CM_Size;i++){
			for(int j=0;j<CM_Size;j++){
				CM[i][j]=CM16[i][j];
			}
		}
		for(int i=0;i<DM_Size;i++){
			for(int j=0;j<DM_Size;j++){
				DM[i][j]=DW16[i][j];
			}
		}
	}

	// = = = = = create Temp space = = = = = //
	std::vector<std::vector<float>>	Tempmap(imgHeight,std::vector<float>(imgWidth,0));
	for(int i=0;i<imgHeight;i++){
		for(int j=0;j<imgWidth;j++){
			Tempmap[i][j]=src.data[i*src.cols+j];
		}
	}

	// = = = = = get processing positions = = = = = //
	std::vector<std::vector<short>>	ProPo(CM_Size*CM_Size,std::vector<short>(2,0));
	for(int m=0;m<CM_Size;m++){
		for(int n=0;n<CM_Size;n++){
			ProPo[CM[m][n]][0]=m;
			ProPo[CM[m][n]][1]=n;
		}
	}

	// = = = = = get bitmap = = = = = //
	std::vector<std::vector<char>>	DoMap(imgHeight,std::vector<char>(imgWidth,false));	// logically a bitmap of processed pixels; char is used instead of bool because std::vector<bool> is very slow



	//////////////////////////////////////////////////////////////////////////
	// initialization
	cv::Mat	tdst;	// temp dst
	tdst=src.clone();
	// = = = = = process = = = = = //
	for(int i=0;i<imgHeight;i+=CM_Size){
		for(int j=0;j<imgWidth;j+=CM_Size){

			//////////////////////////////////////////////////////////////////////////
			// calculate the local mean, maxv and minv of the original image's block
			float	mean=0.;
			uchar	maxv=tdst.data[i*tdst.cols+j],
					minv=tdst.data[i*tdst.cols+j];
			short	count_mean=0;
			for(int m=0;m<CM_Size;m++){
				for(int n=0;n<CM_Size;n++){
					if(i+m>=0&&i+m<imgHeight&&j+n>=0&&j+n<imgWidth){
						count_mean++;
						mean+=(float)tdst.data[(i+m)*tdst.cols+(j+n)];
						if(tdst.data[(i+m)*tdst.cols+(j+n)]>maxv){
							maxv=tdst.data[(i+m)*tdst.cols+(j+n)];
						}
						if(tdst.data[(i+m)*tdst.cols+(j+n)]<minv){
							minv=tdst.data[(i+m)*tdst.cols+(j+n)];
						}
					}					
				}
			}
			mean/=(float)count_mean;

			//////////////////////////////////////////////////////////////////////////
			// = = = = = diffusion = = = = = //
			short memberIndex=0;
			while(memberIndex!=CM_Size*CM_Size){

				// to decide whether the prospective coordinate is out of scope or not
				int		ni=i+ProPo[memberIndex][0],nj=j+ProPo[memberIndex][1];
				if(ni>=0&&ni<imgHeight&&nj>=0&&nj<imgWidth){
					// = = = = = dot diffusion = = = = = //
					// get error; to maintain the dot diffusion structure, the processed-pixel map (DoMap) has to be taken into account
					float	error;
					if(Tempmap[ni][nj]<mean){														// y = max or min (determined by the bitmap)
						tdst.data[ni*tdst.cols+nj]=minv;
					}else{
						tdst.data[ni*tdst.cols+nj]=maxv;
					}
					error=Tempmap[ni][nj]-(float)tdst.data[ni*tdst.cols+nj];						// e = v - y
					DoMap[ni][nj]=true;

					// get fm
					double	fm=0;		
					short	hDM_Size=DM_Size/2;
					for(int m=-hDM_Size;m<=hDM_Size;m++){
						for(int n=-hDM_Size;n<=hDM_Size;n++){
							if(ni+m>=0&&ni+m<imgHeight&&nj+n>=0&&nj+n<imgWidth){
								if(DoMap[ni+m][nj+n]==false){
									fm+=DM[m+hDM_Size][n+hDM_Size];
								}
							}
						}
					}

					// diffusing
					for(int m=-hDM_Size;m<=hDM_Size;m++){
						for(int n=-hDM_Size;n<=hDM_Size;n++){
							if(ni+m>=0&&ni+m<imgHeight&&nj+n>=0&&nj+n<imgWidth){
								if(DoMap[ni+m][nj+n]==false){
									Tempmap[ni+m][nj+n]+=error	*DM[m+hDM_Size][n+hDM_Size]/fm;		// v = x + e*weight
								}
							}
						}
					}
				}				
				memberIndex++;
			}
		}
	}

	dst	=	tdst.clone();
	return true;
}
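A possible way to call ddbtc::compress; the file names and the default-constructed codec object are assumptions for illustration:

// Halftone an 8-bit grayscale image with 8x8 blocks.
cv::Mat src = cv::imread("input.png", 0); // 0 -> load as grayscale (CV_8U)
cv::Mat dst;
ddbtc codec;
if (codec.compress(src, dst, 8))
    cv::imwrite("output_ddbtc.png", dst);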
Example #25
0
void LayoutTest::testLayout(const cv::Mat & src) const {

	// TODOs
	// - line spacing needs smoothing -> graphcut
	// - DBScan is very sensitive to the line spacing
	
	// Workflow:
	// - implement noise/text etc classification on SuperPixel level
	// - smooth labels using graphcut
	// - perform everything else without noise pixels
	// Training:
	// - open mode (whole image only contains e.g. machine printed)
	// - baseline mode -> overlap with superpixel

	cv::Mat img = src.clone();
	//cv::resize(src, img, cv::Size(), 0.25, 0.25, CV_INTER_AREA);

	Timer dt;

	// find super pixels
	rdf::SuperPixel superPixel(img);
	
	if (!superPixel.compute())
		qWarning() << "could not compute super pixel!";

	QVector<QSharedPointer<Pixel> > sp = superPixel.getSuperPixels();

	// find local orientation per pixel
	rdf::LocalOrientation lo(sp);
	if (!lo.compute())
		qWarning() << "could not compute local orientation";

	// smooth estimation
	rdf::GraphCutOrientation pse(sp);
	
	if (!pse.compute())
		qWarning() << "could not compute set orientation";
	
	// pixel labeling
	QSharedPointer<SuperPixelModel> model = SuperPixelModel::read(mConfig.classifierPath());
	//FeatureCollectionManager fcm = FeatureCollectionManager::read(mConfig.featureCachePath());

	//// train classifier
	//SuperPixelTrainer spt(fcm);

	//if (!spt.compute())
	//	qCritical() << "could not train data...";

	//auto model = spt.model();

	SuperPixelClassifier spc(src, sp);
	spc.setModel(model);

	if (!spc.compute())
		qWarning() << "could not classify SuperPixels";

	//// find tab stops
	//rdf::TabStopAnalysis tabStops(sp);
	//if (!tabStops.compute())
	//	qWarning() << "could not compute text block segmentation!";

	//// find text lines
	//rdf::TextLineSegmentation textLines(sp);
	//textLines.addLines(tabStops.tabStopLines(30));	// TODO: fix parameter
	//if (!textLines.compute())
	//	qWarning() << "could not compute text block segmentation!";

	qInfo() << "algorithm computation time" << dt;

	// drawing
	//cv::Mat rImg(img.rows, img.cols, CV_8UC1, cv::Scalar::all(150));
	cv::Mat rImg = img.clone();

	//// draw edges
	//rImg = textBlocks.draw(rImg);
	//rImg = lo.draw(rImg, "1012", 256);
	//rImg = lo.draw(rImg, "507", 128);
	//rImg = lo.draw(rImg, "507", 64);

	//// save super pixel image
	//rImg = superPixel.drawSuperPixels(rImg);
	//rImg = tabStops.draw(rImg);
	//rImg = textLines.draw(rImg);
	rImg = spc.draw(rImg);
	QString maskPath = rdf::Utils::instance().createFilePath(mConfig.outputPath(), "-classified");
	rdf::Image::save(rImg, maskPath);
	qDebug() << "debug image added" << maskPath;


	//// write XML -----------------------------------
	//QString loadXmlPath = rdf::PageXmlParser::imagePathToXmlPath(mConfig.imagePath());

	//rdf::PageXmlParser parser;
	//parser.read(loadXmlPath);
	//auto pe = parser.page();
	//pe->setCreator(QString("CVL"));
	//pe->setImageSize(QSize(img.rows, img.cols));
	//pe->setImageFileName(QFileInfo(mConfig.imagePath()).fileName());

	//// start writing content
	//auto ps = PixelSet::fromEdges(PixelSet::connect(sp, Rect(0, 0, img.cols, img.rows)));

	//if (!ps.empty()) {
	//	QSharedPointer<Region> textRegion = QSharedPointer<Region>(new Region());
	//	textRegion->setType(Region::type_text_region);
	//	textRegion->setPolygon(ps[0]->convexHull());
	//	
	//	for (auto tl : textLines.textLines()) {
	//		textRegion->addUniqueChild(tl);
	//	}

	//	pe->rootRegion()->addUniqueChild(textRegion);
	//}

	//parser.write(mConfig.xmlPath(), pe);
	//qDebug() << "results written to" << mConfig.xmlPath();

}
Example #26
0
bool ShapeDiscriptor::discribeImage(cv::Mat &image)
{
    std::vector< std::vector<cv::Point> > contours;
    cv::Mat timage = image.clone();
    printf("rows : %d\n", image.rows);
    findContours(timage, contours, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
    for(size_t contourIdx = 0; contourIdx < contours.size(); contourIdx++)
    {

        cv::Moments moms = moments(cv::Mat(contours[contourIdx]));
        if(doFilterArea)
        {
            double area = moms.m00;
            if (area < minArea || area > maxArea)
                //drawContours(image, contours, contourIdx, cv::Scalar(0,0,0), CV_FILLED);
                continue;
        }
        
        if(doFilterCircularity)
        {
          double area = moms.m00;
          double perimeter = cv::arcLength(cv::Mat(contours[contourIdx]), true);
          double ratio = 4 * CV_PI * area / (perimeter * perimeter);
          if (ratio < minCircularity)
            //drawContours(image, contours, contourIdx, cv::Scalar(0,0,0), CV_FILLED);
            continue;
        }

        if(doFilterInertia)
        {
          double denominator = sqrt(pow(2 * moms.mu11, 2) + pow(moms.mu20 - moms.mu02, 2));
          const double eps = 1e-2;
          double ratio;
          if (denominator > eps)
          {
            double cosmin = (moms.mu20 - moms.mu02) / denominator;
            double sinmin = 2 * moms.mu11 / denominator;
            double cosmax = -cosmin;
            double sinmax = -sinmin;

            double imin = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmin - moms.mu11 * sinmin;
            double imax = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmax - moms.mu11 * sinmax;
            ratio = imin / imax;
          }
          else
          {
            ratio = 1;
          }
          //p.inErtia = ratio;
          if (ratio < minInertia)
            //drawContours(image, contours, contourIdx, cv::Scalar(0,0,0), CV_FILLED);
            continue;
        }

        if(doFilterConvexity)
        {
          std::vector<cv::Point> hull;
          convexHull(cv::Mat(contours[contourIdx]), hull);
          double area = cv::contourArea(cv::Mat(contours[contourIdx]));
          double hullArea = cv::contourArea(cv::Mat(hull));
          double ratio = area / hullArea;
          //p.convexity = ratio;
          if (ratio < minConvexity)
            //drawContours(image, contours, contourIdx, cv::Scalar(0,0,0), CV_FILLED);
            continue;
        }

        timage.release();
        return true;
    }
    timage.release();
    return false;
} 
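A possible way to call ShapeDiscriptor::discribeImage; the mask file name is hypothetical, and the detector's filter flags and thresholds are assumed to be configured elsewhere:

// Returns true as soon as one contour passes every enabled filter
// (area, circularity, inertia, convexity).
cv::Mat mask = cv::imread("blob_mask.png", 0); // binary 8-bit mask
ShapeDiscriptor sd;
bool hasValidShape = sd.discribeImage(mask);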
Example #27
0
void ObjectDetectorProvider::processRequest(
	const re_vision::SearchFor::Request  &req,
	const cv::Mat &image,
	re_vision::SearchFor::Response &res)
{
  ROS_DEBUG("Processing request...");

  cv::Mat _image = image.clone(); // ###

  ros::WallTime t_begin = ros::WallTime::now();

  vector<string> valid_objects;
  vector<re_msgs::DetectedObject *> pointers;
  getValidObjects(req.Objects, valid_objects, res.Detections, pointers);

  detectObjects(valid_objects, image, req.MaxPointsPerObject, pointers);
  rectifyDetections(res.Detections, image.cols, image.rows,
    req.MaxPointsPerObject);

#if 0
  if(!res.Detections.empty() && !res.Detections[0].points2d.empty())
  {
    vector<cv::Point3f> points3d;
    for(unsigned int i = 0; i < res.Detections[0].points3d.size(); ++i)
    {
      points3d.push_back(cv::Point3f(
        -res.Detections[0].points3d[i].y,
        -res.Detections[0].points3d[i].z,
        res.Detections[0].points3d[i].x));
    }

    cv::Mat cP(points3d);
    cv::Mat R = cv::Mat::eye(3,3,CV_64F);
    cv::Mat t = cv::Mat::zeros(3,1,CV_64F);

    vector<cv::Point2f> cam_pixels;

    cv::projectPoints(cP, R, t, m_camera.GetIntrinsicParameters(),
      cv::Mat::zeros(4,1,CV_32F), cam_pixels);

    CvScalar color = cvScalar(255, 255, 255);
    for(unsigned int i = 0; i < cam_pixels.size(); ++i)
    {
      cv::circle(_image, cvPoint(cam_pixels[i].x, cam_pixels[i].y),
        2, color, 1);
    }

    cv::imwrite("_debug.png", _image);

  }
#endif

  ros::WallTime t_end = ros::WallTime::now();

  {
    int counter = 0;
    vector<re_msgs::DetectedObject>::const_iterator dit;
    for(dit = res.Detections.begin(); dit != res.Detections.end(); ++dit)
    {
      if(!dit->points3d.empty()) counter++;
    }
    ROS_INFO("%d objects detected", counter);
  }

  ros::WallDuration d = t_end - t_begin;
  ROS_DEBUG("Processing ok. Elapsed time: %f", d.toSec());
}
Example #28
0
vavImage::vavImage(const cv::Mat& im): m_pTextureDraw(0), m_pShaderResView(0)
{
	m_Image = im.clone();
}
Example #29
0
cv::Mat Local_Scharr(cv::Mat input_img,int sub_images,bool using_histogram_percentile,int histogram_percentile,bool using_threshold,int threshold, bool dx_checked,bool dy_checked,bool Otsu,double x_weight, double y_weight)
{
    cv::Mat output_img;
    if((input_img.channels()==1) && (!input_img.empty()))
    {
        int colsize = input_img.cols;
        int rowsize = input_img.rows;

        cv::Mat sub_image,sub_image2,edge_img;
        cv::Mat seg_dx,seg_dy;
        cv::Mat abs_seg_x,abs_seg_y;

         edge_img = input_img.clone();
         if(!(dx_checked && dy_checked))
         {
             edge_img.convertTo(edge_img,CV_32F);
             sub_image.create(rowsize/sub_images,colsize/sub_images,CV_32F);
             sub_image2.create(rowsize/sub_images,colsize/sub_images,CV_32F);
         }
         else
         {
             sub_image.create(rowsize/sub_images,colsize/sub_images,CV_8U);
             sub_image2.create(rowsize/sub_images,colsize/sub_images,CV_8U);
         }


         int c,d,e,f;
         for(int a = 0;a<sub_images;a++)
         {
             if(a == 0)
             {
                 c=0;
                 d = floor(1.0*rowsize/sub_images)-1;
             }else
             {
                 c = d+1;
                 d = d+floor(1.0*rowsize/sub_images);
             }

             for(int b = 0;b<sub_images;b++)
             {
                 if(b == 0)
                 {
                     e=0;
                     f = floor(1.0*colsize/sub_images)-1;
                 }else
                 {
                     e = f+1;
                     f = f+floor(1.0*colsize/sub_images);
                 }

                 sub_image = edge_img(cv::Rect(e,c,sub_image.cols,sub_image.rows));
                 sub_image.copyTo(sub_image2);

                 if(dx_checked && dy_checked)
                 {

                     cv::Sobel(sub_image2,seg_dx,CV_32F,1,0,CV_SCHARR,1,0,cv::BORDER_REPLICATE);
                     cv::Sobel(sub_image2,seg_dy,CV_32F,0,1,CV_SCHARR,1,0,cv::BORDER_REPLICATE);
                     cv::convertScaleAbs(seg_dx,abs_seg_x);
                     cv::convertScaleAbs(seg_dy,abs_seg_y);
                     cv::addWeighted(abs_seg_x,x_weight,abs_seg_y,y_weight,0,sub_image);//not perfect, can adjust dx and dy
                 }
                 else if(dx_checked)
                 {
                    cv::Sobel(sub_image2,sub_image,CV_32F,1,0,CV_SCHARR,1,0,cv::BORDER_REPLICATE);
                 }
                 else if(dy_checked)
                 {
                     cv::Sobel(sub_image2,sub_image,CV_32F,0,1,CV_SCHARR,1,0,cv::BORDER_REPLICATE);
                 }

             }
         }

         if(((rowsize-1)-d)>0)//adds missing rows
          {
              sub_image.create((rowsize-1)-d,colsize/sub_images,CV_8U);
              sub_image2.create((rowsize-1)-d,colsize/sub_images,CV_8U);
              c = d+1;
              for(int b = 0;b<sub_images;b++)
              {
                  if(b == 0)
                  {
                      e=0;
                      f = floor(1.0*colsize/sub_images)-1;
                  }else
                  {
                      e = f+1;
                      f = f+floor(1.0*colsize/sub_images);
                  }
                  sub_image = edge_img(cv::Rect(e,c,sub_image.cols,sub_image.rows));
                  sub_image.copyTo(sub_image2);

                  if(dx_checked && dy_checked)
                  {

                      cv::Sobel(sub_image2,seg_dx,CV_32F,1,0,CV_SCHARR,1,0,cv::BORDER_REPLICATE);
                      cv::Sobel(sub_image2,seg_dy,CV_32F,0,1,CV_SCHARR,1,0,cv::BORDER_REPLICATE);
                      cv::convertScaleAbs(seg_dx,abs_seg_x);
                      cv::convertScaleAbs(seg_dy,abs_seg_y);
                      cv::addWeighted(abs_seg_x,x_weight,abs_seg_y,y_weight,0,sub_image);//not perfect, can adjust dx and dy
                  }
                  else if(dx_checked)
                  {
                     cv::Sobel(sub_image2,sub_image,CV_32F,1,0,CV_SCHARR,1,0,cv::BORDER_REPLICATE);
                  }
                  else if(dy_checked)
                  {
                      cv::Sobel(sub_image2,sub_image,CV_32F,0,1,CV_SCHARR,1,0,cv::BORDER_REPLICATE);
                  }

              }

          }
          if(((colsize-1)-f)>0)//adds missing columns
          {
              sub_image.create(rowsize/sub_images,(colsize-1)-f,CV_8U);
              sub_image2.create(rowsize/sub_images,(colsize-1)-f,CV_8U);

              e = f+1;
              for(int a = 0;a<sub_images;a++)
              {
                  if(a == 0)
                  {
                      c=0;
                      d = floor(1.0*rowsize/sub_images)-1;
                  }else
                  {
                      c = d+1;
                      d = d+floor(1.0*rowsize/sub_images);
                  }

                  sub_image = edge_img(cv::Rect(e,c,sub_image.cols,sub_image.rows));
                  sub_image.copyTo(sub_image2);

                  if(dx_checked && dy_checked)
                  {

                      cv::Sobel(sub_image2,seg_dx,CV_32F,1,0,CV_SCHARR,1,0,cv::BORDER_REPLICATE);
                      cv::Sobel(sub_image2,seg_dy,CV_32F,0,1,CV_SCHARR,1,0,cv::BORDER_REPLICATE);
                      cv::convertScaleAbs(seg_dx,abs_seg_x);
                      cv::convertScaleAbs(seg_dy,abs_seg_y);
                      cv::addWeighted(abs_seg_x,x_weight,abs_seg_y,y_weight,0,sub_image);//not perfect, can adjust dx and dy
                  }
                  else if(dx_checked)
                  {
                     cv::Sobel(sub_image2,sub_image,CV_32F,1,0,CV_SCHARR,1,0,cv::BORDER_REPLICATE);
                  }
                  else if(dy_checked)
                  {
                      cv::Sobel(sub_image2,sub_image,CV_32F,0,1,CV_SCHARR,1,0,cv::BORDER_REPLICATE);
                  }
              }

          }
          if((((colsize-1)-f)>0) && (((rowsize-1)-d)>0))
          {
              sub_image.create((rowsize-1)-d,(colsize-1)-f,CV_8U);
              sub_image2.create((rowsize-1)-d,colsize-1-f,CV_8U);
              c = d+1;
              e = f+1;
              sub_image = edge_img(cv::Rect(e,c,sub_image.cols,sub_image.rows));
              sub_image.copyTo(sub_image2);

              if(dx_checked && dy_checked)
              {

                  cv::Sobel(sub_image2,seg_dx,CV_32F,1,0,CV_SCHARR,1,0,cv::BORDER_REPLICATE);
                  cv::Sobel(sub_image2,seg_dy,CV_32F,0,1,CV_SCHARR,1,0,cv::BORDER_REPLICATE);
                  cv::convertScaleAbs(seg_dx,abs_seg_x);
                  cv::convertScaleAbs(seg_dy,abs_seg_y);
                  cv::addWeighted(abs_seg_x,x_weight,abs_seg_y,y_weight,0,sub_image);//not perfect, can adjust dx and dy
              }
              else if(dx_checked)
              {
                 cv::Sobel(sub_image2,sub_image,CV_32F,1,0,CV_SCHARR,1,0,cv::BORDER_REPLICATE);
              }
              else if(dy_checked)
              {
                  cv::Sobel(sub_image2,sub_image,CV_32F,0,1,CV_SCHARR,1,0,cv::BORDER_REPLICATE);
              }
          }


         if(!(dx_checked && dy_checked))
         {
             edge_img.convertTo(edge_img,CV_8U);
         }

         if(Otsu)
         {
             output_img = Histogram_seg(histogram_percentile,edge_img,input_img);
         }
         else if(using_histogram_percentile)
         {
             output_img = Histogram_seg(histogram_percentile,edge_img);
         }
         else if(using_threshold)
         {
             int num_pix = edge_img.rows*edge_img.cols;
             edge_img.copyTo(output_img);
             output_img.create(edge_img.rows,edge_img.cols,edge_img.type());

             uchar* data = output_img.ptr<uchar>(0);
             uchar* old_data = edge_img.ptr<uchar>(0);
             for(int i = 0; i<num_pix;i++)
             {
                 if(old_data[i]>threshold)
                 {
                     data[i]=255;
                 }
                 else
                 {
                     data[i] = 0;
                 }


             }

         }
    }

    return output_img;
}
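A possible call to Local_Scharr (the file name is hypothetical): split the image into a 4x4 grid, combine |dx| and |dy| with equal weights, and binarize at the 90th histogram percentile:

cv::Mat gray = cv::imread("input.png", 0); // 8-bit grayscale
cv::Mat edges = Local_Scharr(gray, 4, true, 90, false, 0, true, true, false, 0.5, 0.5);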
// Calculates a Gaussian blur of the given height map and returns the blurred result
cv::Mat GaussianBlur(cv::Mat img,int gaussian_kernel_size,float segma)
{

    if(gaussian_kernel_size%2 == 0)
    {
        cout << "Use odd size for gaussian kernel ";
        return img;
    }

    if(img.cols!=img.rows){
        cout << "Use Image with same width and height";
        return img;
    }


    if(img.cols%2 != 0){ // note: this only checks that the size is even
        cout << "Use an image whose size is a power of 2";
        return img;
    }

    // init the 1D gaussian kernel using OpenCV
    cv::Mat gussian_kernal=cv::getGaussianKernel(gaussian_kernel_size,segma);
    print_var("g_ker",gussian_kernal);

    //init the variables
    cv::Mat blur_img_x=img.clone();
    cv::Mat final_blur_img=img.clone();

    int img_size = img.rows;
    int kernal_radius=int((gaussian_kernel_size-1)/2);

    int start_pixel_x=kernal_radius;
    int start_pixel_y=kernal_radius;

    int end_pixel_x=  img_size - kernal_radius;
    int end_pixel_y=  img_size - kernal_radius;

    double temp_sum_res=0;
    int temp_kernal_shift=0;
    int temp_pixcel_val=0;
    double temp_gussian_val=0;

    // apply the 1D kernel along the first image axis (cv::Mat::at takes (row, col), so x indexes rows here)
    for (int y=start_pixel_y;y<end_pixel_y;y++)
    {
          for(int x=start_pixel_x;x<end_pixel_x;x++)
          {
              temp_sum_res=0;
              temp_kernal_shift=0;
              for (int kernal_shift=x-kernal_radius;kernal_shift< x+kernal_radius+1;kernal_shift++)
              {
                  temp_pixcel_val=img.at<uchar>(kernal_shift,y);
                  temp_gussian_val=gussian_kernal.at<double>(temp_kernal_shift,0);

                  temp_sum_res+=temp_pixcel_val*temp_gussian_val;
                  temp_kernal_shift++;
              }
              blur_img_x.at<uchar>(x,y)=temp_sum_res;

          }
    }

    // apply the 1D kernel to the blurred intermediate along the second axis
    for (int y=start_pixel_y;y<end_pixel_y;y++)
    {
        for(int x=start_pixel_x;x<end_pixel_x;x++)
        {
              temp_sum_res=0;
              temp_kernal_shift=0;
              for (int kernal_shift=y-kernal_radius;kernal_shift< y+kernal_radius+1;kernal_shift++)
              {
                  temp_pixcel_val=blur_img_x.at<uchar>(x,kernal_shift);
                  temp_gussian_val=gussian_kernal.at<double>(temp_kernal_shift,0);

                  temp_sum_res+=temp_pixcel_val*temp_gussian_val;
                  temp_kernal_shift++;
              }
              final_blur_img.at<uchar>(x,y)=temp_sum_res;
        }
    }
    return final_blur_img;

}
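A possible call to this GaussianBlur (the file name is hypothetical). For a square, power-of-two grayscale image the two separable passes should closely match cv::GaussianBlur with the same kernel, apart from the untouched kernal_radius-pixel border:

cv::Mat height_map = cv::imread("height_map.png", 0); // 8-bit grayscale
cv::Mat blurred = GaussianBlur(height_map, 5, 1.2f);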