Example no. 1
// The image is separated into two parts at column x; each part is at most loc+1 columns wide.
// Column x (the separation line) belongs to both parts.
float ReidDescriptor::sym_div(int x, cv::Mat img, cv::Mat MSK, int loc, float alpha)
{
	int H = img.rows;
	int W = img.cols;
	int chs = img.channels();

	cv::Mat imgL = img.colRange(0, x + 1); // [0,x]
	cv::Mat imgR = img.colRange(x, img.cols);
	cv::Mat MSK_L = MSK.colRange(0, x + 1);
	cv::Mat MSK_R = MSK.colRange(x, MSK.cols);

	int dimLoc = min(min(x + 1, MSK_R.cols), loc + 1);

	if (dimLoc != 0)
	{
		cv::Mat imgLloc = img.colRange(x - dimLoc + 1, x + 1);//[x-dimLoc+1,x]
		cv::Mat imgRloc;
		cv::flip(imgR.colRange(0, dimLoc), imgRloc, 1);
		cv::Mat temp;
		cv::pow(imgLloc - imgRloc, 2, temp);
		float ans = alpha * sqrt(cv::sum(temp.reshape(1))[0]) / dimLoc +
					(1 - alpha) * std::abs(cv::sum(MSK_R)[0] - cv::sum(MSK_L)[0]) / max(MSK_R.rows * MSK_R.cols, MSK_L.rows * MSK_L.cols); // std::abs: the plain abs could resolve to the int overload and truncate

		return ans;
	}
	else
	{
		return 1;
	}
}
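A minimal usage sketch (not part of the original source): sym_div returns a lower score for a more symmetric split, so a caller would scan candidate columns and keep the minimum. The names reid, img, msk and loc below are assumptions, and <limits> is assumed to be included.

// Hypothetical usage: find the most symmetric separation column.
int best_x = img.cols / 2;
float best_score = std::numeric_limits<float>::max();
for (int x = img.cols / 4; x < 3 * img.cols / 4; x++) {
	float score = reid.sym_div(x, img, msk, loc, 0.5f);
	if (score < best_score) { best_score = score; best_x = x; }
}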
Example no. 2
void RandomFerns::get_linepoint(cv::Mat img,cv::Mat init_pose,std::vector<l1l2f1> line_pids, cv::Mat& feats){
	int cs = init_pose.cols;
	int w = img.cols;
	int h = img.rows;
	assert(cs==posdim);
	assert(line_pids.size()==num_fea_locs);
	feats = cv::Mat(num_fea_locs,1,CV_32FC1);
	cv::Mat xs = init_pose.colRange(0,cs/2).clone();
	cv::Mat ys = init_pose.colRange(cs/2,cs).clone();
	float mx = cv::mean(xs).val[0];
	float my = cv::mean(ys).val[0];
	xs -= mx;
	ys -= my;
	for (int k = 0; k < (int)line_pids.size(); k++){
		float p1x = xs.at<float>(0,line_pids[k].id1-1);
		float p1y = ys.at<float>(0,line_pids[k].id1-1);
		float p2x = xs.at<float>(0,line_pids[k].id2-1);
		float p2y = ys.at<float>(0,line_pids[k].id2-1);
		float a = (p2y-p1y)/(p2x-p1x); // note: degenerate if p1x == p2x (vertical line)
		float b = p1y - a*p1x;
		float distx = (p2x-p1x)/2;
		float ctrx = p1x + distx;
		float cs1 = ctrx+line_pids[k].t1*distx;
		float rs1 = a*cs1+b;
		int col = (int)(cs1+mx); // renamed from cs/rs: the old names shadowed cs above
		int row = (int)(rs1+my);
//		std::cout<< row  << " " << col << std::endl;
		feats.at<float>(k,0) = img.at<float>(max(0,min(h-1,row)),max(0,min(w-1,col)));
//		std::cout << feats.at<float>(k,0) << std::endl;
	}

}
Example no. 3
	void data_transformer_util::rotate_band(
		cv::Mat image,
		int shift_x_to_left,
		int shift_y_to_top)
	{
		int actual_shift_x = (shift_x_to_left % image.cols);
		if (actual_shift_x < 0)
			actual_shift_x += image.cols;
		int actual_shift_y = (shift_y_to_top % image.rows);
		if (actual_shift_y < 0)
			actual_shift_y += image.rows;
		if ((actual_shift_x == 0) && (actual_shift_y == 0))
			return;

		cv::Mat cloned_image = image.clone();

		if (actual_shift_y == 0)
		{
			cloned_image.colRange(actual_shift_x, image.cols).copyTo(image.colRange(0, image.cols - actual_shift_x));
			cloned_image.colRange(0, actual_shift_x).copyTo(image.colRange(image.cols - actual_shift_x, image.cols));
		}
		else if (actual_shift_x == 0)
		{
			cloned_image.rowRange(actual_shift_y, image.rows).copyTo(image.rowRange(0, image.rows - actual_shift_y));
			cloned_image.rowRange(0, actual_shift_y).copyTo(image.rowRange(image.rows - actual_shift_y, image.rows));
		}
		else
		{
			// wrap both axes: move each of the four quadrants to its shifted position
			cloned_image.colRange(actual_shift_x, image.cols).rowRange(actual_shift_y, image.rows)
				.copyTo(image.colRange(0, image.cols - actual_shift_x).rowRange(0, image.rows - actual_shift_y));
			cloned_image.colRange(0, actual_shift_x).rowRange(actual_shift_y, image.rows)
				.copyTo(image.colRange(image.cols - actual_shift_x, image.cols).rowRange(0, image.rows - actual_shift_y));
			cloned_image.colRange(actual_shift_x, image.cols).rowRange(0, actual_shift_y)
				.copyTo(image.colRange(0, image.cols - actual_shift_x).rowRange(image.rows - actual_shift_y, image.rows));
			cloned_image.colRange(0, actual_shift_x).rowRange(0, actual_shift_y)
				.copyTo(image.colRange(image.cols - actual_shift_x, image.cols).rowRange(image.rows - actual_shift_y, image.rows));
		}
	}
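A small sanity-check sketch (an assumption, not from the original source): rotate_band is a circular shift, so shifting by the full width and height must leave the image unchanged.

// Hypothetical check with a random CV_8UC1 image.
cv::Mat band(64, 128, CV_8UC1);
cv::randu(band, cv::Scalar(0), cv::Scalar(256));
cv::Mat reference = band.clone();
data_transformer_util::rotate_band(band, 128, 64);   // shift by full width and height
CV_Assert(cv::countNonZero(band != reference) == 0); // wrap-around brings it back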
Example no. 4
template <typename T>
cv::Rect_<T> get_enclosing_bbox(cv::Mat landmarks)
{
	auto num_landmarks = landmarks.cols / 2;
	double min_x_val, max_x_val, min_y_val, max_y_val;
	cv::minMaxLoc(landmarks.colRange(0, num_landmarks), &min_x_val, &max_x_val);
	cv::minMaxLoc(landmarks.colRange(num_landmarks, landmarks.cols), &min_y_val, &max_y_val);
	return cv::Rect_<T>(min_x_val, min_y_val, max_x_val - min_x_val, max_y_val - min_y_val);
}
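A hedged usage sketch (not from the original source), assuming the landmarks are laid out as one row [x1..xn, y1..yn]:

// Hypothetical usage with three landmarks.
cv::Mat landmarks = (cv::Mat_<float>(1, 6) << 10.f, 30.f, 20.f,   // x coordinates
                                              5.f, 25.f, 15.f);   // y coordinates
cv::Rect_<float> bbox = get_enclosing_bbox<float>(landmarks);
// bbox == (10, 5, 20, 20): the min/max of the x half and the y half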
Example no. 5
void projectpose(cv::Mat pose1, cv::Mat& pose2, cv::Rect bbox){
	float sx = bbox.width/2.0;
	float sy = bbox.height/2.0;
	float ctrx = bbox.x + sx;
	float ctry = bbox.y + sy;
	pose2 = pose1.clone();
	int nc = pose1.cols;
	pose2.colRange(0,nc/2) -= ctrx;
	pose2.colRange(0,nc/2) /= sx;
	pose2.colRange(nc/2,nc) -= ctry;
	pose2.colRange(nc/2,nc) /= sy;
}
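A short usage sketch (an assumption, not from the original source): projectpose maps the [x1..xn, y1..yn] pose into bbox-centered coordinates, so points on the box edges land on -1 and +1.

// Hypothetical usage with a 3-point pose.
cv::Mat pose = (cv::Mat_<float>(1, 6) << 10.f, 20.f, 30.f, 40.f, 50.f, 60.f);
cv::Mat normalized;
projectpose(pose, normalized, cv::Rect(10, 40, 20, 20));
// x = 10 maps to -1 and x = 30 to +1 (center 20, half-width 10); same for y around 50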
Example no. 6
void circshift(cv::Mat &A, int shift_row, int shift_col) { // fixed the "shitf_row" typo throughout
    int row = A.rows, col = A.cols;
    shift_row = (row + (shift_row % row)) % row;
    shift_col = (col + (shift_col % col)) % col;
    cv::Mat temp = A.clone();
    if (shift_row){
        temp.rowRange(row - shift_row, row).copyTo(A.rowRange(0, shift_row));
        temp.rowRange(0, row - shift_row).copyTo(A.rowRange(shift_row, row));
    }
    }
    if (shift_col){
        temp.colRange(col - shift_col, col).copyTo(A.colRange(0, shift_col));
        temp.colRange(0, col - shift_col).copyTo(A.colRange(shift_col, col));
    }
    return;
}
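A minimal usage sketch (not from the original source): circshift mirrors MATLAB's circshift, where positive shifts move rows down and columns right.

// Hypothetical usage: shift columns right by one.
cv::Mat m = (cv::Mat_<int>(2, 3) << 1, 2, 3,
                                    4, 5, 6);
circshift(m, 0, 1);
// m is now:
// 3 1 2
// 6 4 5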
Example no. 7
void mergePoints(const std::vector<cv::Mat> &in_descriptors, const std::vector<cv::Mat> &in_points,
                 cv::Mat &out_descriptors, cv::Mat &out_points) {
  // Figure out the number of points
  size_t n_points = 0, n_images = in_descriptors.size();
  for (size_t image_id = 0; image_id < n_images; ++image_id)
    n_points += in_descriptors[image_id].rows;
  if (n_points == 0)
    return;

  // Fill the descriptors and 3d points
  out_descriptors = cv::Mat(n_points, in_descriptors[0].cols, in_descriptors[0].depth());
  out_points = cv::Mat(1, n_points, CV_32FC3);
  size_t row_index = 0;
  for (size_t image_id = 0; image_id < n_images; ++image_id) {
    // Copy the descriptors
    const cv::Mat & descriptors = in_descriptors[image_id];
    int n_image_points = descriptors.rows; // renamed to avoid shadowing the outer n_points
    cv::Mat sub_descriptors = out_descriptors.rowRange(row_index, row_index + n_image_points);
    descriptors.copyTo(sub_descriptors);
    // Copy the 3d points
    const cv::Mat & points = in_points[image_id];
    cv::Mat sub_points = out_points.colRange(row_index, row_index + n_image_points);
    points.copyTo(sub_points);

    row_index += n_image_points;
  }
}
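A hedged usage sketch (an assumption, not from the original source): descriptors are stacked row-wise, 3-D points column-wise.

// Hypothetical usage: merge two images' worth of descriptors and points.
std::vector<cv::Mat> descs = { cv::Mat::zeros(4, 32, CV_8U),
                               cv::Mat::ones (2, 32, CV_8U) };
std::vector<cv::Mat> pts   = { cv::Mat(1, 4, CV_32FC3, cv::Scalar::all(0)),
                               cv::Mat(1, 2, CV_32FC3, cv::Scalar::all(1)) };
cv::Mat all_descs, all_pts;
mergePoints(descs, pts, all_descs, all_pts);
// all_descs is 6x32, all_pts is 1x6, each per-image block concatenated in order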
Example no. 8
void AkazeSymmetryMatcher::apply(cv::Mat &srcGray, cv::Mat &dst) {

    // clear points
    leftKeyPoints.clear();
    rightKeyPoints.clear();

    try {

        detector_descriptor->detectAndCompute(srcGray.colRange(0, srcGray.cols / 2), cv::Mat(),
                                              leftKeyPoints, leftDescriptors, false);
        detector_descriptor->detectAndCompute(srcGray.colRange(srcGray.cols / 2, srcGray.cols),
                                              cv::Mat(), rightKeyPoints, rightDescriptors, false);

        if(leftKeyPoints.size() < 1 || rightKeyPoints.size() < 1 ) return;

        matches.clear();

        descriptorMatcher->match(leftDescriptors, rightDescriptors, matches, cv::Mat());


        //Select the best matching points and draw them
        float min_dist = 100;
        for( size_t i = 0; i < matches.size(); i++ )
        {
            float dist = matches[i].distance;
            if( dist < min_dist ) min_dist = dist;
        }

        bestMatches.clear();
        for( size_t i = 0; i < matches.size(); i++ )
        {
            if( matches[i].distance <= 3 * min_dist )
            {
                bestMatches.push_back( matches[i]);
            }
        }


        cv::drawMatches(srcGray.colRange(0, srcGray.cols / 2), leftKeyPoints,
                        srcGray.colRange(srcGray.cols / 2, srcGray.cols), rightKeyPoints,
                        bestMatches, dst);

    } catch (cv::Exception& e){
       // LOGD(LOG_TAG, e.msg.c_str());
    }

}
Example no. 9
bool checkCheirality (cv::Mat P, cv::Mat X) // cheirality constraint
	//	input: projection matrix P 3x4, P = [R | t], homogeneous 3D point X 4x1
	//  output: true - if X is in front of the camera
{
	if(X.cols * X.rows < 4) {
		X = (cv::Mat_<double>(4,1)<<X.at<double>(0),X.at<double>(1),X.at<double>(2),1);
	}
	cv::Mat x = P*X;
	return cv::determinant(P.colRange(0,3))*x.at<double>(2)*X.at<double>(3)>0;
}
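A minimal usage sketch (not from the original source): for the canonical camera P = [I | 0], a point with positive depth satisfies the constraint.

// Hypothetical usage: point one unit in front of the canonical camera.
cv::Mat P = cv::Mat::eye(3, 4, CV_64F);
cv::Mat X = (cv::Mat_<double>(4, 1) << 0, 0, 1, 1);
bool in_front = checkCheirality(P, X); // true: det(I) * z * w = 1 * 1 * 1 > 0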
Example no. 10
		NearestNeighbor(std::vector<cv::Mat> vm) : 
		confusion_Mat(3,3,CV_64F), NN_Mat(vm.size(),vm[0].cols,CV_64F)
		{
			std::cout << "cols are: " << vm[0].cols << std::endl;
			for(size_t i = 0; i < vm.size();i++)
			{
				cv::Mat dest(NN_Mat.row((int)i)); // target row i; colRange(0, cols) addressed the whole matrix on every pass
				vm[i].copyTo(dest);
			}
		}
Example no. 11
void ofxIcp::compute_transformation(const vector<cv::Point3d> & input, const vector<cv::Point3d> & target, cv::Mat & transformation){
    
    cv::Mat rotation, translation;
    double error;
    
    vector<cv::Point3d> closest_current;
    vector<cv::Point3d> closest_target;
    
    int increment = (int)std::max<size_t>(1, input.size() / 5); // guard: a zero step would loop forever on fewer than 5 points
    for(int i = 0; i < input.size(); i += increment){
        closest_current.push_back(input[i]);
        closest_target.push_back(target[i]);
    }
    
    cv::Mat centroid_current;
    cv::Mat centroid_target;
    
    cv::reduce(cv::Mat(closest_current, false), centroid_current, 0, CV_REDUCE_AVG);
    cv::reduce(cv::Mat(closest_target, false), centroid_target, 0, CV_REDUCE_AVG);
    
    centroid_current = centroid_current.reshape(1);
    centroid_target = centroid_target.reshape(1);
    
    compute_rigid_transformation(closest_current, centroid_current, closest_target, centroid_target, rotation, translation);
    
    vector<cv::Point3d> transformed_closest_current;
    
    transform(closest_current, rotation, translation, transformed_closest_current);
    
    compute_error(transformed_closest_current, closest_target, error);
    
    transformation = (cv::Mat_<double>(4,4) << 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1);
    cv::Mat rotation_tmp = transformation.colRange(0,3).rowRange(0,3);
    cv::Mat translation_tmp = transformation.colRange(3,4).rowRange(0,3);
    
    cv::Mat translation_t = translation.reshape(1).t();
    
    rotation.copyTo(rotation_tmp);
    translation_t.copyTo(translation_tmp);
}
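The copyTo at the end matters: rotation_tmp and translation_tmp are views into transformation, so copyTo writes through into the 4x4 matrix, while plain assignment would only rebind the local headers. A self-contained sketch of the same embedding (stand-in R and t, not from the original source):

// Hypothetical illustration: R goes in the top-left 3x3 block, t in the last column.
cv::Mat T = cv::Mat::eye(4, 4, CV_64F);
cv::Mat R = cv::Mat::eye(3, 3, CV_64F);          // stand-in rotation
cv::Mat t = (cv::Mat_<double>(3, 1) << 1, 2, 3); // stand-in translation
R.copyTo(T.colRange(0, 3).rowRange(0, 3));       // copyTo writes through the views
t.copyTo(T.colRange(3, 4).rowRange(0, 3));
cv::Mat p = (cv::Mat_<double>(4, 1) << 0, 0, 0, 1);
cv::Mat q = T * p; // q = (1, 2, 3, 1): the point moved by t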
Example no. 12
void convolveDFT(const cv::Mat& imgOriginal, const cv::Mat& kernel, cv::Mat& out, const bool& corr, const bool& full)
{ //this method is also valid for a complex image and kernel; set DFT_COMPLEX_OUTPUT then
  //convolution in Fourier space, code kept for future use
  //CONVOLUTION_FULL: return the full convolution, including the border
  //to completely emulate the filter2D operation, the image should first be doubled in size and then cut back to the original size
  cv::Mat source, kernelPadded;
  const int marginSrcTop = corr ? std::ceil((kernel.rows-1)/2.0) : std::floor((kernel.rows-1)/2.0);
  const int marginSrcBottom = corr ? std::floor((kernel.rows-1)/2.0) : std::ceil((kernel.rows-1)/2.0);
  const int marginSrcLeft = corr ? std::ceil((kernel.cols-1)/2.0) : std::floor((kernel.cols-1)/2.0);
  const int marginSrcRight = corr ? std::floor((kernel.cols-1)/2.0) : std::ceil((kernel.cols-1)/2.0);
  cv::copyMakeBorder(imgOriginal, source, marginSrcTop, marginSrcBottom, marginSrcLeft, marginSrcRight, cv::BORDER_CONSTANT);

  const int marginKernelTop = std::ceil((source.rows-kernel.rows)/2.0);
  const int marginKernelBottom = std::floor((source.rows-kernel.rows)/2.0);
  const int marginKernelLeft = std::ceil((source.cols-kernel.cols)/2.0);
  const int marginKernelRight = std::floor((source.cols-kernel.cols)/2.0);
  cv::copyMakeBorder(kernel, kernelPadded, marginKernelTop, marginKernelBottom, marginKernelLeft, marginKernelRight, cv::BORDER_CONSTANT);

  //Divided by 2.0 instead of 2 to consider the result as double instead of as an int
  //The +1 in the shift changes slightly the finest plane in the wavelet,
  shift(kernelPadded, kernelPadded, std::ceil(kernelPadded.cols/2.0), std::ceil(kernelPadded.rows/2.0));

  cv::Mat kernelPadded_ft, input_ft, output_ft;
  cv::dft(kernelPadded, kernelPadded_ft, cv::DFT_COMPLEX_OUTPUT + cv::DFT_SCALE);
  cv::dft(source, input_ft, cv::DFT_COMPLEX_OUTPUT + cv::DFT_SCALE);
  // undo the double DFT_SCALE (each forward DFT divides by total()) by scaling the product back up once
  cv::mulSpectrums(input_ft, kernelPadded_ft, output_ft, 0, corr);
  output_ft *= static_cast<double>(kernelPadded.total());
  
  if(imgOriginal.channels() == 1 && kernel.channels() == 1) cv::idft(output_ft, out, cv::DFT_REAL_OUTPUT);
  else cv::idft(output_ft, out, cv::DFT_COMPLEX_OUTPUT);
  
  if(!full)
  {
    //colRange and rowRange are semi-open intervals: the first index is included, the last is not
    //this first option is what I think should be the correct one, but the next is what the filter2D function gives for these inputs
//    out = out.colRange(marginSrcLeft, out.cols - marginSrcRight).
//              rowRange(marginSrcTop, out.rows - marginSrcBottom);
        out = out.colRange(marginSrcRight, out.cols - marginSrcLeft).
                  rowRange(marginSrcBottom, out.rows - marginSrcTop);
  }
}
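A hedged sanity-check sketch (not from the original source; it assumes the external shift() helper used above is available): the 'same'-size DFT convolution should agree with a spatial filter2D up to border handling.

// Hypothetical comparison against filter2D for a small box kernel.
cv::Mat img(32, 32, CV_32F), kernel = cv::Mat::ones(3, 3, CV_32F) / 9.0f;
cv::randu(img, cv::Scalar(0), cv::Scalar(1));
cv::Mat outDft, outSpatial;
convolveDFT(img, kernel, outDft, false, false);
cv::Mat flipped;
cv::flip(kernel, flipped, -1); // filter2D correlates, so flip for true convolution
cv::filter2D(img, outSpatial, -1, flipped, cv::Point(-1, -1), 0, cv::BORDER_CONSTANT);
// outDft and outSpatial should match except near the borders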
Example no. 13
void conv2D(const cv::Mat &img, cv::Mat& dest, const cv::Mat& kernel, ConvolutionType ctype, int btype) {

    cv::Mat source = img;
    cv::Mat flip_kernel(kernel.rows,kernel.cols,kernel.type());
    cv::flip(kernel,flip_kernel,-1);

    if(ctype == CONVOLUTION_FULL) {
        source = cv::Mat();
        const int additionalRows = kernel.rows-1, additionalCols = kernel.cols-1;
        cv::copyMakeBorder(img, source, (additionalRows+1)/2, additionalRows/2, (additionalCols+1)/2, additionalCols/2, btype, cv::Scalar(0));
    }

    cv::Point anchor(kernel.cols - kernel.cols/2 - 1, kernel.rows - kernel.rows/2 - 1);

    int borderMode = btype;
    cv::filter2D(source, dest, img.depth(), flip_kernel, anchor, 0, borderMode);

    if(ctype == CONVOLUTION_VALID) {
        dest = dest.colRange((kernel.cols-1)/2, dest.cols - kernel.cols/2)
                .rowRange((kernel.rows-1)/2, dest.rows - kernel.rows/2);
    }

}
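A short usage sketch (not from the original source; ConvolutionType and its values come from the surrounding project): for an NxN image and a KxK kernel, CONVOLUTION_FULL yields (N+K-1)x(N+K-1) and CONVOLUTION_VALID yields (N-K+1)x(N-K+1), matching the padding and cropping above.

// Hypothetical usage: check the output sizes for an 8x8 image and a 3x3 kernel.
cv::Mat img = cv::Mat::ones(8, 8, CV_32F);
cv::Mat k   = cv::Mat::ones(3, 3, CV_32F);
cv::Mat full, valid;
conv2D(img, full,  k, CONVOLUTION_FULL,  cv::BORDER_CONSTANT);
conv2D(img, valid, k, CONVOLUTION_VALID, cv::BORDER_CONSTANT);
// full is 10x10, valid is 6x6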
Example no. 14
std::vector<Entry> HierClassifier::extractEntries(	cv::Mat imageBGR,
													cv::Mat terrain,
													cv::Mat regionsOnImage,
													cv::Mat maskIgnore,
													int entryWeightThreshold)
{
	using namespace std::chrono;
	high_resolution_clock::time_point start = high_resolution_clock::now();

	if(maskIgnore.empty()){
		maskIgnore = Mat(imageBGR.rows, imageBGR.cols, CV_32SC1, Scalar(0));
	}
	//namedWindow("imageBGR");
	Mat imageHSV;
	cvtColor(imageBGR, imageHSV, CV_BGR2HSV);
	vector<Pixel> pixels;
	//pixels.resize(imageHSV.rows * imageHSV.cols);
	for(int r = 0; r < imageHSV.rows; r++){
		for(int c = 0; c < imageHSV.cols; c++){
			if(maskIgnore.at<int>(r, c) == 0){
				pixels.push_back(Pixel(r, c, regionsOnImage.at<int>(r, c)));
			}
		}
	}
	sort(pixels.begin(), pixels.end());

	vector<pair<int, int> > terrainRegion;
	if(!terrain.empty()){
		Mat terrainPointsImage(terrain.cols, 2, CV_32FC1);
		Mat tmpTerrain = terrain.rowRange(0, 3).t();
		Mat tvec(1, 3, CV_32FC1, Scalar(0));
		Mat rvec(1, 3, CV_32FC1, Scalar(0));
		Mat distCoeffs(1, 5, CV_32FC1, Scalar(0));
		projectPoints(tmpTerrain, rvec, tvec, cameraMatrix, Mat(), terrainPointsImage); // projectPoints takes rvec before tvec; both are zero here, so the previous swapped order happened to be harmless
		//terrainPointsImage = terrainPointsImage.t();
		terrainPointsImage = terrainPointsImage.reshape(1).t();
		//cout << tmpTerrain.rowRange(1, 10) << endl;
		//cout << terrainPointsImage.rowRange(1, 10) << endl;
		//cout << cameraMatrix << endl;
		for(int p = 0; p < terrain.cols; p++){
			int imageRow = round(terrainPointsImage.at<float>(1, p));
			int imageCol = round(terrainPointsImage.at<float>(0, p));
			if(imageRow >= 0 && imageRow < imageBGR.rows &&
				imageCol >= 0 && imageCol < imageBGR.cols &&
				maskIgnore.at<int>(imageRow, imageCol) == 0)
			{
				int region = regionsOnImage.at<int>(imageRow, imageCol);
				terrainRegion.push_back(pair<int, int>(region, p));
			}
		}
		sort(terrainRegion.begin(), terrainRegion.end());
	}

	high_resolution_clock::time_point endSorting = high_resolution_clock::now();

	if(debugLevel >= 1){
		cout << "End sorting terrain, terrainRegion.size() = " << terrainRegion.size() << endl;
	}

	vector<Entry> ret;
	int endIm = 0;
	int endTer = 0;
	while(endIm < pixels.size()){
		Mat values, valuesTer, histogramHS, histogramV, statisticsHSV, statisticsLaser;
		int begIm = endIm;
		while(pixels[begIm].imageId == pixels[endIm].imageId){
			endIm++;
			if(endIm == pixels.size()){
				break;
			}
		}
		if(debugLevel >= 1){
			cout << "segment id = " << pixels[begIm].imageId << ", begIm = " << begIm << ", endIm = " << endIm << endl;
		}
		values = Mat(1, endIm - begIm, CV_8UC3);
		for(int p = begIm; p < endIm; p++){
			values.at<Vec3b>(p - begIm) = imageHSV.at<Vec3b>(pixels[p].r, pixels[p].c);
		}

		int begTer = endTer;
		if(begTer < terrainRegion.size()){
			while(terrainRegion[begTer].first != pixels[begIm].imageId){
				begTer++;
				if(begTer >= terrainRegion.size()){
					break;
				}
			}
		}
		endTer = begTer;
		if(endTer < terrainRegion.size()){
			while(terrainRegion[begTer].first == terrainRegion[endTer].first){
				endTer++;
				if(endTer >= terrainRegion.size()){
					break;
				}
			}
		}
		if(endTer - begTer > 0){
			valuesTer = Mat(terrain.rows, endTer - begTer, CV_32FC1);
			Mat tmpImageBGR(imageBGR);
			for(int p = begTer; p < endTer; p++){
				//cout << terrainRegion[p].second << endl;
				terrain.colRange(terrainRegion[p].second, terrainRegion[p].second + 1).copyTo(valuesTer.colRange(p - begTer, p - begTer + 1));
				//cout << "terrainRegion[p].second = " << terrainRegion[p].second << endl;
				//int imageRow = round(terrainPointsImage.at<float>(1 ,terrainRegion[p].second));
				//int imageCol = round(terrainPointsImage.at<float>(0, terrainRegion[p].second));
				//cout << "Point: " << imageRow << ", " << imageCol << endl;
				//tmpImageBGR.at<Vec3b>(imageRow, imageCol) = Vec3b(0x00, 0x00, 0xff);
			}
			//cout << "ImageId = " << pixels[begIm].imageId << endl;
			//imshow("imageBGR", imageBGR);
			//waitKey();
		}
		else{
			//cout << "Warning - no terrain values for imageId " << pixels[begIm].imageId << endl;
			valuesTer = Mat(6, 1, CV_32FC1, Scalar(0));
		}

		if(endIm - begIm > 0){
			int channelsHS[] = {0, 1};
			float rangeH[] = {0, 60};
			float rangeS[] = {0, 256};
			const float* rangesHS[] = {rangeH, rangeS};
			int sizeHS[] = {histHLen, histSLen};
			int channelsV[] = {2};
			float rangeV[] = {0, 256};
			const float* rangesV[] = {rangeV};
			int sizeV[] = {histVLen};
			calcHist(&values, 1, channelsHS, Mat(), histogramHS, 2, sizeHS, rangesHS);
			calcHist(&values, 1, channelsV, Mat(), histogramV, 1, sizeV, rangesV);
			histogramHS = histogramHS.reshape(0, 1);
			histogramV = histogramV.reshape(0, 1);
			normalize(histogramHS, histogramHS);
			normalize(histogramV, histogramV);

			if(debugLevel >= 1){
				cout << "V size = " << histogramV.size() << ", HS size = " << histogramHS.size() << endl;
			}


			values = values.reshape(1, 3);
			//cout << "values size = " << values.size() << endl;
			Mat covarHSV;
			Mat meanHSV;
			calcCovarMatrix(values,
							covarHSV,
							meanHSV,
							CV_COVAR_NORMAL | CV_COVAR_SCALE | CV_COVAR_COLS,
							CV_32F);

			if(debugLevel >= 1){
				cout << "Calculated covar matrix" << endl;
			}
			covarHSV = covarHSV.reshape(0, 1);
			meanHSV = meanHSV.reshape(0, 1);
			//normalize(covarHSV, covarHSV);
			//normalize(meanHSV, meanHSV);

			Mat covarLaser, meanLaser;
			calcCovarMatrix(valuesTer.rowRange(4, 6),
							covarLaser,
							meanLaser,
							CV_COVAR_NORMAL | CV_COVAR_SCALE | CV_COVAR_COLS,
							CV_32F);
			covarLaser = covarLaser.reshape(0, 1);
			meanLaser = meanLaser.reshape(0, 1);
			//normalize(covarLaser, covarLaser);
			//normalize(meanLaser, meanLaser);
			//cout << "covarLaser = " << covarLaser << endl;
			//cout << "meanLaser = " << meanLaser << endl;

			//cout << "Entry " << ret.size() << endl;
			//cout << "histHS = " << histogramHS << endl;
			//cout << "histV = " << histogramV << endl;
			//cout << "covarHSV = " << covarHSV << endl;
			//cout << "meanHSV = " << meanHSV << endl;

			Mat kurtLaser(1, 2, CV_32FC1);
			Mat tmpVal;
			valuesTer.rowRange(4, 6).copyTo(tmpVal);
			//cout << "tmpVal = " << tmpVal << endl;
			//cout << "mean(0) = " << meanLaser.at<float>(0) << ", mean(1) = " << meanLaser.at<float>(1) << endl;
			//cout << "stdDev^4(0) = " << pow(covarLaser.at<float>(0), 2) << ", stdDev^4(3) = " << pow(covarLaser.at<float>(3), 2) << endl;
			tmpVal.rowRange(0, 1) -= meanLaser.at<float>(0);
			tmpVal.rowRange(1, 2) -= meanLaser.at<float>(1);

			pow(tmpVal, 4, tmpVal);
			kurtLaser.at<float>(0) = sum(tmpVal.rowRange(0, 1))(0);
			if(tmpVal.cols * pow(covarLaser.at<float>(0), 2) != 0){
				kurtLaser.at<float>(0) = kurtLaser.at<float>(0) / (tmpVal.cols * pow(covarLaser.at<float>(0), 2)) - 3;
			}
			kurtLaser.at<float>(1) = sum(tmpVal.rowRange(1, 2))(0);
			if(tmpVal.cols * pow(covarLaser.at<float>(3), 2) != 0){
				kurtLaser.at<float>(1) = kurtLaser.at<float>(1) / (tmpVal.cols * pow(covarLaser.at<float>(3), 2)) - 3;
			}

			Mat histogramDI;
			int channelsDI[] = {0, 1};
			float rangeD[] = {1500, 2500};
			float rangeI[] = {1800, 4000};
			const float* rangesDI[] = {rangeD, rangeI};
			int sizeDI[] = {histDLen, histILen};
			Mat valHistD = valuesTer.rowRange(4, 5);
			Mat valHistI = valuesTer.rowRange(5, 6);
			Mat valuesHistDI[] = {valHistD, valHistI};
			calcHist(valuesHistDI, 2, channelsDI, Mat(), histogramDI, 2, sizeDI, rangesDI);
			histogramDI = histogramDI.reshape(0, 1);
			normalize(histogramDI, histogramDI);

			//cout << "histogramDI = " << histogramDI << endl;

			Entry tmp;
			tmp.imageId = pixels[begIm].imageId;
			tmp.weight = (endIm - begIm)/* + (endTer - begTer)*/;
			tmp.descriptor = Mat(1, histHLen*histSLen +
								histVLen +
								meanHSVLen +
								covarHSVLen +
								histDLen*histILen +
								meanLaserLen +
								covarLaserLen,
								CV_32FC1);

			int begCol = 0;
			histogramHS.copyTo(tmp.descriptor.colRange(begCol, begCol + histHLen*histSLen));
			begCol += histHLen*histSLen;
			histogramV.copyTo(tmp.descriptor.colRange(begCol, begCol + histVLen));
			begCol += histVLen;
			meanHSV.copyTo(tmp.descriptor.colRange(begCol, begCol + meanHSVLen));
			begCol += meanHSVLen;
			covarHSV.copyTo(tmp.descriptor.colRange(begCol, begCol + covarHSVLen));
			begCol += covarHSVLen;
			histogramDI.copyTo(tmp.descriptor.colRange(begCol, begCol + histDLen*histILen));
			begCol += histDLen*histILen;
			meanLaser.copyTo(tmp.descriptor.colRange(begCol, begCol + meanLaserLen));
			begCol += meanLaserLen;
			covarLaser.copyTo(tmp.descriptor.colRange(begCol, begCol + covarLaserLen));
			begCol += covarLaserLen;
			//kurtLaser.copyTo(tmp.descriptor.colRange(begCol, begCol + kurtLaserLen));
			//begCol += kurtLaserLen;
			//cout << "descriptor = " << tmp.descriptor << endl;

			if(endIm - begIm > entryWeightThreshold){
				ret.push_back(tmp);
			}
		}
	}

	static duration<double> sortingTime = duration<double>::zero();
	static duration<double> compTime = duration<double>::zero();
	static duration<double> wholeTime = duration<double>::zero();
	static int times = 0;

	high_resolution_clock::time_point endComp = high_resolution_clock::now();
	sortingTime += duration_cast<duration<double> >(endSorting - start);
	compTime += duration_cast<duration<double> >(endComp - endSorting);
	wholeTime += duration_cast<duration<double> >(endComp - start);
	times++;
	if(debugLevel >= 1){
		cout << "Times: " << times << endl;
		cout << "Extract Average sorting time: " << sortingTime.count()/times << endl;
		cout << "Extract Average computing time: " << compTime.count()/times << endl;
		cout << "Extract Average whole time: " << wholeTime.count()/times << endl;
	}

	return ret;
}
Example no. 15
void ConstraintsHelpers::processPointCloud(cv::Mat hokuyoData,
											cv::Mat& pointCloudOrigMapCenter,
											std::queue<PointsPacket>& pointsInfo,
											std::chrono::high_resolution_clock::time_point hokuyoTimestamp,
											std::chrono::high_resolution_clock::time_point curTimestamp,
											cv::Mat curPosOrigMapCenter,
											std::mutex& mtxPointCloud,
											cv::Mat cameraOrigLaser,
											cv::Mat cameraOrigImu,
											ros::NodeHandle &nh)
{
	Mat hokuyoCurPoints(6, hokuyoData.cols, CV_32FC1);
	int minLaserDist, pointCloudTimeout;

	nh.getParam("minLaserDist", minLaserDist);
	nh.getParam("pointCloudTimeout", pointCloudTimeout);

	//cout << "Copying hokuyo data" << endl;
	int countPoints = 0;
	for(int c = 0; c < hokuyoData.cols; c++){
		//cout << hokuyoData.at<int>(2, c) << endl;
		if(hokuyoData.at<int>(2, c) > minLaserDist){
			hokuyoCurPoints.at<float>(0, countPoints) = -hokuyoData.at<int>(1, c);
			hokuyoCurPoints.at<float>(1, countPoints) = 0.0;
			hokuyoCurPoints.at<float>(2, countPoints) = hokuyoData.at<int>(0, c);
			hokuyoCurPoints.at<float>(3, countPoints) = 1.0;
			hokuyoCurPoints.at<float>(4, countPoints) = hokuyoData.at<int>(2, c);
			hokuyoCurPoints.at<float>(5, countPoints) = hokuyoData.at<int>(3, c);
			countPoints++;
		}
	}
	hokuyoCurPoints = hokuyoCurPoints.colRange(0, countPoints);
//	cout << "hokuyoCurPoints.size() = " << hokuyoCurPoints.size() << endl;
//	cout << "hokuyoData.size() = " << hokuyoData.size() << endl;

	//cout << "Removing old points" << endl;
	//remove all points older than pointCloudTimeout ms
	int pointsSkipped = 0;
	if(pointsInfo.size() > 0){
		//cout << "dt = " << std::chrono::duration_cast<std::chrono::milliseconds>(curTimestamp - pointsQueue.front().timestamp).count() << endl;
		while(std::chrono::duration_cast<std::chrono::milliseconds>(curTimestamp - pointsInfo.front().timestamp).count() > pointCloudTimeout){
			pointsSkipped += pointsInfo.front().numPoints;
			pointsInfo.pop();
			if(pointsInfo.size() == 0){
				break;
			}
		}
	}

	std::unique_lock<std::mutex> lck(mtxPointCloud);
	//cout << "Moving pointCloudOrigMapCenter, pointsSkipped = " << pointsSkipped << endl;
	Mat tmpAllPoints(hokuyoCurPoints.rows, pointCloudOrigMapCenter.cols + hokuyoCurPoints.cols - pointsSkipped, CV_32FC1);
	if(!pointCloudOrigMapCenter.empty()){
		//cout << "copyTo" << endl;
		if(pointsSkipped <	pointCloudOrigMapCenter.cols){
			pointCloudOrigMapCenter.colRange(pointsSkipped,
												pointCloudOrigMapCenter.cols).
							copyTo(tmpAllPoints.colRange(0, pointCloudOrigMapCenter.cols - pointsSkipped));
		}
	}
//	if(debugLevel >= 1){
//		cout << "countPoints = " << countPoints << ", hokuyoCurPoints.size = " << hokuyoCurPoints.size() << endl;
//	}
	if(countPoints > 0){

		//cout << "Moving to camera orig" << endl;
		hokuyoCurPoints.rowRange(0, 4) = cameraOrigLaser.inv()*hokuyoCurPoints.rowRange(0, 4);
		//cout << "Addding hokuyoCurPoints" << endl;
//		if(debugLevel >= 1){
//			cout << "Addding hokuyoCurPoints" << endl;
//		}
		Mat curPointCloudOrigMapCenter(hokuyoCurPoints.rows, hokuyoCurPoints.cols, CV_32FC1);
		Mat tmpCurPoints = curPosOrigMapCenter*cameraOrigImu*hokuyoCurPoints.rowRange(0, 4);
		tmpCurPoints.copyTo(curPointCloudOrigMapCenter.rowRange(0, 4));
		hokuyoCurPoints.rowRange(4, 6).copyTo(curPointCloudOrigMapCenter.rowRange(4, 6));
		//cout << hokuyoCurPointsGlobal.channels() << ", " << hokuyoAllPointsGlobal.channels() << endl;
//		cout << pointCloudOrigMapCenter.size() << " " << curPointCloudCameraMapCenter.size() << " " << tmpAllPoints.size() << endl;
//		if(debugLevel >= 1){
//			cout << "pointsSkipped = " << pointsSkipped << endl;
//		}
		curPointCloudOrigMapCenter.copyTo(tmpAllPoints.colRange(pointCloudOrigMapCenter.cols - pointsSkipped,
																	pointCloudOrigMapCenter.cols + hokuyoCurPoints.cols - pointsSkipped));
//		if(debugLevel >= 1){
//			cout << "curPointCloudCameraMapCenter copied" << endl;
//		}
		pointsInfo.push(PointsPacket(hokuyoTimestamp, hokuyoCurPoints.cols));

		pointCloudOrigMapCenter = tmpAllPoints;
	}
	lck.unlock();
}
Example no. 16
void FSolver::normalizePoints(const cv::Mat &P, cv::Mat &Q) const
{
  // turn P into homogeneous coords first
  
  if(P.rows == 3) // P is 3xN
  {
    if(P.type() == CV_64F)
      Q = P.clone();
    else
      P.convertTo(Q, CV_64F);
    
    cv::Mat aux = Q.row(0);
    cv::divide(aux, Q.row(2), aux);
    
    aux = Q.row(1);
    cv::divide(aux, Q.row(2), aux);
    
  }
  else if(P.rows == 2) // P is 2xN
  {
    const int N = P.cols;
    
    Q.create(3, N, CV_64F);
    Q.row(2) = 1;
    if(P.type() == CV_64F)
    {
      Q.rowRange(0,2) = P * 1;
    }
    else
    {
      cv::Mat aux = Q.rowRange(0,2);
      P.convertTo(aux, CV_64F);
    }
  }
  else if(P.cols == 3) // P is Nx3
  {
    if(P.type() == CV_64F)
      Q = P.t();
    else
    {
      P.convertTo(Q, CV_64F);
      Q = Q.t();
    } 
    
    cv::Mat aux = Q.row(0);
    cv::divide(aux, Q.row(2), aux);
    
    aux = Q.row(1);
    cv::divide(aux, Q.row(2), aux);
  }
  else if(P.cols == 2) // P is Nx2
  {
    const int N = P.rows;
    
    Q.create(N, 3, CV_64F);
    Q.col(2) = 1;
    cv::Mat aux;
    if(P.type() == CV_64F)
    {
      aux = Q.colRange(0,2); // P is Nx2, so it belongs in the first two columns (rowRange here would detach aux from Q)
      P.copyTo(aux);
    }
    else
    {
      aux = Q.colRange(0,2);
      P.convertTo(aux, CV_64F);
    }
    
    Q = Q.t();
  }  
}
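A minimal usage sketch (an assumption, not from the original source; solver is an FSolver instance): a 2xN float matrix comes back as a 3xN homogeneous CV_64F matrix.

// Hypothetical usage for the 2xN branch.
cv::Mat P = (cv::Mat_<float>(2, 3) << 1.f, 2.f, 3.f,
                                      4.f, 5.f, 6.f);
cv::Mat Q;
solver.normalizePoints(P, Q);
// Q is 3x3 CV_64F: rows 0-1 hold the coordinates, row 2 is all ones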
Example no. 17
std::shared_ptr<Detector2DResult> Detector2D::detect(Detector2DProperties& props, cv::Mat& image, cv::Mat& color_image, cv::Mat& debug_image, cv::Rect& roi, bool visualize, bool use_debug_image, bool pause_video = false)
{
	std::shared_ptr<Detector2DResult> result = std::make_shared<Detector2DResult>();
	result->current_roi = roi;
	result->image_width =  image.size().width;
	result->image_height =  image.size().height;

	const int image_width = image.size().width;
	const int image_height = image.size().height;
	const cv::Mat pupil_image = cv::Mat(image, roi).clone();  // image with roi, copy the image, since we alter it
	const int w = pupil_image.size().width / 2;
	const float coarse_pupil_width = w / 2.0f;
	const int padding = int(coarse_pupil_width / 4.0f);
	const int offset = props.intensity_range;
	const int spectral_offset = 5;

	cv::Mat histogram;
	int histSize = 256; //from 0 to 255
	/// Set the ranges
	float range[] = { 0, 256 } ; //the upper boundary is exclusive
	const float* histRange = { range };
	cv::calcHist(&pupil_image, 1 , 0, cv::Mat(), histogram , 1 , &histSize, &histRange, true, false);

	int lowest_spike_index = 255;
	int highest_spike_index = 0;
	float max_intensity = 0;
	singleeyefitter::detector::calculate_spike_indices_and_max_intenesity(histogram, 40, lowest_spike_index, highest_spike_index, max_intensity);

	if (visualize) {
		const int scale_x  = 100;
		const int scale_y = 1 ;

		// display the histogram and the spikes
		for (int i = 0; i < histogram.rows; i++) {
			const float norm_i  = histogram.ptr<float>(i)[0] / max_intensity ; // normalized intensity
			cv::line(color_image, {image_width, i * scale_y}, { image_width - int(norm_i * scale_x), i * scale_y}, mBlue_color);
		}

		cv::line(color_image, {image_width, lowest_spike_index * scale_y}, { int(image_width - 0.5f * scale_x), lowest_spike_index * scale_y }, mRed_color);
		cv::line(color_image, {image_width, (lowest_spike_index + offset)* scale_y}, { int(image_width - 0.5f * scale_x), (lowest_spike_index + offset)* scale_y }, mYellow_color);
		cv::line(color_image, {image_width, (highest_spike_index)* scale_y}, { int(image_width - 0.5f * scale_x), highest_spike_index * scale_y }, mRed_color);
		cv::line(color_image, {image_width, (highest_spike_index - spectral_offset)* scale_y}, { int(image_width - 0.5f * scale_x), (highest_spike_index - spectral_offset)* scale_y }, mWhite_color);
	}

	//create dark and spectral glint masks
	cv::Mat binary_img,spec_mask,kernel;
	cv::inRange(pupil_image, cv::Scalar(0) , cv::Scalar(lowest_spike_index + props.intensity_range), binary_img);    // binary threshold
	kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, {7, 7});
	cv::dilate(binary_img, binary_img, kernel, { -1, -1}, 2);
	cv::inRange(pupil_image, cv::Scalar(0) , cv::Scalar(highest_spike_index - spectral_offset), spec_mask);    // binary threshold
	cv::erode(spec_mask, spec_mask, kernel);

	kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, {9, 9});
	//open operation to remove eye lashes
	cv::morphologyEx(pupil_image, pupil_image, cv::MORPH_OPEN, kernel);

	if (props.blur_size > 1)
		cv::medianBlur(pupil_image, pupil_image, props.blur_size);

	cv::Mat edges;
	cv::Canny(pupil_image, edges, props.canny_treshold, props.canny_treshold * props.canny_ration, props.canny_aperture);

	//remove edges in areas not dark enough and where the glint is (spectral reflection from IR LEDs)
	cv::min(edges, spec_mask, edges);
	cv::min(edges, binary_img, edges);

	if (visualize) {
		// get sub matrix
		cv::Mat overlay = cv::Mat(color_image, roi);
		cv::Mat g_channel(overlay.rows, overlay.cols, CV_8UC1);
		cv::Mat b_channel(overlay.rows, overlay.cols, CV_8UC1);
		cv::Mat r_channel(overlay.rows, overlay.cols, CV_8UC1);
		cv::Mat out[] = {b_channel, g_channel, r_channel};

		cv::split(overlay, out);

		cv::max(g_channel, edges, g_channel);
		cv::max(b_channel, binary_img, b_channel);
		cv::min(b_channel, spec_mask, b_channel);
		cv::merge(out, 3, overlay);

		//draw a frame around the automatic pupil ROI in overlay.
		auto rect = cv::Rect(0, 0, overlay.size().width, overlay.size().height);
		cvx::draw_dotted_rect(overlay, rect, mWhite_color);

		//draw a frame around the area we require the pupil center to be.
		rect = cv::Rect(padding, padding, roi.width - padding, roi.height - padding);
		cvx::draw_dotted_rect(overlay, rect, mWhite_color);

		//draw size ellipses
		cv::Point center(100, image_height - 100);
		cv::circle(color_image, center, props.pupil_size_min / 2.0, mRed_color);

		// real pupil size of this frame is calculated further down, so this size is from the last frame
		cv::circle(color_image, center, mPupil_Size / 2.0, mGreen_color);
		cv::circle(color_image, center, props.pupil_size_max / 2.0, mRed_color);
		auto text_string = std::to_string(mPupil_Size);
		cv::Size text_size = cv::getTextSize(text_string, cv::FONT_HERSHEY_SIMPLEX, 0.4 , 1, 0);
		cv::Point text_pos = { center.x - text_size.width / 2 , center.y + text_size.height / 2};
		cv::putText(color_image, text_string, text_pos, cv::FONT_HERSHEY_SIMPLEX, 0.4, mRoyalBlue_color);
	}


	//GuoHallThinner thinner;
    //thinner.thin(edges, true);


	//get raw edge pixel for later
	std::vector<cv::Point> raw_edges;
    // findNonZero crashes if it doesn't find any. replace with the cv implementation if the opencv version is 3.0 or above
	singleeyefitter::cvx::findNonZero(edges, raw_edges);


	///////////////////////////////
	/// Strong Prior Part Begin ///
	///////////////////////////////
	//if we had a good ellipse before, let's see if it is still a good first guess:
	if(  mUse_strong_prior ){

	  mUse_strong_prior = false;
	  //recalculate the center in the coordinate system of the new ROI view! the roi changes every frame
	  Ellipse ellipse = mPrior_ellipse;
	  ellipse.center[0] -= roi.x  ;
	  ellipse.center[1] -= roi.y ;

	  if( !raw_edges.empty() ){

	    std::vector<cv::Point> support_pixels;
	    double ellipse_circumference = ellipse.circumference();
	    support_pixels = ellipse_true_support(props,ellipse, ellipse_circumference, raw_edges);
	    double support_ratio = support_pixels.size() / ellipse_circumference;

	    if(support_ratio >= props.strong_perimeter_ratio_range_min){
	      	cv::RotatedRect refit_ellipse = cv::fitEllipse(support_pixels);

			if(use_debug_image){
			  cv::ellipse(debug_image, toRotatedRect(ellipse), mRoyalBlue_color, 4);
			  cv::ellipse(debug_image, refit_ellipse, mRed_color, 1);
			}

			ellipse = toEllipse<double>(refit_ellipse);

	    	ellipse_circumference = ellipse.circumference();
			support_pixels = ellipse_true_support(props,ellipse, ellipse_circumference, raw_edges);
	     	support_ratio = support_pixels.size() / ellipse_circumference;

			ellipse.center[0] += roi.x;
			ellipse.center[1] += roi.y;

			mPrior_ellipse = ellipse;
			mUse_strong_prior = true;
			double goodness = std::min(1.0, support_ratio);
			mPupil_Size = ellipse.major_radius * 2.0;
		 	result->confidence = goodness;
			result->ellipse = ellipse;

			//result->final_contours = std::move(best_contours); // no contours when strong prior
			//result->contours = std::move(split_contours);
			result->raw_edges = std::move(raw_edges); // do we need it when strong prior ?
			result->final_edges = std::move(support_pixels);  // need for optimisation
	      	return result;
	    }
	  }
	}
	///////////////////////////////
	///  Strong Prior Part End  ///
	///////////////////////////////

	//from edges to contours
	Contours_2D contours ;
	cv::findContours(edges, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);

	//first we want to filter out the bad stuff: contours that are too short
	const auto contour_size_min_pred = [&props](const Contour_2D & contour) {
		return contour.size() > props.contour_size_min;
	};
	contours = singleeyefitter::fun::filter(contour_size_min_pred , contours);

	//now we learn things about each contour by looking at the curvature.
	//For this we need to simplify the contour so that point-to-point angles become more meaningful
	Contours_2D approx_contours;
	std::for_each(contours.begin(), contours.end(), [&](const Contour_2D & contour) {
		std::vector<cv::Point> approx_c;
		cv::approxPolyDP(contour, approx_c, 1.5, false);
		approx_contours.push_back(std::move(approx_c));
	});

	// split contours looking at curvature and angle
	double split_angle = 80;
	int split_contour_size_min = 3;  //removing stubs makes combinatorial search feasible

	//split_contours = singleeyefitter::detector::split_rough_contours(approx_contours, split_angle );
	//removing stubs makes combinatorial search feasible
	//  MOVED TO split_contours_optimized
	//split_contours = singleeyefitter::fun::filter( [](std::vector<cv::Point>& v){ return v.size() <= 3;} , split_contours);
	Contours_2D split_contours = singleeyefitter::detector::split_rough_contours_optimized(approx_contours, split_angle , split_contour_size_min);

	if (split_contours.empty()) {
		result->confidence = 0.0;
		// Does it make sense to return anything?
		//result->ellipse = toEllipse<double>(refit_ellipse);
		//result->final_contours = std::move(best_contours);
		//result->contours = std::move(split_contours);
		//result->raw_edges = std::move(raw_edges);
		return result;
	}

	if (use_debug_image) {
		// debug segments
		int colorIndex = 0;

		for (const auto& segment : split_contours) {
			const cv::Scalar_<int> colors[] = {mRed_color, mBlue_color, mRoyalBlue_color, mYellow_color, mWhite_color, mGreen_color};
			cv::polylines(debug_image, segment, false, colors[colorIndex], 1, 4);
			colorIndex++;
			colorIndex %= 6;
		}
	}

	std::sort(split_contours.begin(), split_contours.end(), [](const Contour_2D & a,const Contour_2D & b) { return a.size() > b.size(); });
	const cv::Rect ellipse_center_varianz = cv::Rect(padding, padding, pupil_image.size().width - 2.0 * padding, pupil_image.size().height - 2.0 * padding);
	const EllipseEvaluation2D is_Ellipse(ellipse_center_varianz, props.ellipse_roundness_ratio, props.pupil_size_min, props.pupil_size_max);

	//finding potential candidates for ellipse seeds that describe the pupil.
	auto seed_contours  = detector::divide_strong_and_weak_contours(
	                          split_contours, is_Ellipse, props.initial_ellipse_fit_treshhold,
	                          props.strong_perimeter_ratio_range_min, props.strong_perimeter_ratio_range_max,
	                          props.strong_area_ratio_range_min, props.strong_area_ratio_range_max
	                      );
	std::vector<int> seed_indices = seed_contours.first; // strong contours

	if (seed_indices.empty() && !seed_contours.second.empty()) {
		seed_indices = seed_contours.second; // weak contours
	}

	// still empty ? --> exits
	if (seed_indices.empty()) {
		result->confidence = 0.0;
		// Does it make sense to return anything?
		//result->ellipse = toEllipse<double>(refit_ellipse);
		//result->final_contours = std::move(best_contours);
		//result->contours = std::move(split_contours);
		result->raw_edges = std::move(raw_edges);
		return result;
	}

	auto pruning_quick_combine = [&](const std::vector<std::vector<cv::Point>>& contours,  std::set<int>& seed_indices, int max_evals = std::numeric_limits<int>::max(), int max_depth = 5) { // the old default of 1e20 does not fit in an int
		// describes different combinations of contours
		typedef std::set<int> Path;
		// combinations we want to test
		std::vector<Path> unknown(seed_indices.size());
		// init with paths of size 1 == seed indices
		int n = 0;
		std::generate(unknown.begin(), unknown.end(), [&]() { return Path{n++}; }); // fill with increasing values, starting from 0
		std::vector<int> mapping; // contains all indices, starting with seed_indices
		mapping.reserve(contours.size());
		mapping.insert(mapping.begin(), seed_indices.begin(), seed_indices.end());

		// add indices which are not used to the end of mapping
		for (int i = 0; i < contours.size(); i++) {
			if (seed_indices.find(i) == seed_indices.end()) { mapping.push_back(i); }
		}

		// contains all the indices for the contours, which altogther fit best
		std::vector<Path> results;

		// contains bad paths, we won't test again
		// even a superset is not tested again, because if a subset is bad, we can't make it better if more contours are added
		std::vector<Path> prune;
		int eval_count = 0;

		while (!unknown.empty() && eval_count <= max_evals) {
			eval_count++;
			//take a path and combine it with others to see if the fit gets better
			Path current_path = unknown.back();
			unknown.pop_back();

			if (current_path.size() <= max_depth) {
				bool includes_bad_paths = fun::isSubset(current_path, prune);

				if (!includes_bad_paths) {
					int size = 0;

					for (int j : current_path) { size += contours.at(mapping.at(j)).size(); };

					std::vector<cv::Point> test_contour;

					test_contour.reserve(size);

					std::set<int> test_contour_indices;

					//concatenate contours to one contour
					for (int k : current_path) {
						const std::vector<cv::Point>& c = contours.at(mapping.at(k));
						test_contour.insert(test_contour.end(), c.begin(), c.end());
						test_contour_indices.insert(mapping.at(k));
					}

					//we have not tested this, and a subset of it was successful before
					double fit_variance = detector::contour_ellipse_deviation_variance(test_contour);

					if (fit_variance < props.initial_ellipse_fit_treshhold) {
						//yes this was good, keep as solution
						results.push_back(test_contour_indices);

						//lets explore more by creating paths to each remaining node
						for (int l = (*current_path.rbegin()) + 1 ; l < mapping.size(); l++) {
							unknown.push_back(current_path);
							unknown.back().insert(l); // add a new path
						}

					} else {
						prune.push_back(current_path);
					}
				}
			}
		}

		return results;
	};
	std::set<int> seed_indices_set = std::set<int>(seed_indices.begin(), seed_indices.end());
	std::vector<std::set<int>> solutions = pruning_quick_combine(split_contours, seed_indices_set, 1000, 5);

	//find largest sets which contains all previous ones
	auto filter_subset = [](std::vector<std::set<int>>& sets) {
		std::vector<std::set<int>> filtered_set;
		int i = 0;

		for (auto& current_set : sets) {
			//check if this current_set is a subset of set
			bool isSubset = false;

			for (int j = 0; j < sets.size(); j++) {
				if (j == i) continue;// don't compare to itself

				auto& set = sets.at(j);
				// for std::includes both containers need to be ordered. std::set guarantees this
				isSubset |= std::includes(set.begin(), set.end(), current_set.begin(), current_set.end());
			}

			if (!isSubset) {
				filtered_set.push_back(current_set);
			}

			i++;
		}

		return filtered_set;
	};

	solutions = filter_subset(solutions);

	int index_best_Solution = -1;
	int enum_index = 0;

	for (auto& s : solutions) {
		std::vector<cv::Point> test_contour;

		//concatenate contours to one contour
		for (int i : s) {
			std::vector<cv::Point>& c = split_contours.at(i);
			test_contour.insert(test_contour.end(), c.begin(), c.end());
		}

		auto cv_ellipse = cv::fitEllipse(test_contour);

		if (use_debug_image) {
			cv::ellipse(debug_image, cv_ellipse , mRed_color);
		}

		Ellipse ellipse = toEllipse<double>(cv_ellipse);
		double ellipse_circumference = ellipse.circumference();
		std::vector<cv::Point>  support_pixels = ellipse_true_support(props, ellipse, ellipse_circumference, raw_edges);
		double support_ratio = support_pixels.size() / ellipse_circumference;
		//TODO: refine the selection of final candidate

		if (support_ratio >= props.final_perimeter_ratio_range_min && is_Ellipse(cv_ellipse)) {
			index_best_Solution = enum_index;

			if (support_ratio >= props.strong_perimeter_ratio_range_min) {
				ellipse.center[0] += roi.x;
				ellipse.center[1] += roi.y;
				mPrior_ellipse = ellipse;
				mUse_strong_prior = true;

				if (use_debug_image) {
					cv::ellipse(debug_image, cv_ellipse , mGreen_color);
				}
			}
		}

		enum_index++;
	}

	// select ellipse

	if (index_best_Solution == -1) {
		// no good final ellipse found
		result->confidence = 0.0;
		// Does it make sense to return anything?
		//result->ellipse = toEllipse<double>(refit_ellipse);
		//result->final_contours = std::move(best_contours);
		//result->contours = std::move(split_contours);
		result->raw_edges = std::move(raw_edges);
		return result;
	}

	auto& best_solution = solutions.at(index_best_Solution);
	std::vector<std::vector<cv::Point>> best_contours;
	std::vector<cv::Point>best_contour;

	//concatenate contours to one contour
	for (int i : best_solution) {
		std::vector<cv::Point>& c = split_contours.at(i);
		best_contours.push_back(c);
		best_contour.insert(best_contour.end(), c.begin(), c.end());
	}

	auto cv_ellipse = cv::fitEllipse(best_contour);
	// final calculation of goodness of fit
	auto ellipse = toEllipse<double>(cv_ellipse);
	double ellipse_circumference = ellipse.circumference();
	std::vector<cv::Point>  support_pixels = ellipse_true_support(props, ellipse, ellipse_circumference, raw_edges);
	double support_ratio = support_pixels.size() / ellipse_circumference;
	double goodness = std::min(double(0.99), support_ratio);
	//final fitting and return of result
	auto final_fitting = [&](std::vector<std::vector<cv::Point>>& contours, cv::Mat & edges) -> std::vector<cv::Point> {
		//use the real edge pixels to fit, not the approximated contours
		//draw the contours into a support mask with thickness 2
		cv::Mat support_mask(edges.rows, edges.cols, edges.type(), {0, 0, 0});
		cv::polylines(support_mask, contours, false, {255, 255, 255}, 2);

		cv::Mat new_edges;
		std::vector<cv::Point> new_contours;
		cv::min(edges, support_mask, new_edges);

		// can't do this here, because final result gets much distorted.
		// see if it even can crash !!!
		//new_edges.at<int>(0,0) = 1; // find zero crashes if it doesn't find one. remove if opencv version is 3.0 or above
		cv::findNonZero(new_edges, new_contours);

		if (visualize)
		{
			cv::Mat overlay = color_image.colRange(roi.x, roi.x + roi.width).rowRange(roi.y, roi.y + roi.height);
			cv::Mat g_channel(overlay.rows, overlay.cols, CV_8UC1);
			cv::Mat b_channel(overlay.rows, overlay.cols, CV_8UC1);
			cv::Mat r_channel(overlay.rows, overlay.cols, CV_8UC1);
			cv::Mat out[] = {b_channel, g_channel, r_channel};
			cv::split(overlay, out);
			cv::threshold(new_edges, new_edges, 0, 255, cv::THRESH_BINARY);
			cv::max(r_channel, new_edges, r_channel);
			cv::merge(out, 3, overlay);
		}

		return new_contours;

	};
	std::vector<cv::Point> final_edges =  final_fitting(best_contours, edges);
	auto cv_new_Ellipse = cv::fitEllipse(final_edges);
	double size_difference  = std::abs(1.0 - cv_ellipse.size.height / cv_new_Ellipse.size.height);
	auto& cv_final_Ellipse = cv_ellipse;
	if (is_Ellipse(cv_new_Ellipse) && size_difference < 0.3) {
		if (use_debug_image) {
			cv::ellipse(debug_image, cv_new_Ellipse, mGreen_color);
		}

		cv_final_Ellipse = cv_new_Ellipse;
	}

	//cv::imshow("debug_image", debug_image);
	mPupil_Size =  cv_final_Ellipse.size.height;
	result->confidence = goodness;
	result->ellipse = toEllipse<double>(cv_final_Ellipse);
	result->ellipse.center[0] += roi.x;
	result->ellipse.center[1] += roi.y;
	//result->final_contours = std::move(best_contours);

	// TODO optimize
	// just do this if we really need it
	// std::for_each(contours.begin(), contours.end(), [&](const Contour_2D & contour) {
	// 	std::vector<cv::Point> approx_c;
	// 	cv::approxPolyDP(contour, approx_c, 1.0, false);
	// 	approx_contours.push_back(std::move(approx_c));
	// });
	// split_contours = singleeyefitter::detector::split_rough_contours_optimized(approx_contours, 150.0 , split_contour_size_min);

	// result->contours = std::move(split_contours);
	 result->final_edges = std::move(final_edges);// need for optimisation

	result->raw_edges = std::move(raw_edges);
	return result;
}