Example #1
static void onMouse(int event, int x, int y, int flags, void *param)
{
    Mat im_draw;
    im_select.copyTo(im_draw);

    if(event == CV_EVENT_LBUTTONUP && !tl_set)
    {
        tl = Point(x,y);
        tl_set = true;
    }

    else if(event == CV_EVENT_LBUTTONUP && tl_set)
    {
        br = Point(x,y);
        br_set = true;
        screenLog(im_draw, "Initializing...");
    }

    if (!tl_set) screenLog(im_draw, "Click on the top left corner of the object");
    else
    {
        rectangle(im_draw, tl, Point(x, y), Scalar(255,0,0));

        if (!br_set) screenLog(im_draw, "Click on the bottom right corner of the object");
    }

    imshow(win_name_, im_draw);
}
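A minimal usage sketch for the callback above (hypothetical; it assumes the snippet's globals win_name_ and br_set, and that HighGUI dispatches mouse events while waitKey runs):

static void selectBoundingBox()
{
    namedWindow(win_name_);
    setMouseCallback(win_name_, onMouse, 0); // route mouse events to onMouse
    while (!br_set)
        waitKey(10); // give HighGUI a chance to dispatch events
}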
Example #2
Point3d Camera::pixelToWorld(const Point2d& pixel) const
{
	double im[2] = {pixel.x, pixel.y};
	double wl[3];
	double z;

	// setup intrinsic and extrinsic parameters
	CvMat img_frm = cvMat(1, 1, CV_64FC2, im);
	Mat world_frm = Mat(T_ROWS, T_COLS, TYPE, wl);

	// convert from distorted pixels to normalized camera frame
	// cv:: version does not allow doubles for some odd reason,
	// so use the C version
	CvMat A = _A;
	CvMat k = _k;
	cvUndistortPoints(&img_frm, &img_frm, &A, &k);
	
	// convert from camera frame to world frame
	/* Wrong: z != t3, since z = r31*X + r32*Y + t3
	z = _t(2, 0) ? _t(2, 0) : 1;
	wl[0] = z*im[0] - _t(0, 0);
	wl[1] = z*im[1] - _t(1, 0);
	wl[2] = 0;
	world_frm = _R.t() * world_frm;
	*/

	wl[0] = (_R(1,1)*_t(0,0) - _R(0,1)*_t(1,0) + _R(2,1)*_t(1,0)*im[0] - _R(1,1)*_t(2,0)*im[0]
	         - _R(2,1)*_t(0,0)*im[1] + _R(0,1)*_t(2,0)*im[1])
	      / (_R(0,1)*_R(1,0) - _R(0,0)*_R(1,1) + _R(1,1)*_R(2,0)*im[0] - _R(1,0)*_R(2,1)*im[0]
	         - _R(0,1)*_R(2,0)*im[1] + _R(0,0)*_R(2,1)*im[1]);
	wl[1] = (-_R(0,0)*_t(1,0) + _R(2,0)*_t(1,0)*im[0] + _R(1,0)*(_t(0,0) - _t(2,0)*im[0])
	         - _R(2,0)*_t(0,0)*im[1] + _R(0,0)*_t(2,0)*im[1])
	      / (-_R(0,1)*_R(1,0) + _R(0,0)*_R(1,1) - _R(1,1)*_R(2,0)*im[0] + _R(1,0)*_R(2,1)*im[0]
	         + _R(0,1)*_R(2,0)*im[1] - _R(0,0)*_R(2,1)*im[1]);
	wl[2] = 0;

	return Point3d(wl[0], wl[1], wl[2]);
}
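As a side note on the comment above: newer OpenCV releases do accept CV_64FC2 point matrices in the C++ wrapper, so the C round-trip could plausibly be replaced by a sketch like this (assuming _A and _k convert to cv::Mat):

	Mat pts(1, 1, CV_64FC2, im);            // wraps the same buffer as img_frm
	cv::undistortPoints(pts, pts, Mat(_A), Mat(_k));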
Example #3
TEST(hole_filling_test, elliptical_hole_on_repeated_texture_should_give_good_result)
{
    Mat img = imread("test_images/brick_pavement.jpg");
    convert_for_computation(img, 0.5f);

    // Add some hole
    Mat hole_mask = Mat::zeros(img.size(), CV_8U);

    Point center(100, 110);
    Size axis(20, 5);
    float angle = 20;
    ellipse(hole_mask, center, axis, angle, 0, 360, Scalar(255), -1);
    int patch_size = 7;
    HoleFilling hf(img, hole_mask, patch_size);

    // Dump image with hole as black region.
    Mat img_with_hole_bgr;
    cvtColor(img, img_with_hole_bgr, CV_Lab2BGR);
    img_with_hole_bgr.setTo(Scalar(0,0,0), hole_mask);
    imwrite("brick_pavement_hole.exr", img_with_hole_bgr);

    // Dump reconstructed image
    Mat filled = hf.run();
    cvtColor(filled, filled, CV_Lab2BGR);
    imwrite("brick_pavement_hole_filled.exr", filled);


    // The reconstructed image should be close to the original one, in this very simple case.
    Mat img_bgr;
    cvtColor(img, img_bgr, CV_Lab2BGR);
    double ssd = norm(img_bgr, filled, cv::NORM_L2SQR);
    EXPECT_LT(ssd, 0.2);
}
Example #4
int display(Mat im, CMT & cmt)
{
    //Visualize the output
    //It is ok to draw on im itself, as CMT only uses the grayscale image
    for(size_t i = 0; i < cmt.points_active.size(); i++)
    {
        circle(im, cmt.points_active[i], 2, Scalar(0, 255, 0));
    }

    Scalar color;
    if (cmt.confidence < 0.3) {
      color = Scalar(0, 0, 255);
    } else if (cmt.confidence < 0.4) {
      color = Scalar(0, 255, 255);
    } else {
      color = Scalar(255, 0, 0);
    }
    Point2f vertices[4];
    cmt.bb_rot.points(vertices);
    for (int i = 0; i < 4; i++)
    {
        line(im, vertices[i], vertices[(i+1)%4], color);
    }

    imshow(WIN_NAME, im);

    return waitKey(5);
}
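A hypothetical driver loop for display(), assuming the CppMT-style API where CMT::processFrame takes a grayscale frame, and an opened cv::VideoCapture named cap:

Mat frame, gray;
while (cap.read(frame))
{
    cvtColor(frame, gray, CV_BGR2GRAY);
    cmt.processFrame(gray);          // update the tracker
    if (display(frame, cmt) == 27)   // draw results; ESC quits
        break;
}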
Example #5
TEST(hole_filling_test, rectangular_hole_on_repeated_texture_should_give_good_result)
{
    Mat img = imread("test_images/brick_pavement.jpg");
    convert_for_computation(img, 0.5f);

    // Add some hole
    Mat hole_mask = Mat::zeros(img.size(), CV_8U);

    hole_mask(Rect(72, 65, 5, 20)) = 255;
    int patch_size = 7;
    HoleFilling hf(img, hole_mask, patch_size);

    // Dump image with hole as black region.
    Mat img_with_hole_bgr;
    cvtColor(img, img_with_hole_bgr, CV_Lab2BGR);
    img_with_hole_bgr.setTo(Scalar(0,0,0), hole_mask);
    imwrite("brick_pavement_hole.exr", img_with_hole_bgr);

    // Dump reconstructed image
    Mat filled = hf.run();
    cvtColor(filled, filled, CV_Lab2BGR);
    imwrite("brick_pavement_hole_filled.exr", filled);


    // The reconstructed image should be close to the original one, in this very simple case.
    Mat img_bgr;
    cvtColor(img, img_bgr, CV_Lab2BGR);
    double ssd = norm(img_bgr, filled, cv::NORM_L2SQR);
    EXPECT_LT(ssd, 0.2);
}
Example #6
void Descriptor::get_proper_colors( const Mat &image, Mat &converted_image ){
    
    if (image.channels() == 1 && required_color_mode() != BW){
        cout << "Must give a colored image" << endl;
        // a bare `throw;` with no active exception calls std::terminate
        throw std::invalid_argument("Descriptor requires a color image"); // needs <stdexcept>
    }
    switch (required_color_mode()){
        case BW:
            if (image.channels() == 1)
                image.copyTo(converted_image); // already grayscale; cvtColor rejects 1-channel input for BGR2GRAY
            else
                cvtColor(image, converted_image, CV_BGR2GRAY);
            break;
        case RGB:
            cvtColor(image, converted_image, CV_BGR2RGB);
            break;
        case HSV:
            // cvtColor allocates the destination itself, so no explicit
            // create() is needed (an 8-bit input yields an 8-bit HSV image).
            cvtColor(image, converted_image, CV_BGR2HSV);
            break;
        case LUV:
            cvtColor(image, converted_image, CV_BGR2Luv);
            break;
        default:
            image.copyTo(converted_image);
            break;
    }
}
bool DatasetCIFAR10::loadBatch(std::string filename)
{
	std::ifstream ifs(filename);

	if (not ifs.good()) {
		std::cerr << "(loadImages) ERROR cannot open file: " << filename << std::endl;
		return false;
	}

	while (not ifs.eof()) {
		char lbl = 0;
		ifs.read(&lbl, 1);
		char buffer[imgSize * 3];
		ifs.read(buffer, imgSize * 3);
		if (ifs.eof())
			break;
		// load channels, convert to float, normalize
		cv::Size size(imgCols, imgRows); // cv::Size is (width, height); CIFAR images are square, but keep the order right
		std::vector<Mat> mats({Mat(size, CV_8UC1, buffer, Mat::AUTO_STEP),
		                       Mat(size, CV_8UC1, buffer + imgSize, Mat::AUTO_STEP),
		                       Mat(size, CV_8UC1, buffer + 2 * imgSize, Mat::AUTO_STEP)});
		Mat img(size, CV_8UC3);
		cv::merge(mats, img);
		img.convertTo(img, CV_64FC3);
		img = img / 255.0;
		img = img.reshape(1, imgRows);

		labels.push_back(lbl);
		images.push_back(img);
	}
	size = images.size();
	return true;
}
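A usage sketch, assuming a DatasetCIFAR10 object exposing loadBatch(), labels and images as above; the file names are the standard CIFAR-10 binary batch names:

DatasetCIFAR10 ds;
for (int b = 1; b <= 5; ++b)
	ds.loadBatch("cifar-10-batches-bin/data_batch_" + std::to_string(b) + ".bin");
ds.loadBatch("cifar-10-batches-bin/test_batch.bin"); // held-out test images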
void service_request(void* a){
	//cout<<"111\n";
    mutex1.lock();
    for(int i=0; i<disks.size(); ++i){
    	thread t2 ((thread_startfunc_t) requester, (void *)i);
    }
    mutex1.unlock();
    mutex1.lock();
    while(diskQueue.size()<max_disk_queue && totalnum>0){
    	cv2.wait(mutex1);
    }
    int min=abs(track-diskQueue[0].second);
    int n=0;
    for(int i=1; i<diskQueue.size(); ++i){
    	if(abs(track-diskQueue[i].second) < min){
    		min=abs(track-diskQueue[i].second);
    		n=i;
    	}
    }
    track=diskQueue[n].second;
    cout<<"service requester "<<diskQueue[n].first<<" track "<<diskQueue[n].second<<endl;
    diskQueue.erase(diskQueue.begin()+n);
    cv1.signal();
    mutex1.unlock();
}
TEST(SupervisedDescentOptimiser, SinConvergence) {
	// sin(x):
	auto h = [](Mat value, size_t, int) { return std::sin(value.at<float>(0)); };
	auto h_inv = [](float value) {
		if (value >= 1.0f) // our upper border of y is 1.0f, but it can be a bit larger due to floating point representation. asin then returns NaN.
			return std::asin(1.0f);
		else
			return std::asin(value);
		};

	float startInterval = -1.0f; float stepSize = 0.2f; int numValues = 11; Mat y_tr(numValues, 1, CV_32FC1); // sin: [-1:0.2:1]
	{
		vector<float> values(numValues);
		strided_iota(std::begin(values), std::next(std::begin(values), numValues), startInterval, stepSize);
		y_tr = Mat(values, true);
	}
	Mat x_tr(numValues, 1, CV_32FC1); // Will be the inverse of y_tr
	{
		vector<float> values(numValues);
		std::transform(y_tr.begin<float>(), y_tr.end<float>(), begin(values), h_inv);
		x_tr = Mat(values, true);
	}

	Mat x0 = 0.5f * Mat::ones(numValues, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	SupervisedDescentOptimiser<LinearRegressor<>> sdo({ LinearRegressor<>() });
	
	// Test the callback mechanism as well: (better move to a separate unit test?)
	auto checkResidual = [&](const Mat& currentX) {
		double residual = cv::norm(currentX, x_tr, cv::NORM_L2) / cv::norm(x_tr, cv::NORM_L2);
		EXPECT_DOUBLE_EQ(0.21369851877468238, residual);
	};

	sdo.train(x_tr, x0, y_tr, h, checkResidual);

	// Make sure the training converges, i.e. the residual is correct on the training data:
	Mat predictions = sdo.test(x0, y_tr, h);
	double trainingResidual = normalisedLeastSquaresResidual(predictions, x_tr);
	EXPECT_DOUBLE_EQ(0.21369851877468238, trainingResidual);

	// Test the trained model:
	// Test data with finer resolution:
	float startIntervalTest = -1.0f; float stepSizeTest = 0.05f; int numValuesTest = 41; Mat y_ts(numValuesTest, 1, CV_32FC1); // sin: [-1:0.05:1]
	{
		vector<float> values(numValuesTest);
		strided_iota(std::begin(values), std::next(std::begin(values), numValuesTest), startIntervalTest, stepSizeTest);
		y_ts = Mat(values, true);
	}
	Mat x_ts_gt(numValuesTest, 1, CV_32FC1); // Will be the inverse of y_ts
	{
		vector<float> values(numValuesTest);
		std::transform(y_ts.begin<float>(), y_ts.end<float>(), begin(values), h_inv);
		x_ts_gt = Mat(values, true);
	}
	Mat x0_ts = 0.5f * Mat::ones(numValuesTest, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.
	
	predictions = sdo.test(x0_ts, y_ts, h);
	double testResidual = normalisedLeastSquaresResidual(predictions, x_ts_gt);
	ASSERT_NEAR(0.1800101229, testResidual, 0.0000000003);
}
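The tests in this file rely on a strided_iota helper. A minimal sketch consistent with how it is called here (std::iota with a configurable step):

template <typename ForwardIterator, typename T>
void strided_iota(ForwardIterator first, ForwardIterator last, T value, T stride)
{
	while (first != last) {
		*first++ = value;
		value += stride;
	}
}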
Example #10
void callback(int, void *)
{
  vector<Vec4i> lines;
  HoughLinesP(sent_to_callback, lines, 1, CV_PI / 180, lineThresh, minLineLength, maxLineGap);

  frame.copyTo(imgLines);
  imgLines = Scalar(0, 0, 0);
  // Collect only valid angles: pre-sizing the vector to lines.size() would
  // leave default-initialized zeros that skew the mean/mode computed below.
  vector<double> angles;
  angles.reserve(lines.size());

  lineCount = lines.size();
  for (size_t i = 0; i < lines.size(); i++)
  {
    Vec4i l = lines[i];
    line(imgLines, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 255, 0), 1, CV_AA);
    if ((l[2] == l[0]) || (l[1] == l[3])) continue; // skip exactly vertical/horizontal segments
    // angle of the segment, measured from the vertical axis
    angles.push_back(atan(static_cast<double>(l[2] - l[0]) / (l[1] - l[3])));
  }

  imshow("LINES", imgLines + frame);

  // If there are many lines, one or two stray lines barely affect the mean;
  // with only a few lines, the mode is more robust against those strays.

  if (!angles.empty() && angles.size() < 10)
    finalAngle = computeMode(angles);
  else if (!angles.empty())
    finalAngle = computeMean(angles);
}
TEST(SupervisedDescentOptimiser, SinConvergenceCascade) {
	// sin(x):
	auto h = [](Mat value, size_t, int) { return std::sin(value.at<float>(0)); };
	auto h_inv = [](float value) {
		if (value >= 1.0f) // our upper border of y is 1.0f, but it can be a bit larger due to floating point representation. asin then returns NaN.
			return std::asin(1.0f);
		else
			return std::asin(value);
	};

	float startInterval = -1.0f; float stepSize = 0.2f; int numValues = 11; Mat y_tr(numValues, 1, CV_32FC1); // sin: [-1:0.2:1]
	{
		vector<float> values(numValues);
		strided_iota(std::begin(values), std::next(std::begin(values), numValues), startInterval, stepSize);
		y_tr = Mat(values, true);
	}
	Mat x_tr(numValues, 1, CV_32FC1); // Will be the inverse of y_tr
	{
		vector<float> values(numValues);
		std::transform(y_tr.begin<float>(), y_tr.end<float>(), begin(values), h_inv);
		x_tr = Mat(values, true);
	}

	Mat x0 = 0.5f * Mat::ones(numValues, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	vector<LinearRegressor<>> regressors(10);
	SupervisedDescentOptimiser<LinearRegressor<>> sdo(regressors);
	sdo.train(x_tr, x0, y_tr, h);

	// Make sure the training converges, i.e. the residual is correct on the training data:
	Mat predictions = sdo.test(x0, y_tr, h);
	double trainingResidual = normalisedLeastSquaresResidual(predictions, x_tr);
	EXPECT_NEAR(0.040279395, trainingResidual, 0.00000008);
		
	// Test the trained model:
	// Test data with finer resolution:
	float startIntervalTest = -1.0f; float stepSizeTest = 0.05f; int numValuesTest = 41; Mat y_ts(numValuesTest, 1, CV_32FC1); // sin: [-1:0.05:1]
	{
		vector<float> values(numValuesTest);
		strided_iota(std::begin(values), std::next(std::begin(values), numValuesTest), startIntervalTest, stepSizeTest);
		y_ts = Mat(values, true);
	}
	Mat x_ts_gt(numValuesTest, 1, CV_32FC1); // Will be the inverse of y_ts
	{
		vector<float> values(numValuesTest);
		std::transform(y_ts.begin<float>(), y_ts.end<float>(), begin(values), h_inv);
		x_ts_gt = Mat(values, true);
	}
	Mat x0_ts = 0.5f * Mat::ones(numValuesTest, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	predictions = sdo.test(x0_ts, y_ts, h);
	double testResidual = normalisedLeastSquaresResidual(predictions, x_ts_gt);
	ASSERT_NEAR(0.026156775, testResidual, 0.00000005);
}
string cv__str__(const cv& v)
{
    ostringstream sout;
    for (long i = 0; i < v.size(); ++i)
    {
        sout << v(i);
        if (i+1 < v.size())
            sout << "\n";
    }
    return sout.str();
}
void cv__setitem__(cv& c, long p, double val)
{
    if (p < 0) {
        p = c.size() + p; // negative index
    }
    if (p > c.size()-1 || p < 0) { // also reject indices still negative after wrap-around, as __getitem__ does
        PyErr_SetString( PyExc_IndexError, "index out of range"
        );
        boost::python::throw_error_already_set();
    }
    c(p) = val;
}
double cv__getitem__(cv& m, long r)
{
    if (r < 0) {
        r = m.size() + r; // negative index
    }
    if (r > m.size()-1 || r < 0) {
        PyErr_SetString( PyExc_IndexError, "index out of range"
        );
        boost::python::throw_error_already_set();
    }
    return m(r);
}
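How these three helpers would plausibly be exposed with Boost.Python (a sketch; the actual class_ registration for dlib's vector type lives elsewhere in the bindings):

class_<cv>("vector")
    .def("__str__", &cv__str__)
    .def("__repr__", &cv__repr__)
    .def("__getitem__", &cv__getitem__)
    .def("__setitem__", &cv__setitem__);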
Example #15
 void resize(Mat& img, float fx) {
     Mat dst;
     int inter = (fx > 1.0) ? CV_INTER_CUBIC : CV_INTER_AREA;
     cv::resize(img, dst, Size(), fx, 1.0, inter);
     assert(img.rows == dst.rows);
     if (img.cols > dst.cols) {
         dst.copyTo(img(Range::all(), Range(0, dst.cols)));
         img(Range::all(), Range(dst.cols, img.cols)) = cv::Scalar::all(0);
     } else {
         dst(Range::all(), Range(0, img.cols)).copyTo(img);
     }
 }
Example #16
int xOffset(Mat y_LR) {
  int w = y_LR.size().width / 2;
  int h = y_LR.size().height;
  Mat left(y_LR, Rect(0, 0, w, h));
  Mat right(y_LR, Rect(w, 0, w, h));
  Mat disp(h, w, CV_16S);
  disparity(left, right, disp);
  // now compute the average disparity
  Mat mask = (disp > -200) & (disp < 2000); // but not where it's out of range
  // FIXME compute the median instead
  Scalar avg = cv::mean(disp, mask);
  return avg[0];
}
string cv__repr__ (const cv& v)
{
    std::ostringstream sout;
    sout << "dlib.vector([";
    for (long i = 0; i < v.size(); ++i)
    {
        sout << v(i);
        if (i+1 < v.size())
            sout << ", ";
    }
    sout << "])";
    return sout.str();
}
Point FingerTracker::GetFingerTopPosition(Mat thresholdedFingerFrame) const
{
	CV_Assert(thresholdedFingerFrame.isContinuous()); // the flat pointer walk below assumes contiguous rows
	uchar* ptr = thresholdedFingerFrame.data;
	for (int i = 0; i < thresholdedFingerFrame.rows * thresholdedFingerFrame.cols; i++)
	{
		if (*ptr++ == 255)
		{
			int y = i / thresholdedFingerFrame.cols;
			int x = i % thresholdedFingerFrame.cols;
			return Point(x,y);
		}
	}
	return Point(0,0);
}
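For a strictly binary mask, an equivalent formulation (assuming OpenCV >= 2.4.4) uses cv::findNonZero, which scans in row-major order, so the first reported pixel is the topmost foreground pixel:

std::vector<cv::Point> white;
cv::findNonZero(thresholdedFingerFrame, white);
cv::Point top = white.empty() ? cv::Point(0, 0) : white.front();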
void DeepPyramid::constructImagePyramid(const Mat& img, vector<Mat>& imgPyramid) const {
    Size imgSize(img.cols, img.rows);
    cout << "Create image pyramid..." << endl;
    for (int level = 0; level < levelCount; level++) {
        Mat imgAtLevel(net->inputLayerSize(), CV_8UC3, Scalar::all(0));

        Mat resizedImg;
        Size resizedImgSize = embeddedImageSize(imgSize, level);
        resize(img, resizedImg, resizedImgSize);
        resizedImg.copyTo(imgAtLevel(Rect(Point(0, 0), resizedImgSize)));
        imgPyramid.push_back(imgAtLevel);
    }
    cout << "Status: Success!" << endl;
}
TEST(SupervisedDescentOptimiser, ExpConvergenceCascade) {
	// exp(x):
	auto h = [](Mat value, size_t, int) { return std::exp(value.at<float>(0)); };
	auto h_inv = [](float value) { return std::log(value); };

	float startInterval = 1.0f; float stepSize = 3.0f; int numValues = 10; Mat y_tr(numValues, 1, CV_32FC1); // exp: [1:3:28]
	{
		vector<float> values(numValues);
		strided_iota(std::begin(values), std::next(std::begin(values), numValues), startInterval, stepSize);
		y_tr = Mat(values, true);
	}
	Mat x_tr(numValues, 1, CV_32FC1); // Will be the inverse of y_tr
	{
		vector<float> values(numValues);
		std::transform(y_tr.begin<float>(), y_tr.end<float>(), begin(values), h_inv);
		x_tr = Mat(values, true);
	}

	Mat x0 = 0.5f * Mat::ones(numValues, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	vector<LinearRegressor<>> regressors(10);
	SupervisedDescentOptimiser<LinearRegressor<>> sdo(regressors);
	sdo.train(x_tr, x0, y_tr, h);

	// Make sure the training converges, i.e. the residual is correct on the training data:
	Mat predictions = sdo.test(x0, y_tr, h);
	double trainingResidual = normalisedLeastSquaresResidual(predictions, x_tr);
	EXPECT_NEAR(0.02510868, trainingResidual, 0.00000005);
	
	// Test the trained model:
	// Test data with finer resolution:
	float startIntervalTest = 1.0f; float stepSizeTest = 0.5f; int numValuesTest = 55; Mat y_ts(numValuesTest, 1, CV_32FC1); // exp: [1:0.5:28]
	{
		vector<float> values(numValuesTest);
		strided_iota(std::begin(values), std::next(std::begin(values), numValuesTest), startIntervalTest, stepSizeTest);
		y_ts = Mat(values, true);
	}
	Mat x_ts_gt(numValuesTest, 1, CV_32FC1); // Will be the inverse of y_ts
	{
		vector<float> values(numValuesTest);
		std::transform(y_ts.begin<float>(), y_ts.end<float>(), begin(values), h_inv);
		x_ts_gt = Mat(values, true);
	}
	Mat x0_ts = 0.5f * Mat::ones(numValuesTest, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	predictions = sdo.test(x0_ts, y_ts, h);
	double testResidual = normalisedLeastSquaresResidual(predictions, x_ts_gt);
	ASSERT_NEAR(0.01253494, testResidual, 0.00000004);
}
TEST(SupervisedDescentOptimiser, XCubeConvergenceCascade) {
	// x^3:
	auto h = [](Mat value, size_t, int) { return static_cast<float>(std::pow(value.at<float>(0), 3)); };
	auto h_inv = [](float value) { return std::cbrt(value); }; // cubic root

	float startInterval = -27.0f; float stepSize = 3.0f; int numValues = 19; Mat y_tr(numValues, 1, CV_32FC1); // cube: [-27:3:27]
	{
		vector<float> values(numValues);
		strided_iota(std::begin(values), std::next(std::begin(values), numValues), startInterval, stepSize);
		y_tr = Mat(values, true);
	}
	Mat x_tr(numValues, 1, CV_32FC1); // Will be the inverse of y_tr
	{
		vector<float> values(numValues);
		std::transform(y_tr.begin<float>(), y_tr.end<float>(), begin(values), h_inv);
		x_tr = Mat(values, true);
	}

	Mat x0 = 0.5f * Mat::ones(numValues, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	vector<LinearRegressor<>> regressors(10);
	SupervisedDescentOptimiser<LinearRegressor<>> sdo(regressors);
	sdo.train(x_tr, x0, y_tr, h);

	// Make sure the training converges, i.e. the residual is correct on the training data:
	Mat predictions = sdo.test(x0, y_tr, h);
	double trainingResidual = normalisedLeastSquaresResidual(predictions, x_tr);
	EXPECT_NEAR(0.04312725, trainingResidual, 0.00000002);

	// Test the trained model:
	// Test data with finer resolution:
	float startIntervalTest = -27.0f; float stepSizeTest = 0.5f; int numValuesTest = 109; Mat y_ts(numValuesTest, 1, CV_32FC1); // cube: [-27:0.5:27]
	{
		vector<float> values(numValuesTest);
		strided_iota(std::begin(values), std::next(std::begin(values), numValuesTest), startIntervalTest, stepSizeTest);
		y_ts = Mat(values, true);
	}
	Mat x_ts_gt(numValuesTest, 1, CV_32FC1); // Will be the inverse of y_ts
	{
		vector<float> values(numValuesTest);
		std::transform(y_ts.begin<float>(), y_ts.end<float>(), begin(values), h_inv);
		x_ts_gt = Mat(values, true);
	}
	Mat x0_ts = 0.5f * Mat::ones(numValuesTest, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	predictions = sdo.test(x0_ts, y_ts, h);
	double testResidual = normalisedLeastSquaresResidual(predictions, x_ts_gt);
	ASSERT_NEAR(0.05889855, testResidual, 0.00000002);
}
TEST(SupervisedDescentOptimiser, ErfConvergence) {
	// erf(x):
	auto h = [](Mat value, size_t, int) { return std::erf(value.at<float>(0)); };
	auto h_inv = [](float value) { return boost::math::erf_inv(value); };

	float startInterval = -0.99f; float stepSize = 0.11f; int numValues = 19; Mat y_tr(numValues, 1, CV_32FC1); // erf: [-0.99:0.11:0.99]
	{
		vector<float> values(numValues);
		strided_iota(std::begin(values), std::next(std::begin(values), numValues), startInterval, stepSize);
		y_tr = Mat(values, true);
	}
	Mat x_tr(numValues, 1, CV_32FC1); // Will be the inverse of y_tr
	{
		vector<float> values(numValues);
		std::transform(y_tr.begin<float>(), y_tr.end<float>(), begin(values), h_inv);
		x_tr = Mat(values, true);
	}

	Mat x0 = 0.5f * Mat::ones(numValues, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	SupervisedDescentOptimiser<LinearRegressor<>> sdo({ LinearRegressor<>() });
	sdo.train(x_tr, x0, y_tr, h);

	// Make sure the training converges, i.e. the residual is correct on the training data:
	Mat predictions = sdo.test(x0, y_tr, h);
	double trainingResidual = normalisedLeastSquaresResidual(predictions, x_tr);
	EXPECT_NEAR(0.30944183, trainingResidual, 0.00000005);

	// Test the trained model:
	// Test data with finer resolution:
	float startIntervalTest = -0.99f; float stepSizeTest = 0.03f; int numValuesTest = 67; Mat y_ts(numValuesTest, 1, CV_32FC1); // erf: [-0.99:0.03:0.99]
	{
		vector<float> values(numValuesTest);
		strided_iota(std::begin(values), std::next(std::begin(values), numValuesTest), startIntervalTest, stepSizeTest);
		y_ts = Mat(values, true);
	}
	Mat x_ts_gt(numValuesTest, 1, CV_32FC1); // Will be the inverse of y_ts
	{
		vector<float> values(numValuesTest);
		std::transform(y_ts.begin<float>(), y_ts.end<float>(), begin(values), h_inv);
		x_ts_gt = Mat(values, true);
	}
	Mat x0_ts = 0.5f * Mat::ones(numValuesTest, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	predictions = sdo.test(x0_ts, y_ts, h);
	double testResidual = normalisedLeastSquaresResidual(predictions, x_ts_gt);
	ASSERT_NEAR(0.25736006, testResidual, 0.0000002);
}
cv::Mat montageList(const Dataset &dataset, const std::vector<std::pair<int, int>> &list,
                    int tiles_per_row = 30)
{
	using cv::Mat;

	const int count = list.size();
	const int xMargin = 2;
	const int yMargin = 14;

	const int width = dataset.imgCols + xMargin;
	const int height = dataset.imgRows + yMargin;

	assert(dataset.images.size() > 0);
	const int type = dataset.images[0].type();

	if (list.size() == 0)
		return Mat(0, 0, type);

	// ceil-divide so an exactly-full last row does not allocate an extra empty one
	Mat mat = Mat::ones(((count + tiles_per_row - 1) / tiles_per_row) * height, tiles_per_row * width, type);

	for (size_t i = 0; i < list.size(); i++) {
		int x = (i % tiles_per_row) * width;
		int y = (i / tiles_per_row) * height;
		int id = list[i].first;
		dataset.images[id].copyTo(mat(cv::Rect(x, y, dataset.imgCols, dataset.imgRows)));

		std::string label =
		    std::to_string(list[i].second) + "/" + std::to_string(dataset.labels[id]);

		cv::putText(mat, label, cv::Point(x, y + height - 2), cv::FONT_HERSHEY_SIMPLEX, 0.4,
		            cv::Scalar({0.0, 0.0, 0.0, 0.0}));
	}
	return mat;
}
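A hypothetical call site: render a montage of misclassified test images, given (image id, predicted label) pairs, and write it out (the dataset stores images normalized to [0,1], so scale before saving):

cv::Mat sheet = montageList(testset, mistakes);
sheet.convertTo(sheet, CV_8U, 255.0);
cv::imwrite("mistakes.png", sheet);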
void Calibration::setDefaults()
{
	views.n = 0;
	views.prompt = false;
	views.save_views = false;

	find_chessboard.flags = 0;
	find_chessboard.grid = Size(0, 0);

	sub_pixel.win = WIN_SIZE;
	sub_pixel.zz = ZERO_ZNE;
	sub_pixel.crit = ERR_TOL;

	calib_cam.flags = 0;

	solve_pnp.useExtGuess = false;

	polka_dots.dilate = 0;
	polka_dots.erode = 0;
	polka_dots.thr1 = 0;
	polka_dots.thr2 = 0;

	intrinsic_params.file = "";
	extrinsic_params.file = "";
}
Example #25
static bool read_labels(const string& path,
    vector<string>& filenames, vector< vector<Rect> >& labels)
{
    string labels_path = path + "/gt.txt";
    string filename, line;
    int x1, y1, x2, y2;
    char delim;
    ifstream ifs(labels_path.c_str());
    if( !ifs.good() )
        return false;

    while( getline(ifs, line) )
    {
        stringstream stream(line);
        stream >> filename;
        filenames.push_back(path + "/" + filename);
        vector<Rect> filename_labels;
        while( stream >> x1 >> y1 >> x2 >> y2 >> delim )
        {
            // x2,y2 name the bottom-right corner, so build the Rect from two
            // points instead of passing them where width/height are expected
            filename_labels.push_back(Rect(Point(x1, y1), Point(x2, y2)));
        }
        labels.push_back(filename_labels);
    }
    return true;
}
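For reference, one gt.txt line this parser accepts could look like the following (hypothetical values; note that every rectangle, including the last, must be followed by a one-character delimiter, or the final extraction of delim fails and that rectangle is dropped):

img_001.jpg 34 80 112 160 , 200 64 260 140 ,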
void ShapeModel::viewShapeModel()
{
    int q1, q2;
    static ModelViewInfo vData;
    vData.vList.resize(this->nShapeParams, 30/2); // default each parameter slider to the midpoint of its 0..30 range
    vData.pModel = this;
    vData.curParam = 0;
    viewShapeModelUpdate(&vData);
    q1 = 15;
    q2 = 0;
    namedWindow("Viewing Shape Model", CV_WINDOW_AUTOSIZE);
    createTrackbar("param value", "Viewing Shape Model", 
                   &q1, 30, &viewShapeUpdateValue, &vData);
    createTrackbar("which param", "Viewing Shape Model", 
                   &q2, nShapeParams-1, &viewShapeUpdateCurParam, &vData);
}
Example #27
/**
 * Convenience function that fits the shape model and expression blendshapes to
 * landmarks. Makes the fitted PCA shape and blendshape coefficients accessible
 * via the out parameters \p pca_shape_coefficients and \p blendshape_coefficients.
 * It iterates PCA-shape and blendshape fitting until convergence
 * (usually it converges within 5 to 10 iterations).
 *
 * See fit_shape_model(cv::Mat, eos::morphablemodel::MorphableModel, std::vector<eos::morphablemodel::Blendshape>, std::vector<cv::Vec2f>, std::vector<int>, float lambda)
 * for a simpler overload that just returns the shape instance.
 *
 * @param[in] affine_camera_matrix The estimated pose as a 3x4 affine camera matrix that is used to fit the shape.
 * @param[in] morphable_model The 3D Morphable Model used for the shape fitting.
 * @param[in] blendshapes A vector of blendshapes that are being fit to the landmarks in addition to the PCA model.
 * @param[in] image_points 2D landmarks from an image to fit the model to.
 * @param[in] vertex_indices The vertex indices in the model that correspond to the 2D points.
 * @param[in] lambda Regularisation parameter of the PCA shape fitting.
 * @param[out] pca_shape_coefficients Output parameter that will contain the resulting pca shape coefficients.
 * @param[out] blendshape_coefficients Output parameter that will contain the resulting blendshape coefficients.
 * @return The fitted model shape instance.
 */
cv::Mat fit_shape_model(cv::Mat affine_camera_matrix, eos::morphablemodel::MorphableModel morphable_model, std::vector<eos::morphablemodel::Blendshape> blendshapes, std::vector<cv::Vec2f> image_points, std::vector<int> vertex_indices, float lambda, std::vector<float>& pca_shape_coefficients, std::vector<float>& blendshape_coefficients)
{
	using cv::Mat;
	
	Mat blendshapes_as_basis(blendshapes[0].deformation.rows, blendshapes.size(), CV_32FC1); // assert blendshapes.size() > 0 and all of them have same number of rows, and 1 col
	for (int i = 0; i < blendshapes.size(); ++i)
	{
		blendshapes[i].deformation.copyTo(blendshapes_as_basis.col(i));
	}

	std::vector<float> last_blendshape_coeffs, current_blendshape_coeffs; 
	std::vector<float> last_pca_coeffs, current_pca_coeffs;
	current_blendshape_coeffs.resize(blendshapes.size()); // starting values t_0, all zeros
	current_pca_coeffs.resize(morphable_model.get_shape_model().get_num_principal_components()); // starting values, all zeros
	Mat combined_shape;

	do // run at least once:
	{
		last_blendshape_coeffs = current_blendshape_coeffs;
		last_pca_coeffs = current_pca_coeffs;
		// Estimate the PCA shape coefficients with the current blendshape coefficients (0 in the first iteration):
		Mat mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() + blendshapes_as_basis * Mat(last_blendshape_coeffs);
		current_pca_coeffs = fitting::fit_shape_to_landmarks_linear(morphable_model, affine_camera_matrix, image_points, vertex_indices, mean_plus_blendshapes, lambda);

		// Estimate the blendshape coefficients with the current PCA model estimate:
		Mat pca_model_shape = morphable_model.get_shape_model().draw_sample(current_pca_coeffs);
		current_blendshape_coeffs = eos::fitting::fit_blendshapes_to_landmarks_linear(blendshapes, pca_model_shape, affine_camera_matrix, image_points, vertex_indices, 0.0f);

		combined_shape = pca_model_shape + blendshapes_as_basis * Mat(current_blendshape_coeffs);
	} while (std::abs(cv::norm(current_pca_coeffs) - cv::norm(last_pca_coeffs)) >= 0.01 || std::abs(cv::norm(current_blendshape_coeffs) - cv::norm(last_blendshape_coeffs)) >= 0.01);
	
	pca_shape_coefficients = current_pca_coeffs;
	blendshape_coefficients = current_blendshape_coeffs;
	return combined_shape;
}
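A hypothetical call site, assuming an affine camera matrix was already estimated from the 2D-3D landmark correspondences (the regularisation weight 3.0f is an arbitrary example value):

std::vector<float> pca_coeffs, blendshape_coeffs;
cv::Mat shape = fit_shape_model(affine_cam, morphable_model, blendshapes,
                                image_points, vertex_indices, 3.0f,
                                pca_coeffs, blendshape_coeffs);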
Example #28
Mat* MyImage::getOpenCVMat(){
  if(mat == NULL)  {
    try {
      Mat orig;
      getMagickImage();
      magick.magick("BGR");
      Blob blb ;
      magick.write(&blb);
      mat = new Mat();
      orig = Mat(magick.size().height(),
  	       magick.size().width(), 
  	       CV_8UC3, 
  	       (void *) blb.data());
      cvtColor(orig, *mat, CV_BGR2HSV); // the blob was exported as "BGR" above, so convert BGR -> HSV
      magick.magick("RGB");
    }
    catch (Magick::Exception &e)
    {
      cout << "Caught a magick exception: " << e.what() << endl;
      throw; // rethrow the original exception; `throw e;` would copy and can slice derived types
    }
  }
  return mat;
}
void CaffeClassifier::Impl::FillBlob(const vector<Mat>& images,
                                     Blobf* blob)
{
    // Check that the net is configured to use a proper batch size.
    CV_Assert(static_cast<size_t>(blob->shape(0)) == images.size()); // `blob` is the parameter here; `data_blob` was not in scope
    float* blob_data = blob->mutable_cpu_data();
    for (size_t i = 0; i < images.size(); ++i)
    {
        Mat image = images[i];
        // Check that all other dimensions of blob and image match.
        CV_Assert(blob->shape(1) == image.channels());
        CV_Assert(blob->shape(2) == image.rows);
        CV_Assert(blob->shape(3) == image.cols);

        Mat image_float = image;
        if (image.type() != CV_32F) {
            image.convertTo(image_float, CV_32F);
        }

        // Wrap the blob's memory in per-channel Mat headers so that cv::split
        // writes the planar channel data directly into the blob, no extra copy.
        vector<Mat> image_channels;
        for (int j = 0; j < image.channels(); ++j)
        {
            image_channels.push_back(Mat(image.size(), CV_32F,
                                         blob_data + blob->offset(i, j)));
        }
        cv::split(image_float, image_channels);
    }
}
/*
 * Class:     pt_chambino_p_pulse_Pulse_Face
 * Method:    _box
 * Signature: (JJ)V
 */
JNIEXPORT void JNICALL Java_pt_chambino_p_pulse_Pulse_00024Face__1box
  (JNIEnv *jenv, jclass, jlong self, jlong mat)
  {
    LOGD("Java_pt_chambino_p_pulse_Pulse_00024Face__1box enter");
    try
    {
        if (self) {
            vector<Rect> v;
            v.push_back(((Pulse::Face*)self)->evm.box);
            *((Mat*)mat) = Mat(v, true);
        }
    }
    catch(cv::Exception& e)
    {
        jclass je = jenv->FindClass("org/opencv/core/CvException");
        if(!je) je = jenv->FindClass("java/lang/Exception");
        jenv->ThrowNew(je, e.what());
    }
    catch (...)
    {
        jclass je = jenv->FindClass("java/lang/Exception");
        jenv->ThrowNew(je, "Unknown exception in JNI code.");
    }
    LOGD("Java_pt_chambino_p_pulse_Pulse_00024Face__1box exit");
  }