Example no. 1
bool DatasetCIFAR10::loadBatch(std::string filename)
{
	std::ifstream ifs(filename, std::ios::binary); // CIFAR-10 batch files are binary

	if (not ifs.good()) {
		std::cerr << "(loadImages) ERROR cannot open file: " << filename << std::endl;
		return false;
	}

	while (not ifs.eof()) {
		char lbl = 0;
		ifs.read(&lbl, 1);
		char buffer[imgSize * 3];
		ifs.read(buffer, imgSize * 3);
		if (ifs.eof())
			break;
		// load channels, convert to float, normalize
		cv::Size size(imgCols, imgRows); // cv::Size is (width, height); square for CIFAR-10
		std::vector<Mat> mats({Mat(size, CV_8UC1, buffer, Mat::AUTO_STEP),
		                       Mat(size, CV_8UC1, buffer + imgSize, Mat::AUTO_STEP),
		                       Mat(size, CV_8UC1, buffer + 2 * imgSize, Mat::AUTO_STEP)});
		Mat img(size, CV_8UC3);
		cv::merge(mats, img);
		img.convertTo(img, CV_64FC3);
		img = img / 255.0;
		img = img.reshape(1, imgRows);

		labels.push_back(lbl);
		images.push_back(img);
	}
	size = images.size();
	return true;
}
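// Usage sketch (hedged): load the five standard training batches of the
// CIFAR-10 binary distribution. Assumes a DatasetCIFAR10 instance exposing
// loadBatch() as defined above; the directory name is hypothetical.
bool loadTrainingSet(DatasetCIFAR10& dataset)
{
	for (int b = 1; b <= 5; ++b) {
		std::string file = "cifar-10-batches-bin/data_batch_" + std::to_string(b) + ".bin";
		if (!dataset.loadBatch(file))
			return false;
	}
	return true;
}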
TEST(SupervisedDescentOptimiser, SinConvergence) {
	// sin(x):
	auto h = [](Mat value, size_t, int) { return std::sin(value.at<float>(0)); };
	auto h_inv = [](float value) {
		if (value >= 1.0f) // our upper border of y is 1.0f, but it can be a bit larger due to floating point representation. asin then returns NaN.
			return std::asin(1.0f);
		else
			return std::asin(value);
		};

	float startInterval = -1.0f; float stepSize = 0.2f; int numValues = 11; Mat y_tr(numValues, 1, CV_32FC1); // sin: [-1:0.2:1]
	{
		vector<float> values(numValues);
		strided_iota(std::begin(values), std::next(std::begin(values), numValues), startInterval, stepSize);
		y_tr = Mat(values, true);
	}
	Mat x_tr(numValues, 1, CV_32FC1); // Will be the inverse of y_tr
	{
		vector<float> values(numValues);
		std::transform(y_tr.begin<float>(), y_tr.end<float>(), begin(values), h_inv);
		x_tr = Mat(values, true);
	}

	Mat x0 = 0.5f * Mat::ones(numValues, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	SupervisedDescentOptimiser<LinearRegressor<>> sdo({ LinearRegressor<>() });
	
	// Test the callback mechanism as well: (better move to a separate unit test?)
	auto checkResidual = [&](const Mat& currentX) {
		double residual = cv::norm(currentX, x_tr, cv::NORM_L2) / cv::norm(x_tr, cv::NORM_L2);
		EXPECT_DOUBLE_EQ(0.21369851877468238, residual);
	};

	sdo.train(x_tr, x0, y_tr, h, checkResidual);

	// Make sure the training converges, i.e. the residual is correct on the training data:
	Mat predictions = sdo.test(x0, y_tr, h);
	double trainingResidual = normalisedLeastSquaresResidual(predictions, x_tr);
	EXPECT_DOUBLE_EQ(0.21369851877468238, trainingResidual);

	// Test the trained model:
	// Test data with finer resolution:
	float startIntervalTest = -1.0f; float stepSizeTest = 0.05f; int numValuesTest = 41; Mat y_ts(numValuesTest, 1, CV_32FC1); // sin: [-1:0.05:1]
	{
		vector<float> values(numValuesTest);
		strided_iota(std::begin(values), std::next(std::begin(values), numValuesTest), startIntervalTest, stepSizeTest);
		y_ts = Mat(values, true);
	}
	Mat x_ts_gt(numValuesTest, 1, CV_32FC1); // Will be the inverse of y_ts
	{
		vector<float> values(numValuesTest);
		std::transform(y_ts.begin<float>(), y_ts.end<float>(), begin(values), h_inv);
		x_ts_gt = Mat(values, true);
	}
	Mat x0_ts = 0.5f * Mat::ones(numValuesTest, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.
	
	predictions = sdo.test(x0_ts, y_ts, h);
	double testResidual = normalisedLeastSquaresResidual(predictions, x_ts_gt);
	ASSERT_NEAR(0.1800101229, testResidual, 0.0000000003);
}
TEST(SupervisedDescentOptimiser, SinConvergenceCascade) {
	// sin(x):
	auto h = [](Mat value, size_t, int) { return std::sin(value.at<float>(0)); };
	auto h_inv = [](float value) {
		if (value >= 1.0f) // our upper border of y is 1.0f, but it can be a bit larger due to floating point representation. asin then returns NaN.
			return std::asin(1.0f);
		else
			return std::asin(value);
	};

	float startInterval = -1.0f; float stepSize = 0.2f; int numValues = 11; Mat y_tr(numValues, 1, CV_32FC1); // sin: [-1:0.2:1]
	{
		vector<float> values(numValues);
		strided_iota(std::begin(values), std::next(std::begin(values), numValues), startInterval, stepSize);
		y_tr = Mat(values, true);
	}
	Mat x_tr(numValues, 1, CV_32FC1); // Will be the inverse of y_tr
	{
		vector<float> values(numValues);
		std::transform(y_tr.begin<float>(), y_tr.end<float>(), begin(values), h_inv);
		x_tr = Mat(values, true);
	}

	Mat x0 = 0.5f * Mat::ones(numValues, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	vector<LinearRegressor<>> regressors(10);
	SupervisedDescentOptimiser<LinearRegressor<>> sdo(regressors);
	sdo.train(x_tr, x0, y_tr, h);

	// Make sure the training converges, i.e. the residual is correct on the training data:
	Mat predictions = sdo.test(x0, y_tr, h);
	double trainingResidual = normalisedLeastSquaresResidual(predictions, x_tr);
	EXPECT_NEAR(0.040279395, trainingResidual, 0.00000008);
		
	// Test the trained model:
	// Test data with finer resolution:
	float startIntervalTest = -1.0f; float stepSizeTest = 0.05f; int numValuesTest = 41; Mat y_ts(numValuesTest, 1, CV_32FC1); // sin: [-1:0.05:1]
	{
		vector<float> values(numValuesTest);
		strided_iota(std::begin(values), std::next(std::begin(values), numValuesTest), startIntervalTest, stepSizeTest);
		y_ts = Mat(values, true);
	}
	Mat x_ts_gt(numValuesTest, 1, CV_32FC1); // Will be the inverse of y_ts
	{
		vector<float> values(numValuesTest);
		std::transform(y_ts.begin<float>(), y_ts.end<float>(), begin(values), h_inv);
		x_ts_gt = Mat(values, true);
	}
	Mat x0_ts = 0.5f * Mat::ones(numValuesTest, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	predictions = sdo.test(x0_ts, y_ts, h);
	double testResidual = normalisedLeastSquaresResidual(predictions, x_ts_gt);
	ASSERT_NEAR(0.026156775, testResidual, 0.00000005);
}
TEST(SupervisedDescentOptimiser, XCubeConvergenceCascade) {
	// x^3:
	auto h = [](Mat value, size_t, int) { return static_cast<float>(std::pow(value.at<float>(0), 3)); };
	auto h_inv = [](float value) { return std::cbrt(value); }; // cubic root

	float startInterval = -27.0f; float stepSize = 3.0f; int numValues = 19; Mat y_tr(numValues, 1, CV_32FC1); // cube: [-27:3:27]
	{
		vector<float> values(numValues);
		strided_iota(std::begin(values), std::next(std::begin(values), numValues), startInterval, stepSize);
		y_tr = Mat(values, true);
	}
	Mat x_tr(numValues, 1, CV_32FC1); // Will be the inverse of y_tr
	{
		vector<float> values(numValues);
		std::transform(y_tr.begin<float>(), y_tr.end<float>(), begin(values), h_inv);
		x_tr = Mat(values, true);
	}

	Mat x0 = 0.5f * Mat::ones(numValues, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	vector<LinearRegressor<>> regressors(10);
	SupervisedDescentOptimiser<LinearRegressor<>> sdo(regressors);
	sdo.train(x_tr, x0, y_tr, h);

	// Make sure the training converges, i.e. the residual is correct on the training data:
	Mat predictions = sdo.test(x0, y_tr, h);
	double trainingResidual = normalisedLeastSquaresResidual(predictions, x_tr);
	EXPECT_NEAR(0.04312725, trainingResidual, 0.00000002);

	// Test the trained model:
	// Test data with finer resolution:
	float startIntervalTest = -27.0f; float stepSizeTest = 0.5f; int numValuesTest = 109; Mat y_ts(numValuesTest, 1, CV_32FC1); // cube: [-27:0.5:27]
	{
		vector<float> values(numValuesTest);
		strided_iota(std::begin(values), std::next(std::begin(values), numValuesTest), startIntervalTest, stepSizeTest);
		y_ts = Mat(values, true);
	}
	Mat x_ts_gt(numValuesTest, 1, CV_32FC1); // Will be the inverse of y_ts
	{
		vector<float> values(numValuesTest);
		std::transform(y_ts.begin<float>(), y_ts.end<float>(), begin(values), h_inv);
		x_ts_gt = Mat(values, true);
	}
	Mat x0_ts = 0.5f * Mat::ones(numValuesTest, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	predictions = sdo.test(x0_ts, y_ts, h);
	double testResidual = normalisedLeastSquaresResidual(predictions, x_ts_gt);
	ASSERT_NEAR(0.05889855, testResidual, 0.00000002);
}
TEST(SupervisedDescentOptimiser, ExpConvergenceCascade) {
	// exp(x):
	auto h = [](Mat value, size_t, int) { return std::exp(value.at<float>(0)); };
	auto h_inv = [](float value) { return std::log(value); };

	float startInterval = 1.0f; float stepSize = 3.0f; int numValues = 10; Mat y_tr(numValues, 1, CV_32FC1); // exp: [1:3:28]
	{
		vector<float> values(numValues);
		strided_iota(std::begin(values), std::next(std::begin(values), numValues), startInterval, stepSize);
		y_tr = Mat(values, true);
	}
	Mat x_tr(numValues, 1, CV_32FC1); // Will be the inverse of y_tr
	{
		vector<float> values(numValues);
		std::transform(y_tr.begin<float>(), y_tr.end<float>(), begin(values), h_inv);
		x_tr = Mat(values, true);
	}

	Mat x0 = 0.5f * Mat::ones(numValues, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	vector<LinearRegressor<>> regressors(10);
	SupervisedDescentOptimiser<LinearRegressor<>> sdo(regressors);
	sdo.train(x_tr, x0, y_tr, h);

	// Make sure the training converges, i.e. the residual is correct on the training data:
	Mat predictions = sdo.test(x0, y_tr, h);
	double trainingResidual = normalisedLeastSquaresResidual(predictions, x_tr);
	EXPECT_NEAR(0.02510868, trainingResidual, 0.00000005);
	
	// Test the trained model:
	// Test data with finer resolution:
	float startIntervalTest = 1.0f; float stepSizeTest = 0.5f; int numValuesTest = 55; Mat y_ts(numValuesTest, 1, CV_32FC1); // exp: [1:0.5:28]
	{
		vector<float> values(numValuesTest);
		strided_iota(std::begin(values), std::next(std::begin(values), numValuesTest), startIntervalTest, stepSizeTest);
		y_ts = Mat(values, true);
	}
	Mat x_ts_gt(numValuesTest, 1, CV_32FC1); // Will be the inverse of y_ts
	{
		vector<float> values(numValuesTest);
		std::transform(y_ts.begin<float>(), y_ts.end<float>(), begin(values), h_inv);
		x_ts_gt = Mat(values, true);
	}
	Mat x0_ts = 0.5f * Mat::ones(numValuesTest, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	predictions = sdo.test(x0_ts, y_ts, h);
	double testResidual = normalisedLeastSquaresResidual(predictions, x_ts_gt);
	ASSERT_NEAR(0.01253494, testResidual, 0.00000004);
}
TEST(SupervisedDescentOptimiser, ErfConvergence) {
	// erf(x):
	auto h = [](Mat value, size_t, int) { return std::erf(value.at<float>(0)); };
	auto h_inv = [](float value) { return boost::math::erf_inv(value); };

	float startInterval = -0.99f; float stepSize = 0.11f; int numValues = 19; Mat y_tr(numValues, 1, CV_32FC1); // erf: [-0.99:0.11:0.99]
	{
		vector<float> values(numValues);
		strided_iota(std::begin(values), std::next(std::begin(values), numValues), startInterval, stepSize);
		y_tr = Mat(values, true);
	}
	Mat x_tr(numValues, 1, CV_32FC1); // Will be the inverse of y_tr
	{
		vector<float> values(numValues);
		std::transform(y_tr.begin<float>(), y_tr.end<float>(), begin(values), h_inv);
		x_tr = Mat(values, true);
	}

	Mat x0 = 0.5f * Mat::ones(numValues, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	SupervisedDescentOptimiser<LinearRegressor<>> sdo({ LinearRegressor<>() });
	sdo.train(x_tr, x0, y_tr, h);

	// Make sure the training converges, i.e. the residual is correct on the training data:
	Mat predictions = sdo.test(x0, y_tr, h);
	double trainingResidual = normalisedLeastSquaresResidual(predictions, x_tr);
	EXPECT_NEAR(0.30944183, trainingResidual, 0.00000005);

	// Test the trained model:
	// Test data with finer resolution:
	float startIntervalTest = -0.99f; float stepSizeTest = 0.03f; int numValuesTest = 67; Mat y_ts(numValuesTest, 1, CV_32FC1); // erf: [-0.99:0.03:0.99]
	{
		vector<float> values(numValuesTest);
		strided_iota(std::begin(values), std::next(std::begin(values), numValuesTest), startIntervalTest, stepSizeTest);
		y_ts = Mat(values, true);
	}
	Mat x_ts_gt(numValuesTest, 1, CV_32FC1); // Will be the inverse of y_ts
	{
		vector<float> values(numValuesTest);
		std::transform(y_ts.begin<float>(), y_ts.end<float>(), begin(values), h_inv);
		x_ts_gt = Mat(values, true);
	}
	Mat x0_ts = 0.5f * Mat::ones(numValuesTest, 1, CV_32FC1); // fixed initialization x0 = c = 0.5.

	predictions = sdo.test(x0_ts, y_ts, h);
	double testResidual = normalisedLeastSquaresResidual(predictions, x_ts_gt);
	ASSERT_NEAR(0.25736006, testResidual, 0.0000002);
}
Example no. 7
Point3d Camera::pixelToWorld(const Point2d& pixel) const
{
	double im[2] = {pixel.x, pixel.y};
	double wl[3];
	double z;

	// setup intrinsic and extrinsic parameters
	CvMat img_frm = cvMat(1, 1, CV_64FC2, im);
	Mat world_frm = Mat(T_ROWS, T_COLS, TYPE, wl);

	// convert from distorted pixels to normalized camera frame
	// cv:: version does not allow doubles for some odd reason,
	// so use the C version
	CvMat A = _A;
	CvMat k = _k;
	cvUndistortPoints(&img_frm, &img_frm, &A, &k);
	
		// convert from camera frame to world frame
	/*z = _t(2, 0) ? _t(2, 0) : 1; // Wrong z != t3. z = r31 X + r32 Y + t3
	wl[0] = z*im[0] - _t(0, 0);
	wl[1] = z*im[1] - _t(1, 0);
	wl[2] = 0;
	world_frm = _R.t() * world_frm;
    */

	// closed-form back-projection of the normalized image point onto the Z = 0 world plane
	wl[0] = (_R(1,1)*_t(0,0) - _R(0,1)*_t(1,0) + _R(2,1)*_t(1,0)*im[0] - _R(1,1)*_t(2,0)*im[0]
	         - _R(2,1)*_t(0,0)*im[1] + _R(0,1)*_t(2,0)*im[1])
	        / (_R(0,1)*_R(1,0) - _R(0,0)*_R(1,1) + _R(1,1)*_R(2,0)*im[0] - _R(1,0)*_R(2,1)*im[0]
	           - _R(0,1)*_R(2,0)*im[1] + _R(0,0)*_R(2,1)*im[1]);
	wl[1] = (-_R(0,0)*_t(1,0) + _R(2,0)*_t(1,0)*im[0] + _R(1,0)*(_t(0,0) - _t(2,0)*im[0])
	         - _R(2,0)*_t(0,0)*im[1] + _R(0,0)*_t(2,0)*im[1])
	        / (-_R(0,1)*_R(1,0) + _R(0,0)*_R(1,1) - _R(1,1)*_R(2,0)*im[0] + _R(1,0)*_R(2,1)*im[0]
	           + _R(0,1)*_R(2,0)*im[1] - _R(0,0)*_R(2,1)*im[1]);
	wl[2] = 0;

	return Point3d(wl[0], wl[1], wl[2]);
}
Example no. 8
Mat* MyImage::getOpenCVMat(){
  if(mat == NULL)  {
    try {
      Mat orig;
      getMagickImage();
      magick.magick("BGR");
      Blob blb ;
      magick.write(&blb);
      mat = new Mat();
      orig = Mat(magick.size().height(),
  	       magick.size().width(), 
  	       CV_8UC3, 
  	       (void *) blb.data());
      cvtColor(orig, *mat, CV_BGR2HSV); // the blob was written with "BGR" channel order above
      magick.magick("RGB");
    }
    catch (Magick::Exception &e)
    {
      cout << "Caught a magick exception: " << e.what() << endl;
      throw e;
    }
  }
  return mat;

}
Example no. 9
/**
 * Convenience function that fits the shape model and expression blendshapes to
 * landmarks. Makes the fitted PCA shape and blendshape coefficients accessible
 * via the out parameters \p pca_shape_coefficients and \p blendshape_coefficients.
 * It iterates PCA-shape and blendshape fitting until convergence
 * (usually it converges within 5 to 10 iterations).
 *
 * See fit_shape_model(cv::Mat, eos::morphablemodel::MorphableModel, std::vector<eos::morphablemodel::Blendshape>, std::vector<cv::Vec2f>, std::vector<int>, float lambda)
 * for a simpler overload that just returns the shape instance.
 *
 * @param[in] affine_camera_matrix The estimated pose as a 3x4 affine camera matrix that is used to fit the shape.
 * @param[in] morphable_model The 3D Morphable Model used for the shape fitting.
 * @param[in] blendshapes A vector of blendshapes that are being fit to the landmarks in addition to the PCA model.
 * @param[in] image_points 2D landmarks from an image to fit the model to.
 * @param[in] vertex_indices The vertex indices in the model that correspond to the 2D points.
 * @param[in] lambda Regularisation parameter of the PCA shape fitting.
 * @param[out] pca_shape_coefficients Output parameter that will contain the resulting pca shape coefficients.
 * @param[out] blendshape_coefficients Output parameter that will contain the resulting blendshape coefficients.
 * @return The fitted model shape instance.
 */
cv::Mat fit_shape_model(cv::Mat affine_camera_matrix, eos::morphablemodel::MorphableModel morphable_model, std::vector<eos::morphablemodel::Blendshape> blendshapes, std::vector<cv::Vec2f> image_points, std::vector<int> vertex_indices, float lambda, std::vector<float>& pca_shape_coefficients, std::vector<float>& blendshape_coefficients)
{
	using cv::Mat;
	
	Mat blendshapes_as_basis(blendshapes[0].deformation.rows, blendshapes.size(), CV_32FC1); // assert blendshapes.size() > 0 and all of them have same number of rows, and 1 col
	for (int i = 0; i < blendshapes.size(); ++i)
	{
		blendshapes[i].deformation.copyTo(blendshapes_as_basis.col(i));
	}

	std::vector<float> last_blendshape_coeffs, current_blendshape_coeffs; 
	std::vector<float> last_pca_coeffs, current_pca_coeffs;
	current_blendshape_coeffs.resize(blendshapes.size()); // starting values t_0, all zeros
	current_pca_coeffs.resize(morphable_model.get_shape_model().get_num_principal_components()); // starting values, all zeros
	Mat combined_shape;

	do // run at least once:
	{
		last_blendshape_coeffs = current_blendshape_coeffs;
		last_pca_coeffs = current_pca_coeffs;
		// Estimate the PCA shape coefficients with the current blendshape coefficients (0 in the first iteration):
		Mat mean_plus_blendshapes = morphable_model.get_shape_model().get_mean() + blendshapes_as_basis * Mat(last_blendshape_coeffs);
		current_pca_coeffs = fitting::fit_shape_to_landmarks_linear(morphable_model, affine_camera_matrix, image_points, vertex_indices, mean_plus_blendshapes, lambda);

		// Estimate the blendshape coefficients with the current PCA model estimate:
		Mat pca_model_shape = morphable_model.get_shape_model().draw_sample(current_pca_coeffs);
		current_blendshape_coeffs = eos::fitting::fit_blendshapes_to_landmarks_linear(blendshapes, pca_model_shape, affine_camera_matrix, image_points, vertex_indices, 0.0f);

		combined_shape = pca_model_shape + blendshapes_as_basis * Mat(current_blendshape_coeffs);
	} while (std::abs(cv::norm(current_pca_coeffs) - cv::norm(last_pca_coeffs)) >= 0.01 || std::abs(cv::norm(current_blendshape_coeffs) - cv::norm(last_blendshape_coeffs)) >= 0.01);
	
	pca_shape_coefficients = current_pca_coeffs;
	blendshape_coefficients = current_blendshape_coeffs;
	return combined_shape;
};
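// Usage sketch (hedged): a single call to the convenience function above. The
// morphable model, blendshapes, affine camera matrix, 2D landmarks and vertex
// indices are assumed to be provided by the surrounding fitting pipeline; the
// regularisation value is purely illustrative.
std::vector<float> pca_shape_coefficients;
std::vector<float> blendshape_coefficients;
cv::Mat fitted_shape = fit_shape_model(affine_camera_matrix, morphable_model, blendshapes,
	image_points, vertex_indices, 3.0f /* lambda */,
	pca_shape_coefficients, blendshape_coefficients);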
Example no. 10
cv::Mat montageList(const Dataset &dataset, const std::vector<std::pair<int, int>> &list,
                    int tiles_per_row = 30)
{
	using cv::Mat;

	const int count = list.size();
	const int xMargin = 2;
	const int yMargin = 14;

	const int width = dataset.imgCols + xMargin;
	const int height = dataset.imgRows + yMargin;

	assert(dataset.images.size() > 0);
	const int type = dataset.images[0].type();

	if (list.size() == 0)
		return Mat(0, 0, type);

	Mat mat = Mat::ones((count / tiles_per_row + 1) * height, tiles_per_row * width, type);

	for (size_t i = 0; i < list.size(); i++) {
		int x = (i % tiles_per_row) * width;
		int y = (i / tiles_per_row) * height;
		int id = list[i].first;
		dataset.images[id].copyTo(mat(cv::Rect(x, y, dataset.imgCols, dataset.imgRows)));

		std::string label =
		    std::to_string(list[i].second) + "/" + std::to_string(dataset.labels[id]);

		cv::putText(mat, label, cv::Point(x, y + height - 2), cv::FONT_HERSHEY_SIMPLEX, 0.4,
		            cv::Scalar({0.0, 0.0, 0.0, 0.0}));
	}
	return mat;
}
void CaffeClassifier::Impl::FillBlob(const vector<Mat>& images,
                                     Blobf* blob)
{
    // Check that net is configured to use a proper batch size.
    CV_Assert(static_cast<size_t>(blob->shape(0)) == images.size());
    float* blob_data = blob->mutable_cpu_data();
    for (size_t i = 0; i < images.size(); ++i)
    {
        Mat image = images[i];
        // Check that all other dimensions of blob and image match.
        CV_Assert(blob->shape(1) == image.channels());
        CV_Assert(blob->shape(2) == image.rows);
        CV_Assert(blob->shape(3) == image.cols);

        Mat image_float = image;
        if (image.type() != CV_32F) {
            image.convertTo(image_float, CV_32F);
        }

        vector<Mat> image_channels;
        for (int j = 0; j < image.channels(); ++j)
        {
            image_channels.push_back(Mat(image.size(), CV_32F,
                                         blob_data + blob->offset(i, j)));
        }
        cv::split(image_float, image_channels);
    }
}
/*
 * Class:     pt_chambino_p_pulse_Pulse_Face
 * Method:    _box
 * Signature: (JJ)V
 */
JNIEXPORT void JNICALL Java_pt_chambino_p_pulse_Pulse_00024Face__1box
  (JNIEnv *jenv, jclass, jlong self, jlong mat)
  {
    LOGD("Java_pt_chambino_p_pulse_Pulse_00024Face__1box enter");
    try
    {
        if (self) {
            vector<Rect> v;
            v.push_back(((Pulse::Face*)self)->evm.box);
            *((Mat*)mat) = Mat(v, true);
        }
    }
    catch(cv::Exception& e)
    {
        jclass je = jenv->FindClass("org/opencv/core/CvException");
        if(!je) je = jenv->FindClass("java/lang/Exception");
        jenv->ThrowNew(je, e.what());
    }
    catch (...)
    {
        jclass je = jenv->FindClass("java/lang/Exception");
        jenv->ThrowNew(je, "Unknown exception in JNI code.");
    }
    LOGD("Java_pt_chambino_p_pulse_Pulse_00024Face__1box exit");
  }
Example no. 13
    static void transpose(Type* data, int height, int width) {
#if HAS_IMGLIB
        int elemType;
        if (sizeof(Type) == 1) {
            elemType = CV_8UC1;
        } else if (sizeof(Type) == 4) {
            width /= 4;
            elemType = CV_32F;
        } else {
            throw std::runtime_error("Unsupported type in transpose\n");
        }
        Mat input = Mat(height, width, elemType, data).clone();
        Mat output = Mat(width, height, elemType, data);
        cv::transpose(input, output);
#else
#warning ("OpenCV support not built-in")
        string message = "OpenCV " UNSUPPORTED_MEDIA_MESSAGE;
        throw std::runtime_error(message);
#endif
    }
// compute the histogram
Mat  KswSegment::histCalculate(const Mat& src)
{
	Mat hist;
	int bins = 256;
	int histSize[] = {bins};
	float range[] = {0, 256};
	const float* ranges[] = {range};
	int channels[] = {0};
	calcHist(&src, 1, channels, Mat(), hist, 1, histSize, ranges, true, false); // compute the histogram of the source image
	return hist;
}
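// Usage sketch (hedged): compute and normalise the grey-level histogram of an
// 8-bit single-channel image. The KswSegment instance and the input file name
// are hypothetical.
cv::Mat grey = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
KswSegment segmenter;
cv::Mat hist = segmenter.histCalculate(grey);
cv::normalize(hist, hist, 0, 1, cv::NORM_MINMAX);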
Example no. 15
bool Calibration::readExtrinsics(const string& file, Mat& R, Mat& t)
{
	CvMat* _R = NULL;
	CvMat* _t = NULL;

	if(file.empty()) {
		return false;
	}

	FileStorage fs(file, FileStorage::READ);
	if(fs.isOpened()) {
		_R = static_cast<CvMat*>(fs["Rotation Matrix"].readObj());
		_t = static_cast<CvMat*>(fs["Translation Vector"].readObj());
	}

	R = _R == NULL ? Mat() : Mat(_R);
	t = _t == NULL ? Mat() : Mat(_t);

	return !R.empty() && !t.empty();
}
Example no. 16
double Calibration::getExtrinsics(const Mat& Tr)
{
	// set/create various parameters
	Mat rvec;
	Mat w(views.world.front());
	Mat p(views.pixel.front());

	Mat& A = intrinsic_params.A;
	Mat& k = intrinsic_params.k;
	Mat& R = extrinsic_params.R;
	Mat& t = extrinsic_params.t;

	// get extrinsic parameters
	cv::solvePnP(w, p, A, k, rvec, t, solve_pnp.useExtGuess);
	cv::Rodrigues(rvec, R);
	
	// convert default coordinate system to new one
	if(!Tr.empty()) {
		CV_Assert(Tr.rows == 3 && Tr.cols == 4);
		Mat T_Tr = Mat(Tr, Range(0, 3), Range(3, 4));
		Mat R_Tr = Mat(Tr, Range(0, 3), Range(0, 3));

		t += R*T_Tr;
		R *= R_Tr;
	}
	/*std::cout << "you are printing modified R" << std::endl;
	for(int i = 0; i < R.rows; i++) {
		for(int j = 0; j < R.cols; j++) {
			std::cout << R.at<double>(i,j) << " ";
		}
		std::cout << std::endl;
	}*/

	return 0.;
}
Example no. 17
bool Calibration::readIntrinsics(const string& file, Mat& A, Mat& k)
{
	CvMat* _A = NULL;
	CvMat* _k = NULL;

	if(file.empty()) {
		return false;
	}

	FileStorage fs(file, FileStorage::READ);

	if(fs.isOpened()) {
		_A = static_cast<CvMat*>(fs["Camera Matrix"].readObj());
		_k = static_cast<CvMat*>(fs["Distortion Coefficients"].readObj());
	}

	A = _A == NULL ? Mat() : Mat(_A);
	k = _k == NULL ? Mat() : Mat(_k);

	return !A.empty() && !k.empty();
}
	void train(cv::Mat parameters, cv::Mat initialisations, cv::Mat templates, ProjectionFunction projection, OnTrainingEpochCallback onTrainingEpochCallback)
	{
		using cv::Mat;
		Mat currentX = initialisations;
		for (size_t regressorLevel = 0; regressorLevel < regressors.size(); ++regressorLevel) {
			// 1) Project current parameters x to feature space:
			// Enqueue all tasks in a thread pool:
			auto concurentThreadsSupported = std::thread::hardware_concurrency();
			if (concurentThreadsSupported == 0) {
				concurentThreadsSupported = 4;
			}
			utils::ThreadPool threadPool(concurentThreadsSupported);
			std::vector<std::future<typename std::result_of<ProjectionFunction(Mat, size_t, int)>::type>> results; // will be float or Mat. I might remove float for the sake of code clarity, as it's only useful for very simple examples.
			results.reserve(currentX.rows);
			for (int sampleIndex = 0; sampleIndex < currentX.rows; ++sampleIndex) {
				results.emplace_back(
					threadPool.enqueue(projection, currentX.row(sampleIndex), regressorLevel, sampleIndex)
				);
			}
			// Gather the results from all threads and store the features:
			Mat features;
			for (auto&& result : results) {
				features.push_back(result.get());
			}
			// Set the observed values, depending on if a template y is used:
			Mat observedValues;
			if (templates.empty()) { // unknown template training case
				observedValues = features;
			}
			else { // known template
				observedValues = features - templates;
			}
			Mat b = currentX - parameters; // currentX - x;
			// 2) Learn using that data:
			regressors[regressorLevel].learn(observedValues, b);
			// 3) Apply the learned regressor and use the predictions to learn the next regressor in next loop iteration:
			Mat x_k; // x_k = currentX - R * (h(currentX) - y):
			for (int sampleIndex = 0; sampleIndex < currentX.rows; ++sampleIndex) {
				// No need to re-extract the features, we already did so in step 1)
				x_k.push_back(Mat(currentX.row(sampleIndex) - regressors[regressorLevel].predict(observedValues.row(sampleIndex))));
			}
			currentX = x_k;
			onTrainingEpochCallback(currentX);
		}
	};
Example no. 19
TEST(hole_filling_test, one_pixel_hole_on_random_image_should_produce_correct_target_rect)
{
    // Make some random image data.
    Mat img = Mat(100, 100, CV_32FC1);
    randu(img, 0.f, 1.f);

    // Put a hole in middle.
    Mat hole = Mat::zeros(100, 100, CV_8U);

    // Set one pixel as hole
    hole(Rect(50, 50, 1, 1)) = 1;
    int patch_size = 7;
    HoleFilling hf(img, hole, patch_size);

    Rect expected_target_rect(Point(50 - 6, 50 - 6), Point(50 + 7, 50 + 7));

    ASSERT_EQ(expected_target_rect, hf._target_rect_pyr[0]);
}
Example no. 20
    // adjusts contrast, brightness, and saturation according
    // to values in cbs[0], cbs[1], cbs[2], respectively
    void cbsjitter(Mat& inout, float cbs[]) {
        // Skip transformations if given deterministic settings
        if (_params->_contrastMin == _params->_contrastMax) {
            return;
        }

        /****************************
        *  BRIGHTNESS & SATURATION  *
        *****************************/
        Mat satmtx = cbs[1] * (cbs[2] * Mat::eye(3, 3, CV_32FC1) +
                                (1 - cbs[2]) * Mat::ones(3, 1, CV_32FC1) * GSCL.t());
        cv::transform(inout, inout, satmtx);

        /*************
        *  CONTRAST  *
        **************/
        Mat gray_mean;
        cv::cvtColor(Mat(1, 1, CV_32FC3, cv::mean(inout)), gray_mean, CV_BGR2GRAY);
        inout = cbs[0] * inout + (1 - cbs[0]) * gray_mean.at<Scalar_<float>>(0, 0);
    }
	cv::Mat test(cv::Mat initialisations, cv::Mat templates, ProjectionFunction projection, OnRegressorIterationCallback onRegressorIterationCallback)
	{
		using cv::Mat;
		Mat currentX = initialisations;
		for (size_t regressorLevel = 0; regressorLevel < regressors.size(); ++regressorLevel) {
			// Enqueue all tasks in a thread pool:
			auto concurentThreadsSupported = std::thread::hardware_concurrency();
			if (concurentThreadsSupported == 0) {
				concurentThreadsSupported = 4;
			}
			utils::ThreadPool threadPool(concurentThreadsSupported);
			std::vector<std::future<typename std::result_of<ProjectionFunction(Mat, size_t, int)>::type>> results; // will be float or Mat. I might remove float for the sake of code clarity, as it's only useful for very simple examples.
			results.reserve(currentX.rows);
			for (int sampleIndex = 0; sampleIndex < currentX.rows; ++sampleIndex) {
				results.emplace_back(
					threadPool.enqueue(projection, currentX.row(sampleIndex), regressorLevel, sampleIndex)
					);
			}
			// Gather the results from all threads and store the features:
			Mat features;
			for (auto&& result : results) {
				features.push_back(result.get());
			}

			Mat observedValues;
			if (templates.empty()) { // unknown template case
				observedValues = features;
			}
			else { // known template
				observedValues = features - templates;
			}
			Mat x_k;
			// Calculate x_k = currentX - R * (h(currentX) - y):
			for (int sampleIndex = 0; sampleIndex < currentX.rows; ++sampleIndex) {
				x_k.push_back(Mat(currentX.row(sampleIndex) - regressors[regressorLevel].predict(observedValues.row(sampleIndex)))); // we need Mat() because the subtraction yields a (non-persistent) MatExpr
			}
			currentX = x_k;
			onRegressorIterationCallback(currentX);
		}
		return currentX; // Return the final predictions
	};
Example no. 22
    void decode(char* item, int itemSize, char* buf) {
        Mat image = Mat(1, itemSize, CV_8UC1, item); // wrap the encoded bytes (1 byte per element) for imdecode
        Mat decodedImage = cv::imdecode(image, CV_LOAD_IMAGE_COLOR);
        Rect cropBox;
        _augParams->getRandomCrop(decodedImage.size(), &cropBox);
        auto cropArea = cropBox.area();
        auto innerSize = _augParams->getSize();

        Mat croppedImage = decodedImage(cropBox);
        // This would be more efficient, but we should allocate separate bufs for each thread
        // Mat resizedImage = Mat(innerSize, CV_8UC3, _scratchbuf);
        Mat resizedImage;
        if (innerSize.width == cropBox.width && innerSize.height == cropBox.height) {
            resizedImage = croppedImage;
        } else {
            int interp_method = cropArea < innerSize.area() ? CV_INTER_AREA : CV_INTER_CUBIC;
            cv::resize(croppedImage, resizedImage, innerSize, 0, 0, interp_method);
        }
        Mat flippedImage;
        Mat *finalImage;

        if (_augParams->doRandomFlip()) {
            cv::flip(resizedImage, flippedImage, 1);
            finalImage = &flippedImage;
        } else {
            finalImage = &resizedImage;
        }
        Mat newImage;
        float alpha = _augParams->getRandomContrast();
        if (alpha) {
            finalImage->convertTo(newImage, -1, alpha);
            finalImage = &newImage;
        }

        Mat ch_b(innerSize, CV_8U, buf + innerSize.area()*0);
        Mat ch_g(innerSize, CV_8U, buf + innerSize.area()*1);
        Mat ch_r(innerSize, CV_8U, buf + innerSize.area()*2);

        Mat channels[3] = {ch_b, ch_g, ch_r};
        cv::split(*finalImage, channels);
    }
Example no. 23
void resizeInput(vector<char> &jpgdata, int maxDim) {
    // Takes a buffer containing an encoded JPEG and determines whether its
    // shortest dimension is greater than maxDim. If so, scales the image down
    // so that the shortest dimension equals maxDim; equivalent to the
    // "512x512^>" geometry argument in ImageMagick for maxDim=512.

    Mat image = Mat(1, jpgdata.size(), CV_8UC1, &jpgdata[0]); // wrap the encoded bytes (1 byte per element) for imdecode
    Mat decodedImage = cv::imdecode(image, CV_LOAD_IMAGE_COLOR);

    int minDim = std::min(decodedImage.rows, decodedImage.cols);
    // If no resizing necessary, just return, original image still in jpgdata;
    if (minDim <= maxDim)
        return;

    vector<int> param = {CV_IMWRITE_JPEG_QUALITY, 90};
    double scaleFactor = (double) maxDim / (double) minDim;
    Mat resizedImage;
    cv::resize(decodedImage, resizedImage, Size2i(0, 0), scaleFactor, scaleFactor, CV_INTER_AREA);
    cv::imencode(".jpg", resizedImage, *(reinterpret_cast<vector<uchar>*>(&jpgdata)), param);
    //    cv::imencode(".jpg", resizedImage, jpgdata, param);
    return;
}
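// Usage sketch (hedged): read an encoded JPEG from disk into a byte buffer and
// cap its shortest side at 512 pixels. The file name is hypothetical; requires
// <fstream> and <iterator>.
std::ifstream in("input.jpg", std::ios::binary);
vector<char> jpgdata((std::istreambuf_iterator<char>(in)), std::istreambuf_iterator<char>());
resizeInput(jpgdata, 512);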
Example no. 24
void FingerTracker::Setup()
{
	VideoCapture capture(0);
	if (!capture.isOpened())
	{
		throw std::runtime_error("Could not start camera capture");
	}

	int windowSize = 25;
	int Xpos = 200;
	int Ypos = 50;
	int update = 0;
	int buttonClicked = 0;
	namedWindow("RGB", CV_WINDOW_AUTOSIZE);
	createTrackbar("X", "RGB",  &Xpos, 320, TrackBarCallback, (void*)&update);
	createTrackbar("Y", "RGB",  &Ypos, 240, TrackBarCallback, (void*)&update);
	createTrackbar("Size", "RGB",  &windowSize, 100, TrackBarCallback, (void*)&update);
	setMouseCallback("RGB", MouseCallback, (void*)&buttonClicked);
	Mat fingerWindowBackground, fingerWindowBackgroundGray;

	m_calibrationData.reset(new CalibrationData());
	bool ticking = false;
	std::chrono::system_clock::time_point start = std::chrono::system_clock::now();
    while (true)
	{  		
		Mat frame, frameHSV;

		if (capture.read(frame))
		{
			flip(frame, frame, 1);

			pyrDown(frame, frame, Size(frame.cols / 2, frame.rows / 2));

			Rect fingerWindow(Point(Xpos, Ypos), Size(windowSize, windowSize*3));
			if (Xpos + windowSize >= frame.cols || Ypos + windowSize*3 >= frame.rows)
			{
				windowSize = 20;
				Xpos = 200;
				Ypos = 50;
				update = 0;
			}
			else if (buttonClicked == 1)
			{
				frame(fingerWindow).copyTo(fingerWindowBackground);
				cvtColor(fingerWindowBackground, fingerWindowBackgroundGray, CV_BGR2GRAY);
				buttonClicked  = 0;
				update = 0;
				cvDestroyAllWindows();
			}

			if (fingerWindowBackgroundGray.rows && !m_calibrationData->m_ready)
			{
				Mat diff, thd;
				absdiff(frame(fingerWindow), fingerWindowBackground, diff);
				std::vector<Mat> ch;
				split(diff, ch);
				threshold(ch[0], ch[0], m_calibrationDiffThreshold, 255, 0);
				threshold(ch[1], ch[1], m_calibrationDiffThreshold, 255, 0);
				threshold(ch[2], ch[2], m_calibrationDiffThreshold, 255, 0);
				thd = ch[0];
				add(thd, ch[1], thd);
				add(thd, ch[2], thd);
				medianBlur(thd, thd, 5);

				Mat top, middle, bottom;
				Rect r1 = Rect(0, 0, thd.cols, thd.rows/3);
				Rect r2 = Rect(0, thd.rows / 3 + 1, thd.cols, thd.rows/3);
				Rect r3 = Rect(0, thd.rows * 2 / 3 + 1, thd.cols, thd.rows -  thd.rows * 2 / 3 - 1);
				top = thd(r1);
				middle = thd(r2);
				bottom = thd(r3);			

				auto percentageTop = countNonZero(top) * 100.0 / top.size().area();
				auto percentageMiddle = countNonZero(middle) * 100.0 / middle.size().area();
				auto percentageBottom = countNonZero(bottom) * 100.0 / bottom.size().area();

				bool topReady = false;
				bool middleReady = false;
				bool bottomReady = false;

				Scalar c1, c2, c3;
				if (percentageTop > m_calibrationTopLowerThd && percentageTop < m_calibrationTopUpperThd)
				{
					topReady = true;
					c1 = Scalar(0, 255, 255);
				}
				else
				{
					c1 = Scalar(0, 0, 255);
				}

				if (percentageMiddle > m_calibrationMiddleLowerThd && percentageMiddle < m_calibrationMiddleUppperThd)
				{
					middleReady = true;					
					c2 = Scalar(0, 255, 255);
				}
				else
				{
					c2 = Scalar(0, 0, 255);
				}

				if (percentageBottom > m_calibrationBottomLowerThd && percentageBottom < m_calibrationBottomUpperThd)
				{
					bottomReady = true;
					c3 = Scalar(0, 255, 255);
				}
				else
				{
					c3 = Scalar(0, 0, 255);
				}

				bool readyToGo = false;
				if (middleReady && topReady && bottomReady)
				{
					c1 = Scalar(0, 255, 0);
					c2 = Scalar(0, 255, 0);
					c3 = Scalar(0, 255, 0);
					if (!ticking)
					{
						start = std::chrono::system_clock::now();
						ticking = true;
					}
					if (std::chrono::system_clock::now() - start > std::chrono::seconds(1))
					{
						readyToGo = true;
					}
				}
				else
				{
					ticking = false;
				}

#if ENABLE_DEBUG_WINDOWS
				std::stringstream ss;
				ss << percentageTop << ", " << percentageMiddle << ", " << percentageBottom;
				putText(frame, ss.str(), Point(0, getTextSize(ss.str(), 0, 0.5, 1, nullptr).height), 0, 0.5, Scalar::all(255), 1);
				cv::imshow("Thresholded", thd);
#endif

				if (percentageTop >= m_calibrationTopUpperThd && percentageBottom >= m_calibrationBottomUpperThd && percentageMiddle >= m_calibrationMiddleUppperThd)
				{
					putText(frame, "Move finger away from camera", Point(0, getTextSize("Move finger away from camera", 0, 0.5, 1, nullptr).height), 0, 0.5, Scalar::all(255), 1);
				}
				else if (percentageTop <= m_calibrationTopLowerThd && percentageBottom <= m_calibrationBottomLowerThd && percentageMiddle <= m_calibrationMiddleLowerThd)
				{
					putText(frame, "Move finger closer to camera", Point(0, getTextSize("Move finger closer to camera", 0, 0.5, 1, nullptr).height), 0, 0.5, Scalar::all(255), 1);
				}
				
				if (readyToGo)
				{
					Mat framePatchHSV;
					cvtColor(frame(fingerWindow), framePatchHSV, CV_BGR2HSV);
					cvtColor(frame, frameHSV, CV_BGR2HSV);

					MatND hist;
					calcHist(&framePatchHSV, 1, m_calibrationData->m_channels, thd, hist, 2, m_calibrationData->m_histSize, (const float**)m_calibrationData->m_ranges, true, false);
					m_calibrationData->m_hist = hist;
					normalize(m_calibrationData->m_hist, m_calibrationData->m_hist, 0, 255, NORM_MINMAX, -1, Mat());

#if ENABLE_DEBUG_WINDOWS

					double maxVal=0;
					minMaxLoc(m_calibrationData->m_hist, 0, &maxVal, 0, 0);
					int scale = 10;
					Mat histImg = Mat::zeros(m_calibrationData->m_sbins*scale, m_calibrationData->m_hbins*10, CV_8UC3);
					for( int h = 0; h < m_calibrationData->m_hbins; h++)
					{
						for( int s = 0; s < m_calibrationData->m_sbins; s++ )
						{
							float binVal = m_calibrationData->m_hist.at<float>(h, s);
							int intensity = cvRound(binVal*255/maxVal);
							rectangle( histImg, Point(h*scale, s*scale),
										Point( (h+1)*scale - 1, (s+1)*scale - 1),
										Scalar::all(intensity),
										CV_FILLED );
						}
					}

					imshow("H-S Histogram", histImg);
#endif
					m_calibrationData->m_ready = true;
					frame(fingerWindow).copyTo(m_calibrationData->m_fingerPatch);
					m_calibrationData->m_fingerRect = fingerWindow;
					m_currentCandidate.m_windowRect = fingerWindow;
					m_currentCandidate.m_fingerPosition = fingerWindow.tl();
					return;
				}

				rectangle(frame, r1.tl() + fingerWindow.tl(), r1.br() + fingerWindow.tl(), c1);
				rectangle(frame, r2.tl() + fingerWindow.tl(), r2.br() + fingerWindow.tl(), c2);
				rectangle(frame, r3.tl() + fingerWindow.tl(), r3.br() + fingerWindow.tl(), c3);
				imshow("Calibration", frame);
			}
			else
			{
				int baseline = 0;
				putText(frame, "Adjust calibration window, click when ready", Point(0, getTextSize("Adjust calibration window", 0, 0.4, 2, &baseline).height), 0, 0.4, Scalar::all(255), 1);
				rectangle(frame, fingerWindow.tl(), fingerWindow.br(), Scalar(0, 0, 255));
				imshow("RGB", frame);
			}
			auto key = cvWaitKey(10);
			if (char(key) == 27)
			{
				break;
			}
		}
	}
	capture.release();
}
Example no. 25
void FingerTracker::Process(Mat frame)
{
#if ENABLE_DEBUG_WINDOWS
	Mat img_display;
	frame.copyTo(img_display);
#endif
	
	//	Process only Region of Interest i.e. region around current finger position
	Rect roi = Rect(		
		Point(std::max(m_currentCandidate.m_windowRect.tl().x - m_roiSpanX, 0), std::max(m_currentCandidate.m_windowRect.tl().y - m_roiSpanY, 0)), 		
		Point(std::min(m_currentCandidate.m_windowRect.tl().x + m_roiSpanX + m_calibrationData->m_fingerPatch.cols, frame.cols), 
			  std::min(m_currentCandidate.m_windowRect.tl().y + m_roiSpanY + m_calibrationData->m_fingerPatch.rows, frame.rows)));

	Mat frameRoi;
	frame(roi).copyTo(frameRoi);

	//================TEMPLATE MATCHING
	int result_cols =  frameRoi.cols - m_calibrationData->m_fingerPatch.cols + 1;
	int result_rows = frameRoi.rows - m_calibrationData->m_fingerPatch.rows + 1;
	assert(result_cols > 0 && result_rows > 0);

	Mat scoreMap;
	scoreMap.create(result_rows, result_cols, CV_32FC1); // Mat::create takes (rows, cols, type)

	//	Compare the current frame ROI against the known candidate patch using
	//	OpenCV matchTemplate with normalised cross-correlation (CV_TM_CCORR_NORMED)
	matchTemplate(frameRoi, m_calibrationData->m_fingerPatch, scoreMap, CV_TM_CCORR_NORMED);

	//================HISTOGRAM BACK PROJECTION
	MatND backProjection;
	Mat frameHSV;
	cvtColor(frameRoi, frameHSV, CV_BGR2HSV);

	calcBackProject(&frameHSV, 1, m_calibrationData->m_channels, m_calibrationData->m_hist, backProjection, (const float**)(m_calibrationData->m_ranges), 1, true);

	Mat backProjectionThresholded;
	threshold(backProjection, backProjectionThresholded, m_backProjectionThreshold, 255, 0);
	erode(backProjectionThresholded, backProjectionThresholded, getStructuringElement(MORPH_RECT, Size(2 * m_erosionSize + 1, 2 * m_erosionSize + 1), Point(m_erosionSize, m_erosionSize)));
	dilate(backProjectionThresholded, backProjectionThresholded, getStructuringElement(MORPH_RECT, Size(2 * m_dilationSize + 1, 2 * m_dilationSize + 1), Point(m_dilationSize, m_dilationSize)));

	Mat backProjectionThresholdedShifted;
	Rect shifted(Rect(m_calibrationData->m_fingerPatch.cols - 1, m_calibrationData->m_fingerPatch.rows - 1, scoreMap.cols, scoreMap.rows));
	backProjectionThresholded(shifted).copyTo(backProjectionThresholdedShifted);

	Mat maskedOutScoreMap(scoreMap.size(), CV_8U);
	scoreMap.copyTo(maskedOutScoreMap, backProjectionThresholdedShifted);

	//====================Localizing the best match with minMaxLoc
	double minVal; double maxVal; Point minLoc; Point maxLoc;
	Point matchLoc;
	minMaxLoc(maskedOutScoreMap, &minVal, &maxVal, &minLoc, &maxLoc, Mat());
	matchLoc = maxLoc + roi.tl();

	m_currentCandidate.m_confidence = static_cast<float>(maxVal);

	if (maxVal > m_candidateDetecionConfidenceThreshold)
	{
		m_currentCandidate.m_found = true;
		m_currentCandidate.m_windowRect = Rect(matchLoc, Point(matchLoc.x + m_calibrationData->m_fingerPatch.cols , matchLoc.y + m_calibrationData->m_fingerPatch.rows));		

		//================Find finger position
		Mat fingerWindowThresholded;
		backProjectionThresholded(Rect(maxLoc, Point(maxLoc.x + m_calibrationData->m_fingerPatch.cols , maxLoc.y + m_calibrationData->m_fingerPatch.rows))).copyTo(fingerWindowThresholded);
		
		m_currentCandidate.m_fingerPosition = GetFingerTopPosition(fingerWindowThresholded) + matchLoc;

#if ENABLE_DEBUG_WINDOWS
		rectangle(img_display, m_currentCandidate.m_windowRect.tl(), m_currentCandidate.m_windowRect.br(), Scalar(255,0,0), 2, 8, 0 );
		rectangle(scoreMap, m_currentCandidate.m_windowRect.tl(), m_currentCandidate.m_windowRect.br(), Scalar::all(0), 2, 8, 0 );
		rectangle(img_display, m_currentCandidate.m_fingerPosition, m_currentCandidate.m_fingerPosition + Point(5,5), Scalar(255,0,0));
#endif
	}
	else
	{
		m_currentCandidate.m_found = false;
	}

#if ENABLE_DEBUG_WINDOWS
	std::stringstream ss;
	ss << maxVal;
	putText(img_display, ss.str(), Point(50, 50), 0, 0.5, Scalar(255,255,255), 1);

	imshow("Overlays", img_display);
	imshow("Results", scoreMap);
	imshow("ResultMasked", maskedOutScoreMap);
#endif
}
Example no. 26
void Calibration::reproject(Scalar pixel_color, Scalar reproj_color)
{
	std::fstream filess ("extrinsicInfo.txt", fstream::out);

	filess << "px py ProX ProY wX wY " <<std::endl;
	
		
	stringstream ss;
	size_t nimgs = views.imgs.size();
	size_t npxs = views.pixel.size();
	size_t nwls = views.world.size();

	if(nimgs == npxs && nimgs == nwls) {
		// setup parameters
		Mat r;
		cv::Rodrigues(extrinsic_params.R, r);
		Mat& t = extrinsic_params.t;
		Mat& A = intrinsic_params.A;
		Mat& k = intrinsic_params.k;
		vector<Point2f> pixel;

		// iterate through each image
		for(size_t i = 0; i < nimgs; ++i) {
			// project from world to image frame
			pixel.clear();
			cv::projectPoints(Mat(views.world[i]), r, t, A, k, pixel);
			
			// get original world and pixel values from image views
			vector<Point3f>& w = views.world[i];
			vector<Point2f>& p = views.pixel[i];


			// get the image number in string format
			ss.str("");
			ss.seekp(0);
			ss.seekg(0);
			ss << "Image: " << (i + 1);
			std::cout << ss.str() << std::endl;

			for(size_t j = 0; j < views.pixel[i].size(); ++j) {
				// iterate over each point
				cv::circle(views.imgs[i], p[j], 5, pixel_color);
				cv::circle(views.imgs[i], pixel[j], 3, 
					reproj_color, CV_FILLED);

				std::cout << "  px=" << p[j].x << " rx=" << pixel[j].x
					<< " ex=" << p[j].x - pixel[j].x 
					<< " py=" << p[j].y << " ry=" << pixel[j].y
					<< " ey=" << p[j].y - pixel[j].y << std::endl;
				
				// Write original pixel, projected pixel and world to file
				filess << p[j].x << " " << p[j].y << " " << pixel[j].x << " " << pixel[j].y << " " << w[j].x << " " << w[j].y << " "<< w[j].z << std::endl;
			}

			Mat diff;
			cv::absdiff(Mat(p), Mat(pixel), diff);

			Scalar mean, stddev;
			cv::meanStdDev(diff, mean, stddev);
			std::cout << "average error |(px,py) - (rx,ry)|=(" << mean[0] << 
				","  << mean[1] << ")" << " +/- (" << stddev[0] << 
				","  << stddev[1] << ")" << std::endl;

			cv::imshow(ss.str(), views.imgs[i]);
			cv::waitKey(1);
		}
	}

	filess.close();
}
Example no. 27
/**
 * Renders the given mesh onto a 2D image using 4x4 model-view and
 * projection matrices. Conforms to OpenGL conventions.
 *
 * @param[in] mesh A 3D mesh.
 * @param[in] model_view_matrix A 4x4 OpenGL model-view matrix.
 * @param[in] projection_matrix A 4x4 orthographic or perspective OpenGL projection matrix.
 * @param[in] viewport_width Screen width.
 * @param[in] viewport_height Screen height.
 * @param[in] texture An optional texture map (TODO: Not optional yet!).
 * @param[in] enable_backface_culling Whether the renderer should perform backface culling. If true, only draw triangles with vertices ordered CCW in screen-space.
 * @param[in] enable_near_clipping Whether vertices should be clipped against the near plane.
 * @param[in] enable_far_clipping Whether vertices should be clipped against the far plane.
 * @return A pair with the colourbuffer as its first element and the depthbuffer as the second element.
 */
std::pair<cv::Mat, cv::Mat> render(Mesh mesh, cv::Mat model_view_matrix, cv::Mat projection_matrix, int viewport_width, int viewport_height, const Texture& texture, bool enable_backface_culling = false, bool enable_near_clipping = true, bool enable_far_clipping = true)
{
    // Some internal documentation / old todos or notes:
    // maybe change and pass depthBuffer as an optional arg (&?), because usually we never need it outside the renderer. Or maybe even a getDepthBuffer().
    // modelViewMatrix goes to eye-space (camera space), projection does ortho or perspective proj.
    // bool enable_texturing = false; Maybe re-add later, not sure
    // take a cv::Mat texture instead and convert to Texture internally? no, we don't want to recreate mipmap levels on each render() call.

    assert(mesh.vertices.size() == mesh.colors.size() || mesh.colors.empty()); // The number of vertices has to be equal for both shape and colour, or, alternatively, it has to be a shape-only model.
    assert(mesh.vertices.size() == mesh.texcoords.size() || mesh.texcoords.empty()); // same for the texcoords
    // another assert: If cv::Mat texture != empty, then we need texcoords?

    using cv::Mat;
    using std::vector;

    Mat colourbuffer = Mat::zeros(viewport_height, viewport_width, CV_8UC4); // make sure it's CV_8UC4?
    Mat depthbuffer = std::numeric_limits<float>::max() * Mat::ones(viewport_height, viewport_width, CV_64FC1);

    // Vertex shader:
    //processedVertex = shade(Vertex); // processedVertex : pos, col, tex, texweight
    // Assemble the vertices, project to clip space, and store as detail::Vertex (the internal representation):
    vector<detail::Vertex> clipspace_vertices;
    clipspace_vertices.reserve(mesh.vertices.size());
    for (int i = 0; i < mesh.vertices.size(); ++i) { // "previously": mesh.vertex
        Mat clipspace_coords = projection_matrix * model_view_matrix * Mat(mesh.vertices[i]);
        cv::Vec3f vertex_colour;
        if (mesh.colors.empty()) {
            vertex_colour = cv::Vec3f(0.5f, 0.5f, 0.5f);
        }
        else {
            vertex_colour = mesh.colors[i];
        }
        clipspace_vertices.push_back(detail::Vertex(clipspace_coords, vertex_colour, mesh.texcoords[i]));
    }

    // All vertices are in clip-space now.
    // Prepare the rasterisation stage.
    // For every vertex/tri:
    vector<detail::TriangleToRasterize> triangles_to_raster;
    for (const auto& tri_indices : mesh.tvi) {
        // Todo: Split this whole stuff up. Make a "clip" function, ... rename "processProspective..".. what is "process"... get rid of "continue;"-stuff by moving stuff inside process...
        // classify vertices visibility with respect to the planes of the view frustum
        // we're in clip-coords (NDC), so just check if outside [-1, 1] x ...
        // Actually we're in clip-coords and it's not the same as NDC. We're only in NDC after the division by w.
        // We should do the clipping in clip-coords though. See http://www.songho.ca/opengl/gl_projectionmatrix.html for more details.
        // However, when comparing against w_c below, we might run into the trouble of the sign again in the affine case.
        // 'w' is always positive, as it is -z_camspace, and all z_camspace are negative.
        unsigned char visibility_bits[3];
        for (unsigned char k = 0; k < 3; k++)
        {
            visibility_bits[k] = 0;
            float x_cc = clipspace_vertices[tri_indices[k]].position[0];
            float y_cc = clipspace_vertices[tri_indices[k]].position[1];
            float z_cc = clipspace_vertices[tri_indices[k]].position[2];
            float w_cc = clipspace_vertices[tri_indices[k]].position[3];
            if (x_cc < -w_cc)			// true if outside of view frustum. False if on or inside the plane.
                visibility_bits[k] |= 1;	// set bit if outside of frustum
            if (x_cc > w_cc)
                visibility_bits[k] |= 2;
            if (y_cc < -w_cc)
                visibility_bits[k] |= 4;
            if (y_cc > w_cc)
                visibility_bits[k] |= 8;
            if (enable_near_clipping && z_cc < -w_cc) // near plane frustum clipping
                visibility_bits[k] |= 16;
            if (enable_far_clipping && z_cc > w_cc) // far plane frustum clipping
                visibility_bits[k] |= 32;
        } // if all bits are 0, then it's inside the frustum
        // all vertices are not visible - reject the triangle.
        if ((visibility_bits[0] & visibility_bits[1] & visibility_bits[2]) > 0)
        {
            continue;
        }
        // all vertices are visible - pass the whole triangle to the rasterizer. = All bits of all 3 vertices are 0.
        if ((visibility_bits[0] | visibility_bits[1] | visibility_bits[2]) == 0)
        {
            boost::optional<detail::TriangleToRasterize> t = detail::process_prospective_tri(clipspace_vertices[tri_indices[0]], clipspace_vertices[tri_indices[1]], clipspace_vertices[tri_indices[2]], viewport_width, viewport_height, enable_backface_culling);
            if (t) {
                triangles_to_raster.push_back(*t);
            }
            continue;
        }
        // at this moment the triangle is known to be intersecting one of the view frustum's planes
        std::vector<detail::Vertex> vertices;
        vertices.push_back(clipspace_vertices[tri_indices[0]]);
        vertices.push_back(clipspace_vertices[tri_indices[1]]);
        vertices.push_back(clipspace_vertices[tri_indices[2]]);
        // split the triangle if it intersects the near plane:
        if (enable_near_clipping)
        {
            vertices = detail::clip_polygon_to_plane_in_4d(vertices, cv::Vec4f(0.0f, 0.0f, -1.0f, -1.0f)); // "Normal" (or "4D hyperplane") of the near-plane. I tested it and it works like this but I'm a little bit unsure because Songho says the normal of the near-plane is (0,0,-1,1) (maybe I have to switch around the < 0 checks in the function?)
        }

        // triangulation of the polygon formed of vertices array
        if (vertices.size() >= 3)
        {
            for (unsigned char k = 0; k < vertices.size() - 2; k++)
            {
                boost::optional<detail::TriangleToRasterize> t = detail::process_prospective_tri(vertices[0], vertices[1 + k], vertices[2 + k], viewport_width, viewport_height, enable_backface_culling);
                if (t) {
                    triangles_to_raster.push_back(*t);
                }
            }
        }
    }

    // Fragment/pixel shader: Colour the pixel values
    // for every tri:
    for (const auto& tri : triangles_to_raster) {
        detail::raster_triangle(tri, colourbuffer, depthbuffer, texture, enable_far_clipping);
    }
    return std::make_pair(colourbuffer, depthbuffer);
};
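// Usage sketch (hedged): render a mesh with an identity model-view matrix and a
// stand-in projection matrix, then save the colour buffer. The Mesh and Texture
// instances are assumed to come from the surrounding library; the matrices and
// viewport size are purely illustrative.
cv::Mat model_view = cv::Mat::eye(4, 4, CV_32FC1);
cv::Mat projection = cv::Mat::eye(4, 4, CV_32FC1); // replace with a real orthographic/perspective matrix
std::pair<cv::Mat, cv::Mat> buffers = render(mesh, model_view, projection, 640, 480, texture);
cv::imwrite("render.png", buffers.first); // colourbuffer; buffers.second is the depthbuffer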
Example no. 28
Mat get_L(Mat LR) {
  int w = LR.size().width / 2;
  int h = LR.size().height;
  return Mat(LR, Rect(0, 0, w, h));
}