Example #1
MarkerDetector::MarkerDetector(CameraCalibration calibration)
    : m_minContourLengthAllowed(100)
    , markerSize(100,100)
{
    // Wrap the calibration arrays in cv::Mat headers and deep-copy them into members
    cv::Mat(3,3, CV_32F, const_cast<float*>(&calibration.getIntrinsic().data[0])).copyTo(camMatrix);
    cv::Mat(4,1, CV_32F, const_cast<float*>(&calibration.getDistorsion().data[0])).copyTo(distCoeff);

    bool centerOrigin = true;
    if (centerOrigin)
    {
        m_markerCorners3d.push_back(cv::Point3f(-0.5f,-0.5f,0));
        m_markerCorners3d.push_back(cv::Point3f(+0.5f,-0.5f,0));
        m_markerCorners3d.push_back(cv::Point3f(+0.5f,+0.5f,0));
        m_markerCorners3d.push_back(cv::Point3f(-0.5f,+0.5f,0));
    }
    else
    {
        m_markerCorners3d.push_back(cv::Point3f(0,0,0));
        m_markerCorners3d.push_back(cv::Point3f(1,0,0));
        m_markerCorners3d.push_back(cv::Point3f(1,1,0));
        m_markerCorners3d.push_back(cv::Point3f(0,1,0));    
    }

    m_markerCorners2d.push_back(cv::Point2f(0,0));
    m_markerCorners2d.push_back(cv::Point2f(markerSize.width-1,0));
    m_markerCorners2d.push_back(cv::Point2f(markerSize.width-1,markerSize.height-1));
    m_markerCorners2d.push_back(cv::Point2f(0,markerSize.height-1));
}
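
A minimal usage sketch for the constructor above. The CameraCalibration(fx, fy, cx, cy) signature and the intrinsic values are assumptions for illustration, not taken from this listing:

// Hypothetical usage -- the CameraCalibration(fx, fy, cx, cy) signature is assumed.
CameraCalibration calibration(726.0f, 726.0f, 320.0f, 240.0f);
MarkerDetector detector(calibration);
// Each captured frame can then be handed to detector.processFrame(frame),
// as Example 7 does.
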
Example #2
void PatternTrackingInfo::computePose(const Pattern& pattern, const CameraCalibration& calibration)
{
  cv::Mat Rvec;
  cv::Mat_<float> Tvec;
  cv::Mat raux,taux;

  cv::solvePnP(pattern.points3d, points2d, calibration.getIntrinsic(), calibration.getDistorsion(),raux,taux);
  raux.convertTo(Rvec,CV_32F);
  taux.convertTo(Tvec ,CV_32F);

  cv::Mat_<float> rotMat(3,3); 
  cv::Rodrigues(Rvec, rotMat);

  // Copy to transformation matrix
  for (int col=0; col<3; col++)
  {
    for (int row=0; row<3; row++)
    {
      pose3d.r().mat[row][col] = rotMat(row,col); // Copy rotation component
    }
    pose3d.t().data[col] = Tvec(col); // Copy translation component
  }

  // Since solvePnP finds the camera location w.r.t. the marker pose, we invert it to get the marker pose w.r.t. the camera.
  pose3d = pose3d.getInverted();
}
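
Since a rotation matrix is orthonormal, inverting a rigid transform [R|t] gives [R^T | -R^T t]. A minimal sketch of what pose3d.getInverted() presumably computes, reusing rotMat and Tvec from above:

// Sketch only: assumes getInverted() performs a standard rigid-transform inversion.
cv::Mat_<float> Rinv = rotMat.t();      // the inverse of a rotation matrix is its transpose
cv::Mat_<float> tinv = -(Rinv * Tvec);  // translation of the inverted pose
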
void ARDrawingContext::buildProjectionMatrix(const CameraCalibration& calibration, int screen_width, int screen_height, Matrix44& projectionMatrix)
{
  float nearPlane = 0.01f;  // Near clipping distance
  float farPlane  = 100.0f;  // Far clipping distance

  // Camera parameters
  float f_x = calibration.fx(); // Focal length in x axis
  float f_y = calibration.fy(); // Focal length in y axis (usually the same as f_x)
  float c_x = calibration.cx(); // Camera principal point x
  float c_y = calibration.cy(); // Camera principal point y

  projectionMatrix.data[0] = -2.0f * f_x / screen_width;
  projectionMatrix.data[1] = 0.0f;
  projectionMatrix.data[2] = 0.0f;
  projectionMatrix.data[3] = 0.0f;

  projectionMatrix.data[4] = 0.0f;
  projectionMatrix.data[5] = 2.0f * f_y / screen_height;
  projectionMatrix.data[6] = 0.0f;
  projectionMatrix.data[7] = 0.0f;

  projectionMatrix.data[8] = 2.0f * c_x / screen_width - 1.0f;
  projectionMatrix.data[9] = 2.0f * c_y / screen_height - 1.0f;    
  projectionMatrix.data[10] = -( farPlane + nearPlane) / ( farPlane - nearPlane );
  projectionMatrix.data[11] = -1.0f;

  projectionMatrix.data[12] = 0.0f;
  projectionMatrix.data[13] = 0.0f;
  projectionMatrix.data[14] = -2.0f * farPlane * nearPlane / ( farPlane - nearPlane );        
  projectionMatrix.data[15] = 0.0f;
}
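
Note that data[] is filled column-major, the layout glLoadMatrixf expects, and that the negated data[0] mirrors the X axis to reconcile the camera and OpenGL conventions. A minimal, hypothetical call site from inside ARDrawingContext (the 640x480 screen size and the m_calibration member name are placeholders):

// Hypothetical call site; assumes this code runs inside ARDrawingContext.
Matrix44 proj;
buildProjectionMatrix(m_calibration, 640, 480, proj); // m_calibration is an assumed member name
glMatrixMode(GL_PROJECTION);
glLoadMatrixf(proj.data); // column-major data matches OpenGL's convention
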
CameraCalibration::CameraCalibration(const CameraCalibration &calibration)
{
    internal_calibration = calibration.get_internal_calibration();
    radial_distortion_parameters = calibration.get_radial_distortion_parameters();
    tangential_distortion_parameters = calibration.get_tangential_distortion_parameters();

    pose_p.reset(new Pose(calibration.get_pose()));

}
void CameraCalibrationCv::setCalibration( const CameraCalibration& calib )
{
    if( !calib.isValid() )
        throw std::runtime_error("Camera calibration values are not valid.");
    this->calib = calib;

    camMatrix.create(3, 3, CV_64F);
    camMatrix = 0.0;                  // zero-initialize, then fill the pinhole entries
    camMatrix.at<double>(2,2) = 1.0;
    camMatrix.at<double>(0,0) = calib.fx;
    camMatrix.at<double>(0,2) = calib.cx;
    camMatrix.at<double>(1,1) = calib.fy;
    camMatrix.at<double>(1,2) = calib.cy;

    distCoeffs.create(1, 4, CV_64F);
    distCoeffs.at<double>(0,0) = calib.d0;
    distCoeffs.at<double>(0,1) = calib.d1;
    distCoeffs.at<double>(0,2) = calib.d2;
    distCoeffs.at<double>(0,3) = calib.d3;

    imageSize = cv::Size( calib.width, calib.height );

    initialized = false;
    valid = true;
}
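
Once camMatrix and distCoeffs are filled in, the calibration can be fed straight into OpenCV's undistortion routines. A minimal sketch, assuming the members are accessible at the call site and "frame.png" is a placeholder path:

cv::Mat raw = cv::imread("frame.png");  // placeholder input image
cv::Mat rectified;
cv::undistort(raw, rectified, camMatrix, distCoeffs);
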
Example #6
int main(int argc, char* argv[])
{
	CameraCalibration cc;
	VanishingPointDetector vpd;
	HoughLinesDetector hld;
	EdgeLineDetector el;
	PersonDetector pd;
	PersonMeasurement pm;
	CalibrateCam cl;

	Mat dist, undist, undistEdge, undistHoughLines, undistVL;

#pragma region Introduction

	cout << " Firstly, I want to thanks to my teacher for the support he gave me," <<
		"\nto all the developers, students, lecturers that shared their work." <<
		"\n This application was developesd in the summer semester of 2016, at " <<
		"\nthe Technical Univerity - Brno, Czech Republic, by a student from " <<
		"\noTechnical University of Cluj-Napoca, Romania." <<
		"\n It is freely to use any part of the code long as you point for " <<
		"\nreferences of this work." <<
		"\n\tWork done on the foundation created by Ing. Adam Olejar" <<
		"\n\tWith the supervision of" <<
		"\n\t\t\tdoc. Ing. Kamil Riha, Ph.D. - VUT Brno-CZ" <<
		"\n\t\t\tdoc. Ing. Raul MALUTAN - UT Cluj-Napoca-RO" <<
		"\n\n\t\t\t\t\tMarius N.\n\n";

	cout << "*****\nApplication folder structure\n*****\n\nMainFolder";
	cout << "\n-> calibration";
	cout << "\n  --> input(images from which the information will be extracted)";
	cout << "\n  --> output (Images took within the process)";
	cout << "\n  --> calibfiles ('default.xml', 'imageList.xml'";
	cout << "\n      yml / xml with calibration output)";
	cout << "\n     ---> calibImages(Images used for calibrating the camera)";
	cout << "\n\nThe process consists of having a picture with a person\n";
	cout << "within it, detecting edges in order to detect the \n";
	cout << "vanishing lines (and points), necessary later for \n";
	cout << "calculating the height of the person by knowing \n";
	cout << "the measure of one object/distance from the image.\n";
	cout << "\nA brief tree of the process is showed below.\n\n";
	cout << "Is the camera calibrated? (Camera lenses usually distort the image).\n";
	cout << " | No -> Calibrate the camera with the calibration process.\n";
	cout << " > Yes -> No calibration needed. Continue with\n";
	cout << " > Opening the image. (image in *.jpg format in 'input' folder)\n";
	cout << "  > Is the loaded image distorted?\n";
	cout << "   | No --> The image is undistorted with the calibration matrix.\n";
	cout << "   > Yes  --> Continue with \n";
	cout << "    > Detect Edges...\n";
	cout << "     > Detect vanishing lines...\n";
	cout << "      > Calculate vanishing points...\n";
	cout << "       > Detect person(s)...\n";
	cout << "        > Select the known object size and enter it.\n";
	cout << "         > Calculate the height of the person(s).\n";
	do
	{
		cout << endl
			<< '\n'
			<< "After you read all the information, please contine by" << endl
			<< "Pressing ENTER.";
	} while (cin.get() != '\n');

#pragma endregion

	/// Calibrate camera function
	char calib;
	cout << endl << "Is the camera calibrated? y/n" << endl;
	cin >> calib;
	if (calib == 'n')
	{
		cout << "*****\nWhen the process is finished, press 's' to save an image,";
		cout << "\nor 'ESC' to go to the next step while vieweing an image.\n*****\n";
		cl.calibrateCam(argc, argv);
		cout << endl << "Calibration finished.";
	}
	///*Load image*/
	string in = "13";
	cout << endl << "Please type image name (without extension): ";
	cin >> in;
	string inputImg = "calibration/input/" + in + ".jpg";
	dist = imread(inputImg, CV_LOAD_IMAGE_COLOR);

	if (!dist.data)                              // Check for invalid input
	{
		cout << "Could not open or find the distorted image!!" << std::endl;
		return 0;
	}
	else
		cout << "*******Image loaded with success!*******" << std::endl;
	///*Remove distortion from loaded image*/
	/*Start from a copy of the loaded image; it is overwritten below if calibration is applied*/
	undist = dist.clone();
	char clb = 'y';
	cout << "Do you want to calibrate the image? y/n" << endl;
	cin >> clb;
	if (clb == 'y')
	{
		cc.LoadCalibration("calibration/calibfiles/4kUltra.yml");
		cout << "*Calibration matrices were loaded with success!*" << endl;
		// Undistort the image
		cc.RemoveDistortion(dist, undist);
		imwrite("calibration/output/UndistortedImag.jpg", undist);
		cout << "***The image has been undistorted with success!***" << std::endl;
		cout << "It can be found in 'calibration/output/UndistortedImag.jpg'" << std::endl << std::endl;
	}

	///*Draw Edges*/
	///*Draw Hough Lines*/
	///*Find vanishing points*/
	vpd.computeVanishingPoints(undist);
	undistVL = undist; // note: shares pixel data with undist (no deep copy)
	//imshow("Undistorted&VP", undistVL);

	///*Draw VanishPoints*/
	vpd.DrawVanishingPoints(undistVL);
	//namedWindow("Undistorted&VP", 0);
	//imshow("Undistorted&VP", undist);

	///*Find person*/
	vector<Rect> persons = pd.FindPerson(undist);
	persons.push_back(Rect());

	///*Compute height*/
	pm.setVariables(vpd.verticalVP, vpd.horizontalVP, vpd.depthVP, persons, undist);

	//Wait for key to be pressed to end
	char c = (char)waitKey();
	if (c == 'q' || c == 'Q')
	{
		cout << endl << "\n***\nAn output file has been saved in 'output.txt'.\nProgram will now exit.\n***";
		freopen("calibration/output/output.txt", "w", stdout);
		return 0;
	}
	freopen("calibration/output/outputErr.txt", "w", stdout);
	waitKey(0);
	freopen("calibration/output/outputErr(afe.txt", "w", stdout);
}
Example #7
// --------------------------------------------------------------------------
// display (no arguments)
// Called whenever the screen needs to be redrawn
// Return value: none
// --------------------------------------------------------------------------
void display(void)
{
    // Clear the color and depth buffers
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    if (cap.isOpened()) {
        // Grab a frame from the camera
        cv::Mat image_raw;
        cap >> image_raw;

        // Undistort the image
        cv::Mat image;
        cv::remap(image_raw, image, mapx, mapy, cv::INTER_LINEAR);

        // Display the camera image (RGB)
        cv::Mat rgb;
        cv::cvtColor(image, rgb, cv::COLOR_BGR2RGB);
        cv::flip(rgb, rgb, 0);  // flip vertically: OpenGL's origin is bottom-left
        glDepthMask(GL_FALSE);  // don't write depth while drawing the background
        glDrawPixels(rgb.cols, rgb.rows, GL_RGB, GL_UNSIGNED_BYTE, rgb.data);

        // BGRA copy of the image
        cv::Mat bgra;
        cv::cvtColor(image, bgra, cv::COLOR_BGR2BGRA);

        // Pass the data to the detector
        BGRAVideoFrame frame;
        frame.width = bgra.cols;
        frame.height = bgra.rows;
        frame.data = bgra.data;
        frame.stride = bgra.step;
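        // Note: frame only references bgra's pixel buffer (no copy is made),
        // so bgra must stay alive for as long as frame is used.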

        // Marker detection
        MarkerDetector detector(calibration);
        detector.processFrame(frame);
        std::vector<Transformation> transformations = detector.getTransformations();

        // Compute the projection matrix
        Matrix44 projectionMatrix = buildProjectionMatrix(calibration.getIntrinsic(), frame.width, frame.height);

        // Apply the projection matrix
        glMatrixMode(GL_PROJECTION);
        glLoadMatrixf(projectionMatrix.data);

        // Set up the view matrix
        glMatrixMode(GL_MODELVIEW);
        glLoadIdentity();

        // Enable depth writes
        glDepthMask(GL_TRUE);

        // Enable vertex arrays
        glEnableClientState(GL_VERTEX_ARRAY);
        glEnableClientState(GL_COLOR_ARRAY);

        // Save the view matrix
        glPushMatrix();

        // Set the line width
        glLineWidth(3.0f);

        // Axis line vertex arrays
        float lineX[] = { 0, 0, 0, 1, 0, 0 };
        float lineY[] = { 0, 0, 0, 0, 1, 0 };
        float lineZ[] = { 0, 0, 0, 0, 0, 1 };

        // 2D plane (quad)
        const GLfloat squareVertices[] = { -0.5f, -0.5f,
                                            0.5f, -0.5f,
                                           -0.5f,  0.5f,
                                            0.5f,  0.5f };

        // 2D plane colors (RGBA)
        const GLubyte squareColors[] = { 255, 255,   0, 255,
                                           0, 255, 255, 255,
                                           0,   0,   0,   0,
                                         255,   0, 255, 255 };

        // AR rendering
        for (size_t i = 0; i < transformations.size(); i++) {
            // Transformation matrix
            Matrix44 glMatrix = transformations[i].getMat44();

            // Load it into the modelview matrix
            glLoadMatrixf(reinterpret_cast<const GLfloat*>(&glMatrix.data[0]));

            // Draw the 2D plane
            glEnableClientState(GL_COLOR_ARRAY);
            glVertexPointer(2, GL_FLOAT, 0, squareVertices);
            glColorPointer(4, GL_UNSIGNED_BYTE, 0, squareColors);
            glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
            glDisableClientState(GL_COLOR_ARRAY);

            // Scale for the coordinate axes
            float scale = 0.5f;
            glScalef(scale, scale, scale);

            // Shift slightly along Z so the axes are visible to the camera
            glTranslatef(0, 0, 0.1f);

            // X axis
            glColor4f(1.0f, 0.0f, 0.0f, 1.0f);
            glVertexPointer(3, GL_FLOAT, 0, lineX);
            glDrawArrays(GL_LINES, 0, 2);

            // Y axis
            glColor4f(0.0f, 1.0f, 0.0f, 1.0f);
            glVertexPointer(3, GL_FLOAT, 0, lineY);
            glDrawArrays(GL_LINES, 0, 2);

            // Z axis
            glColor4f(0.0f, 0.0f, 1.0f, 1.0f);
            glVertexPointer(3, GL_FLOAT, 0, lineZ);
            glDrawArrays(GL_LINES, 0, 2);
        }

        // Disable vertex arrays
        glDisableClientState(GL_VERTEX_ARRAY);

        // Restore the view matrix
        glPopMatrix();
    }