Example No. 1
0
MarkerDetector::MarkerDetector(CameraCalibration calibration)
    : m_minContourLengthAllowed(100)
    , markerSize(100,100)
{
    // Wrap the calibration's raw float storage as 3x3 / 4x1 matrices and deep-copy
    // into our own members (copyTo detaches us from the temporary's buffer).
    // const_cast is needed because the accessors expose const data.
    cv::Mat(3, 3, CV_32F, const_cast<float*>(&calibration.getIntrinsic().data[0])).copyTo(camMatrix);
    cv::Mat(4, 1, CV_32F, const_cast<float*>(&calibration.getDistorsion().data[0])).copyTo(distCoeff);

    // Toggle: put the marker's 3D origin at its centre (unit square centred on
    // the origin) or at its top-left corner (unit square in the first quadrant).
    const bool centerOrigin = true;
    if (centerOrigin)
    {
        m_markerCorners3d.emplace_back(-0.5f, -0.5f, 0);
        m_markerCorners3d.emplace_back(+0.5f, -0.5f, 0);
        m_markerCorners3d.emplace_back(+0.5f, +0.5f, 0);
        m_markerCorners3d.emplace_back(-0.5f, +0.5f, 0);
    }
    else
    {
        m_markerCorners3d.emplace_back(0, 0, 0);
        m_markerCorners3d.emplace_back(1, 0, 0);
        m_markerCorners3d.emplace_back(1, 1, 0);
        m_markerCorners3d.emplace_back(0, 1, 0);
    }

    // 2D corners of the canonical (rectified) marker image, clockwise from
    // the top-left pixel; used as the warp target when unprojecting markers.
    m_markerCorners2d.emplace_back(0.0f, 0.0f);
    m_markerCorners2d.emplace_back(markerSize.width - 1, 0.0f);
    m_markerCorners2d.emplace_back(markerSize.width - 1, markerSize.height - 1);
    m_markerCorners2d.emplace_back(0.0f, markerSize.height - 1);
}
Example No. 2
0
void PatternTrackingInfo::computePose(const Pattern& pattern, const CameraCalibration& calibration)
{
  cv::Mat Rvec;
  cv::Mat_<float> Tvec;
  cv::Mat raux,taux;

  cv::solvePnP(pattern.points3d, points2d, calibration.getIntrinsic(), calibration.getDistorsion(),raux,taux);
  raux.convertTo(Rvec,CV_32F);
  taux.convertTo(Tvec ,CV_32F);

  cv::Mat_<float> rotMat(3,3); 
  cv::Rodrigues(Rvec, rotMat);

  // Copy to transformation matrix
  for (int col=0; col<3; col++)
  {
    for (int row=0; row<3; row++)
    {        
     pose3d.r().mat[row][col] = rotMat(row,col); // Copy rotation component
    }
    pose3d.t().data[col] = Tvec(col); // Copy translation component
  }

  // Since solvePnP finds camera location, w.r.t to marker pose, to get marker pose w.r.t to the camera we invert it.
  pose3d = pose3d.getInverted();
}
Example No. 3
0
// --------------------------------------------------------------------------
// display(引数なし)
// 画面の更新時に呼ばれる関数です
// 戻り値: なし
// --------------------------------------------------------------------------
void display(void)
{
    // 描画用のバッファクリア
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    if (cap.isOpened()) {
        // カメラ画像取得
        cv::Mat image_raw;
        cap >> image_raw;

        // 歪み補正
        cv::Mat image;
        cv::remap(image_raw, image, mapx, mapy, cv::INTER_LINEAR);

        // カメラ画像(RGB)表示
        cv::Mat rgb;
        cv::cvtColor(image, rgb, cv::COLOR_BGR2RGB);
        cv::flip(rgb, rgb, 0);
        glDepthMask(GL_FALSE);
        glDrawPixels(rgb.cols, rgb.rows, GL_RGB, GL_UNSIGNED_BYTE, rgb.data);

        // BGRA画像
        cv::Mat bgra;
        cv::cvtColor(image, bgra, cv::COLOR_BGR2BGRA);

        // データを渡す
        BGRAVideoFrame frame;
        frame.width = bgra.cols;
        frame.height = bgra.rows;
        frame.data = bgra.data;
        frame.stride = bgra.step;

        // マーカ検出
        MarkerDetector detector(calibration);
        detector.processFrame(frame);
        std::vector<Transformation> transformations = detector.getTransformations();

        // 射影変換行列を計算
        Matrix44 projectionMatrix = buildProjectionMatrix(calibration.getIntrinsic(), frame.width, frame.height);

        // 射影変換行列を適用
        glMatrixMode(GL_PROJECTION);
        glLoadMatrixf(projectionMatrix.data);

        // ビュー行列の設定
        glMatrixMode(GL_MODELVIEW);
        glLoadIdentity();

        // デプス有効
        glDepthMask(GL_TRUE);

        // 頂点配列有効
        glEnableClientState(GL_VERTEX_ARRAY);
        glEnableClientState(GL_COLOR_ARRAY);

        // ビュー行列を退避
        glPushMatrix();

        // ラインの太さ設定
        glLineWidth(3.0f);

        // ライン頂点配列
        float lineX[] = { 0, 0, 0, 1, 0, 0 };
        float lineY[] = { 0, 0, 0, 0, 1, 0 };
        float lineZ[] = { 0, 0, 0, 0, 0, 1 };

        // 2D平面
        const GLfloat squareVertices[] = { -0.5f, -0.5f,
                                            0.5f, -0.5f,
                                           -0.5f,  0.5f,
                                            0.5f,  0.5f };

        // 2D平面カラー(RGBA)
        const GLubyte squareColors[] = { 255, 255,   0, 255,
                                           0, 255, 255, 255,
                                           0,   0,   0,   0,
                                         255,   0, 255, 255 };

        // AR描画
        for (size_t i = 0; i < transformations.size(); i++) {
            // 変換行列
            Matrix44 glMatrix = transformations[i].getMat44();

            // ビュー行列にロード
            glLoadMatrixf(reinterpret_cast<const GLfloat*>(&glMatrix.data[0]));

            // 2D平面の描画
            glEnableClientState(GL_COLOR_ARRAY);
            glVertexPointer(2, GL_FLOAT, 0, squareVertices);
            glColorPointer(4, GL_UNSIGNED_BYTE, 0, squareColors);
            glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
            glDisableClientState(GL_COLOR_ARRAY);

            // 座標軸のスケール
            float scale = 0.5;
            glScalef(scale, scale, scale);

            // カメラから見えるようにちょっと移動
            glTranslatef(0, 0, 0.1f);

            // X軸
            glColor4f(1.0f, 0.0f, 0.0f, 1.0f);
            glVertexPointer(3, GL_FLOAT, 0, lineX);
            glDrawArrays(GL_LINES, 0, 2);

            // Y軸
            glColor4f(0.0f, 1.0f, 0.0f, 1.0f);
            glVertexPointer(3, GL_FLOAT, 0, lineY);
            glDrawArrays(GL_LINES, 0, 2);

            // Z軸
            glColor4f(0.0f, 0.0f, 1.0f, 1.0f);
            glVertexPointer(3, GL_FLOAT, 0, lineZ);
            glDrawArrays(GL_LINES, 0, 2);
        }

        // 頂点配列無効
        glDisableClientState(GL_VERTEX_ARRAY);

        // ビュー行列を戻す
        glPopMatrix();
    }