Example 1
aruco::CameraParameters aruco_ros::rosCameraInfo2ArucoCamParams(const sensor_msgs::CameraInfo& cam_info,
                                                                bool useRectifiedParameters)
{
    cv::Mat cameraMatrix(3, 3, CV_32FC1);
    cv::Mat distorsionCoeff(4, 1, CV_32FC1);
    cv::Size size(cam_info.width, cam_info.height);

    if ( useRectifiedParameters )
    {
      cameraMatrix.setTo(0);
      cameraMatrix.at<float>(0,0) = cam_info.P[0];   cameraMatrix.at<float>(0,1) = cam_info.P[1];   cameraMatrix.at<float>(0,2) = cam_info.P[2];
      cameraMatrix.at<float>(1,0) = cam_info.P[4];   cameraMatrix.at<float>(1,1) = cam_info.P[5];   cameraMatrix.at<float>(1,2) = cam_info.P[6];
      cameraMatrix.at<float>(2,0) = cam_info.P[8];   cameraMatrix.at<float>(2,1) = cam_info.P[9];   cameraMatrix.at<float>(2,2) = cam_info.P[10];

      for(int i=0; i<4; ++i)
        distorsionCoeff.at<float>(i, 0) = 0;
    }
    else
    {
      for(int i=0; i<9; ++i)
        cameraMatrix.at<float>(i/3, i%3) = cam_info.K[i];

      for(int i=0; i<4; ++i)
        distorsionCoeff.at<float>(i, 0) = cam_info.D[i];
    }

    return aruco::CameraParameters(cameraMatrix, distorsionCoeff, size);
}
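A minimal call-site sketch for the converter above; the subscriber context and the header path are assumptions, not part of the original example:

#include <ros/ros.h>
#include <sensor_msgs/CameraInfo.h>
#include <aruco_ros/aruco_ros_utils.h>

void cameraInfoCallback(const sensor_msgs::CameraInfoConstPtr& msg)
{
  // false -> use the raw K/D intrinsics; true -> use the rectified projection matrix P
  aruco::CameraParameters camParams =
      aruco_ros::rosCameraInfo2ArucoCamParams(*msg, false);
  ROS_INFO_STREAM("camera matrix:\n" << camParams.CameraMatrix);
}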
Example 2
Eigen::Isometry3d getRelativeTransform(TagMatch const& match, Eigen::Matrix3d const & camera_matrix, const Eigen::Vector4d &distortion_coefficients, double tag_size) 
{
  std::vector<cv::Point3f> objPts;
  std::vector<cv::Point2f> imgPts;
  double s = tag_size/2.;
  objPts.push_back(cv::Point3f(-s,-s, 0));
  objPts.push_back(cv::Point3f( s,-s, 0));
  objPts.push_back(cv::Point3f( s, s, 0));
  objPts.push_back(cv::Point3f(-s, s, 0));


  imgPts.push_back(match.p0);
  imgPts.push_back(match.p1);
  imgPts.push_back(match.p2);
  imgPts.push_back(match.p3);

  cv::Mat rvec, tvec;
  cv::Matx33f cameraMatrix(
                           camera_matrix(0,0), 0, camera_matrix(0,2),
                           0, camera_matrix(1,1), camera_matrix(1,2),
                           0,  0,  1);

  cv::Vec4f distParam(distortion_coefficients(0), distortion_coefficients(1), distortion_coefficients(2), distortion_coefficients(3)); 
  cv::solvePnP(objPts, imgPts, cameraMatrix, distParam, rvec, tvec);
  cv::Matx33d r;
  cv::Rodrigues(rvec, r);
  Eigen::Matrix3d wRo;
  wRo << r(0,0), r(0,1), r(0,2), r(1,0), r(1,1), r(1,2), r(2,0), r(2,1), r(2,2);

  Eigen::Isometry3d T; 
  T.linear() = wRo;
  T.translation() << tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2);
  return T;
}
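A hedged call-site sketch for getRelativeTransform; the TagMatch field layout (four cv::Point2f corners, ordered like objPts) and the intrinsics are assumptions:

TagMatch match;                        // assumed to carry the four detected corners
match.p0 = cv::Point2f(310.f, 340.f);  // (-s,-s)
match.p1 = cv::Point2f(410.f, 340.f);  // ( s,-s)
match.p2 = cv::Point2f(410.f, 240.f);  // ( s, s)
match.p3 = cv::Point2f(310.f, 240.f);  // (-s, s)

Eigen::Matrix3d K;
K << 525.0,   0.0, 319.5,
       0.0, 525.0, 239.5,
       0.0,   0.0,   1.0;

Eigen::Isometry3d T = getRelativeTransform(match, K, Eigen::Vector4d::Zero(), 0.16);  // 16 cm tag
std::cout << T.matrix() << std::endl;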
Example 3
void VTerrainSectorRenderer::SetSector(VTerrainSector *pSector)
{
  const VTerrainConfig &config(pSector->m_Config);
  m_pSector = pSector;
  
  const int iSx = m_pSector->m_iIndexX;
  const int iSy = m_pSector->m_iIndexY;

  const int iCountX = (config.m_iSectorCount[0] == 1) ? 1 : (((iSx > 0) && (iSx < config.m_iSectorCount[0] - 1)) ? 3 : 2);
  const int iCountY = (config.m_iSectorCount[1] == 1) ? 1 : (((iSy > 0) && (iSy < config.m_iSectorCount[1] - 1)) ? 3 : 2);

  m_spOwner->EnsureSectorRangeLoaded(hkvMath::Max(0, iSx-1), hkvMath::Max(0, iSy-1), iCountX, iCountY);
  
  m_pRenderLoop->SetSector(m_pSector);
  m_pRenderLoop->m_pTargetData = m_spTargetData; // save next time...

  float fDistance = 0.0f;

  const int iBaseX = hkvMath::Max(0, m_pSector->m_iIndexX - 1);
  const int iBaseY = hkvMath::Max(0, m_pSector->m_iIndexY - 1);

  for (int x = 0; x < iCountX; ++x)
  {
    for (int y = 0; y < iCountY; ++y)
    {
      VTerrainSector* pCurrentSector = m_pSector->GetSectorManager()->GetSector(iBaseX + x, iBaseY + y);
      fDistance = hkvMath::Max(fDistance, hkvMath::Max(config.m_vTerrainPos.z, pCurrentSector->m_fMaxHeightValue) -
        hkvMath::Min(config.m_vTerrainPos.z, pCurrentSector->m_fMinHeightValue));
    }
  }

  // In case the terrain's sectors all have min/max height values of zero, a default distance of 100.0f is assumed.
  if (fDistance <= 0.0f)
    fDistance = 100.0f;

  hkvVec3 vCenter = m_pSector->GetSectorOrigin() + config.m_vSectorSize.getAsVec3(0.0f)*0.5f;
  vCenter.z = config.m_vTerrainPos.z + fDistance;

  hkvMat3 cameraMatrix (hkvNoInitialization);
  cameraMatrix.setLookInDirectionMatrix (hkvVec3(0, 0, -1), hkvVec3(0, 1, 0));

  m_pCamera->Set(cameraMatrix,vCenter);

  const float fWidth = (float)(m_spTargetTex->GetTextureWidth());
  const float fBorderWidth = ((config.m_vSectorSize.x * fWidth) / (fWidth - m_iBorderWidth*2));

  const float fHeight = (float)(m_spTargetTex->GetTextureHeight());
  const float fBorderHeight = ((config.m_vSectorSize.y * fHeight) / (fHeight - m_iBorderWidth*2));

  m_spContext->GetViewProperties()->setOrthographicSize(fBorderWidth, fBorderHeight);

  

  const float fNear = 0.0f;
  const float fFar  = config.m_vTerrainPos.z + fDistance*2.0f;
  m_spContext->GetViewProperties()->setClipPlanes(fNear, fFar);

  m_pCamera->ReComputeVisibility();
}
Example 4
PMatrix4x4 PRenderTransform::mvp() const
{
    pfloat32 mv[16];
    m_drawable->calculateModelCameraMatrix(cameraMatrix(), mv);

    PMatrix4x4 ret;
    pMatrix4x4Multiply(projectionMatrix().m_m, mv, ret.m_m);
    
    return ret;
}
Example 5
Eigen::Isometry3d transformEstimation(const Mat& rgb1,const Mat& rgb2,const Mat& depth1,const Mat& depth2,const CAMERA_INTRINSIC_PARAMETERS& C)
{
    vector<KeyPoint> keyPts1,keyPts2;
    Mat descriptors1,descriptors2;

    extractKeypointsAndDescripotrs(rgb1,rgb2,keyPts1,keyPts2,descriptors1,descriptors2);


    vector<DMatch> matches;
    descriptorsMatch(rgb1,rgb2,keyPts1,keyPts2,descriptors1,descriptors2,matches);


    vector<Point2f> points;
    vector<Point3f> objectPoints;
    vector<Eigen::Vector2d> imagePoints1,imagePoints2;

    getObjectPointsAndImagePoints(depth1,depth2,keyPts1,keyPts2,matches,points,objectPoints,imagePoints1,imagePoints2,C);

    Mat translation,rotation;
    double camera_matrix_data[3][3] = {
        {C.fx, 0, C.cx},
        {0, C.fy, C.cy},
        {0, 0, 1}    };

    Mat cameraMatrix(3,3,CV_64F,camera_matrix_data);


    solvePnPRansac(objectPoints,points,cameraMatrix,Mat(),rotation,translation,false, 100, 1.0, 20);


    Mat rot;
    Rodrigues(rotation,rot);

    Eigen::Matrix3d r;
    Eigen::Vector3d t;

    cout<<rot<<endl;
    cout<<translation<<endl;

    r<< ((double*)rot.data)[0],((double*)rot.data)[1],((double*)rot.data)[2],
            ((double*)rot.data)[3],((double*)rot.data)[4],((double*)rot.data)[5],
            ((double*)rot.data)[6],((double*)rot.data)[7],((double*)rot.data)[8];
    t<<((double*)translation.data)[0],((double*)translation.data)[1],((double*)translation.data)[2];

    Eigen::Isometry3d T = Eigen::Isometry3d::Identity();
    T.rotate(r);
    T.pretranslate(t);
    cout<<T.matrix()<<endl;

    BundleAdjustmentOptimization(objectPoints,imagePoints1,imagePoints2);

    return T;
}
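The raw-pointer copies from cv::Mat into Eigen above can be replaced with OpenCV's Eigen interop helpers; a sketch, assuming OpenCV was built with the Eigen support header available:

#include <opencv2/core/eigen.hpp>

Eigen::Matrix3d r;
Eigen::Vector3d t;
cv::cv2eigen(rot, r);          // rot: the 3x3 CV_64F matrix produced by Rodrigues
cv::cv2eigen(translation, t);  // translation: the 3x1 CV_64F vector from solvePnPRansac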
Example 6
int main( int argc, char* argv[])
{
  // Read points
  std::vector<cv::Point2f> imagePoints = Generate2DPoints();
  std::vector<cv::Point3f> objectPoints = Generate3DPoints();

  std::cout << "There are " << imagePoints.size() << " imagePoints and " << objectPoints.size() << " objectPoints." << std::endl;

  // camera parameters
  double fx = 525.0; // focal length x
  double fy = 525.0; // focal length y

  double cx = 319.5; // optical centre x
  double cy = 239.5; // optical centre y

  cv::Mat cameraMatrix(3,3,cv::DataType<double>::type);
  cameraMatrix.at<double>(0,0)=fx;
  cameraMatrix.at<double>(1,1)=fy;
  cameraMatrix.at<double>(2,2)=1;
  cameraMatrix.at<double>(0,2)=cx;
  cameraMatrix.at<double>(1,2)=cy;
  cameraMatrix.at<double>(0,1)=0;
  cameraMatrix.at<double>(1,0)=0;
  cameraMatrix.at<double>(2,0)=0;
  cameraMatrix.at<double>(2,1)=0;


  std::cout << "Initial cameraMatrix: " << cameraMatrix << std::endl;

  cv::Mat distCoeffs(4,1,cv::DataType<double>::type);
  distCoeffs.at<double>(0) = 0;
  distCoeffs.at<double>(1) = 0;
  distCoeffs.at<double>(2) = 0;
  distCoeffs.at<double>(3) = 0;

  cv::Mat rvec(3,1,cv::DataType<double>::type);
  cv::Mat tvec(3,1,cv::DataType<double>::type);

  cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

  std::cout << "rvec: " << rvec << std::endl;
  std::cout << "tvec: " << tvec << std::endl;

  std::vector<cv::Point2f> projectedPoints;
  cv::projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, projectedPoints);

  for(unsigned int i = 0; i < projectedPoints.size(); ++i)
    {
    std::cout << "Image point: " << imagePoints[i] << " Projected to " << projectedPoints[i] << std::endl;
    }

  return 0;
}
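Generate2DPoints() and Generate3DPoints() are called above but not shown; hypothetical stand-ins are sketched below, with placeholder correspondences that are illustrative rather than taken from the original:

std::vector<cv::Point3f> Generate3DPoints()
{
  std::vector<cv::Point3f> points;  // a few cube corners (placeholder data)
  points.push_back(cv::Point3f(-0.5f, -0.5f, -0.5f));
  points.push_back(cv::Point3f( 0.5f, -0.5f, -0.5f));
  points.push_back(cv::Point3f( 0.5f,  0.5f, -0.5f));
  points.push_back(cv::Point3f(-0.5f,  0.5f, -0.5f));
  points.push_back(cv::Point3f(-0.5f, -0.5f,  0.5f));
  points.push_back(cv::Point3f( 0.5f, -0.5f,  0.5f));
  return points;
}

std::vector<cv::Point2f> Generate2DPoints()
{
  std::vector<cv::Point2f> points;  // matching pixel locations (placeholder data)
  points.push_back(cv::Point2f(282.f, 274.f));
  points.push_back(cv::Point2f(397.f, 227.f));
  points.push_back(cv::Point2f(577.f, 271.f));
  points.push_back(cv::Point2f(462.f, 318.f));
  points.push_back(cv::Point2f(270.f, 479.f));
  points.push_back(cv::Point2f(450.f, 523.f));
  return points;
}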
Example 7
    /**
    * Computes the pose using the homography matrix (NOT WORKING PROPERLY)
    * @param odom_estimated odometry message that receives the estimated pose
    */
    bool Circular::estimatePoseHom(nav_msgs::Odometry &odom_estimated)
    {
        
        std::vector<cv::Point3f> circle;
        cv::Mat rvec, tvec;
        
        // Assemble circle in the object plane
        double radius = this->landmark_diag_ / 2.0;
        
        for (int i=0; i<4; ++i){
            double theta = atan2(this->contours_.at(i).y - this->descriptor_.dynamic.vc, 
                                 this->contours_.at(i).x - this->descriptor_.dynamic.uc);
            circle.push_back(cv::Point3f(radius*cos(theta),-radius*sin(theta),0.0));    
        }
        
        // Camera Calibration Matrix
        cv::Matx33f cameraMatrix(
                                 this->fx_,    0     , this->cx_,
                                     0    ,this->fy_ , this->cy_,
                                     0    ,    0     ,     1    );
        // Distortion parameters
        cv::Vec4f distParam(this->k1_, this->k2_, this->p1_, this->p2_); // distortion parameters
        // PnP solver
        cv::solvePnP(circle, this->contours_, cameraMatrix, distParam, rvec, tvec);
        
        // Rodrigues to rotation matrix
        cv::Matx33d r;
        cv::Rodrigues(rvec, r);
        Eigen::Matrix3d wRo;
        wRo << r(0,0), r(0,1), r(0,2), r(1,0), r(1,1), r(1,2), r(2,0), r(2,1), r(2,2);

        Eigen::Matrix4d transform;
        transform.topLeftCorner(3,3) = wRo;
        transform.col(3).head(3) << tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2);
        transform.row(3) << 0,0,0,1;

        Eigen::Matrix3d rot = wRo;
        Eigen::Quaternion<double> rot_quaternion = Eigen::Quaternion<double>(rot);

        
        odom_estimated.pose.pose.position.x = transform(0,3);
        odom_estimated.pose.pose.position.y = transform(1,3);
        odom_estimated.pose.pose.position.z = transform(2,3);
        
        odom_estimated.pose.pose.orientation.x = rot_quaternion.x();
        odom_estimated.pose.pose.orientation.y = rot_quaternion.y();
        odom_estimated.pose.pose.orientation.z = rot_quaternion.z();
        odom_estimated.pose.pose.orientation.w = rot_quaternion.w();     
        
        return true;
    }
Example 8
aruco::CameraParameters aruco_ros::rosCameraInfo2ArucoCamParams(const sensor_msgs::CameraInfo& cam_info,
                                                                bool useRectifiedParameters)
{
    cv::Mat cameraMatrix(3, 3, CV_32FC1);
    cv::Mat distorsionCoeff(4, 1, CV_32FC1);
    cv::Size size(cam_info.width, cam_info.height);

    if ( useRectifiedParameters )
    {
      cameraMatrix.setTo(0);
      cameraMatrix.at<float>(0,0) = cam_info.K[0];   cameraMatrix.at<float>(0,1) = cam_info.K[1];   cameraMatrix.at<float>(0,2) = cam_info.K[2];
      cameraMatrix.at<float>(1,0) = cam_info.K[3];   cameraMatrix.at<float>(1,1) = cam_info.K[4];   cameraMatrix.at<float>(1,2) = cam_info.K[5];
      cameraMatrix.at<float>(2,0) = cam_info.K[6];   cameraMatrix.at<float>(2,1) = cam_info.K[7];   cameraMatrix.at<float>(2,2) = cam_info.K[8];

      for(int i=0; i<4; ++i)
        distorsionCoeff.at<float>(i, 0) = 0;
    }
    else
    {
      for(int i=0; i<9; ++i)
        cameraMatrix.at<float>(i/3, i%3) = cam_info.K[i];

      if(cam_info.D.size() == 4)
      {
        for(int i=0; i<4; ++i)
          distorsionCoeff.at<float>(i, 0) = cam_info.D[i];
      }
      else
      {
        ROS_WARN("length of camera_info D vector is not 4, assuming zero distortion...");
        for(int i=0; i<4; ++i)
          distorsionCoeff.at<float>(i, 0) = 0;
      }
    }

    return aruco::CameraParameters(cameraMatrix, distorsionCoeff, size);
}
Example 9
void MonoDvsCalibration::addPattern(int id)
{
  // add detection
  image_points_.push_back(transition_maps_[id].pattern);
  object_points_.push_back(world_pattern_);
  num_detections_++;

  // compute and publish camera pose if camera is calibrated
  if (got_camera_info_)
  {
    cv::Mat rvec, tvec;
    cv::Mat cameraMatrix(3, 3, CV_64F);
    cv::Mat distCoeffs(1, 5, CV_64F);

    // convert to OpenCV
    for (int i = 0; i < 5; i++)
      distCoeffs.at<double>(i) = camera_info_external_.D[i];
    for (int i = 0; i < 9; i++)
      cameraMatrix.at<double>(i) = camera_info_external_.K[i];

    cv::solvePnP(world_pattern_, transition_maps_[id].pattern, cameraMatrix, distCoeffs, rvec, tvec);

    geometry_msgs::PoseStamped pose_msg;
    pose_msg.header.stamp = ros::Time::now();
    pose_msg.header.frame_id = "dvs";
    pose_msg.pose.position.x = tvec.at<double>(0);
    pose_msg.pose.position.y = tvec.at<double>(1);
    pose_msg.pose.position.z = tvec.at<double>(2);

    double angle = cv::norm(rvec);
    cv::normalize(rvec, rvec);
    pose_msg.pose.orientation.x = rvec.at<double>(0) * sin(angle/2.0);
    pose_msg.pose.orientation.y = rvec.at<double>(1) * sin(angle/2.0);
    pose_msg.pose.orientation.z = rvec.at<double>(2) * sin(angle/2.0);
    pose_msg.pose.orientation.w = cos(angle/2.0);

    camera_pose_pub_.publish(pose_msg);
  }
}
Example 10
void PhaseCorrelation::controller(const sensor_msgs::ImageConstPtr& msg) {

	//Transform the image to opencv format
   cv_bridge::CvImagePtr cv_ptr;
    try
    {
      cv_ptr = cv_bridge::toCvCopy(msg, enc::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;
    }
	
	Mat image = cv_ptr->image; 
	
	imshow("image", image);
	waitKey(1);

	//Transform image to grayscale
	cvtColor(image, image2_in, CV_BGR2GRAY); //Image2 is the newest image, the one we get from this callback (cv_bridge delivered BGR8)

	//image1 is the image2 from the last callback (we use the one that already has the dft applied).
	Mat image1_dft = last_image_dft;

	//Image2, preparation and dft
	 Mat padded2;                            //expand input image to optimal size
	 int m2 = getOptimalDFTSize( image2_in.rows );
	 int n2 = getOptimalDFTSize( image2_in.cols ); // on the border add zero values
	 copyMakeBorder(image2_in, padded2, 0, m2 - image2_in.rows, 0, n2 - image2_in.cols, BORDER_CONSTANT, Scalar::all(0));

	 Mat planes2[] = {Mat_<float>(padded2), Mat::zeros(padded2.size(), CV_32F)};
	 Mat image2,image2_dft;
	 merge(planes2, 2, image2);         // Add to the expanded another plane with zeros

	 dft(image2, image2_dft); 	


	//Image2 will be image1 on next iteration
	last_image_dft=image2_dft;

	if( !first){
	 	//obtain the cross power spectrum  c(u,v)=F(u,v)·G*(u,v)/abs(F(u,v)·G*(u,v))
		Mat divident, divisor, cross_power_spec, trans_mat;
		mulSpectrums(image1_dft, image2_dft, divident, 0, true); 	
																		//=F(u,v)·G*(u,v) --> multiply the result of a dft //divident-> where it stores the result.
																		// flags=0. conj=true, because i want the image2 to be the complex conjugate.
		divisor=abs(divident);

		divide(divident, divisor, cross_power_spec, 1);	

		//dft of the cross_power_spec
	 	dft(cross_power_spec, trans_mat, DFT_INVERSE);

		//Normalize trans_mat so that the peak magnitude is 1
		normalize(trans_mat, trans_mat, 1.0, 0.0, NORM_INF);

		//Split trans_mat in it's real and imaginary parts
		vector<Mat> trans_mat_vector;
		split(trans_mat, trans_mat_vector);
		Mat trans_mat_real = trans_mat_vector.at(0); 
		Mat trans_mat_im = trans_mat_vector.at(1); 

		imshow("trans_mat_real", trans_mat_real);
		waitKey(1);

		//Look for the maximum value and its location in the trans_mat_real matrix
		double max_val;
		Point max_loc;

		minMaxLoc(trans_mat_real, NULL, &max_val, NULL, &max_loc);

		ROS_INFO_STREAM("max_value: " << max_val << "  -  " << "max_location: " << max_loc);			

		int pixel_x = 0, pixel_y = 0;

		if(max_loc.x < (image2.cols/2) && max_loc.y < (image2.rows/2)){  // top-left quadrant
			ROS_INFO_STREAM(" top - left quadrant");
			pixel_x = max_loc.x;
			pixel_y = - max_loc.y;
		}
		if(max_loc.x > (image2.cols/2) && max_loc.y > (image2.rows/2)){  // lower-right quadrant
			ROS_INFO_STREAM(" lower - right quadrant");
			pixel_x = - image2.cols + max_loc.x;
			pixel_y = + image2.rows - max_loc.y;
		}
		if(max_loc.x > (image2.cols/2) && max_loc.y < (image2.rows/2)){  // top-right quadrant
			ROS_INFO_STREAM(" top - right quadrant");
			pixel_x = - image2.cols + max_loc.x;
			pixel_y = - max_loc.y;
		}
		if(max_loc.x < (image2.cols/2) && max_loc.y > (image2.rows/2)){  // lower-left quadrant  
			ROS_INFO_STREAM(" lower - left quadrant");
			pixel_x = max_loc.x;
			pixel_y = image2.rows - max_loc.y;
		}


		//Add the new displacement to the accumulated
		pixels_x = pixels_x + pixel_x;
		pixels_y = pixels_y + pixel_y;
		ROS_INFO_STREAM("pixels_x: " << pixels_x << "  -  " << "pixel_y: " << pixels_y);		

		//------ transform pixels to mm ---------
 
		//To get the focal length, computed once (fl is assumed to start at 0;
		//a 'first' check can never be true inside this '!first' branch):
		if (fl <= 0.0f){

			Mat cameraMatrix(3, 3, CV_32F);
			cameraMatrix.at<float>(0,0)= 672.03175; //Values from the camera matrix are from the visp calibration
			cameraMatrix.at<float>(0,1) = 0.00000;
			cameraMatrix.at<float>(0,2) = 309.39349;
			cameraMatrix.at<float>(1,0) = 0.00000;
			cameraMatrix.at<float>(1,1) = 673.05595;
			cameraMatrix.at<float>(1,2) = 166.52006; 
			cameraMatrix.at<float>(2,0) = 0.00000;
			cameraMatrix.at<float>(2,1) = 0.00000;
			cameraMatrix.at<float>(2,2) = 1.00000;

			double apertureWidth, apertureHeight, fovx, fovy, focalLength, aspectRatio;
			Point2d principalPoint;
 
 			calibrationMatrixValues(cameraMatrix, image.size(), apertureWidth, apertureHeight, fovx, fovy, focalLength,  principalPoint, aspectRatio);
	
			ROS_INFO_STREAM("focalLength: " << focalLength);
			fl = focalLength;

		}
		
		float mov_x = pixel_x/fl*h;
		float mov_y = pixel_y/fl*h;
		mov_x_acc =	mov_x_acc + mov_x;
		mov_y_acc = mov_y_acc + mov_y;

		ROS_INFO_STREAM("mov_x:  " << mov_x << " - mov_y:  " << mov_y);
		ROS_INFO_STREAM("mov_x_acc:  " << mov_x_acc << " - mov_y_acc:  " << mov_y_acc);

	}

	//from the next callback on, image1 (last_image_dft) is valid
	first = false;
}
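Note that the hand-rolled normalization above divides by per-channel absolute values rather than by the complex magnitude; OpenCV ships the standard formulation as cv::phaseCorrelate. A sketch of the equivalent shift estimate, where prev_gray and curr_gray are hypothetical CV_8U frames from consecutive callbacks:

#include <opencv2/imgproc/imgproc.hpp>

cv::Mat prev32, curr32;               // phaseCorrelate expects floating-point input
prev_gray.convertTo(prev32, CV_32F);
curr_gray.convertTo(curr32, CV_32F);
cv::Point2d shift = cv::phaseCorrelate(prev32, curr32);
ROS_INFO_STREAM("shift: " << shift.x << " , " << shift.y);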
Example 11
void Storm3D_SpotlightShared::updateMatrices(const D3DXMATRIX &cameraView, float bias)
{
	D3DXVECTOR3 lightPosition(position.x, position.y, position.z);
	D3DXVECTOR3 up(0, 1.f, 0);
	D3DXVECTOR3 lookAt = lightPosition;
	lookAt += D3DXVECTOR3(direction.x, direction.y, direction.z);

	D3DXMatrixPerspectiveFovLH(&lightProjection, D3DXToRadian(fov), 1.f, .2f, range);
	D3DXMATRIX cameraMatrix(cameraView);
	float det = D3DXMatrixDeterminant(&cameraMatrix);
	D3DXMatrixInverse(&cameraMatrix, &det, &cameraMatrix);

	float currentBias = bias;
	for(int i = 0; i < 2; ++i)
	{	
		D3DXMatrixLookAtLH(&lightView[i], &lightPosition, &lookAt, &up);
		//if(i == 1)
		//	currentBias = 0;
		if(i == 1)
			currentBias = 1.f;

		// Tweak matrix
		float soffsetX = 0.5f;
		float soffsetY = 0.5f;
		float scale = 0.5f;

		/*
		D3DXMATRIX shadowTweak( scale,    0.0f,     0.0f,				0.0f,
								0.0f,     -scale,   0.0f,				0.0f,
								0.0f,      0.0f,     float(tweakRange),	0.0f,
								soffsetX,  soffsetY, currentBias,		1.0f );
		*/
		D3DXMATRIX shadowTweak( scale,    0.0f,      0.0f,				0.0f,
								0.0f,     -scale,    0.0f,				0.0f,
								0.0f,      0.0f,	 currentBias,		0.0f,
								soffsetX,  soffsetY, 0.f,				1.0f );

		D3DXMatrixMultiply(&shadowProjection[i], &lightProjection, &shadowTweak);
		D3DXMatrixMultiply(&shadowProjection[i], &lightView[i], &shadowProjection[i]);
		D3DXMatrixMultiply(&lightViewProjection[i], &lightView[i], &lightProjection);

		shaderProjection[i] = shadowProjection[i];
		D3DXMatrixMultiply(&shadowProjection[i], &cameraMatrix, &shadowProjection[i]);
	}

	{
		// soffsetX/soffsetY above are scoped to the loop body, so they are re-declared here
		float soffsetX = 0.5f;
		float soffsetY = 0.5f;
		float xf = (1.f / resolutionX * .5f);
		float yf = (1.f / resolutionY * .5f);
		float sX = soffsetX + (2 * targetPos.x * soffsetX) - xf;
		float sY = soffsetY + (2 * targetPos.y * soffsetY) - yf;

		/*
		D3DXMATRIX shadowTweak( scaleX,    0.0f,	0.0f,				0.0f,
								0.0f,    -scaleY,	0.0f,				0.0f,
								0.0f,     0.0f,     float(tweakRange),	0.0f,
								sX,       sY,		bias,				1.0f );
		*/

		D3DXMATRIX shadowTweak( scaleX,    0.0f,	0.0f,				0.0f,
								0.0f,    -scaleY,	0.0f,				0.0f,
								0.0f,     0.0f,     bias,				0.0f,
								sX,       sY,		0.f,				1.0f );

		D3DXMatrixMultiply(&targetProjection, &lightProjection, &shadowTweak);
		D3DXMatrixMultiply(&targetProjection, &lightView[0], &targetProjection);
	}
}
Example 12
void Storm3D_SpotlightShared::updateMatricesOffCenter(const D3DXMATRIX &cameraView, const VC2 &min, const VC2 &max, float height, Storm3D_Camera &camera)
{
	// Position of the light in global coordinates
	// Y-axis is height
	D3DXVECTOR3 lightPosition(position.x, position.y, position.z);
	// Up vector (z-axis)
	D3DXVECTOR3 up(0.f, 0.f, 1.f);
	// Look direction
	D3DXVECTOR3 lookAt = lightPosition;
	lookAt.y -= 1.f;

	{
		// max and min define the extents of light area in local coordinates
		// Z-axis is height
		float zmin = 0.2f;
		//float zmax = std::max(range, height) * 1.4f;
		// height is light height from light properties
		float zmax = height;
		float factor = 1.5f * zmin / height;
		float xmin = min.x * factor;
		float xmax = max.x * factor;
		float ymin = min.y * factor;
		float ymax = max.y * factor;
		D3DXMatrixPerspectiveOffCenterLH(&lightProjection, xmin, xmax, ymin, ymax, zmin, zmax);

		// Calculate the extents of light area in global coordinates
		VC2 worldMin = min;
		worldMin.x += position.x;
		worldMin.y += position.z;
		VC2 worldMax = max;
		worldMax.x += position.x;
		worldMax.y += position.z;

		// Generate approximate camera for culling.

		// Calculate range of the camera.
		// Y-axis is height
		float planeY = position.y - height;
		// Calculate distances from light position to light plane edges
		VC3 p1 = VC3( worldMin.x, planeY, worldMin.y ) - position;
		VC3 p2 = VC3( worldMax.x, planeY, worldMin.y ) - position;
		VC3 p3 = VC3( worldMax.x, planeY, worldMax.y ) - position;
		VC3 p4 = VC3( worldMin.x, planeY, worldMax.y ) - position;
		float d1 = p1.GetLength();
		float d2 = p2.GetLength();
		float d3 = p3.GetLength();
		float d4 = p4.GetLength();
		float maxRange = 0.0f;
		maxRange = MAX( maxRange, d1 );
		maxRange = MAX( maxRange, d2 );
		maxRange = MAX( maxRange, d3 );
		maxRange = MAX( maxRange, d4 );
		//maxRange = sqrtf(maxRange);

		// Calculate FOV of the camera.
		VC3 planeCenter = VC3( (worldMin.x + worldMax.x) * 0.5f, planeY, (worldMin.y + worldMax.y) * 0.5f );
		VC3 camVec = planeCenter - position;
		camVec.Normalize();
		float minDot = 10000.0f;
		float t1 = camVec.GetDotWith( p1 ) / d1;
		float t2 = camVec.GetDotWith( p2 ) / d2;
		float t3 = camVec.GetDotWith( p3 ) / d3;
		float t4 = camVec.GetDotWith( p4 ) / d4;
		minDot = MIN( minDot, t1 );
		minDot = MIN( minDot, t2 );
		minDot = MIN( minDot, t3 );
		minDot = MIN( minDot, t4 );
		float maxAngle = acosf( minDot );

		// Place camera to light position
		camera.SetPosition(position);
		camera.SetUpVec(VC3(0.f, 0.f, 1.f));
		// Point camera at light plane center
		camera.SetTarget(planeCenter);
		camera.SetFieldOfView( maxAngle );
		camera.SetVisibilityRange( maxRange );
	}
	
	D3DXMATRIX cameraMatrix(cameraView);
	float det = D3DXMatrixDeterminant(&cameraMatrix);
	D3DXMatrixInverse(&cameraMatrix, &det, &cameraMatrix);

	unsigned int tweakRange = 1;
	float bias = 0.f;
	float currentBias = 0.f;
	for(int i = 0; i < 2; ++i)
	{	
		D3DXMatrixLookAtLH(&lightView[i], &lightPosition, &lookAt, &up);
		if(i == 1)
			currentBias = 0;

		// Tweak matrix
		float soffsetX = 0.5f;
		float soffsetY = 0.5f;
		float scale = 0.5f;

		D3DXMATRIX shadowTweak( scale,    0.0f,     0.0f,				0.0f,
								0.0f,     -scale,   0.0f,				0.0f,
								0.0f,      0.0f,     float(tweakRange),	0.0f,
								soffsetX,  soffsetY, currentBias,		1.0f );

		D3DXMatrixMultiply(&shadowProjection[i], &lightProjection, &shadowTweak);
		D3DXMatrixMultiply(&shadowProjection[i], &lightView[i], &shadowProjection[i]);
		D3DXMatrixMultiply(&lightViewProjection[i], &lightView[i], &lightProjection);

		shaderProjection[i] = shadowProjection[i];
		D3DXMatrixMultiply(&shadowProjection[i], &cameraMatrix, &shadowProjection[i]);
	}

	{
		// soffsetX/soffsetY above are scoped to the loop body, so they are re-declared here
		float soffsetX = 0.5f;
		float soffsetY = 0.5f;
		float xf = (1.f / resolutionX * .5f);
		float yf = (1.f / resolutionY * .5f);
		float sX = soffsetX + (2 * targetPos.x * soffsetX) - xf;
		float sY = soffsetY + (2 * targetPos.y * soffsetY) - yf;

		D3DXMATRIX shadowTweak( scaleX,    0.0f,	0.0f,				0.0f,
								0.0f,    -scaleY,	0.0f,				0.0f,
								0.0f,     0.0f,     float(tweakRange),	0.0f,
								sX,       sY,		bias,				1.0f );

		D3DXMatrixMultiply(&targetProjection, &lightProjection, &shadowTweak);
		D3DXMatrixMultiply(&targetProjection, &lightView[0], &targetProjection);
	}
}
Example 13
RESULT_OF_PNP estimateMotion( FRAME& frame1, FRAME& frame2, CAMERA_INTRINSIC_PARAMETERS& camera, double good_match_threshold, int min_good_match){
	// Match the descriptors
	vector<cv::DMatch> matches;
	cv::FlannBasedMatcher matcher;
	matcher.match(frame1.desp,frame2.desp,matches);
	//cout<<"Find total "<<matches.size()<<" matches."<<endl;
	/*
	// Visualization: show the matched features
	cv::Mat imgMatches;
	cv::drawMatches(frame1.rgb,frame1.kp,frame2.rgb,frame2.kp,matches,imgMatches);
	cv::imshow("matches",imgMatches);
	cv::imwrite("../pic/matches.png",imgMatches);
	cv::waitKey(0);
	*/
	// Filter the matches, dropping those whose distance is too large
	// Criterion used here: discard matches with more than four times the minimum distance
	vector<cv::DMatch> goodMatches;
	double minDis = 9999;
	for(size_t i=0;i<matches.size();i++){
		if(matches[i].distance<minDis){
			minDis = matches[i].distance;
		}
	}
	for(size_t i=0;i<matches.size();i++){
		if(matches[i].distance < good_match_threshold*minDis){
			goodMatches.push_back(matches[i]);
		}
	}
	
	/*
	cv::drawMatches(frame1.rgb,frame1.kp,frame2.rgb,frame2.kp,goodMatches,imgMatches);
	cv::imshow("good matches",imgMatches);
	cv::imwrite("../pic/good_matches.png",imgMatches);
	cv::waitKey(0);
	*/
	// Compute the motion between the two frames
	// Key function: cv::solvePnPRansac()
	// Prepare the parameters this call needs

	// 3D points of the first frame
	vector<cv::Point3f> pts_obj;
	// image points of the second frame
	vector<cv::Point2f> pts_img;
	RESULT_OF_PNP result;
	
	if(goodMatches.size() <  min_good_match){
		result.inliers = 0;
	}else{	
		// report the number of good matches
		cout<<"good matches = "<<goodMatches.size()<<endl;
		for(size_t i=0;i<goodMatches.size();i++){
			// query refers to the first frame, train to the second
			cv::Point2f p = frame1.kp[goodMatches[i].queryIdx].pt;
			// Careful when reading d: x points right and y points down, so y is the row and x is the column!
			unsigned short d = frame1.depth.ptr<unsigned short>(int(p.y))[int(p.x)];
			if(d == 0){
				continue;
			}
			pts_img.push_back(cv::Point2f(frame2.kp[goodMatches[i].trainIdx].pt));
			// convert (u,v,d) into (x,y,z)
			cv::Point3f pt(p.x,p.y,d);
			cv::Point3f pd = point2dTo3d(pt,camera);
			pts_obj.push_back(pd);
		}
		if(pts_obj.size() == 0){
			result.inliers = 0;
			return result;
		}
		double camera_matrix_data[3][3] = {
			{camera.fx,0,camera.cx},
			{0,camera.fy,camera.cy},
			{0,0,1}
		};
	
		// build the camera matrix
		cv::Mat cameraMatrix(3,3,CV_64F,camera_matrix_data);
		cv::Mat inliers;
		// solve PnP
		cv::solvePnPRansac(pts_obj,pts_img,cameraMatrix,cv::Mat(),result.rvec,result.tvec,false,100,1.0,100,inliers);
		result.inliers = inliers.rows;
	}
	
	/*
	// draw the inlier matches
	vector<cv::DMatch> matchesShow;
	for(size_t i=0;i<result.inliers;i++){
		matchesShow.push_back(goodMatches[inliers.ptr<int>(i)[0]]);
	}
	cv::drawMatches(frame1.rgb,frame1.kp,frame2.rgb,frame2.kp,matchesShow,imgMatches);
	cv::imshow("inlier matches",imgMatches);
	cv::imwrite("../pic/inliers.png",imgMatches);
	cv::waitKey(0);
	*/
	return result;
}
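A hypothetical call site for estimateMotion; the intrinsics and thresholds below are placeholders, and the two FRAME values are assumed to be filled in elsewhere:

CAMERA_INTRINSIC_PARAMETERS camera;
camera.fx = 518.0;  camera.fy = 519.0;  // placeholder intrinsics
camera.cx = 325.5;  camera.cy = 253.5;

RESULT_OF_PNP motion = estimateMotion(frame1, frame2, camera, 4.0, 10);
if(motion.inliers > 5){
	cout<<"rvec = "<<motion.rvec<<endl<<"tvec = "<<motion.tvec<<endl;
}else{
	cout<<"not enough inliers: "<<motion.inliers<<endl;
}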
Example 14
int main(int argc, char *argv[]) {
    gflags::ParseCommandLineFlags(&argc, &argv, true);

    cv::vector<cv::Mat> lefts, rights;
    const cv::Size patternSize(9, 6);
    cv::vector<cv::vector<cv::Point3f>> worldPoints;
    cv::vector<cv::vector<cv::vector<cv::Point2f>>> imagePoints(2);

    for (size_t i = 0; i < 2; i++)
        imagePoints[i].resize(FLAGS_size);

    loadImages(lefts, rights, FLAGS_size);
    FLAGS_size = findChessboards(lefts, rights, imagePoints, patternSize, FLAGS_size);
    std::cout << "number of correct files = " << FLAGS_size << std::endl;
    setWorldPoints(worldPoints, patternSize, 0.024, FLAGS_size);

    std::cout << "calibrate stereo cameras" << std::endl;
    cv::vector<cv::Mat> cameraMatrix(2);
    cv::vector<cv::Mat> distCoeffs(2);
    cameraMatrix[0] = cv::Mat::eye(3, 3, CV_64FC1);
    cameraMatrix[1] = cv::Mat::eye(3, 3, CV_64FC1);
    distCoeffs[0] = cv::Mat(8, 1, CV_64FC1);
    distCoeffs[1] = cv::Mat(8, 1, CV_64FC1);
    cv::Mat R, T, E, F;

    double rms = stereoCalibrate(
            worldPoints, imagePoints[0], imagePoints[1], cameraMatrix[0],
            distCoeffs[0], cameraMatrix[1], distCoeffs[1], lefts[0].size(),
            R, T, E, F, cv::TermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 100, 1e-5),
            CV_CALIB_FIX_ASPECT_RATIO +
            CV_CALIB_ZERO_TANGENT_DIST +
            CV_CALIB_SAME_FOCAL_LENGTH +
            CV_CALIB_RATIONAL_MODEL +
            CV_CALIB_FIX_K3 + CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5);
    std::cout << "done with RMS error = " << rms << std::endl;

    double err = 0;
    int npoints = 0;
    for (int i = 0; i < FLAGS_size; i++) {
        int size = (int) imagePoints[0][i].size();
        cv::vector<cv::Vec3f> lines[2];
        cv::Mat imgpt[2];
        for (int k = 0; k < 2; k++) {
            imgpt[k] = cv::Mat(imagePoints[k][i]);
            cv::undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k],
                                cv::Mat(), cameraMatrix[k]);
            cv::computeCorrespondEpilines(imgpt[k], k + 1, F, lines[k]);
        }

        for (int j = 0; j < size; j++) {
            double errij =
                    std::fabs(imagePoints[0][i][j].x * lines[1][j][0] +
                              imagePoints[0][i][j].y * lines[1][j][1] +
                              lines[1][j][2]) +
                    std::fabs(imagePoints[1][i][j].x * lines[0][j][0] +
                              imagePoints[1][i][j].y * lines[0][j][1] +
                              lines[0][j][2]);
            err += errij;
        }
        npoints += size;
    }
    std::cout << "average reprojection error = " << err / npoints << std::endl;

    cv::Mat R1, R2, P1, P2, Q;
    cv::Rect validROI[2];
    stereoRectify(cameraMatrix[0], distCoeffs[0], cameraMatrix[1],
                  distCoeffs[1], lefts[0].size(), R, T, R1, R2, P1, P2, Q,
                  cv::CALIB_ZERO_DISPARITY, 1, lefts[0].size(),
                  &validROI[0], &validROI[1]);

    {
        cv::FileStorage fs(FLAGS_intrinsics.c_str(), cv::FileStorage::WRITE);
        if (fs.isOpened()) {
            fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0]
                << "M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
            fs.release();
        }
    }

    cv::Mat rmap[2][2];
    cv::initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1,
                                lefts[0].size(),
                                CV_16SC2,
                                rmap[0][0], rmap[0][1]);
    cv::initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2,
                                lefts[0].size(),
                                CV_16SC2,
                                rmap[1][0], rmap[1][1]);

    {
        cv::FileStorage fs(FLAGS_extrinsics.c_str(), cv::FileStorage::WRITE);
        if (fs.isOpened()) {
            fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2
               << "P1" << P1 << "P2" << P2 << "Q" << Q
               << "V1" << validROI[0] << "V2" << validROI[1];
            fs.release();
        }
    }

    cv::Mat canvas;
    double sf;
    int w, h;

    sf = 600. / MAX(lefts[0].size().width, lefts[0].size().height);
    w = cvRound(lefts[0].size().width * sf);
    h = cvRound(lefts[0].size().height * sf);
    canvas.create(h, w * 2, CV_8UC3);

    cv::namedWindow("Rectified", CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);

    for (int i = 0; i < FLAGS_size; i++) {
        for (int k = 0; k < 2; k++) {
            if (k == 0) {
                cv::Mat img = lefts[i].clone(), rimg, cimg;
                cv::remap(img, rimg, rmap[k][0], rmap[k][1], CV_INTER_LINEAR);
                cv::cvtColor(rimg, cimg, CV_GRAY2BGR);
                cv::Mat canvasPart = canvas(cv::Rect(w * k, 0, w, h));
                cv::resize(cimg, canvasPart, canvasPart.size(), 0, 0, CV_INTER_AREA);

                cv::Rect vroi(cvRound(validROI[k].x * sf),
                              cvRound(validROI[k].y * sf),
                              cvRound(validROI[k].width * sf),
                              cvRound(validROI[k].height * sf));
                cv::rectangle(canvasPart, vroi, cv::Scalar(0, 0, 255), 3, 8);
            } else {
                cv::Mat img = rights[i].clone(), rimg, cimg;
                cv::remap(img, rimg, rmap[k][0], rmap[k][1], CV_INTER_LINEAR);
                cv::cvtColor(rimg, cimg, CV_GRAY2BGR);
                cv::Mat canvasPart = canvas(cv::Rect(w * k, 0, w, h));
                cv::resize(cimg, canvasPart, canvasPart.size(), 0, 0, CV_INTER_AREA);

                cv::Rect vroi(cvRound(validROI[k].x * sf),
                              cvRound(validROI[k].y * sf),
                              cvRound(validROI[k].width * sf),
                              cvRound(validROI[k].height * sf));
                cv::rectangle(canvasPart, vroi, cv::Scalar(0, 0, 255), 3, 8);
            }
        }

        for (int j = 0; j < canvas.rows; j += 16)
            cv::line(canvas, cv::Point(0, j), cv::Point(canvas.cols, j),
                     cv::Scalar(0, 255, 0), 1, 8);

        cv::imshow("Rectified", canvas);

        if (cv::waitKey(0) == 'q')
            break;
    }

    cv::destroyAllWindows();
    return 0;
}
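Reading the calibration files written above back in is symmetric; a short sketch, with a hard-coded file name standing in for FLAGS_intrinsics:

cv::FileStorage fs("intrinsics.yml", cv::FileStorage::READ);
if (fs.isOpened()) {
    cv::Mat M1, D1, M2, D2;
    fs["M1"] >> M1;  fs["D1"] >> D1;  // camera matrix and distortion, left camera
    fs["M2"] >> M2;  fs["D2"] >> D2;  // camera matrix and distortion, right camera
    fs.release();
}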
Example 15
void GuiMaterialCtrl::onRender( Point2I offset, const RectI &updateRect )
{
   Parent::onRender( offset, updateRect );

   if ( !mMaterialInst )
      return;

   // Draw a quad with the material assigned
   GFXVertexBufferHandle<GFXVertexPCT> verts( GFX, 4, GFXBufferTypeVolatile );
   verts.lock();

   F32 screenLeft   = updateRect.point.x;
   F32 screenRight  = (updateRect.point.x + updateRect.extent.x);
   F32 screenTop    = updateRect.point.y;
   F32 screenBottom = (updateRect.point.y + updateRect.extent.y);

   const F32 fillConv = GFX->getFillConventionOffset();
   verts[0].point.set( screenLeft  - fillConv, screenTop    - fillConv, 0.f );
   verts[1].point.set( screenRight - fillConv, screenTop    - fillConv, 0.f );
   verts[2].point.set( screenLeft  - fillConv, screenBottom - fillConv, 0.f );
   verts[3].point.set( screenRight - fillConv, screenBottom - fillConv, 0.f );

   verts[0].color = verts[1].color = verts[2].color = verts[3].color = ColorI( 255, 255, 255, 255 );

   verts[0].texCoord.set( 0.0f, 0.0f );
   verts[1].texCoord.set( 1.0f, 0.0f );
   verts[2].texCoord.set( 0.0f, 1.0f );
   verts[3].texCoord.set( 1.0f, 1.0f );

   verts.unlock();

   GFX->setVertexBuffer( verts );

   MatrixSet matSet;
   matSet.setWorld(GFX->getWorldMatrix());
   matSet.setView(GFX->getViewMatrix());
   matSet.setProjection(GFX->getProjectionMatrix());
   
   MatrixF cameraMatrix( true );
   F32 left, right, top, bottom, nearPlane, farPlane;
   bool isOrtho;
   GFX->getFrustum( &left, &right, &bottom, &top, &nearPlane, &farPlane, &isOrtho );
   Frustum frust( isOrtho, left, right, top, bottom, nearPlane, farPlane, cameraMatrix );

   SceneRenderState state
   (
      gClientSceneGraph,
      SPT_Diffuse,
      SceneCameraState( GFX->getViewport(), frust, GFX->getWorldMatrix(), GFX->getProjectionMatrix() ),
      gClientSceneGraph->getDefaultRenderPass(),
      false
   );

   SceneData sgData;
   sgData.init( &state );
   sgData.wireframe = false; // Don't wireframe this.

   while( mMaterialInst->setupPass( &state, sgData ) )
   {
      mMaterialInst->setSceneInfo( &state, sgData );
      mMaterialInst->setTransforms( matSet, &state );
      GFX->setupGenericShaders();
      GFX->drawPrimitive( GFXTriangleStrip, 0, 2 );
   }

   // Clean up
   GFX->setShader( NULL );
   GFX->setTexture( 0, NULL );
}
Example 16
void RaspiVoice::processImage(cv::Mat rawImage)
{
	cv::Mat processedImage = rawImage;

	if (verbose)
	{
		printtime("ProcessImage start");
	}

	if ((image_source > 0) || (opt.input_filename != ""))
	{
		if (opt.foveal_mapping)
		{
			cv::Matx33f cameraMatrix(100, 0, processedImage.cols / 2, 0, 100, processedImage.rows / 2, 0, 0, 1);
			cv::Matx41f distCoeffs(5.0, 5.0, 0, 0);
			cv::Mat processedImage2;
			cv::undistort(processedImage, processedImage2, cameraMatrix, distCoeffs);
			float clipzoom = 1.8; //horizontal zoom to remove blinders, decreases resolution if > 1.0
			cv::Rect roi(processedImage.cols / 2 - columns / 2 / clipzoom, processedImage.rows / 2 - rows / 2, columns / clipzoom, rows);
			processedImage = processedImage2(roi);
		}

		if (opt.zoom > 1.0)
		{
			int w = processedImage.cols;
			int h = processedImage.rows;
			float z = opt.zoom;
			cv::Rect roi((w / 2.0) - w / (2.0*z), (h / 2.0) - h / (2.0*z), w/z, h/z);
			processedImage = processedImage(roi);
		}

		//Bring to size needed by ImageToSoundscape:
		if (processedImage.rows != rows || processedImage.cols != columns)
		{
			cv::resize(processedImage, processedImage, cv::Size(columns, rows));
		}

		if ((opt.blinders > 0) && (opt.blinders < columns/2))
		{
			processedImage(cv::Rect(0, 0, opt.blinders, rows - 1)).setTo(0);
			processedImage(cv::Rect(columns - 1 - opt.blinders, 0, opt.blinders, rows - 1)).setTo(0);
		}

		if (opt.contrast != 0.0)
		{
			float avg = 0.0;
			for (int y = 0; y < processedImage.rows; y++)
			{
				for (int x = 0; x < processedImage.cols; x++)
				{
					avg += processedImage.at<uchar>(y, x);
				}
			}
			avg = avg / (processedImage.rows * processedImage.cols);
			for (int y = 0; y < processedImage.rows; y++)
			{
				for (int x = 0; x < processedImage.cols; x++)
				{
					int mVal = processedImage.at<uchar>(y, x);
					processedImage.at<uchar>(y, x) = cv::saturate_cast<uchar>(mVal + opt.contrast*(mVal - avg));
				}
			}
		}
		if (opt.threshold > 0)
		{
			if (opt.threshold < 255)
			{
				cv::threshold(processedImage, processedImage, opt.threshold, 255, cv::THRESH_BINARY);
			}
			else
			{
				//Auto threshold:
				cv::threshold(processedImage, processedImage, 127, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
			}
		}

		if (opt.negative_image)
		{
			cv::Mat sub_mat = cv::Mat::ones(processedImage.size(), processedImage.type()) * 255;
			cv::subtract(sub_mat, processedImage, processedImage);
		}

		if (opt.edge_detection_opacity > 0.0)
		{
			cv::Mat blurImage;
			cv::Mat edgesImage;
			int ratio = 3;
			int kernel_size = 3;
			int lowThreshold = opt.edge_detection_threshold;
			if (lowThreshold <= 0)
			{
				lowThreshold = 127;
			}
			cv::blur(processedImage, blurImage, cv::Size(3, 3));
			cv::Canny(blurImage, edgesImage, lowThreshold, lowThreshold*ratio, kernel_size);

			double alpha = opt.edge_detection_opacity;
			if (alpha > 1.0)
			{
				alpha = 1.0;
			}
			double beta = (1.0 - alpha);
			cv::addWeighted(edgesImage, alpha, processedImage, beta, 0.0, processedImage);
		}

		if ((opt.flip >= 1) && (opt.flip <= 3))
		{
			int flipCode;
			if (opt.flip == 1) //h
			{
				flipCode = 1;
			}
			else if (opt.flip == 2) //v
			{
				flipCode = 0;
			}
			else if (opt.flip == 3) //h+v
			{
				flipCode = -1;
			}

			cv::flip(processedImage, processedImage, flipCode);
		}

		if (preview)
		{
			//Screen views
			//imwrite("raspivoice_capture_raw.jpg", rawImage);
			//imwrite("raspivoice_capture_scaled_gray.jpg", processedImage);
			cv::imshow("RaspiVoice Preview", processedImage);

			cv::waitKey(200);
		}

		/* Set live camera image */
		for (int j = 0; j < columns; j++)
		{
			for (int i = 0; i < rows; i++)
			{
				int mVal = processedImage.at<uchar>(rows - 1 - i, j) / 16;
				if (mVal == 0)
				{
					(*image)[IDX2D(i, j)] = 0;
				}
				else
				{
					(*image)[IDX2D(i, j)] = pow(10.0, (mVal - 15) / 10.0);   // 2dB steps
				}
			}
		}
	}

}
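The per-pixel contrast loop above can be collapsed into a single saturating convertTo call; a behaviour-equivalent sketch for 8-bit single-channel images:

double avg = cv::mean(processedImage)[0];
// dst = (1 + contrast) * src - contrast * avg, saturated to [0, 255]
processedImage.convertTo(processedImage, -1, 1.0 + opt.contrast, -opt.contrast * avg);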
Example 17
void testEPNP() {

	ublas::matrix<double> boardPoints(3,12);
	int p=0;
	for(int w=0; w<2/*7*/; w++) {
				for(int h=0; h<6; h++) {
					boardPoints(0,p) = -20.0+w*5.0;
					boardPoints(1,p) = -20.0 + h*5.0;
					boardPoints(2,p) = 0.0;
					p++;
				}
	}

	ublas::matrix<double> cameraMatrix(3,3);

	cameraMatrix(0,0) = 654.8611202347574;
	cameraMatrix(0,1) = 0;
	cameraMatrix(0,2) = 319.5;
	cameraMatrix(1,0) = 0;
	cameraMatrix(1,1) = 654.8611202347574;
	cameraMatrix(1,2) = 239.5;
	cameraMatrix(2,0) = 0;
	cameraMatrix(2,1) = 0;
	cameraMatrix(2,2) = 1;

	ublas::vector<double> distCoeffVec(5);
	distCoeffVec(0) = -0.3669624087278113;
	distCoeffVec(1) = -0.07638290902180184;
	distCoeffVec(2) = 0;
	distCoeffVec(3) = 0;
	distCoeffVec(4) = 0.5764668364450144;

	ublas::matrix<double> foundPoints(2,12);


	foundPoints(0,0) = 122.56181;
	foundPoints(1,0) = 64.894775;
	foundPoints(0,1) = 163.85579;
	foundPoints(1,1) = 63.682297;
	foundPoints(0,2) = 204.5;
	foundPoints(1,2) = 62;
	foundPoints(0,3) = 245.62991;
	foundPoints(1,3) = 61.093784;
	foundPoints(0,4) = 283;
	foundPoints(1,4) = 61;
	foundPoints(0,5) = 319.81903;
	foundPoints(1,5) = 61.967384;
	foundPoints(0,6) = 354.08017;
	foundPoints(1,6) = 62.274704;
	foundPoints(0,7) = 123;
	foundPoints(1,7) = 104.5;
	foundPoints(0,8) = 164.5;
	foundPoints(1,8) = 102.5;
	foundPoints(0,9) = 204.5;
	foundPoints(1,9) = 100.5;
	foundPoints(0,10) = 244;
	foundPoints(1,10) = 99.5;
	foundPoints(0,11) = 281.5;
	foundPoints(1,11) = 99;
   /*	foundPoints(318.5,99));
	foundPoints(353.72653, 96.017204));
	foundPoints(124.62646,144.43448));
	foundPoints(165.5,142.5));
	foundPoints(206.03426,140.04895));
	foundPoints(245,138.5));
	foundPoints(283,137.5));
	foundPoints(319,136));
	foundPoints(354.5,134.5));
	foundPoints(127.50209,184.51553));
	foundPoints(167.5,181.5));
	foundPoints(207,179));
	foundPoints(245.5,177));
	foundPoints(283,175));
	foundPoints(318.88574,174.8522));
	foundPoints(353.5,170.5));
	foundPoints(128.28163,224.11998));
	foundPoints(169,220.5));
	foundPoints(210.13632,217.0213));
	foundPoints(247,214.5));
	foundPoints(282.64105,212.52209));
	foundPoints(319.19608,209.22551));
	foundPoints(343,206));
	foundPoints(134,260.5));
	foundPoints(172.92181,256.8718));
	foundPoints(211,255));
	foundPoints(248.5,250.5));
	foundPoints(285,248));
	foundPoints(319.5,243));
	foundPoints(353.30963,240.85687));  */

	 epnp PnP(cameraMatrix, boardPoints, foundPoints/*undistortedPoints*/);
	 ublas::matrix<double> R (3,3);
	 ublas::vector<double> tvec(3);
   	 PnP.compute_pose(R,tvec);
	 print("R", R);
	 print("tvec", tvec);
}
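OpenCV's built-in EPnP solver offers a quick cross-check of the custom epnp class; a sketch assuming the same correspondences are repacked into cv:: containers (flag name from OpenCV 3):

std::vector<cv::Point3f> obj;
std::vector<cv::Point2f> img;
for (int i = 0; i < 12; ++i) {
	obj.push_back(cv::Point3f(boardPoints(0,i), boardPoints(1,i), boardPoints(2,i)));
	img.push_back(cv::Point2f(foundPoints(0,i), foundPoints(1,i)));
}
cv::Mat K = (cv::Mat_<double>(3,3) << 654.8611, 0, 319.5,
                                      0, 654.8611, 239.5,
                                      0, 0, 1);
cv::Mat dist = (cv::Mat_<double>(5,1) << -0.36696, -0.07638, 0, 0, 0.57647);
cv::Mat rvec, tvec;
cv::solvePnP(obj, img, K, dist, rvec, tvec, false, cv::SOLVEPNP_EPNP);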
Example 18
/**
 * Renders a frame of the state and shaders we have set up to the window
 */
void render() {

    // GL state
    glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
    glEnable(GL_DEPTH_TEST);

    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    glUseProgram(skyID);

    int modelviewHandle = glGetUniformLocation(skyID, "modelview_matrix");
    if (modelviewHandle == -1)
        exit(1);

    // We reset the camera for this frame
    glm::mat4 cameraMatrix(1.0f);
    
    // Rotations are always relative to the origin, so we step back from the origin
    // (move the whole world AWAY from us 5 units in the z direction)
    // Circle the camera around the scene by spinning the whole world around the origin
    std::printf("%f \n", xRot);
    cameraMatrix = glm::translate(cameraMatrix, glm::vec3(0.0f, 0.0f, -1.0f+xRot));
    cameraMatrix = glm::rotate(cameraMatrix, yRot, glm::vec3(0.0f, 1.0f, 0.0f));
    //cameraMatrix = glm::rotate(cameraMatrix, xRot, glm::vec3(1.0f, 0.0f, 0.0f));
 
//Lighting render
    glUseProgram(lightProgramID);
    int mvHandle = glGetUniformLocation(lightProgramID, "modelview_matrix");
    int normHandle = glGetUniformLocation(lightProgramID, "normal_matrix");
    int lightposHandle = glGetUniformLocation(lightProgramID, "light_pos");
    if (mvHandle == -1 || normHandle==-1 || lightposHandle == -1) {
        fprintf(stderr, "Error: can't find matrix uniforms\n");
        exit(1);
    }
    
    // Update the light position
    float lightPos[4] = { lightX, lightY, lightZ, 1.0f };   
    glUniform4fv(lightposHandle, 1, lightPos); 

    // Uniform variables defining material colours
    // These can be changed for each sphere, to compare effects
    int mtlambientHandle = glGetUniformLocation(lightProgramID, "mtl_ambient");
    int mtldiffuseHandle = glGetUniformLocation(lightProgramID, "mtl_diffuse");
    int mtlspecularHandle = glGetUniformLocation(lightProgramID, "mtl_specular");
    if ( mtlambientHandle == -1 ||
         mtldiffuseHandle == -1 ||
         mtlspecularHandle == -1) {
        fprintf(stderr, "Error: can't find material uniform variables\n");
        exit(1);
    }
    
    float mtlambient[3] = { 0.12f, 0.1f, 0.1f };    // ambient material
    float mtldiffuse[3] = { 0.0f, 1.0f, 0.0f }; // diffuse material
    float mtlspecular[3] = { 1.0f, 1.0f, 1.0f };    // specular material

    glUniform3fv(mtlambientHandle, 1, mtlambient);
    glUniform3fv(mtldiffuseHandle, 1, mtldiffuse);
    glUniform3fv(mtlspecularHandle, 1, mtlspecular);    

    
    glm::mat4 mvMatrix=cameraMatrix;
    glm::mat3 normMatrix;
       // Set VAO to the sphere mesh
    glBindVertexArray(sphereVaoHandle);

    // Render three spheres in a row
    // We compute the normal matrix from the current modelview matrix
    // and give it to our program
    mvMatrix = glm::translate(mvMatrix, glm::vec3(0.0f, -0.0f, -20.0f));
    mvMatrix = glm::scale(mvMatrix, glm::vec3(10.0f, 10.0f, 10.0f));
    normMatrix = glm::mat3(mvMatrix);
    glUniformMatrix4fv(mvHandle, 1, false, glm::value_ptr(mvMatrix) );  // Middle
    glUniformMatrix3fv(normHandle, 1, false, glm::value_ptr(normMatrix));
    glDrawElements(GL_TRIANGLES, sphere->indCount, GL_UNSIGNED_INT, 0);


    // Set VAO to the sphere mesh

    // mvMatrix = glm::translate(mvMatrix, glm::vec3(-3.0f, 0.0f, 0.0f));
    // normMatrix = glm::mat3(mvMatrix);
    // glUniformMatrix4fv(mvHandle, 1, false, glm::value_ptr(mvMatrix) );  // Left
    // glUniformMatrix3fv(normHandle, 1, false, glm::value_ptr(normMatrix));
    // glDrawElements(GL_TRIANGLES, sphere->indCount, GL_UNSIGNED_INT, 0);

    // mvMatrix = glm::translate(mvMatrix, glm::vec3(6.0f, 0.0f, 0.0f));
    // normMatrix = glm::mat3(mvMatrix);
    // glUniformMatrix4fv(mvHandle, 1, false, glm::value_ptr(mvMatrix) );  // right
    // glUniformMatrix3fv(normHandle, 1, false, glm::value_ptr(normMatrix));
    // glDrawElements(GL_TRIANGLES, sphere->indCount, GL_UNSIGNED_INT, 0);
    
//Table
    glUseProgram(tableprogramID);

    modelviewHandle = glGetUniformLocation(tableprogramID, "modelview_matrix");
    if (modelviewHandle == -1)
        exit(1);

    // We reset the camera for this frame
    //glm::mat4 cameraMatrix;
    
    // Rotations are always relative to the origin, so we step back from the origin
    // (move the whole world AWAY from us 5 units in the z direction)

    //******* Use the view matrix to move the cube in front of the camera *******
    mvMatrix = glm::translate(mvMatrix, glm::vec3(0.0f, 0.0f, 10.0f));

    // Circle the camera around the scene by spinning the whole world around the origin
    mvMatrix = glm::scale(mvMatrix, glm::vec3(10.0f, 10.0f, 10.0f));
    
    // Set VAO to the table model and draw it
    glBindVertexArray(tableVaoHandle);

    // We just want to draw the cube unmodified at the origin, so we just
    // take a copy of the camera matrix with its current transformations
    //  glm::mat4 modelview;
    //  modelview = glm::translate(cameraMatrix, glm::vec3(0.0f, 0.0f, 0.0f));    
    //    modelview = glm::rotate(modelview, 0.0f, glm::vec3(0.0f, 0.0f, 1.0f));    
    glUniformMatrix4fv( modelviewHandle, 1, false, glm::value_ptr(cameraMatrix) );

    //******** Drawing the cube *******
    glDrawElements(GL_TRIANGLES, CUBE_NUM_TRIS * 3, GL_UNSIGNED_INT, 0);    // New call
    

    glBindVertexArray(0);


//SkyBox
    glDepthMask (GL_FALSE);
    glUseProgram (skyID);
    glActiveTexture (GL_TEXTURE0);
    glBindTexture (GL_TEXTURE_CUBE_MAP, *tex_cube);
    glDrawArrays (GL_TRIANGLES, 0, 36);
    glDepthMask (GL_TRUE);

    // Set VAO to the skybox cube model
    glBindVertexArray(cubeVaoHandle);

    // modelviewHandle currently refers to tableprogramID; re-fetch it for the sky shader
    modelviewHandle = glGetUniformLocation(skyID, "modelview_matrix");
    glm::mat4 skyCube;
    skyCube = glm::mat4( cameraMatrix );
    skyCube = glm::scale( skyCube, glm::vec3(40.0f, 40.0f, 40.0f ));
    glUniformMatrix4fv( modelviewHandle, 1, false, glm::value_ptr(skyCube) );
    glDrawElements(GL_TRIANGLES, CUBE_NUM_TRIS * VALS_PER_VERT, GL_UNSIGNED_INT, 0);    // New call
    glBindVertexArray(0);


    glutSwapBuffers();
    glFlush();

}
Example 19
 Eigen::Matrix<double, 3, 4> cameraMatrix(const Eigen::Matrix<double, 3, 3> &k, const Eigen::Matrix<double, 3, 3> &r, const Eigen::Vector3d &t)
 {
     return cameraMatrix(k, cameraPose(r, t));
 }
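The overload above delegates to two functions that are not shown; plausible companions (assumptions, not the original code) would assemble the 3x4 pose and pre-multiply by the intrinsics:

Eigen::Matrix<double, 3, 4> cameraPose(const Eigen::Matrix<double, 3, 3> &r, const Eigen::Vector3d &t)
{
    Eigen::Matrix<double, 3, 4> pose;
    pose.leftCols<3>() = r;   // [R | t]
    pose.col(3) = t;
    return pose;
}

Eigen::Matrix<double, 3, 4> cameraMatrix(const Eigen::Matrix<double, 3, 3> &k, const Eigen::Matrix<double, 3, 4> &pose)
{
    return k * pose;          // P = K [R | t]
}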
Example 20
void ImposterCapture::begin(  TSShapeInstance *shapeInst,
                              S32 dl, 
                              S32 dim,
                              F32 radius,
                              const Point3F &center )
{
   mShapeInstance = shapeInst;
   mDl = dl;
   mDim = dim;
   mRadius = radius;
   mCenter = center;

   mBlackTex.set( mDim, mDim, GFXFormatR8G8B8A8, &GFXDefaultRenderTargetProfile, avar( "%s() - (line %d)", __FUNCTION__, __LINE__ ) ); 
   mWhiteTex.set( mDim, mDim, GFXFormatR8G8B8A8, &GFXDefaultRenderTargetProfile, avar( "%s() - (line %d)", __FUNCTION__, __LINE__ ) ); 
   mNormalTex.set( mDim, mDim, GFXFormatR8G8B8A8, &GFXDefaultRenderTargetProfile, avar( "%s() - (line %d)", __FUNCTION__, __LINE__ ) ); 
   mDepthBuffer.set( mDim, mDim, GFXFormatD24S8, &GFXDefaultZTargetProfile, avar( "%s() - (line %d)", __FUNCTION__, __LINE__ ) ); 

   // copy the black render target data into a bitmap
   mBlackBmp = new GBitmap;
   mBlackBmp->allocateBitmap(mDim, mDim, false, GFXFormatR8G8B8);

   // copy the white target data into a bitmap
   mWhiteBmp = new GBitmap;
   mWhiteBmp->allocateBitmap(mDim, mDim, false, GFXFormatR8G8B8);

   // Setup viewport and frustum to do orthographic projection.
   RectI viewport( 0, 0, mDim, mDim );
   GFX->setViewport( viewport );
   GFX->setOrtho( -mRadius, mRadius, -mRadius, mRadius, 1, 20.0f * mRadius );

   // Position camera looking out the X axis.
   MatrixF cameraMatrix( true );
   cameraMatrix.setColumn( 0, Point3F( 0, 0, 1 ) );
   cameraMatrix.setColumn( 1, Point3F( 1, 0, 0 ) );
   cameraMatrix.setColumn( 2, Point3F( 0, 1, 0 ) );

   // setup scene state required for TS mesh render...this is messy and inefficient; 
   // should have a mode where most of this is done just once (and then 
   // only the camera matrix changes between snapshots).
   // note that we use getFrustum here, but we set up an ortho projection above.  
   // it doesn't seem like the scene state object pays attention to whether the projection is 
   // ortho or not.  this could become a problem if some code downstream tries to 
   // reconstruct the projection matrix using the dimensions and doesn't 
   // realize it should be ortho.  at the moment no code is doing that.
   F32 left, right, top, bottom, nearPlane, farPlane;
   bool isOrtho;
   GFX->getFrustum( &left, &right, &bottom, &top, &nearPlane, &farPlane, &isOrtho );
   Frustum frust( isOrtho, left, right, top, bottom, nearPlane, farPlane, cameraMatrix );

   // Set up render pass.

   mRenderPass = new RenderPassManager();
   mRenderPass->assignName( "DiffuseRenderPass" );
   mMeshRenderBin = new RenderMeshMgr();
   mRenderPass->addManager( mMeshRenderBin );

   // Set up scene state.

   mState = new SceneRenderState(
      gClientSceneGraph,
      SPT_Diffuse,
      SceneCameraState( viewport, frust, GFX->getWorldMatrix(),GFX->getProjectionMatrix() ),
      mRenderPass,
      false
   );

   // Set up our TS render state.
   mRData.setSceneState( mState );
   mRData.setCubemap( NULL );
   mRData.setFadeOverride( 1.0f );

   // set gfx up for render to texture
   GFX->pushActiveRenderTarget();
   mRenderTarget = GFX->allocRenderToTextureTarget();

}
Example 21
void Calib::computeCameraPosePnP()
{
    std::cout << "There are " << imagePoints.size() << " imagePoints and " << objectPoints.size() << " objectPoints." << std::endl;
    cv::Mat cameraMatrix(3,3,cv::DataType<double>::type);
    cv::setIdentity(cameraMatrix);
    for (unsigned i = 0; i < 3; i++)
    {
        for (unsigned int j = 0; j < 3; j++)
        {
            cameraMatrix.at<double> (i, j) = M(i, j);
        }
    }
    std::cout << "Initial cameraMatrix: " << cameraMatrix << std::endl;

    cv::Mat distCoeffs(4,1,cv::DataType<double>::type);
    distCoeffs.at<double>(0) = 0;
    distCoeffs.at<double>(1) = 0;
    distCoeffs.at<double>(2) = 0;
    distCoeffs.at<double>(3) = 0;

    cv::Mat rvec(3,1,cv::DataType<double>::type);
    cv::Mat tvec(3,1,cv::DataType<double>::type);

    std::vector<cv::Point3d> objectPoints_cv;
    std::vector<cv::Point2d> imagePoints_cv;

    for(int point_id = 0;point_id < objectPoints.size();point_id++)
    {
        cv::Point3d object_point;
        cv::Point2d image_point;
        object_point.x = objectPoints[point_id](0);
        object_point.y = objectPoints[point_id](1);
        object_point.z = objectPoints[point_id](2);

        image_point.x = imagePoints[point_id](0);
        image_point.y = imagePoints[point_id](1);

        objectPoints_cv.push_back(object_point);
        imagePoints_cv.push_back(image_point);
    }
    cv::solvePnP(objectPoints_cv, imagePoints_cv, cameraMatrix, distCoeffs, rvec, tvec);

    std::cout << "rvec: " << rvec << std::endl;
    std::cout << "tvec: " << tvec << std::endl;

    std::vector<cv::Point2d> projectedPoints;
    cv::projectPoints(objectPoints_cv, rvec, tvec, cameraMatrix, distCoeffs, projectedPoints);

    for(unsigned int i = 0; i < projectedPoints.size(); ++i)
    {
        std::cout << "Image point: " << imagePoints[i] << " Projected to " << projectedPoints[i] << std::endl;
    }

    //inverting the R|t to get camera position in world co-ordinates
    cv::Mat Rot(3,3,cv::DataType<double>::type);
    cv::Rodrigues(rvec, Rot);
    Rot = Rot.t();
    tvec = -Rot*tvec;
    this->R << Rot.at<double>(0,0), Rot.at<double>(0,1), Rot.at<double>(0,2), Rot.at<double>(1,0), Rot.at<double>(1,1), Rot.at<double>(1,2), Rot.at<double>(2,0), Rot.at<double>(2,1), Rot.at<double>(2,2);
    this->T << tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2);
}
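A follow-on sketch packing the recovered world-frame pose into a single Eigen transform (R and T are the members filled at the end of computeCameraPosePnP):

Eigen::Isometry3d world_T_camera = Eigen::Isometry3d::Identity();
world_T_camera.linear() = R;       // camera orientation in world coordinates
world_T_camera.translation() = T;  // camera position in world coordinates
std::cout << world_T_camera.matrix() << std::endl;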