Example #1
    Intersection Triangle::intersect(const Ray &r) const
    {
         const Vec3t edge1(V1-V0), edge2(V2-V0); // could precalculate
         Vec3t pvec(cross(r.getNormal(), edge2));
         DefType det=dot(edge1,pvec);

         if (det < MM_EPSILON)
             return Intersection();

         Vec3t tvec(r.getPoint()-V0);
         DefType u=dot(tvec,pvec);

         if (u < 0 || u > det)
             return Intersection();

         Vec3t qvec(cross(tvec,edge1));

         DefType v=dot(r.getNormal(),qvec); // v comes from the ray direction

         if (v < 0 || u+v > det)
             return Intersection();

         DefType t=dot(edge2, qvec)/det;

         return Intersection(r.positionAtTime(t), m_normal, t, true, true);
    }
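The "could precalculate" comment on the first line is worth acting on: edge1 and edge2 depend only on the triangle's vertices, not on the ray. A minimal sketch of that caching, written as a standalone template so it assumes nothing about the engine's Vec3t:

// Cache the two edge vectors once (e.g. at construction) instead of
// recomputing V1 - V0 and V2 - V0 on every intersection test.
template <typename Vec>
struct TriangleEdges {
    Vec edge1, edge2;
    TriangleEdges(const Vec& v0, const Vec& v1, const Vec& v2)
        : edge1(v1 - v0), edge2(v2 - v0) {}
};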
Example #2
bool AmbientOcclusionEngine::rayIntersectsTriangle(Vec const& origin, Vec const& ray, 
   unsigned const triangle)
{
   Vec a(vertex(3*triangle+0));
   Vec b(vertex(3*triangle+1));
   Vec c(vertex(3*triangle+2));

   Vec ab(b-a);
   Vec ac(c-a);
   Vec pvec(ray^ac);

   double det(ab*pvec);
   if (std::abs(det) < Epsilon) return false;
   double invDet(1.0/det);

   Vec tvec(origin-a);
   double u(tvec*pvec);
   u *= invDet;
   if (u < 0.0 || u > 1.0) return false;

   Vec qvec(tvec^ab);
   double v(invDet*(ray*qvec));
   if (v < 0.0 || u + v > 1.0) return false;

   double t(invDet*(ac*qvec));   // scale by 1/det, as for u and v

   return t > Epsilon;
}
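Unlike Example #1, the std::abs(det) test above accepts both triangle orientations, so back faces are hit as well. The two behaviours differ only in the determinant check; a small sketch of the toggle:

#include <cmath>

// One-sided (backface-culling) vs. two-sided determinant acceptance test.
bool detAccepted(double det, bool cullBackfaces, double eps)
{
    return cullBackfaces ? (det > eps) : (std::fabs(det) > eps);
}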
Example #3
// If pos is visible, return the distance from pos to the camera.
// Use fudge distance to scale rad against top/bot/left/right planes
// Otherwise, return -distance
F32 LLCamera::visibleDistance(const LLVector3 &pos, F32 rad, F32 fudgedist, U32 planemask) const
{
	if (mFixedDistance > 0)
	{
		return mFixedDistance;
	}
	LLVector3 dvec = pos - mOrigin;
	// Check visibility
	F32 dist = dvec.magVec();
	if (dist > rad)
	{
		F32 dp,tdist;
		dp = dvec * mXAxis;
		if (dp < -rad)
			return -dist;

		rad *= fudgedist;
		LLVector3 tvec(pos);
		for (int p=0; p<PLANE_NUM; p++)
		{
			if (!(planemask & (1<<p)))
				continue;
			tdist = -(mWorldPlanes[p].dist(tvec));
			if (tdist > rad)
				return -dist;
		}
	}
	return dist;
}
Example #4
void GLRender::computeModelViewMatrix()
{
	cv::Matx31f rvec(m_rotation);
	cv::Matx33f rmat;
	cv::Rodrigues(rvec, rmat);
	rmat = rmat.inv();

	m_modelViewMatrix[0] = rmat.val[0];
	m_modelViewMatrix[1] = rmat.val[3];
	m_modelViewMatrix[2] = rmat.val[6];
	m_modelViewMatrix[3] = 0.0f;

	m_modelViewMatrix[4] = rmat.val[1];
	m_modelViewMatrix[5] = rmat.val[4];
	m_modelViewMatrix[6] = rmat.val[7];
	m_modelViewMatrix[7] = 0.0f;

	m_modelViewMatrix[8] = rmat.val[2];
	m_modelViewMatrix[9] = rmat.val[5];
	m_modelViewMatrix[10] = rmat.val[8];
	m_modelViewMatrix[11] = 0.0f;

	cv::Matx31f tvec(m_translation);
	tvec = rmat*tvec;

	m_modelViewMatrix[12] = -tvec.val[0];
	m_modelViewMatrix[13] = -tvec.val[1];
	m_modelViewMatrix[14] = -tvec.val[2];
	m_modelViewMatrix[15] = 1.0f;
}
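cv::Matx stores its values row-major while OpenGL expects column-major arrays, which is why the copy above walks val[0], val[3], val[6] down each output column. And since a pure rotation's inverse is its transpose, rmat.inv() combined with the negated rmat*tvec implements [R|t]^-1 = [R^T | -R^T t]. A compact sketch of that inversion with cv::Matx types (the function name is illustrative):

#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>

// Invert an extrinsic pose given as a Rodrigues vector and a translation.
void invertPose(const cv::Matx31f& rvec, const cv::Matx31f& tvec,
                cv::Matx33f& rInv, cv::Matx31f& tInv)
{
    cv::Matx33f r;
    cv::Rodrigues(rvec, r);        // rotation vector -> 3x3 rotation matrix
    rInv = r.t();                  // inverse of a rotation is its transpose
    tInv = (rInv * tvec) * -1.0f;  // [R|t]^-1 = [R^T | -R^T t]
}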
Example #5
void LeapToCameraCalibrator::correctCameraPNP (ofxCv::Calibration & myCalibration){
    
    vector<cv::Point2f> imagePoints;
	vector<cv::Point3f> worldPoints;
    
	if( hasFingerCalibPoints ){
		for (int i=0; i<calibVectorImage.size(); i++){
			imagePoints.push_back(ofxCvMin::toCv(calibVectorImage[i]));
			worldPoints.push_back(ofxCvMin::toCv(calibVectorWorld[i]));
		}
	}
	else{
		cout << "not enough control points" << endl;
		return;
	}
    
    cout << "myCalibration.getDistCoeffs() " << myCalibration.getDistCoeffs() << endl;
    cout << "myCalibration.getUndistortedIntrinsics().getCameraMatrix() " << myCalibration.getUndistortedIntrinsics().getCameraMatrix() << endl;
    
    cv::Mat rvec(3,1,cv::DataType<double>::type);
    cv::Mat tvec(3,1,cv::DataType<double>::type);

    cv::solvePnP(worldPoints,imagePoints,
				 myCalibration.getUndistortedIntrinsics().getCameraMatrix(), myCalibration.getDistCoeffs(),
				 rvec, tvec, false );
  
    
	//     solvePnP( InputArray objectPoints, InputArray imagePoints,
	//     InputArray cameraMatrix, InputArray distCoeffs,
	//     OutputArray rvec, OutputArray tvec,
	//     bool useExtrinsicGuess=false );
		 
    
    calibrated = true;
    
	setExtrinsics(rvec, tvec);
	setIntrinsics(myCalibration.getUndistortedIntrinsics().getCameraMatrix());
    
	this->camera = myCalibration.getUndistortedIntrinsics().getCameraMatrix();
	
    cv::Mat distortionCoefficients = cv::Mat::zeros(5, 1, CV_64F);
    this->distortion = distortionCoefficients;

	
    this->rotation = rvec;
	this->translation = tvec;
	
	cout << "camera:\n";
	cout << this->camera << "\n";
	cout << "RVEC:\n";
	cout << rvec << "\n";
	cout << "TVEC:\n";
	cout << tvec << "\n";
	cout << "--------\n";
 
}
Example #6
int main( int argc, char* argv[])
{
  // Read points
  std::vector<cv::Point2f> imagePoints = Generate2DPoints();
  std::vector<cv::Point3f> objectPoints = Generate3DPoints();

  std::cout << "There are " << imagePoints.size() << " imagePoints and " << objectPoints.size() << " objectPoints." << std::endl;

    //camera parameters
  double fx = 525.0; //focal length x
  double fy = 525.0; //focal length y

  double cx = 319.5; //optical centre x
  double cy = 239.5; //optical centre y

  cv::Mat cameraMatrix(3,3,cv::DataType<double>::type);
  cameraMatrix.at<double>(0,0)=fx;
  cameraMatrix.at<double>(1,1)=fy;
  cameraMatrix.at<double>(2,2)=1;
  cameraMatrix.at<double>(0,2)=cx;
  cameraMatrix.at<double>(1,2)=cy;
  cameraMatrix.at<double>(0,1)=0;
  cameraMatrix.at<double>(1,0)=0;
  cameraMatrix.at<double>(2,0)=0;
  cameraMatrix.at<double>(2,1)=0;


  std::cout << "Initial cameraMatrix: " << cameraMatrix << std::endl;

  cv::Mat distCoeffs(4,1,cv::DataType<double>::type);
  distCoeffs.at<double>(0) = 0;
  distCoeffs.at<double>(1) = 0;
  distCoeffs.at<double>(2) = 0;
  distCoeffs.at<double>(3) = 0;

  cv::Mat rvec(3,1,cv::DataType<double>::type);
  cv::Mat tvec(3,1,cv::DataType<double>::type);

  cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

  std::cout << "rvec: " << rvec << std::endl;
  std::cout << "tvec: " << tvec << std::endl;

  std::vector<cv::Point2f> projectedPoints;
  cv::projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, projectedPoints);

  for(unsigned int i = 0; i < projectedPoints.size(); ++i)
    {
    std::cout << "Image point: " << imagePoints[i] << " Projected to " << projectedPoints[i] << std::endl;
    }

  return 0;
}
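The final loop prints the residual for each point; a single RMS number is often a handier sanity check of the recovered pose. A small sketch (the function name is illustrative):

#include <cmath>
#include <vector>
#include <opencv2/core.hpp>

// RMS pixel distance between measured and reprojected points.
double rmsReprojectionError(const std::vector<cv::Point2f>& measured,
                            const std::vector<cv::Point2f>& projected)
{
    CV_Assert(measured.size() == projected.size() && !measured.empty());
    double sum = 0.0;
    for (size_t i = 0; i < measured.size(); ++i) {
        cv::Point2f d = measured[i] - projected[i];
        sum += d.x * d.x + d.y * d.y;
    }
    return std::sqrt(sum / measured.size());
}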
Example #7
readMatrix::readMatrix(const char* fileName)
{
    int row = 0;
    bool firstLine = false;
    bool comment = false;
    std::vector<std::vector<double> >::iterator rowIter;
    std::vector<double>::iterator columnIter;
    std::ifstream inFile(fileName,std::ios::in);
    std::string in;
    LINFO("LOAD MATRIX %s",fileName);
    while (inFile >> in)
    {
        if(!in.compare("#")) //comment code # found
        {
            if(!comment)
            {
                comment = true;
            }
            else              //end of comment
            {
                comment = false;
            }
        }
        if((!comment) && in.compare("#")) //real line found
        {
            // the first line contains the dimensions
            // resize the vector
            if(firstLine == false)
            {
                sizeX = (int)atof(in.c_str());
                inFile >> in;
                sizeY = (int)atof(in.c_str());
                firstLine = true;
                std::vector<double> tvec(sizeX,0.0F);
                vectorIn.resize(sizeY,tvec);
                rowIter = vectorIn.begin();
                columnIter = rowIter->begin();
            }
            else
            {
                *columnIter = atof(in.c_str());
                ++columnIter;
                // if the end of this row is reached, advance to the next row
                if(columnIter == rowIter->end())
                {
                    row++;
                    ++rowIter;
                    if(row < sizeY)
                        columnIter = rowIter->begin();
                    //ASSERT(sizeY > row);
                }
            }
        }
    }
}
Example #8
bool a3Triangle::intersect(const a3Ray& ray, float* t, float* _u, float* _v, float* vtu, float* vtv) const
{
    double tHit, u, v;

    t3Vector3d dir(ray.direction);
    t3Vector3d v0v1(v1 - v0);
    t3Vector3d v0v2(v2 - v0);
    t3Vector3d pvec = dir.getCrossed(v0v2);
    double det = v0v1.dot(pvec);

    // Right-handed coordinate system: with backface culling enabled, the
    // triangle is visible only when the determinant is positive.
    // (Braces are needed here; without them the else binds to the inner if.)
    if(bEnableBackfaceCulling)
    {
        if(det < A3_TOLERANCE_DOUBLE)
            return false;
    }
    else
    {
        if(t3Math::Abs(det) < A3_TOLERANCE_DOUBLE)
            return false;
    }

    double invDet = 1.0 / det;

    // Cramer's rule: solve for the [t, u, v] components
    t3Vector3d tvec(ray.origin - v0);
    u = tvec.dot(pvec) * invDet;
    if(u < 0.0 || u > 1.0) return false;

    t3Vector3d qvec = tvec.getCrossed(v0v1);
    v = dir.dot(qvec) * invDet;
    if(v < 0.0 || u + v > 1.0) return false;

    // could return here directly
    tHit = v0v2.dot(qvec) * invDet;

    if(tHit > ray.minT && tHit < ray.maxT)
    {
        *t = tHit;
        *_u = u;
        *_v = v;

        if(bCalTextureCoordinate)
        {
            // texture coordinates
            t3Vector3f vt = (1 - u - v) * vt0 + u * vt1 + v * vt2;
            *vtu = vt.x;
            *vtv = vt.y;
        }

        return true;
    }

    return false;
}
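The texture lookup at the end is plain barycentric interpolation with weights (1 - u - v, u, v), so the same formula interpolates any per-vertex attribute. A generic sketch for any type supporting + and scalar *:

// Interpolate a per-vertex attribute at barycentric coordinates (u, v);
// vertex 0 carries weight 1 - u - v, matching the intersection code above.
template <typename T>
T baryLerp(const T& a0, const T& a1, const T& a2, double u, double v)
{
    return a0 * (1.0 - u - v) + a1 * u + a2 * v;
}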
Example #9
void HumanBodyLaser::Disp2Point(const Mat& disp, const Point2f& stPt, const Mat& im, const Mat& msk, const Mat& Qmtx, vector<Point3f>& points, vector<Vec3b>& colors, vector<Vec3f>& normals)
{
	int height = disp.size().height;
	int width = disp.size().width;
	
	for(int y = 0; y < height; ++y)
	{
		uchar * pmsk = (uchar *)msk.ptr<uchar>(y);
		float * pdsp = (float *)disp.ptr<float>(y);
		for(int x = 0; x < width; ++x)
		{
			if(pmsk[x] == 0 || pdsp[x] == 0)
			{
				points.push_back(Point3f(PT_UNDEFINED, PT_UNDEFINED, PT_UNDEFINED));
				colors.push_back(Vec3b(0,0,0));
				normals.push_back(Vec3f(0,0,0));
			}
			else
			{				
				Mat tvec(4,1,CV_64FC1);
				tvec.at<double>(0,0) = stPt.x + x;
				tvec.at<double>(1,0) = stPt.y + y;
				tvec.at<double>(2,0) = pdsp[x];
				tvec.at<double>(3,0) = 1.0f;

				//cout << "Qmtx: " << Qmtx << endl;
				//cout << "tvec: " << tvec << endl;
				Mat hicoord = Qmtx * tvec;
				//cout << "hicoord: " << hicoord << endl;
				
				Point3f tpt;
				tpt.x = hicoord.at<double>(0,0) / hicoord.at<double>(3,0);
				tpt.y = hicoord.at<double>(1,0) / hicoord.at<double>(3,0);
				tpt.z = hicoord.at<double>(2,0) / hicoord.at<double>(3,0);

				Point3f tn = Point3f(0.f,0.f,0.f) - tpt;

				points.push_back(tpt);
				normals.push_back(Vec3f(tn.x, tn.y, tn.z));
				colors.push_back(im.at<Vec3b>(y,x));				
			}
		}
	}
}
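The per-pixel multiplication by Qmtx is the computation cv::reprojectImageTo3D performs in bulk; the hand-rolled loop is needed here mainly because it offsets each pixel by stPt and interleaves the mask and colour handling. For the plain case, a sketch of the one-call equivalent:

#include <opencv2/calib3d.hpp>

// Reproject a whole float disparity map to 3D in one call; xyz is CV_32FC3.
cv::Mat reprojectAll(const cv::Mat& disp, const cv::Mat& Qmtx)
{
    cv::Mat xyz;
    cv::reprojectImageTo3D(disp, xyz, Qmtx, /*handleMissingValues=*/true);
    return xyz;
}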
Example #10
cv::Mat OpenCVCameraEstimation::estimate(std::vector<imageio::ModelLandmark> imagePoints, cv::Mat intrinsicCameraMatrix, std::vector<int> vertexIds /*= std::vector<int>()*/)
{
	if (imagePoints.size() < 3) {
		Loggers->getLogger("morphablemodel").error("CameraEstimation: Number of points given is smaller than 3.");
		throw std::runtime_error("CameraEstimation: Number of points given is smaller than 3.");
	}

	// Todo: Currently, the optional vertexIds is not used
	vector<Point2f> points2d;
	vector<Point3f> points3d;
	for (const auto& landmark : imagePoints) {
		points2d.emplace_back(landmark.getPoint2D());
		points3d.emplace_back(morphableModel.getShapeModel().getMeanAtPoint(landmark.getName()));
	}

	//Estimate the pose
	Mat rvec(3, 1, CV_64FC1);
	Mat tvec(3, 1, CV_64FC1);
	if (points2d.size() == 3) {
		cv::solvePnP(points3d, points2d, intrinsicCameraMatrix, vector<float>(), rvec, tvec, false, CV_ITERATIVE); // CV_ITERATIVE (3pts) | CV_P3P (4pts) | CV_EPNP (4pts)
	} else {
		cv::solvePnP(points3d, points2d, intrinsicCameraMatrix, vector<float>(), rvec, tvec, false, CV_EPNP); // CV_ITERATIVE (3pts) | CV_P3P (4pts) | CV_EPNP (4pts)
		// Alternative, more outlier-resistant:
		// cv::solvePnPRansac(modelPoints, imagePoints, camMatrix, distortion, rvec, tvec, false); // min 4 points
		// has an optional argument 'inliers' - might be useful
	}

	// Convert rvec/tvec to matrices, etc... return 4x4 extrinsic camera matrix
	Mat rotation_matrix(3, 3, CV_64FC1);
	cv::Rodrigues(rvec, rotation_matrix);
	rotation_matrix.convertTo(rotation_matrix, CV_32FC1);
	Mat translation_vector = tvec;
	translation_vector.convertTo(translation_vector, CV_32FC1);

	Mat extrinsicCameraMatrix = Mat::zeros(4, 4, CV_32FC1);
	Mat extrRot = extrinsicCameraMatrix(cv::Range(0, 3), cv::Range(0, 3));
	rotation_matrix.copyTo(extrRot);
	Mat extrTrans = extrinsicCameraMatrix(cv::Range(0, 3), cv::Range(3, 4));
	translation_vector.copyTo(extrTrans);
	extrinsicCameraMatrix.at<float>(3, 3) = 1; // maybe set (3, 2) = 1 here instead so that the renderer can do divByW as well? (see Todo in libRender)

	return extrinsicCameraMatrix;
}
Example #11
cv::Mat GoodFrame::getRT(std::vector<cv::Point3f> &modelPoints_min)
{
    cv::Mat img = this->getCapturesImage();
    std::vector<cv::Point2f> imagePoints;
    for (size_t i = 0; i < 68; ++i)
    {
        imagePoints.push_back(cv::Point2f((float)(this->getDetected_landmarks().at<double>(i)),
                                          (float)(this->getDetected_landmarks().at<double>(i+68))));
    }

    /////
    int max_d = MAX(img.rows,img.cols);
    cv::Mat camMatrix = (Mat_<double>(3,3) << max_d, 0,     img.cols/2.0,
                                              0,     max_d, img.rows/2.0,
                                              0,     0,     1.0);

    cv::Mat ip(imagePoints);
    cv::Mat op(modelPoints_min);
    std::vector<double> rv(3), tv(3);
    cv::Mat rvec(rv),tvec(tv);
    double _dc[] = {0,0,0,0};
    //    std::cout << ip << std::endl << std::endl;
    //    std::cout << op << std::endl << std::endl;
    //    std::cout << camMatrix << std::endl << std::endl;
    solvePnP(op, ip, camMatrix, cv::Mat(1,4,CV_64FC1,_dc), rvec, tvec, false, CV_EPNP);

    double rot[9] = {0};
    cv::Mat rotM(3, 3, CV_64FC1, rot);
    cv::Rodrigues(rvec, rotM);
    double* _r = rotM.ptr<double>();
    //    printf("rotation mat: \n %.3f %.3f %.3f\n%.3f %.3f %.3f\n%.3f %.3f %.3f\n",_r[0],_r[1],_r[2],_r[3],_r[4],_r[5],_r[6],_r[7],_r[8]);

    //    printf("trans vec: \n %.3f %.3f %.3f\n",tv[0],tv[1],tv[2]);

    cv::Mat _pm(3, 4, CV_64FC1);
    _pm.at<double>(0,0) = _r[0]; _pm.at<double>(0,1) = _r[1]; _pm.at<double>(0,2) = _r[2]; _pm.at<double>(0,3) = tv[0];
    _pm.at<double>(1,0) = _r[3]; _pm.at<double>(1,1) = _r[4]; _pm.at<double>(1,2) = _r[5]; _pm.at<double>(1,3) = tv[1];
    _pm.at<double>(2,0) = _r[6]; _pm.at<double>(2,1) = _r[7]; _pm.at<double>(2,2) = _r[8]; _pm.at<double>(2,3) = tv[2];

    return _pm;
}
Example #12
void CollisionFace::RayTrace(const VC3 &position, const VC3 &direction, float ray_length, Storm3D_CollisionInfo &rti, bool accurate)
{
	// Begin calculating determinant - also used to calculate U parameter
	VC3 pvec(direction.y * e02.z - direction.z * e02.y, direction.z * e02.x - direction.x * e02.z, direction.x * e02.y - direction.y * e02.x);

	// If determinant is near zero, ray lies in plane of triangle
	float det = e01.x * pvec.x + e01.y*pvec.y + e01.z * pvec.z;
	if(det < 0.0001f) 
		return;

	// Calculate distance from vert0 to ray origin
	VC3 tvec(position.x - vertex0.x, position.y - vertex0.y, position.z - vertex0.z);

	// Calculate U parameter and test bounds
	float u = tvec.x * pvec.x + tvec.y * pvec.y + tvec.z * pvec.z;
	if((u < 0) || (u > det)) 
		return;

	// Prepare to test V parameter
	VC3 qvec(tvec.y * e01.z - tvec.z * e01.y, tvec.z * e01.x - tvec.x * e01.z, tvec.x * e01.y - tvec.y * e01.x);
	
	// Calculate V parameter and test bounds
	float v = direction.x * qvec.x + direction.y * qvec.y + direction.z * qvec.z;
	if((v < 0) || ((u + v) > det))
		return;

	// Calculate t, scale parameters, ray intersects triangle
	float t = e02.x * qvec.x + e02.y * qvec.y + e02.z * qvec.z;
	t /= det;

	// Set collision info
	if((t < rti.range) && (t < ray_length) && (t > 0))
	{
		rti.hit = true;
		rti.range = t;
		rti.position = position+direction*t;
		rti.plane_normal = plane.planenormal;
	}
}
Example #13
// Like visibleDistance, except uses mHorizPlanes[], which are left and right
//  planes perpendicular to (0,0,1) in world space
F32 LLCamera::visibleHorizDistance(const LLVector3 &pos, F32 rad, F32 fudgedist, U32 planemask) const
{
	if (mFixedDistance > 0)
	{
		return mFixedDistance;
	}
	LLVector3 dvec = pos - mOrigin;
	// Check visibility
	F32 dist = dvec.magVec();
	if (dist > rad)
	{
		rad *= fudgedist;
		LLVector3 tvec(pos);
		for (int p=0; p<HORIZ_PLANE_NUM; p++)
		{
			if (!(planemask & (1<<p)))
				continue;
			F32 tdist = -(mHorizPlanes[p].dist(tvec));
			if (tdist > rad)
				return -dist;
		}
	}
	return dist;
}
Example #14
void Calib::computeCameraPosePnP()
{
    std::cout << "There are " << imagePoints.size() << " imagePoints and " << objectPoints.size() << " objectPoints." << std::endl;
    cv::Mat cameraMatrix(3,3,cv::DataType<double>::type);
    cv::setIdentity(cameraMatrix);
    for (unsigned i = 0; i < 3; i++)
    {
        for (unsigned int j = 0; j < 3; j++)
        {
            cameraMatrix.at<double> (i, j) = M(i, j);
        }
    }
    std::cout << "Initial cameraMatrix: " << cameraMatrix << std::endl;

    cv::Mat distCoeffs(4,1,cv::DataType<double>::type);
    distCoeffs.at<double>(0) = 0;
    distCoeffs.at<double>(1) = 0;
    distCoeffs.at<double>(2) = 0;
    distCoeffs.at<double>(3) = 0;

    cv::Mat rvec(3,1,cv::DataType<double>::type);
    cv::Mat tvec(3,1,cv::DataType<double>::type);

    std::vector<cv::Point3d> objectPoints_cv;
    std::vector<cv::Point2d> imagePoints_cv;

    for(int point_id = 0;point_id < objectPoints.size();point_id++)
    {
        cv::Point3d object_point;
        cv::Point2d image_point;
        object_point.x = objectPoints[point_id](0);
        object_point.y = objectPoints[point_id](1);
        object_point.z = objectPoints[point_id](2);

        image_point.x = imagePoints[point_id](0);
        image_point.y = imagePoints[point_id](1);

        objectPoints_cv.push_back(object_point);
        imagePoints_cv.push_back(image_point);
    }
    cv::solvePnP(objectPoints_cv, imagePoints_cv, cameraMatrix, distCoeffs, rvec, tvec);

    std::cout << "rvec: " << rvec << std::endl;
    std::cout << "tvec: " << tvec << std::endl;

    std::vector<cv::Point2d> projectedPoints;
    cv::projectPoints(objectPoints_cv, rvec, tvec, cameraMatrix, distCoeffs, projectedPoints);

    for(unsigned int i = 0; i < projectedPoints.size(); ++i)
    {
        std::cout << "Image point: " << imagePoints[i] << " Projected to " << projectedPoints[i] << std::endl;
    }

    //inverting the R|t to get camera position in world co-ordinates
    cv::Mat Rot(3,3,cv::DataType<double>::type);
    cv::Rodrigues(rvec, Rot);
    Rot = Rot.t();
    tvec = -Rot*tvec;
    this->R << Rot.at<double>(0,0), Rot.at<double>(0,1), Rot.at<double>(0,2), Rot.at<double>(1,0), Rot.at<double>(1,1), Rot.at<double>(1,2), Rot.at<double>(2,0), Rot.at<double>(2,1), Rot.at<double>(2,2);
    this->T << tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2);
}
Example #15
int main() {

	// ## Make these inputs to the function ##
	const char* camDataFilename = "/Users/michaeldarling/Dropbox/Thesis/OpenCV/Calibration/Zoomed In/PS3Eye_out_camera_data.yml";
	const char* objPointsFilename = "/Users/michaeldarling/Dropbox/Thesis/OpenCV/Calibration/Zoomed In/LEDPos copy2.txt";
	const char* imageFilename = "/Users/michaeldarling/Dropbox/Thesis/OpenCV/Calibration/Zoomed In/Test Images/Test3.jpg";
	const char* imagePointsFilename = "/Users/michaeldarling/Dropbox/Thesis/OpenCV/Calibration/Zoomed In/imagePts3RANDOM"
			".txt";

	// Open the correct image
	std::cout << imageFilename << std::endl << imagePointsFilename << std::endl;
	cv::Mat img = cv::imread(imageFilename);
	cv::namedWindow("Window");
	cv::imshow("Window", img);

	// Get the camera matrix and distortion coefficients
	cv::Mat cameraMatrix, distCoeffs;
	getCalibrationData(camDataFilename, cameraMatrix, distCoeffs);



	// Solve pose estimation problem

	// get the object points (3d) and image points (2d)
	std::vector<cv::Point3f> objectPoints;
	std::vector<cv::Point2f> imagePoints;

	std::cout << "Object Points:" << std::endl;
	readPoints(objPointsFilename, objectPoints);

	std::cout << "\nImage Points:" << std::endl;
	readPoints(imagePointsFilename, imagePoints);

	// sort the image points
	ledFinder(imagePoints,imagePoints,1);
	std::cout << "\nSorted Image Points:" << std::endl;
	std::cout << imagePoints << std::endl;


	// Provide an initial guess: (give an approximate attitude--assume following from behind)
	// (OpenCV reference frame)  NOTE: rvec is an axis-angle (Rodrigues) vector, and [0,0,0]
	// corresponds to following directly behind
	cv::Mat rvec(3,1,CV_64F);
	cv::Mat tvec(3,1,CV_64F);
	rvec.at<double>(0,0) = 0*DEG2RAD;
	rvec.at<double>(1,0) = 0*DEG2RAD;
	rvec.at<double>(2,0) = 0*DEG2RAD;
	tvec.at<double>(0,0) = 0*IN2MM;
	tvec.at<double>(1,0) = 0*IN2MM;
	tvec.at<double>(2,0) = 100*IN2MM;
	//

	// solve for the pose that minimizes reprojection error
	// UNITS:  (objectPoints = mm, imagePoints = pixels, ... , rvec = radians, tvec = mm)
	clock_t t;
	t = clock();
	cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, true, CV_EPNP);
	t = clock() - t;
	std::cout << "\nTime To Estimate Pose: " << ((float)t/CLOCKS_PER_SEC*1000) << " ms" << std::endl;

	// compute the theta and r parts of the Euler rotation
	/*
	double rvec_theta = cv::norm(rvec, cv::NORM_L2);
	cv::Mat rvec_r = rvec/rvec_theta;
	 */


	// get rotation matrix
	cv::Mat rotMat, CV2B(3,3,CV_64F,cv::Scalar(0)), B2CV;
	CV2B.at<double>(0,2) = 1.0;
	CV2B.at<double>(1,0) = 1.0;
	CV2B.at<double>(2,1) = 1.0;
	cv::transpose(CV2B,B2CV); 		// CV2B and B2CV convert between OpenCV
	cv::Rodrigues(rvec,rotMat);     // frame convention and typical body frame


	// extract phi, theta, psi  (NOTE: these are for typical body frame)
	double phi, theta, psi;
	rotMat = CV2B * rotMat * B2CV;	// change the rotation matrix from CV convention to RHS body frame
	theta = asin(-rotMat.at<double>(2,0));				// get the Euler angles
	psi = atan2(rotMat.at<double>(1,0), rotMat.at<double>(0,0));	// atan2 keeps the quadrant;
	phi = atan2(rotMat.at<double>(2,1), rotMat.at<double>(2,2));	// acos/asin of a ratio loses the sign


	// add changes to the projection for troubleshooting

	phi 	+= 0*DEG2RAD;			// phi, theta, psi in body frame
	theta 	+= 0*DEG2RAD;
	psi 	+= 0*DEG2RAD;
	tvec.at<double>(0,0) += 0*IN2MM;  //tvec in OpenCV frame
	tvec.at<double>(1,0) += 0*IN2MM;
	tvec.at<double>(2,0) += 0*IN2MM;


	// reconstruct rotation matrix: from object frame to camera frame
	rotMat.at<double>(0,0) = cos(theta)*cos(psi);
	rotMat.at<double>(0,1) = sin(phi)*sin(theta)*cos(psi) - cos(phi)*sin(psi);
	rotMat.at<double>(0,2) = cos(phi)*sin(theta)*cos(psi) + sin(phi)*sin(psi);
	rotMat.at<double>(1,0) = cos(theta)*sin(psi);
	rotMat.at<double>(1,1) = sin(phi)*sin(theta)*sin(psi) + cos(phi)*cos(psi);
	rotMat.at<double>(1,2) = cos(phi)*sin(theta)*sin(psi) - sin(phi)*cos(psi);
	rotMat.at<double>(2,0) = -sin(theta);
	rotMat.at<double>(2,1) = sin(phi)*cos(theta);
	rotMat.at<double>(2,2) = cos(phi)*cos(theta);


	// rewrite the rvec from rotation matrix
	rotMat = B2CV * rotMat * CV2B;	// convert RHS body rotation matrix to OpenCV rotation matrix
	cv::Rodrigues(rotMat,rvec);


	// print to standard output
	cv::Mat tvec_body;
	tvec_body.push_back(tvec.at<double>(2,0));	// get tvec in body coordinates to display to
	tvec_body.push_back(tvec.at<double>(0,0));  // standard output
	tvec_body.push_back(tvec.at<double>(1,0));
	std::cout << "\nPhi, Theta, Psi: " << (phi*RAD2DEG) << ", " << (theta*RAD2DEG) <<
			", " << (psi*RAD2DEG) << std::endl;
	std::cout << "\ntvec:\n" << (tvec_body*MM2IN) << std::endl;
	std::cout << "\nTotal Distance: " << (cv::norm(tvec,cv::NORM_L2)*MM2IN) << std::endl;


	// compute the (re)projected points
	cv::vector<cv::Point3f> axesPoints;		// create points for coordinate axes
	axesPoints.push_back(cv::Point3f(0,0,0));
	axesPoints.push_back(cv::Point3f(AXES_LN,0,0));
	axesPoints.push_back(cv::Point3f(0,AXES_LN,0));
	axesPoints.push_back(cv::Point3f(0,0,AXES_LN));

	cv::vector<cv::Point2f> projImagePoints, projAxesPoints;
	cv::projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, projImagePoints);
	cv::projectPoints(axesPoints, rvec, tvec, cameraMatrix, distCoeffs, projAxesPoints);
	std::cout << "\nProjected Image Points:\n" << projImagePoints << std::endl;


	// ## Need to compute the projected error to determine feasibility  Decide on a threshold
	double reprojErr = scaledReprojError(imagePoints, projImagePoints);
	std::cout << "\nReprojection Error " << reprojErr << std::endl << std::endl;


	// Plot the LED's and reprojected positions on the image
	for (int i=0; i<NO_LEDS; i++) {
		cv::circle(img,imagePoints[i], 3, cv::Scalar(0,0,255), 2);
		cv::circle(img,projImagePoints[i], 3, cv::Scalar(0,255,0), 2);
		cv::line(img,projAxesPoints[0], projAxesPoints[1], cv::Scalar(0,0,255), 2);
		cv::line(img,projAxesPoints[0], projAxesPoints[2], cv::Scalar(0,255,0), 2);
		cv::line(img,projAxesPoints[0], projAxesPoints[3], cv::Scalar(255,0,0), 2);
	}

	cv::imshow("Window",img);
	//cv::imwrite("/Users/michaeldarling/Dropbox/Thesis/OpenCV/Calibration/Zoomed In/VisionOut5.jpg",img);
	cv::waitKey(0);
	std::cout << "DONE!\n" << std::endl;


	return 0;
}
Example #16
int trustregion(NLP1* nlp, std::ostream *fout, 
		SymmetricMatrix& H, ColumnVector& search_dir, 
		ColumnVector& sx, real& TR_size, real& step_length, 
		real stpmax, real stpmin)
{
/****************************************************************************
 *   subroutine trustregion
 *
 *   Purpose
 *   find a step which satisfies the Goldstein-Armijo line search conditions
 *
 *        Compute the dogleg step
 *        Compute the predicted reduction, pred, of the quadratic model
 *        Compute the actual reduction, ared
 *        IF ared/pred > eta
 *          THEN x_vec = x_vec + d_vec
 *               possibly enlarge the trust region (new TR_size >= old TR_size)
 *               Compute the gradient g_vec at the new point
 *          ELSE shrink the trust region (new TR_size < ||d_vec||)
 *
 *   Parameters
 *     nlp          -->  pointer to nonlinear problem object 
 *
 *     search_dir   -->  Vector of length n which specifies the 
 *                       newton direction on input. On output it will
 *                       contain the step 
 *
 *     step_length  <-- is a nonnegative variable. 
 *                      On output step_length contains the step size taken
 *
 *     ftol  -->  default Value = 1.e-4
 *                ftol should be smaller than 5.e-1
 *                suggested value = 1.e-4 for newton methods
 *                                = 1.e-1 for more exact line searches
 *     xtol  -->  default Value = 2.2e-16
 *     gtol  -->  default Value = 0.9
 *                gtol should be greater than 1.e-4 
 *
 *                termination occurs when the sufficient decrease 
 *                condition and the directional derivative condition are 
 *                satisfied. 
 *
 *     stpmin and TR_size are nonnegative input variables which 
 *       specify lower and upper bounds for the step.
 *       stpmin Default Value = 1.e-9
 *
 *   Initial version    Juan Meza November 1994
 *
 *
 *****************************************************************************/

  // Local variables

  int n = nlp->getDim();
  bool debug = nlp->getDebug();
  bool modeOverride = nlp->getModeOverride();

  ColumnVector tgrad(n), newton_dir(n), tvec(n), xc(n), xtrial(n);
  real fvalue, fplus, dnorm;
  real eta1 = .001;
  real eta2 = .1;
  real eta3 = .75;
  real rho_k;
  int iter = 0;
  int iter_max = 100;
  real dd1, dd2;
  real ared, pred;
  int dog_step;
  real TR_MAX = stpmax;
  static const char *steps[] = {"C", "D", "N", "B"};
  static bool accept;

  //
  // Initialize variables
  //

  fvalue = nlp->getF();
  xc     = nlp->getXc();
  step_length = 1.0;
  tgrad      = nlp->getGrad();
  newton_dir = search_dir;

  if (debug) {
    *fout << "\n***************************************";
    *fout << "***************************************\n";
    *fout << "\nComputeStep using trustregion\n";
    *fout << "\tStep   ||step||       ared          pred        TR_size \n";
  }

  while (iter < iter_max) {
    iter++;
    //
    // Compute the dogleg step 
    //
    search_dir = newton_dir;
    dog_step = dogleg(nlp, fout, H, tgrad, search_dir, sx, dnorm, TR_size, stpmax);
    step_length = dnorm;
    //
    // Compute pred = -g'd - 1/2 d'Hd
    //
    dd1  = Dot(tgrad,search_dir);
    tvec = H * search_dir;
    dd2  = Dot(search_dir,tvec);
    pred = -dd1 - dd2/2.0;
    
    //
    // Compute objective function at trial point
    //

    xtrial = xc + search_dir;
    if (modeOverride) {
      nlp->setX(xtrial);
      nlp->eval();
      fplus = nlp->getF();
    }
    else
      fplus  = nlp->evalF(xtrial);
    ared   = fvalue - fplus;

    //
    // Should we take this step ?
    //

    rho_k = ared/pred;

    accept = false;
    if (rho_k >= eta1) accept = true;

    //
    // Update the trust region
    //

    if (accept) {

      //
      // Do the standard updating
      //
      
      if (rho_k <= eta2) {

	// Model is just sort of bad
	// New trust region will be TR_size/2
	
	if (debug) {
	  *fout << "trustregion: rho_k  = " << e(rho_k,14,4) 
	      << " eta2 = " << e(eta2,14,4) << "\n";
	}
	TR_size = step_length / 2.0;

	if (TR_size < stpmin) {
	  *fout << "***** Trust region too small to continue.\n";
	  nlp->setX(xc);
	  nlp->setF(fvalue);
	  nlp->setGrad(tgrad);
	  return(-1);
	}
      }

      else if ((eta3 <= rho_k) && (rho_k <= (2.0 - eta3))) {

	// Model is PRETTY good
	// Double trust region

	if (debug) {
	  *fout << "trustregion: rho_k = " << e(rho_k,14,4) 
	      << " eta3 = " << e(eta3,14,4) << "\n";
	}
	TR_size = min(2.0*TR_size, TR_MAX);
      }

      else {

	//
	// All other cases
	//

	TR_size = max(2.0*step_length,TR_size);
	TR_size = min(TR_size, TR_MAX);
	if (debug) {
	  *fout << "trustregion: rho_k = " << e(rho_k,14,4) << "\n";
	}
      }
    }
    
    else {

      // Model is REALLY bad
      //

      TR_size = step_length/10.0;

      if (debug) {
	*fout << "trustregion: rho_k = " << e(rho_k,14,4) << "n"
	    << " eta1 = " << e(eta1,14,4) << "\n";
      }
      if (TR_size < stpmin) {
	*fout << "***** Trust region too small to continue.\n";
	nlp->setX(xc);
	nlp->setF(fvalue);
	nlp->setGrad(tgrad);
	return(-1);
      }
    }

    //
    //  Accept/Reject Step
    //      

    if (accept) {
      //
      // Update x, f, and grad
      //

      if (!modeOverride) {
	nlp->setX(xtrial);
	nlp->setF(fplus);
	nlp->evalG();
      }

      if (debug) {
	*fout << "\t Step     ||step||       ared          pred        TR_size \n";
	*fout << "Accept  " << steps[dog_step] << e(step_length,14,4) 
	      << e(ared,14,4) << e(pred,14,4) << e(TR_size,14,4) << "\n";
      }
      return(dog_step);
    }

    else {
      //
      // Reject step
      //
	
      if (debug) {
	*fout << "\t Step     ||step||       ared          pred        TR_size \n";
	*fout << "Reject  " << steps[dog_step] << e(step_length,14,4) 
	      << e(ared,14,4)  << e(pred,14,4)  << e(TR_size,14,4) << "\n";
      }
    }
  }
  nlp->setX(xc);
  nlp->setF(fvalue);
  nlp->setGrad(tgrad);
  return(-1); // Too many iterations
}
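One robustness detail in the loop above: rho_k = ared/pred divides by the predicted reduction, which the dogleg step is expected to keep positive. A guarded variant avoids a meaningless ratio if pred ever becomes zero or negative; a sketch:

// Guarded acceptance ratio: treat a non-positive predicted reduction as a
// failed step so the caller shrinks the trust region.
double acceptanceRatio(double ared, double pred)
{
    const double tiny = 1.0e-30;
    if (pred <= tiny) return -1.0;
    return ared / pred;
}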
Example #17
void testEPNP() {

	ublas::matrix<double> boardPoints(3,12);
	int p=0;
	for(int w=0; w<2/*7*/; w++) {
				for(int h=0; h<6; h++) {
					boardPoints(0,p) = -20.0+w*5.0;
					boardPoints(1,p) = -20.0 + h*5.0;
					boardPoints(2,p) = 0.0;
					p++;
				}
	}

	ublas::matrix<double> cameraMatrix(3,3);

	cameraMatrix(0,0) = 654.8611202347574;
	cameraMatrix(0,1) = 0;
	cameraMatrix(0,2) = 319.5;
	cameraMatrix(1,0) = 0;
	cameraMatrix(1,1) = 654.8611202347574;
	cameraMatrix(1,2) = 239.5;
	cameraMatrix(2,0) = 0;
	cameraMatrix(2,1) = 0;
	cameraMatrix(2,2) = 1;

	ublas::vector<double> distCoeffVec(5);
	distCoeffVec(0) = -0.3669624087278113;
	distCoeffVec(1) = -0.07638290902180184;
	distCoeffVec(2) = 0;
	distCoeffVec(3) = 0;
	distCoeffVec(4) = 0.5764668364450144;

	ublas::matrix<double> foundPoints(2,12);


	foundPoints(0,0) = 122.56181;
	foundPoints(1,0) = 64.894775;
	foundPoints(0,1) = 163.85579;
	foundPoints(1,1) = 63.682297;
	foundPoints(0,2) = 204.5;
	foundPoints(1,2) = 62;
	foundPoints(0,2) = 245.62991;
	foundPoints(1,2) = 61.093784;
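	// NOTE: (0,2)/(1,2) have now been assigned twice; these values overwrite the 204.5/62 pair above.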
	foundPoints(0,3) = 283;
	foundPoints(1,3) = 61;
	foundPoints(0,4) = 319.81903;
	foundPoints(1,4) = 61.967384;
	foundPoints(0,5) = 354.08017;
	foundPoints(1,5) = 62.274704;
	foundPoints(0,6) = 123;
	foundPoints(1,6) = 104.5;
	foundPoints(0,7) = 164.5;
	foundPoints(1,7) = 102.5;
	foundPoints(0,8) = 204.5;
	foundPoints(1,8) = 100.5;
	foundPoints(0,9) = 244;
	foundPoints(1,9) = 99.5;
	foundPoints(0,10) = 281.5;
	foundPoints(1,10) = 99;
	foundPoints(0,11) = 318.5;
	foundPoints(1,11) = 99;
   /*	foundPoints(353.72653, 96.017204));
	foundPoints(124.62646,144.43448));
	foundPoints(165.5,142.5));
	foundPoints(206.03426,140.04895));
	foundPoints(245,138.5));
	foundPoints(283,137.5));
	foundPoints(319,136));
	foundPoints(354.5,134.5));
	foundPoints(127.50209,184.51553));
	foundPoints(167.5,181.5));
	foundPoints(207,179));
	foundPoints(245.5,177));
	foundPoints(283,175));
	foundPoints(318.88574,174.8522));
	foundPoints(353.5,170.5));
	foundPoints(128.28163,224.11998));
	foundPoints(169,220.5));
	foundPoints(210.13632,217.0213));
	foundPoints(247,214.5));
	foundPoints(282.64105,212.52209));
	foundPoints(319.19608,209.22551));
	foundPoints(343,206));
	foundPoints(134,260.5));
	foundPoints(172.92181,256.8718));
	foundPoints(211,255));
	foundPoints(248.5,250.5));
	foundPoints(285,248));
	foundPoints(319.5,243));
	foundPoints(353.30963,240.85687));  */

	epnp PnP(cameraMatrix, boardPoints, foundPoints/*undistortedPoints*/);
	ublas::matrix<double> R(3,3);
	ublas::vector<double> tvec(3);
	PnP.compute_pose(R, tvec);
	print("R", R);
	print("tvec", tvec);
}
Example #18
void CalcStatistics::calculate() {
	CLOG(LDEBUG)<<"in calculate()";

	Types::HomogMatrix homogMatrix;
	cv::Mat_<double> rvec;
	cv::Mat_<double> tvec;
	cv::Mat_<double> axis;
	cv::Mat_<double> rotMatrix;
	float fi;

	rotMatrix= cv::Mat_<double>::zeros(3,3);
	tvec = cv::Mat_<double>::zeros(3,1);
	axis = cv::Mat_<double>::zeros(3,1);

	// first homogMatrix on InputStream
	if(counter == 0) {
		homogMatrix = in_homogMatrix.read();
		for (int i = 0; i < 3; ++i) {
			for (int j = 0; j < 3; ++j) {
		                rotMatrix(i,j)=homogMatrix(i, j);
			}
			tvec(i, 0) = homogMatrix(i, 3);
		}

		Rodrigues(rotMatrix, rvec);

		cumulatedHomogMatrix = homogMatrix;
		cumulatedTvec = tvec;
		cumulatedRvec = rvec;
		fi = sqrt((pow(rvec(0,0), 2) + pow(rvec(1,0), 2)+pow(rvec(2,0),2)));
		cumulatedFi = fi;
		for(int k=0;k<3;k++) {
			axis(k,0)=rvec(k,0)/fi;
		}

		cumulatedAxis = axis;
		counter=1;
		return;
	}

	homogMatrix=in_homogMatrix.read();

	for (int i = 0; i < 3; ++i) {
		for (int j = 0; j < 3; ++j) {
            rotMatrix(i,j)=homogMatrix(i, j);
		}
        tvec(i, 0) = homogMatrix(i, 3);
	}

	Rodrigues(rotMatrix, rvec);

	fi = sqrt((pow(rvec(0,0), 2) + pow(rvec(1,0), 2)+pow(rvec(2,0),2)));

	for(int k=0;k<3;k++) {
			axis(k,0)=rvec(k,0)/fi;
	}
	cumulatedFi += fi;
	cumulatedTvec += tvec;
	cumulatedRvec += rvec;
	cumulatedAxis += axis;

	counter++;
	avgFi = cumulatedFi/counter;
	avgAxis = cumulatedAxis/counter;
	avgRvec = avgAxis * avgFi;
	avgTvec = cumulatedTvec/counter;

	Types::HomogMatrix hm;
	cv::Mat_<double> rottMatrix;
	Rodrigues(avgRvec, rottMatrix);

	CLOG(LINFO)<<"Uśredniona macierz z "<<counter<<" macierzy \n";
	for (int i = 0; i < 3; ++i) {
		for (int j = 0; j < 3; ++j) {
            hm(i, j) = rottMatrix(i, j);
            CLOG(LINFO) << hm(i, j) << "  ";
		}
        hm(i, 3) = avgTvec(i, 0);
        CLOG(LINFO) << hm(i, 3) <<" \n";
	}
	out_homogMatrix.write(hm);

	CLOG(LINFO)<<"Uśredniony kąt: " << avgFi;

	// NOTE: this is only the absolute deviation of the newest sample from the
	// running mean, not a standard deviation over all samples.
	float standardDeviationFi = sqrt(pow(avgFi - fi, 2));

	CLOG(LINFO)<<"Standard deviation of the angle: "<<standardDeviationFi;

}
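The value logged as a standard deviation above is only the newest sample's distance from the running mean; a true streaming standard deviation needs the accumulated squared deviations, as in Welford's update. A minimal sketch:

#include <cmath>

// Welford's online mean/variance for a stream of angle samples.
struct RunningStats {
    int n = 0;
    double mean = 0.0, m2 = 0.0;
    void push(double x) {
        ++n;
        double delta = x - mean;
        mean += delta / n;
        m2 += delta * (x - mean);   // uses the already-updated mean
    }
    double stddev() const { return n > 1 ? std::sqrt(m2 / (n - 1)) : 0.0; }
};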
Example #19
void DrawCoordinateSystem::projectPoints(){
	cv::Mat_<double> rvec(3,1);
	cv::Mat_<double> tvec(3,1);
	Types::HomogMatrix homogMatrix;
	cv::Mat_<double> rotMatrix;
	rotMatrix.create(3,3);

	// Try to read rotation and translation from rvec and tvec or a homogeneous matrix.
	if(!in_rvec.empty()&&!in_tvec.empty()){
		rvec = in_rvec.read();
		tvec = in_tvec.read();
	}
	else if(!in_homogMatrix.empty()){
		homogMatrix = in_homogMatrix.read();

		for (int i = 0; i < 3; ++i) {
			for (int j = 0; j < 3; ++j) {
                rotMatrix(i,j)=homogMatrix(i, j);
			}
            tvec(i, 0) = homogMatrix(i, 3);
		}
		Rodrigues(rotMatrix, rvec);
	}
	else
		return;

	// Get camera info properties.
	Types::CameraInfo camera_matrix = in_camera_matrix.read();

	vector<cv::Point3f> object_points;
	vector<cv::Point2f> image_points;

	// Create four points constituting ends of three lines.
	object_points.push_back(cv::Point3f(0,0,0));
	object_points.push_back(cv::Point3f(0.1,0,0));
	object_points.push_back(cv::Point3f(0,0.1,0));
	object_points.push_back(cv::Point3f(0,0,0.1));

	// Project points taking into account the camera properties.
	if (rectified) {
		cv::Mat K, _r, _t;
		cv::decomposeProjectionMatrix(camera_matrix.projectionMatrix(), K, _r, _t);
		cv::projectPoints(object_points, rvec, tvec, K, cv::Mat(), image_points);
	} else {
		cv::projectPoints(object_points, rvec, tvec, camera_matrix.cameraMatrix(), camera_matrix.distCoeffs(), image_points);
	}

	// Draw lines between projected points.
	Types::Line ax(image_points[0], image_points[1]);
	ax.setCol(cv::Scalar(255, 0, 0));
	Types::Line ay(image_points[0], image_points[2]);
	ay.setCol(cv::Scalar(0, 255, 0));
	Types::Line az(image_points[0], image_points[3]);
	az.setCol(cv::Scalar(0, 0, 255));

	// Add lines to container.
	Types::DrawableContainer ctr;
	ctr.add(ax.clone());
	ctr.add(ay.clone());
	ctr.add(az.clone());

	CLOG(LINFO)<<"Image Points: "<<image_points;

	// Write output to dataports.
	out_csystem.write(ctr);
	out_impoints.write(image_points);
}
Example #20
void SolvePNP_method(vector<Point3f> points3d,
                     vector<Point2f> imagePoints,
                     MatrixXf &R, Vector3f &t, MatrixXf &xcam_,
                     MatrixXf K,
                     const PointCloud<PointXYZRGB>::Ptr& cloudRGB,      //projected data
                     const PointCloud<PointXYZ>::Ptr& cloud_laser_cord, //laser coordinates
                     MatrixXf& points_projected, Mat image,
                     PointCloud<PointXYZ>::ConstPtr cloud)
{
  // Copy the Eigen intrinsics K into an OpenCV camera matrix.
  Mat_<float> camera_matrix(3, 3);
  for (int i = 0; i < 3; ++i)
    for (int j = 0; j < 3; ++j)
      camera_matrix(i, j) = K(i, j);

  vector<double> rv(3), tv(3);
  Mat rvec(rv), tvec(tv);

  Mat_<float> dist_coef(5, 1);
  dist_coef = 0;

  cout << camera_matrix << endl;
  cout << imagePoints << endl;
  cout << points3d << endl;

  solvePnP(points3d, imagePoints, camera_matrix, dist_coef, rvec, tvec);

  // Convert the rotation vector to a matrix and copy the pose into the
  // Eigen outputs.
  double rot[9] = {0};
  Mat R_2(3, 3, CV_64FC1, rot);
  Rodrigues(rvec, R_2);

  R = Matrix3f::Zero(3, 3);
  t = Vector3f(0, 0, 0);

  double* _r = R_2.ptr<double>();
  double* _t = tvec.ptr<double>();

  t(0) = _t[0];
  t(1) = _t[1];
  t(2) = _t[2];

  R(0,0) = _r[0]; R(0,1) = _r[1]; R(0,2) = _r[2];
  R(1,0) = _r[3]; R(1,1) = _r[4]; R(1,2) = _r[5];
  R(2,0) = _r[6]; R(2,1) = _r[7]; R(2,2) = _r[8];

  // Project every laser point into the image and colour it from the pixel
  // it lands on.
  points_projected = MatrixXf::Zero(2, cloud->points.size());

  for (int j = 0; j < cloud->points.size(); j++) {
    PointCloud<PointXYZRGB> PointAuxRGB;
    PointAuxRGB.points.resize(1);

    Vector3f xw = Vector3f(cloud->points[j].x,
                           cloud->points[j].y,
                           cloud->points[j].z);

    xw = K * (R * xw + t);
    xw = xw / xw(2);

    int col = (int)xw(0);
    int row = (int)xw(1);

    points_projected(0, j) = (int)xw(0);
    points_projected(1, j) = (int)xw(1);

    // Points that project outside the image are coloured black.
    int b, r, g;
    if (col < 0 || row < 0 || col >= image.cols || row >= image.rows) {
      r = 0;
      g = 0;
      b = 0;
    } else {
      // NOTE: assumes the image channels are RGB-ordered; cv::imread loads BGR.
      r = image.at<cv::Vec3b>(row, col)[0];
      g = image.at<cv::Vec3b>(row, col)[1];
      b = image.at<cv::Vec3b>(row, col)[2];
    }

    uint32_t rgb = ((uint32_t)r << 16 | (uint32_t)g << 8 | (uint32_t)b);

    PointAuxRGB.points[0].x = cloud->points[j].x;
    PointAuxRGB.points[0].y = cloud->points[j].y;
    PointAuxRGB.points[0].z = cloud->points[j].z;
    PointAuxRGB.points[0].rgb = *reinterpret_cast<float*>(&rgb);

    cloudRGB->points.push_back(PointAuxRGB.points[0]);
  }
}
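The reinterpret_cast at the end packs the 8-bit channels into PCL's float rgb field; PCL's point types also expose the channels directly, which avoids the bit twiddling. A sketch assuming pcl::PointXYZRGB:

#include <cstdint>
#include <pcl/point_types.h>

// Set the packed colour of a PCL point through its channel fields.
inline void setColor(pcl::PointXYZRGB& p, uint8_t r, uint8_t g, uint8_t b)
{
    p.r = r;
    p.g = g;
    p.b = b;
}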
Example #21
CollisionApp::CollisionApp()
  : m_dTurn(0.0f)
  , m_moveRate(2.0f)
  , m_turnRate(2.0f)
  , m_playerMoving(false)
{
  this->ToggleOutputWindow(false);
  Context::Init(Log);

  Window::WindowFactory windowFactory;

  Window::InitData windowData;
  windowData.isImGuiWindow = false;
  int appW(0), appH(0);
  GetWindowDimensions(appW, appH);
  windowData.width = appW;
  windowData.height = appH;
  windowData.clearColor = (vec4(0.0f, 0.0f, 0.0f, 1.0f));

  m_pViewport = windowFactory.GetNewWindow(windowData);

  float margin = 0.5f;
  m_canvasDim = 10.0f;

  //Add boundary lines
  BoundaryLine bl;
  bl.line = Line(vec3(margin, margin, 1.0f), vec3(1.0f, 0.0f, 0.0f));
  bl.length = m_canvasDim - 2.0f * margin;
  m_boundaryLines.push_back(bl);

  bl.line = Line(vec3(m_canvasDim - margin, margin, 1.0f), vec3(0.0f, 1.0f, 0.0f));
  m_boundaryLines.push_back(bl);

  bl.line = Line(vec3(m_canvasDim - margin, m_canvasDim - margin, 1.0f), vec3(-1.0f, 0.0f, 0.0f));
  m_boundaryLines.push_back(bl);

  bl.line = Line(vec3(margin, m_canvasDim - margin, 1.0f), vec3(0.0f, -1.0f, 0.0f));
  m_boundaryLines.push_back(bl);

  float crossDim = 0.6f;
  vec3 crossCenter(5.0f, 5.0f, 1.0f);

  // Upper arm
  vec3 lo = crossCenter + vec3(crossDim, crossDim, 0.0f);
  bl.length = 3.0f * crossDim;
  bl.line = Line(lo, vec3::yAxis());
  m_boundaryLines.push_back(bl);

  lo = crossCenter + vec3(crossDim, 4.0f * crossDim, 0.0f);
  bl.length = 2.0f * crossDim;
  bl.line = Line(lo, -vec3::xAxis());
  m_boundaryLines.push_back(bl);

  lo = crossCenter + vec3(-crossDim, 4.0f * crossDim, 0.0f);
  bl.length = 3.0f * crossDim;
  bl.line = Line(lo, -vec3::yAxis());
  m_boundaryLines.push_back(bl);

  // Left arm
  lo = crossCenter + vec3(-crossDim, crossDim, 0.0f);
  bl.length = 3.0f * crossDim;
  bl.line = Line(lo, -vec3::xAxis());
  m_boundaryLines.push_back(bl);

  lo = crossCenter + vec3(-4.0f * crossDim, crossDim, 0.0f);
  bl.length = 2.0f * crossDim;
  bl.line = Line(lo, -vec3::yAxis());
  m_boundaryLines.push_back(bl);

  lo = crossCenter + vec3(-4.0f * crossDim, -crossDim, 0.0f);
  bl.length = 3.0f * crossDim;
  bl.line = Line(lo, vec3::xAxis());
  m_boundaryLines.push_back(bl);

  // Bottom arm
  lo = crossCenter + vec3(-crossDim, -crossDim, 0.0f);
  bl.length = 3.0f * crossDim;
  bl.line = Line(lo, -vec3::yAxis());
  m_boundaryLines.push_back(bl);

  lo = crossCenter + vec3(-crossDim, -4.0f * crossDim, 0.0f);
  bl.length = 2.0f * crossDim;
  bl.line = Line(lo, vec3::xAxis());
  m_boundaryLines.push_back(bl);

  lo = crossCenter + vec3(crossDim, -4.0f * crossDim, 0.0f);
  bl.length = 3.0f * crossDim;
  bl.line = Line(lo, vec3::yAxis());
  m_boundaryLines.push_back(bl);

  // Right arm
  lo = crossCenter + vec3(crossDim, -crossDim, 0.0f);
  bl.length = 3.0f * crossDim;
  bl.line = Line(lo, vec3::xAxis());
  m_boundaryLines.push_back(bl);

  lo = crossCenter + vec3(4.0f * crossDim, -crossDim, 0.0f);
  bl.length = 2.0f * crossDim;
  bl.line = Line(lo, vec3::yAxis());
  m_boundaryLines.push_back(bl);

  lo = crossCenter + vec3(4.0f * crossDim, crossDim, 0.0f);
  bl.length = 3.0f * crossDim;
  bl.line = Line(lo, -vec3::xAxis());
  m_boundaryLines.push_back(bl);

  //Boundary points
  vec3 bp = crossCenter + vec3(crossDim, 4.0f * crossDim, 0.0f);
  m_boundaryPoints.push_back(bp);
  bp = crossCenter + vec3(-crossDim, 4.0f * crossDim, 0.0f);
  m_boundaryPoints.push_back(bp);
  bp = crossCenter + vec3(-4.0f * crossDim, crossDim, 0.0f);
  m_boundaryPoints.push_back(bp);
  bp = crossCenter + vec3(-4.0f * crossDim, -crossDim, 0.0f);
  m_boundaryPoints.push_back(bp);
  bp = crossCenter + vec3(-crossDim, -4.0f * crossDim, 0.0f);
  m_boundaryPoints.push_back(bp);
  bp = crossCenter + vec3(crossDim, -4.0f * crossDim, 0.0f);
  m_boundaryPoints.push_back(bp);
  bp = crossCenter + vec3(4.0f * crossDim, -crossDim, 0.0f);
  m_boundaryPoints.push_back(bp);
  bp = crossCenter + vec3(4.0f * crossDim, crossDim, 0.0f);
  m_boundaryPoints.push_back(bp);

  //Player puck
  float playerRadius = 0.3f;
  m_player.disk.SetRadius(playerRadius);
  float playerOffset = margin + playerRadius + 0.1f;
  m_player.disk.SetCenter(vec3(playerOffset, playerOffset, 1.0f));
  m_player.angle =  0.0f;
  //m_player.disk.SetCenter(vec3(0.799999952f, playerOffset, 1.0f));
  //m_player.angle = 2.46666574f;
  m_player.speed = 0.0f;
  

  //------------------------------------------------------------------------------------
  //  Add drawables
  //------------------------------------------------------------------------------------
  ScreenObject so;
  so.draw = true;
  Context::LineMesh player_lm = GenerateCircle(1.0f);
  player_lm.color.Set(1.0f, 0.0f, 0.0f, 1.0f);
  m_circleHandle = m_contextLine.AddObject(player_lm);
  m_drawables.insert(dItem(m_circleHandle, so));

  player_lm.color.Set(1.0f, 0.5f, 0.0f, 1.0f);
  so.draw = false;
  m_circleHandleInt = m_contextLine.AddObject(player_lm);
  m_drawables.insert(dItem(m_circleHandleInt, so));

  Context::LineMesh arrow_lm = GenerateArrow();
  so.draw = true;
  arrow_lm.color.Set(1.0f, 0.0f, 0.0f, 1.0f);
  m_arrowHandle = m_contextLine.AddObject(arrow_lm);
  m_drawables.insert(dItem(m_arrowHandle, so));

  arrow_lm.color.Set(1.0f, 0.5f, 0.0f, 1.0f);
  so.draw = false;
  m_arrowHandleInt = m_contextLine.AddObject(arrow_lm);
  m_drawables.insert(dItem(m_arrowHandleInt, so));

  Context::LineMesh lm;
  lm.color.Set(1.0f, 0.0f, 0.0f, 1.0f);
  for (auto const & l : m_boundaryLines)
  {
    Context::LineVertex lv0, lv1;

    lv0.point[0] = l.line.Origin()[0];
    lv0.point[1] = l.line.Origin()[1];
    lv0.point[2] = 0.0f;
    lv0.point[3] = 1.0f;

    lv1.point[0] = l.line.Origin()[0] + l.length * l.line.Direction()[0];
    lv1.point[1] = l.line.Origin()[1] + l.length * l.line.Direction()[1];
    lv1.point[2] = 0.0f;
    lv1.point[3] = 1.0f;

    lm.verts.push_back(lv0);
    lm.verts.push_back(lv1);

    Context::Line seg;   // a separate name avoids shadowing the loop variable 'l'
    seg.indices[0] = lm.verts.size() - 1;
    seg.indices[1] = lm.verts.size() - 2;
    lm.lines.push_back(seg);
  }

  mat4 scale, translation;
  scale.Scaling(2.0f / m_canvasDim);

  vec4 tvec(-1.0f, -1.0f, 0.0f, 0.0f);
  translation.Translation(tvec);
  m_transform = scale * translation;

  so.draw = true;
  m_drawables.insert(dItem(m_contextLine.AddObject(lm), so));

  m_contextLine.CommitLoadList();
}
Example #22
// Changes the heading of the missile to head toward the target.
void CHSMissile::ChangeHeading()
{
    HS_BOOL8 bChange;           // Indicates if any turning has occurred;

    if (!m_pData)
    {
        return;
    }

    // Calculate X, Y, and Z difference vector
    static double tX = 0.0, tY = 0.0, tZ = 0.0;

    if(NULL != m_target && HST_SHIP == m_target->GetType())
    {

        // get value from 0.0 - 1.0 where 1.0 is 100% visible
        float cloak_effect =
            static_cast<CHSShip* >(m_target)->CloakingEffect() * 100;

        // If the random value is less than the cloaking effect,
        // the missile still sees the ship and can update its
        // coordinates, otherwise, leave them at the previous
        // location
        if(hsInterface.GetRandom(100) <= (HS_UINT32) cloak_effect)
        {
            tX = m_target->GetX();
            tY = m_target->GetY();
            tZ = m_target->GetZ();
        }
    }
    else if (NULL != m_target)   // only refresh coordinates while a target still exists
    {
        tX = m_target->GetX();
        tY = m_target->GetY();
        tZ = m_target->GetZ();
    }

    HS_INT32 xyang = XYAngle(m_x, m_y, tX, tY);
    HS_INT32 zang = ZAngle(m_x, m_y, m_z, tX, tY, tZ);

    // Get the turn rate
    HS_INT32 iTurnRate = m_turnrate;

    bChange = false;

    // Check for change in zheading
    if (zang != m_zheading)
    {
        if (zang > m_zheading)
        {
            if ((zang - m_zheading) > iTurnRate)
                m_zheading += iTurnRate;
            else
                m_zheading = zang;
        }
        else
        {
            if ((m_zheading - zang) > iTurnRate)
                m_zheading -= iTurnRate;
            else
                m_zheading = zang;
        }
        bChange = true;
    }

    // Now handle any changes in the XY plane.
    HS_INT32 iDiff;
    if (xyang != m_xyheading)
    {
        if (abs(m_xyheading - xyang) < 180)
        {
            if (abs(m_xyheading - xyang) < iTurnRate)
                m_xyheading = xyang;
            else if (m_xyheading > xyang)
            {
                m_xyheading -= iTurnRate;

                if (m_xyheading < 0)
                    m_xyheading += 360;
            }
            else
            {
                m_xyheading += iTurnRate;

                if (m_xyheading > 359)
                    m_xyheading -= 360;
            }
        }
        else if (((360 - xyang) + m_xyheading) < 180)
        {
            iDiff = (360 - xyang) + m_xyheading;
            if (iDiff < iTurnRate)
            {
                m_xyheading = xyang;
            }
            else
            {
                m_xyheading -= iTurnRate;

                if (m_xyheading < 0)
                    m_xyheading += 360;
            }
        }
        else if (((360 - m_xyheading) + xyang) < 180)
        {
            iDiff = (360 - m_xyheading) + xyang;
            if (iDiff < iTurnRate)
            {
                m_xyheading = xyang;
            }
            else
            {
                m_xyheading += iTurnRate;

                if (m_xyheading > 359)
                    m_xyheading -= 360;
            }
        }
        else                    // This should never be true, but just in case.
        {
            m_xyheading += iTurnRate;

            if (m_xyheading > 359)
                m_xyheading -= 360;
        }
        bChange = true;
    }

    // Check to see if we need to recompute trajectory.
    if (bChange)
    {
        zang = m_zheading;
        if (zang < 0)
            zang += 360;

        CHSVector tvec(d2sin_table[m_xyheading] * d2cos_table[zang],
                       d2cos_table[m_xyheading] * d2cos_table[zang],
                       d2sin_table[zang]);
        m_motion_vector = tvec;
    }
}
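The XY-plane branch above is the long form of "rotate the heading toward the target by at most iTurnRate degrees along the shorter arc". A compact equivalent sketch for integer headings in [0, 360):

// Step heading toward target by at most rate degrees, taking the shorter
// way around the circle; all angles are whole degrees in [0, 360).
int stepHeading(int heading, int target, int rate)
{
    int diff = ((target - heading) % 360 + 540) % 360 - 180;  // signed shortest difference
    if (diff > rate)  return (heading + rate) % 360;
    if (diff < -rate) return (heading - rate + 360) % 360;
    return target;
}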
Example #23
int main(int argc, char ** argv) {
  int iline=0, i, iarg=1, nfields=1, jmax=0, writetxt=0, writemat=0, grpsize=1;
  int membuf=1048576;
  char * here, *linebuf, *readbuf;
  string ifname = "", ofname = "", mfname = "", ffname = "", matfname = "", fdelim="\t", suffix="";
  if (argc < 2) {
    printf("%s", usage);
    return 1;
  }
  while (iarg < argc) {
    if (strncmp(argv[iarg], "-d", 2) == 0) {
      fdelim = argv[++iarg];
    } else if (strncmp(argv[iarg], "-c", 2) == 0) {
      suffix=".gz";
    } else if (strncmp(argv[iarg], "-f", 2) == 0) {
      ffname = argv[++iarg];
    } else if (strncmp(argv[iarg], "-i", 2) == 0) {
      ifname = argv[++iarg];
    } else if (strncmp(argv[iarg], "-m", 2) == 0) {
      mfname = argv[++iarg];
    } else if (strncmp(argv[iarg], "-o", 2) == 0) {
      ofname = argv[++iarg];
    } else if (strncmp(argv[iarg], "-s", 2) == 0) {
      membuf = strtol(argv[++iarg],NULL,10);
    } else if (strncmp(argv[iarg], "-?", 2) == 0) {
      printf("%s", usage);
      return 1;
    } else if (strncmp(argv[iarg], "-h", 2) == 0) {
      printf("%s", usage);
      return 1;
    } else {
      cout << "Unknown option " << argv[iarg] << endl;
      exit(1);
    }
    iarg++;
  }
  if (mfname.size() == 0) mfname = ofname;
  ivector tvec(0);
  svector delims(0);
  svector dnames(0);
  nfields = parseFormat(ffname, tvec, dnames, delims, &grpsize);
  srivector srv(nfields);
  ftvector ftv(nfields);

  istream * ifstr;
  linebuf = new char[membuf];
  readbuf = new char[membuf];

  ifstr = open_in_buf(ifname, readbuf, membuf);

  while (!ifstr->bad() && !ifstr->eof()) {
    ifstr->getline(linebuf, membuf-1);
    linebuf[membuf-1] = 0;
    if (ifstr->fail()) {
      ifstr->clear();
      ifstr->ignore(LONG_MAX,'\n');
    }
    if (strlen(linebuf) > 0) {
      jmax++;
      try {
        parseLine(linebuf, membuf, ++iline, fdelim.c_str(), tvec, delims, srv, ftv, grpsize);
      } catch (int e) {
        cerr << "Continuing" << endl;
      }
    }
    if ((jmax % 100000) == 0) {
      cout<<"\r"<<jmax<<" lines processed";
      cout.flush();
    }
  }
  if (ifstr) delete ifstr;
  cout<<"\r"<<jmax<<" lines processed";
  cout.flush();

  for (i = 0; i < nfields; i++) {
    switch (tvec[i]) {
    case ftype_int: case ftype_dt: case ftype_mdt: case ftype_date: case ftype_mdate: case ftype_cmdate:
      ftv[i].writeInts(ofname + dnames[i] + ".imat" + suffix);
      break;
    case ftype_dint:
      ftv[i].writeDInts(ofname + dnames[i] + ".dimat" + suffix);
      break;
    case ftype_qhex:
      ftv[i].writeQInts(ofname + dnames[i] + ".imat" + suffix);
      break;
    case ftype_float:
      ftv[i].writeFloats(ofname + dnames[i] + ".fmat" + suffix);
      break;
    case ftype_double:
      ftv[i].writeDoubles(ofname + dnames[i] + ".dmat" + suffix);
      break;
    case ftype_word:
      ftv[i].writeInts(ofname + dnames[i] + ".imat" + suffix);
      srv[i].writeMap(mfname + dnames[i], suffix);
      break;
    case ftype_string: case ftype_group: 
      ftv[i].writeIVecs(ofname + dnames[i] + ".imat" + suffix);
      srv[i].writeMap(mfname + dnames[i], suffix);
      break;
    case ftype_igroup:
      ftv[i].writeIVecs(ofname + dnames[i] + ".imat" + suffix);
      break;
    case ftype_digroup:
      ftv[i].writeDIVecs(ofname + dnames[i]);
      break;
    default:
      break;
    }    
  }
  printf("\n");
  if (linebuf) delete [] linebuf;
  if (readbuf) delete [] readbuf;   // free the stream read buffer too
  return 0;
}