Example #1
static int SVertex_init(BPy_SVertex *self, PyObject *args, PyObject *kwds)
{
	static const char *kwlist_1[] = {"brother", NULL};
	static const char *kwlist_2[] = {"point_3d", "id", NULL};
	PyObject *obj = 0;
	float v[3];

	/* Two signatures are accepted: an optional SVertex to copy, or a 3D point plus an Id. */
	if (PyArg_ParseTupleAndKeywords(args, kwds, "|O!", (char **)kwlist_1, &SVertex_Type, &obj)) {
		if (!obj)
			self->sv = new SVertex();
		else
			self->sv = new SVertex(*(((BPy_SVertex *)obj)->sv));
	}
	else if (PyErr_Clear(), /* discard the error from the first parse before trying the second signature */
	         PyArg_ParseTupleAndKeywords(args, kwds, "O&O!", (char **)kwlist_2, convert_v3, v, &Id_Type, &obj))
	{
		Vec3r point_3d(v[0], v[1], v[2]);
		self->sv = new SVertex(point_3d, *(((BPy_Id *)obj)->id));
	}
	else {
		PyErr_SetString(PyExc_TypeError, "invalid argument(s)");
		return -1;
	}
	self->py_if0D.if0D = self->sv;
	self->py_if0D.borrowed = 0;
	return 0;
}
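
The second signature uses the "O&" format unit, which hands the Python object to a converter callback (convert_v3 here) that fills the float v[3]. The actual Blender helper is not shown in this example; the following is only a minimal sketch of what such a converter typically looks like, with the function name, error messages, and sequence handling being assumptions:

static int convert_v3_sketch(PyObject *obj, void *v)
{
	/* Converter for a PyArg_ParseTupleAndKeywords "O&" unit: return 1 on success,
	 * 0 on failure with a Python exception set. */
	float *p = (float *)v;
	PyObject *seq = PySequence_Fast(obj, "expected a sequence of 3 floats");
	if (!seq)
		return 0;
	if (PySequence_Fast_GET_SIZE(seq) != 3) {
		Py_DECREF(seq);
		PyErr_SetString(PyExc_TypeError, "expected a sequence of 3 floats");
		return 0;
	}
	for (int i = 0; i < 3; i++) {
		p[i] = (float)PyFloat_AsDouble(PySequence_Fast_GET_ITEM(seq, i));
		if (PyErr_Occurred()) {
			Py_DECREF(seq);
			return 0;
		}
	}
	Py_DECREF(seq);
	return 1;
}
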
Example #2
Eigen::Vector3f MetricCamera::back_project_2d_point_to_3d( const Eigen::Vector2f& point_2d, const float depth ) const
{
    // Lift the pixel back to a homogeneous image point at the requested depth.
    Eigen::Vector3f homogeneous_point_2d( point_2d( i_x ) * depth, point_2d( i_y ) * depth, depth );
//    Eigen::Vector3f point_3d = KR.inverse() * ( homogeneous_point_2d - Kt );
    Eigen::Vector3f point_3d = KR_inverse * ( homogeneous_point_2d - Kt );

    const float z = point_3d( i_z );

    point_3d = point_3d * (depth / z); // FIXME: is this last step correct?

    return point_3d;
}
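
The back-projection inverts the usual pinhole model x ~ K * (R * X + t). The members KR_inverse and Kt are presumably cached when the camera pose is set (the commented-out line above suggests KR_inverse is the stored inverse of K*R), so that each call avoids a matrix inversion. A minimal sketch of that precomputation, with the method name and the pinhole convention being assumptions:

void MetricCamera::set_pose(const Eigen::Matrix3f& K, const Eigen::Matrix3f& R, const Eigen::Vector3f& t)
{
    KR_inverse = (K * R).inverse(); // cache (K*R)^-1 so back_project_2d_point_to_3d stays cheap
    Kt = K * t;                     // cache K*t, subtracted from the homogeneous image point
}
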
Example #3
	// x = current params; fvec = the residuals: differences between the projection under the current params and the ground truth (image_points)
	int operator()(const Eigen::VectorXd& x, Eigen::VectorXd& fvec) const
	{
		const float aspect = static_cast<float>(width) / height;
		for (int i = 0; i < values(); i++)
		{
			// opencv to glm:
			glm::vec3 point_3d(model_points[i][0], model_points[i][1], model_points[i][2]);
			// projection given current params x:
			glm::vec3 proj_with_current_param_esti = project_ortho(point_3d, x[0], x[1], x[2], x[3], x[4], x[5], width, height);
			cv::Vec2f proj_point_2d(proj_with_current_param_esti.x, proj_with_current_param_esti.y);
			// diff of current proj to ground truth, our error
			auto diff = cv::norm(proj_point_2d, image_points[i]);
			// fvec should contain the differences
			// don't square it.
			fvec[i] = diff;
		}
		return 0;
	};
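
This operator() is the residual callback of an Eigen non-linear least-squares functor: for each model point it projects with the current parameter vector x and writes the distance to the observed image point into fvec. Assuming the enclosing struct follows Eigen's usual functor convention (a Scalar typedef plus inputs() and values()), and using OrthoProjectionFunctor as a hypothetical name for it with assumed constructor arguments and point types, a typical way to drive the optimization looks like this:

#include <unsupported/Eigen/NonLinearOptimization>
#include <unsupported/Eigen/NumericalDiff>

// Sketch only: the functor name, its constructor, and the parameter types are assumptions.
Eigen::VectorXd fit_ortho_params(const std::vector<cv::Vec3f>& model_points,
                                 const std::vector<cv::Vec2f>& image_points,
                                 int width, int height)
{
	Eigen::VectorXd x(6);  // the six orthographic projection parameters being estimated
	x.setZero();           // or a better initial guess
	OrthoProjectionFunctor functor(model_points, image_points, width, height);
	Eigen::NumericalDiff<OrthoProjectionFunctor> num_diff(functor);  // finite-difference Jacobian
	Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OrthoProjectionFunctor>, double> lm(num_diff);
	lm.minimize(x);        // x now holds the refined parameters
	return x;
}
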
Example #4
	// Detect the marker and compute the rotation/translation matrix
	void VrmlGame::GetMatrixMP(Map &map)
	{
		if (!isInitMap)
		{
			std::cout<<"find mark...."<<std::endl;
			mpMap = &map;
			std::vector<std::vector<Wml::Vector2d> > all_points_2d;
			detect_corners(std::string("./absolute.txt"),all_points_2d);

			std::vector<Wml::Matrix4d> all_cameras;
			std::vector<Wml::Matrix3d> all_Ks;
			std::vector<KeyFrame *>::iterator kf;
			int count = 1;
			for( kf = mpMap->vpKeyFrames.begin(); kf != mpMap->vpKeyFrames.end(); kf++ )
			{
				int width = (*kf)->Camera.ImageSize()[0];
				int height = (*kf)->Camera.ImageSize()[1];
				TooN::Vector<5> params = (*kf)->Camera.GetParams();
				Wml::Matrix3d K;
				K(0,0) = width * params[0];
				K(0,1) = 0;
				K(0,2) = width * params[2];
				K(1,0) = 0;
				K(1,1) = height * params[1];
				K(1,2) = height * params[3];
				K(2,0) = 0;
				K(2,1) = 0;
				K(2,2) = 1;
				if(count==1)
				{
					std::cout<<K(0,0)<<","<<K(1,1)<<","<<K(0,2)<<","<<K(1,2)<<std::endl;
				}
				all_Ks.push_back(K);

				Wml::Matrix4d P;
				P(0,3) = (*kf)->se3CfromW.get_translation()[0];
				P(1,3) = (*kf)->se3CfromW.get_translation()[1];
				P(2,3) = (*kf)->se3CfromW.get_translation()[2];
				P(0,0) = (*kf)->se3CfromW.get_rotation().get_matrix()[0][0];
				P(0,1) = (*kf)->se3CfromW.get_rotation().get_matrix()[0][1];
				P(0,2) = (*kf)->se3CfromW.get_rotation().get_matrix()[0][2];
				P(1,0) = (*kf)->se3CfromW.get_rotation().get_matrix()[1][0];
				P(1,1) = (*kf)->se3CfromW.get_rotation().get_matrix()[1][1];
				P(1,2) = (*kf)->se3CfromW.get_rotation().get_matrix()[1][2];
				P(2,0) = (*kf)->se3CfromW.get_rotation().get_matrix()[2][0];
				P(2,1) = (*kf)->se3CfromW.get_rotation().get_matrix()[2][1];
				P(2,2) = (*kf)->se3CfromW.get_rotation().get_matrix()[2][2];
				if(count==1)
				{
					std::cout<<P(0,3)<<","<<P(1,3)<<","<<P(2,3)<<","<<P(0,0)<<","<<P(0,1)<<","<<P(0,2)<<","<<P(1,0)<<","<<P(1,1)<<","<<P(1,2)<<","<<P(2,0)<<","<<P(2,1)<<","<<P(2,2)<<std::endl;
				}
				P(3,0) = 0;
				P(3,1) = 0;
				P(3,2) = 0;
				P(3,3) = 1;
				all_cameras.push_back(P);
				count++;
			}

			std::vector<Wml::Vector3d> corners;
			CRobust3DRecovery r3dr;
			for (int i = 0; i < 4; ++ i)
			{
				std::vector<Wml::Vector2d> points_2d;
				std::vector<Wml::Matrix4d> key_cameras;
				std::vector<Wml::Matrix3d> key_Ks;

				for (int f = 0; f < all_points_2d.size(); ++ f)
				{
					if (all_points_2d[f].size() > i)
					{
						points_2d.push_back(all_points_2d[f][i]);
						key_cameras.push_back(all_cameras[f]);
						key_Ks.push_back(all_Ks[f]);
					}
				}

				if (points_2d.size() > 2)
				{
					Wml::Vector3d point_3d(0, 0, 0);
					r3dr.generate(points_2d, key_cameras, key_Ks, point_3d, 3, 10.0, 10);

					std::cout<<"3dpoint"<<i<<" = ["<<point_3d[0] << ", "<<point_3d[1] << ", "<<point_3d[2] << "]\n";

					corners.push_back(point_3d);
				}
			}

			if (corners.size() == 4)
			{
				Wml::Vector3d center(0.0, 0.0, 0.0);
				for (int i = 0; i < corners.size(); ++ i)
				{
					center += corners[i];
				}
				center /= corners.size();

				std::cout<<"center = ["<<center[0]<<", "<<center[1]<<", "<<center[2]<<","<<center[3]<<"]"<<std::endl;
				//////////////////////////////////////////////////////////////////////////

				Wml::Vector3d xaxis = corners[3] - corners[0];
				Wml::Vector3d zaxis = corners[1] - corners[0];
				Wml::Vector3d axis12 = corners[1] - corners[2];
				Wml::Vector3d axis32 = corners[3] - corners[2];

				// Scale of the coordinate frame.
				//double scale = 4.0/(xaxis.Length() + zaxis.Length() + axis12.Length() + axis32.Length());
				double scale = (xaxis.Length() + zaxis.Length() + axis12.Length() + axis32.Length())/4.0;
				// New coordinate axes
				Wml::Vector3d yaxis = zaxis.Cross(xaxis);
				zaxis = xaxis.Cross(yaxis);
				xaxis.Normalize();
				yaxis.Normalize();
				zaxis.Normalize();

				// Note: the scale matters. Not sure why, but if scale is replaced with 1, the imported model
				// comes out very large when the PTAMM map is large, and very small otherwise.
				Wml::Vector3d u(scale,0,0);
				Wml::Vector3d v(0,scale,0);
				Wml::Vector3d n(0,0,scale);
				mP(0,0) = xaxis.Dot(u);
				mP(0,1) = xaxis.Dot(v);
				mP(0,2) = xaxis.Dot(n);
				mP(1,0) = yaxis.Dot(u);
				mP(1,1) = yaxis.Dot(v);
				mP(1,2) = yaxis.Dot(n);
				mP(2,0) = zaxis.Dot(u);
				mP(2,1) = zaxis.Dot(v);
				mP(2,2) = zaxis.Dot(n);
				mP(0,3) = center[0];
				mP(1,3) = center[1];
				mP(2,3) = center[2];
				mP(3,0) = 0;
				mP(3,1) = 0;
				mP(3,2) = 0;
				mP(3,3) = 1;
			}
			isInitMap = true;
		}
	}
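
The 4x4 matrix mP assembled above maps the marker's local frame (axes derived from the detected corners, origin at their centroid, scaled by the average edge length) into PTAMM map coordinates. A minimal sketch of how such a transform would be applied to place a model point in the map, assuming only the Wml::Matrix4d / Wml::Vector3d element access already used above:

	// Sketch only: homogeneous transform of a point given in the marker frame (w assumed to be 1).
	Wml::Vector3d TransformPoint(const Wml::Matrix4d &mP, const Wml::Vector3d &p)
	{
		Wml::Vector3d q(0.0, 0.0, 0.0);
		for (int r = 0; r < 3; ++r)
		{
			q[r] = mP(r, 0) * p[0] + mP(r, 1) * p[1] + mP(r, 2) * p[2] + mP(r, 3);
		}
		return q;
	}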