Example #1
Point2 ARVRCamera::unproject_position(const Vector3 &p_pos) const {
	// get our ARVRServer
	ARVRServer *arvr_server = ARVRServer::get_singleton();
	ERR_FAIL_NULL_V(arvr_server, Vector2());

	Ref<ARVRInterface> arvr_interface = arvr_server->get_primary_interface();
	if (arvr_interface.is_null()) {
		// we might be in the editor or have VR turned off, just call superclass
		return Camera::unproject_position(p_pos);
	}

	if (!is_inside_tree()) {
		ERR_EXPLAIN("Camera is not inside scene.");
		ERR_FAIL_COND_V(!is_inside_tree(), Vector2());
	};

	Size2 viewport_size = get_viewport()->get_visible_rect().size;

	CameraMatrix cm = arvr_interface->get_projection_for_eye(ARVRInterface::EYE_MONO, viewport_size.aspect(), get_znear(), get_zfar());

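	// take the point into view space, project it to clip space, do the perspective divide,
	// then map the NDC result (-1..1) onto viewport pixel coordinates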
	Plane p(get_camera_transform().xform_inv(p_pos), 1.0);

	p = cm.xform4(p);
	p.normal /= p.d;

	Point2 res;
	res.x = (p.normal.x * 0.5 + 0.5) * viewport_size.x;
	res.y = (-p.normal.y * 0.5 + 0.5) * viewport_size.y;

	return res;
};
Example #2
Vector3 Camera::project_position(const Point2& p_point) const {

	if (!is_inside_tree()) {
		ERR_EXPLAIN("Camera is not inside scene.");
		ERR_FAIL_COND_V(!is_inside_tree(),Vector3());
	}

	Size2 viewport_size = get_viewport()->get_visible_rect().size;

	CameraMatrix cm;

	if (mode==PROJECTION_ORTHOGONAL)
		cm.set_orthogonal(size,viewport_size.get_aspect(),near,far,keep_aspect==KEEP_WIDTH);
	else
		cm.set_perspective(fov,viewport_size.get_aspect(),near,far,keep_aspect==KEEP_WIDTH);

	Size2 vp_size;
	cm.get_viewport_size(vp_size.x,vp_size.y);

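	// map the screen point from pixels to normalized device coordinates (-1..1, Y flipped),
	// then scale by the near-plane extents reported by the projection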
	Vector2 point;
	point.x = (p_point.x/viewport_size.x) * 2.0 - 1.0;
	point.y = (1.0-(p_point.y/viewport_size.y)) * 2.0 - 1.0;
	point*=vp_size;

	Vector3 p(point.x,point.y,-near);


	return get_camera_transform().xform(p);
}
Example #3
Vector3 Camera::project_local_ray_normal(const Point2& p_pos) const {

	if (!is_inside_scene()) {
		ERR_EXPLAIN("Camera is not inside scene.");
		ERR_FAIL_COND_V(!is_inside_scene(),Vector3());
	}

	Size2 viewport_size = viewport_ptr->get_visible_rect().size;

	Vector3 ray;

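	// an orthogonal camera looks straight down -Z for every pixel; for perspective,
	// build the ray through the requested pixel on the near plane and normalize it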
	if (mode==PROJECTION_ORTHOGONAL) {

		ray=Vector3(0,0,-1);
	} else {
		CameraMatrix cm;
		cm.set_perspective(fov,viewport_size.get_aspect(),near,far,vaspect);
		float screen_w,screen_h;
		cm.get_viewport_size(screen_w,screen_h);
		ray=Vector3( ((p_pos.x/viewport_size.width)*2.0-1.0)*screen_w, ((1.0-(p_pos.y/viewport_size.height))*2.0-1.0)*screen_h,-near).normalized();
	}


	return ray;
};
Example #4
Vector3 ARVRCamera::project_local_ray_normal(const Point2 &p_pos) const {
	// get our ARVRServer
	ARVRServer *arvr_server = ARVRServer::get_singleton();
	ERR_FAIL_NULL_V(arvr_server, Vector3());

	Ref<ARVRInterface> arvr_interface = arvr_server->get_primary_interface();
	if (arvr_interface.is_null()) {
		// we might be in the editor or have VR turned off, just call superclass
		return Camera::project_local_ray_normal(p_pos);
	}

	if (!is_inside_tree()) {
		ERR_EXPLAIN("Camera is not inside scene.");
		ERR_FAIL_COND_V(!is_inside_tree(), Vector3());
	};

	Size2 viewport_size = get_viewport()->get_camera_rect_size();
	Vector2 cpos = get_viewport()->get_camera_coords(p_pos);
	Vector3 ray;

	CameraMatrix cm = arvr_interface->get_projection_for_eye(ARVRInterface::EYE_MONO, viewport_size.aspect(), get_znear(), get_zfar());
	float screen_w, screen_h;
	cm.get_viewport_size(screen_w, screen_h);
	ray = Vector3(((cpos.x / viewport_size.width) * 2.0 - 1.0) * screen_w, ((1.0 - (cpos.y / viewport_size.height)) * 2.0 - 1.0) * screen_h, -get_znear()).normalized();

	return ray;
};
Example #5
void test_vec(Plane p_vec) {


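	// project the plane (used as a homogeneous point) with a 45 degree perspective matrix,
	// print the raw result, then recompute z from w mapped over the 0..100 depth range for comparison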
	CameraMatrix cm;
	cm.set_perspective(45,1,0,100);
	Plane v0=cm.xform4(p_vec);

	print_line("out: "+v0);
	v0.normal.z = (v0.d/100.0 *2.0-1.0) * v0.d;
	print_line("out_F: "+v0);


/*v0: 0, 0, -0.1, 0.1
v1: 0, 0, 0, 0.1
fix: 0, 0, 0, 0.1
v0: 0, 0, 1.302803, 1.5
v1: 0, 0, 1.401401, 1.5
fix: 0, 0, 1.401401, 1.5
v0: 0, 0, 25.851850, 26
v1: 0, 0, 25.925926, 26
fix: 0, 0, 25.925924, 26
v0: 0, 0, 49.899902, 50
v1: 0, 0, 49.949947, 50
fix: 0, 0, 49.949951, 50
v0: 0, 0, 100, 100
v1: 0, 0, 100, 100
fix: 0, 0, 100, 100
*/     


}
Example #6
Vector3 ARVRCamera::project_position(const Point2 &p_point) const {
	// get our ARVRServer
	ARVRServer *arvr_server = ARVRServer::get_singleton();
	ERR_FAIL_NULL_V(arvr_server, Vector3());

	Ref<ARVRInterface> arvr_interface = arvr_server->get_primary_interface();
	if (arvr_interface.is_null()) {
		// we might be in the editor or have VR turned off, just call superclass
		return Camera::project_position(p_point);
	}

	if (!is_inside_tree()) {
		ERR_EXPLAIN("Camera is not inside scene.");
		ERR_FAIL_COND_V(!is_inside_tree(), Vector3());
	};

	Size2 viewport_size = get_viewport()->get_visible_rect().size;

	CameraMatrix cm = arvr_interface->get_projection_for_eye(ARVRInterface::EYE_MONO, viewport_size.aspect(), get_znear(), get_zfar());

	Size2 vp_size;
	cm.get_viewport_size(vp_size.x, vp_size.y);

	Vector2 point;
	point.x = (p_point.x / viewport_size.x) * 2.0 - 1.0;
	point.y = (1.0 - (p_point.y / viewport_size.y)) * 2.0 - 1.0;
	point *= vp_size;

	Vector3 p(point.x, point.y, -get_znear());

	return get_camera_transform().xform(p);
};
Example #7
Vector3 Camera::project_local_ray_normal(const Point2& p_pos) const {

	if (!is_inside_tree()) {
		ERR_EXPLAIN("Camera is not inside scene.");
		ERR_FAIL_COND_V(!is_inside_tree(),Vector3());
	}


#if 0
	Size2 viewport_size = get_viewport()->get_visible_rect().size;
	Vector2 cpos = p_pos;
#else

	Size2 viewport_size = get_viewport()->get_camera_rect_size();
	Vector2 cpos = get_viewport()->get_camera_coords(p_pos);
#endif

	Vector3 ray;

	if (mode==PROJECTION_ORTHOGONAL) {

		ray=Vector3(0,0,-1);
	} else {
		CameraMatrix cm;
		cm.set_perspective(fov,viewport_size.get_aspect(),near,far,keep_aspect==KEEP_WIDTH);
		float screen_w,screen_h;
		cm.get_viewport_size(screen_w,screen_h);
		ray=Vector3( ((cpos.x/viewport_size.width)*2.0-1.0)*screen_w, ((1.0-(cpos.y/viewport_size.height))*2.0-1.0)*screen_h,-near).normalized();
	}

	return ray;
};
Example #8
Point2 Camera::unproject_position(const Vector3& p_pos) const {

	if (!is_inside_tree()) {
		ERR_EXPLAIN("Camera is not inside scene.");
		ERR_FAIL_COND_V(!is_inside_tree(),Vector2());
	}

	Size2 viewport_size = get_viewport()->get_visible_rect().size;

	CameraMatrix cm;


	if (mode==PROJECTION_ORTHOGONAL)
		cm.set_orthogonal(size,viewport_size.get_aspect(),near,far,keep_aspect==KEEP_WIDTH);
	else
		cm.set_perspective(fov,viewport_size.get_aspect(),near,far,keep_aspect==KEEP_WIDTH);

	Plane p(get_camera_transform().xform_inv(p_pos),1.0);

	p=cm.xform4(p);
	p.normal/=p.d;


	Point2 res;
	res.x = (p.normal.x * 0.5 + 0.5)  * viewport_size.x;
	res.y = (-p.normal.y * 0.5 + 0.5)  * viewport_size.y;

	return res;

}
Example #9
File: camera.cpp Project: Alex-doc/godot
Vector<Plane> Camera::get_frustum() const {

	ERR_FAIL_COND_V(!is_inside_world(), Vector<Plane>());

	Size2 viewport_size = get_viewport()->get_visible_rect().size;
	CameraMatrix cm;
	if (mode == PROJECTION_PERSPECTIVE)
		cm.set_perspective(fov, viewport_size.aspect(), near, far, keep_aspect == KEEP_WIDTH);
	else
		cm.set_orthogonal(size, viewport_size.aspect(), near, far, keep_aspect == KEEP_WIDTH);

	return cm.get_projection_planes(get_camera_transform());
}
Example #10
Vector<Plane> ARVRCamera::get_frustum() const {
	// get our ARVRServer
	ARVRServer *arvr_server = ARVRServer::get_singleton();
	ERR_FAIL_NULL_V(arvr_server, Vector<Plane>());

	Ref<ARVRInterface> arvr_interface = arvr_server->get_primary_interface();
	ERR_FAIL_COND_V(arvr_interface.is_null(), Vector<Plane>());

	ERR_FAIL_COND_V(!is_inside_world(), Vector<Plane>());

	Size2 viewport_size = get_viewport()->get_visible_rect().size;
	CameraMatrix cm = arvr_interface->get_projection_for_eye(ARVRInterface::EYE_MONO, viewport_size.aspect(), get_znear(), get_zfar());
	return cm.get_projection_planes(get_camera_transform());
};
Example #11
CameraMatrix MobileVRInterface::get_projection_for_eye(ARVRInterface::Eyes p_eye, real_t p_aspect, real_t p_z_near, real_t p_z_far) {
	_THREAD_SAFE_METHOD_

	CameraMatrix eye;

	if (p_eye == ARVRInterface::EYE_MONO) {
		///@TODO for now hardcode some of this, what is really needed here is that this needs to be in sync with the real cameras properties
		// which probably means implementing a specific class for iOS and Android. For now this is purely here as an example.
		// Note also that if you use a normal viewport with AR/VR turned off you can still use the tracker output of this interface
		// to position a stock standard Godot camera and have control over this.
		// This will make more sense when we implement ARkit on iOS (probably a separate interface).
		eye.set_perspective(60.0, p_aspect, p_z_near, p_z_far, false);
	} else {
		eye.set_for_hmd(p_eye == ARVRInterface::EYE_LEFT ? 1 : 2, p_aspect, intraocular_dist, display_width, display_to_lens, oversample, p_z_near, p_z_far);
	};

	return eye;
};
Example #12
void CameraMatrixProvider::update(CameraMatrix& cameraMatrix)
{
  cameraMatrix.computeCameraMatrix(theTorsoMatrix, theRobotCameraMatrix, theCameraCalibration);
  cameraMatrix.isValid = theTorsoMatrix.isValid && theMotionInfo.isMotionStable &&
                         theFallDownState.state == FallDownState::upright &&
                         theFrameInfo.getTimeSince(theFilteredJointData.timeStamp) < 500 &&
                         theRobotInfo.penalty == PENALTY_NONE;

  DECLARE_DEBUG_DRAWING("module:CameraMatrixProvider:calibrationHelper", "drawingOnImage", drawFieldLines(cameraMatrix););
}

bool Transformation::robotToImage(const Vector3<>& point, const CameraMatrix& cameraMatrix,
                                  const CameraInfo& cameraInfo, Vector2<float>& pointInImage)
{
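  // transform the point into camera coordinates and project it onto the image plane with the
  // pinhole model; points at or behind the camera plane cannot be projected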
  Vector3<> pointInCam = cameraMatrix.invert() * point;
  if(pointInCam.x < 0)
  {
    return false;
  }
  pointInCam *= cameraInfo.focalLength / pointInCam.x;
  pointInImage = cameraInfo.opticalCenter - Vector2<>(pointInCam.y, pointInCam.z);
  return pointInCam.x > 0;
}
Example #14
void CameraMatrix::set_perspective(real_t p_fovy_degrees, real_t p_aspect, real_t p_z_near, real_t p_z_far, bool p_flip_fov, int p_eye, real_t p_intraocular_dist, real_t p_convergence_dist) {
	if (p_flip_fov) {
		p_fovy_degrees = get_fovy(p_fovy_degrees, 1.0 / p_aspect);
	}

	real_t left, right, modeltranslation, ymax, xmax, frustumshift;

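	// near-plane half-extents from the vertical FOV; frustumshift skews each eye's frustum
	// so that both eyes converge at p_convergence_dist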
	ymax = p_z_near * tan(p_fovy_degrees * Math_PI / 360.0f);
	xmax = ymax * p_aspect;
	frustumshift = (p_intraocular_dist / 2.0) * p_z_near / p_convergence_dist;

	switch (p_eye) {
		case 1: { // left eye
			left = -xmax + frustumshift;
			right = xmax + frustumshift;
			modeltranslation = p_intraocular_dist / 2.0;
		}; break;
		case 2: { // right eye
			left = -xmax - frustumshift;
			right = xmax - frustumshift;
			modeltranslation = -p_intraocular_dist / 2.0;
		}; break;
		default: { // mono, should give the same result as set_perspective(p_fovy_degrees,p_aspect,p_z_near,p_z_far,p_flip_fov)
			left = -xmax;
			right = xmax;
			modeltranslation = 0.0;
		}; break;
	};

	set_frustum(left, right, -ymax, ymax, p_z_near, p_z_far);

	// translate matrix by (modeltranslation, 0.0, 0.0)
	CameraMatrix cm;
	cm.set_identity();
	cm.matrix[3][0] = modeltranslation;
	*this = *this * cm;
}
Example #15
bool Geometry::calculatePointInImage
(
  const Vector3<>& point,
  const CameraMatrix& cameraMatrix,
  const CameraInfo& cameraInfo,
  Vector2<>& pointInImage
)
{
  Vector3<> pointInCam = cameraMatrix.invert() * point;
  if(pointInCam.x == 0)
  {
    return false;
  }
  pointInCam *= cameraInfo.focalLength / pointInCam.x;
  pointInImage = cameraInfo.opticalCenter - Vector2<>(pointInCam.y, pointInCam.z);
  return pointInCam.x > 0;
}
Example #16
Matrix2x2f UKFSample::getCovOfPointInWorld(const Vector2<>& pointInWorld2, float pointZInWorld, const MotionInfo& motionInfo,
                                           const CameraMatrix& cameraMatrix, const SelfLocatorParameters& parameters) const
{
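  // derive the 2x2 covariance of an observed field point: radial uncertainty follows from the
  // camera tilt deviation, tangential uncertainty grows with distance, both rotated toward the point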
  Vector3<> unscaledVectorToPoint = cameraMatrix.invert() * Vector3<>(pointInWorld2.x, pointInWorld2.y, pointZInWorld);
  const Vector3<> unscaledWorld = cameraMatrix.rotation * unscaledVectorToPoint;
  const float h = cameraMatrix.translation.z - pointZInWorld;
  const float scale = h / -unscaledWorld.z;
  Vector2f pointInWorld(unscaledWorld.x * scale, unscaledWorld.y * scale);
  const float distance = pointInWorld.abs();
  Vector2f cossin = distance == 0.f ? Vector2f(1.f, 0.f) : pointInWorld * (1.f / distance);
  Matrix2x2f rot(cossin, Vector2f(-cossin.y, cossin.x));
  const Vector2<>& robotRotationDeviation = motionInfo.motion == MotionRequest::stand
    ? parameters.robotRotationDeviationInStand : parameters.robotRotationDeviation;
  Matrix2x2f cov(Vector2f(sqr(h / tan((distance == 0.f ? pi_2 : atan(h / distance)) - robotRotationDeviation.x) - distance), 0.f),
                  Vector2f(0.f, sqr(tan(robotRotationDeviation.y) * distance)));
  return rot * cov * rot.transpose();
}
void CameraMatrixProvider::update(CameraMatrix& cameraMatrix,
                                  const TorsoMatrix& theTorsoMatrix,
                                  const RobotCameraMatrix& theRobotCameraMatrix,
                                  const CameraCalibration& theCameraCalibration,
                                  const MotionInfo& theMotionInfo,
                                  const FallDownState& theFallDownState,
                                  const FrameInfo& theFrameInfo,
                                  const FilteredJointData& theFilteredJointData,
                                  const RobotInfo& theRobotInfo)
{
  cameraMatrix.computeCameraMatrix(theTorsoMatrix, theRobotCameraMatrix, theCameraCalibration);
  cameraMatrix.isValid = theTorsoMatrix.isValid && theMotionInfo.isMotionStable &&
                         theFallDownState.state == FallDownState::upright &&
                         theFrameInfo.getTimeSince(theFilteredJointData.timeStamp) < 500 &&
                         theRobotInfo.penalty == PENALTY_NONE;

  //cout << "CameraMatrix z: " << cameraMatrix.translation.z << endl;

  //DECLARE_DEBUG_DRAWING("module:CameraMatrixProvider:calibrationHelper", "drawingOnImage");
  //COMPLEX_DRAWING("module:CameraMatrixProvider:calibrationHelper", drawFieldLines(cameraMatrix););
  //TEAM_OUTPUT(idTeamCameraHeight, bin, cameraMatrix.translation.z);
}
Example #18
int
main(int argc, char** argv)
{
    try
    {
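        // Read camera poses, a triangulated point cloud and the intrinsic matrix K, optionally
        // sample per-point colors from the source images, and write the result (points plus
        // camera frusta) to a VRML file.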
        bool const colorize = (argc == 7);
        bool const colorizeFrequencies = false;

        if (argc != 6 && argc != 7)
        {
            cerr << "Usage: " << argv[0] << " <poses file> <points file> <K file> <orig. width> <orig. height> [<image names file>]" << endl;
            return -1;
        }

        int const origWidth  = atoi(argv[4]);
        int const origHeight = atoi(argv[5]);

        vector<string> imageNames;
        if (argc == 7)
        {
            ifstream is(argv[6]);
            string name;
            while (is >> name)
            {
                imageNames.push_back(name);
            }
        } // end if

        vector<int>        viewIds;
        vector<Matrix3x4d> cameraPoses;
        {
            ifstream is(argv[1]);
            int nViews;
            is >> nViews;
            cout << "Going to read " << nViews << " poses." << endl;

            char buf[1024];
            Matrix3x4d P;

            for (int i = 0; i < nViews; ++i)
            {
                int viewId;
                is >> viewId;
                viewIds.push_back(viewId);

                is >> P[0][0] >> P[0][1] >> P[0][2] >> P[0][3];
                is >> P[1][0] >> P[1][1] >> P[1][2] >> P[1][3];
                is >> P[2][0] >> P[2][1] >> P[2][2] >> P[2][3];
                cameraPoses.push_back(P);
            }
        } // end scope

        int const nCams = viewIds.size();

        vector<TriangulatedPoint> pointModel;
        vector<Vector3d>          points3d;
        {
            ifstream is(argv[2]);
            int nPoints;
            is >> nPoints;

            for (int j = 0; j < nPoints; ++j)
            {
                TriangulatedPoint X;

                is >> X.pos[0] >> X.pos[1] >> X.pos[2];
                int nMeasurements = 0;
                is >> nMeasurements;
                for (int k = 0; k < nMeasurements; ++k)
                {
                    PointMeasurement m;
                    is >> m.view >> m.id >> m.pos[0] >> m.pos[1];
                    X.measurements.push_back(m);
                }
                pointModel.push_back(X);
                points3d.push_back(X.pos);
            }
        } // end scope
        int const nPoints = pointModel.size();

        Matrix3x3d K;
        makeIdentityMatrix(K);

        {
            ifstream is(argv[3]);
            is >> K[0][0] >> K[0][1] >> K[0][2] >> K[1][1] >> K[1][2];
        }

        vector<Vector3f> colors(nPoints, makeVector3(0.0f, 0.0f, 0.0f));
        vector<float>    nMeasurements(nPoints, 0.0f);

        char const * wrlName = "result.wrl";

        if (colorize)
        {
            map<int, int> viewIdPosMap;
            for (int i = 0; i < nCams; ++i) viewIdPosMap.insert(make_pair(viewIds[i], i));

            for (int i = 0; i < nCams; ++i)
            {
                string const& imageName = imageNames[viewIds[i]];

                Image<unsigned char> srcIm;
                loadImageFile(imageName.c_str(), srcIm);
                int const w = srcIm.width();
                int const h = srcIm.height();
                int const nChan = srcIm.numChannels();

                double const s_x = double(w) / origWidth;
                double const s_y = double(h) / origHeight;
                cout << "s_x = " << s_x << ", s_y = " << s_y << endl;

                Matrix3x3d Kim, S;
                makeZeroMatrix(S);
                S[0][0] = s_x;
                S[1][1] = s_y;
                S[2][2] = 1.0;
                Kim = S * K;
                CameraMatrix cam;
                cam.setIntrinsic(Kim);
                cam.setOrientation(cameraPoses[i]);

                for (int j = 0; j < nPoints; ++j)
                {
                    TriangulatedPoint const& X = pointModel[j];
                    for (int k = 0; k < X.measurements.size(); ++k)
                    {
                        PointMeasurement m = X.measurements[k];
                        //map<int, int>::const_iterator p;
                        //p = viewIdPosMap.find(m.view);
                        //if (p == viewIdPosMap.end() || p->second != i) continue;
                        if (m.view != i) continue;
                        m.pos[0] *= s_x;
                        m.pos[1] *= s_y;
                        if (m.pos[0] >= 0.0f && m.pos[0] < float(w) &&
                                m.pos[1] >= 0.0f && m.pos[1] < float(h))
                        {
                            if (nChan == 1)
                            {
                                float c = bilinearSample(srcIm, m.pos[0], m.pos[1], 0);
                                colors[j][0] += c;
                                colors[j][1] += c;
                                colors[j][2] += c;
                                nMeasurements[j] += 1.0f;
                            }
                            else if (nChan == 3)
                            {
                                colors[j][0] += bilinearSample(srcIm, m.pos[0], m.pos[1], 0);
                                colors[j][1] += bilinearSample(srcIm, m.pos[0], m.pos[1], 1);
                                colors[j][2] += bilinearSample(srcIm, m.pos[0], m.pos[1], 2);
                                nMeasurements[j] += 1.0f;
                            }
                        }
                        else
                        {
                            colors[j][0] += 255.0f;
                            nMeasurements[j] += 1.0f;
                        }
                    } // end for (k)
                } // end for (j)
            } // end for (i)

            for (int j = 0; j < nPoints; ++j)
                scaleVectorIP(1.0f / nMeasurements[j], colors[j]);
        }
        else if (colorizeFrequencies)
        {
            float maxFrequency = 0.0f;
            double avgFrequency = 0.0;
            for (int j = 0; j < nPoints; ++j)
            {
                TriangulatedPoint const& X = pointModel[j];
                maxFrequency = std::max(maxFrequency, float(X.measurements.size()));
                avgFrequency += double(X.measurements.size());
            }
            cout << "maxFrequency = " << maxFrequency << ", avgFrequency = " << avgFrequency/nPoints << endl;

            for (int j = 0; j < nPoints; ++j)
            {
                TriangulatedPoint const& X = pointModel[j];
                float const frequency = float(X.measurements.size());
                float const alpha = frequency / maxFrequency;
#if 0
                colors[j][0] = (1.0f-alpha)*255.0f;
                colors[j][1] = alpha*255.0f;
                colors[j][2] = 0.0f;
#else
                colors[j][0] = alpha*255.0f;
                colors[j][1] = alpha*255.0f;
                colors[j][2] = alpha*255.0f;
#endif
            }
        } // end if (colorizeFrequencies)

        if (colorize || colorizeFrequencies)
            writeColoredPointsToVRML(points3d, colors, wrlName, false);
        else
            writePointsToVRML(points3d, wrlName, false);

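        // compute the bounding box of the point cloud to derive a drawing scale for the camera frusta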
        Vector3d bboxMin(points3d[0]);
        Vector3d bboxMax(points3d[0]);

        for (int j = 1; j < nPoints; ++j)
        {
            bboxMin[0] = std::min(points3d[j][0], bboxMin[0]);
            bboxMin[1] = std::min(points3d[j][1], bboxMin[1]);
            bboxMin[2] = std::min(points3d[j][2], bboxMin[2]);
            bboxMax[0] = std::max(points3d[j][0], bboxMax[0]);
            bboxMax[1] = std::max(points3d[j][1], bboxMax[1]);
            bboxMax[2] = std::max(points3d[j][2], bboxMax[2]);
        }
        double const diameter = distance_L2(bboxMin, bboxMax);
        cout << "diameter = " << diameter << endl;

        //double scale = 0.1 * diameter;
        double scale = 0.001 * diameter;
        Vector3f camColor = makeVector3(100.0f, 50.0f, 50.0f);
        for (int i = 0; i < nCams; ++i)
        {
            CameraMatrix cam;
            cam.setIntrinsic(K);
            cam.setOrientation(cameraPoses[i]);
            writeCameraFrustumToVRML(cam, origWidth, origHeight, scale, camColor, wrlName, true);
        }
    }
    catch (...)
    {
        cerr << "Exception caught." << endl;
        return -1;
    }

    return 0;
} // end main()

void CameraMatrixProvider::drawFieldLines(const CameraMatrix& cameraMatrix, const RobotPose& theRobotPose, const FieldDimensions& theFieldDimensions, const CameraInfo& theCameraInfo) const
{
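  // project the field line model into the image using the inverse camera matrix; the commented-out
  // LINE calls would draw the projected lines as a camera calibration aid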
  Vector3<> start0C, start1C, end0C, end1C;
  Vector2<> start0I, start1I, end0I, end1I;

  const Pose2D& robotPoseInv(theRobotPose.invert());
  const Pose3D& cameraMatrixInv(cameraMatrix.invert());
  int halfFieldLinesWidth = theFieldDimensions.fieldLinesWidth / 2;

  for(unsigned int i = 0; i < theFieldDimensions.fieldLines.lines.size(); ++i)
  {
    Pose2D relativeLine(robotPoseInv);
    relativeLine.conc(theFieldDimensions.fieldLines.lines[i].corner);
    const Vector2<> start0(Pose2D(relativeLine).translate(0, (float) - halfFieldLinesWidth).translation);
    const Vector2<> end1(Pose2D(relativeLine).translate(theFieldDimensions.fieldLines.lines[i].length, (float) halfFieldLinesWidth).translation);

    start0C = cameraMatrixInv * Vector3<>(start0.x, start0.y, 0.); // field2camera
    end1C = cameraMatrixInv * Vector3<>(end1.x, end1.y, 0.); // field2camera

    if(start0C.x <= 200 && end1C.x <= 200)
      continue;

    const Vector2<>& start1(Pose2D(relativeLine).translate(0, (float) halfFieldLinesWidth).translation);
    const Vector2<>& end0(Pose2D(relativeLine).translate(theFieldDimensions.fieldLines.lines[i].length, (float) - halfFieldLinesWidth).translation);

    start1C = cameraMatrixInv * Vector3<>(start1.x, start1.y, 0.); // field2camera
    end0C = cameraMatrixInv * Vector3<>(end0.x, end0.y, 0.); // field2camera

    if(start0C.x <= 200)
      intersectLineWithCullPlane(start0C, end0C - start0C, start0C);
    else if(end0C.x <= 200)
      intersectLineWithCullPlane(start0C, end0C - start0C, end0C);
    if(start1C.x <= 200)
      intersectLineWithCullPlane(start1C, end1C - start1C, start1C);
    else if(end1C.x <= 200)
      intersectLineWithCullPlane(start1C, end1C - start1C, end1C);

    camera2image(start0C, start0I, theCameraInfo);
    camera2image(end0C, end0I, theCameraInfo);
    camera2image(start1C, start1I, theCameraInfo);
    camera2image(end1C, end1I, theCameraInfo);

    //LINE("module:CameraMatrixProvider:calibrationHelper", start0I.x, start0I.y, end0I.x, end0I.y, 0, Drawings::ps_solid, ColorRGBA(0, 0, 0));
    //LINE("module:CameraMatrixProvider:calibrationHelper", start1I.x, start1I.y, end1I.x, end1I.y, 0, Drawings::ps_solid, ColorRGBA(0, 0, 0));
  }

  start0C = cameraMatrixInv * Vector3<>(100, -11, 0.); // field2camera
  end0C = cameraMatrixInv * Vector3<>(0, -11, 0.); // field2camera
  camera2image(start0C, start0I, theCameraInfo);
  camera2image(end0C, end0I, theCameraInfo);

  //LINE("module:CameraMatrixProvider:calibrationHelper", start0I.x, start0I.y, end0I.x, end0I.y, 0, Drawings::ps_solid, ColorClasses::blue);

  start0C = cameraMatrixInv * Vector3<>(100, 11, 0.); // field2camera
  end0C = cameraMatrixInv * Vector3<>(0, 11, 0.); // field2camera
  camera2image(start0C, start0I, theCameraInfo);
  camera2image(end0C, end0I, theCameraInfo);

  //LINE("module:CameraMatrixProvider:calibrationHelper", start0I.x, start0I.y, end0I.x, end0I.y, 0, Drawings::ps_solid, ColorClasses::blue);

  start0C = cameraMatrixInv * Vector3<>(110, 1000, 0.); // field2camera
  end0C = cameraMatrixInv * Vector3<>(110, -1000, 0.); // field2camera
  camera2image(start0C, start0I, theCameraInfo);
  camera2image(end0C, end0I, theCameraInfo);

  //LINE("module:CameraMatrixProvider:calibrationHelper", start0I.x, start0I.y, end0I.x, end0I.y, 0, Drawings::ps_solid, ColorClasses::blue);
}
Example #20
CameraMatrix CameraMatrix::inverse() const {

	CameraMatrix cm = *this;
	cm.invert();
	return cm;
}