void capture(Graphics& gl, const Lens& cam, const Pose& pose, Drawable& draw) {
		validate();
		
		glPushAttrib(GL_ALL_ATTRIB_BITS);
		
		Vec3d pos = pose.pos();
		Vec3d ux, uy, uz; 
		pose.unitVectors(ux, uy, uz);
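		// all six cube faces share one square, 90-degree projection;
		// only the view basis (mModelView) changes per face below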
		mProjection = Matrix4d::perspective(90, 1, cam.near(), cam.far());
		
		glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, mFboId);
		for (int face=0; face<6; face++) {
			glDrawBuffer(GL_COLOR_ATTACHMENT0_EXT+face);
			
			gl.viewport(0, 0, resolution(), resolution());
			gl.clearColor(clearColor());
			gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
			
			switch (face) {
				case 0:
					// GL_TEXTURE_CUBE_MAP_POSITIVE_X   
					mModelView = Matrix4d::lookAt(uz, uy, -ux, pos);
					break;
				case 1:
					// GL_TEXTURE_CUBE_MAP_NEGATIVE_X   
					mModelView = Matrix4d::lookAt(-uz, uy, ux, pos);
					break;
				case 2:
					// GL_TEXTURE_CUBE_MAP_POSITIVE_Y   
					mModelView = Matrix4d::lookAt(ux, -uz, uy, pos);
					break;
				case 3:
					// GL_TEXTURE_CUBE_MAP_NEGATIVE_Y   
					mModelView = Matrix4d::lookAt(ux, uz, -uy, pos);
					break;
				case 4:
					// GL_TEXTURE_CUBE_MAP_POSITIVE_Z   
					mModelView = Matrix4d::lookAt(ux, uy, uz, pos);
					break;
				default:
					// GL_TEXTURE_CUBE_MAP_NEGATIVE_Z   
					mModelView = Matrix4d::lookAt(-ux, uy, -uz, pos);
					break;
			}
			
			// apply camera transform:
			gl.pushMatrix(gl.PROJECTION);
			gl.loadMatrix(mProjection);
			gl.pushMatrix(gl.MODELVIEW);
			gl.loadMatrix(mModelView);
			
			draw.onDraw(gl);
			
			gl.popMatrix(gl.PROJECTION);
			gl.popMatrix(gl.MODELVIEW);
		}
		glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
		
		glPopAttrib();
	}
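// A minimal usage sketch for capture() above. `CubeMapFBO` stands in for
// whatever class owns capture(); it and `renderFrame` are illustrative
// names, not a confirmed API.
struct SceneDrawable : public Drawable {
	void onDraw(Graphics& gl) {
		// issue scene geometry here
	}
};

void renderFrame(Graphics& gl, const Lens& cam, const Pose& viewer,
                 CubeMapFBO& cube, SceneDrawable& scene) {
	// renders the scene once per cube face into the FBO's color attachments
	cube.capture(gl, cam, viewer, scene);
	// the captured cube map can then be sampled for reflections or
	// omnidirectional display
}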
/// @param[in] isStereo		Whether scene is in stereo (widens near/far planes to fit both eyes)
void Lens::frustum(Frustumd& f, const Pose& p, double aspect/*, bool isStereo*/) const {

	Vec3d ur, uu, uf;
	p.directionVectors(ur, uu, uf);
	const Vec3d& pos = p.pos();

	double nh = heightAtDepth(near());
	double fh = heightAtDepth(far());

	double nw = nh * aspect;
	double fw = fh * aspect;

//	// This effectively creates a union between the near/far planes of the
//	// left and right eyes. The offsets are computed by using the law
//	// of similar triangles.
//	if(isStereo){
//		nw += fabs(0.5*eyeSep()*(focalLength()-near())/focalLength());
//		fw += fabs(0.5*eyeSep()*(focalLength()- far())/focalLength());
//	}

	Vec3d nc = pos + uf * near();	// center point of near plane
	Vec3d fc = pos + uf * far();	// center point of far plane

	f.ntl = nc + uu * nh - ur * nw;
	f.ntr = nc + uu * nh + ur * nw;
	f.nbl = nc - uu * nh - ur * nw;
	f.nbr = nc - uu * nh + ur * nw;

	f.ftl = fc + uu * fh - ur * fw;
	f.ftr = fc + uu * fh + ur * fw;
	f.fbl = fc - uu * fh - ur * fw;
	f.fbr = fc - uu * fh + ur * fw;

	f.computePlanes();
}
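// A minimal usage sketch for frustum() above, assuming Frustumd offers a
// point-containment test built on the planes from computePlanes(); the
// testPoint call is an assumption, not a confirmed API.
bool isVisible(const Lens& lens, const Pose& view, double aspect, const Vec3d& worldPoint) {
	Frustumd fr;
	lens.frustum(fr, view, aspect);			// world-space corners and planes
	return fr.testPoint(worldPoint) != 0;	// assumed: nonzero means inside
}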
	/// Get a linearly interpolated Pose between this and another
	/// (useful ingredient for smooth animations, estimations, etc.)
	Pose lerp(const Pose& target, double amt) const {
		Pose r(*this);
		r.pos().lerp(target.pos(), amt);
		r.quat().slerpTo(target.quat(), amt);
		return r;
	}
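// A minimal usage sketch: ease a camera toward a goal pose once per frame.
// `smoothCamera` and the 0.1 step are illustrative, not part of the API.
void smoothCamera(Pose& cam, const Pose& goal) {
	cam = cam.lerp(goal, 0.1);	// move 10% of the remaining distance, slerping orientation
}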
void VRSpatialCollisionManager::addQuad(float width, float height, const Pose& p, int objID) {
    // center the quad half its height above the pose origin, so its bottom
    // edge rests on the pose position
    Vec3d pos = p.pos() + p.up()*height*0.5;
    VRGeoData data;
    data.pushQuad(pos, p.dir(), p.up(), Vec2d(width, height), true);
    add(data.asGeometry("tmp"), objID);
}
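// A minimal usage sketch; `registerWall`, `wallPose` and `wallID` are
// illustrative names, not part of the API shown above.
void registerWall(VRSpatialCollisionManager& manager, const Pose& wallPose, int wallID) {
    // a 2 m wide, 1 m tall collision quad whose bottom edge rests on the
    // pose origin, facing along the pose direction
    manager.addQuad(2.0f, 1.0f, wallPose, wallID);
}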
whycon::LocalizationSystem::Pose whycon::LocalizationSystem::get_pose(const whycon::CircleDetector::Circle& circle) const {
	Pose result;
	double x, y, x1, x2, y1, y2, sx1, sx2, sy1, sy2, major, minor, v0, v1;

	// transform the center
	transform(circle.x, circle.y, x, y);

	// calculate the major axis
	// endpoints in image coords
	sx1 = circle.x + circle.v0 * circle.m0 * 2;
	sx2 = circle.x - circle.v0 * circle.m0 * 2;
	sy1 = circle.y + circle.v1 * circle.m0 * 2;
	sy2 = circle.y - circle.v1 * circle.m0 * 2;

	// endpoints in camera coords
	transform(sx1, sy1, x1, y1);
	transform(sx2, sy2, x2, y2);

	// semiaxis length
	major = sqrt((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2))/2.0;

	// unit direction of the major axis in camera coords
	v0 = (x2-x1)/major/2.0;
	v1 = (y2-y1)/major/2.0;

	// calculate the minor axis
	// endpoints in image coords
	sx1 = circle.x + circle.v1 * circle.m1 * 2;
	sx2 = circle.x - circle.v1 * circle.m1 * 2;
	sy1 = circle.y - circle.v0 * circle.m1 * 2;
	sy2 = circle.y + circle.v0 * circle.m1 * 2;

	// endpoints in camera coords
	transform(sx1, sy1, x1, y1);
	transform(sx2, sy2, x2, y2);

	// semiaxis length
	minor = sqrt((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2))/2.0;

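	// The ellipse just measured (center (x,y), unit major-axis direction
	// (v0,v1), semiaxes `major` and `minor`) is encoded as the quadratic form
	//   a*u^2 + 2*b*u*v + c*v^2 + 2*d*u + 2*e*v + f = 0,
	// i.e. the symmetric conic matrix [a b d; b c e; d e f] built below.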
	//construct the conic
	double a,b,c,d,e,f;
	a = v0*v0/(major*major)+v1*v1/(minor*minor);
	b = v0*v1*(1/(major*major)-1/(minor*minor));
	c = v0*v0/(minor*minor)+v1*v1/(major*major);
	d = (-x*a-b*y);
	e = (-y*c-b*x);
	f = (a*x*x+c*y*y+2*b*x*y-1);
	cv::Matx33d data(a, b, d,
	                 b, c, e,
	                 d, e, f);

	// compute conic eigenvalues and eigenvectors
	cv::Vec3d eigenvalues;
	cv::Matx33d eigenvectors;
	cv::eigen(data, eigenvalues, eigenvectors);

	// compute ellipse parameters in real-world
	double L1 = eigenvalues(1);
	double L2 = eigenvalues(0);
	double L3 = eigenvalues(2);
	int V2 = 0;
	int V3 = 2;
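	// cv::eigen returns eigenvalues of the symmetric conic matrix in
	// descending order; for a real ellipse two are positive and one negative,
	// so L2 is the largest, L1 the middle and L3 the negative one, with V2/V3
	// indexing the eigenvectors paired with L2 and L3. The scale z and the
	// sign S3 below place the target at the correct distance in front of the
	// camera.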

	// position
	double z = circle_diameter/sqrt(-L2*L3)/2.0;
	cv::Matx13d position_mat = L3 * sqrt((L2 - L1) / (L2 - L3)) * eigenvectors.row(V2) + L2 * sqrt((L1 - L3) / (L2 - L3)) * eigenvectors.row(V3);
	result.pos = cv::Vec3f(position_mat(0), position_mat(1), position_mat(2));
	int S3 = (result.pos(2) * z < 0 ? -1 : 1);
	result.pos *= S3 * z;

	/*float dist = sqrt(L1 * L1 * L1) * circle_diameter * 0.5;
	std::cout << "d1 " << dist << " " << cv::norm(result.pos) << std::endl;*/

	WHYCON_DEBUG("ellipse center: " << x << "," << y << " " << " computed position: " << result.pos << " " << result.pos / result.pos(2));

	// rotation
	cv::Matx13d normal_mat = sqrt((L2 - L1) / (L2 - L3)) * eigenvectors.row(V2) + sqrt((L1 - L3) / (L2 - L3)) * eigenvectors.row(V3);
	cv::normalize(cv::Vec3f(normal_mat(0), normal_mat(1), normal_mat(2)), result.rot, 1, cv::NORM_L2SQR);
	result.rot(0) = atan2(result.rot(1), result.rot(0));
	result.rot(1) = acos(result.rot(2));
	result.rot(2) = 0; /* not recoverable */
	/* TODO: to be checked */

	/*cv::Matx33d data_inv;
	cv::invert(data, data_inv);
	cv::Matx31d projection = data_inv * normal_mat.t();
	std::cout << "det " << cv::determinant(data_inv) << std::endl;
	//cv::Vec3f new_center = cv::normalize(cv::Vec3f(projection(0), projection(1), projection(2))) * dist;
	cv::Vec3f new_center = cv::Vec3f(projection(0) / projection(2), projection(1) / projection(2), 1) * dist;
	std::cout << "center: " << new_center(0) << "," << new_center(1) << "," << new_center(2) << " vs " << result.pos << std::endl;
	std::cout << "normalized: " << cv::normalize(new_center) << " " << cv::normalize(result.pos) << std::endl;*/

	return result;
}
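// A minimal usage sketch for get_pose() above. The localize/get_circle calls
// and the `targets`/`valid` members follow typical whycon usage but are
// assumptions; check the actual headers before relying on them.
#include <iostream>
#include <opencv2/core/core.hpp>

void reportPoses(whycon::LocalizationSystem& system, cv::Mat& frame) {
	if (!system.localize(frame)) return;	// assumed: detects all circle targets
	for (int i = 0; i < system.targets; i++) {
		const whycon::CircleDetector::Circle& c = system.get_circle(i);
		if (!c.valid) continue;				// skip targets that were not found
		whycon::LocalizationSystem::Pose pose = system.get_pose(c);
		std::cout << "target " << i << ": pos " << pose.pos
		          << " rot " << pose.rot << std::endl;
	}
}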