Example #1
/* ************************************************************************* */
TEST( SimpleCamera, level2)
{
  // Create a level camera, looking in Y-direction
  Pose2 pose2(0.4,0.3,M_PI/2.0);
  SimpleCamera camera = SimpleCamera::Level(K, pose2, 0.1);

  // expected
  Point3 x(1,0,0),y(0,0,-1),z(0,1,0);
  Rot3 wRc(x,y,z);
  Pose3 expected(wRc,Point3(0.4,0.3,0.1));
  CHECK(assert_equal( camera.pose(), expected));
}
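The expected rotation encodes the usual level-camera convention: the optical axis z_c points along the 2D heading, y_c points down in the image, and x_c points right. With yaw \psi = \pi/2 as above,

\[ z_c = (\cos\psi,\ \sin\psi,\ 0) = (0,1,0), \qquad y_c = (0,0,-1), \qquad x_c = y_c \times z_c = (1,0,0), \]

which are exactly the columns x, y, z of wRc in the test (a reading of the expected values, not a quote of the SimpleCamera::Level implementation).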
Example #2
/* ************************************************************************* */
TEST( SimpleCamera, lookat)
{
  // Create a camera at (10,0,0) looking at the origin, i.e. along the -X axis
  Point3 C(10.0,0.0,0.0);
  SimpleCamera camera = SimpleCamera::Lookat(C, Point3(), Point3(0.0,0.0,1.0));

  // expected
  Point3 xc(0,1,0),yc(0,0,-1),zc(-1,0,0);
  Pose3 expected(Rot3(xc,yc,zc),C);
  CHECK(assert_equal( camera.pose(), expected));

  Point3 C2(30.0,0.0,10.0);
  SimpleCamera camera2 = SimpleCamera::Lookat(C2, Point3(), Point3(0.0,0.0,1.0));

  Matrix R = camera2.pose().rotation().matrix();
  Matrix I = trans(R)*R;
  CHECK(assert_equal(I, eye(3)));
}
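The expected axes are consistent with the standard look-at construction: point the optical axis from the eye toward the target, then derive the remaining axes from the up vector. A minimal sketch in that spirit (an illustration matching the expected values above, not necessarily GTSAM's exact code):

#include <gtsam/geometry/Point3.h>
#include <gtsam/geometry/Rot3.h>

using namespace gtsam;

// z_c: optical axis (eye toward target); y_c: image-down; x_c: image-right.
Rot3 lookatRotation(const Point3& eye, const Point3& target, const Point3& up)
{
  Point3 zc = target - eye;
  zc = zc / zc.norm();            // optical axis
  Point3 xc = (-up).cross(zc);    // negate up because y_c points down
  xc = xc / xc.norm();
  Point3 yc = zc.cross(xc);       // completes the right-handed frame
  return Rot3(xc, yc, zc);
}

For eye (10,0,0), target (0,0,0), and up (0,0,1) this yields xc = (0,1,0), yc = (0,0,-1), zc = (-1,0,0), matching the expected pose in the test.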
Example #3
/* ************************************************************************* */
TEST( InvDepthFactor, optimize) {

  // landmark 5 meters in front of the camera
  Point3 landmark(5, 0, 1);

  Point2 expected_uv = level_camera.project(landmark);

  InvDepthCamera3<Cal3_S2> inv_camera(level_pose, K);
  LieVector inv_landmark(5, 1., 0., 1., 0., 0.);
  LieScalar inv_depth(1./4);

  gtsam::NonlinearFactorGraph graph;
  Values initial;

  InverseDepthFactor::shared_ptr factor(new InverseDepthFactor(expected_uv, sigma,
      Symbol('x',1), Symbol('l',1), Symbol('d',1), K));
  graph.push_back(factor);
  graph.add(PoseConstraint(Symbol('x',1),level_pose));
  initial.insert(Symbol('x',1), level_pose);
  initial.insert(Symbol('l',1), inv_landmark);
  initial.insert(Symbol('d',1), inv_depth);

  LevenbergMarquardtParams lmParams;
  Values result = LevenbergMarquardtOptimizer(graph, initial, lmParams).optimize();
  EXPECT(assert_equal(initial, result, 1e-9));

  /// Add a second camera

  // add a camera 2 meters to the right
  Pose3 right_pose = level_pose * Pose3(Rot3(), Point3(2,0,0));
  SimpleCamera right_camera(right_pose, *K);

  InvDepthCamera3<Cal3_S2> right_inv_camera(right_pose, K);

  Point3 landmark1(6,0,1);
  Point2 right_uv = right_camera.project(landmark1);


  InverseDepthFactor::shared_ptr factor1(new InverseDepthFactor(right_uv, sigma,
      Symbol('x',2), Symbol('l',1),Symbol('d',1),K));
  graph.push_back(factor1);

  graph.add(PoseConstraint(Symbol('x',2),right_pose));

  initial.insert(Symbol('x',2), right_pose);

  // TODO: need to add priors to make this work with
//    Values result2 = optimize<NonlinearFactorGraph>(graph, initial,
//      NonlinearOptimizationParameters(),MULTIFRONTAL, GN);

  Values result2 = LevenbergMarquardtOptimizer(graph, initial, lmParams).optimize();
  Point3 l1_result2 = InvDepthCamera3<Cal3_S2>::invDepthTo3D(
      result2.at<LieVector>(Symbol('l',1)),
      result2.at<LieScalar>(Symbol('d',1)));

  EXPECT(assert_equal(landmark1, l1_result2, 1e-9));
}
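The numbers in this test follow a 5-DOF inverse-depth parametrization: an anchor (x_0, y_0, z_0) plus azimuth \theta and elevation \varphi, with a separate inverse depth \rho, so the 3D point is

\[ p = (x_0, y_0, z_0) + \frac{1}{\rho}\,(\cos\varphi\cos\theta,\ \cos\varphi\sin\theta,\ \sin\varphi). \]

Here the anchor (1,0,1) with \theta = \varphi = 0 and \rho = 1/4 gives p = (1,0,1) + 4\,(1,0,0) = (5,0,1), exactly the first landmark, which is why the single-camera optimization returns the initial values unchanged. Assuming level_pose is the height-1, looking-along-X pose used elsewhere in these examples, both (5,0,1) and (6,0,1) lie on the first camera's optical axis, so the first view only fixes the ray and the second camera is what resolves the depth.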
Example #4
bool RobustViconTracker::poses_are_similar(const SimpleCamera& cam1, const SimpleCamera& cam2, double distanceThreshold, double angleThreshold)
{
  // Calculate the distance between the positions of the two cameras, and the angles between their corresponding camera axes.
  double distance = (cam1.p() - cam2.p()).norm();
  double nAngle = angle_between(cam1.n(), cam2.n());
  double uAngle = angle_between(cam1.u(), cam2.u());
  double vAngle = angle_between(cam1.v(), cam2.v());

  // Check the similarity of the camera poses by comparing the aforementioned distance and angles to suitable thresholds.
  return distance < distanceThreshold && nAngle < angleThreshold && uAngle < angleThreshold && vAngle < angleThreshold;
}
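angle_between is not defined in this snippet; a minimal sketch of the usual definition, assuming Eigen::Vector3d-style vectors (the actual vector type returned by SimpleCamera::n() et al. may differ):

#include <algorithm>
#include <cmath>
#include <Eigen/Core>

// Unsigned angle in radians between two vectors; the cosine is clamped
// to [-1, 1] to guard against round-off in the dot product.
double angle_between(const Eigen::Vector3d& a, const Eigen::Vector3d& b)
{
  double cosine = a.dot(b) / (a.norm() * b.norm());
  return std::acos(std::max(-1.0, std::min(1.0, cosine)));
}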
/* ************************************************************************* */
TEST( InvDepthFactor, Project4) {

  // landmark 4m to the left and 1m up from camera
  // inv depth landmark xyz at origin
  Point3 landmark(1, 4, 2);
  Point2 expected = level_camera.project(landmark);

  InvDepthCamera3<Cal3_S2> inv_camera(level_pose, K);
  Vector5 diag_landmark((Vector(5) << 0., 0., 0., atan(4.0/1), atan(2./sqrt(1.+16.))).finished());
  double inv_depth(1./sqrt(1.+16.+4.));
  Point2 actual = inv_camera.project(diag_landmark, inv_depth);
  EXPECT(assert_equal(expected, actual));
}
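Plugging the Project4 numbers into the inverse-depth formula above: the anchor is the origin, \theta = \arctan(4/1), \varphi = \arctan(2/\sqrt{17}), and 1/\rho = \sqrt{21} = \|(1,4,2)\|, so

\[ p = \sqrt{21}\left(\frac{\sqrt{17}}{\sqrt{21}}\cdot\frac{1}{\sqrt{17}},\ \frac{\sqrt{17}}{\sqrt{21}}\cdot\frac{4}{\sqrt{17}},\ \frac{2}{\sqrt{21}}\right) = (1,4,2), \]

recovering the landmark, which is why the projection must agree with the direct pinhole projection.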
/* ************************************************************************* */
TEST( InvDepthFactor, Project2) {

  // landmark 1m to the left and 1m up from camera
  // inv landmark xyz is the same as the camera xyz, so the depth actually doesn't matter
  Point3 landmark(1, 1, 2);
  Point2 expected = level_camera.project(landmark);

  InvDepthCamera3<Cal3_S2> inv_camera(level_pose, K);
  Vector5 diag_landmark((Vector(5) << 0., 0., 1., M_PI/4., atan(1.0/sqrt(2.0))).finished());
  double inv_depth(1/sqrt(3.0));
  Point2 actual = inv_camera.project(diag_landmark, inv_depth);
  EXPECT(assert_equal(expected, actual));
}
/* ************************************************************************* */
TEST( InvDepthFactor, Project1) {

  // landmark 5 meters in front of the camera
  Point3 landmark(5, 0, 1);

  Point2 expected_uv = level_camera.project(landmark);

  InvDepthCamera3<Cal3_S2> inv_camera(level_pose, K);
  Vector5 inv_landmark((Vector(5) << 1., 0., 1., 0., 0.).finished());
  double inv_depth(1./4);
  Point2 actual_uv = inv_camera.project(inv_landmark, inv_depth);
  EXPECT(assert_equal(expected_uv, actual_uv,1e-8));
  EXPECT(assert_equal(Point2(640,480), actual_uv));
}
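The exact check against Point2(640,480) works because the landmark sits on the level camera's optical axis and therefore projects to the principal point. Assuming K is the shared Cal3_S2(1500, 1200, 0, 640, 480) calibration defined in Example #9 below, with camera-frame coordinates X_c = Y_c = 0:

\[ (u,v) = \left(f_x\,\frac{X_c}{Z_c} + c_x,\ \ f_y\,\frac{Y_c}{Z_c} + c_y\right) = (c_x, c_y) = (640, 480). \]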
	virtual void resetCamera(float camDist, float pitch, float yaw, float camPosX,float camPosY, float camPosZ)
	{
		m_camera.setCameraDistance(camDist);
		m_camera.setCameraPitch(pitch);
		m_camera.setCameraYaw(yaw);
		m_camera.setCameraTargetPosition(camPosX,camPosY,camPosZ);
		m_camera.setAspectRatio((float)m_swWidth/(float)m_swHeight);
		m_camera.update();

	}
Example #9
#include <boost/assign.hpp>
#include <boost/assign/std/vector.hpp>

using namespace std;
using namespace gtsam;
using namespace boost::assign;

// Some common constants
static const boost::shared_ptr<Cal3_S2> sharedCal = //
    boost::make_shared<Cal3_S2>(1500, 1200, 0, 640, 480);

// Looking along X-axis, 1 meter above ground plane (x-y)
static const Rot3 upright = Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2);
static const Pose3 pose1 = Pose3(upright, gtsam::Point3(0, 0, 1));
SimpleCamera camera1(pose1, *sharedCal);

// landmark ~5 meters in front of the camera
static const Point3 landmark(5, 0.5, 1.2);

// 1. Project the landmark into camera 1
Point2 z1 = camera1.project(landmark);

//******************************************************************************
TEST( triangulation, TriangulationFactor ) {

  Key pointKey(1);
  SharedNoiseModel model;
  typedef TriangulationFactor<SimpleCamera> Factor;
  Factor factor(camera1, z1, model, pointKey);
	virtual void setUpAxis(int axis)
	{
		m_upAxis = axis;
		m_camera.setCameraUpAxis(axis);
		m_camera.update();
	}
	virtual void render(const btDiscreteDynamicsWorld* rbWorld) 
	{
		//clear the color buffer
		TGAColor clearColor;
		clearColor.bgra[0] = 255;
		clearColor.bgra[1] = 255;
		clearColor.bgra[2] = 255;
		clearColor.bgra[3] = 255;
		
		clearBuffers(clearColor);
	
		
		ATTRIBUTE_ALIGNED16(btScalar modelMat[16]);
		ATTRIBUTE_ALIGNED16(float viewMat[16]);
		ATTRIBUTE_ALIGNED16(float projMat[16]);

		m_camera.getCameraProjectionMatrix(projMat);
		m_camera.getCameraViewMatrix(viewMat);
		
		btVector3 lightDirWorld(-5,200,-40);
		switch (m_upAxis)
		{
		case 1:
			lightDirWorld = btVector3(-50.f,100,30);
			break;
		case 2:
			lightDirWorld = btVector3(-50.f,30,100);
			break;
		default:
			break;
		}
		
		lightDirWorld.normalize();
		
		
		for (int i=0;i<rbWorld->getNumCollisionObjects();i++)
		{
			btCollisionObject* colObj = rbWorld->getCollisionObjectArray()[i];
			int colObjIndex = colObj->getUserIndex();
			int shapeIndex = colObj->getCollisionShape()->getUserIndex();
			if (colObjIndex>=0 && shapeIndex>=0)
			{
				
				TinyRenderObjectData* renderObj = 0;
				
				int* cptr = m_swInstances[colObjIndex];
				if (cptr)
				{
					int c = *cptr;
					TinyRenderObjectData** sptr = m_swRenderObjects[c];
					if (sptr)
					{
						renderObj = *sptr;
						//sync the object transform
						const btTransform& tr = colObj->getWorldTransform();
						tr.getOpenGLMatrix(modelMat);
				
						// copy the column-major (OpenGL-style) matrix arrays
						// into the renderer's [row][col] matrices
						for (int row=0;row<4;row++)
						{
							for (int col=0;col<4;col++)
							{
								renderObj->m_projectionMatrix[row][col] = projMat[row+4*col];
								renderObj->m_modelMatrix[row][col] = modelMat[row+4*col];
								renderObj->m_viewMatrix[row][col] = viewMat[row+4*col];
							}
						}
						// per-object properties: set once, not per matrix element
						renderObj->m_localScaling = colObj->getCollisionShape()->getLocalScaling();
						renderObj->m_lightDirWorld = lightDirWorld;
						TinyRenderer::renderObject(*renderObj);
					}
				}
			}
		}
		
		
		static int counter=0;
		counter++;
		if ((counter&7)==0)
		{
			
			char filename[1024];
			snprintf(filename, sizeof(filename), "framebuf%d.tga", counter);
			m_rgbColorBuffer.flip_vertically();
			getFrameBuffer().write_tga_file(filename,true);
		}
		float color[4] = {1,1,1,1};
		
	}
bool SceneParser::addCamera(struct basicxmlnode * cameraNode, Scene * scene){
	if (!cameraNode) {
		std::cout << "SceneParser::addCamera: empty camera node\n";
		return false;
	}

	if (std::string(cameraNode->tag) != "Camera") {
		return false;
	}

	// read resolution (mandatory attributes!)
	int resolutionX;
	int resolutionY;
	char * attributeValue;
	if (attributeValue = getattributevaluebyname(cameraNode, "resolutionX")) {
		if (!stringToNumber<int>(resolutionX, attributeValue)) {
			return false;
		}
	}
	else {
		std::cerr << "SceneParser::addCamera: no resolution provided!" << "\n";
		return false;
	}
	if (attributeValue = getattributevaluebyname(cameraNode, "resolutionY")) {
		if (!stringToNumber<int>(resolutionY, attributeValue)) {
			return false;
		}
	}
	else {
		std::cerr << "SceneParser::addCamera: no resolution provided!" << "\n";
		return false;
	}

	// instantiate camera
	SimpleCamera* camera = new SimpleCamera(resolutionX, resolutionY);
	Vector3 position;
	Vector3 direction;
	Vector3 up;
	float angle = 70;

	// read user specified values
	if (attributeValue = getattributevaluebyname(cameraNode, "position")) {
		if (!stringToVector3<double>(position, attributeValue)) {
			delete(camera);
			return false;
		}
		camera->setPosition(position);
	}

	if (attributeValue = getattributevaluebyname(cameraNode, "direction")) {
		if (!stringToVector3<double>(direction, attributeValue)) {
			delete(camera);
			return false;
		}
		else if ((attributeValue = getattributevaluebyname(cameraNode, "up"))) {
			if (!stringToVector3<double>(up, attributeValue)) {
				delete(camera);
				return false;
			}
			camera->setOrientation(direction, up);
		}
		else {
			std::cout << "SceneParser::addCamera: direction specified without up\n";
			delete(camera);
			return false;
		}
	}
	else if ((attributeValue = getattributevaluebyname(cameraNode, "up"))) {
		std::cout << "SceneParser::addCamera: up specified without direction\n";
		delete(camera);
		return false;
	}
	
	if (attributeValue = getattributevaluebyname(cameraNode, "angle")) {
		if (!stringToNumber<float>(angle, attributeValue)) {
			delete(camera);
			return false;
		}
		camera->setOpeningAngle(angle);
	}
	

	// add camera to scene
	scene->setCamera(camera);

	std::cout << "SceneParser::addCamera: added camera\n";

	return true;
}
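For reference, a camera element this parser would accept might look like the following. The attribute formats (whitespace-separated vector components) are an assumption based on the stringToVector3/stringToNumber calls above, not taken from the project's actual schema:

// Hypothetical <Camera> element for SceneParser::addCamera.
// resolutionX/resolutionY are mandatory; position, direction+up, and angle
// are optional, but direction and up must appear together.
const char* exampleCameraXml =
	"<Camera resolutionX=\"800\" resolutionY=\"600\" "
	"position=\"0 0 5\" direction=\"0 0 -1\" up=\"0 1 0\" angle=\"70\"/>";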
Example #13
File: Renderer.cpp  Project: pxf/pxf-tech2
bool calculate_pixel(float x, float y, task_detail_t *task, batch_blob_t *datablob, pixel_data_t *pixel)
{	
	// Accumulated pixel color, cleared to black
	Pxf::Math::Vec3f fpixel(0.0f, 0.0f, 0.0f);
	
	SimpleCamera* cam = (SimpleCamera*) datablob->cam;

	// Center ray
	Vec3f screen_coords(-1.0f + x, 1.0f - y,0.0f);
	ray_t cray;
	cray.o = Vec3f(0.0f,0.0f,1.41421356f); // offset by sqrt(2) along +Z
	cray.o += cam->GetPos();

	Quaternion orientation = (*cam->GetOrientation());
	Normalize(orientation);

	//screen_coords = orientation * screen_coords;
	//Normalize(screen_coords);
	screen_coords += cam->GetPos();

	/*
	if (!calc_multisample_ray(&cray, datablob, &fpixel, 0.001f, 0))
	{
		Pxf::Message("calculate_pixel", "Ray shooting failed!");
		return false;
	}*/

	// find closest primitive
	for(int pixel_x = 0; pixel_x < datablob->samples_per_pixel; ++pixel_x)
	{
		for(int pixel_y = 0; pixel_y < datablob->samples_per_pixel; ++pixel_y)
		{
			// Offset ray
			ray_t ray = cray;
			Vec3f new_screen_coords = screen_coords;
			
			// TODO: Make sampling more non-uniform
			//new_screen_coords.x += (1.0f / ((float)datablob->pic_w * (float)datablob->samples_per_pixel)) * (float)pixel_x;
			//new_screen_coords.y += (1.0f / ((float)datablob->pic_h * (float)datablob->samples_per_pixel)) * (float)pixel_y;
			
			new_screen_coords.x += (0.5f / ((float)datablob->pic_w)) * (datablob->samples[rand() % 255] * 2.0f - 1.0f);
			new_screen_coords.y += (0.5f / ((float)datablob->pic_h)) * (datablob->samples[rand() % 255] * 2.0f - 1.0f);
			ray.d = new_screen_coords - cray.o;
			ray.d = orientation * ray.d;
			Normalize(ray.d);
			
			// Calc direct light
			Pxf::Math::Vec3f light_contrib;
			if (!calc_ray_contrib(&ray, datablob, &light_contrib, 0))
			{
				Pxf::Message("calculate_pixel", "Light calculations failed!");
				return false;
			}
			fpixel += light_contrib;
			
		}
	}
	
	fpixel /= datablob->samples_per_pixel * datablob->samples_per_pixel;
	
	fpixel.x = Pxf::Math::Clamp(fpixel.x, 0.0f, 1.0f);
	fpixel.y = Pxf::Math::Clamp(fpixel.y, 0.0f, 1.0f);
	fpixel.z = Pxf::Math::Clamp(fpixel.z, 0.0f, 1.0f);
		
	pixel->r = (unsigned char)(fpixel.r * 255.0f);
	pixel->g = (unsigned char)(fpixel.g * 255.0f);
	pixel->b = (unsigned char)(fpixel.b * 255.0f);	
	
	return true;
}