示例#1
0
// Construct the GL window: set the GL mode, create the viewer with the
// default camera parameters, the BVH, and a pool of 1000 particles.
MyGlWindow::MyGlWindow(int x, int y, int w, int h) :
  Fl_Gl_Window(x,y,w,h)
//==========================================================================
{
    
    mode( FL_RGB|FL_ALPHA|FL_DOUBLE | FL_STENCIL );
    
    fieldOfView = 45;

    MathVec3D<double> viewPoint( DEFAULT_VIEW_POINT );
    MathVec3D<double> viewCenter( DEFAULT_VIEW_CENTER );
    MathVec3D<double> upVector( DEFAULT_UP_VECTOR );
    // Cast before dividing: integer w/h would truncate the aspect ratio
    // (e.g. 800x600 -> 1 instead of 1.333) and distort the projection.
    double aspect = static_cast<double>(w) / static_cast<double>(h);
    // Pass fieldOfView rather than repeating the literal 45.
    m_viewer = new Viewer( viewPoint, viewCenter, upVector, fieldOfView, aspect );
    
    m_bvh = new BVH();
    m_particleList.resize(1000);
    // Initialize every particle to a stationary, yellow-ish, newborn state.
    // (The original assigned p.velocity twice; the duplicate is removed.)
    for_each (m_particleList.begin(), m_particleList.end(), [] (Particle& p) {
        p.velocity  = Vec3f(0, 0, 0);
        p.color     = Vec3f(0.6, 0.6, 0);
        p.timeAlive = 0;
        p.lifespan  = 5;
    });
    m_range = 0;
    m_direction = 0;
}
示例#2
0
// Reset the GL projection matrix and reapply the projection and camera
// transforms for the current frame.
void Renderer::origin()
{
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    projection();  // presumably re-establishes the projection transform -- confirm
    viewPoint();   // presumably applies the camera/view transform -- confirm
    // glEnable(GL_CULL_FACE);
}
示例#3
0
	//------------------------------------------------------------------------------//
	// Pick the lowest level of detail whose error-to-distance ratio stays
	// within the creator's limit, based on the camera's horizontal distance
	// to the nearest corner of this section's bounds.
	void GeoTerrainSection::_calcCurrentLod(Camera* cam)
	{
		// Camera position projected onto the horizontal (x,z) plane.
		Vector2 eye(cam->getPosition().x, cam->getPosition().z);

		// The four corners of this section's bounding box, in (x,z).
		Vector2 corners[4] =
		{
			Vector2(mSectionBound.getMinimum().x, mSectionBound.getMinimum().z),
			Vector2(mSectionBound.getMinimum().x, mSectionBound.getMaximum().z),
			Vector2(mSectionBound.getMaximum().x, mSectionBound.getMaximum().z),
			Vector2(mSectionBound.getMaximum().x, mSectionBound.getMinimum().z)
		};

		// The smallest corner distance is the test value; clamp it away
		// from zero so the ratio below is well-defined.
		float nearest = eye.distance(corners[0]);
		for (int i = 1; i < 4; ++i)
		{
			nearest = minimum(nearest, eye.distance(corners[i]));
		}
		nearest = maximum(nearest, 0.0001f);

		float fScale = mCreator->getLodErrorScale();
		float fLimit = mCreator->getRatioLimit();

		// Step up through the lods until the scaled variance over distance
		// drops within the limit, or we hit the last level.
		for (mCurrentLod = 0; ; )
		{
			float vRatio = (mErrorMetrics[mCurrentLod] * fScale) / nearest;
			if (vRatio > fLimit && mCurrentLod + 1 < TOTAL_DETAIL_LEVELS)
			{
				++mCurrentLod;
			}
			else
			{
				break;
			}
		}
	}
示例#4
0
// Forward wheel events, translated into view-space coordinates, via the
// wheel() signal. Events are dropped while the input bounds are unset.
void	ossimGui::ImageScrollWidget::wheelEvent ( QWheelEvent * e )
{
   // Intentionally not forwarded to QScrollArea::wheelEvent(e).
   if(m_inputBounds.hasNans())
   {
      return;
   }

   // view point = input-bounds origin + scroll origin + event position
   ossimIpt ul = m_inputBounds.ul();
   ossimIpt viewPt(m_scrollOrigin.x + e->x() + ul.x,
                   m_scrollOrigin.y + e->y() + ul.y);

   emit wheel(e, viewportBoundsInViewSpace(), viewPt);
}
示例#5
0
// Handle a mouse press: record the active point, save the measured point
// (in view and, when possible, true image coordinates), trigger a repaint,
// and re-emit the event in view space.
void	ossimGui::ImageScrollWidget::mousePressEvent ( QMouseEvent * e )
{
   QScrollArea::mousePressEvent(e);

   m_activePointStart = e->pos();
   m_activePointEnd   = e->pos();

   if(m_inputBounds.hasNans())
   {
      return;
   }

   // view point = input-bounds origin + scroll origin + event position
   ossimIpt ul = m_inputBounds.ul();
   ossimIpt viewPt(m_scrollOrigin.x + e->x() + ul.x,
                   m_scrollOrigin.y + e->y() + ul.y);

   ossimDrect viewRect = viewportBoundsInViewSpace();

   // Transform the measured point back to true image coordinates when
   // exactly one image<->view transform is found on layer 0's chain.
   ossimImageSource* src = m_layers->layer(0)->chain();
   ossimGui::GatherImageViewProjTransVisitor visitor;
   src->accept(visitor);
   if (visitor.getTransformList().size() == 1)
   {
      ossimRefPtr<IvtGeomTransform> ivtg = visitor.getTransformList()[0].get();
      if (ivtg.valid())
      {
         ivtg->viewToImage(viewPt, m_measImgPoint);
      }
   }

   m_measPoint = viewPt;
   m_drawPts   = true;
   update();

   emit mousePress(e, viewRect, viewPt);
}
示例#6
0
// Handle a mouse release: record the active end point and re-emit the
// event in view-space coordinates via the mouseRelease() signal.
void	ossimGui::ImageScrollWidget::mouseReleaseEvent ( QMouseEvent * e )
{
   QScrollArea::mouseReleaseEvent(e);

   m_activePointEnd = e->pos();

   if(m_inputBounds.hasNans())
   {
      return;
   }

   // view point = input-bounds origin + scroll origin + event position
   ossimIpt ul = m_inputBounds.ul();
   ossimIpt viewPt(m_scrollOrigin.x + e->x() + ul.x,
                   m_scrollOrigin.y + e->y() + ul.y);

   emit mouseRelease(e, viewportBoundsInViewSpace(), viewPt);
}
示例#7
0
// Handle mouse motion: while the left button is held, track the drag end
// point (repainting when multiple layers are shown), then re-emit the
// event in view-space coordinates via the mouseMove() signal.
void	ossimGui::ImageScrollWidget::mouseMoveEvent ( QMouseEvent * e )
{
   QScrollArea::mouseMoveEvent(e);

   if(e->buttons() & Qt::LeftButton)
   {
      m_activePointEnd = e->pos();
      if(m_layers->numberOfLayers() > 1)
      {
         m_widget->update();
      }
   }

   if(m_inputBounds.hasNans())
   {
      return;
   }

   // view point = input-bounds origin + scroll origin + event position
   ossimIpt ul = m_inputBounds.ul();
   ossimIpt viewPt(m_scrollOrigin.x + e->x() + ul.x,
                   m_scrollOrigin.y + e->y() + ul.y);

   emit mouseMove(e, viewportBoundsInViewSpace(), viewPt);
}
示例#8
0
/**
 * Post-process freshly captured sensor data in place, according to the
 * thread's configured options. Each enabled step records its elapsed time
 * in `info` (when non-null).
 *
 * Steps, in order: drop depth (color-only mode), depth undistortion,
 * bilateral filtering of depth, image/depth decimation (with camera-model
 * rescale), horizontal mirroring, stereo-to-depth conversion, and laser
 * scan generation from the depth image.
 *
 * @param dataPtr  sensor data to modify; must not be null.
 * @param info     optional timing/statistics output; may be null.
 */
void CameraThread::postUpdate(SensorData * dataPtr, CameraInfo * info) const
{
	UASSERT(dataPtr!=0);
	SensorData & data = *dataPtr;

	// Color-only mode: discard any captured depth/right image.
	if(_colorOnly && !data.depthRaw().empty())
	{
		data.setDepthOrRightRaw(cv::Mat());
	}

	// Depth undistortion: only applied when the model matches the image size.
	if(_distortionModel && !data.depthRaw().empty())
	{
		UTimer timer;
		if(_distortionModel->getWidth() == data.depthRaw().cols &&
		   _distortionModel->getHeight() == data.depthRaw().rows	)
		{
			cv::Mat depth = data.depthRaw().clone();// make sure we are not modifying data in cached signatures.
			_distortionModel->undistort(depth);
			data.setDepthOrRightRaw(depth);
		}
		else
		{
			// Typo fixed: "dpeth" -> "depth".
			UERROR("Distortion model size is %dx%d but depth image is %dx%d!",
					_distortionModel->getWidth(), _distortionModel->getHeight(),
					data.depthRaw().cols, data.depthRaw().rows);
		}
		if(info) info->timeUndistortDepth = timer.ticks();
	}

	// Bilateral filtering of the depth image.
	if(_bilateralFiltering && !data.depthRaw().empty())
	{
		UTimer timer;
		data.setDepthOrRightRaw(util2d::fastBilateralFiltering(data.depthRaw(), _bilateralSigmaS, _bilateralSigmaR));
		if(info) info->timeBilateralFiltering = timer.ticks();
	}

	// Decimation of image and depth, with the camera models scaled to match.
	// Depth dimensions must divide evenly by the decimation factor.
	if(_imageDecimation>1 && !data.imageRaw().empty())
	{
		UDEBUG("");
		UTimer timer;
		if(!data.depthRaw().empty() &&
		   !(data.depthRaw().rows % _imageDecimation == 0 && data.depthRaw().cols % _imageDecimation == 0))
		{
			UERROR("Decimation of depth images should be exact (decimation=%d, size=(%d,%d))! "
				   "Images won't be resized.", _imageDecimation, data.depthRaw().cols, data.depthRaw().rows);
		}
		else
		{
			data.setImageRaw(util2d::decimate(data.imageRaw(), _imageDecimation));
			data.setDepthOrRightRaw(util2d::decimate(data.depthOrRightRaw(), _imageDecimation));
			std::vector<CameraModel> models = data.cameraModels();
			for(unsigned int i=0; i<models.size(); ++i)
			{
				if(models[i].isValidForProjection())
				{
					models[i] = models[i].scaled(1.0/double(_imageDecimation));
				}
			}
			data.setCameraModels(models);
			StereoCameraModel stereoModel = data.stereoCameraModel();
			if(stereoModel.isValidForProjection())
			{
				stereoModel.scale(1.0/double(_imageDecimation));
				data.setStereoCameraModel(stereoModel);
			}
		}
		if(info) info->timeImageDecimation = timer.ticks();
	}

	// Horizontal mirroring (single RGBD camera only); cx is mirrored too.
	if(_mirroring && !data.imageRaw().empty() && data.cameraModels().size() == 1)
	{
		UDEBUG("");
		UTimer timer;
		cv::Mat tmpRgb;
		cv::flip(data.imageRaw(), tmpRgb, 1);
		data.setImageRaw(tmpRgb);
		UASSERT_MSG(data.cameraModels().size() <= 1 && !data.stereoCameraModel().isValidForProjection(), "Only single RGBD cameras are supported for mirroring.");
		if(data.cameraModels().size() && data.cameraModels()[0].cx())
		{
			// Mirror the principal point: cx' = width - cx.
			CameraModel tmpModel(
					data.cameraModels()[0].fx(),
					data.cameraModels()[0].fy(),
					float(data.imageRaw().cols) - data.cameraModels()[0].cx(),
					data.cameraModels()[0].cy(),
					data.cameraModels()[0].localTransform());
			data.setCameraModel(tmpModel);
		}
		if(!data.depthRaw().empty())
		{
			cv::Mat tmpDepth;
			cv::flip(data.depthRaw(), tmpDepth, 1);
			data.setDepthOrRightRaw(tmpDepth);
		}
		if(info) info->timeMirroring = timer.ticks();
	}

	// Stereo-to-depth: replace the right image with a depth image computed
	// from disparity, and replace the stereo model with a mono camera model.
	if(_stereoToDepth && !data.imageRaw().empty() && data.stereoCameraModel().isValidForProjection() && !data.rightRaw().empty())
	{
		UDEBUG("");
		UTimer timer;
		cv::Mat depth = util2d::depthFromDisparity(
				_stereoDense->computeDisparity(data.imageRaw(), data.rightRaw()),
				data.stereoCameraModel().left().fx(),
				data.stereoCameraModel().baseline());
		// set Tx for stereo bundle adjustment (when used)
		CameraModel model = CameraModel(
				data.stereoCameraModel().left().fx(),
				data.stereoCameraModel().left().fy(),
				data.stereoCameraModel().left().cx(),
				data.stereoCameraModel().left().cy(),
				data.stereoCameraModel().localTransform(),
				-data.stereoCameraModel().baseline()*data.stereoCameraModel().left().fx());
		data.setCameraModel(model);
		data.setDepthOrRightRaw(depth);
		data.setStereoCameraModel(StereoCameraModel());
		if(info) info->timeDisparity = timer.ticks();
	}

	// Scan-from-depth: build a laser scan from the depth image, unless a
	// real laser scan was already captured.
	if(_scanFromDepth &&
		data.cameraModels().size() &&
		data.cameraModels().at(0).isValidForProjection() &&
		!data.depthRaw().empty())
	{
		UDEBUG("");
		if(data.laserScanRaw().empty())
		{
			UASSERT(_scanDecimation >= 1);
			UTimer timer;
			pcl::IndicesPtr validIndices(new std::vector<int>);
			pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = util3d::cloudFromSensorData(
					data,
					_scanDecimation,
					_scanMaxDepth,
					_scanMinDepth,
					validIndices.get());
			float maxPoints = (data.depthRaw().rows/_scanDecimation)*(data.depthRaw().cols/_scanDecimation);
			cv::Mat scan;
			const Transform & baseToScan = data.cameraModels()[0].localTransform();
			if(validIndices->size())
			{
				if(_scanVoxelSize>0.0f)
				{
					// Voxelize and scale the max-points estimate by the kept ratio.
					cloud = util3d::voxelize(cloud, validIndices, _scanVoxelSize);
					float ratio = float(cloud->size()) / float(validIndices->size());
					maxPoints = ratio * maxPoints;
				}
				else if(!cloud->is_dense)
				{
					// Keep only the valid points so the cloud becomes dense.
					pcl::PointCloud<pcl::PointXYZ>::Ptr denseCloud(new pcl::PointCloud<pcl::PointXYZ>);
					pcl::copyPointCloud(*cloud, *validIndices, *denseCloud);
					cloud = denseCloud;
				}

				if(cloud->size())
				{
					if(_scanNormalsK>0)
					{
						// Compute normals oriented toward the sensor position.
						Eigen::Vector3f viewPoint(baseToScan.x(), baseToScan.y(), baseToScan.z());
						pcl::PointCloud<pcl::Normal>::Ptr normals = util3d::computeNormals(cloud, _scanNormalsK, viewPoint);
						pcl::PointCloud<pcl::PointNormal>::Ptr cloudNormals(new pcl::PointCloud<pcl::PointNormal>);
						pcl::concatenateFields(*cloud, *normals, *cloudNormals);
						scan = util3d::laserScanFromPointCloud(*cloudNormals, baseToScan.inverse());
					}
					else
					{
						scan = util3d::laserScanFromPointCloud(*cloud, baseToScan.inverse());
					}
				}
			}
			data.setLaserScanRaw(scan, LaserScanInfo((int)maxPoints, _scanMaxDepth, baseToScan));
			if(info) info->timeScanFromDepth = timer.ticks();
		}
		else
		{
			UWARN("Option to create laser scan from depth image is enabled, but "
				  "there is already a laser scan in the captured sensor data. Scan from "
				  "depth will not be created.");
		}
	}
}