Example #1
0
// ========  GLOBAL CALLBACK FUNCTIONS ========
// Freenect depth-frame callback: copies one raw depth frame into the
// latest CObservation3DRangeScan held by the owning CKinect object.
//  - dev:       the freenect device that produced the frame.
//  - v_depth:   raw frame buffer of 16-bit sensor values.
//  - timestamp: driver timestamp, stored in the observation at the end.
void depth_cb(freenect_device *dev, void *v_depth, uint32_t timestamp)
{
	// Recover the CKinect instance registered as user data for this device:
	CKinect *me = reinterpret_cast<CKinect*>(freenect_get_user(dev));

	// NOTE(review): this queries the *video* mode for a depth frame;
	//  presumably both streams share the same resolution — confirm.
	const freenect_frame_mode mode = freenect_get_current_video_mode(dev);

	const uint16_t *raw = reinterpret_cast<uint16_t *>(v_depth);

	CObservation3DRangeScan &obs = me->internal_latest_obs();
	// Hold the critical section while the shared observation is modified:
	mrpt::synch::CCriticalSectionLocker lock( &me->internal_latest_obs_cs() );

	obs.hasRangeImage  = true;
	obs.range_is_depth = true;

#ifdef KINECT_PROFILE_MEM_ALLOC
	alloc_tim.enter("depth_cb alloc");
#endif

	// May reuse a pooled buffer instead of allocating a fresh one:
	obs.rangeImage_setSize(mode.height, mode.width);

#ifdef KINECT_PROFILE_MEM_ALLOC
	alloc_tim.leave("depth_cb alloc");
#endif

	// Look-up table mapping masked raw sensor values to range values;
	// the full conversion is deferred to getNextObservation().
	const CKinect::TDepth2RangeArray &lut = me->getRawDepth2RangeConversion();
	size_t idx = 0;
	for (int row = 0; row < mode.height; row++)
		for (int col = 0; col < mode.width; col++, idx++)
			obs.rangeImage.coeffRef(row,col) = lut[raw[idx] & KINECT_RANGES_TABLE_MASK];

	me->internal_tim_latest_depth() = timestamp;
}
Example #2
0
// Freenect video-frame callback: decodes the incoming RGB (Bayer) or IR
// frame into the intensity image of the owning CKinect's latest observation.
// NOTE(review): only the beginning of this callback is shown here;
//  "timestamp" is presumably stored into the observation further below.
void rgb_cb(freenect_device *dev, void *img_data, uint32_t timestamp)
{
	// Recover the CKinect instance registered as user data for this device:
	CKinect *obj = reinterpret_cast<CKinect*>(freenect_get_user(dev));
	const freenect_frame_mode frMode = freenect_get_current_video_mode(dev);

	// Update of the timestamps at the end:
	CObservation3DRangeScan &obs = obj->internal_latest_obs();
	// Hold the critical section while the shared observation is modified:
	mrpt::synch::CCriticalSectionLocker lock( &obj->internal_latest_obs_cs() );

#ifdef KINECT_PROFILE_MEM_ALLOC
	alloc_tim.enter("depth_rgb loadFromMemoryBuffer");
#endif

	obs.hasIntensityImage = true;
	if (obj->getVideoChannel()==CKinect::VIDEO_CHANNEL_RGB)
	{
	     // Color image: We asked for Bayer data, so we can decode it ourselves here
	     //  and avoid having to reorder Green<->Red channels, as would be needed with
	     //  the RGB image from freenect.
          obs.intensityImageChannel = mrpt::slam::CObservation3DRangeScan::CH_VISIBLE;
          obs.intensityImage.resize(frMode.width, frMode.height, CH_RGB, true /* origin=top-left */ );

#if MRPT_HAS_OPENCV
#	if MRPT_OPENCV_VERSION_NUM<0x200
		  // Version for VERY OLD OpenCV versions:
		  // Wrap the raw Bayer buffer in an IplImage header (no copy):
		  IplImage *src_img_bayer = cvCreateImageHeader(cvSize(frMode.width,frMode.height),8,1);
		  src_img_bayer->imageDataOrigin = reinterpret_cast<char*>(img_data);
		  src_img_bayer->imageData = src_img_bayer->imageDataOrigin;
		  src_img_bayer->widthStep = frMode.width;

		  IplImage *dst_img_RGB = obs.intensityImage.getAs<IplImage>();

          // Decode Bayer image:
		  cvCvtColor(src_img_bayer, dst_img_RGB, CV_BayerGB2BGR);

#	else
		  // Version for modern OpenCV:
		  // Wrap the raw Bayer buffer in a cv::Mat header (no copy):
          const cv::Mat  src_img_bayer( frMode.height, frMode.width, CV_8UC1, img_data, frMode.width );

          cv::Mat        dst_img_RGB= cv::cvarrToMat( obs.intensityImage.getAs<IplImage>(), false /* dont copy buffers */ );

          // Decode Bayer image:
          cv::cvtColor(src_img_bayer, dst_img_RGB, CV_BayerGB2BGR);
#	endif
#else
     THROW_EXCEPTION("Need building with OpenCV!")
#endif

	}
Example #3
0
/*---------------------------------------------------------------
						render

  Renders this viewport into a render target of the given size
  (in pixels). Handles: viewport placement, buffer clearing,
  "image mode" (a single background image quad), cloned viewports,
  camera/projection setup, object rendering, the optional border,
  and pre/post-render user events.
 ---------------------------------------------------------------*/
void  COpenGLViewport::render( const int render_width, const int render_height  ) const
{
#if MRPT_HAS_OPENGL_GLUT
	// Pointer to the object being rendered, used only for error reporting in
	// the "catch" below. NOTE: it is never reassigned in this function (the
	// per-object iteration happens inside gl_utils::renderSetOfObjects), so
	// it remains NULL here.
	const CRenderizable *it = NULL;
	try
	{
		// Change viewport:
		// -------------------------------------------
		// Each coordinate admits three conventions:
		//  - value>1 : absolute pixel count,
		//  - value<0 : coordinate measured from the opposite border,
		//  - [0,1]   : a fraction of the parent render size.
		const GLint vx = m_view_x>1 ?  GLint(m_view_x) : ( m_view_x<0 ? GLint(render_width+m_view_x)  : GLint( render_width  * m_view_x ) );
		const GLint vy = m_view_y>1 ?  GLint(m_view_y) : ( m_view_y<0 ? GLint(render_height+m_view_y) : GLint( render_height * m_view_y ) );

		GLint vw;
		if (m_view_width>1)  // >1 -> absolute pixels:
			vw = GLint(m_view_width);
		else if (m_view_width<0)
		{  // Negative numbers: Specify the right-side coordinate instead of the width:
			if (m_view_width>=-1)
			     vw = GLint( -render_width * m_view_width - vx +1 );
			else vw = GLint( -m_view_width - vx + 1 );
		}
		else  // A factor:
		{
			vw = GLint( render_width * m_view_width );
		}


		GLint vh;
		if (m_view_height>1)  // >1 -> absolute pixels:
			vh = GLint(m_view_height);
		else if (m_view_height<0)
		{  // Negative numbers: Specify the bottom-side coordinate instead of the height:
			if (m_view_height>=-1)
			     vh = GLint( -render_height * m_view_height - vy + 1);
			else vh = GLint( -m_view_height - vy +1 );
		}
		else  // A factor:
			vh = GLint( render_height * m_view_height );

		glViewport(vx,vy, vw, vh );

		// Clear depth&/color buffers:
		// -------------------------------------------
		m_lastProjMat.viewport_width  = vw;
		m_lastProjMat.viewport_height = vh;

		// Restrict clearing to this viewport's rectangle only:
		glScissor(vx,vy,vw,vh);

		glEnable(GL_SCISSOR_TEST);
		if ( !m_isTransparent )
		{	// Clear color & depth buffers:
			// Save current clear color if we are going to override it:
			GLdouble old_colors[4];
			if (m_custom_backgb_color)
			{
				glGetDoublev(GL_COLOR_CLEAR_VALUE, old_colors );
				glClearColor(m_background_color.R,m_background_color.G,m_background_color.B,m_background_color.A);
			}

			glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT |  GL_ACCUM_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);

			// Restore old clear color:
			if (m_custom_backgb_color)
				glClearColor(old_colors[0],old_colors[1],old_colors[2],old_colors[3]);
		}
		else
		{	// Clear depth buffer only:
			glClear(GL_DEPTH_BUFFER_BIT);
		}
		glDisable(GL_SCISSOR_TEST);

		// If we are in "image mode", rendering is much simpler: just set
		//  ortho projection and render the image quad:
		if (m_isImageView)
		{
#if defined(OPENGLVIEWPORT_ENABLE_TIMEPROFILING)
			glv_timlog.enter("COpenGLViewport::render imageview");
#endif
			// "Image mode" rendering:
			// -----------------------------------
			if (m_imageview_img) // should be ALWAYS true, but just in case!
			{
				// Note: The following code is inspired in the implementations:
				//  - libcvd, by Edward Rosten http://www.edwardrosten.com/cvd/
				//  - PTAM, by Klein & Murray http://www.robots.ox.ac.uk/~gk/PTAM/

				const mrpt::utils::CImage *img = m_imageview_img.pointer();

				const int img_w = img->getWidth();
				const int img_h = img->getHeight();

				if (img_w!=0 && img_h!=0)
				{
					// Prepare an ortho projection:
					glMatrixMode(GL_PROJECTION);
					glLoadIdentity();

					// Need to adjust the aspect ratio?
					const double ratio = vw*img_h/double(vh*img_w);
					double ortho_w = img_w;
					double ortho_h = img_h;
					if (ratio>1)
						ortho_w *= ratio;
					else if (ratio!=0)
						ortho_h /= ratio;

					// NOTE(review): ortho_h is used for the horizontal extent
					//  and ortho_w for the vertical one — looks swapped, but
					//  glPixelZoom below does the actual scaling; confirm
					//  before changing.
					glOrtho(
						-0.5, ortho_h - 0.5,
						ortho_w - 0.5, -0.5,
						-1,1);

					// Prepare raster pos & pixel copy direction in -Y
					// (so the image is drawn with its origin at the top-left):
					glRasterPos2f(-0.5f,-0.5f);
					glPixelZoom(
						 vw / float(ortho_w),
						-vh / float(ortho_h) );

					// Prepare image data types:
					const GLenum img_type = GL_UNSIGNED_BYTE;
					const int nBytesPerPixel = img->isColor() ? 3 : 1;
					const bool is_RGB_order = (!::strcmp(img->getChannelsOrder(),"RGB"));  // Reverse RGB <-> BGR order?
					const GLenum img_format = nBytesPerPixel==3 ? (is_RGB_order ? GL_RGB : GL_BGR): GL_LUMINANCE;

					// Send image data to OpenGL:
					glPixelStorei(GL_UNPACK_ROW_LENGTH,img->getRowStride()/nBytesPerPixel );
					glDrawPixels(
						img_w, img_h,
						img_format, img_type,
						img->get_unsafe(0,0)
						);
					glPixelStorei(GL_UNPACK_ROW_LENGTH,0);  // Reset
					CRenderizable::checkOpenGLError();
				}
			}
			// done.
#if defined(OPENGLVIEWPORT_ENABLE_TIMEPROFILING)
			glv_timlog.leave("COpenGLViewport::render imageview");
#endif
		}
		else
		{
			// Non "image mode" rendering:

			// Set camera:
			// -------------------------------------------
			glMatrixMode(GL_PROJECTION);
			glLoadIdentity();

			const CListOpenGLObjects	*objectsToRender;
			COpenGLViewport		*viewForGetCamera;

			if (m_isCloned)
			{	// Clone: render someone else's objects.
				ASSERT_(m_parent.get()!=NULL);

				COpenGLViewportPtr view = m_parent->getViewport( m_clonedViewport );
				if (!view)
					THROW_EXCEPTION_CUSTOM_MSG1("Cloned viewport '%s' not found in parent COpenGLScene",m_clonedViewport.c_str());

				objectsToRender = &view->m_objects;
				viewForGetCamera = m_isClonedCamera ? view.pointer() : const_cast<COpenGLViewport*>(this);
			}
			else
			{	// Normal case: render our own objects:
				objectsToRender = &m_objects;
				viewForGetCamera = const_cast<COpenGLViewport*>(this);
			}

			// Get camera:
			// 1st: if there is a CCamera in the scene:
			CRenderizablePtr cam_ptr = viewForGetCamera->getByClass<CCamera>();

			CCamera *myCamera=NULL;
			if (cam_ptr.present())
			{
				myCamera = getAs<CCamera>(cam_ptr);
			}

			// 2nd: the internal camera of all viewports:
			if (!myCamera)
				myCamera = &viewForGetCamera->m_camera;

			// NOTE(review): this asserts on m_camera, even when myCamera
			//  points to a CCamera from the scene — confirm intentional.
			ASSERT_(m_camera.m_distanceZoom>0);


			m_lastProjMat.azimuth = DEG2RAD(myCamera->m_azimuthDeg);
			m_lastProjMat.elev    = DEG2RAD(myCamera->m_elevationDeg);

			// Eye point from spherical coords around the pointing-at point
			// (zoom distance clamped to a minimum of 0.01):
			m_lastProjMat.eye.x = myCamera->m_pointingX +  max(0.01f,myCamera->m_distanceZoom) * cos(m_lastProjMat.azimuth)*cos(m_lastProjMat.elev);
			m_lastProjMat.eye.y = myCamera->m_pointingY +  max(0.01f,myCamera->m_distanceZoom) * sin(m_lastProjMat.azimuth)*cos(m_lastProjMat.elev);
			m_lastProjMat.eye.z = myCamera->m_pointingZ +  max(0.01f,myCamera->m_distanceZoom) * sin(m_lastProjMat.elev);

			// "Up" vector: +Z in general; when looking straight up/down
			// (elevation = +-90 deg) use a horizontal up vector instead:
			if (fabs(fabs(myCamera->m_elevationDeg)-90)>1e-6)
			{
				m_lastProjMat.up.x=0;
				m_lastProjMat.up.y=0;
				m_lastProjMat.up.z=1;
			}
			else
			{
				float sgn = myCamera->m_elevationDeg>0 ? 1:-1;
				m_lastProjMat.up.x = -cos(DEG2RAD(myCamera->m_azimuthDeg))*sgn;
				m_lastProjMat.up.y = -sin(DEG2RAD(myCamera->m_azimuthDeg))*sgn;
				m_lastProjMat.up.z = 0;
			}

			m_lastProjMat.is_projective = myCamera->m_projectiveModel;
			m_lastProjMat.FOV           = myCamera->m_projectiveFOVdeg;
			m_lastProjMat.pointing.x    = myCamera->m_pointingX;
			m_lastProjMat.pointing.y    = myCamera->m_pointingY;
			m_lastProjMat.pointing.z    = myCamera->m_pointingZ;
			m_lastProjMat.zoom    		= myCamera->m_distanceZoom;

			if (myCamera->m_projectiveModel)
			{
				gluPerspective( myCamera->m_projectiveFOVdeg, vw/double(vh),m_clip_min,m_clip_max);
				CRenderizable::checkOpenGLError();
			}
			else
			{
				// Orthographic projection, keeping the viewport aspect ratio:
				const double ratio = vw/double(vh);
				double Ax = myCamera->m_distanceZoom*0.5;
				double Ay = myCamera->m_distanceZoom*0.5;

				if (ratio>1)
					Ax *= ratio;
				else
				{
					if (ratio!=0) Ay /=ratio;
				}

				glOrtho( -Ax,Ax,-Ay,Ay,-0.5*m_clip_max,0.5*m_clip_max);
				CRenderizable::checkOpenGLError();
			}

			if (myCamera->is6DOFMode())
			{
				// In 6DOFMode eye is set viewing towards the direction of the positive Z axis
				// Up is set as Y axis
				mrpt::poses::CPose3D viewDirection,pose,at;
				viewDirection.z(+1);
				pose = myCamera->getPose();
				at = pose + viewDirection;
				gluLookAt(
					pose.x(),
					pose.y(),
					pose.z(),
					at.x(),
					at.y(),
					at.z(),
					pose.getRotationMatrix()(0,1),
					pose.getRotationMatrix()(1,1),
					pose.getRotationMatrix()(2,1)
					);
				CRenderizable::checkOpenGLError();
			}else{
				// This command is common to ortho and perspective:
				gluLookAt(
					m_lastProjMat.eye.x,
					m_lastProjMat.eye.y,
					m_lastProjMat.eye.z,
					m_lastProjMat.pointing.x,
					m_lastProjMat.pointing.y,
					m_lastProjMat.pointing.z,
					m_lastProjMat.up.x,
					m_lastProjMat.up.y,
					m_lastProjMat.up.z);
				CRenderizable::checkOpenGLError();
			}

			// Optional pre-Render user code:
			if (hasSubscribers())
			{
				mrptEventGLPreRender  ev(this);
				this->publishEvent(ev);
			}


			// Global OpenGL settings:
			// ---------------------------------
			glHint(GL_POLYGON_SMOOTH_HINT, m_OpenGL_enablePolygonNicest ? GL_NICEST : GL_FASTEST);

			// Render objects:
			// -------------------------------------------
			glMatrixMode(GL_MODELVIEW);
			glLoadIdentity();

			glEnable(GL_DEPTH_TEST);
			glDepthFunc(GL_LEQUAL); //GL_LESS

			// Setup lights
			// -------------------------------------------
			glEnable( GL_LIGHTING );
			glEnable( GL_COLOR_MATERIAL );
			glColorMaterial( GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE);
			glShadeModel( GL_SMOOTH );
			glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE);

			for (size_t i=0;i<m_lights.size();i++)
				m_lights[i].sendToOpenGL();

			// Render all the objects:
			// -------------------------------------------
			mrpt::opengl::gl_utils::renderSetOfObjects(*objectsToRender);

		} // end of non "image mode" rendering


		// Finally, draw the border:
		// --------------------------------
		if (m_borderWidth>0)
		{
			glLineWidth( 2*m_borderWidth );
			glColor4f(0,0,0,1);
			glDisable(GL_DEPTH_TEST);

			glMatrixMode(GL_MODELVIEW);
			glLoadIdentity();
			glMatrixMode(GL_PROJECTION);
			glLoadIdentity();

			glDisable(GL_LIGHTING);  // Disable lights when drawing lines
			glBegin( GL_LINE_LOOP );
				glVertex2f(-1,-1);
				glVertex2f(-1, 1);
				glVertex2f( 1, 1);
				glVertex2f( 1,-1);
			glEnd();
			glEnable(GL_LIGHTING);  // Restore lights after drawing the border

			glEnable(GL_DEPTH_TEST);
		}

		// Optional post-Render user code:
		if (hasSubscribers())
		{
			mrptEventGLPostRender ev(this);
			this->publishEvent(ev);
		}
	}
	catch(exception &e)
	{
		string		msg;
		// FIX: the condition was inverted ("if (!it)"), which dereferenced
		// "it" exactly when it was NULL. Only report the class name when the
		// pointer is actually set:
		if (it)
				msg = format("Exception while rendering a class '%s'\n%s", it->GetRuntimeClass()->className, e.what() );
		else	msg = format("Exception while rendering:\n%s", e.what() );

		THROW_EXCEPTION(msg);
	}
	catch(...)
	{
		THROW_EXCEPTION("Runtime error!");
	}
#else
	THROW_EXCEPTION("The MRPT has been compiled with MRPT_HAS_OPENGL_GLUT=0! OpenGL functions are not implemented");
#endif
}
Example #4
0
/*---------------------------------------------------------------
							loadTextureInOpenGL

  Uploads m_textureImage (plus m_textureImageAlpha when transparency
  is enabled) as a GL_TEXTURE_2D, binding it to m_glTextureName.
  If the texture was already loaded, it is just re-bound.
  Handles four cases: color/grayscale x with/without alpha channel.
  ---------------------------------------------------------------*/
void CTexturedObject::loadTextureInOpenGL() const
{
#if MRPT_HAS_OPENGL_GLUT
	// Scratch buffer used when pixels must be interleaved with alpha;
	// dataAligned points into "data" (see reserveDataBuffer).
	unsigned char* dataAligned = nullptr;
	vector<unsigned char> data;

#ifdef TEXTUREOBJ_PROFILE_MEM_ALLOC
	static mrpt::utils::CTimeLogger tim;
#endif

	try
	{
		// Fast path: already uploaded, just bind it again.
		if (m_texture_is_loaded)
		{
			glBindTexture(GL_TEXTURE_2D, m_glTextureName);
			checkOpenGLError();
			return;
		}

		// Reserve the new one --------------------------

		// allocate texture names:
		m_glTextureName = getNewTextureNumber();

		// select our current texture
		glBindTexture(GL_TEXTURE_2D, m_glTextureName);
		checkOpenGLError();

		// when texture area is small, linear interpolation. Default is
		// GL_LINEAR_MIPMAP_NEAREST but we
		// are not building mipmaps.
		//  See also:
		//  http://www.opengl.org/discussion_boards/ubbthreads.php?ubb=showflat&Number=133116&page=1
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
		checkOpenGLError();

		// when texture area is large, NEAREST: this is mainly thinking of
		// rendering
		//  occupancy grid maps, such as we want those "big pixels" to be
		//  clearly visible ;-)
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
		checkOpenGLError();

		// if wrap is true, the texture wraps over at the edges (repeat)
		//       ... false, the texture ends at the edges (clamp)
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
		checkOpenGLError();

		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
		checkOpenGLError();

		// Assure that the images do not overpass the maximum dimensions allowed
		// by OpenGL:
		// ------------------------------------------------------------------------------------
		// Halve the image(s) repeatedly until both dimensions fit:
		GLint texSize;
		glGetIntegerv(GL_MAX_TEXTURE_SIZE, &texSize);
		while (m_textureImage.getHeight() > (unsigned int)texSize ||
			   m_textureImage.getWidth() > (unsigned int)texSize)
		{
			m_textureImage = m_textureImage.scaleHalf();
			m_textureImageAlpha = m_textureImageAlpha.scaleHalf();
		}

		const int width = m_textureImage.getWidth();
		const int height = m_textureImage.getHeight();

#ifdef TEXTUREOBJ_PROFILE_MEM_ALLOC
		{
			const std::string sSec = mrpt::format(
				"opengl_texture: load %ix%i %s %stransp", width, height,
				m_textureImage.isColor() ? "RGB" : "BW",
				m_enableTransparency ? "" : "no ");
			tim.enter(sSec.c_str());
		}
#endif

		r_width = width;  // round2up( width );
		r_height = height;  // round2up( height );

		// Padding pixels: zero here since r_width/r_height equal the image size.
		m_pad_x_right = (r_width - width);
		m_pad_y_bottom = (r_height - height);

		if (m_enableTransparency)
		{
			// Alpha channel must be a grayscale image of the same size:
			ASSERT_(!m_textureImageAlpha.isColor());
			ASSERT_(
				m_textureImageAlpha.getWidth() == m_textureImage.getWidth());
			ASSERT_(
				m_textureImageAlpha.getHeight() == m_textureImage.getHeight());
		}

		if (m_textureImage.isColor())
		{
			// Color texture:
			if (m_enableTransparency)
			{
// Color texture WITH trans.
// --------------------------------------
#ifdef TEXTUREOBJ_PROFILE_MEM_ALLOC
				const std::string sSec = mrpt::format(
					"opengl_texture_alloc %ix%i (color,trans)", width, height);
				tim.enter(sSec.c_str());
#endif

				// 4 bytes/pixel (RGB+A) plus slack for alignment:
				dataAligned = reserveDataBuffer(height * width * 4 + 512, data);

#ifdef TEXTUREOBJ_PROFILE_MEM_ALLOC
				tim.leave(sSec.c_str());
#endif

				// Interleave color and alpha rows into RGBA quadruplets:
				for (int y = 0; y < height; y++)
				{
					unsigned char* ptrSrcCol = m_textureImage(0, y, 0);
					unsigned char* ptrSrcAlfa = m_textureImageAlpha(0, y);
					unsigned char* ptr = dataAligned + y * width * 4;

					for (int x = 0; x < width; x++)
					{
						*ptr++ = *ptrSrcCol++;
						*ptr++ = *ptrSrcCol++;
						*ptr++ = *ptrSrcCol++;
						*ptr++ = *ptrSrcAlfa++;
					}
				}

				// Prepare image data types:
				const GLenum img_type = GL_UNSIGNED_BYTE;
				const bool is_RGB_order =
					(!::strcmp(
						m_textureImage.getChannelsOrder(),
						"RGB"));  // Reverse RGB <-> BGR order?
				const GLenum img_format = (is_RGB_order ? GL_RGBA : GL_BGRA);

				// Send image data to OpenGL:
				glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
				glPixelStorei(GL_UNPACK_ROW_LENGTH, width);
				glTexImage2D(
					GL_TEXTURE_2D, 0 /*level*/, 4 /* RGBA components */, width,
					height, 0 /*border*/, img_format, img_type, dataAligned);
				checkOpenGLError();
				glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);  // Reset

				// No need to hide a fill border:
				m_pad_x_right = 0;
				m_pad_y_bottom = 0;

			}  // End of color texture WITH trans.
			else
			{
				// Color texture WITHOUT trans.
				// --------------------------------------
				// Prepare image data types:
				const GLenum img_type = GL_UNSIGNED_BYTE;
				const int nBytesPerPixel = m_textureImage.isColor() ? 3 : 1;
				const bool is_RGB_order =
					(!::strcmp(
						m_textureImage.getChannelsOrder(),
						"RGB"));  // Reverse RGB <-> BGR order?
				const GLenum img_format = nBytesPerPixel == 3
											  ? (is_RGB_order ? GL_RGB : GL_BGR)
											  : GL_LUMINANCE;

				// Send image data to OpenGL:
				// (no copy needed: upload straight from the image buffer)
				glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
				glPixelStorei(
					GL_UNPACK_ROW_LENGTH,
					m_textureImage.getRowStride() / nBytesPerPixel);
				glTexImage2D(
					GL_TEXTURE_2D, 0 /*level*/, 3 /* RGB components */, width,
					height, 0 /*border*/, img_format, img_type,
					m_textureImage.get_unsafe(0, 0));
				// NOTE(review): this is the only branch without a
				//  checkOpenGLError() after glTexImage2D — confirm intentional.
				glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);  // Reset

				// No need to hide a fill border:
				m_pad_x_right = 0;
				m_pad_y_bottom = 0;

			}  // End of color texture WITHOUT trans.
		}
		else
		{
			// Gray-scale texture:
			if (m_enableTransparency)
			{
#ifdef TEXTUREOBJ_PROFILE_MEM_ALLOC
				const std::string sSec = mrpt::format(
					"opengl_texture_alloc %ix%i (gray,transp)", width, height);
				tim.enter(sSec.c_str());
#endif

				// 2 bytes/pixel (luminance+alpha) plus slack for alignment:
				dataAligned =
					reserveDataBuffer(height * width * 2 + 1024, data);

#ifdef TEXTUREOBJ_PROFILE_MEM_ALLOC
				tim.leave(sSec.c_str());
#endif

				// Interleave gray and alpha rows into LA pairs:
				for (int y = 0; y < height; y++)
				{
					unsigned char* ptrSrcCol = m_textureImage(0, y);
					unsigned char* ptrSrcAlfa = m_textureImageAlpha(0, y);
					unsigned char* ptr = dataAligned + y * width * 2;
					for (int x = 0; x < width; x++)
					{
						*ptr++ = *ptrSrcCol++;
						*ptr++ = *ptrSrcAlfa++;
					}
				}

				// Prepare image data types:
				const GLenum img_type = GL_UNSIGNED_BYTE;
				const GLenum img_format = GL_LUMINANCE_ALPHA;

				// Send image data to OpenGL:
				glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
				glPixelStorei(GL_UNPACK_ROW_LENGTH, width);
				glTexImage2D(
					GL_TEXTURE_2D, 0 /*level*/, 2 /* LUMINANCE+ALPHA components */, width,
					height, 0 /*border*/, img_format, img_type, dataAligned);
				checkOpenGLError();
				glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);  // Reset

				// No need to hide a fill border:
				m_pad_x_right = 0;
				m_pad_y_bottom = 0;

			}  // End of gray-scale texture WITH trans.
			else
			{
				// Prepare image data types:
				const GLenum img_type = GL_UNSIGNED_BYTE;
				const GLenum img_format = GL_LUMINANCE;

				// Send image data to OpenGL:
				// (no copy needed: upload straight from the image buffer)
				glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
				glPixelStorei(
					GL_UNPACK_ROW_LENGTH, m_textureImage.getRowStride());
				glTexImage2D(
					GL_TEXTURE_2D, 0 /*level*/, 1 /* LUMINANCE component */, width,
					height, 0 /*border*/, img_format, img_type,
					m_textureImage(0, 0));
				checkOpenGLError();
				glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);  // Reset

				// No need to hide a fill border:
				m_pad_x_right = 0;
				m_pad_y_bottom = 0;

			}  // End of gray-scale texture WITHOUT trans.
		}

		m_texture_is_loaded = true;

#ifdef TEXTUREOBJ_PROFILE_MEM_ALLOC
		{
			const std::string sSec = mrpt::format(
				"opengl_texture: load %ix%i %s %stransp", width, height,
				m_textureImage.isColor() ? "RGB" : "BW",
				m_enableTransparency ? "" : "no ");
			tim.leave(sSec.c_str());
		}
#endif

#ifdef TEXTUREOBJ_USE_MEMPOOL
		// Before freeing the buffer in "data", donate my memory to the pool:
		if (!data.empty())
		{
			TMyMemPool* pool = TMyMemPool::getInstance();
			if (pool)
			{
				CTexturedObject_MemPoolParams mem_params;
				mem_params.len = data.size();

				CTexturedObject_MemPoolData* mem_block =
					new CTexturedObject_MemPoolData();
				data.swap(mem_block->data);

				pool->dump_to_pool(mem_params, mem_block);
			}
		}
#endif
	}
	catch (exception& e)
	{
		THROW_EXCEPTION(
			format("m_glTextureName=%i\n%s", m_glTextureName, e.what()));
	}
	catch (...)
	{
		THROW_EXCEPTION("Runtime error!");
	}
#endif
}
Example #5
0
// ------------------------------------------------------
//				Test_KinectOnlineOffline
//
// Spawns the grabbing thread (live sensor when is_online==true,
// otherwise replaying rawlog_file), then runs a GUI loop that shows
// the RGB image in a sub-viewport and the 3D point cloud in a
// CDisplayWindow3D, until ESC is pressed or the window is closed.
//  - is_online:   true = grab from a real Kinect; false = replay a rawlog.
//  - rawlog_file: dataset to replay when offline (default: empty).
// ------------------------------------------------------
void Test_KinectOnlineOffline(bool is_online, const string &rawlog_file = string())
{
	// Launch grabbing thread:
	// --------------------------------------------------------
	TThreadParam thrPar(
		is_online,
		rawlog_file,
		false // generate_3D_pointcloud_in_this_thread -> Don't, we'll do it in this main thread.
		);

	mrpt::system::TThreadHandle thHandle= mrpt::system::createThreadRef(thread_grabbing ,thrPar);

	// Wait until data stream starts so we can say for sure the sensor has been initialized OK:
	cout << "Waiting for sensor initialization...\n";
	do {
		CObservation3DRangeScanPtr newObs = thrPar.new_obs.get();
		if (newObs && newObs->timestamp!=INVALID_TIMESTAMP)
				break;
		else 	mrpt::system::sleep(10);
	} while (!thrPar.quit);

	// Check error condition: the grabbing thread sets "quit" on failure.
	if (thrPar.quit) return;
	cout << "OK! Sensor started to emit observations.\n";

	// Create window and prepare OpenGL object in the scene:
	// --------------------------------------------------------
	mrpt::gui::CDisplayWindow3D  win3D("Kinect 3D view",800,600);

	win3D.setCameraAzimuthDeg(140);
	win3D.setCameraElevationDeg(20);
	win3D.setCameraZoom(8.0);
	win3D.setFOV(90);
	win3D.setCameraPointingToPoint(2.5,0,0);

	mrpt::opengl::CPointCloudColouredPtr gl_points = mrpt::opengl::CPointCloudColoured::Create();
	gl_points->setPointSize(2.5);

	opengl::COpenGLViewportPtr viewInt; // Extra viewports for the RGB images.
	{
		// Lock the scene while inserting objects:
		mrpt::opengl::COpenGLScenePtr &scene = win3D.get3DSceneAndLock();

		// Create the Opengl object for the point cloud:
		scene->insert( gl_points );
		scene->insert( mrpt::opengl::CGridPlaneXY::Create() );
		scene->insert( mrpt::opengl::stock_objects::CornerXYZ() );

		// Assumes 640x480 sensor frames — TODO confirm for other modes.
		const double aspect_ratio =  480.0 / 640.0;
		const int VW_WIDTH = 400;	// Size of the viewport into the window, in pixel units.
		const int VW_HEIGHT = aspect_ratio*VW_WIDTH;

		// Create an extra opengl viewport for the RGB image:
		viewInt = scene->createViewport("view2d_int");
		viewInt->setViewportPosition(5, 30, VW_WIDTH,VW_HEIGHT );
		win3D.addTextMessage(10, 30+VW_HEIGHT+10,"Intensity data",TColorf(1,1,1), 2, MRPT_GLUT_BITMAP_HELVETICA_12 );

		win3D.addTextMessage(5,5,
			format("'o'/'i'-zoom out/in, ESC: quit"),
				TColorf(0,0,1), 110, MRPT_GLUT_BITMAP_HELVETICA_18 );


		win3D.unlockAccess3DScene();
		win3D.repaint();
	}



	// Timestamp of the last observation shown, to detect new frames:
	mrpt::system::TTimeStamp  last_obs_tim = INVALID_TIMESTAMP;

	// Main GUI loop:
	while (win3D.isOpen() && !thrPar.quit)
	{
		CObservation3DRangeScanPtr newObs = thrPar.new_obs.get();
		if (newObs && newObs->timestamp!=INVALID_TIMESTAMP &&
			newObs->timestamp!=last_obs_tim )
		{
			// It IS a new observation:
			last_obs_tim = newObs->timestamp;

			// Update visualization ---------------------------------------

			win3D.get3DSceneAndLock();

			// Estimated grabbing rate:
			win3D.addTextMessage(-350,-13, format("Timestamp: %s", mrpt::system::dateTimeLocalToString(last_obs_tim).c_str()), TColorf(0.6,0.6,0.6),"mono",10,mrpt::opengl::FILL, 100);
			win3D.addTextMessage(-100,-30, format("%.02f Hz", thrPar.Hz ), TColorf(1,1,1),"mono",10,mrpt::opengl::FILL, 101);

			// Show intensity image:
			if (newObs->hasIntensityImage )
			{
				viewInt->setImageView(newObs->intensityImage); // This is not "_fast" since the intensity image may be needed later on.
			}
			win3D.unlockAccess3DScene();

			// -------------------------------------------------------
			//           Create 3D points from RGB+D data
			//
			// There are several methods to do this.
			//  Switch the #if's to select among the options:
			// See also: http://www.mrpt.org/Generating_3D_point_clouds_from_RGB_D_observations
			// -------------------------------------------------------
			if (newObs->hasRangeImage)
			{
				static mrpt::utils::CTimeLogger logger;
				logger.enter("RGBD->3D");

// Pathway: RGB+D --> PCL <PointXYZ> --> XYZ opengl
#if 0
				static pcl::PointCloud<pcl::PointXYZ> cloud;
				logger.enter("RGBD->3D.projectInto");
				newObs->project3DPointsFromDepthImageInto(cloud, false /* without obs.sensorPose */);
				logger.leave("RGBD->3D.projectInto");

				win3D.get3DSceneAndLock();
				logger.enter("RGBD->3D.load in OpenGL");
					gl_points->loadFromPointsMap(&cloud);
				logger.leave("RGBD->3D.load in OpenGL");
				win3D.unlockAccess3DScene();
#endif

// Pathway: RGB+D --> PCL <PointXYZRGB> --> XYZ+RGB opengl
#if 0
				static pcl::PointCloud<pcl::PointXYZRGB> cloud;
				logger.enter("RGBD->3D.projectInto");
				newObs->project3DPointsFromDepthImageInto(cloud, false /* without obs.sensorPose */);
				logger.leave("RGBD->3D.projectInto");

				win3D.get3DSceneAndLock();
				logger.enter("RGBD->3D.load in OpenGL");
					gl_points->loadFromPointsMap(&cloud);
				logger.leave("RGBD->3D.load in OpenGL");
				win3D.unlockAccess3DScene();
#endif

// Pathway: RGB+D --> XYZ+RGB opengl  (the enabled/default option)
#if 1
				win3D.get3DSceneAndLock();
				logger.enter("RGBD->3D.projectInto");
					newObs->project3DPointsFromDepthImageInto(*gl_points, false /* without obs.sensorPose */);
				logger.leave("RGBD->3D.projectInto");
				win3D.unlockAccess3DScene();
#endif

// Pathway: RGB+D --> XYZ+RGB opengl (With a 6D global pose of the robot)
#if 0
				const CPose3D globalPose(1,2,3,DEG2RAD(10),DEG2RAD(20),DEG2RAD(30));
				win3D.get3DSceneAndLock();
				logger.enter("RGBD->3D.projectInto");
					newObs->project3DPointsFromDepthImageInto(*gl_points, false /* without obs.sensorPose */, &globalPose);
				logger.leave("RGBD->3D.projectInto");
				win3D.unlockAccess3DScene();
#endif

// Pathway: RGB+D --> internal local XYZ pointcloud --> XYZ+RGB point cloud map --> XYZ+RGB opengl
#if 0
				// Project 3D points:
				if (!newObs->hasPoints3D)
				{
				logger.enter("RGBD->3D.projectInto");
					newObs->project3DPointsFromDepthImage();
				logger.leave("RGBD->3D.projectInto");
				}

				CColouredPointsMap pntsMap;
				pntsMap.colorScheme.scheme = CColouredPointsMap::cmFromIntensityImage;
				pntsMap.loadFromRangeScan(*newObs);

				win3D.get3DSceneAndLock();
				logger.enter("RGBD->3D.load in OpenGL");
					gl_points->loadFromPointsMap(&pntsMap);
				logger.leave("RGBD->3D.load in OpenGL");
				win3D.unlockAccess3DScene();
#endif

				logger.leave("RGBD->3D");
			}

			win3D.repaint();
		} // end update visualization:


		// Process possible keyboard commands:
		// --------------------------------------
		if (win3D.keyHit())
		{
			const int key = tolower( win3D.getPushedKey() );

			switch(key)
			{
				// Some of the keys are processed in this thread:
				case 'o':
					win3D.setCameraZoom( win3D.getCameraZoom() * 1.2 );
					win3D.repaint();
					break;
				case 'i':
					win3D.setCameraZoom( win3D.getCameraZoom() / 1.2 );
					win3D.repaint();
					break;
				case 27: // ESC
					thrPar.quit = true;
					break;
				default:
					break;
			};
		}

		// Yield the CPU briefly between GUI iterations:
		mrpt::system::sleep(1);
	}


	// Signal the grabbing thread to stop and wait for it:
	cout << "Waiting for grabbing thread to exit...\n";
	thrPar.quit = true;
	mrpt::system::joinThread(thHandle);
	cout << "Bye!\n";
}