Example no. 1
HRESULT BinaryDumpReader::processDepth()
{
	if(m_CurrFrame >= m_NumFrames)
	{
		GlobalAppState::get().s_playData = false;
		std::cout << "binary dump sequence complete - press space to run again" << std::endl;
		m_CurrFrame = 0;
	}

	if(GlobalAppState::get().s_playData) {

		// copy the pre-loaded depth frame for the current index into the ring buffer
		float* depth = getDepthFloat();
		memcpy(depth, m_data.m_DepthImages[m_CurrFrame], sizeof(float)*getDepthWidth()*getDepthHeight());

		incrementRingbufIdx();

		if (m_bHasColorData) {
			memcpy(m_colorRGBX, m_data.m_ColorImages[m_CurrFrame], sizeof(vec4uc)*getColorWidth()*getColorHeight());
		}

		m_CurrFrame++;
		return S_OK;
	} else {
		return S_FALSE;
	}
}
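
For context, a minimal sketch of how a caller might drive this reader: runPlayback and the loop below are hypothetical, added only to illustrate how the S_OK / S_FALSE return value distinguishes "new frame copied" from "no new frame".

// Hypothetical driver loop (sketch only): poll the reader once per iteration
// and process a frame only when one was actually copied into the ring buffer.
void runPlayback(BinaryDumpReader& reader)
{
	for (;;) {
		HRESULT hr = reader.processDepth();
		if (hr == S_OK) {
			// a new depth (and possibly color) frame is available
			// ... hand it to the rest of the pipeline here ...
		}
		else {
			// S_FALSE: no new frame (playback paused or sequence finished)
			break;
		}
	}
}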
bool SensorDataReader::processDepth()
{
	if (m_currFrame >= m_numFrames)
	{
		GlobalAppState::get().s_playData = false;
		//std::cout << "binary dump sequence complete - press space to run again" << std::endl;
		stopReceivingFrames();
		std::cout << "sensor data sequence complete - stopped receiving frames" << std::endl;
		m_currFrame = 0;
	}

	if (GlobalAppState::get().s_playData) {

		float* depth = getDepthFloat();

		//TODO check why the frame cache is not used?
		ml::SensorData::RGBDFrameCacheRead::FrameState frameState = m_sensorDataCache->getNext();
		//ml::SensorData::RGBDFrameCacheRead::FrameState frameState;
		//frameState.m_colorFrame = m_sensorData->decompressColorAlloc(m_currFrame);
		//frameState.m_depthFrame = m_sensorData->decompressDepthAlloc(m_currFrame);


		// convert the raw 16-bit depth values to meters using the recording's depth shift;
		// a raw value of 0 means "no measurement" and is marked as -infinity
		for (unsigned int i = 0; i < getDepthWidth()*getDepthHeight(); i++) {
			if (frameState.m_depthFrame[i] == 0) depth[i] = -std::numeric_limits<float>::infinity();
			else depth[i] = (float)frameState.m_depthFrame[i] / m_sensorData->m_depthShift;
		}

		incrementRingbufIdx();

		if (m_bHasColorData) {
			for (unsigned int i = 0; i < getColorWidth()*getColorHeight(); i++) {
				m_colorRGBX[i] = vec4uc(frameState.m_colorFrame[i]);
			}
		}
		frameState.free();

		m_currFrame++;
		return true;
	}
	else {
		return false;
	}
}
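
The core of SensorDataReader::processDepth is the conversion from raw 16-bit depth counts to meters via the per-recording depth shift. A standalone sketch of that conversion follows; convertRawDepth and its parameters are hypothetical names used only for illustration.

#include <cstdint>
#include <limits>
#include <vector>

// Sketch: convert raw depth counts to meters.
// 'depthShift' is the scale factor stored with the recording (e.g. 1000 for millimeter data);
// a raw value of 0 is mapped to -infinity to mark missing measurements, matching the reader above.
std::vector<float> convertRawDepth(const std::vector<uint16_t>& raw, float depthShift)
{
	const float INVALID_DEPTH = -std::numeric_limits<float>::infinity();
	std::vector<float> depthMeters(raw.size());
	for (size_t i = 0; i < raw.size(); i++) {
		depthMeters[i] = (raw[i] == 0) ? INVALID_DEPTH : (float)raw[i] / depthShift;
	}
	return depthMeters;
}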
HRESULT PrimeSenseSensor::readDepthAndColor(float* depthFloat, vec4uc* colorRGBX )
{
	HRESULT hr = S_OK;

	int changedIndex;
	openni::Status rc = openni::OpenNI::waitForAnyStream(&m_streams[0], 1, &changedIndex, 0);
	if (rc != openni::STATUS_OK) {
		return S_FALSE;	//no frame available
	}

	rc = openni::OpenNI::waitForAnyStream(&m_streams[1], 1, &changedIndex, 0);
	if (rc != openni::STATUS_OK) {
		return S_FALSE;	//no frame available
	}

	openni::Status sd = m_depthStream.readFrame(&m_depthFrame);
	openni::Status sc = m_colorStream.readFrame(&m_colorFrame);

	assert(m_colorFrame.getWidth() == m_depthFrame.getWidth());
	assert(m_colorFrame.getHeight() == m_depthFrame.getHeight());

	const openni::DepthPixel* pDepth = (const openni::DepthPixel*)m_depthFrame.getData();
	const openni::RGB888Pixel* pImage = (const openni::RGB888Pixel*)m_colorFrame.getData();

	// check if we need to draw depth frame to texture
	if (m_depthFrame.isValid() && m_colorFrame.isValid())
	{	
		unsigned int width   = m_depthFrame.getWidth();
		unsigned int nPixels = m_depthFrame.getWidth()*m_depthFrame.getHeight();

		// mirror the depth image horizontally and convert millimeters to meters,
		// keeping only values inside the configured sensor depth range
		for (unsigned int i = 0; i < nPixels; i++)	{
			const int x = i%width;
			const int y = i/width;
			const int src = y*width + (width-1-x);	//x-flip
			const openni::DepthPixel& p = pDepth[src];

			float dF = (float)p*0.001f;
			if (dF >= GlobalAppState::get().s_sensorDepthMin && dF <= GlobalAppState::get().s_sensorDepthMax)
				depthFloat[i] = dF;
			else
				depthFloat[i] = -std::numeric_limits<float>::infinity();
		}
		incrementRingbufIdx();
	}

	// check if we need to draw the color frame to texture
	if (m_depthFrame.isValid() && m_colorFrame.isValid())
	{
		int cropX, cropY, cropW, cropH;

		if(m_colorStream.getCropping(&cropX, &cropY, &cropW, &cropH))
		{
			const openni::RGB888Pixel* pImage = (const openni::RGB888Pixel*)m_colorFrame.getData();
			ZeroMemory(m_colorRGBX, sizeof(LONG)*m_colorWidth*m_colorHeight);
			#pragma omp parallel for
			for (int y = 0; y < cropH; ++y)
			{
				for (int x = 0; x < cropW; ++x)
				{
					unsigned int width   = m_colorFrame.getWidth();
					unsigned int height  = m_colorFrame.getHeight();
					int y2 = 0;
					if(m_colorWidth == 1280)	y2 = (y+cropY)-64/2/*-(unsigned int)(((float)(y+cropY)/((float)(height-1)))*64+0.5f)*/-cropY;
					else						y2 = y;
				
					if(y2 >= 0 && y2 < (int)height)
					{
						const openni::RGB888Pixel& pixel = pImage[(y2)*cropW+(cropW - x - 1)];

						unsigned int c = 0;
						c |= pixel.r;
						c <<= 8;
						c |= pixel.g;
						c <<= 8;
						c |= pixel.b;
						c |= 0xFF000000;

						((LONG*)m_colorRGBX)[(cropY+y)*m_colorWidth+(cropX+x)] = c;
					}
				}
			}
		}
		else
		{
			unsigned int width   = m_colorFrame.getWidth();
			unsigned int height  = m_colorFrame.getHeight();
			unsigned int nPixels = m_colorFrame.getWidth()*m_colorFrame.getHeight();

			for(unsigned int i = 0; i<nPixels; i++)
			{
				const int x = i%width;
				const int y = i/width;

				int y2 = 0;
				if(m_colorWidth == 1280)	y2 = y+64/2-10-(unsigned int)(((float)y/((float)(height-1)))*64+0.5f);
				else						y2 = y;

				if(y2 >= 0 && y2 < (int)height)
				{
					//unsigned int Index1D = y2*width+x;
					unsigned int Index1D = y2*width+ (width-1-x);	//x-flip here

					const openni::RGB888Pixel& pixel = pImage[Index1D];

					unsigned int c = 0;
					c |= pixel.r;
					c <<= 8;
					c |= pixel.g;
					c <<= 8;
					c |= pixel.b;
					c |= 0xFF000000;

					((LONG*)colorRGBX)[y*width+x] = c;
				}
			}
		}
	}


	return hr;
}
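
Both color paths in readDepthAndColor pack an openni::RGB888Pixel into a 32-bit value with an opaque alpha channel. A small sketch of that packing as an isolated helper (packColorARGB is a hypothetical name, not part of the original code):

#include <cstdint>

// Sketch: pack 8-bit R, G, B into one 32-bit pixel with opaque alpha,
// reproducing the shift/OR sequence used in readDepthAndColor above
// (result layout: 0xAARRGGBB, i.e. BGRA byte order on a little-endian machine).
inline uint32_t packColorARGB(uint8_t r, uint8_t g, uint8_t b)
{
	uint32_t c = 0;
	c |= r;
	c <<= 8;
	c |= g;
	c <<= 8;
	c |= b;
	c |= 0xFF000000u;
	return c;
}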