Example no. 1
	void Mainloop()
	{
		int frameId = 1;
		int xdir = 1;
		int ydir = 1;
		struct {int x, y;} center = {0,0};
		while (m_running)
		{
//			printf("Tick");
			OniFrame* pFrame = getServices().acquireFrame();

			if (pFrame == NULL) {printf("Didn't get frame...\n"); continue;}

			// Fill frame
			xnOSMemSet(pFrame->data, 0, pFrame->dataSize);

			OniDepthPixel* pDepth = (OniDepthPixel*)pFrame->data;

			for (int y1 = XN_MAX(center.y-10, 0); y1 < XN_MIN(center.y+10, OZ_RESOLUTION_Y); ++y1)
				for (int x1 = XN_MAX(center.x-10, 0); x1 < XN_MIN(center.x+10, OZ_RESOLUTION_X); ++x1)
					if ((x1-center.x)*(x1-center.x)+(y1-center.y)*(y1-center.y) < 70)
						pDepth[singleRes(x1, y1)] = OniDepthPixel(1000+(x1-y1)*3);

//			pDepth[singleRes(center.x, center.y)] = 1000;

			center.x += xdir;
			center.y += ydir;

			if (center.x < abs(xdir) || center.x > OZ_RESOLUTION_X-1-abs(xdir)) xdir*=-1;
			if (center.y < abs(ydir) || center.y > OZ_RESOLUTION_Y-1-abs(ydir)) ydir*=-1;

			for (int i = 0; i < OZ_RESOLUTION_X; ++i) pDepth[i] = 2000;
			pDepth[0] = 2000;

			// Fill metadata
			pFrame->frameIndex = frameId;

			pFrame->videoMode.pixelFormat = ONI_PIXEL_FORMAT_DEPTH_1_MM;
			pFrame->videoMode.resolutionX = OZ_RESOLUTION_X;
			pFrame->videoMode.resolutionY = OZ_RESOLUTION_Y;
			pFrame->videoMode.fps = 30;

			pFrame->width = OZ_RESOLUTION_X;
			pFrame->height = OZ_RESOLUTION_Y;

			pFrame->cropOriginX = pFrame->cropOriginY = 0;
			pFrame->croppingEnabled = FALSE;

			pFrame->sensorType = ONI_SENSOR_DEPTH;
			pFrame->stride = OZ_RESOLUTION_X*sizeof(OniDepthPixel);
			pFrame->timestamp = frameId*33000;

			raiseNewFrame(pFrame);
			getServices().releaseFrame(pFrame);

			frameId++;

			xnOSSleep(33);
		}
	}
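Both dot-bouncing loops in this collection (this one and the color variant in Example no. 19) rely on helpers that are not shown here: the OZ_RESOLUTION_X/Y constants, the XN_MIN/XN_MAX clamping macros, and a singleRes(x, y) index helper. A minimal sketch of what they are assumed to look like follows; the resolution values and macro bodies are illustrative, not copied from the original driver.

// Assumed helper definitions (sketch only; resolution values are illustrative)
#define OZ_RESOLUTION_X 320
#define OZ_RESOLUTION_Y 240

#define XN_MIN(a,b) ((a) < (b) ? (a) : (b))
#define XN_MAX(a,b) ((a) > (b) ? (a) : (b))

// Maps an (x, y) coordinate to a linear index into a row-major frame buffer,
// consistent with the stride of OZ_RESOLUTION_X pixels used above.
inline int singleRes(int x, int y) { return y * OZ_RESOLUTION_X + x; }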
Example no. 2
void mouseInputCallSelection()
{
	if (g_MouseInput.pSelectionCallback != NULL)
	{
		UIntRect selection;
		selection.uBottom = XN_MIN(g_MouseInput.StartSelection.Y, g_MouseInput.LastLocation.Y);
		selection.uTop = XN_MAX(g_MouseInput.StartSelection.Y, g_MouseInput.LastLocation.Y);
		selection.uLeft = XN_MIN(g_MouseInput.StartSelection.X, g_MouseInput.LastLocation.X);
		selection.uRight = XN_MAX(g_MouseInput.StartSelection.X, g_MouseInput.LastLocation.X);

		g_MouseInput.pSelectionCallback(g_MouseInput.nSelectionState, selection);
	}
}
void XnUncompressedDepthProcessor::ProcessFramePacketChunk(const XnSensorProtocolResponseHeader* pHeader, const XnUChar* pData, XnUInt32 nDataOffset, XnUInt32 nDataSize)
{
	XN_PROFILING_START_SECTION("XnUncompressedDepthProcessor::ProcessFramePacketChunk")

	// when depth is uncompressed, we can just copy it directly to write buffer
	XnBuffer* pWriteBuffer = GetWriteBuffer();

	// make sure we have enough room
	if (CheckWriteBufferForOverflow(nDataSize))
	{
		// sometimes, when packets are lost, we get an uneven number of bytes, so we drop
		// one byte in order to keep UINT16 alignment
		if (nDataSize % 2 != 0)
		{
			nDataSize--;
			pData++;
		}

		// copy values. Make sure we do not get corrupted shifts
		XnUInt16* pRaw = (XnUInt16*)(pData);
		XnUInt16* pRawEnd = (XnUInt16*)(pData + nDataSize);
		XnDepthPixel* pWriteBuf = (XnDepthPixel*)pWriteBuffer->GetUnsafeWritePointer();

		while (pRaw < pRawEnd)
		{
			*pWriteBuf = GetOutput(XN_MIN(*pRaw, XN_DEVICE_SENSOR_MAX_SHIFT_VALUE-1));
			++pRaw;
			++pWriteBuf;
		}

 		pWriteBuffer->UnsafeUpdateSize(nDataSize);
	}

	XN_PROFILING_END_SECTION
}
Example no. 4
void XnVPointDenoiser::OnPointUpdate(const XnVHandPointContext* pContext)
{
	XnVDenoisingLocalContext* pLocalContext = GetLocalContext(pContext->nID);
	if (pLocalContext == NULL)
	{
		return;
	}
	XnVHandPointContext* pDenoisedContext = m_DenoisedHands.GetContext(pContext->nID);
	if (pDenoisedContext == NULL)
	{
		return;
	}

	pDenoisedContext->fTime = pContext->fTime;
	pLocalContext->ptBuffer[pLocalContext->nNextIndex] = pContext->ptPosition;
	pLocalContext->nNextIndex = (pLocalContext->nNextIndex+1)%XNV_SMOOTHER_AVERAGE_SIZE;
	pLocalContext->nCount++;

	XnUInt32 nAverageCount = XN_MIN(XNV_SMOOTHER_AVERAGE_SIZE, pLocalContext->nCount);
	XnV3DVector ptAveragePoint(0, 0, 0);

	for (XnUInt32 i = 0; i < nAverageCount; ++i)
	{
		ptAveragePoint += pLocalContext->ptBuffer[i];
	}
	if (nAverageCount > 0)
	{
		ptAveragePoint /= XnFloat(nAverageCount);
	}

	UpdatePointDenoise(pDenoisedContext->ptPosition, ptAveragePoint);

	m_DenoisedHands.MarkActive(pContext->nID);
} // XnVPointDenoiser::OnPointUpdate
Example no. 5
XnStatus MockMapGenerator::GetSupportedMapOutputModes(XnMapOutputMode aModes[], XnUInt32& nCount)
{
	XN_VALIDATE_PTR(m_pSupportedMapOutputModes, XN_STATUS_PROPERTY_NOT_SET);
	nCount = XN_MIN(nCount, m_nSupportedMapOutputModesCount);
	xnOSMemCopy(aModes, m_pSupportedMapOutputModes, nCount * sizeof(m_pSupportedMapOutputModes[0]));
	return XN_STATUS_OK;
}
XnUInt32 Link12BitS2DParser::ProcessFramePacketChunk(const XnUInt8* pData,XnUInt8* pDest, XnUInt32 nDataSize)
{
	
	XnStatus nRetVal = XN_STATUS_OK;
	XnUInt32 totalRead = 0;
	XnUInt32 totalWrite = 0;

	// check if we have data from previous packet
	if (m_ContinuousBufferSize != 0)
	{
		// fill in to a whole element
		XnUInt32 nReadBytes = XN_MIN(nDataSize, XN_INPUT_ELEMENT_SIZE - m_ContinuousBufferSize);

		xnOSMemCopy(m_ContinuousBuffer + m_ContinuousBufferSize, pData, nReadBytes);
		m_ContinuousBufferSize += nReadBytes;

		pData += nReadBytes;
		nDataSize -= nReadBytes;

		if (m_ContinuousBufferSize == XN_INPUT_ELEMENT_SIZE)
		{
			// process it
			XnUInt32 nActualRead = 0;
			XnUInt32 nActualWritten = 0;
			Unpack12to16(m_ContinuousBuffer, pDest, XN_INPUT_ELEMENT_SIZE, &nActualRead, &nActualWritten);
			pDest += nActualWritten;
			totalRead += nActualRead;
			totalWrite += nActualWritten;
			m_ContinuousBufferSize = 0;
		}
	}

	// find out the number of input elements we have
	XnUInt32 nActualRead = 0;
	XnUInt32 nActualWritten = 0;
	nRetVal = Unpack12to16(pData, pDest, nDataSize, &nActualRead, &nActualWritten);
	totalRead += nActualRead;
	totalWrite += nActualWritten;
	if (nRetVal == XN_STATUS_OK)
	{
		pData += nActualRead;
		nDataSize -= nActualRead;

		// if we have any bytes left, store them for next packet.
		if (nDataSize > 0)
		{
			// no need to check for overflow - there can never be more than XN_INPUT_ELEMENT_SIZE
			// bytes left.

			xnOSMemCopy(m_ContinuousBuffer + m_ContinuousBufferSize, pData, nDataSize);
			m_ContinuousBufferSize += nDataSize;
		}
	}
	return totalWrite; //return total written bytes
}
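Unpack12to16() itself is not part of this snippet. As a rough illustration of the kind of work it does, here is a minimal 12-to-16-bit unpacker assuming the common layout of two 12-bit samples per 3 input bytes; the real function and XN_INPUT_ELEMENT_SIZE may use a different layout, so treat this purely as a sketch.

static XnStatus Unpack12to16Sketch(const XnUInt8* pcInput, XnUInt8* pDest, XnUInt32 nInputSize,
                                   XnUInt32* pnActualRead, XnUInt32* pnActualWritten)
{
	XnUInt32 nElements = nInputSize / 3; // whole input elements only
	XnUInt16* pnOutput = (XnUInt16*)pDest;

	for (XnUInt32 i = 0; i < nElements; ++i, pcInput += 3)
	{
		*pnOutput++ = (XnUInt16)((pcInput[0] << 4) | (pcInput[1] >> 4));   // first sample
		*pnOutput++ = (XnUInt16)(((pcInput[1] & 0x0F) << 8) | pcInput[2]); // second sample
	}

	*pnActualRead = nElements * 3;                                   // bytes consumed
	*pnActualWritten = (XnUInt32)(nElements * 2 * sizeof(XnUInt16)); // bytes produced
	return XN_STATUS_OK;
}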
void XnUncompressedYUVtoRGBImageProcessor::ProcessFramePacketChunk(const XnSensorProtocolResponseHeader* pHeader, const XnUChar* pData, XnUInt32 nDataOffset, XnUInt32 nDataSize)
{
	XN_PROFILING_START_SECTION("XnUncompressedYUVtoRGBImageProcessor::ProcessFramePacketChunk")

	XnBuffer* pWriteBuffer = GetWriteBuffer();

	if (m_ContinuousBuffer.GetSize() != 0)
	{
		// fill in to a whole element
		XnUInt32 nReadBytes = XN_MIN(nDataSize, XN_YUV_TO_RGB_INPUT_ELEMENT_SIZE - m_ContinuousBuffer.GetSize());
		m_ContinuousBuffer.UnsafeWrite(pData, nReadBytes);
		pData += nReadBytes;
		nDataSize -= nReadBytes;

		if (m_ContinuousBuffer.GetSize() == XN_YUV_TO_RGB_INPUT_ELEMENT_SIZE)
		{
			if (CheckWriteBufferForOverflow(XN_YUV_TO_RGB_OUTPUT_ELEMENT_SIZE))
			{
				// process it
				XnUInt32 nActualRead = 0;
				XnUInt32 nOutputSize = pWriteBuffer->GetFreeSpaceInBuffer();
				YUV422ToRGB888(m_ContinuousBuffer.GetData(), pWriteBuffer->GetUnsafeWritePointer(), XN_YUV_TO_RGB_INPUT_ELEMENT_SIZE, &nActualRead, &nOutputSize);
				pWriteBuffer->UnsafeUpdateSize(XN_YUV_TO_RGB_OUTPUT_ELEMENT_SIZE);
			}

			m_ContinuousBuffer.Reset();
		}
	}

	if (CheckWriteBufferForOverflow(nDataSize / XN_YUV_TO_RGB_INPUT_ELEMENT_SIZE * XN_YUV_TO_RGB_OUTPUT_ELEMENT_SIZE))
	{
		XnUInt32 nActualRead = 0;
		XnUInt32 nOutputSize = pWriteBuffer->GetFreeSpaceInBuffer();
		YUV422ToRGB888(pData, pWriteBuffer->GetUnsafeWritePointer(), nDataSize, &nActualRead, &nOutputSize);
		pWriteBuffer->UnsafeUpdateSize(nOutputSize);
		pData += nActualRead;
		nDataSize -= nActualRead;

		// if we have any bytes left, store them for next packet.
		if (nDataSize > 0)
		{
			// no need to check for overflow - there can never be more than
			// XN_YUV_TO_RGB_INPUT_ELEMENT_SIZE bytes left.
			m_ContinuousBuffer.UnsafeWrite(pData, nDataSize);
		}
	}

	XN_PROFILING_END_SECTION
}
void LinkMsgEncoder::EncodeData(const void* pSourceData, XnUInt32 nSize)
{
	XnUInt16 nPacketRemainingSpace = 0; //Remaining space in current packet in each iteration
	XnUInt16 nPacketBytesToCopy = 0; //Number of bytes to copy to current packet in each iteration
	XnUInt32 nBytesLeftToCopy = nSize; //Total number of bytes left to copy
	const XnUInt8* pCurrData = reinterpret_cast<const XnUInt8*>(pSourceData); //Current source data pointer
	while (nBytesLeftToCopy > 0)
	{
		if (m_pCurrPacket->GetSize() == m_nMaxPacketSize)
		{
			//Current packet is full. Move to next packet (this also advances m_pCurrPacket).
			m_pCurrPacketBuffer += m_nMaxPacketSize;
			if (m_pCurrPacketBuffer >= m_pOutputBuffer + m_nBufferSize)
			{
				xnLogError(XN_MASK_LINK, "Msg encoder buffer overrun :( Was about to write to position %u, but buffer size is only %u",
					(m_pCurrPacketBuffer - m_pOutputBuffer), m_nBufferSize);
				XN_ASSERT(FALSE);
				return;
			}
			//Advance packet ID
			m_packetHeader.SetPacketID(m_packetHeader.GetPacketID() + 1);
			/*Copy prepared packet header into destination packet. This also sets m_pCurrPacket->m_nSize to minimum
			  and m_pCurrPacket->m_nFragmentation to XN_LINK_FRAG_MIDDLE.*/
			xnOSMemCopy(m_pCurrPacket, &m_packetHeader, sizeof(m_packetHeader));
			//Increase encoded size for packet header
			m_nEncodedSize += sizeof(m_packetHeader);
		}
		//Calculate remaining space in current packet
		nPacketRemainingSpace = m_nMaxPacketSize - m_pCurrPacket->GetSize();
		//Calculate how many bytes we're copying to the current packet
		nPacketBytesToCopy = static_cast<XnUInt16>(XN_MIN(nPacketRemainingSpace, nBytesLeftToCopy));
		
		/************ Copy data to current packet ********************/
		xnOSMemCopy(m_pCurrPacketBuffer + m_pCurrPacket->GetSize(), 
		            pCurrData,
					nPacketBytesToCopy);
		/*************************************************************/

		//Advance current source data pointer
		pCurrData += nPacketBytesToCopy;
		//Increase encoded size for packet data
		m_nEncodedSize += nPacketBytesToCopy;
		//Increase size of current packet
		m_pCurrPacket->SetSize(m_pCurrPacket->GetSize() + nPacketBytesToCopy);
		//Decrease number of bytes we have left to copy
		nBytesLeftToCopy -= nPacketBytesToCopy;
	}
}
Example no. 9
void XnVSlider1D::Initialize(XnVAxis eAxis, XnBool bDraggable, const XnPoint3D& ptInitialPosition,
							 XnFloat fSliderLength, XnFloat fInitialValue, XnFloat fMinOutput,
							 XnFloat fMaxOutput, XnFloat fOffAxisDetectionAngle,
							 XnFloat fOffAxisDetectionMinimumVelocity)
{
	m_bIsDraggable = bDraggable;
	if (m_pPointBuffer == NULL)
	{
		m_pPointBuffer = XN_NEW(XnVPointBuffer, 100);
	}
	else
	{
		m_pPointBuffer->Reset();
	}

	m_fOffAxisDetectionAngle = fOffAxisDetectionAngle;
	m_fOffAxisDetectionMinimumVelocity = fOffAxisDetectionMinimumVelocity;

	m_eAxis = eAxis;
	m_ptCurrentPosition = ptInitialPosition;
	m_fCurrentOutput = fInitialValue;

	m_fOutputMaximum = fMaxOutput;
	m_fOutputMinimum = fMinOutput;

	switch (m_eAxis)
	{
	case AXIS_X:
		m_fMinOutputMajorAxisPosition = ptInitialPosition.X - fSliderLength * (fInitialValue - fMinOutput) / (fMaxOutput - fMinOutput);
		m_fMaxOutputMajorAxisPosition = ptInitialPosition.X + fSliderLength * (fMaxOutput - fInitialValue) / (fMaxOutput - fMinOutput);
		break;
	case AXIS_Y:
		m_fMinOutputMajorAxisPosition = ptInitialPosition.Y - fSliderLength * (fInitialValue - fMinOutput) / (fMaxOutput - fMinOutput);
		m_fMaxOutputMajorAxisPosition = ptInitialPosition.Y + fSliderLength * (fMaxOutput - fInitialValue) / (fMaxOutput - fMinOutput);
		break;
	case AXIS_Z:
		m_fMinOutputMajorAxisPosition = ptInitialPosition.Z - fSliderLength * (fInitialValue - fMinOutput) / (fMaxOutput - fMinOutput);
		m_fMaxOutputMajorAxisPosition = ptInitialPosition.Z + fSliderLength * (fMaxOutput - fInitialValue) / (fMaxOutput - fMinOutput);
		break;
	default:
		return;
	}

	// clamp current output to valid range - it may not have been valid till now
	m_fCurrentOutput = XN_MIN(XN_MAX(m_fCurrentOutput, m_fOutputMinimum), m_fOutputMaximum);
} // XnVSlider1D::Initialize
Example no. 10
void PlayerDevice::SleepToTimestamp(XnUInt64 nTimeStamp)
{
	XnUInt64 nNow;
	xnOSGetHighResTimeStamp(&nNow);

	m_cs.Lock();

	XnBool bHasTimeReference = TRUE;
	if (!m_bHasTimeReference /*&& (nTimeStamp <= m_nStartTimestamp)*/)
	{
		m_nStartTimestamp = nTimeStamp;
		m_nStartTime = nNow;

		m_bHasTimeReference = TRUE;
		bHasTimeReference = FALSE;
	}

	m_cs.Unlock();

	if (bHasTimeReference && (m_dPlaybackSpeed > 0.0f))
	{
		// check this data timestamp compared to when we started
		XnInt64 nTimestampDiff = nTimeStamp - m_nStartTimestamp;

		// in some recordings, frames are not ordered by timestamp. Make sure this does not break the mechanism
		if (nTimestampDiff > 0)
		{
			XnInt64 nTimeDiff = nNow - m_nStartTime;

			// check if we need to wait some time
			XnInt64 nRequestedTimeDiff = (XnInt64)(nTimestampDiff / m_dPlaybackSpeed);
			if (nTimeDiff < nRequestedTimeDiff)
			{
				XnUInt32 nSleep = XnUInt32((nRequestedTimeDiff - nTimeDiff)/1000);
				nSleep = XN_MIN(nSleep, XN_PLAYBACK_SPEED_SANITY_SLEEP);
				xnOSSleep(nSleep);
			}

			// update reference to current frame (this will handle cases in which application
			// stopped reading frames and continued after a while)
			m_nStartTimestamp = nTimeStamp;
			xnOSGetHighResTimeStamp(&m_nStartTime);
		}
	}
}
Example no. 11
XnStatus XnSensorAudioStream::ReallocBuffer()
{
	XnStatus nRetVal = XN_STATUS_OK;

	if (m_buffer.pAudioBuffer == NULL)
	{
		// we allocate enough for 5 seconds of audio
		XnUInt32 nSampleSize = 2 * 2; // 16-bit per channel (2 bytes) * max number of channels (2)
		XnUInt32 nSamples = 48000 * 5; // max sample rate * number of seconds

		XnUInt32 nMaxBufferSize = nSamples * nSampleSize;

		// find min packet size (so we'll have max packet count)
		XnUInt32 nMinPacketSize = XN_MIN(XN_SENSOR_PROTOCOL_AUDIO_PACKET_SIZE_BULK, XN_SENSOR_PROTOCOL_AUDIO_PACKET_SIZE_ISO);
		XnUInt32 nMaxPacketCount = nMaxBufferSize / nMinPacketSize - 1;

		nRetVal = RequiredSizeProperty().UnsafeUpdateValue(nMaxBufferSize);
		XN_IS_STATUS_OK(nRetVal);

		m_buffer.pAudioPacketsTimestamps = (XnUInt64*)xnOSMallocAligned(sizeof(XnUInt64) * nMaxPacketCount, XN_DEFAULT_MEM_ALIGN);
		m_buffer.pAudioBuffer = (XnUInt8*)xnOSMallocAligned(nMaxBufferSize, XN_DEFAULT_MEM_ALIGN);
		m_buffer.nAudioBufferSize = nMaxBufferSize;
	}

	// calculate current packet size
	m_buffer.nAudioPacketSize = m_nOrigAudioPacketSize;

	if (m_Helper.GetFirmwareVersion() >= XN_SENSOR_FW_VER_5_2 && GetNumberOfChannels() == 1)
	{
		m_buffer.nAudioPacketSize /= 2;
	}

	m_buffer.nAudioBufferNumOfPackets = m_buffer.nAudioBufferSize / m_buffer.nAudioPacketSize;
	m_buffer.nAudioBufferSize = m_buffer.nAudioBufferNumOfPackets * m_buffer.nAudioPacketSize;

	m_header.nPacketCount = m_buffer.nAudioBufferNumOfPackets;
	m_header.nPacketSize = m_buffer.nAudioPacketSize;

	// set read and write indices
	m_buffer.nAudioReadIndex = 0;
	m_buffer.nAudioWriteIndex = 0;

	return (XN_STATUS_OK);
}
XnStatus XnStreamReaderStream::ReadImpl(XnStreamData* pStreamData)
{
	pStreamData->nFrameID = m_pLastData->nFrameID;
	pStreamData->nTimestamp = m_pLastData->nTimestamp;

	if (pStreamData->pInternal->bAllocated)
	{
		// don't take more than required size
		pStreamData->nDataSize = XN_MIN(m_pLastData->nDataSize, GetRequiredDataSize());
		xnOSMemCopy(pStreamData->pData, m_pLastData->pData, pStreamData->nDataSize);
	}
	else
	{
		pStreamData->nDataSize = m_pLastData->nDataSize;
		pStreamData->pData = m_pLastData->pData;
	}

	return (XN_STATUS_OK);
}
void XnPacked11DepthProcessor::ProcessFramePacketChunk(const XnSensorProtocolResponseHeader* /*pHeader*/, const XnUChar* pData, XnUInt32 /*nDataOffset*/, XnUInt32 nDataSize)
{
	XN_PROFILING_START_SECTION("XnPacked11DepthProcessor::ProcessFramePacketChunk")

	XnStatus nRetVal = XN_STATUS_OK;

	// check if we have data from previous packet
	if (m_ContinuousBuffer.GetSize() != 0)
	{
		// fill in to a whole element
		XnUInt32 nReadBytes = XN_MIN(nDataSize, XN_INPUT_ELEMENT_SIZE - m_ContinuousBuffer.GetSize());
		m_ContinuousBuffer.UnsafeWrite(pData, nReadBytes);
		pData += nReadBytes;
		nDataSize -= nReadBytes;

		if (m_ContinuousBuffer.GetSize() == XN_INPUT_ELEMENT_SIZE)
		{
			// process it
			XnUInt32 nActualRead = 0;
			Unpack11to16(m_ContinuousBuffer.GetData(), XN_INPUT_ELEMENT_SIZE, &nActualRead);
			m_ContinuousBuffer.Reset();
		}
	}

	// find out the number of input elements we have
	XnUInt32 nActualRead = 0;
	nRetVal = Unpack11to16(pData, nDataSize, &nActualRead);
	if (nRetVal == XN_STATUS_OK)
	{
		pData += nActualRead;
		nDataSize -= nActualRead;

		// if we have any bytes left, store them for next packet.
		if (nDataSize > 0)
		{
			// no need to check for overflow - there can never be more than XN_INPUT_ELEMENT_SIZE
			// bytes left.
			m_ContinuousBuffer.UnsafeWrite(pData, nDataSize);
		}
	}

	XN_PROFILING_END_SECTION
}
Example no. 14
XnStatus PlayerNode::SeekToFrame(const XnChar* strNodeName, XnInt32 nFrameOffset, XnPlayerSeekOrigin origin)
{
	XnStatus nRetVal = XN_STATUS_OK;
	XnUInt32 nNodeID = GetPlayerNodeIDByName(strNodeName);
	if (nNodeID == INVALID_NODE_ID)
	{
		XN_LOG_ERROR_RETURN(XN_STATUS_BAD_NODE_NAME, XN_MASK_OPEN_NI, "Bad node name '%s'", strNodeName);
	}

	PlayerNodeInfo* pPlayerNodeInfo = &m_pNodeInfoMap[nNodeID];

	XnInt64 nOriginFrame = 0;
	switch (origin)
	{
		case XN_PLAYER_SEEK_SET:
		{
			nOriginFrame = 0;
			break;
		}
		case XN_PLAYER_SEEK_CUR:
		{
			nOriginFrame = pPlayerNodeInfo->nCurFrame;
			break;
		}
		case XN_PLAYER_SEEK_END:
		{
			nOriginFrame = pPlayerNodeInfo->nFrames;
			break;
		}
		default:
		{
			XN_ASSERT(FALSE);
			XN_LOG_ERROR_RETURN(XN_STATUS_BAD_PARAM, XN_MASK_OPEN_NI, "Invalid seek origin: %u", origin);
		}
	}
	XnUInt32 nDestFrame = (XnUInt32)XN_MIN(XN_MAX(1, nOriginFrame + nFrameOffset), pPlayerNodeInfo->nFrames);
	nRetVal = SeekToFrameAbsolute(nNodeID, nDestFrame);
	XN_IS_STATUS_OK(nRetVal);

	return XN_STATUS_OK;
}
Example no. 15
XnFloat XnVSlider1D::ValueAtPosition(const XnPoint3D& pt)
{
	XnFloat fMajorAxisPosition;
	switch (m_eAxis)
	{
	case AXIS_X:
		fMajorAxisPosition = pt.X; break;
	case AXIS_Y:
		fMajorAxisPosition = pt.Y; break;
	case AXIS_Z:
		fMajorAxisPosition = pt.Z; break;
	default:
		return -1;
	}

	XnFloat fRelativePosition = (fMajorAxisPosition - m_fMinOutputMajorAxisPosition)
		/ (m_fMaxOutputMajorAxisPosition - m_fMinOutputMajorAxisPosition);
	XnFloat fTempOutput = m_fOutputMinimum + ((m_fOutputMaximum - m_fOutputMinimum) * fRelativePosition);

	return XN_MAX(m_fOutputMinimum, XN_MIN(m_fOutputMaximum, fTempOutput));
} // XnVSlider1D::ValueAtPosition
Example no. 16
//---------------------------------------------------------------------------
// Code
//---------------------------------------------------------------------------
XnBool XN_CALLBACK_TYPE XnDeviceSensorProtocolUsbEpCb(XnUChar* pBuffer, XnUInt32 nBufferSize, void* pCallbackData)
{
	XN_PROFILING_START_MT_SECTION("XnDeviceSensorProtocolUsbEpCb");

	XnUInt32 nReadBytes;
	XnUInt16 nMagic;

	XnSpecificUsbDevice* pDevice = (XnSpecificUsbDevice*)pCallbackData;
	XnDevicePrivateData* pDevicePrivateData = pDevice->pDevicePrivateData;
	XnUChar* pBufEnd = pBuffer + nBufferSize;

	XnSpecificUsbDeviceState* pCurrState = &pDevice->CurrState;

	while (pBuffer < pBufEnd)
	{
		switch (pCurrState->State)
		{
		case XN_WAITING_FOR_CONFIGURATION:
			if (pDevicePrivateData->bIgnoreDataPackets)
			{
				// ignore this packet
				xnLogVerbose(XN_MASK_SENSOR_PROTOCOL, "ignoring %d bytes - device requested to ignore!", nBufferSize);
				pBuffer = pBufEnd;
			}
			else
			{
				pCurrState->State = XN_IGNORING_GARBAGE;
				pCurrState->nMissingBytesInState = pDevice->nIgnoreBytes;
			}
			break;

		case XN_IGNORING_GARBAGE:
			// Ignore the first bytes received on this endpoint. NOTE: due to a firmware bug, the
			// first data received on each endpoint is corrupt, which throws off the timestamp
			// calculation and causes later (true) timestamps to be computed incorrectly. By
			// ignoring the first data received on each endpoint we hope to get only valid data.
			nReadBytes = XN_MIN((XnUInt32)(pBufEnd - pBuffer), pCurrState->nMissingBytesInState);

			if (nReadBytes > 0)
			{
				xnLogVerbose(XN_MASK_SENSOR_PROTOCOL, "ignoring %d bytes - ignore garbage phase!", nReadBytes);
				pCurrState->nMissingBytesInState -= nReadBytes;
				pBuffer += nReadBytes;
			}

			if (pCurrState->nMissingBytesInState == 0)
			{
				pCurrState->State = XN_LOOKING_FOR_MAGIC;
				pCurrState->nMissingBytesInState = sizeof(XnUInt16);
			}
			break;

		case XN_LOOKING_FOR_MAGIC:
			nMagic = XN_PREPARE_VAR16_IN_BUFFER(pDevicePrivateData->FWInfo.nFWMagic);

			if (pCurrState->nMissingBytesInState == sizeof(XnUInt8) && // first byte already found
				pBuffer[0] == ((XnUInt8*)&nMagic)[1])	// we have here second byte
			{
				// move to next byte
				pBuffer++;

				// move to next state
				pCurrState->CurrHeader.nMagic = nMagic;
				pCurrState->State = XN_PACKET_HEADER;
				pCurrState->nMissingBytesInState = sizeof(XnSensorProtocolResponseHeader);
				break;
			}

			while (pBuffer < pBufEnd)
			{
				if (nMagic == *(XnUInt16*)(pBuffer))
				{
					pCurrState->CurrHeader.nMagic = nMagic;
					pCurrState->State = XN_PACKET_HEADER;
					pCurrState->nMissingBytesInState = sizeof(XnSensorProtocolResponseHeader);
					break;
				}
				else
				{
					pBuffer++;
				}
			}

			if (pBuffer == pBufEnd &&					// magic wasn't found
				pBuffer[-1] == ((XnUInt8*)&nMagic)[0])	// last byte in buffer is first in magic
			{
				// mark that we found first one
				pCurrState->nMissingBytesInState--;
			}

			break;

		case XN_PACKET_HEADER:
			nReadBytes = XN_MIN((XnUInt32)(pBufEnd - pBuffer), pCurrState->nMissingBytesInState);
			xnOSMemCopy((XnUChar*)&pCurrState->CurrHeader + sizeof(XnSensorProtocolResponseHeader) - pCurrState->nMissingBytesInState, 
				pBuffer, nReadBytes);
			pCurrState->nMissingBytesInState -= nReadBytes;
			pBuffer += nReadBytes;

			if (pCurrState->nMissingBytesInState == 0)
			{
				// we have entire header. Fix it
				pCurrState->CurrHeader.nBufSize = XN_PREPARE_VAR16_IN_BUFFER(pCurrState->CurrHeader.nBufSize);
				pCurrState->CurrHeader.nMagic = XN_PREPARE_VAR16_IN_BUFFER(pCurrState->CurrHeader.nMagic);
				pCurrState->CurrHeader.nPacketID = XN_PREPARE_VAR16_IN_BUFFER(pCurrState->CurrHeader.nPacketID);
				pCurrState->CurrHeader.nTimeStamp = XN_PREPARE_VAR32_IN_BUFFER(pCurrState->CurrHeader.nTimeStamp);
				pCurrState->CurrHeader.nType = XN_PREPARE_VAR16_IN_BUFFER(pCurrState->CurrHeader.nType);
				pCurrState->CurrHeader.nBufSize = xnOSEndianSwapUINT16(pCurrState->CurrHeader.nBufSize);
				pCurrState->CurrHeader.nBufSize -= sizeof(XnSensorProtocolResponseHeader);

				pCurrState->State = XN_PACKET_DATA;
				pCurrState->nMissingBytesInState = pCurrState->CurrHeader.nBufSize;
			}
			break;

		case XN_PACKET_DATA:
			nReadBytes = XN_MIN((XnUInt32)(pBufEnd - pBuffer), pCurrState->nMissingBytesInState);
			pDevicePrivateData->pSensor->GetFirmware()->GetStreams()->ProcessPacketChunk(&pCurrState->CurrHeader, pBuffer, pCurrState->CurrHeader.nBufSize - pCurrState->nMissingBytesInState, nReadBytes);
			pBuffer += nReadBytes;
			pCurrState->nMissingBytesInState -= nReadBytes;

			if (pCurrState->nMissingBytesInState == 0)
			{
				pCurrState->State = XN_LOOKING_FOR_MAGIC;
				pCurrState->nMissingBytesInState = sizeof(XnUInt16);
			}
			break;
		}
	}

	XN_PROFILING_END_SECTION;

	return TRUE;
}
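For reference, the per-endpoint parser state the callback mutates looks roughly like the sketch below. Only the state names and fields actually appear in the code above; the enum type name and the exact layout are assumptions.

// Sketch (assumed, not copied from the driver headers)
typedef enum
{
	XN_WAITING_FOR_CONFIGURATION,
	XN_IGNORING_GARBAGE,
	XN_LOOKING_FOR_MAGIC,
	XN_PACKET_HEADER,
	XN_PACKET_DATA
} XnUsbParserState; // hypothetical type name

typedef struct XnSpecificUsbDeviceState
{
	XnUsbParserState State;                     // current step of the packet parser
	XnUInt32 nMissingBytesInState;              // bytes still needed to complete this step
	XnSensorProtocolResponseHeader CurrHeader;  // header being accumulated / currently processed
} XnSpecificUsbDeviceState;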
Example no. 17
void drawDepth(IntRect* pLocation, IntPair* pPointer)
{
	if (g_DrawConfig.Streams.Depth.Coloring != DEPTH_OFF)
	{
		if (!isDepthOn())
		{
			drawClosedStream(pLocation, "Depth");
			return;
		}

		const DepthMetaData* pDepthMD = getDepthMetaData();
		const XnDepthPixel* pDepth = pDepthMD->Data();
		XN_ASSERT(pDepth);
		
		if (pDepthMD->FrameID() == 0)
		{
			return;
		}

		if (g_DrawConfig.Streams.Depth.Coloring == STANDARD_DEVIATION)
		{
			XnPixelStatistics* pStatistics = g_PixelStatistics;

			for (XnUInt16 nY = pDepthMD->YOffset(); nY < pDepthMD->YRes() + pDepthMD->YOffset(); nY++)
			{
				XnUInt8* pTexture = TextureMapGetLine(&g_texDepth, nY) + pDepthMD->XOffset()*4;
				for (XnUInt16 nX = 0; nX < pDepthMD->XRes(); nX++, pTexture+=4, pStatistics++)
				{
					pTexture[0] = pTexture[1] = XN_MIN((int)pStatistics->dStdDev, 255);
					pTexture[2] = 0;
					pTexture[3] = g_DrawConfig.Streams.Depth.fTransparency*255;
				}
			}
		}
		else
		{
			// copy depth into texture-map
			for (XnUInt16 nY = pDepthMD->YOffset(); nY < pDepthMD->YRes() + pDepthMD->YOffset(); nY++)
			{
				XnUInt8* pTexture = TextureMapGetLine(&g_texDepth, nY) + pDepthMD->XOffset()*4;
				for (XnUInt16 nX = 0; nX < pDepthMD->XRes(); nX++, pDepth++, pTexture+=4)
				{
					XnUInt8 nRed = 0;
					XnUInt8 nGreen = 0;
					XnUInt8 nBlue = 0;
					XnUInt8 nAlpha = g_DrawConfig.Streams.Depth.fTransparency*255;

					XnUInt16 nColIndex;

					switch (g_DrawConfig.Streams.Depth.Coloring)
					{
					case LINEAR_HISTOGRAM:
						nRed = nGreen = g_pDepthHist[*pDepth]*255;
						break;
					case PSYCHEDELIC_SHADES:
						nAlpha *= (((XnFloat)(*pDepth % 10) / 20) + 0.5);
						// deliberate fall-through into PSYCHEDELIC
					case PSYCHEDELIC:

						switch ((*pDepth/10) % 10)
						{
						case 0:
							nRed = 255;
							break;
						case 1:
							nGreen = 255;
							break;
						case 2:
							nBlue = 255;
							break;
						case 3:
							nRed = 255;
							nGreen = 255;
							break;
						case 4:
							nGreen = 255;
							nBlue = 255;
							break;
						case 5:
							nRed = 255;
							nBlue = 255;
							break;
						case 6:
							nRed = 255;
							nGreen = 255;
							nBlue = 255;
							break;
						case 7:
							nRed = 127;
							nBlue = 255;
							break;
						case 8:
							nRed = 255;
							nBlue = 127;
							break;
						case 9:
							nRed = 127;
							nGreen = 255;
							break;
						}
						break;
					case RAINBOW:
						nColIndex = (XnUInt16)((*pDepth / (g_nMaxDepth / 256.)));
						nRed = PalletIntsR[nColIndex];
						nGreen = PalletIntsG[nColIndex];
						nBlue = PalletIntsB[nColIndex];
						break;
					case CYCLIC_RAINBOW:
						nColIndex = (*pDepth % 256);
						nRed = PalletIntsR[nColIndex];
						nGreen = PalletIntsG[nColIndex];
						nBlue = PalletIntsB[nColIndex];
						break;
					case CYCLIC_RAINBOW_HISTOGRAM:
						float fHist = g_pDepthHist[*pDepth];
						nColIndex = (*pDepth % 256);
						nRed = PalletIntsR[nColIndex]   * fHist;
						nGreen = PalletIntsG[nColIndex] * fHist;
						nBlue = PalletIntsB[nColIndex]  * fHist;
						break;
					}

					pTexture[0] = nRed;
					pTexture[1] = nGreen;
					pTexture[2] = nBlue;

					if (*pDepth == 0)
						pTexture[3] = 0;
					else
						pTexture[3] = nAlpha;
				}
			}
		} // not STANDARD_DEVIATION

		if (pPointer != NULL)
		{
			TextureMapDrawCursor(&g_texDepth, *pPointer);
		}

		TextureMapUpdate(&g_texDepth);
		TextureMapDraw(&g_texDepth, pLocation);
	}
}
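g_pDepthHist, used by the LINEAR_HISTOGRAM and CYCLIC_RAINBOW_HISTOGRAM colorings above, is not computed in this snippet. A common way such a table is prepared is a cumulative depth histogram normalized to [0,1], so that nearer (more frequent) depths map to brighter values; the sketch below shows that idea under those assumptions and is not the viewer's actual helper.

void CalculateDepthHistSketch(float* pHist, int nMaxDepth, const XnDepthPixel* pDepth, int nPixels)
{
	xnOSMemSet(pHist, 0, nMaxDepth * sizeof(float));

	int nCounted = 0;
	for (int i = 0; i < nPixels; ++i)
	{
		if (pDepth[i] != 0 && pDepth[i] < nMaxDepth)
		{
			pHist[pDepth[i]]++;
			nCounted++;
		}
	}

	for (int d = 1; d < nMaxDepth; ++d)
		pHist[d] += pHist[d - 1]; // cumulative count of pixels at or below depth d

	if (nCounted != 0)
		for (int d = 1; d < nMaxDepth; ++d)
			pHist[d] = 1.0f - (pHist[d] / nCounted); // nearer depths => values closer to 1
}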
Example no. 18
XnStatus XnSensorAudioStream::ReallocBuffer()
{
	XnStatus nRetVal = XN_STATUS_OK;

	XnDevicePrivateData* pDevicePrivateData = m_Helper.GetPrivateData();

	if (m_hSharedMemory == NULL)
	{
		// first time, create shared memory

		// we allocate enough for 5 seconds of audio
		XnUInt32 nSampleSize = 2 * 2; // 16-bit per channel (2 bytes) * max number of channels (2)
		XnUInt32 nSamples = 48000 * 5; // max sample rate * number of seconds

		XnUInt32 nMaxBufferSize = nSamples * nSampleSize;

		// find min packet size (so we'll have max packet count)
		XnUInt32 nMinPacketSize = XN_MIN(XN_SENSOR_PROTOCOL_AUDIO_PACKET_SIZE_BULK, XN_SENSOR_PROTOCOL_AUDIO_PACKET_SIZE_ISO);
		XnUInt32 nMaxPacketCount = nMaxBufferSize / nMinPacketSize - 1;

		XnUInt32 nSharedBufferSize = 
			sizeof(XnAudioSharedBuffer) + // header
			sizeof(XnUInt64) * nMaxPacketCount + // packet timestamps
			nMaxBufferSize;

		// to make the name unique, we'll add process ID
		XN_PROCESS_ID procID;
		xnOSGetCurrentProcessID(&procID);
		XnChar strSharedName[XN_DEVICE_MAX_STRING_LENGTH];
		sprintf(strSharedName, "%u_%s_%s", procID, m_strDeviceName, GetName());

		nRetVal = m_SharedBufferName.UnsafeUpdateValue(strSharedName);
		XN_IS_STATUS_OK(nRetVal);

		nRetVal = RequiredSizeProperty().UnsafeUpdateValue(nMaxBufferSize);
		XN_IS_STATUS_OK(nRetVal);

		nRetVal = xnOSCreateSharedMemory(strSharedName, nSharedBufferSize, XN_OS_FILE_READ | XN_OS_FILE_WRITE, &m_hSharedMemory);
		XN_IS_STATUS_OK(nRetVal);

		XnUChar* pAddress;
		nRetVal = xnOSSharedMemoryGetAddress(m_hSharedMemory, (void**)&pAddress);
		XN_IS_STATUS_OK(nRetVal);

		m_pSharedHeader = (XnAudioSharedBuffer*)pAddress;
		pDevicePrivateData->pAudioPacketsTimestamps = (XnUInt64*)(pAddress + sizeof(XnAudioSharedBuffer));
		pDevicePrivateData->pAudioBuffer = (XN_AUDIO_TYPE*)(pAddress + sizeof(XnAudioSharedBuffer) + sizeof(XnUInt64) * nMaxPacketCount);
		pDevicePrivateData->nAudioBufferSize = nMaxBufferSize;

		m_pSharedHeader->nTimestampsListOffset = sizeof(XnAudioSharedBuffer);
		m_pSharedHeader->nBufferOffset = pDevicePrivateData->pAudioBuffer - pAddress;
	}

	// calculate current packet size
	pDevicePrivateData->nAudioPacketSize = m_nOrigAudioPacketSize;

	if (m_Helper.GetFirmwareVersion() >= XN_SENSOR_FW_VER_5_2 && GetNumberOfChannels() == 1)
	{
		pDevicePrivateData->nAudioPacketSize /= 2;
	}

	pDevicePrivateData->nAudioBufferNumOfPackets = pDevicePrivateData->nAudioBufferSize / pDevicePrivateData->nAudioPacketSize;
	pDevicePrivateData->nAudioBufferSize = pDevicePrivateData->nAudioBufferNumOfPackets * pDevicePrivateData->nAudioPacketSize;

	m_pSharedHeader->nPacketCount = pDevicePrivateData->nAudioBufferNumOfPackets;
	m_pSharedHeader->nPacketSize = pDevicePrivateData->nAudioPacketSize;

	// set read and write indices
	pDevicePrivateData->nAudioReadIndex = 0;
	pDevicePrivateData->nAudioWriteIndex = 0;

	return (XN_STATUS_OK);
}
Example no. 19
	void Mainloop()
	{
		int frameId = 1;
		int xdir = -3;
		int ydir = 1;
		struct {int x, y;} center = {160,120};
		while (m_running)
		{
			xnOSSleep(33);
			//			printf("Tick");
			OniFrame* pFrame = getServices().acquireFrame();

			if (pFrame == NULL) {printf("Didn't get frame...\n"); continue;}

			// Fill frame
			xnOSMemSet(pFrame->data, 0, pFrame->dataSize);

			OniRGB888Pixel* pImage = (OniRGB888Pixel*)pFrame->data;


			for (int y = XN_MAX(center.y-10, 0); y < XN_MIN(center.y+10, OZ_RESOLUTION_Y); ++y)
				for (int x = XN_MAX(center.x-10, 0); x < XN_MIN(center.x+10, OZ_RESOLUTION_X); ++x)
					if ((x-center.x)*(x-center.x)+(y-center.y)*(y-center.y) < 70)
					{
						pImage[singleRes(x, y)].r = (char)(255*(x/(double)OZ_RESOLUTION_X));
						pImage[singleRes(x, y)].g = (char)(255*(y/(double)OZ_RESOLUTION_Y));
						pImage[singleRes(x, y)].b = (char)(255*((OZ_RESOLUTION_X-x)/(double)OZ_RESOLUTION_X));
					}
//			pImage[singleRes(center.x, center.y)].r = 255;

			center.x += xdir;
			center.y += ydir;

			if (center.x < abs(xdir) || center.x > OZ_RESOLUTION_X-1-abs(xdir)) xdir*=-1;
			if (center.y < abs(ydir) || center.y > OZ_RESOLUTION_Y-1-abs(ydir)) ydir*=-1;



			pImage[0].b = (unsigned char)255;

			// 			for (int y = 0; y < OZ_RESOLUTION_Y; ++y)
			// 			{
			// 				pDepth[y*OZ_RESOLUTION_X+(OZ_RESOLUTION_Y-y)] = pDepth[y*OZ_RESOLUTION_X+(y)] = 500+y;
			// 			}

			// Fill metadata
			pFrame->frameIndex = frameId;

			pFrame->videoMode.pixelFormat = ONI_PIXEL_FORMAT_RGB888;
			pFrame->videoMode.resolutionX = OZ_RESOLUTION_X;
			pFrame->videoMode.resolutionY = OZ_RESOLUTION_Y;
			pFrame->videoMode.fps = 30;

			pFrame->width = OZ_RESOLUTION_X;
			pFrame->height = OZ_RESOLUTION_Y;

			pFrame->cropOriginX = pFrame->cropOriginY = 0;
			pFrame->croppingEnabled = FALSE;

			pFrame->sensorType = ONI_SENSOR_COLOR;
			pFrame->stride = OZ_RESOLUTION_X*3;
			pFrame->timestamp = frameId*33000;

			raiseNewFrame(pFrame);
			getServices().releaseFrame(pFrame);

			frameId++;
		}
	}
XnStatus XnPacked11DepthProcessor::Unpack11to16(const XnUInt8* pcInput, const XnUInt32 nInputSize, XnUInt32* pnActualRead)
{
	const XnUInt8* pOrigInput = pcInput;

	XnUInt32 nElements = nInputSize / XN_INPUT_ELEMENT_SIZE; // floored
	XnUInt32 nNeededOutput = nElements * XN_OUTPUT_ELEMENT_SIZE;

	*pnActualRead = 0;
	XnBuffer* pWriteBuffer = GetWriteBuffer();

	// Check there is enough room for the depth pixels
	if (!CheckWriteBufferForOverflow(nNeededOutput))
	{
		return XN_STATUS_OUTPUT_BUFFER_OVERFLOW;
	}

	XnUInt16* pnOutput = (XnUInt16*)pWriteBuffer->GetUnsafeWritePointer();

	XnUInt16 a0,a1,a2,a3,a4,a5,a6,a7;
#ifdef XN_NEON
	XnUInt16 depth[8];
	uint16x8_t Q0;
#endif

	// Convert the 11bit packed data into 16bit shorts
	for (XnUInt32 nElem = 0; nElem < nElements; ++nElem)
	{
    if(m_nScaleFactor > 1)
    {
      XnUInt32 px = m_nOffsetInFrame%m_CurrentVideoMode.resolutionX;
      XnUInt32 py = (m_nOffsetInFrame)/m_CurrentVideoMode.resolutionX;

      if(py%m_nScaleFactor != 0)
      {
        // Skip as many pixels as possible
        XnUInt32 nEltsToSkip =
            XN_MIN(nElements - nElem,
                   (m_CurrentVideoMode.resolutionX - px)/8
                   + (m_nScaleFactor-(py%m_nScaleFactor) - 1)*m_CurrentVideoMode.resolutionX/8);

        //      ::memset(pnOutput, 0, nEltsToSkip*8*sizeof(XnUInt16));
        pcInput += nEltsToSkip*XN_INPUT_ELEMENT_SIZE;
        pnOutput += nEltsToSkip*8;
        m_nOffsetInFrame += nEltsToSkip*8;
        nElem += (nEltsToSkip-1);
        continue;
      }
    }

    // input:  0,  1,  2,3,  4,  5,  6,7,  8,  9,10
    //         -,---,---,-,---,---,---,-,---,---,-
    // bits:   8,3,5,6,2,8,1,7,4,4,7,1,8,2,6,5,3,8
    //         ---,---,-----,---,---,-----,---,---
    // output:   0,  1,    2,  3,  4,    5,  6,  7
    if(m_nScaleFactor == 2)
    {
      a0 = (XN_TAKE_BITS(pcInput[0],8,0) << 3) | XN_TAKE_BITS(pcInput[1],3,5);
      a2 = (XN_TAKE_BITS(pcInput[2],2,0) << 9) | (XN_TAKE_BITS(pcInput[3],8,0) << 1) | XN_TAKE_BITS(pcInput[4],1,7);
      a4 = (XN_TAKE_BITS(pcInput[5],4,0) << 7) | XN_TAKE_BITS(pcInput[6],7,1);
      a6 = (XN_TAKE_BITS(pcInput[8],6,0) << 5) | XN_TAKE_BITS(pcInput[9],5,3);
    }
    else if(m_nScaleFactor == 4)
    {
      a0 = (XN_TAKE_BITS(pcInput[0],8,0) << 3) | XN_TAKE_BITS(pcInput[1],3,5);
      a4 = (XN_TAKE_BITS(pcInput[5],4,0) << 7) | XN_TAKE_BITS(pcInput[6],7,1);
    }
    else
    {
      a0 = (XN_TAKE_BITS(pcInput[0],8,0) << 3) | XN_TAKE_BITS(pcInput[1],3,5);
      a1 = (XN_TAKE_BITS(pcInput[1],5,0) << 6) | XN_TAKE_BITS(pcInput[2],6,2);
      a2 = (XN_TAKE_BITS(pcInput[2],2,0) << 9) | (XN_TAKE_BITS(pcInput[3],8,0) << 1) | XN_TAKE_BITS(pcInput[4],1,7);
      a3 = (XN_TAKE_BITS(pcInput[4],7,0) << 4) | XN_TAKE_BITS(pcInput[5],4,4);
      a4 = (XN_TAKE_BITS(pcInput[5],4,0) << 7) | XN_TAKE_BITS(pcInput[6],7,1);
      a5 = (XN_TAKE_BITS(pcInput[6],1,0) << 10) | (XN_TAKE_BITS(pcInput[7],8,0) << 2) | XN_TAKE_BITS(pcInput[8],2,6);
      a6 = (XN_TAKE_BITS(pcInput[8],6,0) << 5) | XN_TAKE_BITS(pcInput[9],5,3);
      a7 = (XN_TAKE_BITS(pcInput[9],3,0) << 8) | XN_TAKE_BITS(pcInput[10],8,0);
    }



#ifdef XN_NEON
		depth[0] = GetOutput(a0);
		depth[1] = GetOutput(a1);
		depth[2] = GetOutput(a2);
		depth[3] = GetOutput(a3);
		depth[4] = GetOutput(a4);
		depth[5] = GetOutput(a5);
		depth[6] = GetOutput(a6);
		depth[7] = GetOutput(a7);

		// Load
		Q0 = vld1q_u16(depth);
		// Store
		vst1q_u16(pnOutput, Q0);
#else
    if(m_nScaleFactor == 2)
    {
      *pnOutput++ = GetOutput(a0);
      *pnOutput++ = 0;
      *pnOutput++ = GetOutput(a2);
      *pnOutput++ = 0;
      *pnOutput++ = GetOutput(a4);
      *pnOutput++ = 0;
      *pnOutput++ = GetOutput(a6);
      *pnOutput++ = 0;
    }
    else if(m_nScaleFactor == 4)
    {
      *pnOutput++ = GetOutput(a0);
      *pnOutput++ = 0;
      *pnOutput++ = 0;
      *pnOutput++ = 0;
      *pnOutput++ = GetOutput(a4);
      *pnOutput++ = 0;
      *pnOutput++ = 0;
      *pnOutput++ = 0;
    }
    else
    {
      *pnOutput++ = GetOutput(a0);
      *pnOutput++ = GetOutput(a1);
      *pnOutput++ = GetOutput(a2);
      *pnOutput++ = GetOutput(a3);
      *pnOutput++ = GetOutput(a4);
      *pnOutput++ = GetOutput(a5);
      *pnOutput++ = GetOutput(a6);
      *pnOutput++ = GetOutput(a7);
    }
#endif

		pcInput += XN_INPUT_ELEMENT_SIZE;
    m_nOffsetInFrame+=8;
	}

	*pnActualRead = (XnUInt32)(pcInput - pOrigInput);
	pWriteBuffer->UnsafeUpdateSize(nNeededOutput);

	return XN_STATUS_OK;
}
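XN_TAKE_BITS(source, count, shift), used throughout the unpacking above, extracts count bits of source starting at bit shift. A plausible definition (an assumption, not copied from the original headers) is:

#define XN_TAKE_BITS(source, count, shift) (((source) >> (shift)) & ((1 << (count)) - 1))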
Example no. 21
XnStatus XnShiftToDepthUpdate(XnShiftToDepthTables* pShiftToDepth, const XnShiftToDepthConfig* pConfig)
{
	XN_VALIDATE_INPUT_PTR(pShiftToDepth);
	XN_VALIDATE_INPUT_PTR(pConfig);

	xnLogVerbose("S2D", "S2D table ID to be used: %d", pConfig->nCustomS2DTableID);
	
	// check max shift wasn't changed (if so, memory should be re-allocated)
	if (pConfig->nDeviceMaxShiftValue > pShiftToDepth->nShiftsCount)
		return XN_STATUS_DEVICE_INVALID_MAX_SHIFT;

	// check max depth wasn't changed (if so, memory should be re-allocated)
	if (pConfig->nDeviceMaxDepthValue > pShiftToDepth->nDepthsCount)
		return XN_STATUS_DEVICE_INVALID_MAX_DEPTH;

	XnUInt32 nIndex = 0;
	XnInt16  nShiftValue = 0;
	XnDouble dFixedRefX = 0;
	XnDouble dMetric = 0;
	XnDouble dDepth = 0;
	XnDouble dPlanePixelSize = pConfig->fZeroPlanePixelSize;
	XnDouble dPlaneDsr = pConfig->nZeroPlaneDistance;
	XnDouble dPlaneDcl = pConfig->fEmitterDCmosDistance;
	XnInt32 nConstShift = pConfig->nParamCoeff * pConfig->nConstShift;

	dPlanePixelSize *= pConfig->nPixelSizeFactor;
	nConstShift /= pConfig->nPixelSizeFactor;

	OniDepthPixel* pShiftToDepthTable = pShiftToDepth->pShiftToDepthTable;
	XnUInt16* pDepthToShiftTable = pShiftToDepth->pDepthToShiftTable;

	xnOSMemSet(pShiftToDepthTable, 0, pShiftToDepth->nShiftsCount * sizeof(OniDepthPixel));
	xnOSMemSet(pDepthToShiftTable, 0, pShiftToDepth->nDepthsCount * sizeof(XnUInt16));

	XnUInt16 nLastDepth = 0;
	XnUInt16 nLastIndex = 0;

	XnUInt32 nMaxDepth = XN_MIN(pConfig->nDeviceMaxDepthValue, pConfig->nDepthMaxCutOff);

	std::ostringstream os;
	os << "/etc/openni2/PS1080-" << pConfig->nCustomS2DTableID << ".csv";
	std::string S2D_file_name = os.str();

	xnLogInfo("S2D", "Trying file %s", S2D_file_name.c_str());
	if (access(S2D_file_name.c_str(), F_OK) != -1)
	{
		xnLogInfo("S2D", "Using S2D file %s", S2D_file_name.c_str());

		std::ifstream infile;
		infile.open(S2D_file_name.c_str(), std::ifstream::in);
		std::string line;
		while (std::getline(infile, line))
		{
			std::stringstream lineStream(line);
			std::string cell;
			std::string val;
			std::getline(lineStream, cell, ';');
			std::getline(lineStream, val, ';');
			nIndex = atoi(cell.c_str());
			dDepth = atoi(val.c_str());
			pShiftToDepthTable[nIndex] = dDepth;
			for (XnUInt16 i = nLastDepth; i < dDepth; i++)
			{
				pDepthToShiftTable[i] = nLastIndex;
			}

			nLastIndex = (XnUInt16)nIndex;
			nLastDepth = (XnUInt16)dDepth;
		}
	}
	else
	{
		for (nIndex = 1; nIndex < pConfig->nDeviceMaxShiftValue; nIndex++)
		{
			nShiftValue = (XnInt16)nIndex;
			dFixedRefX = (XnDouble)(nShiftValue - nConstShift) / (XnDouble)pConfig->nParamCoeff;
			dFixedRefX -= 0.375;
			dMetric = dFixedRefX * dPlanePixelSize;
			dDepth = pConfig->nShiftScale * ((dMetric * dPlaneDsr / (dPlaneDcl - dMetric)) + dPlaneDsr);

			// check cut-offs
			if ((dDepth > pConfig->nDepthMinCutOff) && (dDepth < nMaxDepth))
			{
				pShiftToDepthTable[nIndex] = (XnUInt16)dDepth;

				for (XnUInt16 i = nLastDepth; i < dDepth; i++)
				{
					pDepthToShiftTable[i] = nLastIndex;
				}

				nLastIndex = (XnUInt16)nIndex;
				nLastDepth = (XnUInt16)dDepth;
			}
		}
	}
	
	for (XnUInt16 i = nLastDepth; i <= pConfig->nDeviceMaxDepthValue; i++)
		pDepthToShiftTable[i] = nLastIndex;
	

	return XN_STATUS_OK;
}
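Restated for readability, the fallback loop above converts a shift value s to a depth (in mm) roughly as follows; names mirror the XnShiftToDepthConfig fields, and this is only a transcription of the code, not an independent derivation:

//   constShift' = paramCoeff * constShift / pixelSizeFactor
//   refX        = (s - constShift') / paramCoeff - 0.375
//   metric      = refX * zeroPlanePixelSize * pixelSizeFactor
//   depth       = shiftScale * ( metric * zeroPlaneDistance / (emitterDCmosDistance - metric)
//                                + zeroPlaneDistance )
//
// Entries outside (depthMinCutOff, min(deviceMaxDepthValue, depthMaxCutOff)) are left at zero.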
XnStatus PlayerImpl::SetNodeNewData(const XnChar* strNodeName, XnUInt64 nTimeStamp, XnUInt32 nFrame, const void* pData, XnUInt32 nSize)
{
	XnStatus nRetVal = XN_STATUS_OK;

	XnUInt64 nNow;
	xnOSGetHighResTimeStamp(&nNow);

	if (!m_bHasTimeReference)
	{
		m_nStartTimestamp = nTimeStamp;
		m_nStartTime = nNow;

		m_bHasTimeReference = TRUE;
	}
	else if (m_dPlaybackSpeed != XN_PLAYBACK_SPEED_FASTEST)
	{
		// check this data timestamp compared to when we started
		XnInt64 nTimestampDiff = nTimeStamp - m_nStartTimestamp;
		
		// in some recordings, frames are not ordered by timestamp. Make sure this does not break the mechanism
		if (nTimestampDiff > 0)
		{
			XnInt64 nTimeDiff = nNow - m_nStartTime;

			// check if we need to wait some time
			XnInt64 nRequestedTimeDiff = (XnInt64)(nTimestampDiff / m_dPlaybackSpeed);
			if (nTimeDiff < nRequestedTimeDiff)
			{
				XnUInt32 nSleep = XnUInt32((nRequestedTimeDiff - nTimeDiff)/1000);
				nSleep = XN_MIN(nSleep, XN_PLAYBACK_SPEED_SANITY_SLEEP);
				xnOSSleep(nSleep);
			}

			// update reference to current frame (this will handle cases in which application
			// stopped reading frames and continued after a while)
			m_nStartTimestamp = nTimeStamp;
			xnOSGetHighResTimeStamp(&m_nStartTime);
		}
	}

	PlayedNodeInfo playedNode;
	nRetVal = m_playedNodes.Get(strNodeName, playedNode);
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = xnLockedNodeStartChanges(playedNode.hNode, playedNode.hLock);
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = xnSetIntProperty(playedNode.hNode, XN_PROP_TIMESTAMP, nTimeStamp);
	if (nRetVal != XN_STATUS_OK)
	{
		xnLockedNodeEndChanges(playedNode.hNode, playedNode.hLock);
		return (nRetVal);
	}
	nRetVal = xnSetIntProperty(playedNode.hNode, XN_PROP_FRAME_ID, nFrame);
	if (nRetVal != XN_STATUS_OK)
	{
		xnLockedNodeEndChanges(playedNode.hNode, playedNode.hLock);
		return (nRetVal);
	}
	nRetVal = xnSetGeneralProperty(playedNode.hNode, XN_PROP_NEWDATA, nSize, pData);
	if (nRetVal != XN_STATUS_OK)
	{
		xnLockedNodeEndChanges(playedNode.hNode, playedNode.hLock);
		return (nRetVal);
	}

	nRetVal = xnLockedNodeEndChanges(playedNode.hNode, playedNode.hLock);
	XN_IS_STATUS_OK(nRetVal);

	return XN_STATUS_OK;
}
Example no. 23
XnStatus XnVSlider1D::Update(const XnPoint3D& pt, XnFloat fTime, XnBool bCheckOffAxis)
{
	if (m_pPointBuffer == NULL)
	{
		return XN_STATUS_NOT_INIT;
	}

	m_pPointBuffer->AddPoint(pt, fTime);

	if (bCheckOffAxis)
	{
		XnVDirection eDir = CheckForOffAxisMovement(pt, fTime);

		if (eDir != DIRECTION_ILLEGAL)
		{
			// Set position and value to "Last Valid"
			// TODO: Do in a non-crappy way
			m_ptCurrentPosition = m_pPointBuffer->GetAveragePointByTime(m_nOffAxisDetectionTime, fTime, 1);
			m_fCurrentOutput = ValueAtPosition(m_ptCurrentPosition);

			OffAxisMovement(eDir);

			// don't update values
			return XN_STATUS_OK;
		}
	}
	m_ptCurrentPosition = pt;

	// calculate the current value
	// normalize the on-axis movement according to this scale:

	//TODO: Off-axis motion detection

	XnFloat fMajorAxisPosition;
	switch (m_eAxis)
	{
	case AXIS_X:
		fMajorAxisPosition = pt.X; break;
	case AXIS_Y:
		fMajorAxisPosition = pt.Y; break;
	case AXIS_Z:
		fMajorAxisPosition = pt.Z; break;
	default:
		return XN_STATUS_NITE_UNEXPECTED_DIRECTION;
	}

	XnFloat fRelativePosition = (fMajorAxisPosition - m_fMinOutputMajorAxisPosition)
		/ (m_fMaxOutputMajorAxisPosition - m_fMinOutputMajorAxisPosition);
	XnFloat fTempOutput = m_fOutputMinimum + ((m_fOutputMaximum - m_fOutputMinimum) * fRelativePosition);

	XnFloat fPreviousOutput = m_fCurrentOutput;

	// Handle dragging if enabled
	if ((fMajorAxisPosition > m_fMaxOutputMajorAxisPosition) && m_bIsDraggable)
	{
		XnFloat fPositionDelta = fMajorAxisPosition - m_fMaxOutputMajorAxisPosition;
		m_fMaxOutputMajorAxisPosition = fMajorAxisPosition;
		m_fMinOutputMajorAxisPosition += fPositionDelta;
	}
	else
	{
		if ((fMajorAxisPosition < m_fMinOutputMajorAxisPosition) && m_bIsDraggable)
		{
			XnFloat fPositionDelta = m_fMinOutputMajorAxisPosition - fMajorAxisPosition;
			m_fMinOutputMajorAxisPosition = fMajorAxisPosition;
			m_fMaxOutputMajorAxisPosition -= fPositionDelta;
		}
	}

	m_fCurrentOutput = XN_MAX(m_fOutputMinimum, XN_MIN(m_fOutputMaximum, fTempOutput));

	ValueChange(m_fCurrentOutput);

	return XN_STATUS_OK;
} // XnVSlider1D::Update