Code Example #1
File: Audio.cpp  Project: jgollub/MetaImagerProj
void CALLBACK audioCallback(HWAVEOUT hwo, UINT uMsg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2)
{
	if (uMsg == WOM_DONE)
	{
		WAVEHDR* pHeader = (WAVEHDR*)dwParam1;
		XnUInt32 nIndex = pHeader->dwUser;

		xnDumpWriteString(g_AudioData.SyncDump, "Done playing index %d.", nIndex);

		if (g_AudioData.nFirstToCheck == -1 || g_AudioData.nFirstToCheck == nIndex)
		{
			g_AudioData.nFirstToCheck = -1;

			// get the timestamp of the packet just done playing
			XnUInt64 nPlayedTimestamp = g_AudioData.pAudioTimestamps[nIndex];

			// check how much time is still queued
			XnUInt32 nLastQueuedIndex = (g_AudioData.nAudioNextBuffer + NUMBER_OF_AUDIO_BUFFERS - 1) % NUMBER_OF_AUDIO_BUFFERS;
			XnUInt64 nLastQueuedTimestamp = g_AudioData.pAudioTimestamps[nLastQueuedIndex];

			xnDumpWriteString(g_AudioData.SyncDump, " %f ms in queue.", (nLastQueuedTimestamp - nPlayedTimestamp) / 1e3);

			if (nLastQueuedTimestamp - nPlayedTimestamp > AUDIO_LATENCY_THRESHOLD)
			{
				g_AudioData.bFlush = true;
				xnDumpWriteString(g_AudioData.SyncDump, " Will flush queue.\n");
			}
			else
				xnDumpWriteString(g_AudioData.SyncDump, "\n");
		}
		else
			xnDumpWriteString(g_AudioData.SyncDump, "\n");
	}
}
Code Example #2
void XnFrameStreamProcessor::OnEndOfFrame(const XnSensorProtocolResponseHeader* pHeader)
{
	// write dump
	XnBuffer* pCurWriteBuffer = m_pTripleBuffer->GetWriteBuffer();
	xnDumpWriteBuffer(m_InternalDump, pCurWriteBuffer->GetData(), pCurWriteBuffer->GetSize());
	xnDumpClose(&m_InternalDump);
	xnDumpClose(&m_InDump);

	if (!m_bFrameCorrupted)
	{
		// mark the buffer as stable
		XnUInt64 nTimestamp = GetTimeStamp(pHeader->nTimeStamp);
		XnUInt32 nFrameID;
		m_pTripleBuffer->MarkWriteBufferAsStable(nTimestamp, &nFrameID);

		// let inheriting classes do their stuff
		OnFrameReady(nFrameID, nTimestamp);
	}
	else
	{
		// restart
		m_pTripleBuffer->GetWriteBuffer()->Reset();
	}

	// log bandwidth
	XnUInt64 nSysTime;
	xnOSGetTimeStamp(&nSysTime);
	xnDumpWriteString(m_pDevicePrivateData->BandwidthDump, "%llu,%s,%d,%d\n", 
		nSysTime, m_csName, GetCurrentFrameID(), m_nBytesReceived);

	// re-init dumps
	xnDumpInit(&m_InDump, m_csInDumpMask, NULL, "%s_%d.raw", m_csInDumpMask, GetCurrentFrameID());
	xnDumpInit(&m_InternalDump, m_csInternalDumpMask, NULL, "%s_%d.raw", m_csInternalDumpMask, GetCurrentFrameID());
	m_nBytesReceived = 0;
}
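The example above shows the full dump lifecycle: the buffered frame is written out, the dump files are closed, and they are re-opened for the next frame with xnDumpInit. Below is a minimal sketch of that same lifecycle condensed into one function. It only uses the xnDump calls that appear in these examples; the included header, the "MyStream" mask and the file-name format are placeholders, not taken from the original project.

#include <XnLog.h>	// assumed header for the xnDump* helpers; adjust to your OpenNI/Sensor tree

// Dump one frame to its own file. If the "MyStream" dump mask is not enabled in the
// log configuration, the dump stays closed and the write calls simply do nothing.
void dumpOneFrame(const XnUChar* pData, XnUInt32 nSize, XnUInt32 nFrameID)
{
	XnDump dump = XN_DUMP_CLOSED;

	// open a per-frame file, e.g. "MyStream_7.raw" (NULL means no CSV header line)
	xnDumpInit(&dump, "MyStream", NULL, "MyStream_%d.raw", nFrameID);

	// textual and binary records, as in the examples on this page
	xnDumpWriteString(dump, "frame %u, %u bytes\n", nFrameID, nSize);
	xnDumpWriteBuffer(dump, pData, nSize);

	// flush and close the file
	xnDumpClose(&dump);
}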
Code Example #3
void XnDataProcessor::ProcessData(const XnSensorProtocolResponseHeader* pHeader, const XnUChar* pData, XnUInt32 nDataOffset, XnUInt32 nDataSize)
{
	XN_PROFILING_START_SECTION("XnDataProcessor::ProcessData")

	// count these bytes
	m_nBytesReceived += nDataSize;

	// check if we start a new packet
	if (nDataOffset == 0)
	{
		// make sure no packet was lost
		if (pHeader->nReserve != m_nLastPacketID+1 && pHeader->nReserve != 0)
		{
			xnLogWarning(XN_MASK_SENSOR_PROTOCOL, "%s: Expected %x, got %x", m_csName, m_nLastPacketID+1, pHeader->nReserve);
			OnPacketLost();
		}

		m_nLastPacketID = pHeader->nReserve;

		// log packet arrival
		XnUInt64 nNow;
		xnOSGetHighResTimeStamp(&nNow);
		xnDumpWriteString(m_pDevicePrivateData->MiniPacketsDump, "%llu,0x%hx,0x%hx,0x%hx,%u\n", nNow, pHeader->nType, pHeader->nReserve, pHeader->nBufSize, pHeader->nTimeStamp);
	}

	ProcessPacketChunk(pHeader, pData, nDataOffset, nDataSize);

	XN_PROFILING_END_SECTION
}
Code Example #4
XnStatus XnBufferPool::GetBuffer(XnBuffer** ppBuffer)
{
	XnStatus nRetVal = XN_STATUS_OK;
	
	xnOSEnterCriticalSection(&m_hLock);

	XnBuffersList::Iterator it = m_FreeBuffers.begin();
	if (it == m_FreeBuffers.end())
	{
		xnOSLeaveCriticalSection(&m_hLock);
		return XN_STATUS_ALLOC_FAILED;
	}

	XnBufferInPool* pBuffer = *it;

	// remove from list
	nRetVal = m_FreeBuffers.Remove(it);
	if (nRetVal != XN_STATUS_OK)
	{
		xnOSLeaveCriticalSection(&m_hLock);
		return XN_STATUS_ALLOC_FAILED;
	}

	pBuffer->m_nRefCount = 1;
	xnDumpWriteString(m_dump, "%u taken from pool\n", pBuffer->m_nID);

	xnOSLeaveCriticalSection(&m_hLock);

	*ppBuffer = pBuffer;
	
	return (XN_STATUS_OK);
}
Code Example #5
XnStatus XnSharedMemoryBufferPool::AllocateBuffers()
{
	XnStatus nRetVal = XN_STATUS_OK;
	
	if (m_nBufferSize > m_nMaxBufferSize)
	{
		return XN_STATUS_ALLOC_FAILED;
	}

	if (m_pSharedMemoryAddress != NULL)
	{
		// already allocated. nothing to do here
		return (XN_STATUS_OK);
	}

	// first time. allocate shared memory
	XnUInt32 nTotalSize = m_nMaxBufferSize * m_nBufferCount;
	nRetVal = xnOSCreateSharedMemory(m_strName, nTotalSize, XN_OS_FILE_READ | XN_OS_FILE_WRITE, &m_hSharedMemory);
	XN_IS_STATUS_OK(nRetVal);

	void* pAddress;
	nRetVal = xnOSSharedMemoryGetAddress(m_hSharedMemory, &pAddress);
	if (nRetVal != XN_STATUS_OK)
	{
		xnOSCloseSharedMemory(m_hSharedMemory);
		m_hSharedMemory = NULL;
		return (nRetVal);
	}

	m_pSharedMemoryAddress = (XnUChar*)pAddress;

	// now allocate buffers
	for (XnUInt32 i = 0; i < m_nBufferCount; ++i)
	{
		XnBufferInPool* pBuffer = XN_NEW(XnBufferInPool);
		if (pBuffer == NULL)
		{
			Free();
			return (XN_STATUS_ALLOC_FAILED);
		}

		pBuffer->m_nID = i;

		pBuffer->SetExternalBuffer(m_pSharedMemoryAddress + i*m_nMaxBufferSize, m_nMaxBufferSize);

		xnDumpWriteString(Dump(), "Allocated buffer %u with size %u\n", i, m_nMaxBufferSize);

		// add it to free list
		m_AllBuffers.AddLast(pBuffer);
		m_FreeBuffers.AddLast(pBuffer);
	}

	return (XN_STATUS_OK);
}
Code Example #6
void XnBufferPool::DecRef(XnBuffer* pBuffer)
{
	if (pBuffer == NULL)
	{
		return;
	}

	XnBufferInPool* pBufInPool = (XnBufferInPool*)pBuffer;

	xnOSEnterCriticalSection(&m_hLock);

	xnDumpWriteString(m_dump, "%u dec ref (%d)", pBufInPool->m_nID, pBufInPool->m_nRefCount-1);

	if (--pBufInPool->m_nRefCount == 0)
	{
		if (pBufInPool->m_bDestroy)
		{
			// remove it from all buffers pool
			XnBuffersList::ConstIterator it = m_AllBuffers.Find(pBufInPool);
			XN_ASSERT(it != m_AllBuffers.end());
			m_AllBuffers.Remove(it);
			// and free it
			DestroyBuffer(pBufInPool);
			xnDumpWriteString(m_dump, "destroy!\n");
		}
		else
		{
			// return it to free buffers list
			m_FreeBuffers.AddLast(pBufInPool);
			xnDumpWriteString(m_dump, "return to pool!\n");
		}
	}
	else
	{
		xnDumpWriteString(m_dump, "\n");
	}

	xnOSLeaveCriticalSection(&m_hLock);
}
Code Example #7
void XnBufferPool::AddRef(XnBuffer* pBuffer)
{
	if (pBuffer == NULL)
	{
		return;
	}

	xnOSEnterCriticalSection(&m_hLock);
	XnBufferInPool* pBufferInPool = (XnBufferInPool*)pBuffer;
	++pBufferInPool->m_nRefCount;

	xnDumpWriteString(m_dump, "%u add ref (%d)\n", pBufferInPool->m_nID, pBufferInPool->m_nRefCount);

	xnOSLeaveCriticalSection(&m_hLock);
}
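Taken together, GetBuffer, AddRef and DecRef (Code Examples #4, #6 and #7) form the pool's reference-counting lifecycle: a buffer leaves the pool with a count of 1, every additional owner calls AddRef, and the last DecRef returns it to the free list (or destroys it if it was marked for destruction). A minimal usage sketch follows, assuming pool is an already-initialized XnBufferPool-derived object such as the XnSharedMemoryBufferPool of Code Example #5; the function name is a placeholder.

XnStatus useOneBuffer(XnBufferPool& pool)
{
	XnBuffer* pBuffer = NULL;

	// take a free buffer; it leaves the pool with a reference count of 1
	XnStatus nRetVal = pool.GetBuffer(&pBuffer);
	XN_IS_STATUS_OK(nRetVal);

	// hand the buffer to a second consumer: bump the reference count
	pool.AddRef(pBuffer);

	// ... fill pBuffer and let the consumer process it ...

	// each owner releases its reference; the final DecRef puts the buffer back on the free list
	pool.DecRef(pBuffer);	// consumer's reference
	pool.DecRef(pBuffer);	// our reference

	return (XN_STATUS_OK);
}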
Code Example #8
XnStatus XnBufferPool::ChangeBufferSize(XnUInt32 nBufferSize)
{
	XnStatus nRetVal = XN_STATUS_OK;

	xnDumpWriteString(m_dump, "changing buffer size to %d\n", nBufferSize);

	xnOSEnterCriticalSection(&m_hLock);

	m_nBufferSize = nBufferSize;

	nRetVal = AllocateBuffers();
	if (nRetVal != XN_STATUS_OK)
	{
		xnOSLeaveCriticalSection(&m_hLock);
		return (nRetVal);
	}

	xnOSLeaveCriticalSection(&m_hLock);
	
	return (XN_STATUS_OK);
}
Code Example #9
XnBool XnSensor::HasSynchedFrameArrived(const XnChar* strDepthStream, const XnChar* strImageStream)
{
	// find both streams
	XnDeviceStream* pDepth;
	XnDeviceStream* pImage;

	if (XN_STATUS_OK != FindStream(strDepthStream, &pDepth))
		return FALSE;

	if (XN_STATUS_OK != FindStream(strImageStream, &pImage))
		return FALSE;

	XnUInt32 nThreshold = XN_SENSOR_FRAME_SYNC_MAX_DIFF;
	if (IsHighResTimestamps())
		nThreshold *= 1000;
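	// note (not in the original source): the threshold constant is in milliseconds, while
	// high-res timestamps are in microseconds (see GetTimeStamp, Code Example #14), hence
	// the factor of 1000 above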

	// wait for both to advance, and time difference to be less than threshold
	XnInt32 nTimestampDiff = XnInt32(pDepth->GetLastTimestamp() - pImage->GetLastTimestamp());

	XnBool bConditionMet = (
		pDepth->IsNewDataAvailable() &&
		pImage->IsNewDataAvailable() &&
		(XnUInt32)abs(nTimestampDiff) <= nThreshold
		);

	if (xnLogIsDumpMaskEnabled(XN_DUMP_FRAME_SYNC))
	{
		XnUInt64 nNow;
		xnOSGetHighResTimeStamp(&nNow);
		xnDumpWriteString(m_FrameSyncDump, "%llu,%u,%llu,%u,%llu,%s\n",
			nNow,
			pDepth->IsNewDataAvailable(),
			pDepth->GetLastTimestamp(),
			pImage->IsNewDataAvailable(),
			pImage->GetLastTimestamp(),
			bConditionMet ? "OK" : "Waiting");
	}

	return bConditionMet;
}
Code Example #10
XN_C_API void xnOSLogMemFree(const void* pMemBlock)
{
	if (pMemBlock == NULL)
		return;

	XnMemBlockDataNode* pPrev = NULL;

	XnAutoCSLocker lock(g_hCS);
	XnMemBlockDataNode* pNode = g_allocatedMemory.pFirst;
	while (pNode != NULL)
	{
		if (pNode->Data.pMemBlock == pMemBlock)
		{
			// found. Remove it from the list
			if (pPrev == NULL) // no previous
				g_allocatedMemory.pFirst = pNode->pNext;
			else
				pPrev->pNext = pNode->pNext;

			// if it was last, update last
			if (g_allocatedMemory.pLast == pNode)
				g_allocatedMemory.pLast = pPrev;

			xnDumpWriteString(m_dump, "Free,0x%x\n", pMemBlock);

			// deallocate memory
			xnOSFree(pNode);

			return;
		}

		// move to next
		pPrev = pNode;
		pNode = pNode->pNext;
	}

	// if we got here then we're trying to free a non-allocated memory
	XN_ASSERT(FALSE);
}
Code Example #11
File: XnSensorFPS.cpp  Project: marshally/Sensor
void XnSensorFPS::Mark(XnFPSData* pFPS, const XnChar* csName, XnUInt32 nFrameID, XnUInt64 nTS)
{
	if (!xnLogIsEnabled(XN_MASK_SENSOR_FPS, XN_LOG_VERBOSE))
		return;

	XnUInt64 nNow;
	xnOSGetHighResTimeStamp(&nNow);

	xnFPSMarkFrame(pFPS, nNow);

	xnDumpWriteString(m_FramesDump, "%llu,%s,%u,%llu\n", nNow, csName, nFrameID, nTS);

	// get current time in seconds
	nNow /= 1000000;

	if (nNow != m_nLastPrint)
	{
		m_nLastPrint = nNow;
		xnLogVerbose(XN_MASK_SENSOR_FPS, "[FPS] InputFrames - I: %5.2f, D: %5.2f, OutputFrames - I: %5.2f, D: %5.2f",
			xnFPSCalc(&m_InputImage), xnFPSCalc(&m_InputDepth), xnFPSCalc(&m_OutputImage), xnFPSCalc(&m_OutputDepth));
	}
}
Code Example #12
File: XnAudioProcessor.cpp  Project: marshally/Sensor
void XnAudioProcessor::ProcessWholePacket(const XnSensorProtocolResponseHeader* pHeader, const XnUChar* pData)
{
	XnInt32 nAvailableBytes = 0;

	xnOSEnterCriticalSection(&m_pDevicePrivateData->hAudioBufferCriticalSection);

	// take write packet
	XnUChar* pWritePacket = m_pDevicePrivateData->pAudioBuffer + (m_pDevicePrivateData->nAudioWriteIndex * m_pDevicePrivateData->nAudioPacketSize);

	if (m_bDeleteChannel)
	{
		XnUInt16* pSamples = (XnUInt16*)pData;
		XnUInt16* pSamplesEnd = (XnUInt16*)(pData + pHeader->nBufSize);
		XnUInt16* pOutput = (XnUInt16*)pWritePacket;

		while (pSamples < pSamplesEnd)
		{
			*pOutput = *pSamples;

			pOutput++;
			// skip a sample
			pSamples += 2;
		}
	}
	else
	{
		// copy data
		xnOSMemCopy(pWritePacket, pData, pHeader->nBufSize);
	}

	// mark timestamp
	m_pDevicePrivateData->pAudioPacketsTimestamps[m_pDevicePrivateData->nAudioWriteIndex] = GetTimeStamp(pHeader->nTimeStamp);

	if (m_nLastPacketID % 10 == 0)
	{
		XnUInt64 nSysTime;
		xnOSGetTimeStamp(&nSysTime);

		xnDumpWriteString(m_pDevicePrivateData->BandwidthDump, "%llu,%s,%d,%d\n",
			nSysTime, "Audio", -1, m_nBytesReceived);

		m_nBytesReceived = 0;
	}

	// move write index forward
	m_pDevicePrivateData->nAudioWriteIndex = (m_pDevicePrivateData->nAudioWriteIndex + 1) % m_pDevicePrivateData->nAudioBufferNumOfPackets;

	// if write index caught up with read index (buffer full), move read index forward (and lose a packet)
	if (m_pDevicePrivateData->nAudioWriteIndex == m_pDevicePrivateData->nAudioReadIndex)
	{
		m_pDevicePrivateData->nAudioReadIndex = (m_pDevicePrivateData->nAudioReadIndex + 1) % m_pDevicePrivateData->nAudioBufferNumOfPackets;
	}

	xnOSLeaveCriticalSection(&m_pDevicePrivateData->hAudioBufferCriticalSection);

	xnDumpWriteBuffer(m_AudioInDump, pData, pHeader->nBufSize);

	if (m_pDevicePrivateData->pAudioCallback != NULL)
	{
		m_pDevicePrivateData->pAudioCallback(m_pDevicePrivateData->pAudioCallbackCookie);
	}
}
Code Example #13
XN_C_API void* xnOSLogMemAlloc(void* pMemBlock, XnAllocationType nAllocType, XnUInt32 nBytes, const XnChar* csFunction, const XnChar* csFile, XnUInt32 nLine, const XnChar* csAdditional)
{
	static XnBool bFirstTime = TRUE;
	static XnBool bReentrent = FALSE;

	if (bFirstTime)
	{
		bFirstTime = FALSE;
		printf("************************************************************\n");
		printf("**  WARNING: Memory Profiling is on!                      **\n");
		printf("************************************************************\n");

		m_dump = XN_DUMP_CLOSED;

		bReentrent = TRUE;
		xnOSCreateCriticalSection(&g_hCS);

#ifdef XN_MEMORY_PROFILING_DUMP
		xnDumpForceInit(&m_dump, "Entry,Address,AllocType,Bytes,Function,File,Line,AdditionalInfo\n", "MemProfiling.log");
#endif
		bReentrent = FALSE;
	}

	if (bReentrent)
	{
		return pMemBlock;
	}

	XnMemBlockDataNode* pNode;
	pNode = (XnMemBlockDataNode*)xnOSMalloc(sizeof(XnMemBlockDataNode));
	if (pNode == NULL)
	{
		// allocation of the tracking node failed - return the block untracked
		return pMemBlock;
	}
	pNode->Data.pMemBlock = pMemBlock;
	pNode->Data.nAllocType = nAllocType;
	pNode->Data.nBytes = nBytes;
	pNode->Data.csFunction = csFunction;
	pNode->Data.csFile = csFile;
	pNode->Data.nLine = nLine;
	pNode->Data.csAdditional = csAdditional;
	pNode->Data.nFrames = XN_MEM_PROF_MAX_FRAMES;
	xnDumpWriteString(m_dump, "Alloc,0x%x,%s,%u,%s,%s,%u,%s\n", pMemBlock, XnGetAllocTypeString(nAllocType), nBytes, csFunction, csFile, nLine, csAdditional);

	// try to get call stack (skip 2 frames - this one and the alloc func)
	XnChar* pstrFrames[XN_MEM_PROF_MAX_FRAMES];
	for (XnUInt32 i = 0; i < XN_MEM_PROF_MAX_FRAMES; ++i)
	{
		pstrFrames[i] = pNode->Data.aFrames[i];
	}
	if (XN_STATUS_OK != xnOSGetCurrentCallStack(2, pstrFrames, XN_MEM_PROF_MAX_FRAME_LEN, &pNode->Data.nFrames))
	{
		pNode->Data.nFrames = 0;
	}

	pNode->pNext = NULL;

	XnAutoCSLocker lock(g_hCS);
	if (g_allocatedMemory.pLast == NULL)
	{
		g_allocatedMemory.pFirst = g_allocatedMemory.pLast = pNode;
	}
	else
	{
		g_allocatedMemory.pLast->pNext = pNode;
		g_allocatedMemory.pLast = pNode;
	}

	return pMemBlock;
}
Code Example #14
XnUInt64 XnDataProcessor::GetTimeStamp(XnUInt32 nDeviceTimeStamp)
{
	const XnUInt64 nWrapPoint = ((XnUInt64)XN_MAX_UINT32) + 1;
	XnUInt64 nResultInTicks;

	XnUInt64 nNow;
	xnOSGetHighResTimeStamp(&nNow);

	XnChar csDumpComment[200] = "";

	XnBool bCheckSanity = TRUE;

	// we register the first TS calculated as time-zero. Every stream's TS data will be 
	// synchronized with it
	if (m_pDevicePrivateData->nGlobalReferenceTS == 0)
	{
		xnOSEnterCriticalSection(&m_pDevicePrivateData->hEndPointsCS);
		if (m_pDevicePrivateData->nGlobalReferenceTS == 0)
		{
			m_pDevicePrivateData->nGlobalReferenceTS = nDeviceTimeStamp;
			m_pDevicePrivateData->nGlobalReferenceOSTime = nNow;
		}
		xnOSLeaveCriticalSection(&m_pDevicePrivateData->hEndPointsCS);
	}

	if (m_TimeStampData.bFirst)
	{
		// check how much OS time passed since global reference was taken
		XnUInt64 nOSTime = nNow - m_pDevicePrivateData->nGlobalReferenceOSTime;

		// check how many full wrap-arounds occurred (according to OS time)
		XnFloat fWrapAroundInMicroseconds = nWrapPoint / (XnDouble)m_pDevicePrivateData->fDeviceFrequency;
		XnUInt32 nWraps = nOSTime / fWrapAroundInMicroseconds; // floor

		// now check: if the current timestamp is less than the global reference, one more
		// wrap-around has occurred (make sure the difference is significant - we allow up to
		// 10 ms of slack - otherwise it could just be a race condition)
		if (m_pDevicePrivateData->nGlobalReferenceTS > nDeviceTimeStamp && 
			nOSTime > XN_SENSOR_TIMESTAMP_SANITY_DIFF*1000)
		{
			++nWraps;
		}

		m_TimeStampData.nReferenceTS = m_pDevicePrivateData->nGlobalReferenceTS;
		m_TimeStampData.nTotalTicksAtReferenceTS = nWrapPoint * nWraps;
		m_TimeStampData.nLastDeviceTS = 0;
		m_TimeStampData.bFirst = FALSE;
		nResultInTicks = 0;
		bCheckSanity = FALSE; // no need.
		sprintf(csDumpComment, "Init. Total Ticks in Ref TS: %llu", m_TimeStampData.nTotalTicksAtReferenceTS);
	}

	if (nDeviceTimeStamp > m_TimeStampData.nLastDeviceTS) // this is the normal case
	{
		nResultInTicks = m_TimeStampData.nTotalTicksAtReferenceTS + nDeviceTimeStamp - m_TimeStampData.nReferenceTS;
	}
	else // wrap around occurred
	{
		// add the passed time to the reference time
		m_TimeStampData.nTotalTicksAtReferenceTS += (nWrapPoint + nDeviceTimeStamp - m_TimeStampData.nReferenceTS);
		// mark reference timestamp
		m_TimeStampData.nReferenceTS = nDeviceTimeStamp;

		sprintf(csDumpComment, "Wrap around. Refernce TS: %u / TotalTicksAtReference: %llu", m_TimeStampData.nReferenceTS, m_TimeStampData.nTotalTicksAtReferenceTS);

		nResultInTicks = m_TimeStampData.nTotalTicksAtReferenceTS;
	}

	m_TimeStampData.nLastDeviceTS = nDeviceTimeStamp;

	// calculate result in microseconds
	// NOTE: Intel compiler does too much optimization, and we lose up to 5 milliseconds. We perform
	// the entire calculation in XnDouble as a workaround
	XnDouble dResultTimeMicroSeconds = (XnDouble)nResultInTicks / (XnDouble)m_pDevicePrivateData->fDeviceFrequency;
	XnUInt64 nResultTimeMilliSeconds = (XnUInt64)(dResultTimeMicroSeconds / 1000.0);

	XnBool bIsSane = TRUE;

	// perform sanity check
	if (bCheckSanity && (nResultTimeMilliSeconds > (m_TimeStampData.nLastResultTime + XN_SENSOR_TIMESTAMP_SANITY_DIFF*1000)))
	{
		bIsSane = FALSE;
		sprintf(csDumpComment, "%s,Didn't pass sanity. Will try to re-sync.", csDumpComment);
	}

	// calc result
	XnUInt64 nResult = (m_pDevicePrivateData->pSensor->IsHighResTimestamps() ? (XnUInt64)dResultTimeMicroSeconds : nResultTimeMilliSeconds);

	// dump it
	xnDumpWriteString(m_pDevicePrivateData->TimestampsDump, "%llu,%s,%u,%llu,%s\n", nNow, m_TimeStampData.csStreamName, nDeviceTimeStamp, nResult, csDumpComment);

	if (bIsSane)
	{
		m_TimeStampData.nLastResultTime = nResultTimeMilliSeconds;
		return (nResult);
	}
	else
	{
		// sanity failed. We lost sync. restart
		m_TimeStampData.bFirst = TRUE;
		return GetTimeStamp(nDeviceTimeStamp);
	}
}
Code Example #15
File: Audio.cpp  Project: jgollub/MetaImagerProj
// --------------------------------
// Code
// --------------------------------
void audioPlay()
{
	if (g_AudioData.hWaveOut == NULL) // not initialized
		return;

	const AudioMetaData* pAudioMD = getAudioMetaData();
	if (pAudioMD == NULL || pAudioMD->DataSize() == 0 || !pAudioMD->IsDataNew())
		return;

	if (g_AudioData.bFlush)
	{
		printf("Audio is falling behind. Flushing all queue.\n");
		xnDumpWriteString(g_AudioData.SyncDump, "Flushing queue...\n");

		// remember which header to check next, so the dropped (flushed) headers are ignored
		g_AudioData.nFirstToCheck = g_AudioData.nAudioNextBuffer;
		// flush all queued headers
		waveOutReset(g_AudioData.hWaveOut);

		g_AudioData.bFlush = false;
		return;
	}

	int nBufferSize = pAudioMD->DataSize();

	WAVEHDR* pHeader = &g_AudioData.pAudioBuffers[g_AudioData.nAudioNextBuffer];
	if ((pHeader->dwFlags & WHDR_DONE) == 0)
	{
		printf("No audio buffer is available!. Audio buffer will be lost!\n");
		return;
	}

	// first unprepare this header
	MMRESULT mmRes = waveOutUnprepareHeader(g_AudioData.hWaveOut, pHeader, sizeof(WAVEHDR));
	if (mmRes != MMSYSERR_NOERROR)
	{
		CHAR msg[250];
		waveOutGetErrorText(mmRes, msg, 250);
		printf("Failed unpreparing header: %s\n", msg);
	}

	int nMaxPlayedAudio = (int)(pAudioMD->SampleRate() / 1000.0 * pAudioMD->NumberOfChannels() * 2 * AUDIO_LATENCY_THRESHOLD);
	if (nBufferSize > nMaxPlayedAudio)
	{
		printf("Dropping %d bytes of audio to keep synch.\n", nBufferSize - nMaxPlayedAudio);
		nBufferSize = nMaxPlayedAudio;
	}

	const XnUInt8* pData = pAudioMD->Data();

	if (nBufferSize > g_AudioData.nBufferSize)
	{
		printf("Dropping %d bytes of audio to match buffer size.\n", nBufferSize - g_AudioData.nBufferSize);
		pData += (nBufferSize - g_AudioData.nBufferSize);
		nBufferSize = g_AudioData.nBufferSize;
	}

	pHeader->dwFlags = 0;
	xnOSMemCopy(pHeader->lpData, pData, nBufferSize);
	pHeader->dwBufferLength = nBufferSize;

	// prepare header
	mmRes = waveOutPrepareHeader(g_AudioData.hWaveOut, pHeader, sizeof(WAVEHDR));
	if (mmRes != MMSYSERR_NOERROR)
	{
		CHAR msg[250];
		waveOutGetErrorText(mmRes, msg, 250);
		printf("Unable to prepare header: %s\n", msg);
		return;
	}

	// queue header
	mmRes = waveOutWrite(g_AudioData.hWaveOut, pHeader, sizeof(WAVEHDR));
	if (mmRes != MMSYSERR_NOERROR)
	{
		CHAR msg[250];
		waveOutGetErrorText(mmRes, msg, 250);
		printf("Unable to queue header: %s\n", msg);
		return;
	}

	// store the end-of-playback time of this buffer as its timestamp
	g_AudioData.pAudioTimestamps[g_AudioData.nAudioNextBuffer] = (XnUInt64)(pAudioMD->Timestamp() + nBufferSize / (pAudioMD->BitsPerSample() / 8.0) / pAudioMD->NumberOfChannels() / (pAudioMD->SampleRate() / 1e6));
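	// illustrative numbers (not in the original source): for 16-bit stereo audio at 16 kHz,
	// 6400 bytes are 6400 / 2 / 2 = 1600 samples per channel; at 0.016 samples/us that is
	// 100,000 us, so the stored value is the packet timestamp plus 100 ms - the moment this
	// buffer finishes playing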

	xnDumpWriteString(g_AudioData.SyncDump, "Queued index %d with timestamp %llu (%u bytes, %f ms, end timestamp: %llu)\n", g_AudioData.nAudioNextBuffer, pAudioMD->Timestamp(), nBufferSize, nBufferSize / 2.0 / pAudioMD->NumberOfChannels() / (pAudioMD->SampleRate() / 1e3), g_AudioData.pAudioTimestamps[g_AudioData.nAudioNextBuffer]);

	g_AudioData.nAudioNextBuffer = (g_AudioData.nAudioNextBuffer + 1) % NUMBER_OF_AUDIO_BUFFERS;
}
Code Example #16
XnUInt64 XnDataProcessor::GetTimeStamp(XnUInt32 nDeviceTimeStamp)
{
	const XnUInt64 nWrapPoint = ((XnUInt64)XN_MAX_UINT32) + 1;
	XnUInt64 nResultInTicks;

	XnUInt64 nNow;
	xnOSGetHighResTimeStamp(&nNow);

	XnChar csDumpComment[200] = "";

	XnBool bCheckSanity = TRUE;

	// we register the first TS calculated as time-zero. Every stream's TS data will be 
	// synchronized with it
	if (m_pDevicePrivateData->nGlobalReferenceTS == 0)
	{
		xnOSEnterCriticalSection(&m_pDevicePrivateData->hEndPointsCS);
		if (m_pDevicePrivateData->nGlobalReferenceTS == 0)
		{
			m_pDevicePrivateData->nGlobalReferenceTS = nDeviceTimeStamp;
			m_pDevicePrivateData->nGlobalReferenceOSTime = nNow;
		}
		xnOSLeaveCriticalSection(&m_pDevicePrivateData->hEndPointsCS);
	}

	if (m_TimeStampData.bFirst)
	{
		/*
		This is a bit tricky, as we need to synchronize the first timestamp of different streams.
		We somehow need to translate 32-bit tick counts into 64-bit timestamps. The device timestamps
		wrap around every ~71.5 seconds (for a PS1080 @ 60 MHz).
		Let's assume the first packet of the first stream got timestamp X, and we now get the first
		packet of another stream with timestamp Y. We need to figure out the relation between X and Y
		by analyzing the following scenarios:
		1. Y is after X, in the same period (no wraparound yet).
		2. Y is after X, in a different period (one or more wraparounds occurred).
		3. Y is before X, in the same period (might happen due to a race condition).
		4. Y is before X, in a different period (can happen if X is very small and Y is almost at the wraparound).

		The following code tries to handle all these cases. It uses an OS timer to estimate how many
		wraparounds occurred.
		*/
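
		// illustrative numbers (not in the original source): with fDeviceFrequency at
		// 60 ticks/us (60 MHz), one wraparound spans 2^32 / 60 ~= 71,582,788 us (~71.6 s);
		// if ~150 s of OS time passed since the global reference, the rough estimate below
		// gives nWraps = 2, and the half-period check that follows confirms or adjusts it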

		// estimate the number of wraparounds that occurred, using OS time
		XnUInt64 nOSTime = nNow - m_pDevicePrivateData->nGlobalReferenceOSTime;

		// calculate wraparound length
		XnFloat fWrapAroundInMicroseconds = nWrapPoint / (XnDouble)m_pDevicePrivateData->fDeviceFrequency;

		// perform a rough estimation
		XnInt32 nWraps = nOSTime / fWrapAroundInMicroseconds;

		// now refine the estimate: compare it against the elapsed OS time and adjust the wraparound count
		XnInt64 nEstimatedTicks = 
			nWraps * nWrapPoint + // wraps time
			nDeviceTimeStamp - m_pDevicePrivateData->nGlobalReferenceTS;

		XnInt64 nEstimatedTime = nEstimatedTicks / (XnDouble)m_pDevicePrivateData->fDeviceFrequency;

		if (nEstimatedTime < nOSTime - 0.5 * fWrapAroundInMicroseconds)
			nWraps++;
		else if (nEstimatedTime > nOSTime + 0.5 * fWrapAroundInMicroseconds)
			nWraps--;

		// handle the two special cases (3 & 4), in which we get a timestamp that is
		// *before* the global reference TS (meaning before time zero)
		if (nWraps < 0 || // case 4
			(nWraps == 0 && nDeviceTimeStamp < m_pDevicePrivateData->nGlobalReferenceTS)) // case 3
		{
			nDeviceTimeStamp = m_pDevicePrivateData->nGlobalReferenceTS;
			nWraps = 0;
		}

		m_TimeStampData.nReferenceTS = m_pDevicePrivateData->nGlobalReferenceTS;
		m_TimeStampData.nTotalTicksAtReferenceTS = nWrapPoint * nWraps;
		m_TimeStampData.nLastDeviceTS = 0;
		m_TimeStampData.bFirst = FALSE;
		nResultInTicks = 0;
		bCheckSanity = FALSE; // no need.
		sprintf(csDumpComment, "Init. Total Ticks in Ref TS: %llu", m_TimeStampData.nTotalTicksAtReferenceTS);
	}

	if (nDeviceTimeStamp > m_TimeStampData.nLastDeviceTS) // this is the normal case
	{
		nResultInTicks = m_TimeStampData.nTotalTicksAtReferenceTS + nDeviceTimeStamp - m_TimeStampData.nReferenceTS;
	}
	else // wrap around occurred
	{
		// add the passed time to the reference time
		m_TimeStampData.nTotalTicksAtReferenceTS += (nWrapPoint + nDeviceTimeStamp - m_TimeStampData.nReferenceTS);
		// mark reference timestamp
		m_TimeStampData.nReferenceTS = nDeviceTimeStamp;

		sprintf(csDumpComment, "Wrap around. Refernce TS: %u / TotalTicksAtReference: %llu", m_TimeStampData.nReferenceTS, m_TimeStampData.nTotalTicksAtReferenceTS);

		nResultInTicks = m_TimeStampData.nTotalTicksAtReferenceTS;
	}

	m_TimeStampData.nLastDeviceTS = nDeviceTimeStamp;

	// calculate result in microseconds
	// NOTE: Intel compiler does too much optimization, and we lose up to 5 milliseconds. We perform
	// the entire calculation in XnDouble as a workaround
	XnDouble dResultTimeMicroSeconds = (XnDouble)nResultInTicks / (XnDouble)m_pDevicePrivateData->fDeviceFrequency;
	XnUInt64 nResultTimeMilliSeconds = (XnUInt64)(dResultTimeMicroSeconds / 1000.0);

	XnBool bIsSane = TRUE;

	// perform sanity check
	if (bCheckSanity && (nResultTimeMilliSeconds > (m_TimeStampData.nLastResultTime + XN_SENSOR_TIMESTAMP_SANITY_DIFF*1000)))
	{
		bIsSane = FALSE;
		sprintf(csDumpComment, "%s,Didn't pass sanity. Will try to re-sync.", csDumpComment);
	}

	// calc result
	XnUInt64 nResult = (m_pDevicePrivateData->pSensor->IsHighResTimestamps() ? (XnUInt64)dResultTimeMicroSeconds : nResultTimeMilliSeconds);

	// dump it
	xnDumpWriteString(m_pDevicePrivateData->TimestampsDump, "%llu,%s,%u,%llu,%s\n", nNow, m_TimeStampData.csStreamName, nDeviceTimeStamp, nResult, csDumpComment);

	if (bIsSane)
	{
		m_TimeStampData.nLastResultTime = nResultTimeMilliSeconds;
		return (nResult);
	}
	else
	{
		// sanity failed. We lost sync. restart
		m_TimeStampData.bFirst = TRUE;
		return GetTimeStamp(nDeviceTimeStamp);
	}
}