/// <summary>
/// Main processing function
/// </summary>
void CBackgroundRemovalBasics::Update()
{
    if (NULL == m_pNuiSensor)
    {
        return;
    }

    if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextBackgroundRemovedFrameEvent, 0))
    {
        ComposeImage();
    }

    if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextDepthFrameEvent, 0))
    {
        ProcessDepth();
    }

    if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextColorFrameEvent, 0))
    {
        ProcessColor();
    }

    if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextSkeletonFrameEvent, 0))
    {
        ProcessSkeleton();
    }
}
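// Sketch (assumption, not part of this snippet): the frame-ready events polled in
// Update() are usually manual-reset events created during sensor initialization and
// handed to the Kinect v1 runtime when the streams are opened. Member names
// m_pDepthStreamHandle and m_pColorStreamHandle are hypothetical here, and
// m_hNextBackgroundRemovedFrameEvent is signaled by the background removal stream,
// whose setup is not sketched.
HRESULT CBackgroundRemovalBasics::OpenStreamsSketch()
{
    m_hNextDepthFrameEvent    = CreateEvent(NULL, TRUE, FALSE, NULL);
    m_hNextColorFrameEvent    = CreateEvent(NULL, TRUE, FALSE, NULL);
    m_hNextSkeletonFrameEvent = CreateEvent(NULL, TRUE, FALSE, NULL);

    // Depth with player index, so the background removal stage can tell player
    // pixels from background pixels
    HRESULT hr = m_pNuiSensor->NuiImageStreamOpen(
        NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX, NUI_IMAGE_RESOLUTION_640x480,
        0, 2, m_hNextDepthFrameEvent, &m_pDepthStreamHandle);
    if (FAILED(hr)) return hr;

    hr = m_pNuiSensor->NuiImageStreamOpen(
        NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480,
        0, 2, m_hNextColorFrameEvent, &m_pColorStreamHandle);
    if (FAILED(hr)) return hr;

    // Skeleton tracking signals m_hNextSkeletonFrameEvent when a frame is ready
    return m_pNuiSensor->NuiSkeletonTrackingEnable(m_hNextSkeletonFrameEvent, 0);
}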
/// <summary>
/// Process an incoming stream frame
/// </summary>
void NuiDepthStream::ProcessStreamFrame()
{
    if (WAIT_OBJECT_0 == WaitForSingleObject(GetFrameReadyEvent(), 0))
    {
        // if we have received any valid new depth data we may need to draw
        ProcessDepth();
    }
}
/// <summary>
/// Main processing function
/// </summary>
void KinectEasyGrabber::Record()
{
    if (NULL == m_pNuiSensor)
    {
        return;
    }

    bool needToDraw = false;

    if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextDepthFrameEvent, 0))
    {
        // if we have received any valid new depth data we may need to draw
        if (SUCCEEDED(ProcessDepth()))
        {
            needToDraw = true;
        }
    }

    if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextColorFrameEvent, 0))
    {
        // if we have received any valid new color data we may need to draw
        if (SUCCEEDED(ProcessColor()))
        {
            needToDraw = true;
        }
    }

    // Depth is 30 fps. For any given combination of FPS, we should ensure we are within
    // half a frame of the more frequent of the two. But depth is always the greater
    // (or equal) of the two, so just use depth FPS.
    const int depthFps = 30;
    const int halfADepthFrameMs = (1000 / depthFps) / 2;

    // If we have not yet received any data for either color or depth since we started up, we shouldn't draw
    if (m_colorTimeStamp.QuadPart == 0 || m_depthTimeStamp.QuadPart == 0)
    {
        needToDraw = false;
    }

    // If the color frame is more than half a depth frame ahead of the depth frame we have,
    // then we should wait for another depth frame. Otherwise, just go with what we have.
    if (m_colorTimeStamp.QuadPart - m_depthTimeStamp.QuadPart > halfADepthFrameMs)
    {
        needToDraw = false;
    }

    if (needToDraw)
    {
        if (m_frameIndex >= 50)
        {
            return;
        }

        dumpToDisk(m_frameIndex, m_frameBasename, m_depthD16, m_colorRGBX,
                   m_colorCoordinates, m_depthTimeStamp, m_colorTimeStamp);

        // Draw the data with Direct2D
        m_pDrawKinectEasyGrabber->Draw(m_colorRGBX, m_colorWidth * m_colorHeight * cBytesPerPixel);

        m_frameIndex++;
    }
}
////////////////////////////////////////////////////////////////////////////////
// public member functions
////////////////////////////////////////////////////////////////////////////////

int IntelCamera::NextFrame()
{
    // color & depth image : force to synchronize
    if (ProcessColor() == -1)
        return -1;
    if (ProcessDepth() == -1)
        return -1;

    // point cloud
    if (MapColorToDepth() == -1)
        return -1;

    return 1;
}
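// Minimal usage sketch for NextFrame() (assumption: IntelCamera is constructed and
// opened elsewhere, and the accessors for the processed color/depth/point-cloud data
// are not shown in this snippet). NextFrame() returns 1 on success and -1 as soon as
// any of the three stages fails.
void GrabLoopSketch(IntelCamera &camera)
{
    while (camera.NextFrame() == 1)
    {
        // the color image, depth image and mapped point cloud for this frame are
        // now up to date inside the camera object; consume them here
    }
}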
/// <summary>
/// Main processing function
/// </summary>
void CKinectFusion::Update()
{
    if (nullptr == m_pNuiSensor)
    {
        return;
    }

    if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextDepthFrameEvent, 0))
    {
        ProcessDepth();
    }
}
/// <summary>
/// Main processing function
/// </summary>
void CDepthBasics::Update()
{
    if (NULL == m_pNuiSensor)
    {
        return;
    }

    if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextDepthFrameEvent, 0))
    {
        ProcessDepth();
    }
}
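// Sketch (assumption, based on the usual Kinect v1 sample pattern): Update() above
// only polls, so a caller has to invoke it repeatedly. The samples typically do this
// from the Win32 message pump, sleeping on the frame-ready event so the loop wakes
// when a new depth frame (or a window message) arrives. Window creation and the rest
// of the run loop are not shown here.
MSG msg = {0};
while (WM_QUIT != msg.message)
{
    HANDLE hEvents[] = { m_hNextDepthFrameEvent };

    // Wake for either a new depth frame or any pending window message
    MsgWaitForMultipleObjects(_countof(hEvents), hEvents, FALSE, INFINITE, QS_ALLINPUT);

    Update();

    while (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
    {
        TranslateMessage(&msg);
        DispatchMessage(&msg);
    }
}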
bool KinectSDKGrabber::GetNextFrame(cv::Mat &colorFrame, cv::Mat &depthFrame)
{
    WaitForSingleObject(m_hNextDepthFrameEvent, INFINITE);
    WaitForSingleObject(m_hNextColorFrameEvent, INFINITE);

    colorFrame = cv::Mat::zeros(FrameHeight, FrameWidth, CV_8UC4);
    depthFrame = cv::Mat::zeros(FrameHeight, FrameWidth, CV_32FC1);

    HRESULT hr = ProcessDepth((float *)depthFrame.data);
    if (FAILED(hr))
    {
        return false;
    }

    hr = ProcessColor(colorFrame);
    if (FAILED(hr))
    {
        return false;
    }

    return true;
}
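// Sketch of a capture loop using GetNextFrame() (assumption: KinectSDKGrabber
// construction and sensor initialization are handled elsewhere and are not shown in
// this snippet). The depth image is CV_32FC1, so it is normalized before display.
#include <opencv2/opencv.hpp>

void CaptureLoopSketch(KinectSDKGrabber &grabber)
{
    cv::Mat color, depth;
    while (grabber.GetNextFrame(color, depth))
    {
        // Scale the float depth into a viewable 8-bit image
        cv::Mat depthVis;
        cv::normalize(depth, depthVis, 0, 255, cv::NORM_MINMAX);
        depthVis.convertTo(depthVis, CV_8U);

        cv::imshow("color", color);
        cv::imshow("depth", depthVis);
        if (cv::waitKey(1) == 27)  // ESC to stop
        {
            break;
        }
    }
}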
void KinectSensorV1::Update()
{
    if (NULL == m_pNuiSensor)
    {
        return;
    }

    if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextDepthFrameEvent, 0))
    {
        ProcessDepth();
    }

    if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextColorFrameEvent, 0))
    {
        ProcessColor();
    }

    if (cRevieveRGB && WAIT_OBJECT_0 == WaitForSingleObject(m_hNextRGBFrameEvent, 0))
    {
        ProcessRGB();
    }
}
// After calling this, get the depth frame with GetDepth or GetDepthRGBX
void UpdateDepth()
{
    if (!m_pDepthFrameReader)
    {
        return;
    }

    IDepthFrame* pDepthFrame = NULL;
    HRESULT hr = m_pDepthFrameReader->AcquireLatestFrame(&pDepthFrame);

    if (SUCCEEDED(hr))
    {
        INT64 nTime = 0;
        IFrameDescription* pFrameDescription = NULL;
        int nWidth = 0;
        int nHeight = 0;
        USHORT nDepthMinReliableDistance = 0;
        USHORT nDepthMaxReliableDistance = 0;
        UINT nBufferSize = 0;
        UINT16 *pBuffer = NULL;

        hr = pDepthFrame->get_RelativeTime(&nTime);

        if (SUCCEEDED(hr))
        {
            hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
        }
        if (SUCCEEDED(hr))
        {
            hr = pFrameDescription->get_Width(&nWidth);
        }
        if (SUCCEEDED(hr))
        {
            m_nDepthWidth = nWidth;
            hr = pFrameDescription->get_Height(&nHeight);
        }
        if (SUCCEEDED(hr))
        {
            m_nDepthHeight = nHeight;
            hr = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinReliableDistance);
        }
        if (SUCCEEDED(hr))
        {
            hr = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxReliableDistance);
        }
        if (SUCCEEDED(hr))
        {
            hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);
        }

        if (SUCCEEDED(hr))
        {
            if (m_bCalculateDepthRGBX)
                ProcessDepth(nTime, pBuffer, nWidth, nHeight, nDepthMinReliableDistance, nDepthMaxReliableDistance);
            else
                ProcessDepthNoRGBX(nTime, pBuffer, nWidth, nHeight, nDepthMinReliableDistance, nDepthMaxReliableDistance);

            if (!m_bColorDepthMapCalculated)
            {
                CalculateColorDepthMap();
            }
            if (m_bMapDepthToColor && m_nColorWidth > 0 && m_nColorHeight > 0 &&
                SUCCEEDED(hr) && m_bColorDepthMapCalculated)
            {
                ProcessDepthToColor(m_pDepth, m_nDepthWidth, m_nDepthHeight,
                                    m_pColorDepthMap, m_nColorWidth, m_nColorHeight);
            }
        }

        SafeRelease(pFrameDescription);
    }
    else
    {
        DumpHR(hr);
    }

    SafeRelease(pDepthFrame);
}
// After calling this, get the depth frame with GetDepth or GetDepthRGBX
void Kinect2Manager::UpdateDepth(IDepthFrame* pDepthFrame)
{
#ifdef _USE_KINECT
    INT64 nTime = 0;
    IFrameDescription* pFrameDescription = NULL;
    int nWidth = 0;
    int nHeight = 0;
    USHORT nDepthMinReliableDistance = 0;
    USHORT nDepthMaxReliableDistance = 0;
    UINT nBufferSize = 0;
    UINT16 *pBuffer = NULL;

    HRESULT hr = pDepthFrame->get_RelativeTime(&nTime);

    if (SUCCEEDED(hr))
    {
        m_nDepthTime = nTime;
        hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
    }
    if (SUCCEEDED(hr))
    {
        hr = pFrameDescription->get_Width(&nWidth);
    }
    if (SUCCEEDED(hr))
    {
        m_nDepthWidth = nWidth;
        hr = pFrameDescription->get_Height(&nHeight);
    }
    if (SUCCEEDED(hr))
    {
        m_nDepthHeight = nHeight;
        hr = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinReliableDistance);
    }
    if (SUCCEEDED(hr))
    {
        hr = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxReliableDistance);
    }
    if (SUCCEEDED(hr))
    {
        hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);
    }

    if (SUCCEEDED(hr))
    {
        if (m_bCalculateDepthRGBX)
            ProcessDepth(nTime, pBuffer, nWidth, nHeight, nDepthMinReliableDistance, nDepthMaxReliableDistance);
        else
            ProcessDepthNoRGBX(nTime, pBuffer, nWidth, nHeight, nDepthMinReliableDistance, nDepthMaxReliableDistance);
    }

    SafeRelease(pFrameDescription);
#else
#endif
}
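// Sketch (assumption, not from this codebase): UpdateDepth() above takes an
// already-acquired IDepthFrame, so some caller has to obtain one first. With a plain
// depth frame reader that typically looks like the following; the member name
// m_pDepthFrameReader is hypothetical here.
void Kinect2Manager::PollDepthSketch()
{
#ifdef _USE_KINECT
    IDepthFrame* pDepthFrame = NULL;
    HRESULT hr = m_pDepthFrameReader->AcquireLatestFrame(&pDepthFrame);
    if (SUCCEEDED(hr))
    {
        UpdateDepth(pDepthFrame);
    }
    SafeRelease(pDepthFrame);
#endif
}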
/// <summary>
/// Main processing function
/// </summary>
void KinectEasyGrabber::Play()
{
    if (NULL == m_pNuiSensor)
    {
        return;
    }

    bool needToDraw = false;

    if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextDepthFrameEvent, 0))
    {
        // if we have received any valid new depth data we may need to draw
        if (SUCCEEDED(ProcessDepth()))
        {
            needToDraw = true;
        }
    }

    if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextColorFrameEvent, 0))
    {
        // if we have received any valid new color data we may need to draw
        if (SUCCEEDED(ProcessColor()))
        {
            needToDraw = true;
        }
    }

    // Depth is 30 fps. For any given combination of FPS, we should ensure we are within
    // half a frame of the more frequent of the two. But depth is always the greater
    // (or equal) of the two, so just use depth FPS.
    const int depthFps = 30;
    const int halfADepthFrameMs = (1000 / depthFps) / 2;

    // If we have not yet received any data for either color or depth since we started up, we shouldn't draw
    if (m_colorTimeStamp.QuadPart == 0 || m_depthTimeStamp.QuadPart == 0)
    {
        needToDraw = false;
    }

    // If the color frame is more than half a depth frame ahead of the depth frame we have,
    // then we should wait for another depth frame. Otherwise, just go with what we have.
    if (m_colorTimeStamp.QuadPart - m_depthTimeStamp.QuadPart > halfADepthFrameMs)
    {
        needToDraw = false;
    }

    if (needToDraw)
    {
        int outputIndex = 0;
        LONG* pDest;
        LONG* pSrc;

        // loop over each row and column of the color image
        for (LONG y = 0; y < m_colorHeight; ++y)
        {
            for (LONG x = 0; x < m_colorWidth; ++x)
            {
                // calculate index into depth array
                int depthIndex = x / m_colorToDepthDivisor + y / m_colorToDepthDivisor * m_depthWidth;

                USHORT depth  = m_depthD16[depthIndex];
                USHORT player = NuiDepthPixelToPlayerIndex(depth);

                // by default, set the source to copy from the background pixel
                pSrc = (LONG *)m_backgroundRGBX + outputIndex;

                // if we're tracking a player for the current pixel, draw from the color camera
                if (player > 0)
                {
                    // retrieve the depth to color mapping for the current depth pixel
                    LONG colorInDepthX = m_colorCoordinates[depthIndex * 2];
                    LONG colorInDepthY = m_colorCoordinates[depthIndex * 2 + 1];

                    // make sure the depth pixel maps to a valid point in color space
                    if (colorInDepthX >= 0 && colorInDepthX < m_colorWidth &&
                        colorInDepthY >= 0 && colorInDepthY < m_colorHeight)
                    {
                        // calculate index into color array
                        LONG colorIndex = colorInDepthX + colorInDepthY * m_colorWidth;

                        // set source for copy to the color pixel
                        pSrc = (LONG *)m_colorRGBX + colorIndex;
                    }
                }

                // calculate output pixel location
                pDest = (LONG *)m_outputRGBX + outputIndex++;

                // write output
                *pDest = *pSrc;
            }
        }

        // Draw the data with Direct2D
        m_pDrawKinectEasyGrabber->Draw(m_outputRGBX, m_colorWidth * m_colorHeight * cBytesPerPixel);
    }
}
/// <summary>
/// Main processing function
/// </summary>
void KinectEasyGrabber::RecordArray()
{
    if (NULL == m_pNuiSensor)
    {
        return;
    }

    bool needToDraw = false;

    if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextDepthFrameEvent, 0))
    {
        // if we have received any valid new depth data we may need to draw
        if (SUCCEEDED(ProcessDepth()))
        {
            needToDraw = true;
        }
    }

    if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextColorFrameEvent, 0))
    {
        // if we have received any valid new color data we may need to draw
        if (SUCCEEDED(ProcessColor()))
        {
            needToDraw = true;
        }
    }

    // Depth is 30 fps. For any given combination of FPS, we should ensure we are within
    // half a frame of the more frequent of the two. But depth is always the greater
    // (or equal) of the two, so just use depth FPS.
    const int depthFps = 30;
    const int halfADepthFrameMs = (1000 / depthFps) / 2;

    // If we have not yet received any data for either color or depth since we started up, we shouldn't draw
    if (m_colorTimeStamp.QuadPart == 0 || m_depthTimeStamp.QuadPart == 0)
    {
        needToDraw = false;
    }

    // If the color frame is more than half a depth frame ahead of the depth frame we have,
    // then we should wait for another depth frame. Otherwise, just go with what we have.
    if (m_colorTimeStamp.QuadPart - m_depthTimeStamp.QuadPart > halfADepthFrameMs)
    {
        needToDraw = false;
    }

    // number of frames to record
    if (m_frameIndex >= m_totalFrames)
    {
        RecordArrayToDisk();
        return;
    }
    if (m_dumped)
    {
        return;
    }

    if (needToDraw)
    {
        // Hard copy into RAM
        memcpy(m_outputArrayDepthD16[m_frameIndex], m_depthD16,
               m_depthWidth * m_depthHeight * sizeof(USHORT));
        memcpy(m_outputArrayRGBX[m_frameIndex], m_colorRGBX,
               m_colorWidth * m_colorHeight * cBytesPerPixel * sizeof(BYTE));
        memcpy(m_outputArrayColorCoordinates[m_frameIndex], m_colorCoordinates,
               m_depthWidth * m_depthHeight * 2 * sizeof(LONG));

        m_colorArrayTimeStamp[m_frameIndex] = m_colorTimeStamp;
        m_depthArrayTimeStamp[m_frameIndex] = m_depthTimeStamp;

        // Draw the data with Direct2D
#ifdef DRAW_FRAMES
        m_pDrawKinectEasyGrabber->Draw(m_colorRGBX, m_colorWidth * m_colorHeight * cBytesPerPixel);
#endif

        m_frameIndex++;
    }
}
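// Sketch (assumption, not shown in this snippet): the memcpy calls in RecordArray()
// presuppose per-frame destination buffers allocated up front, one slot per frame,
// with sizes matching the copies above. The outer arrays of pointers are assumed to
// already exist; this only shows the per-slot allocation.
void KinectEasyGrabber::AllocateRecordBuffersSketch()
{
    for (int i = 0; i < m_totalFrames; ++i)
    {
        m_outputArrayDepthD16[i]         = new USHORT[m_depthWidth * m_depthHeight];
        m_outputArrayRGBX[i]             = new BYTE[m_colorWidth * m_colorHeight * cBytesPerPixel];
        m_outputArrayColorCoordinates[i] = new LONG[m_depthWidth * m_depthHeight * 2];
    }
}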
/// <summary>
/// Main processing function
/// </summary>
void CDepthBasics::Update()
{
    if (!m_pDepthFrameReader)
    {
        return;
    }

    IDepthFrame* pDepthFrame = NULL;

    HRESULT hrDepth = m_pDepthFrameReader->AcquireLatestFrame(&pDepthFrame);

    if (SUCCEEDED(hrDepth))
    {
        INT64 nTime = 0;
        IFrameDescription* pFrameDescription = NULL;
        int nWidth = 0;
        int nHeight = 0;
        USHORT nDepthMinReliableDistance = 0;
        USHORT nDepthMaxDistance = 0;
        UINT nBufferSize = 0;
        UINT16 *pBuffer = NULL;

        HRESULT hr = pDepthFrame->get_RelativeTime(&nTime);

        if (SUCCEEDED(hr))
        {
            hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
        }
        if (SUCCEEDED(hr))
        {
            hr = pFrameDescription->get_Width(&nWidth);
        }
        if (SUCCEEDED(hr))
        {
            hr = pFrameDescription->get_Height(&nHeight);
        }
        if (SUCCEEDED(hr))
        {
            hr = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinReliableDistance);
        }
        if (SUCCEEDED(hr))
        {
            // In order to see the full range of depth (including the less reliable far field depth)
            // we are setting nDepthMaxDistance to the extreme potential depth threshold
            nDepthMaxDistance = USHRT_MAX;

            // Note: If you wish to filter by reliable depth distance, uncomment the following line.
            //// hr = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxDistance);
        }
        if (SUCCEEDED(hr))
        {
            hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);
        }
        if (SUCCEEDED(hr))
        {
            ProcessDepth(nTime, pBuffer, nWidth, nHeight, nDepthMinReliableDistance, nDepthMaxDistance);
        }

        SafeRelease(pFrameDescription);
    }

    SafeRelease(pDepthFrame);
}
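// SafeRelease is used in the Kinect v2 snippets above but not defined in this
// section. The Kinect SDK samples typically define it as a small template along
// these lines (a sketch, assuming plain COM interface pointers).
template<class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease)
{
    if (pInterfaceToRelease != NULL)
    {
        pInterfaceToRelease->Release();
        pInterfaceToRelease = NULL;
    }
}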