void DepthSensor::processDepth()
{
    HRESULT hr;
    NUI_IMAGE_FRAME imageFrame;

    ////////////////////////////////////////////////////////
    // Get an extended depth frame from Kinect

    hr = mNuiSensor->NuiImageStreamGetNextFrame(mDepthStreamHandle, 500, &imageFrame);
    if (FAILED(hr))
    {
        throw std::runtime_error("NuiImageStreamGetNextFrame failed");
    }

    // Copy the extended depth pixels into our local buffer
    hr = CopyExtendedDepth(imageFrame);

    LARGE_INTEGER currentDepthFrameTime = imageFrame.liTimeStamp;

    // Release the Kinect camera frame
    mNuiSensor->NuiImageStreamReleaseFrame(mDepthStreamHandle, &imageFrame);

    if (FAILED(hr))
    {
        throw std::runtime_error("CopyExtendedDepth failed");
    }
    

    // To enable playback of a .xed file through Kinect Studio and reset of the reconstruction
    // if the .xed loops, we test for when the frame timestamp has skipped a large number. 
    // Note: this will potentially continually reset live reconstructions on slow machines which
    // cannot process a live frame in less time than the reset threshold. Increase the number of
    // milliseconds in cResetOnTimeStampSkippedMilliseconds if this is a problem.
    if (m_bAutoResetReconstructionOnTimeout && m_cFrameCounter != 0
        && abs(currentDepthFrameTime.QuadPart - m_cLastDepthFrameTimeStamp.QuadPart) > cResetOnTimeStampSkippedMilliseconds)
    {
        hr = ResetReconstruction();

        if (FAILED(hr))
        {
            return;
        }
    }

    m_cLastDepthFrameTimeStamp = currentDepthFrameTime;

    // Bail out if the volume is not initialized
    if (nullptr == m_pVolume)
    {
        throw std::runtime_error("Kinect Fusion reconstruction volume not initialized. Please try reducing volume size or restarting.");
    }


    ////////////////////////////////////////////////////////
    // Depth to DepthFloat

    // Convert the pixels describing extended depth as unsigned short type in millimeters to depth
    // as floating point type in meters.
    hr = m_pVolume->DepthToDepthFloatFrame(m_pDepthImagePixelBuffer, cDepthImagePixels * sizeof(NUI_DEPTH_IMAGE_PIXEL), m_pDepthFloatImage, m_fMinDepthThreshold, m_fMaxDepthThreshold, m_bMirrorDepthFrame);
    if (FAILED(hr))
    {
        throw std::runtime_error("Kinect Fusion NuiFusionDepthToDepthFloatFrame call failed.");
        return;
    }


    ////////////////////////////////////////////////////////
    // ProcessFrame

    // Perform the camera tracking and update the Kinect Fusion Volume
    // This will create memory on the GPU, upload the image, run camera tracking and integrate the
    // data into the Reconstruction Volume if successful. Note that passing nullptr as the final 
    // parameter will use and update the internal camera pose.
    hr = m_pVolume->ProcessFrame(m_pDepthFloatImage, NUI_FUSION_DEFAULT_ALIGN_ITERATION_COUNT, m_cMaxIntegrationWeight, &m_worldToCameraTransform);
    if (SUCCEEDED(hr))
    {
        Matrix4 calculatedCameraPose;
        hr = m_pVolume->GetCurrentWorldToCameraTransform(&calculatedCameraPose);

        if (SUCCEEDED(hr))
        {
            // Set the pose
            m_worldToCameraTransform = calculatedCameraPose;
            m_cLostFrameCounter = 0;
            m_bTrackingFailed = false;
        }
    }
    else
    {
        if (hr == E_NUI_FUSION_TRACKING_ERROR)
        {
            m_cLostFrameCounter++;
            m_bTrackingFailed = true;
            std::cout << "Kinect Fusion camera tracking failed! Align the camera to the last tracked position. " << std::endl;
        }
        else
        {
            throw std::runtime_error("Kinect Fusion ProcessFrame call failed!");
            return;
        }
    }



    if (m_bAutoResetReconstructionWhenLost && m_bTrackingFailed && m_cLostFrameCounter >= cResetOnNumberOfLostFrames)
    {
        // Automatically clear volume and reset tracking if tracking fails
        hr = ResetReconstruction();

        if (FAILED(hr))
        {
            return;
        }

        // Set bad tracking message
        std::cout << "Kinect Fusion camera tracking failed, automatically reset volume" << std::endl; 
    
    }


    ////////////////////////////////////////////////////////
    // CalculatePointCloud
    // Raycast all the time, even if camera tracking failed, so we can visualize what is happening with the system
    hr = m_pVolume->CalculatePointCloud(m_pPointCloud, &m_worldToCameraTransform);

    if (FAILED(hr))
    {
        throw std::runtime_error("Kinect Fusion CalculatePointCloud call failed.");
        return;
    }


    ////////////////////////////////////////////////////////
    // ShadePointCloud and render

    hr = NuiFusionShadePointCloud(m_pPointCloud, &m_worldToCameraTransform, nullptr, m_pShadedSurface, nullptr);

    if (FAILED(hr))
    {
        throw std::runtime_error("Kinect Fusion NuiFusionShadePointCloud call failed.");
        return;
    }

    // Draw the shaded raycast volume image
    INuiFrameTexture * pShadedImageTexture = m_pShadedSurface->pFrameTexture;
    NUI_LOCKED_RECT ShadedLockedRect;

    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    hr = pShadedImageTexture->LockRect(0, &ShadedLockedRect, nullptr, 0);
    if (FAILED(hr))
    {
        return;
    }


    // Make sure we've received valid data
    if (ShadedLockedRect.Pitch != 0)
    {
        BYTE * pBuffer = (BYTE *)ShadedLockedRect.pBits;

        // Draw the data with vtk
        mDrawDepth->Draw(pBuffer, cDepthWidth, cDepthHeight, cBytesPerPixel);
        if (!m_isInit)
        {
            mDrawDepth->Actor->GetMapper()->SetInputData(mDrawDepth->image);
            mDrawDepth->renderer->AddActor(mDrawDepth->Actor);
            m_isInit = true;
        }
        mDrawDepth->renWin->Render();
    }

    // We're done with the texture so unlock it
    pShadedImageTexture->UnlockRect(0);


    ////////////////////////////////////////////////////////
    // Periodically display fps

    // Update the frame counter (the timestamp-skip reset test above relies on it)
    m_cFrameCounter++;

    // Display the fps count approximately every cTimeDisplayInterval seconds
    double elapsed = m_timer.AbsoluteTime() - m_fStartTime;
    if (static_cast<int>(elapsed) >= cTimeDisplayInterval)
    {
        double fps = static_cast<double>(m_cFrameCounter) / elapsed;

        // Only report fps while tracking is healthy
        if (!m_bTrackingFailed)
        {
            std::cout << "Fps: " << fps << std::endl;
        }

        m_cFrameCounter = 0;
        m_fStartTime = m_timer.AbsoluteTime();
    }
}
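
Unlike the second example below, this variant reports failures by throwing std::runtime_error, so a caller is expected to wrap it. A minimal driver sketch, assuming a DepthSensor instance whose Kinect streams were already initialized elsewhere (the setup code is not part of this example); runCaptureLoop is a hypothetical helper:

#include <iostream>
#include <stdexcept>

// Hypothetical caller, not part of the sample: polls processDepth()
// until a fatal Kinect Fusion error is thrown.
void runCaptureLoop(DepthSensor & sensor)
{
    for (;;)
    {
        try
        {
            // Waits up to 500 ms for the next depth frame, then tracks,
            // integrates and renders it.
            sensor.processDepth();
        }
        catch (const std::runtime_error & e)
        {
            // Transient tracking loss is handled inside processDepth();
            // anything thrown here is treated as fatal.
            std::cerr << e.what() << std::endl;
            break;
        }
    }
}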
Example #2
/// <summary>
/// Handle new depth data and perform Kinect Fusion processing
/// </summary>
void CKinectFusion::ProcessDepth()
{
    if (m_bInitializeError)
    {
        return;
    }

    HRESULT hr = S_OK;
    NUI_IMAGE_FRAME imageFrame;

    ////////////////////////////////////////////////////////
    // Get an extended depth frame from Kinect

    hr = m_pNuiSensor->NuiImageStreamGetNextFrame(m_pDepthStreamHandle, 0, &imageFrame);
    if (FAILED(hr))
    {
        std::cout << "Kinect NuiImageStreamGetNextFrame call failed." << std::endl;
        return;
    }

    hr = CopyExtendedDepth(imageFrame);

    LARGE_INTEGER currentFrameTime = imageFrame.liTimeStamp;

    // Release the Kinect camera frame
    m_pNuiSensor->NuiImageStreamReleaseFrame(m_pDepthStreamHandle, &imageFrame);

    if (FAILED(hr))
    {
        return;
    }

    // To enable playback of a .xed file through Kinect Studio and reset of the reconstruction
    // if the .xed loops, we test for when the frame timestamp has skipped a large number. 
    // Note: this will potentially continually reset live reconstructions on slow machines which
    // cannot process a live frame in less time than the reset threshold. Increase the number of
    // milliseconds in cResetOnTimeStampSkippedMilliseconds if this is a problem.
    if (m_cFrameCounter != 0 && abs(currentFrameTime.QuadPart - m_cLastFrameTimeStamp.QuadPart) > cResetOnTimeStampSkippedMilliseconds)
    {
        hr = ResetReconstruction();

        if (FAILED(hr))
        {
            return;
        }
    }

    m_cLastFrameTimeStamp = currentFrameTime;

    ////////////////////////////////////////////////////////
    // Depth to DepthFloat

    // Convert the pixels describing extended depth as unsigned short type in millimeters to depth
    // as floating point type in meters.
    hr = NuiFusionDepthToDepthFloatFrame(m_pDepthImagePixelBuffer, m_cDepthWidth, m_cDepthHeight, m_pDepthFloatImage, m_fMinDepthThreshold, m_fMaxDepthThreshold, m_bMirrorDepthFrame);

    if (FAILED(hr))
    {
        std::cout << "Kinect Fusion NuiFusionDepthToDepthFloatFrame call failed." << std::endl;
        return;
    }

    // Return if the volume is not initialized
    if (nullptr == m_pVolume)
    {
        std::cout << "Kinect Fusion reconstruction volume not initialized. Please try reducing volume size or restarting." << std::endl;
        return;
    }

    ////////////////////////////////////////////////////////
    // ProcessFrame

    // Perform the camera tracking and update the Kinect Fusion Volume
    // This will create memory on the GPU, upload the image, run camera tracking and integrate the
    // data into the Reconstruction Volume if successful. Note that passing nullptr as the final 
    // parameter will use and update the internal camera pose.
    //hr = m_pVolume->IntegrateFrame(m_pDepthFloatImage, 1, &m_worldToCameraTransform);
    hr = m_pVolume->ProcessFrame(m_pDepthFloatImage, NUI_FUSION_DEFAULT_ALIGN_ITERATION_COUNT, m_cMaxIntegrationWeight, &m_worldToCameraTransform);

    // Test to see if camera tracking failed.
    // If it did fail, no data integration or raycast for reference points and normals will have taken
    // place, and the internal camera pose will be unchanged.
    if (FAILED(hr))
    {
        if (hr == E_NUI_FUSION_TRACKING_ERROR)
        {
            m_cLostFrameCounter++;
            m_bTrackingFailed = true;
            std::cout << "Kinect Fusion camera tracking failed! Align the camera to the last tracked position. " << std::endl;
        }
        else
        {
            std::cout << "Kinect Fusion ProcessFrame call failed!" << std::endl;
            return;
        }
    }
    else
    {
        Matrix4 calculatedCameraPose;
        hr = m_pVolume->GetCurrentWorldToCameraTransform(&calculatedCameraPose);

        if (SUCCEEDED(hr))
        {
            // Set the pose
            m_worldToCameraTransform = calculatedCameraPose;
            m_cLostFrameCounter = 0;
            m_bTrackingFailed = false;
        }
    }
	
    if (m_bAutoResetReconstructionWhenLost && m_bTrackingFailed && m_cLostFrameCounter >= cResetOnNumberOfLostFrames)
    {
        // Automatically clear volume and reset tracking if tracking fails
        hr = ResetReconstruction();

        if (FAILED(hr))
        {
            return;
        }

        // Set bad tracking message
        std::cout << "Kinect Fusion camera tracking failed, automatically reset volume." << std::endl;
    }
	
    ////////////////////////////////////////////////////////
    // CalculatePointCloud

    // Raycast all the time, even if camera tracking failed, so we can visualize what is happening with the system
    hr = m_pVolume->CalculatePointCloud(m_pPointCloud, &m_worldToCameraTransform);

    if (FAILED(hr))
    {
        std::cout << "Kinect Fusion CalculatePointCloud call failed." << std::endl;
        return;
    }

    ////////////////////////////////////////////////////////
    // ShadePointCloud and render

    hr = NuiFusionShadePointCloud(m_pPointCloud, &m_cameraTransform, nullptr, m_pShadedSurface, nullptr);

    if (FAILED(hr))
    {
        std::cout << "Kinect Fusion NuiFusionShadePointCloud call failed." << std::endl;
        return;
    }

    // Draw the shaded raycast volume image
    INuiFrameTexture * pShadedImageTexture = m_pShadedSurface->pFrameTexture;
    NUI_LOCKED_RECT ShadedLockedRect;

    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    hr = pShadedImageTexture->LockRect(0, &ShadedLockedRect, nullptr, 0);
    if (FAILED(hr))
    {
        return;
    }

    // Make sure we've received valid data
    if (ShadedLockedRect.Pitch != 0)
    {
        if (m_pImageData != nullptr)
        {
            // Copy the shaded 32bpp image into our local buffer for later rendering
            const BYTE * pBuffer = (const BYTE *)ShadedLockedRect.pBits;
            memcpy(m_pImageData, pBuffer, 640 * 480 * 4);
        }
    }

    // We're done with the texture so unlock it
    pShadedImageTexture->UnlockRect(0);

    ////////////////////////////////////////////////////////
    // Periodically Display Fps

    // Update frame counter
    m_cFrameCounter++;

    // Display fps count approximately every cTimeDisplayInterval seconds
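    // A minimal sketch, assuming this class keeps the same m_timer,
    // m_fStartTime and cTimeDisplayInterval members used by the first
    // example above (they are not shown in this excerpt).
    double elapsed = m_timer.AbsoluteTime() - m_fStartTime;
    if (static_cast<int>(elapsed) >= cTimeDisplayInterval)
    {
        double fps = static_cast<double>(m_cFrameCounter) / elapsed;

        // Only report fps while tracking is healthy
        if (!m_bTrackingFailed)
        {
            std::cout << "Fps: " << fps << std::endl;
        }

        m_cFrameCounter = 0;
        m_fStartTime = m_timer.AbsoluteTime();
    }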

}
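
Note that this second example does not render the shaded image itself; it only copies it into m_pImageData, a GLubyte buffer. A minimal sketch of how that buffer might then be pushed to an OpenGL texture each frame; the UpdateShadedTexture helper, the pre-created textureId, and the 640x480 32bpp BGRA layout are assumptions based on the copy above:

#include <GL/gl.h>

// Hypothetical helper, not part of the sample: uploads the most recent
// shaded frame into an existing OpenGL texture. Assumes the texture was
// created earlier with glTexImage2D at 640x480 with RGBA8 storage.
void UpdateShadedTexture(GLuint textureId, const GLubyte * pImageData)
{
    glBindTexture(GL_TEXTURE_2D, textureId);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 640, 480,
                    GL_BGRA_EXT, GL_UNSIGNED_BYTE, pImageData);
}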