Example #1
void DepthSensor::Update()
{
    // Initialize must be called prior to creating timer events.
    mDrawDepth->interactor->Initialize();
    // Sign up to receive TimerEvent; Take() lets the smart pointer adopt the raw allocation
    vtkSmartPointer<vtkTimerCallback> timer =
        vtkSmartPointer<vtkTimerCallback>::Take(new vtkTimerCallback(this));
    mDrawDepth->interactor->AddObserver(vtkCommand::TimerEvent, timer);
    int timerId = mDrawDepth->interactor->CreateRepeatingTimer(100); // fire every 100 ms

    vtkSmartPointer<vtkLambdaCommand> keypressCallback = vtkSmartPointer<vtkLambdaCommand>::New();
    
    keypressCallback->SetCallback(
        // Capture 'this' rather than '[&]': the lambda outlives Update()'s stack frame
        [this](vtkObject* caller, unsigned long eventId, void* clientData)
        {
            // Get the keypress
            vtkRenderWindowInteractor *iren = static_cast<vtkRenderWindowInteractor*>(caller);
            std::string key = iren->GetKeySym();
            // Output the key that was pressed
            std::cout << "Pressed " << key << std::endl;
            // 's': save the mesh, then reset the reconstruction
            if (key == "s")
            {
                if (this->SaveMesh())
                {
                    std::cout << "Saving Mesh successed." << std::endl;
                }
                // After saving the mesh, reset the reconstruction
                ResetReconstruction();
            }

            if (key == "r")
            {
                ResetReconstruction();
            }

            // 't': read back the mesh file (STL or OBJ) that was just created
            if (key == "t")
            {
                if (m_saveMeshFormat == Stl || m_saveMeshFormat == Obj)
                {
                    cout << "Reading the mesh , waiting...... " << endl;
                    ReadModelFile((char*)filename.c_str(), m_saveMeshFormat);
                }
                else
                {
                    cout << "Read model file failed" << endl;
                }
            }
        }
    );
    mDrawDepth->interactor->AddObserver(vtkCommand::KeyPressEvent, keypressCallback);
    mDrawDepth->interactor->Start();
}
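
The example above assumes a user-defined vtkTimerCallback class that is never shown. Below is a minimal, hypothetical sketch of it: a vtkCommand subclass whose Execute() runs on each repeating-timer tick. The assumption that the tick drives DepthSensor::processDepth() (see Example #3) is ours, not the original author's.

// Hypothetical sketch of the vtkTimerCallback used above (not from the original source).
class vtkTimerCallback : public vtkCommand
{
public:
    explicit vtkTimerCallback(DepthSensor* sensor) : mSensor(sensor) {}

    void Execute(vtkObject* caller, unsigned long eventId, void* callData) override
    {
        if (eventId == vtkCommand::TimerEvent && mSensor != nullptr)
        {
            mSensor->processDepth(); // grab and integrate the next depth frame (assumed)
        }
    }

private:
    DepthSensor* mSensor = nullptr;
};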
Example #2
int _tmain( int argc, _TCHAR* argv[] )
{
	cv::setUseOptimized( true );

	// Sensor
	IKinectSensor* pSensor = nullptr;
	HRESULT hResult = S_OK;
	hResult = GetDefaultKinectSensor( &pSensor );
	if( FAILED( hResult ) ){
		std::cerr << "Error : GetDefaultKinectSensor" << std::endl;
		return -1;
	}

	hResult = pSensor->Open();
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::Open()" << std::endl;
		return -1;
	}

	// Source
	IDepthFrameSource* pDepthSource;
	hResult = pSensor->get_DepthFrameSource( &pDepthSource );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::get_DepthFrameSource()" << std::endl;
		return -1;
	}

	// Reader
	IDepthFrameReader* pDepthReader;
	hResult = pDepthSource->OpenReader( &pDepthReader );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IDepthFrameSource::OpenReader()" << std::endl;
		return -1;
	}

	// Description
	IFrameDescription* pDescription;
	hResult = pDepthSource->get_FrameDescription( &pDescription );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl;
		return -1;
	}

	int width = 0;
	int height = 0;
	pDescription->get_Width( &width ); // 512
	pDescription->get_Height( &height ); // 424
	unsigned int bufferSize = width * height * sizeof( unsigned short ); // overwritten by AccessUnderlyingBuffer below

	cv::Mat bufferMat( height, width, CV_16UC1 );
	cv::Mat depthMat( height, width, CV_8UC1 );
	cv::namedWindow( "Depth" );

	// Coordinate Mapper
	ICoordinateMapper* pCoordinateMapper;
	hResult = pSensor->get_CoordinateMapper( &pCoordinateMapper );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::get_CoordinateMapper()" << std::endl;
		return -1;
	}

	// Create Reconstruction
	NUI_FUSION_RECONSTRUCTION_PARAMETERS reconstructionParameter;
	reconstructionParameter.voxelsPerMeter = 256;
	reconstructionParameter.voxelCountX = 512;
	reconstructionParameter.voxelCountY = 384;
	reconstructionParameter.voxelCountZ = 512;

	Matrix4 worldToCameraTransform;
	SetIdentityMatrix( worldToCameraTransform );
	INuiFusionReconstruction* pReconstruction;
	hResult = NuiFusionCreateReconstruction( &reconstructionParameter, NUI_FUSION_RECONSTRUCTION_PROCESSOR_TYPE_AMP, -1, &worldToCameraTransform, &pReconstruction );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateReconstruction()" << std::endl;
		return -1;
	}

	// Create Image Frame
	// Depth
	NUI_FUSION_IMAGE_FRAME* pDepthFloatImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_FLOAT, width, height, nullptr, &pDepthFloatImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( FLOAT )" << std::endl;
		return -1;
	}

	// SmoothDepth
	NUI_FUSION_IMAGE_FRAME* pSmoothDepthFloatImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_FLOAT, width, height, nullptr, &pSmoothDepthFloatImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( FLOAT )" << std::endl;
		return -1;
	}

	// Point Cloud
	NUI_FUSION_IMAGE_FRAME* pPointCloudImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_POINT_CLOUD, width, height, nullptr, &pPointCloudImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( POINT_CLOUD )" << std::endl;
		return -1;
	}

	// Surface
	NUI_FUSION_IMAGE_FRAME* pSurfaceImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_COLOR, width, height, nullptr, &pSurfaceImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( COLOR )" << std::endl;
		return -1;
	}

	// Normal
	NUI_FUSION_IMAGE_FRAME* pNormalImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_COLOR, width, height, nullptr, &pNormalImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( COLOR )" << std::endl;
		return -1;
	}

	cv::namedWindow( "Surface" );
	cv::namedWindow( "Normal" );

	while( 1 ){
		// Frame
		IDepthFrame* pDepthFrame = nullptr;
		hResult = pDepthReader->AcquireLatestFrame( &pDepthFrame );

		if( SUCCEEDED( hResult ) ){
			// Point bufferMat at the frame's internal buffer (valid only until the frame is released)
			hResult = pDepthFrame->AccessUnderlyingBuffer( &bufferSize, reinterpret_cast<UINT16**>( &bufferMat.data ) );
			if( SUCCEEDED( hResult ) ){
				bufferMat.convertTo( depthMat, CV_8U, -255.0f / 8000.0f, 255.0f );
				hResult = pReconstruction->DepthToDepthFloatFrame( reinterpret_cast<UINT16*>( bufferMat.data ), width * height * sizeof( UINT16 ), pDepthFloatImageFrame, NUI_FUSION_DEFAULT_MINIMUM_DEPTH/* 0.5[m] */, NUI_FUSION_DEFAULT_MAXIMUM_DEPTH/* 8.0[m] */, true );
				if( FAILED( hResult ) ){
					std::cerr << "Error :INuiFusionReconstruction::DepthToDepthFloatFrame()" << std::endl;
					return -1;
				}
			}
		}
		SafeRelease( pDepthFrame );

		// Smoothing Depth Image Frame
		hResult = pReconstruction->SmoothDepthFloatFrame( pDepthFloatImageFrame, pSmoothDepthFloatImageFrame, 1, 0.04f );
		if( FAILED( hResult ) ){
			std::cerr << "Error : INuiFusionReconstruction::SmoothDepthFloatFrame()" << std::endl;
			return -1;
		}

		// Reconstruction Process
		pReconstruction->GetCurrentWorldToCameraTransform( &worldToCameraTransform );
		hResult = pReconstruction->ProcessFrame( pSmoothDepthFloatImageFrame, NUI_FUSION_DEFAULT_ALIGN_ITERATION_COUNT, NUI_FUSION_DEFAULT_INTEGRATION_WEIGHT, nullptr, &worldToCameraTransform );
		if( FAILED( hResult ) ){
			static int errorCount = 0;
			errorCount++;
			if( errorCount >= 100 ) {
				errorCount = 0;
				ResetReconstruction( pReconstruction, &worldToCameraTransform );
			}
		}

		// Calculate Point Cloud
		hResult = pReconstruction->CalculatePointCloud( pPointCloudImageFrame, &worldToCameraTransform );
		if( FAILED( hResult ) ){
			std::cerr << "Error : CalculatePointCloud" << std::endl;
			return -1;
		}

		// Shading Point Cloud
		Matrix4 worldToBGRTransform = { 0.0f };
		worldToBGRTransform.M11 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountX;
		worldToBGRTransform.M22 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountY;
		worldToBGRTransform.M33 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountZ;
		worldToBGRTransform.M41 = 0.5f;
		worldToBGRTransform.M42 = 0.5f;
		worldToBGRTransform.M43 = 0.0f;
		worldToBGRTransform.M44 = 1.0f;

		hResult = NuiFusionShadePointCloud( pPointCloudImageFrame, &worldToCameraTransform, &worldToBGRTransform, pSurfaceImageFrame, pNormalImageFrame );
		if( FAILED( hResult ) ){
			std::cerr << "Error : NuiFusionShadePointCloud" << std::endl;
			return -1;
		}

		cv::Mat surfaceMat( height, width, CV_8UC4, pSurfaceImageFrame->pFrameBuffer->pBits );
		cv::Mat normalMat( height, width, CV_8UC4, pNormalImageFrame->pFrameBuffer->pBits );

		cv::imshow( "Depth", depthMat );
		cv::imshow( "Surface", surfaceMat );
		cv::imshow( "Normal", normalMat );

		int key = cv::waitKey( 30 );
		if( key == VK_ESCAPE ){
			break;
		}
		else if( key == 'r' ){
			ResetReconstruction( pReconstruction, &worldToCameraTransform );
		}
	}

	SafeRelease( pDepthSource );
	SafeRelease( pDepthReader );
	SafeRelease( pDescription );
	SafeRelease( pCoordinateMapper );
	SafeRelease( pReconstruction );
	NuiFusionReleaseImageFrame( pDepthFloatImageFrame );
	NuiFusionReleaseImageFrame( pSmoothDepthFloatImageFrame );
	NuiFusionReleaseImageFrame( pPointCloudImageFrame );
	NuiFusionReleaseImageFrame( pSurfaceImageFrame );
	NuiFusionReleaseImageFrame( pNormalImageFrame );
	if( pSensor ){
		pSensor->Close();
	}
	SafeRelease( pSensor );
	cv::destroyAllWindows();

	return 0;
}
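
Example #2 relies on three helpers that are not shown: SafeRelease, SetIdentityMatrix, and a free-standing ResetReconstruction. Minimal sketches follow, written in the style of the Kinect sample code; they are illustrative conventions, not SDK functions.

// Release a COM interface pointer and null it out.
template<class T>
inline void SafeRelease( T*& pInterface )
{
	if( pInterface != nullptr ){
		pInterface->Release();
		pInterface = nullptr;
	}
}

// Zero a Matrix4 and set its diagonal to 1.
inline void SetIdentityMatrix( Matrix4& mat )
{
	mat = Matrix4{};
	mat.M11 = mat.M22 = mat.M33 = mat.M44 = 1.0f;
}

// Clear the volume and restart tracking from the identity pose.
inline void ResetReconstruction( INuiFusionReconstruction* pReconstruction, Matrix4* pWorldToCameraTransform )
{
	SetIdentityMatrix( *pWorldToCameraTransform );
	pReconstruction->ResetReconstruction( pWorldToCameraTransform, nullptr );
}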
Example #3
void DepthSensor::processDepth()
{    
    HRESULT hr;
    NUI_IMAGE_FRAME imageFrame;

    // Get the next depth frame from the sensor (500 ms timeout)
    hr = mNuiSensor->NuiImageStreamGetNextFrame(mDepthStreamHandle, 500, &imageFrame);
    if (FAILED(hr))
    {
        throw std::runtime_error("NuiImageStreamGetNextFrame failed");
    }

    // Copy the extended-depth pixels out of the frame before releasing it
    hr = CopyExtendedDepth(imageFrame);

    LARGE_INTEGER currentDepthFrameTime = imageFrame.liTimeStamp;

    // Release the Kinect camera frame
    mNuiSensor->NuiImageStreamReleaseFrame(mDepthStreamHandle, &imageFrame);

    if (FAILED(hr))
    {
        return;
    }

    // To enable playback of a .xed file through Kinect Studio and reset of the reconstruction
    // if the .xed loops, we test for when the frame timestamp has skipped a large number. 
    // Note: this will potentially continually reset live reconstructions on slow machines which
    // cannot process a live frame in less time than the reset threshold. Increase the number of
    // milliseconds in cResetOnTimeStampSkippedMilliseconds if this is a problem.
    if (m_bAutoResetReconstructionOnTimeout && m_cFrameCounter != 0
        && abs(currentDepthFrameTime.QuadPart - m_cLastDepthFrameTimeStamp.QuadPart) > cResetOnTimeStampSkippedMilliseconds)
    {
        hr = ResetReconstruction();

        if (FAILED(hr))
        {
            return;
        }
    }

    m_cLastDepthFrameTimeStamp = currentDepthFrameTime;

    // Return if the volume is not initialized
    if (nullptr == m_pVolume)
    {
        throw std::runtime_error("Kinect Fusion reconstruction volume not initialized. Please try reducing volume size or restarting.");
    }


    ////////////////////////////////////////////////////////
    // Depth to DepthFloat

    // Convert the pixels describing extended depth as unsigned short type in millimeters to depth
    // as floating point type in meters.
    hr = m_pVolume->DepthToDepthFloatFrame(m_pDepthImagePixelBuffer, cDepthImagePixels * sizeof(NUI_DEPTH_IMAGE_PIXEL), m_pDepthFloatImage, m_fMinDepthThreshold, m_fMaxDepthThreshold, m_bMirrorDepthFrame);
    if (FAILED(hr))
    {
        throw std::runtime_error("Kinect Fusion DepthToDepthFloatFrame call failed.");
    }


    ////////////////////////////////////////////////////////
    // ProcessFrame

    // Perform the camera tracking and update the Kinect Fusion Volume
    // This will create memory on the GPU, upload the image, run camera tracking and integrate the
    // data into the Reconstruction Volume if successful. Note that passing nullptr as the final 
    // parameter will use and update the internal camera pose.
    hr = m_pVolume->ProcessFrame(m_pDepthFloatImage, NUI_FUSION_DEFAULT_ALIGN_ITERATION_COUNT, m_cMaxIntegrationWeight, &m_worldToCameraTransform);
    if (SUCCEEDED(hr))
    {
        Matrix4 calculatedCameraPose;
        hr = m_pVolume->GetCurrentWorldToCameraTransform(&calculatedCameraPose);

        if (SUCCEEDED(hr))
        {
            // Set the pose
            m_worldToCameraTransform = calculatedCameraPose;
            m_cLostFrameCounter = 0;
            m_bTrackingFailed = false;
        }
    }
    else
    {
        if (hr == E_NUI_FUSION_TRACKING_ERROR)
        {
            m_cLostFrameCounter++;
            m_bTrackingFailed = true;
            std::cout << "Kinect Fusion camera tracking failed! Align the camera to the last tracked position. " << std::endl;
        }
        else
        {
            throw std::runtime_error("Kinect Fusion ProcessFrame call failed!");
        }
    }



    if (m_bAutoResetReconstructionWhenLost && m_bTrackingFailed && m_cLostFrameCounter >= cResetOnNumberOfLostFrames)
    {
        // Automatically clear volume and reset tracking if tracking fails
        hr = ResetReconstruction();

        if (FAILED(hr))
        {
            return;
        }

        // Set bad tracking message
        std::cout << "Kinect Fusion camera tracking failed, automatically reset volume" << std::endl; 
    
    }


    ////////////////////////////////////////////////////////
    // CalculatePointCloud
    // Raycast all the time, even if camera tracking failed, so we can visualize what is happening with the system
    hr = m_pVolume->CalculatePointCloud(m_pPointCloud, &m_worldToCameraTransform);

    if (FAILED(hr))
    {
        throw std::runtime_error("Kinect Fusion CalculatePointCloud call failed.");
    }


    ////////////////////////////////////////////////////////
    // ShadePointCloud and render

    hr = NuiFusionShadePointCloud(m_pPointCloud, &m_worldToCameraTransform, nullptr, m_pShadedSurface, nullptr);

    if (FAILED(hr))
    {
        throw std::runtime_error("Kinect Fusion NuiFusionShadePointCloud call failed.");
    }

    // Draw the shaded raycast volume image
    INuiFrameTexture * pShadedImageTexture = m_pShadedSurface->pFrameTexture;
    NUI_LOCKED_RECT ShadedLockedRect;

    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    hr = pShadedImageTexture->LockRect(0, &ShadedLockedRect, nullptr, 0);
    if (FAILED(hr))
    {
        return;
    }


    // Make sure we've received valid data
    if (ShadedLockedRect.Pitch != 0)
    {
        BYTE * pBuffer = (BYTE *)ShadedLockedRect.pBits;

        // Draw the data with vtk
        mDrawDepth->Draw(pBuffer, cDepthWidth, cDepthHeight, cBytesPerPixel);
        if (!m_isInit)
        {
            mDrawDepth->Actor->GetMapper()->SetInputData(mDrawDepth->image);
            mDrawDepth->renderer->AddActor(mDrawDepth->Actor);
            m_isInit = true;
        }
        mDrawDepth->renWin->Render();
    }

    // We're done with the texture so unlock it
    pShadedImageTexture->UnlockRect(0);


    //////////////////////////////////////////////////////////
    //// Periodically display FPS (disabled in this example)
    //m_cFrameCounter++;
    //// Display the FPS count approximately every cTimeDisplayInterval seconds
    //double elapsed = m_timer.AbsoluteTime() - m_fStartTime;
    //if ((int)elapsed >= cTimeDisplayInterval)
    //{
    //    double fps = (double)m_cFrameCounter / elapsed;
    //    if (!m_bTrackingFailed)
    //    {
    //        std::cout << "Fps: " << fps << std::endl;
    //    }
    //    m_cFrameCounter = 0;
    //    m_fStartTime = m_timer.AbsoluteTime();
    //}
}
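
Examples #3 and #5 both call CopyExtendedDepth() without defining it. The sketch below is modeled on the Kinect SDK 1.x Fusion samples and reuses the member names from Example #3 (mNuiSensor, mDepthStreamHandle, m_pDepthImagePixelBuffer); treat it as a reference to adapt, not the original author's code.

// Copy the extended-depth pixels of a frame into m_pDepthImagePixelBuffer.
HRESULT DepthSensor::CopyExtendedDepth(NUI_IMAGE_FRAME& imageFrame)
{
    if (nullptr == m_pDepthImagePixelBuffer)
    {
        return E_FAIL;
    }

    // Extended depth (full 16-bit range plus player index) requires this accessor
    INuiFrameTexture* pExtendedDepthTex = nullptr;
    BOOL nearModeOperational = FALSE;
    HRESULT hr = mNuiSensor->NuiImageFrameGetDepthImagePixelFrameTexture(
        mDepthStreamHandle, &imageFrame, &nearModeOperational, &pExtendedDepthTex);
    if (FAILED(hr))
    {
        return hr;
    }

    NUI_LOCKED_RECT extendedDepthLockedRect;
    hr = pExtendedDepthTex->LockRect(0, &extendedDepthLockedRect, nullptr, 0);
    if (FAILED(hr) || extendedDepthLockedRect.Pitch == 0)
    {
        return FAILED(hr) ? hr : E_FAIL;
    }

    // Copy out before the caller releases the frame
    memcpy(m_pDepthImagePixelBuffer, extendedDepthLockedRect.pBits, pExtendedDepthTex->BufferLen());

    pExtendedDepthTex->UnlockRect(0);
    return hr;
}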
Example #4
// Initialize Kinect Fusion volume and images for processing
void DepthSensor::initKinectFusion()
{
    HRESULT hr = S_OK;

    
    // Create the Kinect Fusion Reconstruction Volume
    hr = NuiFusionCreateReconstruction(
        &reconstructionParams,
        NUI_FUSION_RECONSTRUCTION_PROCESSOR_TYPE_AMP, // process on the GPU via C++ AMP
        -1,                                           // -1: let the SDK pick the device
        &m_worldToCameraTransform,
        &m_pVolume);
    if (FAILED(hr))
    {
        throw std::runtime_error("NuiFusionCreateReconstruction failed.");
    }



    // Save the default world-to-volume transformation, optionally restored in ResetReconstruction
    hr = m_pVolume->GetCurrentWorldToVolumeTransform(&m_defaultWorldToVolumeTransform);
    if (FAILED(hr))
    {
        throw std::runtime_error("Failed in call to GetCurrentWorldToVolumeTransform.");
    }



    if (m_bTranslateResetPoseByMinDepthThreshold)
    {
        // This call sets the world-to-volume transformation
        hr = ResetReconstruction();
        if (FAILED(hr))
        {
            throw std::runtime_error("ResetReconstruction failed.");
        }
    }



    // DepthFloatImage: frames generated from the depth input
    hr = NuiFusionCreateImageFrame(NUI_FUSION_IMAGE_TYPE_FLOAT, cDepthWidth, cDepthHeight, nullptr, &m_pDepthFloatImage);
    if (FAILED(hr))
        throw std::runtime_error("NuiFusionCreateImageFrame failed (Float).");

    // PointCloud: image used to raycast the reconstruction volume
    hr = NuiFusionCreateImageFrame(NUI_FUSION_IMAGE_TYPE_POINT_CLOUD, cDepthWidth, cDepthHeight, nullptr, &m_pPointCloud);
    if (FAILED(hr))
        throw std::runtime_error("NuiFusionCreateImageFrame failed (PointCloud).");

    // ShadedSurface: color image of the raycast volume for display
    hr = NuiFusionCreateImageFrame(NUI_FUSION_IMAGE_TYPE_COLOR, cDepthWidth, cDepthHeight, nullptr, &m_pShadedSurface);
    if (FAILED(hr))
        throw std::runtime_error("NuiFusionCreateImageFrame failed (Color).");

    m_pDepthImagePixelBuffer = new(std::nothrow) NUI_DEPTH_IMAGE_PIXEL[cDepthImagePixels];
    if (nullptr == m_pDepthImagePixelBuffer)
    {
        throw std::runtime_error("Failed to initialize Kinect Fusion depth image pixel buffer.");
    }

    m_fStartTime = m_timer.AbsoluteTime();

    std::cout << "Intial Finish"<<std::endl;
}
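
initKinectFusion() saves m_defaultWorldToVolumeTransform and checks m_bTranslateResetPoseByMinDepthThreshold, but the ResetReconstruction() it calls (also used in Examples #1 and #3) is not shown. Below is a sketch modeled on the Kinect SDK Fusion samples, assuming the depth thresholds from Example #3 (m_fMinDepthThreshold, m_fMaxDepthThreshold); adjust names to the real class.

// Reset camera tracking and clear the reconstruction volume.
HRESULT DepthSensor::ResetReconstruction()
{
    if (nullptr == m_pVolume)
    {
        return E_FAIL;
    }

    // Restart tracking from the identity camera pose
    SetIdentityMatrix(m_worldToCameraTransform);

    Matrix4 worldToVolumeTransform = m_defaultWorldToVolumeTransform;
    if (m_bTranslateResetPoseByMinDepthThreshold)
    {
        // Shift the volume along Z so reconstruction starts at the minimum
        // depth threshold instead of directly at the sensor
        float minDist = (m_fMinDepthThreshold < m_fMaxDepthThreshold)
                            ? m_fMinDepthThreshold : m_fMaxDepthThreshold;
        worldToVolumeTransform.M43 -= minDist * reconstructionParams.voxelsPerMeter;
    }

    HRESULT hr = m_pVolume->ResetReconstruction(&m_worldToCameraTransform, &worldToVolumeTransform);

    m_cLostFrameCounter = 0;
    m_bTrackingFailed = false;
    return hr;
}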
Example #5
/// <summary>
/// Handle new depth data and perform Kinect Fusion processing
/// </summary>
void CKinectFusion::ProcessDepth()
{
    if (m_bInitializeError)
    {
        return;
    }

    HRESULT hr = S_OK;
    NUI_IMAGE_FRAME imageFrame;

    ////////////////////////////////////////////////////////
    // Get an extended depth frame from Kinect

    hr = m_pNuiSensor->NuiImageStreamGetNextFrame(m_pDepthStreamHandle, 0, &imageFrame);
    if (FAILED(hr))
    {
        std::cout << "Kinect NuiImageStreamGetNextFrame call failed." << std::endl;
        return;
    }

    hr = CopyExtendedDepth(imageFrame);

    LARGE_INTEGER currentFrameTime = imageFrame.liTimeStamp;

    // Release the Kinect camera frame
    m_pNuiSensor->NuiImageStreamReleaseFrame(m_pDepthStreamHandle, &imageFrame);

    if (FAILED(hr))
    {
        return;
    }

    // To enable playback of a .xed file through Kinect Studio and reset of the reconstruction
    // if the .xed loops, we test for when the frame timestamp has skipped a large number. 
    // Note: this will potentially continually reset live reconstructions on slow machines which
    // cannot process a live frame in less time than the reset threshold. Increase the number of
    // milliseconds in cResetOnTimeStampSkippedMilliseconds if this is a problem.
    if (m_cFrameCounter != 0 && abs(currentFrameTime.QuadPart - m_cLastFrameTimeStamp.QuadPart) > cResetOnTimeStampSkippedMilliseconds)
    {
        hr = ResetReconstruction();

        if (FAILED(hr))
        {
            return;
        }
    }

    m_cLastFrameTimeStamp = currentFrameTime;

    ////////////////////////////////////////////////////////
    // Depth to DepthFloat

    // Convert the pixels describing extended depth as unsigned short type in millimeters to depth
    // as floating point type in meters.
    hr = NuiFusionDepthToDepthFloatFrame(m_pDepthImagePixelBuffer, m_cDepthWidth, m_cDepthHeight, m_pDepthFloatImage, m_fMinDepthThreshold, m_fMaxDepthThreshold, m_bMirrorDepthFrame);

    if (FAILED(hr))
    {
        std::cout << "Kinect Fusion NuiFusionDepthToDepthFloatFrame call failed." << std::endl;
        return;
    }

    // Return if the volume is not initialized
    if (nullptr == m_pVolume)
    {
        std::cout << "Kinect Fusion reconstruction volume not initialized. Please try reducing volume size or restarting." << std::endl;
        return;
    }

    ////////////////////////////////////////////////////////
    // ProcessFrame

    // Perform the camera tracking and update the Kinect Fusion Volume
    // This will create memory on the GPU, upload the image, run camera tracking and integrate the
    // data into the Reconstruction Volume if successful. Note that passing nullptr as the final 
    // parameter will use and update the internal camera pose.
    //hr = m_pVolume->IntegrateFrame(m_pDepthFloatImage, 1, &m_worldToCameraTransform); // integration only, no camera tracking
    hr = m_pVolume->ProcessFrame(m_pDepthFloatImage, NUI_FUSION_DEFAULT_ALIGN_ITERATION_COUNT, m_cMaxIntegrationWeight, &m_worldToCameraTransform);

    // Test to see if camera tracking failed.
    // If it did fail, no data integration or raycast for reference points and normals will have
    // taken place, and the internal camera pose will be unchanged.
    if (FAILED(hr))
    {
        if (hr == E_NUI_FUSION_TRACKING_ERROR)
        {
            m_cLostFrameCounter++;
            m_bTrackingFailed = true;
            std::cout << "Kinect Fusion camera tracking failed! Align the camera to the last tracked position. " << std::endl;
        }
        else
        {
            std::cout << "Kinect Fusion ProcessFrame call failed!" << std::endl;
            return;
        }
    }
    else
    {
        Matrix4 calculatedCameraPose;
        hr = m_pVolume->GetCurrentWorldToCameraTransform(&calculatedCameraPose);

        if (SUCCEEDED(hr))
        {
            // Set the pose
            m_worldToCameraTransform = calculatedCameraPose;
            m_cLostFrameCounter = 0;
            m_bTrackingFailed = false;
        }
    }
	
    if (m_bAutoResetReconstructionWhenLost && m_bTrackingFailed && m_cLostFrameCounter >= cResetOnNumberOfLostFrames)
    {
        // Automatically clear volume and reset tracking if tracking fails
        hr = ResetReconstruction();

        if (FAILED(hr))
        {
            return;
        }

        // Set bad tracking message
        std::cout << "Kinect Fusion camera tracking failed, automatically reset volume." << std::endl;
    }
	
    ////////////////////////////////////////////////////////
    // CalculatePointCloud

    // Raycast all the time, even if camera tracking failed, so we can visualize what is happening with the system
    hr = m_pVolume->CalculatePointCloud(m_pPointCloud, &m_worldToCameraTransform);

    if (FAILED(hr))
    {
        std::cout << "Kinect Fusion CalculatePointCloud call failed." << std::endl;
        return;
    }

    ////////////////////////////////////////////////////////
    // ShadePointCloud and render

    hr = NuiFusionShadePointCloud(m_pPointCloud, &m_worldToCameraTransform, nullptr, m_pShadedSurface, nullptr);

    if (FAILED(hr))
    {
        std::cout << "Kinect Fusion NuiFusionShadePointCloud call failed." << std::endl;
        return;
    }

    // Draw the shaded raycast volume image
    INuiFrameTexture * pShadedImageTexture = m_pShadedSurface->pFrameTexture;
    NUI_LOCKED_RECT ShadedLockedRect;

    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    hr = pShadedImageTexture->LockRect(0, &ShadedLockedRect, nullptr, 0);
    if (FAILED(hr))
    {
        return;
    }

    // Make sure we've received valid data
    if (ShadedLockedRect.Pitch != 0)
    {
        if (m_pImageData != nullptr)
        {
            // Copy the shaded BGRA frame (640x480, 4 bytes per pixel) into the
            // buffer the renderer reads from
            memcpy(m_pImageData, ShadedLockedRect.pBits, 640 * 480 * 4);
        }
    }

    // We're done with the texture so unlock it
    pShadedImageTexture->UnlockRect(0);

    ////////////////////////////////////////////////////////
    // Periodically display FPS

    // Update the frame counter; the FPS display itself is omitted in this example
    m_cFrameCounter++;
}
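
Example #5 copies the shaded surface into m_pImageData but never shows how that buffer is displayed. A hypothetical sketch follows, assuming m_pImageData was allocated as a 640x480x4 GLubyte buffer and is drawn in a legacy fixed-function OpenGL context; none of this is in the original sample.

// Hypothetical display path for m_pImageData (assumed: GLubyte buffer of 640*480*4 bytes).
void CKinectFusion::DrawShadedSurface()
{
    if (m_pImageData == nullptr)
    {
        return;
    }

    glClear(GL_COLOR_BUFFER_BIT);

    // Kinect images are top-down while glDrawPixels draws bottom-up,
    // so anchor at the top-left and flip vertically with a negative zoom
    glRasterPos2f(-1.0f, 1.0f);
    glPixelZoom(1.0f, -1.0f);
    glDrawPixels(640, 480, GL_BGRA_EXT, GL_UNSIGNED_BYTE, m_pImageData);
}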
Example #6
/// <summary>
/// Initialize Kinect Fusion volume and images for processing
/// </summary>
/// <returns>S_OK on success, otherwise failure code</returns>
HRESULT CKinectFusion::InitializeKinectFusion()
{
    HRESULT hr = S_OK;

    // Create the Kinect Fusion Reconstruction Volume
    hr = NuiFusionCreateReconstruction(
        &m_reconstructionParams,
        m_processorType, m_deviceIndex,
        &m_worldToCameraTransform,
        &m_pVolume);

    if (FAILED(hr))
    {
        if (E_NUI_GPU_FAIL == hr)
        {
			std::cout << "Device " << m_deviceIndex << " not able to run Kinect Fusion, or error initializing." << std::endl;
        }
        else if (E_NUI_GPU_OUTOFMEMORY == hr)
        {
            std::cout << "Device " << m_deviceIndex << " out of memory error initializing reconstruction - try a smaller reconstruction volume." << std::endl;
        }
        else if (NUI_FUSION_RECONSTRUCTION_PROCESSOR_TYPE_CPU != m_processorType)
        {
            std::cout << "Failed to initialize Kinect Fusion reconstruction volume on device " << m_deviceIndex << std::endl;
        }
        else
        {
            std::cout << "Failed to initialize Kinect Fusion reconstruction volume on CPU." << std::endl;
        }

        return hr;
    }
	
    // Save the default world to volume transformation to be optionally used in ResetReconstruction
    hr = m_pVolume->GetCurrentWorldToVolumeTransform(&m_defaultWorldToVolumeTransform);
    if (FAILED(hr))
    {
        std::cout << "Failed in call to GetCurrentWorldToVolumeTransform." << std::endl;
        return hr;
    }
	
    if (m_bTranslateResetPoseByMinDepthThreshold)
    {
        // This call will set the world-volume transformation
        hr = ResetReconstruction();
        if (FAILED(hr))
        {
            return hr;
        }
    }

    // Frames generated from the depth input
    hr = NuiFusionCreateImageFrame(NUI_FUSION_IMAGE_TYPE_FLOAT, m_cDepthWidth, m_cDepthHeight, nullptr, &m_pDepthFloatImage);
    if (FAILED(hr))
    {
        std::cout << "Failed to initialize Kinect Fusion image." << std::endl;
        return hr;
    }

    // Create images to raycast the Reconstruction Volume
    hr = NuiFusionCreateImageFrame(NUI_FUSION_IMAGE_TYPE_POINT_CLOUD, m_cDepthWidth, m_cDepthHeight, nullptr, &m_pPointCloud);
    if (FAILED(hr))
    {
        std::cout << "Failed to initialize Kinect Fusion image." << std::endl;
        return hr;
    }

    // Create images to raycast the Reconstruction Volume
    hr = NuiFusionCreateImageFrame(NUI_FUSION_IMAGE_TYPE_COLOR, m_cDepthWidth, m_cDepthHeight, nullptr, &m_pShadedSurface);
    if (FAILED(hr))
    {
        std::cout << "Failed to initialize Kinect Fusion image." << std::endl;
        return hr;
    }

    m_pDepthImagePixelBuffer = new(std::nothrow) NUI_DEPTH_IMAGE_PIXEL[m_cImageSize];
    if (nullptr == m_pDepthImagePixelBuffer)
    {
        std::cout << "Failed to initialize Kinect Fusion depth image pixel buffer." << std::endl;
        return E_OUTOFMEMORY; // hr is still S_OK here; return an explicit failure code
    }

    // Set an introductory message
    std::cout << "Click 'Near Mode' to change sensor range, and 'Reset Reconstruction' to clear!" << std::endl;

    return hr;
}
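
InitializeKinectFusion() assumes m_reconstructionParams, m_processorType, and m_deviceIndex were filled in beforehand, e.g. in the CKinectFusion constructor. A sketch of typical setup, mirroring the values used in Example #2 (the exact numbers are a per-application choice):

// Illustrative reconstruction setup; tune the volume to your scene.
m_reconstructionParams.voxelsPerMeter = 256; // 256 voxels per meter, about 3.9 mm per voxel
m_reconstructionParams.voxelCountX = 512;    // 512 / 256 = 2.0 m wide volume
m_reconstructionParams.voxelCountY = 384;    // 1.5 m tall
m_reconstructionParams.voxelCountZ = 512;    // 2.0 m deep

m_processorType = NUI_FUSION_RECONSTRUCTION_PROCESSOR_TYPE_AMP; // GPU via C++ AMP
m_deviceIndex = -1;                                             // -1: auto-select the device

SetIdentityMatrix(m_worldToCameraTransform); // start tracking from the identity pose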