Code example #1
/// <summary>
/// Retrieve depth data from stream frame
/// </summary>
void NuiDepthStream::ProcessDepth()
{
    HRESULT         hr;
    NUI_IMAGE_FRAME imageFrame;

    // Attempt to get the depth frame
    hr = m_pNuiSensor->NuiImageStreamGetNextFrame(m_hStreamHandle, 0, &imageFrame);
    if (FAILED(hr))
    {
        return;
    }

    if (m_paused)
    {
        // Stream paused. Skip frame process and release the frame.
        goto ReleaseFrame;
    }

    BOOL nearMode;
    INuiFrameTexture* pTexture;

    // Get the depth image pixel texture
    hr = m_pNuiSensor->NuiImageFrameGetDepthImagePixelFrameTexture(m_hStreamHandle, &imageFrame, &nearMode, &pTexture);
    if (FAILED(hr))
    {
        goto ReleaseFrame;
    }

    NUI_LOCKED_RECT lockedRect;

    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    pTexture->LockRect(0, &lockedRect, NULL, 0);

    // Make sure we've received valid data
    if (lockedRect.Pitch != 0)
    {
        // Convert depth data to a color image and copy it to the image buffer
        m_imageBuffer.CopyDepth(lockedRect.pBits, lockedRect.size, nearMode, m_depthTreatment);

        // Draw out the data with Direct2D
        if (m_pStreamViewer)
        {
            m_pStreamViewer->SetImage(&m_imageBuffer);
        }
    }

    // Done with the texture. Unlock and release it
    pTexture->UnlockRect(0);
    pTexture->Release();

ReleaseFrame:
    // Release the frame
    m_pNuiSensor->NuiImageStreamReleaseFrame(m_hStreamHandle, &imageFrame);
}
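Example #1 assumes the depth stream is already open. In the Kinect SDK v1 samples, ProcessDepth() is typically driven by a frame-ready event that the runtime signals and the application polls once per pass of its message loop. Below is a minimal sketch of that wiring; the StartStream()/Update() helpers and the m_hNextDepthFrameEvent member are illustrative names, not members of this class.

// Sketch only: open the depth stream with a frame-ready event and poll it.
// StartStream(), Update() and m_hNextDepthFrameEvent are assumed names.
HRESULT NuiDepthStream::StartStream()
{
    // Manual-reset event the runtime signals when a new depth frame is available
    m_hNextDepthFrameEvent = CreateEvent(NULL, TRUE, FALSE, NULL);

    return m_pNuiSensor->NuiImageStreamOpen(
        NUI_IMAGE_TYPE_DEPTH,
        NUI_IMAGE_RESOLUTION_640x480,
        0,                          // no special frame flags
        2,                          // number of buffered frames
        m_hNextDepthFrameEvent,
        &m_hStreamHandle);
}

void NuiDepthStream::Update()       // called once per pass of the app's message loop
{
    if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextDepthFrameEvent, 0))
    {
        ProcessDepth();             // a frame is ready; fetch, convert and display it
    }
}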
Code example #2
/// <summary>
/// Handle new depth data
/// </summary>
/// <returns>S_OK on success, otherwise failure code</returns>
HRESULT CBackgroundRemovalBasics::ProcessDepth()
{
    HRESULT hr;
    HRESULT bghr = S_OK;
    NUI_IMAGE_FRAME imageFrame;

    // Attempt to get the depth frame
    LARGE_INTEGER depthTimeStamp;
    hr = m_pNuiSensor->NuiImageStreamGetNextFrame(m_pDepthStreamHandle, 0, &imageFrame);
    if (FAILED(hr))
    {
        return hr;
    }
    depthTimeStamp = imageFrame.liTimeStamp;
    INuiFrameTexture* pTexture;

    // Attempt to get the extended depth texture
    hr = m_pNuiSensor->NuiImageFrameGetDepthImagePixelFrameTexture(m_pDepthStreamHandle, &imageFrame, &m_bNearMode, &pTexture);
    if (FAILED(hr))
    {
        return hr;
    }
    NUI_LOCKED_RECT LockedRect;

    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    pTexture->LockRect(0, &LockedRect, NULL, 0);

    // Make sure we've received valid data, and then present it to the background removed color stream.
    if (LockedRect.Pitch != 0)
    {
        bghr = m_pBackgroundRemovalStream->ProcessDepth(m_depthWidth * m_depthHeight * cBytesPerPixel, LockedRect.pBits, depthTimeStamp);
    }

    // We're done with the texture so unlock it. Even if above process failed, we still need to unlock and release.
    pTexture->UnlockRect(0);
    pTexture->Release();

    // Release the frame
    hr = m_pNuiSensor->NuiImageStreamReleaseFrame(m_pDepthStreamHandle, &imageFrame);

    if (FAILED(bghr))
    {
        return bghr;
    }

    return hr;
}
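Depth is only one of the inputs the background-removed color stream consumes; the same sample feeds it color (and skeleton) frames tagged with their timestamps so the stream can pair them up. Below is a minimal sketch of the color-side counterpart, assuming m_pColorStreamHandle, m_colorWidth and m_colorHeight members analogous to the depth ones; check the toolkit headers for the exact ProcessColor signature.

HRESULT CBackgroundRemovalBasics::ProcessColor()   // sketch, mirrors ProcessDepth above
{
    NUI_IMAGE_FRAME imageFrame;
    HRESULT hr = m_pNuiSensor->NuiImageStreamGetNextFrame(m_pColorStreamHandle, 0, &imageFrame);
    if (FAILED(hr))
    {
        return hr;
    }

    NUI_LOCKED_RECT lockedRect;
    imageFrame.pFrameTexture->LockRect(0, &lockedRect, NULL, 0);

    if (lockedRect.Pitch != 0)
    {
        // Hand the raw BGRA pixels to the background-removed color stream,
        // stamped with the frame time so they can be matched with a depth frame.
        hr = m_pBackgroundRemovalStream->ProcessColor(
            m_colorWidth * m_colorHeight * cBytesPerPixel,
            lockedRect.pBits,
            imageFrame.liTimeStamp);
    }

    imageFrame.pFrameTexture->UnlockRect(0);
    m_pNuiSensor->NuiImageStreamReleaseFrame(m_pColorStreamHandle, &imageFrame);
    return hr;
}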
Code example #3
/// <summary>
/// Retrieve depth data from stream frame
/// </summary>
void NuiDepthStream::ProcessDepth()
{
    HRESULT         hr;
    NUI_IMAGE_FRAME imageFrame;

    if (m_Recording)
    {
        // Initializing a video writer and allocating an image for recording
        if ((m_TimerCount++) % FramesPerFile == 0)
        {
            WCHAR szFilename[MAX_PATH] = { 0 };
            if (SUCCEEDED(GetFileName(szFilename, _countof(szFilename), m_instanceName, DepthSensor)))
            {
                char char_szFilename[MAX_PATH] = { 0 };
                size_t convertedChars;
                wcstombs_s(&convertedChars, char_szFilename, sizeof(char_szFilename), szFilename, sizeof(char_szFilename));
                // Lagarith lossless codec; pass -1 instead to let the user pick a codec
                m_pwriter = cvCreateVideoWriter(char_szFilename,
                    CV_FOURCC('L', 'A', 'G', 'S'),
                    FramesPerSecond, m_ImageRes);
            }
            m_TimerCount %= FramesPerFile;
        }
    }

    // Attempt to get the depth frame
    hr = m_pNuiSensor->NuiImageStreamGetNextFrame(m_hStreamHandle, 0, &imageFrame);
    if (FAILED(hr))
    {
        return;
    }

    if (m_paused)
    {
        // Stream paused. Skip frame process and release the frame.
        goto ReleaseFrame;
    }

    BOOL nearMode;
    INuiFrameTexture* pTexture;

    // FT (face tracking) image texture, taken straight from the frame
    INuiFrameTexture* pFTTexture;

    pFTTexture = imageFrame.pFrameTexture;

    // Get the depth image pixel texture
    hr = m_pNuiSensor->NuiImageFrameGetDepthImagePixelFrameTexture(m_hStreamHandle, &imageFrame, &nearMode, &pTexture);
    if (FAILED(hr))
    {
        goto ReleaseFrame;
    }

    NUI_LOCKED_RECT lockedRect;

    // FT locked rect
    NUI_LOCKED_RECT FTlockedRect;

    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    pTexture->LockRect(0, &lockedRect, NULL, 0);

    // Lock the FT frame data
    pFTTexture->LockRect(0, &FTlockedRect, NULL, 0);

    // Make sure we've received valid data
    if (lockedRect.Pitch != 0)
    {
        // Convert depth data to a color image and copy it to the image buffer
        m_imageBuffer.CopyDepth(lockedRect.pBits, lockedRect.size, nearMode, m_depthTreatment);

        // Convert the 12-bit depth values to an 8-bit grayscale image
        NUI_DEPTH_IMAGE_PIXEL* depthBuffer = (NUI_DEPTH_IMAGE_PIXEL*)lockedRect.pBits;
        cv::Mat depthMat(m_ImageRes.height, m_ImageRes.width, CV_8UC1);
        INT cn = 1;
        for (int i = 0; i < depthMat.rows; i++) {
            for (int j = 0; j < depthMat.cols; j++) {
                USHORT realdepth = (depthBuffer->depth) & 0x0fff;  // take the 12 LSBs of the depth value
                BYTE intensity = (realdepth == 0 || realdepth > 4095) ? 0 : 255 - (BYTE)(((float)realdepth / 4095.0f) * 255.0f);  // scale to 8-bit grayscale (near = bright)
                depthMat.data[i * depthMat.cols * cn + j * cn + 0] = intensity;
                depthBuffer++;
            }
        }


        // Copy FT depth data to IFTImage buffer
        memcpy(m_pFTdepthBuffer->GetBuffer(), PBYTE(FTlockedRect.pBits), std::min(m_pFTdepthBuffer->GetBufferSize(), UINT(pFTTexture->BufferLen())));

        if (m_Recording)
        {
            //*m_pcolorImage = depthMat;
            //cvWriteFrame(m_pwriter, m_pcolorImage);

            const NUI_SKELETON_FRAME* pSkeletonFrame = m_pPrimaryViewer->getSkeleton();
            m_pDepthInbedAPPs->processFrame(depthMat, lockedRect.pBits, m_ImageRes, pSkeletonFrame);

            //if (m_TimerCount % FramesPerFile == 0)
            //{
            //    cvReleaseVideoWriter(&m_pwriter);
            //}
        }

        // Draw out the data with Direct2D
        if (m_pStreamViewer)
        {
            m_pStreamViewer->SetImage(&m_imageBuffer);
        }
    }

    // Done with the texture. Unlock and release it
    pFTTexture->UnlockRect(0);
    pTexture->UnlockRect(0);
    pTexture->Release();

ReleaseFrame:
    // Release the frame
    m_pNuiSensor->NuiImageStreamReleaseFrame(m_hStreamHandle, &imageFrame);
}
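The recording path in example #3 opens a new video file every FramesPerFile frames, but actually writing frames and closing the writer is left commented out. A sketch of that missing half is below, meant for the spot where the commented-out calls sit (inside the valid-data branch, after depthMat has been filled). It uses the same legacy OpenCV C API and assumes the writer was opened for single-channel frames (is_color = 0); otherwise depthMat would need a cv::cvtColor to BGR first.

        if (m_Recording && m_pwriter)
        {
            IplImage frame = depthMat;                // header over the same pixels, no copy (OpenCV 2.x)
            cvWriteFrame(m_pwriter, &frame);          // append the 8-bit depth visualization

            if (m_TimerCount % FramesPerFile == 0)    // the current file has received FramesPerFile frames
            {
                cvReleaseVideoWriter(&m_pwriter);     // flush and close it; the next call to
                m_pwriter = NULL;                     // ProcessDepth() opens a new writer
            }
        }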
Code example #4
File: main_backup.cpp    Project: JNeiger/3D_Scanner
int main(void)
{
	//Set the error callback
	glfwSetErrorCallback(error_callback);

	//Initialize GLFW
	if (!glfwInit())
	{
		exit(EXIT_FAILURE);
	}

	//Set the GLFW window creation hints - these are optional
	//glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3); //Request a specific OpenGL version
	//glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3); //Request a specific OpenGL version
	//glfwWindowHint(GLFW_SAMPLES, 4); //Request 4x antialiasing
	//glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);


	//Create a window and create its OpenGL context
	window = glfwCreateWindow(960, 720, "Test Window", NULL, NULL);

	//If the window couldn't be created
	if (!window)
	{
		fprintf(stderr, "Failed to open GLFW window.\n");
		glfwTerminate();
		exit(EXIT_FAILURE);
	}

	//This function makes the context of the specified window current on the calling thread. 
	glfwMakeContextCurrent(window);

	//Sets the key callback
	glfwSetKeyCallback(window, key_callback);

	//Initialize GLEW
	GLenum err = glewInit();

	//If GLEW hasn't initialized
	if (err != GLEW_OK)
	{
		fprintf(stderr, "Error: %s\n", glewGetErrorString(err));
		return -1;
	}

	//Set a background color
	glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
	glfwSetCursorPos(window, 1024 / 2, 768 / 2);

	GLuint VertexArrayID;
	glGenVertexArrays(1, &VertexArrayID);
	glBindVertexArray(VertexArrayID);

	// Create and compile our GLSL program from the shaders
	GLuint red = LoadShaders("SimpleTransform.vertexshader", "SingleColorRed.fragmentshader");
	GLuint grid = LoadShaders("SimpleTransform.vertexshader", "SingleColorGrid.fragmentshader");
	glBindFragDataLocation(red, 0, "red");
	glBindFragDataLocation(grid, 1, "grid");
	// Get a handle for our "MVP" uniform
	GLuint MatrixID = glGetUniformLocation(red, "MVP");

	// Projection matrix : 45° Field of View, 4:3 ratio, display range : 0.1 unit <-> 1000 units
	glm::mat4 Projection = glm::perspective(45.0f, 4.0f / 3.0f, 0.1f, 1000.0f);
	// Or, for an ortho camera :
	//glm::mat4 Projection = glm::ortho(-10.0f,10.0f,-10.0f,10.0f,0.0f,100.0f); // In world coordinates

	// Camera matrix
	glm::mat4 View = glm::lookAt(
		glm::vec3(4, 3, 3), // Camera is at (4,3,3), in World Space
		glm::vec3(0, 0, 0), // and looks at the origin
		glm::vec3(0, 1, 0)  // Head is up (set to 0,-1,0 to look upside-down)
		);


	static const GLfloat g_vertex_buffer_data[] = {
		-1.0f, -1.0f, 0.0f,
		1.0f, -1.0f, 0.0f,
		0.0f, 1.0f, 0.0f,
	};

	static const GLushort g_element_buffer_data[] = { 0, 1, 2 };

	GLuint vertexbuffer;
	glGenBuffers(1, &vertexbuffer);
	glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
	glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data), g_vertex_buffer_data, GL_STATIC_DRAW);

	static const GLfloat g_triangle_buffer_data[] = {
		-1.0f, -1.0f, -1.0f,
		1.0f, -1.0f, -1.0f,
		0.0f, 1.0f, -1.0f,
	};

	GLuint triangle;
	glGenBuffers(1, &triangle);
	glBindBuffer(GL_ARRAY_BUFFER, triangle);
	glBufferData(GL_ARRAY_BUFFER, sizeof(g_triangle_buffer_data), g_triangle_buffer_data, GL_STATIC_DRAW);

	// Enable depth test
	glEnable(GL_DEPTH_TEST);
	// Accept fragment if it closer to the camera than the former one
	glDepthFunc(GL_LESS);
	glEnable(GL_CULL_FACE);
	glEnable(GL_LIGHTING);
	glShadeModel(GL_SMOOTH); // GL_SMOOTH is a shading model, not a glEnable() capability
	//OPENGL INSTANTIATION
	HRESULT hr;
	NUI_IMAGE_FRAME depthFrame;
	HANDLE hDepth;
	INuiSensor* pNuiSensor = NULL;
	int iSensorCount = 0;
	hr = NuiGetSensorCount(&iSensorCount);

	if (FAILED(hr))
		return hr;

	for (int i = 0; i < iSensorCount; i++)
	{
		INuiSensor* tempSensor;
		hr = NuiCreateSensorByIndex(i, &tempSensor);

		if (FAILED(hr))
			continue;

		hr = tempSensor->NuiStatus();
		if (S_OK == hr)
		{
			pNuiSensor = tempSensor;
			break;
		}

		tempSensor->Release();
	}

	for (int i = 0; i < 2048; i++) {
		depthLookUp[i] = rawDepthToMeters(i);
	}

	rotation = getRotationMatrix(theta, psi, fi);

	// Guard against dereferencing a null pointer when no ready Kinect was found
	if (!pNuiSensor)
	{
		fprintf(stderr, "No ready Kinect found.\n");
		glfwTerminate();
		return -1;
	}

	pNuiSensor->NuiInitialize(NUI_INITIALIZE_FLAG_USES_DEPTH);
	pNuiSensor->NuiImageStreamOpen(
		NUI_IMAGE_TYPE_DEPTH,
		NUI_IMAGE_RESOLUTION_320x240,
		0,
		2,
		NULL,
		&hDepth);//KINECT INSTANTIATION

	cout << "Starting Main Loop";

	static double lastTime = glfwGetTime();
	//Main Loop
	do
	{
		double currentTime = glfwGetTime();
		float deltaTime = float(currentTime - lastTime);
		//Clear color buffer
		glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

		glUseProgram(grid);
		modelMatrix(MatrixID);


		hr = pNuiSensor->NuiImageStreamGetNextFrame(hDepth, 0, &depthFrame);
		if (!FAILED(hr))
		{

			INuiFrameTexture* pTexture;
			NUI_LOCKED_RECT LockedRect;

			hr = pNuiSensor->NuiImageFrameGetDepthImagePixelFrameTexture(
				hDepth, &depthFrame, false, &pTexture);

			if (FAILED(hr))
			{
				pNuiSensor->NuiImageStreamReleaseFrame(hDepth, &depthFrame);
				continue;
			}

			pTexture->LockRect(0, &LockedRect, NULL, 0);//Kinect Image Grab
			int skipX = 1;
			int skipY = 1;
			float scalar = 4.0f;

			if (LockedRect.Pitch != 0)
			{
				for (int x = 0; x < width; x += skipX)
				{
					for (int y = 0; y < height; y += skipY)
					{
						const NUI_DEPTH_IMAGE_PIXEL * pBufferRun = reinterpret_cast<const NUI_DEPTH_IMAGE_PIXEL *>(LockedRect.pBits) + x + y * width;
						
						//float depth = (float)(pBufferRun->depth);
						//glm::vec3 location = realWorld(depth, height - y, x, 500.0f, 1000.0f);
						//createCube(0.006f, location);
						Vector4 locationDepth = NuiTransformDepthImageToSkeleton(x, y, (short)(pBufferRun->depth << 3));
						glm::vec3 locationDepthxyz = glm::vec3(locationDepth.x * scalar, locationDepth.y * scalar, locationDepth.z * scalar);
						createCube(0.009f, locationDepthxyz);
					}
				}
			}

			pTexture->UnlockRect(0);
			pTexture->Release();

			pNuiSensor->NuiImageStreamReleaseFrame(hDepth, &depthFrame);
		}

		createGrid();

		//Test drawings
		/*
		glUseProgram(red);
		modelMatrix(MatrixID);
		//createCube(0.05f, glm::vec3(1.0f,1.0f,1.0f));
		// 1rst attribute buffer : vertices
		glEnableVertexAttribArray(0);
		//createObject(vertexbuffer, GL_TRIANGLES, 3);
		//createObject(triangle, GL_TRIANGLES, 3);
		glDisableVertexAttribArray(0);
		*/

		//Swap buffers
		glfwSwapBuffers(window);
		//Get and organize events, like keyboard and mouse input, window resizing, etc...
		glfwPollEvents();

		std::string title = "Title | FPS " + std::to_string(1.0f / deltaTime);
		const char* pszConstString = title.c_str();
		glfwSetWindowTitle(window, pszConstString);

		lastTime = currentTime;
	} //Check if the ESC key had been pressed or if the window had been closed
	while (!glfwWindowShouldClose(window));


	//Close OpenGL window and terminate GLFW
	glfwDestroyWindow(window);
	//Finalize and clean up GLFW
	glfwTerminate();

	exit(EXIT_SUCCESS);
}
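main() above depends on two helpers that are not shown: rawDepthToMeters(), used to fill depthLookUp, and getRotationMatrix(), which builds a rotation from three Euler angles. Plausible sketches follow; the depth-conversion constants are the empirical approximation commonly quoted for the original Kinect's 11-bit raw values, not values taken from this project, and the GLM build is assumed to take angles in radians.

// Commonly used empirical approximation for a raw (0..2047) Kinect depth reading in meters.
float rawDepthToMeters(int rawDepth)
{
	if (rawDepth < 2047)
	{
		return 1.0f / (rawDepth * -0.0030711016f + 3.3309495161f);
	}
	return 0.0f; // 2047 marks "no reading"
}

// Compose a rotation matrix from three Euler angles with GLM.
glm::mat4 getRotationMatrix(float theta, float psi, float fi)
{
	glm::mat4 r(1.0f);
	r = glm::rotate(r, theta, glm::vec3(1.0f, 0.0f, 0.0f));
	r = glm::rotate(r, psi,   glm::vec3(0.0f, 1.0f, 0.0f));
	r = glm::rotate(r, fi,    glm::vec3(0.0f, 0.0f, 1.0f));
	return r;
}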
Code example #5
    HRESULT KinectSDKGrabber::ProcessDepth(float *buffer)
    {
        HRESULT hr = S_OK;
        NUI_IMAGE_FRAME imageFrame;

        // Attempt to get the depth frame
        hr = m_pNuiSensor->NuiImageStreamGetNextFrame(m_pDepthStreamHandle, 0, &imageFrame);
        // E_NUI_FRAME_NO_DATA just means no new frame is ready yet, so it is not treated as an error here
        if (FAILED(hr) && hr != E_NUI_FRAME_NO_DATA)
        {
            std::cout << hr << std::endl;
            return hr;
        }

        BOOL nearMode;
        INuiFrameTexture *pTexture = imageFrame.pFrameTexture;
        NUI_LOCKED_RECT LockedRect;

        // Lock the frame data so the Kinect knows not to modify it while we're reading it
        pTexture->LockRect(0, &LockedRect, NULL, 0);

        m_pNuiSensor->NuiImageGetColorPixelCoordinateFrameFromDepthPixelFrameAtResolution(
            NUI_IMAGE_RESOLUTION_640x480,
            NUI_IMAGE_RESOLUTION_640x480,
            FrameBufferSize,
            (USHORT *)LockedRect.pBits,
            FrameBufferSize * 2,
            m_colorCoordinates
        );

        pTexture->UnlockRect(0);

        // Get the depth image pixel texture
        hr = m_pNuiSensor->NuiImageFrameGetDepthImagePixelFrameTexture(m_pDepthStreamHandle, &imageFrame, &nearMode, &pTexture);
        if (FAILED(hr))
        {
            goto ReleaseFrame;
        }

        // Lock the frame data so the Kinect knows not to modify it while we're reading it
        pTexture->LockRect(0, &LockedRect, NULL, 0);

        // Make sure we've received valid data
        if (LockedRect.Pitch != 0)
        {
            const NUI_DEPTH_IMAGE_PIXEL *pBufferRun = reinterpret_cast<const NUI_DEPTH_IMAGE_PIXEL *>(LockedRect.pBits);

            // end pixel is start + width*height - 1
            const NUI_DEPTH_IMAGE_PIXEL *pBufferEnd = pBufferRun + FrameBufferSize;


            int idx = 0;
            while (pBufferRun < pBufferEnd)
            {
                float depth = pBufferRun->depth;
                if (depth != 0)
                {
                    buffer[idx] = depth / 1000.f;
                }
                else
                {
                    buffer[idx] = std::numeric_limits<float>::quiet_NaN();
                }
                ++pBufferRun;
                ++idx;
            }

            hr = S_OK;
        }
        else
        {
            hr = E_FAIL;
        }

        // We're done with the texture so unlock it
        pTexture->UnlockRect(0);

        pTexture->Release();

    ReleaseFrame:
        // Release the frame
        m_pNuiSensor->NuiImageStreamReleaseFrame(m_pDepthStreamHandle, &imageFrame);

        return hr;
    }
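The NuiImageGetColorPixelCoordinateFrameFromDepthPixelFrameAtResolution call above fills m_colorCoordinates with one (x, y) pair per depth pixel, mapping it into the color image. A typical use is to sample the registered color for each depth sample; a sketch, assuming a 640x480 BGRA color frame in a hypothetical pColorPixels buffer:

    // For depth pixel i, m_colorCoordinates[2*i] / [2*i + 1] give the matching color-image x / y.
    for (int i = 0; i < FrameBufferSize; ++i)
    {
        LONG cx = m_colorCoordinates[2 * i];
        LONG cy = m_colorCoordinates[2 * i + 1];

        if (cx >= 0 && cx < 640 && cy >= 0 && cy < 480)
        {
            const BYTE *bgra = pColorPixels + (cy * 640 + cx) * 4;  // hypothetical color buffer
            // ... combine bgra[0..2] with buffer[i] (the depth in meters) as needed
        }
    }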
Code example #6
void KinectReader::ProcessDepth()
{
    if (!m_pNuiSensor)
    {
        /*
        // Debug fallback: fill the buffer with random values when no sensor is attached
        int delta_depth = _max_depth - _min_depth;
        for ( unsigned int i = 0; i < 640*160; i++ )
            m_depth[i] = (rand()%delta_depth - delta_depth)*0.15f;
        */
        return;
    }


    HRESULT hr;
    NUI_IMAGE_FRAME imageFrame;

    // Attempt to get the depth frame
    hr = m_pNuiSensor->NuiImageStreamGetNextFrame(m_pDepthStreamHandle, 0, &imageFrame);
	
    if (FAILED(hr))
    {
        return;
    }

    BOOL nearMode;
    INuiFrameTexture* pTexture;

    // Get the depth image pixel texture
    hr = m_pNuiSensor->NuiImageFrameGetDepthImagePixelFrameTexture(
        m_pDepthStreamHandle, &imageFrame, &nearMode, &pTexture);
    if (FAILED(hr))
    {
        goto ReleaseFrame;
    }
	
    NUI_LOCKED_RECT LockedRect;
	
    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    pTexture->LockRect(0, &LockedRect, NULL, 0);

    // Make sure we've received valid data
    if (LockedRect.Pitch != 0)
    {
        // Use the configured min and max reliable depth for the current frame
        int minDepth = _min_depth;  // original sample used: (nearMode ? NUI_IMAGE_DEPTH_MINIMUM_NEAR_MODE : NUI_IMAGE_DEPTH_MINIMUM) >> NUI_IMAGE_PLAYER_INDEX_SHIFT
        int maxDepth = _max_depth;  // original sample used: (nearMode ? NUI_IMAGE_DEPTH_MAXIMUM_NEAR_MODE : NUI_IMAGE_DEPTH_MAXIMUM) >> NUI_IMAGE_PLAYER_INDEX_SHIFT

        float * float_run = m_depth;
        const NUI_DEPTH_IMAGE_PIXEL * pBufferRun = reinterpret_cast<const NUI_DEPTH_IMAGE_PIXEL *>(LockedRect.pBits);

        // end pixel is start + width*height - 1
        const NUI_DEPTH_IMAGE_PIXEL * pBufferEnd = pBufferRun + (cDepthWidth * cDepthHeight);

        float intensity = 0.0f;
        int delta_depth = maxDepth - minDepth;
        float float_per_depth_unit = _active_depth / float(delta_depth);

        while ( pBufferRun < pBufferEnd )
        {
            // NUI_DEPTH_IMAGE_PIXEL already separates the depth value from the player index
            USHORT depth = pBufferRun->depth;

            // Map depth inside [minDepth, maxDepth) to a float intensity;
            // values outside the reliable range map to 0.0f.
            // Note: Using conditionals in this loop could degrade performance.
            // Consider using a lookup table instead when writing production code.
            intensity = static_cast<float>(depth >= minDepth && depth < maxDepth ? ((depth - minDepth) - delta_depth) * float_per_depth_unit : 0.0f);

            // Write out the intensity value
            *(float_run++) = intensity;

            // Increment our index into the Kinect's depth buffer
            ++pBufferRun;
        }

        // The converted float image in m_depth is now ready for the caller.
        // (The original SDK sample drew it with Direct2D at this point:)
        //m_pDrawDepth->Draw(m_depthRGBX, cDepthWidth * cDepthHeight * cBytesPerPixel);
    }

    // We're done with the texture so unlock it
    pTexture->UnlockRect(0);

    pTexture->Release();

ReleaseFrame:
	//return;
    // Release the frame
    m_pNuiSensor->NuiImageStreamReleaseFrame(m_pDepthStreamHandle, &imageFrame);
}
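The comment inside the loop suggests replacing the per-pixel conditionals with a lookup table. A minimal sketch of that idea: build the table once (for example whenever _min_depth or _max_depth change) and index it with the 16-bit depth value. The table size and the m_depthIntensityTable member name are assumptions for illustration.

#include <vector>

// Precompute depth value -> intensity so the per-pixel loop needs no branches.
std::vector<float> BuildDepthIntensityTable(int minDepth, int maxDepth, float activeDepth)
{
    std::vector<float> table(65536, 0.0f);          // one entry per possible USHORT depth value
    const int deltaDepth = maxDepth - minDepth;
    const float floatPerDepthUnit = activeDepth / float(deltaDepth);

    for (int d = minDepth; d < maxDepth; ++d)
    {
        // Same mapping as the loop in ProcessDepth(), evaluated once per depth value
        table[d] = ((d - minDepth) - deltaDepth) * floatPerDepthUnit;
    }
    return table;
}

// The inner loop in ProcessDepth() then reduces to:
//     *(float_run++) = m_depthIntensityTable[pBufferRun->depth];
//     ++pBufferRun;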