/// <summary>
/// Main processing function
/// </summary>
void CBackgroundRemovalBasics::Update()
{
    if (NULL == m_pNuiSensor)
    {
        return;
    }

    if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextBackgroundRemovedFrameEvent, 0))
    {
        ComposeImage();
    }

    if ( WAIT_OBJECT_0 == WaitForSingleObject(m_hNextDepthFrameEvent, 0) )
    {
        ProcessDepth();
    }

    if ( WAIT_OBJECT_0 == WaitForSingleObject(m_hNextColorFrameEvent, 0) )
    {
        ProcessColor();
    }

    if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextSkeletonFrameEvent, 0) )
    {
        ProcessSkeleton();
    }
}
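All of these Update()-style functions poll each stream event with a zero timeout and return immediately when nothing is ready, so they are meant to be driven from an outer loop. Below is a minimal, hypothetical driver sketch; the RunCaptureLoop name, the 100 ms wake-up interval, and the assumption that the frame events are manual-reset (as in the SDK samples, so the zero-timeout checks inside Update() still see them signalled) are all assumptions, not part of the original code.

#include <windows.h>

// Hypothetical driver loop: block until a depth or color frame-ready event
// signals (or a short timeout elapses), then let Update() poll each stream
// with a zero timeout exactly as the samples in this section do.
template <typename App>
void RunCaptureLoop(App& app, HANDLE depthEvent, HANDLE colorEvent, volatile bool& running)
{
    HANDLE events[] = { depthEvent, colorEvent };
    while (running)
    {
        // FALSE = wake when any one event signals; the 100 ms timeout keeps
        // the loop responsive to the running flag even when no frames arrive.
        DWORD wait = WaitForMultipleObjects(2, events, FALSE, 100);
        if (wait == WAIT_FAILED)
        {
            break;  // invalid handles; stop polling
        }
        app.Update();
    }
}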
/// <summary>
/// Process an incoming stream frame
/// </summary>
void NuiColorStream::ProcessStreamFrame()
{
    if (WAIT_OBJECT_0 == WaitForSingleObject(GetFrameReadyEvent(), 0))
    {
        // Frame ready event has been set. Proceed to process incoming frame
        ProcessColor();
    }
}
/// <summary>
/// Main processing function
/// </summary>
void KinectEasyGrabber::Record()
{
    if (NULL == m_pNuiSensor)
    {
        return;
    }

    bool needToDraw = false;

    if ( WAIT_OBJECT_0 == WaitForSingleObject(m_hNextDepthFrameEvent, 0) )
    {
        // if we have received any valid new depth data we may need to draw
        if ( SUCCEEDED(ProcessDepth()) )
        {
            needToDraw = true;
        }
    }

    if ( WAIT_OBJECT_0 == WaitForSingleObject(m_hNextColorFrameEvent, 0) )
    {
        // if we have received any valid new color data we may need to draw
        if ( SUCCEEDED(ProcessColor()) )
        {
            needToDraw = true;
        }
    }

    // Depth is 30 fps.  For any given combination of FPS, we should ensure we are within half a frame of the more frequent of the two.  
    // But depth is always the greater (or equal) of the two, so just use depth FPS.
    const int depthFps = 30;
    const int halfADepthFrameMs = (1000 / depthFps) / 2;

    // If we have not yet received any data for either color or depth since we started up, we shouldn't draw
    if (m_colorTimeStamp.QuadPart == 0 || m_depthTimeStamp.QuadPart == 0)
    {
        needToDraw = false;
    }

    // If the color frame is more than half a depth frame ahead of the depth frame we have,
    // then we should wait for another depth frame.  Otherwise, just go with what we have.
    if (m_colorTimeStamp.QuadPart - m_depthTimeStamp.QuadPart > halfADepthFrameMs)
    {
        needToDraw = false;
    }

    if (needToDraw)
    {
        // Stop after recording 50 frames
        if (m_frameIndex >= 50) return;

        dumpToDisk(m_frameIndex, m_frameBasename, m_depthD16, m_colorRGBX, m_colorCoordinates, m_depthTimeStamp, m_colorTimeStamp);

        // Draw the data with Direct2D
        m_pDrawKinectEasyGrabber->Draw(m_colorRGBX, m_colorWidth * m_colorHeight * cBytesPerPixel);

        m_frameIndex++;
    }
}
////////////////////////////////////////////////////////////////////////////////
// public member functions
////////////////////////////////////////////////////////////////////////////////
int IntelCamera::NextFrame()
{
	// Color and depth images: force them to stay synchronized
	if ( ProcessColor()==-1 ) return -1;
	if ( ProcessDepth()==-1 ) return -1;

	// point cloud
	if ( MapColorToDepth()==-1 ) return -1;

	return 1;
}
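NextFrame() advances color, depth, and the color-to-depth mapping together and returns -1 on the first failure, so a caller only needs to test the return value. The following is a hypothetical usage sketch; the GrabFrames helper and its template parameter are illustrative, not part of IntelCamera.

// Hypothetical grab loop: stop after maxFrames frames or on the first failure.
template <typename Camera>
int GrabFrames(Camera& camera, int maxFrames)
{
    int grabbed = 0;
    while (grabbed < maxFrames && camera.NextFrame() == 1)
    {
        // ... consume the synchronized color image, depth image,
        //     and mapped point cloud here ...
        ++grabbed;
    }
    return grabbed;
}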
/// <summary>
/// Main processing function
/// </summary>
void CColorBasics::Update(DASHout* dasher, DWORD event_res)
{
	u64 timeref = 0;	//TODO fix/move this

    if (NULL == m_pNuiSensor)
    {
        return;
    }

    if ( WAIT_OBJECT_0 == WaitForSingleObject(m_hNextColorFrameEvent, INFINITE) )
    {
        ProcessColor(dasher);
    }

    if (event_res == 1 && WAIT_OBJECT_0 == WaitForSingleObject(m_hNextSkeletonEvent, INFINITE) )
    {
		//TODO add process skel function
		timeref = gf_sys_clock_high_res() - dasher->sys_start;
		ProcessSkeleton(dasher, timeref);
    }

    if (dasher->nextColourFrame)
    {
        int res = muxer_encode(dasher, (u8 *) dasher->nextColourFrame->kinectFrame, dasher->nextColourFrame->size, dasher->nextColourFrame->pts);

        if ((res >= 0) && (!dasher->segment_started))
        {
            res = muxer_open_segment(dasher, "x64/Debug/out", "seg", dasher->seg_num);
            timeref = gf_sys_clock_high_res() - dasher->sys_start;
            printf("\t\tOpening segment time : %llu\n", timeref);
        }

        if (res >= 0)
        {
            res = muxer_write_frame(dasher, dasher->colFrameCount);
            dasher->colFrameCount++;
        }

        if (res == 1)
        {
            res = muxer_close_segment(dasher);
            if (res == GF_OK)
            {
                dasher->seg_num = write_playlist_segment(dasher->seg_num, timeref);
            }
        }

        dasher->nextColourFrame = NULL;
    }

}
/// <summary>
/// Main processing function
/// </summary>
void CDataCollection::Update()
{
    if (NULL == m_pNuiSensor)
    {
        return;
    }

    if ( WAIT_OBJECT_0 == WaitForSingleObject(m_hNextColorFrameEvent, 0) )
    {
        ProcessColor();
    }
    else if ( WAIT_OBJECT_0 == WaitForSingleObject(m_hNextSkeletonEvent, 0) )
    {
        ProcessSkeleton();
    }
}
    bool KinectSDKGrabber::GetNextFrame(cv::Mat &colorFrame, cv::Mat &depthFrame)
    {
        WaitForSingleObject(m_hNextDepthFrameEvent, INFINITE);
        WaitForSingleObject(m_hNextColorFrameEvent, INFINITE);

        colorFrame = cv::Mat::zeros(FrameHeight, FrameWidth, CV_8UC4);
        depthFrame = cv::Mat::zeros(FrameHeight, FrameWidth, CV_32FC1);

        HRESULT hr = ProcessDepth((float *)depthFrame.data);
        if (FAILED(hr))
        {
            return false;
        }

        hr = ProcessColor(colorFrame);
        if (FAILED(hr))
        {
            return false;
        }

        return true;
    }
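Since GetNextFrame() blocks until both events signal and hands back cv::Mat buffers, a display loop is straightforward. The sketch below is a hypothetical consumer, written as a template so it compiles without the grabber's declaration; the window names and the ~4 m depth scaling are assumptions.

#include <opencv2/opencv.hpp>

// Hypothetical display loop: pull synchronized frames until grabbing fails
// or the user presses ESC. Depth arrives as CV_32FC1, so rescale it to an
// 8-bit image for visualization (assuming millimetre values up to ~4 m).
template <typename Grabber>
void ShowFrames(Grabber& grabber)
{
    cv::Mat color, depth;
    while (grabber.GetNextFrame(color, depth))
    {
        cv::Mat depthVis;
        depth.convertTo(depthVis, CV_8UC1, 255.0 / 4000.0);

        cv::imshow("color", color);
        cv::imshow("depth", depthVis);
        if (cv::waitKey(1) == 27)  // ESC
        {
            break;
        }
    }
}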
void KinectSensorV1::Update()
{
	if (NULL == m_pNuiSensor)
	{
		return;
	}

	if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextDepthFrameEvent, 0))
	{
		ProcessDepth();
	}
	 
	if (WAIT_OBJECT_0 == WaitForSingleObject(m_hNextColorFrameEvent, 0))
	{
		ProcessColor();
	}

	if (cRevieveRGB && WAIT_OBJECT_0 == WaitForSingleObject(m_hNextRGBFrameEvent, 0))
	{
		ProcessRGB();
	}
}
bool CScrollBufL1::ProcessEscape(char *szText, int nLen)
{
    int param1 = -1;
    int param2 = -1;
    int param3 = -1;
    char command = 0;
    bool bExpanded;

    VerifyCritical();
    bExpanded = ExtractCodes(szText, nLen, &param1, &param2, &param3, &command);

    switch (command)
    {
    case 'c':
        /* Terminal ID aka device attribute */
        TRACE("Terminal ID\n");
        break;
    case 'A':
        m_pL2->SetInsertionLine(m_pL2->GetInsertionLine() - param1, false);
        break;
    case 'B':
        m_pL2->SetInsertionLine(m_pL2->GetInsertionLine() + param1, false);
        break;
    case 'C':
        m_pL2->KeypadInsertionCol(param1, 1);
        break;
    case 'D':
        m_pL2->KeypadInsertionCol(param1, -1);
        break;
    case 'G':
        m_pL2->SetInsertionCol(param1);
        break;
    case 'h':
        ProcessMode(bExpanded, param1);
        break;
    case 'f':
    case 'H':
        ProcessMoveCursor(param1, param2);
        break;
    case 'J':
        ProcessEraseScreen(param1, ' ');
        break;
    case 'K':
        ProcessEraseLine(param1, m_pL2->GetInsertionLine(), ' ');
        break;
    case 'l':
        ProcessResetMode(bExpanded, param1);
        break;
    case 'm':
        if (param1 == -1)
        {
            ProcessColor(0);
        }
        else
        {
            ProcessColor(param1);
        }
        if (param2 != -1)
        {
            ProcessColor(param2);
        }
        if (param3 != -1)
        {
            ProcessColor(param3);
        }
        break;
    case 'P':
        m_pL2->ProcessDeleteChar(param1);
        break;
    case 'r':
        m_pL2->ProcessScrollArea(param1, param2);
        break;
    case '@':
        m_pL2->ProcessInsertChar(param1);
        break;
    default:
        TRACE3("Got %i %i %c\n", param1, param2, command);
        break;
    }

    return true;
}
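ExtractCodes() itself is not shown; as an illustration of what the switch above receives, the following self-contained sketch (a hypothetical stand-in, not the real implementation) parses a CSI sequence such as "\x1b[1;31m" into param1 = 1, param2 = 31, command = 'm', which the 'm' case above would turn into ProcessColor(1) followed by ProcessColor(31).

#include <cctype>
#include <cstdio>

// Hypothetical sketch of extracting up to three numeric parameters and the
// final command byte from an ANSI CSI escape sequence. Unset parameters stay -1.
static char ExtractCodesSketch(const char* szText, int nLen,
                               int* p1, int* p2, int* p3)
{
    int params[3] = { -1, -1, -1 };
    int idx = 0;
    int i = 0;
    // Skip the ESC and '[' introducer if present.
    if (i < nLen && szText[i] == '\x1b') ++i;
    if (i < nLen && szText[i] == '[') ++i;
    while (i < nLen && (std::isdigit((unsigned char)szText[i]) || szText[i] == ';'))
    {
        if (szText[i] == ';') { ++idx; ++i; continue; }
        if (idx < 3)
        {
            if (params[idx] < 0) params[idx] = 0;
            params[idx] = params[idx] * 10 + (szText[i] - '0');
        }
        ++i;
    }
    *p1 = params[0]; *p2 = params[1]; *p3 = params[2];
    return (i < nLen) ? szText[i] : 0;  // the byte after the parameters is the command
}

int main()
{
    int a, b, c;
    char cmd = ExtractCodesSketch("\x1b[1;31m", 7, &a, &b, &c);
    std::printf("%d %d %d %c\n", a, b, c, cmd);  // prints: 1 31 -1 m
    return 0;
}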
	void UpdateColor()
	{
		if (!m_pColorFrameReader)
		{
			return;
		}

		IColorFrame* pColorFrame = NULL;

		HRESULT hr = m_pColorFrameReader->AcquireLatestFrame(&pColorFrame);

		if (SUCCEEDED(hr))
		{
			INT64 nTime = 0;
			IFrameDescription* pFrameDescription = NULL;
			int nWidth = 0;
			int nHeight = 0;
			ColorImageFormat imageFormat = ColorImageFormat_None;
			UINT nBufferSize = 0;
			RGBQUAD *pBuffer = NULL;

			hr = pColorFrame->get_RelativeTime(&nTime);

			if (SUCCEEDED(hr))
			{
				hr = pColorFrame->get_FrameDescription(&pFrameDescription);
			}

			if (SUCCEEDED(hr))
			{
				hr = pFrameDescription->get_Width(&nWidth);
			}

			if (SUCCEEDED(hr))
			{
				m_nColorWidth = nWidth;
				hr = pFrameDescription->get_Height(&nHeight);
			}

			if (SUCCEEDED(hr))
			{
				m_nColorHeight = nHeight;
				hr = pColorFrame->get_RawColorImageFormat(&imageFormat);
			}

			if (SUCCEEDED(hr))
			{
				if (imageFormat == ColorImageFormat_Bgra)
				{
					hr = pColorFrame->AccessRawUnderlyingBuffer(&nBufferSize, reinterpret_cast<BYTE**>(&pBuffer));
				}
				else if (m_pColorRGBX)
				{
					pBuffer = m_pColorRGBX;
					nBufferSize = nWidth * nHeight * sizeof(RGBQUAD);
					hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(pBuffer), ColorImageFormat_Bgra);            
				}
				else
				{
					hr = E_FAIL;
				}
			}

			if (SUCCEEDED(hr))
			{
				ProcessColor(nTime, pBuffer, nWidth, nHeight);
			}

			SafeRelease(pFrameDescription);
		}
		else
		{
			DumpHR(hr);
		}

		SafeRelease(pColorFrame);
	}
void Kinect2Manager::UpdateColor(IColorFrame* pColorFrame)
{
#ifdef _USE_KINECT
    INT64 nTime = 0;
    IFrameDescription* pFrameDescription = NULL;
    int nWidth = 0;
    int nHeight = 0;
    ColorImageFormat imageFormat = ColorImageFormat_None;
    UINT nBufferSize = 0;
    RGBQUAD *pBuffer = NULL;

    HRESULT hr = pColorFrame->get_RelativeTime(&nTime);

    if (SUCCEEDED(hr))
    {

        m_nColorTime = nTime;
        hr = pColorFrame->get_FrameDescription(&pFrameDescription);
    }

    if (SUCCEEDED(hr))
    {
        hr = pFrameDescription->get_Width(&nWidth);
    }

    if (SUCCEEDED(hr))
    {
        m_nColorWidth = nWidth;
        hr = pFrameDescription->get_Height(&nHeight);
    }

    if (SUCCEEDED(hr))
    {
        m_nColorHeight = nHeight;
        hr = pColorFrame->get_RawColorImageFormat(&imageFormat);
    }

    if (SUCCEEDED(hr))
    {
        if (imageFormat == ColorImageFormat_Bgra)
        {
            hr = pColorFrame->AccessRawUnderlyingBuffer(&nBufferSize, reinterpret_cast<BYTE**>(&pBuffer));
        }
        else if (m_pColorRGBX)
        {
            pBuffer = m_pColorRGBX;
            nBufferSize = nWidth * nHeight * sizeof(RGBQUAD);
            hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(pBuffer), ColorImageFormat_Bgra);
        }
        else
        {
            hr = E_FAIL;
        }
    }

    if (SUCCEEDED(hr))
    {
        ProcessColor(nTime, pBuffer, nWidth, nHeight);
    }

    SafeRelease(pFrameDescription);
#else
#endif
}
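The Kinect v2 examples above lean on a SafeRelease() helper that is not shown here. In the Microsoft Kinect SDK samples it is conventionally defined as the small template below, reproduced as a sketch for completeness.

// Conventional SafeRelease helper from the Kinect SDK samples: release a COM
// interface pointer if it is non-null and clear it so it cannot be released twice.
template <class Interface>
inline void SafeRelease(Interface*& pInterfaceToRelease)
{
    if (pInterfaceToRelease != NULL)
    {
        pInterfaceToRelease->Release();
        pInterfaceToRelease = NULL;
    }
}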
/// <summary>
/// Main processing function
/// </summary>
void KinectEasyGrabber::Play()
{
    if (NULL == m_pNuiSensor)
    {
        return;
    }

    bool needToDraw = false;

    if ( WAIT_OBJECT_0 == WaitForSingleObject(m_hNextDepthFrameEvent, 0) )
    {
        // if we have received any valid new depth data we may need to draw
        if ( SUCCEEDED(ProcessDepth()) )
        {
            needToDraw = true;
        }
    }

    if ( WAIT_OBJECT_0 == WaitForSingleObject(m_hNextColorFrameEvent, 0) )
    {
        // if we have received any valid new color data we may need to draw
        if ( SUCCEEDED(ProcessColor()) )
        {
            needToDraw = true;
        }
    }

    // Depth is 30 fps.  For any given combination of FPS, we should ensure we are within half a frame of the more frequent of the two.  
    // But depth is always the greater (or equal) of the two, so just use depth FPS.
    const int depthFps = 30;
    const int halfADepthFrameMs = (1000 / depthFps) / 2;

    // If we have not yet received any data for either color or depth since we started up, we shouldn't draw
    if (m_colorTimeStamp.QuadPart == 0 || m_depthTimeStamp.QuadPart == 0)
    {
        needToDraw = false;
    }

    // If the color frame is more than half a depth frame ahead of the depth frame we have,
    // then we should wait for another depth frame.  Otherwise, just go with what we have.
    if (m_colorTimeStamp.QuadPart - m_depthTimeStamp.QuadPart > halfADepthFrameMs)
    {
        needToDraw = false;
    }

    if (needToDraw)
    {
        int outputIndex = 0;
        LONG* pDest;
        LONG* pSrc;

        // loop over each row and column of the color
        for (LONG y = 0; y < m_colorHeight; ++y)
        {
            for (LONG x = 0; x < m_colorWidth; ++x)
            {
                // calculate index into depth array
                int depthIndex = x/m_colorToDepthDivisor + y/m_colorToDepthDivisor * m_depthWidth;

                USHORT depth  = m_depthD16[depthIndex];
                USHORT player = NuiDepthPixelToPlayerIndex(depth);

                // default setting source to copy from the background pixel
                pSrc  = (LONG *)m_backgroundRGBX + outputIndex;

                // if we're tracking a player for the current pixel, draw from the color camera
                if ( player > 0 )
                {
                    // retrieve the depth to color mapping for the current depth pixel
                    LONG colorInDepthX = m_colorCoordinates[depthIndex * 2];
                    LONG colorInDepthY = m_colorCoordinates[depthIndex * 2 + 1];

                    // make sure the depth pixel maps to a valid point in color space
                    if ( colorInDepthX >= 0 && colorInDepthX < m_colorWidth && colorInDepthY >= 0 && colorInDepthY < m_colorHeight )
                    {
                        // calculate index into color array
                        LONG colorIndex = colorInDepthX + colorInDepthY * m_colorWidth;

                        // set source for copy to the color pixel
                        pSrc  = (LONG *)m_colorRGBX + colorIndex;
                    }
                }

                // calculate output pixel location
                pDest = (LONG *)m_outputRGBX + outputIndex++;

                // write output
                *pDest = *pSrc;
            }
        }

        // Draw the data with Direct2D
        m_pDrawKinectEasyGrabber->Draw(m_outputRGBX, m_colorWidth * m_colorHeight * cBytesPerPixel);
    }
}
/// <summary>
/// Main processing function
/// </summary>
void KinectEasyGrabber::RecordArray()
{
    if (NULL == m_pNuiSensor)
    {
        return;
    }

    bool needToDraw = false;

    if ( WAIT_OBJECT_0 == WaitForSingleObject(m_hNextDepthFrameEvent, 0) )
    {
        // if we have received any valid new depth data we may need to draw
        if ( SUCCEEDED(ProcessDepth()) )
        {
            needToDraw = true;
        }
    }

    if ( WAIT_OBJECT_0 == WaitForSingleObject(m_hNextColorFrameEvent, 0) )
    {
        // if we have received any valid new color data we may need to draw
        if ( SUCCEEDED(ProcessColor()) )
        {
            needToDraw = true;
        }
    }

    // Depth is 30 fps.  For any given combination of FPS, we should ensure we are within half a frame of the more frequent of the two.  
    // But depth is always the greater (or equal) of the two, so just use depth FPS.
    const int depthFps = 30;
    const int halfADepthFrameMs = (1000 / depthFps) / 2;

    // If we have not yet received any data for either color or depth since we started up, we shouldn't draw
    if (m_colorTimeStamp.QuadPart == 0 || m_depthTimeStamp.QuadPart == 0)
    {
        needToDraw = false;
    }

    // If the color frame is more than half a depth frame ahead of the depth frame we have,
    // then we should wait for another depth frame.  Otherwise, just go with what we have.
    if (m_colorTimeStamp.QuadPart - m_depthTimeStamp.QuadPart > halfADepthFrameMs)
    {
        needToDraw = false;
    }

    // Number of frames to record: once the limit is reached, flush the buffered frames to disk
    if (m_frameIndex >= m_totalFrames) { RecordArrayToDisk(); return; }
    if (m_dumped) return;

    if (needToDraw)
    {
        // Hard copy into RAM
        memcpy(m_outputArrayDepthD16[m_frameIndex], m_depthD16, m_depthWidth * m_depthHeight * sizeof(USHORT));
        memcpy(m_outputArrayRGBX[m_frameIndex], m_colorRGBX, m_colorWidth * m_colorHeight * cBytesPerPixel * sizeof(BYTE));
        memcpy(m_outputArrayColorCoordinates[m_frameIndex], m_colorCoordinates, m_depthWidth * m_depthHeight * 2 * sizeof(LONG));

        m_colorArrayTimeStamp[m_frameIndex] = m_colorTimeStamp;
        m_depthArrayTimeStamp[m_frameIndex] = m_depthTimeStamp;

        // Draw the data with Direct2D
#ifdef DRAW_FRAMES
        m_pDrawKinectEasyGrabber->Draw(m_colorRGBX, m_colorWidth * m_colorHeight * cBytesPerPixel);
#endif
        m_frameIndex++;
    }
}
/// <summary>
/// Main processing function
/// </summary>
// This function is called repeatedly in the application's main loop
void CColorBasics::Update()
{
	HRESULT hr = S_OK;
	// (1) Color frame (used only for rendering the background)
	if (m_pColorFrameReader)
	{

		IColorFrame* pColorFrame = NULL;

		hr = m_pColorFrameReader->AcquireLatestFrame(&pColorFrame);

		if (SUCCEEDED(hr))
		{
			INT64 nTime = 0;
			IFrameDescription* pFrameDescription = NULL;
			int nWidth = 0;
			int nHeight = 0;
			ColorImageFormat imageFormat = ColorImageFormat_None;
			UINT nBufferSize = 0;
			RGBQUAD *pBuffer = NULL;

			hr = pColorFrame->get_RelativeTime(&nTime);

			if (SUCCEEDED(hr))
			{
				hr = pColorFrame->get_FrameDescription(&pFrameDescription);
			}

			if (SUCCEEDED(hr))
			{
				hr = pFrameDescription->get_Width(&nWidth);
			}

			if (SUCCEEDED(hr))
			{
				hr = pFrameDescription->get_Height(&nHeight);
			}

			if (SUCCEEDED(hr))
			{
				hr = pColorFrame->get_RawColorImageFormat(&imageFormat);
			}

			if (SUCCEEDED(hr))
			{
				if (imageFormat == ColorImageFormat_Bgra)
				{
					hr = pColorFrame->AccessRawUnderlyingBuffer(&nBufferSize, reinterpret_cast<BYTE**>(&pBuffer));
				}
				else if (m_pColorRGBX)
				{
					pBuffer = m_pColorRGBX;
					nBufferSize = cColorWidth * cColorHeight * sizeof(RGBQUAD);
					hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(pBuffer), ColorImageFormat_Bgra);
				}
				else
				{
					hr = E_FAIL;
				}
			}

			if (SUCCEEDED(hr))
			{
				ProcessColor(nTime, pBuffer, nWidth, nHeight);
			}
			SafeRelease(pFrameDescription);
		}
		SafeRelease(pColorFrame);
	}


	// Body processing is implemented from here on
	TIMESPAN nBodyTime = 0;
	if (m_pBodyFrameReader)
	{
		IBodyFrame* pBodyFrame = NULL;

		hr = m_pBodyFrameReader->AcquireLatestFrame(&pBodyFrame);
		if (SUCCEEDED(hr))
		{
			hr = pBodyFrame->get_RelativeTime(&nBodyTime);

			// UI drawing (buttons, etc.) goes here
			// Configure what is drawn according to the game state
			if (!m_pGame){ m_pGame = new CSemaphoreGame(m_pDrawColor, m_pCoordinateMapper); }
			if (m_pGame){ m_pGame->Display(nBodyTime); }

			IBody* ppBodies[BODY_COUNT] = { 0 };

			if (SUCCEEDED(hr))
			{
				hr = pBodyFrame->GetAndRefreshBodyData(_countof(ppBodies), ppBodies);
			}

			if (SUCCEEDED(hr))
			{
				// UI handling goes here (doing it inside ProcessBody() is also fine)
				// Perform the UI handling using the body data
				// Update the game state
				if (m_pGame){ m_pGame->Play(nBodyTime, BODY_COUNT, ppBodies); }

				ProcessBody(nBodyTime, BODY_COUNT, ppBodies);
			}

			for (int i = 0; i < _countof(ppBodies); ++i)
			{
				SafeRelease(ppBodies[i]);
			}
		}
		SafeRelease(pBodyFrame);
	}

	return;
}