// Called when thread is started
void* CaptureThread::Entry()
{
    while (true)
    {
        // check to see if the thread should exit
        if (TestDestroy())
        {
            break;
        }

        if (capturing == CAPTURE)
        {
            // get a new image
            CaptureFrame();
        }
        else if (capturing == PREVIEW)
        {
            // get a new image and show it on screen
            CaptureFrame();
            SendFrame(imageQueue.back());
        }
        else if (capturing == IDLE)
        {
            // nothing to do, so avoid spinning at full speed
            Sleep(10);
        }
        else if (capturing == STOP)
        {
            break;
        }

        Yield();
    }

    return NULL;
}
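// The Entry() loop above polls `capturing`, which another thread mutates. A
// minimal sketch of making that handoff well defined with std::atomic
// (hypothetical declarations; the original class definition is not shown):
#include <atomic>

enum CaptureState { CAPTURE, PREVIEW, IDLE, STOP };

// An atomic flag gives the cross-thread reads and writes defined behavior
// without taking a lock on every loop iteration.
std::atomic<CaptureState> capturing{IDLE};

void RequestStop()
{
    // Set by the controlling thread; Entry() observes it on its next pass.
    capturing.store(STOP, std::memory_order_relaxed);
}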
//Thread to stream frames by capturing data and sending them to
//all connected PipeEndpoints
void Webcam_impl::frame_threadfunc()
{
	while (streaming)
	{
		{
			//Capture a frame
			boost::shared_ptr<WebcamImage> frame = CaptureFrame();

			try
			{
				//Send the frame to all connected endpoints under the global lock
				recursive_mutex::scoped_lock lock(global_lock);
				m_FrameStreamBroadcaster->AsyncSendPacket(frame, async_frame_send_handler);
			}
			catch (std::exception&)
			{
				//Only warn if the stream is still supposed to be running
				if (streaming)
				{
					cout << "warning: error sending frame" << endl;
				}
			}
		}

		//Throttle the loop to roughly 10 frames per second
		this_thread::sleep(posix_time::milliseconds(100));
	}
}
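// The fixed 100 ms sleep above caps the stream at just under 10 FPS, since
// capture and send time add to each cycle. A sketch of pacing to an absolute
// schedule instead (hypothetical helper; Boost types as used in the loop above):
#include <boost/function.hpp>
#include <boost/thread.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>

void paced_loop(boost::function<void()> body, int period_ms)
{
	boost::posix_time::ptime next =
		boost::posix_time::microsec_clock::universal_time();
	while (true)
	{
		body();
		next += boost::posix_time::milliseconds(period_ms);
		// Sleep until the next deadline, so time spent in body() does not
		// lower the effective frame rate.
		boost::this_thread::sleep(next);
	}
}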
OMX_ERRORTYPE V4lRender::RenderSetConfig(
    OMX_INDEXTYPE nParamIndex,
    OMX_PTR pStructure)
{
	OMX_ERRORTYPE ret = OMX_ErrorNone;

	switch (nParamIndex)
	{
	case OMX_IndexConfigCommonOutputCrop:
	{
		//Update the output crop rectangle and apply it to the display
		OMX_CONFIG_RECTTYPE *pRect;
		pRect = (OMX_CONFIG_RECTTYPE*)pStructure;
		CHECK_STRUCT(pRect, OMX_CONFIG_RECTTYPE, ret);
		fsl_osal_memcpy(&sRectOut, pRect, sizeof(OMX_CONFIG_RECTTYPE));
		SetDeviceDisplayRegion();
	}
	break;
	case OMX_IndexConfigCaptureFrame:
	{
		//Record the capture request; snapshots are taken immediately,
		//while thumbnails are captured later in WriteDevice()
		OMX_CONFIG_CAPTUREFRAME *pCapture;
		pCapture = (OMX_CONFIG_CAPTUREFRAME*)pStructure;
		CHECK_STRUCT(pCapture, OMX_CONFIG_CAPTUREFRAME, ret);
		sCapture.eType = pCapture->eType;
		sCapture.pBuffer = pCapture->pBuffer;
		bCaptureFrameDone = OMX_FALSE;
		if (sCapture.eType == CAP_SNAPSHOT)
		{
			CaptureFrame(EnqueueBufferIdx);
			bCaptureFrameDone = OMX_TRUE;
		}
	}
	break;
	case OMX_IndexOutputMode:
	{
		//Replace the output mode configuration and reprogram the device
		OMX_CONFIG_OUTPUTMODE *pOutputMode;
		pOutputMode = (OMX_CONFIG_OUTPUTMODE*)pStructure;
		CHECK_STRUCT(pOutputMode, OMX_CONFIG_OUTPUTMODE, ret);
		fsl_osal_memcpy(&sOutputMode, pOutputMode, sizeof(OMX_CONFIG_OUTPUTMODE));
		//AdjustCropIn(&sOutputMode.sRectIn);
		SetOutputMode();
	}
	break;
	case OMX_IndexSysSleep:
	{
		//Stream off for system sleep; on wake, turn streaming back on while paused
		OMX_CONFIG_SYSSLEEP *pStreamOff;
		pStreamOff = (OMX_CONFIG_SYSSLEEP *)pStructure;
		CHECK_STRUCT(pStreamOff, OMX_CONFIG_SYSSLEEP, ret);
		if (pStreamOff->bSleep)
			V4lStreamOff(OMX_TRUE);
		else
			V4lStreamOnInPause();
	}
	break;
	default:
		ret = OMX_ErrorUnsupportedIndex;
		break;
	}

	return ret;
}
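//A minimal sketch of driving the snapshot path above from calling code
//(hypothetical helper, assuming the config entry point is reachable from the
//caller; nSize/nVersion handling depends on how CHECK_STRUCT validates the
//header, so only the fields the case handler reads are shown):
static OMX_ERRORTYPE RequestSnapshot(V4lRender &render, OMX_PTR pDestBuffer)
{
	OMX_CONFIG_CAPTUREFRAME capture;
	fsl_osal_memset(&capture, 0, sizeof(capture));
	capture.nSize = sizeof(OMX_CONFIG_CAPTUREFRAME);
	capture.eType = CAP_SNAPSHOT;    //handled synchronously in RenderSetConfig
	capture.pBuffer = pDestBuffer;   //caller-owned destination for the pixels
	return render.RenderSetConfig(OMX_IndexConfigCaptureFrame, &capture);
}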
void PD_flow_mrpt::initializePDFlow()
{
    //Initialize visualization
    initializeScene();

    //Initialize CUDA
    mrpt::system::sleep(500);
    initializeCUDA();

    //Start video streaming
    OpenCamera();

    //Capture two frames and build their pyramids: scene flow is computed
    //between consecutive frames, so the solver needs both filled in
    CaptureFrame();
    createImagePyramidGPU();
    CaptureFrame();
    createImagePyramidGPU();
    solveSceneFlowGPU();
}
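//A sketch of the per-frame loop this initialization prepares for: the two
//capture+pyramid calls above seed the "previous" and "current" frames that
//solveSceneFlowGPU() compares. Steady state then looks like this
//(hypothetical driver; stop_requested and updateScene() are assumed names):
void runFlowLoop(PD_flow_mrpt &flow, volatile bool &stop_requested)
{
    while (!stop_requested)
    {
        flow.CaptureFrame();          //newest frame replaces the older one
        flow.createImagePyramidGPU(); //rebuild the coarse-to-fine pyramid
        flow.solveSceneFlowGPU();     //estimate flow between the two frames
        flow.updateScene();           //hypothetical: refresh the visualization
    }
}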
IplImage* CaptureThread::Pop()
{
    // If the queue is empty, capture a frame so there is something to return
    if (imageQueue.empty())
    {
        CaptureFrame();
    }

    IplImage *image = imageQueue.front();

    // Keep the last frame in the queue so front()/back() remain valid
    if (imageQueue.size() > 1)
    {
        imageQueue.pop();
    }

    return image;
}
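// Pop() runs on a consumer thread while Entry() pushes frames, so access to
// imageQueue should be serialized. A minimal sketch using a wxMutex member
// (PopLocked and queueMutex are hypothetical names; the class definition is
// not shown):
IplImage* CaptureThread::PopLocked()
{
    wxMutexLocker lock(queueMutex);  // released automatically on return

    if (imageQueue.empty())
    {
        CaptureFrame();
    }

    IplImage *image = imageQueue.front();

    if (imageQueue.size() > 1)
    {
        imageQueue.pop();
    }

    return image;
}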
//Capture a frame and store it in the "buffer" and "multidimbuffer" memories
RR_SHARED_PTR<WebcamImage_size> Webcam_impl::CaptureFrameToBuffer()
{
	boost::shared_ptr<WebcamImage> image = CaptureFrame();

	m_buffer = image->data;

	boost::shared_ptr<RRArray<uint8_t> > mdata = AllocateRRArray<uint8_t>(image->height*image->width*3);

	boost::shared_ptr<RRArray<int32_t> > dims = AllocateRRArray<int32_t>(3);
	(*dims)[0] = image->height;
	(*dims)[1] = image->width;
	(*dims)[2] = 3;

	//Rearrange the data into the correct format for MATLAB arrays:
	//interleaved, row-major BGR becomes planar, column-major RGB
	boost::shared_ptr<RRMultiDimArray<uint8_t> > mdbuf = boost::make_shared<RRMultiDimArray<uint8_t> >(dims, mdata);

	for (int channel = 0; channel < 3; channel++)
	{
		//Offset of this channel's plane in the destination array
		int channel0 = image->height * image->width * channel;
		for (int x = 0; x < image->width; x++)
		{
			for (int y = 0; y < image->height; y++)
			{
				//Source is BGR, so index (2-channel) reads it out in RGB order
				uint8_t value = (*image->data)[(y * image->step + x*3) + (2-channel)];
				(*mdata)[channel0 + x * image->height + y] = value;
			}
		}
	}
	m_multidimbuffer = mdbuf;

	//Return a WebcamImage_size structure to the client
	boost::shared_ptr<WebcamImage_size> size = boost::make_shared<WebcamImage_size>();
	size->width = image->width;
	size->height = image->height;
	size->step = image->step;
	return size;
}
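//A standalone check of the index mapping above: a 1x2 BGR image whose pixels
//are pure blue and pure red should come out as planar, column-major RGB
//planes R={0,255}, G={0,0}, B={255,0} (a hypothetical test using plain
//arrays in place of the RR containers):
#include <cassert>
#include <cstdint>

int main()
{
	const int width = 2, height = 1, step = width * 3;
	//Interleaved BGR: pixel 0 = blue, pixel 1 = red
	const uint8_t src[step * height] = {255, 0, 0,   0, 0, 255};
	uint8_t dst[3 * width * height];

	for (int c = 0; c < 3; c++)
		for (int x = 0; x < width; x++)
			for (int y = 0; y < height; y++)
				dst[c * height * width + x * height + y] =
					src[y * step + 3 * x + (2 - c)];

	assert(dst[0] == 0   && dst[1] == 255);  //R plane
	assert(dst[2] == 0   && dst[3] == 0);    //G plane
	assert(dst[4] == 255 && dst[5] == 0);    //B plane
	return 0;
}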
/**
 * The main entry point for the application.
 */
int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow)
{
	UNREFERENCED_PARAMETER(hPrevInstance);
	UNREFERENCED_PARAMETER(lpCmdLine);

	// Initialize global strings (LoadString takes the buffer size in characters, not bytes)
	LoadString(hInstance, IDS_APP_TITLE, gWindowTitle, _countof(gWindowTitle));
	LoadString(hInstance, IDC_INTEGRATION, gWindowClass, _countof(gWindowClass));

	// Register the window class
	RegisterWindowClass(hInstance);

	// Perform application initialization:
	if ( !InitInstance(hInstance, nCmdShow) )
	{
		return FALSE;
	}

	// Set the view to the default position
	ResetView();

	// Cache the last mouse position
	GetCursorPos(&gLastMousePos);

	// Initialize the Twitch SDK; the placeholder credentials must be replaced with real values
	std::string channelName = "<username>";
	InitializeStreaming(channelName.c_str(), "<password>", "<clientId>", "<clientSecret>", GetIntelDllPath());

	// Main message loop
	MSG msg = {};	// zero-initialized so the WM_QUIT checks below never read an indeterminate message
	while (true)
	{
		// Check to see if any messages are waiting in the queue
		while (PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE))
		{
			// Process window messages
			TranslateMessage(&msg);
			DispatchMessage(&msg);

			// Received a quit message
			if (msg.message == WM_QUIT)
			{
				break;
			}
		}

		// Received a quit message so exit the app
		if (msg.message == WM_QUIT)
		{
			break;
		}

		if (gReinitializeRequired)
		{
			gReinitializeRequired = false;
			InitializeRendering();
		}

		// Draw the scene
		Render();

		UpdateWaveMesh();

		// Process user input independent of the event queue
		if (gFocused && !AcceptingChatInput())
		{
			HandleInput();
		}

		// Record the frame time
		float curTime = GetSystemTimeMs();

		// Begin streaming when ready
		if (gStreamingDesired && 
			!IsStreaming() &&
			IsReadyToStream())
		{
			StartStreaming(gBroadcastWidth, gBroadcastHeight, gBroadcastFramesPerSecond);

			gLastCaptureTime = 0;
		}

		// If you send frames too quickly to the SDK (based on the broadcast FPS you configured) it will not be able 
		// to make use of them all.  In that case, it will simply release buffers without using them which means the
		// game wasted time doing the capture.  To mitigate this, the app should pace the captures to the broadcast FPS.
		float captureDelta = curTime - gLastCaptureTime;
		bool isTimeForNextCapture = (captureDelta / 1000.0f) >= (1.0f / gBroadcastFramesPerSecond);

		// streaming is in progress so try and capture a frame
		if (IsStreaming() && 
			!gPaused &&
			isTimeForNextCapture)
		{
			// capture a snapshot of the back buffer
			unsigned char* pBgraFrame = nullptr;
			int width = 0;
			int height = 0;
			bool gotFrame = false;

			gotFrame = CaptureFrame(gBroadcastWidth, gBroadcastHeight, pBgraFrame, width, height);

			// send a frame to the stream and restart the pacing clock
			if (gotFrame)
			{
				SubmitFrame(pBgraFrame);
				gLastCaptureTime = curTime;
			}
		}

		// The SDK may generate events that must be handled on the main thread, so flush them here
		FlushStreamingEvents();

		#undef CHAT_STATE
		#undef STREAM_STATE
		#define CHAT_STATE(__state__) CS_##__state__
		#define STREAM_STATE(__state__) SS_##__state__

		// initialize chat after we have authenticated
		if (GetChatState() == CHAT_STATE(Uninitialized) && 
			GetStreamState() >= STREAM_STATE(Authenticated))
		{
			InitializeChat(channelName.c_str());
		}

		if (GetChatState() != CHAT_STATE(Uninitialized))
		{
			FlushChatEvents();
		}

		#undef CHAT_STATE
		#undef STREAM_STATE

		gLastFrameTime = curTime;

		// Update the window title to show the state
		#undef STREAM_STATE
		#define STREAM_STATE(__state__) #__state__,
		const char* streamStates[] = 
		{
			STREAM_STATE_LIST
		};
		#undef STREAM_STATE

		#undef CHAT_STATE
		#define CHAT_STATE(__state__) #__state__,
		const char* chatStates[] = 
		{
			CHAT_STATE_LIST
		};
		#undef CHAT_STATE

		char buffer[256];
		sprintf_s(buffer, sizeof(buffer), "Twitch Direct3D Integration Sample - %s - Stream:%s Chat:%s", GetUsername().c_str(), streamStates[GetStreamState()], chatStates[GetChatState()]);
		SetWindowTextA(gWindowHandle, buffer);
	}

	StopStreaming();

	DeinitChatRenderer();

	// Shutdown the Twitch SDK
	ShutdownChat();
	ShutdownStreaming();

	DeinitRendering();

	// Shutdown the app
	gGraphicsDevice->Release();
	gDirect3D->Release();

	// Cleanup the mesh
	DestroyWaveMesh();

	return (int)msg.wParam;
}
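// The CHAT_STATE/STREAM_STATE redefinitions above are the classic X-macro
// pattern: one list (e.g. STREAM_STATE_LIST) expands into both an enum and a
// matching table of name strings. A minimal self-contained sketch of the same
// technique with hypothetical states:
#include <cstdio>

#define COLOR_LIST COLOR(Red) COLOR(Green) COLOR(Blue)

#define COLOR(c) C_##c,
enum Color { COLOR_LIST };                         // C_Red, C_Green, C_Blue
#undef COLOR

#define COLOR(c) #c,
static const char* colorNames[] = { COLOR_LIST };  // "Red", "Green", "Blue"
#undef COLOR

int main()
{
	printf("%s\n", colorNames[C_Green]);           // prints "Green"
	return 0;
}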
OMX_ERRORTYPE V4lRender::WriteDevice(
    OMX_BUFFERHEADERTYPE *pBufferHdr)
{
	OMX_ERRORTYPE ret = OMX_ErrorNone;
	FB_DATA *fb;
	OMX_U32 nIndex;

	fsl_osal_mutex_lock(lock);

	LOG_DEBUG("WriteDevice: %p, nAllocating %d, FilledLen %d, nRenderFrames %d, nFrameBuffer %d, hMarkTargetComponent %p\n",
	          pBufferHdr, nAllocating, pBufferHdr->nFilledLen, nRenderFrames, nFrameBuffer, pBufferHdr->hMarkTargetComponent);


	if(nAllocating > 0)
	{
		//Buffers were allocated by this component: locate the matching index
		V4lSearchBuffer(pBufferHdr->pBuffer, &nIndex);
		BufferHdrs[nIndex] = pBufferHdr;
		//LOG_DEBUG("nIndex %d, nQueuedBuffer %d\n", nIndex, nQueuedBuffer);
	}
	else
	{
		//Otherwise copy the frame into the next driver buffer in the ring,
		//skipping the dummy buffer(s) at the end
		nIndex = (nRenderFrames % (nFrameBuffer-V4L_DUMMY_BUF_NUM));
		fb = &fb_data[nIndex];
		fsl_osal_memcpy(fb->pVirtualAddr, pBufferHdr->pBuffer, pBufferHdr->nFilledLen);
		BufferHdrs[nIndex] = pBufferHdr;
	}

	if(sCapture.eType == CAP_THUMBNAL)
	{
		//Thumbnail capture: grab this frame and return the buffer without queueing it for display
		CaptureFrame(nIndex);
		bCaptureFrameDone = OMX_TRUE;
		ports[IN_PORT]->SendBuffer(BufferHdrs[nIndex]);
		BufferHdrs[nIndex] = NULL;
		fsl_osal_mutex_unlock(lock);
		return OMX_ErrorNone;
	}

	V4lQueueBuffer(nIndex);
#ifdef USING_DUMMY_WORKAROUND
	if((2 == nQueuedBuffer) && (bFlushState == OMX_TRUE))
	{
		OMX_U32 nTmp;
		//Ensure one valid buffer (besides the dummy buffer) can always be
		//dequeued later; otherwise pause+seek becomes very slow or times out
		ASSERT(OMX_TRUE == bDummyValid);
		V4lDeQueueBuffer(&nTmp);
		ASSERT(nTmp == (nFrameBuffer-V4L_DUMMY_BUF_NUM));
		memcpy(fb_data[nFrameBuffer-V4L_DUMMY_BUF_NUM].pVirtualAddr, fb_data[nIndex].pVirtualAddr, fb_data[nIndex].nLength);
		V4lQueueBuffer(nFrameBuffer-V4L_DUMMY_BUF_NUM);
		bFlushState = OMX_FALSE;
	}
#else
	if(pBufferHdr->hMarkTargetComponent != NULL)
	{
		LOG_DEBUG("requeue current buffer to stream on v4l\n");
		if(bV4lNewApi)
		{
			//Queuing the same buffer twice causes an unrecoverable error
			//(no display: pause -> seek -> no display -> resume -> display),
			//so queue the next buffer instead to keep the display alive during pause+seek
			V4lQueueBuffer((nIndex+1)%nFrameBuffer);
		}
		else
		{
			V4lQueueBuffer(nIndex);
		}
		//nQueuedBuffer--;
		//nRenderFrames--;
	}
#endif

	nIndex = 0;
	ret = V4lDeQueueBuffer(&nIndex);
	if(ret == OMX_ErrorNone)
	{
		OMX_BUFFERHEADERTYPE *pHdr = NULL;
		pHdr = BufferHdrs[nIndex];
		BufferHdrs[nIndex] = NULL;

		ports[IN_PORT]->SendBuffer(pHdr);
		//LOG_DEBUG("dequeued nIndex %d\n", nIndex);
	}
#ifndef USING_DUMMY_WORKAROUND
	// When seeking in the pause state, the previous frame was already flushed in FlushComponent(),
	// so SendBuffer() is never called here and OMX_EventMark would not be sent, making the seek fail.
	// Send the event mark explicitly to trigger OMX_EventMark.
	if(pBufferHdr->hMarkTargetComponent != NULL)
	{
		LOG_DEBUG("Send eventMark in pause state\n");
		ports[0]->SendEventMark(pBufferHdr);
	}
#endif
	fsl_osal_mutex_unlock(lock);

	return OMX_ErrorNone;
}
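//V4lQueueBuffer()/V4lDeQueueBuffer() above wrap the standard V4L2 ioctls. A
//minimal sketch of that pair for an mmap-streaming output device (assumed
//device fd and buffer type; error handling reduced to return codes):
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int QueueBuffer(int fd, unsigned index)
{
	struct v4l2_buffer buf;
	memset(&buf, 0, sizeof(buf));
	buf.type   = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index  = index;
	return ioctl(fd, VIDIOC_QBUF, &buf);    //hand the buffer to the driver
}

static int DequeueBuffer(int fd, unsigned *index)
{
	struct v4l2_buffer buf;
	memset(&buf, 0, sizeof(buf));
	buf.type   = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	buf.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)  //blocks until the driver is done
		return -1;
	*index = buf.index;
	return 0;
}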