Example 1
/**
 * Get original size of camera capture source
 */
static javacall_result camera_get_video_size(javacall_handle handle, 
                                             long* width, long* height)
{
    camera_handle* pHandle = (camera_handle*)handle;
    javacall_result ret = JAVACALL_FAIL;
    BITMAPINFO* pBmpInfo;

    if (pHandle->hCapWnd) {
        int size = capGetVideoFormatSize(pHandle->hCapWnd);
        pBmpInfo = MALLOC(size);
        if (NULL == pBmpInfo) return ret;

        ((LPBITMAPINFOHEADER)pBmpInfo)->biSize = sizeof(BITMAPINFOHEADER);
        if (0 != capGetVideoFormat(pHandle->hCapWnd, pBmpInfo, size)) {
            if (width) { *width = IMAGEWIDTH(pBmpInfo); }
            if (height) { *height = IMAGEHEIGHT(pBmpInfo); }
            /* IMPL_NOTE - Set to 24 bit RGB format */
            if (IMAGEBITS(pBmpInfo) != 24) {
                IMAGEBITS(pBmpInfo) = 24;
                IMAGEBICLRUSED(pBmpInfo) = 0;
                capSetVideoFormat(pHandle->hCapWnd, pBmpInfo, size);
            }
            ret = JAVACALL_OK;
        }

        FREE(pBmpInfo);
    }

    if (width && height) {
      JAVA_DEBUG_PRINT2("[camera] camera_get_video_size %d %d\n", *width, *height);
    }

    return ret;
}
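All of these hits follow the same VFW idiom: ask the driver how large its format block is, allocate exactly that many bytes, then fetch the BITMAPINFO into it. A minimal standalone sketch of the idiom (the helper name query_video_format is hypothetical; hwndCap is assumed to be a capture window already connected to a driver):

#include <windows.h>
#include <vfw.h>      /* link with vfw32.lib */
#include <stdlib.h>

/* Fetch the driver's current video format. Returns a malloc'd
 * BITMAPINFO the caller must free(), or NULL on failure. */
static BITMAPINFO *query_video_format(HWND hwndCap)
{
    DWORD size = capGetVideoFormatSize(hwndCap);
    if (size == 0)
        return NULL;                      /* no driver connected */

    BITMAPINFO *bi = (BITMAPINFO *)malloc(size);
    if (bi == NULL)
        return NULL;

    if (capGetVideoFormat(hwndCap, bi, size) == 0) {
        free(bi);                         /* driver refused the query */
        return NULL;
    }
    return bi;                            /* caller owns the buffer */
}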
Example 2
/* Configure the video format: resolution and pixel format (RGB/I420) */
void CMainFrame::OnVfwVideoformat()
{
	// TODO: Add your command handler code here
	DWORD fsize;
	if(m_caps.fHasDlgVideoFormat){
		capDlgVideoFormat(m_hWndCap);
		fsize = capGetVideoFormatSize(m_hWndCap);
		capGetVideoFormat(m_hWndCap, lpbiIn, fsize);
		Config_XVIECODEC();
	}
}
Example 3
void CAviCap::_pushResolution()
{
	_svRes = NULL;
	int _vfs = capGetVideoFormatSize(GetSafeHwnd());
	if (!_vfs)
		return;

	// Save the driver's current format block so it can be restored later.
	_svRes = (LPVOID)(new char[_vfs]);

	capGetVideoFormat(GetSafeHwnd(), _svRes, (WORD)_vfs);
}
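Example 3 only saves the current format block; presumably a matching "pop" restores it later. A hedged sketch of that counterpart (_popResolution is not in the original; it assumes the driver-reported format size is unchanged since the push and that _svRes was allocated as new char[]):

void CAviCap::_popResolution()
{
	if (!_svRes)
		return;

	// Restore the format saved by _pushResolution(). We assume the
	// driver-reported size has not changed since the push.
	int vfs = capGetVideoFormatSize(GetSafeHwnd());
	if (vfs)
		capSetVideoFormat(GetSafeHwnd(), _svRes, (WORD)vfs);

	delete[] (char*)_svRes;		// allocated as new char[_vfs]
	_svRes = NULL;
}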
Example 4
/* Encode and decode the captured video */
void CMainFrame::OnVfwCodec()
{
	// TODO: Add your command handler code here
	DWORD fsize;
	/* VCM initialization */
	hic1 = ICOpen(mmioFOURCC('v','i','d','c'),  mmioFOURCC('X','V','I','D'),  ICMODE_COMPRESS);
	if (hic1 == 0) 
		AfxMessageBox(_T("Failed to open the encoder!"));

	hic2 = ICOpen(mmioFOURCC('v','i','d','c'),  mmioFOURCC('X','V','I','D'),  ICMODE_DECOMPRESS);
	if (hic2 == 0) 
		AfxMessageBox(_T("Failed to open the decoder!"));

	fsize = capGetVideoFormatSize(m_hWndCap);
	capGetVideoFormat(m_hWndCap, lpbiIn, fsize);
	
	InitAVIWriteOpt();

	lpbiOut->bmiHeader.biSize          = sizeof(BITMAPINFOHEADER);
	lpbiOut->bmiHeader.biWidth         = lpbiIn->bmiHeader.biWidth;
	lpbiOut->bmiHeader.biHeight        = lpbiIn->bmiHeader.biHeight;
	lpbiOut->bmiHeader.biPlanes        = 1;
	lpbiOut->bmiHeader.biBitCount      = 24;
	lpbiOut->bmiHeader.biCompression   = BI_RGB;
	lpbiOut->bmiHeader.biSizeImage     = lpbiIn->bmiHeader.biWidth*lpbiIn->bmiHeader.biHeight*3;
	lpbiOut->bmiHeader.biXPelsPerMeter = 0;
	lpbiOut->bmiHeader.biYPelsPerMeter = 0;
	lpbiOut->bmiHeader.biClrUsed       = 0;
	lpbiOut->bmiHeader.biClrImportant  = 0;

//	Ask the compressor for its output format, given our input format
	if (ICCompressGetFormat(hic1,lpbiIn,lpbiTmp)!=ICERR_OK) 
		AfxMessageBox(_T("Failed to get the encoder output format!"));
	if (ICCompressQuery(hic1,lpbiIn,lpbiTmp) != ICERR_OK)   
		AfxMessageBox(_T("Encoder cannot handle this input/output format pair!"));

//	set the parameters of the CODEC
	pc.cbSize         = sizeof(COMPVARS);			// structure size
	pc.dwFlags        = ICMF_COMPVARS_VALID;
	pc.hic            = hic1;						// encoder handle
	pc.fccType        = mmioFOURCC('v','i','d','c');
	pc.fccHandler     = mmioFOURCC('X','V','I','D');
	pc.lpbiOut        = lpbiTmp;					// output format
	pc.lKey           = 100;						// keyframe interval
	pc.lQ             = 10000;						// image quality

	if(!ICSeqCompressFrameStart(&pc, lpbiIn))
		return;
	ICDecompressBegin(hic2,lpbiTmp,lpbiOut);
	m_vfwState  = ENCDEC;
}
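OnVfwCodec only primes the codec pair; the per-frame work presumably happens in the video-stream callback. As a hedged sketch, one captured frame would flow through the COMPVARS set up above roughly like this (EncodeDecodeFrame is a hypothetical helper; pc, hic1, hic2, lpbiTmp and lpbiOut are as in the snippet, error handling elided):

// Compress one raw frame with the sequence compressor, then feed the
// compressed bits straight back into the decompressor (loopback test).
void CMainFrame::EncodeDecodeFrame(LPVIDEOHDR lpVHdr, LPBYTE pDecoded)
{
	BOOL isKey  = FALSE;
	LONG encLen = 0;

	// Returns a pointer to the compressed frame held inside COMPVARS.
	LPVOID pEnc = ICSeqCompressFrame(&pc, 0, lpVHdr->lpData, &isKey, &encLen);
	if (!pEnc)
		return;

	ICDecompress(hic2, isKey ? 0 : ICDECOMPRESS_NOTKEYFRAME,
				 &lpbiTmp->bmiHeader, pEnc,
				 &lpbiOut->bmiHeader, pDecoded);
}

// On shutdown: ICSeqCompressFrameEnd(&pc); ICCompressorFree(&pc);
//              ICDecompressEnd(hic2); ICClose(hic1); ICClose(hic2);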
Example 5
bool CVideoCap::Initialize(int nWidth, int nHeight)
{
//	CAPTUREPARMS	gCapTureParms;	// capture driver capabilities
	CAPDRIVERCAPS	gCapDriverCaps;
	DWORD			dwSize;

	if (!IsWebCam())
		return false;

	capSetUserData(m_hWndCap, this);
	
	capSetCallbackOnError(m_hWndCap, capErrorCallback);
	if (!capSetCallbackOnFrame(m_hWndCap, FrameCallbackProc))
	{
		return false;
	}

	// Connect the capture window to the driver
	int i;
	for (i = 0; i < 10; i++)
	{
		if (capDriverConnect(m_hWndCap, i))
			break;
	}
	if (i == 10)
		return false;
	
	
	dwSize = capGetVideoFormatSize(m_hWndCap);
	// Allocate the driver-reported size; a bare BITMAPINFO can be too small
	// when the format block carries palette or codec-specific data.
	m_lpbmi = (BITMAPINFO*)new BYTE[dwSize];

	// The M263 only supports 176*144 and 352*288 (in tests, 352*288 24-bit
	// color works only with biPlanes = 1)
	capGetVideoFormat(m_hWndCap, m_lpbmi, dwSize);
	// Use the caller-specified size
	if (nWidth && nHeight)
	{
 		m_lpbmi->bmiHeader.biWidth = nWidth;
 		m_lpbmi->bmiHeader.biHeight = nHeight;
		m_lpbmi->bmiHeader.biPlanes = 1;
		m_lpbmi->bmiHeader.biSizeImage = (((m_lpbmi->bmiHeader.biWidth * m_lpbmi->bmiHeader.biBitCount + 31) & ~31) >> 3) * m_lpbmi->bmiHeader.biHeight;
		// In testing, some cameras reject the requested resolution
 		if (!capSetVideoFormat(m_hWndCap, m_lpbmi, dwSize))
			return false;
	}

	return true;
}
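The biSizeImage expression above rounds every scan line up to a DWORD (32-bit) boundary, which is what GDI and capture drivers expect for DIBs. Worked out for two common sizes (an illustrative sketch, not from the original; DibStride is a hypothetical name):

// stride = ((width * bitCount + 31) & ~31) >> 3   bytes per scan line
//
//   320 x 240 @ 24 bpp: ((320*24 + 31) & ~31) >> 3 = 960  -> 960  * 240 = 230400 bytes
//   352 x 288 @ 24 bpp: ((352*24 + 31) & ~31) >> 3 = 1056 -> 1056 * 288 = 304128 bytes
inline DWORD DibStride(LONG width, WORD bitCount)
{
	return ((width * bitCount + 31) & ~31) >> 3;
}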
Example 6
static int vfw_probe (zbar_video_t *vdo)
{
    video_state_t *state = vdo->state;
    state->bi_size = capGetVideoFormatSize(state->hwnd);
    BITMAPINFOHEADER *bih = state->bih = realloc(state->bih, state->bi_size);
    /* FIXME check OOM */

    if(!capSetUserData(state->hwnd, (LONG)vdo) ||
       !state->bi_size || !bih ||
       !capGetVideoFormat(state->hwnd, bih, state->bi_size))
        return(err_capture(vdo, SEV_ERROR, ZBAR_ERR_INVALID, __func__,
                           "setting up video capture"));

    zprintf(3, "initial format: " BIH_FMT " (bisz=%x)\n",
            BIH_FIELDS(bih), state->bi_size);

    if(!vdo->width || !vdo->height) {
        vdo->width = bih->biWidth;
        vdo->height = bih->biHeight;
    }
    vdo->datalen = bih->biSizeImage;

    zprintf(2, "probing supported formats:\n");
    vdo->formats = calloc(VFW_NUM_FORMATS, sizeof(uint32_t));

    int n = 0;
    const uint32_t *fmt;
    for(fmt = vfw_formats; *fmt; fmt++)
        if(vfw_probe_format(vdo, *fmt))
            vdo->formats[n++] = *fmt;

    vdo->formats = realloc(vdo->formats, (n + 1) * sizeof(uint32_t));

    vdo->width = bih->biWidth;
    vdo->height = bih->biHeight;
    vdo->intf = VIDEO_VFW;
    vdo->init = vfw_init;
    vdo->start = vfw_start;
    vdo->stop = vfw_stop;
    vdo->cleanup = vfw_cleanup;
    vdo->nq = vfw_nq;
    vdo->dq = vfw_dq;
    return(0);
}
Example 7
// Frame data is stored in lpVHdr 
static LRESULT CALLBACK FrameCallbackProc(HWND hWnd, LPVIDEOHDR lpVHdr)
{
	// If no data provided by driver (dropped frame) - nothing to do
	if (lpVHdr->dwBytesUsed == 0) return FALSE;
	
	int grayScaleSize = lpVHdr->dwBytesUsed/3; // RGB uses 24 BPP, GS is 8 BPP

	// Get pointer to our video grabber - remember, this is friend function
	VideoGrabber* videoGrabber = (VideoGrabber*) capGetUserData(hWnd);
	if (videoGrabber->mGrabNextFrame)
	{
		// Get video format from driver (including resolution)
		if (videoGrabber->mBitmapInfo == NULL) 
		{
			// All these lines are run only once! I put them here and not in the
			// constructor because I need to run them in context of the callback.
			// Strange though...
			DWORD videoFormatSize = capGetVideoFormatSize(videoGrabber->camhwnd);
			videoGrabber->mBitmapInfo = (PBITMAPINFO) new char[videoFormatSize];	
			capGetVideoFormat(videoGrabber->camhwnd, videoGrabber->mBitmapInfo, videoFormatSize);
			videoGrabber->mCurrentFrameGS = new BYTE[grayScaleSize];
			videoGrabber->mCurrentFrameBlurred = new BYTE[grayScaleSize];
			videoGrabber->mPreviousFrame = new BYTE[grayScaleSize];
		}

		ApplyGrayScaleFilter(lpVHdr, videoGrabber->mCurrentFrameGS); // Pass current frame data to grayscale it
		// Blurring decreases noise. mBitmapInfo contains frame dimensions (width & height)
		ApplyAverageBlurFilter(videoGrabber->mCurrentFrameGS, videoGrabber->mBitmapInfo, videoGrabber->mCurrentFrameBlurred);

		if (videoGrabber->mPreviousFrameExists)
		{
			// Calculate difference between frames
			int differedPixelsNum = CompareFrames(videoGrabber->mCurrentFrameBlurred, videoGrabber->mPreviousFrame, 
				videoGrabber->mBitmapInfo, videoGrabber->PIXELS_DIFFERENCE_TRESHOLD);
			videoGrabber->mMotionDetectedDuringLastSecond = 
				(differedPixelsNum > videoGrabber->MOTION_TRESHOLD); // Motion detected!
		}

		memcpy(videoGrabber->mPreviousFrame, videoGrabber->mCurrentFrameBlurred, grayScaleSize);
		videoGrabber->mPreviousFrameExists = TRUE;		// Now we have frame to compare with
		videoGrabber->mGrabNextFrame = FALSE;			// frame for current second has been processed
		SetEvent(videoGrabber->mFrameProcessedEvent);	// Signal about frame processing completion
	}

	return (LRESULT)TRUE;	// frame handled
}
Example 8
BOOL CAviCap::_getFormat()
{
	int vfs = capGetVideoFormatSize(GetSafeHwnd());

	if (!vfs)
		return FALSE;

	// Re-allocate the format buffer at the driver-reported size.
	if (_bmpInfo) {
		delete[] (char*)_bmpInfo;
		_bmpInfo = NULL;
	}
	_bmpInfo = (BITMAPINFO*)(new char[vfs]);

	LPBITMAPINFOHEADER bmpIH = (LPBITMAPINFOHEADER)_bmpInfo;

	bmpIH->biSize = sizeof(BITMAPINFOHEADER);
	BOOL ret = capGetVideoFormat(GetSafeHwnd(), _bmpInfo, (WORD)vfs);

	if (ret && _autosize)
	{
	#define XX	bmpIH->biWidth
	#define	YY	bmpIH->biHeight
		// Adjust size & position now!
		CRect rc;
		CRect rcc;
		CWnd *parent = GetParent();
		if (!parent) parent = CWnd::GetDesktopWindow();
		ASSERT(parent);
		GetClientRect(&rcc);
		parent->GetClientRect(&rc);

		{
		int x = XX > rc.Width()  ? 0 : (rc.Width()  - XX) / 2;
		int y = YY > rc.Height() ? 0 : (rc.Height() - YY) / 2;
		SetWindowPos(NULL, x, y, XX, YY, SWP_NOZORDER);
		}
	}

	return ret;
}
Example 9
/**
 * Set camera snapshot size
 */
static javacall_result camera_set_video_size(javacall_handle handle, 
                                             long width, long height)
{
    camera_handle* pHandle = (camera_handle*)handle;
    javacall_result ret = JAVACALL_FAIL;
    BITMAPINFO* pBmpInfo;

    JAVA_DEBUG_PRINT2("[camera] camera_set_video_size %d %d\n", width, height);

    /* Convert to supported width & height */
    if (width <= 160) {
        width = 160;
        height = 120;
    } else if (width <= 320) {
        width = 320;
        height = 240;
    } else {
        width = 640;
        height = 480;
    }

    if (pHandle->hCapWnd) {
        int size = capGetVideoFormatSize(pHandle->hCapWnd);
        pBmpInfo = MALLOC(size);
        if (NULL == pBmpInfo) return ret;

        ((LPBITMAPINFOHEADER)pBmpInfo)->biSize = sizeof(BITMAPINFOHEADER);
        if (0 != capGetVideoFormat(pHandle->hCapWnd, pBmpInfo, size)) {
            IMAGEBITS(pBmpInfo) = 24;
            IMAGEBICLRUSED(pBmpInfo) = 0;
            IMAGEWIDTH(pBmpInfo) = width;
            IMAGEHEIGHT(pBmpInfo) = height;
            capSetVideoFormat(pHandle->hCapWnd, pBmpInfo, size);
            ret = JAVACALL_OK;
        }
        FREE(pBmpInfo);
    }

    return ret;
}
Example 10
/* Initialize the VFW device */
void CMainFrame::OnVfwInitvfw()
{
	// TODO: Add your command handler code here
	DWORD fsize;

	// Create the video window
	if(!m_wndSource.CreateEx(WS_EX_TOPMOST,NULL,
							_T("Source"),WS_OVERLAPPED|WS_CAPTION,
							CRect(0,0,352,288),NULL,0))
		return;
	
	m_hWndCap = capCreateCaptureWindow(_T("Capture Window"),WS_CHILD|WS_VISIBLE,
									  0,0,352,288,
									  m_wndSource.m_hWnd,0);

	//m_wndSource.ShowWindow(SW_SHOW);
	// Register callback functions
	capSetCallbackOnError(m_hWndCap,(FARPROC)ErrorCallbackProc);
	capSetCallbackOnStatus(m_hWndCap,(FARPROC)StatusCallbackProc);
	capSetCallbackOnVideoStream(m_hWndCap,(FARPROC)VideoCallbackProc);

	// Connect to the video device
	capDriverConnect(m_hWndCap,0);	//(HWND m_hWndCap, int index);//index : 0--9
	// Query the driver's capabilities
	capDriverGetCaps(m_hWndCap,&m_caps,sizeof(CAPDRIVERCAPS));
	if (m_caps.fHasOverlay)
		capOverlay(m_hWndCap,TRUE);
	// Set the preview rate and start previewing
	capPreviewRate(m_hWndCap,1000/25);
	capPreview(m_hWndCap,bPreview);


	fsize = capGetVideoFormatSize(m_hWndCap);
	capGetVideoFormat(m_hWndCap, lpbiIn, fsize);
	
	AfxMessageBox(_T("Initialization succeeded!"));
}
Example 11
void *cap_getformat(cap_cx *cx, size_t *pfmtlen, char *err)
{
	void *pfmt;
	size_t fmtlen;

	if(cx->opened)
	{
		if((fmtlen = (size_t)capGetVideoFormatSize(cx->hwnd)))
		{
			if((pfmt = malloc(fmtlen)))
			{
				if(capGetVideoFormat(cx->hwnd, pfmt, fmtlen))
				{
					set_err("");
					*pfmtlen = fmtlen;
					return pfmt;
				}
				free(pfmt);
			}
		}
	}
	set_err("Can't get video format");
	return 0;
}
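This is the tidiest wrapper in the list: the caller receives an opaque heap block plus its length. A hedged usage sketch (show_format is a hypothetical name; it assumes an opened cap_cx *cx as in the snippet, and that cap_getformat allocates with malloc, so the caller frees):

#include <stdio.h>
#include <stdlib.h>

static void show_format(cap_cx *cx)
{
    char err[256];
    size_t fmtlen = 0;
    BITMAPINFO *fmt = (BITMAPINFO *)cap_getformat(cx, &fmtlen, err);
    if (fmt) {
        printf("%ld x %ld, %u bpp (%zu-byte format block)\n",
               (long)fmt->bmiHeader.biWidth, (long)fmt->bmiHeader.biHeight,
               (unsigned)fmt->bmiHeader.biBitCount, fmtlen);
        free(fmt);   /* cap_getformat allocates with malloc */
    }
}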
Example 12
bool CvCaptureCAM_VFW::setProperty(int property_id, double value)
{
    bool handledSize = false;

    switch( property_id )
    {
    case CV_CAP_PROP_FRAME_WIDTH:
        width = cvRound(value);
        handledSize = true;
        break;
    case CV_CAP_PROP_FRAME_HEIGHT:
        height = cvRound(value);
        handledSize = true;
        break;
    case CV_CAP_PROP_FOURCC:
        break;
    case CV_CAP_PROP_FPS:
        if( value > 0 )
        {
            CAPTUREPARMS params;
            if( capCaptureGetSetup(capWnd, &params, sizeof(params)) )
            {
                params.dwRequestMicroSecPerFrame = cvRound(1e6/value);
                return capCaptureSetSetup(capWnd, &params, sizeof(params)) == TRUE;
            }
        }
        break;
    default:
        break;
    }

    if ( handledSize )
    {
        // If both width and height are set then change frame size.
        if( width > 0 && height > 0 )
        {
            const DWORD size = capGetVideoFormatSize(capWnd);
            if( size == 0 )
                return false;

            unsigned char *pbi = new unsigned char[size];
            if( !pbi )
                return false;

            if( capGetVideoFormat(capWnd, pbi, size) != size )
            {
                delete []pbi;
                return false;
            }

            BITMAPINFOHEADER& vfmt = ((BITMAPINFO*)pbi)->bmiHeader;
            bool success = true;
            if( width != vfmt.biWidth || height != vfmt.biHeight )
            {
                // Change frame size.
                vfmt.biWidth = width;
                vfmt.biHeight = height;
                vfmt.biSizeImage = height * ((width * vfmt.biBitCount + 31) / 32) * 4;
                vfmt.biCompression = BI_RGB;
                success = capSetVideoFormat(capWnd, pbi, size) == TRUE;
            }
            if( success )
            {
                // Adjust capture window size.
                CAPSTATUS status = {};
                capGetStatus(capWnd, &status, sizeof(status));
                ::SetWindowPos(capWnd, NULL, 0, 0, status.uiImageWidth, status.uiImageHeight, SWP_NOZORDER|SWP_NOMOVE);
                // Store frame size.
                widthSet = width;
                heightSet = height;
            }
            delete []pbi;
            width = height = -1;

            return success;
        }

        return true;
    }

    return false;
}
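Note the protocol this backend enforces: CV_CAP_PROP_FRAME_WIDTH and CV_CAP_PROP_FRAME_HEIGHT are only cached, and the driver is reconfigured once both have been supplied. From the caller's side, using OpenCV's old C API (a hedged sketch; it applies on builds where the VFW backend is selected):

#include <opencv/highgui.h>

CvCapture *cap = cvCreateCameraCapture(0);
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH,  640);   // cached only
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, 480);   // both set now: capSetVideoFormat runs
cvReleaseCapture(&cap);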
Example 13
// Initialize camera input
bool CvCaptureCAM_VFW::open( int wIndex )
{
    char szDeviceName[80];
    char szDeviceVersion[80];
    HWND hWndC = 0;

    close();

    if( (unsigned)wIndex >= 10 )
        wIndex = 0;

    for( ; wIndex < 10; wIndex++ )
    {
        if( capGetDriverDescription( wIndex, szDeviceName,
            sizeof (szDeviceName), szDeviceVersion,
            sizeof (szDeviceVersion)))
        {
            hWndC = capCreateCaptureWindow ( "My Own Capture Window",
                WS_POPUP | WS_CHILD, 0, 0, 320, 240, 0, 0);
            if( capDriverConnect (hWndC, wIndex))
                break;
            DestroyWindow( hWndC );
            hWndC = 0;
        }
    }

    if( hWndC )
    {
        capWnd = hWndC;
        hdr = 0;
        hic = 0;
        fourcc = (DWORD)-1;

        memset( &caps, 0, sizeof(caps));
        capDriverGetCaps( hWndC, &caps, sizeof(caps));
        CAPSTATUS status = {};
        capGetStatus(hWndC, &status, sizeof(status));
        ::SetWindowPos(hWndC, NULL, 0, 0, status.uiImageWidth, status.uiImageHeight, SWP_NOZORDER|SWP_NOMOVE);
        capSetUserData( hWndC, (size_t)this );
        capSetCallbackOnFrame( hWndC, frameCallback );
        CAPTUREPARMS p;
        capCaptureGetSetup(hWndC,&p,sizeof(CAPTUREPARMS));
        p.dwRequestMicroSecPerFrame = 66667/2; // 30 FPS
        capCaptureSetSetup(hWndC,&p,sizeof(CAPTUREPARMS));
        //capPreview( hWndC, 1 );
        capPreviewScale(hWndC,FALSE);
        capPreviewRate(hWndC,1);

        // Get frame initial parameters.
        const DWORD size = capGetVideoFormatSize(capWnd);
        if( size > 0 )
        {
            unsigned char *pbi = new unsigned char[size];
            if( pbi )
            {
                if( capGetVideoFormat(capWnd, pbi, size) == size )
                {
                    BITMAPINFOHEADER& vfmt = ((BITMAPINFO*)pbi)->bmiHeader;
                    widthSet = vfmt.biWidth;
                    heightSet = vfmt.biHeight;
                    fourcc = vfmt.biCompression;
                }
                delete []pbi;
            }
        }
        // And alternative way in case of failure.
        if( widthSet == 0 || heightSet == 0 )
        {
            widthSet = status.uiImageWidth;
            heightSet = status.uiImageHeight;
        }

    }
    return capWnd != 0;
}
Example 14
int
camera_device_start_capturing(CameraDevice* cd,
                              uint32_t pixel_format,
                              int frame_width,
                              int frame_height)
{
    WndCameraDevice* wcd;
    HBITMAP bm_handle;
    BITMAP  bitmap;
    size_t format_info_size;

    if (cd == NULL || cd->opaque == NULL) {
        E("%s: Invalid camera device descriptor", __FUNCTION__);
        return -1;
    }
    wcd = (WndCameraDevice*)cd->opaque;

    /* wcd->dc is an indicator of capturing: !NULL - capturing, NULL - not */
    if (wcd->dc != NULL) {
        W("%s: Capturing is already on on device '%s'",
          __FUNCTION__, wcd->window_name);
        return 0;
    }

    /* Connect capture window to the video capture driver. */
    if (!capDriverConnect(wcd->cap_window, wcd->input_channel)) {
        return -1;
    }

    /* Get current frame information from the driver. */
    format_info_size = capGetVideoFormatSize(wcd->cap_window);
    if (format_info_size == 0) {
        E("%s: Unable to get video format size: %d",
          __FUNCTION__, GetLastError());
        _camera_device_reset(wcd);
        return -1;
    }
    wcd->frame_bitmap = (BITMAPINFO*)malloc(format_info_size);
    if (wcd->frame_bitmap == NULL) {
        E("%s: Unable to allocate frame bitmap info buffer", __FUNCTION__);
        _camera_device_reset(wcd);
        return -1;
    }
    if (!capGetVideoFormat(wcd->cap_window, wcd->frame_bitmap,
                           format_info_size)) {
        E("%s: Unable to obtain video format: %d", __FUNCTION__, GetLastError());
        _camera_device_reset(wcd);
        return -1;
    }

    /* Lets see if we need to set different frame dimensions */
    if (wcd->frame_bitmap->bmiHeader.biWidth != frame_width ||
            abs(wcd->frame_bitmap->bmiHeader.biHeight) != frame_height) {
        /* Dimensions don't match. Set new frame info. */
        wcd->frame_bitmap->bmiHeader.biWidth = frame_width;
        wcd->frame_bitmap->bmiHeader.biHeight = frame_height;
        /* We need to recalculate image size, since the capture window / driver
         * will use image size provided by us. */
        if (wcd->frame_bitmap->bmiHeader.biBitCount == 24) {
            /* Special case that may require WORD boundary alignment. */
            uint32_t bpl = (frame_width * 3 + 1) & ~1;
            wcd->frame_bitmap->bmiHeader.biSizeImage = bpl * frame_height;
        } else {
            wcd->frame_bitmap->bmiHeader.biSizeImage =
                (frame_width * frame_height * wcd->frame_bitmap->bmiHeader.biBitCount) / 8;
        }
        if (!capSetVideoFormat(wcd->cap_window, wcd->frame_bitmap,
                               format_info_size)) {
            E("%s: Unable to set video format: %d", __FUNCTION__, GetLastError());
            _camera_device_reset(wcd);
            return -1;
        }
    }

    if (wcd->frame_bitmap->bmiHeader.biCompression > BI_PNG) {
        D("%s: Video capturing driver has reported pixel format %.4s",
          __FUNCTION__, (const char*)&wcd->frame_bitmap->bmiHeader.biCompression);
    }

    /* Most of the time frame bitmaps come in "bottom-up" form, where its origin
     * is the lower-left corner. However, it could be in the normal "top-down"
     * form with the origin in the upper-left corner. So, we must adjust the
     * biHeight field, since the way "top-down" form is reported here is by
     * setting biHeight to a negative value. */
    if (wcd->frame_bitmap->bmiHeader.biHeight < 0) {
        wcd->frame_bitmap->bmiHeader.biHeight =
            -wcd->frame_bitmap->bmiHeader.biHeight;
        wcd->is_top_down = 1;
    } else {
        wcd->is_top_down = 0;
    }

    /* Get DC for the capturing window that will be used when we deal with
     * bitmaps obtained from the camera device during frame capturing. */
    wcd->dc = GetDC(wcd->cap_window);
    if (wcd->dc == NULL) {
        E("%s: Unable to obtain DC for %s: %d",
          __FUNCTION__, wcd->window_name, GetLastError());
        _camera_device_reset(wcd);
        return -1;
    }

    /*
     * At this point we need to grab a frame to properly setup framebuffer, and
     * calculate pixel format. The problem is that bitmap information obtained
     * from the driver doesn't necessarily match the actual bitmap we're going to
     * obtain via capGrabFrame / capEditCopy / GetClipboardData
     */

    /* Grab a frame, and post it to the clipboard. Not very effective, but this
     * is how capXxx API is operating. */
    if (!capGrabFrameNoStop(wcd->cap_window) ||
        !capEditCopy(wcd->cap_window) ||
        !OpenClipboard(wcd->cap_window)) {
        E("%s: Device '%s' is unable to save frame to the clipboard: %d",
          __FUNCTION__, wcd->window_name, GetLastError());
        _camera_device_reset(wcd);
        return -1;
    }

    /* Get bitmap handle saved into clipboard. Note that bitmap is still
     * owned by the clipboard here! */
    bm_handle = (HBITMAP)GetClipboardData(CF_BITMAP);
    if (bm_handle == NULL) {
        E("%s: Device '%s' is unable to obtain frame from the clipboard: %d",
          __FUNCTION__, wcd->window_name, GetLastError());
        CloseClipboard();
        _camera_device_reset(wcd);
        return -1;
    }

    /* Get bitmap object that is initialized with the actual bitmap info. */
    if (!GetObject(bm_handle, sizeof(BITMAP), &bitmap)) {
        E("%s: Device '%s' is unable to obtain frame's bitmap: %d",
          __FUNCTION__, wcd->window_name, GetLastError());
        CloseClipboard();
        _camera_device_reset(wcd);
        return -1;
    }

    /* Now that we have all we need in 'bitmap' */
    CloseClipboard();

    /* Make sure that dimensions match. Otherwise - fail. */
    if (wcd->frame_bitmap->bmiHeader.biWidth != bitmap.bmWidth ||
        wcd->frame_bitmap->bmiHeader.biHeight != bitmap.bmHeight ) {
        E("%s: Requested dimensions %dx%d do not match the actual %dx%d",
          __FUNCTION__, frame_width, frame_height,
          wcd->frame_bitmap->bmiHeader.biWidth,
          wcd->frame_bitmap->bmiHeader.biHeight);
        _camera_device_reset(wcd);
        return -1;
    }

    /* Create bitmap info that will be used with GetDIBits. */
    wcd->gdi_bitmap = (BITMAPINFO*)malloc(wcd->frame_bitmap->bmiHeader.biSize);
    if (wcd->gdi_bitmap == NULL) {
        E("%s: Unable to allocate gdi bitmap info", __FUNCTION__);
        _camera_device_reset(wcd);
        return -1;
    }
    memcpy(wcd->gdi_bitmap, wcd->frame_bitmap,
           wcd->frame_bitmap->bmiHeader.biSize);
    wcd->gdi_bitmap->bmiHeader.biCompression = BI_RGB;
    wcd->gdi_bitmap->bmiHeader.biBitCount = bitmap.bmBitsPixel;
    /* biSizeImage is stride * height, not stride * width. */
    wcd->gdi_bitmap->bmiHeader.biSizeImage = bitmap.bmWidthBytes * bitmap.bmHeight;
    /* Adjust GDI's bitmap biHeight for proper frame direction ("top-down", or
     * "bottom-up"). We do this trick in order to simplify pixel format conversion
     * routines, where we always assume "top-down" frames. The trick here is to
     * have negative biHeight in 'gdi_bitmap' if the driver provides "bottom-up"
     * frames, and positive biHeight in 'gdi_bitmap' if the driver provides "top-down"
     * frames. This way GetDIBits will always return "top-down" frames. */
    if (wcd->is_top_down) {
        wcd->gdi_bitmap->bmiHeader.biHeight =
            wcd->frame_bitmap->bmiHeader.biHeight;
    } else {
        wcd->gdi_bitmap->bmiHeader.biHeight =
            -wcd->frame_bitmap->bmiHeader.biHeight;
    }

    /* Allocate framebuffer. */
    wcd->framebuffer = (uint8_t*)malloc(wcd->gdi_bitmap->bmiHeader.biSizeImage);
    if (wcd->framebuffer == NULL) {
        E("%s: Unable to allocate %d bytes for framebuffer",
          __FUNCTION__, wcd->gdi_bitmap->bmiHeader.biSizeImage);
        _camera_device_reset(wcd);
        return -1;
    }

    /* Lets see what pixel format we will use. */
    if (wcd->gdi_bitmap->bmiHeader.biBitCount == 16) {
        wcd->pixel_format = V4L2_PIX_FMT_RGB565;
    } else if (wcd->gdi_bitmap->bmiHeader.biBitCount == 24) {
        wcd->pixel_format = V4L2_PIX_FMT_BGR24;
    } else if (wcd->gdi_bitmap->bmiHeader.biBitCount == 32) {
        wcd->pixel_format = V4L2_PIX_FMT_BGR32;
    } else {
        E("%s: Unsupported number of bits per pixel %d",
          __FUNCTION__, wcd->gdi_bitmap->bmiHeader.biBitCount);
        _camera_device_reset(wcd);
        return -1;
    }

    D("%s: Capturing device '%s': %d bits per pixel in %.4s [%dx%d] frame",
      __FUNCTION__, wcd->window_name, wcd->gdi_bitmap->bmiHeader.biBitCount,
      (const char*)&wcd->pixel_format, wcd->frame_bitmap->bmiHeader.biWidth,
      wcd->frame_bitmap->bmiHeader.biHeight);

    return 0;
}