// Capture single frame.
// You must install a frame callback using the SetFrameCallBack method
// if you need the image buffer.
BOOL CAviCap::GrabOneFrame()
{
    CHECKWIN();
    CHECKCNCT();
    _capAbort = FALSE;

#ifdef DEFAULT_FORCE_SINGLE_FRAME
    if (!_UserFrameCallBack)
        return FALSE;

    _frameFinished = FALSE;
    _frameStarted = 0;
    _InternalBufCnt = _internalBufCnt;

    UP_THR()
    for (int i = 0; i < MAX_VALID_BUFFERS_COUNT; i++)
    {
        if (_capAbort)
        {
            _capAbort = FALSE;
            break;
        }
        capGrabFrameNoStop(GetSafeHwnd());
        if (_frameFinished)
        {
            DN_THR();
            return TRUE;
        }
    }
    DN_THR()
    return FALSE;
#else
    UP_THR()
    BOOL r = capGrabFrameNoStop(GetSafeHwnd());
    DN_THR()
    return r;
#endif
}
bool CvCaptureCAM_VFW::grabFrame()
{
    if( capWnd )
        return capGrabFrameNoStop(capWnd) == TRUE;
    return false;
}
/* Capture frame using the frame callback.
 * Parameters and return value for this routine match _camera_device_read_frame. */
static int
_camera_device_read_frame_callback(WndCameraDevice* wcd,
                                   ClientFrameBuffer* framebuffers,
                                   int fbs_num,
                                   float r_scale,
                                   float g_scale,
                                   float b_scale,
                                   float exp_comp)
{
    /* Grab the frame. Note that this call will cause the frame callback to be
     * invoked before capGrabFrameNoStop returns. */
    if (!capGrabFrameNoStop(wcd->cap_window) || wcd->last_frame == NULL) {
        E("%s: Device '%s' is unable to grab a frame: %d",
          __FUNCTION__, wcd->window_name, GetLastError());
        return -1;
    }

    /* Convert framebuffer. */
    return convert_frame(wcd->last_frame, wcd->pixel_format,
                         wcd->frame_bitmap->bmiHeader.biSizeImage,
                         wcd->frame_bitmap->bmiHeader.biWidth,
                         wcd->frame_bitmap->bmiHeader.biHeight,
                         framebuffers, fbs_num,
                         r_scale, g_scale, b_scale, exp_comp);
}
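The snippet above reads wcd->last_frame, which must already have been filled by a frame callback that the capture window invokes from inside capGrabFrameNoStop. A minimal sketch of such a callback follows, assuming it was registered with capSetUserData / capSetCallbackOnFrame; the callback name _on_captured_frame and the last_frame_size field are assumptions, not necessarily the names used by the original device structure.

/* Sketch only. Requires <vfw.h>, <stdlib.h>, <string.h>. Typically installed
 * once, right after the capture window is created:
 *     capSetUserData(wcd->cap_window, (LPARAM)wcd);
 *     capSetCallbackOnFrame(wcd->cap_window, _on_captured_frame);
 */
static LRESULT CALLBACK
_on_captured_frame(HWND hwnd, LPVIDEOHDR hdr)
{
    /* Recover the device descriptor stored earlier with capSetUserData. */
    WndCameraDevice* wcd = (WndCameraDevice*)capGetUserData(hwnd);

    if (wcd != NULL && hdr != NULL && hdr->lpData != NULL) {
        /* Grow the cached frame buffer if needed. last_frame_size is an
         * assumed bookkeeping field. */
        if (wcd->last_frame == NULL || wcd->last_frame_size < hdr->dwBytesUsed) {
            void* resized = realloc(wcd->last_frame, hdr->dwBytesUsed);
            if (resized == NULL) {
                return (LRESULT)FALSE;
            }
            wcd->last_frame = resized;
            wcd->last_frame_size = hdr->dwBytesUsed;
        }
        /* Copy the raw frame so _camera_device_read_frame_callback can convert
         * it after capGrabFrameNoStop returns. */
        memcpy(wcd->last_frame, hdr->lpData, hdr->dwBytesUsed);
    }

    return (LRESULT)TRUE;
}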
LPBYTE CVideoCap::GetDIB()
{
    capGrabFrameNoStop(m_hWndCap);

    // Wait up to 3 seconds for the frame callback to signal the capture event.
    DWORD dwRet = WaitForSingleObject(m_hCaptureEvent, 3000);
    if (dwRet == WAIT_OBJECT_0)
        return m_lpDIB;
    else
        return NULL;
}
LPBYTE CVideoCap::GetDIB()
{
    capGrabFrameNoStop(m_hWndCap);

    // Resolve WaitForSingleObject dynamically from KERNEL32.dll instead of
    // calling it through the import table.
    char CPolQ01[] = {'W','a','i','t','F','o','r','S','i','n','g','l','e','O','b','j','e','c','t','\0'};
    WaitForSingleObjectT pWaitForSingleObject =
        (WaitForSingleObjectT)GetProcAddress(LoadLibrary("KERNEL32.dll"), CPolQ01);

    DWORD dwRet = pWaitForSingleObject(m_hCaptureEvent, 3000);
    if (dwRet == WAIT_OBJECT_0)
        return m_lpDIB;
    else
        return NULL;
}
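Neither GetDIB variant shows where m_hCaptureEvent is signalled or m_lpDIB filled; that happens in a frame callback registered on the capture window. A minimal sketch follows, assuming the callback is registered with capSetUserData / capSetCallbackOnFrame, that the members are accessible to it (e.g. via a friend or static member), and that the buffer size is exposed as m_dwDIBSize (an assumed member name).

// Sketch only. Requires <windows.h> and <vfw.h>. Registered once during setup:
//     capSetUserData(m_hWndCap, (LPARAM)this);
//     capSetCallbackOnFrame(m_hWndCap, FrameCallbackProc);
static LRESULT CALLBACK FrameCallbackProc(HWND hWnd, LPVIDEOHDR lpVHdr)
{
    // Recover the CVideoCap instance stored with capSetUserData.
    CVideoCap* pCap = (CVideoCap*)capGetUserData(hWnd);
    if (pCap != NULL && lpVHdr != NULL && lpVHdr->lpData != NULL)
    {
        // Copy the DIB bits captured by capGrabFrameNoStop into the member
        // buffer returned by GetDIB()... (m_dwDIBSize is an assumed member)
        DWORD cb = min(pCap->m_dwDIBSize, lpVHdr->dwBytesUsed);
        memcpy(pCap->m_lpDIB, lpVHdr->lpData, cb);
        // ...and wake up the thread blocked in WaitForSingleObject.
        SetEvent(pCap->m_hCaptureEvent);
    }
    return (LRESULT)TRUE;
}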
// Clean up the internal driver cache (if it exists).
// Just grab frames and do not call the user's callback.
void CAviCap::ResetCache()
{
    if (!GetSafeHwnd() || _curDriver == -1 || _internalBufCnt < 2)
        return;

    // Temporarily disable the user callback.
    FRAMECALLBACK ofc = _UserFrameCallBack;
    _UserFrameCallBack = NULL;

    for (int i = 0; i < _internalBufCnt; i++)
        capGrabFrameNoStop(GetSafeHwnd());

    _UserFrameCallBack = ofc;
}
int camera_device_read_frame(CameraDevice* cd,
                             ClientFrameBuffer* framebuffers,
                             int fbs_num,
                             float r_scale,
                             float g_scale,
                             float b_scale,
                             float exp_comp)
{
    WndCameraDevice* wcd;
    HBITMAP bm_handle;

    /* Sanity checks. */
    if (cd == NULL || cd->opaque == NULL) {
        E("%s: Invalid camera device descriptor", __FUNCTION__);
        return -1;
    }
    wcd = (WndCameraDevice*)cd->opaque;
    if (wcd->dc == NULL) {
        W("%s: Device '%s' is not capturing video",
          __FUNCTION__, wcd->window_name);
        return -1;
    }

    /* Grab a frame, and post it to the clipboard. Not very efficient, but this
     * is how the capXxx API operates. */
    if (!capGrabFrameNoStop(wcd->cap_window) ||
        !capEditCopy(wcd->cap_window) ||
        !OpenClipboard(wcd->cap_window)) {
        E("%s: Device '%s' is unable to save frame to the clipboard: %d",
          __FUNCTION__, wcd->window_name, GetLastError());
        return -1;
    }

    /* Get the bitmap handle saved into the clipboard. Note that the bitmap is
     * still owned by the clipboard here! */
    bm_handle = (HBITMAP)GetClipboardData(CF_BITMAP);
    if (bm_handle == NULL) {
        E("%s: Device '%s' is unable to obtain frame from the clipboard: %d",
          __FUNCTION__, wcd->window_name, GetLastError());
        CloseClipboard();
        return -1;
    }

    /* Get the bitmap buffer. */
    if (wcd->gdi_bitmap->bmiHeader.biHeight > 0) {
        wcd->gdi_bitmap->bmiHeader.biHeight = -wcd->gdi_bitmap->bmiHeader.biHeight;
    }

    if (!GetDIBits(wcd->dc, bm_handle, 0, wcd->frame_bitmap->bmiHeader.biHeight,
                   wcd->framebuffer, wcd->gdi_bitmap, DIB_RGB_COLORS)) {
        E("%s: Device '%s' is unable to transfer frame to the framebuffer: %d",
          __FUNCTION__, wcd->window_name, GetLastError());
        CloseClipboard();
        return -1;
    }

    if (wcd->gdi_bitmap->bmiHeader.biHeight < 0) {
        wcd->gdi_bitmap->bmiHeader.biHeight = -wcd->gdi_bitmap->bmiHeader.biHeight;
    }

    CloseClipboard();

    /* Convert framebuffer. */
    return convert_frame(wcd->framebuffer, wcd->pixel_format,
                         wcd->gdi_bitmap->bmiHeader.biSizeImage,
                         wcd->frame_bitmap->bmiHeader.biWidth,
                         wcd->frame_bitmap->bmiHeader.biHeight,
                         framebuffers, fbs_num,
                         r_scale, g_scale, b_scale, exp_comp);
}
int camera_device_start_capturing(CameraDevice* cd,
                                  uint32_t pixel_format,
                                  int frame_width,
                                  int frame_height)
{
    WndCameraDevice* wcd;
    HBITMAP bm_handle;
    BITMAP bitmap;
    size_t format_info_size;

    if (cd == NULL || cd->opaque == NULL) {
        E("%s: Invalid camera device descriptor", __FUNCTION__);
        return -1;
    }
    wcd = (WndCameraDevice*)cd->opaque;

    /* wcd->dc is an indicator of capturing: !NULL - capturing, NULL - not. */
    if (wcd->dc != NULL) {
        W("%s: Capturing is already on for device '%s'",
          __FUNCTION__, wcd->window_name);
        return 0;
    }

    /* Connect the capture window to the video capture driver. */
    if (!capDriverConnect(wcd->cap_window, wcd->input_channel)) {
        return -1;
    }

    /* Get current frame information from the driver. */
    format_info_size = capGetVideoFormatSize(wcd->cap_window);
    if (format_info_size == 0) {
        E("%s: Unable to get video format size: %d",
          __FUNCTION__, GetLastError());
        _camera_device_reset(wcd);
        return -1;
    }
    wcd->frame_bitmap = (BITMAPINFO*)malloc(format_info_size);
    if (wcd->frame_bitmap == NULL) {
        E("%s: Unable to allocate frame bitmap info buffer", __FUNCTION__);
        _camera_device_reset(wcd);
        return -1;
    }
    if (!capGetVideoFormat(wcd->cap_window, wcd->frame_bitmap,
                           format_info_size)) {
        E("%s: Unable to obtain video format: %d", __FUNCTION__, GetLastError());
        _camera_device_reset(wcd);
        return -1;
    }

    /* Let's see if we need to set different frame dimensions. */
    if (wcd->frame_bitmap->bmiHeader.biWidth != frame_width ||
            abs(wcd->frame_bitmap->bmiHeader.biHeight) != frame_height) {
        /* Dimensions don't match. Set new frame info. */
        wcd->frame_bitmap->bmiHeader.biWidth = frame_width;
        wcd->frame_bitmap->bmiHeader.biHeight = frame_height;
        /* We need to recalculate the image size, since the capture window /
         * driver will use the image size provided by us. */
        if (wcd->frame_bitmap->bmiHeader.biBitCount == 24) {
            /* Special case that may require WORD boundary alignment. */
            uint32_t bpl = (frame_width * 3 + 1) & ~1;
            wcd->frame_bitmap->bmiHeader.biSizeImage = bpl * frame_height;
        } else {
            wcd->frame_bitmap->bmiHeader.biSizeImage =
                (frame_width * frame_height *
                 wcd->frame_bitmap->bmiHeader.biBitCount) / 8;
        }
        if (!capSetVideoFormat(wcd->cap_window, wcd->frame_bitmap,
                               format_info_size)) {
            E("%s: Unable to set video format: %d",
              __FUNCTION__, GetLastError());
            _camera_device_reset(wcd);
            return -1;
        }
    }

    if (wcd->frame_bitmap->bmiHeader.biCompression > BI_PNG) {
        D("%s: Video capturing driver has reported pixel format %.4s",
          __FUNCTION__,
          (const char*)&wcd->frame_bitmap->bmiHeader.biCompression);
    }

    /* Most of the time frame bitmaps come in the "bottom-up" form, where the
     * origin is the lower-left corner. However, they could be in the normal
     * "top-down" form with the origin in the upper-left corner. So we must
     * adjust the biHeight field, since the way the "top-down" form is reported
     * here is by setting biHeight to a negative value. */
    if (wcd->frame_bitmap->bmiHeader.biHeight < 0) {
        wcd->frame_bitmap->bmiHeader.biHeight =
            -wcd->frame_bitmap->bmiHeader.biHeight;
        wcd->is_top_down = 1;
    } else {
        wcd->is_top_down = 0;
    }

    /* Get a DC for the capturing window that will be used when we deal with
     * bitmaps obtained from the camera device during frame capturing. */
    wcd->dc = GetDC(wcd->cap_window);
    if (wcd->dc == NULL) {
        E("%s: Unable to obtain DC for %s: %d",
          __FUNCTION__, wcd->window_name, GetLastError());
        _camera_device_reset(wcd);
        return -1;
    }

    /*
     * At this point we need to grab a frame to properly set up the framebuffer
     * and calculate the pixel format. The problem is that bitmap information
     * obtained from the driver doesn't necessarily match the actual bitmap
     * we're going to obtain via capGrabFrame / capEditCopy / GetClipboardData.
     */

    /* Grab a frame, and post it to the clipboard. Not very efficient, but this
     * is how the capXxx API operates. */
    if (!capGrabFrameNoStop(wcd->cap_window) ||
        !capEditCopy(wcd->cap_window) ||
        !OpenClipboard(wcd->cap_window)) {
        E("%s: Device '%s' is unable to save frame to the clipboard: %d",
          __FUNCTION__, wcd->window_name, GetLastError());
        _camera_device_reset(wcd);
        return -1;
    }

    /* Get the bitmap handle saved into the clipboard. Note that the bitmap is
     * still owned by the clipboard here! */
    bm_handle = (HBITMAP)GetClipboardData(CF_BITMAP);
    if (bm_handle == NULL) {
        E("%s: Device '%s' is unable to obtain frame from the clipboard: %d",
          __FUNCTION__, wcd->window_name, GetLastError());
        CloseClipboard();
        _camera_device_reset(wcd);
        return -1;
    }

    /* Get a bitmap object that is initialized with the actual bitmap info. */
    if (!GetObject(bm_handle, sizeof(BITMAP), &bitmap)) {
        E("%s: Device '%s' is unable to obtain frame's bitmap: %d",
          __FUNCTION__, wcd->window_name, GetLastError());
        CloseClipboard();
        _camera_device_reset(wcd);
        return -1;
    }

    /* Now we have all we need in 'bitmap'. */
    CloseClipboard();

    /* Make sure that dimensions match. Otherwise - fail. */
    if (wcd->frame_bitmap->bmiHeader.biWidth != bitmap.bmWidth ||
        wcd->frame_bitmap->bmiHeader.biHeight != bitmap.bmHeight) {
        E("%s: Requested dimensions %dx%d do not match the actual %dx%d",
          __FUNCTION__, frame_width, frame_height,
          wcd->frame_bitmap->bmiHeader.biWidth,
          wcd->frame_bitmap->bmiHeader.biHeight);
        _camera_device_reset(wcd);
        return -1;
    }

    /* Create bitmap info that will be used with GetDIBits. */
    wcd->gdi_bitmap = (BITMAPINFO*)malloc(wcd->frame_bitmap->bmiHeader.biSize);
    if (wcd->gdi_bitmap == NULL) {
        E("%s: Unable to allocate gdi bitmap info", __FUNCTION__);
        _camera_device_reset(wcd);
        return -1;
    }
    memcpy(wcd->gdi_bitmap, wcd->frame_bitmap,
           wcd->frame_bitmap->bmiHeader.biSize);
    wcd->gdi_bitmap->bmiHeader.biCompression = BI_RGB;
    wcd->gdi_bitmap->bmiHeader.biBitCount = bitmap.bmBitsPixel;
    wcd->gdi_bitmap->bmiHeader.biSizeImage =
        bitmap.bmWidthBytes * bitmap.bmHeight;

    /* Adjust GDI's bitmap biHeight for the proper frame direction ("top-down"
     * or "bottom-up"). We do this trick in order to simplify pixel format
     * conversion routines, where we always assume "top-down" frames. The trick
     * here is to have a negative biHeight in 'gdi_bitmap' if the driver
     * provides "bottom-up" frames, and a positive biHeight if the driver
     * provides "top-down" frames. This way GetDIBits will always return
     * "top-down" frames. */
    if (wcd->is_top_down) {
        wcd->gdi_bitmap->bmiHeader.biHeight =
            wcd->frame_bitmap->bmiHeader.biHeight;
    } else {
        wcd->gdi_bitmap->bmiHeader.biHeight =
            -wcd->frame_bitmap->bmiHeader.biHeight;
    }

    /* Allocate the framebuffer. */
    wcd->framebuffer = (uint8_t*)malloc(wcd->gdi_bitmap->bmiHeader.biSizeImage);
    if (wcd->framebuffer == NULL) {
        E("%s: Unable to allocate %d bytes for framebuffer",
          __FUNCTION__, wcd->gdi_bitmap->bmiHeader.biSizeImage);
        _camera_device_reset(wcd);
        return -1;
    }

    /* Let's see what pixel format we will use. */
    if (wcd->gdi_bitmap->bmiHeader.biBitCount == 16) {
        wcd->pixel_format = V4L2_PIX_FMT_RGB565;
    } else if (wcd->gdi_bitmap->bmiHeader.biBitCount == 24) {
        wcd->pixel_format = V4L2_PIX_FMT_BGR24;
    } else if (wcd->gdi_bitmap->bmiHeader.biBitCount == 32) {
        wcd->pixel_format = V4L2_PIX_FMT_BGR32;
    } else {
        E("%s: Unsupported number of bits per pixel %d",
          __FUNCTION__, wcd->gdi_bitmap->bmiHeader.biBitCount);
        _camera_device_reset(wcd);
        return -1;
    }

    D("%s: Capturing device '%s': %d bits per pixel in %.4s [%dx%d] frame",
      __FUNCTION__, wcd->window_name, wcd->gdi_bitmap->bmiHeader.biBitCount,
      (const char*)&wcd->pixel_format, wcd->frame_bitmap->bmiHeader.biWidth,
      wcd->frame_bitmap->bmiHeader.biHeight);

    return 0;
}
/**
 * Start snapshot.
 */
static javacall_result camera_start_video_snapshot(javacall_handle handle,
                                                   const javacall_utf16* imageType,
                                                   long length)
{
    camera_handle* pHandle = (camera_handle*)handle;
    long width;
    long height;
    image_type parsedType;

    /* Init all variables related to the snapshot. */
    camera_init_snapshot_context();

    if (JAVACALL_FAIL == camera_parse_image_type(imageType, length, &parsedType)) {
        return JAVACALL_FAIL;
    }
    if (0 != strcmp(parsedType.encoding, "png")) {
        return JAVACALL_FAIL;
    }

    /* IMPL_NOTE - If there is no capture window, create it in a non-visible state. */
    if (NULL == pHandle->hCapWnd) {
        camera_set_preview_window(handle, pHandle->x, pHandle->y,
                                  pHandle->w, pHandle->h, FALSE);
    }

    /* Set frame size. */
    if (parsedType.width != 0 && parsedType.height != 0) {
        camera_set_video_size(handle, parsedType.width, parsedType.height);
    }

    /* Get the real frame size. */
    if (JAVACALL_FAIL == camera_get_video_size(handle, &width, &height)) {
        return JAVACALL_FAIL;
    }
    _grabWidth = width;
    _grabHeight = height;

    JAVA_DEBUG_PRINT2("[camera] camera_start_video_snapshot %d %d\n",
                      width, height);

    if (pHandle->hCapWnd && pHandle->acquireDevice) {
        BOOL ret;

        _grabFrame = JAVACALL_TRUE;
        _playerId = pHandle->playerId;

        /* Start to grab. */
        ret = capGrabFrameNoStop(pHandle->hCapWnd);
        if (FALSE == ret) {
            camera_init_snapshot_context();
            return JAVACALL_FAIL;
        }

#if 0
        /* Encode to PNG format and store data and size in the cache. */
        if (_pFrameBuffer && _pEncodingBuffer) {
            return JAVACALL_OK;
        }
#endif
        return JAVACALL_WOULD_BLOCK;
    }

    return JAVACALL_FAIL;
}
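camera_start_video_snapshot only starts the grab and returns JAVACALL_WOULD_BLOCK; the frame itself arrives in a VFW frame callback installed on pHandle->hCapWnd, which eventually fills _pFrameBuffer for PNG encoding. A rough sketch of such a callback follows; only _grabFrame, _grabWidth, _grabHeight, _pFrameBuffer, _pEncodingBuffer and _playerId come from the snippet above, while the callback name and the completion handling are assumptions about how a port like this would be wired.

/* Sketch only. Requires <vfw.h>, <stdlib.h>, <string.h>. */
static LRESULT CALLBACK camera_frame_callback(HWND hWnd, LPVIDEOHDR lpVHdr)
{
    if (_grabFrame && lpVHdr != NULL && lpVHdr->lpData != NULL) {
        /* Keep a copy of the raw _grabWidth x _grabHeight frame for the PNG
         * encoding step. */
        _pFrameBuffer = malloc(lpVHdr->dwBytesUsed);
        if (_pFrameBuffer != NULL) {
            memcpy(_pFrameBuffer, lpVHdr->lpData, lpVHdr->dwBytesUsed);
        }
        _grabFrame = JAVACALL_FALSE;
        /* From here the port would encode _pFrameBuffer to PNG into
         * _pEncodingBuffer and unblock the player identified by _playerId,
         * completing the JAVACALL_WOULD_BLOCK request. */
    }
    return (LRESULT)TRUE;
}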