//------------------------------------------------------------------ void ofxCLEye::threadedFunction(){ while(isThreadRunning()){ if(lock()){ CLEyeCameraGetFrame(cam, (PBYTE)viPixels); unlock(); } } }
void EyeCameraCapture::Capture(cv::Mat& output)
{
	// Color modes deliver 4-channel (BGRA) frames, mono modes 1-channel.
	int wantedType = CV_8UC1;
	if (_mode == CLEYE_COLOR_PROCESSED || _mode == CLEYE_COLOR_RAW) {
		wantedType = CV_8UC4;
	}

	// (Re)allocate the destination only when it does not already match
	// the camera's frame geometry and pixel type.
	const bool matches = output.type() == wantedType
	                  && output.rows == _height
	                  && output.cols == _width;
	if (!matches) {
		output.create(_height, _width, wantedType);
	}

	// Grab the next frame straight into the matrix's pixel buffer.
	CLEyeCameraGetFrame(_cam, output.data);
}
bool retrieveFrame(int channel, cv::OutputArray outArray) { CLEyeCameraGetFrame(m_eye, pCapBuffer, 33); const int from_to[] = { 0, 0, 1, 1, 2, 2 }; const CvArr** src = (const CvArr**)&m_frame4ch; CvArr** dst = (CvArr**)&m_frame; cvMixChannels(src, 1, dst, 1, from_to, 3); if (m_frame->origin == IPL_ORIGIN_TL) cv::cvarrToMat(m_frame).copyTo(outArray); else { cv::Mat temp = cv::cvarrToMat(m_frame); flip(temp, outArray, 0); } return true; }
bool PS3::IsFrameNew() { static int frame = 0; static double lastFPSlog = 0; double now = GetTickCount(); bool frameNew = CLEyeCameraGetFrame( _cam, _pCapBuffer ); //! Count the fps and framecount if ( frameNew ) { _frameCount++; frame++; } //! 1 sec ago if ( now >= lastFPSlog + 1000 ) { _fps = frame; frame = 0; lastFPSlog = now; } return frameNew; }
//-------------------------------------------------------------- void ofxCLEye::update(){ if(!initialized){ return; } newFrame = false; bool success = false; if(usingThread){ success = true; } else{ success = CLEyeCameraGetFrame(cam, (PBYTE)viPixels); } if(success){ newFrame = true; if(colorMode == CLEYE_MONO_PROCESSED){ pixels.setFromPixels(viPixels, width, height, (colorMode == CLEYE_MONO_PROCESSED) ? 1 : 4); } else{ for(int i = 0; i < width * height; i++){ pixels[i * 3 + 0] = viPixels[i * 4 + 2]; pixels[i * 3 + 1] = viPixels[i * 4 + 1]; pixels[i * 3 + 2] = viPixels[i * 4 + 0]; } } if(usingTexture){ texture.loadData(pixels.getPixels(), width, height, (colorMode == CLEYE_MONO_PROCESSED) ? GL_LUMINANCE : GL_RGB); } if(usingThread){ unlock(); } } }
void CLEyeCameraCapture::Run() { // Create camera instance _cam = CLEyeCreateCamera(_cameraGUID, _mode, _resolution, _fps); if(_cam == NULL) return; // Get camera frame dimensions CLEyeCameraGetFrameDimensions(_cam, w, h); // Depending on color mode chosen, create the appropriate OpenCV image if(_mode == CLEYE_COLOR_PROCESSED || _mode == CLEYE_COLOR_RAW) pCapImage = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 4); else pCapImage = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1); // Set some camera parameters //CLEyeSetCameraParameter(_cam, CLEYE_GAIN, 20); //CLEyeSetCameraParameter(_cam, CLEYE_EXPOSURE, 511); CLEyeSetCameraParameter(_cam, CLEYE_AUTO_GAIN, true); CLEyeSetCameraParameter(_cam, CLEYE_AUTO_EXPOSURE, true); CLEyeSetCameraParameter( _cam, CLEYE_HFLIP, true); // Start capturing CLEyeCameraStart(_cam); cvGetImageRawData(pCapImage, &pCapBuffer); pCapture = pCapImage; long frames = 0; long count = GetTickCount(); long prevCount = 0; double fps = 0; // image capturing loop Mat src_gray, subImage, subImage_gray; vector<Vec3f> circles; Point center; Point n_center; int radius = 0; int counter = 0; char* fpsText = new char[5]; char* pos_text = new char[10]; while(_running) { CLEyeCameraGetFrame(_cam, pCapBuffer); //check fps every 100 frames frames++; if((frames % 100) == 0){ prevCount = count; count = GetTickCount(); fps = 100000.0/(count - prevCount); //std::cout << "fps: " << fps << endl; sprintf(fpsText, "fps: %f", fps); } if(frames > 100) putText(pCapture, fpsText, Point(5, 20), CV_FONT_HERSHEY_PLAIN, 1, Scalar(0, 255, 0)); else putText(pCapture, "calculating fps...", Point(5, 20), CV_FONT_HERSHEY_PLAIN, 1, Scalar(0, 255, 0)); //find circle in whole area of frame first if(!_isTracking){ CircleDetector(pCapture, src_gray, circles, center, radius); if(circles.size() != 0) _isTracking = true; n_center = center; } //dynamically move subimage area by tracking the object else { int subImage_size = 30; Point temp = FixSubImageSize(n_center, 320, 240, subImage_size); 
Rect t_rect(temp.x - subImage_size, temp.y - subImage_size, subImage_size*2, subImage_size*2); subImage = pCapture(t_rect); CircleDetector(subImage, subImage_gray, circles, center, radius); imshow(trackingWindowName, subImage); if(circles.size() == 0) { counter++; if(counter == 3) { _isTracking = false; counter = 0; cout << "Lost tracking! Search whole frame." << endl; } } else { counter = 0; n_center.x = temp.x - subImage_size + center.x; n_center.y = temp.y - subImage_size + center.y; cout << "fps: " << fps << " x:" << n_center.x << ", y:" << n_center.y << endl; } } sprintf(pos_text, "x=%d,y=%d", n_center.x, n_center.y); if(circles.size() != 0){ putText(pCapture, pos_text, Point(n_center.x + radius, n_center.y - radius), CV_FONT_HERSHEY_PLAIN, 1, Scalar(0, 255, 0)); } imshow(_windowName, pCapture); } // Stop camera capture CLEyeCameraStop(_cam); // Destroy camera object CLEyeDestroyCamera(_cam); // Destroy the allocated OpenCV image cvReleaseImage(&pCapImage); _cam = NULL; }
/*
 * Grab and post-process a single camera frame.
 *
 * Returns a borrowed IplImage* owned by the CameraControl (or by the
 * underlying OpenCV capture) — the caller must NOT release it. The
 * returned image may alias cc->frame3ch or cc->frame3chUndistort.
 */
IplImage * camera_control_query_frame(CameraControl* cc) {
    IplImage* result;

#if defined(CAMERA_CONTROL_USE_CL_DRIVER)
    // assign buffer-pointer to address of buffer
    cvGetRawData(cc->frame4ch, &cc->pCapBuffer, 0, 0);

    // Blocking grab into the 4-channel buffer (2000 ms timeout).
    CLEyeCameraGetFrame(cc->camera, cc->pCapBuffer, 2000);

    // convert 4ch image to 3ch image (copy B,G,R; drop the alpha channel)
    const int from_to[] = { 0, 0, 1, 1, 2, 2 };
    const CvArr** src = (const CvArr**) &cc->frame4ch;
    CvArr** dst = (CvArr**) &cc->frame3ch;
    cvMixChannels(src, 1, dst, 1, from_to, 3);

    result = cc->frame3ch;
#else
    // Generic OpenCV capture path; time the call for diagnostics.
    long start = psmove_util_get_ticks();
    result = cvQueryFrame(cc->capture);
    psmove_DEBUG("cvQueryFrame: %ld ms\n", psmove_util_get_ticks() - start);
#endif

#if defined(PSMOVE_USE_DEINTERLACE)
    /**
     * Dirty hack follows:
     *  - Clone image
     *  - Hack internal variables to make an image of all odd lines
     **/
    IplImage *tmp = cvCloneImage(result);
    tmp->imageData += tmp->widthStep; // odd lines
    tmp->widthStep *= 2;
    tmp->height /= 2;

    /**
     * Use nearest-neighbor to be faster. In my tests, this does not
     * cause a speed disadvantage, and tracking quality is still good.
     *
     * This will scale the half-height image "tmp" to the original frame
     * size by doubling lines (so we can still do normal circle tracking).
     **/
    cvResize(tmp, result, CV_INTER_NN);

    /**
     * Need to revert changes in tmp from above, otherwise the call
     * to cvReleaseImage would cause a crash.
     **/
    tmp->height = result->height;
    tmp->widthStep = result->widthStep;
    tmp->imageData -= tmp->widthStep; // odd lines
    cvReleaseImage(&tmp);
#endif

    // undistort image (only when a calibration remap is loaded)
    if (cc->mapx && cc->mapy) {
        cvRemap(result, cc->frame3chUndistort,
                cc->mapx, cc->mapy,
                CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS,
                cvScalarAll(0));
        result = cc->frame3chUndistort;
    }

    return result;
}
void ofxPS3::getNewFrame(unsigned char* newFrame) { CLEyeCameraGetFrame(ps3EyeCamera,(PBYTE)newFrame, 1000); }
void Run() { int w, h; IplImage *pCapImage; PBYTE pCapBuffer = NULL; // Create camera instance _cam = CLEyeCreateCamera(_cameraGUID, _mode, _resolution, _fps); if(_cam == NULL) return; // Get camera frame dimensions CLEyeCameraGetFrameDimensions(_cam, w, h); // Depending on color mode chosen, create the appropriate OpenCV image if(_mode == CLEYE_COLOR_PROCESSED || _mode == CLEYE_COLOR_RAW) pCapImage = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 4); else pCapImage = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1); // Set some camera parameters //CLEyeSetCameraParameter(_cam, CLEYE_GAIN, 30); //CLEyeSetCameraParameter(_cam, CLEYE_EXPOSURE, 500); //CLEyeSetCameraParameter(_cam, CLEYE_AUTO_EXPOSURE, false); //CLEyeSetCameraParameter(_cam, CLEYE_AUTO_GAIN, false); //CLEyeSetCameraParameter(_cam, CLEYE_AUTO_WHITEBALANCE, false); //CLEyeSetCameraParameter(_cam, CLEYE_WHITEBALANCE_RED, 100); //CLEyeSetCameraParameter(_cam, CLEYE_WHITEBALANCE_BLUE, 200); //CLEyeSetCameraParameter(_cam, CLEYE_WHITEBALANCE_GREEN, 200); // Start capturing CLEyeCameraStart(_cam); CvMemStorage* storage = cvCreateMemStorage(0); IplImage* hsv_frame = cvCreateImage(cvSize(pCapImage->width, pCapImage->height), IPL_DEPTH_8U, 3); IplImage* thresholded = cvCreateImage(cvSize(pCapImage->width, pCapImage->height), IPL_DEPTH_8U, 1); IplImage* temp = cvCreateImage(cvSize(pCapImage->width >> 1, pCapImage->height >> 1), IPL_DEPTH_8U, 3); // Create a window in which the captured images will be presented cvNamedWindow( "Camera" , CV_WINDOW_AUTOSIZE ); cvNamedWindow( "HSV", CV_WINDOW_AUTOSIZE ); cvNamedWindow( "EdgeDetection", CV_WINDOW_AUTOSIZE ); //int hl = 100, hu = 115, sl = 95, su = 135, vl = 115, vu = 200; int hl = 5, hu = 75, sl = 40, su = 245, vl = 105, vu = 175; // image capturing loop while(_running) { // Detect a red ball CvScalar hsv_min = cvScalar(hl, sl, vl, 0); CvScalar hsv_max = cvScalar(hu, su, vu, 0); cvGetImageRawData(pCapImage, &pCapBuffer); CLEyeCameraGetFrame(_cam, pCapBuffer); 
cvConvertImage(pCapImage, hsv_frame); // Get one frame if( !pCapImage ) { fprintf( stderr, "ERROR: frame is null...\n" ); getchar(); break; } // Covert color space to HSV as it is much easier to filter colors in the HSV color-space. cvCvtColor(pCapImage, hsv_frame, CV_RGB2HSV); // Filter out colors which are out of range. cvInRangeS(hsv_frame, hsv_min, hsv_max, thresholded); // Memory for hough circles CvMemStorage* storage = cvCreateMemStorage(0); // hough detector works better with some smoothing of the image cvSmooth( thresholded, thresholded, CV_GAUSSIAN, 9, 9 ); CvSeq* circles = cvHoughCircles(thresholded, storage, CV_HOUGH_GRADIENT, 2, thresholded->height/4, 100, 50, 10, 400); for (int i = 0; i < circles->total; i++) { float* p = (float*)cvGetSeqElem( circles, i ); //printf("Ball! x=%f y=%f r=%f\n\r",p[0],p[1],p[2] ); cvCircle( pCapImage, cvPoint(cvRound(p[0]),cvRound(p[1])), 3, CV_RGB(0,255,0), -1, 8, 0 ); cvCircle( pCapImage, cvPoint(cvRound(p[0]),cvRound(p[1])), cvRound(p[2]), CV_RGB(255,0,0), 3, 8, 0 ); } cvShowImage( "Camera", pCapImage ); // Original stream with detected ball overlay cvShowImage( "HSV", hsv_frame); // Original stream in the HSV color space cvShowImage( "EdgeDetection", thresholded ); // The stream after color filtering cvReleaseMemStorage(&storage); // Do not release the frame! 
//If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version), //remove higher bits using AND operator int key = cvWaitKey(10); switch(key){ case 'q' : hu += 5; break; case 'Q' : hu -= 5; break; case 'a' : hl -= 5; break; case 'A' : hl += 5; break; case 'w' : su += 5; break; case 'W' : su -= 5; break; case 's' : sl -= 5; break; case 'S' : sl += 5; break; case 'e' : vu += 5; break; case 'E' : vu -= 5; break; case 'd' : vl -= 5; break; case 'D' : vl += 5; break; } if (key != -1){ printf("H: %i, S: %i, V: %i\nH: %i, S: %i, V: %i\n\n", hu, su, vu, hl, sl, vl); } } cvReleaseImage(&temp); cvReleaseImage(&pCapImage); // Stop camera capture CLEyeCameraStop(_cam); // Destroy camera object CLEyeDestroyCamera(_cam); // Destroy the allocated OpenCV image cvReleaseImage(&pCapImage); _cam = NULL; }
/*
 * Grab, optionally deinterlace, and optionally undistort one camera frame.
 *
 * ts_grab / ts_retrieve: if non-NULL, filled with timestamps taken around
 * the grab and retrieve steps (only on the generic OpenCV capture path).
 *
 * Returns a borrowed IplImage* owned by the CameraControl (or by the
 * underlying OpenCV capture) — the caller must NOT release it.
 */
IplImage * camera_control_query_frame(CameraControl* cc,
        PSMove_timestamp *ts_grab, PSMove_timestamp *ts_retrieve) {
    IplImage* result;

#if defined(CAMERA_CONTROL_USE_CL_DRIVER)
    // assign buffer-pointer to address of buffer
    cvGetRawData(cc->frame4ch, &cc->pCapBuffer, 0, 0);

    // Blocking grab into the 4-channel buffer (2000 ms timeout).
    CLEyeCameraGetFrame(cc->camera, cc->pCapBuffer, 2000);

    // convert 4ch image to 3ch image (copy B,G,R; drop the alpha channel)
    const int from_to[] = { 0, 0, 1, 1, 2, 2 };
    const CvArr** src = (const CvArr**) &cc->frame4ch;
    CvArr** dst = (CvArr**) &cc->frame3ch;
    cvMixChannels(src, 1, dst, 1, from_to, 3);

    result = cc->frame3ch;
#elif defined(CAMERA_CONTROL_USE_PS3EYE_DRIVER)
    // Grab raw YUV422 pixels from the PS3 Eye driver, then convert them
    // into the pre-allocated BGR frame buffer.
    int stride = 0;
    unsigned char *pixels = ps3eye_grab_frame(cc->eye, &stride);

    // Convert pixels from camera to BGR
    unsigned char *cvpixels;
    cvGetRawData(cc->framebgr, &cvpixels, 0, 0);
    yuv422_to_bgr(pixels, stride, cvpixels, cc->width, cc->height);

    result = cc->framebgr;
#else
    // Generic OpenCV capture path: grab and retrieve separately so each
    // step can be timestamped for latency measurement.
    cvGrabFrame(cc->capture);
    if (ts_grab != NULL) {
        *ts_grab = _psmove_timestamp();
    }
    result = cvRetrieveFrame(cc->capture, 0);
    if (ts_retrieve != NULL) {
        *ts_retrieve = _psmove_timestamp();
    }
#endif

    if (cc->deinterlace == PSMove_True) {
        /**
         * Dirty hack follows:
         *  - Clone image
         *  - Hack internal variables to make an image of all odd lines
         **/
        IplImage *tmp = cvCloneImage(result);
        tmp->imageData += tmp->widthStep; // odd lines
        tmp->widthStep *= 2;
        tmp->height /= 2;

        /**
         * Use nearest-neighbor to be faster. In my tests, this does not
         * cause a speed disadvantage, and tracking quality is still good.
         *
         * This will scale the half-height image "tmp" to the original frame
         * size by doubling lines (so we can still do normal circle tracking).
         **/
        cvResize(tmp, result, CV_INTER_NN);

        /**
         * Need to revert changes in tmp from above, otherwise the call
         * to cvReleaseImage would cause a crash.
         **/
        tmp->height = result->height;
        tmp->widthStep = result->widthStep;
        tmp->imageData -= tmp->widthStep; // odd lines
        cvReleaseImage(&tmp);
    }

    // undistort image (only when a calibration remap is loaded)
    if (cc->mapx && cc->mapy) {
        cvRemap(result, cc->frame3chUndistort,
                cc->mapx, cc->mapy,
                CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS,
                cvScalarAll(0));
        result = cc->frame3chUndistort;
    }

#if defined(CAMERA_CONTROL_DEBUG_CAPTURED_IMAGE)
    cvShowImage("camera input", result);
    cvWaitKey(1);
#endif

    return result;
}