// Grab the next frame from the camera into caller-supplied memory, honoring
// an arbitrary destination row stride.  Blocks until the driver delivers a
// frame.
//
// ccntxt    - camera context (validated by CHECK_CC)
// out_bytes - destination buffer; must hold at least rows * stride0 bytes
// stride0   - destination row stride in bytes; must be >= the camera's
//             native image stride or an error is raised
// timeout   - unused by this backend (RetrieveBuffer uses the grab timeout
//             configured on the camera) -- kept for interface compatibility
void CCflycap_grab_next_frame_blocking_with_stride( CCflycap *ccntxt, unsigned char *out_bytes, intptr_t stride0, float timeout ) {
  CHECK_CC(ccntxt);
  FlyCapture2::Camera *cam = (FlyCapture2::Camera *)ccntxt->inherited.cam;
  (void)timeout; // timeout not supported by this backend

  // The main frame grabbing code goes here.
  FlyCapture2::Image rawImage;
  CIPGRCHK(cam->RetrieveBuffer( &rawImage ));

  const intptr_t src_stride = (intptr_t)rawImage.GetStride();
  if (stride0 < src_stride) {
    // Destination rows are narrower than source rows; a copy would overrun
    // the caller's buffer.
    CAM_IFACE_THROW_ERROR("stride too small for image");
  }

  if (stride0 == src_stride) {
    // Same stride: the whole frame is one contiguous copy.
    memcpy((void*)out_bytes,                 /*dest*/
           (const void*)rawImage.GetData(),  /*src*/
           rawImage.GetDataSize());          /*size*/
  } else {
    // Destination stride is wider: copy row by row, leaving each row's
    // trailing padding in the destination untouched.
    for (int row=0; row<(int)rawImage.GetRows(); row++) {
      memcpy((void*)(out_bytes + row*stride0),                     /*dest*/
             (const void*)(rawImage.GetData() + row*src_stride),   /*src*/
             src_stride);                                          /*size*/
    }
  }
}
// Construct a CCflycap camera context: connect to the camera identified by
// device_number, configure the requested buffering and video mode, then
// capture a single probe frame to learn the image geometry and pixel coding.
//
// ccntxt          - context to initialize (vmt, backend_extras, cam, ...)
// device_number   - index of the camera on the bus
// NumImageBuffers - number of driver-side image buffers to request
// mode_number     - index into the mode list returned by get_mode_list()
void CCflycap_CCflycap( CCflycap * ccntxt, int device_number, int NumImageBuffers, int mode_number) {
  // call parent
  CamContext_CamContext((CamContext*)ccntxt,device_number,NumImageBuffers,mode_number); // XXX cast error?
  ccntxt->inherited.vmt = (CamContext_functable*)&CCflycap_vmt;
  CAM_IFACE_CHECK_DEVICE_NUMBER(device_number);

  std::vector<CamMode> result;
  int myerr = get_mode_list(device_number, result);
  if (myerr) {
    // Previously ignored: a failed mode query leaves 'result' unusable.
    CAM_IFACE_THROW_ERROR("could not get mode list");
  }
  if (mode_number < 0 || mode_number >= (int)result.size()) {
    // Guard the result[mode_number] access below.
    CAM_IFACE_THROW_ERROR("invalid mode_number");
  }

  ccntxt->inherited.device_number = device_number;
  ccntxt->inherited.backend_extras = new cam_iface_backend_extras;
  memset(ccntxt->inherited.backend_extras,0,sizeof(cam_iface_backend_extras));

  FlyCapture2::Camera *cam = new FlyCapture2::Camera;
  FlyCapture2::PGRGuid guid;
  CIPGRCHK( BACKEND_GLOBAL(busMgr_ptr)->GetCameraFromIndex(device_number, &guid));
  CIPGRCHK(cam->Connect(&guid));

  // Give the driver the requested number of image buffers.
  FlyCapture2::FC2Config cfg;
  CIPGRCHK(cam->GetConfiguration(&cfg));
  cfg.numBuffers = NumImageBuffers;
  CIPGRCHK(cam->SetConfiguration(&cfg));

  // Set the settings to the camera (Format7 modes use a dedicated API).
  CamMode target_mode = result[mode_number];
  if (target_mode.videomode == FlyCapture2::VIDEOMODE_FORMAT7) {
    CIPGRCHK(cam->SetFormat7Configuration(&target_mode.fmt7ImageSettings,
                                          target_mode.fmt7PacketInfo.recommendedBytesPerPacket ));
  } else {
    CIPGRCHK(cam->SetVideoModeAndFrameRate(target_mode.videomode, target_mode.framerate));
  }
  ccntxt->inherited.cam = (void*)cam;

  // XXX move this to start camera and query camera for settings
  CIPGRCHK(cam->StartCapture());

  // Retrieve an image to get width, height. XXX change to query later.
  FlyCapture2::Image rawImage;
  CIPGRCHK( cam->RetrieveBuffer( &rawImage ));

  cam_iface_backend_extras *extras = (cam_iface_backend_extras *)ccntxt->inherited.backend_extras;
  ccntxt->inherited.depth = rawImage.GetBitsPerPixel();
  extras->buf_size = rawImage.GetDataSize();
  extras->current_height = rawImage.GetRows();
  extras->current_width = rawImage.GetCols();
  extras->max_height = rawImage.GetRows();
  extras->max_width = rawImage.GetCols();
  CIPGRCHK( cam->GetTriggerModeInfo( &extras->trigger_mode_info ));

  // Map the FlyCapture pixel format / Bayer tiling onto cam_iface codings.
  switch (rawImage.GetPixelFormat()) {
  case FlyCapture2::PIXEL_FORMAT_MONO8:
    ccntxt->inherited.coding = CAM_IFACE_MONO8;
    if (rawImage.GetBayerTileFormat()!=FlyCapture2::NONE) {
      NOT_IMPLEMENTED;
    }
    break;
  case FlyCapture2::PIXEL_FORMAT_RAW8:
    switch (rawImage.GetBayerTileFormat()) {
    case FlyCapture2::NONE: ccntxt->inherited.coding = CAM_IFACE_RAW8; break;
    case FlyCapture2::RGGB: ccntxt->inherited.coding = CAM_IFACE_MONO8_BAYER_RGGB; break;
    case FlyCapture2::GRBG: ccntxt->inherited.coding = CAM_IFACE_MONO8_BAYER_GRBG; break;
    case FlyCapture2::GBRG: ccntxt->inherited.coding = CAM_IFACE_MONO8_BAYER_GBRG; break;
    case FlyCapture2::BGGR: ccntxt->inherited.coding = CAM_IFACE_MONO8_BAYER_BGGR; break;
    default: NOT_IMPLEMENTED;
    }
    break;
  default:
    // Previously fell through silently, leaving 'coding' unset for any
    // other pixel format.
    NOT_IMPLEMENTED;
  }
  CIPGRCHK(cam->StopCapture());
}
int main() { FlyCapture2::Error error; FlyCapture2::PGRGuid guid; FlyCapture2::BusManager busMgr; FlyCapture2::Camera cam; FlyCapture2::VideoMode vm; FlyCapture2::FrameRate fr; FlyCapture2::Image rawImage; //Initializing camera error = busMgr.GetCameraFromIndex(0, &guid); if (error != FlyCapture2::PGRERROR_OK) { error.PrintErrorTrace(); return CAM_FAILURE; } vm = FlyCapture2::VIDEOMODE_640x480Y8; fr = FlyCapture2::FRAMERATE_60; error = cam.Connect(&guid); if (error != FlyCapture2::PGRERROR_OK) { error.PrintErrorTrace(); return CAM_FAILURE; } cam.SetVideoModeAndFrameRate(vm, fr); //Starting the capture error = cam.StartCapture(); if (error != FlyCapture2::PGRERROR_OK) { error.PrintErrorTrace(); return CAM_FAILURE; } //Image Variables Mat frame (Size(640,480),CV_8UC1); Mat img_bgr (Size(640,480),CV_8UC3); Mat img_hsv (Size(640,480),CV_8UC3); //Image in HSV color space Mat threshy (Size(640,480),CV_8UC1); //Threshed Image // Mat labelImg (Size(640,480),CV_8UC1); //Image Variable for blobs IplImage* histogram= cvCreateImage(cvSize(640,480),8,3); //Image histograms Mat histograms (Size(640,480),CV_8UC1); //greyscale image for histogram Mat empty (Size(640,480),CV_8UC3); // CvBlobs blobs; while(1) { cam.RetrieveBuffer(&rawImage); memcpy(frame.data, rawImage.GetData(), rawImage.GetDataSize()); // histogram = empty.clone(); cvZero(histogram); // cvAddS(frame, cvScalar(70,70,70), frame); cvtColor(frame,img_bgr,CV_BayerBG2BGR); // cout<<"\n1"; cvtColor(img_bgr,img_hsv,CV_BGR2HSV); // cout<<"\n2"; //Thresholding the frame for yellow inRange(img_hsv, Scalar(20, 100, 20), Scalar(70, 255, 255), threshy); // cvInRangeS(img_hsv, cvScalar(0, 120, 100), cvScalar(255, 255, 255), threshy); //Filtering the frame - subsampling?? 
// smooth(threshy,threshy,CV_MEDIAN,7,7); //Finding the blobs // unsigned int result=cvLabel(threshy,labelImg,blobs); //Filtering the blobs // cvFilterByArea(blobs,500,1000); //Rendering the blobs // cvRenderBlobs(labelImg,blobs,img_bgr,img_bgr); CvPoint prev = cvPoint(0,0); for(int x=0;x<640;++x) { Mat col = threshy.col(x); int y = 480 - countNonZero(col); for(int i=0 ; i<480 ; ++i) histograms.at<uchar>(x,i) = y; CvPoint next = cvPoint(x,y); cvLine(histogram,prev,next,cColor); // cvCircle(histogram,next,2,cColor,3); prev=next; } //Showing the images imshow("Live",img_bgr); imshow("Threshed",threshy); cvShowImage("Histogram",histogram); int c= cvWaitKey(10); if(c == 27) break; } //Cleanup // cvReleaseImage(&frame); // cvReleaseImage(&threshy); // cvReleaseImage(&img_hsv); // cvReleaseImage(&labelImg); // cvReleaseImage(&histogram); cvDestroyAllWindows(); return 0; }
int main() { FlyCapture2::Error error; FlyCapture2::PGRGuid guid; FlyCapture2::BusManager busMgr; FlyCapture2::Camera cam; FlyCapture2::VideoMode vm; FlyCapture2::FrameRate fr; FlyCapture2::Image rawImage; //Initializing camera error = busMgr.GetCameraFromIndex(0, &guid); if (error != FlyCapture2::PGRERROR_OK) { error.PrintErrorTrace(); return CAM_FAILURE; } vm = FlyCapture2::VIDEOMODE_640x480Y8; fr = FlyCapture2::FRAMERATE_60; error = cam.Connect(&guid); if (error != FlyCapture2::PGRERROR_OK) { error.PrintErrorTrace(); return CAM_FAILURE; } cam.SetVideoModeAndFrameRate(vm, fr); //Starting the capture error = cam.StartCapture(); if (error != FlyCapture2::PGRERROR_OK) { error.PrintErrorTrace(); return CAM_FAILURE; } while(1) { Mat frame (Size(640,480),CV_8UC1); // Mat img_scene (Size(640,480),CV_8UC3); Mat img_object = imread( "bitslogo.png", CV_LOAD_IMAGE_GRAYSCALE ); cam.RetrieveBuffer(&rawImage); memcpy(frame.data, rawImage.GetData(), rawImage.GetDataSize()); cvtColor(frame,frame,CV_BayerBG2RGB); imwrite("temp.bmp",frame); // cvtColor(img_scene,img_scene,CV_RGB2GRAY); Mat img_scene = imread( "temp.bmp", CV_LOAD_IMAGE_GRAYSCALE ); if( !img_object.data || !img_scene.data ) { std::cout<< " --(!) 
Error reading images " << std::endl; return -1; } //-- Step 1: Detect the keypoints using SURF Detector int minHessian = 400; SurfFeatureDetector detector( minHessian ); std::vector<KeyPoint> keypoints_object, keypoints_scene; detector.detect( img_object, keypoints_object ); detector.detect( img_scene, keypoints_scene ); //-- Step 2: Calculate descriptors (feature vectors) SurfDescriptorExtractor extractor; Mat descriptors_object, descriptors_scene; extractor.compute( img_object, keypoints_object, descriptors_object ); extractor.compute( img_scene, keypoints_scene, descriptors_scene ); //-- Step 3: Matching descriptor vectors using FLANN matcher FlannBasedMatcher matcher; std::vector< DMatch > matches; matcher.match( descriptors_object, descriptors_scene, matches ); double max_dist = 0; double min_dist = 100; //-- Quick calculation of max and min distances between keypoints for( int i = 0; i < descriptors_object.rows; i++ ) { double dist = matches[i].distance; if( dist < min_dist ) min_dist = dist; if( dist > max_dist ) max_dist = dist; } printf("-- Max dist : %f \n", max_dist ); printf("-- Min dist : %f \n", min_dist ); //-- Draw only "good" matches (i.e. 
whose distance is less than 3*min_dist ) std::vector< DMatch > good_matches; for( int i = 0; i < descriptors_object.rows; i++ ) { if( matches[i].distance < 3*min_dist ) { good_matches.push_back( matches[i]); } } Mat img_matches; drawMatches( img_object, keypoints_object, img_scene, keypoints_scene, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS ); //-- Localize the object std::vector<Point2f> obj; std::vector<Point2f> scene; for( int i = 0; i < good_matches.size(); i++ ) { //-- Get the keypoints from the good matches obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt ); scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt ); } Mat H = findHomography( obj, scene, CV_RANSAC ); //-- Get the corners from the image_1 ( the object to be "detected" ) std::vector<Point2f> obj_corners(4); obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 ); obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows ); std::vector<Point2f> scene_corners(4); perspectiveTransform( obj_corners, scene_corners, H); //-- Draw lines between the corners (the mapped object in the scene - image_2 ) line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 ); line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 ); line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 ); line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 ); //-- Show detected matches imshow( "Good Matches & Object detection", img_matches ); int c = cvWaitKey(100); if(c == 27) break; } return 0; }
int main( int /*argc*/, char** /*argv*/ ) { //!< load camera calibration info cv::Mat mx1, my1, mx2, my2, Q; loadCalibrationInfo(mx1, my1, mx2, my2, Q); //!< connect to cameras unsigned int numCameras; FlyCapture2::Camera** ppCameras; prepareCameras( numCameras, ppCameras ); //!< Allocate images //!< an image par camera: a vector of pointers to images (cv::Mat) std::vector< std::shared_ptr< cv::Mat > > pimageCamera; for ( unsigned int i = 0; i < numCameras; i++ ) pimageCamera.push_back( std::shared_ptr< cv::Mat > ( new cv::Mat(480, 640, CV_8UC1) ) ); //!< create cv::Mat*, cast to shared_ptr, then push_back //!< disparity maps cv::Mat imageDisparity(480, 640, CV_32F), disp8U; //!< OpenCV control panel: block stereo matching parameters cv::namedWindow("control panel", cv::WINDOW_NORMAL ); int ndisparities = 5; cv::createTrackbar( "(n+1)x16=ndisparities", "control panel", &ndisparities, 20, NULL ); //!< ndisparities % 16 == 0, positive, ndisparities < width int blockSize = 5; cv::createTrackbar( "(s+2)x2+1=blockSize", "control panel", &blockSize, 30, NULL ); //!< 5 <= blocksize <= 255, odd #define ADDBAR( name1 , name2 , init, maxval ) \ int name1 = 5; \ cv::createTrackbar( #name1, "control panel", &name1, maxval, NULL ); \ sbm->name2( name1 ); #ifdef USE_OPENCV_SBM //!< sample code from https://github.com/Itseez/opencv/blob/52a785e95a30d9336bfbac97a0a0d0089ffaa7de/samples/cpp/stereo_match.cpp //!< stereo matching object cv::Ptr< cv::StereoBM > sbm = cv::createStereoBM( ndisparities, blockSize ); //sbm->setPreFilterType( cv::StereoBM::PREFILTER_NORMALIZED_RESPONSE ); //sbm->setPreFilterType( cv::StereoBM::PREFILTER_XSOBEL ); ADDBAR( textureThreshold, setTextureThreshold, 5, 30 ); ADDBAR( smallerBlockSize, setSmallerBlockSize, 5, 30 ); #endif #ifdef USE_OPENCV_SGBM //!< sample code from https://github.com/Itseez/opencv/blob/52a785e95a30d9336bfbac97a0a0d0089ffaa7de/samples/cpp/stereo_match.cpp cv::Ptr< cv::StereoSGBM > sbm = cv::createStereoSGBM( 0, ndisparities, 
blockSize ); //!< parameters from http://www.jayrambhia.com/blog/disparity-maps/ ADDBAR( P1, setP1, 500, 1000 ); ADDBAR( P2, setP2, 2000, 3000 ); #endif #if defined(USE_OPENCV_SBM) || defined(USE_OPENCV_SGBM) ADDBAR( preFilterCap, setPreFilterCap, 4, 20 ); //sbm->setMinDisparity( 0 ); //sbm->setNumDisparities( ndisparities ); ADDBAR( uniquenessRatio, setUniquenessRatio, 10, 20 ); ADDBAR( speckleWindowSize, setSpeckleWindowSize, 150, 200 ); ADDBAR( speckleRange, setSpeckleRange, 2, 10 ); ADDBAR( disp12MaxDiff, setDisp12MaxDiff, 5, 20 ); #endif #ifdef USE_ELAS Elas::parameters param; param.postprocess_only_left = true; param.disp_min = 0; param.disp_max = (ndisparities + 1) * 16; const int32_t dims[3] = {640, 480, 640}; Elas elas( param ); //float* D1_data = (float*)malloc(640*480*sizeof(float)); //float* D2_data = (float*)malloc(width*height*sizeof(float)); #endif pcl::PointCloud< pcl::PointXYZ >::Ptr pointCloudFromDepth_ptr ( new pcl::PointCloud< pcl::PointXYZ > ); { pcl::PointXYZ initPoint; initPoint.x = 0; initPoint.y = 0; initPoint.z = 0; pointCloudFromDepth_ptr->points.push_back( initPoint ); pointCloudFromDepth_ptr->width = (int) pointCloudFromDepth_ptr->points.size (); pointCloudFromDepth_ptr->height = 1; } boost::shared_ptr< pcl::visualization::PCLVisualizer > viewer (new pcl::visualization::PCLVisualizer ("3D Viewer")); viewer->setBackgroundColor (0, 0, 0); viewer->addPointCloud<pcl::PointXYZ> (pointCloudFromDepth_ptr, "my points"); viewer->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "my points"); viewer->addCoordinateSystem (1.0, "global"); viewer->initCameraParameters (); while ( 1 ) { //!< Display the timestamps for all cameras to show that the image //!< capture is synchronized for each image #if defined(USE_OPENCV_SBM) sbm->setTextureThreshold( textureThreshold ); sbm->setSmallerBlockSize( smallerBlockSize ); #endif #if defined(USE_OPENCV_SGBM) sbm->setP1( P1 ); sbm->setP2( P2 ); #endif #if 
defined(USE_OPENCV_SBM) || defined(USE_OPENCV_SGBM) sbm->setBlockSize( (blockSize + 2) * 2 + 1 ); sbm->setNumDisparities( (ndisparities + 1) * 16 ); sbm->setUniquenessRatio( uniquenessRatio ); sbm->setSpeckleWindowSize( speckleWindowSize ); sbm->setSpeckleRange( speckleRange ); sbm->setDisp12MaxDiff( disp12MaxDiff ); #endif #ifdef USE_ELAS param.disp_max = (ndisparities + 1) * 16; elas.setParameters( param ); #endif for ( unsigned int i = 0; i < numCameras; i++ ) { FlyCapture2::Image rawImage; //!< buffer: Must be here in the loop, otherwise cameras do not synchronize. FlyCapture2::Error error; error = ppCameras[i]->RetrieveBuffer( &rawImage ); PRINT_ERROR( error ); // FlyCapture2::TimeStamp timestamp = rawImage.GetTimeStamp(); // fprintf(stderr, // "Cam %d TimeStamp [%d %d]\n", // i, // timestamp.cycleSeconds, // timestamp.cycleCount); memcpy( pimageCamera[i]->data, rawImage.GetData(), rawImage.GetDataSize() ); } //!< rectifying images (and un-distorting?) cv::remap( *pimageCamera[1], *pimageCamera[1], mx1, my1, cv::INTER_LINEAR ); cv::remap( *pimageCamera[0], *pimageCamera[0], mx2, my2, cv::INTER_LINEAR ); #if defined(USE_OPENCV_SBM) || defined(USE_OPENCV_SGBM) sbm->compute( *pimageCamera[1], *pimageCamera[0], imageDisparity ); #endif #ifdef USE_ELAS // int static skip = 0; // if (skip % 15 ==0) { // elas.process(imageCamera1.data, imageCamera0.data, (float*)imageDisparity.data,(float*)imageDisparity.data, dims); elas.process(pimageCamera[1]->data, pimageCamera[0]->data, (float*)imageDisparity.data,(float*)imageDisparity.data, dims); } // skip++; #endif //!< normalize disparity map for imshow double minVal, maxVal; minMaxLoc( imageDisparity, &minVal, &maxVal ); imageDisparity.convertTo( disp8U, CV_8UC1, 255/(maxVal - minVal) ); #define DRAWTXT(img, str, x, y, s) \ cv::putText( img, str, cv::Point(x,y), cv::FONT_HERSHEY_SIMPLEX, s, cv::Scalar::all(255) ) DRAWTXT( disp8U, "disparity", 10, 20, 0.5 ); cv::imshow( "disparity", disp8U ); //!< disparity map --> depth 
map --> point cloud cv::Mat depth; cv::reprojectImageTo3D( imageDisparity, depth, Q ); cv::Mat pointCloudFromDepth = depth.reshape( 3, depth.size().area() ); pointCloudFromDepth_ptr->clear(); for (int i = 0; i < pointCloudFromDepth.rows; i++) { float *pt = pointCloudFromDepth.ptr < float > ( i ); pcl::PointXYZ basic_point; basic_point.x = pt[0]; basic_point.y = pt[1]; basic_point.z = pt[2]; #if defined(USE_OPENCV_SBM) || defined(USE_OPENCV_SGBM) basic_point.z /= 8.0; //!< simple hack, but unknown reason..... //!< still wrong. Depth maybe strange. #endif if ( pt[2] < 129 ) //!< simple hack pointCloudFromDepth_ptr->points.push_back ( basic_point ); } pointCloudFromDepth_ptr->width = (int) pointCloudFromDepth_ptr->points.size (); pointCloudFromDepth_ptr->height = 1; viewer->updatePointCloud< pcl::PointXYZ > ( pointCloudFromDepth_ptr, "my points" ); viewer->spinOnce (); for ( unsigned int i = 0; i < numCameras; i++ ) { // drawCameraInfo( ppCameras[i], *pimageCamera[i] ); cv::imshow( std::string("cam") + std::to_string(i), *pimageCamera[i] ); } cv::imshow( "sub", cv::abs( *pimageCamera[0] - *pimageCamera[1] ) ); char keyValue = cv::waitKey( 10 ); if (keyValue == 'q' || keyValue == 27 /* ESC */ ) { break; } } exit(0); //!< hack; code below may freeze. for ( unsigned int i = 0; i < numCameras; i++ ) { ppCameras[i]->StopCapture(); ppCameras[i]->Disconnect(); delete ppCameras[i]; } delete [] ppCameras; return 0; }