Code Example #1
CameraFrame CameraPointGrey::getFrame(){

    FlyCapture2::Error error;

    if(triggerMode == triggerModeSoftware){
        // Fire software trigger
        // broadcasting not supported on some platforms
        cam.FireSoftwareTrigger(false);
    }

    // Retrieve the image
    FlyCapture2::Image rawImage;
    error = cam.RetrieveBuffer(&rawImage);
    if (error != FlyCapture2::PGRERROR_OK)
        PrintError(error);

    CameraFrame frame;

    frame.timeStamp = rawImage.GetTimeStamp().cycleCount;
    frame.height = rawImage.GetRows();
    frame.width = rawImage.GetCols();
    frame.memory = rawImage.GetData();

    return frame;
}
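Note: `frame.memory` points into `rawImage`'s internal buffer, and `rawImage` is a local that is destroyed when `getFrame()` returns, so the caller can be left with a dangling pointer. Below is a minimal sketch of a copying variant; the owning `std::vector<unsigned char>` member `buffer` is hypothetical and not part of the original `CameraFrame` (trigger handling omitted for brevity):

CameraFrame CameraPointGrey::getFrameCopy(){
    FlyCapture2::Image rawImage;
    FlyCapture2::Error error = cam.RetrieveBuffer(&rawImage);
    if (error != FlyCapture2::PGRERROR_OK)
        PrintError(error);

    CameraFrame frame;
    frame.timeStamp = rawImage.GetTimeStamp().cycleCount;
    frame.height = rawImage.GetRows();
    frame.width = rawImage.GetCols();
    // Hypothetical owning member: std::vector<unsigned char> CameraFrame::buffer
    frame.buffer.assign(rawImage.GetData(),
                        rawImage.GetData() + rawImage.GetDataSize());
    frame.memory = frame.buffer.data(); // stays valid: vector moves keep the data pointer
    return frame;
}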
Code Example #2
  static bool frameToImage (FlyCapture2::Image * frame, sensor_msgs::Image & image)
  {
    // Dimensions of the converted image, filled in below
    FlyCapture2::PixelFormat pixFormat;
    uint32_t rows, cols, stride;

    FlyCapture2::Image convertedImage;
    FlyCapture2::Error error;

    error = frame->Convert(FlyCapture2::PIXEL_FORMAT_RGB, &convertedImage );

    if (error != FlyCapture2::PGRERROR_OK)
    {
        error.PrintErrorTrace();
        return false;
    }

    // Get the converted image dimensions
    convertedImage.GetDimensions( &rows, &cols, &stride, &pixFormat );

    return sensor_msgs::fillImage (image, sensor_msgs::image_encodings::RGB8,
                                   rows, cols, stride,
                                   convertedImage.GetData ());
  }
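A hedged usage sketch for the helper above: grab one frame and publish it as a ROS image. The `cam` (a connected, capturing FlyCapture2::Camera) and `pub` (a ros::Publisher for sensor_msgs::Image) handles are assumptions; only `RetrieveBuffer` and `frameToImage` come from the snippet itself.

  // Hypothetical caller: retrieve a raw frame, convert, stamp, publish.
  FlyCapture2::Image raw;
  if (cam.RetrieveBuffer(&raw) == FlyCapture2::PGRERROR_OK) {
    sensor_msgs::Image msg;
    if (frameToImage(&raw, msg)) {
      msg.header.stamp = ros::Time::now();
      pub.publish(msg);
    }
  }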
Code Example #3
/*!
  Acquire a color image from the active camera.

  \param I : Image data structure (RGBa image).

  \param timestamp : The acquisition timestamp.
*/
void vpFlyCaptureGrabber::acquire(vpImage<vpRGBa> &I, FlyCapture2::TimeStamp &timestamp)
{
  this->open();

  FlyCapture2::Error error;
  // Retrieve an image
  error = m_camera.RetrieveBuffer( &m_rawImage );
  if (error != FlyCapture2::PGRERROR_OK) {
    error.PrintErrorTrace();
    throw (vpException(vpException::fatalError,
                       "Cannot retrieve image for camera with guid 0x%lx",
                       m_guid) );
  }
  timestamp = m_rawImage.GetTimeStamp();

  // Create a converted image
  FlyCapture2::Image convertedImage;

  // Convert the raw image
  error = m_rawImage.Convert( FlyCapture2::PIXEL_FORMAT_RGBU, &convertedImage );
  if (error != FlyCapture2::PGRERROR_OK) {
    error.PrintErrorTrace();
    throw (vpException(vpException::fatalError,
                       "Cannot convert image for camera with guid 0x%lx",
                       m_guid) );
  }
  height = convertedImage.GetRows();
  width = convertedImage.GetCols();
  unsigned char *data = convertedImage.GetData();
  I.resize(height, width);
  unsigned int bps = convertedImage.GetBitsPerPixel();
  memcpy(I.bitmap, data, width*height*bps/8);
}
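A short usage sketch for the grabber above (hedged: the default-constructed vpFlyCaptureGrabber and the console output are assumptions based on the ViSP-style API shown):

  // Hypothetical usage: grab one color frame and print its size.
  vpImage<vpRGBa> I;
  FlyCapture2::TimeStamp ts;
  vpFlyCaptureGrabber g;
  try {
    g.acquire(I, ts); // opens the camera on first use (see this->open() above)
    std::cout << I.getWidth() << "x" << I.getHeight()
              << " @ cycle " << ts.cycleCount << std::endl;
  } catch (const vpException &e) {
    std::cout << e.getMessage() << std::endl;
  }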
Code Example #4
void VideoSource::GetAndFillFrameBWandRGB(CVD::Image<CVD::byte> &imBW, CVD::Image<CVD::Rgb<CVD::byte> > &imRGB)
{
    Error error;

    imRGB.resize(mirSize);
    imBW.resize(mirSize);
    static FlyCapture2::Image imCap;
    error = cam.RetrieveBuffer(&imCap);
    if (error != PGRERROR_OK)
    {
        PrintErrorAndExit(error);
    }

    unsigned char *pImage = imCap.GetData();

    // Assumes 3-byte pixels in B,G,R order; the green channel doubles
    // as the greyscale value.
    for (int y = 0; y < mirSize.y; y++) {
        for (int x = 0; x < mirSize.x; x++) {
            imRGB[y][x].blue = *pImage;
            pImage++;

            imRGB[y][x].green = *pImage;
            imBW[y][x] = *pImage;
            pImage++;

            imRGB[y][x].red = *pImage;
            pImage++;
        }
    }
}
Code Example #5
void CCaptureGUIView::setFlyImage(const FlyCapture2::Image& image) {
	unsigned int rows, cols, stride;
	FlyCapture2::PixelFormat format;
	image.GetDimensions(&rows, &cols, &stride, &format);
	m_bmpmutex.lock();
	auto error = image.Convert(FlyCapture2::PIXEL_FORMAT_BGR, &fly_image);
	m_bmpmutex.unlock();
	assert(error == FlyCapture2::PGRERROR_OK); // success, not failure, is the expected case
}
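The manual lock()/unlock() pair above is not exception-safe, and the dimensions fetched by GetDimensions are never used. A sketch of the same method with an RAII lock, assuming `m_bmpmutex` is a std::mutex (or compatible):

void CCaptureGUIView::setFlyImage(const FlyCapture2::Image& image) {
	std::lock_guard<std::mutex> lock(m_bmpmutex); // unlocks on every exit path
	auto error = image.Convert(FlyCapture2::PIXEL_FORMAT_BGR, &fly_image);
	assert(error == FlyCapture2::PGRERROR_OK);
}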
Code Example #6
void CCflycap_grab_next_frame_blocking_with_stride( CCflycap *ccntxt,
						    unsigned char *out_bytes,
						    intptr_t stride0,
						    float timeout ) {
  CHECK_CC(ccntxt);
  FlyCapture2::Camera *cam = (FlyCapture2::Camera *)ccntxt->inherited.cam;
  cam_iface_backend_extras* backend_extras = (cam_iface_backend_extras*)(ccntxt->inherited.backend_extras);
  // The main frame grabbing code goes here.

  FlyCapture2::Image rawImage;
  CIPGRCHK(cam->RetrieveBuffer( &rawImage ));

  if (stride0 < (intptr_t)rawImage.GetStride()) {
    CAM_IFACE_THROW_ERROR("stride too small for image");
  }

  if (stride0==(intptr_t)rawImage.GetStride()) {
    // same stride
    memcpy((void*)out_bytes,                 /* dest */
	   (const void*)rawImage.GetData(),  /* src */
	   rawImage.GetDataSize());          /* size */

  } else {
    // different strides
    for (int row=0; row<(int)rawImage.GetRows(); row++) {
      memcpy((void*)(out_bytes+row*stride0), /*dest*/
	     (const void*)(rawImage.GetData() + row*rawImage.GetStride()),/*src*/
	     rawImage.GetStride());/*size*/
    }
  }
}
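`CIPGRCHK` is not defined in this snippet; judging by its use, it evaluates a FlyCapture2 call and aborts through the cam_iface error path on failure. A hedged sketch of such a macro is below; the real definition in cam_iface may differ (`CAM_IFACE_THROW_ERROR` is borrowed from the snippet above):

// Hypothetical stand-in for CIPGRCHK: run a FlyCapture2 call, bail on error.
#define CIPGRCHK_SKETCH(call)                        \
  do {                                               \
    FlyCapture2::Error _err = (call);                \
    if (_err != FlyCapture2::PGRERROR_OK) {          \
      CAM_IFACE_THROW_ERROR(_err.GetDescription());  \
    }                                                \
  } while (0)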
Code Example #7
File: pgr_camera.cpp  Project: rbaldwin7/rhocode
bool Camera::saveImageToFile(FlyCapture2::Image * rawImage)
{
  static int imageCount=0;
  FlyCapture2::Error error;
  FlyCapture2::CameraInfo camInfo;
  error = camPGR_.GetCameraInfo(&camInfo);
  if (error != PGRERROR_OK)
  {
    PrintError( error );
    return false;
  }
  // Get the raw image dimensions
  FlyCapture2::PixelFormat pixFormat;
  unsigned int rows, cols, stride;
  rawImage->GetDimensions( &rows, &cols, &stride, &pixFormat );

  // Create a converted image
  FlyCapture2::Image convertedImage;

  // Convert the raw image
  error = rawImage->Convert(FlyCapture2::PIXEL_FORMAT_BGRU, &convertedImage );
  if (error != PGRERROR_OK)
  {
    PrintError( error );
    return false;
  }
  // Create a unique filename
  char filename[512];
  snprintf( filename, sizeof(filename), "/tmp/%u-%d.bmp", camInfo.serialNumber, imageCount );
  ROS_INFO("Filename: %s", filename);

  // Save the image. If a file format is not passed in, then the file
  // extension is parsed to attempt to determine the file format.
  error = convertedImage.Save( filename );
  if (error != PGRERROR_OK)
  {
    PrintError( error );
    return false;
  }

  imageCount++;

  return true;
}
Code Example #8
void CCflycap_CCflycap( CCflycap * ccntxt, int device_number, int NumImageBuffers,
                        int mode_number) {

  // call parent
  CamContext_CamContext((CamContext*)ccntxt,device_number,NumImageBuffers,mode_number); // XXX cast error?
  ccntxt->inherited.vmt = (CamContext_functable*)&CCflycap_vmt;

  CAM_IFACE_CHECK_DEVICE_NUMBER(device_number);
  std::vector<CamMode> result;
  int myerr = get_mode_list(device_number, result);

  ccntxt->inherited.device_number = device_number;
  ccntxt->inherited.backend_extras = new cam_iface_backend_extras;
  memset(ccntxt->inherited.backend_extras,0,sizeof(cam_iface_backend_extras));

  FlyCapture2::Camera *cam = new FlyCapture2::Camera;
  FlyCapture2::PGRGuid guid;
  CIPGRCHK( BACKEND_GLOBAL(busMgr_ptr)->GetCameraFromIndex(device_number, &guid));
  CIPGRCHK(cam->Connect(&guid));

  FlyCapture2::FC2Config cfg;
  CIPGRCHK(cam->GetConfiguration(&cfg));
  cfg.numBuffers = NumImageBuffers;
  CIPGRCHK(cam->SetConfiguration(&cfg));

  // Set the settings to the camera
  CamMode target_mode = result[mode_number];

  if (target_mode.videomode == FlyCapture2::VIDEOMODE_FORMAT7) {
    CIPGRCHK(cam->SetFormat7Configuration(&target_mode.fmt7ImageSettings,
					  target_mode.fmt7PacketInfo.recommendedBytesPerPacket ));
  } else {
    CIPGRCHK(cam->SetVideoModeAndFrameRate(target_mode.videomode, target_mode.framerate));
  }

  ccntxt->inherited.cam = (void*)cam;

  // XXX move this to start camera and query camera for settings

  CIPGRCHK(cam->StartCapture());


  // Retrieve an image to get width, height. XXX change to query later.
  FlyCapture2::Image rawImage;
  CIPGRCHK( cam->RetrieveBuffer( &rawImage ));

  cam_iface_backend_extras *extras = (cam_iface_backend_extras *)ccntxt->inherited.backend_extras;

  ccntxt->inherited.depth = rawImage.GetBitsPerPixel();
  extras->buf_size = rawImage.GetDataSize();
  extras->current_height = rawImage.GetRows();
  extras->current_width = rawImage.GetCols();
  extras->max_height = rawImage.GetRows();
  extras->max_width = rawImage.GetCols();
  CIPGRCHK( cam->GetTriggerModeInfo( &extras->trigger_mode_info ));

  switch (rawImage.GetPixelFormat()) {
  case FlyCapture2::PIXEL_FORMAT_MONO8:
    ccntxt->inherited.coding = CAM_IFACE_MONO8;
    if (rawImage.GetBayerTileFormat()!=FlyCapture2::NONE) {
      NOT_IMPLEMENTED;
    }
    break;
  case FlyCapture2::PIXEL_FORMAT_RAW8:
    switch (rawImage.GetBayerTileFormat()) {
    case FlyCapture2::NONE:
      ccntxt->inherited.coding = CAM_IFACE_RAW8;
      break;
    case FlyCapture2::RGGB:
      ccntxt->inherited.coding = CAM_IFACE_MONO8_BAYER_RGGB;
      break;
    case FlyCapture2::GRBG:
      ccntxt->inherited.coding = CAM_IFACE_MONO8_BAYER_GRBG;
      break;
    case FlyCapture2::GBRG:
      ccntxt->inherited.coding = CAM_IFACE_MONO8_BAYER_GBRG;
      break;
    case FlyCapture2::BGGR:
      ccntxt->inherited.coding = CAM_IFACE_MONO8_BAYER_BGGR;
      break;
    default:
      NOT_IMPLEMENTED;
    }
    break;
  default:
    NOT_IMPLEMENTED;
  }
  CIPGRCHK(cam->StopCapture());

}
Code Example #9
  static bool frameToImage (FlyCapture2::Image * frame, sensor_msgs::Image & image)
  {

    std::string encoding;

    // Get the raw image dimensions
    FlyCapture2::PixelFormat pixFormat;
    uint32_t rows, cols, stride;


    // NOTE: 16-bit and Yuv formats not supported
    static const char *BAYER_ENCODINGS[] = { "none", "bayer_rggb8",
      "bayer_grbg8", "bayer_gbrg8", "bayer_bggr8", "unknown"
    };
    //unsigned int bpp = frame->GetBitsPerPixel();
    FlyCapture2::BayerTileFormat bayerFmt;
    bayerFmt = frame->GetBayerTileFormat ();
    //ROS_INFO("bayer is %u", bayerFmt);
    frame->GetDimensions( &rows, &cols, &stride, &pixFormat );
    //ROS_INFO("Frame WxH = %d x %d", cols, rows); // FIXME: Dynamic resizing of camera is now working
    //ROS_INFO("Pixel Format (from Raw Image): 0x%08x", pixFormat);
    //ROS_INFO("Stride of Raw Image (The number of bytes between rows of the image): %d", stride);

    if (bayerFmt == FlyCapture2::NONE)
    {
      //ROS_INFO("Pixel Format (from Raw Image): 0x%08x", pixFormat);
      encoding = sensor_msgs::image_encodings::MONO8;
      return sensor_msgs::fillImage (image, encoding, rows,
                                     cols,
                                     stride,
                                     frame->GetData ());
    }
    else
    {
      // Create a converted image
      FlyCapture2::Image convertedImage;
      FlyCapture2::Error error;

      //encoding = BAYER_ENCODINGS[bayerFmt];
      switch ((FlyCapture2::PixelFormat) pixFormat) {
        case FlyCapture2::PIXEL_FORMAT_RAW8:
          // Convert the raw image
          error = frame->Convert(FlyCapture2::PIXEL_FORMAT_RGB, &convertedImage );
          encoding = sensor_msgs::image_encodings::RGB8; // Arbitrary encoding based on pixel format

          if (error != FlyCapture2::PGRERROR_OK)
          {
            error.PrintErrorTrace();
            return false;
          }
          // Get the raw image dimensions
          convertedImage.GetDimensions( &rows, &cols, &stride, &pixFormat );
          break;
        default:
          // FIXME: for now, only RAW8 is handled (by converting to RGB8)
          // TODO: Handle encoding directly from Bayer
          return false;
      } // switch
      //ROS_INFO("Stride of Converted Image (The number of bytes between rows of the image): %d", stride);

      return sensor_msgs::fillImage (image, encoding, rows,
                                     cols,
                                     stride,
                                     convertedImage.GetData ());
    } //else

  }
Code Example #10
File: Cam.cpp  Project: CVfAR/ATAM
/*!
@brief		open camera
@param[out]	w	image width
@param[out]	h	image height
@param[out]	c	number of channels
@retval		whether the camera was opened successfully
*/
bool CCam::Open(
	int &w,
	int &h,
	int &c
	)
{
#ifdef USEPGR
	if (!mPGRCap.IsConnected()){

		FlyCapture2::Error error;

		// connect to a camera
		error = mPGRCap.Connect(0);
		if (error != FlyCapture2::PGRERROR_OK){
			return false;
		}

		FlyCapture2::CameraInfo camInfo;
		error = mPGRCap.GetCameraInfo(&camInfo);
		if (error != FlyCapture2::PGRERROR_OK) {
			return false;
		}

		// start capturing
		error = mPGRCap.StartCapture();
		if (error != FlyCapture2::PGRERROR_OK){
			return false;
		}

		// get image
		FlyCapture2::Image raw;
		error = mPGRCap.RetrieveBuffer(&raw);
		if (error != FlyCapture2::PGRERROR_OK){
			return false;
		}

		// VGA (half the native resolution)
		w = mWidth = raw.GetCols() / 2;		// image width
		h = mHeight = raw.GetRows() / 2;	// image height
		c = mChannel = 3;			// color camera

		mSize = mWidth*mHeight*mChannel;

		mImg = cv::Mat(cv::Size(mWidth, mHeight), CV_8UC3);

		return true;
	}
	else{
		return false;
	}
#else
	if (!mCap.isOpened()){

		if (!mCap.open(mDeviceID)){		// if opening failed
			printf("Cam ID %d not found\n", mDeviceID);
			return false;
		}

		// VGA
		mCap.set(cv::CAP_PROP_FRAME_WIDTH, 640);
		mCap.set(cv::CAP_PROP_FRAME_HEIGHT, 480);
	
		// check image
		int count = 0;
		while (mImg.data == NULL){

			mCap >> mImg;
			++count;

			if (count > 10){		// if retrieval failed
				printf("Cannot retrieve images\n");
				return false;
			}
		}

		mWidth = w = mImg.cols;
		mHeight = h = mImg.rows;
		mChannel = c = mImg.channels();

		mSize = mHeight*(int)mImg.step;

		return true;
	}
	else{
		return false;
	}
#endif
}
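A minimal sketch of a caller for CCam::Open (hedged: assumes a default-constructible CCam, as the members above suggest):

	// Hypothetical caller: open the camera and report the negotiated format.
	CCam cam;
	int w = 0, h = 0, c = 0;
	if (cam.Open(w, h, c)) {
		printf("camera opened: %dx%d, %d channel(s)\n", w, h, c);
	} else {
		printf("failed to open camera\n");
	}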
Code Example #11
File: surf_oldcamera.cpp  Project: p-kar/vision
int main()
{
    FlyCapture2::Error error;
    FlyCapture2::PGRGuid guid;
    FlyCapture2::BusManager busMgr;
    FlyCapture2::Camera cam;
    FlyCapture2::VideoMode vm;
    FlyCapture2::FrameRate fr;
    FlyCapture2::Image rawImage;

    // Initializing camera
    error = busMgr.GetCameraFromIndex(0, &guid);
    if (error != FlyCapture2::PGRERROR_OK)
    {
        error.PrintErrorTrace();
        return CAM_FAILURE;
    }

    vm = FlyCapture2::VIDEOMODE_640x480Y8;
    fr = FlyCapture2::FRAMERATE_60;

    error = cam.Connect(&guid);
    if (error != FlyCapture2::PGRERROR_OK)
    {
        error.PrintErrorTrace();
        return CAM_FAILURE;
    }

    cam.SetVideoModeAndFrameRate(vm, fr);
    //Starting the capture
    error = cam.StartCapture();
    if (error != FlyCapture2::PGRERROR_OK)
    {
        error.PrintErrorTrace();
        return CAM_FAILURE;
    }

    while(1)
    {
        Mat frame (Size(640,480),CV_8UC1);
        // Mat img_scene (Size(640,480),CV_8UC3);
        Mat img_object = imread( "bitslogo.png", CV_LOAD_IMAGE_GRAYSCALE );
        cam.RetrieveBuffer(&rawImage);
        memcpy(frame.data, rawImage.GetData(), rawImage.GetDataSize());
        cvtColor(frame,frame,CV_BayerBG2RGB);
        imwrite("temp.bmp",frame);
        // cvtColor(img_scene,img_scene,CV_RGB2GRAY);
        Mat img_scene = imread( "temp.bmp", CV_LOAD_IMAGE_GRAYSCALE );

        if( !img_object.data || !img_scene.data )
        { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

        //-- Step 1: Detect the keypoints using SURF Detector
        int minHessian = 400;

        SurfFeatureDetector detector( minHessian );

        std::vector<KeyPoint> keypoints_object, keypoints_scene;

        detector.detect( img_object, keypoints_object );
        detector.detect( img_scene, keypoints_scene );

        //-- Step 2: Calculate descriptors (feature vectors)
        SurfDescriptorExtractor extractor;

        Mat descriptors_object, descriptors_scene;

        extractor.compute( img_object, keypoints_object, descriptors_object );
        extractor.compute( img_scene, keypoints_scene, descriptors_scene );

        //-- Step 3: Matching descriptor vectors using FLANN matcher
        FlannBasedMatcher matcher;
        std::vector< DMatch > matches;
        matcher.match( descriptors_object, descriptors_scene, matches );

        double max_dist = 0; double min_dist = 100;

        //-- Quick calculation of max and min distances between keypoints
        for( int i = 0; i < descriptors_object.rows; i++ )
        { double dist = matches[i].distance;
          if( dist < min_dist ) min_dist = dist;
          if( dist > max_dist ) max_dist = dist;
        }

        printf("-- Max dist : %f \n", max_dist );
        printf("-- Min dist : %f \n", min_dist );

        //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
        std::vector< DMatch > good_matches;

        for( int i = 0; i < descriptors_object.rows; i++ )
        { if( matches[i].distance < 3*min_dist )
           { good_matches.push_back( matches[i]); }
        }

        Mat img_matches;
        drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
                     good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                     vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

        //-- Localize the object
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;

        for( int i = 0; i < good_matches.size(); i++ )
        {
          //-- Get the keypoints from the good matches
          obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
          scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
        }

        Mat H = findHomography( obj, scene, CV_RANSAC );

        //-- Get the corners from the image_1 ( the object to be "detected" )
        std::vector<Point2f> obj_corners(4);
        obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
        obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
        std::vector<Point2f> scene_corners(4);

        perspectiveTransform( obj_corners, scene_corners, H);

        //-- Draw lines between the corners (the mapped object in the scene - image_2 )
        line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
        line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
        line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
        line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );

        //-- Show detected matches
        imshow( "Good Matches & Object detection", img_matches );
        int c = cvWaitKey(100);
        if(c == 27)
            break;
    }

    return 0;
}
Code Example #12
File: histograms_test.cpp  Project: p-kar/vision
int main()
{
    FlyCapture2::Error error;
    FlyCapture2::PGRGuid guid;
    FlyCapture2::BusManager busMgr;
    FlyCapture2::Camera cam;
    FlyCapture2::VideoMode vm;
    FlyCapture2::FrameRate fr;
    FlyCapture2::Image rawImage;

    // Initializing camera
    error = busMgr.GetCameraFromIndex(0, &guid);
    if (error != FlyCapture2::PGRERROR_OK)
    {
        error.PrintErrorTrace();
        return CAM_FAILURE;
    }

    vm = FlyCapture2::VIDEOMODE_640x480Y8;
    fr = FlyCapture2::FRAMERATE_60;

    error = cam.Connect(&guid);
    if (error != FlyCapture2::PGRERROR_OK)
    {
        error.PrintErrorTrace();
        return CAM_FAILURE;
    }

    cam.SetVideoModeAndFrameRate(vm, fr);
    //Starting the capture
    error = cam.StartCapture();
    if (error != FlyCapture2::PGRERROR_OK)
    {
        error.PrintErrorTrace();
        return CAM_FAILURE;
    }

        //Image variables
        Mat frame (Size(640,480),CV_8UC1);
        Mat img_bgr (Size(640,480),CV_8UC3);
        Mat img_hsv (Size(640,480),CV_8UC3);            //Image in HSV color space
        Mat threshy (Size(640,480),CV_8UC1);            //Thresholded image
        // Mat labelImg (Size(640,480),CV_8UC1);        //Image variable for blobs
        IplImage* histogram = cvCreateImage(cvSize(640,480),8,3);  //Histogram canvas
        Mat histograms (Size(640,480),CV_8UC1);         //Greyscale image for histogram column heights
        Mat empty (Size(640,480),CV_8UC3);

        // CvBlobs blobs;

        while(1)
        {
                cam.RetrieveBuffer(&rawImage);
                memcpy(frame.data, rawImage.GetData(), rawImage.GetDataSize());
                // histogram = empty.clone();
                cvZero(histogram);
                // cvAddS(frame, cvScalar(70,70,70), frame);
                cvtColor(frame,img_bgr,CV_BayerBG2BGR);
                // cout<<"\n1";
                cvtColor(img_bgr,img_hsv,CV_BGR2HSV);        
                // cout<<"\n2";                                        
                //Thresholding the frame for yellow
                inRange(img_hsv, Scalar(20, 100, 20), Scalar(70, 255, 255), threshy);                                        
                // cvInRangeS(img_hsv, cvScalar(0, 120, 100), cvScalar(255, 255, 255), threshy);
                //Filtering the frame - subsampling??
                // smooth(threshy,threshy,CV_MEDIAN,7,7);

                //Finding the blobs
                // unsigned int result=cvLabel(threshy,labelImg,blobs);
                //Filtering the blobs
                // cvFilterByArea(blobs,500,1000);
                //Rendering the blobs
                // cvRenderBlobs(labelImg,blobs,img_bgr,img_bgr);

                CvPoint prev = cvPoint(0,0);

                for(int x=0;x<640;++x)
                {
                        Mat col = threshy.col(x);
                        int y = 480 - countNonZero(col);
                        for(int i=0 ; i<480 ; ++i)
                             histograms.at<uchar>(x,i) = y;
                        CvPoint next = cvPoint(x,y);
                        cvLine(histogram,prev,next,cColor);
                        // cvCircle(histogram,next,2,cColor,3);
                        prev=next;
                }

                //Showing the images
                imshow("Live",img_bgr);
                imshow("Threshed",threshy);
                cvShowImage("Histogram",histogram);

                int c= cvWaitKey(10);

                if(c == 27)
                        break;
        }

        //Cleanup
        // cvReleaseImage(&frame);
        // cvReleaseImage(&threshy);
        // cvReleaseImage(&img_hsv);
        // cvReleaseImage(&labelImg);
        // cvReleaseImage(&histogram);
        cvDestroyAllWindows();
        return 0;
}
Code Example #13
File: main.cpp  Project: tttamaki/realtimeStereo
int main( int /*argc*/, char** /*argv*/ )
{
    //!< load camera calibration info
    cv::Mat mx1, my1, mx2, my2, Q;
    loadCalibrationInfo(mx1, my1, mx2, my2, Q);


    //!< connect to cameras
    unsigned int numCameras;
    FlyCapture2::Camera** ppCameras;
    prepareCameras( numCameras, ppCameras );

    //!< Allocate images

    //!< an image par camera: a vector of pointers to images (cv::Mat)
    std::vector< std::shared_ptr< cv::Mat > > pimageCamera;
    for ( unsigned int i = 0; i < numCameras; i++ )
        pimageCamera.push_back( std::shared_ptr< cv::Mat > ( new cv::Mat(480, 640, CV_8UC1) ) ); //!< create cv::Mat*, cast to shared_ptr, then push_back

    //!< disparity maps
    cv::Mat imageDisparity(480, 640, CV_32F), disp8U;

    //!< OpenCV control panel: block stereo matching parameters
    cv::namedWindow("control panel", cv::WINDOW_NORMAL );
    int ndisparities = 5;
    cv::createTrackbar( "(n+1)x16=ndisparities", "control panel", &ndisparities, 20, NULL ); //!< ndisparities % 16 == 0, positive, ndisparities < width
    int blockSize = 5;
    cv::createTrackbar( "(s+2)x2+1=blockSize", "control panel", &blockSize, 30, NULL ); //!<  5 <= blocksize <= 255, odd


#define ADDBAR( name1 , name2 , init, maxval ) \
    int name1 = init; \
    cv::createTrackbar( #name1, "control panel", &name1, maxval, NULL ); \
    sbm->name2( name1 );

    

#ifdef USE_OPENCV_SBM
    //!< sample code from https://github.com/Itseez/opencv/blob/52a785e95a30d9336bfbac97a0a0d0089ffaa7de/samples/cpp/stereo_match.cpp
    //!< stereo matching object
    cv::Ptr< cv::StereoBM > sbm = cv::createStereoBM( ndisparities, blockSize );
    //sbm->setPreFilterType( cv::StereoBM::PREFILTER_NORMALIZED_RESPONSE );
    //sbm->setPreFilterType( cv::StereoBM::PREFILTER_XSOBEL );

    ADDBAR( textureThreshold, setTextureThreshold, 5, 30 );
    ADDBAR( smallerBlockSize, setSmallerBlockSize, 5, 30 );
    
#endif
    
#ifdef USE_OPENCV_SGBM
    //!< sample code from https://github.com/Itseez/opencv/blob/52a785e95a30d9336bfbac97a0a0d0089ffaa7de/samples/cpp/stereo_match.cpp
    cv::Ptr< cv::StereoSGBM > sbm = cv::createStereoSGBM( 0, ndisparities, blockSize );

    //!< parameters from http://www.jayrambhia.com/blog/disparity-maps/
    ADDBAR( P1, setP1, 500, 1000 );
    ADDBAR( P2, setP2, 2000, 3000 );
#endif

#if defined(USE_OPENCV_SBM) || defined(USE_OPENCV_SGBM)
    ADDBAR( preFilterCap, setPreFilterCap, 4, 20 );
    //sbm->setMinDisparity( 0 );
    //sbm->setNumDisparities( ndisparities );
    ADDBAR( uniquenessRatio, setUniquenessRatio, 10, 20 );
    ADDBAR( speckleWindowSize, setSpeckleWindowSize, 150, 200 );
    ADDBAR( speckleRange, setSpeckleRange, 2, 10 );
    ADDBAR( disp12MaxDiff, setDisp12MaxDiff, 5, 20 );
#endif
    
    
    
#ifdef USE_ELAS
    Elas::parameters param;
    param.postprocess_only_left = true;
    param.disp_min = 0;
    param.disp_max = (ndisparities + 1) * 16;
    const int32_t dims[3] = {640, 480, 640};
    Elas elas( param );
    //float* D1_data = (float*)malloc(640*480*sizeof(float));
    //float* D2_data = (float*)malloc(width*height*sizeof(float));
#endif

    pcl::PointCloud< pcl::PointXYZ >::Ptr pointCloudFromDepth_ptr ( new pcl::PointCloud< pcl::PointXYZ > );
    {
	pcl::PointXYZ initPoint;
	initPoint.x = 0;
	initPoint.y = 0;
	initPoint.z = 0;
	pointCloudFromDepth_ptr->points.push_back( initPoint );
	pointCloudFromDepth_ptr->width = (int) pointCloudFromDepth_ptr->points.size ();
	pointCloudFromDepth_ptr->height = 1;
    }
    
    boost::shared_ptr< pcl::visualization::PCLVisualizer > viewer (new pcl::visualization::PCLVisualizer ("3D Viewer"));
    viewer->setBackgroundColor (0, 0, 0);
    viewer->addPointCloud<pcl::PointXYZ> (pointCloudFromDepth_ptr, "my points");
    viewer->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "my points");
    viewer->addCoordinateSystem (1.0, "global");
    viewer->initCameraParameters ();
    
    
    
    
    

    while ( 1 )
    {
	//!< Display the timestamps for all cameras to show that the image
	//!< capture is synchronized for each image
	
	#if defined(USE_OPENCV_SBM)
	sbm->setTextureThreshold( textureThreshold );
	sbm->setSmallerBlockSize( smallerBlockSize );
	#endif
	
	#if defined(USE_OPENCV_SGBM)
	sbm->setP1( P1 );
	sbm->setP2( P2 );
	#endif
	
	#if defined(USE_OPENCV_SBM) || defined(USE_OPENCV_SGBM)
	sbm->setBlockSize( (blockSize + 2) * 2 + 1 );
	sbm->setNumDisparities( (ndisparities + 1) * 16 );
	
	sbm->setUniquenessRatio( uniquenessRatio );
	sbm->setSpeckleWindowSize( speckleWindowSize );
	sbm->setSpeckleRange( speckleRange );
	sbm->setDisp12MaxDiff( disp12MaxDiff );
	#endif
	
#ifdef USE_ELAS
	param.disp_max = (ndisparities + 1) * 16;
	elas.setParameters( param );
#endif
	
        for ( unsigned int i = 0; i < numCameras; i++ )
        {
	    FlyCapture2::Image rawImage; //!< buffer: Must be here in the loop, otherwise cameras do not synchronize.
            FlyCapture2::Error error;
            error = ppCameras[i]->RetrieveBuffer( &rawImage );
            PRINT_ERROR( error );
	    
// 	    FlyCapture2::TimeStamp timestamp = rawImage.GetTimeStamp();
//             fprintf(stderr, 
//                 "Cam %d  TimeStamp [%d %d]\n", 
//                 i, 
//                 timestamp.cycleSeconds, 
//                 timestamp.cycleCount);

            memcpy( pimageCamera[i]->data, rawImage.GetData(), rawImage.GetDataSize() );
        }

        //!< rectifying images (and un-distorting?)
        cv::remap( *pimageCamera[1], *pimageCamera[1], mx1, my1, cv::INTER_LINEAR );
        cv::remap( *pimageCamera[0], *pimageCamera[0], mx2, my2, cv::INTER_LINEAR );


#if defined(USE_OPENCV_SBM) || defined(USE_OPENCV_SGBM)
        sbm->compute( *pimageCamera[1], *pimageCamera[0], imageDisparity );
#endif

#ifdef USE_ELAS
//        int static skip = 0;
//        if (skip % 15 ==0)
        {
 //         elas.process(imageCamera1.data, imageCamera0.data, (float*)imageDisparity.data,(float*)imageDisparity.data, dims);
            elas.process(pimageCamera[1]->data, pimageCamera[0]->data, (float*)imageDisparity.data,(float*)imageDisparity.data, dims);

        }
//        skip++;
#endif

        //!< normalize disparity map for imshow
        double minVal, maxVal;
        minMaxLoc( imageDisparity, &minVal, &maxVal );
        imageDisparity.convertTo( disp8U, CV_8UC1, 255/(maxVal - minVal) );


	#define DRAWTXT(img, str, x, y, s) \
	cv::putText( img, str, cv::Point(x,y), cv::FONT_HERSHEY_SIMPLEX, s, cv::Scalar::all(255) )
	
	DRAWTXT( disp8U, "disparity", 10, 20, 0.5 );
	cv::imshow( "disparity", disp8U );

	//!< disparity map --> depth map --> point cloud
	cv::Mat depth;
	cv::reprojectImageTo3D( imageDisparity, depth, Q );
	cv::Mat pointCloudFromDepth = depth.reshape( 3, depth.size().area() );
	pointCloudFromDepth_ptr->clear();
	for (int i = 0; i < pointCloudFromDepth.rows; i++)
	{
	    float *pt = pointCloudFromDepth.ptr < float > ( i );
	    pcl::PointXYZ basic_point;
	    basic_point.x = pt[0];
	    basic_point.y = pt[1];
	    basic_point.z = pt[2];
#if defined(USE_OPENCV_SBM) || defined(USE_OPENCV_SGBM)
	    basic_point.z /= 8.0; //!< simple hack, but unknown reason..... //!< still wrong. Depth maybe strange.
#endif

	    if ( pt[2] < 129 ) //!< simple hack
		pointCloudFromDepth_ptr->points.push_back ( basic_point );
	}
	pointCloudFromDepth_ptr->width = (int) pointCloudFromDepth_ptr->points.size ();
	pointCloudFromDepth_ptr->height = 1;
	viewer->updatePointCloud< pcl::PointXYZ > ( pointCloudFromDepth_ptr, "my points" );
	viewer->spinOnce ();

	
	
	
	for ( unsigned int i = 0; i < numCameras; i++ )
	{
	    // drawCameraInfo( ppCameras[i], *pimageCamera[i] );
	    cv::imshow( std::string("cam") + std::to_string(i), *pimageCamera[i] );
	}
	
	cv::imshow( "sub",  cv::abs( *pimageCamera[0] - *pimageCamera[1] ) );

	char keyValue = cv::waitKey( 10 );
	if (keyValue == 'q' || keyValue == 27 /* ESC */ )
	{
	    break;
	}
	
    }

    exit(0); //!< hack; code below may freeze.

    for ( unsigned int i = 0; i < numCameras; i++ )
    {
        ppCameras[i]->StopCapture();
        ppCameras[i]->Disconnect();
        delete ppCameras[i];
    }

    delete [] ppCameras;

    return 0;
}
Code Example #14
File: SaveVideoClass.cpp  Project: maljoras/xyTracker
void VideoSaverFlyCapture::captureThread()
{
  if (!isFlyCapture()) {
    VideoSaver::captureThread();
  } else {

    m_GrabbingFinished = false;
    m_KeepThreadAlive = true;
    m_frameNumber = 0;
    m_newFrameAvailable = false;
    FlyCapture2::Image rgbImage;
    FlyCapture2::Image rawImage;
    FlyCapture2::Image rawImage2;

    m_timer= std::clock();
  
    while (m_KeepThreadAlive) 
    {

      FlyCapture2::Error error = m_Camera.RetrieveBuffer( &rawImage );
      if ( error != FlyCapture2::PGRERROR_OK )
      {
	error.PrintErrorTrace();
      }
      
      //get the time stamp
      const double localtimestamp = M_TIMER_ELAPSED;

      // convert to bgr
      rawImage2.DeepCopy(&rawImage); // not sure if really needed since we convert below...
      rawImage2.Convert(FlyCapture2::PIXEL_FORMAT_RGB, &rgbImage );

      // compute the row stride for wrapping the data in a cv::Mat
      unsigned int rowBytes = (double) rgbImage.GetReceivedDataSize()/(double)rgbImage.GetRows();

      // copy to frame variable and update times
      { std::unique_lock<std::mutex> lock(VideoSaver::m_FrameMutex); 

	m_Frame.release();
	m_Frame = cv::Mat(rgbImage.GetRows(), rgbImage.GetCols(), CV_8UC3, rgbImage.GetData(),rowBytes);
	// could this happen ?
	if (m_Frame.size().width==0) 
	  m_Frame = cv::Mat::zeros(m_FrameSize,CV_8UC3);

	m_TimeStamp =  localtimestamp;
	m_frameNumber++;
	m_newFrameAvailable = true;
	VideoSaver::m_newFrameAvailableCond.notify_one();
      }


    } 
    rawImage.ReleaseBuffer();
    rawImage2.ReleaseBuffer();
    rgbImage.ReleaseBuffer();

    m_newFrameAvailableCond.notify_one();
    std::cout << "Finished grabbing." << std::endl;    
    m_GrabbingFinished  = true;

  }
}
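One caveat in the loop above: the cv::Mat wrapping `rgbImage.GetData()` does not own its pixels, so `m_Frame` is only valid while `rgbImage` still holds that buffer (i.e., until the next Convert call). A hedged alternative that deep-copies, at the cost of one extra copy per frame:

	// Deep-copy variant (assumption: the copy is affordable at the capture rate):
	m_Frame = cv::Mat(rgbImage.GetRows(), rgbImage.GetCols(), CV_8UC3,
	                  rgbImage.GetData(), rowBytes).clone();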