Example #1
bool FirewireVideo::GrabNewest( unsigned char* image, bool wait )
{
    dc1394video_frame_t *f;
    err = dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_POLL, &f);
    if( err != DC1394_SUCCESS)
        throw VideoException("Could not capture frame", dc1394_error_get_string(err) );

    if( f ) {
        while( true )
        {
            dc1394video_frame_t *nf;
            err = dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_POLL, &nf);
            if( err != DC1394_SUCCESS)
                throw VideoException("Could not capture frame", dc1394_error_get_string(err) );

            if( nf )
            {
                err=dc1394_capture_enqueue(camera,f);
                f = nf;
            }else{
                break;
            }
        }
        memcpy(image,f->image,f->image_bytes);
        err=dc1394_capture_enqueue(camera,f);
        return true;
    }else if(wait){
        return GrabNext(image,true);
    }
    return false;
}
Example #2
FirewireFrame FirewireVideo::GetNewest(bool wait)
{
    dc1394video_frame_t *f;
    err = dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_POLL, &f);
    if( err != DC1394_SUCCESS)
        throw VideoException("Could not capture frame", dc1394_error_get_string(err) );

    if( f ) {
        while( true )
        {
            dc1394video_frame_t *nf;
            err = dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_POLL, &nf);
            if( err != DC1394_SUCCESS)
                throw VideoException("Could not capture frame", dc1394_error_get_string(err) );

            if( nf )
            {
                err=dc1394_capture_enqueue(camera,f);
                f = nf;
            }else{
                break;
            }
        }
        return FirewireFrame(f);
    }else if(wait){
        return GetNext(true);
    }
    return FirewireFrame(0);
}
Example #3
int capture_single(capture_t *cap, const int index, capture_frame_t *frame)
{
  dc1394camera_t *cam = cap->cameras[index];
  dc1394error_t err;
  dc1394video_frame_t *vframe = NULL;
  uint32_t frames_behind = 0;

  if (cap->prefer_one_shot && cam->one_shot_capable == DC1394_TRUE) {
    err = dc1394_video_set_one_shot(cam, DC1394_ON);
    DC1394_WRN(err, "could not set one shot mode.");
    if (err != DC1394_SUCCESS) {
      return CAPTURE_ERROR;
    }
  }

  err = dc1394_capture_dequeue(cam, DC1394_CAPTURE_POLICY_WAIT, &vframe);
  DC1394_WRN(err, "could not dequeue frame.");
  if (err != DC1394_SUCCESS) {
    return CAPTURE_ERROR;
  }
  frames_behind = vframe->frames_behind;

  /* copy the image to frame->raw_data */
  s_copy_frame(vframe, frame);

  err = dc1394_capture_enqueue(cam, vframe);
  DC1394_WRN(err, "could not enqueue frame.");
  if (err != DC1394_SUCCESS) {
    return CAPTURE_ERROR;
  }

  /* drop the frames we have fallen behind by, if drop_frames is enabled */
  if (cap->drop_frames) {
    while (frames_behind-- > 0) {
      err = dc1394_capture_dequeue(cam, DC1394_CAPTURE_POLICY_WAIT, &vframe);
      DC1394_WRN(err, "could not dequeue frame.");
      if (err != DC1394_SUCCESS) {
        return CAPTURE_ERROR;
      }
      
      err = dc1394_capture_enqueue(cam, vframe);
      DC1394_WRN(err, "could not enqueue frame.");
      if (err != DC1394_SUCCESS) {
        return CAPTURE_ERROR;
      }
    }
  }

  return CAPTURE_SUCCESS;
}
Example #4
File: pgr.c Project: jbmulligan/quip
static void init_buffer_objects(QSP_ARG_DECL  PGR_Cam * pgcp )
{
	int i;
	dc1394video_frame_t *framep;
	char fname[TMPSIZE];
	Data_Obj *dp;

sprintf(ERROR_STRING,"Initializing %d buffer objects...",
pgcp->pc_ring_buffer_size);
advise(ERROR_STRING);

	// Cycle once through the ring buffer,
	// making a data object for each frame
	for(i=0;i<pgcp->pc_ring_buffer_size;i++){
		if ( dc1394_capture_dequeue( pgcp->pc_cam_p, 
			DC1394_CAPTURE_POLICY_WAIT, &framep )
			!= DC1394_SUCCESS) {
	error1("init_buffer_objects:  error in dc1394_capture_dequeue!?" );
		}
		snprintf(fname,TMPSIZE,"_frame%d",framep->id);
		assert( i == framep->id );
		dp = make_1394frame_obj(QSP_ARG  framep);
		if( dc1394_capture_enqueue(pgcp->pc_cam_p,framep)
				!= DC1394_SUCCESS ){
			error1("init_buffer_objects:  error enqueueing frame!?");
		}
		// Here we might store dp in a table...
	}
advise("Done setting up buffer objects.");
}
Example #5
void Libdc1394SequenceGrabber::idle()
{
	dc1394error_t err;
	dc1394video_frame_t *last_frame(NULL), *frame(NULL);
	if (!_camera) {
		fakeTracking();
		return;
	}
    do{     
        err = dc1394_capture_dequeue(_camera, DC1394_CAPTURE_POLICY_POLL, &frame);
        if (frame) { 
            if (last_frame)
                err=dc1394_capture_enqueue(_camera, last_frame);
            last_frame = frame; 
        }
    } while (frame);
    
	checkSuccess(err, "dc1394_capture_dequeue failed");
	
    if (_firstFrame) {
		setupBayer();
		_firstFrame = false;
	}
	
	if (last_frame) {
		processCameraImageData( last_frame->image );
		newFrameAvailable();
		
		err=dc1394_capture_enqueue(_camera, last_frame);
        checkSuccess(err, "dc1394_capture_enqueue failed");
	}
}
Example #6
void DC1394Camera::update(double timestep)
{
    dc1394video_frame_t* frame = 0;
    dc1394error_t err = DC1394_FAILURE;
    
    // Grab a frame
    LOGGER.debugStream() << m_guid << ": Grabbing frame " << ++m_frameNum;
    err = dc1394_capture_dequeue(m_camera, DC1394_CAPTURE_POLICY_WAIT, &frame);
    assert(DC1394_SUCCESS == err && "Could not capture a frame\n");

    // See if we have dropped any frames
    if(frame->frames_behind == DMA_BUFFER_SIZE - 1)
    {
        LOGGER.warn("PROBABLE FRAME DROP");
    }
    // Let us know if we are not getting the most recent frame
    LOGGER.debugStream() << m_guid << ": Frame Position: " << frame->id 
                         << " Frames Behind: " << frame->frames_behind
                         << " DMA Buffers: " << DMA_BUFFER_SIZE
                         << " DMA Timestamp: " << frame->timestamp;

    // Put the DC1394 buffer into a temporary Image so we can copy it
    // to the public side.
    OpenCVImage newImage(frame->image, m_width, m_height,
                         false, Image::PF_RGB_8);

    // Copy image to public side of the interface
    capturedImage(&newImage);

    // Free the space back up on the queue
    LOGGER.debugStream() << m_guid << ": Releasing frame " << m_frameNum;
    err = dc1394_capture_enqueue(m_camera, frame);
    assert(DC1394_SUCCESS == err && "Could not enqueue used frame\n");
}
Example #7
//--------------------------------------------------------------------
void ofxVideoGrabberPtgrey::grabFrame(){
    if (camera != NULL) {
		// get a frame
        // The first time you call the DMA capture function dc1394_capture_dequeue() it returns
		// a pointer to the first frame buffer structure (dc1394video_frame_t). After a successful
		// capture call, the frame pointer and the frame buffer it points
		// to are available to you for reading and writing. The buffer will not be overwritten
		// by a newer frame while it is allocated to you, even if the ring buffer overflows.
		// Once you have finished with it you should release it as soon as possible with a call
		// to dc1394_capture_enqueue().
		err = dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_POLL, &frame);
        if (frame == NULL) {
            bIsFrameNew = false;
        } else {
			bIsFrameNew = true;

			// copy into pixels
		/*	for( int i = 0; i < height; i++ ) {
				memcpy( pixels + (i*width), frame->image + (i*frame->stride), width *3);
			}*/
			memcpy( pixels, frame->image, width * height*3) ;

			if (bUseTexture) {
				tex.loadData(frame->image, width, height, GL_LUMINANCE);
			}

			// make frame available again as part of the
			// ring buffer receiving images from the cam
			err = dc1394_capture_enqueue(camera, frame);
		}
	}
}
Example #8
static int dc1394_v2_read_packet(AVFormatContext *c, AVPacket *pkt)
{
    struct dc1394_data *dc1394 = c->priv_data;
    int res;

    /* discard stale frame */
    if (dc1394->current_frame++) {
        if (dc1394_capture_enqueue(dc1394->camera, dc1394->frame) != DC1394_SUCCESS)
            av_log(c, AV_LOG_ERROR, "failed to release frame %d\n", dc1394->current_frame);
    }

    res = dc1394_capture_dequeue(dc1394->camera, DC1394_CAPTURE_POLICY_WAIT, &dc1394->frame);
    if (res == DC1394_SUCCESS) {
        dc1394->packet.data = (uint8_t *)(dc1394->frame->image);
        dc1394->packet.pts = (dc1394->current_frame  * 1000000) / (dc1394->fps);
        res = dc1394->frame->image_bytes;
    } else {
        av_log(c, AV_LOG_ERROR, "DMA capture failed\n");
        dc1394->packet.data = NULL;
        res = -1;
    }

    *pkt = dc1394->packet;
    return res;
}
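Example #9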
void Camera::captureFrame() {

    cv::Mat distortedFrame(camFrameHeight, camFrameWidth, CV_8UC1);
    imgIdx = (imgIdx+1) % 8;

    if (testMode) {
        distortedFrame = testImages[imgIdx].clone();
        /* faking camera image acquisition time */
        eventLoopTimer->setInterval(1000/FRAME_RATE);
    } else {
        dc1394video_frame_t *frame = NULL;
        error = dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_WAIT, &frame);
        /* copy the image out of the DMA buffer before handing it back to the ring */
        memcpy(distortedFrame.data, frame->image, camFrameHeight * camFrameWidth);
        dc1394_capture_enqueue(camera, frame);
    }
    
    /* undistort camera image */
    cv::Mat camFrame(camFrameHeight, camFrameWidth, CV_8UC1);
    undistortLUT(distortedFrame, camFrame);
    
    /* display original frame with ambient light in camera widget */
    cv::Rect cropped((camFrame.cols-width)/2, (camFrame.rows-height)/2, width, height);
    emit newCamFrame(camFrame(cropped).clone());
    
    /* remove ambient light */
    camFrame -= ambientImage;
    
    /* cropping image in center to power-of-2 size */
    cv::Mat croppedFrame = camFrame(cropped).clone();

    /* assigning image id (current active LED) to pixel in 0,0 */
    croppedFrame.at<uchar>(0, 0) = imgIdx;
    
    emit newCroppedFrame(croppedFrame);
}
Example #10
File: ffmv_ccd.cpp Project: A-j-K/indi
/**
 * Download image from FireFly
 */
void FFMVCCD::grabImage()
{
   dc1394error_t err;
   dc1394video_frame_t *frame;
   uint32_t uheight, uwidth;
   int sub;
   uint16_t val;
   struct timeval start, end;

   // Let's get a pointer to the frame buffer
   uint8_t * image = PrimaryCCD.getFrameBuffer();

   // Get width and height
   int width = PrimaryCCD.getSubW() / PrimaryCCD.getBinX();
   int height = PrimaryCCD.getSubH() / PrimaryCCD.getBinY();

   memset(image, 0, PrimaryCCD.getFrameBufferSize());


   gettimeofday(&start, NULL);
   for (sub = 0; sub < sub_count; ++sub) {
       IDMessage(getDeviceName(), "Getting sub %d of %d", sub, sub_count);
       err=dc1394_capture_dequeue(dcam, DC1394_CAPTURE_POLICY_WAIT, &frame);
       if (err != DC1394_SUCCESS) {
              IDMessage(getDeviceName(), "Could not capture frame");
       }
       dc1394_get_image_size_from_video_mode(dcam,DC1394_VIDEO_MODE_640x480_MONO16, &uwidth, &uheight);

       if (DC1394_TRUE == dc1394_capture_is_frame_corrupt(dcam, frame)) {
              IDMessage(getDeviceName(), "Corrupt frame!");
              dc1394_capture_enqueue(dcam, frame);
              continue;
       }
       // Accumulate this sub-exposure into the image buffer
       for (int i=0; i < height ; i++) {
           for (int j=0; j < width; j++) {
               /* Detect unsigned overflow and saturate */
               val = ((uint16_t *) image)[i*width+j] + ntohs(((uint16_t*) (frame->image))[i*width+j]);
               if (val >= ((uint16_t *) image)[i*width+j]) {
                   ((uint16_t *) image)[i*width+j] = val;
               } else {
                   ((uint16_t *) image)[i*width+j] = 0xFFFF;
               }
           }
       }
           }
       }

       dc1394_capture_enqueue(dcam, frame);
   }
   /*-----------------------------------------------------------------------
    *  stop data transmission
    *-----------------------------------------------------------------------*/
   err=dc1394_video_set_transmission(dcam,DC1394_OFF);
   IDMessage(getDeviceName(), "Download complete.");
   gettimeofday(&end, NULL);
   IDMessage(getDeviceName(), "Download took %d uS", (int) ((end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec)));

   // Let INDI::CCD know we're done filling the image buffer
   ExposureComplete(&PrimaryCCD);
}
Example #11
File: dc1394.c Project: cobr123/qtVlc
/*****************************************************************************
 * Demux:
 *****************************************************************************/
static block_t *GrabVideo( demux_t *p_demux )
{
    demux_sys_t *p_sys = p_demux->p_sys;
    block_t     *p_block = NULL;

    if( dc1394_capture_dequeue( p_sys->camera,
                DC1394_CAPTURE_POLICY_WAIT,
                &p_sys->frame ) != DC1394_SUCCESS )
    {
        msg_Err( p_demux, "Unable to capture a frame" );
        return NULL;
    }

    p_block = block_New( p_demux, p_sys->frame->size[0] *
                                  p_sys->frame->size[1] * 2 );
    if( !p_block )
    {
        msg_Err( p_demux, "Can not get block" );
        dc1394_capture_enqueue( p_sys->camera, p_sys->frame );
        return NULL;
    }

    if( !p_sys->frame->image )
    {
        msg_Err( p_demux, "Capture buffer empty" );
        block_Release( p_block );
        dc1394_capture_enqueue( p_sys->camera, p_sys->frame );
        return NULL;
    }

    memcpy( p_block->p_buffer, (const char *)p_sys->frame->image,
            p_sys->width * p_sys->height * 2 );

    p_block->i_pts = p_block->i_dts = mdate();
    dc1394_capture_enqueue( p_sys->camera, p_sys->frame );
    return p_block;
}
Example #12
//=============================================================================
// extractImagesMono()
//
// De-interleave the stereo images into single images
// Construct a TriclopsInput for stereo processing from these images.
//
void
extractImagesMono( PGRStereoCamera_t* 	stereoCamera, 
		   unsigned char* 	pucDeInterleaved,
		   unsigned char** 	ppucRightMono8,
		   unsigned char** 	ppucLeftMono8,
		   unsigned char** 	ppucCenterMono8) 
{

   dc1394error_t err;
   // RC7
   dc1394video_frame_t* frame;
   err = dc1394_capture_dequeue( stereoCamera->camera,
				 DC1394_CAPTURE_POLICY_WAIT,
				 &frame );
   if ( err != DC1394_SUCCESS )
   {
      fprintf( stderr, "extractImagesColor - cannot dequeue image!\n" );
      return;
   }

   unsigned char* pucGrabBuffer = frame->image;

   unsigned char* right;
   unsigned char* left;
   unsigned char* center;
   if ( stereoCamera->nBytesPerPixel == 2 )
   {
      // de-interlace the 16 bit data into 2 mono images
      dc1394_deinterlace_stereo( pucGrabBuffer,
				 pucDeInterleaved,
				 stereoCamera->nCols,
				 2*stereoCamera->nRows );
      right = pucDeInterleaved;
      left  = pucDeInterleaved + stereoCamera->nRows * stereoCamera->nCols;
      center= left;
   }
   else
   {
      dc1394_deinterlace_rgb( pucGrabBuffer,
			      pucDeInterleaved,
			      stereoCamera->nCols,
			      3*stereoCamera->nRows );

      // NOTE: this code needs to be double checked.
      // Currently 3-bytes-per-pixel is not activatable in this example
      right 	= pucDeInterleaved;
      center  	= pucDeInterleaved + stereoCamera->nRows * stereoCamera->nCols;
      left	= pucDeInterleaved + 2 * stereoCamera->nRows * stereoCamera->nCols;
   }
      
   *ppucRightMono8 	= right;
   *ppucLeftMono8 	= left;
   *ppucCenterMono8 	= center;

   // return buffer for use
   dc1394_capture_enqueue( stereoCamera->camera, frame );

   return;
}
Example #13
void ofxLibdc::flushBuffer() {
	dc1394video_frame_t *frame = NULL;
	do {
		dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_POLL, &frame);
		if(frame != NULL)
			dc1394_capture_enqueue(camera, frame);
	} while (frame != NULL);
}
Example #14
static GstFlowReturn
gst_dc1394_create (GstPushSrc * psrc, GstBuffer ** buffer)
{
    GstDc1394 *src;
    GstBuffer *outbuf;
    GstCaps *caps;
    dc1394video_frame_t *frame[1];
    GstFlowReturn res = GST_FLOW_OK;
    dc1394error_t err;

    src = GST_DC1394 (psrc);

    err = dc1394_capture_dequeue (src->camera, DC1394_CAPTURE_POLICY_WAIT, frame);

    if (err != DC1394_SUCCESS) {
        GST_ELEMENT_ERROR (src, RESOURCE, FAILED,
                           ("failed to dequeue frame"), ("failed to dequeue frame"));
        goto error;
    }

    outbuf = gst_buffer_new_and_alloc (frame[0]->image_bytes);

    memcpy (GST_BUFFER_MALLOCDATA (outbuf), (guchar *) frame[0]->image,
            frame[0]->image_bytes * sizeof (guchar));

    GST_BUFFER_DATA (outbuf) = GST_BUFFER_MALLOCDATA (outbuf);

    caps = gst_pad_get_caps (GST_BASE_SRC_PAD (psrc));
    gst_buffer_set_caps (outbuf, caps);
    gst_caps_unref (caps);

    GST_BUFFER_TIMESTAMP (outbuf) = src->timestamp_offset + src->running_time;
    if (src->rate_numerator != 0) {
        GST_BUFFER_DURATION (outbuf) = gst_util_uint64_scale_int (GST_SECOND,
                                       src->rate_denominator, src->rate_numerator);
    }

    src->n_frames++;
    if (src->rate_numerator != 0) {
        src->running_time = gst_util_uint64_scale_int (src->n_frames * GST_SECOND,
                            src->rate_denominator, src->rate_numerator);
    }

    if (dc1394_capture_enqueue (src->camera, frame[0]) != DC1394_SUCCESS) {
        GST_ELEMENT_ERROR (src, RESOURCE, FAILED, ("failed to enqueue frame"),
                           ("failed to enqueue frame"));
        goto error;
    }

    *buffer = outbuf;

    return res;

error:
    {
        return GST_FLOW_ERROR;
    }
}
Example #15
FirewireFrame FirewireVideo::GetNext(bool wait)
{
    const dc1394capture_policy_t policy =
            wait ? DC1394_CAPTURE_POLICY_WAIT : DC1394_CAPTURE_POLICY_POLL;

    dc1394video_frame_t *frame;
    dc1394_capture_dequeue(camera, policy, &frame);
    return FirewireFrame(frame);
}
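Example #16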
void Camera::captureAmbientImage() {
    
    /* capture image with no LEDs to subtract ambient light */
    ambientImage = cv::Mat(camFrameHeight, camFrameWidth, CV_8UC1);
    dc1394video_frame_t *frame = NULL;
    error = dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_WAIT, &frame);
    memcpy(ambientImage.data, frame->image, camFrameHeight*camFrameWidth*sizeof(uchar));
    dc1394_capture_enqueue(camera, frame);
}
Example #17
File: pgr.c Project: jbmulligan/quip
Data_Obj * grab_newest_firewire_frame( QSP_ARG_DECL  PGR_Cam * pgcp )
{
	dc1394video_frame_t *framep, *prev_framep=NULL;
	//int i=0;
	int n_dequeued=0;

	if( ! ready_to_grab( QSP_ARG  pgcp ) )
		return NULL;

	// We might want to release all of the frames we have now, in case we
	// need to automatically release any that we grab in the meantime,
	// so that we release in order...

	// We get the newest by dequeueing in POLL mode, until we come up empty.
	// If we have at least one frame at that time, then that's the frame.
	// If we don't have any, then we WAIT.  If at any time we have
	// more than 1, then we release the older.

	while( 1 ){
		if ( dc1394_capture_dequeue( pgcp->pc_cam_p, DC1394_CAPTURE_POLICY_POLL,
				&framep ) != DC1394_SUCCESS) {
			fprintf( stderr, "Unable to capture a frame\n" );
			return(NULL);
		}
		if( framep == NULL ){	// No frame to fetch?
			if( n_dequeued > 0 ){	// already have something?
				// The last one is the newest!
				sprintf(msg_str,"%d",prev_framep->id);
				assign_var("newest",msg_str);
				note_frame_usage(pgcp,prev_framep);
				return dobj_for_frame(QSP_ARG  prev_framep);
			} else {		// No frames yet...
				// We don't want to call the WAIT version here, because
				// we might have multiple cameras...
				return NULL;
			}
		} else {	// We have a new frame
			if( prev_framep != NULL ){	// already have one?
				if( dc1394_capture_enqueue(pgcp->pc_cam_p,prev_framep)
						!= DC1394_SUCCESS ){
					WARN("error enqueueing frame");
				}
			} else {
				// This counts the frame we dequeued.
				// We don't bother if we just enqueued
				// the previous one.
				pgcp->pc_n_avail--;
			}
			prev_framep = framep;
			n_dequeued++;
		}
	}
	// NOTREACHED
}
Example #18
void
extractImagesColorXB3( PGRStereoCamera_t* 	 stereoCamera, 
		       dc1394bayer_method_t bayerMethod,
		       unsigned char* 	pucDeInterleaved,
		       unsigned char* 	pucRGB,
		       unsigned char* 	pucGreen,
		       unsigned char** 	ppucRightRGB,
		       unsigned char** 	ppucLeftRGB,
		       unsigned char** 	ppucCenterRGB) 
{

   dc1394error_t err;
   dc1394video_frame_t* frame;
   err = dc1394_capture_dequeue( stereoCamera->camera,
				 DC1394_CAPTURE_POLICY_WAIT,
				 &frame );
   if ( err != DC1394_SUCCESS )
   {
      fprintf( stderr, "extractImagesColorXB3 - cannot dequeue image!\n" );
      return;
   }

   unsigned char* pucGrabBuffer = frame->image;

   dc1394_deinterlace_rgb( pucGrabBuffer,
			   pucDeInterleaved,
			   stereoCamera->nCols,
			   3*stereoCamera->nRows );
   // extract color from the bayer tile image
   // note: this will alias colors on the top and bottom rows
   dc1394_bayer_decoding_8bit( pucDeInterleaved,
			       pucRGB,
			       stereoCamera->nCols,
			       3*stereoCamera->nRows,
			       stereoCamera->bayerTile,
			       bayerMethod );
   // now deinterlace the RGB Buffer
   dc1394_deinterlace_green( pucRGB,
			     pucGreen,
			     stereoCamera->nCols,
			     9*stereoCamera->nRows );
   // NOTE: this code needs to be double checked.
   // Currently 3-bytes-per-pixel is not activatable in this example
   int iOneBufferPixels = stereoCamera->nRows * stereoCamera->nCols;
   *ppucLeftRGB 	= pucRGB;
   *ppucCenterRGB 	= pucRGB + 3 * iOneBufferPixels;
   *ppucRightRGB 	= pucRGB + 6 * iOneBufferPixels;
      

   // return buffer for use
   dc1394_capture_enqueue( stereoCamera->camera, frame );
   return;
}
Example #19
bool ofxLibdc::grabFrame(ofImage& img) {
	dc1394video_frame_t *frame = NULL;
	dc1394_capture_dequeue(camera, capturePolicy, &frame);
	if(frame != NULL) {
		if(imageType == OF_IMAGE_GRAYSCALE) {
			memcpy(img.getPixels(), frame->image, width * height);
		} else if(imageType == OF_IMAGE_COLOR) {
			// color conversion is not implemented in this example
		}
		dc1394_capture_enqueue(camera, frame);
		return true;
	} else {
		return false;
	}
}
Example #20
void ofxLibdc::grabStill(ofImage& img) {
	setTransmit(false);
	flushBuffer();
	dc1394_video_set_one_shot(camera, DC1394_ON);
	// if possible, the following should be replaced with a call to grabFrame
	dc1394video_frame_t *frame = NULL;
	dc1394_capture_dequeue(camera, capturePolicy, &frame);
	img.allocate(width, height, imageType);
	if(frame != NULL) {
		if(imageType == OF_IMAGE_GRAYSCALE) {
			memcpy(img.getPixels(), frame->image, width * height);
		} else if(imageType == OF_IMAGE_COLOR) {
			// color conversion is not implemented in this example
		}
		dc1394_capture_enqueue(camera, frame);
	}
}
Example #21
File: FWCamera.cpp Project: lynxis/libavg
BitmapPtr FWCamera::getImage(bool bWait)
{
#ifdef AVG_ENABLE_1394_2
    bool bGotFrame = false;
    unsigned char * pCaptureBuffer = 0;
    dc1394video_frame_t * pFrame;
    dc1394error_t err;
    if (bWait) {
        err = dc1394_capture_dequeue(m_pCamera, DC1394_CAPTURE_POLICY_WAIT, &pFrame);
    } else {
        err = dc1394_capture_dequeue(m_pCamera, DC1394_CAPTURE_POLICY_POLL, &pFrame);
    }
    if (err == DC1394_SUCCESS && pFrame) {
        bGotFrame = true;
        pCaptureBuffer = pFrame->image;
    }
    if (bGotFrame) {
        int lineLen;
        if (getCamPF() == YCbCr411) {
            lineLen = getImgSize().x*1.5;
        } else {
            lineLen = getImgSize().x*getBytesPerPixel(getCamPF());
        }
        BitmapPtr pCamBmp(new Bitmap(getImgSize(), getCamPF(), pCaptureBuffer, lineLen,
                false, "TempCameraBmp"));
        BitmapPtr pDestBmp = convertCamFrameToDestPF(pCamBmp);
//        cerr << "CamBmp: " << pCamBmp->getPixelFormat() << ", DestBmp: " 
//                << pDestBmp->getPixelFormat() << endl;
        dc1394_capture_enqueue(m_pCamera, pFrame);
        return pDestBmp;
    } else {
        return BitmapPtr();
    }
#else
    return BitmapPtr();
#endif
}
Example #22
    bool
    VideoIIDC1394::frameCapture(void)
    {
#if defined(DUNE_WITH_DC1394)
      dc1394error_t error_code;
      error_code = dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_WAIT, &frame);
      if (error_code < 0)
      {
        throw std::runtime_error("Could not capture a frame");
      }
      return true;
#else
      return false;
#endif
    }
Example #23
File: pgr.c Project: jbmulligan/quip
Data_Obj * grab_firewire_frame(QSP_ARG_DECL  PGR_Cam * pgcp )
{
	dc1394video_frame_t *framep;
	//dc1394capture_policy_t policy=DC1394_CAPTURE_POLICY_WAIT;
	Data_Obj *dp;
	char fname[TMPSIZE];

	// Before attempting to dequeue, make sure that we have at least one
	// available...  The library seems to hang if we keep
	// grabbing without releasing.

	if( ! ready_to_grab(QSP_ARG  pgcp) )
		return NULL;

	/* POLICY_WAIT waits for the next frame...
	 * POLICY_POLL returns right away if there is no frame available.
	 */
	if ( dc1394_capture_dequeue( pgcp->pc_cam_p, 
		pgcp->pc_policy, &framep ) != DC1394_SUCCESS) {
		fprintf( stderr, "Unable to capture a frame\n" );
		return(NULL);
	}
	if( framep == NULL ){
		if( pgcp->pc_policy != DC1394_CAPTURE_POLICY_POLL )
			WARN("dc1394_capture_dequeue returned a null frame.");
		return NULL;
	}

	pgcp->pc_n_avail--;

	//sprintf(fname,"_frame%d",framep->id);
	snprintf(fname,TMPSIZE,"_frame%d",framep->id);
	dp = get_obj(fname);
	if( dp == NULL ){
		warn("grab_firewire_frame:  unable to create frame object");
		return(NULL);
	}

	assert( OBJ_DATA_PTR(dp) == framep->image );

	/* in the other case, the pointer is likely to be unchanged,
	 * but we don't assume...
	 * We *do* assume that the old size is still ok.
	 */

	note_frame_usage(pgcp,framep);
	return(dp);
} // end grab_firewire_frame
Example #24
IplImage *dc1394_capture_get_iplimage(dc1394camera_t *camera)
{
    dc1394error_t err;
    dc1394video_frame_t *frame;
    IplImage *img;

    err = dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_WAIT, &frame);
    DC1394_WRN(err,"Could not capture a frame");

    img = dc1394_frame_get_iplimage(frame);

    err = dc1394_capture_enqueue(camera, frame);
    DC1394_WRN(err,"releasing buffer");

    return img;
}
Example #25
void CameraIIDC::flushBuffer(){
    // This function is courtesy of ofxVideoGrabber/Libdc1394Grabber
    bool bufferEmpty = false;

    while (!bufferEmpty){
        if(dc1394_capture_dequeue(cam, DC1394_CAPTURE_POLICY_POLL, &currentFrame) == DC1394_SUCCESS){
            if(currentFrame != NULL){
                dc1394_capture_enqueue(cam, currentFrame);
            } else {
                bufferEmpty = true;
            }
        } else {
            bufferEmpty = true;
        }
    }
}
Example #26
File: dc1394.cpp Project: neurodroid/gnoom
void* thread_acq_image(void*) {
    timespec time_save0, time_save1, t_sleep, t_rem;
    t_sleep.tv_sec = 0;
    t_sleep.tv_nsec = 10;

    dc1394video_frame_t *frame;

    for (;;) {
        /* wait for image */
        pthread_mutex_lock( &camera_mutex );
        int ret = gCamera.wait_for_image(1);
        pthread_mutex_unlock( &camera_mutex );
        if (ret) {
            pthread_mutex_lock( &camera_mutex );
            dc1394error_t err = dc1394_capture_dequeue(gCamera.cam(), DC1394_CAPTURE_POLICY_POLL, &frame);
            /* frame->timestamp appears to be broken, so we have to resort to clock_gettime */
            timespec fts;
            clock_gettime( CLOCK_REALTIME, &fts );
            double ft = t2d(fts);
            pthread_mutex_unlock( &camera_mutex );
            if (err) {
                std::cerr << dc1394_error_get_string(err) << "\nCould not capture frame" << std::endl;
                cleanup_and_exit(gCamera);
            }

            int width = frame->size[0];
            int height = frame->size[1];
            pthread_mutex_lock( &acq_buffer_mutex );
            acq_frame_buffer.push(saveframe(std::vector<unsigned char>(&(frame->image)[0],
                                                                       &(frame->image)[width*height]),
                                            width, height, ft)); // (double)frame->timestamp));
            pthread_mutex_unlock( &acq_buffer_mutex );

            // return the frame to the ring buffer only after its data has been copied:
            pthread_mutex_lock( &camera_mutex );
            err = dc1394_capture_enqueue(gCamera.cam(), frame);
            pthread_mutex_unlock( &camera_mutex );
            if (err) {
                std::cerr << dc1394_error_get_string(err) << "\nCould not return frame to ring buffer" << std::endl;
                cleanup_and_exit(gCamera);
            }
        } else {
            nanosleep(&t_sleep, &t_rem);
        }
    }
}
Example #27
CameraFrame CameraIIDC::getFrame(){

    CameraFrame frame;

    if (!capturing) {
        cerr << "ERROR: Not capturing on camera. Call startCapture() before lockFrame()." << endl;
        return frame;
    }

    dc1394error_t err;
    
    if(triggerMode == triggerModeSoftware){

        if (cam->one_shot_capable != DC1394_TRUE){
            cerr << "ERROR: Camera is not one_shot_capable." << endl;
            return frame;
        }

        // Flush the ring buffer
        flushBuffer();

        // One-shot trigger (assignment, not comparison)
        err = dc1394_video_set_one_shot(cam, DC1394_ON);

    }

    // Get frame from ring buffer:
    err = dc1394_capture_dequeue(cam, DC1394_CAPTURE_POLICY_WAIT, &currentFrame);
    if (err!=DC1394_SUCCESS){
        cerr << "ERROR: Could not capture a frame." << endl;
        return frame;
    }

    // Copy frame address and properties while the buffer is still checked out to us
    // (frame.memory still points into the DMA ring; consumers must copy it before
    // the buffer is reused)
    frame.memory = currentFrame->image;
    frame.width = currentFrame->size[0];
    frame.height = currentFrame->size[1];
    frame.sizeBytes = currentFrame->image_bytes;

    // Return the frame to the ring buffer:
    dc1394_capture_enqueue(cam, currentFrame);
    currentFrame = NULL;

    return frame;
}
Example #28
int main(int argc, char *argv[])
{
    dc1394camera_t * camera;
    dc1394error_t err;
    dc1394video_frame_t * frame;
    dc1394_t * d;
    dc1394camera_list_t * list;

    d = dc1394_new ();                                                     /* Initialize libdc1394 */
    if (!d)
        return 1;

    err=dc1394_camera_enumerate (d, &list);                                /* Find cameras */
    DC1394_ERR_RTN(err,"Failed to enumerate cameras");

    if (list->num == 0) {                                                  /* Verify that we have at least one camera */
        dc1394_log_error("No cameras found");
        return 1;
    }

    camera = dc1394_camera_new (d, list->ids[0].guid);                     /* Work with first camera */
    if (!camera) {
        dc1394_log_error("Failed to initialize camera with guid %llx", list->ids[0].guid);
        return 1;
    }
    dc1394_camera_free_list (list);

    err=dc1394_capture_setup(camera, 4, DC1394_CAPTURE_FLAGS_DEFAULT);     /* Setup capture */

    err=dc1394_video_set_transmission(camera, DC1394_ON);                  /* Start transmission */
    
    err=dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_WAIT, &frame);/* Capture */
    DC1394_ERR_RTN(err,"Problem getting an image");

    err=dc1394_capture_enqueue(camera, frame);                             /* Release the buffer */

    err=dc1394_video_set_transmission(camera, DC1394_OFF);                 /* Stop transmission */

    err=dc1394_capture_stop(camera);                                       /* Stop capture */

    printf("Hello World\n");                                               /* Hey, this is a HELLO WORLD program!! */

    dc1394_camera_free (camera);                                           /* cleanup and exit */
    dc1394_free (d);
    return 0;
}
Example #29
bool FirewireVideo::GrabNext( unsigned char* image, bool wait )
{
    const dc1394capture_policy_t policy =
            wait ? DC1394_CAPTURE_POLICY_WAIT : DC1394_CAPTURE_POLICY_POLL;

    dc1394video_frame_t *frame;
    err = dc1394_capture_dequeue(camera, policy, &frame);
    if( err != DC1394_SUCCESS)
        throw VideoException("Could not capture frame", dc1394_error_get_string(err) );

    if( frame )
    {
        memcpy(image,frame->image,frame->image_bytes);
        dc1394_capture_enqueue(camera,frame);
        return true;
    }
    return false;
}
Example #30
static void s_flush_buffer(capture_t *cap, const int index)
{
  dc1394camera_t *cam = cap->cameras[index];

  dc1394switch_t pwr = DC1394_OFF;
  dc1394video_frame_t *frame = NULL;

  dc1394_video_get_transmission(cam, &pwr);
  if (pwr == DC1394_ON) {
    dc1394_video_set_transmission(cam, DC1394_OFF);
  }

  while (dc1394_capture_dequeue(cam, DC1394_CAPTURE_POLICY_POLL, &frame), frame != NULL) {
    dc1394error_t err;

    err = dc1394_capture_enqueue(cam, frame);
    DC1394_ERR(err, "could not enqueue");
  }
}