Code Example #1
 void VideoFrame::ImageView(IplImage* view) const {
   ASSURE_LOG(view) << "View not set.";
   ASSERT_LOG(width_step_ % 4 == 0) << "IplImages need to have width_step_ "
                                    << "divisible by 4.";
   cvInitImageHeader(view, cvSize(width_, height_), IPL_DEPTH_8U, channels_);
   cvSetData(view, data_, width_step_);
 }
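Every example on this page follows the same two-step pattern: cvInitImageHeader fills in the fields of a caller-owned IplImage struct without allocating any pixel memory, and cvSetData (or a direct imageData assignment) then attaches a buffer that someone else owns. A minimal sketch of that pattern, assuming a packed 8-bit, 3-channel buffer whose stride the caller supplies (the function name and parameters are illustrative, not taken from any of the projects below):

#include <opencv2/core/core_c.h>

// Wrap an externally owned 8-bit BGR buffer in an IplImage header (sketch).
IplImage WrapBuffer(unsigned char* data, int width, int height, int widthStep)
{
    IplImage view;
    // Fill in the header fields only; no pixel memory is allocated here.
    cvInitImageHeader(&view, cvSize(width, height), IPL_DEPTH_8U, 3);
    // Point the header at the caller's buffer, with its real row stride.
    cvSetData(&view, data, widthStep);
    return view;  // the caller's buffer must outlive this view
}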
Code Example #2
File: cap_aravis.cpp Project: acss/opencv
IplImage* CvCaptureCAM_Aravis::retrieveFrame(int)
{
    if(framebuffer) {
        int depth = 0, channels = 0;
        switch(pixelFormat) {
            case ARV_PIXEL_FORMAT_MONO_8:
                depth = IPL_DEPTH_8U;
                channels = 1;
                break;
            case ARV_PIXEL_FORMAT_MONO_12:
                depth = IPL_DEPTH_16U;
                channels = 1;
                break;
        }
        if(depth && channels) {
            IplImage src;
            cvInitImageHeader( &src, cvSize( width, height ), depth, channels, IPL_ORIGIN_TL, 4 );

            cvSetData( &src, framebuffer, src.widthStep );
            if( !frame ||
                 frame->width != src.width ||
                 frame->height != src.height ||
                 frame->depth != src.depth ||
                 frame->nChannels != src.nChannels) {

                cvReleaseImage( &frame );
                frame = cvCreateImage( cvGetSize(&src), src.depth, channels );
            }
            cvCopy(&src, frame);

            return frame;
        }
    }
    return NULL;
}
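This retrieveFrame() and several later ones (#3, #12, #14, #21, #27) share the same reuse idiom: keep the destination frame across calls and reallocate it only when the source geometry or format changes. Extracted as a stand-alone helper for clarity (the names are assumptions, not code from these projects):

#include <opencv2/core/core_c.h>

// Reuse *frame while it still matches src; otherwise release and recreate it.
void EnsureFrameMatches(IplImage** frame, const IplImage* src)
{
    if (!*frame ||
        (*frame)->width     != src->width  ||
        (*frame)->height    != src->height ||
        (*frame)->depth     != src->depth  ||
        (*frame)->nChannels != src->nChannels)
    {
        cvReleaseImage(frame);  // no-op when *frame is already NULL
        *frame = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
    }
}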
Code Example #3
File: cap_vfw.cpp Project: 12rohanb/opencv
IplImage* CvCaptureAVI_VFW::retrieveFrame(int)
{
    if( avistream && bmih )
    {
        bool isColor = bmih->biBitCount == 24;
        int nChannels = (isColor) ? 3 : 1;
        IplImage src;
        cvInitImageHeader( &src, cvSize( bmih->biWidth, bmih->biHeight ),
                           IPL_DEPTH_8U, nChannels, IPL_ORIGIN_BL, 4 );

        char* dataPtr = (char*)(bmih + 1);

        // Only account for the color map size if we are an 8-bit image and the color map is used
        if (!isColor)
        {
            static int RGBQUAD_SIZE_PER_BYTE = sizeof(RGBQUAD)/sizeof(BYTE);
            int offsetFromColormapToData = (int)bmih->biClrUsed*RGBQUAD_SIZE_PER_BYTE;
            dataPtr += offsetFromColormapToData;
        }

        cvSetData( &src, dataPtr, src.widthStep );

        if( !frame || frame->width != src.width || frame->height != src.height )
        {
            cvReleaseImage( &frame );
            frame = cvCreateImage( cvGetSize(&src), 8, nChannels );
        }

        cvFlip( &src, frame, 0 );
        return frame;
    }

    return 0;
}
Code Example #4
File: cvarray.cpp Project: mikanradojevic/sdkpub
// create IplImage header
CV_IMPL IplImage *
cvCreateImageHeader( CvSize size, int depth, int channels )
{
    IplImage *img = 0;

    CV_FUNCNAME( "cvCreateImageHeader" );

    __BEGIN__;

    if( !CvIPL.createHeader )
    {
        CV_CALL( img = (IplImage *)cvAlloc( sizeof( *img )));
        CV_CALL( cvInitImageHeader( img, size, depth, channels, IPL_ORIGIN_TL, 4, 1 ));
    }
    else
    {
        char *colorModel;
        char *channelSeq;

        icvGetColorModel( channels, &colorModel, &channelSeq );

        img = CvIPL.createHeader( channels, 0, depth, colorModel, channelSeq,
                                  IPL_DATA_ORDER_PIXEL, IPL_ORIGIN_TL, 4,
                                  size.width, size.height, 0, 0, 0, 0 );
    }

    __END__;

    if( cvGetErrStatus() < 0 && img )
        cvReleaseImageHeader( &img );

    return img;
}
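Unlike cvInitImageHeader, cvCreateImageHeader heap-allocates the header itself, so it must be paired with cvReleaseImageHeader, which frees the header but not the pixel data. A hypothetical caller (ProcessBorrowedPixels and its parameters are illustrative):

#include <opencv2/core/core_c.h>

void ProcessBorrowedPixels(unsigned char* pixels, int width, int height, int step)
{
    IplImage* hdr = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, 1);
    cvSetData(hdr, pixels, step);
    /* ... pass hdr to IplImage-based C-API routines ... */
    cvReleaseImageHeader(&hdr);  // frees the header only, never the pixels
}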
Code Example #5
File: matrix_c.cpp Project: AliMiraftab/opencv
_IplImage cvIplImage(const cv::Mat& m)
{
    _IplImage self;
    CV_Assert( m.dims <= 2 );
    cvInitImageHeader(&self, cvSize(m.size()), cvIplDepth(m.flags), m.channels());
    cvSetData(&self, m.data, (int)m.step[0]);
    return self;
}
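This is how a non-owning IplImage view of a cv::Mat is built. A sketch of how it might be used to hand a Mat to legacy C-API code (legacy_c_function is a hypothetical placeholder, stubbed out here so the sketch is self-contained):

#include <opencv2/core.hpp>
#include <opencv2/core/core_c.h>

static void legacy_c_function(IplImage* img) { (void)img; }  // hypothetical consumer

void CallLegacy(const cv::Mat& m)
{
    IplImage view = cvIplImage(m);  // header only; shares m's pixel data
    legacy_c_function(&view);
}   // nothing to release: view never owned the pixels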
Code Example #6
File: wimage.hpp Project: 353/viewercv
WImageViewC<T, C>::WImageViewC(T* data, int width, int height, int width_step)
    : WImageC<T, C>(0) {
    cvInitImageHeader(&header_, cvSize(width, height), WImage<T>::Depth(), C);
    header_.imageData = reinterpret_cast<char*>(data);
    if (width_step > 0) {
        header_.widthStep = width_step;
    }
    WImageC<T, C>::SetIpl(&header_);
}
Code Example #7
File: cap_ffmpeg.cpp Project: 5kg/opencv
    virtual IplImage* retrieveFrame(int)
    {
        unsigned char* data = 0;
        int step=0, width=0, height=0, cn=0;

        if(!ffmpegCapture ||
           !icvRetrieveFrame_FFMPEG_p(ffmpegCapture,&data,&step,&width,&height,&cn))
           return 0;
        cvInitImageHeader(&frame, cvSize(width, height), 8, cn);
        cvSetData(&frame, data, step);
        return &frame;
    }
Code Example #8
File: V4L.cpp Project: krejmano/DisCODe
/*!
 * Method sets memory for memory mapping.
 */
bool V4L::setMemMap() {

    v_map = (struct video_mmap *) (malloc(m_buf.frames
                                          * sizeof(struct video_mmap)));

    cvInitImageHeader(&frame, cvSize(win.width, win.height), IPL_DEPTH_8U, 3,
                      IPL_ORIGIN_TL, 4);

    frame.imageData = (char *) cvAlloc(frame.imageSize);

    FirstCapture = 1;

    /// \todo: always true?
    return true;
}
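The allocate-after-init idiom above works because cvInitImageHeader computes widthStep and imageSize from the requested geometry and alignment, so the subsequent cvAlloc(frame.imageSize) is guaranteed to fit the header. As a stand-alone sketch (the helper name is an assumption):

#include <opencv2/core/core_c.h>

IplImage MakeOwnedFrame(int width, int height)
{
    IplImage frame;
    cvInitImageHeader(&frame, cvSize(width, height), IPL_DEPTH_8U, 3,
                      IPL_ORIGIN_TL, 4);               // fills widthStep/imageSize
    frame.imageData = (char*)cvAlloc(frame.imageSize); // allocate exactly that much
    return frame;  // release later with cvFree(&frame.imageData)
}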
Code Example #9
File: V4L.cpp Project: krejmano/DisCODe
/*!
 * Method returns one frame (IplImage).
 */
bool V4L::retFrame() {
    if (ioctl(video_dev, VIDIOCSYNC, &((*(v_map + bufferIndex)).frame)) == -1) {
        LOG(LERROR) << "function: retFrame\n";
        return false;
    }

    if ((frame.width != (*(v_map + bufferIndex)).width) || (frame.height
            != (*(v_map + bufferIndex)).height)) {
        cvFree(&(frame.imageData));
        cvInitImageHeader(&frame, cvSize(win.width, win.height), IPL_DEPTH_8U,
                          3, IPL_ORIGIN_TL, 4);
        frame.imageData = (char *) cvAlloc(frame.imageSize);

    }

    memcpy((char *) (frame.imageData), (char *) (map
            + m_buf.offsets[bufferIndex]), frame.imageSize);
    //memcpy((char *)(original_frame.imageData),(char *)(map + m_buf.offsets[bufferIndex]),original_frame.imageSize);
    /*
     switch(pic.palette)
     {
     case RGB24:
     memcpy((char *)(frame.imageData),(char *)(map + m_buf.offsets[bufferIndex]),frame.imageSize);
     break;
     case YUV422P:
     yuv420p_to_rgb24(win.width,
     win.height,
     (unsigned char*)(map + m_buf.offsets[bufferIndex]),
     (unsigned char*)frame.imageData);
     break;
     case YUV420:
     yuv420_to_rgb24(win.width,
     win.height,
     (unsigned char*)(map + m_buf.offsets[bufferIndex]),
     (unsigned char*)frame.imageData);
     break;
     case YUV411P:
     yuv411p_to_rgb24(win.width,
     win.height,
     (unsigned char*)(map + m_buf.offsets[bufferIndex]),
     (unsigned char*)frame.imageData);
     break;
     default:
     printf("format is not supported V4L\n");
     return false;
     }*/
    return true;
}
Code Example #10
int C920Camera::InitializeCapture(V4L2CameraCapture* capture) {
    capture->Buffers[MAX_V4L_BUFFERS].start = NULL;

    fprintf(stdout, "C920Camera::InitilizeCapture INFO: Opening capture device %s.\n", capture->DeviceName);
    capture->DeviceHandle = open(capture->DeviceName, O_RDWR /* required */| O_NONBLOCK, 0);
    if (capture->DeviceHandle == 0) {
        fprintf(stderr, "C920Camera::InitilizeCapture ERROR: Unable to open %s.", capture->DeviceName);
        this->CloseCapture(capture);
        return -1;
    }

    fprintf(stdout, "C920Camera::InitilizeCapture INFO: Quering capture device capability %s.\n", capture->DeviceName);
    CLEAR(capture->V4L2Capability);
    if (-1 == xioctl(capture->DeviceHandle, VIDIOC_QUERYCAP, &capture->V4L2Capability)) {
        fprintf(stderr, "C920Camera::InitilizeCapture ERROR: Unable query capability from %s.", capture->DeviceName);
        this->CloseCapture(capture);
        return -2;
    }

    if ((capture->V4L2Capability.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) {
        fprintf(stderr, "C920Camera::InitializeCapture ERROR: V4L2: Device %s is unable to capture video memory.\n",
                capture->DeviceName);
        this->CloseCapture(capture);
        return -3;
    }

    if (!this->SetCaptureFormat(capture)) {
        fprintf(stderr, "C920Camera::InitializeCapture ERROR: Unable to set capture format.\n");
        return -4;
    }

    if (this->InitializeCaptureBuffers(capture) != true) {
        fprintf(stderr, "C920Camera::InitializeCapture ERROR: Unable to initialize capture buffers.\n");
        return -5;
    }

    /* Set up Image data */
    cvInitImageHeader(&capture->Frame, cvSize(capture->V4L2Format.fmt.pix.width, capture->V4L2Format.fmt.pix.height),
                      IPL_DEPTH_8U, 3, IPL_ORIGIN_TL, IPL_ALIGN_4BYTES);
    /* Allocate space for RGB data */
    capture->Frame.imageData = (char *) cvAlloc(capture->Frame.imageSize);

    capture->NeedsCaptureInitialization = true;

    return true;
}
Code Example #11
    CvImage& copy( const CvImage& another )
    {
        if( !imageData || width != another.width || height != another.height )
        {
            cvReleaseData( this );
            cvResetImageROI( this );
            cvInitImageHeader( this, cvSize( another.width, another.height),
                               another.depth, another.nChannels, another.origin,
                               another.align );
            cvCreateImageData( this );
            if( another.roi )
                cvSetImageROI( this, cvGetImageROI( &another ));
        }

        cvCopy( (IplImage *)&another, this );
        return *this;
    }
Code Example #12
File: cap_aravis.cpp Project: cyberCBM/DetectO
IplImage* CvCaptureCAM_Aravis::retrieveFrame(int)
{
    if(framebuffer) {
        int depth = 0, channels = 0;
        switch(pixelFormat) {
            case ARV_PIXEL_FORMAT_MONO_8:
            case ARV_PIXEL_FORMAT_BAYER_GR_8:
                depth = IPL_DEPTH_8U;
                channels = 1;
                break;
            case ARV_PIXEL_FORMAT_MONO_12:
            case ARV_PIXEL_FORMAT_MONO_16:
                depth = IPL_DEPTH_16U;
                channels = 1;
                break;
        }
        if(depth && channels) {
            IplImage src;
            cvInitImageHeader( &src, cvSize( width, height ), depth, channels, IPL_ORIGIN_TL, 4 );

            cvSetData( &src, framebuffer, src.widthStep );
            if( !frame ||
                 frame->width != src.width ||
                 frame->height != src.height ||
                 frame->depth != src.depth ||
                 frame->nChannels != src.nChannels) {

                cvReleaseImage( &frame );
                frame = cvCreateImage( cvGetSize(&src), src.depth, channels );
            }
            cvCopy(&src, frame);

            if(controlExposure && ((frameID - prevFrameID) >= 3)) {
                // control exposure every third frame
                // i.e. skip frame taken with previous exposure setup
                autoExposureControl(frame);
            }

            return frame;
        }
    }
    return NULL;
}
Code Example #13
IplImage* C920Camera::RetrieveFrame(V4L2CameraCapture* capture) {
    // Resize Frame if Format size has changed.
    if (((unsigned long) capture->Frame.width != capture->V4L2Format.fmt.pix.width)
        || ((unsigned long) capture->Frame.height != capture->V4L2Format.fmt.pix.height)) {
        cvFree(&capture->Frame.imageData);
        cvInitImageHeader(&capture->Frame, cvSize(capture->V4L2Format.fmt.pix.width, capture->V4L2Format.fmt.pix.height),
                          IPL_DEPTH_8U, 3, IPL_ORIGIN_TL, 4);
        capture->Frame.imageData = (char *) cvAlloc(capture->Frame.imageSize);
    }
    // Decode image from MJPEG to RGB24
    if (capture->Buffers[capture->BufferIndex].start) {
        if (!this->MJPEG2RGB24(capture->V4L2Format.fmt.pix.width, capture->V4L2Format.fmt.pix.height,
                               (unsigned char*) (capture->Buffers[capture->BufferIndex].start), capture->Buffers[capture->BufferIndex].length,
                               (unsigned char*) capture->Frame.imageData)) {
            fprintf(stderr, "C920Camera::RetrieveFrame ERROR: Unable to decode frame.\n");
            return 0;
        }
    }
    return (&capture->Frame);
}
Code Example #14
File: cap_vfw.cpp Project: MasaMune692/alcexamples
IplImage* CvCaptureAVI_VFW::retrieveFrame(int)
{
    if( avistream && bmih )
    {
        IplImage src;
        cvInitImageHeader( &src, cvSize( bmih->biWidth, bmih->biHeight ),
                           IPL_DEPTH_8U, 3, IPL_ORIGIN_BL, 4 );
        cvSetData( &src, (char*)(bmih + 1), src.widthStep );
        if( !frame || frame->width != src.width || frame->height != src.height )
        {
            cvReleaseImage( &frame );
            frame = cvCreateImage( cvGetSize(&src), 8, 3 );
        }

        cvFlip( &src, frame, 0 );
        return frame;
    }

    return 0;
}
Code Example #15
File: rim_ipl.cpp Project: zophos/Rim
IplImage *rb_rim_image2ipl_ref(VALUE oRimImage)
{
    struct NARRAY *n_na;
    int width,height,channels,depth;

    if(!IsNArray(oRimImage))
        return NULL;

    GetNArray(oRimImage,n_na);

    if(!rb_rim_get_image_description(n_na,
                                     &width,
                                     &height,
                                     &channels,
                                     &depth))
        return NULL;

    IplImage *img=ALLOC(IplImage);
    cvInitImageHeader(img,
                      cvSize(width,height),
                      depth,
                      channels);
    
    size_t lineWidth=img->nChannels*img->width*na_sizeof[n_na->type];
    size_t sz=n_na->total*na_sizeof[n_na->type];
    if((img->widthStep==lineWidth)&&(img->imageSize==sz)){
        img->imageData=n_na->ptr;
        img->imageDataOrigin=NULL;
    }
    else{
        free(img);
        img=rim_cv_create_and_copy_image(width,
                                         height,
                                         channels,
                                         depth,
                                         n_na);
    }
    
    return img;
}
Code Example #16
File: cvarray.cpp Project: mikanradojevic/sdkpub
// convert array (CvMat or IplImage) to IplImage
CV_IMPL IplImage*
cvGetImage( const CvArr* array, IplImage* img )
{
    IplImage* result = 0;
    const IplImage* src = (const IplImage*)array;
    
    CV_FUNCNAME( "cvGetImage" );

    __BEGIN__;

    if( !img )
        CV_ERROR_FROM_CODE( CV_StsNullPtr );

    if( !_CV_IS_IMAGE(src) )
    {
        const CvMat* mat = (const CvMat*)src;
        
        if( !_CV_IS_ARR(mat))
            CV_ERROR_FROM_CODE( CV_StsBadFlag );

        if( mat->data.ptr == 0 )
            CV_ERROR_FROM_CODE( CV_StsNullPtr );

        cvInitImageHeader( img, cvSize(mat->width, mat->height),
                           icvCvToIplDepth(CV_ARR_DEPTH(mat->type)),
                           CV_ARR_CN(mat->type) );
        cvSetData( img, mat->data.ptr, mat->step );

        result = img;
    }
    else
    {
        result = (IplImage*)src;
    }

    __END__;

    return result;
}
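cvGetImage is the complementary convenience: given a CvMat it fills a caller-supplied stub header (via cvInitImageHeader/cvSetData) and returns it; given an IplImage it returns the input unchanged. A hypothetical use; note the stub must outlive any use of the returned pointer:

#include <opencv2/core/core_c.h>

void UseMatAsImage(CvMat* mat)
{
    IplImage stub;
    IplImage* img = cvGetImage(mat, &stub);  // stub now aliases mat->data.ptr
    (void)img;
    /* ... img can be passed to IplImage-based routines ... */
}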
Code Example #17
File: wimage.hpp Project: 09beezahmad/opencv
WImageViewC<T, C>::WImageViewC() : WImageC<T, C>(0) {
    cvInitImageHeader(&header_, cvSize(0, 0), WImage<T>::Depth(), C);
    header_.imageData = reinterpret_cast<char*>(0);
    WImageC<T, C>::SetIpl(&header_);
}
Code Example #18
File: Kalman.cpp Project: mikanradojevic/sdkpub
//
// Transform
// Transform the sample 'in place'
//
HRESULT CKalmTrack::Transform(IMediaSample *pSample)
{
    BYTE*   pData;
    CvImage image;
    
    pSample->GetPointer(&pData);
    
    AM_MEDIA_TYPE* pType = &m_pInput->CurrentMediaType();
    VIDEOINFOHEADER *pvi = (VIDEOINFOHEADER *) pType->pbFormat;
    
    // Get the image properties from the BITMAPINFOHEADER
    CvSize size = cvSize( pvi->bmiHeader.biWidth, pvi->bmiHeader.biHeight );
    int stride = (size.width * 3 + 3) & -4;

    cvInitImageHeader( &image, size, IPL_DEPTH_8U, 3, IPL_ORIGIN_TL, 4, 1 );
    cvSetImageData( &image, pData,stride );

    if(IsTracking == false)
    {
        if(IsInit == false)
        {
            CvPoint p1, p2;
            // Draw box
            p1.x = cvRound( size.width * m_params.x );
            p1.y = cvRound( size.height * m_params.y );

            p2.x = cvRound( size.width * (m_params.x + m_params.width));
            p2.y = cvRound( size.height * (m_params.y + m_params.height));

            CheckBackProject( &image );

            cvRectangle( &image, p1, p2, -1, 1 );
        }
        else
        {
            m_object.x = cvRound( size.width * m_params.x );
            m_object.y = cvRound( size.height * m_params.y );
            m_object.width = cvRound( size.width * m_params.width );
            m_object.height = cvRound( size.height * m_params.height );
            ApplyCamShift( &image, true );

            CheckBackProject( &image );

            IsTracking = true;
        }
    }
    else
    {
        cvKalmanUpdateByTime(Kalman);
        m_object.x = cvRound( Kalman->PriorState[0]-m_object.width*0.5);
        m_object.y = cvRound( Kalman->PriorState[2]-m_object.height*0.5 );
        
        ApplyCamShift( &image, false );

        CheckBackProject( &image );

        cvRectangle( &image,
                     cvPoint( m_object.x, m_object.y ),
                     cvPoint( m_object.x + m_object.width, m_object.y + m_object.height ),
                     -1, 1 );

        Rectang(&image,m_Indicat1,-1);
        m_X.x = 10;
        m_X.y = 10;
        m_X.width=50*m_Old.x/size.width;
        m_X.height =10;
        Rectang(&image,m_X,CV_RGB(0,0,255));
        m_Y.x = 10;
        m_Y.y = 10;
        m_Y.width=10;
        m_Y.height = 50*m_Old.y/size.height;
        Rectang(&image,m_Y,CV_RGB(255,0,0));
        m_Indicat2.x = 0; 
        m_Indicat2.y = size.height-50;
        m_Indicat2.width = 50;
        m_Indicat2.height = 50;
        Rectang(&image,m_Indicat2,-1);
        float Norm = cvSqrt(Measurement[1]*Measurement[1]+Measurement[3]*Measurement[3]);
        int VXNorm = (fabs(Measurement[1])>5)?(int)(12*Measurement[1]/Norm):0;
        int VYNorm = (fabs(Measurement[3])>5)?(int)(12*Measurement[3]/Norm):0;
        CvPoint pp1 = {25,size.height-25};
        CvPoint pp2 = {25+VXNorm,size.height-25+VYNorm};
        cvLine(&image,pp1,pp2,CV_RGB(0,0,0),3);
        /*CvPoint pp1 = {25,size.height-25};
        double angle = atan2( Measurement[3], Measurement[1] );
        CvPoint pp2 = {cvRound(25+12*cos(angle)),cvRound(size.height-25-12*sin(angle))};
        cvLine(&image,pp1,pp2,0,3);*/
    }

    cvSetImageData( &image, 0, 0 );

    return NOERROR;
} // Transform
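The stride computation near the top of Transform(), (size.width * 3 + 3) & -4, rounds the row byte count of a 24-bit image up to the next multiple of four, which matches the align=4 argument passed to cvInitImageHeader. A quick self-contained check of that formula:

#include <cassert>

int AlignedStride(int width)  // 3 bytes per pixel, 4-byte row alignment
{
    return (width * 3 + 3) & -4;
}

int main()
{
    assert(AlignedStride(4)  == 12);  // 12 is already a multiple of 4
    assert(AlignedStride(5)  == 16);  // 15 rounds up to 16
    assert(AlignedStride(21) == 64);  // 63 rounds up to 64
    return 0;
}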
Code Example #19
File: core_c.cpp Project: neutmute/emgucv
IplImage* cveInitImageHeader(IplImage* image, CvSize* size, int depth, int channels, int origin, int align)
{
   return cvInitImageHeader(image, *size, depth, channels, origin, align);
}
Code Example #20
static int icvOpenAVI_FFMPEG( CvCaptureAVI_FFMPEG* capture, const char* filename )
{
    int err, valid = 0, video_index = -1, i;
    AVFormatContext *ic;

    capture->ic = NULL;
    capture->video_stream = -1;
    capture->video_st = NULL;
    /* register all codecs, demux and protocols */
    av_register_all();

    err = av_open_input_file(&ic, filename, NULL, 0, NULL);
    if (err < 0) {
        CV_WARN("Error opening file");
        goto exit_func;
    }
    capture->ic = ic;
    err = av_find_stream_info(ic);
    if (err < 0) {
        CV_WARN("Could not find codec parameters");
        goto exit_func;
    }
    for(i = 0; i < ic->nb_streams; i++) {
#if LIBAVFORMAT_BUILD > 4628
        AVCodecContext *enc = ic->streams[i]->codec;
#else
        AVCodecContext *enc = &ic->streams[i]->codec;
#endif
        AVCodec *codec;
        if( CODEC_TYPE_VIDEO == enc->codec_type && video_index < 0) {
            video_index = i;
            codec = avcodec_find_decoder(enc->codec_id);
            if (!codec || avcodec_open(enc, codec) < 0)
                goto exit_func;
            capture->video_stream = i;
            capture->video_st = ic->streams[i];
            capture->picture = avcodec_alloc_frame();

            capture->rgb_picture.data[0] = (uchar*)cvAlloc(
                                    avpicture_get_size( PIX_FMT_BGR24,
                                    enc->width, enc->height ));
            avpicture_fill( (AVPicture*)&capture->rgb_picture, capture->rgb_picture.data[0],
                            PIX_FMT_BGR24, enc->width, enc->height );

            cvInitImageHeader( &capture->frame, cvSize( enc->width,
                                                        enc->height ), 8, 3, 0, 4 );
            cvSetData( &capture->frame, capture->rgb_picture.data[0],
                       capture->rgb_picture.linesize[0] );
            break;
        }
    }

    if(video_index >= 0)
        valid = 1;

exit_func:

    if( !valid )
        icvCloseAVI_FFMPEG( capture );

    return valid;
}
Code Example #21
File: cap_vfw.cpp Project: qqchen/opencv2410-VS
IplImage* CvCaptureCAM_VFW::retrieveFrame(int)
{
    BITMAPINFO vfmt;
    memset( &vfmt, 0, sizeof(vfmt));
    BITMAPINFOHEADER& vfmt0 = vfmt.bmiHeader;

    if( !capWnd )
        return 0;

    const DWORD sz = capGetVideoFormat( capWnd, &vfmt, sizeof(vfmt));
    const int prevWidth = frame ? frame->width : 0;
    const int prevHeight = frame ? frame->height : 0;

    if( !hdr || hdr->lpData == 0 || sz == 0 )
        return 0;

    if( !frame || frame->width != vfmt0.biWidth || frame->height != vfmt0.biHeight )
    {
        cvReleaseImage( &frame );
        frame = cvCreateImage( cvSize( vfmt0.biWidth, vfmt0.biHeight ), 8, 3 );
    }

    if( vfmt0.biCompression != BI_RGB ||
        vfmt0.biBitCount != 24 )
    {
        BITMAPINFOHEADER vfmt1 = icvBitmapHeader( vfmt0.biWidth, vfmt0.biHeight, 24 );

        if( hic == 0 || fourcc != vfmt0.biCompression ||
            prevWidth != vfmt0.biWidth || prevHeight != vfmt0.biHeight )
        {
            closeHIC();
            hic = ICOpen( MAKEFOURCC('V','I','D','C'),
                          vfmt0.biCompression, ICMODE_DECOMPRESS );
            if( hic )
            {
                if( ICDecompressBegin( hic, &vfmt0, &vfmt1 ) != ICERR_OK )
                {
                    closeHIC();
                    return 0;
                }
            }
        }

        if( !hic || ICDecompress( hic, 0, &vfmt0, hdr->lpData,
            &vfmt1, frame->imageData ) != ICERR_OK )
        {
            closeHIC();
            return 0;
        }

        cvFlip( frame, frame, 0 );
    }
    else
    {
        IplImage src;
        cvInitImageHeader( &src, cvSize(vfmt0.biWidth, vfmt0.biHeight),
            IPL_DEPTH_8U, 3, IPL_ORIGIN_BL, 4 );
        cvSetData( &src, hdr->lpData, src.widthStep );
        cvFlip( &src, frame, 0 );
    }

    return frame;
}
Code Example #22
File: cvcap_ffmpeg.cpp Project: allanca/otterdive
bool CvCapture_FFMPEG::open( const char* _filename )
{
    unsigned i;
    bool valid = false;

    close();

    /* register all codecs, demux and protocols */
    av_register_all();

#ifndef _DEBUG
    // av_log_level = AV_LOG_QUIET;
#endif

    int err = av_open_input_file(&ic, _filename, NULL, 0, NULL);
    if (err < 0) {
        CV_WARN("Error opening file");
        goto exit_func;
    }
    err = av_find_stream_info(ic);
    if (err < 0) {
        CV_WARN("Could not find codec parameters");
        goto exit_func;
    }
    for(i = 0; i < ic->nb_streams; i++) {
#if LIBAVFORMAT_BUILD > 4628
        AVCodecContext *enc = ic->streams[i]->codec;
#else
        AVCodecContext *enc = &ic->streams[i]->codec;
#endif

        if( CODEC_TYPE_VIDEO == enc->codec_type && video_stream < 0) {
            AVCodec *codec = avcodec_find_decoder(enc->codec_id);
            if (!codec || avcodec_open(enc, codec) < 0)
                goto exit_func;
            video_stream = i;
            video_st = ic->streams[i];
            picture = avcodec_alloc_frame();

            rgb_picture.data[0] = (uint8_t*)cvAlloc(
                                    avpicture_get_size( PIX_FMT_BGR24,
                                    enc->width, enc->height ));
            avpicture_fill( (AVPicture*)&rgb_picture, rgb_picture.data[0],
                    PIX_FMT_BGR24, enc->width, enc->height );

            cvInitImageHeader( &frame, cvSize( enc->width,
                                       enc->height ), 8, 3, 0, 4 );
            cvSetData( &frame, rgb_picture.data[0],
                               rgb_picture.linesize[0] );
            break;
        }
    }

    if(video_stream >= 0) valid = true;

    // perform check if source is seekable via ffmpeg's seek function av_seek_frame(...)
    err = av_seek_frame(ic, video_stream, 10, 0);
    if (err < 0)
    {
        filename=(char*)malloc(strlen(_filename)+1);
        strcpy(filename, _filename);
        // reopen videofile to 'seek' back to first frame
        reopen();
    }
    else
    {
        // seek seems to work, so we don't need the filename,
        // but we still need to seek back to filestart
        filename=NULL;
        av_seek_frame(ic, video_stream, 0, 0);
    }
exit_func:

    if( !valid )
        close();

    return valid;
}
Code Example #23
File: cap_dc1394_v2.cpp Project: huamulan/opencv
bool CvCaptureCAM_DC1394_v2_CPP::grabFrame()
{
    dc1394capture_policy_t policy = DC1394_CAPTURE_POLICY_WAIT;
    bool code = false, isColor;
    dc1394video_frame_t *dcFrame = 0, *fs = 0;
    int i, nch;

    if (!dcCam || (!started && !startCapture()))
        return false;

    dc1394_capture_dequeue(dcCam, policy, &dcFrame);

    if (!dcFrame)
        return false;

    if (/*dcFrame->frames_behind > 1 ||*/ dc1394_capture_is_frame_corrupt(dcCam, dcFrame) == DC1394_TRUE)
    {
        goto _exit_;
    }

    isColor = dcFrame->color_coding != DC1394_COLOR_CODING_MONO8 &&
              dcFrame->color_coding != DC1394_COLOR_CODING_MONO16 &&
              dcFrame->color_coding != DC1394_COLOR_CODING_MONO16S;

    if (nimages == 2)
    {
        fs = (dc1394video_frame_t*)calloc(1, sizeof(*fs));

        //dc1394_deinterlace_stereo_frames(dcFrame, fs, DC1394_STEREO_METHOD_INTERLACED);
        dc1394_deinterlace_stereo_frames_fixed(dcFrame, fs, DC1394_STEREO_METHOD_INTERLACED);

        dc1394_capture_enqueue(dcCam, dcFrame); // release the captured frame as soon as possible
        dcFrame = 0;
        if (!fs->image)
            goto _exit_;
        isColor = colorStereo;
    }
    nch = isColor ? 3 : 1;

    for (i = 0; i < nimages; i++)
    {
        IplImage fhdr;
        dc1394video_frame_t f = fs ? *fs : *dcFrame, *fc = &f;
        f.size[1] /= nimages;
        f.image += f.size[0] * f.size[1] * i; // TODO: make it more universal
        if (isColor)
        {
            if (!frameC)
                frameC = (dc1394video_frame_t*)calloc(1, sizeof(*frameC));
            frameC->color_coding = nch == 3 ? DC1394_COLOR_CODING_RGB8 : DC1394_COLOR_CODING_MONO8;
            if (nimages == 1)
            {
                dc1394_convert_frames(&f, frameC);
                dc1394_capture_enqueue(dcCam, dcFrame);
                dcFrame = 0;
            }
            else
            {
                f.color_filter = bayerFilter;
                dc1394_debayer_frames(&f, frameC, bayer);
            }
            fc = frameC;
        }
        if (!img[i])
            img[i] = cvCreateImage(cvSize(fc->size[0], fc->size[1]), 8, nch);
        cvInitImageHeader(&fhdr, cvSize(fc->size[0], fc->size[1]), 8, nch);
        cvSetData(&fhdr, fc->image, fc->size[0]*nch);

        // Swap R&B channels:
        if (nch==3)
            cvConvertImage(&fhdr,&fhdr,CV_CVTIMG_SWAP_RB);

        if( rectify && cameraId == VIDERE && nimages == 2 )
        {
            if( !maps[0][0] || maps[0][0]->width != img[i]->width || maps[0][0]->height != img[i]->height )
            {
                CvSize size = cvGetSize(img[i]);
                cvReleaseImage(&maps[0][0]);
                cvReleaseImage(&maps[0][1]);
                cvReleaseImage(&maps[1][0]);
                cvReleaseImage(&maps[1][1]);
                maps[0][0] = cvCreateImage(size, IPL_DEPTH_16S, 2);
                maps[0][1] = cvCreateImage(size, IPL_DEPTH_16S, 1);
                maps[1][0] = cvCreateImage(size, IPL_DEPTH_16S, 2);
                maps[1][1] = cvCreateImage(size, IPL_DEPTH_16S, 1);
                char buf[4*4096];
                if( getVidereCalibrationInfo( buf, (int)sizeof(buf) ) &&
                        initVidereRectifyMaps( buf, maps[0], maps[1] ))
                    ;
                else
                    rectify = false;
            }
            cvRemap(&fhdr, img[i], maps[i][0], maps[i][1]);
        }
        else
            cvCopy(&fhdr, img[i]);
    }

    code = true;

_exit_:
    if (dcFrame)
        dc1394_capture_enqueue(dcCam, dcFrame);
    if (fs)
    {
        if (fs->image)
            free(fs->image);
        free(fs);
    }

    return code;
}
Code Example #24
File: Image.cpp Project: Killerregenwurm/utvision
Image::Image( int nWidth, int nHeight, int nChannels, void* pImageData, int nDepth, int nOrigin, int nAlign )
	: m_bOwned( false )
{
	cvInitImageHeader( this, cvSize( nWidth, nHeight ), nDepth, nChannels, nOrigin, nAlign );
	imageData = static_cast< char* >( pImageData );
}
Code Example #25
File: cvcap_dc1394.cpp Project: gotomypc/eyepatch
// resize capture->frame appropriately depending on camera and capture settings
static int icvResizeFrame(CvCaptureCAM_DC1394 * capture){
	if(capture->convert){
		// resize if sizes are different, formats are different
		// or conversion option has changed
		if(capture->camera->frame_width != capture->frame.width ||
		   capture->camera->frame_height != capture->frame.height ||
		   capture->frame.depth != 8 ||
		   capture->frame.nChannels != 3 ||
		   capture->frame.imageData == NULL ||
		   capture->buffer_is_writeable == 0) 
		{
			if(capture->frame.imageData && capture->buffer_is_writeable){ 
				cvReleaseData( &(capture->frame));
			}
			cvInitImageHeader( &capture->frame, cvSize( capture->camera->frame_width,
						                                capture->camera->frame_height ),
								IPL_DEPTH_8U, 3, IPL_ORIGIN_TL, 4 );
			cvCreateData( &(capture->frame) );
			capture->buffer_is_writeable = 1;
		}

	}
	else {
		// free image data if allocated by opencv
		if(capture->buffer_is_writeable){
			cvReleaseData(&(capture->frame));
		}

		// figure out number of channels and bpp
		int bpp = 8;
		int nch = 3;
		int width = capture->camera->frame_width;
		int height = capture->camera->frame_height;
		double code = CV_FOURCC('B','G','R',0);
		switch(capture->color_mode){
		case COLOR_FORMAT7_YUV422:
			nch = 2;
			code = CV_FOURCC('Y','4','2','2');
			break;
		case COLOR_FORMAT7_MONO8:
			code = CV_FOURCC('Y',0,0,0);
			nch = 1;
			break;
		case COLOR_FORMAT7_YUV411:
			code = CV_FOURCC('Y','4','1','1');
			width *= 2;
			nch = 3;  //yy[u/v]
			break;
		case COLOR_FORMAT7_YUV444:
			code = CV_FOURCC('Y','U','V',0);
			nch = 3;
			break;
		case COLOR_FORMAT7_MONO16:
			code = CV_FOURCC('Y',0,0,0);
			bpp = IPL_DEPTH_16S;
			nch = 1;
			break;
		case COLOR_FORMAT7_RGB16:
			bpp = IPL_DEPTH_16S;
			nch = 3;
			break;
		default:
			break;
		}
		// reset image header
		cvInitImageHeader( &capture->frame,cvSize( width, height ), bpp, nch, IPL_ORIGIN_TL, 4 );
		//assert(capture->frame.imageSize == capture->camera->quadlets_per_frame*4);
		capture->buffer_is_writeable = 0;
	}
	return 1;
}
Code Example #26
bool VideoGrabV4L2::open(const std::string& dev_name, int width, int height)
{
  if (fd >= 0) {
    printf("Closing previously opened video device.\n");
    release();
  }
  
  struct v4l2_format              fmt;
  struct v4l2_buffer              buf;
  struct v4l2_requestbuffers      req;
  
  fd = v4l2_open(dev_name.c_str(), O_RDWR | O_NONBLOCK, 0);
  if (fd < 0) {
    perror("Cannot open device");
    return false;
  }

  CLEAR(fmt);
  fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  fmt.fmt.pix.width       = width;
  fmt.fmt.pix.height      = height;
  fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_BGR24;
  fmt.fmt.pix.field       = V4L2_FIELD_INTERLACED;
  xioctl(fd, VIDIOC_S_FMT, &fmt);
  if (fmt.fmt.pix.pixelformat != V4L2_PIX_FMT_BGR24) {
    printf("Libv4l didn't accept BGR24 format. Can't proceed.\n");
    release();
    return false;
  }
  if ((fmt.fmt.pix.width != (unsigned)width) || (fmt.fmt.pix.height != (unsigned)height))
    printf("Warning: driver is sending image at %dx%d\n",
      fmt.fmt.pix.width, fmt.fmt.pix.height);
  
  // Store image dimensions in image header
  cvInitImageHeader( &frame,
              cvSize( fmt.fmt.pix.width,
                  fmt.fmt.pix.height ),
              IPL_DEPTH_8U, 3, IPL_ORIGIN_TL, 4 );
  // Allocate memory for image data
  frame.imageData = new char[fmt.fmt.pix.sizeimage];
  if (!frame.imageData) {
    perror("Not enough memory to allocate image.");
    release();
    return false;
  }

  CLEAR(req);
  req.count = 2;
  req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  req.memory = V4L2_MEMORY_MMAP;
  xioctl(fd, VIDIOC_REQBUFS, &req);

  buffers = (buffer*)calloc(req.count, sizeof(*buffers));
  for (n_buffers = 0; n_buffers < req.count; ++n_buffers) {
    CLEAR(buf);

    buf.type        = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory      = V4L2_MEMORY_MMAP;
    buf.index       = n_buffers;

    xioctl(fd, VIDIOC_QUERYBUF, &buf);

    buffers[n_buffers].length = buf.length;
    buffers[n_buffers].start = v4l2_mmap(NULL, buf.length,
      PROT_READ | PROT_WRITE, MAP_SHARED,
      fd, buf.m.offset);

    if (MAP_FAILED == buffers[n_buffers].start) {
      perror("mmap");
      release();
      return false;
    }
  }

  for (unsigned int i = 0; i < n_buffers; ++i) {
    CLEAR(buf);
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    buf.index = i;
    xioctl(fd, VIDIOC_QBUF, &buf);
  }
  
  v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  xioctl(fd, VIDIOC_STREAMON, &type);
  
  return true;
}
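open() leaves the device streaming with the mmap-ed buffers queued; the matching capture step (not shown on this page) would dequeue a filled buffer, copy it into frame.imageData, and re-queue it. A hedged sketch of that loop body, assuming the buffer struct and the members set up in open():

#include <algorithm>
#include <cstring>
#include <libv4l2.h>
#include <linux/videodev2.h>
#include <opencv2/core/core_c.h>

struct buffer { void* start; size_t length; };  // assumed layout from open()

bool GrabOneFrame(int fd, buffer* buffers, IplImage& frame)
{
    v4l2_buffer buf;
    memset(&buf, 0, sizeof(buf));
    buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    // Try to dequeue a filled buffer (may fail with EAGAIN in non-blocking mode).
    if (v4l2_ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
        return false;
    memcpy(frame.imageData, buffers[buf.index].start,
           std::min((size_t)frame.imageSize, (size_t)buf.bytesused));
    v4l2_ioctl(fd, VIDIOC_QBUF, &buf);  // hand the buffer back to the driver
    return true;
}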
Code Example #27
File: cap_vfw.cpp Project: 12rohanb/opencv
IplImage* CvCaptureCAM_VFW::retrieveFrame(int)
{
    BITMAPINFO vfmt;
    memset( &vfmt, 0, sizeof(vfmt));
    BITMAPINFOHEADER& vfmt0 = vfmt.bmiHeader;

    if( !capWnd )
        return 0;

    const DWORD sz = capGetVideoFormat( capWnd, &vfmt, sizeof(vfmt));
    const int prevWidth = frame ? frame->width : 0;
    const int prevHeight = frame ? frame->height : 0;

    if( !hdr || hdr->lpData == 0 || sz == 0 )
        return 0;

    if( !frame || frame->width != vfmt0.biWidth || frame->height != vfmt0.biHeight )
    {
        cvReleaseImage( &frame );
        frame = cvCreateImage( cvSize( vfmt0.biWidth, vfmt0.biHeight ), 8, 3 );
    }

    if ( vfmt0.biCompression == MAKEFOURCC('N','V','1','2') )
    {
        // Frame is in YUV 4:2:0 NV12 format, convert to BGR color space
        // See https://msdn.microsoft.com/en-us/library/windows/desktop/dd206750(v=vs.85).aspx#nv12)
        IplImage src;
        cvInitImageHeader( &src, cvSize( vfmt0.biWidth, vfmt0.biHeight * 3 / 2 ), IPL_DEPTH_8U, 1, IPL_ORIGIN_BL, 4 );
        cvSetData( &src, hdr->lpData, src.widthStep );
        cvCvtColor( &src, frame, CV_YUV2BGR_NV12 );
    }
    else if( vfmt0.biCompression != BI_RGB ||
             vfmt0.biBitCount != 24 )
    {
        BITMAPINFOHEADER vfmt1 = icvBitmapHeader( vfmt0.biWidth, vfmt0.biHeight, 24 );

        if( hic == 0 || fourcc != vfmt0.biCompression ||
            prevWidth != vfmt0.biWidth || prevHeight != vfmt0.biHeight )
        {
            closeHIC();
            hic = ICOpen( MAKEFOURCC('V','I','D','C'),
                          vfmt0.biCompression, ICMODE_DECOMPRESS );
            if( hic )
            {
                if( ICDecompressBegin( hic, &vfmt0, &vfmt1 ) != ICERR_OK )
                {
                    closeHIC();
                    return 0;
                }
            }
        }

        if( !hic || ICDecompress( hic, 0, &vfmt0, hdr->lpData,
            &vfmt1, frame->imageData ) != ICERR_OK )
        {
            closeHIC();
            return 0;
        }

        cvFlip( frame, frame, 0 );
    }
    else
    {
        IplImage src;
        cvInitImageHeader( &src, cvSize(vfmt0.biWidth, vfmt0.biHeight),
            IPL_DEPTH_8U, 3, IPL_ORIGIN_BL, 4 );
        cvSetData( &src, hdr->lpData, src.widthStep );
        cvFlip( &src, frame, 0 );
    }

    return frame;
}
Code Example #28
File: Image.cpp Project: Killerregenwurm/utvision
Image::Image( int nWidth, int nHeight, int nChannels, int nDepth, int nOrigin )
	: m_bOwned( true )
{
	cvInitImageHeader( this, cvSize( nWidth, nHeight ), nDepth, nChannels, nOrigin );
	imageDataOrigin = imageData = static_cast< char* >( cvAlloc( imageSize ) );
}