Example #1
// ######################################################################
void convertFromString(const std::string& orig, VideoFormat& val)
{
  const std::string upper = toUpperCase(orig);

  for (int i = 0; i <= VIDFMT_AUTO; ++i)
    if (upper.compare(toUpperCase(convertToString(VideoFormat(i)))) == 0)
      { val = VideoFormat(i); return; }

  conversion_error::raise<VideoFormat>(orig);
}
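A minimal call-site sketch for the lookup above (the format name is hypothetical; convertToString and VideoFormat are those used in the snippet):

// Hypothetical round trip: parse a name case-insensitively, then
// serialize it back through the same conversion table.
VideoFormat fmt;
convertFromString("yuv420", fmt);               // matches regardless of case
const std::string name = convertToString(fmt);  // canonical spelling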
Example #2
VideoFrame VideoDecoder::frame()
{
    DPTR_D(VideoDecoder);
    if (d.width <= 0 || d.height <= 0 || !d.codec_ctx)
        return VideoFrame(0, 0, VideoFormat(VideoFormat::Format_Invalid));
    //DO NOT make frame a member, because VideoFrame is explicitly shared!
    VideoFrame frame(d.codec_ctx->width, d.codec_ctx->height, VideoFormat((int)d.codec_ctx->pix_fmt));
    frame.setBits(d.frame->data);
    frame.setBytesPerLine(d.frame->linesize);
    return frame;
}
Example #3
VideoFrame::VideoFrame(const QImage& image)
    : Frame(new VideoFramePrivate(image.width(), image.height(), VideoFormat(image.format())))
{
    setBits((uchar*)image.constBits(), 0);
    setBytesPerLine(image.bytesPerLine(), 0);
    d_func()->qt_image.reset(new QImage(image));
}
Example #4
VideoFrame::VideoFrame(const QImage& image)
    : Frame(*new VideoFramePrivate(image.width(), image.height(), VideoFormat(image.format())))
{
    // TODO: call const image.bits()?
    setBits((uchar*)image.bits(), 0);
    setBytesPerLine(image.bytesPerLine(), 0);
}
Example #5
VideoFrame VideoDecoder::frame()
{
    DPTR_D(VideoDecoder);
    if (d.width <= 0 || d.height <= 0 || !d.codec_ctx)
        return VideoFrame(0, 0, VideoFormat(VideoFormat::Format_Invalid));
    //DO NOT make frame a member, because VideoFrame is explicitly shared!
    float displayAspectRatio = 0;
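    // DAR = storage aspect (width/height) scaled by the sample (pixel) aspect ratio.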
    if (d.codec_ctx->sample_aspect_ratio.den > 0)
        displayAspectRatio = ((float)d.frame->width / (float)d.frame->height) *
            ((float)d.codec_ctx->sample_aspect_ratio.num / (float)d.codec_ctx->sample_aspect_ratio.den);

    VideoFrame frame(d.frame->width, d.frame->height, VideoFormat((int)d.codec_ctx->pix_fmt));
    frame.setDisplayAspectRatio(displayAspectRatio);
    frame.setBits(d.frame->data);
    frame.setBytesPerLine(d.frame->linesize);
    return frame;
}
Example #6
QImage VideoFrame::toImage(QImage::Format fmt, const QSize& dstSize, const QRectF &roi) const
{
    VideoFrame f(to(VideoFormat(VideoFormat::pixelFormatFromImageFormat(fmt)), dstSize, roi));
    if (!f)
        return QImage();
    QImage image((const uchar*)f.frameData().constData(), f.width(), f.height(), f.bytesPerLine(0), fmt);
    return image.copy();
}
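A hedged call-site sketch for toImage() above (the frame variable, size, and file name are arbitrary):

// Hypothetical: grab a decoded frame as ARGB32 and save a snapshot.
QImage img = frame.toImage(QImage::Format_ARGB32, QSize(640, 360), QRectF());
if (!img.isNull())
    img.save("snapshot.png");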
Example #7
	void VideoFrame::poolDeleter(VideoFrame::Obj * obj){
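        // Recycle the buffer into the per-format free list instead of deleting it;
        // the re-wrapped ofPtr installs this same deleter so it can be pooled again.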
        try {
            std::unique_lock<std::mutex> lock(poolMutex);
            pool[VideoFormat(obj->pixels)].push_back(ofPtr<Obj>(obj,&VideoFrame::poolDeleter));
        }
        catch(const std::exception& e) {
            /* When program terminates, acquiring lock is impossible. */
        }
	}
Example #8
// sets format and formatTypeV4L2
void V4L2CaptureStream::queryCurrentFormat() // throws CaptureException;
{
	// from http://www.linuxtv.org/downloads/video4linux/API/V4L2_API/spec-single/v4l2.html
	// To query the current image format applications set the type field of a struct v4l2_format to V4L2_BUF_TYPE_VIDEO_CAPTURE and call the VIDIOC_G_FMT ioctl with a pointer to this structure. Drivers fill the struct v4l2_pix_format pix member of the fmt union.
	
	// query width, height, format
	int width;
	int height;
	
	// TODO: this is in getPixelFormat, which is static.  Need to make an fg2_ function for this.
	{
		struct v4l2_format fmt;
		memset(&fmt, 0, sizeof(fmt));
		fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	
	 	int res = ioctl(fg->fd, VIDIOC_G_FMT, &fmt);
		if (res != 0)
			FailWithException("VIDIOC_G_FMT failed", errno);
		
		// For some reason, some drivers seem not to work
		// if the format is not explicitly set!
		// Fix it here...
	 	res = ioctl(fg->fd, VIDIOC_S_FMT, &fmt);
		if (res == -1)
		{
			printf("VIDIOC_S_FMT failed\n");
			//FailWithException("VIDIOC_S_FMT failed", errno);
		}
	 	
	 	width = fmt.fmt.pix.width;
	 	height = fmt.fmt.pix.height;
	 	formatTypeV4L2 = fmt.fmt.pix.pixelformat;
	}
	
	int formatType;
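	// Map the V4L2 fourcc onto the grabber's internal format constants; the YUV
	// cases report RGB24, presumably because the capture path converts them.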
	
	switch (formatTypeV4L2)
	{
		// TODO: other formats
		case V4L2_PIX_FMT_RGB24:
			formatType = RGB24;
			break;
		case V4L2_PIX_FMT_RGB32:
			formatType = RGB32;
			break;
		case V4L2_PIX_FMT_YUV420:
			formatType = RGB24;
			break;
		case V4L2_PIX_FMT_YUYV:
			formatType = RGB24;
			break;
		default:
			FailWithException("unknown or unsupported format", formatTypeV4L2);
	}
	
	format = VideoFormat(0, formatType, width, height, FPS_UNKNOWN);	// TODO: FPS
}
Example #9
VideoFrame VideoDecoderDXVA::frame()
{
    DPTR_D(VideoDecoderDXVA);
    //qDebug("frame size: %dx%d", d.frame->width, d.frame->height);
    if (!d.frame->opaque || !d.frame->data[0])
        return VideoFrame();
    if (d.frame->width <= 0 || d.frame->height <= 0 || !d.codec_ctx)
        return VideoFrame();

    IDirect3DSurface9 *d3d = (IDirect3DSurface9*)(uintptr_t)d.frame->data[3];
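    // FFmpeg's DXVA2 hwaccel returns the IDirect3DSurface9 pointer in AVFrame::data[3].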
    if (copyMode() == ZeroCopy && d.interop_res) {
        dxva::SurfaceInteropDXVA *interop = new dxva::SurfaceInteropDXVA(d.interop_res);
        interop->setSurface(d3d, width(), height());
        VideoFrame f(width(), height(), VideoFormat::Format_RGB32); //p->width()
        f.setBytesPerLine(d.width * 4); //used by gl to compute texture size
        f.setMetaData(QStringLiteral("surface_interop"), QVariant::fromValue(VideoSurfaceInteropPtr(interop)));
        f.setTimestamp(d.frame->pkt_pts/1000.0);
        f.setDisplayAspectRatio(d.getDAR(d.frame));
        return f;
    }
    class ScopedD3DLock {
        IDirect3DSurface9 *mpD3D;
    public:
        ScopedD3DLock(IDirect3DSurface9* d3d, D3DLOCKED_RECT *rect) : mpD3D(d3d) {
            if (FAILED(mpD3D->LockRect(rect, NULL, D3DLOCK_READONLY))) {
                qWarning("Failed to lock surface");
                mpD3D = 0;
            }
        }
        ~ScopedD3DLock() {
            if (mpD3D)
                mpD3D->UnlockRect();
        }
    };

    D3DLOCKED_RECT lock;
    ScopedD3DLock d3d_lock(d3d, &lock); // must be a named object: an unnamed temporary would unlock immediately
    if (lock.Pitch == 0) {
        return VideoFrame();
    }
    //pitch >= desc.Width
    D3DSURFACE_DESC desc;
    d3d->GetDesc(&desc);
    const VideoFormat fmt = VideoFormat(pixelFormatFromD3D(desc.Format));
    if (!fmt.isValid()) {
        qWarning("unsupported dxva pixel format: %#x", desc.Format);
        return VideoFrame();
    }
    //YV12 needs swap, not IMC3?
    // IMC3 U/V pitch == Y pitch, but half of each U/V plane is padding; we convert to yuv420p here
    // nv12 bpp(1)==1
    // the 3rd plane is not used for nv12
    int pitch[3] = { lock.Pitch, 0, 0}; //compute chroma later
    uint8_t *src[] = { (uint8_t*)lock.pBits, 0, 0}; //compute chroma later
    const bool swap_uv = desc.Format ==  MAKEFOURCC('I','M','C','3');
    return copyToFrame(fmt, d.surface_height, src, pitch, swap_uv);
}
Example #10
VideoFormat VideoThread::videoFormat()
{
	//return VideoFormat(VideoFrame::BUFFER_IMAGE, QVideoFrame::Format_RGB565);
	return VideoFormat(VideoFrame::BUFFER_IMAGE, QVideoFrame::Format_ARGB32);
	//return VideoFormat(VideoFrame::BUFFER_POINTER, QVideoFrame::Format_ARGB32);
	//return VideoFormat(VideoFrame::BUFFER_BYTEARRAY, QVideoFrame::Format_YUV420P);
	
	
	// Size defaults to 640,480
}
Example #11
VideoFrame VideoDecoder::frame()
{
    DPTR_D(VideoDecoder);
    /*qDebug("color space: %d, range: %d, prim: %d, t: %d"
           , d.codec_ctx->colorspace, d.codec_ctx->color_range
           , d.codec_ctx->color_primaries, d.codec_ctx->color_trc);
           */
    if (d.width <= 0 || d.height <= 0 || !d.codec_ctx)
        return VideoFrame(0, 0, VideoFormat(VideoFormat::Format_Invalid));
    //DO NOT make frame a member, because VideoFrame is explicitly shared!
    float displayAspectRatio = 0;
    if (d.codec_ctx->sample_aspect_ratio.den > 0)
        displayAspectRatio = ((float)d.frame->width / (float)d.frame->height) *
            ((float)d.codec_ctx->sample_aspect_ratio.num / (float)d.codec_ctx->sample_aspect_ratio.den);

    VideoFrame frame(d.frame->width, d.frame->height, VideoFormat((int)d.codec_ctx->pix_fmt));
    frame.setDisplayAspectRatio(displayAspectRatio);
    frame.setBits(d.frame->data);
    frame.setBytesPerLine(d.frame->linesize);
    frame.setTimestamp((double)d.frame->pts/1000.0); // in s
    return frame;
}
Example #12
VideoFrame VideoDecoderDXVA::frame()
{
    DPTR_D(VideoDecoderDXVA);
    if (!d.frame->opaque || !d.frame->data[0])
        return VideoFrame();
    if (d.width <= 0 || d.height <= 0 || !d.codec_ctx)
        return VideoFrame();

    class ScopedD3DLock {
    public:
        ScopedD3DLock(IDirect3DSurface9* d3d, D3DLOCKED_RECT *rect)
            : mpD3D(d3d)
        {
            if (FAILED(mpD3D->LockRect(rect, NULL, D3DLOCK_READONLY))) {
                qWarning("Failed to lock surface");
                mpD3D = 0;
            }
        }
        ~ScopedD3DLock() {
            if (mpD3D)
                mpD3D->UnlockRect();
        }
    private:
        IDirect3DSurface9 *mpD3D;
    };

    IDirect3DSurface9 *d3d = (IDirect3DSurface9*)(uintptr_t)d.frame->data[3];
    //pitch >= desc.Width
    //D3DSURFACE_DESC desc;
    //d3d->GetDesc(&desc);
    D3DLOCKED_RECT lock;
    ScopedD3DLock d3d_lock(d3d, &lock); // must be a named object: an unnamed temporary would unlock immediately
    if (lock.Pitch == 0) {
        return VideoFrame();
    }

    const VideoFormat fmt = VideoFormat((int)D3dFindFormat(d.render)->avpixfmt);
    if (!fmt.isValid()) {
        qWarning("unsupported dxva pixel format: %#x", d.render);
        return VideoFrame();
    }
    //YV12 needs swap, not IMC3?
    // IMC3 U/V pitch == Y pitch, but half of each U/V plane is padding; we convert to yuv420p here
    // nv12 bpp(1)==1
    // the 3rd plane is not used for nv12
    int pitch[3] = { lock.Pitch, 0, 0}; //compute chroma later
    uint8_t *src[] = { (uint8_t*)lock.pBits, 0, 0}; //compute chroma later
    const bool swap_uv = d.render ==  MAKEFOURCC('I','M','C','3');
    return copyToFrame(fmt, d.surface_height, src, pitch, swap_uv);
}
Example #13
void* SurfaceInteropDXVA::mapToHost(const VideoFormat &format, void *handle, int plane)
{
    Q_UNUSED(plane);
    class ScopedD3DLock {
        IDirect3DSurface9 *mpD3D;
    public:
        ScopedD3DLock(IDirect3DSurface9* d3d, D3DLOCKED_RECT *rect) : mpD3D(d3d) {
            if (FAILED(mpD3D->LockRect(rect, NULL, D3DLOCK_READONLY))) {
                qWarning("Failed to lock surface");
                mpD3D = 0;
            }
        }
        ~ScopedD3DLock() {
            if (mpD3D)
                mpD3D->UnlockRect();
        }
    };

    D3DLOCKED_RECT lock;
    ScopedD3DLock d3d_lock(m_surface, &lock); // must be a named object: an unnamed temporary would unlock immediately
    if (lock.Pitch == 0)
        return NULL;

    //pitch >= desc.Width
    D3DSURFACE_DESC desc;
    m_surface->GetDesc(&desc);
    const VideoFormat fmt = VideoFormat(pixelFormatFromFourcc(desc.Format));
    if (!fmt.isValid()) {
        qWarning("unsupported dxva pixel format: %#x", desc.Format);
        return NULL;
    }
    //YV12 needs swap, not IMC3?
    // IMC3 U/V pitch == Y pitch, but half of each U/V plane is padding; we convert to yuv420p here
    // nv12 bpp(1)==1
    // the 3rd plane is not used for nv12
    int pitch[3] = { lock.Pitch, 0, 0}; //compute chroma later
    quint8 *src[] = { (quint8*)lock.pBits, 0, 0}; //compute chroma later
    Q_ASSERT(src[0] && pitch[0] > 0);
    const bool swap_uv = desc.Format ==  MAKEFOURCC('I','M','C','3');
    // try to use SSE. fallback to normal copy if SSE is not supported
    VideoFrame frame(VideoFrame::fromGPU(fmt, frame_width, frame_height, desc.Height, src, pitch, true, swap_uv));
    // TODO: check rgb32 because d3d can use hw to convert
    if (format != fmt)
        frame = frame.to(format);
    VideoFrame *f = reinterpret_cast<VideoFrame*>(handle);
    frame.setTimestamp(f->timestamp());
    *f = frame;
    return f;
}
Example #14
File: X11Renderer.cpp, Project: heefan/QtAV
VideoFormat::PixelFormat pixelFormat(XImage* xi) {
    const struct fmt2Xfmtentry *fmte = fmt2Xfmt;
    while (fmte->fmt != VideoFormat::Format_Invalid) {
        int depth = VideoFormat(fmte->fmt).bitsPerPixel();
        // 15->16? mpv
        if (depth == xi->bits_per_pixel && fmte->byte_order == xi->byte_order
                && fmte->red_mask == xi->red_mask
                && fmte->green_mask == xi->green_mask
                && fmte->blue_mask == xi->blue_mask)
            break;
        //qDebug() << fmte->fmt;
        fmte++;
    }
    return fmte->fmt;
}
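The fmt2Xfmtentry struct itself is not shown; judging only from the fields accessed above, it presumably looks something like:

// Hypothetical reconstruction; field types inferred from XImage's members.
struct fmt2Xfmtentry {
    VideoFormat::PixelFormat fmt;
    int byte_order;                  // LSBFirst or MSBFirst, as in XImage
    unsigned long red_mask, green_mask, blue_mask;
};
// fmt2Xfmt is a table of these, terminated by fmt == VideoFormat::Format_Invalid.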
Example #15
QImage VideoFrame::toImage(QImage::Format fmt, const QSize& dstSize, const QRectF &roi) const
{
    Q_D(const VideoFrame);
    if (!d->qt_image.isNull()
            && fmt == d->qt_image->format()
            && dstSize == d->qt_image->size()
            && (!roi.isValid() || roi == d->qt_image->rect())) {
        return *d->qt_image.data();
    }
    VideoFrame f(to(VideoFormat(VideoFormat::pixelFormatFromImageFormat(fmt)), dstSize, roi));
    if (!f)
        return QImage();
    QImage image((const uchar*)f.frameData().constData(), f.width(), f.height(), f.bytesPerLine(0), fmt);
    return image.copy();
}
Example #16
bool LibAVFilterPrivate::pull(Frame *f)
{
    AVFrameHolderRef frame_ref(new AVFrameHolder());
    int ret = av_buffersink_get_frame(out_filter_ctx, frame_ref->frame());
    if (ret < 0) {
        qWarning("av_buffersink_get_frame error: %s", av_err2str(ret));
        return false;
    }
    VideoFrame vf(frame_ref->frame()->width, frame_ref->frame()->height, VideoFormat(frame_ref->frame()->format));
    vf.setBits(frame_ref->frame()->data);
    vf.setBytesPerLine(frame_ref->frame()->linesize);
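    // Keep the holder referenced from the frame's metadata so the AVFrame backing
    // the plane pointers set above outlives this scope.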
    vf.setMetaData("avframe_hoder_ref", QVariant::fromValue(frame_ref));
    *f = vf;
    return true;
}
Example #17
VideoFormat::PixelFormat pixelFormat(XImage* xi) {
    const struct fmt2Xfmtentry *fmte = fmt2Xfmt;
    while (fmte->fmt != VideoFormat::Format_Invalid) {
        int depth = VideoFormat(fmte->fmt).bitsPerPixel();
        // 15->16? mpv
        if (depth == xi->bits_per_pixel && fmte->byte_order == xi->byte_order
                && fmte->red_mask == xi->red_mask
                && fmte->green_mask == xi->green_mask
                && fmte->blue_mask == xi->blue_mask)
            break;
        //qDebug() << fmte->fmt;
        fmte++;
    }
    qDebug("XImage format: bpp %d, endian: %d, R %X, G %X, B %X", xi->bits_per_pixel, xi->byte_order, xi->red_mask, xi->green_mask, xi->blue_mask);
    qDebug() << "PixelFormat: " << fmte->fmt;
    return fmte->fmt;
}
Example #18
VideoFrame VideoDecoderFFmpegBase::frame()
{
    DPTR_D(VideoDecoderFFmpegBase);
    if (d.frame->width <= 0 || d.frame->height <= 0 || !d.codec_ctx)
        return VideoFrame();
    // it's safe if width, height, pixfmt will not change, only data change
    VideoFrame frame(d.frame->width, d.frame->height, VideoFormat((int)d.codec_ctx->pix_fmt));
    frame.setDisplayAspectRatio(d.getDAR(d.frame));
    frame.setBits(d.frame->data);
    frame.setBytesPerLine(d.frame->linesize);
    // in s. TODO: what about AVFrame.pts? av_frame_get_best_effort_timestamp? move to VideoFrame::from(AVFrame*)
    frame.setTimestamp((double)d.frame->pkt_pts/1000.0);
    frame.setMetaData(QStringLiteral("avbuf"), QVariant::fromValue(AVFrameBuffersRef(new AVFrameBuffers(d.frame))));
    d.updateColorDetails(&frame);
    if (frame.format().hasPalette()) {
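        // data[1] holds the palette: 256 32-bit entries, i.e. the 256*4 bytes copied below.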
        frame.setMetaData(QStringLiteral("pallete"), QByteArray((const char*)d.frame->data[1], 256*4));
    }
    return frame;
}
Example #19
File: capture.cpp, Project: snorp/moon
void
VideoCaptureDevice::SetPalDevice (MoonCaptureDevice *device)
{
	CaptureDevice::SetPalDevice (device);

	MoonVideoCaptureDevice *video_device = (MoonVideoCaptureDevice*)device;

	VideoFormatCollection *col = MoonUnmanagedFactory::CreateVideoFormatCollection ();

	int num_formats;
	MoonVideoFormat **formats = video_device->GetSupportedFormats (&num_formats);
	for (int i = 0; i < num_formats; i ++)
	  col->Add (Value (VideoFormat (formats[i])));

	SetSupportedFormats (col);
	col->unref ();

	SetFriendlyName (video_device->GetFriendlyName());
}
Example #20
VideoFrame VideoDecoderFFmpeg::frame()
{
    DPTR_D(VideoDecoderFFmpeg);
    /*qDebug("color space: %d, range: %d, prim: %d, t: %d"
           , d.codec_ctx->colorspace, d.codec_ctx->color_range
           , d.codec_ctx->color_primaries, d.codec_ctx->color_trc);
           */
    if (d.frame->width <= 0 || d.frame->height <= 0 || !d.codec_ctx)
        return VideoFrame();
    // it's safe if width, height, pixfmt will not change, only data change
    VideoFrame frame(d.frame->width, d.frame->height, VideoFormat((int)d.codec_ctx->pix_fmt));
    frame.setDisplayAspectRatio(d.getDAR(d.frame));
    frame.setBits(d.frame->data);
    frame.setBytesPerLine(d.frame->linesize);
    frame.setTimestamp((double)d.frame->pkt_pts/1000.0); // in s. what about AVFrame.pts?
    frame.setMetaData(QStringLiteral("avbuf"), QVariant::fromValue(AVFrameBuffersRef(new AVFrameBuffers(d.frame))));
    d.updateColorDetails(&frame);
    return frame;
}
Example #21
VideoFrame VideoDecoderFFmpeg::frame()
{
    DPTR_D(VideoDecoderFFmpeg);
    /*qDebug("color space: %d, range: %d, prim: %d, t: %d"
           , d.codec_ctx->colorspace, d.codec_ctx->color_range
           , d.codec_ctx->color_primaries, d.codec_ctx->color_trc);
           */
    if (d.width <= 0 || d.height <= 0 || !d.codec_ctx)
        return VideoFrame();
    //DO NOT make frame a member, because VideoFrame is explicitly shared!
    float displayAspectRatio = 0;
    if (d.codec_ctx->sample_aspect_ratio.den > 0)
        displayAspectRatio = ((float)d.frame->width / (float)d.frame->height) *
            ((float)d.codec_ctx->sample_aspect_ratio.num / (float)d.codec_ctx->sample_aspect_ratio.den);

    // it's safe if width, height, pixfmt will not change, only data change
    VideoFrame frame(d.frame->width, d.frame->height, VideoFormat((int)d.codec_ctx->pix_fmt));
    frame.setDisplayAspectRatio(displayAspectRatio);
    frame.setBits(d.frame->data);
    frame.setBytesPerLine(d.frame->linesize);
    frame.setTimestamp((double)d.frame->pkt_pts/1000.0); // in s. what about AVFrame.pts?
    d.updateColorDetails(&frame);
    return frame;
}
Example #22
// ######################################################################
GenericFrame MgzJDecoder::readFrame()
{

  // Grab the journal entry for this frame and allocate an appropriate GenericFrame
  MgzJEncoder::journalEntry entry = itsJournal.at(itsFrameNum);
  const Dims dims(entry.width, entry.height);
  const GenericFrame::NativeType pix_type = GenericFrame::NativeType(entry.pix_type);
  const int num_pix = dims.sz();
  GenericFrame frame;

  //Read in the compressed image to a buffer
  uint64 comp_image_buf_size = entry.end_byte - entry.start_byte;
  byte * comp_image_buf = new byte[comp_image_buf_size];
  itsFile.seekg(entry.start_byte, std::ios::beg);
  itsFile.read((char*)comp_image_buf, comp_image_buf_size);

  //Prepare zlib to do the decompression
  z_stream strm;
  strm.zalloc   = Z_NULL;
  strm.zfree    = Z_NULL;
  strm.opaque   = Z_NULL;
  strm.avail_in = 0;
  strm.next_in  = Z_NULL;
  int ret = inflateInit(&strm);
  if (ret != Z_OK)
    LFATAL("Could not initialize zlib!");

  strm.avail_in = comp_image_buf_size;
  strm.next_in  = comp_image_buf;
  switch(pix_type)
  {
    case GenericFrame::GRAY_U8:
      {
        Image<byte> img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(byte);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img);

        break;
      }
    case GenericFrame::GRAY_U16:
      {
        Image<uint16> img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(uint16);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img);

        break;
      }
    case GenericFrame::GRAY_F32:
      {
        Image<float> img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(float);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img, entry.flags);

        break;
      }
    case GenericFrame::RGB_U8:
      {
        Image<PixRGB<byte> > img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(PixRGB<byte>);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img);

        break;
      }
    case GenericFrame::RGB_U16:
      {
        Image<PixRGB<uint16> > img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(PixRGB<uint16>);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img);

        break;
      }
    case GenericFrame::RGB_F32:
      {
        Image<PixRGB<float> > img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(PixRGB<float>);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img, entry.flags);

        break;
      }
    case GenericFrame::VIDEO:
      {
        const size_t vidSize = getFrameSize(VideoFormat(entry.flags), dims);
        ArrayHandle<byte> vidBuffer(new ArrayData<byte>(Dims(vidSize,1), NO_INIT));
        strm.avail_out = vidSize;
        strm.next_out = (unsigned char*)vidBuffer.uniq().dataw();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(VideoFrame(vidBuffer, dims, VideoFormat(entry.flags), bool(entry.byte_swap)));
        break;
      }
    default:
      LFATAL("Could Not Open Frame Of Type: %d!", pix_type);
  }
  
  inflateEnd(&strm);
  delete [] comp_image_buf;
  return frame;
}
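Every case above repeats the same inflate-into-destination step; a hedged sketch of that pattern as a helper (name hypothetical, LFATAL as used in the source):

// Hypothetical helper: inflate the whole compressed payload into dst.
template <typename T>
static void inflateInto(z_stream& strm, T* dst, size_t count)
{
  strm.avail_out = count * sizeof(T);
  strm.next_out  = (unsigned char*)dst;
  const int ret = inflate(&strm, Z_FINISH);
  if (ret != Z_STREAM_END)
    LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg);
}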
Example #23
	VideoFrame(bool free, mp_image *mpi, int field = Picture): d(new Data(free, mpi, VideoFormat(mpi), mpi->pts, field)) {}
Example #24
VideoFrame VideoFrame::to(VideoFormat::PixelFormat pixfmt, const QSize& dstSize, const QRectF &roi) const
{
    return to(VideoFormat(pixfmt), dstSize, roi);
}
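A hedged call-site sketch for this convenience overload (target format chosen arbitrarily):

// Hypothetical: convert to YUV420P at the source size, with a null ROI (whole picture).
VideoFrame yuv = frame.to(VideoFormat::Format_YUV420P, QSize(frame.width(), frame.height()), QRectF());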