Example #1
VideoFrame VideoDecoderVAAPI::frame()
{
    DPTR_D(VideoDecoderVAAPI);
    if (!d.frame->opaque || !d.frame->data[0])
        return VideoFrame();
    VASurfaceID surface_id = (VASurfaceID)(uintptr_t)d.frame->data[3];
    VAStatus status = VA_STATUS_SUCCESS;
    if (display() == GLX) {
        d.surface_interop->setSurface((va_surface_t*)d.frame->opaque, d.surface_width, d.surface_height);
        VideoFrame f(d.surface_width, d.surface_height, VideoFormat::Format_RGB32);
        f.setBytesPerLine(d.surface_width*4); //used by gl to compute texture size
        f.setSurfaceInterop(d.surface_interop);
        return f;
    }
#if VA_CHECK_VERSION(0,31,0)
    if ((status = vaSyncSurface(d.display, surface_id)) != VA_STATUS_SUCCESS) {
        qWarning("vaSyncSurface(VADisplay:%p, VASurfaceID:%#x) == %#x", d.display, surface_id, status);
#else
    if ((status = vaSyncSurface(d.display, d.context_id, surface_id)) != VA_STATUS_SUCCESS) {
        qWarning("vaSyncSurface(VADisplay:%p, VAContextID:%#x, VASurfaceID:%#x) == %#x", d.display, d.context_id, surface_id, status);
#endif
        return VideoFrame();
    }

    if (!d.disable_derive && d.supports_derive) {
        /*
         * http://web.archiveorange.com/archive/v/OAywENyq88L319OcRnHI
         * vaDeriveImage is faster than vaGetImage. But VAImage is uncached memory and copying from it would be terribly slow
         * TODO: copy from USWC, see vlc and https://github.com/OpenELEC/OpenELEC.tv/pull/2937.diff
         * https://software.intel.com/en-us/articles/increasing-memory-throughput-with-intel-streaming-simd-extensions-4-intel-sse4-streaming-load
         */
        status = vaDeriveImage(d.display, surface_id, &d.image);
        if (status != VA_STATUS_SUCCESS) {
            qWarning("vaDeriveImage(VADisplay:%p, VASurfaceID:%#x, VAImage*:%p) == %#x", d.display, surface_id, &d.image, status);
            return VideoFrame();
        }
    } else {
        status = vaGetImage(d.display, surface_id, 0, 0, d.surface_width, d.surface_height, d.image.image_id);
        if (status != VA_STATUS_SUCCESS) {
            qWarning("vaGetImage(VADisplay:%p, VASurfaceID:%#x, 0,0, %d, %d, VAImageID:%#x) == %#x", d.display, surface_id, d.surface_width, d.surface_height, d.image.image_id, status);
            return VideoFrame();
        }
    }

    void *p_base;
    if ((status = vaMapBuffer(d.display, d.image.buf, &p_base)) != VA_STATUS_SUCCESS) {
        qWarning("vaMapBuffer(VADisplay:%p, VABufferID:%#x, pBuf:%p) == %#x", d.display, d.image.buf, &p_base, status);
        return VideoFrame();
    }

    VideoFormat::PixelFormat pixfmt = VideoFormat::Format_Invalid;
    bool swap_uv = false;
    switch (d.image.format.fourcc) {
    case VA_FOURCC_YV12:
        swap_uv |= d.disable_derive || !d.supports_derive;
        pixfmt = VideoFormat::Format_YUV420P;
        break;
    case VA_FOURCC_IYUV:
        swap_uv = true;
        pixfmt = VideoFormat::Format_YUV420P;
        break;
    case VA_FOURCC_NV12:
        pixfmt = VideoFormat::Format_NV12;
        break;
    default:
        break;
    }
    if (pixfmt == VideoFormat::Format_Invalid) {
        qWarning("unsupported vaapi pixel format: %#x", d.image.format.fourcc);
        return VideoFrame();
    }
    const VideoFormat fmt(pixfmt);
    uint8_t *src[3];
    int pitch[3];
    for (int i = 0; i < fmt.planeCount(); ++i) {
        src[i] = (uint8_t*)p_base + d.image.offsets[i];
        pitch[i] = d.image.pitches[i];
    }
    if (swap_uv) {
        std::swap(src[1], src[2]);
        std::swap(pitch[1], pitch[2]);
    }
    VideoFrame frame;
    if (d.copy_uswc && d.gpu_mem.isReady()) {
        int yuv_size = 0;
        if (pixfmt == VideoFormat::Format_NV12)
            yuv_size = pitch[0]*d.surface_height*3/2;
        else
            yuv_size = pitch[0]*d.surface_height + pitch[1]*d.surface_height/2 + pitch[2]*d.surface_height/2;
        // additional 15 bytes to ensure 16 bytes aligned
        QByteArray buf(15 + yuv_size, 0);
        const int offset_16 = (16 - ((uintptr_t)buf.data() & 0x0f)) & 0x0f;
        // TODO: are planes 1, 2... also 16-byte aligned?
        uchar* plane_ptr = (uchar*)buf.data() + offset_16;
        QVector<uchar*> dst(fmt.planeCount(), 0);
        for (int i = 0; i < dst.size(); ++i) {
            dst[i] = plane_ptr;
            // TODO: add VideoFormat::planeWidth/Height() ?
            const int plane_w = pitch[i];//(i == 0 || pixfmt == VideoFormat::Format_NV12) ? d.surface_width : fmt.chromaWidth(d.surface_width);
            const int plane_h = i == 0 ? d.surface_height : fmt.chromaHeight(d.surface_height);
            plane_ptr += pitch[i] * plane_h;
            d.gpu_mem.copyFrame(src[i], dst[i], plane_w, plane_h, pitch[i]);
        }
        frame = VideoFrame(buf, d.width, d.height, fmt);
        frame.setBits(dst);
        frame.setBytesPerLine(pitch);
    } else {
        frame = VideoFrame(d.width, d.height, fmt);
        frame.setBits(src);
        frame.setBytesPerLine(pitch);
        // TODO: why is clone() faster?
        frame = frame.clone();
    }

    if ((status = vaUnmapBuffer(d.display, d.image.buf)) != VA_STATUS_SUCCESS) {
        qWarning("vaUnmapBuffer(VADisplay:%p, VABufferID:%#x) == %#x", d.display, d.image.buf, status);
        return VideoFrame();
    }

    if (!d.disable_derive && d.supports_derive) {
        vaDestroyImage(d.display, d.image.image_id);
        d.image.image_id = VA_INVALID_ID;
    }
    return frame;
}
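
// The USWC TODO in the comment block above can be sketched with SSE4.1
// streaming loads, as vlc does. A minimal sketch, assuming src and pitch are
// 16-byte aligned; the helper name is ours, not part of QtAV.
#include <smmintrin.h> // SSE4.1: _mm_stream_load_si128 (MOVNTDQA)
#include <stdint.h>

static void copyPlaneFromUSWC(uint8_t *dst, const uint8_t *src,
                              int width, int height, int pitch)
{
    for (int y = 0; y < height; ++y) {
        const uint8_t *s = src + y * pitch;
        uint8_t *d = dst + y * pitch;
        int x = 0;
        for (; x + 16 <= width; x += 16) {
            // non-temporal load reads efficiently from uncached USWC memory
            __m128i v = _mm_stream_load_si128((__m128i*)(s + x));
            _mm_store_si128((__m128i*)(d + x), v);
        }
        for (; x < width; ++x) // tail bytes if width is not a multiple of 16
            d[x] = s[x];
    }
}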

struct display_names_t {
    VideoDecoderVAAPI::DisplayType display;
    QString name;
};
static const display_names_t display_names[] = {
    { VideoDecoderVAAPI::GLX, "GLX" },
    { VideoDecoderVAAPI::X11, "X11" },
    { VideoDecoderVAAPI::DRM, "DRM" }
};

static VideoDecoderVAAPI::DisplayType displayFromName(QString name) {
    for (unsigned int i = 0; i < sizeof(display_names)/sizeof(display_names[0]); ++i) {
        if (name.toUpper().contains(display_names[i].name.toUpper())) {
            return display_names[i].display;
        }
    }
    return VideoDecoderVAAPI::X11;
}

static QString displayToName(VideoDecoderVAAPI::DisplayType t) {
    for (unsigned int i = 0; i < sizeof(display_names)/sizeof(display_names[0]); ++i) {
        if (t == display_names[i].display) {
            return display_names[i].name;
        }
    }
    return QString();
}
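
// Usage sketch for the two lookups above; results follow from the
// display_names table (case-insensitive substring match, X11 as fallback):
static void displayNameExample() {
    VideoDecoderVAAPI::DisplayType t = displayFromName("vaapi-glx"); // contains "GLX" -> GLX
    QString n = displayToName(t);                                    // "GLX"
    VideoDecoderVAAPI::DisplayType d = displayFromName("wayland");   // no match -> X11
    Q_UNUSED(n); Q_UNUSED(d);
}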

void VideoDecoderVAAPI::setDisplayPriority(const QStringList &priority)
{
    DPTR_D(VideoDecoderVAAPI);
    d.display_priority.clear();
    foreach (QString disp, priority) {
        d.display_priority.push_back(displayFromName(disp));
    }
}
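
// The 16-byte alignment idiom used in frame() above, in isolation: allocate
// 15 spare bytes, then round the data pointer up to the next 16-byte
// boundary. A sketch; the helper name is ours.
#include <QByteArray>
#include <stdint.h>

static uchar* alloc16(QByteArray &buf, int payload)
{
    buf.resize(15 + payload); // 15 spare bytes guarantee an aligned offset exists
    const int offset = (16 - ((uintptr_t)buf.data() & 0x0f)) & 0x0f;
    return (uchar*)buf.data() + offset; // first 16-byte aligned address in buf
}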
Example #2
	void push(int field) {
		queue->push_back(VideoFrame(false, in->mpi(), p->nextPTS(), field | (in->field() & ~VideoFrame::Interlaced)));
		++pushed;
	}
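
// The flag expression in push() clears the input frame's Interlaced bit,
// keeps its remaining flags, and ORs in the new field. The same masking as a
// standalone sketch; the enum values here are hypothetical:
enum FieldFlag { Interlaced = 0x1, TopField = 0x2, BottomField = 0x4 };

static int retagField(int inputFlags, int field)
{
    return field | (inputFlags & ~Interlaced); // drop Interlaced, set new field
}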
Example #3
void QuickVideoPreview::displayNoFrame()
{
    receive(VideoFrame());
}
Example #4
void VideoRendererItem::present(const QImage &image) {
	present(VideoFrame(image));
}
Example #5
// ######################################################################
VideoFrame XCgrabberFlex::grabRaw()
{
#ifndef HAVE_XCLIB
  LFATAL("you must have XC support and the xclib library in order to use XCgrabberFlex");
  return VideoFrame();  /* can't happen */
#else
  ASSERT(itsCameraOk);
  int i = 0;

  struct xclib::xcdevservice xcdev = itsXclib.xcdev;
  struct xclib::pxlibservice pxlib = itsXclib.pxlib;

  // get the captured buffer ID
  xclib::pxbuffer_t bufferID = (xclib::pxbuffer_t)xcdev.getLiveStatus
    (&xcdev, UNITMAP, 0, PXVIST_DONE | PXVIST_BUFFER);

  while( bufferID == itsLastBuf)
    {
      bufferID = (xclib::pxbuffer_t)xcdev.getLiveStatus
        (&xcdev, UNITMAP, 0, PXVIST_DONE | PXVIST_BUFFER);
      usleep(100);
    }
  if(itsLastBuf != 0 && bufferID != (itsLastBuf)%USEDBUFFER + 1)
    {
      LINFO("last buf id= %4d, curr buf id= %4d",(int)itsLastBuf,(int)bufferID);
      LERROR("buffer error: buffer mis order");
    }

  pthread_mutex_lock(&qmutex_buf);
  itsLastBuf = bufferID;
  pthread_mutex_unlock(&qmutex_buf);

  // is the captured image based on byte or uint16 data?
  int dataMode = (itsBitDepth == 8 ? PXDATUINT8 : PXDATUINT16);

  const unsigned int bufSz = itsDims.getVal().sz() * ((itsBitDepth + 7) / 8); // round bit depth up to whole bytes
  const unsigned int imgSz = itsDims.getVal().sz();

  //! define the image from frame buffer
  struct xclib::pximage pximg;

  i = pxlib.initPximage(&pxlib, UNITMAP,
                        &pximg, 1, PXHINTBAYER, 0, itsStateid, bufferID, 0);

  pximg.wind.nw.x = 1920/2 - itsDims.getVal().w()/2;
  pximg.wind.nw.y = 1080/2 - itsDims.getVal().h()/2;
  pximg.wind.se.x = 1920/2 + itsDims.getVal().w()/2;
  pximg.wind.se.y = 1080/2 + itsDims.getVal().h()/2;

  LINFO("pximgsize %d,%d", pximg.wind.nw.x,pximg.wind.se.x);

  if (i<1)
    LFATAL("error, can not define a pximage, code: %d",i);


  if(pximg.ioset(&pximg, PXRXSCAN | PXIWRAP, dataMode, 0x01) < 0)
    {
      LFATAL("error in ioset, can not set frame buffer read");
      return VideoFrame();
    }

  if(imgSz !=  pximg.ioread(&pximg, PXRXSCAN | PXIWRAP, itsImgBuf,bufSz,0,0))
    {
      LFATAL("error in reading frame buffer(size error),"
             "expected size = %d", imgSz);
      return VideoFrame();
    }
  return VideoFrame(itsImgBuf,  bufSz, itsDims.getVal(),
                    itsGrabMode.getVal(), itsByteSwap.getVal(), false);

#endif // HAVE_XCLIB
}
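
// The mis-order check above assumes capture buffer IDs cycle 1..USEDBUFFER,
// so the expected successor of an ID is id % USEDBUFFER + 1. The same
// arithmetic as a standalone predicate (a sketch):
static bool isExpectedSuccessor(int lastId, int currentId, int usedBuffers)
{
    // e.g. lastId == usedBuffers -> wrap around, expect 1 next
    return currentId == lastId % usedBuffers + 1;
}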
Example #6
VideoFrame VideoDecoderVAAPI::frame()
{
    DPTR_D(VideoDecoderVAAPI);
    if (!d.frame->opaque || !d.frame->data[0])
        return VideoFrame();
    VASurfaceID surface_id = (VASurfaceID)(uintptr_t)d.frame->data[3];
    VAStatus status = VA_STATUS_SUCCESS;
    if (display() == GLX || (copyMode() == ZeroCopy && display() == X11)) {
        surface_ptr p;
        std::list<surface_ptr>::iterator it = d.surfaces_used.begin();
        for (; it != d.surfaces_used.end() && !p; ++it) {
            if((*it)->get() == surface_id) {
                p = *it;
                break;
            }
        }
        if (!p) {
            for (it = d.surfaces_free.begin(); it != d.surfaces_free.end() && !p; ++it) {
                if((*it)->get() == surface_id) {
                    p = *it;
                    break;
                }
            }
        }
        if (!p) {
            qWarning("VAAPI - Unable to find surface");
            return VideoFrame();
        }
        ((SurfaceInteropVAAPI*)d.surface_interop.data())->setSurface(p);

        VideoFrame f(d.width, d.height, VideoFormat::Format_RGB32); //p->width()
        f.setBytesPerLine(d.width*4); //used by gl to compute texture size
        f.setMetaData("surface_interop", QVariant::fromValue(d.surface_interop));
        f.setTimestamp(double(d.frame->pkt_pts)/1000.0);
        return f;
    }
#if VA_CHECK_VERSION(0,31,0)
    if ((status = vaSyncSurface(d.display->get(), surface_id)) != VA_STATUS_SUCCESS) {
        qWarning("vaSyncSurface(VADisplay:%p, VASurfaceID:%#x) == %#x", d.display->get(), surface_id, status);
#else
    if ((status = vaSyncSurface(d.display->get(), d.context_id, surface_id)) != VA_STATUS_SUCCESS) {
        qWarning("vaSyncSurface(VADisplay:%p, VAContextID:%#x, VASurfaceID:%#x) == %#x", d.display->get(), d.context_id, surface_id, status);
#endif
        return VideoFrame();
    }

    if (!d.disable_derive && d.supports_derive) {
        /*
         * http://web.archiveorange.com/archive/v/OAywENyq88L319OcRnHI
         * vaDeriveImage is faster than vaGetImage. But VAImage is uncached memory and copying from it would be terribly slow
         * TODO: copy from USWC, see vlc and https://github.com/OpenELEC/OpenELEC.tv/pull/2937.diff
         * https://software.intel.com/en-us/articles/increasing-memory-throughput-with-intel-streaming-simd-extensions-4-intel-sse4-streaming-load
         */
        VA_ENSURE_TRUE(vaDeriveImage(d.display->get(), surface_id, &d.image), VideoFrame());
    } else {
        VA_ENSURE_TRUE(vaGetImage(d.display->get(), surface_id, 0, 0, d.width, d.height, d.image.image_id), VideoFrame());
    }

    void *p_base;
    VA_ENSURE_TRUE(vaMapBuffer(d.display->get(), d.image.buf, &p_base), VideoFrame());

    VideoFormat::PixelFormat pixfmt = VideoFormat::Format_Invalid;
    bool swap_uv = false;
    switch (d.image.format.fourcc) {
    case VA_FOURCC_YV12:
        swap_uv |= d.disable_derive || !d.supports_derive;
        pixfmt = VideoFormat::Format_YUV420P;
        break;
    case VA_FOURCC_IYUV:
        swap_uv = true;
        pixfmt = VideoFormat::Format_YUV420P;
        break;
    case VA_FOURCC_NV12:
        pixfmt = VideoFormat::Format_NV12;
        break;
    default:
        break;
    }
    if (pixfmt == VideoFormat::Format_Invalid) {
        qWarning("unsupported vaapi pixel format: %#x", d.image.format.fourcc);
        return VideoFrame();
    }
    const VideoFormat fmt(pixfmt);
    uint8_t *src[3];
    int pitch[3];
    for (int i = 0; i < fmt.planeCount(); ++i) {
        src[i] = (uint8_t*)p_base + d.image.offsets[i];
        pitch[i] = d.image.pitches[i];
    }
    VideoFrame frame(copyToFrame(fmt, d.surface_height, src, pitch, swap_uv));
    VAWARN(vaUnmapBuffer(d.display->get(), d.image.buf));
    if (!d.disable_derive && d.supports_derive) {
        vaDestroyImage(d.display->get(), d.image.image_id);
        d.image.image_id = VA_INVALID_ID;
    }
    return frame;
}
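
// The two linear searches over d.surfaces_used and d.surfaces_free in frame()
// could be factored into one helper. A sketch reusing QtAV's surface_ptr type:
#include <list>

static surface_ptr findSurfaceById(const std::list<surface_ptr> &used,
                                   const std::list<surface_ptr> &free_list,
                                   VASurfaceID id)
{
    for (std::list<surface_ptr>::const_iterator it = used.begin(); it != used.end(); ++it)
        if ((*it)->get() == id) return *it; // surface still referenced by a frame
    for (std::list<surface_ptr>::const_iterator it = free_list.begin(); it != free_list.end(); ++it)
        if ((*it)->get() == id) return *it; // surface already recycled
    return surface_ptr(); // not found: caller warns and returns an empty frame
}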

void VideoDecoderVAAPI::setDisplayPriority(const QStringList &priority)
{
    DPTR_D(VideoDecoderVAAPI);
    d.display_priority.clear();
    int idx = staticMetaObject.indexOfEnumerator("DisplayType");
    const QMetaEnum me = staticMetaObject.enumerator(idx);
    foreach (const QString& disp, priority) {
        d.display_priority.push_back((DisplayType)me.keyToValue(disp.toUtf8().constData()));
    }
}
Example #7
 // return the key frame position
 bool extractInPrecision(qint64 value, int range) {
     frame = VideoFrame();
     if (value < demuxer.startTime())
         value += demuxer.startTime();
     demuxer.seek(value);
     const int vstream = demuxer.videoStream();
     Packet pkt;
     qint64 pts0 = -1;
     bool warn_bad_seek = true;
     bool warn_out_of_range = true;
     while (!demuxer.atEnd()) {
         if (!demuxer.readFrame())
             continue;
         if (demuxer.stream() != vstream)
             continue;
         pkt = demuxer.packet();
         if (pts0 < 0LL)
             pts0 = (qint64)(pkt.pts*1000.0);
         if ((qint64)(pkt.pts*1000.0) - value > (qint64)range) {
             if (warn_out_of_range)
                 qDebug("read packet out of range");
             warn_out_of_range = false;
             // No return because decoder needs more packets before the desired frame is decoded
             //return false;
         }
         //qDebug("video packet: %f", pkt.pts);
         // TODO: always key frame?
         if (pkt.hasKeyFrame)
             break;
         if (warn_bad_seek)
             qWarning("Not seek to key frame!!!");
         warn_bad_seek = false;
     }
     // enlarge range if seek to key-frame failed
     const qint64 key_pts = (qint64)(pkt.pts*1000.0);
     const bool enlarge_range = pts0 >= 0LL && key_pts - pts0 > 0LL;
     if (enlarge_range) {
         range = qMax<qint64>(key_pts - value, range);
         qDebug() << "enlarge range ==>>>> " << range;
     }
     if (!pkt.isValid()) {
         qWarning("VideoFrameExtractor failed to get a packet at %lld", value);
         return false;
     }
     decoder->flush(); //must flush otherwise old frames will be decoded at the beginning
     decoder->setOptions(dec_opt_normal);
     // must decode key frame
     int k = 0;
     while (k < 2 && !frame.isValid()) {
         //qWarning("invalid key frame!!!!! undecoded: %d", decoder->undecodedSize());
         if (decoder->decode(pkt)) {
             frame = decoder->frame();
         }
         ++k;
     }
     // if we sought backward to a key frame correctly, diff0 = t - value <= 0;
     // sometimes we land on a non-key frame (and range was enlarged), so diff0 >= 0
     // decode key frame
     const qint64 diff0 = qint64(frame.timestamp()*1000.0) - value;
     if (qAbs(diff0) <= range) { //TODO: flag forward: result pts must >= value
         if (frame.isValid()) {
             qDebug() << "VideoFrameExtractor: key frame found @" << frame.timestamp() <<" diff=" << diff0 << ". format: " <<  frame.format();
             return true;
         }
     }
     QVariantHash* dec_opt = &dec_opt_normal; // 0: default, 1: framedrop
     // decode at the given position
     while (!demuxer.atEnd()) {
         if (!demuxer.readFrame())
             continue;
         if (demuxer.stream() != vstream)
             continue;
         pkt = demuxer.packet();
         const qreal t = pkt.pts;
         //qDebug("video packet: %f, delta=%lld", t, value - qint64(t*1000.0));
         if (!pkt.isValid()) {
             qWarning("invalid packet. no decode");
             continue;
         }
         if (pkt.hasKeyFrame) {
             // FIXME:
             //qCritical("Internal error. Can not be a key frame!!!!");
             //return false; //??
         }
         qint64 diff = qint64(t*1000.0) - value;
         QVariantHash *dec_opt_old = dec_opt;
         if (seek_count == 0 || diff >= 0)
             dec_opt = &dec_opt_normal;
         else
             dec_opt = &dec_opt_framedrop;
         if (dec_opt != dec_opt_old)
             decoder->setOptions(*dec_opt);
         // invalid packet?
         if (!decoder->decode(pkt)) {
             qWarning("!!!!!!!!!decode failed!!!!");
             frame = VideoFrame();
             return false;
         }
         // store the last decoded frame because next frame may be out of range
         const VideoFrame f = decoder->frame();
         if (!f.isValid()) {
             //qDebug("VideoFrameExtractor: invalid frame!!!");
             continue;
         }
         frame = f;
         const qreal pts = frame.timestamp();
         const qint64 pts_ms = pts*1000.0;
         if (pts_ms < value)
             continue; //
         diff = pts_ms - value;
         if (qAbs(diff) <= (qint64)range) {
             qDebug("got frame at %fs, diff=%lld", pts, diff);
             break;
         }
         // if the decoder was not flushed, we may get an old frame, which is acceptable
         if (diff > range && t > pts) {
             qWarning("out pts out of range. diff=%lld, range=%d", diff, range);
             frame = VideoFrame();
             return false;
         }
     }
     ++seek_count;
     // now we get the final frame
     return true;
 }
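
// Worked example of the millisecond range test above: requesting
// value = 5000 ms and decoding a frame at 5.2 s gives diff = 200 ms, which is
// accepted when range >= 200 and rejected otherwise. A self-contained check:
#include <cassert>
#include <cstdlib>

static void rangeTestExample()
{
    const long long value = 5000;                              // requested position, ms
    const double pts = 5.2;                                    // decoded frame pts, s
    const long long diff = (long long)(pts * 1000.0) - value;  // 200 ms late
    assert(std::llabs(diff) <= 250);                           // within range = 250
    assert(std::llabs(diff) > 100);                            // outside range = 100
}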
Example #8
// ######################################################################
GenericFrame MgzJDecoder::readFrame()
{

  // Grab the journal entry for this frame and allocate an appropriate GenericFrame
  MgzJEncoder::journalEntry entry = itsJournal.at(itsFrameNum);
  const Dims dims(entry.width, entry.height);
  const GenericFrame::NativeType pix_type = GenericFrame::NativeType(entry.pix_type);
  const int num_pix = dims.sz();
  GenericFrame frame;

  //Read in the compressed image to a buffer
  uint64 comp_image_buf_size = entry.end_byte - entry.start_byte;
  byte * comp_image_buf = new byte[comp_image_buf_size];
  itsFile.seekg(entry.start_byte, std::ios::beg);
  itsFile.read((char*)comp_image_buf, comp_image_buf_size);

  //Prepare zlib to do the decompression
  z_stream strm;
  strm.zalloc   = Z_NULL;
  strm.zfree    = Z_NULL;
  strm.opaque   = Z_NULL;
  strm.avail_in = 0;
  strm.next_in  = Z_NULL;
  int ret = inflateInit(&strm);
  if (ret != Z_OK)
    LFATAL("Could not initialize zlib!");

  strm.avail_in = comp_image_buf_size;
  strm.next_in  = comp_image_buf;
  switch(pix_type)
  {
    case GenericFrame::GRAY_U8:
      {
        Image<byte> img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(byte);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img);

        break;
      }
    case GenericFrame::GRAY_U16:
      {
        Image<uint16> img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(uint16);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img);

        break;
      }
    case GenericFrame::GRAY_F32:
      {
        Image<float> img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(float);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img, entry.flags);

        break;
      }
    case GenericFrame::RGB_U8:
      {
        Image<PixRGB<byte> > img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(PixRGB<byte>);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img);

        break;
      }
    case GenericFrame::RGB_U16:
      {
        Image<PixRGB<uint16> > img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(PixRGB<uint16>);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img);

        break;
      }
    case GenericFrame::RGB_F32:
      {
        Image<PixRGB<float> > img(dims, NO_INIT);
        strm.avail_out = num_pix * sizeof(PixRGB<float>);
        strm.next_out  = (unsigned char*)img.getArrayPtr();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(img, entry.flags);

        break;
      }
      case GenericFrame::VIDEO:
      {
        const size_t vidSize = getFrameSize(VideoFormat(entry.flags), dims);
        ArrayHandle<byte> vidBuffer(new ArrayData<byte>(Dims(vidSize,1), NO_INIT));
        strm.avail_out = vidSize;
        strm.next_out = (unsigned char*)vidBuffer.uniq().dataw();
        ret = inflate(&strm, Z_FINISH);
        if(ret != Z_STREAM_END)
        { LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg); }
        frame = GenericFrame(VideoFrame(vidBuffer, dims, VideoFormat(entry.flags), bool(entry.byte_swap)));
        break;
      }
    default:
      LFATAL("Could Not Open Frame Of Type: %d!", pix_type);
  }
  
  inflateEnd(&strm);
  delete [] comp_image_buf;
  return frame;
}
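
// Every case above repeats the same avail_out/next_out/inflate/Z_STREAM_END
// sequence; it could be collapsed into one helper. A sketch using the same
// zlib calls and the local LFATAL macro:
static void inflateInto(z_stream &strm, void *dst, size_t dstSize)
{
    strm.avail_out = (uInt)dstSize;        // room left in dst
    strm.next_out  = (unsigned char*)dst;  // decompress directly into dst
    const int ret = inflate(&strm, Z_FINISH);
    if (ret != Z_STREAM_END)
        LFATAL("Could Not Inflate Frame! %d, %s", ret, strm.msg);
}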