Example #1
/* Decoder callback that adds the vda frame to the queue in display order. */
static void vda_decoder_callback (void *vda_hw_ctx,
                                  CFDictionaryRef user_info,
                                  OSStatus status,
                                  uint32_t infoFlags,
                                  CVImageBufferRef image_buffer)
{
    struct vda_context *vda_ctx = vda_hw_ctx;

    if (!image_buffer)
        return;

    if (vda_ctx->cv_pix_fmt_type != CVPixelBufferGetPixelFormatType(image_buffer))
        return;

    if (vda_ctx->use_sync_decoding) {
        vda_ctx->cv_buffer = CVPixelBufferRetain(image_buffer);
    } else {
        vda_frame *new_frame;
        vda_frame *queue_walker;

        if (!(new_frame = av_mallocz(sizeof(*new_frame))))
            return;

        new_frame->next_frame = NULL;
        new_frame->cv_buffer = CVPixelBufferRetain(image_buffer);
        new_frame->pts = vda_pts_from_dictionary(user_info);

        pthread_mutex_lock(&vda_ctx->queue_mutex);

        queue_walker = vda_ctx->queue;

        if (!queue_walker || (new_frame->pts < queue_walker->pts)) {
            /* we have an empty queue, or this frame is earlier than the current queue head */
            new_frame->next_frame = queue_walker;
            vda_ctx->queue = new_frame;
        } else {
            /* walk the queue and insert this frame where it belongs in display order */
            vda_frame *next_frame;

            while (1) {
                next_frame = queue_walker->next_frame;

                if (!next_frame || (new_frame->pts < next_frame->pts)) {
                    new_frame->next_frame = next_frame;
                    queue_walker->next_frame = new_frame;
                    break;
                }
                queue_walker = next_frame;
            }
        }

        pthread_mutex_unlock(&vda_ctx->queue_mutex);
    }
}
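The callback above only retains and enqueues frames; the consuming half is implied but not shown. A minimal sketch of such a consumer, reusing the queue fields from the example (vda_queue_pop is a hypothetical name, not part of the excerpt):

/* Pops the display-order head under the same mutex; the caller assumes
   ownership of the retained cv_buffer and must balance it with
   CVPixelBufferRelease() after consuming the frame. */
static vda_frame *vda_queue_pop(struct vda_context *vda_ctx)
{
    vda_frame *top_frame;

    pthread_mutex_lock(&vda_ctx->queue_mutex);
    top_frame = vda_ctx->queue;
    if (top_frame)
        vda_ctx->queue = top_frame->next_frame;
    pthread_mutex_unlock(&vda_ctx->queue_mutex);

    return top_frame;
}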
Example #2
	virtual void addCVImageBuffer(CVImageBufferRef in_ref)
	{
		if (in_ref)
			CVPixelBufferRetain(in_ref);
		releaseData();

		m_ref = in_ref;
	}
Example #3
static int vdadec_decode(AVCodecContext *avctx,
        void *data, int *got_frame, AVPacket *avpkt)
{
    VDADecoderContext *ctx = avctx->priv_data;
    AVFrame *pic = data;
    int ret;

    set_context(avctx);
    ret = ff_h264_decoder.decode(avctx, data, got_frame, avpkt);
    restore_context(avctx);
    if (*got_frame) {
        AVBufferRef *buffer = pic->buf[0];
        VDABufferContext *context = av_buffer_get_opaque(buffer);
        CVPixelBufferRef cv_buffer = (CVPixelBufferRef)pic->data[3];

        CVPixelBufferRetain(cv_buffer);
        CVPixelBufferLockBaseAddress(cv_buffer, 0);
        context->cv_buffer = cv_buffer;
        pic->format = ctx->pix_fmt;
        if (CVPixelBufferIsPlanar(cv_buffer)) {
            int i, count = CVPixelBufferGetPlaneCount(cv_buffer);
            av_assert0(count < 4);
            for (i = 0; i < count; i++) {
                pic->data[i] = CVPixelBufferGetBaseAddressOfPlane(cv_buffer, i);
                pic->linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(cv_buffer, i);
            }
        } else {
            pic->data[0] = CVPixelBufferGetBaseAddress(cv_buffer);
            pic->linesize[0] = CVPixelBufferGetBytesPerRow(cv_buffer);
        }
    }
    avctx->pix_fmt = ctx->pix_fmt;

    return ret;
}
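The *got_frame path above retains and locks the CVPixelBuffer, but the matching teardown is outside the excerpt. A sketch of what the AVBufferRef free callback could look like under that assumption (release_buffer_context is a hypothetical name):

/* Hypothetical AVBufferRef free callback: undoes the lock and retain taken
   in vdadec_decode() exactly once, when the last frame reference drops. */
static void release_buffer_context(void *opaque, uint8_t *data)
{
    VDABufferContext *context = opaque;
    CVPixelBufferUnlockBaseAddress(context->cv_buffer, 0);
    CVPixelBufferRelease(context->cv_buffer);
    av_free(data);
}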
Example #4
CVPixelBufferRef
gst_core_media_buffer_get_pixel_buffer (GstBuffer * buf)
{
  GstCoreMediaMeta *meta = (GstCoreMediaMeta *) gst_buffer_get_meta (buf,
      GST_CORE_MEDIA_META_API_TYPE);
  g_return_val_if_fail (meta != NULL, NULL);

  return CVPixelBufferRetain (meta->pixel_buf);
}
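CVPixelBufferRetain() returns its argument, so the getter hands the caller an owned reference. A hypothetical call site balancing it:

/* The returned reference is owned by the caller and must be balanced
   with CVPixelBufferRelease(). */
CVPixelBufferRef pixbuf = gst_core_media_buffer_get_pixel_buffer (buf);
if (pixbuf != NULL) {
  /* ... read or map the pixel data ... */
  CVPixelBufferRelease (pixbuf);
}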
Example #5
VideoFrame VideoDecoderVideoToolbox::frame()
{
    DPTR_D(VideoDecoderVideoToolbox);
    CVPixelBufferRef cv_buffer = (CVPixelBufferRef)d.frame->data[3];
    if (!cv_buffer) {
        qDebug("Frame buffer is empty.");
        return VideoFrame();
    }
    if (CVPixelBufferGetDataSize(cv_buffer) <= 0) {
        qDebug("Empty frame buffer");
        return VideoFrame();
    }
    VideoFormat::PixelFormat pixfmt = format_from_cv(CVPixelBufferGetPixelFormatType(cv_buffer));
    if (pixfmt == VideoFormat::Format_Invalid) {
        qWarning("unsupported videotoolbox pixel format: %#x", CVPixelBufferGetPixelFormatType(cv_buffer));
        return VideoFrame();
    }
    // We can map the CVPixelBuffer addresses to a video frame in SurfaceInteropCVBuffer (may also need VideoSurfaceInterop::mapToTexture()).
    class SurfaceInteropCVBuffer Q_DECL_FINAL: public VideoSurfaceInterop {
        bool glinterop;
        CVPixelBufferRef cvbuf; // keep ref until video frame is destroyed
    public:
        SurfaceInteropCVBuffer(CVPixelBufferRef cv, bool gl) : glinterop(gl), cvbuf(cv) {
            CVPixelBufferRetain(cvbuf); // VideoToolbox needs the buffer kept alive for mapping; balanced by CVPixelBufferRelease in the destructor
        }
        ~SurfaceInteropCVBuffer() {
            CVPixelBufferRelease(cvbuf);
        }
        void* mapToHost(const VideoFormat &format, void *handle, int plane) {
            Q_UNUSED(plane);
            CVPixelBufferLockBaseAddress(cvbuf, 0);
            const VideoFormat fmt(format_from_cv(CVPixelBufferGetPixelFormatType(cvbuf)));
            if (!fmt.isValid()) {
                CVPixelBufferUnlockBaseAddress(cvbuf, 0);
                return NULL;
            }
            const int w = CVPixelBufferGetWidth(cvbuf);
            const int h = CVPixelBufferGetHeight(cvbuf);
            uint8_t *src[3];
            int pitch[3];
            for (int i = 0; i < fmt.planeCount(); ++i) {
                // getting the plane base address may trigger an internal copy
                src[i] = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cvbuf, i);
                pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(cvbuf, i);
            }
            CVPixelBufferUnlockBaseAddress(cvbuf, 0);
            //CVPixelBufferRelease(cv_buffer); // release when video frame is destroyed
            VideoFrame frame(VideoFrame::fromGPU(fmt, w, h, h, src, pitch));
            if (fmt != format)
                frame = frame.to(format);
            VideoFrame *f = reinterpret_cast<VideoFrame*>(handle);
            frame.setTimestamp(f->timestamp());
            frame.setDisplayAspectRatio(f->displayAspectRatio());
            *f = frame;
            return f;
        }
Example #6
/**
 * gst_apple_core_video_pixel_buffer_new:
 * @buf: an unlocked CVPixelBuffer
 *
 * Initializes a wrapper to manage locking state for a CVPixelBuffer.
 * This function expects to receive unlocked CVPixelBuffer, and further assumes
 * that no one else will lock it (as long as the wrapper exists).
 *
 * This function retains @buf.
 *
 * Returns: The wrapped @buf.
 */
GstAppleCoreVideoPixelBuffer *
gst_apple_core_video_pixel_buffer_new (CVPixelBufferRef buf)
{
  GstAppleCoreVideoPixelBuffer *gpixbuf =
      g_slice_new (GstAppleCoreVideoPixelBuffer);
  gpixbuf->refcount = 1;
  g_mutex_init (&gpixbuf->mutex);
  gpixbuf->buf = CVPixelBufferRetain (buf);
  gpixbuf->lock_state = GST_APPLE_CORE_VIDEO_MEMORY_UNLOCKED;
  gpixbuf->lock_count = 0;
  return gpixbuf;
}
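Construction is only half of the wrapper's lifecycle; the refcount field implies a matching unref. A minimal sketch under that assumption (the function name and the atomic decrement are not from the excerpt):

/* Hypothetical counterpart: drops one wrapper reference and, on the last
 * one, releases the buffer retained in _new() and frees the wrapper. */
static void
gst_apple_core_video_pixel_buffer_unref (GstAppleCoreVideoPixelBuffer * gpixbuf)
{
  if (g_atomic_int_dec_and_test (&gpixbuf->refcount)) {
    CVPixelBufferRelease (gpixbuf->buf);
    g_mutex_clear (&gpixbuf->mutex);
    g_slice_free (GstAppleCoreVideoPixelBuffer, gpixbuf);
  }
}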
Example #7
/* Decoder callback that adds the vda frame to the queue in display order. */
static void vda_decoder_callback (void *vda_hw_ctx, CFDictionaryRef /*user_info*/, OSStatus /*status*/, uint32_t /*infoFlags*/, CVImageBufferRef image_buffer) {
	vda_context *vda_ctx = (vda_context*)vda_hw_ctx;
	if (!image_buffer)
		return;
	const auto fmt = CVPixelBufferGetPixelFormatType(image_buffer);
	if (!_Contains(cvpixfmts, fmt)) {
		qDebug() << "vda: not supported format!!";
		return;
	}
	vda_ctx->cv_pix_fmt_type = fmt;
	vda_ctx->cv_buffer = CVPixelBufferRetain(image_buffer);
}
Example #8
mp_image *HwAccVda::getImage(mp_image *mpi) {
	auto buffer = (CVPixelBufferRef)mpi->planes[3];
	auto release = [] (void *arg) {
		CVPixelBufferRef buffer = (CVPixelBufferRef)arg;
		CVPixelBufferRelease(buffer);
	};
	CVPixelBufferRetain(buffer);
	auto img = null_mp_image(IMGFMT_VDA, size().width(), size().height(), buffer, release);
	mp_image_copy_attributes(img, mpi);
	img->planes[3] = mpi->planes[3];
	return img;
}
Example #9
static OSStatus setup_video(void)
{
    //Add video track
    videoTrack = NewMovieTrack(movie, video_width << 16, video_height << 16, 0);
    OSStatus theError = GetMoviesError();
    if (theError) {
        log_debug("quicktime_video: error creating movie track");
        return theError;
    }

    //Create video track media
    videoMedia = NewTrackMedia(videoTrack, VideoMediaType, timeScale, 0, 0);
    theError = GetMoviesError();
    if (theError) {
        log_debug("quicktime_video: error creating track media!");
        return theError;
    }

    //Prepare media for editing
    theError = BeginMediaEdits(videoMedia);
    if (theError) {
        log_debug("quicktime_video: error beginning media edits!");
        return theError;
    }

    // ----- Setup Codec -----
    CodecType codec = (CodecType)video_codec;

    // Create compression session
    ICMEncodedFrameOutputRecord record = {
        FrameOutputCallback, NULL, NULL
    };
    theError = ICMCompressionSessionCreate(kCFAllocatorDefault,
                                           video_width, video_height, codec, timeScale, NULL /*options*/, NULL,
                                           &record, &videoCompressionSession);
    if (theError) {
        log_debug("quicktime_video: error creating compression session!");
        return theError;
    }

    // ----- PixelBuffer -----
    theError = CVPixelBufferCreate(NULL, video_width, video_height,
                                   kCVPixelFormatType_24RGB, NULL, &pixelBuffer);
    if (theError) {
        log_debug("quicktime_video: error creating pixel buffer!");
        return theError;
    }
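    /* Note: CVPixelBufferCreate() already returned a +1 reference (Create
       Rule), so the retain below leaves the buffer at refcount 2; teardown
       must call CVPixelBufferRelease() twice or the buffer leaks. */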
    CVPixelBufferRetain(pixelBuffer);

    video_ready = 1;
    return noErr;
}
Example #10
static void VTBDec_on_frame(void *opaque, void *sourceFrameRefCon, OSStatus status, VTDecodeInfoFlags flags, CVImageBufferRef image, CMTime pts, CMTime duration)
{
    VTBDec *ctx = (VTBDec *)opaque;
    if (ctx->frame) {
        CVPixelBufferRelease(ctx->frame);
        ctx->frame = NULL;
    }
    if (status)
        ctx->last_error = GF_NON_COMPLIANT_BITSTREAM;
	
    if (!image) {
        GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[VTB] No output buffer - status %d\n", status));
        return;
    }
    ctx->frame = CVPixelBufferRetain(image);
}
Example #11
File: vda_h264.c  Project: TheRyuu/libav
/* Decoder callback that adds the VDA frame to the queue in display order. */
static void vda_decoder_callback(void *vda_hw_ctx,
                                 CFDictionaryRef user_info,
                                 OSStatus status,
                                 uint32_t infoFlags,
                                 CVImageBufferRef image_buffer)
{
    struct vda_context *vda_ctx = vda_hw_ctx;

    if (!image_buffer)
        return;

    if (vda_ctx->cv_pix_fmt_type != CVPixelBufferGetPixelFormatType(image_buffer))
        return;

    vda_ctx->cv_buffer = CVPixelBufferRetain(image_buffer);
}
Example #12
static void
VDADecoderCallback (void *decompressionOutputRefCon, CFDictionaryRef frameInfo, OSStatus status, uint32_t infoFlags, CVImageBufferRef imageBuffer)
{
	MoonVDADecoder *decoder = (MoonVDADecoder *) decompressionOutputRefCon;
	VideoStream *vs = (VideoStream *) decoder->GetStream ();

	// FIXME: Is this always 1 thread? Can we optimize this?
	decoder->GetDeployment ()->RegisterThread ();

	Deployment::SetCurrent (decoder->GetDeployment ());

	if (imageBuffer == NULL) {
		return;
	}

	OSType format_type = CVPixelBufferGetPixelFormatType (imageBuffer);
	if (format_type != kCVPixelFormatType_422YpCbCr8) {
		g_warning ("Mismatched format in VDA");
		return;
	}

	MediaFrame *mf = (MediaFrame *) CFDictionaryGetValue (frameInfo, CFSTR ("MoonMediaFrame"));

	mf->AddState (MediaFrameVUY2);
	mf->FreeBuffer ();
	mf->SetBufLen (0);

	mf->srcSlideY = 0;
	mf->srcSlideH = vs->GetHeight ();

	mf->width = vs->GetWidth ();
	mf->height = vs->GetHeight ();

	CVPixelBufferLockBaseAddress (imageBuffer, 0);

	mf->data_stride [0] = (uint8_t *) CVPixelBufferGetBaseAddress (imageBuffer);
	mf->srcStride [0] = CVPixelBufferGetBytesPerRow (imageBuffer);

	mf->AddState (MediaFrameDecoded);

	mf->decoder_specific_data = imageBuffer;
	CVPixelBufferRetain (imageBuffer);

	decoder->ReportDecodeFrameCompleted (mf);

	mf->unref ();
}
Example #13
File: vda_h264.c  Project: TheRyuu/libav
void ff_vda_output_callback(void *opaque,
                            CFDictionaryRef user_info,
                            OSStatus status,
                            uint32_t infoFlags,
                            CVImageBufferRef image_buffer)
{
    AVCodecContext *ctx = opaque;
    VDAContext *vda = ctx->internal->hwaccel_priv_data;


    if (vda->frame) {
        CVPixelBufferRelease(vda->frame);
        vda->frame = NULL;
    }

    if (!image_buffer)
        return;

    vda->frame = CVPixelBufferRetain(image_buffer);
}
Example #14
void QTPixelBuffer::set(CVPixelBufferRef ref)
{
    CVPixelBufferRetain(ref);
    CVPixelBufferRelease(m_pixelBuffer);
    m_pixelBuffer = ref;
}
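The retain-before-release ordering above is what keeps self-assignment safe: if ref and m_pixelBuffer are the same object, releasing first could destroy the buffer before it is retained again. A hypothetical variant that makes the guard explicit:

void QTPixelBuffer::set(CVPixelBufferRef ref)
{
    if (ref != m_pixelBuffer) { // skip the refcount churn on self-assignment
        CVPixelBufferRetain(ref);
        CVPixelBufferRelease(m_pixelBuffer);
        m_pixelBuffer = ref;
    }
}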
Example #15
File: hwdec_osx.c  Project: jmglogow/mpv
static int create(struct gl_hwdec *hw)
{
    if (!check_hwdec(hw))
        return -1;

    struct priv *p = talloc_zero(hw, struct priv);
    hw->priv = p;

    hw->gl->GenTextures(MP_MAX_PLANES, p->gl_planes);

    p->hwctx = (struct mp_hwdec_ctx){
        .type = HWDEC_VIDEOTOOLBOX,
        .download_image = mp_vt_download_image,
        .ctx = &p->hwctx,
    };
    hwdec_devices_add(hw->devs, &p->hwctx);

    return 0;
}

static int reinit(struct gl_hwdec *hw, struct mp_image_params *params)
{
    struct priv *p = hw->priv;

    assert(params->imgfmt == hw->driver->imgfmt);

    if (!params->hw_subfmt) {
        MP_ERR(hw, "Unsupported CVPixelBuffer format.\n");
        return -1;
    }

    if (!gl_get_imgfmt_desc(hw->gl, params->hw_subfmt, &p->desc)) {
        MP_ERR(hw, "Unsupported texture format.\n");
        return -1;
    }

    params->imgfmt = params->hw_subfmt;
    params->hw_subfmt = 0;
    return 0;
}

static int map_frame(struct gl_hwdec *hw, struct mp_image *hw_image,
                     struct gl_hwdec_frame *out_frame)
{
    struct priv *p = hw->priv;
    GL *gl = hw->gl;

    CVPixelBufferRelease(p->pbuf);
    p->pbuf = (CVPixelBufferRef)hw_image->planes[3];
    CVPixelBufferRetain(p->pbuf);
    IOSurfaceRef surface = CVPixelBufferGetIOSurface(p->pbuf);
    if (!surface) {
        MP_ERR(hw, "CVPixelBuffer has no IOSurface\n");
        return -1;
    }

    const bool planar = CVPixelBufferIsPlanar(p->pbuf);
    const int planes  = CVPixelBufferGetPlaneCount(p->pbuf);
    assert((planar && planes == p->desc.num_planes) || p->desc.num_planes == 1);

    GLenum gl_target = GL_TEXTURE_RECTANGLE;

    for (int i = 0; i < p->desc.num_planes; i++) {
        const struct gl_format *fmt = p->desc.planes[i];

        gl->BindTexture(gl_target, p->gl_planes[i]);

        CGLError err = CGLTexImageIOSurface2D(
            CGLGetCurrentContext(), gl_target,
            fmt->internal_format,
            IOSurfaceGetWidthOfPlane(surface, i),
            IOSurfaceGetHeightOfPlane(surface, i),
            fmt->format, fmt->type, surface, i);

        if (err != kCGLNoError)
            MP_ERR(hw, "error creating IOSurface texture for plane %d: %s (%x)\n",
                   i, CGLErrorString(err), gl->GetError());

        gl->BindTexture(gl_target, 0);

        out_frame->planes[i] = (struct gl_hwdec_plane){
            .gl_texture = p->gl_planes[i],
            .gl_target = gl_target,
            .tex_w = IOSurfaceGetWidthOfPlane(surface, i),
            .tex_h = IOSurfaceGetHeightOfPlane(surface, i),
        };
    }

    snprintf(out_frame->swizzle, sizeof(out_frame->swizzle), "%s",
             p->desc.swizzle);

    return 0;
}

static void destroy(struct gl_hwdec *hw)
{
    struct priv *p = hw->priv;
    GL *gl = hw->gl;

    CVPixelBufferRelease(p->pbuf);
    gl->DeleteTextures(MP_MAX_PLANES, p->gl_planes);

    hwdec_devices_remove(hw->devs, &p->hwctx);
}

const struct gl_hwdec_driver gl_hwdec_videotoolbox = {
    .name = "videotoolbox",
    .api = HWDEC_VIDEOTOOLBOX,
    .imgfmt = IMGFMT_VIDEOTOOLBOX,
    .create = create,
    .reinit = reinit,
    .map_frame = map_frame,
    .destroy = destroy,
};
Example #16
File: vda.c  Project: Aseeker/mpv
static int init_vda_decoder(struct lavc_ctx *ctx)
{
    struct priv *p = ctx->hwdec_priv;

    if (p->vda_ctx.decoder)
        ff_vda_destroy_decoder(&p->vda_ctx);

    const struct profile_entry *pe =
        find_codec(ctx->avctx->codec_id, ctx->avctx->profile);

    p->vda_ctx = (struct vda_context) {
        .width             = ctx->avctx->width,
        .height            = ctx->avctx->height,
        .format            = pe->vda_codec,
        // equal to k2vuyPixelFormat ('2vuy'/UYVY)
        .cv_pix_fmt_type   = kCVPixelFormatType_422YpCbCr8,

#if HAVE_VDA_LIBAVCODEC_REFCOUNTING
        .use_ref_buffer    = 1,
#endif
        // use_ref_buffer is 1 in ffmpeg (while libav doesn't support this
        // feature). This means that in the libav case, libavcodec returns a
        // CVPixelBuffer with refcount=1 AND hands over ownership of that
        // reference.

        // This is slightly different from a typical refcounted situation,
        // where the API would return something that we need to retain for
        // it to stay around (ffmpeg behaves as expected when using
        // use_ref_buffer = 1).

        // If mpv doesn't properly free CVPixelBufferRefs that are no longer
        // used, the wrapped IOSurface IDs increase monotonically, hinting at
        // a leak of both CVPixelBuffers and IOSurfaces.
    };

    int status = ff_vda_create_decoder(
        &p->vda_ctx, ctx->avctx->extradata, ctx->avctx->extradata_size);

    if (status) {
        print_vda_error(ctx->log, MSGL_ERR, "failed to init VDA decoder", status);
        return -1;
    }

    return 0;
}

static int init(struct lavc_ctx *ctx)
{
    struct priv *p = talloc_zero(NULL, struct priv);
    ctx->hwdec_priv = p;
    ctx->avctx->hwaccel_context = &p->vda_ctx;
    return 0;
}

static void uninit(struct lavc_ctx *ctx) {
    struct priv *p = ctx->hwdec_priv;
    if (p->vda_ctx.decoder)
        ff_vda_destroy_decoder(&p->vda_ctx);
}

static void cv_retain(void *pbuf)
{
    CVPixelBufferRetain((CVPixelBufferRef)pbuf);
}
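cv_retain() is one half of a pair; wherever the wrapped buffer is dropped, a release wrapper with the same void* signature is needed. A minimal sketch (cv_release is a hypothetical name in this excerpt):

/* Hypothetical counterpart to cv_retain(): releases one reference taken
   on the wrapped CVPixelBufferRef. */
static void cv_release(void *pbuf)
{
    CVPixelBufferRelease((CVPixelBufferRef)pbuf);
}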
Example #17
void QTPixelBuffer::retainCallback(void* refcon)
{
    CVPixelBufferRetain(static_cast<CVPixelBufferRef>(refcon));
}
Example #18
QTPixelBuffer::QTPixelBuffer(const QTPixelBuffer& p) 
    : m_pixelBuffer(p.m_pixelBuffer) 
{
    CVPixelBufferRetain(m_pixelBuffer);
}
Example #19
QTPixelBuffer::QTPixelBuffer(CVPixelBufferRef ref) 
    : m_pixelBuffer(ref)
{
    CVPixelBufferRetain(m_pixelBuffer);
}
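Each constructor above takes a reference, so the class must give it back on destruction. A sketch of the implied destructor (CVPixelBufferRelease, unlike CFRelease, is a no-op on NULL, so no guard is needed):

QTPixelBuffer::~QTPixelBuffer()
{
    CVPixelBufferRelease(m_pixelBuffer);
}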
Example #20
void CDVDVideoCodecVDA::VDADecoderCallback(
  void                *decompressionOutputRefCon,
   CFDictionaryRef    frameInfo,
   OSStatus           status,
   uint32_t           infoFlags,
   CVImageBufferRef   imageBuffer)
{
  CCocoaAutoPool pool;
  // Warning, this is an async callback. There can be multiple frames in flight.
  CDVDVideoCodecVDA *ctx = (CDVDVideoCodecVDA*)decompressionOutputRefCon;

  if (imageBuffer == NULL)
  {
    //CLog::Log(LOGDEBUG, "%s - imageBuffer is NULL", __FUNCTION__);
    return;
  }
  OSType format_type = CVPixelBufferGetPixelFormatType(imageBuffer);
  if ((format_type != kCVPixelFormatType_422YpCbCr8) && (format_type != kCVPixelFormatType_32BGRA) )
  {
    CLog::Log(LOGERROR, "%s - imageBuffer format is not '2vuy' or 'BGRA'; it is reporting 0x%x",
      __FUNCTION__, format_type);
    return;
  }
  if (kVDADecodeInfo_FrameDropped & infoFlags)
  {
    CLog::Log(LOGDEBUG, "%s - frame dropped", __FUNCTION__);
    return;
  }

  // Allocate a new frame and populate it with some information.
  // This frame_queue entry tracks the newest decompressed frame and is
  // inserted into a linked list of frame pointers according to the display
  // time parsed out of the bitstream and stored in the frameInfo dictionary
  // by the client.
  frame_queue *newFrame = (frame_queue*)calloc(sizeof(frame_queue), 1);
  newFrame->nextframe = NULL;
  newFrame->pixel_buffer_format = format_type;
  newFrame->pixel_buffer_ref = CVPixelBufferRetain(imageBuffer);
  GetFrameDisplayTimeFromDictionary(frameInfo, newFrame);

  // if either dts or pts is valid we use it for the frame sort; otherwise we fall back to decoder insert time
  if ((newFrame->pts != DVD_NOPTS_VALUE) || (newFrame->dts != DVD_NOPTS_VALUE))
  {
    // if pts is borked (stupid avi's), use dts for frame sort
    if (newFrame->pts == DVD_NOPTS_VALUE)
      newFrame->sort_time = newFrame->dts;
    else
      newFrame->sort_time = newFrame->pts;
  }

  // since the frames we get may be in decode order rather than presentation order
  // our hypothetical callback places them in a queue of frames which will
  // hold them in display order for display on another thread
  pthread_mutex_lock(&ctx->m_queue_mutex);
  //
  frame_queue *queueWalker = ctx->m_display_queue;
  if (!queueWalker || (newFrame->sort_time < queueWalker->sort_time))
  {
    // we have an empty queue, or this frame earlier than the current queue head.
    newFrame->nextframe = queueWalker;
    ctx->m_display_queue = newFrame;
  } else {
    // walk the queue and insert this frame where it belongs in display order.
    bool frameInserted = false;
    frame_queue *nextFrame = NULL;
    //
    while (!frameInserted)
    {
      nextFrame = queueWalker->nextframe;
      if (!nextFrame || (newFrame->sort_time < nextFrame->sort_time))
      {
        // if the next frame is the tail of the queue, or our new frame is earlier.
        newFrame->nextframe = nextFrame;
        queueWalker->nextframe = newFrame;
        frameInserted = true;
      }
      queueWalker = nextFrame;
    }
  }
  ctx->m_queue_depth++;
  //
  pthread_mutex_unlock(&ctx->m_queue_mutex);	
}
Example #21
void PrivateDecoderVDA::VDADecoderCallback(void *decompressionOutputRefCon,
                                           CFDictionaryRef frameInfo,
                                           OSStatus status,
                                           uint32_t infoFlags,
                                           CVImageBufferRef imageBuffer)
{
    CocoaAutoReleasePool pool;
    PrivateDecoderVDA *decoder = (PrivateDecoderVDA*)decompressionOutputRefCon;

    if (kVDADecodeInfo_FrameDropped & infoFlags)
    {
        LOG(VB_GENERAL, LOG_ERR, LOC + "Callback: Decoder dropped frame");
        return;
    }

    if (!imageBuffer)
    {
        LOG(VB_GENERAL, LOG_ERR, LOC +
            "Callback: decoder returned empty buffer.");
        return;
    }

    INIT_ST;
    vda_st = status;
    CHECK_ST;

    OSType format_type = CVPixelBufferGetPixelFormatType(imageBuffer);
    if ((format_type != '2vuy') && (format_type != 'BGRA'))
    {
        LOG(VB_GENERAL, LOG_ERR, LOC +
            QString("Callback: image buffer format unknown (%1)")
                .arg(format_type));
        return;
    }

    int64_t pts = AV_NOPTS_VALUE;
    int8_t interlaced = 0;
    int8_t topfirst   = 0;
    int8_t repeatpic  = 0;
    CFNumberRef ptsref = (CFNumberRef)CFDictionaryGetValue(frameInfo,
                                                   CFSTR("FRAME_PTS"));
    CFNumberRef intref = (CFNumberRef)CFDictionaryGetValue(frameInfo,
                                                   CFSTR("FRAME_INTERLACED"));
    CFNumberRef topref = (CFNumberRef)CFDictionaryGetValue(frameInfo,
                                                   CFSTR("FRAME_TFF"));
    CFNumberRef repref = (CFNumberRef)CFDictionaryGetValue(frameInfo,
                                                   CFSTR("FRAME_REPEAT"));

    if (ptsref)
    {
        CFNumberGetValue(ptsref, kCFNumberSInt64Type, &pts);
        CFRelease(ptsref);
    }
    if (intref)
    {
        CFNumberGetValue(intref, kCFNumberSInt8Type, &interlaced);
        CFRelease(intref);
    }
    if (topref)
    {
        CFNumberGetValue(topref, kCFNumberSInt8Type, &topfirst);
        CFRelease(topref);
    }
    if (repref)
    {
        CFNumberGetValue(repref, kCFNumberSInt8Type, &repeatpic);
        CFRelease(repref);
    }

    int64_t time = (pts != (int64_t)AV_NOPTS_VALUE) ? pts : 0;
    {
        QMutexLocker lock(&decoder->m_frame_lock);
        bool found = false;
        int i = 0;
        for (; i < decoder->m_decoded_frames.size(); i++)
        {
            int64_t pts = decoder->m_decoded_frames[i].pts;
            if (pts != (int64_t)AV_NOPTS_VALUE && time > pts)
            {
                found = true;
                break;
            }
        }

        VDAFrame frame(CVPixelBufferRetain(imageBuffer), format_type,
                       pts, interlaced, topfirst, repeatpic);
        if (!found)
            i = decoder->m_decoded_frames.size();
        decoder->m_decoded_frames.insert(i, frame);
        decoder->m_frames_decoded++;
    }
}