Example #1
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx,
                                               AVFrame *frame)
{
    BufferSourceContext *s = ctx->priv;
    AVFrame *copy;
    int refcounted, ret;

    if (!frame) {
        s->eof = 1;
        return 0;
    } else if (s->eof)
        return AVERROR(EINVAL);

    refcounted = !!frame->buf[0];

    switch (ctx->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        CHECK_VIDEO_PARAM_CHANGE(ctx, s, frame->width, frame->height,
                                 frame->format);
        break;
    case AVMEDIA_TYPE_AUDIO:
        CHECK_AUDIO_PARAM_CHANGE(ctx, s, frame->sample_rate, frame->channel_layout,
                                 frame->format);
        break;
    default:
        return AVERROR(EINVAL);
    }

    if (!av_fifo_space(s->fifo) &&
        (ret = av_fifo_realloc2(s->fifo, av_fifo_size(s->fifo) +
                                         sizeof(copy))) < 0)
        return ret;

    if (!(copy = av_frame_alloc()))
        return AVERROR(ENOMEM);

    if (refcounted) {
        av_frame_move_ref(copy, frame);
    } else {
        ret = av_frame_ref(copy, frame);
        if (ret < 0) {
            av_frame_free(&copy);
            return ret;
        }
    }

    if ((ret = av_fifo_generic_write(s->fifo, &copy, sizeof(copy), NULL)) < 0) {
        if (refcounted)
            av_frame_move_ref(frame, copy);
        av_frame_free(&copy);
        return ret;
    }

    return 0;
}
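All of the examples on this page revolve around the same contract: av_frame_move_ref(dst, src) moves every reference held by src into dst and resets src, without unreferencing dst first. A minimal self-contained sketch of that contract (illustration only, not taken from any of the codebases below):

#include <libavutil/frame.h>

static int move_ref_sketch(void)
{
    AVFrame *src = av_frame_alloc();
    AVFrame *dst = av_frame_alloc();
    int ret = AVERROR(ENOMEM);

    if (!src || !dst)
        goto end;

    src->format = AV_PIX_FMT_YUV420P;
    src->width  = 320;
    src->height = 240;
    if ((ret = av_frame_get_buffer(src, 32)) < 0)
        goto end;

    av_frame_move_ref(dst, src); /* dst now owns the buffers */
    /* src is blank here (src->buf[0] == NULL); it can be reused or freed */

    ret = 0;
end:
    av_frame_free(&src);
    av_frame_free(&dst);
    return ret;
}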
Example #2
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
{
    AVFrame *tmp;
    int ret;

    av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);

    if (!frame->data[0])
        return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);

    if (av_frame_is_writable(frame))
        return ff_decode_frame_props(avctx, frame);

    tmp = av_frame_alloc();
    if (!tmp)
        return AVERROR(ENOMEM);

    av_frame_move_ref(tmp, frame);

    ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0) {
        av_frame_free(&tmp);
        return ret;
    }

    av_frame_copy(frame, tmp);
    av_frame_free(&tmp);

    return 0;
}
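ff_reget_buffer() is internal to libavcodec; in application code the same copy-on-write step is normally done with the public av_frame_make_writable(). A hedged sketch:

#include <libavutil/frame.h>

/* Sketch only: ensure we own a writable copy before touching the pixels. */
static int edit_in_place(AVFrame *frame)
{
    int ret = av_frame_make_writable(frame); /* copies the data only if it is shared */
    if (ret < 0)
        return ret;
    frame->data[0][0] = 0x00; /* now safe to modify */
    return 0;
}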
Example #3
static int read_from_fifo(AVFilterContext *ctx, AVFrame *frame,
                          int nb_samples)
{
    BufferSinkContext *s = ctx->priv;
    AVFilterLink   *link = ctx->inputs[0];
    AVFrame *tmp;
#ifdef IDE_COMPILE
    AVRational tmp2;
#endif

    if (!(tmp = ff_get_audio_buffer(link, nb_samples)))
        return AVERROR(ENOMEM);
    av_audio_fifo_read(s->audio_fifo, (void**)tmp->extended_data, nb_samples);

    tmp->pts = s->next_pts;
    if (s->next_pts != AV_NOPTS_VALUE) {
#ifdef IDE_COMPILE
        tmp2.num = 1;
        tmp2.den = link->sample_rate;
        s->next_pts += av_rescale_q(nb_samples, tmp2, link->time_base);
#else
        s->next_pts += av_rescale_q(nb_samples, (AVRational){1, link->sample_rate},
                                    link->time_base);
#endif
    }
    av_frame_move_ref(frame, tmp);
    av_frame_free(&tmp);

    return 0;
}
Example #4
int CDVDVideoCodecFFmpeg::FilterProcess(AVFrame* frame)
{
  int result;

  if (frame)
  {
    result = av_buffersrc_add_frame(m_pFilterIn, frame);
    if (result < 0)
    {
      CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterProcess - av_buffersrc_add_frame");
      return VC_ERROR;
    }
  }

  result = av_buffersink_get_frame(m_pFilterOut, m_pFilterFrame);

  if (result == AVERROR(EAGAIN) || result == AVERROR_EOF)
    return VC_BUFFER;
  else if(result < 0)
  {
    CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterProcess - av_buffersink_get_frame");
    return VC_ERROR;
  }

  av_frame_unref(m_pFrame);
  av_frame_move_ref(m_pFrame, m_pFilterFrame);

  return VC_PICTURE;
}
Example #5
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
{
    BufferSinkContext *buf = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret;
    AVFrame *cur_frame;

    /* no picref available, fetch it from the filterchain */
    if (!av_fifo_size(buf->fifo)) {
        if (flags & AV_BUFFERSINK_FLAG_NO_REQUEST)
            return AVERROR(EAGAIN);
        if ((ret = ff_request_frame(inlink)) < 0)
            return ret;
    }

    if (!av_fifo_size(buf->fifo))
        return AVERROR(EINVAL);

    if (flags & AV_BUFFERSINK_FLAG_PEEK) {
        cur_frame = *((AVFrame **)av_fifo_peek2(buf->fifo, 0));
        av_frame_ref(frame, cur_frame); /* TODO check failure */
    } else {
        av_fifo_generic_read(buf->fifo, &cur_frame, sizeof(cur_frame), NULL);
        av_frame_move_ref(frame, cur_frame);
        av_frame_free(&cur_frame);
    }

    return 0;
}
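The consumer side of this API is usually a pull loop that drains the sink until it reports EAGAIN or EOF. A short sketch, with buffersink_ctx standing in for a configured sink filter (the name is assumed):

#include <libavfilter/buffersink.h>
#include <libavutil/frame.h>

static int drain_sink(AVFilterContext *buffersink_ctx)
{
    AVFrame *out = av_frame_alloc();
    int ret;

    if (!out)
        return AVERROR(ENOMEM);
    while ((ret = av_buffersink_get_frame(buffersink_ctx, out)) >= 0) {
        /* ... consume the frame ... */
        av_frame_unref(out);
    }
    av_frame_free(&out);
    /* EAGAIN and AVERROR_EOF simply mean "no more frames for now" */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}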
Example #6
bool
HardwareAccel::extractData(VideoFrame& input)
{
    try {
        auto inFrame = input.pointer();

        if (inFrame->format != format_) {
            std::stringstream buf;
            buf << "Frame format mismatch: expected " << av_get_pix_fmt_name(format_);
            buf << ", got " << av_get_pix_fmt_name((AVPixelFormat)inFrame->format);
            throw std::runtime_error(buf.str());
        }

        // FFmpeg requires a second frame in which to transfer the data
        // from the GPU buffer to the main memory
        auto output = std::unique_ptr<VideoFrame>(new VideoFrame());
        auto outFrame = output->pointer();
        outFrame->format = AV_PIX_FMT_YUV420P;

        extractData(input, *output);

        // move outFrame into inFrame so the caller receives extracted image data
        // but we have to delete inFrame first
        av_frame_unref(inFrame);
        av_frame_move_ref(inFrame, outFrame);
    } catch (const std::runtime_error& e) {
        fail(false);
        RING_ERR("%s", e.what());
        return false;
    }

    succeed();
    return true;
}
Example #7
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    av_frame_unref(frame);

    if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
        return AVERROR(EINVAL);

    ret = bsfs_init(avctx);
    if (ret < 0)
        return ret;

    if (avci->buffer_frame->buf[0]) {
        av_frame_move_ref(frame, avci->buffer_frame);
    } else {
        ret = decode_receive_frame_internal(avctx, frame);
        if (ret < 0)
            return ret;
    }

    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        ret = apply_cropping(avctx, frame);
        if (ret < 0) {
            av_frame_unref(frame);
            return ret;
        }
    }

    avctx->frame_number++;

    return 0;
}
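avcodec_receive_frame() is one half of the send/receive decoding API. A hedged sketch of the standard loop around it (error handling trimmed to the essentials):

#include <libavcodec/avcodec.h>

static int decode_packet(AVCodecContext *avctx, AVFrame *frame, const AVPacket *pkt)
{
    int ret = avcodec_send_packet(avctx, pkt); /* pkt == NULL starts draining */
    if (ret < 0)
        return ret;
    for (;;) {
        ret = avcodec_receive_frame(avctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0; /* need more input, or fully drained */
        if (ret < 0)
            return ret;
        /* ... use the frame ... */
        av_frame_unref(frame);
    }
}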
Example #8
static int vda_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    InputStream *ist = s->opaque;
    VDAContext  *vda = ist->hwaccel_ctx;
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    CVReturn err;
    uint8_t *data[4] = { 0 };
    int linesize[4] = { 0 };
    int planes, ret, i;

    av_frame_unref(vda->tmp_frame);

    switch (pixel_format) {
    case kCVPixelFormatType_420YpCbCr8Planar: vda->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
    case kCVPixelFormatType_422YpCbCr8:       vda->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
    default:
        av_log(NULL, AV_LOG_ERROR,
               "Unsupported pixel format: %u\n", pixel_format);
        return AVERROR(ENOSYS);
    }

    vda->tmp_frame->width  = frame->width;
    vda->tmp_frame->height = frame->height;
    ret = av_frame_get_buffer(vda->tmp_frame, 32);
    if (ret < 0)
        return ret;

    err = CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (err != kCVReturnSuccess) {
        av_log(NULL, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
        return AVERROR_UNKNOWN;
    }

    if (CVPixelBufferIsPlanar(pixbuf)) {

        planes = CVPixelBufferGetPlaneCount(pixbuf);
        for (i = 0; i < planes; i++) {
            data[i]     = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
            linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
        }
    } else {
        data[0] = CVPixelBufferGetBaseAddress(pixbuf);
        linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
    }

    av_image_copy(vda->tmp_frame->data, vda->tmp_frame->linesize,
                  data, linesize, vda->tmp_frame->format,
                  frame->width, frame->height);

    /* release the lock taken above */
    CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);

    ret = av_frame_copy_props(vda->tmp_frame, frame);
    if (ret < 0)
        return ret;

    av_frame_unref(frame);
    av_frame_move_ref(frame, vda->tmp_frame);

    return 0;
}
Example #9
static int dxva2_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    LPDIRECT3DSURFACE9 surface =  (LPDIRECT3DSURFACE9)frame->data[3];
    HwAccelContext        *hac = s->opaque;
    DXVA2Context       *ctx = hac->hwaccel_ctx;
    D3DSURFACE_DESC    surfaceDesc;
    D3DLOCKED_RECT     LockedRect;
    HRESULT            hr;
    int                ret;
    int                i;

    IDirect3DSurface9_GetDesc(surface, &surfaceDesc);

    ctx->tmp_frame->width  = frame->width;
    ctx->tmp_frame->height = frame->height;
    ctx->tmp_frame->format = AV_PIX_FMT_NV12;

    ret = av_frame_get_buffer(ctx->tmp_frame, 32);
    if (ret < 0)
        return ret;

    hr = IDirect3DSurface9_LockRect(surface, &LockedRect, NULL, D3DLOCK_READONLY);
    if (FAILED(hr)) {
        av_log(NULL, AV_LOG_ERROR, "Unable to lock DXVA2 surface\n");
        return AVERROR_UNKNOWN;
    }

    av_image_copy_plane(ctx->tmp_frame->data[0], ctx->tmp_frame->linesize[0],
                        (uint8_t*)LockedRect.pBits,
                        LockedRect.Pitch, frame->width, frame->height);

    av_image_copy_plane(ctx->tmp_frame->data[1], ctx->tmp_frame->linesize[1],
                        (uint8_t*)LockedRect.pBits + LockedRect.Pitch * surfaceDesc.Height,
                        LockedRect.Pitch, frame->width, frame->height / 2);

    IDirect3DSurface9_UnlockRect(surface);
    for (i = 0; i < ctx->num_surfaces; i++) {
        if (ctx->surfaces[i] == surface) {
            break;
        }
    }
    av_log(NULL, AV_LOG_DEBUG, "dxva2_retrieve_data:%d\n",i);
    ret = av_frame_copy_props(ctx->tmp_frame, frame);
    if (ret < 0)
        goto fail;

    av_frame_unref(frame);
    av_frame_move_ref(frame, ctx->tmp_frame);

    return 0;
fail:
    av_frame_unref(ctx->tmp_frame);
    return ret;
}
Example #10
static void fixstride(AVFilterLink *link, AVFrame *f)
{
    AVFrame *dst = ff_default_get_video_buffer(link, f->width, f->height);
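    /* if the allocation failed, bail out silently; f keeps its original stride */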
    if(!dst)
        return;
    av_frame_copy_props(dst, f);
    av_image_copy(dst->data, dst->linesize,
                  (const uint8_t **)f->data, f->linesize,
                  dst->format, dst->width, dst->height);
    av_frame_unref(f);
    av_frame_move_ref(f, dst);
    av_frame_free(&dst);
}
Example #11
static int cudascale_scale(AVFilterContext *ctx, AVFrame *out, AVFrame *in)
{
    CUDAScaleContext *s = ctx->priv;
    AVFrame *src = in;
    int ret;

    ret = scalecuda_resize(ctx, s->frame, src);
    if (ret < 0)
        return ret;

    src = s->frame;
    ret = av_hwframe_get_buffer(src->hw_frames_ctx, s->tmp_frame, 0);
    if (ret < 0)
        return ret;

    av_frame_move_ref(out, s->frame);
    av_frame_move_ref(s->frame, s->tmp_frame);

    ret = av_frame_copy_props(out, in);
    if (ret < 0)
        return ret;

    return 0;
}
Example #12
static int transfer_data_alloc(AVFrame *dst, const AVFrame *src, int flags)
{
    AVHWFramesContext *ctx = (AVHWFramesContext*)src->hw_frames_ctx->data;
    AVFrame *frame_tmp;
    int ret = 0;

    frame_tmp = av_frame_alloc();
    if (!frame_tmp)
        return AVERROR(ENOMEM);

    /* if the format is set, use that
     * otherwise pick the first supported one */
    if (dst->format >= 0) {
        frame_tmp->format = dst->format;
    } else {
        enum AVPixelFormat *formats;

        ret = av_hwframe_transfer_get_formats(src->hw_frames_ctx,
                                              AV_HWFRAME_TRANSFER_DIRECTION_FROM,
                                              &formats, 0);
        if (ret < 0)
            goto fail;
        frame_tmp->format = formats[0];
        av_freep(&formats);
    }
    frame_tmp->width  = ctx->width;
    frame_tmp->height = ctx->height;

    ret = av_frame_get_buffer(frame_tmp, 32);
    if (ret < 0)
        goto fail;

    ret = av_hwframe_transfer_data(frame_tmp, src, flags);
    if (ret < 0)
        goto fail;

    frame_tmp->width  = src->width;
    frame_tmp->height = src->height;

    av_frame_move_ref(dst, frame_tmp);

fail:
    av_frame_free(&frame_tmp);
    return ret;
}
Example #13
int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx,
                                                AVFrame *frame)
{
    BufferSinkContext *s    = ctx->priv;
    AVFilterLink      *link = ctx->inputs[0];
    int ret;

    if ((ret = ff_request_frame(link)) < 0)
        return ret;

    if (!s->cur_frame)
        return AVERROR(EINVAL);

    av_frame_move_ref(frame, s->cur_frame);
    av_frame_free(&s->cur_frame);

    return 0;
}
Example #14
static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
{
    int ret;

    /* move the original frame to our backup */
    av_frame_unref(avci->to_free);
    av_frame_move_ref(avci->to_free, frame);

    /* now copy everything except the AVBufferRefs back
     * note that we make a COPY of the side data, so calling av_frame_free() on
     * the caller's frame will work properly */
    ret = av_frame_copy_props(frame, avci->to_free);
    if (ret < 0)
        return ret;

    memcpy(frame->data,     avci->to_free->data,     sizeof(frame->data));
    memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
    if (avci->to_free->extended_data != avci->to_free->data) {
        int planes = av_get_channel_layout_nb_channels(avci->to_free->channel_layout);
        int size   = planes * sizeof(*frame->extended_data);

        if (!size) {
            av_frame_unref(frame);
            return AVERROR_BUG;
        }

        frame->extended_data = av_malloc(size);
        if (!frame->extended_data) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        memcpy(frame->extended_data, avci->to_free->extended_data,
               size);
    } else
        frame->extended_data = frame->data;

    frame->format         = avci->to_free->format;
    frame->width          = avci->to_free->width;
    frame->height         = avci->to_free->height;
    frame->channel_layout = avci->to_free->channel_layout;
    frame->nb_samples     = avci->to_free->nb_samples;

    return 0;
}
Example #15
static int read_from_fifo(AVFilterContext *ctx, AVFrame *frame,
                          int nb_samples)
{
    BufferSinkContext *s = ctx->priv;
    AVFilterLink   *link = ctx->inputs[0];
    AVFrame *tmp;

    if (!(tmp = ff_get_audio_buffer(link, nb_samples)))
        return AVERROR(ENOMEM);
    av_audio_fifo_read(s->audio_fifo, (void**)tmp->extended_data, nb_samples);

    tmp->pts = s->next_pts;
    s->next_pts += av_rescale_q(nb_samples, (AVRational){1, link->sample_rate},
                                link->time_base);

    av_frame_move_ref(frame, tmp);
    av_frame_free(&tmp);

    return 0;
}
Example #16
static int utvideo_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame, AVPacket *avpkt)
{
    UtVideoContext *utv = (UtVideoContext *)avctx->priv_data;
    AVFrame *pic = avctx->coded_frame;
    int w = avctx->width, h = avctx->height;

    /* Set flags */
    pic->reference = 0;
    pic->pict_type = AV_PICTURE_TYPE_I;
    pic->key_frame = 1;

    /* Decode the frame */
    utv->codec->DecodeFrame(utv->buffer, avpkt->data, true);

    /* Set the output data depending on the colorspace */
    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
        pic->linesize[0] = w;
        pic->linesize[1] = pic->linesize[2] = w / 2;
        pic->data[0] = utv->buffer;
        pic->data[2] = utv->buffer + (w * h);
        pic->data[1] = pic->data[2] + (w * h / 4);
        break;
    case AV_PIX_FMT_YUYV422:
        pic->linesize[0] = w * 2;
        pic->data[0] = utv->buffer;
        break;
    case AV_PIX_FMT_BGR24:
    case AV_PIX_FMT_RGB32:
        /* Make the linesize negative, since Ut Video uses bottom-up BGR */
        pic->linesize[0] = -1 * w * (avctx->pix_fmt == AV_PIX_FMT_BGR24 ? 3 : 4);
        pic->data[0] = utv->buffer + utv->buf_size + pic->linesize[0];
        break;
    }

    *got_frame = 1;
    av_frame_move_ref((AVFrame*)data, pic);

    return avpkt->size;
}
Example #17
static int dxva2_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    InputStream        *ist = s->opaque;
    DXVA2Context       *ctx = ist->hwaccel_ctx;
    int                ret;

    ret = av_hwframe_transfer_data(ctx->tmp_frame, frame, 0);
    if (ret < 0)
        return ret;

    ret = av_frame_copy_props(ctx->tmp_frame, frame);
    if (ret < 0) {
        av_frame_unref(ctx->tmp_frame);
        return ret;
    }

    av_frame_unref(frame);
    av_frame_move_ref(frame, ctx->tmp_frame);

    return 0;
}
Example #18
static int audio_thread(void *arg)
{
    VideoState *is = (VideoState*)arg;
    AVFrame *frame = av_frame_alloc();
    Frame *af;
    int got_frame = 0;
    AVPacket pkt1, *pkt = &pkt1;

    if (!frame)
        return AVERROR(ENOMEM);

    do {
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            break;
        avcodec_decode_audio4(is->audio_ctx, frame, &got_frame, pkt);
        if (got_frame) {
            if (!(af = frame_queue_peek_writable(&is->sampq)))
                break;
            av_frame_move_ref(af->frame, frame);
            frame_queue_push(&is->sampq);
        }
        av_free_packet(pkt); /* free the packet even when no frame was produced */
    } while (1);
    av_frame_free(&frame);
    return 0;
}
Example #19
void AudioDecoder::run()
{
    // get a packet and decode it
    //AVFrame* frame = av_frame_alloc();
    auto sharedFrame = std::make_shared<Frame>();
    for (;;) {
        if (aw->abortRequest == 1)
            break;
        std::shared_ptr<Packet> sharedPacket;
        if (!aw->audioPacketQ.pop(sharedPacket))
            continue;
        AVPacket* ppkt = &sharedPacket->pkt;
        int serial = sharedPacket->serial;
        int flush = 0;
        // a zero-sized packet with NULL data flushes the remaining frames
        if (ppkt->size == 0 && ppkt->data == NULL)
            flush = 1;

        // decode frames from the packet and push them onto the frame queue
        int gotFrame = 1;
        AVFrame* frame = sharedFrame->frame;
        do {
            int ret = avcodec_decode_audio4(aw->audioCodecCtx, frame, &gotFrame, ppkt);
            if (ret < 0) {
                std::cout << "decode audio error!" << std::endl;
                break;
            }
            if (gotFrame) {
                //AVFrame* qFrame = av_frame_alloc();
                auto qFrame = std::make_shared<Frame>();
                qFrame->serial = serial;
                av_frame_move_ref(qFrame->frame, frame);
                aw->audioFrameQ.push(qFrame);
            }
            ppkt->data += ret;
            ppkt->size -= ret;
        } while ((ppkt->size > 0 || (gotFrame && flush)) && aw->abortRequest != 1);
    }
}
Example #20
int CDVDVideoCodecFFmpeg::FilterProcess(AVFrame* frame)
{
  int result;

  if (frame || (m_codecControlFlags & DVD_CODEC_CTRL_DRAIN))
  {
    result = av_buffersrc_add_frame(m_pFilterIn, frame);
    if (result < 0)
    {
      CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterProcess - av_buffersrc_add_frame");
      return VC_ERROR;
    }
  }

  result = av_buffersink_get_frame(m_pFilterOut, m_pFilterFrame);

  if (result == AVERROR(EAGAIN))
    return VC_BUFFER;
  else if (result == AVERROR_EOF)
  {
    result = av_buffersink_get_frame(m_pFilterOut, m_pFilterFrame);
    m_filterEof = true;
    if (result < 0)
      return VC_BUFFER;
  }
  else if (result < 0)
  {
    CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterProcess - av_buffersink_get_frame");
    return VC_ERROR;
  }

  av_frame_unref(m_pFrame);
  av_frame_move_ref(m_pFrame, m_pFilterFrame);

  return VC_PICTURE;
}
Example #21
static int decode_video_picture
(
    lwlibav_video_decode_handler_t *vdhp,
    AVFrame                        *picture,
    int                            *got_picture,
    uint32_t                       *current,
    uint32_t                        goal,
    uint32_t                        rap_number
)
{
    /* Get a packet containing a frame. */
    uint32_t frame_number = *current;
    AVPacket *pkt = &vdhp->packet;
    int ret = lwlibav_get_av_frame( vdhp->format, vdhp->stream_index, frame_number, pkt );
    if( ret > 0 )
        return ret;
    /* Correct the current frame number in order to match DTS since libavformat might have sought wrong position. */
    uint32_t correction_distance = 0;
    if( frame_number == rap_number && (vdhp->lw_seek_flags & SEEK_DTS_BASED) )
    {
        frame_number = correct_current_frame_number( vdhp, pkt, frame_number, goal );
        if( frame_number == 0
         || frame_number > rap_number )
            return -2;
        if( *current > frame_number )
            /* It seems we got a more backward frame rather than what we requested. */
            correction_distance = *current - frame_number;
        *current = frame_number;
    }
    if( pkt->flags & AV_PKT_FLAG_KEY )
        vdhp->last_rap_number = frame_number;
    /* Avoid decoding frames until the seek correction caused by too backward is done. */
    while( correction_distance )
    {
        ret = lwlibav_get_av_frame( vdhp->format, vdhp->stream_index, ++frame_number, pkt );
        if( ret > 0 )
            return ret;
        if( pkt->flags & AV_PKT_FLAG_KEY )
            vdhp->last_rap_number = frame_number;
        *current = frame_number;
        --correction_distance;
    }
    /* Decode a frame in a packet. */
    int64_t pts = pkt->pts != AV_NOPTS_VALUE ? pkt->pts : pkt->dts;
    AVFrame *mov_frame = vdhp->movable_frame_buffer;
    av_frame_unref( mov_frame );
    ret = avcodec_decode_video2( vdhp->ctx, mov_frame, got_picture, pkt );
    /* We can't get the requested frame by feeding a picture if that picture is PAFF field coded.
     * This branch avoids putting empty data on the frame buffer. */
    if( *got_picture )
    {
        av_frame_unref( picture );
        av_frame_move_ref( picture, mov_frame );
    }
    picture->pts = pts;
    if( ret < 0 )
    {
        if( vdhp->lh.show_log )
            vdhp->lh.show_log( &vdhp->lh, LW_LOG_ERROR, "Failed to decode a video frame." );
        return -1;
    }
    return 0;
}
Example #22
bool VideoEncoderFFmpeg::encode(const VideoFrame &frame)
{
    DPTR_D(VideoEncoderFFmpeg);
    QScopedPointer<AVFrame, ScopedAVFrameDeleter> f;
    // hwupload
    AVPixelFormat pixfmt = AVPixelFormat(frame.pixelFormatFFmpeg());
    if (frame.isValid()) {
        f.reset(av_frame_alloc());
        f->format = pixfmt;
        f->width = frame.width();
        f->height = frame.height();
//        f->quality = d.avctx->global_quality;
        switch (timestampMode()) {
        case TimestampCopy:
            f->pts = int64_t(frame.timestamp()*frameRate()); // TODO: check monotically increase and fix if not. or another mode?
            break;
        case TimestampMonotonic:
            f->pts = d.nb_encoded+1;
            break;
        default:
            break;
        }

        // pts is set in muxer
        const int nb_planes = frame.planeCount();
        for (int i = 0; i < nb_planes; ++i) {
            f->linesize[i] = frame.bytesPerLine(i);
            f->data[i] = (uint8_t*)frame.constBits(i);
        }
        if (d.avctx->width <= 0) {
            d.avctx->width = frame.width();
        }
        if (d.avctx->height <= 0) {
            d.avctx->height = frame.height();
        }
#ifdef HAVE_AVHWCTX
        if (d.avctx->hw_frames_ctx) {
            // TODO: try to map to SourceSurface
            // check valid sw_formats
            if (!d.hwframes_ref) {
                qWarning("no hw frame context for uploading");
                return false;
            }
            if (pixfmt != d.hwframes->sw_format) {
                // reinit or got an unsupported format. assume parameters will not change, so it's the 1st init
                // check constraints
                bool init_frames_ctx = d.hwframes->sw_format == AVPixelFormat(-1);
                if (d.sw_fmts.contains(pixfmt)) { // format changed
                    init_frames_ctx = true;
                } else { // convert to supported sw format
                    pixfmt = d.sw_fmts[0];
                    f->format = pixfmt;
                    VideoFrame converted = frame.to(VideoFormat::pixelFormatFromFFmpeg(pixfmt));
                    for (int i = 0; i < converted.planeCount(); ++i) {
                        f->linesize[i] = converted.bytesPerLine(i);
                        f->data[i] = (uint8_t*)converted.constBits(i); // read from the converted frame, not the original
                    }
                }
                if (init_frames_ctx) {
                    d.hwframes->sw_format = pixfmt;
                    d.hwframes->width = frame.width();
                    d.hwframes->height = frame.height();
                    AV_ENSURE(av_hwframe_ctx_init(d.hwframes_ref), false);
                }
            }
            // upload
            QScopedPointer<AVFrame, ScopedAVFrameDeleter> hwf( av_frame_alloc());
            AV_ENSURE(av_hwframe_get_buffer(d.hwframes_ref, hwf.data(), 0), false);
            //hwf->format = d.hwframes->format; // not necessary
            //hwf->width = f->width;
            //hwf->height = f->height;
            AV_ENSURE(av_hwframe_transfer_data(hwf.data(), f.data(), 0), false);
            AV_ENSURE(av_frame_copy_props(hwf.data(), f.data()), false);
            av_frame_unref(f.data());
            av_frame_move_ref(f.data(), hwf.data());
        }
#endif //HAVE_AVHWCTX
    }
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = (uint8_t*)d.buffer.constData();
    pkt.size = d.buffer.size();
    int got_packet = 0;
    int ret = avcodec_encode_video2(d.avctx, &pkt, f.data(), &got_packet);
    if (ret < 0) {
        qWarning("error avcodec_encode_video2: %s" ,av_err2str(ret));
        return false; //false
    }
    d.nb_encoded++;
    if (!got_packet) {
        qWarning("no packet got");
        d.packet = Packet();
        // invalid frame means eof
        return frame.isValid();
    }
   // qDebug("enc avpkt.pts: %lld, dts: %lld.", pkt.pts, pkt.dts);
    d.packet = Packet::fromAVPacket(&pkt, av_q2d(d.avctx->time_base));
   // qDebug("enc packet.pts: %.3f, dts: %.3f.", d.packet.pts, d.packet.dts);
    return true;
}
Example #23
void CVideoBufferFFmpeg::SetRef(AVFrame *frame)
{
  av_frame_unref(m_pFrame);
  av_frame_move_ref(m_pFrame, frame);
  m_pixFormat = (AVPixelFormat)m_pFrame->format;
}
Example #24
static int utvideo_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame, AVPacket *avpkt)
{
    UtVideoContext *utv = (UtVideoContext *)avctx->priv_data;
    AVFrame *pic = avctx->coded_frame;
    int w = avctx->width, h = avctx->height;

    /* Set flags */
    pic->pict_type = AV_PICTURE_TYPE_I;
    pic->key_frame = 1;

    /* Decode the frame */
    utv->codec->DecodeFrame(utv->buffer, avpkt->data, true);

    /* Set the output data depending on the colorspace */
    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
        pic->linesize[0] = w;
        pic->linesize[1] = pic->linesize[2] = w / 2;
        pic->data[0] = utv->buffer;
        pic->data[2] = utv->buffer + (w * h);
        pic->data[1] = pic->data[2] + (w * h / 4);
        break;
    case AV_PIX_FMT_YUYV422:
        pic->linesize[0] = w * 2;
        pic->data[0] = utv->buffer;
        break;
    case AV_PIX_FMT_YUV422P10: {
        uint16_t *y, *u, *v;
        int i,j;
        int linesize = ((w + 47) / 48) * 128;

        pic->linesize[0] = w * 2;
        pic->linesize[1] =
        pic->linesize[2] = w;
        pic->data[0] = utv->buffer + linesize * h;
        pic->data[1] = pic->data[0] + h*pic->linesize[0];
        pic->data[2] = pic->data[1] + h*pic->linesize[1];
        y = (uint16_t*)pic->data[0];
        u = (uint16_t*)pic->data[1];
        v = (uint16_t*)pic->data[2];
        for (j = 0; j < h; j++) {
            const uint8_t *in = utv->buffer + j * linesize;

            for (i = 0; i + 1 < w; i += 6, in += 4) {
                unsigned a,b;
                a = AV_RL32(in);
                in += 4;
                b = AV_RL32(in);
                *u++ = (a    ) & 0x3FF;
                *y++ = (a>>10) & 0x3FF;
                *v++ = (a>>20) & 0x3FF;
                *y++ = (b    ) & 0x3FF;

                if (i + 3 >= w)
                    break;

                in += 4;
                a = AV_RL32(in);
                *u++ = (b>>10) & 0x3FF;
                *y++ = (b>>20) & 0x3FF;
                *v++ = (a    ) & 0x3FF;
                *y++ = (a>>10) & 0x3FF;

                if (i + 5 >= w)
                    break;

                in += 4;
                b = AV_RL32(in);
                *u++ = (a>>20) & 0x3FF;
                *y++ = (b    ) & 0x3FF;
                *v++ = (b>>10) & 0x3FF;
                *y++ = (b>>20) & 0x3FF;
            }
        }
        break;
    }
    case AV_PIX_FMT_BGR24:
    case AV_PIX_FMT_RGB32:
        /* Make the linesize negative, since Ut Video uses bottom-up BGR */
        pic->linesize[0] = -1 * w * (avctx->pix_fmt == AV_PIX_FMT_BGR24 ? 3 : 4);
        pic->data[0] = utv->buffer + utv->buf_size + pic->linesize[0];
        break;
    }

    *got_frame = 1;
    av_frame_move_ref((AVFrame*)data, pic);

    return avpkt->size;
}
Example #25
int CDVDVideoCodecFFmpeg::Decode(uint8_t* pData, int iSize, double dts, double pts)
{
  int iGotPicture = 0, len = 0;

  if (!m_pCodecContext)
    return VC_ERROR;

  if (pData)
    m_iLastKeyframe++;

  if (m_pHardware)
  {
    int result;
    if (pData || (m_codecControlFlags & DVD_CODEC_CTRL_DRAIN))
    {
      result = m_pHardware->Check(m_pCodecContext);
      result &= ~VC_NOBUFFER;
    }
    else
    {
      result = m_pHardware->Decode(m_pCodecContext, nullptr);
    }

    if (result)
      return result;
  }

  if (!m_pHardware && pData)
    SetFilters();

  if (m_pFilterGraph && !m_filterEof)
  {
    int result = 0;
    if (pData == NULL)
      result = FilterProcess(nullptr);
    if (m_codecControlFlags & DVD_CODEC_CTRL_DRAIN)
    {
      result &= VC_PICTURE;
    }
    if (result)
      return result;
  }

  m_dts = dts;
  m_pCodecContext->reordered_opaque = pts_dtoi(pts);

  AVPacket avpkt;
  av_init_packet(&avpkt);
  avpkt.data = pData;
  avpkt.size = iSize;
  avpkt.dts = (dts == DVD_NOPTS_VALUE) ? AV_NOPTS_VALUE : dts / DVD_TIME_BASE * AV_TIME_BASE;
  avpkt.pts = (pts == DVD_NOPTS_VALUE) ? AV_NOPTS_VALUE : pts / DVD_TIME_BASE * AV_TIME_BASE;

  /* We lie, but this flag is only used by pngdec.c.
   * Setting it correctly would allow CorePNG decoding. */
  avpkt.flags = AV_PKT_FLAG_KEY;
  len = avcodec_decode_video2(m_pCodecContext, m_pDecodedFrame, &iGotPicture, &avpkt);

  if (m_decoderState == STATE_HW_FAILED && !m_pHardware)
    return VC_REOPEN;

  if(m_iLastKeyframe < m_pCodecContext->has_b_frames + 2)
    m_iLastKeyframe = m_pCodecContext->has_b_frames + 2;

  if (len < 0)
  {
    if(m_pHardware)
    {
      int result = m_pHardware->Check(m_pCodecContext);
      if (result & VC_NOBUFFER)
      {
        result = m_pHardware->Decode(m_pCodecContext, NULL);
        return result;
      }
    }
    CLog::Log(LOGERROR, "%s - avcodec_decode_video returned failure", __FUNCTION__);
    return VC_ERROR;
  }

  if (!iGotPicture)
  {
    if (pData && m_pCodecContext->skip_frame > AVDISCARD_DEFAULT)
    {
      m_droppedFrames++;
      if (m_interlaced)
        m_droppedFrames++;
    }

    if (m_pHardware && (m_codecControlFlags & DVD_CODEC_CTRL_DRAIN))
    {
      int result;
      result = m_pHardware->Decode(m_pCodecContext, NULL);
      return result;
    }
    else
      return VC_BUFFER;
  }

  if (m_pDecodedFrame->key_frame)
  {
    m_started = true;
    m_iLastKeyframe = m_pCodecContext->has_b_frames + 2;
  }

  if (m_pDecodedFrame->interlaced_frame)
    m_interlaced = true;
  else
    m_interlaced = false;

  /* put a limit on convergence count to avoid huge mem usage on streams without keyframes */
  if (m_iLastKeyframe > 300)
    m_iLastKeyframe = 300;

  /* h264 doesn't always have keyframes + won't output before first keyframe anyway */
  if(m_pCodecContext->codec_id == AV_CODEC_ID_H264 ||
     m_pCodecContext->codec_id == AV_CODEC_ID_SVQ3)
    m_started = true;

  if (m_pHardware == nullptr)
  {
    bool need_scale = std::find( m_formats.begin()
                               , m_formats.end()
                               , m_pCodecContext->pix_fmt) == m_formats.end();

    bool need_reopen  = false;
    if (m_filters != m_filters_next)
      need_reopen = true;

    if (!m_filters_next.empty() && m_filterEof)
      need_reopen = true;

    if (m_pFilterIn)
    {
      if (m_pFilterIn->outputs[0]->format != m_pCodecContext->pix_fmt ||
          m_pFilterIn->outputs[0]->w != m_pCodecContext->width ||
          m_pFilterIn->outputs[0]->h != m_pCodecContext->height)
        need_reopen = true;
    }

    // try to setup new filters
    if (need_reopen || (need_scale && m_pFilterGraph == nullptr))
    {
      m_filters = m_filters_next;

      if (FilterOpen(m_filters, need_scale) < 0)
        FilterClose();
    }
  }

  int result;
  if (m_pHardware)
  {
    av_frame_unref(m_pFrame);
    av_frame_move_ref(m_pFrame, m_pDecodedFrame);
    result = m_pHardware->Decode(m_pCodecContext, m_pFrame);
  }
  else if (m_pFilterGraph && !m_filterEof)
  {
    result = FilterProcess(m_pDecodedFrame);
  }
  else
  {
    av_frame_unref(m_pFrame);
    av_frame_move_ref(m_pFrame, m_pDecodedFrame);
    result = VC_PICTURE | VC_BUFFER;
  }

  if (m_codecControlFlags & DVD_CODEC_CTRL_DRAIN)
    result &= ~VC_BUFFER;

  if (result & VC_FLUSHED)
    Reset();

  return result;
}
Example #26
void CVideoBufferDRMPRIME::SetRef(AVFrame* frame)
{
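  // Unlike Example #23, m_pFrame is not unreferenced first: av_frame_move_ref()
  // does not free what dst holds, so this assumes m_pFrame is already clean here.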
  av_frame_move_ref(m_pFrame, frame);
}
Example #27
CDVDVideoCodec::VCReturn CDVDVideoCodecFFmpeg::GetPicture(VideoPicture* pVideoPicture)
{
  if (m_eof)
  {
    return VC_EOF;
  }

  // handle hw accelerators first, they may have frames ready
  if (m_pHardware)
  {
    int flags = m_codecControlFlags;
    flags &= ~DVD_CODEC_CTRL_DRAIN;
    m_pHardware->SetCodecControl(flags);
    CDVDVideoCodec::VCReturn ret = m_pHardware->Decode(m_pCodecContext, nullptr);
    if (ret == VC_PICTURE)
    {
      if (m_pHardware->GetPicture(m_pCodecContext, pVideoPicture))
        return VC_PICTURE;
      else
        return VC_ERROR;
    }
    else if (ret == VC_BUFFER)
      ;
    else
      return ret;
  }
  else if (m_pFilterGraph && !m_filterEof)
  {
    CDVDVideoCodec::VCReturn ret = FilterProcess(nullptr);
    if (ret == VC_PICTURE)
    {
      if (!SetPictureParams(pVideoPicture))
        return VC_ERROR;
      return VC_PICTURE;
    }
    else if (ret == VC_BUFFER)
      ;
    else
      return ret;
  }

  // process ffmpeg
  if (m_codecControlFlags & DVD_CODEC_CTRL_DRAIN)
  {
    AVPacket avpkt;
    av_init_packet(&avpkt);
    avpkt.data = nullptr;
    avpkt.size = 0;
    avpkt.dts = AV_NOPTS_VALUE;
    avpkt.pts = AV_NOPTS_VALUE;
    avcodec_send_packet(m_pCodecContext, &avpkt);
  }

  int ret = avcodec_receive_frame(m_pCodecContext, m_pDecodedFrame);

  if (m_decoderState == STATE_HW_FAILED && !m_pHardware)
    return VC_REOPEN;

  if(m_iLastKeyframe < m_pCodecContext->has_b_frames + 2)
    m_iLastKeyframe = m_pCodecContext->has_b_frames + 2;

  if (ret == AVERROR_EOF)
  {
    // next drain hw accel or filter
    if (m_pHardware)
    {
      int flags = m_codecControlFlags;
      flags |= DVD_CODEC_CTRL_DRAIN;
      m_pHardware->SetCodecControl(flags);
      int ret = m_pHardware->Decode(m_pCodecContext, nullptr);
      if (ret == VC_PICTURE)
      {
        if (m_pHardware->GetPicture(m_pCodecContext, pVideoPicture))
          return VC_PICTURE;
        else
          return VC_ERROR;
      }
      else
      {
        m_eof = true;
        CLog::Log(LOGDEBUG, "CDVDVideoCodecFFmpeg::GetPicture - eof hw accel");
        return VC_EOF;
      }
    }
    else if (m_pFilterGraph && !m_filterEof)
    {
      int ret = FilterProcess(nullptr);
      if (ret == VC_PICTURE)
      {
        if (!SetPictureParams(pVideoPicture))
          return VC_ERROR;
        else
          return VC_PICTURE;
      }
      else
      {
        m_eof = true;
        CLog::Log(LOGDEBUG, "CDVDVideoCodecFFmpeg::GetPicture - eof filter");
        return VC_EOF;
      }
    }
    else
    {
      m_eof = true;
      CLog::Log(LOGDEBUG, "CDVDVideoCodecFFmpeg::GetPicture - eof");
      return VC_EOF;
    }
  }
  else if (ret == AVERROR(EAGAIN))
  {
    return VC_BUFFER;
  }
  else if (ret)
  {
    CLog::Log(LOGERROR, "%s - avcodec_receive_frame returned failure", __FUNCTION__);
    return VC_ERROR;
  }

  // here we got a frame
  int64_t framePTS = m_pDecodedFrame->best_effort_timestamp;

  if (m_pCodecContext->skip_frame > AVDISCARD_DEFAULT)
  {
    if (m_dropCtrl.m_state == CDropControl::VALID &&
        m_dropCtrl.m_lastPTS != AV_NOPTS_VALUE &&
        framePTS != AV_NOPTS_VALUE &&
        framePTS > (m_dropCtrl.m_lastPTS + m_dropCtrl.m_diffPTS * 1.5))
    {
      m_droppedFrames++;
      if (m_interlaced)
        m_droppedFrames++;
    }
  }
  m_dropCtrl.Process(framePTS, m_pCodecContext->skip_frame > AVDISCARD_DEFAULT);

  if (m_pDecodedFrame->key_frame)
  {
    m_started = true;
    m_iLastKeyframe = m_pCodecContext->has_b_frames + 2;
  }
  if (m_pDecodedFrame->interlaced_frame)
    m_interlaced = true;
  else
    m_interlaced = false;

  if (!m_started)
  {
    int frames = 300;
    if (m_dropCtrl.m_state == CDropControl::VALID)
      frames = static_cast<int>(6000000 / m_dropCtrl.m_diffPTS);
    if (m_iLastKeyframe >= frames && m_pDecodedFrame->pict_type == AV_PICTURE_TYPE_I)
    {
      m_started = true;
    }
    else
    {
      av_frame_unref(m_pDecodedFrame);
      return VC_BUFFER;
    }
  }

  // push the frame to hw decoder for further processing
  if (m_pHardware)
  {
    av_frame_unref(m_pFrame);
    av_frame_move_ref(m_pFrame, m_pDecodedFrame);
    CDVDVideoCodec::VCReturn ret = m_pHardware->Decode(m_pCodecContext, m_pFrame);
    if (ret == VC_FLUSHED)
    {
      Reset();
      return ret;
    }
    else if (ret == VC_FATAL)
    {
      m_decoderState = STATE_HW_FAILED;
      return VC_REOPEN;
    }
    else if (ret == VC_PICTURE)
    {
      if (m_pHardware->GetPicture(m_pCodecContext, pVideoPicture))
        return VC_PICTURE;
      else
        return VC_ERROR;
    }

    return ret;
  }
  // process filters for sw decoding
  else
  {
    SetFilters();

    bool need_scale = std::find(m_formats.begin(),
                                m_formats.end(),
                                m_pCodecContext->pix_fmt) == m_formats.end();

    bool need_reopen = false;
    if (m_filters != m_filters_next)
      need_reopen = true;

    if (!m_filters_next.empty() && m_filterEof)
      need_reopen = true;

    if (m_pFilterIn)
    {
      if (m_pFilterIn->outputs[0]->format != m_pCodecContext->pix_fmt ||
          m_pFilterIn->outputs[0]->w != m_pCodecContext->width ||
          m_pFilterIn->outputs[0]->h != m_pCodecContext->height)
        need_reopen = true;
    }

    // try to setup new filters
    if (need_reopen || (need_scale && m_pFilterGraph == nullptr))
    {
      m_filters = m_filters_next;

      if (FilterOpen(m_filters, need_scale) < 0)
        FilterClose();
    }

    if (m_pFilterGraph && !m_filterEof)
    {
      CDVDVideoCodec::VCReturn ret = FilterProcess(m_pDecodedFrame);
      if (ret != VC_PICTURE)
        return VC_NONE;
    }
    else
    {
      av_frame_unref(m_pFrame);
      av_frame_move_ref(m_pFrame, m_pDecodedFrame);
    }

    if (!SetPictureParams(pVideoPicture))
      return VC_ERROR;
    else
      return VC_PICTURE;
  }

  return VC_NONE;
}
Example #28
int AudioDecoder::audio_thread(void *arg)
{
    VideoState *is = (VideoState *) arg;
    AVStreamsParser* ps = is->getAVStreamsParser();
    AVFrame *frame = av_frame_alloc();
    Frame *af;
#if CONFIG_AVFILTER
    int last_serial = -1;
    int64_t dec_channel_layout;
    int reconfigure;
#endif
    int got_frame = 0;
    AVRational tb;
    int ret = 0;

    if (!frame)
        return AVERROR(ENOMEM);

    do {
        if ((got_frame = is->auddec().decode_frame(frame)) < 0)
            goto the_end;

        if (got_frame) {
                tb = (AVRational){1, frame->sample_rate};

#if CONFIG_AVFILTER
                dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));

                reconfigure =
                    cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
                            (AVSampleFormat)frame->format, av_frame_get_channels(frame))    ||
                    is->audio_filter_src.channel_layout != dec_channel_layout ||
                    is->audio_filter_src.freq           != frame->sample_rate ||
                    is->auddec().pkt_serial               != last_serial;

                if (reconfigure) {
                    char buf1[1024], buf2[1024];
                    av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
                    av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
                    av_log(NULL, AV_LOG_DEBUG,
                           "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
                           is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
                           frame->sample_rate, av_frame_get_channels(frame), av_get_sample_fmt_name((AVSampleFormat)frame->format), buf2, is->auddec().pkt_serial);

                    is->audio_filter_src.fmt            = (AVSampleFormat)frame->format;
                    is->audio_filter_src.channels       = av_frame_get_channels(frame);
                    is->audio_filter_src.channel_layout = dec_channel_layout;
                    is->audio_filter_src.freq           = frame->sample_rate;
                    last_serial                         = is->auddec().pkt_serial;

                    if ((ret = configure_audio_filters(is, gOptions.afilters, 1)) < 0)
                        goto the_end;
                }

            if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
                goto the_end;

            while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
                tb = is->out_audio_filter->inputs[0]->time_base;
#endif
                if (!(af = is->sampq().peek_writable()))
                    goto the_end;

                af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
                af->pos = av_frame_get_pkt_pos(frame);
                af->serial = is->auddec().pkt_serial;
                af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});

                av_frame_move_ref(af->frame, frame);
                is->sampq().push();

#if CONFIG_AVFILTER
                if (ps->audioq.serial != is->auddec().pkt_serial)
                    break;
            }
            if (ret == AVERROR_EOF)
                is->auddec().finished = is->auddec().pkt_serial;
#endif
        }
    } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&is->agraph);
#endif
    av_frame_free(&frame);
    return ret;
}
Example #29
void CPixelBufferFFmpeg::SetRef(AVFrame *frame)
{
  av_frame_unref(m_pFrame);
  av_frame_move_ref(m_pFrame, frame);
  m_pixFormat = static_cast<AVPixelFormat>(m_pFrame->format);
}
Example #30
static int av_buffersrc_add_frame_internal(AVFilterContext *ctx,
                                           AVFrame *frame, int flags)
{
    BufferSourceContext *s = ctx->priv;
    AVFrame *copy;
    int refcounted, ret;

    s->nb_failed_requests = 0;

    if (!frame) {
        s->eof = 1;
        return 0;
    } else if (s->eof)
        return AVERROR(EINVAL);

    refcounted = !!frame->buf[0];

    if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {

    switch (ctx->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        CHECK_VIDEO_PARAM_CHANGE(ctx, s, frame->width, frame->height,
                                 frame->format);
        break;
    case AVMEDIA_TYPE_AUDIO:
        /* For layouts unknown on input but known on link after negotiation. */
        if (!frame->channel_layout)
            frame->channel_layout = s->channel_layout;
        CHECK_AUDIO_PARAM_CHANGE(ctx, s, frame->sample_rate, frame->channel_layout,
                                 av_frame_get_channels(frame), frame->format);
        break;
    default:
        return AVERROR(EINVAL);
    }

    }

    if (!av_fifo_space(s->fifo) &&
        (ret = av_fifo_realloc2(s->fifo, av_fifo_size(s->fifo) +
                                         sizeof(copy))) < 0)
        return ret;

    if (!(copy = av_frame_alloc()))
        return AVERROR(ENOMEM);

    if (refcounted) {
        av_frame_move_ref(copy, frame);
    } else {
        ret = av_frame_ref(copy, frame);
        if (ret < 0) {
            av_frame_free(&copy);
            return ret;
        }
    }

    if ((ret = av_fifo_generic_write(s->fifo, &copy, sizeof(copy), NULL)) < 0) {
        if (refcounted)
            av_frame_move_ref(frame, copy);
        av_frame_free(&copy);
        return ret;
    }

    if ((flags & AV_BUFFERSRC_FLAG_PUSH))
        if ((ret = ctx->output_pads[0].request_frame(ctx->outputs[0])) < 0)
            return ret;

    return 0;
}
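For completeness, the producer side that feeds the two av_buffersrc_add_frame() variants shown above. Passing NULL marks end of stream, exactly as the s->eof branches expect; the identifiers in this sketch are assumed:

#include <libavfilter/buffersrc.h>

/* Sketch: push one frame into the graph. On success the frame's references
 * are moved into the filter (the refcounted branch in Example #30). */
static int push_frame(AVFilterContext *buffersrc_ctx, AVFrame *frame)
{
    return av_buffersrc_add_frame(buffersrc_ctx, frame);
}

/* ... and once all input has been pushed, signal EOF with a NULL frame. */
static int push_eof(AVFilterContext *buffersrc_ctx)
{
    return av_buffersrc_add_frame(buffersrc_ctx, NULL);
}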