/* Prepare decoding/encoding of a new frame: hand no-longer-needed picture
 * buffers back to libavcodec, then select (or allocate) the buffer that will
 * hold the current picture.
 * Returns 0 on success, -1 if the picture buffer cannot be allocated. */
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    AVFrame *pic;

    s->mb_skiped = 0;  /* (sic) misspelled field name is defined elsewhere */

    /* A non-B frame advances the reference list: the old last_picture buffer
     * can be released, together with any stray still-"reference" picture that
     * is not the upcoming next_picture (e.g. left over after stream errors). */
    if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr->data[0]) {
        avcodec_default_release_buffer(avctx, (AVFrame*)s->last_picture_ptr);

        for(i=0; i<MAX_PICTURE_COUNT; i++) {
            if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference) {
                avcodec_default_release_buffer(avctx, (AVFrame*)&s->picture[i]);
            }
        }
    }

    /* Release every allocated, non-reference picture so its slot can be
     * reused below. */
    for(i=0; i<MAX_PICTURE_COUNT; i++) {
        if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/) {
            avcodec_default_release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
        }
    }

    if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
        /* we already have an unused image (it may have been set up before
         * reading the header) */
        pic= (AVFrame*)s->current_picture_ptr;
    else {
        i= ff_find_unused_picture(s, 0);
        pic= (AVFrame*)&s->picture[i];
    }

    /* Non-zero marks the picture as a reference frame; B frames are never
     * referenced.  NOTE(review): the meaning of the value 3 (vs 1) is not
     * visible here -- presumably top/bottom field reference flags; confirm
     * against the Picture.reference definition. */
    pic->reference= s->pict_type != B_TYPE ? 3 : 0;

    if( alloc_picture(s, (Picture*)pic, 0) < 0)
        return -1;

    s->current_picture_ptr= (Picture*)pic;
    s->current_picture_ptr->pict_type= s->pict_type;
    s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;

    /* Mirror the chosen picture into s->current_picture (copy_picture is
     * defined elsewhere; presumably a shallow struct copy -- confirm). */
    copy_picture(&s->current_picture, s->current_picture_ptr);

    s->hurry_up= s->avctx->hurry_up;

    return 0;
}
// Tear down all dump/encoding state in dependency order: codec before its
// stream, I/O context before the format context, then the scaler.
void AVIDump::CloseFile()
{
    if (s_stream)
    {
        if (s_stream->codec)
        {
#if LIBAVCODEC_VERSION_MAJOR < 55
            // Old libavcodec: buffers handed out through the legacy
            // get_buffer API must be returned before closing the codec.
            avcodec_default_release_buffer(s_stream->codec, s_src_frame);
#endif
            avcodec_close(s_stream->codec);
        }
        av_freep(&s_stream);
    }

    // av_frame_free() is NULL-safe and clears the pointers.
    av_frame_free(&s_src_frame);
    av_frame_free(&s_scaled_frame);

    if (s_format_context)
    {
        if (s_format_context->pb)
            avio_close(s_format_context->pb);
        av_freep(&s_format_context);
    }

    if (s_sws_context)
    {
        sws_freeContext(s_sws_context);
        s_sws_context = nullptr;
    }
}
/* libavcodec release_buffer callback: route the frame back to whichever
 * allocator produced it (VA backend, VLC picture pool, or libavcodec's
 * default pool), then scrub the plane pointers. */
static void ffmpeg_ReleaseFrameBuf( struct AVCodecContext *p_context, AVFrame *p_ff_pic )
{
    decoder_t *p_dec = (decoder_t *)p_context->opaque;
    decoder_sys_t *p_sys = p_dec->p_sys;

    if( p_sys->p_va )
        vlc_va_Release( p_sys->p_va, p_ff_pic );
    else if( p_ff_pic->opaque )
        decoder_UnlinkPicture( p_dec, (picture_t *)p_ff_pic->opaque );
    else if( p_ff_pic->type == FF_BUFFER_TYPE_INTERNAL )
        /* A frame without opaque may reach us without having gone through
         * avcodec_default_get_buffer() (VA in use, decoder being closed);
         * only internally allocated frames go back to libavcodec. */
        avcodec_default_release_buffer( p_context, p_ff_pic );

    for( int i = 0; i < 4; i++ )
        p_ff_pic->data[i] = NULL;
}
/* libavcodec release_buffer callback: return the frame to the VA surface
 * pool, the VLC picture pool, or libavcodec's default allocator.  Plane
 * pointers are cleared except on the default-release path, which manages
 * them itself. */
static void ffmpeg_ReleaseFrameBuf( struct AVCodecContext *p_context, AVFrame *p_ff_pic )
{
    decoder_t *p_dec = (decoder_t *)p_context->opaque;
    decoder_sys_t *p_sys = p_dec->p_sys;

    if( p_sys->p_va )
    {
        VaUngrabSurface( p_sys->p_va, p_ff_pic );
    }
    else if( p_ff_pic->opaque )
    {
        decoder_UnlinkPicture( p_dec, (picture_t *)p_ff_pic->opaque );
    }
    else
    {
        avcodec_default_release_buffer( p_context, p_ff_pic );
        return;
    }

    int i = 0;
    while( i < 4 )
        p_ff_pic->data[i++] = NULL;
}
// release_buffer callback: dispose of the per-frame payload stashed in
// 'opaque' (av_freep also resets the pointer), then let libavcodec
// reclaim the frame's own buffers.
void FFmpegDecoderVideo::releaseBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    if (picture != 0)
    {
        av_freep(&picture->opaque);
    }

    avcodec_default_release_buffer(context, picture);
}
/* libavcodec release_buffer callback: frames without an attached VLC
 * picture go back to libavcodec; otherwise detach the plane pointers
 * and unlink the picture when appropriate. */
static void ffmpeg_ReleaseFrameBuf( struct AVCodecContext *p_context, AVFrame *p_ff_pic )
{
    decoder_t *p_dec = (decoder_t *)p_context->opaque;

    if( !p_ff_pic->opaque )
    {
        avcodec_default_release_buffer( p_context, p_ff_pic );
        return;
    }

    picture_t *p_pic = (picture_t*)p_ff_pic->opaque;

    for( int i = 0; i < 4; i++ )
        p_ff_pic->data[i] = NULL;

    /* H.264 frames are unlinked even with reference == 0 to work around a
     * libavcodec bug. */
    if( p_ff_pic->reference != 0 ||
        p_dec->p_sys->i_codec_id == CODEC_ID_H264 )
    {
        p_dec->pf_picture_unlink( p_dec, p_pic );
    }
}
/* Free whatever the matching get_buffer stored in frame->opaque, then
 * hand the frame back to libavcodec's default release path. */
static void
player_av_av_release_buffer (struct AVCodecContext *c, AVFrame *pic)
{
    if (pic != NULL)
        av_freep (&pic->opaque);

    avcodec_default_release_buffer (c, pic);
}
/* release_buffer callback: free the per-frame payload stored in
 * pic->opaque and clear the pointer so a repeated release (or a stale
 * frame) cannot double-free it, then perform the default release.
 *
 * Fixes vs. previous version: the freed opaque pointer was left
 * dangling, the `!= NULL` guard before free() was redundant (free(NULL)
 * is a no-op), and `pic` was dereferenced without a NULL check. */
static void vd_release_buffer(struct AVCodecContext *c, AVFrame *pic)
{
    if (pic) {
        free(pic->opaque);
        pic->opaque = NULL;
    }
    avcodec_default_release_buffer(c, pic);
}
/* release_buffer callback paired with a get_buffer that attaches data to
 * frame->opaque (presumably a PTS, per the name -- allocated elsewhere):
 * free it, then do the standard libavcodec release. */
void release_buffer_with_pts(struct AVCodecContext *ctx, AVFrame *frame)
{
    if (frame != NULL)
        av_freep(&frame->opaque);

    avcodec_default_release_buffer(ctx, frame);
}
void our_release_buffer(struct AVCodecContext *c, AVFrame *pic) { if (pic) { delete reinterpret_cast<uint64_t*>(pic->opaque); } avcodec_default_release_buffer(c, pic); }
/* Drop the frame's opaque payload (av_freep also NULLs the pointer),
 * then defer to libavcodec's stock release. */
static void I_AVReleaseBufferProc(struct AVCodecContext *c, AVFrame *pic)
{
    if (pic != NULL)
    {
        av_freep(&pic->opaque);
    }
    avcodec_default_release_buffer(c, pic);
}
// release_buffer callback: internally allocated frames go straight back
// to libavcodec; everything else is returned through the owning
// VideoDecoder stored in the codec context's opaque field.
static void ReleaseBuffer(AVCodecContext *Context, AVFrame *Frame)
{
    if (Frame->type == FF_BUFFER_TYPE_INTERNAL)
    {
        avcodec_default_release_buffer(Context, Frame);
        return;
    }

    VideoDecoder *parent = static_cast<VideoDecoder*>(Context->opaque);
    if (!parent)
    {
        LOG(VB_GENERAL, LOG_ERR, "Invalid context");
        return;
    }

    parent->ReleaseAVBuffer(Context, Frame);
}
// release_buffer callback.  YUV420P frames are backed by our Image (set
// up in the matching get_buffer path), so drop our reference; all other
// frames belong to libavcodec's default allocator.
//
// Fix: also clear the borrowed plane pointers after releasing the Image,
// so nothing can touch the freed planes through this AVFrame afterwards
// (matches the corrected variant of this callback).
/* static */ void
FFmpegH264Decoder<LIBAV_VER>::ReleaseBufferCb(AVCodecContext* aCodecContext,
                                              AVFrame* aFrame)
{
  switch (aCodecContext->pix_fmt) {
    case PIX_FMT_YUV420P: {
      Image* image = static_cast<Image*>(aFrame->opaque);
      if (image) {
        image->Release();
      }
      for (uint32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
        aFrame->data[i] = nullptr;
      }
      break;
    }
    default:
      avcodec_default_release_buffer(aCodecContext, aFrame);
      break;
  }
}
/* Release a Picture completely: hand any allocated frame data back to
 * libavcodec, then free the side tables owned by the Picture itself. */
static void free_picture(MpegEncContext *s, Picture *pic)
{
    if (pic->data[0])
        avcodec_default_release_buffer(s->avctx, (AVFrame*)pic);

    av_freep(&pic->qscale_table);
    av_freep(&pic->mb_type_base);
    pic->mb_type = NULL;

    /* per-direction motion/reference tables */
    av_freep(&pic->motion_val_base[0]);
    av_freep(&pic->ref_index[0]);
    av_freep(&pic->motion_val_base[1]);
    av_freep(&pic->ref_index[1]);
}
// release_buffer callback.  YUV420P frames are backed by our Image, so
// drop our reference and scrub the borrowed plane pointers; any other
// pixel format belongs to libavcodec's default allocator.
/* static */ void
FFmpegH264Decoder<LIBAV_VER>::ReleaseBufferCb(AVCodecContext* aCodecContext,
                                              AVFrame* aFrame)
{
  if (aCodecContext->pix_fmt != PIX_FMT_YUV420P) {
    avcodec_default_release_buffer(aCodecContext, aFrame);
    return;
  }

  Image* image = static_cast<Image*>(aFrame->opaque);
  if (image) {
    image->Release();
  }

  for (uint32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    aFrame->data[i] = nullptr;
  }
}
/** @internal @This is called by avcodec when releasing a frame:
 * unmap the uref's picture planes (or fall back to libavcodec's default
 * release for frames we did not map), then free the uref.
 *
 * Fix: the debug log passed a uint64_t where "%u" expects an unsigned
 * int -- undefined behavior in a varargs call; the value is now cast to
 * match the format.
 *
 * @param context current avcodec context
 * @param frame avframe handler released by avcodec black magic box
 */
static void upipe_avcdec_release_buffer(struct AVCodecContext *context, AVFrame *frame)
{
    struct upipe *upipe = context->opaque;
    struct uref *uref = frame->opaque;
    const struct upipe_av_plane *planes = NULL;
    int i;
    uint64_t framenum = 0;

    uref_pic_get_number(uref, &framenum);
    upipe_dbg_va(upipe, "Releasing frame %u (%p)", (unsigned) framenum, uref);

    if (likely(uref->ubuf)) {
        /* Planes were mapped from the uref's ubuf: unmap each one and
         * drop the borrowed data pointer. */
        planes = upipe_avcdec_from_upipe(upipe)->pixfmt->planes;
        for (i = 0; i < 4 && planes[i].chroma; i++) {
            ubuf_pic_plane_unmap(uref->ubuf, planes[i].chroma, 0, 0, -1, -1);
            frame->data[i] = NULL;
        }
    } else {
        avcodec_default_release_buffer(context, frame);
    }

    uref_free(uref);
}
/* Frame-attached payload first, pixel buffers second: free the opaque
 * allocation (av_freep resets the pointer), then do the default release. */
void our_release_buffer(struct AVCodecContext *c, AVFrame *pic)
{
    if (pic != NULL)
        av_freep(&pic->opaque);

    avcodec_default_release_buffer(c, pic);
}
/* Free the storage hung off frame->opaque, then release the frame's
 * buffers through libavcodec's default path.  NOTE: callers must pass a
 * non-NULL frame. */
void frame_storage_destroy(AVCodecContext *c, AVFrame *frame)
{
    av_freep(&frame->opaque);

    avcodec_default_release_buffer(c, frame);
}
// release_buffer callback: free the payload carried in 'opaque'
// (av_freep also clears the pointer) before the default release.
void DecoderVideo::releaseBuffer(struct AVCodecContext *c, AVFrame *pic)
{
    if (pic != NULL)
    {
        av_freep(&pic->opaque);
    }

    avcodec_default_release_buffer(c, pic);
}
/* Free the opaque payload attached to the frame (pointer is reset by
 * av_freep), then hand the frame back to libavcodec. */
void video_release_buffer( struct AVCodecContext *c, AVFrame *f )
{
    if( f != NULL )
        av_freep( &f->opaque );

    avcodec_default_release_buffer( c, f );
}
// release_buffer callback: av_freep() frees and NULLs the opaque payload
// in one step; libavcodec then reclaims the frame's own buffers.
void FFMpegDecoder::my_release_buffer(struct AVCodecContext *c, AVFrame *pic)
{
    if (pic != NULL)
    {
        av_freep(&pic->opaque);
    }

    avcodec_default_release_buffer(c, pic);
}
/* Reclaim the per-frame opaque allocation, then perform the standard
 * libavcodec release. */
static void our_release_buffer(AVCodecContext *c, AVFrame *pic)
{
    if (pic != NULL)
        av_freep(&pic->opaque);

    avcodec_default_release_buffer(c, pic);
}