Example #1
void av_bitstream_filter_close(AVBitStreamFilterContext *bsfc){
    if(bsfc->filter->close)
        bsfc->filter->close(bsfc);
    av_freep(&bsfc->priv_data);
    av_parser_close(bsfc->parser);
    av_free(bsfc);
}
Example #2
static void libavcodec_uninit(H264_CONTEXT* h264)
{
	H264_CONTEXT_LIBAVCODEC* sys = (H264_CONTEXT_LIBAVCODEC*) h264->pSystemData;

	if (!sys)
		return;

	if (sys->videoFrame)
	{
		av_free(sys->videoFrame);
	}

	if (sys->codecParser)
	{
		av_parser_close(sys->codecParser);
	}

	if (sys->codecContext)
	{
		avcodec_close(sys->codecContext);
		av_free(sys->codecContext);
	}

	free(sys);
	h264->pSystemData = NULL;
}
Example #3
static int
old_flac_header (AVFormatContext * s, int idx)
{
    struct ogg *ogg = s->priv_data;
    AVStream *st = s->streams[idx];
    struct ogg_stream *os = ogg->streams + idx;
    AVCodecParserContext *parser = av_parser_init(AV_CODEC_ID_FLAC);
    int size;
    uint8_t *data;

    if (!parser)
        return -1;

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = AV_CODEC_ID_FLAC;

    parser->flags = PARSER_FLAG_COMPLETE_FRAMES;
    av_parser_parse2(parser, st->codec,
                     &data, &size, os->buf + os->pstart, os->psize,
                     AV_NOPTS_VALUE, AV_NOPTS_VALUE, -1);

    av_parser_close(parser);

    if (st->codec->sample_rate) {
        avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
        return 0;
    }

    return 1;
}
Example #4
STDMETHODIMP CDecAvcodec::DestroyDecoder()
{
  DbgLog((LOG_TRACE, 10, L"Shutting down ffmpeg..."));
  m_pAVCodec	= NULL;

  if (m_pParser) {
    av_parser_close(m_pParser);
    m_pParser = NULL;
  }

  if (m_pAVCtx) {
    avcodec_close(m_pAVCtx);
    av_freep(&m_pAVCtx->extradata);
    av_freep(&m_pAVCtx);
  }
  av_frame_free(&m_pFrame);

  av_freep(&m_pFFBuffer);
  m_nFFBufferSize = 0;

  av_freep(&m_pFFBuffer2);
  m_nFFBufferSize2 = 0;

  if (m_pSwsContext) {
    sws_freeContext(m_pSwsContext);
    m_pSwsContext = NULL;
  }

  m_nCodecId = AV_CODEC_ID_NONE;

  return S_OK;
}
Example #5
STDMETHODIMP CDecAvcodec::Flush()
{
  if (m_pAVCtx) {
    avcodec_flush_buffers(m_pAVCtx);
  }

  if (m_pParser) {
    av_parser_close(m_pParser);
    m_pParser = av_parser_init(m_nCodecId);
  }

  m_CurrentThread = 0;
  m_rtStartCache = AV_NOPTS_VALUE;
  m_bWaitingForKeyFrame = TRUE;
  m_h264RandomAccess.flush(m_pAVCtx->thread_count);

  m_nBFramePos = 0;
  m_tcBFrameDelay[0].rtStart = m_tcBFrameDelay[0].rtStop = AV_NOPTS_VALUE;
  m_tcBFrameDelay[1].rtStart = m_tcBFrameDelay[1].rtStop = AV_NOPTS_VALUE;

  if (!m_bDXVA && !(m_pCallback->GetDecodeFlags() & LAV_VIDEO_DEC_FLAG_DVD) && (m_nCodecId == AV_CODEC_ID_H264 || m_nCodecId == AV_CODEC_ID_MPEG2VIDEO)) {
    InitDecoder(m_nCodecId, &m_pCallback->GetInputMediaType());
  }

  return __super::Flush();
}
Example #6
static av_cold int uninit(AVCodecContext *avctx)
{
    CHDContext *priv = avctx->priv_data;
    HANDLE device;

    device = priv->dev;
    DtsStopDecoder(device);
    DtsCloseDecoder(device);
    DtsDeviceClose(device);

    av_parser_close(priv->parser);
    if (priv->bsfc) {
        av_bitstream_filter_close(priv->bsfc);
    }

    av_free(priv->sps_pps_buf);

    if (priv->pic.data[0])
        avctx->release_buffer(avctx, &priv->pic);

    if (priv->head) {
       OpaqueList *node = priv->head;
       while (node) {
          OpaqueList *next = node->next;
          av_free(node);
          node = next;
       }
    }

    return 0;
}
Example #7
static void remove_extradata_close(AVBSFContext *ctx)
{
    RemoveExtradataContext *s = ctx->priv_data;

    avcodec_free_context(&s->avctx);
    av_parser_close(s->parser);
}
Example #8
int ff_qsv_decode_close(QSVContext *q)
{
    QSVFrame *cur = q->work_frames;

    if (q->session)
        MFXVideoDECODE_Close(q->session);

    while (q->async_fifo && av_fifo_size(q->async_fifo)) {
        QSVFrame *out_frame;
        mfxSyncPoint *sync;

        av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,      sizeof(sync),      NULL);

        av_freep(&sync);
    }

    while (cur) {
        q->work_frames = cur->next;
        av_frame_free(&cur->frame);
        av_freep(&cur);
        cur = q->work_frames;
    }

    av_fifo_free(q->async_fifo);
    q->async_fifo = NULL;

    av_parser_close(q->parser);
    avcodec_free_context(&q->avctx_internal);

    if (q->internal_session)
        MFXClose(q->internal_session);

    return 0;
}
Example #9
FFmpegH264Decoder<LIBAV_VER>::~FFmpegH264Decoder()
{
  MOZ_COUNT_DTOR(FFmpegH264Decoder);
  if (mCodecParser) {
    av_parser_close(mCodecParser);
    mCodecParser = nullptr;
  }
}
Example #10
static
void
ppb_video_decoder_destroy_priv(void *p)
{
    struct pp_video_decoder_s *vd = p;

    if (vd->orig_graphics3d) {
        pp_resource_unref(vd->orig_graphics3d);
        vd->orig_graphics3d = 0;
    }

    if (vd->graphics3d) {
        pp_resource_unref(vd->graphics3d);
        vd->graphics3d = 0;
    }

    if (vd->avparser) {
        av_parser_close(vd->avparser);
        vd->avparser = NULL;
    }

    if (vd->avctx)
        avcodec_free_context(&vd->avctx);

    if (vd->avframe)
        av_frame_free(&vd->avframe);

    if (vd->va_context.context_id) {
        vaDestroyContext(display.va, vd->va_context.context_id);
        vd->va_context.context_id = 0;
    }

    if (vd->va_context.config_id) {
        vaDestroyConfig(display.va, vd->va_context.config_id);
        vd->va_context.config_id = 0;
    }

    vaDestroySurfaces(display.va, vd->surfaces, MAX_VIDEO_SURFACES);
    for (uintptr_t k = 0; k < MAX_VIDEO_SURFACES; k ++) {
        vd->surfaces[k] = VA_INVALID_SURFACE;
        vd->surface_used[k] = 0;

    }

    for (uintptr_t k = 0; k < vd->buffer_count; k ++) {
        vd->ppp_video_decoder_dev->DismissPictureBuffer(vd->instance->id, vd->self_id,
                                                        vd->buffers[k].id);
        pthread_mutex_lock(&display.lock);
        glXDestroyPixmap(display.x, vd->buffers[k].glx_pixmap);
        XFreePixmap(display.x, vd->buffers[k].pixmap);
        pthread_mutex_unlock(&display.lock);
    }

    vd->buffer_count = 0;
    vd->buffers_were_requested = 0;
    free_and_nullify(vd->buffers);
}
Example #11
SharedVideoContext::~SharedVideoContext() {
	if (CodecContext) {
		avcodec_close(CodecContext);
		if (FreeCodecContext)
			av_freep(&CodecContext);
	}
	av_parser_close(Parser);
	if (BitStreamFilter)
		av_bitstream_filter_close(BitStreamFilter);
	delete TCC;
}
Example #12
void video_stop_decoder() {
	//Send a NULL buffer to the callback to indicate that we're done.
	frame_processing_callback(NULL, 0, 0, 0);

	avcodec_close(context);
	//avcodec_free_context() is quite recent and not very useful for us; always use avcodec_close for now.
	//avcodec_free_context(&context);
	av_parser_close(cpContext);
	av_frame_free(&current_frame);
	if(tempBuffer != NULL)
		free(tempBuffer);
}
Example #13
void DisposeParser()
{
  if (m_parser)
  {
    av_parser_close(m_parser);
    m_parser = nullptr;
  }
  if (m_context)
  {
    avcodec_close(m_context);
    m_context = nullptr;
  }
}
Example #14
static av_unused int64_t flac_read_timestamp(AVFormatContext *s, int stream_index,
                                             int64_t *ppos, int64_t pos_limit)
{
    AVPacket pkt, out_pkt;
    AVStream *st = s->streams[stream_index];
    AVCodecParserContext *parser;
    int ret;
    int64_t pts = AV_NOPTS_VALUE;

    if (avio_seek(s->pb, *ppos, SEEK_SET) < 0)
        return AV_NOPTS_VALUE;

    av_init_packet(&pkt);
    parser = av_parser_init(st->codec->codec_id);
    if (!parser){
        return AV_NOPTS_VALUE;
    }
    parser->flags |= PARSER_FLAG_USE_CODEC_TS;

    for (;;){
        ret = ff_raw_read_partial_packet(s, &pkt);
        if (ret < 0){
            if (ret == AVERROR(EAGAIN))
                continue;
            else
                break;
        }
        av_init_packet(&out_pkt);
        ret = av_parser_parse2(parser, st->codec,
                               &out_pkt.data, &out_pkt.size, pkt.data, pkt.size,
                               pkt.pts, pkt.dts, *ppos);

        av_free_packet(&pkt);
        if (out_pkt.size){
            int size = out_pkt.size;
            if (parser->pts != AV_NOPTS_VALUE){
                // seeking may not have started from beginning of a frame
                // calculate frame start position from next frame backwards
                *ppos = parser->next_frame_offset - size;
                pts = parser->pts;
                break;
            }
        }
    }
    av_parser_close(parser);
    return pts;
}
Example #15
void ff_free_parser_state(AVFormatContext *s, AVParserState *state)
{
    int i;
    AVParserStreamState *ss;

    if (!state)
        return;

    for (i = 0; i < state->nb_streams; i++) {
        ss = &state->stream_states[i];
        if (ss->parser)
            av_parser_close(ss->parser);
    }

    av_free(state->stream_states);
    av_free(state);
}
Example #16
static av_cold int uninit(AVCodecContext *avctx)
{
    CHDContext *priv = avctx->priv_data;
    HANDLE device;

    device = priv->dev;
    DtsStopDecoder(device);
    DtsCloseDecoder(device);
    DtsDeviceClose(device);

    /*
     * Restore original extradata, so that if the decoder is
     * reinitialised, the bitstream detection and filtering
     * will work as expected.
     */
    if (priv->orig_extradata) {
        av_free(avctx->extradata);
        avctx->extradata = priv->orig_extradata;
        avctx->extradata_size = priv->orig_extradata_size;
        priv->orig_extradata = NULL;
        priv->orig_extradata_size = 0;
    }

    av_parser_close(priv->parser);
    if (priv->bsfc) {
        av_bitstream_filter_close(priv->bsfc);
    }

    av_free(priv->sps_pps_buf);

    if (priv->pic.data[0])
        avctx->release_buffer(avctx, &priv->pic);

    if (priv->head) {
       OpaqueList *node = priv->head;
       while (node) {
          OpaqueList *next = node->next;
          av_free(node);
          node = next;
       }
    }

    return 0;
}
Example #17
static void
gst_vaapi_decoder_ffmpeg_close(GstVaapiDecoderFfmpeg *ffdecoder)
{
    GstVaapiDecoderFfmpegPrivate * const priv = ffdecoder->priv;

    if (priv->avctx) {
        if (priv->is_opened) {
            avcodec_close(priv->avctx);
            priv->is_opened = FALSE;
        }
        av_freep(&priv->avctx->extradata);
        priv->avctx->extradata_size = 0;
    }

    if (priv->pctx) {
        av_parser_close(priv->pctx);
        priv->pctx = NULL;
    }
}
Example #18
void CVideoDecoder2::Finit(void)
{
    if(pQueueMutex){
        uv_mutex_destroy(pQueueMutex);
        free(pQueueMutex);
        pQueueMutex = NULL;
    }
    if(pQueueNotEmpty){
        uv_cond_destroy(pQueueNotEmpty);
        free(pQueueNotEmpty);
        pQueueNotEmpty = NULL;
    }
    sws_freeContext(pConvertCtx);
    av_parser_close(pCodecParserCtx);
    av_frame_free(&pFrame);
    av_frame_free(&pFrameYUV);
    avcodec_close(pCodecCtx);
    av_freep(&pCodecCtx);
    bInit = false;
}
Example #19
void ff_free_parser_state(AVFormatContext *s, AVParserState *state)
{
    int i;
    AVParserStreamState *ss;

    if (!state)
        return;

    for (i = 0; i < state->nb_streams; i++) {
        ss = &state->stream_states[i];
        if (ss->parser)
            av_parser_close(ss->parser);
        av_free_packet(&ss->cur_pkt);
    }

    free_packet_list(state->packet_buffer);
    free_packet_list(state->raw_packet_buffer);

    av_free(state->stream_states);
    av_free(state);
}
Example #20
void ff_closeH264Dec() {

    if(parser) {
        av_parser_close(parser);
        parser = NULL;
    }

    if(pFrame) {
        av_frame_free(&pFrame);
        pFrame = NULL;
    }

    if(pFrame2) {
        av_frame_free(&pFrame2);
        pFrame2 = NULL;
    }

    if(pFrame2Buf) {
        av_free(pFrame2Buf);
        pFrame2Buf = NULL;
    }

    if(img_convert_ctx) {
        sws_freeContext(img_convert_ctx);
        img_convert_ctx = NULL;
    }

    if(pCodecCtx) {
        avcodec_close(pCodecCtx);
        av_free(pCodecCtx);
        pCodecCtx = NULL;
    }
}
Example #21
int ff_qsv_decode_close(QSVContext *q)
{
    QSVFrame *cur = q->work_frames;

    while (cur) {
        q->work_frames = cur->next;
        av_frame_free(&cur->frame);
        av_freep(&cur);
        cur = q->work_frames;
    }

    av_fifo_free(q->async_fifo);
    q->async_fifo = NULL;

    av_parser_close(q->parser);
    avcodec_free_context(&q->avctx_internal);

    if (q->internal_session)
        MFXClose(q->internal_session);

    return 0;
}
Example #22
HRESULT CLAVAudio::FreeBitstreamContext()
{
  if (m_avBSContext) {
    av_write_trailer(m_avBSContext); // For the SPDIF muxer that frees the buffers
    avformat_free_context(m_avBSContext);
  }
  m_avBSContext = NULL;

  if (m_pParser)
    av_parser_close(m_pParser);
  m_pParser = NULL;

  if (m_pAVCtx) {
    if (m_pAVCtx->codec)
      avcodec_close(m_pAVCtx);
    av_freep(&m_pAVCtx->extradata);
    av_freep(&m_pAVCtx);
  }

  // Dump any remaining data
  m_bsOutput.SetSize(0);

  return S_OK;
}
Example #23
int main(int argc, char* argv[])
{
	AVCodec *pCodec;
    AVCodecContext *pCodecCtx= NULL;
	AVCodecParserContext *pCodecParserCtx=NULL;

    int frame_count;
    FILE *fp_in;
	FILE *fp_out;
    AVFrame	*pFrame,*pFrameYUV;
	uint8_t *out_buffer;
	const int in_buffer_size=4096;
	uint8_t in_buffer[in_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE]={0};
	uint8_t *cur_ptr;
	int cur_size;

    AVPacket packet;
	int ret, got_picture;
	
	int y_size;

#if TEST_HEVC
	enum AVCodecID codec_id=AV_CODEC_ID_HEVC;
	char filepath_in[]="bigbuckbunny_480x272.hevc";
#elif TEST_H264
	enum AVCodecID codec_id=AV_CODEC_ID_H264;
	char filepath_in[]="bigbuckbunny_480x272.h264";
#else
	enum AVCodecID codec_id=AV_CODEC_ID_MPEG2VIDEO;
	char filepath_in[]="bigbuckbunny_480x272.m2v";
#endif

	char filepath_out[]="bigbuckbunny_480x272.yuv";
	int first_time=1;

	struct SwsContext *img_convert_ctx;

	//av_log_set_level(AV_LOG_DEBUG);
	
	avcodec_register_all();

    pCodec = avcodec_find_decoder(codec_id);
    if (!pCodec) {
        printf("Codec not found\n");
        return -1;
    }
    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (!pCodecCtx){
        printf("Could not allocate video codec context\n");
        return -1;
    }

	pCodecParserCtx=av_parser_init(codec_id);
	if (!pCodecParserCtx){
		printf("Could not allocate video parser context\n");
		return -1;
	}

    //if(pCodec->capabilities&CODEC_CAP_TRUNCATED)
    //    pCodecCtx->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
    
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        printf("Could not open codec\n");
        return -1;
    }
	//Input File
    fp_in = fopen(filepath_in, "rb");
    if (!fp_in) {
        printf("Could not open input stream\n");
        return -1;
    }
	//Output File
	fp_out = fopen(filepath_out, "wb");
	if (!fp_out) {
		printf("Could not open output YUV file\n");
		return -1;
	}

    pFrame = av_frame_alloc();
	av_init_packet(&packet);


	while (1) {
        cur_size = fread(in_buffer, 1, in_buffer_size, fp_in);
        if (cur_size == 0)
            break;
        cur_ptr=in_buffer;

        while (cur_size>0){

			int len = av_parser_parse2(
				pCodecParserCtx, pCodecCtx,
				&packet.data, &packet.size,
				cur_ptr , cur_size ,
				AV_NOPTS_VALUE, AV_NOPTS_VALUE, AV_NOPTS_VALUE);

			cur_ptr += len;
			cur_size -= len;

			if(packet.size==0)
				continue;

			//Some Info from AVCodecParserContext
			printf("Packet Size:%6d\t",packet.size);
			switch(pCodecParserCtx->pict_type){
				case AV_PICTURE_TYPE_I: printf("Type: I\t");break;
				case AV_PICTURE_TYPE_P: printf("Type: P\t");break;
				case AV_PICTURE_TYPE_B: printf("Type: B\t");break;
				default: printf("Type: Other\t");break;
			}
			printf("Output Number:%4d\t",pCodecParserCtx->output_picture_number);
			printf("Offset:%lld\n",pCodecParserCtx->cur_offset);

			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet);
			if (ret < 0) {
				printf("Decode Error.\n");
				return ret;
			}
			if (got_picture) {
				if(first_time){
					printf("\nCodec Full Name:%s\n",pCodecCtx->codec->long_name);
					printf("width:%d\nheight:%d\n\n",pCodecCtx->width,pCodecCtx->height);
					//SwsContext
					img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 
						pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 
					
					pFrameYUV=av_frame_alloc();
					out_buffer=(uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
					avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
					
					y_size=pCodecCtx->width*pCodecCtx->height;

					first_time=0;
				}

				printf("Succeed to decode 1 frame!\n");
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, 
					pFrameYUV->data, pFrameYUV->linesize);

				fwrite(pFrameYUV->data[0],1,y_size,fp_out);     //Y 
				fwrite(pFrameYUV->data[1],1,y_size/4,fp_out);   //U
				fwrite(pFrameYUV->data[2],1,y_size/4,fp_out);   //V
			}
		}

    }

	//Flush Decoder
    packet.data = NULL;
    packet.size = 0;
	while(1){
		ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet);
		if (ret < 0) {
			printf("Decode Error.\n");
			return ret;
		}
		if (!got_picture)
			break;
		if (got_picture) {
			printf("Flush Decoder: Succeed to decode 1 frame!\n");
			sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, 
				pFrameYUV->data, pFrameYUV->linesize);

			fwrite(pFrameYUV->data[0],1,y_size,fp_out);     //Y
			fwrite(pFrameYUV->data[1],1,y_size/4,fp_out);   //U
			fwrite(pFrameYUV->data[2],1,y_size/4,fp_out);   //V
		}
	}

    fclose(fp_in);
	fclose(fp_out);
    
	sws_freeContext(img_convert_ctx);
	av_parser_close(pCodecParserCtx);

	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	av_free(pCodecCtx);

	return 0;
}
Example #24
int main(int argc, char **argv)
{
    const char *filename, *outfilename;
    const AVCodec *codec;
    AVCodecParserContext *parser;
    AVCodecContext *c= NULL;
    FILE *f;
    AVFrame *picture;
    uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
    uint8_t *data;
    size_t   data_size;
    int ret;
    AVPacket *pkt;

    if (argc <= 2) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
        exit(0);
    }
    filename    = argv[1];
    outfilename = argv[2];

    avcodec_register_all();

    pkt = av_packet_alloc();
    if (!pkt)
        exit(1);

    /* set end of buffer to 0 (this ensures that no overreading happens for damaged MPEG streams) */
    memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    /* find the MPEG-1 video decoder */
    codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    parser = av_parser_init(codec->id);
    if (!parser) {
        fprintf(stderr, "parser not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    picture = av_frame_alloc();

    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }

    while (!feof(f)) {
        /* read raw data from the input file */
        data_size = fread(inbuf, 1, INBUF_SIZE, f);
        if (!data_size)
            break;

        /* use the parser to split the data into frames */
        data = inbuf;
        while (data_size > 0) {
            ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
                                   data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
            if (ret < 0) {
                fprintf(stderr, "Error while parsing\n");
                exit(1);
            }
            data      += ret;
            data_size -= ret;

            if (pkt->size)
                decode(c, picture, pkt, outfilename);
        }
    }

    /* flush the decoder */
    decode(c, picture, NULL, outfilename);

    fclose(f);

    av_parser_close(parser);
    avcodec_free_context(&c);
    av_frame_free(&picture);
    av_packet_free(&pkt);

    return 0;
}
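Note: the main() above calls a decode() helper that is not part of the excerpt. A minimal sketch of what such a helper could look like follows, built on the send/receive decoding API (avcodec_send_packet / avcodec_receive_frame) and writing only the raw luma plane of each frame; the body is an illustration under those assumptions, not the helper from the original example, and it relies on the stdio/stdlib headers the rest of the program already includes.

static void decode(AVCodecContext *dec_ctx, AVFrame *frame, AVPacket *pkt,
                   const char *outfilename)
{
    /* pkt == NULL puts the decoder into draining (flush) mode */
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0) {
        fprintf(stderr, "Error sending a packet for decoding\n");
        exit(1);
    }

    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            exit(1);
        }

        /* Append the luma plane of the decoded frame to the output file */
        FILE *out = fopen(outfilename, "ab");
        if (out) {
            for (int y = 0; y < frame->height; y++)
                fwrite(frame->data[0] + y * frame->linesize[0], 1,
                       frame->width, out);
            fclose(out);
        }
    }
}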
Example #25
static void clear_parser(sh_common_t *sh)
{
    av_parser_close(sh->parser);
    sh->parser = NULL;
    av_freep(&sh->avctx);
}
Example #26
static gboolean
gst_vaapi_decoder_ffmpeg_open(GstVaapiDecoderFfmpeg *ffdecoder, GstBuffer *buffer)
{
    GstVaapiDecoderFfmpegPrivate * const priv = ffdecoder->priv;
    GstVaapiDisplay * const display = GST_VAAPI_DECODER_DISPLAY(ffdecoder);
    GstBuffer * const codec_data = GST_VAAPI_DECODER_CODEC_DATA(ffdecoder);
    GstVaapiCodec codec = GST_VAAPI_DECODER_CODEC(ffdecoder);
    enum CodecID codec_id;
    AVCodec *ffcodec;
    gboolean try_parser, need_parser;
    int ret;

    gst_vaapi_decoder_ffmpeg_close(ffdecoder);

    if (codec_data) {
        const guchar *data = GST_BUFFER_DATA(codec_data);
        const guint   size = GST_BUFFER_SIZE(codec_data);
        if (!set_codec_data(priv->avctx, data, size))
            return FALSE;
    }

    codec_id = get_codec_id_from_codec(codec);
    if (codec_id == CODEC_ID_NONE)
        return FALSE;

    ffcodec = avcodec_find_decoder(codec_id);
    if (!ffcodec)
        return FALSE;

    switch (codec_id) {
    case CODEC_ID_H264:
        /* For AVC1 formats, sequence headers are in extradata and
           input encoded buffers represent the whole NAL unit */
        try_parser  = priv->avctx->extradata_size == 0;
        need_parser = try_parser;
        break;
    case CODEC_ID_WMV3:
        /* There is no WMV3 parser in FFmpeg */
        try_parser  = FALSE;
        need_parser = FALSE;
        break;
    case CODEC_ID_VC1:
    /* For VC-1, sequence headers are in extradata and input encoded
           buffers represent the whole slice */
        try_parser  = priv->avctx->extradata_size == 0;
        need_parser = FALSE;
        break;
    default:
        try_parser  = TRUE;
        need_parser = TRUE;
        break;
    }

    if (try_parser) {
        priv->pctx = av_parser_init(codec_id);
        if (!priv->pctx && need_parser)
            return FALSE;
    }

    /* XXX: av_find_stream_info() does this, and some codecs really do want
       an extradata buffer for initialization (e.g. VC-1) */
    if (!priv->avctx->extradata && priv->pctx && priv->pctx->parser->split) {
        const guchar *buf = GST_BUFFER_DATA(buffer);
        guint buf_size = GST_BUFFER_SIZE(buffer);
        buf_size = priv->pctx->parser->split(priv->avctx, buf, buf_size);
        if (buf_size > 0 && !set_codec_data(priv->avctx, buf, buf_size))
            return FALSE;
    }

    if (priv->pctx && !need_parser) {
        av_parser_close(priv->pctx);
        priv->pctx = NULL;
    }

    /* Use size information from the demuxer, whenever available */
    priv->avctx->coded_width  = GST_VAAPI_DECODER_WIDTH(ffdecoder);
    priv->avctx->coded_height = GST_VAAPI_DECODER_HEIGHT(ffdecoder);

    GST_VAAPI_DISPLAY_LOCK(display);
    ret = avcodec_open(priv->avctx, ffcodec);
    GST_VAAPI_DISPLAY_UNLOCK(display);
    if (ret < 0)
        return FALSE;
    return TRUE;
}
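Taken together, the examples follow a common pattern: close the AVCodecParserContext with av_parser_close(), free any AVFrame, and close and free the AVCodecContext, resetting each pointer so the cleanup path can safely run more than once. Below is a minimal sketch of that pattern; the MyDecoder struct and the my_decoder_close() helper are hypothetical names introduced here for illustration and do not come from any of the projects above.

#include <libavcodec/avcodec.h>

/* Hypothetical state bundling the objects the examples above tear down. */
typedef struct MyDecoder {
    AVCodecParserContext *parser;
    AVCodecContext       *avctx;
    AVFrame              *frame;
} MyDecoder;

/* Release everything in the usual order: parser, then frame, then codec
 * context. Every pointer is reset afterwards, so a second call is harmless. */
static void my_decoder_close(MyDecoder *d)
{
    if (!d)
        return;

    if (d->parser) {
        av_parser_close(d->parser);
        d->parser = NULL;
    }

    av_frame_free(&d->frame);          /* NULL-safe; also resets the pointer */
    avcodec_free_context(&d->avctx);   /* closes, frees and resets the pointer */
}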