STDMETHODIMP CDecAvcodec::Flush()
{
  // Reset the decoder and all per-stream bookkeeping so decoding can restart
  // cleanly (e.g. after a seek).
  if (m_pAVCtx) {
    avcodec_flush_buffers(m_pAVCtx);
  }

  // Re-create the parser; flushing the codec alone does not reset its state.
  if (m_pParser) {
    av_parser_close(m_pParser);
    m_pParser = av_parser_init(m_nCodecId);
  }

  m_CurrentThread = 0;
  m_rtStartCache = AV_NOPTS_VALUE;
  m_bWaitingForKeyFrame = TRUE;

  // BUGFIX: m_pAVCtx was dereferenced unconditionally here, although every
  // other access in this function is guarded against a NULL context.
  if (m_pAVCtx) {
    m_h264RandomAccess.flush(m_pAVCtx->thread_count);
  }

  // Reset the B-frame delay queue used for timestamp reordering.
  m_nBFramePos = 0;
  m_tcBFrameDelay[0].rtStart = m_tcBFrameDelay[0].rtStop = AV_NOPTS_VALUE;
  m_tcBFrameDelay[1].rtStart = m_tcBFrameDelay[1].rtStop = AV_NOPTS_VALUE;

  // Some codecs do not recover reliably from a plain flush; fully re-init the
  // decoder for H.264/MPEG-2 (not for DXVA, and not during DVD playback).
  if (!m_bDXVA && !(m_pCallback->GetDecodeFlags() & LAV_VIDEO_DEC_FLAG_DVD) && (m_nCodecId == AV_CODEC_ID_H264 || m_nCodecId == AV_CODEC_ID_MPEG2VIDEO)) {
    InitDecoder(m_nCodecId, &m_pCallback->GetInputMediaType());
  }

  return __super::Flush();
}
/* Probe an old-style (pre-Ogg-mapping-1.0) FLAC header in an Ogg stream.
 * Runs a temporary FLAC parser over the first packet to extract the sample
 * rate. Returns 0 once the header has been consumed, 1 if more packets are
 * needed, -1 if the parser cannot be created. */
static int old_flac_header (AVFormatContext * s, int idx)
{
    struct ogg *ogg = s->priv_data;
    AVStream *st = s->streams[idx];
    struct ogg_stream *os = ogg->streams + idx;
    AVCodecParserContext *p = av_parser_init(AV_CODEC_ID_FLAC);
    int out_size;
    uint8_t *out_data;

    if (!p)
        return -1;

    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id   = AV_CODEC_ID_FLAC;

    /* The packet is a complete frame; parse it only to fill codec fields. */
    p->flags = PARSER_FLAG_COMPLETE_FRAMES;
    av_parser_parse2(p, st->codec,
                     &out_data, &out_size,
                     os->buf + os->pstart, os->psize,
                     AV_NOPTS_VALUE, AV_NOPTS_VALUE, -1);
    av_parser_close(p);

    if (!st->codec->sample_rate)
        return 1;

    avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
    return 0;
}
/* Strip in-band extradata (headers) from packets using the codec parser's
 * split() callback. The single-character argument selects when stripping
 * happens: 'a' = only when global/local headers are in use, 'k' = on
 * non-keyframes, 'e' or no argument = always. Output always aliases the
 * input buffer; no allocation takes place. */
static int remove_extradata(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
{
    int cmd = args ? *args : 0;
    AVCodecParserContext *pc;

    /* Lazily create the parser on first use; it lives in the filter context. */
    if (!bsfc->parser)
        bsfc->parser = av_parser_init(avctx->codec_id);
    pc = bsfc->parser;

    if (pc && pc->parser->split) {
        int has_header = (avctx->flags  & CODEC_FLAG_GLOBAL_HEADER) ||
                         (avctx->flags2 & CODEC_FLAG2_LOCAL_HEADER);
        int do_split   = (cmd == 'a' && has_header) ||
                         (cmd == 'k' && !keyframe)  ||
                         (cmd == 'e' || !cmd);
        if (do_split) {
            /* split() returns the number of leading header bytes to drop. */
            int consumed = pc->parser->split(avctx, buf, buf_size);
            buf      += consumed;
            buf_size -= consumed;
        }
    }

    *poutbuf      = (uint8_t *) buf;
    *poutbuf_size = buf_size;
    return 0;
}
/* Map a container-specific audio format tag (fourcc or numeric ID) to a
 * libavcodec codec ID and, if one is found, allocate a codec context plus a
 * parser for it. On any failure both outputs are left/reset to NULL.
 * NOTE(review): if av_parser_init() fails, the context is released with
 * av_freep(avctx) rather than avcodec_free_context() -- presumably fine for
 * this (old) API level, but confirm nothing inside the context leaks. */
static void allocate_parser(AVCodecContext **avctx, AVCodecParserContext **parser, unsigned format)
{
    enum CodecID codec_id = CODEC_ID_NONE;
    switch (format) {
    case MKTAG('M', 'P', '4', 'L'):
        codec_id = CODEC_ID_AAC_LATM;
        break;
    /* several historical tags all denote AC-3 */
    case 0x2000:
    case 0x332D6361:
    case 0x332D4341:
    case 0x20736D:
    case MKTAG('s', 'a', 'c', '3'):
        codec_id = CODEC_ID_AC3;
        break;
    case MKTAG('d', 'n', 'e', 't'):
        // DNET/byte-swapped AC-3 - there is no parser for that yet
        //codec_id = CODEC_ID_DNET;
        break;
    case MKTAG('E', 'A', 'C', '3'):
        codec_id = CODEC_ID_EAC3;
        break;
    case 0x2001:
    case 0x86:
        codec_id = CODEC_ID_DTS;
        break;
    case MKTAG('f', 'L', 'a', 'C'):
        codec_id = CODEC_ID_FLAC;
        break;
    case MKTAG('M', 'L', 'P', ' '):
        codec_id = CODEC_ID_MLP;
        break;
    case 0x55:
    case 0x5500736d:
    case 0x55005354:
    case MKTAG('.', 'm', 'p', '3'):
    case MKTAG('M', 'P', '3', ' '):
    case MKTAG('L', 'A', 'M', 'E'):
        codec_id = CODEC_ID_MP3;
        break;
    case 0x50:
    case 0x5000736d:
    case MKTAG('.', 'm', 'p', '2'):
    case MKTAG('.', 'm', 'p', '1'):
        codec_id = CODEC_ID_MP2;
        break;
    case MKTAG('T', 'R', 'H', 'D'):
        codec_id = CODEC_ID_TRUEHD;
        break;
    }
    if (codec_id != CODEC_ID_NONE) {
        *avctx = avcodec_alloc_context3(NULL);
        if (!*avctx)
            return;
        *parser = av_parser_init(codec_id);
        if (!*parser)
            av_freep(avctx); // parser failed: drop the context too
    }
}
//AV_CODEC_ID_H264, AV_CODEC_ID_MJPEG int ff_openH264Dec(pNaluDataCallbackFunc pFunc, enum AVCodecID dec_id) { pCodec = avcodec_find_decoder(dec_id); if (!pCodec) { fprintf(stderr, "h264 decoder can not be found!\n"); return -1; } pCodecCtx = avcodec_alloc_context3(pCodec); if (!pCodecCtx) { fprintf(stderr, "Could not allocate video codec context\n"); return -1; } //当给入的待解码数据不是以完整帧给入时,相当于是起到一个数据缓冲的作用 //if(pCodec->capabilities&CODEC_CAP_TRUNCATED) // pCodecCtx->flags|= CODEC_FLAG_TRUNCATED; /* open the coderc */ if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) { fprintf(stderr, "could not open codec\n"); return -1; } parser = av_parser_init(pCodecCtx->codec_id); parser->flags |= PARSER_FLAG_ONCE; //在打开解码器后初始化parse naluOutputFunc = pFunc; // Allocate video frame pFrame = av_frame_alloc(); if(!pFrame) { fprintf(stderr, "Could not allocate video frame 1\n"); return -1; } pFrame2 = av_frame_alloc(); if(!pFrame2) { fprintf(stderr, "Could not allocate video frame 2\n"); return -1; } return 0; }
static av_cold int check_format(AVCodecContext *avctx) { AVCodecParserContext *parser; uint8_t *pout; int psize; int index; H264Context *h; int ret = -1; /* init parser & parse file */ parser = av_parser_init(avctx->codec->id); if (!parser) { av_log(avctx, AV_LOG_ERROR, "Failed to open H.264 parser.\n"); goto final; }
/* Read the timestamp of the first complete FLAC frame at/after *ppos.
 * Used by the seeking code: reads raw chunks from the file, feeds them to a
 * temporary parser, and returns the pts of the first frame the parser emits
 * with a valid timestamp (AV_NOPTS_VALUE on failure). On success *ppos is
 * rewound to the byte offset where that frame starts. */
static av_unused int64_t flac_read_timestamp(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit)
{
    AVPacket pkt, out_pkt;
    AVStream *st = s->streams[stream_index];
    AVCodecParserContext *parser;
    int ret;
    int64_t pts = AV_NOPTS_VALUE;

    if (avio_seek(s->pb, *ppos, SEEK_SET) < 0)
        return AV_NOPTS_VALUE;

    av_init_packet(&pkt);
    parser = av_parser_init(st->codec->codec_id);
    if (!parser){
        return AV_NOPTS_VALUE;
    }
    // Let the parser derive timestamps from the codec-level sample counter.
    parser->flags |= PARSER_FLAG_USE_CODEC_TS;

    for (;;){
        // Pull raw (unframed) bytes from the input; EAGAIN means "try again".
        ret = ff_raw_read_partial_packet(s, &pkt);
        if (ret < 0){
            if (ret == AVERROR(EAGAIN))
                continue;
            else
                break; // EOF or real error: give up with pts unset
        }
        av_init_packet(&out_pkt);
        ret = av_parser_parse2(parser, st->codec,
                               &out_pkt.data, &out_pkt.size,
                               pkt.data, pkt.size,
                               pkt.pts, pkt.dts, *ppos);
        av_free_packet(&pkt);
        if (out_pkt.size){
            int size = out_pkt.size;
            if (parser->pts != AV_NOPTS_VALUE){
                // seeking may not have started from beginning of a frame
                // calculate frame start position from next frame backwards
                *ppos = parser->next_frame_offset - size;
                pts = parser->pts;
                break;
            }
        }
    }

    av_parser_close(parser);
    return pts;
}
/* Filter init: create the codec parser and a codec context seeded with the
 * input stream parameters. A missing parser is not an error -- the filter
 * then simply passes packets through unchanged. */
static int remove_extradata_init(AVBSFContext *ctx)
{
    RemoveExtradataContext *priv = ctx->priv_data;
    int err;

    priv->parser = av_parser_init(ctx->par_in->codec_id);
    if (!priv->parser)
        return 0;

    /* The parser's split() callback needs a populated codec context. */
    priv->avctx = avcodec_alloc_context3(NULL);
    if (!priv->avctx)
        return AVERROR(ENOMEM);

    err = avcodec_parameters_to_context(priv->avctx, ctx->par_in);
    if (err < 0)
        return err;

    return 0;
}
/*Load up the h264 codec needed for video decoding. Perform the initialization steps required by FFmpeg.*/ int video_init_decoder() { //initialize libavcodec avcodec_register_all(); //try to load h264 codec = avcodec_find_decoder(AV_CODEC_ID_H264); if(codec == NULL) { fprintf(stderr, "FFmpeg error : Counldn't find needed codec H264 for video decoding.\n"); return -1; } //inilialize the ffmpeg codec context context = avcodec_alloc_context3(codec); if(avcodec_open2(context, codec, NULL) < 0) { fprintf(stderr, "FFmpeg error : Couldn't open codec.\n"); return -1; } //initialize the frame parser (needed to get a whole frame from several packets) cpContext = av_parser_init(AV_CODEC_ID_H264); //initialize the video packet and frame structures av_init_packet(&video_packet); current_frame = av_frame_alloc(); frameOffset = 0; //prevent h264 from logging error messages that we have no interest in av_log_set_level(JAKO_FFMPEG_LOG); //for now, use the example "dump to file" callback for frame processing. frame_processing_callback = video_display_frame; //temp buffer to store raw frame; for now, don't make assumptions on its size, //wait til frames come in to allocate it. tempBufferSize = 0; tempBuffer = NULL; current_width = 0; current_height = 0; return 0; }
/* Feed one packet to the QSV decoder. On first call, lazily sets up an
 * internal software codec context (with a copy of the stream extradata) and
 * a parser; the parser is used only to extract codec parameters, never to
 * split packets. When the parsed geometry/pixel format differs from the
 * current decoder configuration, the hardware decoder is (re)initialized
 * before decoding. Returns 0/positive from qsv_decode() or a negative
 * AVERROR code. */
int ff_qsv_process_data(AVCodecContext *avctx, QSVContext *q, AVFrame *frame, int *got_frame, AVPacket *pkt)
{
    uint8_t *dummy_data;
    int dummy_size;
    int ret;

    if (!q->avctx_internal) {
        q->avctx_internal = avcodec_alloc_context3(NULL);
        if (!q->avctx_internal)
            return AVERROR(ENOMEM);

        // Copy extradata so the parser can see SPS/PPS etc.
        if (avctx->extradata) {
            q->avctx_internal->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
            if (!q->avctx_internal->extradata)
                return AVERROR(ENOMEM);

            memcpy(q->avctx_internal->extradata, avctx->extradata, avctx->extradata_size);
            q->avctx_internal->extradata_size = avctx->extradata_size;
        }

        q->parser = av_parser_init(avctx->codec_id);
        if (!q->parser)
            return AVERROR(ENOMEM);

        q->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
        q->orig_pix_fmt = AV_PIX_FMT_NONE;
    }

    // Empty packet: just drain the decoder.
    if (!pkt->size)
        return qsv_decode(avctx, q, frame, got_frame, pkt);

    /* we assume the packets are already split properly and want
     * just the codec parameters here */
    av_parser_parse2(q->parser, q->avctx_internal,
                     &dummy_data, &dummy_size,
                     pkt->data, pkt->size,
                     pkt->pts, pkt->dts, pkt->pos);

    /* TODO: flush delayed frames on reinit */
    if (q->parser->format != q->orig_pix_fmt ||
        q->parser->coded_width != avctx->coded_width ||
        q->parser->coded_height != avctx->coded_height) {
        // pix_fmts[1] is filled with the mapped software format below.
        enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_QSV,
                                           AV_PIX_FMT_NONE,
                                           AV_PIX_FMT_NONE };
        enum AVPixelFormat qsv_format;

        qsv_format = ff_qsv_map_pixfmt(q->parser->format, &q->fourcc);
        if (qsv_format < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Decoding pixel format '%s' is not supported\n",
                   av_get_pix_fmt_name(q->parser->format));
            ret = AVERROR(ENOSYS);
            goto reinit_fail;
        }

        // Propagate the newly parsed stream properties to the caller-visible
        // context before (re)opening the hardware decoder.
        q->orig_pix_fmt     = q->parser->format;
        avctx->pix_fmt      = pix_fmts[1] = qsv_format;
        avctx->width        = q->parser->width;
        avctx->height       = q->parser->height;
        avctx->coded_width  = q->parser->coded_width;
        avctx->coded_height = q->parser->coded_height;
        avctx->level        = q->avctx_internal->level;
        avctx->profile      = q->avctx_internal->profile;

        ret = ff_get_format(avctx, pix_fmts);
        if (ret < 0)
            goto reinit_fail;

        avctx->pix_fmt = ret;

        ret = qsv_decode_init(avctx, q);
        if (ret < 0)
            goto reinit_fail;
    }

    return qsv_decode(avctx, q, frame, got_frame, pkt);

reinit_fail:
    // Force a full reconfiguration attempt on the next packet.
    q->orig_pix_fmt = q->parser->format = avctx->pix_fmt = AV_PIX_FMT_NONE;
    return ret;
}
// Set up the SPDIF bitstreaming (pass-through) path for the given codec:
// a parser to frame the input, a codec context describing it, and an
// avformat "spdif" muxer writing into the bitstream AVIO context.
// Returns S_OK on success; on any failure all partial state is released.
HRESULT CLAVAudio::CreateBitstreamContext(AVCodecID codec, WAVEFORMATEX *wfe)
{
  int ret = 0;

  if (m_avBSContext)
    FreeBitstreamContext();
  m_bsParser.Reset();

  // Increase DTS buffer even further, as we do not have any sample caching
  if (codec == AV_CODEC_ID_DTS)
    m_faJitter.SetNumSamples(400);
  else
    m_faJitter.SetNumSamples(100);

  m_pParser = av_parser_init(codec);
  ASSERT(m_pParser);
  if (!m_pParser) {
    // BUGFIX: release builds previously continued with a NULL parser
    DbgLog((LOG_ERROR, 10, L"::CreateBitstreamContext() -- av_parser_init failed"));
    goto fail;
  }

  m_pAVCtx = avcodec_alloc_context3(avcodec_find_decoder(codec));
  CheckPointer(m_pAVCtx, E_POINTER);

  DbgLog((LOG_TRACE, 20, "Creating Bistreaming Context..."));

  ret = avformat_alloc_output_context2(&m_avBSContext, NULL, "spdif", NULL);
  if (ret < 0 || !m_avBSContext) {
    DbgLog((LOG_ERROR, 10, L"::CreateBitstreamContext() -- alloc of avformat spdif muxer failed (ret: %d)", ret));
    goto fail;
  }

  m_avBSContext->pb = m_avioBitstream;
  m_avBSContext->oformat->flags |= AVFMT_NOFILE;

  // DTS-HD is by default off, unless explicitly asked for
  if (m_settings.DTSHDFraming && m_settings.bBitstream[Bitstream_DTSHD] && !m_bForceDTSCore) {
    m_bDTSHD = TRUE;
    av_opt_set_int(m_avBSContext->priv_data, "dtshd_rate", LAV_BITSTREAM_DTS_HD_RATE, 0);
  } else {
    m_bDTSHD = FALSE;
    av_opt_set_int(m_avBSContext->priv_data, "dtshd_rate", 0, 0);
  }
  av_opt_set_int(m_avBSContext->priv_data, "dtshd_fallback_time", -1, 0);

  AVStream *st = av_new_stream(m_avBSContext, 0);
  if (!st) {
    DbgLog((LOG_ERROR, 10, L"::CreateBitstreamContext() -- alloc of output stream failed"));
    goto fail;
  }
  // Mirror the basic stream parameters into both contexts.
  m_pAVCtx->codec_id    = st->codec->codec_id    = codec;
  m_pAVCtx->codec_type  = st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
  m_pAVCtx->channels    = st->codec->channels    = wfe->nChannels;
  m_pAVCtx->sample_rate = st->codec->sample_rate = wfe->nSamplesPerSec;

  ret = avformat_write_header(m_avBSContext, NULL);
  if (ret < 0) {
    DbgLog((LOG_ERROR, 10, L"::CreateBitstreamContext() -- av_write_header returned an error code (%d)", -ret));
    goto fail;
  }

  m_nCodecId = codec;

  return S_OK;
fail:
  FreeBitstreamContext();
  return E_FAIL;
}
/* CrystalHD decoder init: configure the hardware input format, open the
 * decoder pipeline, and (for h.264) create a parser used later to detect
 * interlacing. Returns 0 on success, a negative error code otherwise. */
static av_cold int init(AVCodecContext *avctx)
{
    CHDContext* priv;
    BC_STATUS ret;
    BC_INFO_CRYSTAL version;
    BC_INPUT_FORMAT format = {
        .FGTEnable = FALSE,
        .Progressive = TRUE,
        .OptFlags = 0x80000000 | vdecFrameRate59_94 | 0x40,
        .width = avctx->width,
        .height = avctx->height,
    };
    BC_MEDIA_SUBTYPE subtype;
    uint32_t mode = DTS_PLAYBACK_MODE |
                    DTS_LOAD_FILE_PLAY_FW |
                    DTS_SKIP_TX_CHK_CPB |
                    DTS_PLAYBACK_DROP_RPT_MODE |
                    DTS_SINGLE_THREADED_MODE |
                    DTS_DFLT_RESOLUTION(vdecRESOLUTION_1080p23_976);

    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD Init for %s\n", avctx->codec->name);

    avctx->pix_fmt = PIX_FMT_YUYV422;

    /* Initialize the library */
    priv = avctx->priv_data;
    priv->avctx = avctx;
    // Extradata starting with byte 1 indicates AVC1-style (length-prefixed) NALs.
    priv->is_nal = avctx->extradata_size > 0 && *(avctx->extradata) == 1;
    priv->last_picture = -1;
    priv->decode_wait = BASE_WAIT;

    subtype = id2subtype(priv, avctx->codec->id);
    switch (subtype) {
    case BC_MSUBTYPE_AVC1:
        {
            uint8_t *dummy_p;
            int dummy_int;

            // Convert AVC1 extradata to Annex-B so the hardware can consume it.
            priv->bsfc = av_bitstream_filter_init("h264_mp4toannexb");
            if (!priv->bsfc) {
                av_log(avctx, AV_LOG_ERROR, "Cannot open the h264_mp4toannexb BSF!\n");
                return AVERROR_BSF_NOT_FOUND;
            }
            av_bitstream_filter_filter(priv->bsfc, avctx, NULL, &dummy_p, &dummy_int, NULL, 0, 0);
        }
        subtype = BC_MSUBTYPE_H264;
        // Fall-through
    case BC_MSUBTYPE_H264:
        format.startCodeSz = 4;
        // Fall-through
    case BC_MSUBTYPE_VC1:
    case BC_MSUBTYPE_WVC1:
    case BC_MSUBTYPE_WMV3:
    case BC_MSUBTYPE_WMVA:
    case BC_MSUBTYPE_MPEG2VIDEO:
    case BC_MSUBTYPE_DIVX:
    case BC_MSUBTYPE_DIVX311:
        format.pMetaData = avctx->extradata;
        format.metaDataSz = avctx->extradata_size;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: Unknown codec name\n");
        return AVERROR(EINVAL);
    }
    format.mSubtype = subtype;

    if (priv->sWidth) {
        format.bEnableScaling = 1;
        format.ScalingParams.sWidth = priv->sWidth;
    }

    /* Get a decoder instance */
    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: starting up\n");
    // Initialize the Link and Decoder devices
    ret = DtsDeviceOpen(&priv->dev, mode);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: DtsDeviceOpen failed\n");
        goto fail;
    }

    ret = DtsCrystalHDVersion(priv->dev, &version);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: DtsCrystalHDVersion failed\n");
        goto fail;
    }
    // device == 0 identifies the older BCM70012 board.
    priv->is_70012 = version.device == 0;

    if (priv->is_70012 && (subtype == BC_MSUBTYPE_DIVX || subtype == BC_MSUBTYPE_DIVX311)) {
        av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: BCM70012 doesn't support MPEG4-ASP/DivX/Xvid\n");
        goto fail;
    }

    ret = DtsSetInputFormat(priv->dev, &format);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: SetInputFormat failed\n");
        goto fail;
    }

    ret = DtsOpenDecoder(priv->dev, BC_STREAM_TYPE_ES);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsOpenDecoder failed\n");
        goto fail;
    }

    ret = DtsSetColorSpace(priv->dev, OUTPUT_MODE422_YUY2);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsSetColorSpace failed\n");
        goto fail;
    }

    ret = DtsStartDecoder(priv->dev);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartDecoder failed\n");
        goto fail;
    }

    ret = DtsStartCapture(priv->dev);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartCapture failed\n");
        goto fail;
    }

    if (avctx->codec->id == CODEC_ID_H264) {
        priv->parser = av_parser_init(avctx->codec->id);
        if (!priv->parser)
            av_log(avctx, AV_LOG_WARNING,
                   "Cannot open the h.264 parser! Interlaced h.264 content " "will not be detected reliably.\n");
        // NOTE(review): this dereference is unguarded -- if av_parser_init()
        // failed above we only warned and will crash right here. Needs a fix.
        priv->parser->flags = PARSER_FLAG_COMPLETE_FRAMES;
    }

    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Init complete.\n");

    return 0;

fail:
    uninit(avctx);
    return -1;
}

/* Copy one decoded picture (or field) from the hardware output buffer into
 * priv->pic, weaving interlaced fields across two calls. Returns one of the
 * CopyRet codes telling the caller whether to expect/skip further fields. */
static inline CopyRet copy_frame(AVCodecContext *avctx, BC_DTS_PROC_OUT *output, void *data, int *data_size)
{
    BC_STATUS ret;
    BC_DTS_STATUS decoder_status;
    uint8_t trust_interlaced;
    uint8_t interlaced;

    CHDContext *priv = avctx->priv_data;
    int64_t pkt_pts = AV_NOPTS_VALUE;
    uint8_t pic_type = 0;

    uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) == VDEC_FLAG_BOTTOMFIELD;
    uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);

    int width = output->PicInfo.width;
    int height = output->PicInfo.height;
    int bwidth;
    uint8_t *src = output->Ybuff;
    int sStride;
    uint8_t *dst;
    int dStride;

    if (output->PicInfo.timeStamp != 0) {
        // The hardware timestamp is an opaque key into our pts/pic_type list.
        OpaqueList *node = opaque_list_pop(priv, output->PicInfo.timeStamp);
        if (node) {
            pkt_pts = node->reordered_opaque;
            pic_type = node->pic_type;
            av_free(node);
        } else {
            /*
             * We will encounter a situation where a timestamp cannot be
             * popped if a second field is being returned. In this case,
             * each field has the same timestamp and the first one will
             * cause it to be popped. To keep subsequent calculations
             * simple, pic_type should be set a FIELD value - doesn't
             * matter which, but I chose BOTTOM.
             */
            pic_type = PICT_BOTTOM_FIELD;
        }
        av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n", output->PicInfo.timeStamp);
        av_log(avctx, AV_LOG_VERBOSE, "output picture type %d\n", pic_type);
    }

    ret = DtsGetDriverStatus(priv->dev, &decoder_status);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: GetDriverStatus failed: %u\n", ret);
        return RET_ERROR;
    }

    /*
     * For most content, we can trust the interlaced flag returned
     * by the hardware, but sometimes we can't. These are the
     * conditions under which we can trust the flag:
     *
     * 1) It's not h.264 content
     * 2) The UNKNOWN_SRC flag is not set
     * 3) We know we're expecting a second field
     * 4) The hardware reports this picture and the next picture
     *    have the same picture number.
     *
     * Note that there can still be interlaced content that will
     * fail this check, if the hardware hasn't decoded the next
     * picture or if there is a corruption in the stream. (In either
     * case a 0 will be returned for the next picture number)
     */
    trust_interlaced = avctx->codec->id != CODEC_ID_H264 ||
                       !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
                       priv->need_second_field ||
                       (decoder_status.picNumFlags & ~0x40000000) == output->PicInfo.picture_number;

    /*
     * If we got a false negative for trust_interlaced on the first field,
     * we will realise our mistake here when we see that the picture number is that
     * of the previous picture. We cannot recover the frame and should discard the
     * second field to keep the correct number of output frames.
     */
    if (output->PicInfo.picture_number == priv->last_picture && !priv->need_second_field) {
        av_log(avctx, AV_LOG_WARNING, "Incorrectly guessed progressive frame. Discarding second field\n");
        /* Returning without providing a picture. */
        return RET_OK;
    }

    interlaced = (output->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC) && trust_interlaced;

    if (!trust_interlaced && (decoder_status.picNumFlags & ~0x40000000) == 0) {
        av_log(avctx, AV_LOG_VERBOSE, "Next picture number unknown. Assuming progressive frame.\n");
    }

    av_log(avctx, AV_LOG_VERBOSE, "Interlaced state: %d | trust_interlaced %d\n", interlaced, trust_interlaced);

    // Release the previous frame unless we are still assembling its fields.
    if (priv->pic.data[0] && !priv->need_second_field)
        avctx->release_buffer(avctx, &priv->pic);

    priv->need_second_field = interlaced && !priv->need_second_field;

    priv->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if (!priv->pic.data[0]) {
        if (avctx->get_buffer(avctx, &priv->pic) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return RET_ERROR;
        }
    }

    bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0);
    if (priv->is_70012) {
        // The BCM70012 pads output lines to fixed strides.
        int pStride;

        if (width <= 720)
            pStride = 720;
        else if (width <= 1280)
            pStride = 1280;
        else if (width <= 1080)
            pStride = 1080;
        // NOTE(review): the `width <= 1080` branch is unreachable (shadowed
        // by `width <= 1280`), and pStride is left uninitialized for
        // width > 1280 -- confirm the intended stride (1920?).
        sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0);
    } else {
        sStride = bwidth;
    }

    dStride = priv->pic.linesize[0];
    dst = priv->pic.data[0];

    av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");

    if (interlaced) {
        int dY = 0;
        int sY = 0;

        height /= 2;
        if (bottom_field) {
            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: bottom field\n");
            dY = 1;
        } else {
            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: top field\n");
            dY = 0;
        }

        // Weave one field: dY advances by two per copied source line, so the
        // field lands on every other destination line.
        for (sY = 0; sY < height; dY++, sY++) {
            memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth);
            dY++;
        }
    } else {
        av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
    }

    priv->pic.interlaced_frame = interlaced;
    if (interlaced)
        priv->pic.top_field_first = !bottom_first;

    priv->pic.pkt_pts = pkt_pts;

    // Only emit the frame once both fields (if any) have been copied.
    if (!priv->need_second_field) {
        *data_size = sizeof(AVFrame);
        *(AVFrame *)data = priv->pic;
    }

    /*
     * Two types of PAFF content have been observed. One form causes the
     * hardware to return a field pair and the other individual fields,
     * even though the input is always individual fields. We must skip
     * copying on the next decode() call to maintain pipeline length in
     * the first case.
     */
    if (!interlaced && (output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) &&
        (pic_type == PICT_TOP_FIELD || pic_type == PICT_BOTTOM_FIELD)) {
        av_log(priv->avctx, AV_LOG_VERBOSE, "Fieldpair from two packets.\n");
        return RET_SKIP_NEXT_COPY;
    }

    /*
     * Testing has shown that in all cases where we don't want to return the
     * full frame immediately, VDEC_FLAG_UNKNOWN_SRC is set.
     */
    return priv->need_second_field && !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ? RET_COPY_NEXT_FIELD : RET_OK;
}
// Synchronize m_streams with the streams currently reported by the demuxer
// add-on: create/update a client-side wrapper per stream (with an optional
// libavcodec parser for audio/video), then dispose any stale slots beyond the
// reported count. On any invalid stream, everything is disposed and we bail.
void CDVDDemuxClient::RequestStreams()
{
  int i = -1;
  for (auto stream : m_IDemux->GetStreams())
  {
    ++i;
    if (!stream)
    {
      CLog::Log(LOGERROR, "CDVDDemuxClient::RequestStreams - invalid stream at pos %d", i);
      DisposeStreams();
      return;
    }

    if (stream->type == STREAM_AUDIO)
    {
      CDemuxStreamAudio *source = dynamic_cast<CDemuxStreamAudio*>(stream);
      if (!source)
      {
        CLog::Log(LOGERROR, "CDVDDemuxClient::RequestStreams - invalid audio stream at pos %d", i);
        DisposeStreams();
        return;
      }
      CDemuxStreamAudioClient* st = nullptr;
      // Reuse the existing wrapper only if type and codec still match.
      if (m_streams[i])
      {
        st = dynamic_cast<CDemuxStreamAudioClient*>(m_streams[i]);
        if (!st || (st->codec != source->codec))
          DisposeStream(i);
      }
      if (!m_streams[i])
      {
        st = new CDemuxStreamAudioClient();
        st->m_parser = av_parser_init(source->codec);
        if(st->m_parser)
          st->m_parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
      }
      st->iChannels = source->iChannels;
      st->iSampleRate = source->iSampleRate;
      st->iBlockAlign = source->iBlockAlign;
      st->iBitRate = source->iBitRate;
      st->iBitsPerSample = source->iBitsPerSample;
      // NOTE(review): when updating an existing stream, the previous
      // ExtraData buffer is not released before being replaced -- confirm
      // whether DisposeStream/dtor covers this or it leaks.
      if (source->ExtraSize > 0 && source->ExtraData)
      {
        st->ExtraData = new uint8_t[source->ExtraSize];
        st->ExtraSize = source->ExtraSize;
        for (unsigned int j=0; j<source->ExtraSize; j++)
          st->ExtraData[j] = source->ExtraData[j];
      }
      m_streams[i] = st;
      st->m_parser_split = true;
      st->changes++;
    }
    else if (stream->type == STREAM_VIDEO)
    {
      CDemuxStreamVideo *source = dynamic_cast<CDemuxStreamVideo*>(stream);
      if (!source)
      {
        CLog::Log(LOGERROR, "CDVDDemuxClient::RequestStreams - invalid video stream at pos %d", i);
        DisposeStreams();
        return;
      }
      CDemuxStreamVideoClient* st = nullptr;
      // Dispose on codec or geometry change so the wrapper is rebuilt.
      if (m_streams[i])
      {
        st = dynamic_cast<CDemuxStreamVideoClient*>(m_streams[i]);
        if (!st || (st->codec != source->codec) || (st->iWidth != source->iWidth) || (st->iHeight != source->iHeight))
          DisposeStream(i);
      }
      if (!m_streams[i])
      {
        st = new CDemuxStreamVideoClient();
        st->m_parser = av_parser_init(source->codec);
        if(st->m_parser)
          st->m_parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
      }
      st->iFpsScale = source->irFpsScale;
      st->iFpsRate = source->irFpsRate;
      st->iHeight = source->iHeight;
      st->iWidth = source->iWidth;
      st->fAspect = source->fAspect;
      st->stereo_mode = "mono";
      if (source->ExtraSize > 0 && source->ExtraData)
      {
        st->ExtraData = new uint8_t[source->ExtraSize];
        st->ExtraSize = source->ExtraSize;
        for (unsigned int j=0; j<source->ExtraSize; j++)
          st->ExtraData[j] = source->ExtraData[j];
      }
      m_streams[i] = st;
      st->m_parser_split = true;
    }
    else if (stream->type == STREAM_SUBTITLE)
    {
      CDemuxStreamSubtitle *source = dynamic_cast<CDemuxStreamSubtitle*>(stream);
      if (!source)
      {
        CLog::Log(LOGERROR, "CDVDDemuxClient::RequestStreams - invalid subtitle stream at pos %d", i);
        DisposeStreams();
        return;
      }
      CDemuxStreamSubtitleClient* st = nullptr;
      if (m_streams[i])
      {
        st = dynamic_cast<CDemuxStreamSubtitleClient*>(m_streams[i]);
        if (!st || (st->codec != source->codec))
          DisposeStream(i);
      }
      if (!m_streams[i])
      {
        st = new CDemuxStreamSubtitleClient();
      }
      // Subtitle extradata is expected to be exactly 4 bytes here.
      if (source->ExtraSize == 4)
      {
        st->ExtraData = new uint8_t[4];
        st->ExtraSize = 4;
        for (int j=0; j<4; j++)
          st->ExtraData[j] = source->ExtraData[j];
      }
      m_streams[i] = st;
    }
    else if (stream->type == STREAM_TELETEXT)
    {
      if (m_streams[i])
      {
        if (m_streams[i]->codec != stream->codec)
          DisposeStream(i);
      }
      if (!m_streams[i])
        m_streams[i] = new CDemuxStreamTeletext();
    }
    else if (stream->type == STREAM_RADIO_RDS)
    {
      if (m_streams[i])
      {
        if (m_streams[i]->codec != stream->codec)
          DisposeStream(i);
      }
      if (!m_streams[i])
        m_streams[i] = new CDemuxStreamRadioRDS();
    }
    else
    {
      // Unknown type: replace with a generic placeholder stream.
      if (m_streams[i])
        DisposeStream(i);
      m_streams[i] = new CDemuxStream();
    }

    // Common properties copied for every stream type.
    m_streams[i]->codec = stream->codec;
    m_streams[i]->codecName = stream->codecName;
    m_streams[i]->bandwidth = stream->bandwidth;
    m_streams[i]->uniqueId = stream->uniqueId;
    for (int j=0; j<4; j++)
      m_streams[i]->language[j] = stream->language[j];
    m_streams[i]->realtime = stream->realtime;

    CLog::Log(LOGDEBUG,"CDVDDemuxClient::RequestStreams(): added/updated stream %d with codec_id %d", m_streams[i]->uniqueId, m_streams[i]->codec);
  }
  ++i;
  // check if we need to dispose any streams no longer in props
  for (int j = i; j < MAX_STREAMS; j++)
  {
    if (m_streams[j])
    {
      CLog::Log(LOGDEBUG,"CDVDDemuxClient::RequestStreams(): disposed stream %d with codec_id %d", m_streams[j]->uniqueId, m_streams[j]->codec);
      DisposeStream(j);
    }
  }
}
// Build a dummy video stream header (BITMAPINFOHEADER) for an RTP video
// subsession and map the MIME codec name to the fourcc/extradata that the
// MPlayer core expects. Sets RTPSTATE_* bits in `flags` for MPEG-1/2 cases.
void rtpCodecInitialize_video(demuxer_t* demuxer, MediaSubsession* subsession, unsigned& flags)
{
  flags = 0;
  // Create a dummy video stream header
  // to make the main MPlayer code happy:
  sh_video_t* sh_video = new_sh_video(demuxer,0);
  BITMAPINFOHEADER* bih = (BITMAPINFOHEADER*)calloc(1,sizeof(BITMAPINFOHEADER));
  bih->biSize = sizeof(BITMAPINFOHEADER);
  sh_video->bih = bih;
  demux_stream_t* d_video = demuxer->video;
  d_video->sh = sh_video;
  sh_video->ds = d_video;

  // Map known video MIME types to the BITMAPINFOHEADER parameters
  // that this program uses. (Note that not all types need all
  // of the parameters to be set.)
  if (strcmp(subsession->codecName(), "MPV") == 0) {
    flags |= RTPSTATE_IS_MPEG12_VIDEO;
  } else if (strcmp(subsession->codecName(), "MP1S") == 0 ||
             strcmp(subsession->codecName(), "MP2T") == 0) {
    flags |= RTPSTATE_IS_MPEG12_VIDEO|RTPSTATE_IS_MULTIPLEXED;
  } else if (strcmp(subsession->codecName(), "H263") == 0 ||
             strcmp(subsession->codecName(), "H263-2000") == 0 ||
             strcmp(subsession->codecName(), "H263-1998") == 0) {
    bih->biCompression = sh_video->format = mmioFOURCC('H','2','6','3');
    needVideoFrameRate(demuxer, subsession);
  } else if (strcmp(subsession->codecName(), "H264") == 0) {
    bih->biCompression = sh_video->format = mmioFOURCC('H','2','6','4');
    // SPS/PPS come out-of-band via the sprop-parameter-sets SDP attribute.
    unsigned int configLen = 0;
    unsigned char* configData = parseH264ConfigStr(subsession->fmtp_spropparametersets(), configLen);
    sh_video->bih = bih = insertVideoExtradata(bih, configData, configLen);
    delete[] configData;
#ifdef USE_LIBAVCODEC
    av_register_codec_parser(&h264_parser);
    h264parserctx = av_parser_init(CODEC_ID_H264);
#endif
    needVideoFrameRate(demuxer, subsession);
  } else if (strcmp(subsession->codecName(), "H261") == 0) {
    bih->biCompression = sh_video->format = mmioFOURCC('H','2','6','1');
    needVideoFrameRate(demuxer, subsession);
  } else if (strcmp(subsession->codecName(), "JPEG") == 0) {
    bih->biCompression = sh_video->format = mmioFOURCC('M','J','P','G');
    needVideoFrameRate(demuxer, subsession);
  } else if (strcmp(subsession->codecName(), "MP4V-ES") == 0) {
    bih->biCompression = sh_video->format = mmioFOURCC('m','p','4','v');
    // For the codec to work correctly, it may need a 'VOL Header' to be
    // inserted at the front of the data stream. Construct this from the
    // "config" MIME parameter, which was present (hopefully) in the
    // session's SDP description:
    unsigned configLen;
    unsigned char* configData = parseGeneralConfigStr(subsession->fmtp_config(), configLen);
    sh_video->bih = bih = insertVideoExtradata(bih, configData, configLen);
    needVideoFrameRate(demuxer, subsession);
  } else if (strcmp(subsession->codecName(), "X-QT") == 0 ||
             strcmp(subsession->codecName(), "X-QUICKTIME") == 0) {
    // QuickTime generic RTP format, as described in
    // http://developer.apple.com/quicktime/icefloe/dispatch026.html
    // We can't initialize this stream until we've received the first packet
    // that has QuickTime "sdAtom" information in the header. So, keep
    // reading packets until we get one:
    unsigned char* packetData;
    unsigned packetDataLen;
    float pts;
    QuickTimeGenericRTPSource* qtRTPSource = (QuickTimeGenericRTPSource*)(subsession->rtpSource());
    unsigned fourcc;
    do {
      if (!awaitRTPPacket(demuxer, demuxer->video, packetData, packetDataLen, pts)) {
        return;
      }
    } while (!parseQTState_video(qtRTPSource->qtState, fourcc));

    bih->biCompression = sh_video->format = fourcc;
    bih->biWidth = qtRTPSource->qtState.width;
    bih->biHeight = qtRTPSource->qtState.height;
    // Walk the atoms inside the sample description (starting at offset 86)
    // looking for codec-specific extradata (avcC / esds / SMI).
    uint8_t *pos = (uint8_t*)qtRTPSource->qtState.sdAtom + 86;
    uint8_t *endpos = (uint8_t*)qtRTPSource->qtState.sdAtom + qtRTPSource->qtState.sdAtomSize;
    while (pos+8 < endpos) {
      unsigned atomLength = pos[0]<<24 | pos[1]<<16 | pos[2]<<8 | pos[3];
      if (atomLength == 0 || atomLength > endpos-pos) break;
      // NOTE(review): this condition deliberately relies on && binding
      // tighter than || (unparenthesized) -- matches original intent but
      // most compilers warn here.
      if ((!memcmp(pos+4, "avcC", 4) && fourcc==mmioFOURCC('a','v','c','1') ||
           !memcmp(pos+4, "esds", 4) ||
           !memcmp(pos+4, "SMI ", 4) && fourcc==mmioFOURCC('S','V','Q','3')) && atomLength > 8) {
        sh_video->bih = bih = insertVideoExtradata(bih, pos+8, atomLength-8);
        break;
      }
      pos += atomLength;
    }
    needVideoFrameRate(demuxer, subsession);
  } else {
    fprintf(stderr, "Unknown MPlayer format code for MIME type \"video/%s\"\n", subsession->codecName());
  }
}
/*
 * Initialize the video decoder parameters.
 * platform: 0: A5s66  1: 3516A (Hisi)
 * encode:   0: h.264  1: h.265  2: jpeg
 */
void QDecodeStream::ipcam_decode_init(int platform,int encode)
{
    int pps_code_size;
    AVDictionary *opts = NULL;
    avcodec_register_all(); // register all formats/codecs; must only be called once

    // Select the decoder matching the requested encoding.
    if(encode == H264)
        decode_h264.codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    else if(encode == H265)
        decode_h264.codec = avcodec_find_decoder(AV_CODEC_ID_H265);
    else if(encode == JPEG)
        decode_h264.codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
    decode_h264.codecCtx = avcodec_alloc_context3(decode_h264.codec); // codec context for the selected decoder
    //decode_h264.codecCtx->flags |= CODEC_FLAG_TRUNCATED;

    // Decide which threading mode the decoder supports (mirrors ffmpeg's own
    // auto-threading logic).
    // NOTE(review): CODEC_FLAG_LOW_DELAY is normally a ->flags bit, yet it is
    // tested against ->flags2 here -- confirm this is intentional.
    int frame_thread_supported = (decode_h264.codecCtx->codec->capabilities & CODEC_CAP_FRAME_THREADS) && !(decode_h264.codecCtx->flags & CODEC_FLAG_TRUNCATED) && !(decode_h264.codecCtx->flags2 & CODEC_FLAG_LOW_DELAY) && !(decode_h264.codecCtx->flags2 & CODEC_FLAG2_CHUNKS);
    decode_h264.codecCtx->delay = 0;
    if(decode_h264.codecCtx->thread_count == 1) {
        decode_h264.codecCtx->active_thread_type = 0;
    } else if(frame_thread_supported && (decode_h264.codecCtx->thread_type & FF_THREAD_FRAME)) {
        decode_h264.codecCtx->active_thread_type = FF_THREAD_FRAME;
    } else if((decode_h264.codecCtx->codec->capabilities & CODEC_CAP_SLICE_THREADS) && (decode_h264.codecCtx->thread_type & FF_THREAD_SLICE)) {
        decode_h264.codecCtx->active_thread_type = FF_THREAD_SLICE;
    } else if(!(decode_h264.codecCtx->codec->capabilities & CODEC_CAP_AUTO_THREADS)) {
        decode_h264.codecCtx->thread_count = 1;
        decode_h264.codecCtx->active_thread_type = 0;
    }

    if(decode_h264.codecCtx->thread_count > MAX_AUTO_THREADS) {
        av_log(decode_h264.codecCtx,AV_LOG_WARNING,"Application has requested %d threads.Using a thread count greater than %d is not recommended.\n",
               decode_h264.codecCtx->thread_count,MAX_AUTO_THREADS);
    }

    // Determine the SPS/PPS header data according to platform and encoding.
    pps_code_size = decoder_pps_psp(platform,encode);
    if(pps_code_size == 0)
        printf("get pps & psp head failed!\n");

    /*
    //decoder settings
    if(encode == H265)
    {
        av_opt_set(decode_h264.codecCtx->priv_data,"x265-params","qp=0",0);
        av_opt_set(decode_h264.codecCtx->priv_data,"preset","veryfast",0);
        av_opt_set(decode_h264.codecCtx->priv_data,"x265-params","crf=10",0);
    }
    decode_h264.codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
    decode_h264.codecCtx->time_base.den = 25;
    decode_h264.codecCtx->global_quality = 1;
    av_opt_set(decode_h264.codecCtx->priv_data,"tune","zero-latency",0);
    decode_h264.codecCtx->active_thread_type |= FF_THREAD_FRAME;
    */

    // Open the codec.
    avcodec_open2(decode_h264.codecCtx, decode_h264.codec, &opts);
    decode_h264.frame = av_frame_alloc();
    // NOTE(review): the parser is always created for H.264, even when the
    // stream is H.265 or JPEG -- looks wrong for those encodings; confirm.
    decode_h264.parser = av_parser_init(AV_CODEC_ID_H264);
    decode_h264.img_convert_ctx = sws_alloc_context();
    sws_init_context(decode_h264.img_convert_ctx,NULL,NULL);
    // Allocate storage for one decoded RGB24 frame.
    one_frame_buffer = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_RGB24, play_win->image.width,play_win->image.height));
}
// Initialize the decoder: packet-queue mutex/condvar, libavcodec decoder and
// parser for `codecId`, and the libuv decode worker. The `while (!bInit)`
// loop runs at most once -- it is used as a structured "goto cleanup": any
// failure breaks out with bInit still false, and Finit() releases whatever
// was created. Returns 0 on success, -1 on failure.
int CVideoDecoder2::Init(void)
{
    int ret = -1;
    while (!bInit){
        pQueueMutex = (uv_mutex_t*)malloc(sizeof(uv_mutex_t));
        if(!pQueueMutex){
            ret = -1;
            break;
        }
        ret = uv_mutex_init(pQueueMutex);
        if (ret < 0){
            // Mutex never became valid: free the raw memory ourselves.
            free(pQueueMutex);
            pQueueMutex = NULL;
            break;
        }
        pQueueNotEmpty = (uv_cond_t*)malloc(sizeof(uv_cond_t));
        if(!pQueueNotEmpty){
            ret = -1;
            break;
        }
        ret = uv_cond_init(pQueueNotEmpty);
        if (ret < 0){
            free(pQueueNotEmpty);
            pQueueNotEmpty = NULL;
            break;
        }
        // Register all codecs
        avcodec_register_all();
        pCodec = avcodec_find_decoder(codecId);
        if (!pCodec){
            ret = -1;
            break;
        }
        pCodecCtx = avcodec_alloc_context3(pCodec);
        if (!pCodecCtx){
            ret = -1;
            break;
        }
        if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0){
            ret = -1;
            break;
        }
        pCodecParserCtx = av_parser_init(codecId);
        if (!pCodecParserCtx){
            ret = -1;
            break;
        }
        // Hand ourselves to the worker and start the async decode loop.
        decodeWorkerReq.data = this;
        ret = uv_queue_work(pLoop, &decodeWorkerReq, DecodeWorker, AfterDecode);
        if(ret < 0){
            bStop = true;
            break;
        }
        bInit = true;
    }
    if (!bInit){
        Finit(); // tear down everything that was partially created
        return -1;
    }
    else {
        return 0;
    }
}
/* Open the ffmpeg decoder for this VA-API decoder instance.
 * Decides per-codec whether a bitstream parser is needed, seeds the codec
 * context with extradata (from caps codec_data or by splitting the first
 * buffer), then opens the codec under the display lock.
 * Returns TRUE on success, FALSE on any failure. */
static gboolean gst_vaapi_decoder_ffmpeg_open(GstVaapiDecoderFfmpeg *ffdecoder, GstBuffer *buffer)
{
    GstVaapiDecoderFfmpegPrivate * const priv = ffdecoder->priv;
    GstVaapiDisplay * const display = GST_VAAPI_DECODER_DISPLAY(ffdecoder);
    GstBuffer * const codec_data = GST_VAAPI_DECODER_CODEC_DATA(ffdecoder);
    GstVaapiCodec codec = GST_VAAPI_DECODER_CODEC(ffdecoder);
    enum CodecID codec_id;
    AVCodec *ffcodec;
    gboolean try_parser, need_parser;
    int ret;

    /* Tear down any previously opened decoder state first. */
    gst_vaapi_decoder_ffmpeg_close(ffdecoder);

    if (codec_data) {
        const guchar *data = GST_BUFFER_DATA(codec_data);
        const guint size = GST_BUFFER_SIZE(codec_data);
        if (!set_codec_data(priv->avctx, data, size))
            return FALSE;
    }

    codec_id = get_codec_id_from_codec(codec);
    if (codec_id == CODEC_ID_NONE)
        return FALSE;

    ffcodec = avcodec_find_decoder(codec_id);
    if (!ffcodec)
        return FALSE;

    switch (codec_id) {
    case CODEC_ID_H264:
        /* For AVC1 formats, sequence headers are in extradata and
           input encoded buffers represent the whole NAL unit */
        try_parser  = priv->avctx->extradata_size == 0;
        need_parser = try_parser;
        break;
    case CODEC_ID_WMV3:
        /* There is no WMV3 parser in FFmpeg */
        try_parser  = FALSE;
        need_parser = FALSE;
        break;
    case CODEC_ID_VC1:
        /* For VC-1, sequence headers are in extradata and input
           encoded buffers represent the whole slice */
        try_parser  = priv->avctx->extradata_size == 0;
        need_parser = FALSE;
        break;
    default:
        try_parser  = TRUE;
        need_parser = TRUE;
        break;
    }

    if (try_parser) {
        priv->pctx = av_parser_init(codec_id);
        /* Only a hard failure when this codec cannot work without a parser. */
        if (!priv->pctx && need_parser)
            return FALSE;
    }

    /* XXX: av_find_stream_info() does this and some codecs really
       want hard an extradata buffer for initialization (e.g. VC-1) */
    if (!priv->avctx->extradata && priv->pctx && priv->pctx->parser->split) {
        const guchar *buf = GST_BUFFER_DATA(buffer);
        guint buf_size = GST_BUFFER_SIZE(buffer);
        /* split() returns the size of the in-band header prefix, if any.
           NOTE(review): its int result is stored into a guint — assumes the
           parser never returns a negative value; confirm for all codecs. */
        buf_size = priv->pctx->parser->split(priv->avctx, buf, buf_size);
        if (buf_size > 0 && !set_codec_data(priv->avctx, buf, buf_size))
            return FALSE;
    }

    /* The parser was only borrowed to extract extradata; drop it if this
       codec does not need one for normal operation. */
    if (priv->pctx && !need_parser) {
        av_parser_close(priv->pctx);
        priv->pctx = NULL;
    }

    /* Use size information from the demuxer, whenever available */
    priv->avctx->coded_width = GST_VAAPI_DECODER_WIDTH(ffdecoder);
    priv->avctx->coded_height = GST_VAAPI_DECODER_HEIGHT(ffdecoder);

    GST_VAAPI_DISPLAY_LOCK(display);
    ret = avcodec_open(priv->avctx, ffcodec);
    GST_VAAPI_DISPLAY_UNLOCK(display);
    if (ret < 0)
        return FALSE;
    return TRUE;
}
// Create or update the internal demux stream matching 'stream' and store it
// in 'map'. A new internal stream (with a fresh libav parser) is created when
// none exists yet, the codec changed, or forceInit is set; otherwise the
// existing one is updated in place. On any invalid input all streams are
// disposed and the function returns.
void CDVDDemuxClient::SetStreamProps(CDemuxStream *stream, std::map<int, std::shared_ptr<CDemuxStream>> &map, bool forceInit)
{
  if (!stream)
  {
    CLog::Log(LOGERROR, "CDVDDemuxClient::RequestStream - invalid stream");
    DisposeStreams();
    return;
  }

  std::shared_ptr<CDemuxStream> currentStream(GetStreamInternal(stream->uniqueId));
  std::shared_ptr<CDemuxStream> toStream;

  if (stream->type == STREAM_AUDIO)
  {
    CDemuxStreamAudio *source = dynamic_cast<CDemuxStreamAudio*>(stream);
    if (!source)
    {
      CLog::Log(LOGERROR, "CDVDDemuxClient::RequestStream - invalid audio stream with id %d", stream->uniqueId);
      DisposeStreams();
      return;
    }
    std::shared_ptr<CDemuxStreamClientInternalTpl<CDemuxStreamAudio>> streamAudio;
    if (currentStream)
      streamAudio = std::dynamic_pointer_cast<CDemuxStreamClientInternalTpl<CDemuxStreamAudio>>(currentStream);
    // Recreate the stream (and its parser) when it is new, the codec
    // changed, or the caller explicitly forces re-initialisation.
    if (forceInit || !streamAudio || streamAudio->codec != source->codec)
    {
      streamAudio.reset(new CDemuxStreamClientInternalTpl<CDemuxStreamAudio>());
      streamAudio->m_parser = av_parser_init(source->codec);
      if (streamAudio->m_parser)
        streamAudio->m_parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
      // Sample rate / channels are only seeded on (re)creation; later
      // corrections come from packet parsing.
      streamAudio->iSampleRate = source->iSampleRate;
      streamAudio->iChannels = source->iChannels;
    }

    streamAudio->iBlockAlign = source->iBlockAlign;
    streamAudio->iBitRate = source->iBitRate;
    streamAudio->iBitsPerSample = source->iBitsPerSample;
    if (source->ExtraSize > 0 && source->ExtraData)
    {
      // Replace (not leak) any previous extradata buffer.
      delete[] streamAudio->ExtraData;
      streamAudio->ExtraData = new uint8_t[source->ExtraSize];
      streamAudio->ExtraSize = source->ExtraSize;
      for (unsigned int j=0; j<source->ExtraSize; j++)
        streamAudio->ExtraData[j] = source->ExtraData[j];
    }
    streamAudio->m_parser_split = true;
    streamAudio->changes++;
    map[stream->uniqueId] = streamAudio;
    toStream = streamAudio;
  }
  else if (stream->type == STREAM_VIDEO)
  {
    CDemuxStreamVideo *source = dynamic_cast<CDemuxStreamVideo*>(stream);
    if (!source)
    {
      CLog::Log(LOGERROR, "CDVDDemuxClient::RequestStream - invalid video stream with id %d", stream->uniqueId);
      DisposeStreams();
      return;
    }
    std::shared_ptr<CDemuxStreamClientInternalTpl<CDemuxStreamVideo>> streamVideo;
    if (currentStream)
      streamVideo = std::dynamic_pointer_cast<CDemuxStreamClientInternalTpl<CDemuxStreamVideo>>(currentStream);
    if (forceInit || !streamVideo || streamVideo->codec != source->codec)
    {
      streamVideo.reset(new CDemuxStreamClientInternalTpl<CDemuxStreamVideo>());
      streamVideo->m_parser = av_parser_init(source->codec);
      if (streamVideo->m_parser)
        streamVideo->m_parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
      // Dimensions are only seeded on (re)creation.
      streamVideo->iHeight = source->iHeight;
      streamVideo->iWidth = source->iWidth;
    }

    streamVideo->iFpsScale = source->iFpsScale;
    streamVideo->iFpsRate = source->iFpsRate;
    streamVideo->fAspect = source->fAspect;
    streamVideo->iBitRate = source->iBitRate;
    if (source->ExtraSize > 0 && source->ExtraData)
    {
      delete[] streamVideo->ExtraData;
      streamVideo->ExtraData = new uint8_t[source->ExtraSize];
      streamVideo->ExtraSize = source->ExtraSize;
      for (unsigned int j=0; j<source->ExtraSize; j++)
        streamVideo->ExtraData[j] = source->ExtraData[j];
    }
    streamVideo->m_parser_split = true;
    streamVideo->changes++;
    map[stream->uniqueId] = streamVideo;
    toStream = streamVideo;
  }
  else if (stream->type == STREAM_SUBTITLE)
  {
    CDemuxStreamSubtitle *source = dynamic_cast<CDemuxStreamSubtitle*>(stream);
    if (!source)
    {
      CLog::Log(LOGERROR, "CDVDDemuxClient::RequestStream - invalid subtitle stream with id %d", stream->uniqueId);
      DisposeStreams();
      return;
    }
    std::shared_ptr<CDemuxStreamClientInternalTpl<CDemuxStreamSubtitle>> streamSubtitle;
    if (currentStream)
      streamSubtitle = std::dynamic_pointer_cast<CDemuxStreamClientInternalTpl<CDemuxStreamSubtitle>>(currentStream);
    if (!streamSubtitle || streamSubtitle->codec != source->codec)
    {
      streamSubtitle.reset(new CDemuxStreamClientInternalTpl<CDemuxStreamSubtitle>());
      streamSubtitle->m_parser = av_parser_init(source->codec);
      if (streamSubtitle->m_parser)
        streamSubtitle->m_parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
    }

    // Subtitle extradata is exactly a 4-byte palette/id blob here.
    if (source->ExtraSize == 4)
    {
      delete[] streamSubtitle->ExtraData;
      streamSubtitle->ExtraData = new uint8_t[4];
      streamSubtitle->ExtraSize = 4;
      for (int j=0; j<4; j++)
        streamSubtitle->ExtraData[j] = source->ExtraData[j];
    }
    map[stream->uniqueId] = streamSubtitle;
    toStream = streamSubtitle;
  }
  else if (stream->type == STREAM_TELETEXT)
  {
    CDemuxStreamTeletext *source = dynamic_cast<CDemuxStreamTeletext*>(stream);
    if (!source)
    {
      CLog::Log(LOGERROR, "CDVDDemuxClient::RequestStream - invalid teletext stream with id %d", stream->uniqueId);
      DisposeStreams();
      return;
    }
    std::shared_ptr<CDemuxStreamClientInternalTpl<CDemuxStreamTeletext>> streamTeletext;
    if (currentStream)
      streamTeletext = std::dynamic_pointer_cast<CDemuxStreamClientInternalTpl<CDemuxStreamTeletext>>(currentStream);
    if (!streamTeletext || streamTeletext->codec != source->codec)
    {
      streamTeletext.reset(new CDemuxStreamClientInternalTpl<CDemuxStreamTeletext>());
    }
    map[stream->uniqueId] = streamTeletext;
    toStream = streamTeletext;
  }
  else if (stream->type == STREAM_RADIO_RDS)
  {
    CDemuxStreamRadioRDS *source = dynamic_cast<CDemuxStreamRadioRDS*>(stream);
    if (!source)
    {
      CLog::Log(LOGERROR, "CDVDDemuxClient::RequestStream - invalid radio-rds stream with id %d", stream->uniqueId);
      DisposeStreams();
      return;
    }
    std::shared_ptr<CDemuxStreamClientInternalTpl<CDemuxStreamRadioRDS>> streamRDS;
    if (currentStream)
      streamRDS = std::dynamic_pointer_cast<CDemuxStreamClientInternalTpl<CDemuxStreamRadioRDS>>(currentStream);
    if (!streamRDS || streamRDS->codec != source->codec)
    {
      streamRDS.reset(new CDemuxStreamClientInternalTpl<CDemuxStreamRadioRDS>());
    }
    map[stream->uniqueId] = streamRDS;
    toStream = streamRDS;
  }
  else
  {
    // Unknown stream type: keep a generic placeholder entry.
    std::shared_ptr<CDemuxStreamClientInternalTpl<CDemuxStream>> streamGen;
    streamGen = std::make_shared<CDemuxStreamClientInternalTpl<CDemuxStream>>();
    map[stream->uniqueId] = streamGen;
    toStream = streamGen;
  }

  // only update profile / level if we create a new stream
  // existing streams may be corrected by ParsePacket
  if (!currentStream)
  {
    toStream->profile = stream->profile;
    toStream->level = stream->level;
  }

  // Properties common to all stream types.
  toStream->uniqueId = stream->uniqueId;
  toStream->codec = stream->codec;
  toStream->codecName = stream->codecName;
  toStream->flags = stream->flags;
  toStream->cryptoSession = stream->cryptoSession;
  toStream->externalInterfaces = stream->externalInterfaces;
  toStream->language = stream->language;

  CLog::Log(LOGDEBUG,"CDVDDemuxClient::RequestStream(): added/updated stream %d with codec_id %d", toStream->uniqueId, toStream->codec);
}
void CDVDDemuxClient::RequestStreams() { std::map<int, std::shared_ptr<CDemuxStream>> m_newStreamMap; for (auto stream : m_IDemux->GetStreams()) { if (!stream) { CLog::Log(LOGERROR, "CDVDDemuxClient::RequestStreams - invalid stream"); DisposeStreams(); return; } std::shared_ptr<CDemuxStream> dStream = GetStreamInternal(stream->uniqueId); if (stream->type == STREAM_AUDIO) { CDemuxStreamAudio *source = dynamic_cast<CDemuxStreamAudio*>(stream); if (!source) { CLog::Log(LOGERROR, "CDVDDemuxClient::RequestStreams - invalid audio stream with id %d", stream->uniqueId); DisposeStreams(); return; } std::shared_ptr<CDemuxStreamClientInternalTpl<CDemuxStreamAudio>> streamAudio; if (dStream) streamAudio = std::dynamic_pointer_cast<CDemuxStreamClientInternalTpl<CDemuxStreamAudio>>(dStream); if (!streamAudio || streamAudio->codec != source->codec) { streamAudio.reset(new CDemuxStreamClientInternalTpl<CDemuxStreamAudio>()); streamAudio->m_parser = av_parser_init(source->codec); if (streamAudio->m_parser) streamAudio->m_parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; } streamAudio->iChannels = source->iChannels; streamAudio->iSampleRate = source->iSampleRate; streamAudio->iBlockAlign = source->iBlockAlign; streamAudio->iBitRate = source->iBitRate; streamAudio->iBitsPerSample = source->iBitsPerSample; if (source->ExtraSize > 0 && source->ExtraData) { streamAudio->ExtraData = new uint8_t[source->ExtraSize]; streamAudio->ExtraSize = source->ExtraSize; for (unsigned int j=0; j<source->ExtraSize; j++) streamAudio->ExtraData[j] = source->ExtraData[j]; } streamAudio->m_parser_split = true; streamAudio->changes++; m_newStreamMap[stream->uniqueId] = streamAudio; dStream = streamAudio; } else if (stream->type == STREAM_VIDEO) { CDemuxStreamVideo *source = dynamic_cast<CDemuxStreamVideo*>(stream); if (!source) { CLog::Log(LOGERROR, "CDVDDemuxClient::RequestStreams - invalid video stream with id %d", stream->uniqueId); DisposeStreams(); return; } 
std::shared_ptr<CDemuxStreamClientInternalTpl<CDemuxStreamVideo>> streamVideo; if (dStream) streamVideo = std::dynamic_pointer_cast<CDemuxStreamClientInternalTpl<CDemuxStreamVideo>>(dStream); if (!streamVideo || streamVideo->codec != source->codec || streamVideo->iWidth != source->iWidth || streamVideo->iHeight != source->iHeight) { streamVideo.reset(new CDemuxStreamClientInternalTpl<CDemuxStreamVideo>()); streamVideo->m_parser = av_parser_init(source->codec); if (streamVideo->m_parser) streamVideo->m_parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; } streamVideo->iFpsScale = source->iFpsScale; streamVideo->iFpsRate = source->iFpsRate; streamVideo->iHeight = source->iHeight; streamVideo->iWidth = source->iWidth; streamVideo->fAspect = source->fAspect; streamVideo->stereo_mode = "mono"; if (source->ExtraSize > 0 && source->ExtraData) { streamVideo->ExtraData = new uint8_t[source->ExtraSize]; streamVideo->ExtraSize = source->ExtraSize; for (unsigned int j=0; j<source->ExtraSize; j++) streamVideo->ExtraData[j] = source->ExtraData[j]; } streamVideo->m_parser_split = true; streamVideo->changes++; m_newStreamMap[stream->uniqueId] = streamVideo; dStream = streamVideo; } else if (stream->type == STREAM_SUBTITLE) { CDemuxStreamSubtitle *source = dynamic_cast<CDemuxStreamSubtitle*>(stream); if (!source) { CLog::Log(LOGERROR, "CDVDDemuxClient::RequestStreams - invalid subtitle stream with id %d", stream->uniqueId); DisposeStreams(); return; } std::shared_ptr<CDemuxStreamClientInternalTpl<CDemuxStreamSubtitle>> streamSubtitle; if (dStream) streamSubtitle = std::dynamic_pointer_cast<CDemuxStreamClientInternalTpl<CDemuxStreamSubtitle>>(dStream); if (!streamSubtitle || streamSubtitle->codec != source->codec) { streamSubtitle.reset(new CDemuxStreamClientInternalTpl<CDemuxStreamSubtitle>()); streamSubtitle->m_parser = av_parser_init(source->codec); if (streamSubtitle->m_parser) streamSubtitle->m_parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; } if (source->ExtraSize == 4) { 
streamSubtitle->ExtraData = new uint8_t[4]; streamSubtitle->ExtraSize = 4; for (int j=0; j<4; j++) streamSubtitle->ExtraData[j] = source->ExtraData[j]; } m_newStreamMap[stream->uniqueId] = streamSubtitle; dStream = streamSubtitle; } else if (stream->type == STREAM_TELETEXT) { CDemuxStreamTeletext *source = dynamic_cast<CDemuxStreamTeletext*>(stream); if (!source) { CLog::Log(LOGERROR, "CDVDDemuxClient::RequestStreams - invalid teletext stream with id %d", stream->uniqueId); DisposeStreams(); return; } std::shared_ptr<CDemuxStreamClientInternalTpl<CDemuxStreamTeletext>> streamTeletext; if (dStream) streamTeletext = std::dynamic_pointer_cast<CDemuxStreamClientInternalTpl<CDemuxStreamTeletext>>(dStream); if (!streamTeletext || streamTeletext->codec != source->codec) { streamTeletext.reset(new CDemuxStreamClientInternalTpl<CDemuxStreamTeletext>()); } m_newStreamMap[stream->uniqueId] = streamTeletext; dStream = streamTeletext; } else if (stream->type == STREAM_RADIO_RDS) { CDemuxStreamRadioRDS *source = dynamic_cast<CDemuxStreamRadioRDS*>(stream); if (!source) { CLog::Log(LOGERROR, "CDVDDemuxClient::RequestStreams - invalid radio-rds stream with id %d", stream->uniqueId); DisposeStreams(); return; } std::shared_ptr<CDemuxStreamClientInternalTpl<CDemuxStreamRadioRDS>> streamRDS; if (dStream) streamRDS = std::dynamic_pointer_cast<CDemuxStreamClientInternalTpl<CDemuxStreamRadioRDS>>(dStream); if (!streamRDS || streamRDS->codec != source->codec) { streamRDS.reset(new CDemuxStreamClientInternalTpl<CDemuxStreamRadioRDS>()); } m_newStreamMap[stream->uniqueId] = streamRDS; dStream = streamRDS; } else { std::shared_ptr<CDemuxStreamClientInternalTpl<CDemuxStream>> streamGen; streamGen = std::make_shared<CDemuxStreamClientInternalTpl<CDemuxStream>>(); m_newStreamMap[stream->uniqueId] = streamGen; dStream = streamGen; } dStream->uniqueId = stream->uniqueId; dStream->codec = stream->codec; dStream->codecName = stream->codecName; dStream->bandwidth = stream->bandwidth; 
dStream->uniqueId = stream->uniqueId; for (int j=0; j<4; j++) dStream->language[j] = stream->language[j]; dStream->realtime = stream->realtime; CLog::Log(LOGDEBUG,"CDVDDemuxClient::RequestStreams(): added/updated stream %d with codec_id %d", dStream->uniqueId, dStream->codec); } m_streams = m_newStreamMap; }
/* Allocate and wire up the libavcodec H.264 decoding state for 'h264'.
 * On any failure every partially-created resource is torn down through
 * libavcodec_uninit() and FALSE is returned. */
static BOOL libavcodec_init(H264_CONTEXT* h264)
{
	H264_CONTEXT_LIBAVCODEC* ctx = (H264_CONTEXT_LIBAVCODEC*) calloc(1, sizeof(*ctx));

	if (!ctx)
		goto fail;

	/* Publish immediately so the failure path can free it. */
	h264->pSystemData = (void*) ctx;

	avcodec_register_all();

	ctx->codec = avcodec_find_decoder(CODEC_ID_H264);

	if (!ctx->codec)
	{
		WLog_ERR(TAG, "Failed to find libav H.264 codec");
		goto fail;
	}

	ctx->codecContext = avcodec_alloc_context3(ctx->codec);

	if (!ctx->codecContext)
	{
		WLog_ERR(TAG, "Failed to allocate libav codec context");
		goto fail;
	}

	/* Allow feeding partial frames when the codec supports it. */
	if (ctx->codec->capabilities & CODEC_CAP_TRUNCATED)
		ctx->codecContext->flags |= CODEC_FLAG_TRUNCATED;

	if (avcodec_open2(ctx->codecContext, ctx->codec, NULL) < 0)
	{
		WLog_ERR(TAG, "Failed to open libav codec");
		goto fail;
	}

	ctx->codecParser = av_parser_init(CODEC_ID_H264);

	if (!ctx->codecParser)
	{
		WLog_ERR(TAG, "Failed to initialize libav parser");
		goto fail;
	}

	ctx->videoFrame = avcodec_alloc_frame();

	if (!ctx->videoFrame)
	{
		WLog_ERR(TAG, "Failed to allocate libav frame");
		goto fail;
	}

	return TRUE;

fail:
	libavcodec_uninit(h264);
	return FALSE;
}
/* Create a hardware-accelerated video decoder resource.
 * Checks configuration/VA-API/GLX prerequisites and the requested profile,
 * then sets up an auxiliary GL context, the libav H.264 decoder, parser and
 * codec context. Returns the new resource id, or 0 on any failure. */
PP_Resource
ppb_video_decoder_create(PP_Instance instance, PP_Resource context, PP_VideoDecoder_Profile profile)
{
    if (!config.enable_hwdec) {
        trace_info_f(" hardware-accelerated decoding was disabled in config file\n");
        return 0;
    }

    if (!display.va_available) {
        trace_info_f(" no hw acceleration available\n");
        return 0;
    }

    if (!display.glXBindTexImageEXT) {
        trace_info_f(" no glXBindTexImageEXT available\n");
        return 0;
    }

    /* Only baseline/main/extended/high H.264 profiles are implemented. */
    switch (profile) {
    case PP_VIDEODECODER_H264PROFILE_BASELINE:
    case PP_VIDEODECODER_H264PROFILE_MAIN:
    case PP_VIDEODECODER_H264PROFILE_EXTENDED:
    case PP_VIDEODECODER_H264PROFILE_HIGH:
        // pass, there is an implementation below
        break;

    case PP_VIDEODECODER_H264PROFILE_NONE:
    case PP_VIDEODECODER_H264PROFILE_HIGH10PROFILE:
    case PP_VIDEODECODER_H264PROFILE_HIGH422PROFILE:
    case PP_VIDEODECODER_H264PROFILE_HIGH444PREDICTIVEPROFILE:
    case PP_VIDEODECODER_H264PROFILE_SCALABLEBASELINE:
    case PP_VIDEODECODER_H264PROFILE_SCALABLEHIGH:
    case PP_VIDEODECODER_H264PROFILE_STEREOHIGH:
    case PP_VIDEODECODER_H264PROFILE_MULTIVIEWHIGH:
    case PP_VIDEODECODER_VP8PROFILE_ANY:
    case PP_VIDEODECODER_PROFILE_UNKNOWN:
    default:
        trace_error("%s, profile %d is not supported\n", __func__, profile);
        return 0;
    }

    const struct PPP_VideoDecoder_Dev_0_11 *ppp_video_decoder_dev = NULL;
    struct pp_instance_s *pp_i = tables_get_pp_instance(instance);
    if (!pp_i) {
        trace_error("%s, bad instance\n", __func__);
        return 0;
    }

    ppp_video_decoder_dev = ppp_get_interface(PPP_VIDEODECODER_DEV_INTERFACE);
    if (!ppp_video_decoder_dev) {
        trace_error("%s, no viable %s\n", __func__, PPP_VIDEODECODER_DEV_INTERFACE);
        return 0;
    }

    if (pp_resource_get_type(context) != PP_RESOURCE_GRAPHICS3D) {
        trace_error("%s, bad resource\n", __func__);
        return 0;
    }

    PP_Resource video_decoder = pp_resource_allocate(PP_RESOURCE_VIDEO_DECODER, pp_i);
    struct pp_video_decoder_s *vd = pp_resource_acquire(video_decoder, PP_RESOURCE_VIDEO_DECODER);
    if (!vd) {
        trace_error("%s, resource allocation failed\n", __func__);
        return 0;
    }

    vd->orig_graphics3d = pp_resource_ref(context);
    vd->ppp_video_decoder_dev = ppp_video_decoder_dev;

    // create auxiliary GL context
    int32_t attribs[] = {
        PP_GRAPHICS3DATTRIB_WIDTH,      32,    // dimensions can be arbitrary
        PP_GRAPHICS3DATTRIB_HEIGHT,     32,
        PP_GRAPHICS3DATTRIB_RED_SIZE,    8,
        PP_GRAPHICS3DATTRIB_GREEN_SIZE,  8,
        PP_GRAPHICS3DATTRIB_BLUE_SIZE,   8,
        PP_GRAPHICS3DATTRIB_ALPHA_SIZE,  8,
        PP_GRAPHICS3DATTRIB_DEPTH_SIZE, 16,
        GLX_Y_INVERTED_EXT,             True,
        GLX_BIND_TO_TEXTURE_RGBA_EXT,   True,
        PP_GRAPHICS3DATTRIB_NONE,
    };
    vd->graphics3d = ppb_graphics3d_create(vd->instance->id, vd->orig_graphics3d, attribs);
    if (!vd->graphics3d) {
        trace_error("%s, can't create graphics3d context\n", __func__);
        goto err_1;
    }

    vd->codec_id = AV_CODEC_ID_H264;    // TODO: other codecs
    vd->avcodec = avcodec_find_decoder(vd->codec_id);
    if (!vd->avcodec) {
        trace_error("%s, can't create codec\n", __func__);
        goto err_1;
    }

    vd->avparser = av_parser_init(vd->codec_id);
    if (!vd->avparser) {
        trace_error("%s, can't create parser\n", __func__);
        goto err_1;
    }

    vd->avctx = avcodec_alloc_context3(vd->avcodec);
    if (!vd->avctx) {
        trace_error("%s, can't create codec context\n", __func__);
        goto err_1;
    }

    if (vd->avcodec->capabilities & CODEC_CAP_TRUNCATED) {
        trace_info("%s, codec have CODEC_CAP_TRUNCATED\n", __func__);
        vd->avctx->flags |= CODEC_FLAG_TRUNCATED;
    }

    // Single-threaded decode; buffers are managed via our own callbacks.
    vd->avctx->opaque = vd;
    vd->avctx->thread_count = 1;
    vd->avctx->get_format = get_format;
#if AVCTX_HAVE_REFCOUNTED_BUFFERS
    vd->avctx->get_buffer2 = get_buffer2;
    vd->avctx->refcounted_frames = 1;
#else
    vd->avctx->get_buffer = get_buffer;
    vd->avctx->release_buffer = release_buffer;
#endif

    if (avcodec_open2(vd->avctx, vd->avcodec, NULL) < 0) {
        trace_error("%s, can't open codec\n", __func__);
        goto err_1;
    }

    vd->avframe = av_frame_alloc();
    if (!vd->avframe) {
        trace_error("%s, can't alloc frame\n", __func__);
        goto err_1;
    }

    pp_resource_release(video_decoder);
    return video_decoder;

err_1:
    // Failure after resource allocation: destroy private state and the
    // resource itself before returning 0.
    ppb_video_decoder_destroy_priv(vd);
    pp_resource_release(video_decoder);
    pp_resource_expunge(video_decoder);
    return 0;
}
// Allocate and open the FFmpeg codec context for mCodecID, configure
// threading, sample/pixel format negotiation and extradata, and create the
// bitstream parser. Returns NS_OK on success, NS_ERROR_FAILURE otherwise;
// on failure no codec context is left allocated.
nsresult
FFmpegDataDecoder<LIBAV_VER>::InitDecoder()
{
  FFMPEG_LOG("Initialising FFmpeg decoder.");

  AVCodec* codec = FindAVCodec(mCodecID);
  if (!codec) {
    NS_WARNING("Couldn't find ffmpeg decoder");
    return NS_ERROR_FAILURE;
  }

  StaticMutexAutoLock mon(sMonitor);

  if (!(mCodecContext = avcodec_alloc_context3(codec))) {
    NS_WARNING("Couldn't init ffmpeg context");
    return NS_ERROR_FAILURE;
  }

  mCodecContext->opaque = this;

  // FFmpeg takes this as a suggestion for what format to use for audio samples.
  uint32_t major, minor;
  FFmpegRuntimeLinker::GetVersion(major, minor);
  // LibAV 0.8 produces rubbish float interleaved samples, request 16 bits audio.
  mCodecContext->request_sample_fmt =
    major == 53 && minor <= 34 ? AV_SAMPLE_FMT_S16 : AV_SAMPLE_FMT_FLT;

  // FFmpeg will call back to this to negotiate a video pixel format.
  mCodecContext->get_format = ChoosePixelFormat;

  mCodecContext->thread_count = PR_GetNumberOfProcessors();
  mCodecContext->thread_type = FF_THREAD_SLICE | FF_THREAD_FRAME;
  mCodecContext->thread_safe_callbacks = false;

  if (mExtraData) {
    mCodecContext->extradata_size = mExtraData->Length();
    // FFmpeg may use SIMD instructions to access the data which reads the
    // data in 32 bytes block. Must ensure we have enough data to read.
    mExtraData->AppendElements(FF_INPUT_BUFFER_PADDING_SIZE);
    mCodecContext->extradata = mExtraData->Elements();
  } else {
    mCodecContext->extradata_size = 0;
  }

  if (codec->capabilities & CODEC_CAP_DR1) {
    mCodecContext->flags |= CODEC_FLAG_EMU_EDGE;
  }

  if (avcodec_open2(mCodecContext, codec, nullptr) < 0) {
    NS_WARNING("Couldn't initialise ffmpeg decoder");
    avcodec_close(mCodecContext);
    av_freep(&mCodecContext);
    return NS_ERROR_FAILURE;
  }

  if (mCodecContext->codec_type == AVMEDIA_TYPE_AUDIO &&
      mCodecContext->sample_fmt != AV_SAMPLE_FMT_FLT &&
      mCodecContext->sample_fmt != AV_SAMPLE_FMT_FLTP &&
      mCodecContext->sample_fmt != AV_SAMPLE_FMT_S16 &&
      mCodecContext->sample_fmt != AV_SAMPLE_FMT_S16P) {
    NS_WARNING("FFmpeg audio decoder outputs unsupported audio format.");
    // BUGFIX: this path previously returned without releasing the opened
    // codec context, leaking it (the open-failure path above cleans up).
    avcodec_close(mCodecContext);
    av_freep(&mCodecContext);
    return NS_ERROR_FAILURE;
  }

  mCodecParser = av_parser_init(mCodecID);
  if (mCodecParser) {
    mCodecParser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
  }

  FFMPEG_LOG("FFmpeg init successful.");
  return NS_OK;
}
// Build a frame/sample index for every selected stream in the container.
// Opens a decoder (and, for video, a parser) per indexed stream, then walks
// all packets recording per-frame timestamps, key-frame flags and positions.
// Throws FFMS_Exception on codec errors or user cancellation; returns the
// newly-built index (ownership transferred to the caller).
FFMS_Index *FFLAVFIndexer::DoIndexing()
{
  std::vector<SharedAudioContext> AudioContexts(FormatContext->nb_streams, SharedAudioContext(false));
  std::vector<SharedVideoContext> VideoContexts(FormatContext->nb_streams, SharedVideoContext(false));

  std::auto_ptr<FFMS_Index> TrackIndices(new FFMS_Index(Filesize, Digest));
  TrackIndices->Decoder = FFMS_SOURCE_LAVF;

  // Per-stream setup: one FFMS_Track per stream; video streams always get
  // indexed (IndexMask bit forced on), audio only when already requested.
  for (unsigned int i = 0; i < FormatContext->nb_streams; i++) {
    TrackIndices->push_back(FFMS_Track((int64_t)FormatContext->streams[i]->time_base.num * 1000,
      FormatContext->streams[i]->time_base.den,
      static_cast<FFMS_TrackType>(FormatContext->streams[i]->codec->codec_type)));

    if (FormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
      AVCodec *VideoCodec = avcodec_find_decoder(FormatContext->streams[i]->codec->codec_id);
      if (!VideoCodec)
        throw FFMS_Exception(FFMS_ERROR_CODEC, FFMS_ERROR_UNSUPPORTED,
          "Video codec not found");

      if (avcodec_open2(FormatContext->streams[i]->codec, VideoCodec, NULL) < 0)
        throw FFMS_Exception(FFMS_ERROR_CODEC, FFMS_ERROR_DECODING,
          "Could not open video codec");

      VideoContexts[i].CodecContext = FormatContext->streams[i]->codec;
      // Parser is optional; used by ParseVideoPacket for repeat/frame type.
      VideoContexts[i].Parser = av_parser_init(FormatContext->streams[i]->codec->codec_id);
      if (VideoContexts[i].Parser)
        VideoContexts[i].Parser->flags = PARSER_FLAG_COMPLETE_FRAMES;
      IndexMask |= 1 << i;
    }
    else if (IndexMask & (1 << i) && FormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
      AVCodecContext *AudioCodecContext = FormatContext->streams[i]->codec;
      AVCodec *AudioCodec = avcodec_find_decoder(AudioCodecContext->codec_id);
      if (AudioCodec == NULL)
        throw FFMS_Exception(FFMS_ERROR_CODEC, FFMS_ERROR_UNSUPPORTED,
          "Audio codec not found");

      if (avcodec_open2(AudioCodecContext, AudioCodec, NULL) < 0)
        throw FFMS_Exception(FFMS_ERROR_CODEC, FFMS_ERROR_DECODING,
          "Could not open audio codec");

      AudioContexts[i].CodecContext = AudioCodecContext;
    }
    else {
      // Neither indexed video nor requested audio: drop from the mask.
      IndexMask &= ~(1 << i);
    }
  }

  AVPacket Packet;
  InitNullPacket(Packet);
  std::vector<int64_t> LastValidTS(FormatContext->nb_streams, ffms_av_nopts_value);
  std::vector<int> LastDuration(FormatContext->nb_streams, 0);
#if (LIBAVFORMAT_VERSION_INT) < (AV_VERSION_INT(52,106,0))
  int64_t filesize = FormatContext->file_size;
#else
  int64_t filesize = avio_size(FormatContext->pb);
#endif
  while (av_read_frame(FormatContext, &Packet) >= 0) {
    // Update progress
    // FormatContext->pb can apparently be NULL when opening images.
    if (IC && FormatContext->pb) {
      if ((*IC)(FormatContext->pb->pos, filesize, ICPrivate))
        throw FFMS_Exception(FFMS_ERROR_CANCELLED, FFMS_ERROR_USER,
          "Cancelled by user");
    }
    if (!(IndexMask & (1 << Packet.stream_index))) {
      av_free_packet(&Packet);
      continue;
    }

    int Track = Packet.stream_index;
    bool KeyFrame = !!(Packet.flags & AV_PKT_FLAG_KEY);
    ReadTS(Packet, LastValidTS[Track], (*TrackIndices)[Track].UseDTS);

    if (FormatContext->streams[Track]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
      int64_t PTS = LastValidTS[Track];
      // No valid pts/dts: synthesise a timestamp from the previous frame's
      // PTS plus the last known duration, and mark the track as lacking
      // real timestamps.
      if (PTS == ffms_av_nopts_value) {
        if (Packet.duration == 0)
          throw FFMS_Exception(FFMS_ERROR_INDEXING, FFMS_ERROR_PARSER,
            "Invalid initial pts, dts, and duration");

        if ((*TrackIndices)[Track].empty())
          PTS = 0;
        else
          PTS = (*TrackIndices)[Track].back().PTS + LastDuration[Track];

        (*TrackIndices)[Track].HasTS = false;
        LastDuration[Track] = Packet.duration;
      }

      int RepeatPict = -1;
      int FrameType = 0;
      ParseVideoPacket(VideoContexts[Track], Packet, &RepeatPict, &FrameType);

      (*TrackIndices)[Track].push_back(TFrameInfo::VideoFrameInfo(PTS, RepeatPict, KeyFrame, FrameType, Packet.pos));
    }
    else if (FormatContext->streams[Track]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
      int64_t StartSample = AudioContexts[Track].CurrentSample;
      int64_t SampleCount = IndexAudioPacket(Track, &Packet, AudioContexts[Track], *TrackIndices);

      if (SampleCount != 0)
        (*TrackIndices)[Track].push_back(TFrameInfo::AudioFrameInfo(LastValidTS[Track],
          StartSample, SampleCount, KeyFrame, Packet.pos));
    }

    av_free_packet(&Packet);
  }

  TrackIndices->Sort();
  return TrackIndices.release();
}
int main(int argc, char* argv[]) { AVCodec *pCodec; AVCodecContext *pCodecCtx= NULL; AVCodecParserContext *pCodecParserCtx=NULL; int frame_count; FILE *fp_in; FILE *fp_out; AVFrame *pFrame,*pFrameYUV; uint8_t *out_buffer; const int in_buffer_size=4096; uint8_t in_buffer[in_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE]={0}; uint8_t *cur_ptr; int cur_size; AVPacket packet; int ret, got_picture; int y_size; #if TEST_HEVC enum AVCodecID codec_id=AV_CODEC_ID_HEVC; char filepath_in[]="bigbuckbunny_480x272.hevc"; #elif TEST_H264 AVCodecID codec_id=AV_CODEC_ID_H264; char filepath_in[]="bigbuckbunny_480x272.h264"; #else AVCodecID codec_id=AV_CODEC_ID_MPEG2VIDEO; char filepath_in[]="bigbuckbunny_480x272.m2v"; #endif char filepath_out[]="bigbuckbunny_480x272.yuv"; int first_time=1; struct SwsContext *img_convert_ctx; //av_log_set_level(AV_LOG_DEBUG); avcodec_register_all(); pCodec = avcodec_find_decoder(codec_id); if (!pCodec) { printf("Codec not found\n"); return -1; } pCodecCtx = avcodec_alloc_context3(pCodec); if (!pCodecCtx){ printf("Could not allocate video codec context\n"); return -1; } pCodecParserCtx=av_parser_init(codec_id); if (!pCodecParserCtx){ printf("Could not allocate video parser context\n"); return -1; } //if(pCodec->capabilities&CODEC_CAP_TRUNCATED) // pCodecCtx->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */ if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) { printf("Could not open codec\n"); return -1; } //Input File fp_in = fopen(filepath_in, "rb"); if (!fp_in) { printf("Could not open input stream\n"); return -1; } //Output File fp_out = fopen(filepath_out, "wb"); if (!fp_out) { printf("Could not open output YUV file\n"); return -1; } pFrame = av_frame_alloc(); av_init_packet(&packet); while (1) { cur_size = fread(in_buffer, 1, in_buffer_size, fp_in); if (cur_size == 0) break; cur_ptr=in_buffer; while (cur_size>0){ int len = av_parser_parse2( pCodecParserCtx, pCodecCtx, &packet.data, &packet.size, cur_ptr , cur_size , AV_NOPTS_VALUE, 
AV_NOPTS_VALUE, AV_NOPTS_VALUE); cur_ptr += len; cur_size -= len; if(packet.size==0) continue; //Some Info from AVCodecParserContext printf("Packet Size:%6d\t",packet.size); switch(pCodecParserCtx->pict_type){ case AV_PICTURE_TYPE_I: printf("Type: I\t");break; case AV_PICTURE_TYPE_P: printf("Type: P\t");break; case AV_PICTURE_TYPE_B: printf("Type: B\t");break; default: printf("Type: Other\t");break; } printf("Output Number:%4d\t",pCodecParserCtx->output_picture_number); printf("Offset:%lld\n",pCodecParserCtx->cur_offset); ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet); if (ret < 0) { printf("Decode Error.\n"); return ret; } if (got_picture) { if(first_time){ printf("\nCodec Full Name:%s\n",pCodecCtx->codec->long_name); printf("width:%d\nheight:%d\n\n",pCodecCtx->width,pCodecCtx->height); //SwsContext img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); pFrameYUV=av_frame_alloc(); out_buffer=(uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height)); avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height); y_size=pCodecCtx->width*pCodecCtx->height; first_time=0; } printf("Succeed to decode 1 frame!\n"); sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize); fwrite(pFrameYUV->data[0],1,y_size,fp_out); //Y fwrite(pFrameYUV->data[1],1,y_size/4,fp_out); //U fwrite(pFrameYUV->data[2],1,y_size/4,fp_out); //V } } } //Flush Decoder packet.data = NULL; packet.size = 0; while(1){ ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet); if (ret < 0) { printf("Decode Error.\n"); return ret; } if (!got_picture) break; if (got_picture) { printf("Flush Decoder: Succeed to decode 1 frame!\n"); sws_scale(img_convert_ctx, (const uint8_t* 
const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize); fwrite(pFrameYUV->data[0],1,y_size,fp_out); //Y fwrite(pFrameYUV->data[1],1,y_size/4,fp_out); //U fwrite(pFrameYUV->data[2],1,y_size/4,fp_out); //V } } fclose(fp_in); fclose(fp_out); sws_freeContext(img_convert_ctx); av_parser_close(pCodecParserCtx); av_frame_free(&pFrameYUV); av_frame_free(&pFrame); avcodec_close(pCodecCtx); av_free(pCodecCtx); return 0; }
// Initialize the ffmpeg software decoder for the given codec, configured from
// the DirectShow media type (format header, extradata, threading, timing).
// Returns S_OK on success, VFW_E_UNSUPPORTED_VIDEO / E_POINTER / E_FAIL on failure.
STDMETHODIMP CDecAvcodec::InitDecoder(AVCodecID codec, const CMediaType *pmt)
{
  DestroyDecoder();
  DbgLog((LOG_TRACE, 10, L"Initializing ffmpeg for codec %S", avcodec_get_name(codec)));

  BITMAPINFOHEADER *pBMI = NULL;
  videoFormatTypeHandler((const BYTE *)pmt->Format(), pmt->FormatType(), &pBMI);

  m_pAVCodec = avcodec_find_decoder(codec);
  CheckPointer(m_pAVCodec, VFW_E_UNSUPPORTED_VIDEO);

  m_pAVCtx = avcodec_alloc_context3(m_pAVCodec);
  CheckPointer(m_pAVCtx, E_POINTER);

  // A bitstream parser is only created for MPEG-1/2 and raw (non-AVC1) H.264 input.
  if(codec == AV_CODEC_ID_MPEG1VIDEO || codec == AV_CODEC_ID_MPEG2VIDEO || pmt->subtype == FOURCCMap(MKTAG('H','2','6','4')) || pmt->subtype == FOURCCMap(MKTAG('h','2','6','4'))) {
    m_pParser = av_parser_init(codec);
  }

  DWORD dwDecFlags = m_pCallback->GetDecodeFlags();

  // Prefer the target rect (display dimensions) over the coded dimensions when set.
  LONG biRealWidth = pBMI->biWidth, biRealHeight = pBMI->biHeight;
  if (pmt->formattype == FORMAT_VideoInfo || pmt->formattype == FORMAT_MPEGVideo) {
    VIDEOINFOHEADER *vih = (VIDEOINFOHEADER *)pmt->Format();
    if (vih->rcTarget.right != 0 && vih->rcTarget.bottom != 0) {
      biRealWidth = vih->rcTarget.right;
      biRealHeight = vih->rcTarget.bottom;
    }
  } else if (pmt->formattype == FORMAT_VideoInfo2 || pmt->formattype == FORMAT_MPEG2Video) {
    VIDEOINFOHEADER2 *vih2 = (VIDEOINFOHEADER2 *)pmt->Format();
    if (vih2->rcTarget.right != 0 && vih2->rcTarget.bottom != 0) {
      biRealWidth = vih2->rcTarget.right;
      biRealHeight = vih2->rcTarget.bottom;
    }
  }

  m_pAVCtx->codec_id = codec;
  m_pAVCtx->codec_tag = pBMI->biCompression;
  m_pAVCtx->coded_width = pBMI->biWidth;
  m_pAVCtx->coded_height = abs(pBMI->biHeight);
  m_pAVCtx->bits_per_coded_sample = pBMI->biBitCount;
  m_pAVCtx->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
  m_pAVCtx->err_recognition = AV_EF_CAREFUL;
  m_pAVCtx->workaround_bugs = FF_BUG_AUTODETECT;
  m_pAVCtx->refcounted_frames = 1;

  if (codec == AV_CODEC_ID_H264)
    m_pAVCtx->flags2 |= CODEC_FLAG2_SHOW_ALL;

  // Setup threading
  int thread_type = getThreadFlags(codec);
  if (thread_type) {
    // Thread Count. 0 = auto detect
    int thread_count = m_pSettings->GetNumThreads();
    if (thread_count == 0) {
      thread_count = av_cpu_count() * 3 / 2;
    }
    m_pAVCtx->thread_count = max(1, min(thread_count, AVCODEC_MAX_THREADS));
    m_pAVCtx->thread_type = thread_type;
  } else {
    m_pAVCtx->thread_count = 1;
  }
  if (dwDecFlags & LAV_VIDEO_DEC_FLAG_NO_MT) {
    m_pAVCtx->thread_count = 1;
  }

  m_pFrame = av_frame_alloc();
  CheckPointer(m_pFrame, E_POINTER);

  m_h264RandomAccess.SetAVCNALSize(0);

  // Process Extradata
  BYTE *extra = NULL;
  size_t extralen = 0;
  getExtraData(*pmt, NULL, &extralen);

  BOOL bH264avc = FALSE;
  if (extralen > 0) {
    DbgLog((LOG_TRACE, 10, L"-> Processing extradata of %d bytes", extralen));
    // Reconstruct AVC1 extradata format
    if (pmt->formattype == FORMAT_MPEG2Video && (m_pAVCtx->codec_tag == MAKEFOURCC('a','v','c','1') || m_pAVCtx->codec_tag == MAKEFOURCC('A','V','C','1') || m_pAVCtx->codec_tag == MAKEFOURCC('C','C','V','1'))) {
      MPEG2VIDEOINFO *mp2vi = (MPEG2VIDEOINFO *)pmt->Format();
      extralen += 7;
      extra = (uint8_t *)av_mallocz(extralen + FF_INPUT_BUFFER_PADDING_SIZE);
      // avcC header: version, profile, compat, level, NAL length size.
      extra[0] = 1;
      extra[1] = (BYTE)mp2vi->dwProfile;
      extra[2] = 0;
      extra[3] = (BYTE)mp2vi->dwLevel;
      extra[4] = (BYTE)(mp2vi->dwFlags ? mp2vi->dwFlags : 4) - 1;

      // Actually copy the metadata into our new buffer
      size_t actual_len;
      getExtraData(*pmt, extra+6, &actual_len);

      // Count the number of SPS/PPS in them and set the length
      // We'll put them all into one block and add a second block with 0 elements afterwards
      // The parsing logic does not care what type they are, it just expects 2 blocks.
      BYTE *p = extra+6, *end = extra+6+actual_len;
      BOOL bSPS = FALSE, bPPS = FALSE;
      int count = 0;
      while (p+1 < end) {
        // Each parameter set is a 2-byte big-endian length plus payload.
        unsigned len = (((unsigned)p[0] << 8) | p[1]) + 2;
        if (p + len > end) {
          break;
        }
        if ((p[2] & 0x1F) == 7)
          bSPS = TRUE;
        if ((p[2] & 0x1F) == 8)
          bPPS = TRUE;
        count++;
        p += len;
      }
      extra[5] = count;
      extra[extralen-1] = 0;

      bH264avc = TRUE;
      m_h264RandomAccess.SetAVCNALSize(mp2vi->dwFlags);
    } else if (pmt->subtype == MEDIASUBTYPE_LAV_RAWVIDEO) {
      // LAV raw video: the leading bytes of extradata encode the pixel format.
      if (extralen < sizeof(m_pAVCtx->pix_fmt)) {
        DbgLog((LOG_TRACE, 10, L"-> LAV RAW Video extradata is missing.."));
      } else {
        extra = (uint8_t *)av_mallocz(extralen + FF_INPUT_BUFFER_PADDING_SIZE);
        getExtraData(*pmt, extra, NULL);
        m_pAVCtx->pix_fmt = *(AVPixelFormat *)extra;
        extralen -= sizeof(AVPixelFormat);
        memmove(extra, extra+sizeof(AVPixelFormat), extralen);
      }
    } else {
      // Just copy extradata for other formats
      extra = (uint8_t *)av_mallocz(extralen + FF_INPUT_BUFFER_PADDING_SIZE);
      getExtraData(*pmt, extra, NULL);
    }
    // Hack to discard invalid MP4 metadata with AnnexB style video
    // NOTE(review): 'extra' can still be NULL here if the LAV RAW path above
    // hit the too-short-extradata branch — verify this combination cannot occur.
    if (codec == AV_CODEC_ID_H264 && !bH264avc && extra[0] == 1) {
      av_freep(&extra);
      extralen = 0;
    }
    m_pAVCtx->extradata = extra;
    m_pAVCtx->extradata_size = (int)extralen;
  } else {
    if (codec == AV_CODEC_ID_VP6 || codec == AV_CODEC_ID_VP6A || codec == AV_CODEC_ID_VP6F) {
      // VP6 without extradata: synthesize a 1-byte extradata carrying the
      // crop amounts (high nibble = horizontal, low nibble = vertical).
      int cropH = pBMI->biWidth - biRealWidth;
      int cropV = pBMI->biHeight - biRealHeight;
      if (cropH >= 0 && cropH <= 0x0f && cropV >= 0 && cropV <= 0x0f) {
        m_pAVCtx->extradata = (uint8_t *)av_mallocz(1 + FF_INPUT_BUFFER_PADDING_SIZE);
        m_pAVCtx->extradata_size = 1;
        m_pAVCtx->extradata[0] = (cropH << 4) | cropV;
      }
    }
  }

  m_h264RandomAccess.flush(m_pAVCtx->thread_count);
  m_CurrentThread = 0;
  m_rtStartCache = AV_NOPTS_VALUE;

  LAVPinInfo lavPinInfo = {0};
  BOOL bLAVInfoValid = SUCCEEDED(m_pCallback->GetLAVPinInfo(lavPinInfo));

  m_bInputPadded = dwDecFlags & LAV_VIDEO_DEC_FLAG_LAVSPLITTER;

  // Setup codec-specific timing logic
  BOOL bVC1IsPTS = (codec == AV_CODEC_ID_VC1 && !(dwDecFlags & LAV_VIDEO_DEC_FLAG_VC1_DTS));

  // Use ffmpegs logic to reorder timestamps
  // This is required for H264 content (except AVI), and generally all codecs that use frame threading
  // VC-1 is also a special case. Its required for splitters that deliver PTS timestamps (see bVC1IsPTS above)
  m_bFFReordering = ( codec == AV_CODEC_ID_H264 && !(dwDecFlags & LAV_VIDEO_DEC_FLAG_H264_AVI)) || codec == AV_CODEC_ID_VP8 || codec == AV_CODEC_ID_VP3 || codec == AV_CODEC_ID_THEORA || codec == AV_CODEC_ID_HUFFYUV || codec == AV_CODEC_ID_FFVHUFF || codec == AV_CODEC_ID_MPEG2VIDEO || codec == AV_CODEC_ID_MPEG1VIDEO || codec == AV_CODEC_ID_DIRAC || codec == AV_CODEC_ID_UTVIDEO || codec == AV_CODEC_ID_DNXHD || codec == AV_CODEC_ID_JPEG2000 || (codec == AV_CODEC_ID_MPEG4 && pmt->formattype == FORMAT_MPEG2Video) || bVC1IsPTS;

  // Stop time is unreliable, drop it and calculate it
  m_bCalculateStopTime = (codec == AV_CODEC_ID_H264 || codec == AV_CODEC_ID_DIRAC || (codec == AV_CODEC_ID_MPEG4 && pmt->formattype == FORMAT_MPEG2Video) || bVC1IsPTS);

  // Real Video content has some odd timestamps
  // LAV Splitter does them allright with RV30/RV40, everything else screws them up
  m_bRVDropBFrameTimings = (codec == AV_CODEC_ID_RV10 || codec == AV_CODEC_ID_RV20 || ((codec == AV_CODEC_ID_RV30 || codec == AV_CODEC_ID_RV40) && (!(dwDecFlags & LAV_VIDEO_DEC_FLAG_LAVSPLITTER) || (bLAVInfoValid && (lavPinInfo.flags & LAV_STREAM_FLAG_RV34_MKV)))));

  // Enable B-Frame delay handling
  m_bBFrameDelay = !m_bFFReordering && !m_bRVDropBFrameTimings;

  m_bWaitingForKeyFrame = TRUE;
  m_bResumeAtKeyFrame = codec == AV_CODEC_ID_MPEG2VIDEO || codec == AV_CODEC_ID_VC1 || codec == AV_CODEC_ID_RV30 || codec == AV_CODEC_ID_RV40 || codec == AV_CODEC_ID_VP3 || codec == AV_CODEC_ID_THEORA || codec == AV_CODEC_ID_MPEG4;

  m_bNoBufferConsumption = codec == AV_CODEC_ID_MJPEGB || codec == AV_CODEC_ID_LOCO || codec == AV_CODEC_ID_JPEG2000;

  // Palettized content (<= 8bpp with extradata) needs palette passthrough,
  // except when LAV Splitter already handled it.
  m_bHasPalette = m_pAVCtx->bits_per_coded_sample <= 8 && m_pAVCtx->extradata_size && !(dwDecFlags & LAV_VIDEO_DEC_FLAG_LAVSPLITTER) && (codec == AV_CODEC_ID_MSVIDEO1 || codec == AV_CODEC_ID_MSRLE || codec == AV_CODEC_ID_CINEPAK || codec == AV_CODEC_ID_8BPS || codec == AV_CODEC_ID_QPEG || codec == AV_CODEC_ID_QTRLE || codec == AV_CODEC_ID_TSCC);

  if (FAILED(AdditionaDecoderInit())) {
    return E_FAIL;
  }

  if (bLAVInfoValid) {
    // Setting has_b_frames to a proper value will ensure smoother decoding of H264
    if (lavPinInfo.has_b_frames >= 0) {
      DbgLog((LOG_TRACE, 10, L"-> Setting has_b_frames to %d", lavPinInfo.has_b_frames));
      m_pAVCtx->has_b_frames = lavPinInfo.has_b_frames;
    }
  }

  // Open the decoder
  int ret = avcodec_open2(m_pAVCtx, m_pAVCodec, NULL);
  if (ret >= 0) {
    DbgLog((LOG_TRACE, 10, L"-> ffmpeg codec opened successfully (ret: %d)", ret));
    m_nCodecId = codec;
  } else {
    DbgLog((LOG_TRACE, 10, L"-> ffmpeg codec failed to open (ret: %d)", ret));
    DestroyDecoder();
    return VFW_E_UNSUPPORTED_VIDEO;
  }

  // -1 = possibly interlaced (codec supports it), 0 = progressive.
  m_iInterlaced = 0;
  for (int i = 0; i < countof(ff_interlace_capable); i++) {
    if (codec == ff_interlace_capable[i]) {
      m_iInterlaced = -1;
      break;
    }
  }

  // Detect chroma and interlaced
  if (m_pAVCtx->extradata && m_pAVCtx->extradata_size) {
    if (codec == AV_CODEC_ID_MPEG2VIDEO) {
      CMPEG2HeaderParser mpeg2Parser(extra, extralen);
      if (mpeg2Parser.hdr.valid) {
        if (mpeg2Parser.hdr.chroma < 2) {
          m_pAVCtx->pix_fmt = AV_PIX_FMT_YUV420P;
        } else if (mpeg2Parser.hdr.chroma == 2) {
          m_pAVCtx->pix_fmt = AV_PIX_FMT_YUV422P;
        }
        m_iInterlaced = mpeg2Parser.hdr.interlaced;
      }
    } else if (codec == AV_CODEC_ID_H264) {
      CH264SequenceParser h264parser;
      if (bH264avc)
        h264parser.ParseNALs(extra+6, extralen-6, 2);
      else
        h264parser.ParseNALs(extra, extralen, 0);
      if (h264parser.sps.valid)
        m_iInterlaced = h264parser.sps.interlaced;
    } else if (codec == AV_CODEC_ID_VC1) {
      CVC1HeaderParser vc1parser(extra, extralen);
      if (vc1parser.hdr.valid)
        m_iInterlaced = (vc1parser.hdr.interlaced ? -1 : 0);
    }
  }

  // Codec-specific pixel format overrides.
  if (codec == AV_CODEC_ID_DNXHD)
    m_pAVCtx->pix_fmt = AV_PIX_FMT_YUV422P10;
  else if (codec == AV_CODEC_ID_FRAPS)
    m_pAVCtx->pix_fmt = AV_PIX_FMT_BGR24;

  if (bLAVInfoValid && codec != AV_CODEC_ID_FRAPS && m_pAVCtx->pix_fmt != AV_PIX_FMT_DXVA2_VLD)
    m_pAVCtx->pix_fmt = lavPinInfo.pix_fmt;

  DbgLog((LOG_TRACE, 10, L"AVCodec init successfull. interlaced: %d", m_iInterlaced));
  return S_OK;
}
// Newer revision of CDecAvcodec::InitDecoder (nullptr / AV_INPUT_BUFFER_*
// era).  Initializes the ffmpeg software decoder from the DirectShow media
// type: format header, parser selection, threading, and extradata handling.
// NOTE(review): the remainder of this function is not present in this chunk —
// the body below is truncated inside the VP9 extradata branch.
STDMETHODIMP CDecAvcodec::InitDecoder(AVCodecID codec, const CMediaType *pmt)
{
  DestroyDecoder();
  DbgLog((LOG_TRACE, 10, L"Initializing ffmpeg for codec %S", avcodec_get_name(codec)));

  BITMAPINFOHEADER *pBMI = nullptr;
  videoFormatTypeHandler((const BYTE *)pmt->Format(), pmt->FormatType(), &pBMI);

  m_pAVCodec = avcodec_find_decoder(codec);
  CheckPointer(m_pAVCodec, VFW_E_UNSUPPORTED_VIDEO);

  m_pAVCtx = avcodec_alloc_context3(m_pAVCodec);
  CheckPointer(m_pAVCtx, E_POINTER);

  DWORD dwDecFlags = m_pCallback->GetDecodeFlags();

  // Use parsing for mpeg1/2 at all times, or H264/HEVC when its not from LAV Splitter
  if( codec == AV_CODEC_ID_MPEG1VIDEO || codec == AV_CODEC_ID_MPEG2VIDEO
    || (!(dwDecFlags & LAV_VIDEO_DEC_FLAG_LAVSPLITTER)
      && (pmt->subtype == MEDIASUBTYPE_H264 || pmt->subtype == MEDIASUBTYPE_h264 || pmt->subtype == MEDIASUBTYPE_X264 || pmt->subtype == MEDIASUBTYPE_x264 || pmt->subtype == MEDIASUBTYPE_H264_bis || pmt->subtype == MEDIASUBTYPE_HEVC))) {
    m_pParser = av_parser_init(codec);
  }

  // Prefer the target rect (display dimensions) over the coded dimensions when set.
  LONG biRealWidth = pBMI->biWidth, biRealHeight = pBMI->biHeight;
  if (pmt->formattype == FORMAT_VideoInfo || pmt->formattype == FORMAT_MPEGVideo) {
    VIDEOINFOHEADER *vih = (VIDEOINFOHEADER *)pmt->Format();
    if (vih->rcTarget.right != 0 && vih->rcTarget.bottom != 0) {
      biRealWidth = vih->rcTarget.right;
      biRealHeight = vih->rcTarget.bottom;
    }
  } else if (pmt->formattype == FORMAT_VideoInfo2 || pmt->formattype == FORMAT_MPEG2Video) {
    VIDEOINFOHEADER2 *vih2 = (VIDEOINFOHEADER2 *)pmt->Format();
    if (vih2->rcTarget.right != 0 && vih2->rcTarget.bottom != 0) {
      biRealWidth = vih2->rcTarget.right;
      biRealHeight = vih2->rcTarget.bottom;
    }
  }

  m_pAVCtx->codec_id = codec;
  m_pAVCtx->codec_tag = pBMI->biCompression;
  m_pAVCtx->coded_width = pBMI->biWidth;
  m_pAVCtx->coded_height = abs(pBMI->biHeight);
  m_pAVCtx->bits_per_coded_sample = pBMI->biBitCount;
  m_pAVCtx->err_recognition = 0;
  m_pAVCtx->workaround_bugs = FF_BUG_AUTODETECT;
  m_pAVCtx->refcounted_frames = 1;

  // Setup threading
  // Thread Count. 0 = auto detect
  int thread_count = m_pSettings->GetNumThreads();
  if (thread_count == 0) {
    thread_count = av_cpu_count();
  }
  m_pAVCtx->thread_count = max(1, min(thread_count, AVCODEC_MAX_THREADS));
  if (dwDecFlags & LAV_VIDEO_DEC_FLAG_NO_MT || codec == AV_CODEC_ID_MPEG4) {
    m_pAVCtx->thread_count = 1;
  }

  m_pFrame = av_frame_alloc();
  CheckPointer(m_pFrame, E_POINTER);

  // Process Extradata
  BYTE *extra = nullptr;
  size_t extralen = 0;
  getExtraData(*pmt, nullptr, &extralen);

  BOOL bH264avc = FALSE;
  if (pmt->formattype == FORMAT_MPEG2Video && (m_pAVCtx->codec_tag == MAKEFOURCC('a','v','c','1') || m_pAVCtx->codec_tag == MAKEFOURCC('A','V','C','1') || m_pAVCtx->codec_tag == MAKEFOURCC('C','C','V','1'))) {
    // Reconstruct AVC1 extradata format
    DbgLog((LOG_TRACE, 10, L"-> Processing AVC1 extradata of %d bytes", extralen));
    MPEG2VIDEOINFO *mp2vi = (MPEG2VIDEOINFO *)pmt->Format();
    extralen += 7;
    extra = (uint8_t *)av_mallocz(extralen + AV_INPUT_BUFFER_PADDING_SIZE);
    // avcC header: version, profile, compat, level, NAL length size.
    extra[0] = 1;
    extra[1] = (BYTE)mp2vi->dwProfile;
    extra[2] = 0;
    extra[3] = (BYTE)mp2vi->dwLevel;
    extra[4] = (BYTE)(mp2vi->dwFlags ? mp2vi->dwFlags : 4) - 1;

    // only process extradata if available
    uint8_t ps_count = 0;
    if (extralen > 7) {
      // Actually copy the metadata into our new buffer
      size_t actual_len;
      getExtraData(*pmt, extra + 6, &actual_len);

      // Count the number of SPS/PPS in them and set the length
      // We'll put them all into one block and add a second block with 0 elements afterwards
      // The parsing logic does not care what type they are, it just expects 2 blocks.
      BYTE *p = extra + 6, *end = extra + 6 + actual_len;
      // NOTE(review): bSPS/bPPS are computed here but never read in the
      // visible code.
      BOOL bSPS = FALSE, bPPS = FALSE;
      while (p + 1 < end) {
        // Each parameter set is a 2-byte big-endian length plus payload.
        unsigned len = (((unsigned)p[0] << 8) | p[1]) + 2;
        if (p + len > end) {
          break;
        }
        if ((p[2] & 0x1F) == 7)
          bSPS = TRUE;
        if ((p[2] & 0x1F) == 8)
          bPPS = TRUE;
        ps_count++;
        p += len;
      }
    }
    extra[5] = ps_count;
    extra[extralen - 1] = 0;

    bH264avc = TRUE;
    m_pAVCtx->extradata = extra;
    m_pAVCtx->extradata_size = (int)extralen;
  } else if (extralen > 0) {
    DbgLog((LOG_TRACE, 10, L"-> Processing extradata of %d bytes", extralen));
    if (pmt->subtype == MEDIASUBTYPE_LAV_RAWVIDEO) {
      // LAV raw video: the leading bytes of extradata encode the pixel format.
      if (extralen < sizeof(m_pAVCtx->pix_fmt)) {
        DbgLog((LOG_TRACE, 10, L"-> LAV RAW Video extradata is missing.."));
      } else {
        extra = (uint8_t *)av_mallocz(extralen + AV_INPUT_BUFFER_PADDING_SIZE);
        getExtraData(*pmt, extra, nullptr);
        m_pAVCtx->pix_fmt = *(AVPixelFormat *)extra;
        extralen -= sizeof(AVPixelFormat);
        memmove(extra, extra+sizeof(AVPixelFormat), extralen);
      }
    } else if (codec == AV_CODEC_ID_VP9) {
      // read custom vpcC headers
      if (extralen >= 16) {
        extra = (uint8_t *)av_mallocz(extralen + AV_INPUT_BUFFER_PADDING_SIZE);
        getExtraData(*pmt, extra, nullptr);
        if (AV_RB32(extra) == MKBETAG('v', 'p', 'c', 'C') && AV_RB8(extra + 4) == 1) {
          // vpcC layout: profile, bit depth (high nibble of byte 10), and
          // colour description bytes.
          m_pAVCtx->profile = AV_RB8(extra + 8);
          m_pAVCtx->color_primaries = (AVColorPrimaries)AV_RB8(extra + 11);
          m_pAVCtx->color_trc = (AVColorTransferCharacteristic)AV_RB8(extra + 12);
          m_pAVCtx->colorspace = (AVColorSpace)AV_RB8(extra + 13);
          int bitdepth = AV_RB8(extra + 10) >> 4;
          if (m_pAVCtx->profile == 2 && bitdepth == 10) {
            m_pAVCtx->pix_fmt = AV_PIX_FMT_YUV420P10;
          } else if (m_pAVCtx->profile == 2 && bitdepth == 12) {
            m_pAVCtx->pix_fmt = AV_PIX_FMT_YUV420P12;
          }
        }
        av_freep(&extra);
        extralen = 0;
      }
int main(int argc, char **argv) { const char *filename, *outfilename; const AVCodec *codec; AVCodecParserContext *parser; AVCodecContext *c= NULL; FILE *f; AVFrame *picture; uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE]; uint8_t *data; size_t data_size; int ret; AVPacket *pkt; if (argc <= 2) { fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]); exit(0); } filename = argv[1]; outfilename = argv[2]; avcodec_register_all(); pkt = av_packet_alloc(); if (!pkt) exit(1); /* set end of buffer to 0 (this ensures that no overreading happens for damaged MPEG streams) */ memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE); /* find the MPEG-1 video decoder */ codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO); if (!codec) { fprintf(stderr, "codec not found\n"); exit(1); } parser = av_parser_init(codec->id); if (!parser) { fprintf(stderr, "parser not found\n"); exit(1); } c = avcodec_alloc_context3(codec); picture = av_frame_alloc(); /* For some codecs, such as msmpeg4 and mpeg4, width and height MUST be initialized there because this information is not available in the bitstream. */ /* open it */ if (avcodec_open2(c, codec, NULL) < 0) { fprintf(stderr, "could not open codec\n"); exit(1); } f = fopen(filename, "rb"); if (!f) { fprintf(stderr, "could not open %s\n", filename); exit(1); } while (!feof(f)) { /* read raw data from the input file */ data_size = fread(inbuf, 1, INBUF_SIZE, f); if (!data_size) break; /* use the parser to split the data into frames */ data = inbuf; while (data_size > 0) { ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size, data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0); if (ret < 0) { fprintf(stderr, "Error while parsing\n"); exit(1); } data += ret; data_size -= ret; if (pkt->size) decode(c, picture, pkt, outfilename); } } /* flush the decoder */ decode(c, picture, NULL, outfilename); fclose(f); av_parser_close(parser); avcodec_free_context(&c); av_frame_free(&picture); av_packet_free(&pkt); return 0; }