// Call operator — presumably a custom deleter for a smart pointer holding
// an AVCodecContext (TODO confirm against the enclosing struct). Frees the
// context via libavcodec; note avcodec_free_context() NULLs only the local
// parameter copy, not the caller's pointer.
void operator()(AVCodecContext* pAVCodecContext) { avcodec_free_context(&pAVCodecContext); }
// Release the decoder's codec context. avcodec_free_context() also closes
// the codec if it is open and resets av_codec_context_ to NULL, so this is
// safe to call repeatedly.
void FFmpegAudioDecoder::ReleaseFFmpegResource() { avcodec_free_context(&av_codec_context_); }
QByteArray AVDecoder::WriteJPEG(AVCodecContext *pCodecCtx, AVFrame *pFrame, int width, int height) { AVCodecContext *pOCodecCtx; AVCodec *pOCodec; QByteArray data; pOCodec = avcodec_find_encoder(AV_CODEC_ID_MJPEG); if (!pOCodec) { return data; } SwsContext *sws_ctx = sws_getContext( pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, width, height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); if(!sws_ctx) { return data; } #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1) AVFrame *pFrameRGB = av_frame_alloc(); #else AVFrame *pFrameRGB = avcodec_alloc_frame(); #endif if(pFrameRGB == NULL) { sws_freeContext(sws_ctx); return data; } // detect ffmpeg (>= 100) or libav (< 100) #if (LIBAVUTIL_VERSION_MICRO >= 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,63,100)) || \ (LIBAVUTIL_VERSION_MICRO < 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(54,6,0)) int numBytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, width, height, 16); #else int numBytes = avpicture_get_size(PIX_FMT_YUVJ420P, width, height); #endif uint8_t *buffer = (uint8_t *)av_malloc(numBytes); if(!buffer) { #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1) av_frame_free(&pFrameRGB); #else avcodec_free_frame(&pFrameRGB); #endif sws_freeContext(sws_ctx); return data; } // detect ffmpeg (>= 100) or libav (< 100) #if (LIBAVUTIL_VERSION_MICRO >= 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,63,100)) || \ (LIBAVUTIL_VERSION_MICRO < 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(54,6,0)) av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, buffer, AV_PIX_FMT_YUV420P, width, height, 1); #else avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_YUVJ420P, width, height); #endif sws_scale( sws_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize ); pOCodecCtx = avcodec_alloc_context3(pOCodec); if(pOCodecCtx == NULL) { #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,52,0) avcodec_free_context(&pOCodecCtx); #else avcodec_close(pOCodecCtx); 
av_free(pOCodecCtx); #endif av_free(buffer); #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1) av_frame_free(&pFrameRGB); #else avcodec_free_frame(&pFrameRGB); #endif sws_freeContext(sws_ctx); return 0; } pOCodecCtx->bit_rate = pCodecCtx->bit_rate; pOCodecCtx->width = width; pOCodecCtx->height = height; pOCodecCtx->pix_fmt = AV_PIX_FMT_YUVJ420P; pOCodecCtx->color_range = AVCOL_RANGE_JPEG; pOCodecCtx->codec_id = AV_CODEC_ID_MJPEG; pOCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO; pOCodecCtx->time_base.num = pCodecCtx->time_base.num; pOCodecCtx->time_base.den = pCodecCtx->time_base.den; AVDictionary *opts = NULL; if(avcodec_open2(pOCodecCtx, pOCodec, &opts) < 0) { #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,52,0) avcodec_free_context(&pOCodecCtx); #else avcodec_close(pOCodecCtx); av_free(pOCodecCtx); #endif av_free(buffer); #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1) av_frame_free(&pFrameRGB); #else avcodec_free_frame(&pFrameRGB); #endif sws_freeContext(sws_ctx); return 0; } av_opt_set_int(pOCodecCtx, "lmin", pOCodecCtx->qmin * FF_QP2LAMBDA, 0); av_opt_set_int(pOCodecCtx, "lmax", pOCodecCtx->qmax * FF_QP2LAMBDA, 0); pOCodecCtx->mb_lmin = pOCodecCtx->qmin * FF_QP2LAMBDA; pOCodecCtx->mb_lmax = pOCodecCtx->qmax * FF_QP2LAMBDA; pOCodecCtx->flags = CODEC_FLAG_QSCALE; pOCodecCtx->global_quality = pOCodecCtx->qmin * FF_QP2LAMBDA; pFrame->pts = 1; pFrame->quality = pOCodecCtx->global_quality; AVPacket pkt; av_init_packet(&pkt); pkt.data = NULL; pkt.size = 0; int gotPacket; avcodec_encode_video2(pOCodecCtx, &pkt, pFrameRGB, &gotPacket); QByteArray buffer2(reinterpret_cast<char *>(pkt.data), pkt.size); #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,52,0) avcodec_free_context(&pOCodecCtx); #else avcodec_close(pOCodecCtx); av_free(pOCodecCtx); #endif av_free(buffer); #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1) av_frame_free(&pFrameRGB); #else avcodec_free_frame(&pFrameRGB); #endif avcodec_close(pOCodecCtx); sws_freeContext(sws_ctx); return buffer2; }
static int seek_test(const char *input_filename, const char *start, const char *end) { AVCodec *codec = NULL; AVCodecContext *ctx= NULL; AVCodecParameters *origin_par = NULL; AVFrame *fr = NULL; AVFormatContext *fmt_ctx = NULL; int video_stream; int result; int i, j; long int start_ts, end_ts; size_of_array = 0; number_of_elements = 0; crc_array = pts_array = NULL; result = avformat_open_input(&fmt_ctx, input_filename, NULL, NULL); if (result < 0) { av_log(NULL, AV_LOG_ERROR, "Can't open file\n"); return result; } result = avformat_find_stream_info(fmt_ctx, NULL); if (result < 0) { av_log(NULL, AV_LOG_ERROR, "Can't get stream info\n"); return result; } start_ts = read_seek_range(start); end_ts = read_seek_range(end); if ((start_ts < 0) || (end_ts < 0)) return -1; //TODO: add ability to work with audio format video_stream = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0); if (video_stream < 0) { av_log(NULL, AV_LOG_ERROR, "Can't find video stream in input file\n"); return -1; } origin_par = fmt_ctx->streams[video_stream]->codecpar; codec = avcodec_find_decoder(origin_par->codec_id); if (!codec) { av_log(NULL, AV_LOG_ERROR, "Can't find decoder\n"); return -1; } ctx = avcodec_alloc_context3(codec); if (!ctx) { av_log(NULL, AV_LOG_ERROR, "Can't allocate decoder context\n"); return AVERROR(ENOMEM); } result = avcodec_parameters_to_context(ctx, origin_par); if (result) { av_log(NULL, AV_LOG_ERROR, "Can't copy decoder context\n"); return result; } result = avcodec_open2(ctx, codec, NULL); if (result < 0) { av_log(ctx, AV_LOG_ERROR, "Can't open decoder\n"); return result; } fr = av_frame_alloc(); if (!fr) { av_log(NULL, AV_LOG_ERROR, "Can't allocate frame\n"); return AVERROR(ENOMEM); } result = compute_crc_of_packets(fmt_ctx, video_stream, ctx, fr, i, j, 1); if (result != 0) return -1; for (i = start_ts; i < end_ts; i += 100) { for (j = i + 100; j < end_ts; j += 100) result = compute_crc_of_packets(fmt_ctx, video_stream, ctx, fr, i, j, 0); if (result != 
0) return -1; } av_freep(&crc_array); av_freep(&pts_array); av_frame_free(&fr); avcodec_close(ctx); avformat_close_input(&fmt_ctx); avcodec_free_context(&ctx); return 0; }
int main(int argc, char* argv[]) { printf("Play simple video\n"); if(argc < 2) { printf("Miss input video"); return -1; } int ret = -1, i = -1, v_stream_idx = -1; char* vf_path = argv[1]; // f**k, fmt_ctx must be inited by NULL AVFormatContext* fmt_ctx = NULL; AVCodecContext* codec_ctx = NULL; AVCodec* codec; AVFrame * frame; AVPacket packet; av_register_all(); ret = avformat_open_input(&fmt_ctx, vf_path, NULL, NULL); if(ret < 0){ printf("Open video file %s failed \n", vf_path); goto end; } if(avformat_find_stream_info(fmt_ctx, NULL)<0) goto end; av_dump_format(fmt_ctx, 0, vf_path, 0); for(i = 0; i< fmt_ctx->nb_streams; i++) { if(fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { v_stream_idx = i; break; } } if(v_stream_idx == -1) { printf("Cannot find video stream\n"); goto end; } codec_ctx = avcodec_alloc_context3(NULL); avcodec_parameters_to_context(codec_ctx, fmt_ctx->streams[v_stream_idx]->codecpar); codec = avcodec_find_decoder(codec_ctx->codec_id); if(codec == NULL){ printf("Unsupported codec for video file\n"); goto end; } if(avcodec_open2(codec_ctx, codec, NULL) < 0){ printf("Can not open codec\n"); goto end; } if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { printf("Could not init SDL due to %s", SDL_GetError()); goto end; } SDL_Window *window; SDL_Renderer *renderer; SDL_Texture *texture; SDL_Event event; SDL_Rect r; window = SDL_CreateWindow("SDL_CreateTexture", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, codec_ctx->width, codec_ctx->height, SDL_WINDOW_RESIZABLE); r.x = 0; r.y = 0; r.w = codec_ctx->width; r.h = codec_ctx->height; renderer = SDL_CreateRenderer(window, -1, 0); // texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_RGBA8888, SDL_TEXTUREACCESS_TARGET, // codec_ctx->width, codec_ctx->height); texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING, codec_ctx->width, codec_ctx->height); struct SwsContext *sws_ctx = NULL; sws_ctx = sws_getContext(codec_ctx->width, 
codec_ctx->height, codec_ctx->pix_fmt, codec_ctx->width, codec_ctx->height, AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL); frame = av_frame_alloc(); int ret1, ret2; AVFrame* pict; pict = av_frame_alloc(); int numBytes; uint8_t *buffer = NULL; numBytes=avpicture_get_size(AV_PIX_FMT_YUV420P, codec_ctx->width, codec_ctx->height); buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t)); // required, or bad dst image pointers avpicture_fill((AVPicture *)pict, buffer, AV_PIX_FMT_YUV420P, codec_ctx->width, codec_ctx->height); i = 0; while (1) { SDL_PollEvent(&event); if(event.type == SDL_QUIT) break; ret = av_read_frame(fmt_ctx, &packet); if(ret <0){ continue; } if(packet.stream_index == v_stream_idx) { ret1 = avcodec_send_packet(codec_ctx, &packet); ret2 = avcodec_receive_frame(codec_ctx, frame); if(ret2 < 0 ){ continue; } sws_scale(sws_ctx, (uint8_t const * const *)frame->data, frame->linesize, 0, codec_ctx->height, pict->data, pict->linesize); // if(++i <=5 ){ // save_frame(pict, codec_ctx->width, codec_ctx->height, i); // } SDL_UpdateYUVTexture(texture, &r, pict->data[0], pict->linesize[0], pict->data[1], pict->linesize[1], pict->data[2], pict->linesize[2]); // SDL_UpdateTexture(texture, &r, pict->data[0], pict->linesize[0]); // r.x=rand()%500; // r.y=rand()%500; // SDL_SetRenderTarget(renderer, texture); // SDL_SetRenderDrawColor(renderer, 0x00, 0x00, 0x00, 0x00); SDL_RenderClear(renderer); // SDL_RenderDrawRect(renderer,&r); // SDL_SetRenderDrawColor(renderer, 0xFF, 0x00, 0x00, 0x00); // SDL_RenderFillRect(renderer, &r); // SDL_SetRenderTarget(renderer, NULL); SDL_RenderCopy(renderer, texture, NULL, NULL); // SDL_RenderCopy(renderer, texture, &r, &r); SDL_RenderPresent(renderer); // SDL_Delay(50); } av_packet_unref(&packet); } SDL_DestroyRenderer(renderer); SDL_Quit(); av_frame_free(&frame); avcodec_close(codec_ctx); avcodec_free_context(&codec_ctx); end: avformat_close_input(&fmt_ctx); printf("Shutdown\n"); return 0; }
int main(int argc, char **argv) { const char *filename, *outfilename; const AVCodec *codec; AVCodecParserContext *parser; AVCodecContext *c= NULL; FILE *f; AVFrame *picture; uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE]; uint8_t *data; size_t data_size; int ret; AVPacket *pkt; if (argc <= 2) { fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]); exit(0); } filename = argv[1]; outfilename = argv[2]; avcodec_register_all(); pkt = av_packet_alloc(); if (!pkt) exit(1); /* set end of buffer to 0 (this ensures that no overreading happens for damaged MPEG streams) */ memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE); /* find the MPEG-1 video decoder */ codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO); if (!codec) { fprintf(stderr, "codec not found\n"); exit(1); } parser = av_parser_init(codec->id); if (!parser) { fprintf(stderr, "parser not found\n"); exit(1); } c = avcodec_alloc_context3(codec); picture = av_frame_alloc(); /* For some codecs, such as msmpeg4 and mpeg4, width and height MUST be initialized there because this information is not available in the bitstream. */ /* open it */ if (avcodec_open2(c, codec, NULL) < 0) { fprintf(stderr, "could not open codec\n"); exit(1); } f = fopen(filename, "rb"); if (!f) { fprintf(stderr, "could not open %s\n", filename); exit(1); } while (!feof(f)) { /* read raw data from the input file */ data_size = fread(inbuf, 1, INBUF_SIZE, f); if (!data_size) break; /* use the parser to split the data into frames */ data = inbuf; while (data_size > 0) { ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size, data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0); if (ret < 0) { fprintf(stderr, "Error while parsing\n"); exit(1); } data += ret; data_size -= ret; if (pkt->size) decode(c, picture, pkt, outfilename); } } /* flush the decoder */ decode(c, picture, NULL, outfilename); fclose(f); av_parser_close(parser); avcodec_free_context(&c); av_frame_free(&picture); av_packet_free(&pkt); return 0; }
Decoder::~Decoder()
{
    // NOTE(review): flush_pkt looks like a shared sentinel packet; it is only
    // cleared here, never freed — confirm its ownership lives elsewhere.
    flush_pkt = NULL;
    av_packet_unref(&pkt);        // drop any buffered packet payload
    avcodec_free_context(&avctx); // closes the codec and NULLs avctx
}
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { const uint64_t fuzz_tag = FUZZ_TAG; FuzzDataBuffer buffer; const uint8_t *last = data; const uint8_t *end = data + size; uint32_t it = 0; int (*decode_handler)(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt) = NULL; if (!c) { #ifdef FFMPEG_DECODER #define DECODER_SYMBOL0(CODEC) ff_##CODEC##_decoder #define DECODER_SYMBOL(CODEC) DECODER_SYMBOL0(CODEC) extern AVCodec DECODER_SYMBOL(FFMPEG_DECODER); avcodec_register(&DECODER_SYMBOL(FFMPEG_DECODER)); c = &DECODER_SYMBOL(FFMPEG_DECODER); #else avcodec_register_all(); c = AVCodecInitialize(FFMPEG_CODEC); // Done once. #endif av_log_set_level(AV_LOG_PANIC); } // Unsupported if (c->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU) return 0; switch (c->type) { case AVMEDIA_TYPE_AUDIO : decode_handler = avcodec_decode_audio4; break; case AVMEDIA_TYPE_VIDEO : decode_handler = avcodec_decode_video2; break; case AVMEDIA_TYPE_SUBTITLE: decode_handler = subtitle_handler ; break; } AVCodecContext* ctx = avcodec_alloc_context3(NULL); if (!ctx) error("Failed memory allocation"); ctx->max_pixels = 4096 * 4096; //To reduce false positive OOM and hangs if (size > 1024) { GetByteContext gbc; bytestream2_init(&gbc, data + size - 1024, 1024); ctx->width = bytestream2_get_le32(&gbc); ctx->height = bytestream2_get_le32(&gbc); ctx->bit_rate = bytestream2_get_le64(&gbc); ctx->bits_per_coded_sample = bytestream2_get_le32(&gbc); if (av_image_check_size(ctx->width, ctx->height, 0, ctx)) ctx->width = ctx->height = 0; size -= 1024; } int res = avcodec_open2(ctx, c, NULL); if (res < 0) { av_free(ctx); return 0; // Failure of avcodec_open2() does not imply that a issue was found } FDBCreate(&buffer); int got_frame; AVFrame *frame = av_frame_alloc(); if (!frame) error("Failed memory allocation"); // Read very simple container AVPacket avpkt; while (data < end && it < maxiteration) { // Search for the TAG while (data + sizeof(fuzz_tag) < end) { if (data[0] == 
(fuzz_tag & 0xFF) && AV_RN64(data) == fuzz_tag) break; data++; } if (data + sizeof(fuzz_tag) > end) data = end; FDBPrepare(&buffer, &avpkt, last, data - last); data += sizeof(fuzz_tag); last = data; // Iterate through all data while (avpkt.size > 0 && it++ < maxiteration) { av_frame_unref(frame); int ret = decode_handler(ctx, frame, &got_frame, &avpkt); if (it > 20) ctx->error_concealment = 0; if (ret <= 0 || ret > avpkt.size) break; if (ctx->codec_type != AVMEDIA_TYPE_AUDIO) ret = avpkt.size; avpkt.data += ret; avpkt.size -= ret; } } av_init_packet(&avpkt); avpkt.data = NULL; avpkt.size = 0; do { got_frame = 0; decode_handler(ctx, frame, &got_frame, &avpkt); } while (got_frame == 1 && it++ < maxiteration); av_frame_free(&frame); avcodec_free_context(&ctx); av_freep(&ctx); FDBDesroy(&buffer); return 0; }
int main(int argc, char **argv) { int ret = 0; AVPacket dec_pkt; AVCodec *enc_codec; if (argc != 4) { fprintf(stderr, "Usage: %s <input file> <encode codec> <output file>\n" "The output format is guessed according to the file extension.\n" "\n", argv[0]); return -1; } ret = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, NULL, NULL, 0); if (ret < 0) { fprintf(stderr, "Failed to create a VAAPI device. Error code: %s\n", av_err2str(ret)); return -1; } if ((ret = open_input_file(argv[1])) < 0) goto end; if (!(enc_codec = avcodec_find_encoder_by_name(argv[2]))) { fprintf(stderr, "Could not find encoder '%s'\n", argv[2]); ret = -1; goto end; } if ((ret = (avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, argv[3]))) < 0) { fprintf(stderr, "Failed to deduce output format from file extension. Error code: " "%s\n", av_err2str(ret)); goto end; } if (!(encoder_ctx = avcodec_alloc_context3(enc_codec))) { ret = AVERROR(ENOMEM); goto end; } ret = avio_open(&ofmt_ctx->pb, argv[3], AVIO_FLAG_WRITE); if (ret < 0) { fprintf(stderr, "Cannot open output file. " "Error code: %s\n", av_err2str(ret)); goto end; } /* read all packets and only transcoding video */ while (ret >= 0) { if ((ret = av_read_frame(ifmt_ctx, &dec_pkt)) < 0) break; if (video_stream == dec_pkt.stream_index) ret = dec_enc(&dec_pkt, enc_codec); av_packet_unref(&dec_pkt); } /* flush decoder */ dec_pkt.data = NULL; dec_pkt.size = 0; ret = dec_enc(&dec_pkt, enc_codec); av_packet_unref(&dec_pkt); /* flush encoder */ ret = encode_write(NULL); /* write the trailer for output stream */ av_write_trailer(ofmt_ctx); end: avformat_close_input(&ifmt_ctx); avformat_close_input(&ofmt_ctx); avcodec_free_context(&decoder_ctx); avcodec_free_context(&encoder_ctx); av_buffer_unref(&hw_device_ctx); return ret; }
// Tear down a subtitle converter: free the last converted subtitle, destroy
// the decoder context, then release priv itself (talloc-owned allocation).
void lavc_conv_uninit(struct lavc_conv *priv)
{
    avsubtitle_free(&priv->cur);
    avcodec_free_context(&priv->avctx);
    talloc_free(priv);
}
bool CDVDVideoCodecFFmpeg::Open(CDVDStreamInfo &hints, CDVDCodecOptions &options) { m_hints = hints; m_options = options; AVCodec* pCodec; m_iOrientation = hints.orientation; for(std::vector<ERenderFormat>::iterator it = options.m_formats.begin(); it != options.m_formats.end(); ++it) { m_formats.push_back((AVPixelFormat)CDVDCodecUtils::PixfmtFromEFormat(*it)); if(*it == RENDER_FMT_YUV420P) m_formats.push_back(AV_PIX_FMT_YUVJ420P); } m_formats.push_back(AV_PIX_FMT_NONE); /* always add none to get a terminated list in ffmpeg world */ pCodec = avcodec_find_decoder(hints.codec); if(pCodec == NULL) { CLog::Log(LOGDEBUG,"CDVDVideoCodecFFmpeg::Open() Unable to find codec %d", hints.codec); return false; } CLog::Log(LOGNOTICE,"CDVDVideoCodecFFmpeg::Open() Using codec: %s",pCodec->long_name ? pCodec->long_name : pCodec->name); m_pCodecContext = avcodec_alloc_context3(pCodec); if (!m_pCodecContext) return false; m_pCodecContext->opaque = (void*)this; m_pCodecContext->debug_mv = 0; m_pCodecContext->debug = 0; m_pCodecContext->workaround_bugs = FF_BUG_AUTODETECT; m_pCodecContext->get_format = GetFormat; m_pCodecContext->codec_tag = hints.codec_tag; // setup threading model if (!hints.software) { bool tryhw = false; #ifdef HAVE_LIBVDPAU if(CSettings::GetInstance().GetBool(CSettings::SETTING_VIDEOPLAYER_USEVDPAU)) tryhw = true; #endif #ifdef HAVE_LIBVA if(CSettings::GetInstance().GetBool(CSettings::SETTING_VIDEOPLAYER_USEVAAPI)) tryhw = true; #endif #ifdef HAS_DX if(CSettings::GetInstance().GetBool(CSettings::SETTING_VIDEOPLAYER_USEDXVA2)) tryhw = true; #endif #ifdef TARGET_DARWIN if(CSettings::GetInstance().GetBool(CSettings::SETTING_VIDEOPLAYER_USEVTB)) tryhw = true; #endif #ifdef HAS_MMAL tryhw = true; #endif if (tryhw && m_decoderState == STATE_NONE) { m_decoderState = STATE_HW_SINGLE; } else { int num_threads = std::min(8 /*MAX_THREADS*/, g_cpuInfo.getCPUCount()); if( num_threads > 1) m_pCodecContext->thread_count = num_threads; m_pCodecContext->thread_safe_callbacks = 1; 
m_decoderState = STATE_SW_MULTI; CLog::Log(LOGDEBUG, "CDVDVideoCodecFFmpeg - open frame threaded with %d threads", num_threads); } } else m_decoderState = STATE_SW_SINGLE; #if defined(TARGET_DARWIN_IOS) // ffmpeg with enabled neon will crash and burn if this is enabled m_pCodecContext->flags &= CODEC_FLAG_EMU_EDGE; #else if (pCodec->id != AV_CODEC_ID_H264 && pCodec->capabilities & CODEC_CAP_DR1 && pCodec->id != AV_CODEC_ID_VP8 ) m_pCodecContext->flags |= CODEC_FLAG_EMU_EDGE; #endif // if we don't do this, then some codecs seem to fail. m_pCodecContext->coded_height = hints.height; m_pCodecContext->coded_width = hints.width; m_pCodecContext->bits_per_coded_sample = hints.bitsperpixel; if( hints.extradata && hints.extrasize > 0 ) { m_pCodecContext->extradata_size = hints.extrasize; m_pCodecContext->extradata = (uint8_t*)av_mallocz(hints.extrasize + FF_INPUT_BUFFER_PADDING_SIZE); memcpy(m_pCodecContext->extradata, hints.extradata, hints.extrasize); } // advanced setting override for skip loop filter (see avcodec.h for valid options) // TODO: allow per video setting? if (g_advancedSettings.m_iSkipLoopFilter != 0) { m_pCodecContext->skip_loop_filter = (AVDiscard)g_advancedSettings.m_iSkipLoopFilter; } // set any special options for(std::vector<CDVDCodecOption>::iterator it = options.m_keys.begin(); it != options.m_keys.end(); ++it) { if (it->m_name == "surfaces") m_uSurfacesCount = atoi(it->m_value.c_str()); else av_opt_set(m_pCodecContext, it->m_name.c_str(), it->m_value.c_str(), 0); } // If non-zero, the decoded audio and video frames returned from avcodec_decode_video2() are reference-counted and are valid indefinitely. // Without this frames will get (deep) copied when deinterlace is set to automatic, but file is not deinterlaced. 
m_pCodecContext->refcounted_frames = 1; if (avcodec_open2(m_pCodecContext, pCodec, nullptr) < 0) { CLog::Log(LOGDEBUG,"CDVDVideoCodecFFmpeg::Open() Unable to open codec"); avcodec_free_context(&m_pCodecContext); return false; } m_pFrame = av_frame_alloc(); if (!m_pFrame) { avcodec_free_context(&m_pCodecContext); return false; } m_pDecodedFrame = av_frame_alloc(); if (!m_pDecodedFrame) { av_frame_free(&m_pFrame); avcodec_free_context(&m_pCodecContext); return false; } m_pFilterFrame = av_frame_alloc(); if (!m_pFilterFrame) { av_frame_free(&m_pFrame); av_frame_free(&m_pDecodedFrame); avcodec_free_context(&m_pCodecContext); return false; } UpdateName(); return true; }
int main(int argc, char* argv[]) { printf("Read few frame and write to image\n"); if(argc < 2) { printf("Missing input video file\n"); return -1; } int ret = -1, i = 0, v_stream_idx = -1; char* vf_path = argv[1]; AVFormatContext* fmt_ctx = NULL; AVCodecContext* codec_ctx = NULL; AVCodec* codec = NULL; AVPacket pkt; AVFrame* frm = NULL; av_register_all(); ret = avformat_open_input(&fmt_ctx, vf_path, NULL, NULL); if(ret < 0){ printf("Open video file %s failed \n", vf_path); goto end; } // i dont know but without this function, sws_getContext does not work if(avformat_find_stream_info(fmt_ctx, NULL)<0) return -1; av_dump_format(fmt_ctx, 0, argv[1], 0); for(i = 0; i < fmt_ctx->nb_streams; i++) { if(fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { v_stream_idx = i; break; } } if(v_stream_idx == -1) { printf("Cannot find video stream\n"); goto end; }else{ printf("Video stream %d with resolution %dx%d\n", v_stream_idx, fmt_ctx->streams[i]->codecpar->width, fmt_ctx->streams[i]->codecpar->height); } codec_ctx = avcodec_alloc_context3(NULL); avcodec_parameters_to_context(codec_ctx, fmt_ctx->streams[v_stream_idx]->codecpar); codec = avcodec_find_decoder(codec_ctx->codec_id); if(codec == NULL){ printf("Unsupported codec for video file\n"); goto end; } ret = avcodec_open2(codec_ctx, codec, NULL); if(ret < 0){ printf("Can not open codec\n"); goto end; } frm = av_frame_alloc(); struct SwsContext *sws_ctx = NULL; AVFrame *pFrameRGB = NULL; int numBytes; uint8_t *buffer = NULL; // Allocate an AVFrame structure pFrameRGB=av_frame_alloc(); if(pFrameRGB==NULL) return -1; // Determine required buffer size and allocate buffer numBytes=avpicture_get_size(AV_PIX_FMT_RGB24, codec_ctx->width, codec_ctx->height); buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t)); sws_ctx = sws_getContext ( codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt, codec_ctx->width, codec_ctx->height, AV_PIX_FMT_RGB24, SWS_BILINEAR, NULL, NULL, NULL ); if(sws_ctx == NULL) { printf("Can not 
use sws\n"); goto end; } avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_RGB24, codec_ctx->width, codec_ctx->height); i=0; int ret1 = -1, ret2 = -1, fi = -1; while(av_read_frame(fmt_ctx, &pkt)>=0) { if(pkt.stream_index == v_stream_idx) { ret1 = avcodec_send_packet(codec_ctx, &pkt); ret2 = avcodec_receive_frame(codec_ctx, frm); printf("ret1 %d ret2 %d\n", ret1, ret2); // avcodec_decode_video2(codec_ctx, frm, &fi, &pkt); } // if not check ret2, error occur [swscaler @ 0x1cb3c40] bad src image pointers // ret2 same as fi // if(fi && ++i <= 5) { if(ret2>= 0 && ++i <= 5) { sws_scale ( sws_ctx, (uint8_t const * const *)frm->data, frm->linesize, 0, codec_ctx->height, pFrameRGB->data, pFrameRGB->linesize ); save_frame(pFrameRGB, codec_ctx->width, codec_ctx->height, i); // save_frame(frm, codec_ctx->width, codec_ctx->height, i); } av_packet_unref(&pkt); if(i>=5){ break; } } av_frame_free(&frm); avcodec_close(codec_ctx); avcodec_free_context(&codec_ctx); end: avformat_close_input(&fmt_ctx); printf("Shutdown\n"); return 0; }
bool VideoEncoderFFmpegPrivate::open() { nb_encoded = 0LL; if (codec_name.isEmpty()) { // copy ctx from muxer by copyAVCodecContext AVCodec *codec = avcodec_find_encoder(avctx->codec_id); AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false); return true; } AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData()); if (!codec) { const AVCodecDescriptor* cd = avcodec_descriptor_get_by_name(codec_name.toUtf8().constData()); if (cd) { codec = avcodec_find_encoder(cd->id); } } if (!codec) { qWarning() << "Can not find encoder for codec " << codec_name; return false; } if (avctx) { avcodec_free_context(&avctx); avctx = 0; } avctx = avcodec_alloc_context3(codec); avctx->width = width; // coded_width works, why? avctx->height = height; // reset format_used to user defined format. important to update default format if format is invalid format_used = format.pixelFormat(); if (format.pixelFormat() == VideoFormat::Format_Invalid) { if (codec->pix_fmts) { qDebug("use first supported pixel format: %d", codec->pix_fmts[0]); format_used = VideoFormat::pixelFormatFromFFmpeg((int)codec->pix_fmts[0]); } else { qWarning("pixel format and supported pixel format are not set. 
use yuv420p"); format_used = VideoFormat::Format_YUV420P; } } //avctx->sample_aspect_ratio = avctx->pix_fmt = (AVPixelFormat)VideoFormat::pixelFormatToFFmpeg(format_used); if (frame_rate > 0) avctx->time_base = av_d2q(1.0/frame_rate, frame_rate*1001.0+2); else avctx->time_base = av_d2q(1.0/VideoEncoder::defaultFrameRate(), VideoEncoder::defaultFrameRate()*1001.0+2); qDebug("size: %dx%d tbc: %f=%d/%d", width, height, av_q2d(avctx->time_base), avctx->time_base.num, avctx->time_base.den); avctx->bit_rate = bit_rate; #if 1 //AVDictionary *dict = 0; if(avctx->codec_id == QTAV_CODEC_ID(H264)) { avctx->gop_size = 10; //avctx->max_b_frames = 3;//h264 av_dict_set(&dict, "preset", "fast", 0); av_dict_set(&dict, "tune", "zerolatency", 0); av_dict_set(&dict, "profile", "main", 0); } #ifdef FF_PROFILE_HEVC_MAIN if(avctx->codec_id == AV_CODEC_ID_HEVC){ av_dict_set(&dict, "preset", "ultrafast", 0); av_dict_set(&dict, "tune", "zero-latency", 0); } #endif //FF_PROFILE_HEVC_MAIN #endif applyOptionsForContext(); AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false); // from mpv ao_lavc const int buffer_size = qMax<int>(qMax<int>(width*height*6+200, FF_MIN_BUFFER_SIZE), sizeof(AVPicture));//?? buffer.resize(buffer_size); return true; }
// Open the audio encoder selected by codec_name (or reuse the context
// copied from the muxer when codec_name is empty). Any format field the
// caller left unset (sample rate / sample format / channel layout) is
// filled from the encoder's supported lists or a hard default, then the
// encoder is opened and the output buffer sized from its frame_size.
// Returns false on any failure.
bool AudioEncoderFFmpegPrivate::open()
{
    if (codec_name.isEmpty()) {
        // copy ctx from muxer by copyAVCodecContext
        AVCodec *codec = avcodec_find_encoder(avctx->codec_id);
        AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
        return true;
    }
    AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData());
    if (!codec) {
        // fall back: treat codec_name as a codec (not encoder) name
        const AVCodecDescriptor* cd = avcodec_descriptor_get_by_name(codec_name.toUtf8().constData());
        if (cd) {
            codec = avcodec_find_encoder(cd->id);
        }
    }
    if (!codec) {
        qWarning() << "Can not find encoder for codec " << codec_name;
        return false;
    }
    if (avctx) {
        avcodec_free_context(&avctx);
        avctx = 0;
    }
    avctx = avcodec_alloc_context3(codec);
    // reset format_used to user defined format. important to update default format if format is invalid
    format_used = format;
    if (format.sampleRate() <= 0) {
        if (codec->supported_samplerates) {
            qDebug("use first supported sample rate: %d", codec->supported_samplerates[0]);
            format_used.setSampleRate(codec->supported_samplerates[0]);
        } else {
            qWarning("sample rate and supported sample rate are not set. use 44100");
            format_used.setSampleRate(44100);
        }
    }
    if (format.sampleFormat() == AudioFormat::SampleFormat_Unknown) {
        if (codec->sample_fmts) {
            qDebug("use first supported sample format: %d", codec->sample_fmts[0]);
            format_used.setSampleFormatFFmpeg((int)codec->sample_fmts[0]);
        } else {
            qWarning("sample format and supported sample format are not set. use s16");
            format_used.setSampleFormat(AudioFormat::SampleFormat_Signed16);
        }
    }
    if (format.channelLayout() == AudioFormat::ChannelLayout_Unsupported) {
        if (codec->channel_layouts) {
            qDebug("use first supported channel layout: %lld", codec->channel_layouts[0]);
            format_used.setChannelLayoutFFmpeg((qint64)codec->channel_layouts[0]);
        } else {
            qWarning("channel layout and supported channel layout are not set. use stero");
            format_used.setChannelLayout(AudioFormat::ChannelLayout_Stero);
        }
    }
    // push the resolved format into the codec context
    avctx->sample_fmt = (AVSampleFormat)format_used.sampleFormatFFmpeg();
    avctx->channel_layout = format_used.channelLayoutFFmpeg();
    avctx->channels = format_used.channels();
    avctx->sample_rate = format_used.sampleRate();
    avctx->bits_per_raw_sample = format_used.bytesPerSample()*8;
    /// set the time base. TODO
    avctx->time_base.num = 1;
    avctx->time_base.den = format_used.sampleRate();
    avctx->bit_rate = bit_rate;
    qDebug() << format_used;
    av_dict_set(&dict, "strict", "-2", 0); //aac, vorbis
    applyOptionsForContext();
    // avctx->frame_size will be set in avcodec_open2
    AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
    // from mpv ao_lavc
    // pcm_hack: raw PCM encoders report frame_size <= 1; pick a large frame
    // count and size the buffer from bytes-per-sample instead.
    int pcm_hack = 0;
    int buffer_size = 0;
    frame_size = avctx->frame_size;
    if (frame_size <= 1)
        pcm_hack = av_get_bits_per_sample(avctx->codec_id)/8;
    if (pcm_hack) {
        frame_size = 16384; // "enough"
        buffer_size = frame_size*pcm_hack*format_used.channels()*2+200;
    } else {
        buffer_size = frame_size*format_used.bytesPerSample()*format_used.channels()*2+200;
    }
    if (buffer_size < FF_MIN_BUFFER_SIZE)
        buffer_size = FF_MIN_BUFFER_SIZE;
    buffer.resize(buffer_size);
    return true;
}
bool VideoEncoderFFmpegPrivate::open() { nb_encoded = 0LL; if (codec_name.isEmpty()) { // copy ctx from muxer by copyAVCodecContext AVCodec *codec = avcodec_find_encoder(avctx->codec_id); AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false); return true; } AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData()); if (!codec) { const AVCodecDescriptor* cd = avcodec_descriptor_get_by_name(codec_name.toUtf8().constData()); if (cd) { codec = avcodec_find_encoder(cd->id); } } if (!codec) { qWarning() << "Can not find encoder for codec " << codec_name; return false; } if (avctx) { avcodec_free_context(&avctx); avctx = 0; } avctx = avcodec_alloc_context3(codec); avctx->width = width; // coded_width works, why? avctx->height = height; // reset format_used to user defined format. important to update default format if format is invalid format_used = VideoFormat::Format_Invalid; AVPixelFormat fffmt = (AVPixelFormat)format.pixelFormatFFmpeg(); if (codec->pix_fmts && format.isValid()) { for (int i = 0; codec->pix_fmts[i] != AVPixelFormat(-1); ++i) { if (fffmt == codec->pix_fmts[i]) { format_used = format.pixelFormat(); break; } } } //avctx->sample_aspect_ratio = AVPixelFormat hwfmt = AVPixelFormat(-1); if (av_pix_fmt_desc_get(codec->pix_fmts[0])->flags & AV_PIX_FMT_FLAG_HWACCEL) hwfmt = codec->pix_fmts[0]; bool use_hwctx = false; if (hwfmt != AVPixelFormat(-1)) { #ifdef HAVE_AVHWCTX const AVHWDeviceType dt = fromHWAName(codec_name.section(QChar('_'), -1).toUtf8().constData()); if (dt != AVHWDeviceType(-1)) { use_hwctx = true; avctx->pix_fmt = hwfmt; hw_device_ctx = NULL; AV_ENSURE(av_hwdevice_ctx_create(&hw_device_ctx, dt, hwdev.toLatin1().constData(), NULL, 0), false); avctx->hw_frames_ctx = av_hwframe_ctx_alloc(hw_device_ctx); if (!avctx->hw_frames_ctx) { qWarning("Failed to create hw frame context for '%s'", codec_name.toLatin1().constData()); return false; } // get sw formats const void *hwcfg = NULL; AVHWFramesConstraints *constraints = 
av_hwdevice_get_hwframe_constraints(hw_device_ctx, hwcfg); const AVPixelFormat* in_fmts = constraints->valid_sw_formats; AVPixelFormat sw_fmt = AVPixelFormat(-1); if (in_fmts) { sw_fmt = in_fmts[0]; while (*in_fmts != AVPixelFormat(-1)) { if (*in_fmts == fffmt) sw_fmt = *in_fmts; sw_fmts.append(*in_fmts); ++in_fmts; } } else { sw_fmt = QTAV_PIX_FMT_C(YUV420P); } av_hwframe_constraints_free(&constraints); format_used = VideoFormat::pixelFormatFromFFmpeg(sw_fmt); // encoder surface pool parameters AVHWFramesContext* hwfs = (AVHWFramesContext*)avctx->hw_frames_ctx->data; hwfs->format = hwfmt; // must the same as avctx->pix_fmt hwfs->sw_format = sw_fmt; // if it's not set, vaapi will choose the last valid_sw_formats, but that's wrong for vaGetImage/DeriveImage. nvenc always need sw_format // hw upload parameters. encoder's hwframes is just for parameter checking, will never be intialized, so we allocate an individual one. hwframes_ref = av_hwframe_ctx_alloc(hw_device_ctx); if (!hwframes_ref) { qWarning("Failed to create hw frame context for uploading '%s'", codec_name.toLatin1().constData()); } else { hwframes = (AVHWFramesContext*)hwframes_ref->data; hwframes->format = hwfmt; } } #endif //HAVE_AVHWCTX } if (!use_hwctx) { // no hw device (videotoolbox, wrong device name etc.), or old ffmpeg // TODO: check frame is hw frame if (hwfmt == AVPixelFormat(-1)) { // sw enc if (format_used == VideoFormat::Format_Invalid) {// requested format is not supported by sw enc if (codec->pix_fmts) { //pix_fmts[0] is always a sw format here qDebug("use first supported pixel format '%d' for sw encoder", codec->pix_fmts[0]); format_used = VideoFormat::pixelFormatFromFFmpeg((int)codec->pix_fmts[0]); } } } else { if (format_used == VideoFormat::Format_Invalid) { // requested format is not supported by hw enc qDebug("use first supported sw pixel format '%d' for hw encoder", codec->pix_fmts[1]); if (codec->pix_fmts && codec->pix_fmts[1] != AVPixelFormat(-1)) format_used = 
VideoFormat::pixelFormatFromFFmpeg(codec->pix_fmts[1]); } } if (format_used == VideoFormat::Format_Invalid) { qWarning("fallback to yuv420p"); format_used = VideoFormat::Format_YUV420P; } avctx->pix_fmt = (AVPixelFormat)VideoFormat::pixelFormatToFFmpeg(format_used); } if (frame_rate > 0) avctx->time_base = av_d2q(1.0/frame_rate, frame_rate*1001.0+2); else avctx->time_base = av_d2q(1.0/VideoEncoder::defaultFrameRate(), VideoEncoder::defaultFrameRate()*1001.0+2); qDebug("size: %dx%d tbc: %f=%d/%d", width, height, av_q2d(avctx->time_base), avctx->time_base.num, avctx->time_base.den); avctx->bit_rate = bit_rate; //AVDictionary *dict = 0; if(avctx->codec_id == QTAV_CODEC_ID(H264)) { avctx->gop_size = 10; //avctx->max_b_frames = 3;//h264 av_dict_set(&dict, "preset", "fast", 0); //x264 av_dict_set(&dict, "tune", "zerolatency", 0); //x264 //av_dict_set(&dict, "profile", "main", 0); // conflict with vaapi (int values) } if(avctx->codec_id == AV_CODEC_ID_HEVC){ av_dict_set(&dict, "preset", "ultrafast", 0); av_dict_set(&dict, "tune", "zero-latency", 0); } if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO) { av_dict_set(&dict, "strict", "-2", 0); // mpeg2 arbitrary fps } applyOptionsForContext(); AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false); // from mpv ao_lavc const int buffer_size = qMax<int>(qMax<int>(width*height*6+200, FF_MIN_BUFFER_SIZE), sizeof(AVPicture));//?? buffer.resize(buffer_size); return true; }
int main(int argc, char **argv) { const char *filename; const AVCodec *codec; AVCodecContext *c = NULL; AVFrame *frame; AVPacket *pkt; int i, j, k, ret; FILE *f; uint16_t *samples; float t, tincr; if (argc <= 1) { fprintf(stderr, "Usage: %s <output file>\n", argv[0]); return 0; } filename = argv[1]; /* find the MP2 encoder */ codec = avcodec_find_encoder(AV_CODEC_ID_MP2); if (!codec) { fprintf(stderr, "Codec not found\n"); exit(1); } c = avcodec_alloc_context3(codec); if (!c) { fprintf(stderr, "Could not allocate audio codec context\n"); exit(1); } /* put sample parameters */ c->bit_rate = 64000; /* check that the encoder supports s16 pcm input */ c->sample_fmt = AV_SAMPLE_FMT_S16; if (!check_sample_fmt(codec, c->sample_fmt)) { fprintf(stderr, "Encoder does not support sample format %s", av_get_sample_fmt_name(c->sample_fmt)); exit(1); } /* select other audio parameters supported by the encoder */ c->sample_rate = select_sample_rate(codec); c->channel_layout = select_channel_layout(codec); c->channels = av_get_channel_layout_nb_channels(c->channel_layout); /* open it */ if (avcodec_open2(c, codec, NULL) < 0) { fprintf(stderr, "Could not open codec\n"); exit(1); } f = fopen(filename, "wb"); if (!f) { fprintf(stderr, "Could not open %s\n", filename); exit(1); } /* packet for holding encoded output */ pkt = av_packet_alloc(); if (!pkt) { fprintf(stderr, "could not allocate the packet\n"); exit(1); } /* frame containing input raw audio */ frame = av_frame_alloc(); if (!frame) { fprintf(stderr, "Could not allocate audio frame\n"); exit(1); } frame->nb_samples = c->frame_size; frame->format = c->sample_fmt; frame->channel_layout = c->channel_layout; /* allocate the data buffers */ ret = av_frame_get_buffer(frame, 0); if (ret < 0) { fprintf(stderr, "Could not allocate audio data buffers\n"); exit(1); } /* encode a single tone sound */ t = 0; tincr = 2 * M_PI * 440.0 / c->sample_rate; for (i = 0; i < 200; i++) { /* make sure the frame is writable -- makes a copy if the 
encoder * kept a reference internally */ ret = av_frame_make_writable(frame); if (ret < 0) exit(1); samples = (uint16_t*)frame->data[0]; for (j = 0; j < c->frame_size; j++) { samples[2 * j] = (int)(sin(t) * 10000); for (k = 1; k < c->channels; k++) samples[2 * j + k] = samples[2 * j]; t += tincr; } encode(c, frame, pkt, f); } /* flush the encoder */ encode(c, NULL, pkt, f); fclose(f); av_frame_free(&frame); av_packet_free(&pkt); avcodec_free_context(&c); return 0; }
bool CFFmpegImage::Initialize(unsigned char* buffer, size_t bufSize) { int bufferSize = 4096; uint8_t* fbuffer = (uint8_t*)av_malloc(bufferSize + AV_INPUT_BUFFER_PADDING_SIZE); if (!fbuffer) { CLog::LogF(LOGERROR, "Could not allocate buffer"); return false; } m_buf.data = buffer; m_buf.size = bufSize; m_buf.pos = 0; m_ioctx = avio_alloc_context(fbuffer, bufferSize, 0, &m_buf, mem_file_read, NULL, mem_file_seek); if (!m_ioctx) { av_free(fbuffer); CLog::LogF(LOGERROR, "Could not allocate AVIOContext"); return false; } // signal to ffmepg this is not streaming protocol m_ioctx->max_packet_size = bufferSize; m_fctx = avformat_alloc_context(); if (!m_fctx) { FreeIOCtx(&m_ioctx); CLog::LogF(LOGERROR, "Could not allocate AVFormatContext"); return false; } m_fctx->pb = m_ioctx; // Some clients have pngs saved as jpeg or ask us for png but are jpeg // mythv throws all mimetypes away and asks us with application/octet-stream // this is poor man's fallback to at least identify png / jpeg bool is_jpeg = (bufSize > 2 && buffer[0] == 0xFF && buffer[1] == 0xD8 && buffer[2] == 0xFF); bool is_png = (bufSize > 3 && buffer[1] == 'P' && buffer[2] == 'N' && buffer[3] == 'G'); bool is_tiff = (bufSize > 2 && buffer[0] == 'I' && buffer[1] == 'I' && buffer[2] == '*'); AVInputFormat* inp = nullptr; if (is_jpeg) inp = av_find_input_format("jpeg_pipe"); else if (m_strMimeType == "image/apng") inp = av_find_input_format("apng"); else if (is_png) inp = av_find_input_format("png_pipe"); else if (is_tiff) inp = av_find_input_format("tiff_pipe"); else if (m_strMimeType == "image/jp2") inp = av_find_input_format("j2k_pipe"); else if (m_strMimeType == "image/webp") inp = av_find_input_format("webp_pipe"); // brute force parse if above check already failed else if (m_strMimeType == "image/jpeg" || m_strMimeType == "image/jpg") inp = av_find_input_format("jpeg_pipe"); else if (m_strMimeType == "image/png") inp = av_find_input_format("png_pipe"); else if (m_strMimeType == "image/tiff") inp = 
av_find_input_format("tiff_pipe"); else if (m_strMimeType == "image/gif") inp = av_find_input_format("gif"); if (avformat_open_input(&m_fctx, NULL, inp, NULL) < 0) { CLog::Log(LOGERROR, "Could not find suitable input format: %s", m_strMimeType.c_str()); avformat_close_input(&m_fctx); FreeIOCtx(&m_ioctx); return false; } if (m_fctx->nb_streams <= 0) { avformat_close_input(&m_fctx); FreeIOCtx(&m_ioctx); return false; } AVCodecParameters* codec_params = m_fctx->streams[0]->codecpar; AVCodec* codec = avcodec_find_decoder(codec_params->codec_id); m_codec_ctx = avcodec_alloc_context3(codec); if (!m_codec_ctx) { avformat_close_input(&m_fctx); FreeIOCtx(&m_ioctx); return false; } if (avcodec_parameters_to_context(m_codec_ctx, codec_params) < 0) { avformat_close_input(&m_fctx); avcodec_free_context(&m_codec_ctx); FreeIOCtx(&m_ioctx); return false; } if (avcodec_open2(m_codec_ctx, codec, NULL) < 0) { avformat_close_input(&m_fctx); avcodec_free_context(&m_codec_ctx); FreeIOCtx(&m_ioctx); return false; } return true; }
// Tear down the encoder: flush internal state first, then release the
// resampler and codec context (both free calls are NULL-safe).
CAEEncoderFFmpeg::~CAEEncoderFFmpeg()
{
  Reset();
  swr_free(&m_SwrCtx);
  avcodec_free_context(&m_CodecCtx);
}
FFmpegVideo::FFmpegVideo() { avcodec_register_all(); // Encoding encoding_codec = NULL ; encoding_frame_buffer = NULL ; encoding_context = NULL ; //AVCodecID codec_id = AV_CODEC_ID_H264 ; //AVCodecID codec_id = AV_CODEC_ID_MPEG2VIDEO; #if LIBAVCODEC_VERSION_MAJOR < 54 CodecID codec_id = CODEC_ID_MPEG4; #else AVCodecID codec_id = AV_CODEC_ID_MPEG4; #endif /* find the video encoder */ encoding_codec = avcodec_find_encoder(codec_id); if (!encoding_codec) std::cerr << "AV codec not found for codec id " << std::endl; if (!encoding_codec) throw std::runtime_error("AV codec not found for codec id ") ; encoding_context = avcodec_alloc_context3(encoding_codec); if (!encoding_context) std::cerr << "AV: Could not allocate video codec encoding context" << std::endl; if (!encoding_context) throw std::runtime_error("AV: Could not allocate video codec encoding context"); /* put sample parameters */ encoding_context->bit_rate = 10*1024 ; // default bitrate is 30KB/s encoding_context->bit_rate_tolerance = encoding_context->bit_rate ; #ifdef USE_VARIABLE_BITRATE encoding_context->rc_min_rate = 0; encoding_context->rc_max_rate = 10*1024;//encoding_context->bit_rate; encoding_context->rc_buffer_size = 10*1024*1024; encoding_context->rc_initial_buffer_occupancy = (int) ( 0.9 * encoding_context->rc_buffer_size); encoding_context->rc_max_available_vbv_use = 1.0; encoding_context->rc_min_vbv_overflow_use = 0.0; #else encoding_context->rc_min_rate = 0; encoding_context->rc_max_rate = 0; encoding_context->rc_buffer_size = 0; #endif if (encoding_codec->capabilities & CODEC_CAP_TRUNCATED) encoding_context->flags |= CODEC_FLAG_TRUNCATED; encoding_context->flags |= CODEC_FLAG_PSNR;//Peak signal-to-noise ratio encoding_context->flags |= CODEC_CAP_PARAM_CHANGE; encoding_context->i_quant_factor = 0.769f; encoding_context->b_quant_factor = 1.4f; encoding_context->time_base.num = 1; encoding_context->time_base.den = 15;//framesPerSecond; encoding_context->qmin = 1; encoding_context->qmax = 51; 
encoding_context->max_qdiff = 4; //encoding_context->me_method = ME_HEX; //encoding_context->max_b_frames = 4; //encoding_context->flags |= CODEC_FLAG_LOW_DELAY; // MPEG2 only //encoding_context->partitions = X264_PART_I4X4 | X264_PART_I8X8 | X264_PART_P8X8 | X264_PART_P4X4 | X264_PART_B8X8; //encoding_context->crf = 0.0f; //encoding_context->cqp = 26; /* resolution must be a multiple of two */ encoding_context->width = 640;//176; encoding_context->height = 480;//144; /* frames per second */ encoding_context->time_base = av_make_q(1, 25); /* emit one intra frame every ten frames * check frame pict_type before passing frame * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I * then gop_size is ignored and the output of encoder * will always be I frame irrespective to gop_size */ encoding_context->gop_size = 100; //encoding_context->max_b_frames = 1; #if LIBAVCODEC_VERSION_MAJOR < 54 encoding_context->pix_fmt = PIX_FMT_YUV420P; //context->pix_fmt = PIX_FMT_RGB24; if (codec_id == CODEC_ID_H264) { #else encoding_context->pix_fmt = AV_PIX_FMT_YUV420P; //context->pix_fmt = AV_PIX_FMT_RGB24; if (codec_id == AV_CODEC_ID_H264) { #endif av_opt_set(encoding_context->priv_data, "preset", "slow", 0); } /* open it */ if (avcodec_open2(encoding_context, encoding_codec, NULL) < 0) { std::cerr << "AV: Could not open codec context. Something's wrong." << std::endl; throw std::runtime_error( "AV: Could not open codec context. Something's wrong."); } #if (LIBAVCODEC_VERSION_MAJOR < 57) | (LIBAVCODEC_VERSION_MAJOR == 57 && LIBAVCODEC_VERSION_MINOR <3 ) encoding_frame_buffer = avcodec_alloc_frame() ;//(AVFrame*)malloc(sizeof(AVFrame)) ; #else encoding_frame_buffer = av_frame_alloc() ; #endif if(!encoding_frame_buffer) std::cerr << "AV: could not allocate frame buffer." 
<< std::endl; if(!encoding_frame_buffer) throw std::runtime_error("AV: could not allocate frame buffer.") ; encoding_frame_buffer->format = encoding_context->pix_fmt; encoding_frame_buffer->width = encoding_context->width; encoding_frame_buffer->height = encoding_context->height; /* the image can be allocated by any means and av_image_alloc() is * just the most convenient way if av_malloc() is to be used */ int ret = av_image_alloc(encoding_frame_buffer->data, encoding_frame_buffer->linesize, encoding_context->width, encoding_context->height, encoding_context->pix_fmt, 32); if (ret < 0) std::cerr << "AV: Could not allocate raw picture buffer" << std::endl; if (ret < 0) throw std::runtime_error("AV: Could not allocate raw picture buffer"); encoding_frame_count = 0 ; // Decoding decoding_codec = avcodec_find_decoder(codec_id); if (!decoding_codec) std::cerr << "AV codec not found for codec id " << std::endl; if (!decoding_codec) throw("AV codec not found for codec id ") ; decoding_context = avcodec_alloc_context3(decoding_codec); if(!decoding_context) std::cerr << "AV: Could not allocate video codec decoding context" << std::endl; if(!decoding_context) throw std::runtime_error("AV: Could not allocate video codec decoding context"); decoding_context->width = encoding_context->width; decoding_context->height = encoding_context->height; #if LIBAVCODEC_VERSION_MAJOR < 54 decoding_context->pix_fmt = PIX_FMT_YUV420P; #else decoding_context->pix_fmt = AV_PIX_FMT_YUV420P; #endif if(decoding_codec->capabilities & CODEC_CAP_TRUNCATED) decoding_context->flags |= CODEC_FLAG_TRUNCATED; // we do not send complete frames //we can receive truncated frames decoding_context->flags2 |= CODEC_FLAG2_CHUNKS; AVDictionary* dictionary = NULL; if(avcodec_open2(decoding_context, decoding_codec, &dictionary) < 0) { std::cerr << "AV codec open action failed! " << std::endl; throw("AV codec open action failed! 
") ; } //decoding_frame_buffer = avcodec_alloc_frame() ;//(AVFrame*)malloc(sizeof(AVFrame)) ; decoding_frame_buffer = av_frame_alloc() ; av_init_packet(&decoding_buffer); decoding_buffer.data = NULL ; decoding_buffer.size = 0 ; //ret = av_image_alloc(decoding_frame_buffer->data, decoding_frame_buffer->linesize, decoding_context->width, decoding_context->height, decoding_context->pix_fmt, 32); //if (ret < 0) //throw std::runtime_error("AV: Could not allocate raw picture buffer"); // debug #ifdef DEBUG_MPEG_VIDEO std::cerr << "Dumping captured data to file tmpvideo.mpg" << std::endl; encoding_debug_file = fopen("tmpvideo.mpg","w") ; #endif } FFmpegVideo::~FFmpegVideo() { avcodec_free_context(&encoding_context); avcodec_free_context(&decoding_context); av_frame_free(&encoding_frame_buffer); av_frame_free(&decoding_frame_buffer); } #define MAX_FFMPEG_ENCODING_BITRATE 81920 bool FFmpegVideo::encodeData(const QImage& image, uint32_t target_encoding_bitrate, RsVOIPDataChunk& voip_chunk) { #ifdef DEBUG_MPEG_VIDEO std::cerr << "Encoding frame of size " << image.width() << "x" << image.height() << ", resized to " << encoding_frame_buffer->width << "x" << encoding_frame_buffer->height << " : "; #endif QImage input ; if(target_encoding_bitrate > MAX_FFMPEG_ENCODING_BITRATE) { std::cerr << "Max encodign bitrate eexceeded. 
Capping to " << MAX_FFMPEG_ENCODING_BITRATE << std::endl; target_encoding_bitrate = MAX_FFMPEG_ENCODING_BITRATE ; } //encoding_context->bit_rate = target_encoding_bitrate; encoding_context->rc_max_rate = target_encoding_bitrate; //encoding_context->bit_rate_tolerance = target_encoding_bitrate; if(image.width() != encoding_frame_buffer->width || image.height() != encoding_frame_buffer->height) input = image.scaled(QSize(encoding_frame_buffer->width,encoding_frame_buffer->height),Qt::IgnoreAspectRatio,Qt::SmoothTransformation) ; else input = image ; /* prepare a dummy image */ /* Y */ for (int y = 0; y < encoding_context->height/2; y++) for (int x = 0; x < encoding_context->width/2; x++) { QRgb pix00 = input.pixel(QPoint(2*x+0,2*y+0)) ; QRgb pix01 = input.pixel(QPoint(2*x+0,2*y+1)) ; QRgb pix10 = input.pixel(QPoint(2*x+1,2*y+0)) ; QRgb pix11 = input.pixel(QPoint(2*x+1,2*y+1)) ; int R00 = (pix00 >> 16) & 0xff ; int G00 = (pix00 >> 8) & 0xff ; int B00 = (pix00 >> 0) & 0xff ; int R01 = (pix01 >> 16) & 0xff ; int G01 = (pix01 >> 8) & 0xff ; int B01 = (pix01 >> 0) & 0xff ; int R10 = (pix10 >> 16) & 0xff ; int G10 = (pix10 >> 8) & 0xff ; int B10 = (pix10 >> 0) & 0xff ; int R11 = (pix11 >> 16) & 0xff ; int G11 = (pix11 >> 8) & 0xff ; int B11 = (pix11 >> 0) & 0xff ; int Y00 = (0.257 * R00) + (0.504 * G00) + (0.098 * B00) + 16 ; int Y01 = (0.257 * R01) + (0.504 * G01) + (0.098 * B01) + 16 ; int Y10 = (0.257 * R10) + (0.504 * G10) + (0.098 * B10) + 16 ; int Y11 = (0.257 * R11) + (0.504 * G11) + (0.098 * B11) + 16 ; float R = 0.25*(R00+R01+R10+R11) ; float G = 0.25*(G00+G01+G10+G11) ; float B = 0.25*(B00+B01+B10+B11) ; int U = (0.439 * R) - (0.368 * G) - (0.071 * B) + 128 ; int V = -(0.148 * R) - (0.291 * G) + (0.439 * B) + 128 ; encoding_frame_buffer->data[0][(2*y+0) * encoding_frame_buffer->linesize[0] + 2*x+0] = std::min(255,std::max(0,Y00)); // Y encoding_frame_buffer->data[0][(2*y+0) * encoding_frame_buffer->linesize[0] + 2*x+1] = std::min(255,std::max(0,Y01)); // Y 
encoding_frame_buffer->data[0][(2*y+1) * encoding_frame_buffer->linesize[0] + 2*x+0] = std::min(255,std::max(0,Y10)); // Y encoding_frame_buffer->data[0][(2*y+1) * encoding_frame_buffer->linesize[0] + 2*x+1] = std::min(255,std::max(0,Y11)); // Y encoding_frame_buffer->data[1][y * encoding_frame_buffer->linesize[1] + x] = std::min(255,std::max(0,U));// Cr encoding_frame_buffer->data[2][y * encoding_frame_buffer->linesize[2] + x] = std::min(255,std::max(0,V));// Cb } encoding_frame_buffer->pts = encoding_frame_count++; /* encode the image */ int got_output = 0; AVPacket pkt ; av_init_packet(&pkt); #if LIBAVCODEC_VERSION_MAJOR < 54 pkt.size = avpicture_get_size(encoding_context->pix_fmt, encoding_context->width, encoding_context->height); pkt.data = (uint8_t*)av_malloc(pkt.size); // do // { int ret = avcodec_encode_video(encoding_context, pkt.data, pkt.size, encoding_frame_buffer) ; if (ret > 0) { got_output = ret; } #else pkt.data = NULL; // packet data will be allocated by the encoder pkt.size = 0; // do // { int ret = avcodec_encode_video2(encoding_context, &pkt, encoding_frame_buffer, &got_output) ; #endif if (ret < 0) { std::cerr << "Error encoding frame!" << std::endl; return false ; } // frame = NULL ; // next attempts: do not encode anything. 
Do this to just flush the buffer // // } while(got_output) ; if(got_output) { voip_chunk.data = rs_malloc(pkt.size + HEADER_SIZE) ; if(!voip_chunk.data) return false ; uint32_t flags = 0; ((unsigned char *)voip_chunk.data)[0] = VideoProcessor::VIDEO_PROCESSOR_CODEC_ID_MPEG_VIDEO & 0xff ; ((unsigned char *)voip_chunk.data)[1] = (VideoProcessor::VIDEO_PROCESSOR_CODEC_ID_MPEG_VIDEO >> 8) & 0xff ; ((unsigned char *)voip_chunk.data)[2] = flags & 0xff ; ((unsigned char *)voip_chunk.data)[3] = (flags >> 8) & 0xff ; memcpy(&((unsigned char*)voip_chunk.data)[HEADER_SIZE],pkt.data,pkt.size) ; voip_chunk.size = pkt.size + HEADER_SIZE; voip_chunk.type = RsVOIPDataChunk::RS_VOIP_DATA_TYPE_VIDEO ; #ifdef DEBUG_MPEG_VIDEO std::cerr << "Output : " << pkt.size << " bytes." << std::endl; fwrite(pkt.data,1,pkt.size,encoding_debug_file) ; fflush(encoding_debug_file) ; #endif av_free_packet(&pkt); return true ; } else {
bool CAEEncoderFFmpeg::Initialize(AEAudioFormat &format, bool allow_planar_input) { Reset(); bool ac3 = CServiceBroker::GetSettings().GetBool(CSettings::SETTING_AUDIOOUTPUT_AC3PASSTHROUGH); AVCodec *codec = NULL; /* fallback to ac3 if we support it, we might not have DTS support */ if (!codec && ac3) { m_CodecName = "AC3"; m_CodecID = AV_CODEC_ID_AC3; m_BitRate = AC3_ENCODE_BITRATE; codec = avcodec_find_encoder(m_CodecID); } /* check we got the codec */ if (!codec) return false; m_CodecCtx = avcodec_alloc_context3(codec); if (!m_CodecCtx) return false; m_CodecCtx->bit_rate = m_BitRate; m_CodecCtx->sample_rate = format.m_sampleRate; m_CodecCtx->channel_layout = AV_CH_LAYOUT_5POINT1_BACK; /* select a suitable data format */ if (codec->sample_fmts) { bool hasFloat = false; bool hasDouble = false; bool hasS32 = false; bool hasS16 = false; bool hasU8 = false; bool hasFloatP = false; bool hasUnknownFormat = false; for(int i = 0; codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; ++i) { switch (codec->sample_fmts[i]) { case AV_SAMPLE_FMT_FLT: hasFloat = true; break; case AV_SAMPLE_FMT_DBL: hasDouble = true; break; case AV_SAMPLE_FMT_S32: hasS32 = true; break; case AV_SAMPLE_FMT_S16: hasS16 = true; break; case AV_SAMPLE_FMT_U8 : hasU8 = true; break; case AV_SAMPLE_FMT_FLTP: if (allow_planar_input) hasFloatP = true; else hasUnknownFormat = true; break; case AV_SAMPLE_FMT_NONE: return false; default: hasUnknownFormat = true; break; } } if (hasFloat) { m_CodecCtx->sample_fmt = AV_SAMPLE_FMT_FLT; format.m_dataFormat = AE_FMT_FLOAT; } else if (hasFloatP) { m_CodecCtx->sample_fmt = AV_SAMPLE_FMT_FLTP; format.m_dataFormat = AE_FMT_FLOATP; } else if (hasDouble) { m_CodecCtx->sample_fmt = AV_SAMPLE_FMT_DBL; format.m_dataFormat = AE_FMT_DOUBLE; } else if (hasS32) { m_CodecCtx->sample_fmt = AV_SAMPLE_FMT_S32; format.m_dataFormat = AE_FMT_S32NE; } else if (hasS16) { m_CodecCtx->sample_fmt = AV_SAMPLE_FMT_S16; format.m_dataFormat = AE_FMT_S16NE; } else if (hasU8) { m_CodecCtx->sample_fmt = 
AV_SAMPLE_FMT_U8; format.m_dataFormat = AE_FMT_U8; } else if (hasUnknownFormat) { m_CodecCtx->sample_fmt = codec->sample_fmts[0]; format.m_dataFormat = AE_FMT_FLOAT; m_NeedConversion = true; CLog::Log(LOGNOTICE, "CAEEncoderFFmpeg::Initialize - Unknown audio format, it will be resampled."); } else { CLog::Log(LOGERROR, "CAEEncoderFFmpeg::Initialize - Unable to find a suitable data format for the codec (%s)", m_CodecName.c_str()); avcodec_free_context(&m_CodecCtx); return false; } } m_CodecCtx->channels = BuildChannelLayout(m_CodecCtx->channel_layout, m_Layout); /* open the codec */ if (avcodec_open2(m_CodecCtx, codec, NULL)) { avcodec_free_context(&m_CodecCtx); return false; } format.m_frames = m_CodecCtx->frame_size; format.m_frameSize = m_CodecCtx->channels * (CAEUtil::DataFormatToBits(format.m_dataFormat) >> 3); format.m_channelLayout = m_Layout; m_CurrentFormat = format; m_NeededFrames = format.m_frames; m_OutputRatio = (double)m_NeededFrames / m_OutputSize; m_SampleRateMul = 1.0 / (double)m_CodecCtx->sample_rate; if (m_NeedConversion) { m_SwrCtx = swr_alloc_set_opts(NULL, m_CodecCtx->channel_layout, m_CodecCtx->sample_fmt, m_CodecCtx->sample_rate, m_CodecCtx->channel_layout, AV_SAMPLE_FMT_FLT, m_CodecCtx->sample_rate, 0, NULL); if (!m_SwrCtx || swr_init(m_SwrCtx) < 0) { CLog::Log(LOGERROR, "CAEEncoderFFmpeg::Initialize - Failed to initialise resampler."); swr_free(&m_SwrCtx); avcodec_free_context(&m_CodecCtx); return false; } } CLog::Log(LOGNOTICE, "CAEEncoderFFmpeg::Initialize - %s encoder ready", m_CodecName.c_str()); return true; }
int main(int argc, char **argv) { const char *outfilename, *filename; const AVCodec *codec; AVCodecContext *c= NULL; int len; FILE *f, *outfile; uint8_t inbuf[AUDIO_INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE]; AVPacket avpkt; AVFrame *decoded_frame = NULL; if (argc <= 2) { fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]); exit(0); } filename = argv[1]; outfilename = argv[2]; /* register all the codecs */ avcodec_register_all(); av_init_packet(&avpkt); /* find the MPEG audio decoder */ codec = avcodec_find_decoder(AV_CODEC_ID_MP2); if (!codec) { fprintf(stderr, "codec not found\n"); exit(1); } c = avcodec_alloc_context3(codec); /* open it */ if (avcodec_open2(c, codec, NULL) < 0) { fprintf(stderr, "could not open codec\n"); exit(1); } f = fopen(filename, "rb"); if (!f) { fprintf(stderr, "could not open %s\n", filename); exit(1); } outfile = fopen(outfilename, "wb"); if (!outfile) { av_free(c); exit(1); } /* decode until eof */ avpkt.data = inbuf; avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f); while (avpkt.size > 0) { int got_frame = 0; if (!decoded_frame) { if (!(decoded_frame = av_frame_alloc())) { fprintf(stderr, "out of memory\n"); exit(1); } } len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt); if (len < 0) { fprintf(stderr, "Error while decoding\n"); exit(1); } if (got_frame) { /* if a frame has been decoded, output it */ int data_size = av_samples_get_buffer_size(NULL, c->channels, decoded_frame->nb_samples, c->sample_fmt, 1); fwrite(decoded_frame->data[0], 1, data_size, outfile); } avpkt.size -= len; avpkt.data += len; if (avpkt.size < AUDIO_REFILL_THRESH) { /* Refill the input buffer, to avoid trying to decode * incomplete frames. Instead of this, one could also use * a parser, or use a proper container format through * libavformat. 
*/ memmove(inbuf, avpkt.data, avpkt.size); avpkt.data = inbuf; len = fread(avpkt.data + avpkt.size, 1, AUDIO_INBUF_SIZE - avpkt.size, f); if (len > 0) avpkt.size += len; } } fclose(outfile); fclose(f); avcodec_free_context(&c); av_frame_free(&decoded_frame); return 0; }
int FFmpeg_Input::Open( const char *filepath ) { int error; /** Open the input file to read from it. */ if ( (error = avformat_open_input( &input_format_context, filepath, NULL, NULL)) < 0 ) { Error("Could not open input file '%s' (error '%s')\n", filepath, av_make_error_string(error).c_str() ); input_format_context = NULL; return error; } /** Get information on the input file (number of streams etc.). */ if ( (error = avformat_find_stream_info(input_format_context, NULL)) < 0 ) { Error( "Could not open find stream info (error '%s')\n", av_make_error_string(error).c_str() ); avformat_close_input(&input_format_context); return error; } streams = new stream[input_format_context->nb_streams]; for ( unsigned int i = 0; i < input_format_context->nb_streams; i += 1 ) { if ( is_video_stream( input_format_context->streams[i] ) ) { zm_dump_stream_format(input_format_context, i, 0, 0); if ( video_stream_id == -1 ) { video_stream_id = i; // if we break, then we won't find the audio stream } else { Warning( "Have another video stream." ); } } else if ( is_audio_stream( input_format_context->streams[i] ) ) { if ( audio_stream_id == -1 ) { audio_stream_id = i; } else { Warning( "Have another audio stream." 
); } } streams[i].frame_count = 0; #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0) streams[i].context = avcodec_alloc_context3( NULL ); avcodec_parameters_to_context( streams[i].context, input_format_context->streams[i]->codecpar ); #else streams[i].context = input_format_context->streams[i]->codec; #endif if ( !(streams[i].codec = avcodec_find_decoder(streams[i].context->codec_id)) ) { Error( "Could not find input codec\n"); avformat_close_input(&input_format_context); return AVERROR_EXIT; } else { Debug(1, "Using codec (%s) for stream %d", streams[i].codec->name, i ); } if ((error = avcodec_open2( streams[i].context, streams[i].codec, NULL)) < 0) { Error( "Could not open input codec (error '%s')\n", av_make_error_string(error).c_str() ); #if LIBAVCODEC_VERSION_CHECK(57, 64, 0, 64, 0) avcodec_free_context( &streams[i].context ); #endif avformat_close_input(&input_format_context); return error; } } // end foreach stream if ( video_stream_id == -1 ) Error( "Unable to locate video stream in %s", filepath ); if ( audio_stream_id == -1 ) Debug( 3, "Unable to locate audio stream in %s", filepath ); return 0; } // end int FFmpeg_Input::Open( const char * filepath )