// Internally-used function // Initializes a new frame_puller struct with a given media type. int _frame_puller_init(frame_puller *fp, enum AVMediaType media_type) { int ret; // Find the first video stream // TODO: Perhaps some files have two or more video streams? unsigned int i; for (i = 0; i < fp->fmt_ctx->nb_streams; ++i) if (fp->fmt_ctx->streams[i]->codec->codec_type == media_type) { fp->target_stream_idx = i; break; } if (fp->target_stream_idx == -1) { av_log(NULL, AV_LOG_ERROR, "frame_puller: Cannot find a valid stream\n"); return AVERROR_INVALIDDATA; } // Create the codec context and open codec // Seems using the original codec context cause potential problems...? if (!(fp->codec = avcodec_find_decoder(fp->fmt_ctx->streams[i]->codec->codec_id))) { av_log(NULL, AV_LOG_ERROR, "frame_puller: Cannot find a proper decoder\n"); return AVERROR_INVALIDDATA; } fp->codec_ctx = avcodec_alloc_context3(fp->codec); if ((ret = avcodec_copy_context(fp->codec_ctx, fp->fmt_ctx->streams[i]->codec)) < 0) return ret; // NOTE: See issue #5 and https://trac.ffmpeg.org/ticket/4404 fp->codec_ctx->thread_count = fp->fmt_ctx->streams[i]->codec->thread_count = 1; if ((ret = avcodec_open2(fp->codec_ctx, fp->codec, NULL)) < 0) return ret; // Allocate a frame to store the read data fp->orig_frame = av_frame_alloc(); return 0; }
/* Allocate the per-segment muxer context seg->avf and mirror all of the
 * parent muxer's streams into it.
 * Returns 0 on success or a negative AVERROR code on failure. */
static int segment_mux_init(AVFormatContext *s)
{
    SegmentContext *seg = s->priv_data;
    AVFormatContext *oc;
    int i, ret;

    seg->avf = oc = avformat_alloc_context();
    if (!oc)
        return AVERROR(ENOMEM);

    oc->oformat            = seg->oformat;
    /* Propagate the parent's I/O hooks so segment files are opened and
     * closed through the caller-supplied callbacks. */
    oc->interrupt_callback = s->interrupt_callback;
    oc->opaque             = s->opaque;
    oc->io_close           = s->io_close;
    oc->io_open            = s->io_open;

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st;
        if (!(st = avformat_new_stream(oc, NULL)))
            return AVERROR(ENOMEM);
        /* BUGFIX: the return value of avcodec_copy_context() was ignored;
         * a failed copy (e.g. extradata OOM) would go unnoticed. */
        if ((ret = avcodec_copy_context(st->codec, s->streams[i]->codec)) < 0)
            return ret;
        st->sample_aspect_ratio = s->streams[i]->sample_aspect_ratio;
        st->time_base           = s->streams[i]->time_base;
    }
    return 0;
}
// Mirror every stream of *source into *dest: create a matching output
// stream (with the corresponding encoder, when one exists) and clone the
// input stream's codec context onto it. Failures on an individual stream
// are logged and that stream is skipped; the function itself returns void.
void copyAVFormatContext(AVFormatContext **dest, AVFormatContext **source)
{
    int numStreams = (*source)->nb_streams;
    LOGI("copyAVFormatContext source has %d streams", numStreams);
    int i;
    for (i = 0; i < numStreams; i++) {
        // Get input stream
        AVStream *inputStream = (*source)->streams[i];
        AVCodecContext *inputCodecContext = inputStream->codec;

        // Add new stream to output with codec from input stream
        //LOGI("Attempting to find encoder %s", avcodec_get_name(inputCodecContext->codec_id));
        AVCodec *outputCodec = avcodec_find_encoder(inputCodecContext->codec_id);
        if (outputCodec == NULL) {
            // A NULL codec is still legal for avformat_new_stream; only log.
            LOGI("Unable to find encoder %s", avcodec_get_name(inputCodecContext->codec_id));
        }

        AVStream *outputStream = avformat_new_stream(*dest, outputCodec);
        if (outputStream == NULL) {
            // BUGFIX: a failed stream allocation was dereferenced below.
            LOGI("Unable to allocate output stream %d", i);
            continue;
        }
        AVCodecContext *outputCodecContext = outputStream->codec;

        // Copy input stream's codecContext for output stream's codecContext
        if (avcodec_copy_context(outputCodecContext, inputCodecContext) < 0) {
            // BUGFIX: copy failure was silently ignored.
            LOGI("Unable to copy codec context for stream %d", i);
            continue;
        }
        outputCodecContext->strict_std_compliance = FF_COMPLIANCE_UNOFFICIAL;

        LOGI("copyAVFormatContext Copied stream %d with codec %s sample_fmt %s", i,
             avcodec_get_name(inputCodecContext->codec_id),
             av_get_sample_fmt_name(inputCodecContext->sample_fmt));
    }
}
/**
 * open a video stream
 *
 * mv->videoStream must be a valid video stream index
 *
 * @return 0 if opened
 * @return -1 on failure
 */
int video_open_stream(MovieState *mv)
{
    if (mv->videoStream == -1)
        return -1;

    // Get a pointer to the codec context for the video stream
    mv->pCodecCtxOrig = mv->pFormatCtx->streams[mv->videoStream]->codec;

    // Find the decoder for the video stream
    mv->pCodec = avcodec_find_decoder(mv->pCodecCtxOrig->codec_id);
    if (mv->pCodec == NULL) {
        fprintf(stderr, "Unsupported video codec!\n");
        return -1; // Codec not found
    } else {
        printf("video decoder : %s - OK\n", mv->pCodec->name);
    }

    // Copy context: decode through a private copy so the demuxer-owned
    // context stays untouched.
    mv->pCodecCtx = avcodec_alloc_context3(mv->pCodec);
    if (mv->pCodecCtx == NULL) { // BUGFIX: allocation was unchecked
        fprintf(stderr, "Couldn't allocate video codec context\n");
        return -1;
    }
    if (avcodec_copy_context(mv->pCodecCtx, mv->pCodecCtxOrig) != 0) {
        fprintf(stderr, "Couldn't copy video codec context\n"); // BUGFIX: missing newline
        return -1; // Error copying codec context
    }

    // Open codec
    // BUGFIX: error message went to stdout; report on stderr like the others.
    if (avcodec_open2(mv->pCodecCtx, mv->pCodec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        return -1;
    }
    return 0;
}
/* Find, clone and open a decoder for stream `vstreamid` of `s`.
 * On success, *vcodecc holds a freshly allocated, opened codec context
 * that the caller owns.
 * Returns 0 on success, -1 on failure. */
int openvideocodec(AVFormatContext *s, AVCodecContext **vcodecc, int vstreamid)
{
    int ret;
    AVCodec *vcodec = NULL;

    if ((vcodec = avcodec_find_decoder(
             s->streams[vstreamid]->codec->codec_id)) == NULL) {
        fprintf(stderr, "Cannot find decoder.\n");
        return (-1);
    }
    *vcodecc = avcodec_alloc_context3(vcodec);
    if (*vcodecc == NULL) { /* BUGFIX: allocation was unchecked */
        fprintf(stderr, "Cannot allocate codec context.\n");
        return (-1);
    }
    if ((ret = avcodec_copy_context(*vcodecc, s->streams[vstreamid]->codec)) < 0) {
        /* BUGFIX: copy failure was silently ignored */
        char buf[255];
        av_strerror(ret, buf, 255);
        fprintf(stderr, "%s\n", buf);
        return (-1);
    }
    /* Allow feeding truncated bitstreams when the decoder supports it. */
    if (vcodec->capabilities & CODEC_CAP_TRUNCATED)
        (*vcodecc)->flags |= CODEC_FLAG_TRUNCATED;
    if ((ret = avcodec_open2(*vcodecc, vcodec, NULL)) < 0) {
        char buf[255];
        av_strerror(ret, buf, 255);
        fprintf(stderr, "%s\n", buf);
        return (-1);
    }
    vcodec = NULL;
    return 0;
}
// Mirror every input stream of the global `fmt_ctx` into `ofmt_ctx`,
// cloning codec contexts for stream-copy remuxing, then dump the output
// layout. Returns 0 on success, nonzero (a negative AVERROR) on failure.
static int init_output(AVFormatContext *ofmt_ctx)
{
    int ret;
    AVStream *in_stream;
    AVStream *out_stream;

    for (int i = 0; i < fmt_ctx->nb_streams; i++) {
        in_stream  = fmt_ctx->streams[i];
        out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
        if (!out_stream) {
            printf("Failed allocating output stream\n");
            // BUGFIX: the error code was assigned and then discarded by
            // `return 1`; propagate it instead (still nonzero for callers).
            return AVERROR_UNKNOWN;
        }
        //copy codec_context from input
        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            printf("Failed to copy context from input to output stream codec context\n");
            return ret;
        }
        // Let the muxer pick the tag appropriate for the output container.
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    printf("==========Output Information==========\n");
    av_dump_format(ofmt_ctx, 0, out_filename, 1);
    printf("======================================\n");
    return 0;
}
/*
 * Open a chained "rtp" muxer carrying a single stream cloned from `st`,
 * write its header, and return the ready-to-use context.
 * Output goes through `handle` when given, otherwise into a dynamic
 * packet buffer sized by `packet_size`.
 * Returns NULL on any failure (the partially built context is freed).
 */
AVFormatContext *ff_rtp_chain_mux_open(AVFormatContext *s, AVStream *st,
                                       URLContext *handle, int packet_size)
{
    AVFormatContext *rtpctx;
    int ret;
    AVOutputFormat *rtp_format = av_guess_format("rtp", NULL, NULL);

    if (!rtp_format)
        return NULL;

    /* Allocate an AVFormatContext for each output stream */
    rtpctx = avformat_alloc_context();
    if (!rtpctx)
        return NULL;

    rtpctx->oformat = rtp_format;
    if (!av_new_stream(rtpctx, 0)) {
        av_free(rtpctx);
        return NULL;
    }
    /* Copy the max delay setting; the rtp muxer reads this. */
    rtpctx->max_delay = s->max_delay;
    /* Copy other stream parameters. */
    rtpctx->streams[0]->sample_aspect_ratio = st->sample_aspect_ratio;
    /* Forward only the LATM flag from the parent context. */
    rtpctx->flags |= s->flags & AVFMT_FLAG_MP4A_LATM;

    av_set_parameters(rtpctx, NULL);
    /* Copy the rtpflags values straight through */
    if (s->oformat->priv_class &&
        av_find_opt(s->priv_data, "rtpflags", NULL, 0, 0))
        av_set_int(rtpctx->priv_data, "rtpflags",
                   av_get_int(s->priv_data, "rtpflags", NULL));

    /* Set the synchronized start time. */
    rtpctx->start_time_realtime = s->start_time_realtime;

    avcodec_copy_context(rtpctx->streams[0]->codec, st->codec);

    /* Attach the output: a caller-provided handle, or an in-memory
     * packetized buffer. */
    if (handle) {
        ffio_fdopen(&rtpctx->pb, handle);
    } else
        ffio_open_dyn_packet_buf(&rtpctx->pb, packet_size);
    ret = avformat_write_header(rtpctx, NULL);

    if (ret) {
        /* Unwind whichever kind of AVIO context was attached above. */
        if (handle) {
            avio_close(rtpctx->pb);
        } else {
            uint8_t *ptr;
            avio_close_dyn_buf(rtpctx->pb, &ptr);
            av_free(ptr);
        }
        avformat_free_context(rtpctx);
        return NULL;
    }
    return rtpctx;
}
/*
 * Open a chained "rtp" muxer carrying a single stream cloned from `st`,
 * write its header, and return the ready-to-use context.
 * Newer variant: the parent's "rtpflags" option (if any) is forwarded to
 * the chained muxer through an AVDictionary.
 * Output goes through `handle` when given, otherwise into a dynamic
 * packet buffer sized by `packet_size`.
 * Returns NULL on any failure (the partially built context is freed).
 */
AVFormatContext *ff_rtp_chain_mux_open(AVFormatContext *s, AVStream *st,
                                       URLContext *handle, int packet_size)
{
    AVFormatContext *rtpctx;
    int ret;
    AVOutputFormat *rtp_format = av_guess_format("rtp", NULL, NULL);
    uint8_t *rtpflags;
    AVDictionary *opts = NULL;

    if (!rtp_format)
        return NULL;

    /* Allocate an AVFormatContext for each output stream */
    rtpctx = avformat_alloc_context();
    if (!rtpctx)
        return NULL;

    rtpctx->oformat = rtp_format;
    if (!av_new_stream(rtpctx, 0)) {
        av_free(rtpctx);
        return NULL;
    }
    /* Copy the max delay setting; the rtp muxer reads this. */
    rtpctx->max_delay = s->max_delay;
    /* Copy other stream parameters. */
    rtpctx->streams[0]->sample_aspect_ratio = st->sample_aspect_ratio;

    /* Forward the parent's "rtpflags" option value; DONT_STRDUP_VAL hands
     * ownership of the av_opt_get() string to the dictionary. */
    if (av_opt_get(s, "rtpflags", AV_OPT_SEARCH_CHILDREN, &rtpflags) >= 0)
        av_dict_set(&opts, "rtpflags", rtpflags, AV_DICT_DONT_STRDUP_VAL);

    /* Set the synchronized start time. */
    rtpctx->start_time_realtime = s->start_time_realtime;

    avcodec_copy_context(rtpctx->streams[0]->codec, st->codec);

    /* Attach the output: a caller-provided handle, or an in-memory
     * packetized buffer. */
    if (handle) {
        ffio_fdopen(&rtpctx->pb, handle);
    } else
        ffio_open_dyn_packet_buf(&rtpctx->pb, packet_size);
    ret = avformat_write_header(rtpctx, &opts);
    av_dict_free(&opts);

    if (ret) {
        /* Unwind whichever kind of AVIO context was attached above. */
        if (handle) {
            avio_close(rtpctx->pb);
        } else {
            uint8_t *ptr;
            avio_close_dyn_buf(rtpctx->pb, &ptr);
            av_free(ptr);
        }
        avformat_free_context(rtpctx);
        return NULL;
    }
    return rtpctx;
}
// Add an output stream to _fmtContext mirroring inputStream's codec
// parameters (stream-copy style: codec_tag cleared, global-header flag
// propagated). Returns the new stream, or 0 if allocation failed.
AVStream* FFmpegMuxer::addOutputStream(AVStream *inputStream)
{
    AVStream *outStream = avformat_new_stream(_fmtContext, 0);
    if (!outStream) // BUGFIX: a failed allocation was dereferenced below
        return 0;
    avcodec_copy_context(outStream->codec, inputStream->codec);
    outStream->codec->codec_tag = 0;
    if (_fmtContext->flags & AVFMT_GLOBALHEADER) {
        outStream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    // NOTE(review): _videoStreamIdx is unconditionally set to 0 here even
    // for non-video or later streams -- confirm this is intended.
    _videoStreamIdx = 0;
    return outStream;
}
// Copy decoder/encoder parameters from codec_context onto the muxer
// stream, using the codecpar API on newer libavcodec releases and the
// legacy avcodec_copy_context() on older ones.
// Returns true on success.
static bool AVStreamCopyContext(AVStream* stream, AVCodecContext* codec_context)
{
// The two-sided version gate distinguishes FFmpeg builds (micro >= 100)
// from Libav builds (micro < 100), which introduced the codecpar API at
// different version numbers.
#if (LIBAVCODEC_VERSION_MICRO >= 100 && LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 33, 100)) || \
    (LIBAVCODEC_VERSION_MICRO < 100 && LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 5, 0))
  stream->time_base = codec_context->time_base;
  return avcodec_parameters_from_context(stream->codecpar, codec_context) >= 0;
#else
  return avcodec_copy_context(stream->codec, codec_context) >= 0;
#endif
}
void AVEncoder::copyAVCodecContext(void* ctx) { if (!ctx) return; DPTR_D(AVEncoder); AVCodecContext* c = static_cast<AVCodecContext*>(ctx); if (d.avctx) { // dest should be avcodec_alloc_context3(NULL) AV_ENSURE_OK(avcodec_copy_context(d.avctx, c)); d.is_open = false; return; } }
/* Create and open a libavcodec decoder for `cw`, optionally seeding it
 * from the demuxer's codec context and the caller-supplied extradata.
 * Video decoders get the refcounted get_buffer2 wrappers installed.
 * Returns 0 on success, -1 on failure. */
static int
media_codec_create_lavc(media_codec_t *cw, const media_codec_params_t *mcp,
                        media_pipe_t *mp)
{
    const AVCodec *codec = avcodec_find_decoder(cw->codec_id);

    if (codec == NULL)
        return -1;

    cw->ctx = avcodec_alloc_context3(codec);

    if (cw->fmt_ctx != NULL)
        avcodec_copy_context(cw->ctx, cw->fmt_ctx);

    //  cw->ctx->debug = FF_DEBUG_PICT_INFO | FF_DEBUG_BUGS;

    if (mcp != NULL && mcp->extradata != NULL && !cw->ctx->extradata) {
        /* Padding bytes are required by lavc's bitstream readers. */
        cw->ctx->extradata =
            calloc(1, mcp->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (cw->ctx->extradata == NULL) {
            /* BUGFIX: OOM was unchecked; memcpy would have crashed. */
            av_freep(&cw->ctx);
            return -1;
        }
        memcpy(cw->ctx->extradata, mcp->extradata, mcp->extradata_size);
        cw->ctx->extradata_size = mcp->extradata_size;
    }

    if (mcp && mcp->cheat_for_speed)
        cw->ctx->flags2 |= CODEC_FLAG2_FAST;

    if (codec->type == AVMEDIA_TYPE_VIDEO) {
        cw->get_buffer2 = &avcodec_default_get_buffer2;
        cw->ctx->opaque = cw;
        cw->ctx->refcounted_frames = 1;
        cw->ctx->get_format = &libav_get_format;
        cw->ctx->get_buffer2 = &get_buffer2_wrapper;
        cw->decode = &libav_decode_video;
        cw->flush = &libav_video_flush;
    }

    if (avcodec_open2(cw->ctx, codec, NULL) < 0) {
        TRACE(TRACE_INFO, "libav", "Unable to open codec %s",
              codec ? codec->name : "<noname>");
        av_freep(&cw->ctx);
        return -1;
    }
    return 0;
}
/* Build and open a standalone decoder context for stream `stream_idx`,
 * cloned from the demuxer's codec parameters.
 * Error handling is assert-based: any failure aborts. */
AVCodecContext * new_codec_ctx( AVFormatContext * fmt_ctx, int stream_idx )
{
    AVCodecContext *src = fmt_ctx->streams[stream_idx]->codec;
    AVCodec *dec = avcodec_find_decoder(src->codec_id);
    assert(dec);

    AVCodecContext *dst = avcodec_alloc_context3(dec);
    int rc = avcodec_copy_context(dst, src);
    assert(rc == 0);
    rc = avcodec_open2(dst, dec, NULL);
    assert(rc >= 0);
    return dst;
}
// Allocate and open a standalone decoder context for the given stream,
// duplicated from the demuxer-owned context.
// Returns nullptr on failure (JIF logs and jumps to the cleanup label).
static AVCodecContext* createCodecContext(AVFormatContext* formatContext, int streamId)
{
    AVCodecContext* ctx = nullptr;
    AVCodecContext* const srcCtx = formatContext->streams[streamId]->codec;
    auto decoder = avcodec_find_decoder(srcCtx->codec_id);
    JIF(decoder == nullptr, "failed avcodec_find_decoder %d", srcCtx->codec_id);

    ctx = avcodec_alloc_context3(decoder);
    JIF(ctx == nullptr, "failed avcodec_alloc_context3");
    JIF(avcodec_copy_context(ctx, srcCtx) != 0, "failed avcodec_copy_context");
    JIF(avcodec_open2(ctx, decoder, nullptr) < 0, "failed avcodec_open2");
    return ctx;

err:
    if (ctx)
        avcodec_free_context(&ctx);
    return nullptr;
}
// Prepare H.264 smart-cut processing: open a decoder on the input
// stream, create an encoder context cloned from it, and initialize the
// cut-list / encode state machine.
// Returns 0 on success, the error() result on failure.
int H264::init()
{
	AVCodec* decoder = avcodec_find_decoder(stream()->codec->codec_id);
	if(!decoder)
		return error("Could not find decoder");
	
	if(avcodec_open2(stream()->codec, decoder, NULL) != 0)
		return error("Could not open decoder");
	
	// Grab libavcodec's private H264Context from the opened decoder.
	// NOTE(review): relies on lavc internals; fragile across versions.
	m_h = (H264Context*)stream()->codec->priv_data;
	
	m_codec = avcodec_find_encoder(stream()->codec->codec_id);
	if(!m_codec)
		return error("Could not find encoder");
	
	// Encode through a context cloned from the input stream.
	outputStream()->codec = avcodec_alloc_context3(m_codec);
	avcodec_copy_context(outputStream()->codec, stream()->codec);
	outputStream()->sample_aspect_ratio = outputStream()->codec->sample_aspect_ratio;
	
	// Force single-threaded encoding.
	outputStream()->codec->thread_type = 0;
	outputStream()->codec->thread_count = 1;
	
	AVCodecContext* ctx = outputStream()->codec;
// 	ctx->bit_rate = 3 * 500 * 1024;
// 	ctx->rc_max_rate = 0;
// 	ctx->rc_buffer_size = 0;
// 	ctx->gop_size = 40;
// 	ctx->coder_type = 1;
// 	ctx->me_cmp = 1;
// 	ctx->me_range = 16;
	ctx->colorspace = AVCOL_SPC_BT709;
// 	ctx->flags2 |= CODEC_FLAG2_8X8DCT;
	
	m_nc = cutList().nextCutPoint(0);
	m_isCutout = m_nc->direction == CutPoint::IN;
	
	// Offset of 7 (in 1/1 units, rescaled to the stream time base) before
	// which decoding starts -- presumably decoder warm-up lead time;
	// TODO confirm the constant's intent.
	m_startDecodeOffset = av_rescale_q(7, (AVRational){1,1}, stream()->time_base);
	
	m_encodeBuffer = (uint8_t*)av_malloc(ENCODE_BUFSIZE);
	
	m_encoding = false;
	m_decoding = false;
	m_syncing = false;
	m_syncPoint = -1;
	
	return 0;
}
long FFmpegVideo::Save2jpeg (uint8_t *buffer, int width, int height, char *fileName) { AVFrame *pFrame = 0 ; pFrame = avcodec_alloc_frame(); if(pFrame==0) return -1; avpicture_fill((AVPicture *)pFrame, buffer, PIX_FMT_BGR24, width, height);// pFrame->width = width; pFrame->height = height; pFrame->format = PIX_FMT_BGR24; AVCodec *pCodec = 0; // 寻找视频流的解码器 pCodec = avcodec_find_encoder ( CODEC_ID_MJPEG ); AVCodecContext *pCodeContext = 0 ; // 得到视频流编码上下文的指针 pCodeContext = avcodec_alloc_context(); if(pCodeContext==NULL) return -1; if(pCodecCtx!=0){ avcodec_copy_context(pCodeContext, pCodecCtx); } pCodeContext->pix_fmt = PIX_FMT_BGR24; pCodeContext->codec_id = CODEC_ID_MJPEG; pCodeContext->codec_type = AVMEDIA_TYPE_VIDEO;//CODEC_TYPE_VIDEO; pCodeContext->width = width; pCodeContext->height = height; // 打开解码器 if(avcodec_open(pCodeContext, pCodec)<0){ // return -1;//handle_error(); // 打不开解码器 } int BufSizActual = SaveFrame2jpeg( pCodeContext, pFrame, fileName); av_free(pFrame); // Close the codec avcodec_close(pCodecCtx); return BufSizActual; }
// Open `fname` with libavformat (forwarding optional demuxer options
// from `params`) and create an opened, private decoder context for every
// audio/video stream. Non-A/V streams are marked EFF_UNK_STREAM and left
// unopened. Throws TFFmpegException on any failure.
TFFmpegReaderImp(const char* fname, TParams* params)
{
    FormatCtx = NULL;
    Packet = NULL;
    av_log_set_level(AV_LOG_QUIET);
    av_register_all();
    avdevice_register_all();

    // Forward caller-supplied key/value pairs as demuxer options.
    AVDictionary *av_options = NULL;
    if(params)
        for(auto i = params->begin(); i != params->end(); ++i)
            av_dict_set(&av_options, i->first.c_str(), i->second.c_str(), 0);

    if(avformat_open_input(&FormatCtx, fname, NULL, &av_options) != 0)
        throw TFFmpegException("Couldn't open file");
    if(avformat_find_stream_info(FormatCtx, NULL) < 0)
        throw TFFmpegException("Stream info not found");

    // Init streams
    Streams.resize(FormatCtx->nb_streams);
    for(size_t i = 0; i < Streams.size(); ++i) {
        TFFmpegStream& Stream = Streams[i];
        AVCodecContext *CodecCtxTmp = FormatCtx->streams[i]->codec;
        if(CodecCtxTmp->codec_type == AVMEDIA_TYPE_AUDIO)
            Stream.Type = EFF_AUDIO_STREAM;
        else if(CodecCtxTmp->codec_type == AVMEDIA_TYPE_VIDEO) {
            Stream.Type = EFF_VIDEO_STREAM;
        } else {
            Stream.Type = EFF_UNK_STREAM;
            continue;
        }
        AVCodec *Codec = avcodec_find_decoder(CodecCtxTmp->codec_id);
        if(Codec == NULL)
            throw TFFmpegException("Codec not found");
        // Decode through a private copy of the demuxer-owned context.
        Stream.CodecCtx = avcodec_alloc_context3(Codec);
        if(avcodec_copy_context(Stream.CodecCtx, CodecCtxTmp) != 0)
            throw TFFmpegException("Couldn't copy codec context");
        if(avcodec_open2(Stream.CodecCtx, Codec, NULL) < 0)
            throw TFFmpegException("Couldn't open codec");
        // NOTE(review): closing the demuxer-owned context here is
        // unusual -- presumably to release decoder state opened during
        // probing; confirm it does not break later seeking/probing.
        avcodec_close(CodecCtxTmp);
        Stream.Init();
    }

    Packet = new AVPacket();
    av_init_packet(Packet);
}
/* Demux one packet from the chained SDP/RTP context, first draining any
 * pending SAP announcements (non-blocking select) so a stream-deletion
 * announcement can flag EOF. New streams appearing in the chained
 * context are mirrored into `s` when the NOHEADER flag is set.
 * Returns av_read_frame's result or a negative AVERROR code. */
static int sap_fetch_packet(AVFormatContext *s, AVPacket *pkt)
{
    struct SAPState *sap = s->priv_data;
    int fd = url_get_file_handle(sap->ann_fd);
    int n, ret;
    fd_set rfds;
    struct timeval tv;
    uint8_t recvbuf[1500];

    if (sap->eof)
        return AVERROR_EOF;

    /* Poll the announcement socket with a zero timeout (never blocks). */
    while (1) {
        FD_ZERO(&rfds);
        FD_SET(fd, &rfds);
        tv.tv_sec = tv.tv_usec = 0;
        n = select(fd + 1, &rfds, NULL, NULL, &tv);
        if (n <= 0 || !FD_ISSET(fd, &rfds))
            break;
        ret = url_read(sap->ann_fd, recvbuf, sizeof(recvbuf));
        if (ret >= 8) {
            /* Bytes 2-3 of a SAP packet carry the message id hash. */
            uint16_t hash = AV_RB16(&recvbuf[2]);
            /* Should ideally check the source IP address, too */
            if (recvbuf[0] & 0x04 && hash == sap->hash) {
                /* Stream deletion */
                sap->eof = 1;
                return AVERROR_EOF;
            }
        }
    }
    ret = av_read_frame(sap->sdp_ctx, pkt);
    if (ret < 0)
        return ret;
    if (s->ctx_flags & AVFMTCTX_NOHEADER) {
        /* Mirror any streams the chained demuxer discovered after our
         * header was read, cloning their codec parameters. */
        while (sap->sdp_ctx->nb_streams > s->nb_streams) {
            int i = s->nb_streams;
            AVStream *st = av_new_stream(s, i);
            if (!st) {
                av_free_packet(pkt);
                return AVERROR(ENOMEM);
            }
            avcodec_copy_context(st->codec, sap->sdp_ctx->streams[i]->codec);
            st->time_base = sap->sdp_ctx->streams[i]->time_base;
        }
    }
    return ret;
}
/* Demux one packet from the chained SDP/RTP context, first draining any
 * pending SAP announcements (non-blocking poll) so a stream-deletion
 * announcement can flag EOF. New streams appearing in the chained
 * context are mirrored into `s` when the NOHEADER flag is set.
 * Returns av_read_frame's result or a negative AVERROR code. */
static int sap_fetch_packet(AVFormatContext *s, AVPacket *pkt)
{
    struct SAPState *sap = s->priv_data;
    int fd = ffurl_get_file_handle(sap->ann_fd);
    int n, ret;
    struct pollfd p = {fd, POLLIN, 0};
    uint8_t recvbuf[RTP_MAX_PACKET_LENGTH];

    if (sap->eof)
        return AVERROR_EOF;

    /* Poll the announcement socket with a zero timeout (never blocks). */
    while (1) {
        n = poll(&p, 1, 0);
        if (n <= 0 || !(p.revents & POLLIN))
            break;
        ret = ffurl_read(sap->ann_fd, recvbuf, sizeof(recvbuf));
        if (ret >= 8) {
            /* Bytes 2-3 of a SAP packet carry the message id hash. */
            uint16_t hash = AV_RB16(&recvbuf[2]);
            /* Should ideally check the source IP address, too */
            if (recvbuf[0] & 0x04 && hash == sap->hash) {
                /* Stream deletion */
                sap->eof = 1;
                return AVERROR_EOF;
            }
        }
    }
    ret = av_read_frame(sap->sdp_ctx, pkt);
    if (ret < 0)
        return ret;
    if (s->ctx_flags & AVFMTCTX_NOHEADER) {
        /* Mirror any streams the chained demuxer discovered after our
         * header was read, cloning their codec parameters. */
        while (sap->sdp_ctx->nb_streams > s->nb_streams) {
            int i = s->nb_streams;
            AVStream *st = avformat_new_stream(s, NULL);
            if (!st) {
                av_free_packet(pkt);
                return AVERROR(ENOMEM);
            }
            st->id = i;
            avcodec_copy_context(st->codec, sap->sdp_ctx->streams[i]->codec);
            st->time_base = sap->sdp_ctx->streams[i]->time_base;
        }
    }
    return ret;
}
// Open the source audio decoder plus a matching encoder (selected by
// codec id AND sample format) for re-encoding audio around cut points,
// and allocate the cut-in/cut-out sample buffers.
// Returns 0 on success, the error() result on failure.
int GenericAudio::init()
{
	AVCodec* codec = avcodec_find_decoder(stream()->codec->codec_id);
	if(!codec)
		return error("Could not find decoder");
	
	if(avcodec_open2(stream()->codec, codec, 0) != 0)
		return error("Could not open decoder");
	
	// avcodec_find_decoder does not take sample_fmt into account,
	// so we have to find the decoder ourself...
	AVCodec* encoder = findCodec(
		stream()->codec->codec_id,
		stream()->codec->sample_fmt
	);
	if(!encoder)
		return error("Could not find encoder");
	
	// Carry stream-level properties over to the output stream.
	outputStream()->disposition = stream()->disposition;
	av_dict_copy(&outputStream()->metadata, stream()->metadata, 0);
	
	// Encode through a context cloned from the input stream.
	outputStream()->codec = avcodec_alloc_context3(encoder);
	avcodec_copy_context(outputStream()->codec, stream()->codec);
	
	if(avcodec_open2(outputStream()->codec, encoder, 0) != 0)
		return error("Could not open encoder");
	
	// Allocate sample buffer
	m_cutout_buf = (int16_t*)av_malloc(BUFSIZE);
	m_cutin_buf = (int16_t*)av_malloc(BUFSIZE);
	
	if(!m_cutout_buf || !m_cutin_buf)
		return error("Could not allocate sample buffer");
	
	m_nc = cutList().nextCutPoint(0);
	m_cutout = m_nc->direction == CutPoint::IN;
	
	return 0;
}
// Create an output stream for the source named `input`, clone the
// caller-provided codec context onto it, open its encoder, and register
// it in m_streams under `input`. Stream lifetime is managed by the
// StreamPtr custom deleter.
// Returns false on any allocation/copy/open failure.
bool OutputFormat::addStream(QString input, OutputParams outputParams)
{
    StreamPtr stream = StreamPtr(avformat_new_stream(this->m_outputContext.data(),
                                                     outputParams.codecContext()->codec),
                                 CustomDeleters::deleteStream);

    if (!stream ||
        avcodec_copy_context(stream->codec, outputParams.codecContext().data()) != 0)
        return false;

    // Map the output stream id to the caller's chosen index.
    stream->id = outputParams.outputIndex();

    // Some formats want stream headers to be separate.
    if (this->m_outputContext->oformat->flags & AVFMT_GLOBALHEADER)
        stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

    if (avcodec_open2(stream->codec, outputParams.codecContext()->codec, NULL) < 0)
        return false;

    this->m_streams[input] = stream;

    return true;
}
// Find and clone the decoder context for stream `stream_index` of
// is->pFormatCtx, as preparation for opening the codec.
// Returns -1 on failure.
// NOTE(review): as excerpted here, the function ends without a return on
// the success path and wanted_spec/spec are unused -- presumably the
// remainder (avcodec_open2, SDL audio setup, queue init) was truncated
// from this excerpt; confirm against the full file.
int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *pFormatCtx = is->pFormatCtx;
    AVCodecContext *codecCtx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
        return -1;
    }
    codec = avcodec_find_decoder(pFormatCtx->streams[stream_index]->codec->codec_id);
    if(!codec) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }
    // Decode through a private copy so the demuxer-owned codec context
    // is left untouched.
    codecCtx = avcodec_alloc_context3(codec);
    if(avcodec_copy_context(codecCtx, pFormatCtx->streams[stream_index]->codec) != 0) {
        fprintf(stderr, "Couldn't copy codec context");
        return -1;
    }
}
/* Create an output stream on muxer context `s` mirroring the camera's
 * codec parameters and input-stream timing.
 * Returns the new stream, or NULL on error (logged via av_err_msg). */
AVStream* copy_ctx_from_input(AVFormatContext *s, struct camera *cam)
{
  AVStream *out = avformat_new_stream(s, (AVCodec *)cam->codec->codec);
  if(out == NULL) {
    av_err_msg("avformat_new_stream", 0);
    return NULL;
  }

  int err = avcodec_copy_context(out->codec, (const AVCodecContext *)cam->codec);
  if(err < 0) {
    av_err_msg("avcodec_copy_context", err);
    return NULL;
  }

  /* Mirror aspect-ratio and timing information from the capture side. */
  out->sample_aspect_ratio = cam->codec->sample_aspect_ratio;
  out->r_frame_rate        = cam->input_stream->r_frame_rate;
  out->avg_frame_rate      = out->r_frame_rate;
  out->time_base           = cam->input_stream->time_base;
  out->codec->time_base    = out->time_base;

  if(s->oformat->flags & AVFMT_GLOBALHEADER)
    out->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

  return out;
}
/*
 * do nothing if equal
 * close the old one. the codec context can not be shared in more than 1 decoder.
 */
// Replace this decoder's private codec context with a copy of `codecCtx`.
// Passing null detaches: the private context is freed. An open decoder is
// closed first, since its properties cannot be changed while open.
void AVDecoder::setCodecContext(void *codecCtx)
{
    DPTR_D(AVDecoder);
    AVCodecContext *ctx = (AVCodecContext*)codecCtx;
    if (d.codec_ctx == ctx)
        return;
    if (isOpen()) {
        qWarning("Can not copy codec properties when it's open");
        close(); //
    }
    d.is_open = false;
    if (!ctx) {
        // Null source means "detach": release our private context.
        avcodec_free_context(&d.codec_ctx);
        d.codec_ctx = 0; // redundant after avcodec_free_context; kept for clarity
        return;
    }
    // Lazily create the private context the properties are copied into.
    if (!d.codec_ctx)
        d.codec_ctx = avcodec_alloc_context3(NULL);
    if (!d.codec_ctx) {
        qWarning("avcodec_alloc_context3 failed");
        return;
    }
    AV_ENSURE_OK(avcodec_copy_context(d.codec_ctx, ctx));
}
// Create the output container for the combined file: mirror the video
// stream (and, when `isAudio` is nonzero, the audio stream) of the
// global input context in1_fmtctx, open the output file, and write its
// header. Reads/updates globals: out_fmtctx, out_video_stream,
// out_audio_stream, video_stream_index, audio_stream_index.
// Returns 0 on success, -1 on failure.
int CombineVideo_OpenOutput(const char* outFileName, int isAudio)
{
    int ret = -1;
    int i = 0;

    if ((ret = avformat_alloc_output_context2(&out_fmtctx, NULL, NULL, outFileName)) < 0) {
        ret = -1;
        goto ErrLab;
    }

    //new stream for out put
    for (i = 0; i < in1_fmtctx->nb_streams; i++) {
        if (in1_fmtctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_stream_index = i;
            out_video_stream = avformat_new_stream(out_fmtctx, NULL);
            if (!out_video_stream) {
                ret = -1;
                goto ErrLab;
            }
            // Clone the input stream's codec parameters for remuxing.
            if ((ret = avcodec_copy_context(out_video_stream->codec, in1_fmtctx->streams[i]->codec)) < 0) {
                ret = -1;
                goto ErrLab;
            }
            // Let the muxer pick the tag fitting the output container.
            out_video_stream->codec->codec_tag = 0;
            if(out_fmtctx->oformat->flags & AVFMT_GLOBALHEADER) {
                out_video_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
            }
        }
        else if (in1_fmtctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && isAudio) {
            audio_stream_index = i;
            out_audio_stream = avformat_new_stream(out_fmtctx, NULL);
            if (!out_audio_stream) {
                ret = -1;
                goto ErrLab;
            }
            if ((ret = avcodec_copy_context(out_audio_stream->codec, in1_fmtctx->streams[i]->codec)) < 0) {
                ret = -1;
                goto ErrLab;
            }
            out_audio_stream->codec->codec_tag = 0;
            if(out_fmtctx->oformat->flags & AVFMT_GLOBALHEADER) {
                out_audio_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
            }
        }
    }

    //open output file
    if (!(out_fmtctx->oformat->flags & AVFMT_NOFILE)) {
        if ((ret = avio_open(&out_fmtctx->pb, outFileName, AVIO_FLAG_WRITE)) < 0) {
            ret = -1;
            goto ErrLab;
        }
    }
    //write out file header
    if ((ret = avformat_write_header(out_fmtctx, NULL)) < 0) {
        ret = -1;
        goto ErrLab;
    }
    ret = 0;
ErrLab:
    return ret;
}
/* Open one tee slave output: parse its per-slave options, build an
 * output context mirroring the selected streams of `avf`, open the file,
 * write the header, and set up per-stream bitstream filter chains.
 * Returns 0 on success or a negative AVERROR code; partially stolen
 * option strings and the options dict are always freed via `end`. */
static int open_slave(AVFormatContext *avf, char *slave, TeeSlave *tee_slave)
{
    int i, ret;
    AVDictionary *options = NULL;
    AVDictionaryEntry *entry;
    char *filename;
    char *format = NULL, *select = NULL;
    AVFormatContext *avf2 = NULL;
    AVStream *st, *st2;
    int stream_count;

    if ((ret = parse_slave_options(avf, slave, &options, &filename)) < 0)
        return ret;

/* Move an option's value out of the dict into a local, so it survives
 * the dict entry's deletion. */
#define STEAL_OPTION(option, field) do {                                \
        if ((entry = av_dict_get(options, option, NULL, 0))) {          \
            field = entry->value;                                       \
            entry->value = NULL; /* prevent it from being freed */      \
            av_dict_set(&options, option, NULL, 0);                     \
        }                                                               \
    } while (0)

    STEAL_OPTION("f", format);
    STEAL_OPTION("select", select);

    ret = avformat_alloc_output_context2(&avf2, NULL, format, filename);
    if (ret < 0)
        goto end;
    av_dict_copy(&avf2->metadata, avf->metadata, 0);

    tee_slave->stream_map = av_calloc(avf->nb_streams, sizeof(*tee_slave->stream_map));
    if (!tee_slave->stream_map) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    stream_count = 0;
    for (i = 0; i < avf->nb_streams; i++) {
        st = avf->streams[i];
        if (select) {
            ret = avformat_match_stream_specifier(avf, avf->streams[i], select);
            if (ret < 0) {
                av_log(avf, AV_LOG_ERROR,
                       "Invalid stream specifier '%s' for output '%s'\n",
                       select, slave);
                goto end;
            }
            if (ret == 0) { /* no match */
                tee_slave->stream_map[i] = -1;
                continue;
            }
        }
        tee_slave->stream_map[i] = stream_count++;

        if (!(st2 = avformat_new_stream(avf2, NULL))) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        /* Mirror stream-level properties of the selected input stream. */
        st2->id = st->id;
        st2->r_frame_rate        = st->r_frame_rate;
        st2->time_base           = st->time_base;
        st2->start_time          = st->start_time;
        st2->duration            = st->duration;
        st2->nb_frames           = st->nb_frames;
        st2->disposition         = st->disposition;
        st2->sample_aspect_ratio = st->sample_aspect_ratio;
        st2->avg_frame_rate      = st->avg_frame_rate;
        av_dict_copy(&st2->metadata, st->metadata, 0);
        if ((ret = avcodec_copy_context(st2->codec, st->codec)) < 0)
            goto end;
    }

    if (!(avf2->oformat->flags & AVFMT_NOFILE)) {
        if ((ret = avio_open(&avf2->pb, filename, AVIO_FLAG_WRITE)) < 0) {
            av_log(avf, AV_LOG_ERROR, "Slave '%s': error opening: %s\n",
                   slave, av_err2str(ret));
            goto end;
        }
    }

    if ((ret = avformat_write_header(avf2, &options)) < 0) {
        av_log(avf, AV_LOG_ERROR, "Slave '%s': error writing header: %s\n",
               slave, av_err2str(ret));
        goto end;
    }

    tee_slave->avf = avf2;
    /* BUGFIX: was av_calloc(..., sizeof(TeeSlave)), over-allocating each
     * element; the array holds bitstream-filter chain pointers. */
    tee_slave->bsfs = av_calloc(avf2->nb_streams, sizeof(*tee_slave->bsfs));
    if (!tee_slave->bsfs) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    entry = NULL;
    while (entry = av_dict_get(options, "bsfs", NULL, AV_DICT_IGNORE_SUFFIX)) {
        const char *spec = entry->key + strlen("bsfs");
        if (*spec) {
            if (strspn(spec, slave_bsfs_spec_sep) != 1) {
                av_log(avf, AV_LOG_ERROR,
                       "Specifier separator in '%s' is '%c', but only characters '%s' "
                       "are allowed\n", entry->key, *spec, slave_bsfs_spec_sep);
                /* BUGFIX: a bare `return AVERROR(EINVAL)` here leaked
                 * format, select, and the options dictionary. */
                ret = AVERROR(EINVAL);
                goto end;
            }
            spec++; /* consume separator */
        }
        for (i = 0; i < avf2->nb_streams; i++) {
            ret = avformat_match_stream_specifier(avf2, avf2->streams[i], spec);
            if (ret < 0) {
                av_log(avf, AV_LOG_ERROR,
                       "Invalid stream specifier '%s' in bsfs option '%s' for slave "
                       "output '%s'\n", spec, entry->key, filename);
                goto end;
            }
            if (ret > 0) {
                av_log(avf, AV_LOG_DEBUG, "spec:%s bsfs:%s matches stream %d of slave "
                       "output '%s'\n", spec, entry->value, i, filename);
                if (tee_slave->bsfs[i]) {
                    av_log(avf, AV_LOG_WARNING,
                           "Duplicate bsfs specification associated to stream %d of slave "
                           "output '%s', filters will be ignored\n", i, filename);
                    continue;
                }
                ret = parse_bsfs(avf, entry->value, &tee_slave->bsfs[i]);
                if (ret < 0) {
                    av_log(avf, AV_LOG_ERROR,
                           "Error parsing bitstream filter sequence '%s' associated to "
                           "stream %d of slave output '%s'\n", entry->value, i, filename);
                    goto end;
                }
            }
        }
        av_dict_set(&options, entry->key, NULL, 0);
    }

    if (options) {
        /* Anything left in the dict is an option nobody consumed. */
        entry = NULL;
        while ((entry = av_dict_get(options, "", entry, AV_DICT_IGNORE_SUFFIX)))
            av_log(avf2, AV_LOG_ERROR, "Unknown option '%s'\n", entry->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto end;
    }

end:
    av_free(format);
    av_free(select);
    av_dict_free(&options);
    return ret;
}
// Concatenate `length` pairs of raw H.264 and AAC files into a single
// MP4. The function loops via `goto joinone`, remuxing one input pair
// per pass into the shared output context; PTS/DTS continuity across
// segments is kept through end_video_pts/end_audio_pts offsets.
// When `usefilter` is nonzero, packets pass through the aac_adtstoasc
// bitstream filter. Returns 0 on success, a negative code on failure.
//
// NOTE(review): a few oddities worth confirming against intent --
//  * av_register_all() runs once per segment (harmless, but redundant);
//  * when usefilter is set, the filter is applied to video packets too;
//  * retu == -10 is reused for both avio_open and filter failures;
//  * in_stream used at the PTS conversion below is whatever the inner
//    read loop last set, which assumes a matching packet was found.
int joinmp4(char (*h264file)[400] ,char (*aacfile)[400],char * mp4,int length,int usefilter)
{
    //AVOutputFormat *ofmt = NULL;
    AVPacket pkt;
    AVStream *out_vstream = NULL;
    AVStream *out_astream = NULL;
    AVFormatContext *ofmt_ctx = NULL;
    int join_index = 0;                      // index of the segment pair being joined
    AVBitStreamFilterContext* aacbsfc = NULL;
    long last_video_pts = 0;
    long last_audio_pts = 0;
    long end_video_pts = 0;                  // PTS offset applied to the next segment
    long end_audio_pts = 0;
    int videoindex_out = -1;
    int audioindex_out = -1;
    //Input AVFormatContext and Output AVFormatContext
    AVFormatContext * ifmt_ctx_v = NULL, *ifmt_ctx_a = NULL;
    int ret, i,retu =0,filter_ret=0;
    // int fps;
    int videoindex_v=-1;
    int audioindex_a=-1;
    int frame_index=0;
    int64_t cur_pts_v=0,cur_pts_a=0;
    //set file path
    char *in_filename_v = h264file[join_index];
    char *in_filename_a = aacfile[join_index];
    char *out_filename = mp4;

joinone:
    // Re-initialize all per-segment state for this pass.
    //Input AVFormatContext and Output AVFormatContext
    ifmt_ctx_v = NULL;
    ifmt_ctx_a = NULL;
    ret = 0;
    i = 0;retu =0;filter_ret=0;
    // int fps;
    videoindex_v=-1;
    audioindex_a=-1;
    frame_index=0;
    cur_pts_v=0;cur_pts_a=0;
    //set file path
    in_filename_v = h264file[join_index];
    in_filename_a = aacfile[join_index];
    out_filename = mp4;
    //register before use
    av_register_all();
    //open Input and set avformatcontext
    if ((ret = avformat_open_input(&ifmt_ctx_a, in_filename_a, 0, 0)) < 0) {
        retu = -1;//-1 mean audio file opened failed
        goto end;
    }
    if ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) {
        retu = -2; //-2 mean video file opened failed
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) {
        retu = -3; //-3 mean get video info failed
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) {
        retu = -4;//-4 mean get audio info failed
        goto end;
    }
    //open Output -- only once, on the first segment
    if(join_index == 0) {
        avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
        if (!ofmt_ctx) {
            retu = -5;
            goto end;
        }
    }
    //ofmt = ofmt_ctx->oformat;
    //find all video stream input type
    for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
        //Create output AVStream according to input AVStream
        if(ifmt_ctx_v->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
            AVStream *in_stream = ifmt_ctx_v->streams[i];
            videoindex_v=i;
            if(join_index == 0) {
                // First segment: create the output stream and clone codec params.
                out_vstream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
                videoindex_out=out_vstream->index;
                //Copy the settings of AVCodecContext
                if (avcodec_copy_context(out_vstream->codec, in_stream->codec) < 0) {
                    retu = -7;
                    goto end;
                }
                out_vstream->codec->codec_tag = 0;
                if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                    out_vstream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
            } else {
                // Later segments only extend the accumulated duration.
                out_vstream->duration += in_stream->duration;
                //printf("duration = %ld\n",out_vstream->duration);
            }
            if (!out_vstream) {
                retu = -6;
                goto end;
            }
            break;
        }
    }
    //find all audio stream input type
    for (i = 0; i < ifmt_ctx_a->nb_streams; i++) {
        //Create output AVStream according to input AVStream
        if(ifmt_ctx_a->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO){
            AVStream *in_stream = ifmt_ctx_a->streams[i];
            audioindex_a=i;
            if(join_index == 0) {
                out_astream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
                audioindex_out=out_astream->index;
                //Copy the settings of AVCodecContext
                if (avcodec_copy_context(out_astream->codec, in_stream->codec) < 0) {
                    retu = -7;
                    goto end;
                }
                out_astream->codec->codec_tag = 0;
                if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                    out_astream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
            } else {
                out_astream->duration += in_stream->duration;
                //printf("duration = %ld\n",out_astream->duration);
            }
            if (!out_astream) {
                retu = -6;
                goto end;
            }
            break;
        }
    }
    if(join_index == 0) {
        //Open output file
        if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
            if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
                retu = -10;
                goto end;
            }
        }
        //Write file header
        if (avformat_write_header(ofmt_ctx, NULL) < 0) {
            retu = -11;
            goto end;
        }
    }
    // Lazily create the ADTS->ASC filter once for the whole join.
    if(usefilter&& aacbsfc == NULL)
        aacbsfc = av_bitstream_filter_init("aac_adtstoasc");
    while (true) {
        AVFormatContext *ifmt_ctx;
        int stream_index=0;
        AVStream *in_stream, *out_stream;
        //Get an AVPacket: pick whichever of video/audio is behind in time.
        if(av_compare_ts(cur_pts_v,ifmt_ctx_v->streams[videoindex_v]->time_base,cur_pts_a, ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0) {
            ifmt_ctx=ifmt_ctx_v;
            stream_index=videoindex_out;
            if(av_read_frame(ifmt_ctx, &pkt) >= 0){
                // Skip packets until one from the selected video stream appears.
                do{
                    in_stream  = ifmt_ctx->streams[pkt.stream_index];
                    out_stream = out_vstream;
                    if(pkt.stream_index==videoindex_v){
                        //Simple Write PTS
                        if(pkt.pts==AV_NOPTS_VALUE){
                            //Write PTS
                            AVRational time_base1=in_stream->time_base;
                            //Duration between 2 frames (us)
                            int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(in_stream->r_frame_rate);
                            //Parameters: synthesize timestamps from the frame rate.
                            pkt.pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
                            pkt.dts=pkt.pts;
                            pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
                            frame_index++;
                        }
                        cur_pts_v=pkt.pts;
                        break;
                    }
                } while(av_read_frame(ifmt_ctx, &pkt) >= 0);
            } else {
                // Video EOF for this segment: snapshot offsets, next segment.
                //printf("pkt.duration = %ld\n",pkt.duration);
                join_index++;
                end_video_pts = last_video_pts;
                end_audio_pts = last_audio_pts;
                break;
            }
        } else {
            ifmt_ctx=ifmt_ctx_a;
            stream_index=audioindex_out;
            if(av_read_frame(ifmt_ctx, &pkt) >= 0){
                // Skip packets until one from the selected audio stream appears.
                do {
                    in_stream  = ifmt_ctx->streams[pkt.stream_index];
                    out_stream = out_astream;
                    if(pkt.stream_index==audioindex_a) {
                        //Simple Write PTS
                        if(pkt.pts==AV_NOPTS_VALUE) {
                            //Write PTS
                            AVRational time_base1=in_stream->time_base;
                            //Duration between 2 frames (us)
                            int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(in_stream->r_frame_rate);
                            //Parameters: synthesize timestamps from the frame rate.
                            pkt.pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
                            pkt.dts=pkt.pts;
                            pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
                            frame_index++;
                        }
                        cur_pts_a=pkt.pts;
                        break;
                    }
                } while(av_read_frame(ifmt_ctx, &pkt) >= 0);
            } else {
                // Audio EOF for this segment: snapshot offsets, next segment.
                join_index++;
                end_video_pts = last_video_pts;
                end_audio_pts = last_audio_pts;
                break;
            }
        }
        if(usefilter)
            filter_ret = av_bitstream_filter_filter(aacbsfc, out_stream->codec, NULL, &pkt.data,&pkt.size, pkt.data, pkt.size, 0);
        if(filter_ret) {
            retu = -10;
            goto end;
        }
        //Convert PTS/DTS into the output stream's time base.
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base,(enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base,(enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        pkt.stream_index=stream_index;
        // Shift by the accumulated end-of-previous-segment offset and track
        // the running last-PTS for the next segment boundary.
        if(pkt.stream_index == audioindex_out) {
            pkt.pts += end_audio_pts;
            pkt.dts += end_audio_pts;
            last_audio_pts = pkt.pts+pkt.duration;
            // printf("audio pts = %lld ,audio dts = %lld\n",pkt.pts,pkt.dts);
        } else {
            pkt.pts += end_video_pts;
            pkt.dts += end_video_pts;
            last_video_pts = pkt.pts+pkt.duration;
        }
        //Write
        if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
            av_free_packet(&pkt);
            break;
        }
        //av_packet_unref(&pkt);
        //av_interleaved_write_frame(ofmt_ctx, &pkt);
        av_free_packet(&pkt);
    }
end:
    // Per-segment input cleanup; loop back while more segments remain.
    avformat_close_input(&ifmt_ctx_v);
    avformat_close_input(&ifmt_ctx_a);
    avformat_free_context(ifmt_ctx_v);
    avformat_free_context(ifmt_ctx_a);
    if (ret < 0 && ret != AVERROR_EOF) {
    }
    if(join_index < length)
        goto joinone;
    // All segments consumed: finalize the container and tear down.
    av_write_trailer(ofmt_ctx);
    if(usefilter)
        av_bitstream_filter_close(aacbsfc);
    /* close output */
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    return retu;
}
int main_dummy(int argc, char **argv) #endif { AVOutputFormat *ofmt = NULL; AVFormatContext *inVideoFmtCtx = NULL, *inAudioFmtCtx = NULL, *outFmtCtx = NULL; AVPacket pkt; const char *inVideo_filename, *inAudio_filename, *out_filename; int ret, i; if (argc < 3) { printf("usage: %s input output\n" "API example program to remux a media file with libavformat and libavcodec.\n" "The output format is guessed according to the file extension.\n" "\n", argv[0]); return 1; } inVideo_filename = argv[1]; inAudio_filename = argv[2]; out_filename = argv[3]; av_register_all(); /* =============== OPEN STREAMS ================*/ if ((ret = open_input_file2(argv[1], &inVideoFmtCtx)) < 0) goto end; if ((ret = open_input_file2(argv[2], &inAudioFmtCtx)) < 0) goto end; /* ========== ALLOCATE OUTPUT CONTEXT ==========*/ avformat_alloc_output_context2(&outFmtCtx, NULL, NULL, out_filename); if (!outFmtCtx) { fprintf(stderr, "Could not create output context\n"); ret = AVERROR_UNKNOWN; goto end; } ofmt = outFmtCtx->oformat; /* =============== SETUP VIDEO CODEC ================*/ for (i = 0; i < inVideoFmtCtx->nb_streams; i++) { AVStream *in_stream = inVideoFmtCtx->streams[i]; AVStream *out_stream = avformat_new_stream(outFmtCtx, in_stream->codec->codec); if (!out_stream) { fprintf(stderr, "Failed allocating output stream\n"); ret = AVERROR_UNKNOWN; goto end; } ret = avcodec_copy_context(out_stream->codec, in_stream->codec); if (ret < 0) { fprintf(stderr, "Failed to copy context from input to output stream codec context\n"); goto end; } out_stream->codec->codec_tag = 0; if (outFmtCtx->oformat->flags & AVFMT_GLOBALHEADER) out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; } /* =============== SETUP AUDIO CODEC ================*/ for (i = 0; i < inAudioFmtCtx->nb_streams; i++) { setup_mp3_audio_codec(outFmtCtx); } av_dump_format(outFmtCtx, 0, out_filename, 1); if (!(ofmt->flags & AVFMT_NOFILE)) { ret = avio_open(&outFmtCtx->pb, out_filename, AVIO_FLAG_WRITE); if (ret < 0) { fprintf(stderr, 
"Could not open output file '%s'", out_filename); goto end; } } ret = avformat_write_header(outFmtCtx, NULL); if (ret < 0) { fprintf(stderr, "Error occurred when opening output file\n"); goto end; } /* =============== SETUP FILTERS ================*/ init_filters(inAudioFmtCtx, outFmtCtx); AVStream *in_stream, *out_stream; AVFrame* frame; while (1) { /* =============== VIDEO STREAM ================*/ ret = av_read_frame(inVideoFmtCtx, &pkt); if (ret < 0) break; in_stream = inVideoFmtCtx->streams[pkt.stream_index]; out_stream = outFmtCtx->streams[pkt.stream_index]; log_packet(inVideoFmtCtx, &pkt, "in"); /* copy packet */ pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); pkt.duration = (int)av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base); pkt.pos = -1; log_packet(outFmtCtx, &pkt, "out"); ret = av_interleaved_write_frame(outFmtCtx, &pkt); if (ret < 0) { fprintf(stderr, "Error muxing packet\n"); break; } av_free_packet(&pkt); /* =============== AUDIO STREAM ================*/ #if 0 ret = av_read_frame(inAudioFmtCtx, &pkt); if (ret < 0) break; in_stream = inAudioFmtCtx->streams[pkt.stream_index]; pkt.stream_index++; out_stream = outFmtCtx->streams[pkt.stream_index]; log_packet(inAudioFmtCtx, &pkt, "in"); /* copy packet */ pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base); pkt.pos = -1; log_packet(outFmtCtx, &pkt, "out"); ret = av_interleaved_write_frame(outFmtCtx, &pkt); if (ret < 0) { fprintf(stderr, "Error muxing 
packet\n"); break; } av_free_packet(&pkt); #else if ((ret = av_read_frame(inAudioFmtCtx, &pkt)) < 0) break; int streamIndex = pkt.stream_index; int gotFrame; if (_filterCtx[streamIndex].FilterGraph) { av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n"); frame = av_frame_alloc(); if (!frame) { ret = AVERROR(ENOMEM); break; } av_packet_rescale_ts(&pkt, inAudioFmtCtx->streams[streamIndex]->time_base, inAudioFmtCtx->streams[streamIndex]->codec->time_base); ret = avcodec_decode_audio4(inAudioFmtCtx->streams[streamIndex]->codec, frame, &gotFrame, &pkt); if (ret < 0) { av_frame_free(&frame); av_log(NULL, AV_LOG_ERROR, "Decoding failed\n"); break; } if (gotFrame) { frame->pts = av_frame_get_best_effort_timestamp(frame); ret = filter_encode_write_frame(frame, inAudioFmtCtx, outFmtCtx, streamIndex); av_frame_free(&frame); if (ret < 0) goto end; } else { av_frame_free(&frame); } } else { /* remux this frame without reencoding */ av_packet_rescale_ts(&pkt, inAudioFmtCtx->streams[streamIndex]->time_base, outFmtCtx->streams[streamIndex+1]->time_base); ret = av_interleaved_write_frame(outFmtCtx, &pkt); if (ret < 0) goto end; } av_free_packet(&pkt); #endif // 0 } av_write_trailer(outFmtCtx); end: av_free_packet(&pkt); av_frame_free(&frame); for (i = 0; i < inAudioFmtCtx->nb_streams; i++) { avcodec_close(inAudioFmtCtx->streams[i]->codec); if (outFmtCtx && outFmtCtx->nb_streams > i && outFmtCtx->streams[i] && outFmtCtx->streams[i]->codec) avcodec_close(outFmtCtx->streams[i]->codec); if (_filterCtx && _filterCtx[i].FilterGraph) avfilter_graph_free(&_filterCtx[i].FilterGraph); } av_free(_filterCtx); avformat_close_input(&inVideoFmtCtx); avformat_close_input(&inAudioFmtCtx); /* close output */ if (outFmtCtx && !(ofmt->flags & AVFMT_NOFILE)) avio_closep(&outFmtCtx->pb); avformat_free_context(outFmtCtx); if (ret < 0 && ret != AVERROR_EOF) { //fprintf(stderr, "Error occurred: %s\n", av_err2str(ret)); return 1; } return 0; }
/*
 * Builds an mpegts output context mirroring ctx->input_context: the stream at
 * ctx->video_stream_index becomes an H.264 video stream (re-encoded at
 * ENCODED_BITRATE) and the first audio stream found is stream-copied. The new
 * streams are returned through *video_stream / *audio_stream.
 * Exits the process on any failure (matching this file's error style).
 */
static AVFormatContext * init_output_context(const struct transcoder_ctx_t *ctx, AVStream **video_stream, AVStream **audio_stream) {
    AVFormatContext *oc;
    AVOutputFormat *fmt;
    AVStream *input_stream, *output_stream;
    AVCodec *c;
    AVCodecContext *cc;
    int audio_copied = 0; //copy just 1 stream

    fmt = av_guess_format("mpegts", NULL, NULL);
    if (!fmt) {
        fprintf(stderr, "[DEBUG] Error guessing format, dying\n");
        exit(199);
    }

    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "[DEBUG] Error allocating context, dying\n");
        exit(200);
    }

    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", ctx->output_filename);
    oc->debug = 1;
    oc->start_time_realtime = ctx->input_context->start_time;
    oc->start_time = ctx->input_context->start_time;
    oc->duration = 0;
    oc->bit_rate = 0;

    for (int i = 0; i < ctx->input_context->nb_streams; i++) {
        input_stream = ctx->input_context->streams[i];
        output_stream = NULL;
        if (input_stream->index == ctx->video_stream_index) {
            //copy stuff from input video index
            c = avcodec_find_encoder(CODEC_ID_H264);
            output_stream = avformat_new_stream(oc, c);
            /* FIX: avformat_new_stream() can return NULL; the old code
             * dereferenced the result unconditionally. */
            if (!output_stream) {
                fprintf(stderr, "[DEBUG] Error allocating video stream, dying\n");
                exit(201);
            }
            *video_stream = output_stream;
            cc = output_stream->codec;
            cc->width = input_stream->codec->width;
            cc->height = input_stream->codec->height;
#if 0
            cc->width = viddef->nFrameWidth;
            cc->height = viddef->nFrameHeight;
#endif
            cc->codec_id = CODEC_ID_H264;
            cc->codec_type = AVMEDIA_TYPE_VIDEO;
            cc->bit_rate = ENCODED_BITRATE;
            cc->time_base = input_stream->codec->time_base;
            output_stream->avg_frame_rate = input_stream->avg_frame_rate;
            output_stream->r_frame_rate = input_stream->r_frame_rate;
            output_stream->start_time = AV_NOPTS_VALUE;
        } else if ((input_stream->codec->codec_type == AVMEDIA_TYPE_AUDIO) && !audio_copied) {
            /* i care only about audio */
            c = avcodec_find_encoder(input_stream->codec->codec_id);
            output_stream = avformat_new_stream(oc, c);
            if (!output_stream) {
                fprintf(stderr, "[DEBUG] Error allocating audio stream, dying\n");
                exit(202);
            }
            *audio_stream = output_stream;
            /* FIX: avcodec_copy_context() can fail; check it like the other calls. */
            if (avcodec_copy_context(output_stream->codec, input_stream->codec) < 0) {
                fprintf(stderr, "[DEBUG] Error copying audio codec context, dying\n");
                exit(203);
            }
            /* Apparently fixes a crash on .mkvs with attachments: */
            av_dict_copy(&output_stream->metadata, input_stream->metadata, 0);
            /* Reset the codec tag so as not to cause problems with output format */
            output_stream->codec->codec_tag = 0;
            audio_copied = 1;
        }
    }

    for (int i = 0; i < oc->nb_streams; i++) {
        if (oc->oformat->flags & AVFMT_GLOBALHEADER)
            oc->streams[i]->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        if (oc->streams[i]->codec->sample_rate == 0)
            oc->streams[i]->codec->sample_rate = 48000; /* ish */
    }

    if (!(fmt->flags & AVFMT_NOFILE)) {
        /* FIX: the old message claimed AVFMT_NOFILE was set, but this branch
         * runs when it is NOT set and a real output file must be opened. */
        fprintf(stderr, "[DEBUG] AVFMT_NOFILE not set, opening output file\n");
        if (avio_open(&oc->pb, ctx->output_filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "[DEBUG] error creating the output context\n");
            exit(1);
        }
    }

    return oc;
}
int main(int argc, char* argv[]) { AVOutputFormat *ofmt = NULL; //输入对应一个AVFormatContext,输出对应一个AVFormatContext //(Input AVFormatContext and Output AVFormatContext) AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL; AVPacket pkt; const char *in_filename, *out_filename; int ret, i; int videoindex=-1; int frame_index=0; int64_t start_time=0; //in_filename = "cuc_ieschool.mov"; //in_filename = "cuc_ieschool.mkv"; //in_filename = "cuc_ieschool.ts"; //in_filename = "cuc_ieschool.mp4"; //in_filename = "cuc_ieschool.h264"; in_filename = "cuc_ieschool.flv";//输入URL(Input file URL) //in_filename = "shanghai03_p.h264"; out_filename = "rtmp://localhost/publishlive/livestream";//输出 URL(Output URL)[RTMP] //out_filename = "rtp://233.233.233.233:6666";//输出 URL(Output URL)[UDP] av_register_all(); //Network avformat_network_init(); //输入(Input) if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) { printf( "Could not open input file."); goto end; } if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) { printf( "Failed to retrieve input stream information"); goto end; } for(i=0; i<ifmt_ctx->nb_streams; i++) if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){ videoindex=i; break; } av_dump_format(ifmt_ctx, 0, in_filename, 0); //输出(Output) avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_filename); //RTMP //avformat_alloc_output_context2(&ofmt_ctx, NULL, "mpegts", out_filename);//UDP if (!ofmt_ctx) { printf( "Could not create output context\n"); ret = AVERROR_UNKNOWN; goto end; } ofmt = ofmt_ctx->oformat; for (i = 0; i < ifmt_ctx->nb_streams; i++) { //根据输入流创建输出流(Create output AVStream according to input AVStream) AVStream *in_stream = ifmt_ctx->streams[i]; AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec); if (!out_stream) { printf( "Failed allocating output stream\n"); ret = AVERROR_UNKNOWN; goto end; } //复制AVCodecContext的设置(Copy the settings of AVCodecContext) ret = avcodec_copy_context(out_stream->codec, 
in_stream->codec); if (ret < 0) { printf( "Failed to copy context from input to output stream codec context\n"); goto end; } out_stream->codec->codec_tag = 0; if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; } //Dump Format------------------ av_dump_format(ofmt_ctx, 0, out_filename, 1); //打开输出URL(Open output URL) if (!(ofmt->flags & AVFMT_NOFILE)) { ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE); if (ret < 0) { printf( "Could not open output URL '%s'", out_filename); goto end; } } //写文件头(Write file header) ret = avformat_write_header(ofmt_ctx, NULL); if (ret < 0) { printf( "Error occurred when opening output URL\n"); goto end; } start_time=av_gettime(); while (1) { AVStream *in_stream, *out_stream; //获取一个AVPacket(Get an AVPacket) ret = av_read_frame(ifmt_ctx, &pkt); if (ret < 0) break; //FIX:No PTS (Example: Raw H.264) //Simple Write PTS if(pkt.pts==AV_NOPTS_VALUE){ //Write PTS AVRational time_base1=ifmt_ctx->streams[videoindex]->time_base; //Duration between 2 frames (us) int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(ifmt_ctx->streams[videoindex]->r_frame_rate); //Parameters pkt.pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE); pkt.dts=pkt.pts; pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE); } //Important:Delay if(pkt.stream_index==videoindex){ AVRational time_base=ifmt_ctx->streams[videoindex]->time_base; AVRational time_base_q={1,AV_TIME_BASE}; int64_t pts_time = av_rescale_q(pkt.dts, time_base, time_base_q); int64_t now_time = av_gettime() - start_time; if (pts_time > now_time) av_usleep(pts_time - now_time); } in_stream = ifmt_ctx->streams[pkt.stream_index]; out_stream = ofmt_ctx->streams[pkt.stream_index]; /* copy packet */ //转换PTS/DTS(Convert PTS/DTS) pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); pkt.dts = av_rescale_q_rnd(pkt.dts, 
in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base); pkt.pos = -1; //Print to Screen if(pkt.stream_index==videoindex){ printf("Send %8d video frames to output URL\n",frame_index); frame_index++; } //ret = av_write_frame(ofmt_ctx, &pkt); ret = av_interleaved_write_frame(ofmt_ctx, &pkt); if (ret < 0) { printf( "Error muxing packet\n"); break; } av_free_packet(&pkt); } //写文件尾(Write file trailer) av_write_trailer(ofmt_ctx); end: avformat_close_input(&ifmt_ctx); /* close output */ if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE)) avio_close(ofmt_ctx->pb); avformat_free_context(ofmt_ctx); if (ret < 0 && ret != AVERROR_EOF) { printf( "Error occurred.\n"); return -1; } return 0; }