/*
 * AudioToolbox decode callback: feed one AVPacket to the converter and
 * produce decoded audio in `frame`.
 *
 * If the stream is AAC and the packet starts with an ADTS syncword
 * (0xFFFx), the packet is first run through the "aac_adtstoasc"
 * bitstream filter (created lazily on first use) so the decoder gets
 * raw AAC plus out-of-band extradata.  The converter itself is also
 * created lazily from the first (possibly filtered) packet.
 *
 * NOTE(review): if av_bsf_receive_packet() fails, neither filter_pkt
 * nor filtered_packet is unreferenced here — possible packet leak;
 * confirm against upstream ffat.c.
 *
 * NOTE(review): this excerpt is truncated — the function body continues
 * beyond the out_buffers initialization.
 */
static int ffat_decode(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
{
    ATDecodeContext *at = avctx->priv_data;
    AVFrame *frame = data;
    int pkt_size = avpkt->size;
    AVPacket filtered_packet = {0};
    OSStatus ret;
    AudioBufferList out_buffers;
    /* ADTS AAC detection: 12-bit syncword 0xFFF in the first two bytes */
    if (avctx->codec_id == AV_CODEC_ID_AAC && avpkt->size > 2 && (AV_RB16(avpkt->data) & 0xfff0) == 0xfff0) {
        AVPacket filter_pkt = {0};
        if (!at->bsf) {
            /* lazily create the ADTS -> ASC bitstream filter */
            const AVBitStreamFilter *bsf = av_bsf_get_by_name("aac_adtstoasc");
            if(!bsf)
                return AVERROR_BSF_NOT_FOUND;
            if ((ret = av_bsf_alloc(bsf, &at->bsf)))
                return ret;
            if (((ret = avcodec_parameters_from_context(at->bsf->par_in, avctx)) < 0) ||
                ((ret = av_bsf_init(at->bsf)) < 0)) {
                av_bsf_free(&at->bsf);
                return ret;
            }
        }
        /* run the packet through the filter; work on a ref, not the caller's packet */
        if ((ret = av_packet_ref(&filter_pkt, avpkt)) < 0)
            return ret;
        if ((ret = av_bsf_send_packet(at->bsf, &filter_pkt)) < 0) {
            av_packet_unref(&filter_pkt);
            return ret;
        }
        if ((ret = av_bsf_receive_packet(at->bsf, &filtered_packet)) < 0)
            return ret;
        /* expose the filter's generated extradata (ASC) to the converter setup;
         * note this borrows the BSF's buffer rather than copying it */
        at->extradata = at->bsf->par_out->extradata;
        at->extradata_size = at->bsf->par_out->extradata_size;
        avpkt = &filtered_packet;
    }
    /* lazily create the AudioToolbox converter from the first packet */
    if (!at->converter) {
        if ((ret = ffat_create_decoder(avctx, avpkt)) < 0) {
            av_packet_unref(&filtered_packet);
            return ret;
        }
    }
    /* single interleaved output buffer sized for one full frame */
    out_buffers = (AudioBufferList){
        .mNumberBuffers = 1,
        .mBuffers = {
            {
                .mNumberChannels = avctx->channels,
                .mDataByteSize = av_get_bytes_per_sample(avctx->sample_fmt) * avctx->frame_size * avctx->channels,
            }
        }
    };
OutputContext* output_context_new (const char * filename, const AVFrame * input_frame) { int ok = 0; // prepare muxer AVFormatContext * pfc = NULL; avformat_alloc_output_context2(&pfc, NULL, NULL, filename); if (!pfc) { goto failed; } // prepare encoding stream AVStream * pst = avformat_new_stream(pfc, NULL); if (!pst) { goto close_muxer; } // find encoder enum AVCodecID codec_id = av_guess_codec(pfc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_VIDEO); if (codec_id == AV_CODEC_ID_NONE) { goto close_muxer; } AVCodec * pc = avcodec_find_encoder(codec_id); if (!pc) { goto close_muxer; } // prepare encoder AVCodecContext * pcc = avcodec_alloc_context3(pc); pcc->pix_fmt = pc->pix_fmts[0]; pcc->codec_id = pc->id; pcc->codec_type = pc->type; pcc->time_base.num = 1; pcc->time_base.den = 1; pcc->width = input_frame->width; pcc->height = input_frame->height; ok = avcodec_open2(pcc, pc, NULL); if (ok != 0) { goto free_encoder; } ok = avcodec_parameters_from_context(pst->codecpar, pcc); if (ok < 0) { goto free_encoder; } OutputContext * context = malloc(sizeof(OutputContext)); context->format_context = pfc; context->stream = pst; context->codec = pc; context->codec_context = pcc; return context; free_encoder: avcodec_free_context(&pcc); close_muxer: avformat_free_context(pfc); failed: return NULL; }
static bool AVStreamCopyContext(AVStream* stream, AVCodecContext* codec_context) { #if (LIBAVCODEC_VERSION_MICRO >= 100 && LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 33, 100)) || \ (LIBAVCODEC_VERSION_MICRO < 100 && LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 5, 0)) stream->time_base = codec_context->time_base; return avcodec_parameters_from_context(stream->codecpar, codec_context) >= 0; #else return avcodec_copy_context(stream->codec, codec_context) >= 0; #endif }
/*
 * Open the video encoder for this output stream, allocate the frame(s)
 * it will reuse, and copy the encoder parameters into the muxer stream.
 * Exits the process on any failure.
 */
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *enc_ctx = ost->enc;
    AVDictionary *opts = NULL;
    int err;

    /* open the codec using a private copy of the caller's options */
    av_dict_copy(&opts, opt_arg, 0);
    err = avcodec_open2(enc_ctx, codec, &opts);
    av_dict_free(&opts);
    if (err < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(err));
        exit(1);
    }

    /* reusable frame in the encoder's native pixel format */
    ost->frame = alloc_picture(enc_ctx->pix_fmt, enc_ctx->width, enc_ctx->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* a YUV420P staging frame is only needed when the encoder wants a
     * different format; it is converted before encoding */
    ost->tmp_frame = NULL;
    if (enc_ctx->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, enc_ctx->width, enc_ctx->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* expose the encoder parameters to the muxer */
    err = avcodec_parameters_from_context(ost->st->codecpar, enc_ctx);
    if (err < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}
/*
 * Open this stream's pre-configured video encoder, allocate the raw
 * picture(s) used during encoding, and copy the encoder parameters to
 * the muxer stream.  Exits the process on any failure.
 */
static void open_video(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *enc = ost->enc;
    int err;

    /* the context was fully configured beforehand; no codec/options here */
    if (avcodec_open2(enc, NULL, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* reusable picture in the encoder's native pixel format */
    ost->frame = alloc_picture(enc->pix_fmt, enc->width, enc->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* only allocate a YUV420P staging picture when conversion is needed */
    ost->tmp_frame = NULL;
    if (enc->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, enc->width, enc->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* expose the encoder parameters to the muxer */
    err = avcodec_parameters_from_context(ost->st->codecpar, enc);
    if (err < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}
/*
 * Create a video encoder on a fresh stream of this muxer.
 * Prepares the stream/codec-context pair, constructs the VideoEncoder,
 * registers it in m_encoders by stream index and (on codecpar builds)
 * copies the codec parameters into the stream.  The options dictionary
 * is freed on both the success and the exception path.
 */
VideoEncoder* Muxer::AddVideoEncoder(const QString& codec_name, const std::vector<std::pair<QString, QString> >& codec_options, unsigned int bit_rate, unsigned int width, unsigned int height, unsigned int frame_rate) {
	AVCodec *codec = FindCodec(codec_name);
	AVCodecContext *codec_context = NULL;
	AVStream *stream = AddStream(codec, &codec_context);
	AVDictionary *opts = NULL;
	VideoEncoder *encoder;
	try {
		VideoEncoder::PrepareStream(stream, codec_context, codec, &opts, codec_options, bit_rate, width, height, frame_rate);
		encoder = new VideoEncoder(this, stream, codec_context, codec, &opts);
		m_encoders[stream->index] = encoder;
#if SSR_USE_AVSTREAM_CODECPAR
		if(avcodec_parameters_from_context(stream->codecpar, codec_context) < 0) {
			Logger::LogError("[Muxer::AddVideoEncoder] " + Logger::tr("Error: Can't copy parameters to stream!"));
			throw LibavException();
		}
#endif
		av_dict_free(&opts);
	} catch(...) {
		av_dict_free(&opts);
		throw;
	}
	return encoder;
}
/*
 * Open this stream's pre-configured audio encoder, initialize the sine
 * sweep signal generator state, allocate the working audio frames, and
 * copy the encoder parameters to the muxer stream.  Exits on failure.
 */
static void open_audio(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *enc = ost->enc;
    int frame_samples;
    int err;

    /* the context was fully configured beforehand; no codec/options here */
    if (avcodec_open2(enc, NULL, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* signal generator: 110 Hz tone whose frequency rises 110 Hz per second */
    ost->t = 0;
    ost->tincr = 2 * M_PI * 110.0 / enc->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / enc->sample_rate / enc->sample_rate;

    /* variable-frame-size codecs get a generous fixed buffer */
    frame_samples = (enc->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
                        ? 10000
                        : enc->frame_size;

    ost->frame = alloc_audio_frame(enc->sample_fmt, enc->channel_layout, enc->sample_rate, frame_samples);
    /* staging frame in the generator's native S16 stereo 44.1 kHz layout */
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, AV_CH_LAYOUT_STEREO, 44100, frame_samples);

    /* expose the encoder parameters to the muxer */
    err = avcodec_parameters_from_context(ost->st->codecpar, enc);
    if (err < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}
/** * Open an output file and the required encoder. * Also set some basic encoder parameters. * Some of these parameters are based on the input file's parameters. * @param filename File to be opened * @param input_codec_context Codec context of input file * @param[out] output_format_context Format context of output file * @param[out] output_codec_context Codec context of output file * @return Error code (0 if successful) */ int Transcode::open_output_file(const char *filename, AVCodecContext *input_codec_context, AVFormatContext **output_format_context, AVCodecContext **output_codec_context) { AVCodecContext *avctx = NULL; AVIOContext *output_io_context = NULL; AVStream *stream = NULL; AVCodec *output_codec = NULL; int error; /* Open the output file to write to it. */ if ((error = avio_open(&output_io_context, filename, AVIO_FLAG_WRITE)) < 0) { fprintf(stderr, "Could not open output file '%s' (error '%s')\n", filename, av_cplus_err2str(error)); return error; } /* Create a new format context for the output container format. */ if (!(*output_format_context = avformat_alloc_context())) { fprintf(stderr, "Could not allocate output format context\n"); return AVERROR(ENOMEM); } /* Associate the output file (pointer) with the container format context. */ (*output_format_context)->pb = output_io_context; /* Guess the desired container format based on the file extension. */ if (!((*output_format_context)->oformat = av_guess_format(NULL, filename, NULL))) { fprintf(stderr, "Could not find output file format\n"); goto cleanup; } av_strlcpy((*output_format_context)->filename, filename, sizeof((*output_format_context)->filename)); /* Find the encoder to be used by its name. */ //if (!(output_codec = avcodec_find_encoder(AV_CODEC_ID_AAC))) { if (!(output_codec = avcodec_find_encoder(AV_CODEC_ID_VORBIS))) { fprintf(stderr, "Could not find an OGG encoder.\n"); goto cleanup; } /* Create a new audio stream in the output file container. 
*/ if (!(stream = avformat_new_stream(*output_format_context, NULL))) { fprintf(stderr, "Could not create new stream\n"); error = AVERROR(ENOMEM); goto cleanup; } avctx = avcodec_alloc_context3(output_codec); if (!avctx) { fprintf(stderr, "Could not allocate an encoding context\n"); error = AVERROR(ENOMEM); goto cleanup; } /* Set the basic encoder parameters. * The input file's sample rate is used to avoid a sample rate conversion. */ avctx->channels = this->audio_dst.channels; avctx->channel_layout = av_get_default_channel_layout(this->audio_dst.channels); avctx->sample_rate = input_codec_context->sample_rate; avctx->sample_fmt = output_codec->sample_fmts[0]; avctx->bit_rate = this->audio_dst.bitrate; /* Allow the use of the experimental AAC encoder. */ avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL; /* Set the sample rate for the container. */ stream->time_base.den = input_codec_context->sample_rate; stream->time_base.num = 1; /* Some container formats (like MP4) require global headers to be present. * Mark the encoder so that it behaves accordingly. */ if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER) avctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; /* Open the encoder for the audio stream to use it later. */ if ((error = avcodec_open2(avctx, output_codec, NULL)) < 0) { fprintf(stderr, "Could not open output codec (error '%s')\n", av_cplus_err2str(error)); goto cleanup; } error = avcodec_parameters_from_context(stream->codecpar, avctx); if (error < 0) { fprintf(stderr, "Could not initialize stream parameters\n"); goto cleanup; } /* Save the encoder context for easier access later. */ *output_codec_context = avctx; return 0; cleanup: avcodec_free_context(&avctx); avio_closep(&(*output_format_context)->pb); avformat_free_context(*output_format_context); *output_format_context = NULL; return error < 0 ? error : AVERROR_EXIT; }
/*
 * Open the (already selected) encoder for the given codec context and
 * copy its parameters into the corresponding output stream.
 *
 * For both video and audio: if the chosen encoder is experimental, the
 * compliance level is relaxed and a loud warning is printed.  After
 * avcodec_open2() any options left over in the dictionary were not
 * consumed by the encoder, so each one is reported as unknown before
 * the dictionary is freed.  Returns the last libavcodec error (or -1
 * for an unexpected codec type); on error, encode_lavc_fail() is also
 * invoked.
 */
int encode_lavc_open_codec(struct encode_lavc_context *ctx, AVCodecContext *codec)
{
    AVDictionaryEntry *de;
    int ret;

    CHECK_FAIL(ctx, -1);

    switch (codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        MP_INFO(ctx, "Opening video encoder: %s [%s]\n", ctx->vc->long_name, ctx->vc->name);
        if (ctx->vc->capabilities & AV_CODEC_CAP_EXPERIMENTAL) {
            /* experimental encoders refuse to open unless compliance is relaxed */
            codec->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
            MP_WARN(ctx, "\n\n"
                    " ********************************************\n"
                    " **** Experimental VIDEO codec selected! ****\n"
                    " ********************************************\n\n"
                    "This means the output file may be broken or bad.\n"
                    "Possible reasons, problems, workarounds:\n"
                    "- Codec implementation in ffmpeg/libav is not finished yet.\n"
                    " Try updating ffmpeg or libav.\n"
                    "- Bad picture quality, blocks, blurriness.\n"
                    " Experiment with codec settings (--ovcopts) to maybe still get the\n"
                    " desired quality output at the expense of bitrate.\n"
                    "- Slow compression.\n"
                    " Bear with it.\n"
                    "- Crashes.\n"
                    " Happens. Try varying options to work around.\n"
                    "If none of this helps you, try another codec in place of %s.\n\n",
                    ctx->vc->name);
        }
        ret = avcodec_open2(codec, ctx->vc, &ctx->voptions);
        if (ret >= 0)
            /* publish the opened encoder's parameters on the video stream */
            ret = avcodec_parameters_from_context(ctx->vst->codecpar, codec);
        // complain about all remaining options, then free the dict
        for (de = NULL; (de = av_dict_get(ctx->voptions, "", de, AV_DICT_IGNORE_SUFFIX));)
            MP_WARN(ctx, "ovcopts: key '%s' not found.\n", de->key);
        av_dict_free(&ctx->voptions);
        break;
    case AVMEDIA_TYPE_AUDIO:
        MP_INFO(ctx, "Opening audio encoder: %s [%s]\n", ctx->ac->long_name, ctx->ac->name);
        if (ctx->ac->capabilities & AV_CODEC_CAP_EXPERIMENTAL) {
            /* same relaxation as the video branch */
            codec->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
            MP_WARN(ctx, "\n\n"
                    " ********************************************\n"
                    " **** Experimental AUDIO codec selected! ****\n"
                    " ********************************************\n\n"
                    "This means the output file may be broken or bad.\n"
                    "Possible reasons, problems, workarounds:\n"
                    "- Codec implementation in ffmpeg/libav is not finished yet.\n"
                    " Try updating ffmpeg or libav.\n"
                    "- Bad sound quality, noise, clicking, whistles, choppiness.\n"
                    " Experiment with codec settings (--oacopts) to maybe still get the\n"
                    " desired quality output at the expense of bitrate.\n"
                    "- Slow compression.\n"
                    " Bear with it.\n"
                    "- Crashes.\n"
                    " Happens. Try varying options to work around.\n"
                    "If none of this helps you, try another codec in place of %s.\n\n",
                    ctx->ac->name);
        }
        ret = avcodec_open2(codec, ctx->ac, &ctx->aoptions);
        if (ret >= 0)
            /* publish the opened encoder's parameters on the audio stream */
            ret = avcodec_parameters_from_context(ctx->ast->codecpar, codec);
        // complain about all remaining options, then free the dict
        for (de = NULL; (de = av_dict_get(ctx->aoptions, "", de, AV_DICT_IGNORE_SUFFIX));)
            MP_WARN(ctx, "oacopts: key '%s' not found.\n", de->key);
        av_dict_free(&ctx->aoptions);
        break;
    default:
        ret = -1;
        break;
    }

    if (ret < 0)
        encode_lavc_fail(ctx, "unable to open encoder (see above for the cause)\n");

    return ret;
}
int janus_pp_h264_create(char *destination, char *metadata) { if(destination == NULL) return -1; /* Setup FFmpeg */ av_register_all(); /* Adjust logging to match the postprocessor's */ av_log_set_level(janus_log_level <= LOG_NONE ? AV_LOG_QUIET : (janus_log_level == LOG_FATAL ? AV_LOG_FATAL : (janus_log_level == LOG_ERR ? AV_LOG_ERROR : (janus_log_level == LOG_WARN ? AV_LOG_WARNING : (janus_log_level == LOG_INFO ? AV_LOG_INFO : (janus_log_level == LOG_VERB ? AV_LOG_VERBOSE : AV_LOG_DEBUG)))))); /* MP4 output */ fctx = avformat_alloc_context(); if(fctx == NULL) { JANUS_LOG(LOG_ERR, "Error allocating context\n"); return -1; } /* We save the metadata part as a comment (see #1189) */ if(metadata) av_dict_set(&fctx->metadata, "comment", metadata, 0); fctx->oformat = av_guess_format("mp4", NULL, NULL); if(fctx->oformat == NULL) { JANUS_LOG(LOG_ERR, "Error guessing format\n"); return -1; } snprintf(fctx->filename, sizeof(fctx->filename), "%s", destination); #ifdef USE_CODECPAR AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_H264); if(!codec) { /* Error opening video codec */ JANUS_LOG(LOG_ERR, "Encoder not available\n"); return -1; } fctx->video_codec = codec; fctx->oformat->video_codec = codec->id; vStream = avformat_new_stream(fctx, codec); vStream->id = fctx->nb_streams-1; vEncoder = avcodec_alloc_context3(codec); vEncoder->width = max_width; vEncoder->height = max_height; vEncoder->time_base = (AVRational){ 1, fps }; vEncoder->pix_fmt = AV_PIX_FMT_YUV420P; vEncoder->flags |= CODEC_FLAG_GLOBAL_HEADER; if(avcodec_open2(vEncoder, codec, NULL) < 0) { /* Error opening video codec */ JANUS_LOG(LOG_ERR, "Encoder error\n"); return -1; } avcodec_parameters_from_context(vStream->codecpar, vEncoder); #else vStream = avformat_new_stream(fctx, 0); if(vStream == NULL) { JANUS_LOG(LOG_ERR, "Error adding stream\n"); return -1; } #if LIBAVCODEC_VER_AT_LEAST(53, 21) avcodec_get_context_defaults3(vStream->codec, AVMEDIA_TYPE_VIDEO); #else avcodec_get_context_defaults2(vStream->codec, 
AVMEDIA_TYPE_VIDEO); #endif #if LIBAVCODEC_VER_AT_LEAST(54, 25) vStream->codec->codec_id = AV_CODEC_ID_H264; #else vStream->codec->codec_id = CODEC_ID_H264; #endif vStream->codec->codec_type = AVMEDIA_TYPE_VIDEO; vStream->codec->time_base = (AVRational){1, fps}; vStream->time_base = (AVRational){1, 90000}; vStream->codec->width = max_width; vStream->codec->height = max_height; vStream->codec->pix_fmt = PIX_FMT_YUV420P; //~ if (fctx->flags & AVFMT_GLOBALHEADER) vStream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; #endif if(avio_open(&fctx->pb, fctx->filename, AVIO_FLAG_WRITE) < 0) { JANUS_LOG(LOG_ERR, "Error opening file for output\n"); return -1; } if(avformat_write_header(fctx, NULL) < 0) { JANUS_LOG(LOG_ERR, "Error writing header\n"); return -1; } return 0; }
/*
 * Find the first video stream in the opened container, allocate and
 * populate a decoder context for it, set up any bitstream filter the
 * codec needs (mp4-style H.264/HEVC -> Annex B, MPEG-4 B-frame
 * unpacking), and fill _videoStreamInfo.  On success the allocated
 * AVCodecContext is owned by _videoStreamInfo.priv.
 *
 * Fixes vs. previous version:
 *  - the AVCodecContext allocated for non-video streams was leaked on
 *    every loop iteration; it is now freed before continuing;
 *  - _videoStreamInfo.priv was assigned before validation, leaving a
 *    dangling pointer on the failure paths; it is now set only after
 *    the codec/pixel-format checks pass;
 *  - the HEVC branch logged "av_bitstream_filter_init failed" for a
 *    failed av_bsf_get_by_name() lookup.
 */
STATUS DemuxerLibAV::selectVideoStream() {
	if (!_initialized) {
		log->printf("DemuxerLibAV::selectVideoStream(): demuxer not opened!\n");
		return S_FAIL;
	}
	for (U32 i = 0; i < _afc->nb_streams; i++) {
		AVStream *stream = _afc->streams[i];
		AVCodec *codec = avcodec_find_decoder(stream->codecpar->codec_id);
		if (codec == NULL) {
			log->printf("DemuxerLibAV::selectVideoStream(): avcodec_find_decoder failed!\n");
			return S_FAIL;
		}
		AVCodecContext *cc = avcodec_alloc_context3(codec);
		if (cc == NULL) {
			log->printf("DemuxerLibAV::selectVideoStream(): avcodec_alloc_context3 failed!\n");
			return S_FAIL;
		}
		if (avcodec_parameters_to_context(cc, stream->codecpar) < 0) {
			log->printf("DemuxerLibAV::selectVideoStream(): avcodec_parameters_to_context failed!\n");
			avcodec_free_context(&cc);
			return S_FAIL;
		}
		if (cc->codec_type != AVMEDIA_TYPE_VIDEO) {
			// fix: previously leaked one codec context per non-video stream
			avcodec_free_context(&cc);
			continue;
		}
		_videoStream = stream;
		// Shared bitstream-filter setup; returns false after logging on error.
		// On failure _bsf is left freed/NULL.
		auto setupBsf = [&](const char *name) -> bool {
			const AVBitStreamFilter *bsf = av_bsf_get_by_name(name);
			if (bsf == nullptr) {
				log->printf("DemuxerLibAV::selectVideoStream(): av_bsf_get_by_name failed!\n");
				return false;
			}
			if (av_bsf_alloc(bsf, &_bsf) < 0) {
				log->printf("DemuxerLibAV::selectVideoStream(): av_bsf_alloc failed!\n");
				return false;
			}
			if (avcodec_parameters_from_context(_bsf->par_in, cc) < 0) {
				log->printf("DemuxerLibAV::selectVideoStream(): avcodec_parameters_from_context failed!\n");
				av_bsf_free(&_bsf);
				return false;
			}
			_bsf->time_base_in = cc->time_base;
			if (av_bsf_init(_bsf) < 0) {
				log->printf("DemuxerLibAV::selectVideoStream(): av_bsf_init failed!\n");
				av_bsf_free(&_bsf);
				return false;
			}
			return true;
		};
		bool bsfOk = true;
		if (cc->codec_id == AV_CODEC_ID_H264) {
			// extradata starting with 1 == avcC (MP4-style) -> need Annex B
			if (cc->extradata && cc->extradata_size >= 8 && cc->extradata[0] == 1) {
				bsfOk = setupBsf("h264_mp4toannexb");
			}
		} else if (cc->codec_id == AV_CODEC_ID_MPEG4) {
			bsfOk = setupBsf("mpeg4_unpack_bframes");
		} else if (cc->codec_id == AV_CODEC_ID_HEVC) {
			// extradata starting with 1 == hvcC (MP4-style) -> need Annex B
			if (cc->extradata && cc->extradata_size >= 8 && cc->extradata[0] == 1) {
				bsfOk = setupBsf("hevc_mp4toannexb");
			}
		}
		if (!bsfOk) {
			avcodec_free_context(&cc);
			return S_FAIL;
		}
		_videoStreamInfo.width = (U32)cc->width;
		_videoStreamInfo.height = (U32)cc->height;
		// NOTE(review): codec context time_base may be unset after
		// avcodec_parameters_to_context() — confirm callers handle 0
		_videoStreamInfo.timeBaseScale = (U32)cc->time_base.num;
		_videoStreamInfo.timeBaseRate = (U32)cc->time_base.den;
		switch (cc->codec_id) {
		case AV_CODEC_ID_MPEG1VIDEO: _videoStreamInfo.codecId = CODEC_ID_MPEG1VIDEO; break;
		case AV_CODEC_ID_MPEG2VIDEO: _videoStreamInfo.codecId = CODEC_ID_MPEG2VIDEO; break;
		case AV_CODEC_ID_H261:       _videoStreamInfo.codecId = CODEC_ID_H261;       break;
		case AV_CODEC_ID_H263:       _videoStreamInfo.codecId = CODEC_ID_H263;       break;
		case AV_CODEC_ID_MPEG4:      _videoStreamInfo.codecId = CODEC_ID_MPEG4;      break;
		case AV_CODEC_ID_MSMPEG4V1:  _videoStreamInfo.codecId = CODEC_ID_MSMPEG4V1;  break;
		case AV_CODEC_ID_MSMPEG4V2:  _videoStreamInfo.codecId = CODEC_ID_MSMPEG4V2;  break;
		case AV_CODEC_ID_MSMPEG4V3:  _videoStreamInfo.codecId = CODEC_ID_MSMPEG4V3;  break;
		case AV_CODEC_ID_H263P:      _videoStreamInfo.codecId = CODEC_ID_H263P;      break;
		case AV_CODEC_ID_H263I:      _videoStreamInfo.codecId = CODEC_ID_H263I;      break;
		case AV_CODEC_ID_FLV1:       _videoStreamInfo.codecId = CODEC_ID_FLV1;       break;
		case AV_CODEC_ID_SVQ1:       _videoStreamInfo.codecId = CODEC_ID_SVQ1;       break;
		case AV_CODEC_ID_SVQ3:       _videoStreamInfo.codecId = CODEC_ID_SVQ3;       break;
		case AV_CODEC_ID_AIC:        _videoStreamInfo.codecId = CODEC_ID_AIC;        break;
		case AV_CODEC_ID_DVVIDEO:    _videoStreamInfo.codecId = CODEC_ID_DVVIDEO;    break;
		case AV_CODEC_ID_VP3:        _videoStreamInfo.codecId = CODEC_ID_VP3;        break;
		case AV_CODEC_ID_VP5:        _videoStreamInfo.codecId = CODEC_ID_VP5;        break;
		case AV_CODEC_ID_VP6:        _videoStreamInfo.codecId = CODEC_ID_VP6;        break;
		case AV_CODEC_ID_VP6A:       _videoStreamInfo.codecId = CODEC_ID_VP6A;       break;
		case AV_CODEC_ID_VP6F:       _videoStreamInfo.codecId = CODEC_ID_VP6F;       break;
		case AV_CODEC_ID_VP7:        _videoStreamInfo.codecId = CODEC_ID_VP7;        break;
		case AV_CODEC_ID_VP8:        _videoStreamInfo.codecId = CODEC_ID_VP8;        break;
		case AV_CODEC_ID_VP9:        _videoStreamInfo.codecId = CODEC_ID_VP9;        break;
		case AV_CODEC_ID_WEBP:       _videoStreamInfo.codecId = CODEC_ID_WEBP;       break;
		case AV_CODEC_ID_THEORA:     _videoStreamInfo.codecId = CODEC_ID_THEORA;     break;
		case AV_CODEC_ID_RV10:       _videoStreamInfo.codecId = CODEC_ID_RV10;       break;
		case AV_CODEC_ID_RV20:       _videoStreamInfo.codecId = CODEC_ID_RV20;       break;
		case AV_CODEC_ID_RV30:       _videoStreamInfo.codecId = CODEC_ID_RV30;       break;
		case AV_CODEC_ID_RV40:       _videoStreamInfo.codecId = CODEC_ID_RV40;       break;
		case AV_CODEC_ID_WMV1:       _videoStreamInfo.codecId = CODEC_ID_WMV1;       break;
		case AV_CODEC_ID_WMV2:       _videoStreamInfo.codecId = CODEC_ID_WMV2;       break;
		case AV_CODEC_ID_WMV3:       _videoStreamInfo.codecId = CODEC_ID_WMV3;       break;
		case AV_CODEC_ID_VC1:        _videoStreamInfo.codecId = CODEC_ID_VC1;        break;
		case AV_CODEC_ID_H264:       _videoStreamInfo.codecId = CODEC_ID_H264;       break;
		case AV_CODEC_ID_HEVC:       _videoStreamInfo.codecId = CODEC_ID_HEVC;       break;
		default:
			_videoStreamInfo.codecId = CODEC_ID_NONE;
			log->printf("DemuxerLibAV::selectVideoStream(): Unknown codec: 0x%08x!\n", cc->codec_id);
			avcodec_free_context(&cc);
			return S_FAIL;
		}
		switch (cc->pix_fmt) {
		case AV_PIX_FMT_RGB24:   _videoStreamInfo.pixelfmt = FMT_RGB24;   break;
		case AV_PIX_FMT_ARGB:    _videoStreamInfo.pixelfmt = FMT_ARGB;    break;
		case AV_PIX_FMT_YUV420P: _videoStreamInfo.pixelfmt = FMT_YUV420P; break;
		case AV_PIX_FMT_YUV422P: _videoStreamInfo.pixelfmt = FMT_YUV422P; break;
		case AV_PIX_FMT_YUV444P: _videoStreamInfo.pixelfmt = FMT_YUV444P; break;
		case AV_PIX_FMT_NV12:    _videoStreamInfo.pixelfmt = FMT_NV12;    break;
		default:
			_videoStreamInfo.pixelfmt = FMT_NONE;
			log->printf("DemuxerLibAV::selectVideoStream(): Unknown pixel format: 0x%08x!\n", cc->pix_fmt);
			avcodec_free_context(&cc);
			return S_FAIL;
		}
		// fix: transfer ownership only once everything validated
		_videoStreamInfo.priv = cc;
		return S_OK;
	}
	return S_FAIL;
}
/*
 * Open the audio encoder, initialize the sine sweep generator state,
 * allocate the working frames, copy the encoder parameters into the
 * muxer stream, and build the S16 -> encoder-format resampler.
 * Exits the process on any failure.
 */
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *enc = ost->enc;
    AVDictionary *opts = NULL;
    int frame_samples;
    int err;

    /* open the codec using a private copy of the caller's options */
    av_dict_copy(&opts, opt_arg, 0);
    err = avcodec_open2(enc, codec, &opts);
    av_dict_free(&opts);
    if (err < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(err));
        exit(1);
    }

    /* signal generator: 110 Hz tone whose frequency rises 110 Hz per second */
    ost->t = 0;
    ost->tincr = 2 * M_PI * 110.0 / enc->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / enc->sample_rate / enc->sample_rate;

    /* variable-frame-size codecs get a generous fixed buffer */
    frame_samples = (enc->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
                        ? 10000
                        : enc->frame_size;

    ost->frame = alloc_audio_frame(enc->sample_fmt, enc->channel_layout, enc->sample_rate, frame_samples);
    /* staging frame in the generator's native S16 format */
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, enc->channel_layout, enc->sample_rate, frame_samples);

    /* expose the encoder parameters to the muxer */
    err = avcodec_parameters_from_context(ost->st->codecpar, enc);
    if (err < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }

    /* create resampler context: S16 in, encoder's sample format out */
    ost->swr_ctx = swr_alloc();
    if (!ost->swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        exit(1);
    }
    av_opt_set_int       (ost->swr_ctx, "in_channel_count",  enc->channels,     0);
    av_opt_set_int       (ost->swr_ctx, "in_sample_rate",    enc->sample_rate,  0);
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",     AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int       (ost->swr_ctx, "out_channel_count", enc->channels,     0);
    av_opt_set_int       (ost->swr_ctx, "out_sample_rate",   enc->sample_rate,  0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",    enc->sample_fmt,   0);

    if ((err = swr_init(ost->swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        exit(1);
    }
}
/*
 * Initialize the MediaCodec H.264 decoder wrapper: build the Android
 * media format (mime/size/csd buffers), start the decoder, allocate the
 * input-packet FIFO, and set up the h264_mp4toannexb bitstream filter.
 *
 * avcC (MP4-style) extradata — first byte == 1 — is converted to Annex B
 * SPS/PPS and passed as csd-0/csd-1; Annex B extradata is passed through
 * as csd-0.  On any error, mediacodec_decode_close() releases whatever
 * was initialized.
 */
static av_cold int mediacodec_decode_init(AVCodecContext *avctx)
{
    int ret;
    FFAMediaFormat *format = NULL;
    MediaCodecH264DecContext *s = avctx->priv_data;

    format = ff_AMediaFormat_new();
    if (!format) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create media format\n");
        ret = AVERROR_EXTERNAL;
        goto done;
    }

    ff_AMediaFormat_setString(format, "mime", CODEC_MIME);
    ff_AMediaFormat_setInt32(format, "width", avctx->width);
    ff_AMediaFormat_setInt32(format, "height", avctx->height);

    /* fix: guard against missing extradata before dereferencing it */
    if (avctx->extradata && avctx->extradata_size > 0 && avctx->extradata[0] == 1) {
        uint8_t *extradata = NULL;
        int extradata_size = 0;
        int sps_offset, sps_size;
        int pps_offset, pps_size;

        if ((ret = h264_extradata_to_annexb_sps_pps(avctx, &extradata, &extradata_size,
                &sps_offset, &sps_size, &pps_offset, &pps_size)) < 0) {
            goto done;
        }

        ff_AMediaFormat_setBuffer(format, "csd-0", extradata + sps_offset, sps_size);
        ff_AMediaFormat_setBuffer(format, "csd-1", extradata + pps_offset, pps_size);
        av_freep(&extradata);
    } else if (avctx->extradata) {
        ff_AMediaFormat_setBuffer(format, "csd-0", avctx->extradata, avctx->extradata_size);
    }

    if ((ret = ff_mediacodec_dec_init(avctx, &s->ctx, CODEC_MIME, format)) < 0) {
        goto done;
    }

    av_log(avctx, AV_LOG_INFO, "MediaCodec started successfully, ret = %d\n", ret);

    s->fifo = av_fifo_alloc(sizeof(AVPacket));
    if (!s->fifo) {
        ret = AVERROR(ENOMEM);
        goto done;
    }

    /* MediaCodec consumes Annex B only; convert MP4-packed input packets */
    const AVBitStreamFilter *bsf = av_bsf_get_by_name("h264_mp4toannexb");
    if (!bsf) {
        ret = AVERROR_BSF_NOT_FOUND;
        goto done;
    }
    if ((ret = av_bsf_alloc(bsf, &s->bsf))) {
        goto done;
    }
    if (((ret = avcodec_parameters_from_context(s->bsf->par_in, avctx)) < 0) ||
        ((ret = av_bsf_init(s->bsf)) < 0)) {
        goto done;
    }

    av_init_packet(&s->filtered_pkt);

done:
    if (format) {
        ff_AMediaFormat_delete(format);
    }
    if (ret < 0) {
        mediacodec_decode_close(avctx);
    }
    return ret;
}
/*
 * Legacy bitstream-filter entry point implemented on top of the modern
 * AVBSFContext API.
 *
 * On first call the wrapped AVBSFContext is created lazily: parameters
 * are copied from avctx, optional filter args are applied (first option
 * name acts as shorthand key), and the filter is initialized.  Each call
 * then pushes the caller's buffer as a packet, pulls one filtered packet
 * back, and returns it as a newly av_malloc'ed buffer in *poutbuf.
 * Remaining queued output packets are drained and dropped, since this
 * legacy API can only return one buffer per call.  On the first
 * successful pass, extradata produced by the filter is copied back into
 * avctx (unless the args opt out via "private_spspps_buf").
 *
 * Returns 1 when a filtered buffer was produced, 0 when the filter needs
 * more input / reached EOF, or a negative AVERROR.
 */
int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args,
                               uint8_t **poutbuf, int *poutbuf_size,
                               const uint8_t *buf, int buf_size, int keyframe)
{
    BSFCompatContext *priv = bsfc->priv_data;
    AVPacket pkt = { 0 };
    int ret;

    if (!priv->ctx) {
        /* lazy one-time construction of the modern BSF context */
        ret = av_bsf_alloc(bsfc->filter, &priv->ctx);
        if (ret < 0)
            return ret;

        ret = avcodec_parameters_from_context(priv->ctx->par_in, avctx);
        if (ret < 0)
            return ret;

        priv->ctx->time_base_in = avctx->time_base;

        if (bsfc->args && bsfc->filter->priv_class) {
            /* the first declared option doubles as the shorthand key */
            const AVOption *opt = av_opt_next(priv->ctx->priv_data, NULL);
            const char * shorthand[2] = {NULL};

            if (opt)
                shorthand[0] = opt->name;

            /* NOTE(review): this return value is overwritten by
             * av_bsf_init() below, so option-parse errors are ignored */
            ret = av_opt_set_from_string(priv->ctx->priv_data, bsfc->args, shorthand, "=", ":");
        }

        ret = av_bsf_init(priv->ctx);
        if (ret < 0)
            return ret;
    }

    /* wrap the caller's buffer without taking ownership */
    pkt.data = buf;
    pkt.size = buf_size;

    ret = av_bsf_send_packet(priv->ctx, &pkt);
    if (ret < 0)
        return ret;

    *poutbuf = NULL;
    *poutbuf_size = 0;

    ret = av_bsf_receive_packet(priv->ctx, &pkt);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;
    else if (ret < 0)
        return ret;

    /* legacy API returns a plain malloc'ed buffer, not a packet */
    *poutbuf = av_malloc(pkt.size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!*poutbuf) {
        av_packet_unref(&pkt);
        return AVERROR(ENOMEM);
    }

    *poutbuf_size = pkt.size;
    memcpy(*poutbuf, pkt.data, pkt.size);

    av_packet_unref(&pkt);

    /* drain all the remaining packets we cannot return */
    while (ret >= 0) {
        ret = av_bsf_receive_packet(priv->ctx, &pkt);
        av_packet_unref(&pkt);
    }

    if (!priv->extradata_updated) {
        /* update extradata in avctx from the output codec parameters */
        if (priv->ctx->par_out->extradata_size && (!args || !strstr(args, "private_spspps_buf"))) {
            av_freep(&avctx->extradata);
            avctx->extradata_size = 0;
            avctx->extradata = av_mallocz(priv->ctx->par_out->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
            if (!avctx->extradata)
                return AVERROR(ENOMEM);
            memcpy(avctx->extradata, priv->ctx->par_out->extradata, priv->ctx->par_out->extradata_size);
            avctx->extradata_size = priv->ctx->par_out->extradata_size;
        }
        priv->extradata_updated = 1;
    }

    return 1;
}
/*
 * Initialize the encoder pipeline for the requested output file:
 * output/format context, video stream + codec context, codec open,
 * file open, container header, and the SWScale colorspace converter.
 * Returns false (after reporting via error()) on any failure; returns
 * true when the encoder is ready to accept frames.
 */
bool VideoEncoder::init(const Desc& desc)
{
    // Register the codecs
    av_register_all();

    // create the output context (container guessed from the file extension)
    avformat_alloc_output_context2(&mpOutputContext, nullptr, nullptr, mFilename.c_str());
    if(mpOutputContext == nullptr)
    {
        // The sample tries again, while explicitly requesting mpeg format. I chose not to do it, since it might lead to a container with a wrong extension
        return error(mFilename, "File output format not recognized. Make sure you use a known file extension (avi/mpeg/mp4)");
    }

    // Get the output format of the container
    AVOutputFormat* pOutputFormat = mpOutputContext->oformat;
    assert((pOutputFormat->flags & AVFMT_NOFILE) == 0); // Problem. We want a file.

    // create the video codec and its stream
    AVCodec* pVideoCodec;
    mpOutputStream = createVideoStream(mpOutputContext, desc.fps, getCodecID(desc.codec), mFilename, pVideoCodec);
    if(mpOutputStream == nullptr)
    {
        return false;
    }

    mpCodecContext = createCodecContext(mpOutputContext, desc.width, desc.height, desc.fps, desc.bitrateMbps, desc.gopSize, getCodecID(desc.codec), pVideoCodec);
    if(mpCodecContext == nullptr)
    {
        return false;
    }

    // Open the video stream (also allocates mpFrame)
    if(openVideo(pVideoCodec, mpCodecContext, mpFrame, mFilename) == false)
    {
        return false;
    }

    // copy the stream parameters to the muxer
    if(avcodec_parameters_from_context(mpOutputStream->codecpar, mpCodecContext) < 0)
    {
        return error(desc.filename, "Could not copy the stream parameters\n");
    }

    av_dump_format(mpOutputContext, 0, mFilename.c_str(), 1);

    // Open the output file
    assert((pOutputFormat->flags & AVFMT_NOFILE) == 0); // No output file required. Not sure if/when this happens.
    if(avio_open(&mpOutputContext->pb, mFilename.c_str(), AVIO_FLAG_WRITE) < 0)
    {
        return error(mFilename, "Can't open output file.");
    }

    // Write the stream header
    if(avformat_write_header(mpOutputContext, nullptr) < 0)
    {
        return error(mFilename, "Can't write file header.");
    }

    // cache source-image layout for later frame submission
    mFormat = desc.format;
    mRowPitch = getFormatBytesPerBlock(desc.format) * desc.width;
    if(desc.flipY)
    {
        // scratch buffer for vertical flipping before conversion
        mpFlippedImage = new uint8_t[desc.height * mRowPitch];
    }

    // converter from the caller's pixel format to the encoder's format;
    // SWS_POINT is fine since source and destination sizes are identical
    mpSwsContext = sws_getContext(desc.width, desc.height, getPictureFormatFromFalcorFormat(desc.format), desc.width, desc.height, mpCodecContext->pix_fmt, SWS_POINT, nullptr, nullptr, nullptr);
    if(mpSwsContext == nullptr)
    {
        return error(mFilename, "Failed to allocate SWScale context");
    }

    return true;
}
/*
 * Feed one packet to the (hardware) decoder and encode+write every frame it
 * produces. Uses the file-scope globals decoder_ctx, encoder_ctx, ost,
 * ofmt_ctx and initialized.
 *
 * The encoder is initialized lazily from the FIRST decoded frame, because its
 * hw_frames_ctx only exists once a frame has been decoded.
 *
 * Returns 0 on success (including EAGAIN/EOF drain), a negative AVERROR code
 * on failure.
 */
static int dec_enc(AVPacket *pkt, AVCodec *enc_codec)
{
    AVFrame *frame;
    int ret = 0;

    ret = avcodec_send_packet(decoder_ctx, pkt);
    if (ret < 0) {
        fprintf(stderr, "Error during decoding. Error code: %s\n", av_err2str(ret));
        return ret;
    }

    /* Drain every frame the decoder can produce for this packet. */
    while (ret >= 0) {
        /* A fresh frame each iteration; freed at the fail: label below. */
        if (!(frame = av_frame_alloc()))
            return AVERROR(ENOMEM);

        ret = avcodec_receive_frame(decoder_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            /* Decoder needs more input (or is fully drained) — not an error. */
            av_frame_free(&frame);
            return 0;
        } else if (ret < 0) {
            fprintf(stderr, "Error while decoding. Error code: %s\n", av_err2str(ret));
            goto fail;
        }

        if (!initialized) {
            /* we need to ref hw_frames_ctx of decoder to initialize encoder's codec.
               Only after we get a decoded frame, can we obtain its hw_frames_ctx */
            encoder_ctx->hw_frames_ctx = av_buffer_ref(decoder_ctx->hw_frames_ctx);
            if (!encoder_ctx->hw_frames_ctx) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            /* set AVCodecContext Parameters for encoder, here we keep them stay
             * the same as decoder.
             * xxx: now the sample can't handle resolution change case.
             */
            encoder_ctx->time_base = av_inv_q(decoder_ctx->framerate);
            encoder_ctx->pix_fmt   = AV_PIX_FMT_VAAPI;
            encoder_ctx->width     = decoder_ctx->width;
            encoder_ctx->height    = decoder_ctx->height;

            if ((ret = avcodec_open2(encoder_ctx, enc_codec, NULL)) < 0) {
                fprintf(stderr, "Failed to open encode codec. Error code: %s\n",
                        av_err2str(ret));
                goto fail;
            }

            if (!(ost = avformat_new_stream(ofmt_ctx, enc_codec))) {
                fprintf(stderr, "Failed to allocate stream for output format.\n");
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            ost->time_base = encoder_ctx->time_base;
            ret = avcodec_parameters_from_context(ost->codecpar, encoder_ctx);
            if (ret < 0) {
                fprintf(stderr, "Failed to copy the stream parameters. "
                        "Error code: %s\n", av_err2str(ret));
                goto fail;
            }

            /* write the stream header */
            if ((ret = avformat_write_header(ofmt_ctx, NULL)) < 0) {
                fprintf(stderr, "Error while writing stream header. "
                        "Error code: %s\n", av_err2str(ret));
                goto fail;
            }

            initialized = 1;
        }

        if ((ret = encode_write(frame)) < 0)
            fprintf(stderr, "Error during encoding and writing.\n");

/* NOTE: this label is INSIDE the while loop on purpose — it frees the current
 * frame each iteration and only returns if ret went negative. */
fail:
        av_frame_free(&frame);
        if (ret < 0)
            return ret;
    }
    return 0;
}
/*
 * Build the decoder's bitstream-filter chain from the comma-separated list in
 * avctx->codec->bsfs (falling back to the "null" filter when the codec
 * declares none). Idempotent: returns immediately if the chain already exists.
 *
 * On any failure the partially-built chain is torn down via
 * ff_decode_bsfs_uninit() and a negative AVERROR code is returned.
 */
static int bsfs_init(AVCodecContext *avctx)
{
    AVCodecInternal *avci = avctx->internal;
    DecodeFilterContext *s = &avci->filter;
    const char *bsfs_str;
    int ret;

    /* Already initialized — nothing to do. */
    if (s->nb_bsfs)
        return 0;

    bsfs_str = avctx->codec->bsfs ? avctx->codec->bsfs : "null";
    while (bsfs_str && *bsfs_str) {
        AVBSFContext **tmp;
        const AVBitStreamFilter *filter;
        char *bsf;

        /* av_get_token() allocates; it must be freed on every path below. */
        bsf = av_get_token(&bsfs_str, ",");
        if (!bsf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        filter = av_bsf_get_by_name(bsf);
        if (!filter) {
            av_log(avctx, AV_LOG_ERROR, "A non-existing bitstream filter %s "
                   "requested by a decoder. This is a bug, please report it.\n",
                   bsf);
            ret = AVERROR_BUG;
            av_freep(&bsf);
            goto fail;
        }
        av_freep(&bsf);

        /* Grow the chain array by one slot before allocating the context, so
         * nb_bsfs always matches what ff_decode_bsfs_uninit() must free. */
        tmp = av_realloc_array(s->bsfs, s->nb_bsfs + 1, sizeof(*s->bsfs));
        if (!tmp) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        s->bsfs = tmp;
        s->nb_bsfs++;

        ret = av_bsf_alloc(filter, &s->bsfs[s->nb_bsfs - 1]);
        if (ret < 0)
            goto fail;

        if (s->nb_bsfs == 1) {
            /* We do not currently have an API for passing the input timebase into decoders,
             * but no filters used here should actually need it.
             * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
            s->bsfs[s->nb_bsfs - 1]->time_base_in = (AVRational){ 1, 90000 };
            /* First filter takes its parameters from the codec context itself. */
            ret = avcodec_parameters_from_context(s->bsfs[s->nb_bsfs - 1]->par_in,
                                                  avctx);
        } else {
            /* Subsequent filters chain from the previous filter's output. */
            s->bsfs[s->nb_bsfs - 1]->time_base_in = s->bsfs[s->nb_bsfs - 2]->time_base_out;
            ret = avcodec_parameters_copy(s->bsfs[s->nb_bsfs - 1]->par_in,
                                          s->bsfs[s->nb_bsfs - 2]->par_out);
        }
        if (ret < 0)
            goto fail;

        ret = av_bsf_init(s->bsfs[s->nb_bsfs - 1]);
        if (ret < 0)
            goto fail;
    }

    return 0;
fail:
    ff_decode_bsfs_uninit(avctx);
    return ret;
}
// Opens an audio file for writing with FFmpeg: picks the container and codec,
// negotiates a sample format and sample rate the encoder supports (adjusting
// m_specs/m_convert accordingly), opens the encoder and the output file, and
// writes the container header. Throws FileException on any failure; encoder
// and format contexts allocated before the failure are released in the catch.
FFMPEGWriter::FFMPEGWriter(std::string filename, DeviceSpecs specs, Container format, Codec codec, unsigned int bitrate) :
    m_position(0),
    m_specs(specs),
    m_formatCtx(nullptr),
    m_codecCtx(nullptr),
    m_stream(nullptr),
    m_packet(nullptr),
    m_frame(nullptr),
    m_input_samples(0),
    m_deinterleave(false)
{
    // Container short names indexed by the Container enum; index 0 (invalid)
    // is nullptr so FFmpeg guesses from the filename instead.
    static const char* formats[] = { nullptr, "ac3", "flac", "matroska", "mp2", "mp3", "ogg", "wav" };

    if(avformat_alloc_output_context2(&m_formatCtx, nullptr, formats[format], filename.c_str()) < 0)
        AUD_THROW(FileException, "File couldn't be written, format couldn't be found with ffmpeg.");

    AVOutputFormat* outputFmt = m_formatCtx->oformat;

    if(!outputFmt) {
        avformat_free_context(m_formatCtx);
        AUD_THROW(FileException, "File couldn't be written, output format couldn't be found with ffmpeg.");
    }

    // NOTE(review): this writes into the AVOutputFormat, which FFmpeg shares
    // across all users of that muxer (and is const in newer FFmpeg) — verify
    // against the FFmpeg version in use.
    outputFmt->audio_codec = AV_CODEC_ID_NONE;

    // Map the requested codec to an FFmpeg codec ID; PCM additionally depends
    // on the requested sample format.
    switch(codec)
    {
    case CODEC_AAC:
        outputFmt->audio_codec = AV_CODEC_ID_AAC;
        break;
    case CODEC_AC3:
        outputFmt->audio_codec = AV_CODEC_ID_AC3;
        break;
    case CODEC_FLAC:
        outputFmt->audio_codec = AV_CODEC_ID_FLAC;
        break;
    case CODEC_MP2:
        outputFmt->audio_codec = AV_CODEC_ID_MP2;
        break;
    case CODEC_MP3:
        outputFmt->audio_codec = AV_CODEC_ID_MP3;
        break;
    case CODEC_OPUS:
        outputFmt->audio_codec = AV_CODEC_ID_OPUS;
        break;
    case CODEC_PCM:
        switch(specs.format)
        {
        case FORMAT_U8:
            outputFmt->audio_codec = AV_CODEC_ID_PCM_U8;
            break;
        case FORMAT_S16:
            outputFmt->audio_codec = AV_CODEC_ID_PCM_S16LE;
            break;
        case FORMAT_S24:
            outputFmt->audio_codec = AV_CODEC_ID_PCM_S24LE;
            break;
        case FORMAT_S32:
            outputFmt->audio_codec = AV_CODEC_ID_PCM_S32LE;
            break;
        case FORMAT_FLOAT32:
            outputFmt->audio_codec = AV_CODEC_ID_PCM_F32LE;
            break;
        case FORMAT_FLOAT64:
            outputFmt->audio_codec = AV_CODEC_ID_PCM_F64LE;
            break;
        default:
            outputFmt->audio_codec = AV_CODEC_ID_NONE;
            break;
        }
        break;
    case CODEC_VORBIS:
        outputFmt->audio_codec = AV_CODEC_ID_VORBIS;
        break;
    default:
        outputFmt->audio_codec = AV_CODEC_ID_NONE;
        break;
    }

    // Translate the device channel count into an FFmpeg channel layout.
    uint64_t channel_layout = 0;

    switch(m_specs.channels)
    {
    case CHANNELS_MONO:
        channel_layout = AV_CH_LAYOUT_MONO;
        break;
    case CHANNELS_STEREO:
        channel_layout = AV_CH_LAYOUT_STEREO;
        break;
    case CHANNELS_STEREO_LFE:
        channel_layout = AV_CH_LAYOUT_2POINT1;
        break;
    case CHANNELS_SURROUND4:
        channel_layout = AV_CH_LAYOUT_QUAD;
        break;
    case CHANNELS_SURROUND5:
        channel_layout = AV_CH_LAYOUT_5POINT0_BACK;
        break;
    case CHANNELS_SURROUND51:
        channel_layout = AV_CH_LAYOUT_5POINT1_BACK;
        break;
    case CHANNELS_SURROUND61:
        channel_layout = AV_CH_LAYOUT_6POINT1_BACK;
        break;
    case CHANNELS_SURROUND71:
        channel_layout = AV_CH_LAYOUT_7POINT1;
        break;
    default:
        AUD_THROW(FileException, "File couldn't be written, channel layout not supported.");
    }

    // Everything below may throw; the catch block frees the contexts.
    try
    {
        if(outputFmt->audio_codec == AV_CODEC_ID_NONE)
            AUD_THROW(FileException, "File couldn't be written, audio codec not found with ffmpeg.");

        AVCodec* codec = avcodec_find_encoder(outputFmt->audio_codec);
        if(!codec)
            AUD_THROW(FileException, "File couldn't be written, audio encoder couldn't be found with ffmpeg.");

        m_stream = avformat_new_stream(m_formatCtx, codec);
        if(!m_stream)
            AUD_THROW(FileException, "File couldn't be written, stream creation failed with ffmpeg.");

        m_stream->id = m_formatCtx->nb_streams - 1;

#ifdef FFMPEG_OLD_CODE
        m_codecCtx = m_stream->codec;
#else
        m_codecCtx = avcodec_alloc_context3(codec);
#endif

        if(!m_codecCtx)
            AUD_THROW(FileException, "File couldn't be written, context creation failed with ffmpeg.");

        // Pick the interleaved sample format and matching float->X converter
        // for the requested device format (default: 32-bit float passthrough).
        switch(m_specs.format)
        {
        case FORMAT_U8:
            m_convert = convert_float_u8;
            m_codecCtx->sample_fmt = AV_SAMPLE_FMT_U8;
            break;
        case FORMAT_S16:
            m_convert = convert_float_s16;
            m_codecCtx->sample_fmt = AV_SAMPLE_FMT_S16;
            break;
        case FORMAT_S32:
            m_convert = convert_float_s32;
            m_codecCtx->sample_fmt = AV_SAMPLE_FMT_S32;
            break;
        case FORMAT_FLOAT64:
            m_convert = convert_float_double;
            m_codecCtx->sample_fmt = AV_SAMPLE_FMT_DBL;
            break;
        default:
            // NOTE(review): FORMAT_S24 also lands here (float passthrough) —
            // presumably intentional since there is no packed S24 sample_fmt;
            // confirm.
            m_convert = convert_copy<sample_t>;
            m_codecCtx->sample_fmt = AV_SAMPLE_FMT_FLT;
            break;
        }

        if(m_formatCtx->oformat->flags & AVFMT_GLOBALHEADER)
            m_codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

        // Check whether the encoder supports the chosen format, allowing the
        // planar variant (deinterleaved later) as a match.
        bool format_supported = false;

        for(int i = 0; codec->sample_fmts[i] != -1; i++)
        {
            if(av_get_alt_sample_fmt(codec->sample_fmts[i], false) == m_codecCtx->sample_fmt)
            {
                m_deinterleave = av_sample_fmt_is_planar(codec->sample_fmts[i]);
                m_codecCtx->sample_fmt = codec->sample_fmts[i];
                format_supported = true;
            }
        }

        if(!format_supported)
        {
            // Fall back to the encoder-supported format closest to (and
            // preferably above) the requested one, then retarget m_convert
            // and m_specs.format to it.
            int chosen_index = 0;
            auto chosen = av_get_alt_sample_fmt(codec->sample_fmts[chosen_index], false);
            for(int i = 1; codec->sample_fmts[i] != -1; i++)
            {
                auto fmt = av_get_alt_sample_fmt(codec->sample_fmts[i], false);
                if((fmt > chosen && chosen < m_codecCtx->sample_fmt) || (fmt > m_codecCtx->sample_fmt && fmt < chosen))
                {
                    chosen = fmt;
                    chosen_index = i;
                }
            }

            m_codecCtx->sample_fmt = codec->sample_fmts[chosen_index];
            m_deinterleave = av_sample_fmt_is_planar(m_codecCtx->sample_fmt);
            switch(av_get_alt_sample_fmt(m_codecCtx->sample_fmt, false))
            {
            case AV_SAMPLE_FMT_U8:
                specs.format = FORMAT_U8;
                m_convert = convert_float_u8;
                break;
            case AV_SAMPLE_FMT_S16:
                specs.format = FORMAT_S16;
                m_convert = convert_float_s16;
                break;
            case AV_SAMPLE_FMT_S32:
                specs.format = FORMAT_S32;
                m_convert = convert_float_s32;
                break;
            case AV_SAMPLE_FMT_FLT:
                specs.format = FORMAT_FLOAT32;
                m_convert = convert_copy<sample_t>;
                break;
            case AV_SAMPLE_FMT_DBL:
                specs.format = FORMAT_FLOAT64;
                m_convert = convert_float_double;
                break;
            default:
                AUD_THROW(FileException, "File couldn't be written, sample format not supported with ffmpeg.");
            }
        }

        // Choose the supported sample rate closest to the requested one
        // (exact match wins); 0 means "no restriction list".
        m_codecCtx->sample_rate = 0;

        if(codec->supported_samplerates)
        {
            for(int i = 0; codec->supported_samplerates[i]; i++)
            {
                if(codec->supported_samplerates[i] == m_specs.rate)
                {
                    m_codecCtx->sample_rate = codec->supported_samplerates[i];
                    break;
                }
                else if((codec->supported_samplerates[i] > m_codecCtx->sample_rate && m_specs.rate > m_codecCtx->sample_rate) || (codec->supported_samplerates[i] < m_codecCtx->sample_rate && m_specs.rate < codec->supported_samplerates[i]))
                {
                    m_codecCtx->sample_rate = codec->supported_samplerates[i];
                }
            }
        }

        if(m_codecCtx->sample_rate == 0)
            m_codecCtx->sample_rate = m_specs.rate;

        m_specs.rate = m_codecCtx->sample_rate;

#ifdef FFMPEG_OLD_CODE
        m_codecCtx->codec_id = outputFmt->audio_codec;
#endif

        m_codecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
        m_codecCtx->bit_rate = bitrate;
        m_codecCtx->channel_layout = channel_layout;
        m_codecCtx->channels = m_specs.channels;
        // time base = 1/sample_rate so PTS counts samples
        m_stream->time_base.num = m_codecCtx->time_base.num = 1;
        m_stream->time_base.den = m_codecCtx->time_base.den = m_codecCtx->sample_rate;

        if(avcodec_open2(m_codecCtx, codec, nullptr) < 0)
            AUD_THROW(FileException, "File couldn't be written, encoder couldn't be opened with ffmpeg.");

#ifndef FFMPEG_OLD_CODE
        if(avcodec_parameters_from_context(m_stream->codecpar, m_codecCtx) < 0)
            AUD_THROW(FileException, "File couldn't be written, codec parameters couldn't be copied to the context.");
#endif

        // Input staging buffer sized for one full encoder frame (frame_size
        // may be 0 for PCM-style codecs, in which case no buffer is needed).
        int samplesize = std::max(int(AUD_SAMPLE_SIZE(m_specs)), AUD_DEVICE_SAMPLE_SIZE(m_specs));

        if((m_input_size = m_codecCtx->frame_size))
            m_input_buffer.resize(m_input_size * samplesize);

        if(avio_open(&m_formatCtx->pb, filename.c_str(), AVIO_FLAG_WRITE))
            AUD_THROW(FileException, "File couldn't be written, file opening failed with ffmpeg.");

        if(avformat_write_header(m_formatCtx, nullptr) < 0)
            AUD_THROW(FileException, "File couldn't be written, writing the header failed.");
    }
    catch(Exception&)
    {
        // Release whatever was allocated before re-throwing.
        // NOTE(review): an AVIO handle opened just before a header-write
        // failure is not closed here — presumably handled elsewhere; verify.
#ifndef FFMPEG_OLD_CODE
        if(m_codecCtx)
            avcodec_free_context(&m_codecCtx);
#endif
        avformat_free_context(m_formatCtx);
        throw;
    }

#ifdef FFMPEG_OLD_CODE
    m_packet = new AVPacket({});
#else
    m_packet = av_packet_alloc();
#endif

    m_frame = av_frame_alloc();
}
bool FFmpegEncoderOpen(struct FFmpegEncoder* encoder, const char* outfile) { AVCodec* acodec = avcodec_find_encoder_by_name(encoder->audioCodec); AVCodec* vcodec = avcodec_find_encoder_by_name(encoder->videoCodec); if ((encoder->audioCodec && !acodec) || !vcodec || !FFmpegEncoderVerifyContainer(encoder)) { return false; } encoder->currentAudioSample = 0; encoder->currentAudioFrame = 0; encoder->currentVideoFrame = 0; encoder->nextAudioPts = 0; AVOutputFormat* oformat = av_guess_format(encoder->containerFormat, 0, 0); #ifndef USE_LIBAV avformat_alloc_output_context2(&encoder->context, oformat, 0, outfile); #else encoder->context = avformat_alloc_context(); strncpy(encoder->context->filename, outfile, sizeof(encoder->context->filename) - 1); encoder->context->filename[sizeof(encoder->context->filename) - 1] = '\0'; encoder->context->oformat = oformat; #endif if (acodec) { #ifdef FFMPEG_USE_CODECPAR encoder->audioStream = avformat_new_stream(encoder->context, NULL); encoder->audio = avcodec_alloc_context3(acodec); #else encoder->audioStream = avformat_new_stream(encoder->context, acodec); encoder->audio = encoder->audioStream->codec; #endif encoder->audio->bit_rate = encoder->audioBitrate; encoder->audio->channels = 2; encoder->audio->channel_layout = AV_CH_LAYOUT_STEREO; encoder->audio->sample_rate = encoder->sampleRate; encoder->audio->sample_fmt = encoder->sampleFormat; AVDictionary* opts = 0; av_dict_set(&opts, "strict", "-2", 0); if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) { #ifdef AV_CODEC_FLAG_GLOBAL_HEADER encoder->audio->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; #else encoder->audio->flags |= CODEC_FLAG_GLOBAL_HEADER; #endif } avcodec_open2(encoder->audio, acodec, &opts); av_dict_free(&opts); #if LIBAVCODEC_VERSION_MAJOR >= 55 encoder->audioFrame = av_frame_alloc(); #else encoder->audioFrame = avcodec_alloc_frame(); #endif if (!encoder->audio->frame_size) { encoder->audio->frame_size = 1; } encoder->audioFrame->nb_samples = 
encoder->audio->frame_size; encoder->audioFrame->format = encoder->audio->sample_fmt; encoder->audioFrame->pts = 0; #ifdef USE_LIBAVRESAMPLE encoder->resampleContext = avresample_alloc_context(); av_opt_set_int(encoder->resampleContext, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0); av_opt_set_int(encoder->resampleContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); av_opt_set_int(encoder->resampleContext, "in_sample_rate", PREFERRED_SAMPLE_RATE, 0); av_opt_set_int(encoder->resampleContext, "out_sample_rate", encoder->sampleRate, 0); av_opt_set_int(encoder->resampleContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); av_opt_set_int(encoder->resampleContext, "out_sample_fmt", encoder->sampleFormat, 0); avresample_open(encoder->resampleContext); #else encoder->resampleContext = swr_alloc_set_opts(NULL, AV_CH_LAYOUT_STEREO, encoder->sampleFormat, encoder->sampleRate, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, PREFERRED_SAMPLE_RATE, 0, NULL); swr_init(encoder->resampleContext); #endif encoder->audioBufferSize = (encoder->audioFrame->nb_samples * PREFERRED_SAMPLE_RATE / encoder->sampleRate) * 4; encoder->audioBuffer = av_malloc(encoder->audioBufferSize); encoder->postaudioBufferSize = av_samples_get_buffer_size(0, encoder->audio->channels, encoder->audio->frame_size, encoder->audio->sample_fmt, 0); encoder->postaudioBuffer = av_malloc(encoder->postaudioBufferSize); avcodec_fill_audio_frame(encoder->audioFrame, encoder->audio->channels, encoder->audio->sample_fmt, (const uint8_t*) encoder->postaudioBuffer, encoder->postaudioBufferSize, 0); if (encoder->audio->codec->id == AV_CODEC_ID_AAC && (strcasecmp(encoder->containerFormat, "mp4") || strcasecmp(encoder->containerFormat, "m4v") || strcasecmp(encoder->containerFormat, "mov"))) { // MP4 container doesn't support the raw ADTS AAC format that the encoder spits out #ifdef FFMPEG_USE_NEW_BSF av_bsf_alloc(av_bsf_get_by_name("aac_adtstoasc"), &encoder->absf); avcodec_parameters_from_context(encoder->absf->par_in, encoder->audio); 
av_bsf_init(encoder->absf); #else encoder->absf = av_bitstream_filter_init("aac_adtstoasc"); #endif } #ifdef FFMPEG_USE_CODECPAR avcodec_parameters_from_context(encoder->audioStream->codecpar, encoder->audio); #endif } #ifdef FFMPEG_USE_CODECPAR encoder->videoStream = avformat_new_stream(encoder->context, NULL); encoder->video = avcodec_alloc_context3(vcodec); #else encoder->videoStream = avformat_new_stream(encoder->context, vcodec); encoder->video = encoder->videoStream->codec; #endif encoder->video->bit_rate = encoder->videoBitrate; encoder->video->width = encoder->width; encoder->video->height = encoder->height; encoder->video->time_base = (AVRational) { VIDEO_TOTAL_LENGTH, GBA_ARM7TDMI_FREQUENCY }; encoder->video->pix_fmt = encoder->pixFormat; encoder->video->gop_size = 60; encoder->video->max_b_frames = 3; if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) { #ifdef AV_CODEC_FLAG_GLOBAL_HEADER encoder->video->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; #else encoder->video->flags |= CODEC_FLAG_GLOBAL_HEADER; #endif } if (encoder->video->codec->id == AV_CODEC_ID_H264 && (strcasecmp(encoder->containerFormat, "mp4") || strcasecmp(encoder->containerFormat, "m4v") || strcasecmp(encoder->containerFormat, "mov"))) { // QuickTime and a few other things require YUV420 encoder->video->pix_fmt = AV_PIX_FMT_YUV420P; } if (strcmp(vcodec->name, "libx264") == 0) { // Try to adaptively figure out when you can use a slower encoder if (encoder->width * encoder->height > 1000000) { av_opt_set(encoder->video->priv_data, "preset", "superfast", 0); } else if (encoder->width * encoder->height > 500000) { av_opt_set(encoder->video->priv_data, "preset", "veryfast", 0); } else { av_opt_set(encoder->video->priv_data, "preset", "faster", 0); } if (encoder->videoBitrate == 0) { av_opt_set(encoder->video->priv_data, "crf", "0", 0); encoder->video->pix_fmt = AV_PIX_FMT_YUV444P; } } avcodec_open2(encoder->video, vcodec, 0); #if LIBAVCODEC_VERSION_MAJOR >= 55 encoder->videoFrame = 
av_frame_alloc(); #else encoder->videoFrame = avcodec_alloc_frame(); #endif encoder->videoFrame->format = encoder->video->pix_fmt; encoder->videoFrame->width = encoder->video->width; encoder->videoFrame->height = encoder->video->height; encoder->videoFrame->pts = 0; _ffmpegSetVideoDimensions(&encoder->d, encoder->iwidth, encoder->iheight); av_image_alloc(encoder->videoFrame->data, encoder->videoFrame->linesize, encoder->video->width, encoder->video->height, encoder->video->pix_fmt, 32); #ifdef FFMPEG_USE_CODECPAR avcodec_parameters_from_context(encoder->videoStream->codecpar, encoder->video); #endif if (avio_open(&encoder->context->pb, outfile, AVIO_FLAG_WRITE) < 0) { return false; } return avformat_write_header(encoder->context, 0) >= 0; }
void MP4Encoder::EncodeStart() { //1. 注册所有组件 av_register_all(); //2. 初始化输出码流的AVFormatContext avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, this->mp4Path); //3. 打开待输出的视频文件 if (avio_open(&pFormatCtx->pb, this->mp4Path, AVIO_FLAG_READ_WRITE)) { LOGE("open output file failed"); return; } //4. 初始化视频码流 pStream = avformat_new_stream(pFormatCtx, NULL); if (pStream == NULL) { LOGE("allocating output stream failed"); return; } //5. 寻找编码器并打开编码器 pCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4); if (!pCodec) { LOGE("could not find encoder"); return; } //6. 分配编码器并设置参数 pCodecCtx = avcodec_alloc_context3(pCodec); pCodecCtx->codec_id = pCodec->id; pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO; pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P; pCodecCtx->width = height; pCodecCtx->height = width; pCodecCtx->time_base.num = 1; pCodecCtx->time_base.den = 25; pCodecCtx->bit_rate = 400000; pCodecCtx->gop_size = 12; //将AVCodecContext的成员复制到AVCodecParameters结构体 avcodec_parameters_from_context(pStream->codecpar, pCodecCtx); av_stream_set_r_frame_rate(pStream, {1, 25}); //7. 打开编码器 if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) { LOGE("open encoder fail!"); return; } //输出格式信息 av_dump_format(pFormatCtx, 0, this->mp4Path, 1); //初始化帧 pFrame = av_frame_alloc(); pFrame->width = pCodecCtx->width; pFrame->height = pCodecCtx->height; pFrame->format = pCodecCtx->pix_fmt; int bufferSize = av_image_get_buffer_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, 1); pFrameBuffer = (uint8_t *) av_malloc(bufferSize); av_image_fill_arrays(pFrame->data, pFrame->linesize, pFrameBuffer, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, 1); AVDictionary *opt = 0; //H.264 if (pCodecCtx->codec_id == AV_CODEC_ID_H264) { av_dict_set_int(&opt, "video_track_timescale", 25, 0); av_dict_set(&opt, "preset", "slow", 0); av_dict_set(&opt, "tune", "zerolatency", 0); } //8. 写文件头 avformat_write_header(pFormatCtx, &opt); //创建已编码帧 av_new_packet(&avPacket, bufferSize * 3); //标记正在转换 this->transform = true; }