bool VideoEncoderFFmpegPrivate::open() { if (codec_name.isEmpty()) { // copy ctx from muxer AVCodec *codec = avcodec_find_decoder(avctx->codec_id); AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false); return true; } AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData()); if (!codec) { qWarning() << "Can not find encoder for codec " << codec_name; return false; } if (avctx) { avcodec_free_context(&avctx); avctx = 0; } avctx = avcodec_alloc_context3(codec); qDebug("tbc: %f", av_q2d(avctx->time_base)); avctx->width = width; // coded_width works, why? avctx->height = height; avctx->pix_fmt = QTAV_PIX_FMT_C(YUV420P); avctx->time_base = av_d2q(1.0/frame_rate, frame_rate*1001.0+2); //avctx->max_b_frames = 3;//h264 qDebug("2 tbc: %f=%d/%d", av_q2d(avctx->time_base), avctx->time_base.num, avctx->time_base.den); avctx->bit_rate = bit_rate; // Set Option AVDictionary *param = 0; #if 0 //H.264 if(avctx->codec_id == QTAV_CODEC_ID(H264)) { av_dict_set(¶m, "preset", "slow", 0); av_dict_set(¶m, "tune", "zerolatency", 0); //av_dict_set(¶m, "profile", "main", 0); } //H.265 if(avctx->codec_id == AV_CODEC_ID_H265){ av_dict_set(¶m, "preset", "ultrafast", 0); av_dict_set(¶m, "tune", "zero-latency", 0); } #endif applyOptionsForContext(); AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false); const int buffer_size = qMax<int>(qMax<int>(width*height*6+200, FF_MIN_BUFFER_SIZE), sizeof(AVPicture));//?? buffer.resize(buffer_size); return true; }
bool AVDecoder::open() { DPTR_D(AVDecoder); // codec_ctx can't be null for none-ffmpeg based decoders because we may use it's properties in those decoders if (!d.codec_ctx) { qWarning("FFmpeg codec context not ready"); return false; } AVCodec *codec = 0; if (!d.codec_name.isEmpty()) { codec = avcodec_find_decoder_by_name(d.codec_name.toUtf8().constData()); if (!codec) { const AVCodecDescriptor* cd = avcodec_descriptor_get_by_name(d.codec_name.toUtf8().constData()); if (cd) { codec = avcodec_find_decoder(cd->id); } } } else { codec = avcodec_find_decoder(d.codec_ctx->codec_id); } if (!codec) { // TODO: can be null for none-ffmpeg based decoders QString es(tr("No codec could be found for '%1'")); if (d.codec_name.isEmpty()) { es = es.arg(QLatin1String(avcodec_get_name(d.codec_ctx->codec_id))); } else { es = es.arg(d.codec_name); } qWarning() << es; AVError::ErrorCode ec(AVError::CodecError); switch (d.codec_ctx->coder_type) { case AVMEDIA_TYPE_VIDEO: ec = AVError::VideoCodecNotFound; break; case AVMEDIA_TYPE_AUDIO: ec = AVError::AudioCodecNotFound; break; case AVMEDIA_TYPE_SUBTITLE: ec = AVError::SubtitleCodecNotFound; default: break; } emit error(AVError(ec, es)); return false; } // hwa extra init can be here if (!d.open()) { d.close(); return false; } // TODO: skip for none-ffmpeg based decoders d.applyOptionsForDict(); av_opt_set_int(d.codec_ctx, "refcounted_frames", d.enableFrameRef(), 0); // why dict may have no effect? // TODO: only open for ff decoders AV_ENSURE_OK(avcodec_open2(d.codec_ctx, codec, d.options.isEmpty() ? NULL : &d.dict), false); d.is_open = true; return true; }
/*!
 * Open the muxer: allocate the output format context, attach a custom
 * MediaIO (if any), create the streams, open the output resource when the
 * format needs one, and write the container header.
 * Returns false if any step fails (AV_ENSURE_OK short-circuits).
 */
bool AVMuxer::open()
{
    // avformatcontext will be allocated in avformat_alloc_output_context2()
    //d->format_ctx->interrupt_callback = *d->interrupt_hanlder;
    d->applyOptionsForDict(); // check special dict keys
    // d->format_forced can be set from AVFormatContext.format_whitelist
    if (!d->format_forced.isEmpty()) {
        // Resolve the forced short name (e.g. "mp4") to an AVOutputFormat.
        d->format = av_guess_format(d->format_forced.toUtf8().constData(), NULL, NULL);
        qDebug() << "force format: " << d->format_forced;
    }
    //d->interrupt_hanlder->begin(InterruptHandler::Open);
    if (d->io) {
        if (d->io->accessMode() == MediaIO::Read) {
            qWarning("wrong MediaIO accessMode. MUST be Write");
        }
        // Custom IO: no file name — the AVIOContext supplied below does the writing.
        AV_ENSURE_OK(avformat_alloc_output_context2(&d->format_ctx, d->format, d->format_forced.isEmpty() ? 0 : d->format_forced.toUtf8().constData(), ""), false);
        d->format_ctx->pb = (AVIOContext*)d->io->avioContext();
        d->format_ctx->flags |= AVFMT_FLAG_CUSTOM_IO;
        //d->format_ctx->flags |= AVFMT_FLAG_GENPTS;
    } else {
        // No custom IO: let FFmpeg infer/open the output from the file name.
        AV_ENSURE_OK(avformat_alloc_output_context2(&d->format_ctx, d->format, d->format_forced.isEmpty() ? 0 : d->format_forced.toUtf8().constData(), fileName().toUtf8().constData()), false);
    }
    //d->interrupt_hanlder->end();
    // Create/configure output streams from the configured encoders.
    if (!d->prepareStreams()) {
        return false;
    }
    // TODO: AVFMT_NOFILE ? examples/muxing.c only check AVFMT_NOFILE
    // a custom io does not need avio_open. it opens the resource in its own way, e.g. QIODevice.open
    if (!(d->format_ctx->oformat->flags & AVFMT_NOFILE) && !(d->format_ctx->flags & AVFMT_FLAG_CUSTOM_IO)) {
        // avio_open2?
        AV_ENSURE_OK(avio_open(&d->format_ctx->pb, fileName().toUtf8().constData(), AVIO_FLAG_WRITE), false);
    }
    // d->format_ctx->start_time_realtime
    // Write the container header; remaining dict entries are consumed/validated here.
    AV_ENSURE_OK(avformat_write_header(d->format_ctx, &d->dict), false);
    d->started = false;
    return true;
}
void AVEncoder::copyAVCodecContext(void* ctx) { if (!ctx) return; DPTR_D(AVEncoder); AVCodecContext* c = static_cast<AVCodecContext*>(ctx); if (d.avctx) { // dest should be avcodec_alloc_context3(NULL) AV_ENSURE_OK(avcodec_copy_context(d.avctx, c)); d.is_open = false; return; } }
bool AVDecoder::close() { if (!isOpen()) { return true; } DPTR_D(AVDecoder); d.is_open = false; // hwa extra finalize can be here d.close(); // TODO: reset config? if (d.codec_ctx) { AV_ENSURE_OK(avcodec_close(d.codec_ctx), false); } return true; }
/* * do nothing if equal * close the old one. the codec context can not be shared in more than 1 decoder. */ void AVDecoder::setCodecContext(void *codecCtx) { DPTR_D(AVDecoder); AVCodecContext *ctx = (AVCodecContext*)codecCtx; if (d.codec_ctx == ctx) return; if (isOpen()) { qWarning("Can not copy codec properties when it's open"); close(); // } d.is_open = false; if (!ctx) { avcodec_free_context(&d.codec_ctx); d.codec_ctx = 0; return; } if (!d.codec_ctx) d.codec_ctx = avcodec_alloc_context3(NULL); if (!d.codec_ctx) { qWarning("avcodec_alloc_context3 failed"); return; } AV_ENSURE_OK(avcodec_copy_context(d.codec_ctx, ctx)); }
/*!
 * Close the encoder context.
 * ROBUSTNESS FIX: guard against a null avctx (e.g. close before open, or a
 * failed open) instead of handing NULL to avcodec_close — consistent with
 * AVDecoder::close(), which checks the context before closing it.
 */
bool VideoEncoderFFmpegPrivate::close()
{
    if (!avctx)
        return true; // nothing was opened
    AV_ENSURE_OK(avcodec_close(avctx), false);
    return true;
}
/*!
 * Open the audio encoder.
 * - Empty codec_name: context came from the muxer (copyAVCodecContext); open it.
 * - Otherwise: find the encoder (name or descriptor alias), allocate a fresh
 *   context, negotiate sample rate/format/channel layout against what the
 *   codec supports, then open and size the output packet buffer.
 * Returns false if no encoder is found or avcodec_open2() fails.
 */
bool AudioEncoderFFmpegPrivate::open()
{
    if (codec_name.isEmpty()) {
        // copy ctx from muxer by copyAVCodecContext
        AVCodec *codec = avcodec_find_encoder(avctx->codec_id);
        AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
        return true;
    }
    AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData());
    if (!codec) {
        // The name may be a descriptor (alias) name; resolve it to a codec id.
        const AVCodecDescriptor* cd = avcodec_descriptor_get_by_name(codec_name.toUtf8().constData());
        if (cd) {
            codec = avcodec_find_encoder(cd->id);
        }
    }
    if (!codec) {
        qWarning() << "Can not find encoder for codec " << codec_name;
        return false;
    }
    // Drop any previously owned context before allocating a new one.
    if (avctx) {
        avcodec_free_context(&avctx);
        avctx = 0;
    }
    avctx = avcodec_alloc_context3(codec);
    // reset format_used to user defined format. important to update default format if format is invalid
    format_used = format;
    // Sample rate: fall back to the codec's first supported rate, then 44100.
    if (format.sampleRate() <= 0) {
        if (codec->supported_samplerates) {
            qDebug("use first supported sample rate: %d", codec->supported_samplerates[0]);
            format_used.setSampleRate(codec->supported_samplerates[0]);
        } else {
            qWarning("sample rate and supported sample rate are not set. use 44100");
            format_used.setSampleRate(44100);
        }
    }
    // Sample format: fall back to the codec's first supported format, then s16.
    if (format.sampleFormat() == AudioFormat::SampleFormat_Unknown) {
        if (codec->sample_fmts) {
            qDebug("use first supported sample format: %d", codec->sample_fmts[0]);
            format_used.setSampleFormatFFmpeg((int)codec->sample_fmts[0]);
        } else {
            qWarning("sample format and supported sample format are not set. use s16");
            format_used.setSampleFormat(AudioFormat::SampleFormat_Signed16);
        }
    }
    // Channel layout: fall back to the codec's first supported layout, then stereo.
    if (format.channelLayout() == AudioFormat::ChannelLayout_Unsupported) {
        if (codec->channel_layouts) {
            qDebug("use first supported channel layout: %lld", codec->channel_layouts[0]);
            format_used.setChannelLayoutFFmpeg((qint64)codec->channel_layouts[0]);
        } else {
            qWarning("channel layout and supported channel layout are not set. use stereo");
            format_used.setChannelLayout(AudioFormat::ChannelLayout_Stereo);
        }
    }
    // Push the negotiated parameters into the codec context.
    avctx->sample_fmt = (AVSampleFormat)format_used.sampleFormatFFmpeg();
    avctx->channel_layout = format_used.channelLayoutFFmpeg();
    avctx->channels = format_used.channels();
    avctx->sample_rate = format_used.sampleRate();
    avctx->bits_per_raw_sample = format_used.bytesPerSample()*8;
    /// set the time base. TODO
    avctx->time_base.num = 1;
    avctx->time_base.den = format_used.sampleRate();
    avctx->bit_rate = bit_rate;
    qDebug() << format_used;
    // Allow experimental encoders (e.g. native aac/vorbis in some FFmpeg builds).
    av_dict_set(&dict, "strict", "-2", 0); //aac, vorbis
    applyOptionsForContext();
    // avctx->frame_size will be set in avcodec_open2
    AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
    // from mpv ao_lavc: size the output buffer; PCM-style codecs report
    // frame_size <= 1, so derive bytes-per-sample from the codec id instead.
    int pcm_hack = 0;
    int buffer_size = 0;
    frame_size = avctx->frame_size;
    if (frame_size <= 1)
        pcm_hack = av_get_bits_per_sample(avctx->codec_id)/8;
    if (pcm_hack) {
        frame_size = 16384; // "enough"
        buffer_size = frame_size*pcm_hack*format_used.channels()*2+200;
    } else {
        buffer_size = frame_size*format_used.bytesPerSample()*format_used.channels()*2+200;
    }
    if (buffer_size < FF_MIN_BUFFER_SIZE)
        buffer_size = FF_MIN_BUFFER_SIZE;
    buffer.resize(buffer_size);
    return true;
}
bool VideoEncoderFFmpegPrivate::open() { nb_encoded = 0LL; if (codec_name.isEmpty()) { // copy ctx from muxer by copyAVCodecContext AVCodec *codec = avcodec_find_encoder(avctx->codec_id); AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false); return true; } AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData()); if (!codec) { const AVCodecDescriptor* cd = avcodec_descriptor_get_by_name(codec_name.toUtf8().constData()); if (cd) { codec = avcodec_find_encoder(cd->id); } } if (!codec) { qWarning() << "Can not find encoder for codec " << codec_name; return false; } if (avctx) { avcodec_free_context(&avctx); avctx = 0; } avctx = avcodec_alloc_context3(codec); avctx->width = width; // coded_width works, why? avctx->height = height; // reset format_used to user defined format. important to update default format if format is invalid format_used = VideoFormat::Format_Invalid; AVPixelFormat fffmt = (AVPixelFormat)format.pixelFormatFFmpeg(); if (codec->pix_fmts && format.isValid()) { for (int i = 0; codec->pix_fmts[i] != AVPixelFormat(-1); ++i) { if (fffmt == codec->pix_fmts[i]) { format_used = format.pixelFormat(); break; } } } //avctx->sample_aspect_ratio = AVPixelFormat hwfmt = AVPixelFormat(-1); if (av_pix_fmt_desc_get(codec->pix_fmts[0])->flags & AV_PIX_FMT_FLAG_HWACCEL) hwfmt = codec->pix_fmts[0]; bool use_hwctx = false; if (hwfmt != AVPixelFormat(-1)) { #ifdef HAVE_AVHWCTX const AVHWDeviceType dt = fromHWAName(codec_name.section(QChar('_'), -1).toUtf8().constData()); if (dt != AVHWDeviceType(-1)) { use_hwctx = true; avctx->pix_fmt = hwfmt; hw_device_ctx = NULL; AV_ENSURE(av_hwdevice_ctx_create(&hw_device_ctx, dt, hwdev.toLatin1().constData(), NULL, 0), false); avctx->hw_frames_ctx = av_hwframe_ctx_alloc(hw_device_ctx); if (!avctx->hw_frames_ctx) { qWarning("Failed to create hw frame context for '%s'", codec_name.toLatin1().constData()); return false; } // get sw formats const void *hwcfg = NULL; AVHWFramesConstraints *constraints = 
av_hwdevice_get_hwframe_constraints(hw_device_ctx, hwcfg); const AVPixelFormat* in_fmts = constraints->valid_sw_formats; AVPixelFormat sw_fmt = AVPixelFormat(-1); if (in_fmts) { sw_fmt = in_fmts[0]; while (*in_fmts != AVPixelFormat(-1)) { if (*in_fmts == fffmt) sw_fmt = *in_fmts; sw_fmts.append(*in_fmts); ++in_fmts; } } else { sw_fmt = QTAV_PIX_FMT_C(YUV420P); } av_hwframe_constraints_free(&constraints); format_used = VideoFormat::pixelFormatFromFFmpeg(sw_fmt); // encoder surface pool parameters AVHWFramesContext* hwfs = (AVHWFramesContext*)avctx->hw_frames_ctx->data; hwfs->format = hwfmt; // must the same as avctx->pix_fmt hwfs->sw_format = sw_fmt; // if it's not set, vaapi will choose the last valid_sw_formats, but that's wrong for vaGetImage/DeriveImage. nvenc always need sw_format // hw upload parameters. encoder's hwframes is just for parameter checking, will never be intialized, so we allocate an individual one. hwframes_ref = av_hwframe_ctx_alloc(hw_device_ctx); if (!hwframes_ref) { qWarning("Failed to create hw frame context for uploading '%s'", codec_name.toLatin1().constData()); } else { hwframes = (AVHWFramesContext*)hwframes_ref->data; hwframes->format = hwfmt; } } #endif //HAVE_AVHWCTX } if (!use_hwctx) { // no hw device (videotoolbox, wrong device name etc.), or old ffmpeg // TODO: check frame is hw frame if (hwfmt == AVPixelFormat(-1)) { // sw enc if (format_used == VideoFormat::Format_Invalid) {// requested format is not supported by sw enc if (codec->pix_fmts) { //pix_fmts[0] is always a sw format here qDebug("use first supported pixel format '%d' for sw encoder", codec->pix_fmts[0]); format_used = VideoFormat::pixelFormatFromFFmpeg((int)codec->pix_fmts[0]); } } } else { if (format_used == VideoFormat::Format_Invalid) { // requested format is not supported by hw enc qDebug("use first supported sw pixel format '%d' for hw encoder", codec->pix_fmts[1]); if (codec->pix_fmts && codec->pix_fmts[1] != AVPixelFormat(-1)) format_used = 
VideoFormat::pixelFormatFromFFmpeg(codec->pix_fmts[1]); } } if (format_used == VideoFormat::Format_Invalid) { qWarning("fallback to yuv420p"); format_used = VideoFormat::Format_YUV420P; } avctx->pix_fmt = (AVPixelFormat)VideoFormat::pixelFormatToFFmpeg(format_used); } if (frame_rate > 0) avctx->time_base = av_d2q(1.0/frame_rate, frame_rate*1001.0+2); else avctx->time_base = av_d2q(1.0/VideoEncoder::defaultFrameRate(), VideoEncoder::defaultFrameRate()*1001.0+2); qDebug("size: %dx%d tbc: %f=%d/%d", width, height, av_q2d(avctx->time_base), avctx->time_base.num, avctx->time_base.den); avctx->bit_rate = bit_rate; //AVDictionary *dict = 0; if(avctx->codec_id == QTAV_CODEC_ID(H264)) { avctx->gop_size = 10; //avctx->max_b_frames = 3;//h264 av_dict_set(&dict, "preset", "fast", 0); //x264 av_dict_set(&dict, "tune", "zerolatency", 0); //x264 //av_dict_set(&dict, "profile", "main", 0); // conflict with vaapi (int values) } if(avctx->codec_id == AV_CODEC_ID_HEVC){ av_dict_set(&dict, "preset", "ultrafast", 0); av_dict_set(&dict, "tune", "zero-latency", 0); } if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO) { av_dict_set(&dict, "strict", "-2", 0); // mpeg2 arbitrary fps } applyOptionsForContext(); AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false); // from mpv ao_lavc const int buffer_size = qMax<int>(qMax<int>(width*height*6+200, FF_MIN_BUFFER_SIZE), sizeof(AVPicture));//?? buffer.resize(buffer_size); return true; }
/*!
 * Open the video encoder (software-only variant).
 * - Empty codec_name: context came from the muxer (copyAVCodecContext); open it.
 * - Otherwise: find the encoder (name or descriptor alias), allocate a fresh
 *   context, negotiate the pixel format, configure time base / bit rate /
 *   codec presets, then open and size the output packet buffer.
 * Returns false if no encoder is found or avcodec_open2() fails.
 */
bool VideoEncoderFFmpegPrivate::open()
{
    nb_encoded = 0LL;
    if (codec_name.isEmpty()) {
        // copy ctx from muxer by copyAVCodecContext
        AVCodec *codec = avcodec_find_encoder(avctx->codec_id);
        AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
        return true;
    }
    AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData());
    if (!codec) {
        // The name may be a descriptor (alias) name; resolve it to a codec id.
        const AVCodecDescriptor* cd = avcodec_descriptor_get_by_name(codec_name.toUtf8().constData());
        if (cd) {
            codec = avcodec_find_encoder(cd->id);
        }
    }
    if (!codec) {
        qWarning() << "Can not find encoder for codec " << codec_name;
        return false;
    }
    // Drop any previously owned context before allocating a new one.
    if (avctx) {
        avcodec_free_context(&avctx);
        avctx = 0;
    }
    avctx = avcodec_alloc_context3(codec);
    avctx->width = width; // coded_width works, why?
    avctx->height = height;
    // reset format_used to user defined format. important to update default format if format is invalid
    format_used = format.pixelFormat();
    // Fall back to the codec's first supported pixel format, then yuv420p.
    if (format.pixelFormat() == VideoFormat::Format_Invalid) {
        if (codec->pix_fmts) {
            qDebug("use first supported pixel format: %d", codec->pix_fmts[0]);
            format_used = VideoFormat::pixelFormatFromFFmpeg((int)codec->pix_fmts[0]);
        } else {
            qWarning("pixel format and supported pixel format are not set. use yuv420p");
            format_used = VideoFormat::Format_YUV420P;
        }
    }
    //avctx->sample_aspect_ratio =
    avctx->pix_fmt = (AVPixelFormat)VideoFormat::pixelFormatToFFmpeg(format_used);
    // Time base from the configured frame rate, or the class default.
    if (frame_rate > 0)
        avctx->time_base = av_d2q(1.0/frame_rate, frame_rate*1001.0+2);
    else
        avctx->time_base = av_d2q(1.0/VideoEncoder::defaultFrameRate(), VideoEncoder::defaultFrameRate()*1001.0+2);
    qDebug("size: %dx%d tbc: %f=%d/%d", width, height, av_q2d(avctx->time_base), avctx->time_base.num, avctx->time_base.den);
    avctx->bit_rate = bit_rate;
#if 1
    //AVDictionary *dict = 0;
    if(avctx->codec_id == QTAV_CODEC_ID(H264)) {
        avctx->gop_size = 10;
        //avctx->max_b_frames = 3;//h264
        av_dict_set(&dict, "preset", "fast", 0);
        av_dict_set(&dict, "tune", "zerolatency", 0);
        av_dict_set(&dict, "profile", "main", 0);
    }
#ifdef FF_PROFILE_HEVC_MAIN
    if(avctx->codec_id == AV_CODEC_ID_HEVC){
        av_dict_set(&dict, "preset", "ultrafast", 0);
        av_dict_set(&dict, "tune", "zero-latency", 0);
    }
#endif //FF_PROFILE_HEVC_MAIN
#endif
    applyOptionsForContext();
    AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
    // from mpv ao_lavc
    const int buffer_size = qMax<int>(qMax<int>(width*height*6+200, FF_MIN_BUFFER_SIZE), sizeof(AVPicture));//??
    buffer.resize(buffer_size);
    return true;
}