Example #1
bool VideoEncoderFFmpegPrivate::open()
{
    if (codec_name.isEmpty()) {
        // copy ctx from muxer
        AVCodec *codec = avcodec_find_encoder(avctx->codec_id);
        AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
        return true;
    }
    AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData());
    if (!codec) {
        qWarning() << "Can not find encoder for codec " << codec_name;
        return false;
    }
    if (avctx) {
        avcodec_free_context(&avctx);
        avctx = 0;
    }
    avctx = avcodec_alloc_context3(codec);
    qDebug("tbc: %f", av_q2d(avctx->time_base));
    avctx->width = width; // coded_width works, why?
    avctx->height = height;
    avctx->pix_fmt = QTAV_PIX_FMT_C(YUV420P);
    avctx->time_base = av_d2q(1.0/frame_rate, frame_rate*1001.0+2);
    //avctx->max_b_frames = 3;//h264
    qDebug("2 tbc: %f=%d/%d", av_q2d(avctx->time_base), avctx->time_base.num, avctx->time_base.den);
    avctx->bit_rate = bit_rate;
    // Set Option
    AVDictionary *param = 0;
#if 0
    //H.264
    if (avctx->codec_id == QTAV_CODEC_ID(H264)) {
        av_dict_set(&param, "preset", "slow", 0);
        av_dict_set(&param, "tune", "zerolatency", 0);
        //av_dict_set(&param, "profile", "main", 0);
    }
    //H.265
    if (avctx->codec_id == AV_CODEC_ID_H265) {
        av_dict_set(&param, "preset", "ultrafast", 0);
        av_dict_set(&param, "tune", "zero-latency", 0);
    }
#endif
    applyOptionsForContext();
    AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
    const int buffer_size = qMax<int>(qMax<int>(width*height*6+200, FF_MIN_BUFFER_SIZE), sizeof(AVPicture));//??
    buffer.resize(buffer_size);
    return true;
}
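Example #1 sets avctx->time_base with av_d2q(1.0/frame_rate, frame_rate*1001.0+2). The standalone sketch below is my own illustration (not QtAV code, assuming only libavutil is linked) and simply prints what that conversion yields for a few common frame rates, e.g. 29.97 fps resolving to the exact 1001/30000 time base.

// Sketch: behaviour of av_d2q(1.0/frame_rate, frame_rate*1001.0+2) for common rates.
// Illustrative only; not part of QtAV.
extern "C" {
#include <libavutil/rational.h>
}
#include <cstdio>

int main()
{
    const double rates[] = { 23.976, 25.0, 29.97, 60.0 };
    for (double r : rates) {
        // the max term r*1001+2 keeps NTSC-style rates representable,
        // e.g. 30000/1001 fps -> 1001/30000 time base
        AVRational tb = av_d2q(1.0 / r, int(r * 1001.0 + 2));
        printf("fps %.3f -> time_base %d/%d (%.6f s)\n", r, tb.num, tb.den, av_q2d(tb));
    }
    return 0;
}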
Example #2
bool VideoEncoderFFmpegPrivate::open()
{
    nb_encoded = 0LL;
    if (codec_name.isEmpty()) {
        // copy ctx from muxer by copyAVCodecContext
        AVCodec *codec = avcodec_find_encoder(avctx->codec_id);
        AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
        return true;
    }
    AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData());
    if (!codec) {
        const AVCodecDescriptor* cd = avcodec_descriptor_get_by_name(codec_name.toUtf8().constData());
        if (cd) {
            codec = avcodec_find_encoder(cd->id);
        }
    }
    if (!codec) {
        qWarning() << "Can not find encoder for codec " << codec_name;
        return false;
    }
    if (avctx) {
        avcodec_free_context(&avctx);
        avctx = 0;
    }
    avctx = avcodec_alloc_context3(codec);
    avctx->width = width; // coded_width works, why?
    avctx->height = height;
    // reset format_used to user defined format. important to update default format if format is invalid
    format_used = VideoFormat::Format_Invalid;
    AVPixelFormat fffmt = (AVPixelFormat)format.pixelFormatFFmpeg();
    if (codec->pix_fmts && format.isValid()) {
        for (int i = 0; codec->pix_fmts[i] != AVPixelFormat(-1); ++i) {
            if (fffmt == codec->pix_fmts[i]) {
                format_used = format.pixelFormat();
                break;
            }
        }
    }
    //avctx->sample_aspect_ratio =
    AVPixelFormat hwfmt = AVPixelFormat(-1);
    if (codec->pix_fmts && (av_pix_fmt_desc_get(codec->pix_fmts[0])->flags & AV_PIX_FMT_FLAG_HWACCEL))
        hwfmt = codec->pix_fmts[0];
    bool use_hwctx = false;
    if (hwfmt != AVPixelFormat(-1)) {
#ifdef HAVE_AVHWCTX
        const AVHWDeviceType dt = fromHWAName(codec_name.section(QChar('_'), -1).toUtf8().constData());
        if (dt != AVHWDeviceType(-1)) {
            use_hwctx = true;
            avctx->pix_fmt = hwfmt;
            hw_device_ctx = NULL;
            AV_ENSURE(av_hwdevice_ctx_create(&hw_device_ctx, dt, hwdev.toLatin1().constData(), NULL, 0), false);
            avctx->hw_frames_ctx = av_hwframe_ctx_alloc(hw_device_ctx);
            if (!avctx->hw_frames_ctx) {
                qWarning("Failed to create hw frame context for '%s'", codec_name.toLatin1().constData());
                return false;
            }
            // get sw formats
            const void *hwcfg = NULL;
            AVHWFramesConstraints *constraints = av_hwdevice_get_hwframe_constraints(hw_device_ctx, hwcfg);
            const AVPixelFormat* in_fmts = constraints ? constraints->valid_sw_formats : NULL;
            AVPixelFormat sw_fmt = AVPixelFormat(-1);
            if (in_fmts) {
                sw_fmt = in_fmts[0];
                while (*in_fmts != AVPixelFormat(-1)) {
                    if (*in_fmts == fffmt)
                        sw_fmt = *in_fmts;
                    sw_fmts.append(*in_fmts);
                    ++in_fmts;
                }
            } else {
                sw_fmt = QTAV_PIX_FMT_C(YUV420P);
            }
            av_hwframe_constraints_free(&constraints);
            format_used = VideoFormat::pixelFormatFromFFmpeg(sw_fmt);
            // encoder surface pool parameters
            AVHWFramesContext* hwfs = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
            hwfs->format = hwfmt; // must be the same as avctx->pix_fmt
            hwfs->sw_format = sw_fmt; // if not set, vaapi picks the last entry of valid_sw_formats, which is wrong for vaGetImage/DeriveImage; nvenc always needs sw_format
            // hw upload parameters. The encoder's hwframes ctx is only used for parameter checking and is never initialized, so we allocate a separate one for uploading.
            hwframes_ref = av_hwframe_ctx_alloc(hw_device_ctx);
            if (!hwframes_ref) {
                qWarning("Failed to create hw frame context for uploading '%s'", codec_name.toLatin1().constData());
            } else {
                hwframes = (AVHWFramesContext*)hwframes_ref->data;
                hwframes->format = hwfmt;
            }
        }
#endif //HAVE_AVHWCTX
    }

    if (!use_hwctx) { // no hw device (videotoolbox, wrong device name etc.), or old ffmpeg
        // TODO: check frame is hw frame
        if (hwfmt == AVPixelFormat(-1)) { // sw enc
            if (format_used == VideoFormat::Format_Invalid) {// requested format is not supported by sw enc
                if (codec->pix_fmts) { //pix_fmts[0] is always a sw format here
                    qDebug("use first supported pixel format '%d' for sw encoder", codec->pix_fmts[0]);
                    format_used = VideoFormat::pixelFormatFromFFmpeg((int)codec->pix_fmts[0]);
                }
            }
        } else {
            if (format_used == VideoFormat::Format_Invalid) { // requested format is not supported by hw enc
                if (codec->pix_fmts && codec->pix_fmts[1] != AVPixelFormat(-1)) {
                    qDebug("use first supported sw pixel format '%d' for hw encoder", codec->pix_fmts[1]);
                    format_used = VideoFormat::pixelFormatFromFFmpeg(codec->pix_fmts[1]);
                }
            }
        }
        if (format_used == VideoFormat::Format_Invalid) {
            qWarning("fallback to yuv420p");
            format_used = VideoFormat::Format_YUV420P;
        }
        avctx->pix_fmt = (AVPixelFormat)VideoFormat::pixelFormatToFFmpeg(format_used);
    }
    if (frame_rate > 0)
        avctx->time_base = av_d2q(1.0/frame_rate, frame_rate*1001.0+2);
    else
        avctx->time_base = av_d2q(1.0/VideoEncoder::defaultFrameRate(), VideoEncoder::defaultFrameRate()*1001.0+2);
    qDebug("size: %dx%d tbc: %f=%d/%d", width, height, av_q2d(avctx->time_base), avctx->time_base.num, avctx->time_base.den);
    avctx->bit_rate = bit_rate;
    //AVDictionary *dict = 0;
    if(avctx->codec_id == QTAV_CODEC_ID(H264)) {
        avctx->gop_size = 10;
        //avctx->max_b_frames = 3;//h264
        av_dict_set(&dict, "preset", "fast", 0); //x264
        av_dict_set(&dict, "tune", "zerolatency", 0);  //x264
        //av_dict_set(&dict, "profile", "main", 0); // conflict with vaapi (int values)
    }
    if(avctx->codec_id == AV_CODEC_ID_HEVC){
        av_dict_set(&dict, "preset", "ultrafast", 0);
        av_dict_set(&dict, "tune", "zero-latency", 0);
    }
    if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        av_dict_set(&dict, "strict", "-2", 0); // mpeg2 arbitrary fps
    }
    applyOptionsForContext();
    AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
    // from mpv ao_lavc
    const int buffer_size = qMax<int>(qMax<int>(width*height*6+200, FF_MIN_BUFFER_SIZE), sizeof(AVPicture));//??
    buffer.resize(buffer_size);
    return true;
}
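Example #2 allocates avctx->hw_frames_ctx only for parameter checking and leaves hwframes_ref partly configured. For comparison, a fully initialized upload-side frames context usually looks roughly like the sketch below; the function name, device type, pixel formats, dimensions and pool size are placeholder assumptions, not values taken from QtAV.

// Sketch: allocating and initializing an AVHWFramesContext for uploading
// software frames toward a hardware encoder. Illustrative only.
extern "C" {
#include <libavutil/hwcontext.h>
}

static AVBufferRef* make_hw_frames(AVBufferRef *device_ref, int w, int h)
{
    AVBufferRef *frames_ref = av_hwframe_ctx_alloc(device_ref);
    if (!frames_ref)
        return nullptr;
    AVHWFramesContext *fc = (AVHWFramesContext*)frames_ref->data;
    fc->format    = AV_PIX_FMT_VAAPI;   // hw pixel format; must match avctx->pix_fmt (assumed VAAPI here)
    fc->sw_format = AV_PIX_FMT_NV12;    // software format of the frames that will be uploaded
    fc->width     = w;
    fc->height    = h;
    fc->initial_pool_size = 20;         // preallocated surfaces; encoder-dependent guess
    if (av_hwframe_ctx_init(frames_ref) < 0) {
        av_buffer_unref(&frames_ref);
        return nullptr;
    }
    return frames_ref;
}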
Example #3
bool AudioEncoderFFmpegPrivate::open()
{
    if (codec_name.isEmpty()) {
        // copy ctx from muxer by copyAVCodecContext
        AVCodec *codec = avcodec_find_encoder(avctx->codec_id);
        AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
        return true;
    }
    AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData());
    if (!codec) {
        const AVCodecDescriptor* cd = avcodec_descriptor_get_by_name(codec_name.toUtf8().constData());
        if (cd) {
            codec = avcodec_find_encoder(cd->id);
        }
    }
    if (!codec) {
        qWarning() << "Can not find encoder for codec " << codec_name;
        return false;
    }
    if (avctx) {
        avcodec_free_context(&avctx);
        avctx = 0;
    }
    avctx = avcodec_alloc_context3(codec);

    // reset format_used to user defined format. important to update default format if format is invalid
    format_used = format;
    if (format.sampleRate() <= 0) {
        if (codec->supported_samplerates) {
            qDebug("use first supported sample rate: %d", codec->supported_samplerates[0]);
            format_used.setSampleRate(codec->supported_samplerates[0]);
        } else {
            qWarning("sample rate and supported sample rate are not set. use 44100");
            format_used.setSampleRate(44100);
        }
    }
    if (format.sampleFormat() == AudioFormat::SampleFormat_Unknown) {
        if (codec->sample_fmts) {
            qDebug("use first supported sample format: %d", codec->sample_fmts[0]);
            format_used.setSampleFormatFFmpeg((int)codec->sample_fmts[0]);
        } else {
            qWarning("sample format and supported sample format are not set. use s16");
            format_used.setSampleFormat(AudioFormat::SampleFormat_Signed16);
        }
    }
    if (format.channelLayout() == AudioFormat::ChannelLayout_Unsupported) {
        if (codec->channel_layouts) {
            qDebug("use first supported channel layout: %lld", codec->channel_layouts[0]);
            format_used.setChannelLayoutFFmpeg((qint64)codec->channel_layouts[0]);
        } else {
            qWarning("channel layout and supported channel layout are not set. use stereo");
            format_used.setChannelLayout(AudioFormat::ChannelLayout_Stereo);
        }
    }
    avctx->sample_fmt = (AVSampleFormat)format_used.sampleFormatFFmpeg();
    avctx->channel_layout = format_used.channelLayoutFFmpeg();
    avctx->channels = format_used.channels();
    avctx->sample_rate = format_used.sampleRate();
    avctx->bits_per_raw_sample = format_used.bytesPerSample()*8;

    /// set the time base. TODO
    avctx->time_base.num = 1;
    avctx->time_base.den = format_used.sampleRate();

    avctx->bit_rate = bit_rate;
    qDebug() << format_used;

    av_dict_set(&dict, "strict", "-2", 0); //aac, vorbis
    applyOptionsForContext();
    // avctx->frame_size will be set in avcodec_open2
    AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
    // from mpv ao_lavc
    int pcm_hack = 0;
    int buffer_size = 0;
    frame_size = avctx->frame_size;
    if (frame_size <= 1)
        pcm_hack = av_get_bits_per_sample(avctx->codec_id)/8;
    if (pcm_hack) {
        frame_size = 16384; // "enough"
        buffer_size = frame_size*pcm_hack*format_used.channels()*2+200;
    } else {
        buffer_size = frame_size*format_used.bytesPerSample()*format_used.channels()*2+200;
    }
    if (buffer_size < FF_MIN_BUFFER_SIZE)
        buffer_size = FF_MIN_BUFFER_SIZE;
    buffer.resize(buffer_size);
    return true;
}
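Example #3 only falls back when the requested sample rate, format or layout is unset; it does not verify that a user-supplied value is actually supported by the encoder. The helpers below are a sketch of such a check (illustrative names, not part of QtAV), mirroring the pixel-format loop in Example #2.

// Sketch: checking requested audio parameters against the encoder's
// advertised capabilities. Illustrative only.
extern "C" {
#include <libavcodec/avcodec.h>
}

static bool supportsSampleFormat(const AVCodec *codec, AVSampleFormat fmt)
{
    if (!codec->sample_fmts)
        return true; // codec does not restrict sample formats
    for (const AVSampleFormat *p = codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; ++p) {
        if (*p == fmt)
            return true;
    }
    return false;
}

static bool supportsSampleRate(const AVCodec *codec, int rate)
{
    if (!codec->supported_samplerates)
        return true; // codec accepts any sample rate
    for (const int *p = codec->supported_samplerates; *p; ++p) {
        if (*p == rate)
            return true;
    }
    return false;
}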
Example #4
bool VideoEncoderFFmpegPrivate::open()
{
    nb_encoded = 0LL;
    if (codec_name.isEmpty()) {
        // copy ctx from muxer by copyAVCodecContext
        AVCodec *codec = avcodec_find_encoder(avctx->codec_id);
        AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
        return true;
    }
    AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData());
    if (!codec) {
        const AVCodecDescriptor* cd = avcodec_descriptor_get_by_name(codec_name.toUtf8().constData());
        if (cd) {
            codec = avcodec_find_encoder(cd->id);
        }
    }
    if (!codec) {
        qWarning() << "Can not find encoder for codec " << codec_name;
        return false;
    }
    if (avctx) {
        avcodec_free_context(&avctx);
        avctx = 0;
    }
    avctx = avcodec_alloc_context3(codec);
    avctx->width = width; // coded_width works, why?
    avctx->height = height;
    // reset format_used to user defined format. important to update default format if format is invalid
    format_used = format.pixelFormat();
    if (format.pixelFormat() == VideoFormat::Format_Invalid) {
        if (codec->pix_fmts) {
            qDebug("use first supported pixel format: %d", codec->pix_fmts[0]);
            format_used = VideoFormat::pixelFormatFromFFmpeg((int)codec->pix_fmts[0]);
        } else {
            qWarning("pixel format and supported pixel format are not set. use yuv420p");
            format_used = VideoFormat::Format_YUV420P;
        }
    }
    //avctx->sample_aspect_ratio =
    avctx->pix_fmt = (AVPixelFormat)VideoFormat::pixelFormatToFFmpeg(format_used);
    if (frame_rate > 0)
        avctx->time_base = av_d2q(1.0/frame_rate, frame_rate*1001.0+2);
    else
        avctx->time_base = av_d2q(1.0/VideoEncoder::defaultFrameRate(), VideoEncoder::defaultFrameRate()*1001.0+2);
    qDebug("size: %dx%d tbc: %f=%d/%d", width, height, av_q2d(avctx->time_base), avctx->time_base.num, avctx->time_base.den);
    avctx->bit_rate = bit_rate;
#if 1
    //AVDictionary *dict = 0;
    if(avctx->codec_id == QTAV_CODEC_ID(H264)) {
        avctx->gop_size = 10;
        //avctx->max_b_frames = 3;//h264
        av_dict_set(&dict, "preset", "fast", 0);
        av_dict_set(&dict, "tune", "zerolatency", 0);
        av_dict_set(&dict, "profile", "main", 0);
    }
#ifdef FF_PROFILE_HEVC_MAIN
    if(avctx->codec_id == AV_CODEC_ID_HEVC){
        av_dict_set(&dict, "preset", "ultrafast", 0);
        av_dict_set(&dict, "tune", "zero-latency", 0);
    }
#endif //FF_PROFILE_HEVC_MAIN
#endif
    applyOptionsForContext();
    AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
    // from mpv ao_lavc
    const int buffer_size = qMax<int>(qMax<int>(width*height*6+200, FF_MIN_BUFFER_SIZE), sizeof(AVPicture));//??
    buffer.resize(buffer_size);
    return true;
}
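After open() succeeds, frames still have to be pushed through the context; none of these examples show that step. The sketch below illustrates the generic send/receive pattern available in FFmpeg 3.1 and later. QtAV of this era used avcodec_encode_video2() instead, so treat this as an assumed, simplified illustration rather than the project's actual encode path.

// Sketch: feeding frames to an opened AVCodecContext with the send/receive API.
// Illustrative only; not QtAV code.
extern "C" {
#include <libavcodec/avcodec.h>
}

static bool encodeFrame(AVCodecContext *avctx, const AVFrame *frame /* NULL to flush */)
{
    if (avcodec_send_frame(avctx, frame) < 0)
        return false;
    AVPacket *pkt = av_packet_alloc();
    if (!pkt)
        return false;
    int ret;
    while ((ret = avcodec_receive_packet(avctx, pkt)) == 0) {
        // hand pkt to the muxer here (e.g. av_interleaved_write_frame), then release it
        av_packet_unref(pkt);
    }
    av_packet_free(&pkt);
    // EAGAIN means the encoder wants more input; EOF is expected when flushing
    return ret == AVERROR(EAGAIN) || ret == AVERROR_EOF;
}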