Example #1
AVFrame* capturer::convert_pix_fmt(AVFrame* src, AVPixelFormat dst_pix_fmt, std::string& error_message){
	SwsContext* c = 0;
	AVFrame* ret_frame = 0;

	try{

		//TODO: SwsContext cache
		c = libav::sws_getContext( src->width, src->height,
			AVPixelFormat(src->format),
			src->width, src->height, dst_pix_fmt, SWS_BICUBIC, NULL, NULL, NULL );
		if (!c)
			throw std::runtime_error("cannot allocate SwsContext");

		ret_frame = libav::av_frame_alloc();
		if (!ret_frame)
			throw std::runtime_error("cannot allocate AVFrame");
		ret_frame->width = src->width;
		ret_frame->height = src->height;
		ret_frame->format = dst_pix_fmt;
		if (libav::av_frame_get_buffer(ret_frame, 1) < 0)
			throw std::runtime_error("cannot allocate AVFrame buffer");

		const int res = libav::sws_scale( c,
			src->data, src->linesize,
			0,
			src->height,
			ret_frame->data, ret_frame->linesize );
		if (res < 0)
			throw std::runtime_error("sws_scale failed");

	}
	catch(std::runtime_error& ex){
		error_message = ex.what();
		libav::av_frame_free(&ret_frame); // safe on a null frame; also resets the pointer
	}

	if (c)
		libav::sws_freeContext(c);

	return ret_frame;

}
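
For context, a minimal usage sketch of convert_pix_fmt(); cap (a capturer instance), decoded (the source frame) and consume_frame() are hypothetical names, not from the source:

	std::string error_message;
	AVFrame* converted = cap.convert_pix_fmt(decoded, AV_PIX_FMT_YUV420P, error_message);
	if (!converted){
		std::cerr << error_message << std::endl; // conversion failed; error_message says why
	}else{
		consume_frame(converted);               // hypothetical consumer
		libav::av_frame_free(&converted);       // the caller owns the returned frame
	}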
Example #2
bool VideoEncoderFFmpeg::encode(const VideoFrame &frame)
{
    DPTR_D(VideoEncoderFFmpeg);
    QScopedPointer<AVFrame, ScopedAVFrameDeleter> f;
    // hwupload
    AVPixelFormat pixfmt = AVPixelFormat(frame.pixelFormatFFmpeg());
    if (frame.isValid()) {
        f.reset(av_frame_alloc());
        f->format = pixfmt;
        f->width = frame.width();
        f->height = frame.height();
//        f->quality = d.avctx->global_quality;
        switch (timestampMode()) {
        case TimestampCopy:
            f->pts = int64_t(frame.timestamp()*frameRate()); // TODO: check that pts increases monotonically and fix it if not. or another mode?
            break;
        case TimestampMonotonic:
            f->pts = d.nb_encoded+1;
            break;
        default:
            break;
        }

        // pts is set in muxer
        const int nb_planes = frame.planeCount();
        for (int i = 0; i < nb_planes; ++i) {
            f->linesize[i] = frame.bytesPerLine(i);
            f->data[i] = (uint8_t*)frame.constBits(i);
        }
        if (d.avctx->width <= 0) {
            d.avctx->width = frame.width();
        }
        if (d.avctx->height <= 0) {
            d.avctx->height = frame.height();
        }
#ifdef HAVE_AVHWCTX
        if (d.avctx->hw_frames_ctx) {
            // TODO: try to map to SourceSurface
            // check valid sw_formats
            if (!d.hwframes_ref) {
                qWarning("no hw frame context for uploading");
                return false;
            }
            if (pixfmt != d.hwframes->sw_format) {
                // reinit (format changed) or got an unsupported format; assume parameters will not change, so this is the 1st init
                // check constraints
                bool init_frames_ctx = d.hwframes->sw_format == AVPixelFormat(-1);
                if (d.sw_fmts.contains(pixfmt)) { // format changed
                    init_frames_ctx = true;
                } else { // convert to supported sw format
                    pixfmt = d.sw_fmts[0];
                    f->format = pixfmt;
                    VideoFrame converted = frame.to(VideoFormat::pixelFormatFromFFmpeg(pixfmt));
                    for (int i = 0; i < converted.planeCount(); ++i) {
                        f->linesize[i] = converted.bytesPerLine(i);
                        f->data[i] = (uint8_t*)converted.constBits(i); // use the converted frame's planes, not the source's
                    }
                }
                if (init_frames_ctx) {
                    d.hwframes->sw_format = pixfmt;
                    d.hwframes->width = frame.width();
                    d.hwframes->height = frame.height();
                    AV_ENSURE(av_hwframe_ctx_init(d.hwframes_ref), false);
                }
            }
            // upload
            QScopedPointer<AVFrame, ScopedAVFrameDeleter> hwf( av_frame_alloc());
            AV_ENSURE(av_hwframe_get_buffer(d.hwframes_ref, hwf.data(), 0), false);
            //hwf->format = d.hwframes->format; // not necessary
            //hwf->width = f->width;
            //hwf->height = f->height;
            AV_ENSURE(av_hwframe_transfer_data(hwf.data(), f.data(), 0), false);
            AV_ENSURE(av_frame_copy_props(hwf.data(), f.data()), false);
            av_frame_unref(f.data());
            av_frame_move_ref(f.data(), hwf.data());
        }
#endif //HAVE_AVHWCTX
    }
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = (uint8_t*)d.buffer.constData();
    pkt.size = d.buffer.size();
    int got_packet = 0;
    int ret = avcodec_encode_video2(d.avctx, &pkt, f.data(), &got_packet);
    if (ret < 0) {
        qWarning("error avcodec_encode_video2: %s" ,av_err2str(ret));
        return false; //false
    }
    d.nb_encoded++;
    if (!got_packet) {
        qWarning("no packet got");
        d.packet = Packet();
        // invalid frame means eof
        return frame.isValid();
    }
   // qDebug("enc avpkt.pts: %lld, dts: %lld.", pkt.pts, pkt.dts);
    d.packet = Packet::fromAVPacket(&pkt, av_q2d(d.avctx->time_base));
   // qDebug("enc packet.pts: %.3f, dts: %.3f.", d.packet.pts, d.packet.dts);
    return true;
}
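
Note that avcodec_encode_video2() used above has been deprecated since FFmpeg 3.1 in favor of the send/receive API. A minimal sketch of the equivalent loop, independent of the QtAV wrappers (on_packet is a hypothetical packet sink):

static bool encode_frame(AVCodecContext *avctx, AVFrame *frame,
                         void (*on_packet)(const AVPacket*)) // hypothetical sink
{
    int ret = avcodec_send_frame(avctx, frame); // frame == NULL enters draining (flush) mode
    if (ret < 0)
        return false;
    AVPacket *pkt = av_packet_alloc();
    if (!pkt)
        return false;
    while ((ret = avcodec_receive_packet(avctx, pkt)) >= 0) {
        on_packet(pkt);
        av_packet_unref(pkt);
    }
    av_packet_free(&pkt);
    // EAGAIN (needs more input) and AVERROR_EOF (fully drained) end the loop normally
    return ret == AVERROR(EAGAIN) || ret == AVERROR_EOF;
}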
Example #3
QbPacket VideoStream::convert(AVFrame *iFrame)
{
    AVPicture *oPicture;
    AVPixelFormat oFormat;
    bool delFrame = false;

    if (outputFormats->contains(AVPixelFormat(iFrame->format))) {
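        // AVFrame begins with the same data/linesize layout as AVPicture,
        // so the decoded frame can be reused directly without a copy.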
        oPicture = (AVPicture *) iFrame;
        oFormat = AVPixelFormat(iFrame->format);
    }
    else {
        oPicture = new AVPicture;
        oFormat = AV_PIX_FMT_BGRA;

        avpicture_alloc(oPicture,
                        oFormat,
                        iFrame->width,
                        iFrame->height);

        this->m_scaleContext = sws_getCachedContext(this->m_scaleContext,
                                                    iFrame->width,
                                                    iFrame->height,
                                                    AVPixelFormat(iFrame->format),
                                                    iFrame->width,
                                                    iFrame->height,
                                                    oFormat,
                                                    SWS_FAST_BILINEAR,
                                                    NULL,
                                                    NULL,
                                                    NULL);

        if (!this->m_scaleContext) {
            // could not create a scaling context; release the picture and bail out
            avpicture_free(oPicture);
            delete oPicture;

            return QbPacket();
        }

        sws_scale(this->m_scaleContext,
                  (uint8_t **) iFrame->data,
                  iFrame->linesize,
                  0,
                  iFrame->height,
                  oPicture->data,
                  oPicture->linesize);

        delFrame = true;
    }

    QbVideoPacket packet;
    packet.caps().isValid() = true;
    packet.caps().format() = outputFormats->value(oFormat);
    packet.caps().width() = iFrame->width;
    packet.caps().height() = iFrame->height;
    packet.caps().fps() = this->fps();

    int frameSize = avpicture_get_size(oFormat,
                                       iFrame->width,
                                       iFrame->height);

    QbBufferPtr oBuffer(new char[frameSize]);

    avpicture_layout(oPicture,
                     oFormat,
                     iFrame->width,
                     iFrame->height,
                     (uint8_t *) oBuffer.data(),
                     frameSize);

    packet.buffer() = oBuffer;
    packet.bufferSize() = frameSize;
    packet.pts() = av_frame_get_best_effort_timestamp(iFrame);
    packet.timeBase() = this->timeBase();
    packet.index() = this->index();
    packet.id() = this->id();

    if (delFrame) {
        avpicture_free(oPicture);
        delete oPicture;
    }

    return packet.toPacket();
}
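
The AVPicture helpers used above (avpicture_alloc(), avpicture_layout(), avpicture_free()) were deprecated in FFmpeg 3.0. A sketch of the same copy-to-buffer step with the libavutil image utilities, reusing oPicture, oFormat and the frame dimensions from the function above:

#include <libavutil/imgutils.h>

// Replaces avpicture_get_size() + avpicture_layout(): flatten the planes
// of oPicture into one contiguous buffer with byte alignment 1.
int frameSize = av_image_get_buffer_size(oFormat, iFrame->width, iFrame->height, 1);
QbBufferPtr oBuffer(new char[frameSize]);
av_image_copy_to_buffer((uint8_t *) oBuffer.data(), frameSize,
                        oPicture->data, oPicture->linesize,
                        oFormat, iFrame->width, iFrame->height, 1);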
Example #4
bool VideoEncoderFFmpegPrivate::open()
{
    nb_encoded = 0LL;
    if (codec_name.isEmpty()) {
        // copy ctx from muxer by copyAVCodecContext
        AVCodec *codec = avcodec_find_encoder(avctx->codec_id);
        AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
        return true;
    }
    AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData());
    if (!codec) {
        const AVCodecDescriptor* cd = avcodec_descriptor_get_by_name(codec_name.toUtf8().constData());
        if (cd) {
            codec = avcodec_find_encoder(cd->id);
        }
    }
    if (!codec) {
        qWarning() << "Can not find encoder for codec " << codec_name;
        return false;
    }
    if (avctx) {
        avcodec_free_context(&avctx); // also resets avctx to NULL
    }
    avctx = avcodec_alloc_context3(codec);
    avctx->width = width; // coded_width works, why?
    avctx->height = height;
    // reset format_used to user defined format. important to update default format if format is invalid
    format_used = VideoFormat::Format_Invalid;
    AVPixelFormat fffmt = (AVPixelFormat)format.pixelFormatFFmpeg();
    if (codec->pix_fmts && format.isValid()) {
        for (int i = 0; codec->pix_fmts[i] != AVPixelFormat(-1); ++i) {
            if (fffmt == codec->pix_fmts[i]) {
                format_used = format.pixelFormat();
                break;
            }
        }
    }
    //avctx->sample_aspect_ratio =
    AVPixelFormat hwfmt = AVPixelFormat(-1);
    if (codec->pix_fmts && (av_pix_fmt_desc_get(codec->pix_fmts[0])->flags & AV_PIX_FMT_FLAG_HWACCEL))
        hwfmt = codec->pix_fmts[0];
    bool use_hwctx = false;
    if (hwfmt != AVPixelFormat(-1)) {
#ifdef HAVE_AVHWCTX
        const AVHWDeviceType dt = fromHWAName(codec_name.section(QChar('_'), -1).toUtf8().constData());
        if (dt != AVHWDeviceType(-1)) {
            use_hwctx = true;
            avctx->pix_fmt = hwfmt;
            hw_device_ctx = NULL;
            AV_ENSURE(av_hwdevice_ctx_create(&hw_device_ctx, dt, hwdev.toLatin1().constData(), NULL, 0), false);
            avctx->hw_frames_ctx = av_hwframe_ctx_alloc(hw_device_ctx);
            if (!avctx->hw_frames_ctx) {
                qWarning("Failed to create hw frame context for '%s'", codec_name.toLatin1().constData());
                return false;
            }
            // get sw formats
            const void *hwcfg = NULL;
            AVHWFramesConstraints *constraints = av_hwdevice_get_hwframe_constraints(hw_device_ctx, hwcfg);
            // constraints can be NULL if the device cannot report them
            const AVPixelFormat* in_fmts = constraints ? constraints->valid_sw_formats : NULL;
            AVPixelFormat sw_fmt = AVPixelFormat(-1);
            if (in_fmts) {
                sw_fmt = in_fmts[0];
                while (*in_fmts != AVPixelFormat(-1)) {
                    if (*in_fmts == fffmt)
                        sw_fmt = *in_fmts;
                    sw_fmts.append(*in_fmts);
                    ++in_fmts;
                }
            } else {
                sw_fmt = QTAV_PIX_FMT_C(YUV420P);
            }
            av_hwframe_constraints_free(&constraints);
            format_used = VideoFormat::pixelFormatFromFFmpeg(sw_fmt);
            // encoder surface pool parameters
            AVHWFramesContext* hwfs = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
            hwfs->format = hwfmt; // must be the same as avctx->pix_fmt
            hwfs->sw_format = sw_fmt; // if it's not set, vaapi will choose the last of valid_sw_formats, but that's wrong for vaGetImage/DeriveImage. nvenc always needs sw_format
            // hw upload parameters. the encoder's hwframes is only for parameter checking and will never be initialized, so we allocate an individual one.
            hwframes_ref = av_hwframe_ctx_alloc(hw_device_ctx);
            if (!hwframes_ref) {
                qWarning("Failed to create hw frame context for uploading '%s'", codec_name.toLatin1().constData());
            } else {
                hwframes = (AVHWFramesContext*)hwframes_ref->data;
                hwframes->format = hwfmt;
            }
        }
#endif //HAVE_AVHWCTX
    }

    if (!use_hwctx) { // no hw device (videotoolbox, wrong device name etc.), or old ffmpeg
        // TODO: check frame is hw frame
        if (hwfmt == AVPixelFormat(-1)) { // sw enc
            if (format_used == VideoFormat::Format_Invalid) {// requested format is not supported by sw enc
                if (codec->pix_fmts) { //pix_fmts[0] is always a sw format here
                    qDebug("use first supported pixel format '%d' for sw encoder", codec->pix_fmts[0]);
                    format_used = VideoFormat::pixelFormatFromFFmpeg((int)codec->pix_fmts[0]);
                }
            }
        } else {
            if (format_used == VideoFormat::Format_Invalid) { // requested format is not supported by hw enc
                if (codec->pix_fmts && codec->pix_fmts[1] != AVPixelFormat(-1)) {
                    // log only after the guard: pix_fmts[1] may not exist
                    qDebug("use first supported sw pixel format '%d' for hw encoder", codec->pix_fmts[1]);
                    format_used = VideoFormat::pixelFormatFromFFmpeg(codec->pix_fmts[1]);
                }
            }
        }
        if (format_used == VideoFormat::Format_Invalid) {
            qWarning("fallback to yuv420p");
            format_used = VideoFormat::Format_YUV420P;
        }
        avctx->pix_fmt = (AVPixelFormat)VideoFormat::pixelFormatToFFmpeg(format_used);
    }
    if (frame_rate > 0)
        avctx->time_base = av_d2q(1.0/frame_rate, frame_rate*1001.0+2);
    else
        avctx->time_base = av_d2q(1.0/VideoEncoder::defaultFrameRate(), VideoEncoder::defaultFrameRate()*1001.0+2);
    qDebug("size: %dx%d tbc: %f=%d/%d", width, height, av_q2d(avctx->time_base), avctx->time_base.num, avctx->time_base.den);
    avctx->bit_rate = bit_rate;
    //AVDictionary *dict = 0;
    if(avctx->codec_id == QTAV_CODEC_ID(H264)) {
        avctx->gop_size = 10;
        //avctx->max_b_frames = 3;//h264
        av_dict_set(&dict, "preset", "fast", 0); //x264
        av_dict_set(&dict, "tune", "zerolatency", 0);  //x264
        //av_dict_set(&dict, "profile", "main", 0); // conflict with vaapi (int values)
    }
    if(avctx->codec_id == AV_CODEC_ID_HEVC){
        av_dict_set(&dict, "preset", "ultrafast", 0);
        av_dict_set(&dict, "tune", "zero-latency", 0);
    }
    if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        av_dict_set(&dict, "strict", "-2", 0); // mpeg2 arbitrary fps
    }
    applyOptionsForContext();
    AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
    // from mpv ao_lavc
    const int buffer_size = qMax<int>(qMax<int>(width*height*6+200, FF_MIN_BUFFER_SIZE), sizeof(AVPicture));//??
    buffer.resize(buffer_size);
    return true;
}
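
For reference, the av_d2q() rounding above recovers exact NTSC-style timebases; a small standalone check, with the rate value assumed for illustration:

double fps = 30000.0/1001.0; // 29.97 fps, NTSC
AVRational tb = av_d2q(1.0/fps, int(fps*1001.0 + 2));
// expected: tb.num == 1001, tb.den == 30000, i.e. exactly 30000/1001 fps
qDebug("time_base = %d/%d", tb.num, tb.den);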