Example #1
int ff_qsv_encode(AVCodecContext *avctx, QSVEncContext *q,
                  AVPacket *pkt, const AVFrame *frame, int *got_packet)
{
    mfxBitstream bs = { { { 0 } } };

    mfxFrameSurface1 *surf = NULL;
    mfxSyncPoint sync      = NULL;
    int ret;

    if (frame) {
        ret = submit_frame(q, frame, &surf);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error submitting the frame for encoding.\n");
            return ret;
        }
    }

    ret = ff_alloc_packet(pkt, q->packet_size);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating the output packet\n");
        return ret;
    }
    bs.Data      = pkt->data;
    bs.MaxLength = pkt->size;

    do {
        ret = MFXVideoENCODE_EncodeFrameAsync(q->session, NULL, surf, &bs, &sync);
        if (ret == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);
    } while (ret > 0);

    if (ret < 0)
        return (ret == MFX_ERR_MORE_DATA) ? 0 : ff_qsv_error(ret);

    if (ret == MFX_WRN_INCOMPATIBLE_VIDEO_PARAM && frame && frame->interlaced_frame)
        print_interlace_msg(avctx, q);

    if (sync) {
        MFXVideoCORE_SyncOperation(q->session, sync, 60000);

        if (bs.FrameType & MFX_FRAMETYPE_I || bs.FrameType & MFX_FRAMETYPE_xI)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
        else if (bs.FrameType & MFX_FRAMETYPE_P || bs.FrameType & MFX_FRAMETYPE_xP)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
        else if (bs.FrameType & MFX_FRAMETYPE_B || bs.FrameType & MFX_FRAMETYPE_xB)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_B;

        pkt->dts  = av_rescale_q(bs.DecodeTimeStamp, (AVRational){1, 90000}, avctx->time_base);
        pkt->pts  = av_rescale_q(bs.TimeStamp,       (AVRational){1, 90000}, avctx->time_base);
        pkt->size = bs.DataLength;

        if (bs.FrameType & MFX_FRAMETYPE_IDR ||
            bs.FrameType & MFX_FRAMETYPE_xIDR)
            pkt->flags |= AV_PKT_FLAG_KEY;

        *got_packet = 1;
    }

    return 0;
}
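A note on the timestamp handling above: libmfx reports TimeStamp and DecodeTimeStamp on a 90 kHz clock, which is why both are rescaled into the codec time base. A minimal, self-contained sketch of that conversion (the 1/25 time base is an assumption for illustration, not taken from the example):

#include <stdio.h>
#include <stdint.h>
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

int main(void)
{
    int64_t mfx_ts = 180000;              /* 2 seconds on the 90 kHz clock */
    AVRational mfx_tb   = { 1, 90000 };
    AVRational codec_tb = { 1, 25 };      /* assumed 25 fps codec time base */

    /* the same conversion as the pkt->pts / pkt->dts lines above */
    printf("%lld\n", (long long)av_rescale_q(mfx_ts, mfx_tb, codec_tb)); /* 50 */
    return 0;
}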
Example #2
int ff_qsv_decode_init(AVCodecContext *avctx, QSVContext *q)
{
    mfxVideoParam param = { { 0 } };
    int ret;

    q->iopattern  = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;

    if (avctx->hwaccel_context) {
        AVQSVContext *qsv = avctx->hwaccel_context;

        q->session        = qsv->session;
        q->iopattern      = qsv->iopattern;
        q->ext_buffers    = qsv->ext_buffers;
        q->nb_ext_buffers = qsv->nb_ext_buffers;
    }
    if (!q->session) {
        ret = ff_qsv_init_internal_session(avctx, &q->internal_qs, NULL);
        if (ret < 0)
            return ret;

        q->session = q->internal_qs.session;
    }

    ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
    if (ret < 0)
        return ret;

    param.mfx.CodecId      = ret;
    param.mfx.CodecProfile = avctx->profile;
    param.mfx.CodecLevel   = avctx->level;

    param.mfx.FrameInfo.BitDepthLuma   = 8;
    param.mfx.FrameInfo.BitDepthChroma = 8;
    param.mfx.FrameInfo.Shift          = 0;
    param.mfx.FrameInfo.FourCC         = MFX_FOURCC_NV12;
    param.mfx.FrameInfo.Width          = avctx->coded_width;
    param.mfx.FrameInfo.Height         = avctx->coded_height;
    param.mfx.FrameInfo.ChromaFormat   = MFX_CHROMAFORMAT_YUV420;

    param.IOPattern   = q->iopattern;
    param.AsyncDepth  = q->async_depth;
    param.ExtParam    = q->ext_buffers;
    param.NumExtParam = q->nb_ext_buffers;

    ret = MFXVideoDECODE_Init(q->session, &param);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing the MFX video decoder\n");
        return ff_qsv_error(ret);
    }

    return 0;
}
Example #3
static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q, mfxSession session)
{
    mfxVideoParam param = { { 0 } };
    int ret;

    if (!q->async_fifo) {
        q->async_fifo = av_fifo_alloc((1 + q->async_depth) *
                                      (sizeof(mfxSyncPoint*) + sizeof(QSVFrame*)));
        if (!q->async_fifo)
            return AVERROR(ENOMEM);
    }

    ret = qsv_init_session(avctx, q, session);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
        return ret;
    }

    ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
    if (ret < 0)
        return ret;

    param.mfx.CodecId      = ret;
    param.mfx.CodecProfile = avctx->profile;
    param.mfx.CodecLevel   = avctx->level;

    param.mfx.FrameInfo.BitDepthLuma   = 8;
    param.mfx.FrameInfo.BitDepthChroma = 8;
    param.mfx.FrameInfo.Shift          = 0;
    param.mfx.FrameInfo.FourCC         = MFX_FOURCC_NV12;
    param.mfx.FrameInfo.Width          = avctx->coded_width;
    param.mfx.FrameInfo.Height         = avctx->coded_height;
    param.mfx.FrameInfo.ChromaFormat   = MFX_CHROMAFORMAT_YUV420;

    param.IOPattern   = q->iopattern;
    param.AsyncDepth  = q->async_depth;
    param.ExtParam    = q->ext_buffers;
    param.NumExtParam = q->nb_ext_buffers;

    ret = MFXVideoDECODE_Init(q->session, &param);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing the MFX video decoder\n");
        return ff_qsv_error(ret);
    }

    return 0;
}
Example #4
static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session)
{
    if (!session) {
        if (!q->internal_session) {
            mfxIMPL impl   = MFX_IMPL_AUTO_ANY;
            mfxVersion ver = { { QSV_VERSION_MINOR, QSV_VERSION_MAJOR } };

            const char *desc;
            int ret;

            ret = MFXInit(impl, &ver, &q->internal_session);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Error initializing an internal MFX session\n");
                return ff_qsv_error(ret);
            }

            MFXQueryIMPL(q->internal_session, &impl);

            if (impl & MFX_IMPL_SOFTWARE)
                desc = "software";
            else if (impl & MFX_IMPL_HARDWARE)
                desc = "hardware accelerated";
            else
                desc = "unknown";

            av_log(avctx, AV_LOG_VERBOSE,
                   "Initialized an internal MFX session using %s implementation\n",
                   desc);
        }

        q->session = q->internal_session;
    } else {
        q->session = session;
    }

    /* make sure the decoder is uninitialized */
    MFXVideoDECODE_Close(q->session);

    return 0;
}
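The session-probing pattern above can be exercised on its own. A minimal sketch, assuming libmfx headers are installed under mfx/ and the program is linked with -lmfx; requesting API version 1.1 is an arbitrary choice for illustration:

#include <stdio.h>
#include <mfx/mfxvideo.h>

int main(void)
{
    mfxIMPL impl       = MFX_IMPL_AUTO_ANY;
    mfxVersion ver     = { { 1, 1 } };    /* Minor, Major: request API >= 1.1 */
    mfxSession session = NULL;

    if (MFXInit(impl, &ver, &session) < 0) {
        fprintf(stderr, "MFXInit failed\n");
        return 1;
    }

    /* ask the dispatcher which implementation was actually loaded */
    MFXQueryIMPL(session, &impl);
    printf("implementation: %s\n",
           (impl & MFX_IMPL_HARDWARE) ? "hardware" : "software");

    MFXClose(session);
    return 0;
}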
Example #5
int ff_qsv_encode(AVCodecContext *avctx, QSVEncContext *q,
                  AVPacket *pkt, const AVFrame *frame, int *got_packet)
{
    AVPacket new_pkt = { 0 };
    mfxBitstream *bs;

    mfxFrameSurface1 *surf = NULL;
    mfxSyncPoint sync      = NULL;
    int ret;

    if (frame) {
        ret = submit_frame(q, frame, &surf);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error submitting the frame for encoding.\n");
            return ret;
        }
    }

    ret = av_new_packet(&new_pkt, q->packet_size);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating the output packet\n");
        return ret;
    }

    bs = av_mallocz(sizeof(*bs));
    if (!bs) {
        av_packet_unref(&new_pkt);
        return AVERROR(ENOMEM);
    }
    bs->Data      = new_pkt.data;
    bs->MaxLength = new_pkt.size;

    do {
        ret = MFXVideoENCODE_EncodeFrameAsync(q->session, NULL, surf, bs, &sync);
        if (ret == MFX_WRN_DEVICE_BUSY) {
            av_usleep(500);
            continue;
        }
        break;
    } while (1);

    if (ret < 0) {
        av_packet_unref(&new_pkt);
        av_freep(&bs);
        if (ret == MFX_ERR_MORE_DATA)
            return 0;
        av_log(avctx, AV_LOG_ERROR, "EncodeFrameAsync returned %d\n", ret);
        return ff_qsv_error(ret);
    }

    if (ret == MFX_WRN_INCOMPATIBLE_VIDEO_PARAM) {
        if (frame && frame->interlaced_frame)
            print_interlace_msg(avctx, q);
        else
            av_log(avctx, AV_LOG_WARNING,
                   "EncodeFrameAsync returned 'incompatible param' code\n");
    }
    if (sync) {
        av_fifo_generic_write(q->async_fifo, &new_pkt, sizeof(new_pkt), NULL);
        av_fifo_generic_write(q->async_fifo, &sync,    sizeof(sync),    NULL);
        av_fifo_generic_write(q->async_fifo, &bs,      sizeof(bs),      NULL);
    } else {
        av_packet_unref(&new_pkt);
        av_freep(&bs);
    }

    if (!av_fifo_space(q->async_fifo) ||
        (!frame && av_fifo_size(q->async_fifo))) {
        av_fifo_generic_read(q->async_fifo, &new_pkt, sizeof(new_pkt), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,    sizeof(sync),    NULL);
        av_fifo_generic_read(q->async_fifo, &bs,      sizeof(bs),      NULL);

        MFXVideoCORE_SyncOperation(q->session, sync, 60000);

        new_pkt.dts  = av_rescale_q(bs->DecodeTimeStamp, (AVRational){1, 90000}, avctx->time_base);
        new_pkt.pts  = av_rescale_q(bs->TimeStamp,       (AVRational){1, 90000}, avctx->time_base);
        new_pkt.size = bs->DataLength;

        if (bs->FrameType & MFX_FRAMETYPE_IDR ||
            bs->FrameType & MFX_FRAMETYPE_xIDR)
            new_pkt.flags |= AV_PKT_FLAG_KEY;

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
        if (bs->FrameType & MFX_FRAMETYPE_I || bs->FrameType & MFX_FRAMETYPE_xI)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
        else if (bs->FrameType & MFX_FRAMETYPE_P || bs->FrameType & MFX_FRAMETYPE_xP)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
        else if (bs->FrameType & MFX_FRAMETYPE_B || bs->FrameType & MFX_FRAMETYPE_xB)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_B;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

        av_freep(&bs);

        if (pkt->data) {
            if (pkt->size < new_pkt.size) {
                av_log(avctx, AV_LOG_ERROR, "Submitted buffer not large enough: %d < %d\n",
                       pkt->size, new_pkt.size);
                av_packet_unref(&new_pkt);
                return AVERROR(EINVAL);
            }

            memcpy(pkt->data, new_pkt.data, new_pkt.size);
            pkt->size = new_pkt.size;

            ret = av_packet_copy_props(pkt, &new_pkt);
            av_packet_unref(&new_pkt);
            if (ret < 0)
                return ret;
        } else
            *pkt = new_pkt;

        *got_packet = 1;
    }

    return 0;
}
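The async FIFO above stores one fixed-size (AVPacket, mfxSyncPoint, mfxBitstream*) record per frame in flight, written and read back field by field in the same order. A self-contained sketch of that record pattern using the same libavutil FIFO API, with plain ints standing in for the real fields:

#include <stdio.h>
#include <libavutil/fifo.h>

int main(void)
{
    int a = 1, b = 2, c = 3, x, y, z;
    /* room for a few three-field records, sized like the encoder's FIFO */
    AVFifoBuffer *fifo = av_fifo_alloc(4 * (sizeof(a) + sizeof(b) + sizeof(c)));
    if (!fifo)
        return 1;

    /* write one record... */
    av_fifo_generic_write(fifo, &a, sizeof(a), NULL);
    av_fifo_generic_write(fifo, &b, sizeof(b), NULL);
    av_fifo_generic_write(fifo, &c, sizeof(c), NULL);

    /* ...and read it back in the same field order */
    av_fifo_generic_read(fifo, &x, sizeof(x), NULL);
    av_fifo_generic_read(fifo, &y, sizeof(y), NULL);
    av_fifo_generic_read(fifo, &z, sizeof(z), NULL);

    printf("%d %d %d\n", x, y, z);        /* 1 2 3 */
    av_fifo_freep(&fifo);
    return 0;
}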
Example #6
static int qsv_retrieve_enc_params(AVCodecContext *avctx, QSVEncContext *q)
{
    uint8_t sps_buf[128];
    uint8_t pps_buf[128];

    mfxExtCodingOptionSPSPPS extradata = {
        .Header.BufferId = MFX_EXTBUFF_CODING_OPTION_SPSPPS,
        .Header.BufferSz = sizeof(extradata),
        .SPSBuffer = sps_buf, .SPSBufSize = sizeof(sps_buf),
        .PPSBuffer = pps_buf, .PPSBufSize = sizeof(pps_buf)
    };

    mfxExtBuffer *ext_buffers[] = {
        (mfxExtBuffer*)&extradata,
    };

    int need_pps = avctx->codec_id != AV_CODEC_ID_MPEG2VIDEO;
    int ret;

    q->param.ExtParam    = ext_buffers;
    q->param.NumExtParam = FF_ARRAY_ELEMS(ext_buffers);

    ret = MFXVideoENCODE_GetVideoParam(q->session, &q->param);
    if (ret < 0)
        return ff_qsv_error(ret);

    q->packet_size = q->param.mfx.BufferSizeInKB * 1000;

    if (!extradata.SPSBufSize || (need_pps && !extradata.PPSBufSize)) {
        av_log(avctx, AV_LOG_ERROR, "No extradata returned from libmfx.\n");
        return AVERROR_UNKNOWN;
    }

    avctx->extradata = av_malloc(extradata.SPSBufSize + need_pps * extradata.PPSBufSize +
                                 AV_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata)
        return AVERROR(ENOMEM);

    memcpy(avctx->extradata,                        sps_buf, extradata.SPSBufSize);
    if (need_pps)
        memcpy(avctx->extradata + extradata.SPSBufSize, pps_buf, extradata.PPSBufSize);
    avctx->extradata_size = extradata.SPSBufSize + need_pps * extradata.PPSBufSize;
    memset(avctx->extradata + avctx->extradata_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    return 0;
}

int ff_qsv_enc_init(AVCodecContext *avctx, QSVEncContext *q)
{
    int ret;

    q->param.IOPattern  = MFX_IOPATTERN_IN_SYSTEM_MEMORY;
    q->param.AsyncDepth = q->async_depth;

    q->async_fifo = av_fifo_alloc((1 + q->async_depth) *
                                  (sizeof(AVPacket) + sizeof(mfxSyncPoint) + sizeof(mfxBitstream*)));
    if (!q->async_fifo)
        return AVERROR(ENOMEM);

    if (avctx->hwaccel_context) {
        AVQSVContext *qsv = avctx->hwaccel_context;

        q->session         = qsv->session;
        q->param.IOPattern = qsv->iopattern;
    }

    if (!q->session) {
        ret = ff_qsv_init_internal_session(avctx, &q->internal_qs,
                                           q->load_plugins);
        if (ret < 0)
            return ret;

        q->session = q->internal_qs.session;
    }

    ret = init_video_param(avctx, q);
    if (ret < 0)
        return ret;

    ret = MFXVideoENCODE_QueryIOSurf(q->session, &q->param, &q->req);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error querying the encoding parameters\n");
        return ff_qsv_error(ret);
    }

    ret = MFXVideoENCODE_Init(q->session, &q->param);
    if (ret == MFX_WRN_PARTIAL_ACCELERATION) {
        av_log(avctx, AV_LOG_WARNING, "Encoder will work with partial HW acceleration\n");
    } else if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing the encoder\n");
        return ff_qsv_error(ret);
    }

    ret = qsv_retrieve_enc_params(avctx, q);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error retrieving encoding parameters.\n");
        return ret;
    }

    q->avctx = avctx;

    return 0;
}
Example #7
static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q)
{
    mfxSession session = NULL;
    int iopattern = 0;
    mfxVideoParam param = { { 0 } };
    int ret;

    if (!q->async_fifo) {
        q->async_fifo = av_fifo_alloc((1 + q->async_depth) *
                                      (sizeof(mfxSyncPoint*) + sizeof(QSVFrame*)));
        if (!q->async_fifo)
            return AVERROR(ENOMEM);
    }

    if (avctx->hwaccel_context) {
        AVQSVContext *user_ctx = avctx->hwaccel_context;
        session           = user_ctx->session;
        iopattern         = user_ctx->iopattern;
        q->ext_buffers    = user_ctx->ext_buffers;
        q->nb_ext_buffers = user_ctx->nb_ext_buffers;
    }

    if (avctx->hw_frames_ctx) {
        AVHWFramesContext    *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
        AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;

        if (!iopattern) {
            if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
                iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
            else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
                iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
        }
    }

    if (!iopattern)
        iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
    q->iopattern = iopattern;

    ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
        return ret;
    }

    ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
    if (ret < 0)
        return ret;

    param.mfx.CodecId      = ret;
    param.mfx.CodecProfile = avctx->profile;
    param.mfx.CodecLevel   = avctx->level;

    param.mfx.FrameInfo.BitDepthLuma   = 8;
    param.mfx.FrameInfo.BitDepthChroma = 8;
    param.mfx.FrameInfo.Shift          = 0;
    param.mfx.FrameInfo.FourCC         = MFX_FOURCC_NV12;
    param.mfx.FrameInfo.Width          = avctx->coded_width;
    param.mfx.FrameInfo.Height         = avctx->coded_height;
    param.mfx.FrameInfo.ChromaFormat   = MFX_CHROMAFORMAT_YUV420;

    param.IOPattern   = q->iopattern;
    param.AsyncDepth  = q->async_depth;
    param.ExtParam    = q->ext_buffers;
    param.NumExtParam = q->nb_ext_buffers;

    ret = MFXVideoDECODE_Init(q->session, &param);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing the MFX video decoder\n");
        return ff_qsv_error(ret);
    }

    return 0;
}
Example #8
static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
                      AVFrame *frame, int *got_frame,
                      AVPacket *avpkt)
{
    QSVFrame *out_frame;
    mfxFrameSurface1 *insurf;
    mfxFrameSurface1 *outsurf;
    mfxSyncPoint *sync;
    mfxBitstream bs = { { { 0 } } };
    int ret;

    if (avpkt->size) {
        bs.Data       = avpkt->data;
        bs.DataLength = avpkt->size;
        bs.MaxLength  = bs.DataLength;
        bs.TimeStamp  = avpkt->pts;
    }

    sync = av_mallocz(sizeof(*sync));
    if (!sync)
        return AVERROR(ENOMEM);

    do {
        ret = get_surface(avctx, q, &insurf);
        if (ret < 0) {
            av_freep(&sync);
            return ret;
        }

        ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
                                              insurf, &outsurf, sync);
        if (ret == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);

    } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);

    if (ret != MFX_ERR_NONE &&
        ret != MFX_ERR_MORE_DATA &&
        ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
        ret != MFX_ERR_MORE_SURFACE) {
        av_log(avctx, AV_LOG_ERROR, "Error during QSV decoding.\n");
        av_freep(&sync);
        return ff_qsv_error(ret);
    }

    /* make sure we do not enter an infinite loop if the SDK
     * did not consume any data and did not return anything */
    if (!*sync && !bs.DataOffset) {
        av_log(avctx, AV_LOG_WARNING, "A decode call did not consume any data\n");
        bs.DataOffset = avpkt->size;
    }

    if (*sync) {
        QSVFrame *out_frame = find_frame(q, outsurf);

        if (!out_frame) {
            av_log(avctx, AV_LOG_ERROR,
                   "The returned surface does not correspond to any frame\n");
            av_freep(&sync);
            return AVERROR_BUG;
        }

        out_frame->queued = 1;
        av_fifo_generic_write(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_write(q->async_fifo, &sync,      sizeof(sync),      NULL);
    } else {
        av_freep(&sync);
    }

    if (!av_fifo_space(q->async_fifo) ||
        (!avpkt->size && av_fifo_size(q->async_fifo))) {
        AVFrame *src_frame;

        av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,      sizeof(sync),      NULL);
        out_frame->queued = 0;

        do {
            ret = MFXVideoCORE_SyncOperation(q->session, *sync, 1000);
        } while (ret == MFX_WRN_IN_EXECUTION);

        av_freep(&sync);

        src_frame = out_frame->frame;

        ret = av_frame_ref(frame, src_frame);
        if (ret < 0)
            return ret;

        outsurf = out_frame->surface;

#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
        frame->pkt_pts = outsurf->Data.TimeStamp;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        frame->pts = outsurf->Data.TimeStamp;

        frame->repeat_pict =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
        frame->top_field_first =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
        frame->interlaced_frame =
            !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);

        *got_frame = 1;
    }

    return bs.DataOffset;
}
Example #9
int ff_qsv_decode(AVCodecContext *avctx, QSVContext *q,
                  AVFrame *frame, int *got_frame,
                  AVPacket *avpkt)
{
    mfxFrameSurface1 *insurf;
    mfxFrameSurface1 *outsurf;
    mfxSyncPoint sync = NULL;
    mfxBitstream bs = { { { 0 } } };
    int ret;

    if (avpkt->size) {
        bs.Data       = avpkt->data;
        bs.DataLength = avpkt->size;
        bs.MaxLength  = bs.DataLength;
        bs.TimeStamp  = avpkt->pts;
    }

    do {
        ret = get_surface(avctx, q, &insurf);
        if (ret < 0)
            return ret;

        ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
                                              insurf, &outsurf, &sync);
        if (ret == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);

    } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);

    if (ret != MFX_ERR_NONE &&
        ret != MFX_ERR_MORE_DATA &&
        ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
        ret != MFX_ERR_MORE_SURFACE) {
        av_log(avctx, AV_LOG_ERROR, "Error during QSV decoding.\n");
        return ff_qsv_error(ret);
    }

    if (sync) {
        AVFrame *src_frame;

        MFXVideoCORE_SyncOperation(q->session, sync, 60000);

        src_frame = find_frame(q, outsurf);
        if (!src_frame) {
            av_log(avctx, AV_LOG_ERROR,
                   "The returned surface does not correspond to any frame\n");
            return AVERROR_BUG;
        }

        ret = av_frame_ref(frame, src_frame);
        if (ret < 0)
            return ret;

        frame->pkt_pts = frame->pts = outsurf->Data.TimeStamp;

        frame->repeat_pict =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
        frame->top_field_first =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
        frame->interlaced_frame =
            !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);

        *got_frame = 1;
    }

    return bs.DataOffset;
}
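Draining this decoder works by calling it with an empty packet until no more frames come back. A hedged sketch of such a flush loop; ff_qsv_decode and QSVContext are the ones above, while flush_decoder itself is a hypothetical helper, not part of the API shown:

static int flush_decoder(AVCodecContext *avctx, QSVContext *q)
{
    AVPacket null_pkt = { 0 };            /* empty packet signals end of stream */
    AVFrame *frame = av_frame_alloc();
    int got_frame, ret = 0;

    if (!frame)
        return AVERROR(ENOMEM);

    do {
        got_frame = 0;
        ret = ff_qsv_decode(avctx, q, frame, &got_frame, &null_pkt);
        if (ret < 0)
            break;
        if (got_frame) {
            /* ... consume the decoded frame here ... */
            av_frame_unref(frame);
        }
    } while (got_frame);

    av_frame_free(&frame);
    return ret < 0 ? ret : 0;
}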
Example #10
int ff_qsv_enc_init(AVCodecContext *avctx, QSVEncContext *q)
{
    int opaque_alloc = 0;
    int ret;

    q->param.IOPattern  = MFX_IOPATTERN_IN_SYSTEM_MEMORY;
    q->param.AsyncDepth = q->async_depth;

    q->async_fifo = av_fifo_alloc((1 + q->async_depth) *
                                  (sizeof(AVPacket) + sizeof(mfxSyncPoint) + sizeof(mfxBitstream*)));
    if (!q->async_fifo)
        return AVERROR(ENOMEM);

    if (avctx->hwaccel_context) {
        AVQSVContext *qsv = avctx->hwaccel_context;

        q->session         = qsv->session;
        q->param.IOPattern = qsv->iopattern;

        opaque_alloc = qsv->opaque_alloc;
    }

    if (!q->session) {
        ret = ff_qsv_init_internal_session(avctx, &q->internal_session,
                                           q->load_plugins);
        if (ret < 0)
            return ret;

        q->session = q->internal_session;
    }

    ret = init_video_param(avctx, q);
    if (ret < 0)
        return ret;

    ret = MFXVideoENCODE_QueryIOSurf(q->session, &q->param, &q->req);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error querying the encoding parameters\n");
        return ff_qsv_error(ret);
    }

    if (opaque_alloc) {
        ret = qsv_init_opaque_alloc(avctx, q);
        if (ret < 0)
            return ret;
    }

    if (avctx->hwaccel_context) {
        AVQSVContext *qsv = avctx->hwaccel_context;
        int i, j;

        q->extparam = av_mallocz_array(qsv->nb_ext_buffers + q->nb_extparam_internal,
                                       sizeof(*q->extparam));
        if (!q->extparam)
            return AVERROR(ENOMEM);

        q->param.ExtParam = q->extparam;
        for (i = 0; i < qsv->nb_ext_buffers; i++)
            q->param.ExtParam[i] = qsv->ext_buffers[i];
        q->param.NumExtParam = qsv->nb_ext_buffers;

        for (i = 0; i < q->nb_extparam_internal; i++) {
            for (j = 0; j < qsv->nb_ext_buffers; j++) {
                if (qsv->ext_buffers[j]->BufferId == q->extparam_internal[i]->BufferId)
                    break;
            }
            if (j < qsv->nb_ext_buffers)
                continue;

            q->param.ExtParam[q->param.NumExtParam++] = q->extparam_internal[i];
        }
    } else {
        q->param.ExtParam    = q->extparam_internal;
        q->param.NumExtParam = q->nb_extparam_internal;
    }

    ret = MFXVideoENCODE_Init(q->session, &q->param);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing the encoder\n");
        return ff_qsv_error(ret);
    }

    ret = qsv_retrieve_enc_params(avctx, q);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error retrieving encoding parameters.\n");
        return ret;
    }

    q->avctx = avctx;

    return 0;
}
Example #11
static int qsv_retrieve_enc_params(AVCodecContext *avctx, QSVEncContext *q)
{
    uint8_t sps_buf[128];
    uint8_t pps_buf[128];

    mfxExtCodingOptionSPSPPS extradata = {
        .Header.BufferId = MFX_EXTBUFF_CODING_OPTION_SPSPPS,
        .Header.BufferSz = sizeof(extradata),
        .SPSBuffer = sps_buf, .SPSBufSize = sizeof(sps_buf),
        .PPSBuffer = pps_buf, .PPSBufSize = sizeof(pps_buf)
    };

    mfxExtBuffer *ext_buffers[] = {
        (mfxExtBuffer*)&extradata,
    };

    int need_pps = avctx->codec_id != AV_CODEC_ID_MPEG2VIDEO;
    int ret;

    q->param.ExtParam    = ext_buffers;
    q->param.NumExtParam = FF_ARRAY_ELEMS(ext_buffers);

    ret = MFXVideoENCODE_GetVideoParam(q->session, &q->param);
    if (ret < 0)
        return ff_qsv_error(ret);

    q->packet_size = q->param.mfx.BufferSizeInKB * 1000;

    if (!extradata.SPSBufSize || (need_pps && !extradata.PPSBufSize)) {
        av_log(avctx, AV_LOG_ERROR, "No extradata returned from libmfx.\n");
        return AVERROR_UNKNOWN;
    }

    avctx->extradata = av_malloc(extradata.SPSBufSize + need_pps * extradata.PPSBufSize +
                                 AV_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata)
        return AVERROR(ENOMEM);

    memcpy(avctx->extradata,                        sps_buf, extradata.SPSBufSize);
    if (need_pps)
        memcpy(avctx->extradata + extradata.SPSBufSize, pps_buf, extradata.PPSBufSize);
    avctx->extradata_size = extradata.SPSBufSize + need_pps * extradata.PPSBufSize;
    memset(avctx->extradata + avctx->extradata_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    return 0;
}

static int qsv_init_opaque_alloc(AVCodecContext *avctx, QSVEncContext *q)
{
    AVQSVContext *qsv = avctx->hwaccel_context;
    mfxFrameSurface1 *surfaces;
    int nb_surfaces, i;

    nb_surfaces = qsv->nb_opaque_surfaces + q->req.NumFrameSuggested + q->async_depth;

    q->opaque_alloc_buf = av_buffer_allocz(sizeof(*surfaces) * nb_surfaces);
    if (!q->opaque_alloc_buf)
        return AVERROR(ENOMEM);

    q->opaque_surfaces = av_malloc_array(nb_surfaces, sizeof(*q->opaque_surfaces));
    if (!q->opaque_surfaces)
        return AVERROR(ENOMEM);

    surfaces = (mfxFrameSurface1*)q->opaque_alloc_buf->data;
    for (i = 0; i < nb_surfaces; i++) {
        surfaces[i].Info      = q->req.Info;
        q->opaque_surfaces[i] = surfaces + i;
    }

    q->opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
    q->opaque_alloc.Header.BufferSz = sizeof(q->opaque_alloc);
    q->opaque_alloc.In.Surfaces     = q->opaque_surfaces;
    q->opaque_alloc.In.NumSurface   = nb_surfaces;
    q->opaque_alloc.In.Type         = q->req.Type;

    q->extparam_internal[q->nb_extparam_internal++] = (mfxExtBuffer *)&q->opaque_alloc;

    qsv->nb_opaque_surfaces = nb_surfaces;
    qsv->opaque_surfaces    = q->opaque_alloc_buf;
    qsv->opaque_alloc_type  = q->req.Type;

    return 0;
}
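The opaque-allocation wiring in qsv_init_opaque_alloc() can be shown without the AVBuffer plumbing. A sketch under the assumption that plain calloc() is acceptable and that req has been filled by MFXVideoENCODE_QueryIOSurf(); make_opaque_alloc is a hypothetical helper, while the mfx structures and MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION are regular libmfx API:

#include <stdlib.h>
#include <mfx/mfxvideo.h>

static mfxExtBuffer *make_opaque_alloc(const mfxFrameAllocRequest *req,
                                       mfxExtOpaqueSurfaceAlloc *opaque,
                                       int nb_surfaces)
{
    mfxFrameSurface1 *surfaces = calloc(nb_surfaces, sizeof(*surfaces));
    mfxFrameSurface1 **ptrs    = calloc(nb_surfaces, sizeof(*ptrs));
    int i;

    if (!surfaces || !ptrs) {
        free(surfaces);
        free(ptrs);
        return NULL;
    }

    /* every opaque surface shares the frame description from the request */
    for (i = 0; i < nb_surfaces; i++) {
        surfaces[i].Info = req->Info;
        ptrs[i]          = &surfaces[i];
    }

    opaque->Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
    opaque->Header.BufferSz = sizeof(*opaque);
    opaque->In.Surfaces     = ptrs;
    opaque->In.NumSurface   = nb_surfaces;
    opaque->In.Type         = req->Type;

    return (mfxExtBuffer *)opaque;
}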
Example #12
static int qsv_retrieve_enc_params(AVCodecContext *avctx, QSVEncContext *q)
{
    uint8_t sps_buf[128];
    uint8_t pps_buf[128];

    mfxExtCodingOptionSPSPPS extradata = {
        .Header.BufferId = MFX_EXTBUFF_CODING_OPTION_SPSPPS,
        .Header.BufferSz = sizeof(extradata),
        .SPSBuffer = sps_buf, .SPSBufSize = sizeof(sps_buf),
        .PPSBuffer = pps_buf, .PPSBufSize = sizeof(pps_buf)
    };

    mfxExtBuffer *ext_buffers[] = {
        (mfxExtBuffer*)&extradata,
    };

    int ret;

    q->param.ExtParam    = ext_buffers;
    q->param.NumExtParam = FF_ARRAY_ELEMS(ext_buffers);

    ret = MFXVideoENCODE_GetVideoParam(q->session, &q->param);
    if (ret < 0)
        return ff_qsv_error(ret);

    q->packet_size = q->param.mfx.BufferSizeInKB * 1000;

    if (!extradata.SPSBufSize || !extradata.PPSBufSize) {
        av_log(avctx, AV_LOG_ERROR, "No extradata returned from libmfx.\n");
        return AVERROR_UNKNOWN;
    }

    avctx->extradata = av_malloc(extradata.SPSBufSize + extradata.PPSBufSize +
                                 FF_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata)
        return AVERROR(ENOMEM);

    memcpy(avctx->extradata,                        sps_buf, extradata.SPSBufSize);
    memcpy(avctx->extradata + extradata.SPSBufSize, pps_buf, extradata.PPSBufSize);
    avctx->extradata_size = extradata.SPSBufSize + extradata.PPSBufSize;
    memset(avctx->extradata + avctx->extradata_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    return 0;
}

int ff_qsv_enc_init(AVCodecContext *avctx, QSVEncContext *q)
{
    int ret;

    q->param.IOPattern  = MFX_IOPATTERN_IN_SYSTEM_MEMORY;
    q->param.AsyncDepth = q->async_depth;

    if (avctx->hwaccel_context) {
        AVQSVContext *qsv = avctx->hwaccel_context;

        q->session         = qsv->session;
        q->param.IOPattern = qsv->iopattern;
    }

    if (!q->session) {
        ret = ff_qsv_init_internal_session(avctx, &q->internal_session);
        if (ret < 0)
            return ret;

        q->session = q->internal_session;
    }

    ret = init_video_param(avctx, q);
    if (ret < 0)
        return ret;

    ret = MFXVideoENCODE_QueryIOSurf(q->session, &q->param, &q->req);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error querying the encoding parameters\n");
        return ff_qsv_error(ret);
    }

    ret = MFXVideoENCODE_Init(q->session, &q->param);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing the encoder\n");
        return ff_qsv_error(ret);
    }

    ret = qsv_retrieve_enc_params(avctx, q);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error retrieving encoding parameters.\n");
        return ret;
    }

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);

    q->avctx = avctx;

    return 0;
}
Example #13
int ff_qsv_decode_init(AVCodecContext *avctx, QSVContext *q, AVPacket *avpkt)
{
    mfxVideoParam param = { { 0 } };
    mfxBitstream bs   = { { { 0 } } };
    int ret;
    enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_QSV,
                                       AV_PIX_FMT_NV12,
                                       AV_PIX_FMT_NONE };

    q->iopattern  = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
    if (!q->session) {
        if (avctx->hwaccel_context) {
            AVQSVContext *qsv = avctx->hwaccel_context;

            q->session        = qsv->session;
            q->iopattern      = qsv->iopattern;
            q->ext_buffers    = qsv->ext_buffers;
            q->nb_ext_buffers = qsv->nb_ext_buffers;
        }
        if (!q->session) {
            ret = ff_qsv_init_internal_session(avctx, &q->internal_qs,
                                               q->load_plugins);
            if (ret < 0)
                return ret;

            q->session = q->internal_qs.session;
        }
    }

    if (avpkt->size) {
        bs.Data       = avpkt->data;
        bs.DataLength = avpkt->size;
        bs.MaxLength  = bs.DataLength;
        bs.TimeStamp  = avpkt->pts;
    } else
        return AVERROR_INVALIDDATA;

    ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported codec_id %08x\n", avctx->codec_id);
        return ret;
    }

    param.mfx.CodecId = ret;

    ret = MFXVideoDECODE_DecodeHeader(q->session, &bs, &param);
    if (ret == MFX_ERR_MORE_DATA) {
        /* the header was not found, so return the packet size
           to skip the current packet */
        return avpkt->size;
    } else if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Decode header error %d\n", ret);
        return ff_qsv_error(ret);
    }
    param.IOPattern   = q->iopattern;
    param.AsyncDepth  = q->async_depth;
    param.ExtParam    = q->ext_buffers;
    param.NumExtParam = q->nb_ext_buffers;
    param.mfx.FrameInfo.BitDepthLuma   = 8;
    param.mfx.FrameInfo.BitDepthChroma = 8;

    ret = MFXVideoDECODE_Init(q->session, &param);
    if (ret < 0) {
        if (ret == MFX_ERR_INVALID_VIDEO_PARAM) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error initializing the MFX video decoder, unsupported video\n");
        } else {
            av_log(avctx, AV_LOG_ERROR,
                   "Error initializing the MFX video decoder %d\n", ret);
        }
        return ff_qsv_error(ret);
    }

    ret = ff_get_format(avctx, pix_fmts);
    if (ret < 0)
        return ret;

    avctx->pix_fmt      = ret;
    avctx->profile      = param.mfx.CodecProfile;
    avctx->level        = param.mfx.CodecLevel;
    avctx->coded_width  = param.mfx.FrameInfo.Width;
    avctx->coded_height = param.mfx.FrameInfo.Height;
    avctx->width        = param.mfx.FrameInfo.CropW - param.mfx.FrameInfo.CropX;
    avctx->height       = param.mfx.FrameInfo.CropH - param.mfx.FrameInfo.CropY;

    /* the maximum decoder latency should not exceed the maximum DPB size
       for H.264 and HEVC, which is 16 in both cases, so we pre-allocate
       a FIFO large enough for 17 elements */
    if (!q->async_fifo) {
        q->async_fifo = av_fifo_alloc((1 + 16) *
                                      (sizeof(mfxSyncPoint) + sizeof(QSVFrame*)));
        if (!q->async_fifo)
            return AVERROR(ENOMEM);
    }

    q->input_fifo = av_fifo_alloc(1024*16);
    if (!q->input_fifo)
        return AVERROR(ENOMEM);

    q->engine_ready = 1;

    return 0;
}
Example #14
int ff_qsv_decode(AVCodecContext *avctx, QSVContext *q,
                  AVFrame *frame, int *got_frame,
                  AVPacket *avpkt)
{
    QSVFrame *out_frame;
    mfxFrameSurface1 *insurf;
    mfxFrameSurface1 *outsurf;
    mfxSyncPoint sync = NULL;
    mfxBitstream bs = { { { 0 } } };
    int ret;
    int n_out_frames;
    int buffered = 0;

    if (!q->engine_ready) {
        ret = ff_qsv_decode_init(avctx, q, avpkt);
        if (ret)
            return ret;
    }

    if (avpkt->size) {
        if (av_fifo_size(q->input_fifo)) {
            /* leftover data from a previous packet is buffered */
            if (av_fifo_space(q->input_fifo) < avpkt->size) {
                ret = av_fifo_grow(q->input_fifo, avpkt->size);
                if (ret < 0)
                    return ret;
            }
            av_fifo_generic_write(q->input_fifo, avpkt->data, avpkt->size, NULL);
            bs.Data       = q->input_fifo->rptr;
            bs.DataLength = av_fifo_size(q->input_fifo);
            buffered = 1;
        } else {
            bs.Data       = avpkt->data;
            bs.DataLength = avpkt->size;
        }
        bs.MaxLength  = bs.DataLength;
        bs.TimeStamp  = avpkt->pts;
    }

    while (1) {
        ret = get_surface(avctx, q, &insurf);
        if (ret < 0)
            return ret;
        do {
            ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
                                                  insurf, &outsurf, &sync);
            if (ret != MFX_WRN_DEVICE_BUSY)
                break;
            av_usleep(500);
        } while (1);

        if (ret == MFX_WRN_VIDEO_PARAM_CHANGED) {
            /* TODO: handle a sequence header change here */
        }

        if (sync) {
            QSVFrame *out_frame = find_frame(q, outsurf);

            if (!out_frame) {
                av_log(avctx, AV_LOG_ERROR,
                       "The returned surface does not correspond to any frame\n");
                return AVERROR_BUG;
            }

            out_frame->queued = 1;
            av_fifo_generic_write(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
            av_fifo_generic_write(q->async_fifo, &sync,      sizeof(sync),      NULL);

            continue;
        }
        if (ret != MFX_ERR_MORE_SURFACE && ret < 0)
            break;
    }

    /* make sure we do not enter an infinite loop if the SDK
     * did not consume any data and did not return anything */
    if (!sync && !bs.DataOffset) {
        av_log(avctx, AV_LOG_WARNING, "A decode call did not consume any data\n");
        bs.DataOffset = avpkt->size;
    }

    if (buffered) {
        qsv_fifo_relocate(q->input_fifo, bs.DataOffset);
    } else if (bs.DataOffset != avpkt->size) {
        /* some of the packet was not consumed; store it in the local buffer */
        av_fifo_generic_write(q->input_fifo, avpkt->data+bs.DataOffset,
                              avpkt->size - bs.DataOffset, NULL);
    }

    if (ret != MFX_ERR_MORE_DATA && ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error %d during QSV decoding.\n", ret);
        return ff_qsv_error(ret);
    }
    n_out_frames = av_fifo_size(q->async_fifo) / (sizeof(out_frame) + sizeof(sync));

    if (n_out_frames > q->async_depth || (!avpkt->size && n_out_frames)) {
        AVFrame *src_frame;

        av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,      sizeof(sync),      NULL);
        out_frame->queued = 0;

        MFXVideoCORE_SyncOperation(q->session, sync, 60000);

        src_frame = out_frame->frame;

        ret = av_frame_ref(frame, src_frame);
        if (ret < 0)
            return ret;

        outsurf = out_frame->surface;

        frame->pkt_pts = frame->pts = outsurf->Data.TimeStamp;

        frame->repeat_pict =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
        frame->top_field_first =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
        frame->interlaced_frame =
            !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);

        *got_frame = 1;
    }

    return avpkt->size;
}
Example #15
int ff_qsv_decode_init(AVCodecContext *avctx, QSVContext *q, AVPacket *avpkt)
{
    mfxVideoParam param = { { 0 } };
    mfxBitstream bs   = { { { 0 } } };
    int ret;

    q->iopattern  = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
    if (!q->session) {
        if (avctx->hwaccel_context) {
            AVQSVContext *qsv = avctx->hwaccel_context;

            q->session        = qsv->session;
            q->iopattern      = qsv->iopattern;
            q->ext_buffers    = qsv->ext_buffers;
            q->nb_ext_buffers = qsv->nb_ext_buffers;
        }
        if (!q->session) {
            ret = ff_qsv_init_internal_session(avctx, &q->internal_qs, NULL);
            if (ret < 0)
                return ret;

            q->session = q->internal_qs.session;
        }
    }

    if (avpkt->size) {
        bs.Data       = avpkt->data;
        bs.DataLength = avpkt->size;
        bs.MaxLength  = bs.DataLength;
        bs.TimeStamp  = avpkt->pts;
    } else
        return AVERROR_INVALIDDATA;

    ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported codec_id %08x\n", avctx->codec_id);
        return ret;
    }

    param.mfx.CodecId = ret;

    ret = MFXVideoDECODE_DecodeHeader(q->session, &bs, &param);
    if (ret == MFX_ERR_MORE_DATA) {
        return AVERROR(EAGAIN);
    } else if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Decode header error %d\n", ret);
        return ff_qsv_error(ret);
    }
    param.IOPattern   = q->iopattern;
    param.AsyncDepth  = q->async_depth;
    param.ExtParam    = q->ext_buffers;
    param.NumExtParam = q->nb_ext_buffers;
    param.mfx.FrameInfo.BitDepthLuma   = 8;
    param.mfx.FrameInfo.BitDepthChroma = 8;

    ret = MFXVideoDECODE_Init(q->session, &param);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing the MFX video decoder\n");
        return ff_qsv_error(ret);
    }

    avctx->pix_fmt      = AV_PIX_FMT_NV12;
    avctx->profile      = param.mfx.CodecProfile;
    avctx->level        = param.mfx.CodecLevel;
    avctx->coded_width  = param.mfx.FrameInfo.Width;
    avctx->coded_height = param.mfx.FrameInfo.Height;
    avctx->width        = param.mfx.FrameInfo.CropW - param.mfx.FrameInfo.CropX;
    avctx->height       = param.mfx.FrameInfo.CropH - param.mfx.FrameInfo.CropY;

    q->async_fifo = av_fifo_alloc((1 + q->async_depth) *
                                  (sizeof(mfxSyncPoint) + sizeof(QSVFrame*)));
    if (!q->async_fifo)
        return AVERROR(ENOMEM);

    return 0;
}
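With this variant the header probe is reported as AVERROR(EAGAIN) instead of the packet size, so the caller decides how to skip. A hypothetical caller sketch (init_or_skip is not part of the API above):

static int init_or_skip(AVCodecContext *avctx, QSVContext *q, AVPacket *avpkt)
{
    int ret = ff_qsv_decode_init(avctx, q, avpkt);

    if (ret == AVERROR(EAGAIN))
        return avpkt->size;               /* no sequence header yet: skip this packet */
    return ret;
}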