コード例 #1
0
ファイル: vaapi_vp8.c プロジェクト: DeHackEd/FFmpeg
/** Map a VP8 frame to its VA surface; VA_INVALID_SURFACE when no frame. */
static VASurfaceID vaapi_vp8_surface_id(VP8Frame *vf)
{
    return vf ? ff_vaapi_get_surface_id(vf->tf.f) : VA_INVALID_SURFACE;
}
コード例 #2
0
/**
 * Append picture to the decoded picture buffer, in a VA API form that
 * merges the second field picture attributes with the first, if
 * available.  The decoded picture buffer's size must be large enough
 * to receive the new VA API picture object.
 */
static int dpb_add(DPB *dpb, const H264Picture *pic)
{
    int i;

    /* Refuse to grow past the buffer's declared capacity. */
    if (dpb->size >= dpb->max_size)
        return -1;

    /* If this picture's surface is already in the DPB, the new picture is
     * (presumably) the second field of an existing entry: merge its field
     * attributes into that entry instead of appending a duplicate. */
    for (i = 0; i < dpb->size; i++) {
        VAPictureH264 * const va_pic = &dpb->va_pics[i];
        if (va_pic->picture_id == ff_vaapi_get_surface_id(pic->f)) {
            VAPictureH264 temp_va_pic;
            fill_vaapi_pic(&temp_va_pic, pic, 0);

            /* XOR isolates the case where the stored entry and the new
             * picture carry different TOP/BOTTOM field flags. */
            if ((temp_va_pic.flags ^ va_pic->flags) & (VA_PICTURE_H264_TOP_FIELD | VA_PICTURE_H264_BOTTOM_FIELD)) {
                va_pic->flags |= temp_va_pic.flags & (VA_PICTURE_H264_TOP_FIELD | VA_PICTURE_H264_BOTTOM_FIELD);
                /* Merge second field: take the order count of whichever
                 * field the new picture contributes. */
                if (temp_va_pic.flags & VA_PICTURE_H264_TOP_FIELD) {
                    va_pic->TopFieldOrderCnt    = temp_va_pic.TopFieldOrderCnt;
                } else {
                    va_pic->BottomFieldOrderCnt = temp_va_pic.BottomFieldOrderCnt;
                }
            }
            return 0;
        }
    }

    /* New surface: append a freshly filled VA picture entry. */
    fill_vaapi_pic(&dpb->va_pics[dpb->size++], pic, 0);
    return 0;
}
コード例 #3
0
ファイル: vaapi.c プロジェクト: Tonyton/gstreamer-ducati
/**
 * Finish decoding the current frame: commit buffered slices, render the
 * picture, then unconditionally release all per-frame VA buffers.
 *
 * @param s  the MPEG decoding context whose current picture is finished
 * @return 0 on success, -1 if committing or rendering failed
 */
int ff_vaapi_common_end_frame(MpegEncContext *s)
{
    struct vaapi_context * const vactx = s->avctx->hwaccel_context;
    int ret = -1;

    av_dlog(s->avctx, "ff_vaapi_common_end_frame()\n");

    if (commit_slices(vactx) < 0)
        goto done;
    /* Only render when at least one slice buffer was actually submitted. */
    if (vactx->n_slice_buf_ids > 0) {
        if (render_picture(vactx, ff_vaapi_get_surface_id(s->current_picture_ptr)) < 0)
            goto done;
        ff_draw_horiz_band(s, 0, s->avctx->height);
    }
    ret = 0;

done:
    /* Cleanup runs on both success and failure: destroy every per-frame
     * VA buffer and reset the slice bookkeeping for the next frame. */
    destroy_buffers(vactx->display, &vactx->pic_param_buf_id, 1);
    destroy_buffers(vactx->display, &vactx->iq_matrix_buf_id, 1);
    destroy_buffers(vactx->display, &vactx->bitplane_buf_id, 1);
    destroy_buffers(vactx->display, vactx->slice_buf_ids, vactx->n_slice_buf_ids);
    av_freep(&vactx->slice_buf_ids);
    av_freep(&vactx->slice_params);
    vactx->n_slice_buf_ids     = 0;
    vactx->slice_buf_ids_alloc = 0;
    vactx->slice_count         = 0;
    vactx->slice_params_alloc  = 0;
    return ret;
}
コード例 #4
0
/**
 * Translate an Libav Picture into its VA API form.
 *
 * @param[out] va_pic          A pointer to VA API's own picture struct
 * @param[in]  pic             A pointer to the Libav picture struct to convert
 * @param[in]  pic_structure   The picture field type (as defined in mpegvideo.h),
 *                             supersedes pic's field type if nonzero.
 */
static void fill_vaapi_pic(VAPictureH264 *va_pic,
                           const H264Picture *pic,
                           int            pic_structure)
{
    unsigned int flags = 0;

    /* Fall back to the picture's own field type when none was supplied,
     * then mask down to the frame/field bits. */
    if (pic_structure == 0)
        pic_structure = pic->reference;
    pic_structure &= PICT_FRAME; /* PICT_TOP_FIELD|PICT_BOTTOM_FIELD */

    if (pic_structure != PICT_FRAME)
        flags |= (pic_structure & PICT_TOP_FIELD) ? VA_PICTURE_H264_TOP_FIELD
                                                  : VA_PICTURE_H264_BOTTOM_FIELD;
    if (pic->reference)
        flags |= pic->long_ref ? VA_PICTURE_H264_LONG_TERM_REFERENCE
                               : VA_PICTURE_H264_SHORT_TERM_REFERENCE;

    va_pic->picture_id = ff_vaapi_get_surface_id(pic->f);
    va_pic->frame_idx  = pic->long_ref ? pic->pic_id : pic->frame_num;
    va_pic->flags      = flags;

    /* INT_MAX marks an absent field POC; report 0 for missing fields. */
    va_pic->TopFieldOrderCnt =
        pic->field_poc[0] != INT_MAX ? pic->field_poc[0] : 0;
    va_pic->BottomFieldOrderCnt =
        pic->field_poc[1] != INT_MAX ? pic->field_poc[1] : 0;
}
コード例 #5
0
/** Initialize and start decoding a frame with VA API. */
static int vaapi_h264_start_frame(AVCodecContext          *avctx,
                                  av_unused const uint8_t *buffer,
                                  av_unused uint32_t       size)
{
    const H264Context *h = avctx->priv_data;
    VAAPIDecodePicture *pic = h->cur_pic_ptr->hwaccel_picture_private;
    const PPS *pps = h->ps.pps;
    const SPS *sps = h->ps.sps;
    VAPictureParameterBufferH264 pic_param;
    VAIQMatrixBufferH264 iq_matrix;
    int err;

    pic->output_surface = ff_vaapi_get_surface_id(h->cur_pic_ptr->f);

    pic_param = (VAPictureParameterBufferH264) {
        .picture_width_in_mbs_minus1                = h->mb_width - 1,
        .picture_height_in_mbs_minus1               = h->mb_height - 1,
        .bit_depth_luma_minus8                      = sps->bit_depth_luma - 8,
        .bit_depth_chroma_minus8                    = sps->bit_depth_chroma - 8,
        .num_ref_frames                             = sps->ref_frame_count,
        .seq_fields.bits = {
            .chroma_format_idc                      = sps->chroma_format_idc,
            .residual_colour_transform_flag         = sps->residual_color_transform_flag,
            .gaps_in_frame_num_value_allowed_flag   = sps->gaps_in_frame_num_allowed_flag,
            .frame_mbs_only_flag                    = sps->frame_mbs_only_flag,
            .mb_adaptive_frame_field_flag           = sps->mb_aff,
            .direct_8x8_inference_flag              = sps->direct_8x8_inference_flag,
            .MinLumaBiPredSize8x8                   = sps->level_idc >= 31, /* A.3.3.2 */
            .log2_max_frame_num_minus4              = sps->log2_max_frame_num - 4,
            .pic_order_cnt_type                     = sps->poc_type,
            .log2_max_pic_order_cnt_lsb_minus4      = sps->log2_max_poc_lsb - 4,
            .delta_pic_order_always_zero_flag       = sps->delta_pic_order_always_zero_flag,
        },
        .num_slice_groups_minus1                    = pps->slice_group_count - 1,
        .slice_group_map_type                       = pps->mb_slice_group_map_type,
        .slice_group_change_rate_minus1             = 0, /* FMO is not implemented */
        .pic_init_qp_minus26                        = pps->init_qp - 26,
        .pic_init_qs_minus26                        = pps->init_qs - 26,
        .chroma_qp_index_offset                     = pps->chroma_qp_index_offset[0],
        .second_chroma_qp_index_offset              = pps->chroma_qp_index_offset[1],
        .pic_fields.bits = {
            .entropy_coding_mode_flag               = pps->cabac,
            .weighted_pred_flag                     = pps->weighted_pred,
            .weighted_bipred_idc                    = pps->weighted_bipred_idc,
            .transform_8x8_mode_flag                = pps->transform_8x8_mode,
            .field_pic_flag                         = h->picture_structure != PICT_FRAME,
            .constrained_intra_pred_flag            = pps->constrained_intra_pred,
            .pic_order_present_flag                 = pps->pic_order_present,
            .deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present,
            .redundant_pic_cnt_present_flag         = pps->redundant_pic_cnt_present,
            .reference_pic_flag                     = h->nal_ref_idc != 0,
        },
        .frame_num                                  = h->poc.frame_num,
コード例 #6
0
ファイル: vaapi_hevc.c プロジェクト: 0day-ci/FFmpeg
/* Classify a frame by which "current" reference-picture-set list of the
 * decoder it appears in; 0 when it is in none of them. */
static int find_frame_rps_type(const HEVCContext *h, const HEVCFrame *pic)
{
    const VASurfaceID pic_surf = ff_vaapi_get_surface_id(pic->frame);
    int i;

    /* Short-term references preceding the current picture. */
    for (i = 0; i < h->rps[ST_CURR_BEF].nb_refs; i++)
        if (ff_vaapi_get_surface_id(h->rps[ST_CURR_BEF].ref[i]->frame) == pic_surf)
            return VA_PICTURE_HEVC_RPS_ST_CURR_BEFORE;

    /* Short-term references following the current picture. */
    for (i = 0; i < h->rps[ST_CURR_AFT].nb_refs; i++)
        if (ff_vaapi_get_surface_id(h->rps[ST_CURR_AFT].ref[i]->frame) == pic_surf)
            return VA_PICTURE_HEVC_RPS_ST_CURR_AFTER;

    /* Long-term references used by the current picture. */
    for (i = 0; i < h->rps[LT_CURR].nb_refs; i++)
        if (ff_vaapi_get_surface_id(h->rps[LT_CURR].ref[i]->frame) == pic_surf)
            return VA_PICTURE_HEVC_RPS_LT_CURR;

    return 0;
}
コード例 #7
0
ファイル: vaapi_hevc.c プロジェクト: Rodeo314/tim-libav
/* Translate an HEVC frame into its VA API picture form, seeding the
 * flags with the caller-supplied RPS membership bits. */
static void fill_vaapi_pic(VAPictureHEVC *va_pic, const HEVCFrame *pic, int rps_type)
{
    unsigned int flags = rps_type;

    if (pic->flags & HEVC_FRAME_FLAG_LONG_REF)
        flags |= VA_PICTURE_HEVC_LONG_TERM_REFERENCE;

    if (pic->frame->interlaced_frame) {
        flags |= VA_PICTURE_HEVC_FIELD_PIC;
        /* When top_field_first is unset the bottom field leads. */
        if (!pic->frame->top_field_first)
            flags |= VA_PICTURE_HEVC_BOTTOM_FIELD;
    }

    va_pic->picture_id    = ff_vaapi_get_surface_id(pic->frame);
    va_pic->pic_order_cnt = pic->poc;
    va_pic->flags         = flags;
}
コード例 #8
0
ファイル: vaapi_hevc.c プロジェクト: 0day-ci/FFmpeg
/* Locate a frame in the current picture-parameter ReferenceFrames table,
 * matching on both surface ID and POC; 0xff when absent or frame is NULL. */
static uint8_t get_ref_pic_index(const HEVCContext *h, const HEVCFrame *frame)
{
    vaapi_hevc_frame_data *frame_data;
    VAPictureParameterBufferHEVC *pp;
    uint8_t idx;

    if (!frame)
        return 0xff;

    frame_data = h->ref->hwaccel_picture_private;
    pp         = frame_data->pic_param;

    for (idx = 0; idx < FF_ARRAY_ELEMS(pp->ReferenceFrames); idx++) {
        VASurfaceID pid = pp->ReferenceFrames[idx].picture_id;
        if (pid == VA_INVALID_ID)
            continue;
        if (pid == ff_vaapi_get_surface_id(frame->frame) &&
            pp->ReferenceFrames[idx].pic_order_cnt == frame->poc)
            return idx;
    }

    return 0xff;
}
コード例 #9
0
ファイル: vaapi_vp9.c プロジェクト: KangLin/FFmpeg
/**
 * Finish decoding the current VP9 frame: commit the buffered slices,
 * render the current frame's surface, then run the common end-of-frame
 * cleanup regardless of the outcome.
 *
 * @return 0 on success, a negative error code from commit/render otherwise
 */
static int vaapi_vp9_end_frame(AVCodecContext *avctx)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    const VP9SharedContext *h = avctx->priv_data;
    int ret;

    ret = ff_vaapi_commit_slices(vactx);
    if (ret < 0)
        goto finish;

    /* Control falls into `finish:` either way, so the original
     * `if (ret < 0) goto finish;` after this call was a dead jump. */
    ret = ff_vaapi_render_picture(vactx,
                                  ff_vaapi_get_surface_id(h->frames[CUR_FRAME].tf.f));

finish:
    /* Cleanup always runs, even on error. */
    ff_vaapi_common_end_frame(avctx);
    return ret;
}
コード例 #10
0
ファイル: vaapi.c プロジェクト: AndiDog/FFmpeg
/**
 * Common end-of-frame handler for MPEG-family hwaccels: commit slices,
 * render the current picture, report decode progress, then run the
 * shared cleanup unconditionally.
 *
 * @return 0 on success, a negative error code from commit/render otherwise
 */
int ff_vaapi_mpeg_end_frame(AVCodecContext *avctx)
{
    struct vaapi_context * const vactx = avctx->hwaccel_context;
    MpegEncContext *s = avctx->priv_data;
    int ret;

    ret = ff_vaapi_commit_slices(vactx);
    if (ret < 0)
        goto finish;

    ret = ff_vaapi_render_picture(vactx,
                                  ff_vaapi_get_surface_id(s->current_picture_ptr));
    if (ret < 0)
        goto finish;

    /* Announce the fully decoded band only after a successful render. */
    ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);

finish:
    /* Cleanup runs on both success and failure paths. */
    ff_vaapi_common_end_frame(avctx);
    return ret;
}
コード例 #11
0
ファイル: vaapi_h264.c プロジェクト: DonDiego/libav
/** End a hardware decoding based frame. */
/** End a hardware decoding based frame: commit, render, then clean up. */
static int vaapi_h264_end_frame(AVCodecContext *avctx)
{
    struct vaapi_context * const vactx = avctx->hwaccel_context;
    H264Context * const h = avctx->priv_data;
    int ret;

    av_dlog(avctx, "vaapi_h264_end_frame()\n");

    /* Submit buffered slices; render and report progress only when
     * every preceding step succeeded. */
    ret = ff_vaapi_commit_slices(vactx);
    if (ret >= 0) {
        ret = ff_vaapi_render_picture(vactx,
                                      ff_vaapi_get_surface_id(h->cur_pic_ptr));
        if (ret >= 0)
            ff_h264_draw_horiz_band(h, 0, h->avctx->height);
    }

    /* Common cleanup always runs, even on error. */
    ff_vaapi_common_end_frame(avctx);
    return ret;
}
コード例 #12
0
ファイル: vaapi_h264.c プロジェクト: 15806905685/FFmpeg
/** End a hardware decoding based frame. */
/** End a hardware decoding based frame: commit, render, then clean up. */
static int vaapi_h264_end_frame(AVCodecContext *avctx)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    H264Context * const h = avctx->priv_data;
    H264SliceContext *sl = &h->slice_ctx[0];
    int ret;

    /* Submit buffered slices; render and report progress only when
     * every preceding step succeeded. */
    ret = ff_vaapi_commit_slices(vactx);
    if (ret >= 0) {
        ret = ff_vaapi_render_picture(vactx,
                                      ff_vaapi_get_surface_id(h->cur_pic_ptr->f));
        if (ret >= 0)
            ff_h264_draw_horiz_band(h, sl, 0, h->avctx->height);
    }

    /* Common cleanup always runs, even on error. */
    ff_vaapi_common_end_frame(avctx);
    return ret;
}
コード例 #13
0
ファイル: vaapi_mpeg4.c プロジェクト: elnormous/libav
static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
{
    Mpeg4DecContext *ctx = avctx->priv_data;
    MpegEncContext *s = &ctx->m;
    VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
    VAPictureParameterBufferMPEG4 pic_param;
    int i, err;

    pic->output_surface = ff_vaapi_get_surface_id(s->current_picture_ptr->f);

    pic_param = (VAPictureParameterBufferMPEG4) {
        .vop_width                        = s->width,
        .vop_height                       = s->height,
        .forward_reference_picture        = VA_INVALID_ID,
        .backward_reference_picture       = VA_INVALID_ID,
        .vol_fields.bits = {
            .short_video_header           = avctx->codec->id == AV_CODEC_ID_H263,
            .chroma_format                = CHROMA_420,
            .interlaced                   = !s->progressive_sequence,
            .obmc_disable                 = 1,
            .sprite_enable                = ctx->vol_sprite_usage,
            .sprite_warping_accuracy      = s->sprite_warping_accuracy,
            .quant_type                   = s->mpeg_quant,
            .quarter_sample               = s->quarter_sample,
            .data_partitioned             = s->data_partitioning,
            .reversible_vlc               = ctx->rvlc,
            .resync_marker_disable        = !ctx->resync_marker,
        },
        .no_of_sprite_warping_points      = ctx->num_sprite_warping_points,
        .quant_precision                  = s->quant_precision,
        .vop_fields.bits = {
            .vop_coding_type              = s->pict_type - AV_PICTURE_TYPE_I,
            .backward_reference_vop_coding_type =
                s->pict_type == AV_PICTURE_TYPE_B ? s->next_picture.f->pict_type - AV_PICTURE_TYPE_I : 0,
            .vop_rounding_type            = s->no_rounding,
            .intra_dc_vlc_thr             = mpeg4_get_intra_dc_vlc_thr(ctx),
            .top_field_first              = s->top_field_first,
            .alternate_vertical_scan_flag = s->alternate_scan,
        },
        .vop_fcode_forward                = s->f_code,
コード例 #14
0
ファイル: vaapi_hevc.c プロジェクト: 0day-ci/FFmpeg
/** End a hardware decoding based frame. */
/** End a hardware decoding based frame: flag the last slice, commit,
 *  render, then run the common cleanup unconditionally. */
static int vaapi_hevc_end_frame(AVCodecContext *avctx)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    HEVCContext * const h = avctx->priv_data;
    vaapi_hevc_frame_data *frame_data = h->ref->hwaccel_picture_private;
    int ret;

    ff_dlog(avctx, "vaapi_hevc_end_frame()\n");

    /* Mark the most recent slice as the picture's final one before
     * the slice buffers are committed. */
    frame_data->last_slice_param->LongSliceFlags.fields.LastSliceOfPic = 1;

    ret = ff_vaapi_commit_slices(vactx);
    if (ret >= 0)
        ret = ff_vaapi_render_picture(vactx,
                                      ff_vaapi_get_surface_id(h->ref->frame));

    /* Cleanup always runs, even on error. */
    ff_vaapi_common_end_frame(avctx);
    return ret;
}
コード例 #15
0
ファイル: vaapi_mpeg2.c プロジェクト: BOTCrusher/sagetv
/**
 * Begin hardware decoding of an MPEG-2 frame: allocate and fill the VA
 * picture parameter and IQ matrix buffers from the decoder context.
 *
 * @return 0 on success, -1 if a VA buffer could not be allocated
 */
static int vaapi_mpeg2_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
{
    struct MpegEncContext * const s = avctx->priv_data;
    struct vaapi_context * const vactx = avctx->hwaccel_context;
    VAPictureParameterBufferMPEG2 *pic_param;
    VAIQMatrixBufferMPEG2 *iq_matrix;
    int i;

    dprintf(avctx, "vaapi_mpeg2_start_frame()\n");

    vactx->slice_param_size = sizeof(VASliceParameterBufferMPEG2);

    /* Fill in VAPictureParameterBufferMPEG2 */
    pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferMPEG2));
    if (!pic_param)
        return -1;
    pic_param->horizontal_size                                  = s->width;
    pic_param->vertical_size                                    = s->height;
    /* Reference surfaces start out invalid; B/P pictures fill them below. */
    pic_param->forward_reference_picture                        = VA_INVALID_ID;
    pic_param->backward_reference_picture                       = VA_INVALID_ID;
    pic_param->picture_coding_type                              = s->pict_type;
    pic_param->f_code                                           = mpeg2_get_f_code(s);
    pic_param->picture_coding_extension.value                   = 0; /* reset all bits */
    pic_param->picture_coding_extension.bits.intra_dc_precision = s->intra_dc_precision;
    pic_param->picture_coding_extension.bits.picture_structure  = s->picture_structure;
    pic_param->picture_coding_extension.bits.top_field_first    = s->top_field_first;
    pic_param->picture_coding_extension.bits.frame_pred_frame_dct = s->frame_pred_frame_dct;
    pic_param->picture_coding_extension.bits.concealment_motion_vectors = s->concealment_motion_vectors;
    pic_param->picture_coding_extension.bits.q_scale_type       = s->q_scale_type;
    pic_param->picture_coding_extension.bits.intra_vlc_format   = s->intra_vlc_format;
    pic_param->picture_coding_extension.bits.alternate_scan     = s->alternate_scan;
    pic_param->picture_coding_extension.bits.repeat_first_field = s->repeat_first_field;
    pic_param->picture_coding_extension.bits.progressive_frame  = s->progressive_frame;
    pic_param->picture_coding_extension.bits.is_first_field     = mpeg2_get_is_frame_start(s);

    /* B pictures reference both neighbours; P pictures only the previous. */
    switch (s->pict_type) {
    case FF_B_TYPE:
        pic_param->backward_reference_picture = ff_vaapi_get_surface_id(&s->next_picture);
        // fall-through
    case FF_P_TYPE:
        pic_param->forward_reference_picture = ff_vaapi_get_surface_id(&s->last_picture);
        break;
    }

    /* Fill in VAIQMatrixBufferMPEG2 */
    iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferMPEG2));
    if (!iq_matrix)
        return -1;
    iq_matrix->load_intra_quantiser_matrix              = 1;
    iq_matrix->load_non_intra_quantiser_matrix          = 1;
    iq_matrix->load_chroma_intra_quantiser_matrix       = 1;
    iq_matrix->load_chroma_non_intra_quantiser_matrix   = 1;

    /* Copy the matrices out in zigzag order, undoing the IDCT permutation
     * applied to the decoder's internal copies. */
    for (i = 0; i < 64; i++) {
        int n = s->dsp.idct_permutation[ff_zigzag_direct[i]];
        iq_matrix->intra_quantiser_matrix[i]            = s->intra_matrix[n];
        iq_matrix->non_intra_quantiser_matrix[i]        = s->inter_matrix[n];
        iq_matrix->chroma_intra_quantiser_matrix[i]     = s->chroma_intra_matrix[n];
        iq_matrix->chroma_non_intra_quantiser_matrix[i] = s->chroma_inter_matrix[n];
    }
    return 0;
}
コード例 #16
0
ファイル: vaapi_vp9.c プロジェクト: KangLin/FFmpeg
/**
 * Fill a VA VP9 picture parameter buffer from the shared VP9 decoder
 * state (frame header, segmentation, and reference frame surfaces).
 */
static void fill_picture_parameters(AVCodecContext                 *avctx,
                                    const VP9SharedContext         *h,
                                    VADecPictureParameterBufferVP9 *pp)
{
    const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
    int i;

    pp->frame_width = avctx->width;
    pp->frame_height = avctx->height;

    pp->frame_header_length_in_bytes = h->h.uncompressed_header_size;
    pp->first_partition_size = h->h.compressed_header_size;

    pp->profile = h->h.profile;
    pp->bit_depth = h->h.bpp;

    pp->filter_level = h->h.filter.level;
    pp->sharpness_level = h->h.filter.sharpness;
    pp->log2_tile_rows = h->h.tiling.log2_tile_rows;
    pp->log2_tile_columns = h->h.tiling.log2_tile_cols;

    /* Chroma subsampling comes from the software pixel format descriptor. */
    pp->pic_fields.bits.subsampling_x = pixdesc->log2_chroma_w;
    pp->pic_fields.bits.subsampling_y = pixdesc->log2_chroma_h;
    /* VA frame_type: 0 = keyframe, 1 = inter — hence the negations. */
    pp->pic_fields.bits.frame_type = !h->h.keyframe;
    pp->pic_fields.bits.show_frame = !h->h.invisible;
    pp->pic_fields.bits.error_resilient_mode = h->h.errorres;
    pp->pic_fields.bits.intra_only = h->h.intraonly;
    pp->pic_fields.bits.allow_high_precision_mv = h->h.keyframe ? 0 : h->h.highprecisionmvs;
    pp->pic_fields.bits.mcomp_filter_type = h->h.filtermode ^ (h->h.filtermode <= 1);
    pp->pic_fields.bits.frame_parallel_decoding_mode = h->h.parallelmode;
    pp->pic_fields.bits.reset_frame_context = h->h.resetctx;
    pp->pic_fields.bits.refresh_frame_context = h->h.refreshctx;
    pp->pic_fields.bits.frame_context_idx = h->h.framectxid;

    pp->pic_fields.bits.segmentation_enabled = h->h.segmentation.enabled;
    pp->pic_fields.bits.segmentation_temporal_update = h->h.segmentation.temporal;
    pp->pic_fields.bits.segmentation_update_map = h->h.segmentation.update_map;

    pp->pic_fields.bits.last_ref_frame = h->h.refidx[0];
    pp->pic_fields.bits.last_ref_frame_sign_bias = h->h.signbias[0];
    pp->pic_fields.bits.golden_ref_frame = h->h.refidx[1];
    pp->pic_fields.bits.golden_ref_frame_sign_bias = h->h.signbias[1];
    pp->pic_fields.bits.alt_ref_frame = h->h.refidx[2];
    pp->pic_fields.bits.alt_ref_frame_sign_bias = h->h.signbias[2];
    pp->pic_fields.bits.lossless_flag = h->h.lossless;

    for (i = 0; i < 7; i++)
        pp->mb_segment_tree_probs[i] = h->h.segmentation.prob[i];

    /* Without temporal segmentation updates, prediction probs are all 255. */
    if (h->h.segmentation.temporal) {
        for (i = 0; i < 3; i++)
            pp->segment_pred_probs[i] = h->h.segmentation.pred_prob[i];
    } else {
        memset(pp->segment_pred_probs, 255, sizeof(pp->segment_pred_probs));
    }

    /* Map each reference slot to its VA surface; unused slots stay invalid. */
    for (i = 0; i < 8; i++) {
        if (h->refs[i].f->buf[0]) {
            pp->reference_frames[i] = ff_vaapi_get_surface_id(h->refs[i].f);
        } else {
            pp->reference_frames[i] = VA_INVALID_ID;
        }
    }
}
コード例 #17
0
ファイル: vaapi_hevc.c プロジェクト: Rodeo314/tim-libav
static int vaapi_hevc_start_frame(AVCodecContext          *avctx,
                                  av_unused const uint8_t *buffer,
                                  av_unused uint32_t       size)
{
    const HEVCContext        *h = avctx->priv_data;
    VAAPIDecodePictureHEVC *pic = h->ref->hwaccel_picture_private;
    const HEVCSPS          *sps = h->ps.sps;
    const HEVCPPS          *pps = h->ps.pps;

    const ScalingList *scaling_list = NULL;
    int err, i;

    pic->pic.output_surface = ff_vaapi_get_surface_id(h->ref->frame);

    pic->pic_param = (VAPictureParameterBufferHEVC) {
        .pic_fields.value                             = 0,
        .slice_parsing_fields.value                   = 0,
        .pic_width_in_luma_samples                    = sps->width,
        .pic_height_in_luma_samples                   = sps->height,
        .log2_min_luma_coding_block_size_minus3       = sps->log2_min_cb_size - 3,
        .sps_max_dec_pic_buffering_minus1             = sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering - 1,
        .log2_diff_max_min_luma_coding_block_size     = sps->log2_diff_max_min_coding_block_size,
        .log2_min_transform_block_size_minus2         = sps->log2_min_tb_size - 2,
        .log2_diff_max_min_transform_block_size       = sps->log2_max_trafo_size  - sps->log2_min_tb_size,
        .max_transform_hierarchy_depth_inter          = sps->max_transform_hierarchy_depth_inter,
        .max_transform_hierarchy_depth_intra          = sps->max_transform_hierarchy_depth_intra,
        .num_short_term_ref_pic_sets                  = sps->nb_st_rps,
        .num_long_term_ref_pic_sps                    = sps->num_long_term_ref_pics_sps,
        .num_ref_idx_l0_default_active_minus1         = pps->num_ref_idx_l0_default_active - 1,
        .num_ref_idx_l1_default_active_minus1         = pps->num_ref_idx_l1_default_active - 1,
        .init_qp_minus26                              = pps->pic_init_qp_minus26,
        .pps_cb_qp_offset                             = pps->cb_qp_offset,
        .pps_cr_qp_offset                             = pps->cr_qp_offset,
        .pcm_sample_bit_depth_luma_minus1             = sps->pcm.bit_depth - 1,
        .pcm_sample_bit_depth_chroma_minus1           = sps->pcm.bit_depth_chroma - 1,
        .log2_min_pcm_luma_coding_block_size_minus3   = sps->pcm.log2_min_pcm_cb_size - 3,
        .log2_diff_max_min_pcm_luma_coding_block_size = sps->pcm.log2_max_pcm_cb_size - sps->pcm.log2_min_pcm_cb_size,
        .diff_cu_qp_delta_depth                       = pps->diff_cu_qp_delta_depth,
        .pps_beta_offset_div2                         = pps->beta_offset / 2,
        .pps_tc_offset_div2                           = pps->tc_offset / 2,
        .log2_parallel_merge_level_minus2             = pps->log2_parallel_merge_level - 2,
        .bit_depth_luma_minus8                        = sps->bit_depth - 8,
        .bit_depth_chroma_minus8                      = sps->bit_depth - 8,
        .log2_max_pic_order_cnt_lsb_minus4            = sps->log2_max_poc_lsb - 4,
        .num_extra_slice_header_bits                  = pps->num_extra_slice_header_bits,
        .pic_fields.bits = {
            .chroma_format_idc                          = sps->chroma_format_idc,
            .tiles_enabled_flag                         = pps->tiles_enabled_flag,
            .separate_colour_plane_flag                 = sps->separate_colour_plane_flag,
            .pcm_enabled_flag                           = sps->pcm_enabled_flag,
            .scaling_list_enabled_flag                  = sps->scaling_list_enable_flag,
            .transform_skip_enabled_flag                = pps->transform_skip_enabled_flag,
            .amp_enabled_flag                           = sps->amp_enabled_flag,
            .strong_intra_smoothing_enabled_flag        = sps->sps_strong_intra_smoothing_enable_flag,
            .sign_data_hiding_enabled_flag              = pps->sign_data_hiding_flag,
            .constrained_intra_pred_flag                = pps->constrained_intra_pred_flag,
            .cu_qp_delta_enabled_flag                   = pps->cu_qp_delta_enabled_flag,
            .weighted_pred_flag                         = pps->weighted_pred_flag,
            .weighted_bipred_flag                       = pps->weighted_bipred_flag,
            .transquant_bypass_enabled_flag             = pps->transquant_bypass_enable_flag,
            .entropy_coding_sync_enabled_flag           = pps->entropy_coding_sync_enabled_flag,
            .pps_loop_filter_across_slices_enabled_flag = pps->seq_loop_filter_across_slices_enabled_flag,
            .loop_filter_across_tiles_enabled_flag      = pps->loop_filter_across_tiles_enabled_flag,
            .pcm_loop_filter_disabled_flag              = sps->pcm.loop_filter_disable_flag,
        },
        .slice_parsing_fields.bits = {
            .lists_modification_present_flag             = pps->lists_modification_present_flag,
            .long_term_ref_pics_present_flag             = sps->long_term_ref_pics_present_flag,
            .sps_temporal_mvp_enabled_flag               = sps->sps_temporal_mvp_enabled_flag,
            .cabac_init_present_flag                     = pps->cabac_init_present_flag,
            .output_flag_present_flag                    = pps->output_flag_present_flag,
            .dependent_slice_segments_enabled_flag       = pps->dependent_slice_segments_enabled_flag,
            .pps_slice_chroma_qp_offsets_present_flag    = pps->pic_slice_level_chroma_qp_offsets_present_flag,
            .sample_adaptive_offset_enabled_flag         = sps->sao_enabled,
            .deblocking_filter_override_enabled_flag     = pps->deblocking_filter_override_enabled_flag,
            .pps_disable_deblocking_filter_flag          = pps->disable_dbf,
            .slice_segment_header_extension_present_flag = pps->slice_header_extension_present_flag,
            .RapPicFlag                                  = IS_IRAP(h),
            .IdrPicFlag                                  = IS_IDR(h),
            .IntraPicFlag                                = IS_IRAP(h),
        },
    };
コード例 #18
0
ファイル: vaapi_mpeg4.c プロジェクト: HighlightCam/FFmpeg
/**
 * Begin hardware decoding of an MPEG-4 frame: allocate and fill the VA
 * picture parameter buffer and, when the first quantisation method is in
 * use, the IQ matrix buffer.
 *
 * @return 0 on success, -1 if a VA buffer could not be allocated
 */
static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
{
    MpegEncContext * const s = avctx->priv_data;
    struct vaapi_context * const vactx = avctx->hwaccel_context;
    VAPictureParameterBufferMPEG4 *pic_param;
    VAIQMatrixBufferMPEG4 *iq_matrix;
    int i;

    av_dlog(avctx, "vaapi_mpeg4_start_frame()\n");

    vactx->slice_param_size = sizeof(VASliceParameterBufferMPEG4);

    /* Fill in VAPictureParameterBufferMPEG4 */
    pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferMPEG4));
    if (!pic_param)
        return -1;
    pic_param->vop_width                                = s->width;
    pic_param->vop_height                               = s->height;
    /* Reference surfaces start out invalid; B/P pictures fill them below. */
    pic_param->forward_reference_picture                = VA_INVALID_ID;
    pic_param->backward_reference_picture               = VA_INVALID_ID;
    pic_param->vol_fields.value                         = 0; /* reset all bits */
    pic_param->vol_fields.bits.short_video_header       = avctx->codec->id == AV_CODEC_ID_H263;
    pic_param->vol_fields.bits.chroma_format            = CHROMA_420;
    pic_param->vol_fields.bits.interlaced               = !s->progressive_sequence;
    pic_param->vol_fields.bits.obmc_disable             = 1;
    pic_param->vol_fields.bits.sprite_enable            = s->vol_sprite_usage;
    pic_param->vol_fields.bits.sprite_warping_accuracy  = s->sprite_warping_accuracy;
    pic_param->vol_fields.bits.quant_type               = s->mpeg_quant;
    pic_param->vol_fields.bits.quarter_sample           = s->quarter_sample;
    pic_param->vol_fields.bits.data_partitioned         = s->data_partitioning;
    pic_param->vol_fields.bits.reversible_vlc           = s->rvlc;
    pic_param->vol_fields.bits.resync_marker_disable    = !s->resync_marker;
    pic_param->no_of_sprite_warping_points              = s->num_sprite_warping_points;
    /* VA API carries at most 3 sprite warping points. */
    for (i = 0; i < s->num_sprite_warping_points && i < 3; i++) {
        pic_param->sprite_trajectory_du[i]              = s->sprite_traj[i][0];
        pic_param->sprite_trajectory_dv[i]              = s->sprite_traj[i][1];
    }
    pic_param->quant_precision                          = s->quant_precision;
    pic_param->vop_fields.value                         = 0; /* reset all bits */
    pic_param->vop_fields.bits.vop_coding_type          = s->pict_type - AV_PICTURE_TYPE_I;
    pic_param->vop_fields.bits.backward_reference_vop_coding_type = s->pict_type == AV_PICTURE_TYPE_B ? s->next_picture.f.pict_type - AV_PICTURE_TYPE_I : 0;
    pic_param->vop_fields.bits.vop_rounding_type        = s->no_rounding;
    pic_param->vop_fields.bits.intra_dc_vlc_thr         = mpeg4_get_intra_dc_vlc_thr(s);
    pic_param->vop_fields.bits.top_field_first          = s->top_field_first;
    pic_param->vop_fields.bits.alternate_vertical_scan_flag = s->alternate_scan;
    pic_param->vop_fcode_forward                        = s->f_code;
    pic_param->vop_fcode_backward                       = s->b_code;
    pic_param->vop_time_increment_resolution            = avctx->time_base.den;
    pic_param->num_macroblocks_in_gob                   = s->mb_width * ff_h263_get_gob_height(s);
    pic_param->num_gobs_in_vop                          = (s->mb_width * s->mb_height) / pic_param->num_macroblocks_in_gob;
    pic_param->TRB                                      = s->pb_time;
    pic_param->TRD                                      = s->pp_time;

    /* B pictures reference both neighbours; any non-I picture references
     * the previous one. */
    if (s->pict_type == AV_PICTURE_TYPE_B)
        pic_param->backward_reference_picture = ff_vaapi_get_surface_id(&s->next_picture);
    if (s->pict_type != AV_PICTURE_TYPE_I)
        pic_param->forward_reference_picture  = ff_vaapi_get_surface_id(&s->last_picture);

    /* Fill in VAIQMatrixBufferMPEG4 */
    /* Only the first inverse quantisation method uses the weighting matrices */
    if (pic_param->vol_fields.bits.quant_type) {
        iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferMPEG4));
        if (!iq_matrix)
            return -1;
        iq_matrix->load_intra_quant_mat         = 1;
        iq_matrix->load_non_intra_quant_mat     = 1;

        /* Copy the matrices out in zigzag order, undoing the IDCT
         * permutation applied to the decoder's internal copies. */
        for (i = 0; i < 64; i++) {
            int n = s->dsp.idct_permutation[ff_zigzag_direct[i]];
            iq_matrix->intra_quant_mat[i]       = s->intra_matrix[n];
            iq_matrix->non_intra_quant_mat[i]   = s->inter_matrix[n];
        }
    }
    return 0;
}
コード例 #19
0
ファイル: vaapi_vc1.c プロジェクト: Flameeyes/libav
/**
 * VA-API hwaccel start_frame hook for VC-1.
 *
 * Translates the already-parsed picture-layer state held in VC1Context
 * into a VAPictureParameterBufferVC1 for the hardware decoder, and — when
 * the picture carries any coded bitplanes — also fills a packed bitplane
 * buffer.  Fields marked "XXX: interlaced frame" are placeholders: only
 * progressive coding is fully wired up here.
 *
 * @param avctx  codec context; avctx->priv_data is the VC1Context and
 *               avctx->hwaccel_context the struct vaapi_context
 * @param buffer unused (slice data is submitted elsewhere)
 * @param size   unused
 * @return 0 on success, -1 if a VA buffer could not be allocated
 */
static int vaapi_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
{
    VC1Context * const v = avctx->priv_data;
    MpegEncContext * const s = &v->s;
    struct vaapi_context * const vactx = avctx->hwaccel_context;
    VAPictureParameterBufferVC1 *pic_param;

    av_dlog(avctx, "vaapi_vc1_start_frame()\n");

    vactx->slice_param_size = sizeof(VASliceParameterBufferVC1);

    /* Fill in VAPictureParameterBufferVC1 */
    pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferVC1));
    if (!pic_param)
        return -1;
    /* Reference surfaces default to invalid; the switch below fills in the
     * ones the current picture type actually needs. */
    pic_param->forward_reference_picture                            = VA_INVALID_ID;
    pic_param->backward_reference_picture                           = VA_INVALID_ID;
    pic_param->inloop_decoded_picture                               = VA_INVALID_ID;
    /* Sequence-layer flags (from the sequence/entry-point headers). */
    pic_param->sequence_fields.value                                = 0; /* reset all bits */
    pic_param->sequence_fields.bits.pulldown                        = v->broadcast;
    pic_param->sequence_fields.bits.interlace                       = v->interlace;
    pic_param->sequence_fields.bits.tfcntrflag                      = v->tfcntrflag;
    pic_param->sequence_fields.bits.finterpflag                     = v->finterpflag;
    pic_param->sequence_fields.bits.psf                             = v->psf;
    pic_param->sequence_fields.bits.multires                        = v->multires;
    pic_param->sequence_fields.bits.overlap                         = v->overlap;
    pic_param->sequence_fields.bits.syncmarker                      = s->resync_marker;
    pic_param->sequence_fields.bits.rangered                        = v->rangered;
    pic_param->sequence_fields.bits.max_b_frames                    = s->avctx->max_b_frames;
#if VA_CHECK_VERSION(0,32,0)
    /* The profile field only exists in libva >= 0.32. */
    pic_param->sequence_fields.bits.profile                         = v->profile;
#endif
    pic_param->coded_width                                          = s->avctx->coded_width;
    pic_param->coded_height                                         = s->avctx->coded_height;
    /* Entry-point-layer flags. */
    pic_param->entrypoint_fields.value                              = 0; /* reset all bits */
    pic_param->entrypoint_fields.bits.broken_link                   = v->broken_link;
    pic_param->entrypoint_fields.bits.closed_entry                  = v->closed_entry;
    pic_param->entrypoint_fields.bits.panscan_flag                  = v->panscanflag;
    pic_param->entrypoint_fields.bits.loopfilter                    = s->loop_filter;
    pic_param->conditional_overlap_flag                             = v->condover;
    pic_param->fast_uvmc_flag                                       = v->fastuvmc;
    /* Range-mapping (luma/chroma scaling signalled in the entry point). */
    pic_param->range_mapping_fields.value                           = 0; /* reset all bits */
    pic_param->range_mapping_fields.bits.luma_flag                  = v->range_mapy_flag;
    pic_param->range_mapping_fields.bits.luma                       = v->range_mapy;
    pic_param->range_mapping_fields.bits.chroma_flag                = v->range_mapuv_flag;
    pic_param->range_mapping_fields.bits.chroma                     = v->range_mapuv;
    pic_param->b_picture_fraction                                   = v->bfraction_lut_index;
    /* cbp_table: index of the CBPCY VLC table in use; 0 when no table has
     * been selected yet (cbpcy_vlc is NULL for non-P pictures). */
    pic_param->cbp_table                                            = v->cbpcy_vlc ? v->cbpcy_vlc - ff_vc1_cbpcy_p_vlc : 0;
    pic_param->mb_mode_table                                        = 0; /* XXX: interlaced frame */
    pic_param->range_reduction_frame                                = v->rangeredfrm;
    pic_param->rounding_control                                     = v->rnd;
    pic_param->post_processing                                      = v->postproc;
    pic_param->picture_resolution_index                             = v->respic;
    pic_param->luma_scale                                           = v->lumscale;
    pic_param->luma_shift                                           = v->lumshift;
    /* Picture-layer flags. */
    pic_param->picture_fields.value                                 = 0; /* reset all bits */
    pic_param->picture_fields.bits.picture_type                     = vc1_get_PTYPE(v);
    pic_param->picture_fields.bits.frame_coding_mode                = v->fcm;
    pic_param->picture_fields.bits.top_field_first                  = v->tff;
    pic_param->picture_fields.bits.is_first_field                   = v->fcm == 0; /* XXX: interlaced frame */
    pic_param->picture_fields.bits.intensity_compensation           = v->mv_mode == MV_PMODE_INTENSITY_COMP;
    /* raw_coding: which per-MB syntax elements are coded in raw mode
     * (i.e. in the MB layer) rather than as a picture-level bitplane. */
    pic_param->raw_coding.value                                     = 0; /* reset all bits */
    pic_param->raw_coding.flags.mv_type_mb                          = v->mv_type_is_raw;
    pic_param->raw_coding.flags.direct_mb                           = v->dmb_is_raw;
    pic_param->raw_coding.flags.skip_mb                             = v->skip_is_raw;
    pic_param->raw_coding.flags.field_tx                            = 0; /* XXX: interlaced frame */
    pic_param->raw_coding.flags.forward_mb                          = 0; /* XXX: interlaced frame */
    pic_param->raw_coding.flags.ac_pred                             = v->acpred_is_raw;
    pic_param->raw_coding.flags.overflags                           = v->overflg_is_raw;
    /* bitplane_present: which bitplanes we will actually pack into the
     * VA bitplane buffer below (checked via bitplane_present.value). */
    pic_param->bitplane_present.value                               = 0; /* reset all bits */
    pic_param->bitplane_present.flags.bp_mv_type_mb                 = vc1_has_MVTYPEMB_bitplane(v);
    pic_param->bitplane_present.flags.bp_direct_mb                  = vc1_has_DIRECTMB_bitplane(v);
    pic_param->bitplane_present.flags.bp_skip_mb                    = vc1_has_SKIPMB_bitplane(v);
    pic_param->bitplane_present.flags.bp_field_tx                   = 0; /* XXX: interlaced frame */
    pic_param->bitplane_present.flags.bp_forward_mb                 = 0; /* XXX: interlaced frame */
    pic_param->bitplane_present.flags.bp_ac_pred                    = vc1_has_ACPRED_bitplane(v);
    pic_param->bitplane_present.flags.bp_overflags                  = vc1_has_OVERFLAGS_bitplane(v);
    pic_param->reference_fields.value                               = 0; /* reset all bits */
    pic_param->reference_fields.bits.reference_distance_flag        = v->refdist_flag;
    pic_param->reference_fields.bits.reference_distance             = 0; /* XXX: interlaced frame */
    pic_param->reference_fields.bits.num_reference_pictures         = 0; /* XXX: interlaced frame */
    pic_param->reference_fields.bits.reference_field_pic_indicator  = 0; /* XXX: interlaced frame */
    /* Motion-vector coding parameters. */
    pic_param->mv_fields.value                                      = 0; /* reset all bits */
    pic_param->mv_fields.bits.mv_mode                               = vc1_get_MVMODE(v);
    pic_param->mv_fields.bits.mv_mode2                              = vc1_get_MVMODE2(v);
    pic_param->mv_fields.bits.mv_table                              = s->mv_table_index;
    pic_param->mv_fields.bits.two_mv_block_pattern_table            = 0; /* XXX: interlaced frame */
    pic_param->mv_fields.bits.four_mv_switch                        = 0; /* XXX: interlaced frame */
    pic_param->mv_fields.bits.four_mv_block_pattern_table           = 0; /* XXX: interlaced frame */
    pic_param->mv_fields.bits.extended_mv_flag                      = v->extended_mv;
    pic_param->mv_fields.bits.extended_mv_range                     = v->mvrange;
    pic_param->mv_fields.bits.extended_dmv_flag                     = v->extended_dmv;
    pic_param->mv_fields.bits.extended_dmv_range                    = 0; /* XXX: interlaced frame */
    /* Quantizer parameters; dq_sb_edge/dq_db_edge are only meaningful for
     * their respective DQPROFILE, hence the conditionals. */
    pic_param->pic_quantizer_fields.value                           = 0; /* reset all bits */
    pic_param->pic_quantizer_fields.bits.dquant                     = v->dquant;
    pic_param->pic_quantizer_fields.bits.quantizer                  = v->quantizer_mode;
    pic_param->pic_quantizer_fields.bits.half_qp                    = v->halfpq;
    pic_param->pic_quantizer_fields.bits.pic_quantizer_scale        = v->pq;
    pic_param->pic_quantizer_fields.bits.pic_quantizer_type         = v->pquantizer;
    pic_param->pic_quantizer_fields.bits.dq_frame                   = v->dquantfrm;
    pic_param->pic_quantizer_fields.bits.dq_profile                 = v->dqprofile;
    pic_param->pic_quantizer_fields.bits.dq_sb_edge                 = v->dqprofile == DQPROFILE_SINGLE_EDGE  ? v->dqsbedge : 0;
    pic_param->pic_quantizer_fields.bits.dq_db_edge                 = v->dqprofile == DQPROFILE_DOUBLE_EDGES ? v->dqsbedge : 0;
    pic_param->pic_quantizer_fields.bits.dq_binary_level            = v->dqbilevel;
    pic_param->pic_quantizer_fields.bits.alt_pic_quantizer          = v->altpq;
    /* Transform selection parameters. */
    pic_param->transform_fields.value                               = 0; /* reset all bits */
    pic_param->transform_fields.bits.variable_sized_transform_flag  = v->vstransform;
    pic_param->transform_fields.bits.mb_level_transform_type_flag   = v->ttmbf;
    pic_param->transform_fields.bits.frame_level_transform_type     = vc1_get_TTFRM(v);
    pic_param->transform_fields.bits.transform_ac_codingset_idx1    = v->c_ac_table_index;
    pic_param->transform_fields.bits.transform_ac_codingset_idx2    = v->y_ac_table_index;
    pic_param->transform_fields.bits.intra_transform_dc_table       = v->s.dc_table_index;

    /* Reference surfaces: P needs a forward reference, B needs both
     * (the B case deliberately falls through to also set the forward one).
     * I pictures keep the VA_INVALID_ID defaults set above. */
    switch (s->pict_type) {
    case AV_PICTURE_TYPE_B:
        pic_param->backward_reference_picture = ff_vaapi_get_surface_id(&s->next_picture);
        // fall-through
    case AV_PICTURE_TYPE_P:
        pic_param->forward_reference_picture = ff_vaapi_get_surface_id(&s->last_picture);
        break;
    }

    /* Pack the coded bitplanes, if any, into the VA bitplane buffer.
     * Up to three planes are combined per macroblock; which FFmpeg plane
     * maps to which slot depends on the picture type, per the VA-API
     * VC-1 bitplane layout. */
    if (pic_param->bitplane_present.value) {
        uint8_t *bitplane;
        const uint8_t *ff_bp[3];
        int x, y, n;

        switch (s->pict_type) {
        case AV_PICTURE_TYPE_P:
            ff_bp[0] = pic_param->bitplane_present.flags.bp_direct_mb  ? v->direct_mb_plane    : NULL;
            ff_bp[1] = pic_param->bitplane_present.flags.bp_skip_mb    ? s->mbskip_table       : NULL;
            ff_bp[2] = pic_param->bitplane_present.flags.bp_mv_type_mb ? v->mv_type_mb_plane   : NULL;
            break;
        case AV_PICTURE_TYPE_B:
            /* A true B picture uses DIRECTMB/SKIPMB; a BI picture is
             * coded like an I picture and falls through below. */
            if (!v->bi_type) {
                ff_bp[0] = pic_param->bitplane_present.flags.bp_direct_mb ? v->direct_mb_plane : NULL;
                ff_bp[1] = pic_param->bitplane_present.flags.bp_skip_mb   ? s->mbskip_table    : NULL;
                ff_bp[2] = NULL; /* XXX: interlaced frame (FORWARD plane) */
                break;
            }
            /* fall-through (BI-type) */
        case AV_PICTURE_TYPE_I:
            ff_bp[0] = NULL; /* XXX: interlaced frame (FIELDTX plane) */
            ff_bp[1] = pic_param->bitplane_present.flags.bp_ac_pred    ? v->acpred_plane       : NULL;
            ff_bp[2] = pic_param->bitplane_present.flags.bp_overflags  ? v->over_flags_plane   : NULL;
            break;
        default:
            ff_bp[0] = NULL;
            ff_bp[1] = NULL;
            ff_bp[2] = NULL;
            break;
        }

        /* One nibble per macroblock, two macroblocks per byte — hence the
         * (mb_count + 1) / 2 allocation. */
        bitplane = ff_vaapi_alloc_bitplane(vactx, (s->mb_width * s->mb_height + 1) / 2);
        if (!bitplane)
            return -1;

        n = 0;
        for (y = 0; y < s->mb_height; y++)
            for (x = 0; x < s->mb_width; x++, n++)
                vc1_pack_bitplanes(bitplane, n, ff_bp, x, y, s->mb_stride);
        if (n & 1) /* move last nibble to the high order */
            bitplane[n/2] <<= 4;
    }
    return 0;
}