Example #1
File: vaapi_vp9.c  Project: KangLin/FFmpeg
static int vaapi_vp9_decode_slice(AVCodecContext *avctx,
                                  const uint8_t  *buffer,
                                  uint32_t        size)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    const VP9SharedContext *h = avctx->priv_data;
    VASliceParameterBufferVP9 *slice_param;
    int i;

    slice_param = (VASliceParameterBufferVP9*)ff_vaapi_alloc_slice(vactx, buffer, size);
    if (!slice_param)
        return -1;

    for (i = 0; i < 8; i++) {
        slice_param->seg_param[i].segment_flags.fields.segment_reference_enabled = h->h.segmentation.feat[i].ref_enabled;
        slice_param->seg_param[i].segment_flags.fields.segment_reference = h->h.segmentation.feat[i].ref_val;
        slice_param->seg_param[i].segment_flags.fields.segment_reference_skipped = h->h.segmentation.feat[i].skip_enabled;

        memcpy(slice_param->seg_param[i].filter_level, h->h.segmentation.feat[i].lflvl, sizeof(slice_param->seg_param[i].filter_level));

        slice_param->seg_param[i].luma_dc_quant_scale = h->h.segmentation.feat[i].qmul[0][0];
        slice_param->seg_param[i].luma_ac_quant_scale = h->h.segmentation.feat[i].qmul[0][1];
        slice_param->seg_param[i].chroma_dc_quant_scale = h->h.segmentation.feat[i].qmul[1][0];
        slice_param->seg_param[i].chroma_ac_quant_scale = h->h.segmentation.feat[i].qmul[1][1];
    }

    return 0;
}
Example #2
File: vaapi_hevc.c  Project: 0day-ci/FFmpeg
/** Initialize and start decoding a frame with VA API. */
static int vaapi_hevc_start_frame(AVCodecContext          *avctx,
                                  av_unused const uint8_t *buffer,
                                  av_unused uint32_t       size)
{
    HEVCContext * const h = avctx->priv_data;
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    vaapi_hevc_frame_data *frame_data = h->ref->hwaccel_picture_private;
    VAPictureParameterBufferHEVC *pic_param;
    VAIQMatrixBufferHEVC *iq_matrix;
    ScalingList const * scaling_list;
    int i, j, pos;

    ff_dlog(avctx, "vaapi_hevc_start_frame()\n");

    vactx->slice_param_size = sizeof(VASliceParameterBufferHEVC);

    /* Fill in VAPictureParameterBufferHEVC. */
    pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferHEVC));
    if (!pic_param)
        return -1;
    fill_picture_parameters(h, pic_param);
    frame_data->pic_param = pic_param;

    /* Fill in VAIQMatrixBufferHEVC. */
    if (h->ps.pps->scaling_list_data_present_flag) {
        scaling_list = &h->ps.pps->scaling_list;
    } else if (h->ps.sps->scaling_list_enable_flag) {
        scaling_list = &h->ps.sps->scaling_list;
    } else {
        return 0;
    }

    iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferHEVC));
    if (!iq_matrix)
        return -1;

    for (i = 0; i < 6; ++i) {
        for (j = 0; j < 16; ++j) {
            pos = 4 * ff_hevc_diag_scan4x4_y[j] + ff_hevc_diag_scan4x4_x[j];
            iq_matrix->ScalingList4x4[i][j] = scaling_list->sl[0][i][pos];
        }
        for (j = 0; j < 64; ++j) {
            pos = 8 * ff_hevc_diag_scan8x8_y[j] + ff_hevc_diag_scan8x8_x[j];
            iq_matrix->ScalingList8x8[i][j] = scaling_list->sl[1][i][pos];
            iq_matrix->ScalingList16x16[i][j] = scaling_list->sl[2][i][pos];
            if (i < 2) {
                iq_matrix->ScalingList32x32[i][j] = scaling_list->sl[3][i * 3][pos];
            }
        }
        iq_matrix->ScalingListDC16x16[i] = scaling_list->sl_dc[0][i];
        if (i < 2) {
            iq_matrix->ScalingListDC32x32[i] = scaling_list->sl_dc[1][i * 3];
        }
    }

    return 0;
}
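
The reorder loops above work because ff_hevc_diag_scan4x4_x/_y (and the 8x8 variants) hold the raster coordinates of the j-th entry of HEVC's up-right diagonal scan, so pos turns a scan index into a raster offset into FFmpeg's scaling lists. A minimal sketch of how such a table can be derived, per HEVC section 6.5.3 (illustrative only, not the FFmpeg implementation):

/* Illustrative: generate the up-right diagonal scan order for an n x n block
 * (n = 4 or 8). Scan index j maps to raster coordinates (scan_x[j], scan_y[j]). */
static void make_diag_scan(int n, int *scan_x, int *scan_y)
{
    int j = 0;
    for (int d = 0; d <= 2 * (n - 1); d++) {           /* anti-diagonal d = x + y */
        for (int y = d < n ? d : n - 1; y >= 0; y--) { /* bottom-left to top-right */
            int x = d - y;
            if (x < n) {
                scan_x[j] = x;
                scan_y[j] = y;
                j++;
            }
        }
    }
}

For n = 4 this reproduces ff_hevc_diag_scan4x4_x = {0,0,1,0,1,2,...} and the matching y table, so pos = 4 * y[j] + x[j] is the raster position of scan index j.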
Example #3
static int vaapi_mpeg4_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
{
    MpegEncContext * const s = avctx->priv_data;
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    VASliceParameterBufferMPEG4 *slice_param;

    /* Fill in VASliceParameterBufferMPEG4 */
    slice_param = (VASliceParameterBufferMPEG4 *)ff_vaapi_alloc_slice(vactx, buffer, size);
    if (!slice_param)
        return -1;
    slice_param->macroblock_offset      = get_bits_count(&s->gb) % 8;
    slice_param->macroblock_number      = 0;
    slice_param->quant_scale            = s->qscale;

    return 0;
}
Example #4
File: vaapi.c  Project: 0day-ci/FFmpeg
void ff_vaapi_common_end_frame(AVCodecContext *avctx)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);

    ff_dlog(avctx, "ff_vaapi_common_end_frame()\n");

    destroy_buffers(vactx->display, &vactx->pic_param_buf_id, 1);
    destroy_buffers(vactx->display, &vactx->iq_matrix_buf_id, 1);
    destroy_buffers(vactx->display, &vactx->bitplane_buf_id, 1);
    destroy_buffers(vactx->display, vactx->slice_buf_ids, vactx->n_slice_buf_ids);
    av_freep(&vactx->slice_buf_ids);
    av_freep(&vactx->slice_params);
    vactx->n_slice_buf_ids     = 0;
    vactx->slice_buf_ids_alloc = 0;
    vactx->slice_count         = 0;
    vactx->slice_params_alloc  = 0;
}
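
destroy_buffers() is a static helper inside vaapi.c; a plausible sketch of its body, assuming this exact helper shape (release each VA buffer and invalidate its id, so the teardown above is safe even for buffers that were never allocated):

#include <va/va.h>

/* Sketch: destroy up to n_buffers VA buffers and reset their ids. */
static void destroy_buffers(VADisplay display, VABufferID *buffers,
                            unsigned int n_buffers)
{
    unsigned int i;
    for (i = 0; i < n_buffers; i++) {
        if (buffers[i] != VA_INVALID_ID) {
            vaDestroyBuffer(display, buffers[i]);
            buffers[i] = VA_INVALID_ID;
        }
    }
}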
Example #5
File: vaapi_vp9.c  Project: KangLin/FFmpeg
static int vaapi_vp9_start_frame(AVCodecContext          *avctx,
                                 av_unused const uint8_t *buffer,
                                 av_unused uint32_t       size)
{
    const VP9SharedContext *h = avctx->priv_data;
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    VADecPictureParameterBufferVP9 *pic_param;

    vactx->slice_param_size = sizeof(VASliceParameterBufferVP9);

    pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VADecPictureParameterBufferVP9));
    if (!pic_param)
        return -1;
    fill_picture_parameters(avctx, h, pic_param);

    return 0;
}
Example #6
File: vaapi_vp9.c  Project: KangLin/FFmpeg
static int vaapi_vp9_end_frame(AVCodecContext *avctx)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    const VP9SharedContext *h = avctx->priv_data;
    int ret;

    ret = ff_vaapi_commit_slices(vactx);
    if (ret < 0)
        goto finish;

    ret = ff_vaapi_render_picture(vactx, ff_vaapi_get_surface_id(h->frames[CUR_FRAME].tf.f));
    if (ret < 0)
        goto finish;

finish:
    ff_vaapi_common_end_frame(avctx);
    return ret;
}
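
ff_vaapi_commit_slices() and ff_vaapi_render_picture() wrap the core libva submission sequence. A reduced sketch of what the render step does under the hood, assuming the parameter and slice buffers were already created with vaCreateBuffer (error handling trimmed to a status check):

#include <va/va.h>

/* Sketch: the begin/render/end sequence behind ff_vaapi_render_picture(). */
static int render_picture_sketch(VADisplay display, VAContextID context,
                                 VASurfaceID surface,
                                 VABufferID *buffers, int n_buffers)
{
    if (vaBeginPicture(display, context, surface) != VA_STATUS_SUCCESS)
        return -1;
    /* Submit picture parameters, IQ matrix and slice buffers in one batch. */
    if (vaRenderPicture(display, context, buffers, n_buffers) != VA_STATUS_SUCCESS)
        return -1;
    /* Kick off decoding of the frame into the target surface. */
    if (vaEndPicture(display, context) != VA_STATUS_SUCCESS)
        return -1;
    return 0;
}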
Example #7
File: vaapi.c  Project: 0day-ci/FFmpeg
int ff_vaapi_context_init(AVCodecContext *avctx)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    const struct vaapi_context * const user_vactx = avctx->hwaccel_context;

    if (!user_vactx) {
        av_log(avctx, AV_LOG_ERROR, "Hardware acceleration context (hwaccel_context) does not exist.\n");
        return AVERROR(ENOSYS);
    }

    vactx->display              = user_vactx->display;
    vactx->config_id            = user_vactx->config_id;
    vactx->context_id           = user_vactx->context_id;

    vactx->pic_param_buf_id     = VA_INVALID_ID;
    vactx->iq_matrix_buf_id     = VA_INVALID_ID;
    vactx->bitplane_buf_id      = VA_INVALID_ID;

    return 0;
}
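
The struct vaapi_context consumed here is the public struct from libavcodec/vaapi.h that the application must fill in before decoding starts. A hedged sketch of a caller populating it (profile, surface list and dimensions are placeholders; real code must match them to the stream):

#include <va/va.h>
#include <libavcodec/avcodec.h>
#include <libavcodec/vaapi.h>

/* Sketch: application-side setup of avctx->hwaccel_context. */
static int setup_vaapi_sketch(AVCodecContext *avctx, VADisplay display,
                              VASurfaceID *surfaces, int n_surfaces,
                              int width, int height)
{
    static struct vaapi_context va_context; /* must outlive decoding */
    VAConfigID  config_id;
    VAContextID context_id;

    if (vaCreateConfig(display, VAProfileH264Main, VAEntrypointVLD,
                       NULL, 0, &config_id) != VA_STATUS_SUCCESS)
        return -1;
    if (vaCreateContext(display, config_id, width, height, VA_PROGRESSIVE,
                        surfaces, n_surfaces, &context_id) != VA_STATUS_SUCCESS)
        return -1;

    va_context.display     = display;
    va_context.config_id   = config_id;
    va_context.context_id  = context_id;
    avctx->hwaccel_context = &va_context;
    return 0;
}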
Example #8
File: vaapi.c  Project: 0day-ci/FFmpeg
int ff_vaapi_mpeg_end_frame(AVCodecContext *avctx)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    MpegEncContext *s = avctx->priv_data;
    int ret;

    ret = ff_vaapi_commit_slices(vactx);
    if (ret < 0)
        goto finish;

    ret = ff_vaapi_render_picture(vactx,
                                  ff_vaapi_get_surface_id(s->current_picture_ptr->f));
    if (ret < 0)
        goto finish;

    ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);

finish:
    ff_vaapi_common_end_frame(avctx);
    return ret;
}
Example #9
/** End a hardware-decoded frame. */
static int vaapi_h264_end_frame(AVCodecContext *avctx)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    H264Context * const h = avctx->priv_data;
    H264SliceContext *sl = &h->slice_ctx[0];
    int ret;

    ret = ff_vaapi_commit_slices(vactx);
    if (ret < 0)
        goto finish;

    ret = ff_vaapi_render_picture(vactx, ff_vaapi_get_surface_id(h->cur_pic_ptr->f));
    if (ret < 0)
        goto finish;

    ff_h264_draw_horiz_band(h, sl, 0, h->avctx->height);

finish:
    ff_vaapi_common_end_frame(avctx);
    return ret;
}
Example #10
/** Decode the given H.264 slice with VA API. */
static int vaapi_h264_decode_slice(AVCodecContext *avctx,
                                   const uint8_t  *buffer,
                                   uint32_t        size)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    H264Context * const h = avctx->priv_data;
    H264SliceContext *sl  = &h->slice_ctx[0];
    VASliceParameterBufferH264 *slice_param;

    /* Fill in VASliceParameterBufferH264. */
    slice_param = (VASliceParameterBufferH264 *)ff_vaapi_alloc_slice(vactx, buffer, size);
    if (!slice_param)
        return -1;
    slice_param->slice_data_bit_offset          = get_bits_count(&sl->gb);
    slice_param->first_mb_in_slice              = (sl->mb_y >> FIELD_OR_MBAFF_PICTURE(h)) * h->mb_width + sl->mb_x;
    slice_param->slice_type                     = ff_h264_get_slice_type(sl);
    slice_param->direct_spatial_mv_pred_flag    = sl->slice_type == AV_PICTURE_TYPE_B ? sl->direct_spatial_mv_pred : 0;
    slice_param->num_ref_idx_l0_active_minus1   = sl->list_count > 0 ? sl->ref_count[0] - 1 : 0;
    slice_param->num_ref_idx_l1_active_minus1   = sl->list_count > 1 ? sl->ref_count[1] - 1 : 0;
    slice_param->cabac_init_idc                 = sl->cabac_init_idc;
    slice_param->slice_qp_delta                 = sl->qscale - h->ps.pps->init_qp;
    slice_param->disable_deblocking_filter_idc  = sl->deblocking_filter < 2 ? !sl->deblocking_filter : sl->deblocking_filter;
    slice_param->slice_alpha_c0_offset_div2     = sl->slice_alpha_c0_offset / 2;
    slice_param->slice_beta_offset_div2         = sl->slice_beta_offset     / 2;
    slice_param->luma_log2_weight_denom         = sl->pwt.luma_log2_weight_denom;
    slice_param->chroma_log2_weight_denom       = sl->pwt.chroma_log2_weight_denom;

    fill_vaapi_RefPicList(slice_param->RefPicList0, sl->ref_list[0], sl->list_count > 0 ? sl->ref_count[0] : 0);
    fill_vaapi_RefPicList(slice_param->RefPicList1, sl->ref_list[1], sl->list_count > 1 ? sl->ref_count[1] : 0);

    fill_vaapi_plain_pred_weight_table(h, 0,
                                       &slice_param->luma_weight_l0_flag,   slice_param->luma_weight_l0,   slice_param->luma_offset_l0,
                                       &slice_param->chroma_weight_l0_flag, slice_param->chroma_weight_l0, slice_param->chroma_offset_l0);
    fill_vaapi_plain_pred_weight_table(h, 1,
                                       &slice_param->luma_weight_l1_flag,   slice_param->luma_weight_l1,   slice_param->luma_offset_l1,
                                       &slice_param->chroma_weight_l1_flag, slice_param->chroma_weight_l1, slice_param->chroma_offset_l1);
    return 0;
}
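
fill_vaapi_RefPicList() is a local helper in vaapi_h264.c whose body is not shown here; a plausible sketch of its job is to copy each active reference into the fixed-size VA array via fill_vaapi_pic() (used elsewhere in this file) and mark the remaining entries invalid. The helper shape below is an assumption:

/* Sketch: translate an FFmpeg H.264 reference list into the 32-entry
 * VAPictureH264 array of VASliceParameterBufferH264. Shape assumed. */
static void fill_vaapi_RefPicList(VAPictureH264 RefPicList[32],
                                  H264Ref *ref_list, unsigned int ref_count)
{
    unsigned int i;
    for (i = 0; i < ref_count; i++)
        fill_vaapi_pic(&RefPicList[i], ref_list[i].parent,
                       ref_list[i].reference);
    for (; i < 32; i++) {
        RefPicList[i].picture_id = VA_INVALID_ID;          /* unused entry */
        RefPicList[i].flags      = VA_PICTURE_H264_INVALID;
    }
}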
Example #11
File: vaapi_hevc.c  Project: 0day-ci/FFmpeg
/** End a hardware-decoded frame. */
static int vaapi_hevc_end_frame(AVCodecContext *avctx)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    HEVCContext * const h = avctx->priv_data;
    vaapi_hevc_frame_data *frame_data = h->ref->hwaccel_picture_private;
    int ret;

    ff_dlog(avctx, "vaapi_hevc_end_frame()\n");

    frame_data->last_slice_param->LongSliceFlags.fields.LastSliceOfPic = 1;

    ret = ff_vaapi_commit_slices(vactx);
    if (ret < 0)
        goto finish;

    ret = ff_vaapi_render_picture(vactx, ff_vaapi_get_surface_id(h->ref->frame));
    if (ret < 0)
        goto finish;

finish:
    ff_vaapi_common_end_frame(avctx);
    return ret;
}
Example #12
File: vaapi_hevc.c  Project: 0day-ci/FFmpeg
/** Decode the given HEVC slice with VA API. */
static int vaapi_hevc_decode_slice(AVCodecContext *avctx,
                                   const uint8_t  *buffer,
                                   uint32_t        size)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    HEVCContext * const h = avctx->priv_data;
    vaapi_hevc_frame_data *frame_data = h->ref->hwaccel_picture_private;
    SliceHeader * const sh = &h->sh;
    VASliceParameterBufferHEVC *slice_param;
    int i, list_idx;
    uint8_t nb_list = sh->slice_type == B_SLICE ? 2 : 1;

    if (sh->slice_type == I_SLICE)
        nb_list = 0;

    ff_dlog(avctx, "vaapi_hevc_decode_slice(): buffer %p, size %d\n", buffer, size);

    /* Fill in VASliceParameterBufferHEVC. */
    slice_param = (VASliceParameterBufferHEVC *)ff_vaapi_alloc_slice(vactx, buffer, size);
    if (!slice_param)
        return -1;

    frame_data->last_slice_param = slice_param;

    /* The base structure changed, so this has to be re-set in order to be valid for every byte order. */
    slice_param->slice_data_flag = VA_SLICE_DATA_FLAG_ALL;

    /* Add 1 to the bits count here to account for the byte_alignment bit, which is always at least one bit and is not otherwise accounted for. */
    slice_param->slice_data_byte_offset = (get_bits_count(&h->HEVClc->gb) + 1 + 7) / 8;

    slice_param->slice_segment_address = sh->slice_segment_addr;

    slice_param->LongSliceFlags.value = 0;
    slice_param->LongSliceFlags.fields.dependent_slice_segment_flag = sh->dependent_slice_segment_flag;
    slice_param->LongSliceFlags.fields.slice_type = sh->slice_type;
    slice_param->LongSliceFlags.fields.color_plane_id = sh->colour_plane_id;
    slice_param->LongSliceFlags.fields.mvd_l1_zero_flag = sh->mvd_l1_zero_flag;
    slice_param->LongSliceFlags.fields.cabac_init_flag = sh->cabac_init_flag;
    slice_param->LongSliceFlags.fields.slice_temporal_mvp_enabled_flag = sh->slice_temporal_mvp_enabled_flag;
    slice_param->LongSliceFlags.fields.slice_deblocking_filter_disabled_flag = sh->disable_deblocking_filter_flag;
    slice_param->LongSliceFlags.fields.collocated_from_l0_flag = sh->collocated_list == L0 ? 1 : 0;
    slice_param->LongSliceFlags.fields.slice_loop_filter_across_slices_enabled_flag = sh->slice_loop_filter_across_slices_enabled_flag;

    slice_param->LongSliceFlags.fields.slice_sao_luma_flag = sh->slice_sample_adaptive_offset_flag[0];
    if (h->ps.sps->chroma_format_idc) {
        slice_param->LongSliceFlags.fields.slice_sao_chroma_flag = sh->slice_sample_adaptive_offset_flag[1];
    }

    if (sh->slice_temporal_mvp_enabled_flag) {
        slice_param->collocated_ref_idx = sh->collocated_ref_idx;
    } else {
        slice_param->collocated_ref_idx = 0xFF;
    }

    slice_param->slice_qp_delta = sh->slice_qp_delta;
    slice_param->slice_cb_qp_offset = sh->slice_cb_qp_offset;
    slice_param->slice_cr_qp_offset = sh->slice_cr_qp_offset;
    slice_param->slice_beta_offset_div2 = sh->beta_offset / 2;
    slice_param->slice_tc_offset_div2 = sh->tc_offset / 2;

    if (sh->slice_type == I_SLICE) {
        slice_param->five_minus_max_num_merge_cand = 0;
    } else {
        slice_param->five_minus_max_num_merge_cand = 5 - sh->max_num_merge_cand;
    }

    slice_param->num_ref_idx_l0_active_minus1 = sh->nb_refs[L0] ? sh->nb_refs[L0] - 1 : 0;
    slice_param->num_ref_idx_l1_active_minus1 = sh->nb_refs[L1] ? sh->nb_refs[L1] - 1 : 0;

    memset(slice_param->RefPicList, 0xFF, sizeof(slice_param->RefPicList));

    /* h->ref->refPicList is updated before each slice is decoded */
    for (list_idx = 0; list_idx < nb_list; ++list_idx) {
        RefPicList *rpl = &h->ref->refPicList[list_idx];

        for (i = 0; i < rpl->nb_refs; ++i) {
            slice_param->RefPicList[list_idx][i] = get_ref_pic_index(h, rpl->ref[i]);
        }
    }

    return fill_pred_weight_table(h, slice_param, sh);
}
Example #13
static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
{
    Mpeg4DecContext *ctx = avctx->priv_data;
    MpegEncContext * const s = &ctx->m;
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    VAPictureParameterBufferMPEG4 *pic_param;
    VAIQMatrixBufferMPEG4 *iq_matrix;
    int i;

    ff_dlog(avctx, "vaapi_mpeg4_start_frame()\n");

    vactx->slice_param_size = sizeof(VASliceParameterBufferMPEG4);

    /* Fill in VAPictureParameterBufferMPEG4 */
    pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferMPEG4));
    if (!pic_param)
        return -1;
    pic_param->vop_width                                = s->width;
    pic_param->vop_height                               = s->height;
    pic_param->forward_reference_picture                = VA_INVALID_ID;
    pic_param->backward_reference_picture               = VA_INVALID_ID;
    pic_param->vol_fields.value                         = 0; /* reset all bits */
    pic_param->vol_fields.bits.short_video_header       = avctx->codec->id == AV_CODEC_ID_H263;
    pic_param->vol_fields.bits.chroma_format            = CHROMA_420;
    pic_param->vol_fields.bits.interlaced               = !s->progressive_sequence;
    pic_param->vol_fields.bits.obmc_disable             = 1;
    pic_param->vol_fields.bits.sprite_enable            = ctx->vol_sprite_usage;
    pic_param->vol_fields.bits.sprite_warping_accuracy  = s->sprite_warping_accuracy;
    pic_param->vol_fields.bits.quant_type               = s->mpeg_quant;
    pic_param->vol_fields.bits.quarter_sample           = s->quarter_sample;
    pic_param->vol_fields.bits.data_partitioned         = s->data_partitioning;
    pic_param->vol_fields.bits.reversible_vlc           = ctx->rvlc;
    pic_param->vol_fields.bits.resync_marker_disable    = !ctx->resync_marker;
    pic_param->no_of_sprite_warping_points              = ctx->num_sprite_warping_points;
    for (i = 0; i < ctx->num_sprite_warping_points && i < 3; i++) {
        pic_param->sprite_trajectory_du[i]              = ctx->sprite_traj[i][0];
        pic_param->sprite_trajectory_dv[i]              = ctx->sprite_traj[i][1];
    }
    pic_param->quant_precision                          = s->quant_precision;
    pic_param->vop_fields.value                         = 0; /* reset all bits */
    pic_param->vop_fields.bits.vop_coding_type          = s->pict_type - AV_PICTURE_TYPE_I;
    pic_param->vop_fields.bits.backward_reference_vop_coding_type = s->pict_type == AV_PICTURE_TYPE_B ? s->next_picture.f->pict_type - AV_PICTURE_TYPE_I : 0;
    pic_param->vop_fields.bits.vop_rounding_type        = s->no_rounding;
    pic_param->vop_fields.bits.intra_dc_vlc_thr         = mpeg4_get_intra_dc_vlc_thr(ctx);
    pic_param->vop_fields.bits.top_field_first          = s->top_field_first;
    pic_param->vop_fields.bits.alternate_vertical_scan_flag = s->alternate_scan;
    pic_param->vop_fcode_forward                        = s->f_code;
    pic_param->vop_fcode_backward                       = s->b_code;
    pic_param->vop_time_increment_resolution            = avctx->framerate.num;
    pic_param->num_macroblocks_in_gob                   = s->mb_width * H263_GOB_HEIGHT(s->height);
    pic_param->num_gobs_in_vop                          = (s->mb_width * s->mb_height) / pic_param->num_macroblocks_in_gob;
    pic_param->TRB                                      = s->pb_time;
    pic_param->TRD                                      = s->pp_time;

    if (s->pict_type == AV_PICTURE_TYPE_B)
        pic_param->backward_reference_picture = ff_vaapi_get_surface_id(s->next_picture.f);
    if (s->pict_type != AV_PICTURE_TYPE_I)
        pic_param->forward_reference_picture  = ff_vaapi_get_surface_id(s->last_picture.f);

    /* Fill in VAIQMatrixBufferMPEG4 */
    /* Only the first inverse quantisation method uses the weighting matrices */
    if (pic_param->vol_fields.bits.quant_type) {
        iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferMPEG4));
        if (!iq_matrix)
            return -1;
        iq_matrix->load_intra_quant_mat         = 1;
        iq_matrix->load_non_intra_quant_mat     = 1;

        for (i = 0; i < 64; i++) {
            int n = s->idsp.idct_permutation[ff_zigzag_direct[i]];
            iq_matrix->intra_quant_mat[i]       = s->intra_matrix[n];
            iq_matrix->non_intra_quant_mat[i]   = s->inter_matrix[n];
        }
    }
    return 0;
}
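
The double table lookup in the matrix loop is worth unpacking: FFmpeg keeps s->intra_matrix/s->inter_matrix pre-permuted for the active IDCT, so recovering the i-th coefficient in zigzag scan order composes the zigzag table with the IDCT permutation. The storage identity in the sketch below is the assumption here:

/* How the lookup recovers zigzag order:
 *
 *   FFmpeg storage:  matrix[idct_permutation[raster_pos]] = coeff(raster_pos)
 *   zigzag table:    ff_zigzag_direct[i] = raster position of zigzag index i
 *
 * hence:
 *
 *   coeff_in_zigzag_order(i) = matrix[idct_permutation[ff_zigzag_direct[i]]]
 *
 * which is exactly n = s->idsp.idct_permutation[ff_zigzag_direct[i]] followed
 * by s->intra_matrix[n] / s->inter_matrix[n] in the loop above. */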
Example #14
/** Initialize and start decoding a frame with VA API. */
static int vaapi_h264_start_frame(AVCodecContext          *avctx,
                                  av_unused const uint8_t *buffer,
                                  av_unused uint32_t       size)
{
    H264Context * const h = avctx->priv_data;
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    const PPS *pps = h->ps.pps;
    const SPS *sps = h->ps.sps;
    VAPictureParameterBufferH264 *pic_param;
    VAIQMatrixBufferH264 *iq_matrix;

    vactx->slice_param_size = sizeof(VASliceParameterBufferH264);

    /* Fill in VAPictureParameterBufferH264. */
    pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferH264));
    if (!pic_param)
        return -1;
    fill_vaapi_pic(&pic_param->CurrPic, h->cur_pic_ptr, h->picture_structure);
    if (fill_vaapi_ReferenceFrames(pic_param, h) < 0)
        return -1;
    pic_param->picture_width_in_mbs_minus1                      = h->mb_width - 1;
    pic_param->picture_height_in_mbs_minus1                     = h->mb_height - 1;
    pic_param->bit_depth_luma_minus8                            = sps->bit_depth_luma - 8;
    pic_param->bit_depth_chroma_minus8                          = sps->bit_depth_chroma - 8;
    pic_param->num_ref_frames                                   = sps->ref_frame_count;
    pic_param->seq_fields.value                                 = 0; /* reset all bits */
    pic_param->seq_fields.bits.chroma_format_idc                = sps->chroma_format_idc;
    pic_param->seq_fields.bits.residual_colour_transform_flag   = sps->residual_color_transform_flag; /* XXX: only for 4:4:4 high profile? */
    pic_param->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = sps->gaps_in_frame_num_allowed_flag;
    pic_param->seq_fields.bits.frame_mbs_only_flag              = sps->frame_mbs_only_flag;
    pic_param->seq_fields.bits.mb_adaptive_frame_field_flag     = sps->mb_aff;
    pic_param->seq_fields.bits.direct_8x8_inference_flag        = sps->direct_8x8_inference_flag;
    pic_param->seq_fields.bits.MinLumaBiPredSize8x8             = sps->level_idc >= 31; /* A.3.3.2 */
    pic_param->seq_fields.bits.log2_max_frame_num_minus4        = sps->log2_max_frame_num - 4;
    pic_param->seq_fields.bits.pic_order_cnt_type               = sps->poc_type;
    pic_param->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = sps->log2_max_poc_lsb - 4;
    pic_param->seq_fields.bits.delta_pic_order_always_zero_flag = sps->delta_pic_order_always_zero_flag;
    pic_param->num_slice_groups_minus1                          = pps->slice_group_count - 1;
    pic_param->slice_group_map_type                             = pps->mb_slice_group_map_type;
    pic_param->slice_group_change_rate_minus1                   = 0; /* XXX: unimplemented in FFmpeg */
    pic_param->pic_init_qp_minus26                              = pps->init_qp - 26;
    pic_param->pic_init_qs_minus26                              = pps->init_qs - 26;
    pic_param->chroma_qp_index_offset                           = pps->chroma_qp_index_offset[0];
    pic_param->second_chroma_qp_index_offset                    = pps->chroma_qp_index_offset[1];
    pic_param->pic_fields.value                                 = 0; /* reset all bits */
    pic_param->pic_fields.bits.entropy_coding_mode_flag         = pps->cabac;
    pic_param->pic_fields.bits.weighted_pred_flag               = pps->weighted_pred;
    pic_param->pic_fields.bits.weighted_bipred_idc              = pps->weighted_bipred_idc;
    pic_param->pic_fields.bits.transform_8x8_mode_flag          = pps->transform_8x8_mode;
    pic_param->pic_fields.bits.field_pic_flag                   = h->picture_structure != PICT_FRAME;
    pic_param->pic_fields.bits.constrained_intra_pred_flag      = pps->constrained_intra_pred;
    pic_param->pic_fields.bits.pic_order_present_flag           = pps->pic_order_present;
    pic_param->pic_fields.bits.deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present;
    pic_param->pic_fields.bits.redundant_pic_cnt_present_flag   = pps->redundant_pic_cnt_present;
    pic_param->pic_fields.bits.reference_pic_flag               = h->nal_ref_idc != 0;
    pic_param->frame_num                                        = h->poc.frame_num;

    /* Fill in VAIQMatrixBufferH264. */
    iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferH264));
    if (!iq_matrix)
        return -1;
    memcpy(iq_matrix->ScalingList4x4, pps->scaling_matrix4, sizeof(iq_matrix->ScalingList4x4));
    memcpy(iq_matrix->ScalingList8x8[0], pps->scaling_matrix8[0], sizeof(iq_matrix->ScalingList8x8[0]));
    memcpy(iq_matrix->ScalingList8x8[1], pps->scaling_matrix8[3], sizeof(iq_matrix->ScalingList8x8[0]));
    return 0;
}
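
These start_frame/decode_slice/end_frame callbacks plug into an AVHWAccel descriptor. A hedged sketch of how the H.264 entry of this FFmpeg generation plausibly wires them together (ff_vaapi_context_fini as the matching uninit is an assumption; the rest mirrors functions shown above):

/* Sketch: hwaccel descriptor tying the VA-API H.264 callbacks together.
 * Field values approximated; ff_vaapi_context_fini is assumed. */
AVHWAccel ff_h264_vaapi_hwaccel = {
    .name           = "h264_vaapi",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .pix_fmt        = AV_PIX_FMT_VAAPI,
    .start_frame    = vaapi_h264_start_frame,
    .end_frame      = vaapi_h264_end_frame,
    .decode_slice   = vaapi_h264_decode_slice,
    .init           = ff_vaapi_context_init,
    .uninit         = ff_vaapi_context_fini,
    .priv_data_size = sizeof(FFVAContext),
};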