Example no. 1
0
static int vaapi_mpeg4_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
{
    MpegEncContext * const s = avctx->priv_data;
    VASliceParameterBufferMPEG4 *slice_param;

    av_dlog(avctx, "vaapi_mpeg4_decode_slice(): buffer %p, size %d\n", buffer, size);

    /* video_plane_with_short_video_header() contains all GOBs
     * in-order, and this is what VA API (Intel backend) expects: only
     * a single slice param. So fake macroblock_number for FFmpeg so
     * that we don't call vaapi_mpeg4_decode_slice() again
     */
    if (avctx->codec->id == AV_CODEC_ID_H263)
        size = s->gb.buffer_end - buffer;

    /* Fill in VASliceParameterBufferMPEG4 */
    slice_param = (VASliceParameterBufferMPEG4 *)ff_vaapi_alloc_slice(avctx->hwaccel_context, buffer, size);
    if (!slice_param)
        return -1;
    slice_param->macroblock_offset      = get_bits_count(&s->gb) % 8;
    slice_param->macroblock_number      = s->mb_y * s->mb_width + s->mb_x;
    slice_param->quant_scale            = s->qscale;

    if (avctx->codec->id == AV_CODEC_ID_H263)
        s->mb_y = s->mb_height;

    return 0;
}
Example no. 2
0
static int vaapi_mpeg2_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
{
    MpegEncContext * const s = avctx->priv_data;
    VASliceParameterBufferMPEG2 *slice_param;
    GetBitContext gb;
    uint32_t start_code, quantiser_scale_code, intra_slice_flag, macroblock_offset;

    dprintf(avctx, "vaapi_mpeg2_decode_slice(): buffer %p, size %d\n", buffer, size);

    /* Determine macroblock_offset */
    init_get_bits(&gb, buffer, 8 * size);
    start_code = get_bits(&gb, 32);
    assert((start_code & 0xffffff00) == 0x00000100);
    quantiser_scale_code = get_bits(&gb, 5);
    intra_slice_flag = get_bits1(&gb);
    if (intra_slice_flag) {
        skip_bits(&gb, 8);
        while (get_bits1(&gb) != 0)
            skip_bits(&gb, 8);
    }
    macroblock_offset = get_bits_count(&gb);

    /* Fill in VASliceParameterBufferMPEG2 */
    slice_param = (VASliceParameterBufferMPEG2 *)ff_vaapi_alloc_slice(avctx->hwaccel_context, buffer, size);
    if (!slice_param)
        return -1;
    slice_param->macroblock_offset              = macroblock_offset;
    slice_param->slice_vertical_position        = s->mb_y;
    slice_param->quantiser_scale_code           = quantiser_scale_code;
    slice_param->intra_slice_flag               = intra_slice_flag;
    return 0;
}
Example no. 3
0
static int vaapi_vp9_decode_slice(AVCodecContext *avctx,
                                  const uint8_t  *buffer,
                                  uint32_t        size)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    const VP9SharedContext *h = avctx->priv_data;
    VASliceParameterBufferVP9 *slice_param;
    int i;

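    /* Fill in VASliceParameterBufferVP9. */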
    slice_param = (VASliceParameterBufferVP9*)ff_vaapi_alloc_slice(vactx, buffer, size);
    if (!slice_param)
        return -1;

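    /* Copy the per-segment parameters (reference flags, loop filter levels,
     * and quantiser scales) for all 8 segments. */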
    for (i = 0; i < 8; i++) {
        slice_param->seg_param[i].segment_flags.fields.segment_reference_enabled = h->h.segmentation.feat[i].ref_enabled;
        slice_param->seg_param[i].segment_flags.fields.segment_reference = h->h.segmentation.feat[i].ref_val;
        slice_param->seg_param[i].segment_flags.fields.segment_reference_skipped = h->h.segmentation.feat[i].skip_enabled;

        memcpy(slice_param->seg_param[i].filter_level, h->h.segmentation.feat[i].lflvl, sizeof(slice_param->seg_param[i].filter_level));

        slice_param->seg_param[i].luma_dc_quant_scale = h->h.segmentation.feat[i].qmul[0][0];
        slice_param->seg_param[i].luma_ac_quant_scale = h->h.segmentation.feat[i].qmul[0][1];
        slice_param->seg_param[i].chroma_dc_quant_scale = h->h.segmentation.feat[i].qmul[1][0];
        slice_param->seg_param[i].chroma_ac_quant_scale = h->h.segmentation.feat[i].qmul[1][1];
    }

    return 0;
}
Example no. 4
0
static int vaapi_mpeg4_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
{
    MpegEncContext * const s = avctx->priv_data;
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    VASliceParameterBufferMPEG4 *slice_param;

    /* Fill in VASliceParameterBufferMPEG4 */
    slice_param = (VASliceParameterBufferMPEG4 *)ff_vaapi_alloc_slice(vactx, buffer, size);
    if (!slice_param)
        return -1;
    slice_param->macroblock_offset      = get_bits_count(&s->gb) % 8;
    slice_param->macroblock_number      = 0;
    slice_param->quant_scale            = s->qscale;

    return 0;
}
Example no. 5
0
/** Decode the given H.264 slice with VA API. */
static int decode_slice(AVCodecContext *avctx,
                        const uint8_t  *buffer,
                        uint32_t        size)
{
    H264Context * const h = avctx->priv_data;
    MpegEncContext * const s = &h->s;
    VASliceParameterBufferH264 *slice_param;

    av_dlog(avctx, "decode_slice(): buffer %p, size %d\n", buffer, size);

    /* Fill in VASliceParameterBufferH264. */
    slice_param = (VASliceParameterBufferH264 *)ff_vaapi_alloc_slice(avctx->hwaccel_context, buffer, size);
    if (!slice_param)
        return -1;
    slice_param->slice_data_bit_offset          = get_bits_count(&h->s.gb) + 8; /* bit buffer started beyond nal_unit_type */
    slice_param->first_mb_in_slice              = (s->mb_y >> FIELD_OR_MBAFF_PICTURE) * s->mb_width + s->mb_x;
    slice_param->slice_type                     = ff_h264_get_slice_type(h);
    slice_param->direct_spatial_mv_pred_flag    = h->slice_type == AV_PICTURE_TYPE_B ? h->direct_spatial_mv_pred : 0;
    slice_param->num_ref_idx_l0_active_minus1   = h->list_count > 0 ? h->ref_count[0] - 1 : 0;
    slice_param->num_ref_idx_l1_active_minus1   = h->list_count > 1 ? h->ref_count[1] - 1 : 0;
    slice_param->cabac_init_idc                 = h->cabac_init_idc;
    slice_param->slice_qp_delta                 = s->qscale - h->pps.init_qp;
    slice_param->disable_deblocking_filter_idc  = h->deblocking_filter < 2 ? !h->deblocking_filter : h->deblocking_filter;
    slice_param->slice_alpha_c0_offset_div2     = h->slice_alpha_c0_offset / 2 - 26;
    slice_param->slice_beta_offset_div2         = h->slice_beta_offset     / 2 - 26;
    slice_param->luma_log2_weight_denom         = h->luma_log2_weight_denom;
    slice_param->chroma_log2_weight_denom       = h->chroma_log2_weight_denom;

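    /* Build the reference picture lists and prediction weight tables for both directions. */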
    fill_vaapi_RefPicList(slice_param->RefPicList0, h->ref_list[0], h->list_count > 0 ? h->ref_count[0] : 0);
    fill_vaapi_RefPicList(slice_param->RefPicList1, h->ref_list[1], h->list_count > 1 ? h->ref_count[1] : 0);

    fill_vaapi_plain_pred_weight_table(h, 0,
                                       &slice_param->luma_weight_l0_flag,   slice_param->luma_weight_l0,   slice_param->luma_offset_l0,
                                       &slice_param->chroma_weight_l0_flag, slice_param->chroma_weight_l0, slice_param->chroma_offset_l0);
    fill_vaapi_plain_pred_weight_table(h, 1,
                                       &slice_param->luma_weight_l1_flag,   slice_param->luma_weight_l1,   slice_param->luma_offset_l1,
                                       &slice_param->chroma_weight_l1_flag, slice_param->chroma_weight_l1, slice_param->chroma_offset_l1);
    return 0;
}
Example no. 6
0
static int vaapi_vc1_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
{
    VC1Context * const v = avctx->priv_data;
    MpegEncContext * const s = &v->s;
    VASliceParameterBufferVC1 *slice_param;

    av_dlog(avctx, "vaapi_vc1_decode_slice(): buffer %p, size %d\n", buffer, size);

    /* Current bit buffer is beyond any marker for VC-1, so skip it */
    if (avctx->codec_id == AV_CODEC_ID_VC1 && IS_MARKER(AV_RB32(buffer))) {
        buffer += 4;
        size -= 4;
    }

    /* Fill in VASliceParameterBufferVC1 */
    slice_param = (VASliceParameterBufferVC1 *)ff_vaapi_alloc_slice(avctx->hwaccel_context, buffer, size);
    if (!slice_param)
        return -1;
    slice_param->macroblock_offset       = get_bits_count(&s->gb);
    slice_param->slice_vertical_position = s->mb_y;
    return 0;
}
Example no. 7
0
/** Decode the given H.264 slice with VA API. */
static int vaapi_h264_decode_slice(AVCodecContext *avctx,
                                   const uint8_t  *buffer,
                                   uint32_t        size)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    H264Context * const h = avctx->priv_data;
    H264SliceContext *sl  = &h->slice_ctx[0];
    VASliceParameterBufferH264 *slice_param;

    /* Fill in VASliceParameterBufferH264. */
    slice_param = (VASliceParameterBufferH264 *)ff_vaapi_alloc_slice(vactx, buffer, size);
    if (!slice_param)
        return -1;
    slice_param->slice_data_bit_offset          = get_bits_count(&sl->gb);
    slice_param->first_mb_in_slice              = (sl->mb_y >> FIELD_OR_MBAFF_PICTURE(h)) * h->mb_width + sl->mb_x;
    slice_param->slice_type                     = ff_h264_get_slice_type(sl);
    slice_param->direct_spatial_mv_pred_flag    = sl->slice_type == AV_PICTURE_TYPE_B ? sl->direct_spatial_mv_pred : 0;
    slice_param->num_ref_idx_l0_active_minus1   = sl->list_count > 0 ? sl->ref_count[0] - 1 : 0;
    slice_param->num_ref_idx_l1_active_minus1   = sl->list_count > 1 ? sl->ref_count[1] - 1 : 0;
    slice_param->cabac_init_idc                 = sl->cabac_init_idc;
    slice_param->slice_qp_delta                 = sl->qscale - h->ps.pps->init_qp;
    slice_param->disable_deblocking_filter_idc  = sl->deblocking_filter < 2 ? !sl->deblocking_filter : sl->deblocking_filter;
    slice_param->slice_alpha_c0_offset_div2     = sl->slice_alpha_c0_offset / 2;
    slice_param->slice_beta_offset_div2         = sl->slice_beta_offset     / 2;
    slice_param->luma_log2_weight_denom         = sl->pwt.luma_log2_weight_denom;
    slice_param->chroma_log2_weight_denom       = sl->pwt.chroma_log2_weight_denom;

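    /* Build the reference picture lists and prediction weight tables for both directions. */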
    fill_vaapi_RefPicList(slice_param->RefPicList0, sl->ref_list[0], sl->list_count > 0 ? sl->ref_count[0] : 0);
    fill_vaapi_RefPicList(slice_param->RefPicList1, sl->ref_list[1], sl->list_count > 1 ? sl->ref_count[1] : 0);

    fill_vaapi_plain_pred_weight_table(h, 0,
                                       &slice_param->luma_weight_l0_flag,   slice_param->luma_weight_l0,   slice_param->luma_offset_l0,
                                       &slice_param->chroma_weight_l0_flag, slice_param->chroma_weight_l0, slice_param->chroma_offset_l0);
    fill_vaapi_plain_pred_weight_table(h, 1,
                                       &slice_param->luma_weight_l1_flag,   slice_param->luma_weight_l1,   slice_param->luma_offset_l1,
                                       &slice_param->chroma_weight_l1_flag, slice_param->chroma_weight_l1, slice_param->chroma_offset_l1);
    return 0;
}
Example no. 8
0
/** Decode the given HEVC slice with VA API. */
static int vaapi_hevc_decode_slice(AVCodecContext *avctx,
                                   const uint8_t  *buffer,
                                   uint32_t        size)
{
    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
    HEVCContext * const h = avctx->priv_data;
    vaapi_hevc_frame_data *frame_data = h->ref->hwaccel_picture_private;
    SliceHeader * const sh = &h->sh;
    VASliceParameterBufferHEVC *slice_param;
    int i, list_idx;
    uint8_t nb_list = sh->slice_type == B_SLICE ? 2 : 1;

    if (sh->slice_type == I_SLICE)
        nb_list = 0;

    ff_dlog(avctx, "vaapi_hevc_decode_slice(): buffer %p, size %d\n", buffer, size);

    /* Fill in VASliceParameterBufferHEVC. */
    slice_param = (VASliceParameterBufferHEVC *)ff_vaapi_alloc_slice(vactx, buffer, size);
    if (!slice_param)
        return -1;

    frame_data->last_slice_param = slice_param;

    /* The base structure changed, so this has to be re-set in order to be valid on every byte order. */
    slice_param->slice_data_flag = VA_SLICE_DATA_FLAG_ALL;

    /* Add 1 to the bit count here to account for the byte_alignment bit, which is always at least one bit and is not otherwise accounted for. */
    slice_param->slice_data_byte_offset = (get_bits_count(&h->HEVClc->gb) + 1 + 7) / 8;

    slice_param->slice_segment_address = sh->slice_segment_addr;

    slice_param->LongSliceFlags.value = 0;
    slice_param->LongSliceFlags.fields.dependent_slice_segment_flag = sh->dependent_slice_segment_flag;
    slice_param->LongSliceFlags.fields.slice_type = sh->slice_type;
    slice_param->LongSliceFlags.fields.color_plane_id = sh->colour_plane_id;
    slice_param->LongSliceFlags.fields.mvd_l1_zero_flag = sh->mvd_l1_zero_flag;
    slice_param->LongSliceFlags.fields.cabac_init_flag = sh->cabac_init_flag;
    slice_param->LongSliceFlags.fields.slice_temporal_mvp_enabled_flag = sh->slice_temporal_mvp_enabled_flag;
    slice_param->LongSliceFlags.fields.slice_deblocking_filter_disabled_flag = sh->disable_deblocking_filter_flag;
    slice_param->LongSliceFlags.fields.collocated_from_l0_flag = sh->collocated_list == L0 ? 1 : 0;
    slice_param->LongSliceFlags.fields.slice_loop_filter_across_slices_enabled_flag = sh->slice_loop_filter_across_slices_enabled_flag;

    slice_param->LongSliceFlags.fields.slice_sao_luma_flag = sh->slice_sample_adaptive_offset_flag[0];
    if (h->ps.sps->chroma_format_idc) {
        slice_param->LongSliceFlags.fields.slice_sao_chroma_flag = sh->slice_sample_adaptive_offset_flag[1];
    }

    if (sh->slice_temporal_mvp_enabled_flag) {
        slice_param->collocated_ref_idx = sh->collocated_ref_idx;
    } else {
        slice_param->collocated_ref_idx = 0xFF;
    }

    slice_param->slice_qp_delta = sh->slice_qp_delta;
    slice_param->slice_cb_qp_offset = sh->slice_cb_qp_offset;
    slice_param->slice_cr_qp_offset = sh->slice_cr_qp_offset;
    slice_param->slice_beta_offset_div2 = sh->beta_offset / 2;
    slice_param->slice_tc_offset_div2 = sh->tc_offset / 2;

    if (sh->slice_type == I_SLICE) {
        slice_param->five_minus_max_num_merge_cand = 0;
    } else {
        slice_param->five_minus_max_num_merge_cand = 5 - sh->max_num_merge_cand;
    }

    slice_param->num_ref_idx_l0_active_minus1 = sh->nb_refs[L0] ? sh->nb_refs[L0] - 1 : 0;
    slice_param->num_ref_idx_l1_active_minus1 = sh->nb_refs[L1] ? sh->nb_refs[L1] - 1 : 0;

    memset(slice_param->RefPicList, 0xFF, sizeof(slice_param->RefPicList));

    /* h->ref->refPicList is updated before each slice is decoded */
    for (list_idx = 0; list_idx < nb_list; ++list_idx) {
        RefPicList *rpl = &h->ref->refPicList[list_idx];

        for (i = 0; i < rpl->nb_refs; ++i) {
            slice_param->RefPicList[list_idx][i] = get_ref_pic_index(h, rpl->ref[i]);
        }
    }

    return fill_pred_weight_table(h, slice_param, sh);
}