Example #1
0
static void setup_token_decoder(VP8D_COMP *pbi,
                                const unsigned char* token_part_sizes)
{
    vp8_reader *bool_decoder = &pbi->mbc[0];
    unsigned int partition_idx;
    unsigned int fragment_idx;
    unsigned int num_token_partitions;
    const unsigned char *first_fragment_end = pbi->fragments.ptrs[0] +
                                          pbi->fragments.sizes[0];

    TOKEN_PARTITION multi_token_partition =
            (TOKEN_PARTITION)vp8_read_literal(&pbi->mbc[8], 2);
    if (!vp8dx_bool_error(&pbi->mbc[8]))
        pbi->common.multi_token_partition = multi_token_partition;
    num_token_partitions = 1 << pbi->common.multi_token_partition;

    /* Check for partitions within the fragments and unpack the fragments
     * so that each fragment pointer points to its corresponding partition. */
    for (fragment_idx = 0; fragment_idx < pbi->fragments.count; ++fragment_idx)
    {
        unsigned int fragment_size = pbi->fragments.sizes[fragment_idx];
        const unsigned char *fragment_end = pbi->fragments.ptrs[fragment_idx] +
                                            fragment_size;
        /* Special case for handling the first partition since we have already
         * read its size. */
        if (fragment_idx == 0)
        {
            /* Size of first partition + token partition sizes element */
            ptrdiff_t ext_first_part_size = token_part_sizes -
                pbi->fragments.ptrs[0] + 3 * (num_token_partitions - 1);
            if (fragment_size < (unsigned int)ext_first_part_size)
                vpx_internal_error(&pbi->common.error, VPX_CODEC_CORRUPT_FRAME,
                                   "Corrupted fragment size %d",
                                   fragment_size);
            fragment_size -= (unsigned int)ext_first_part_size;
            if (fragment_size > 0)
            {
                pbi->fragments.sizes[0] = (unsigned int)ext_first_part_size;
                /* The fragment contains an additional partition. Move to
                 * next. */
                fragment_idx++;
                pbi->fragments.ptrs[fragment_idx] = pbi->fragments.ptrs[0] +
                  pbi->fragments.sizes[0];
            }
        }
        /* Split the chunk into partitions read from the bitstream */
        while (fragment_size > 0)
        {
            ptrdiff_t partition_size = read_available_partition_size(
                                                 pbi,
                                                 token_part_sizes,
                                                 pbi->fragments.ptrs[fragment_idx],
                                                 first_fragment_end,
                                                 fragment_end,
                                                 fragment_idx - 1,
                                                 num_token_partitions);
            pbi->fragments.sizes[fragment_idx] = (unsigned int)partition_size;
            if (fragment_size < (unsigned int)partition_size)
                vpx_internal_error(&pbi->common.error, VPX_CODEC_CORRUPT_FRAME,
                                   "Corrupted fragment size %d",
                                   fragment_size);
            fragment_size -= (unsigned int)partition_size;
            assert(fragment_idx <= num_token_partitions);
            if (fragment_size > 0)
            {
                /* The fragment contains an additional partition.
                 * Move to next. */
                fragment_idx++;
                pbi->fragments.ptrs[fragment_idx] =
                    pbi->fragments.ptrs[fragment_idx - 1] + partition_size;
            }
        }
    }

    pbi->fragments.count = num_token_partitions + 1;

    for (partition_idx = 1; partition_idx < pbi->fragments.count; ++partition_idx)
    {
        if (vp8dx_start_decode(bool_decoder,
                               pbi->fragments.ptrs[partition_idx],
                               pbi->fragments.sizes[partition_idx],
                               pbi->decrypt_cb, pbi->decrypt_state))
            vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,
                               "Failed to allocate bool decoder %d",
                               partition_idx);

        bool_decoder++;
    }

#if CONFIG_MULTITHREAD
    /* Clamp number of decoder threads */
    if (pbi->decoding_thread_count > num_token_partitions - 1)
        pbi->decoding_thread_count = num_token_partitions - 1;
#endif
}
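
The (num_token_partitions - 1) explicit partition sizes that token_part_sizes
points at are packed as 3-byte little-endian values, which is where the
3 * (num_token_partitions - 1) term in the first-partition math above comes
from. A minimal sketch of decoding one such size; the helper name is
illustrative, not part of the libvpx API:

/* Hypothetical helper: decode one 3-byte little-endian partition size,
 * matching the 3 * (num_token_partitions - 1) bookkeeping above. */
static unsigned int read_24bit_le(const unsigned char *p)
{
    return (unsigned int)p[0]
         | ((unsigned int)p[1] << 8)
         | ((unsigned int)p[2] << 16);
}
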
Example #2
0
static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
                              unsigned int mb_idx)
{
    MB_PREDICTION_MODE mode;
    int i;
#if CONFIG_ERROR_CONCEALMENT
    int corruption_detected = 0;
#endif

    if (xd->mode_info_context->mbmi.mb_skip_coeff)
    {
        vp8_reset_mb_tokens_context(xd);
    }
    else if (!vp8dx_bool_error(xd->current_bc))
    {
        int eobtotal;
        eobtotal = vp8_decode_mb_tokens(pbi, xd);

        /* Special case:  Force the loopfilter to skip when eobtotal is zero */
        xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal==0);
    }

    mode = xd->mode_info_context->mbmi.mode;

    if (xd->segmentation_enabled)
        vp8_mb_init_dequantizer(pbi, xd);


#if CONFIG_ERROR_CONCEALMENT

    if(pbi->ec_active)
    {
        int throw_residual;
        /* When we have independent partitions we can apply residual even
         * though other partitions within the frame are corrupt.
         */
        throw_residual = (!pbi->independent_partitions &&
                          pbi->frame_corrupt_residual);
        throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc));

        if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual))
        {
            /* MB with corrupt residuals or corrupt mode/motion vectors.
             * Better to use the predictor as reconstruction.
             */
            pbi->frame_corrupt_residual = 1;
            vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
            vp8_conceal_corrupt_mb(xd);


            corruption_detected = 1;

            /* force idct to be skipped for B_PRED and use the
             * prediction only for reconstruction
             */
            vpx_memset(xd->eobs, 0, 25);
        }
    }
#endif

    /* do prediction */
    if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
    {
        vp8_build_intra_predictors_mbuv_s(xd,
                                          xd->recon_above[1],
                                          xd->recon_above[2],
                                          xd->recon_left[1],
                                          xd->recon_left[2],
                                          xd->recon_left_stride[1],
                                          xd->dst.u_buffer, xd->dst.v_buffer,
                                          xd->dst.uv_stride);

        if (mode != B_PRED)
        {
            vp8_build_intra_predictors_mby_s(xd,
                                                 xd->recon_above[0],
                                                 xd->recon_left[0],
                                                 xd->recon_left_stride[0],
                                                 xd->dst.y_buffer,
                                                 xd->dst.y_stride);
        }
        else
        {
            short *DQC = xd->dequant_y1;
            int dst_stride = xd->dst.y_stride;

            /* clear out residual eob info */
            if(xd->mode_info_context->mbmi.mb_skip_coeff)
                vpx_memset(xd->eobs, 0, 25);

            intra_prediction_down_copy(xd, xd->recon_above[0] + 16);

            for (i = 0; i < 16; i++)
            {
                BLOCKD *b = &xd->block[i];
                unsigned char *dst = xd->dst.y_buffer + b->offset;
                B_PREDICTION_MODE b_mode =
                    xd->mode_info_context->bmi[i].as_mode;
                unsigned char *Above = dst - dst_stride;
                unsigned char *yleft = dst - 1;
                int left_stride = dst_stride;
                unsigned char top_left = Above[-1];

                vp8_intra4x4_predict(Above, yleft, left_stride, b_mode,
                                     dst, dst_stride, top_left);

                if (xd->eobs[i])
                {
                    if (xd->eobs[i] > 1)
                    {
                        vp8_dequant_idct_add(b->qcoeff, DQC, dst, dst_stride);
                    }
                    else
                    {
                        vp8_dc_only_idct_add
                            (b->qcoeff[0] * DQC[0],
                                dst, dst_stride,
                                dst, dst_stride);
                        vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
                    }
                }
            }
        }
    }
    else
    {
        vp8_build_inter_predictors_mb(xd);
    }


#if CONFIG_ERROR_CONCEALMENT
    if (corruption_detected)
    {
        return;
    }
#endif

    if(!xd->mode_info_context->mbmi.mb_skip_coeff)
    {
        /* dequantization and idct */
        if (mode != B_PRED)
        {
            short *DQC = xd->dequant_y1;

            if (mode != SPLITMV)
            {
                BLOCKD *b = &xd->block[24];

                /* do 2nd order transform on the dc block */
                if (xd->eobs[24] > 1)
                {
                    vp8_dequantize_b(b, xd->dequant_y2);

                    vp8_short_inv_walsh4x4(&b->dqcoeff[0],
                        xd->qcoeff);
                    vpx_memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0]));
                }
                else
                {
                    b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0];
                    vp8_short_inv_walsh4x4_1(&b->dqcoeff[0],
                        xd->qcoeff);
                    vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
                }

                /* override the dc dequant constant in order to preserve the
                 * dc components
                 */
                DQC = xd->dequant_y1_dc;
            }

            vp8_dequant_idct_add_y_block
                            (xd->qcoeff, DQC,
                             xd->dst.y_buffer,
                             xd->dst.y_stride, xd->eobs);
        }

        vp8_dequant_idct_add_uv_block
                        (xd->qcoeff+16*16, xd->dequant_uv,
                         xd->dst.u_buffer, xd->dst.v_buffer,
                         xd->dst.uv_stride, xd->eobs+16);
    }
}
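
For context on the eobs[i] == 1 branch above: when only the DC coefficient of
a 4x4 block is coded, the inverse transform collapses to adding a single
rounded DC value to every pixel of the predictor, which is what
vp8_dc_only_idct_add does. A rough, self-contained sketch of that idea, not
the tuned libvpx kernel; the (dc + 4) >> 3 rounding is assumed to mirror the
final shift of the full IDCT:

/* Illustrative DC-only add: spread one rounded DC value over a 4x4 block. */
static unsigned char clamp_byte(int v)
{
    return (unsigned char)(v < 0 ? 0 : (v > 255 ? 255 : v));
}

static void dc_only_add_4x4(short dc,
                            const unsigned char *pred, int pred_stride,
                            unsigned char *dst, int dst_stride)
{
    int r, c;
    int a1 = (dc + 4) >> 3;

    for (r = 0; r < 4; r++)
    {
        for (c = 0; c < 4; c++)
            dst[c] = clamp_byte(pred[c] + a1);
        pred += pred_stride;
        dst += dst_stride;
    }
}
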
Example #3
0
static void decode_mb_rows(VP8D_COMP *pbi)
{
    VP8_COMMON *const pc = & pbi->common;
    MACROBLOCKD *const xd  = & pbi->mb;

    MODE_INFO *lf_mic = xd->mode_info_context;

    int ibc = 0;
    int num_part = 1 << pc->multi_token_partition;

    int recon_yoffset, recon_uvoffset;
    int mb_row, mb_col;
    int mb_idx = 0;

    YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME];

    int recon_y_stride = yv12_fb_new->y_stride;
    int recon_uv_stride = yv12_fb_new->uv_stride;

    unsigned char *ref_buffer[MAX_REF_FRAMES][3];
    unsigned char *dst_buffer[3];
    unsigned char *lf_dst[3];
    unsigned char *eb_dst[3];
    int i;
    int ref_fb_corrupted[MAX_REF_FRAMES];

    ref_fb_corrupted[INTRA_FRAME] = 0;

    for(i = 1; i < MAX_REF_FRAMES; i++)
    {
        YV12_BUFFER_CONFIG *this_fb = pbi->dec_fb_ref[i];

        ref_buffer[i][0] = this_fb->y_buffer;
        ref_buffer[i][1] = this_fb->u_buffer;
        ref_buffer[i][2] = this_fb->v_buffer;

        ref_fb_corrupted[i] = this_fb->corrupted;
    }

    /* Set up the buffer pointers */
    eb_dst[0] = lf_dst[0] = dst_buffer[0] = yv12_fb_new->y_buffer;
    eb_dst[1] = lf_dst[1] = dst_buffer[1] = yv12_fb_new->u_buffer;
    eb_dst[2] = lf_dst[2] = dst_buffer[2] = yv12_fb_new->v_buffer;

    xd->up_available = 0;

    /* Initialize the loop filter for this frame. */
    if(pc->filter_level)
        vp8_loop_filter_frame_init(pc, xd, pc->filter_level);

    vp8_setup_intra_recon_top_line(yv12_fb_new);

    /* Decode the individual macro block */
    for (mb_row = 0; mb_row < pc->mb_rows; mb_row++)
    {
        if (num_part > 1)
        {
            xd->current_bc = & pbi->mbc[ibc];
            ibc++;

            if (ibc == num_part)
                ibc = 0;
        }

        recon_yoffset = mb_row * recon_y_stride * 16;
        recon_uvoffset = mb_row * recon_uv_stride * 8;

        /* reset contexts */
        xd->above_context = pc->above_context;
        vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));

        xd->left_available = 0;

        xd->mb_to_top_edge = -((mb_row * 16) << 3);
        xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;

        xd->recon_above[0] = dst_buffer[0] + recon_yoffset;
        xd->recon_above[1] = dst_buffer[1] + recon_uvoffset;
        xd->recon_above[2] = dst_buffer[2] + recon_uvoffset;

        xd->recon_left[0] = xd->recon_above[0] - 1;
        xd->recon_left[1] = xd->recon_above[1] - 1;
        xd->recon_left[2] = xd->recon_above[2] - 1;

        xd->recon_above[0] -= xd->dst.y_stride;
        xd->recon_above[1] -= xd->dst.uv_stride;
        xd->recon_above[2] -= xd->dst.uv_stride;

        /* TODO: move to outside row loop */
        xd->recon_left_stride[0] = xd->dst.y_stride;
        xd->recon_left_stride[1] = xd->dst.uv_stride;

        setup_intra_recon_left(xd->recon_left[0], xd->recon_left[1],
                               xd->recon_left[2], xd->dst.y_stride,
                               xd->dst.uv_stride);

        for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
        {
            /* Distance of Mb to the various image edges.
             * These are specified to 8th pel as they are always compared to values
             * that are in 1/8th pel units
             */
            xd->mb_to_left_edge = -((mb_col * 16) << 3);
            xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;

#if CONFIG_ERROR_CONCEALMENT
            {
                int corrupt_residual = (!pbi->independent_partitions &&
                                       pbi->frame_corrupt_residual) ||
                                       vp8dx_bool_error(xd->current_bc);
                if (pbi->ec_active &&
                    xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME &&
                    corrupt_residual)
                {
                    /* We have an intra block with corrupt coefficients, better to
                     * conceal with an inter block. Interpolate MVs from neighboring
                     * MBs.
                     *
                     * Note that for the first mb with corrupt residual in a frame,
                     * we might not discover that before decoding the residual. That
                     * happens after this check, and therefore no inter concealment
                     * will be done.
                     */
                    vp8_interpolate_motion(xd,
                                           mb_row, mb_col,
                                           pc->mb_rows, pc->mb_cols,
                                           pc->mode_info_stride);
                }
            }
#endif

            xd->dst.y_buffer = dst_buffer[0] + recon_yoffset;
            xd->dst.u_buffer = dst_buffer[1] + recon_uvoffset;
            xd->dst.v_buffer = dst_buffer[2] + recon_uvoffset;

            if (xd->mode_info_context->mbmi.ref_frame >= LAST_FRAME)
            {
                MV_REFERENCE_FRAME ref = xd->mode_info_context->mbmi.ref_frame;
                xd->pre.y_buffer = ref_buffer[ref][0] + recon_yoffset;
                xd->pre.u_buffer = ref_buffer[ref][1] + recon_uvoffset;
                xd->pre.v_buffer = ref_buffer[ref][2] + recon_uvoffset;
            }
            else
            {
                /* ref_frame is INTRA_FRAME, so the pre buffers should not be
                 * used. */
                xd->pre.y_buffer = 0;
                xd->pre.u_buffer = 0;
                xd->pre.v_buffer = 0;
            }

            /* propagate errors from reference frames */
            xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame];

            decode_macroblock(pbi, xd, mb_idx);

            mb_idx++;
            xd->left_available = 1;

            /* check if the boolean decoder has suffered an error */
            xd->corrupted |= vp8dx_bool_error(xd->current_bc);

            xd->recon_above[0] += 16;
            xd->recon_above[1] += 8;
            xd->recon_above[2] += 8;
            xd->recon_left[0] += 16;
            xd->recon_left[1] += 8;
            xd->recon_left[2] += 8;

            recon_yoffset += 16;
            recon_uvoffset += 8;

            ++xd->mode_info_context;  /* next mb */

            xd->above_context++;
        }

        /* adjust to the next row of mbs */
        vp8_extend_mb_row(yv12_fb_new, xd->dst.y_buffer + 16,
                          xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);

        ++xd->mode_info_context;      /* skip prediction column */
        xd->up_available = 1;

        if(pc->filter_level)
        {
            if(mb_row > 0)
            {
                if (pc->filter_type == NORMAL_LOOPFILTER)
                    vp8_loop_filter_row_normal(pc, lf_mic, mb_row-1,
                                               recon_y_stride, recon_uv_stride,
                                               lf_dst[0], lf_dst[1], lf_dst[2]);
                else
                    vp8_loop_filter_row_simple(pc, lf_mic, mb_row-1,
                                               recon_y_stride, recon_uv_stride,
                                               lf_dst[0], lf_dst[1], lf_dst[2]);
                if(mb_row > 1)
                {
                    yv12_extend_frame_left_right_c(yv12_fb_new,
                                                   eb_dst[0],
                                                   eb_dst[1],
                                                   eb_dst[2]);

                    eb_dst[0] += recon_y_stride  * 16;
                    eb_dst[1] += recon_uv_stride *  8;
                    eb_dst[2] += recon_uv_stride *  8;
                }

                lf_dst[0] += recon_y_stride  * 16;
                lf_dst[1] += recon_uv_stride *  8;
                lf_dst[2] += recon_uv_stride *  8;
                lf_mic += pc->mb_cols;
                lf_mic++;         /* Skip border mb */
            }
        }
        else
        {
            if(mb_row > 0)
            {
                yv12_extend_frame_left_right_c(yv12_fb_new,
                                               eb_dst[0],
                                               eb_dst[1],
                                               eb_dst[2]);
                eb_dst[0] += recon_y_stride  * 16;
                eb_dst[1] += recon_uv_stride *  8;
                eb_dst[2] += recon_uv_stride *  8;
            }
        }
    }

    if(pc->filter_level)
    {
        if (pc->filter_type == NORMAL_LOOPFILTER)
            vp8_loop_filter_row_normal(pc, lf_mic, mb_row-1, recon_y_stride,
                                       recon_uv_stride, lf_dst[0], lf_dst[1],
                                       lf_dst[2]);
        else
            vp8_loop_filter_row_simple(pc, lf_mic, mb_row-1, recon_y_stride,
                                       recon_uv_stride, lf_dst[0], lf_dst[1],
                                       lf_dst[2]);

        yv12_extend_frame_left_right_c(yv12_fb_new,
                                       eb_dst[0],
                                       eb_dst[1],
                                       eb_dst[2]);
        eb_dst[0] += recon_y_stride  * 16;
        eb_dst[1] += recon_uv_stride *  8;
        eb_dst[2] += recon_uv_stride *  8;
    }
    yv12_extend_frame_left_right_c(yv12_fb_new,
                                   eb_dst[0],
                                   eb_dst[1],
                                   eb_dst[2]);
    yv12_extend_frame_top_c(yv12_fb_new);
    yv12_extend_frame_bottom_c(yv12_fb_new);

}
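
The mb_to_*_edge values computed in the row and column loops above are plain
pixel distances shifted left by 3, i.e. expressed in the 1/8-pel units the
comment mentions. A standalone worked example of that arithmetic, using
made-up frame dimensions:

/* For mb_row = 0, mb_col = 2 in a 10x8-MB frame this prints
 * left=-256 right=896 top=0 bottom=896 (all in 1/8-pel units). */
#include <stdio.h>

int main(void)
{
    int mb_rows = 8, mb_cols = 10;
    int mb_row = 0, mb_col = 2;

    int mb_to_left_edge   = -((mb_col * 16) << 3);
    int mb_to_right_edge  = ((mb_cols - 1 - mb_col) * 16) << 3;
    int mb_to_top_edge    = -((mb_row * 16) << 3);
    int mb_to_bottom_edge = ((mb_rows - 1 - mb_row) * 16) << 3;

    printf("left=%d right=%d top=%d bottom=%d\n",
           mb_to_left_edge, mb_to_right_edge,
           mb_to_top_edge, mb_to_bottom_edge);
    return 0;
}
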
Example #4
0
static void
decode_mb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mb_row, MACROBLOCKD *xd)
{
    int recon_yoffset, recon_uvoffset;
    int mb_col;
    int ref_fb_idx = pc->lst_fb_idx;
    int dst_fb_idx = pc->new_fb_idx;
    int recon_y_stride = pc->yv12_fb[ref_fb_idx].y_stride;
    int recon_uv_stride = pc->yv12_fb[ref_fb_idx].uv_stride;

    vpx_memset(&pc->left_context, 0, sizeof(pc->left_context));
    recon_yoffset = mb_row * recon_y_stride * 16;
    recon_uvoffset = mb_row * recon_uv_stride * 8;
    /* reset above block coeffs */

    xd->above_context = pc->above_context;
    xd->up_available = (mb_row != 0);

    xd->mb_to_top_edge = -((mb_row * 16) << 3);
    xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;

    for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
    {
        /* Distance of Mb to the various image edges.
         * These are specified to 8th pel as they are always compared to values
         * that are in 1/8th pel units
         */
        xd->mb_to_left_edge = -((mb_col * 16) << 3);
        xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;

#if CONFIG_ERROR_CONCEALMENT
        {
            int corrupt_residual = (!pbi->independent_partitions &&
                                   pbi->frame_corrupt_residual) ||
                                   vp8dx_bool_error(xd->current_bc);
            if (pbi->ec_active &&
                xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME &&
                corrupt_residual)
            {
                /* We have an intra block with corrupt coefficients, better to
                 * conceal with an inter block. Interpolate MVs from neighboring
                 * MBs.
                 *
                 * Note that for the first mb with corrupt residual in a frame,
                 * we might not discover that before decoding the residual. That
                 * happens after this check, and therefore no inter concealment
                 * will be done.
                 */
                vp8_interpolate_motion(xd,
                                       mb_row, mb_col,
                                       pc->mb_rows, pc->mb_cols,
                                       pc->mode_info_stride);
            }
        }
#endif

        xd->dst.y_buffer = pc->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
        xd->dst.u_buffer = pc->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
        xd->dst.v_buffer = pc->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;

        xd->left_available = (mb_col != 0);

        /* Select the appropriate reference frame for this MB */
        if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
            ref_fb_idx = pc->lst_fb_idx;
        else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
            ref_fb_idx = pc->gld_fb_idx;
        else
            ref_fb_idx = pc->alt_fb_idx;

        xd->pre.y_buffer = pc->yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
        xd->pre.u_buffer = pc->yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
        xd->pre.v_buffer = pc->yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;

        if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME)
        {
            /* propagate errors from reference frames */
            xd->corrupted |= pc->yv12_fb[ref_fb_idx].corrupted;
        }

        decode_macroblock(pbi, xd, mb_row * pc->mb_cols  + mb_col);

        /* check if the boolean decoder has suffered an error */
        xd->corrupted |= vp8dx_bool_error(xd->current_bc);

        recon_yoffset += 16;
        recon_uvoffset += 8;

        ++xd->mode_info_context;  /* next mb */

        xd->above_context++;

    }

    /* adjust to the next row of mbs */
    vp8_extend_mb_row(
        &pc->yv12_fb[dst_fb_idx],
        xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, xd->dst.v_buffer + 8
    );

    ++xd->mode_info_context;      /* skip prediction column */
}
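
The per-MB if/else chain that selects ref_fb_idx above is re-evaluated for
every macroblock; the later decode_mb_rows() shown in Example #6 hoists it
into a per-frame lookup table (ref_fb_index[]). A minimal sketch of that
idea, with placeholder enum names standing in for the real LAST_FRAME /
GOLDEN_FRAME / ALTREF_FRAME constants and the pc->*_fb_idx fields:

/* Illustrative per-frame reference-index table; names and values are
 * placeholders, not the libvpx definitions. */
enum { REF_INTRA = 0, REF_LAST = 1, REF_GOLDEN = 2, REF_ALTREF = 3, NUM_REF = 4 };

static void build_ref_fb_index(int ref_fb_index[NUM_REF],
                               int lst_fb_idx, int gld_fb_idx, int alt_fb_idx)
{
    ref_fb_index[REF_LAST]   = lst_fb_idx;
    ref_fb_index[REF_GOLDEN] = gld_fb_idx;
    ref_fb_index[REF_ALTREF] = alt_fb_idx;
    ref_fb_index[REF_INTRA]  = -1;  /* intra MBs never read a reference buffer */
}
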
Example #5
0
static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
                              unsigned int mb_idx)
{
    MB_PREDICTION_MODE mode;
    int i;
    int corruption_detected = 0;

    int eobtotal = 0;
    if (xd->mode_info_context->mbmi.mb_skip_coeff)
    {
        vp8_reset_mb_tokens_context(xd);
    }
    else if (!vp8dx_bool_error(xd->current_bc))
    {
        eobtotal = vp8_decode_mb_tokens(pbi, xd);

        /* Special case:  Force the loopfilter to skip when eobtotal is zero */
        xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal==0);
    }

    mode = xd->mode_info_context->mbmi.mode;

    if (xd->segmentation_enabled)
        mb_init_dequantizer(pbi, xd);

#if PROFILE_OUTPUT
    if (xd->frame_type == KEY_FRAME)
        printf("Intra-Coded MB\n");
    else
    {
        if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
            printf("Intra-Coded Inter-Frame MB\n");
        else
            printf("Inter-Coded MB\n");
    }
#endif

#if CONFIG_OPENCL && (ENABLE_CL_IDCT_DEQUANT || ENABLE_CL_SUBPIXEL)
    /* If OpenCL is enabled and initialized, use the CL-specific decoder for
     * the remainder of MB decoding. */
    if (cl_initialized == CL_SUCCESS){
        vp8_decode_macroblock_cl(pbi, xd, eobtotal);
        return;
    }
#endif

#if CONFIG_ERROR_CONCEALMENT

    if(pbi->ec_active)
    {
        int throw_residual;
        /* When we have independent partitions we can apply residual even
         * though other partitions within the frame are corrupt.
         */
        throw_residual = (!pbi->independent_partitions &&
                          pbi->frame_corrupt_residual);
        throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc));

        if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual))
        {
            /* MB with corrupt residuals or corrupt mode/motion vectors.
             * Better to use the predictor as reconstruction.
             */
            pbi->frame_corrupt_residual = 1;
            vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
            vp8_conceal_corrupt_mb(xd);


            corruption_detected = 1;

            /* force idct to be skipped for B_PRED and use the
             * prediction only for reconstruction
             */
            vpx_memset(xd->eobs, 0, 25);
        }
    }
#endif


    /* do prediction */
    if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
    {
        vp8_build_intra_predictors_mbuv_s(xd);

        if (mode != B_PRED)
        {
            vp8_build_intra_predictors_mby_s(xd);
        }
        else
        {
            short *DQC = xd->dequant_y1;

            /* clear out residual eob info */
            if(xd->mode_info_context->mbmi.mb_skip_coeff)
                vpx_memset(xd->eobs, 0, 25);

            vp8_intra_prediction_down_copy(xd);

            for (i = 0; i < 16; i++)
            {
                BLOCKD *b = &xd->block[i];
                int b_mode = xd->mode_info_context->bmi[i].as_mode;

                vp8_intra4x4_predict
                              ( *(b->base_dst) + b->dst, b->dst_stride, b_mode,
                                *(b->base_dst) + b->dst, b->dst_stride );

                if (xd->eobs[i])
                {
                    if (xd->eobs[i] > 1)
                    {
                        vp8_dequant_idct_add(&b->qcoeff_base[b->qcoeff_offset],
                                             DQC, *(b->base_dst) + b->dst,
                                             b->dst_stride);
                    }
                    else
                    {
                        vp8_dc_only_idct_add
                            (b->qcoeff_base[b->qcoeff_offset] * DQC[0],
                            *(b->base_dst) + b->dst, b->dst_stride,
                            *(b->base_dst) + b->dst, b->dst_stride);
                        ((int *)&b->qcoeff_base[b->qcoeff_offset])[0] = 0;
                    }
                }
            }
        }
    }
    else
    {
        vp8_build_inter_predictors_mb(xd);
    }


#if CONFIG_ERROR_CONCEALMENT
    if (corruption_detected)
    {
        return;
    }
#endif

    if(!xd->mode_info_context->mbmi.mb_skip_coeff)
    {
        /* dequantization and idct */
        if (mode != B_PRED)
        {
            short *DQC = xd->dequant_y1;

            if (mode != SPLITMV)
            {
                BLOCKD *b = &xd->block[24];
                short *qcoeff = &b->qcoeff_base[b->qcoeff_offset];

                /* do 2nd order transform on the dc block */
                if (xd->eobs[24] > 1)
                {
                    vp8_dequantize_b(b, xd->dequant_y2);

                    vp8_short_inv_walsh4x4(&b->dqcoeff_base[b->dqcoeff_offset],
                        xd->qcoeff);
                    ((int *)qcoeff)[0] = 0;
                    ((int *)qcoeff)[1] = 0;
                    ((int *)qcoeff)[2] = 0;
                    ((int *)qcoeff)[3] = 0;
                    ((int *)qcoeff)[4] = 0;
                    ((int *)qcoeff)[5] = 0;
                    ((int *)qcoeff)[6] = 0;
                    ((int *)qcoeff)[7] = 0;
                }
                else
                {
                    b->dqcoeff_base[b->dqcoeff_offset] = qcoeff[0] * xd->dequant_y2[0];
                    vp8_short_inv_walsh4x4_1(&b->dqcoeff_base[b->dqcoeff_offset],
                        xd->qcoeff);
                    ((int *)qcoeff)[0] = 0;
                }

                /* override the dc dequant constant in order to preserve the
                 * dc components
                 */
                DQC = xd->dequant_y1_dc;
            }

            vp8_dequant_idct_add_y_block
                            (xd->qcoeff, DQC,
                             xd->dst.y_buffer,
                             xd->dst.y_stride, xd->eobs);
        }

        vp8_dequant_idct_add_uv_block
                        (xd->qcoeff+16*16, xd->dequant_uv,
                         xd->dst.u_buffer, xd->dst.v_buffer,
                         xd->dst.uv_stride, xd->eobs+16);
    }
}
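
A note on the coefficient layout the code above relies on: xd->qcoeff holds
25 blocks of 16 coefficients (16 luma, 8 chroma, plus the second-order Y2
block), which is why the UV pass starts at xd->qcoeff + 16*16 and why the
inverse WHT writes each recovered luma DC back at a stride of 16. A sketch of
that scatter step, with the layout treated as an assumption drawn from those
offsets:

/* Illustrative scatter of the 16 luma DC values recovered from the Y2 block
 * into the per-4x4 DC slots of a 25 * 16 coefficient buffer. */
static void scatter_y2_dc(const short y2_dc[16], short mb_coeff[25 * 16])
{
    int i;

    for (i = 0; i < 16; i++)
        mb_coeff[i * 16] = y2_dc[i];
}
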
Example #6
0
static void decode_mb_rows(VP8D_COMP *pbi)
{
    VP8_COMMON *const pc = & pbi->common;
    MACROBLOCKD *const xd  = & pbi->mb;

    int ibc = 0;
    int num_part = 1 << pc->multi_token_partition;

    int recon_yoffset, recon_uvoffset;
    int mb_row, mb_col;
    int mb_idx = 0;
    int dst_fb_idx = pc->new_fb_idx;
    int recon_y_stride = pc->yv12_fb[dst_fb_idx].y_stride;
    int recon_uv_stride = pc->yv12_fb[dst_fb_idx].uv_stride;

    unsigned char *ref_buffer[MAX_REF_FRAMES][3];
    unsigned char *dst_buffer[3];
    int i;
    int ref_fb_index[MAX_REF_FRAMES];
    int ref_fb_corrupted[MAX_REF_FRAMES];

    ref_fb_corrupted[INTRA_FRAME] = 0;

    ref_fb_index[LAST_FRAME]    = pc->lst_fb_idx;
    ref_fb_index[GOLDEN_FRAME]  = pc->gld_fb_idx;
    ref_fb_index[ALTREF_FRAME]  = pc->alt_fb_idx;

    for(i = 1; i < MAX_REF_FRAMES; i++)
    {
        ref_buffer[i][0] = pc->yv12_fb[ref_fb_index[i]].y_buffer;
        ref_buffer[i][1] = pc->yv12_fb[ref_fb_index[i]].u_buffer;
        ref_buffer[i][2] = pc->yv12_fb[ref_fb_index[i]].v_buffer;

        ref_fb_corrupted[i] = pc->yv12_fb[ref_fb_index[i]].corrupted;
    }

    dst_buffer[0] = pc->yv12_fb[dst_fb_idx].y_buffer;
    dst_buffer[1] = pc->yv12_fb[dst_fb_idx].u_buffer;
    dst_buffer[2] = pc->yv12_fb[dst_fb_idx].v_buffer;

    xd->up_available = 0;

    /* Decode the individual macro block */
    for (mb_row = 0; mb_row < pc->mb_rows; mb_row++)
    {
        if (num_part > 1)
        {
            xd->current_bc = & pbi->mbc[ibc];
            ibc++;

            if (ibc == num_part)
                ibc = 0;
        }

        recon_yoffset = mb_row * recon_y_stride * 16;
        recon_uvoffset = mb_row * recon_uv_stride * 8;

        /* reset contexts */
        xd->above_context = pc->above_context;
        vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));

        xd->left_available = 0;

        xd->mb_to_top_edge = -((mb_row * 16) << 3);
        xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;

        xd->recon_above[0] = dst_buffer[0] + recon_yoffset;
        xd->recon_above[1] = dst_buffer[1] + recon_uvoffset;
        xd->recon_above[2] = dst_buffer[2] + recon_uvoffset;

        xd->recon_left[0] = xd->recon_above[0] - 1;
        xd->recon_left[1] = xd->recon_above[1] - 1;
        xd->recon_left[2] = xd->recon_above[2] - 1;

        xd->recon_above[0] -= xd->dst.y_stride;
        xd->recon_above[1] -= xd->dst.uv_stride;
        xd->recon_above[2] -= xd->dst.uv_stride;

        /* TODO: move to outside row loop */
        xd->recon_left_stride[0] = xd->dst.y_stride;
        xd->recon_left_stride[1] = xd->dst.uv_stride;

        for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
        {
            /* Distance of Mb to the various image edges.
             * These are specified to 8th pel as they are always compared to values
             * that are in 1/8th pel units
             */
            xd->mb_to_left_edge = -((mb_col * 16) << 3);
            xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;

#if CONFIG_ERROR_CONCEALMENT
            {
                int corrupt_residual = (!pbi->independent_partitions &&
                                       pbi->frame_corrupt_residual) ||
                                       vp8dx_bool_error(xd->current_bc);
                if (pbi->ec_active &&
                    xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME &&
                    corrupt_residual)
                {
                    /* We have an intra block with corrupt coefficients, better to
                     * conceal with an inter block. Interpolate MVs from neighboring
                     * MBs.
                     *
                     * Note that for the first mb with corrupt residual in a frame,
                     * we might not discover that before decoding the residual. That
                     * happens after this check, and therefore no inter concealment
                     * will be done.
                     */
                    vp8_interpolate_motion(xd,
                                           mb_row, mb_col,
                                           pc->mb_rows, pc->mb_cols,
                                           pc->mode_info_stride);
                }
            }
#endif

            xd->dst.y_buffer = dst_buffer[0] + recon_yoffset;
            xd->dst.u_buffer = dst_buffer[1] + recon_uvoffset;
            xd->dst.v_buffer = dst_buffer[2] + recon_uvoffset;

            {
                MV_REFERENCE_FRAME ref = xd->mode_info_context->mbmi.ref_frame;

                xd->pre.y_buffer = ref_buffer[ref][0] + recon_yoffset;
                xd->pre.u_buffer = ref_buffer[ref][1] + recon_uvoffset;
                xd->pre.v_buffer = ref_buffer[ref][2] + recon_uvoffset;

                /* propagate errors from reference frames */
                xd->corrupted |= ref_fb_corrupted[ref];
            }

            decode_macroblock(pbi, xd, mb_idx);

            mb_idx++;
            xd->left_available = 1;

            /* check if the boolean decoder has suffered an error */
            xd->corrupted |= vp8dx_bool_error(xd->current_bc);

            xd->recon_above[0] += 16;
            xd->recon_above[1] += 8;
            xd->recon_above[2] += 8;
            xd->recon_left[0] += 16;
            xd->recon_left[1] += 8;
            xd->recon_left[2] += 8;


            recon_yoffset += 16;
            recon_uvoffset += 8;

            ++xd->mode_info_context;  /* next mb */

            xd->above_context++;

        }

        /* adjust to the next row of mbs */
        vp8_extend_mb_row(
            &pc->yv12_fb[dst_fb_idx],
            xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, xd->dst.v_buffer + 8
        );

        ++xd->mode_info_context;      /* skip prediction column */
        xd->up_available = 1;

    }
}
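
When more than one token partition is present, the ibc counter above hands
macroblock rows out to the partitions round-robin, so row r is decoded with
bool decoder r % num_part. A trivial restatement of that mapping:

/* Row-to-partition mapping implied by the ibc counter in the row loop. */
static int partition_for_row(int mb_row, int num_part)
{
    return mb_row % num_part;
}
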
static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
                              unsigned int mb_idx)
{
    int eobtotal = 0;
    int throw_residual = 0;
    MB_PREDICTION_MODE mode;
    int i;

    if (xd->mode_info_context->mbmi.mb_skip_coeff)
    {
        vp8_reset_mb_tokens_context(xd);
    }
    else if (!vp8dx_bool_error(xd->current_bc))
    {
        eobtotal = vp8_decode_mb_tokens(pbi, xd);
    }



    mode = xd->mode_info_context->mbmi.mode;

    if (eobtotal == 0 && mode != B_PRED && mode != SPLITMV &&
            !vp8dx_bool_error(xd->current_bc))
    {
        /* Special case:  Force the loopfilter to skip when eobtotal and
         * mb_skip_coeff are zero.
         */
        xd->mode_info_context->mbmi.mb_skip_coeff = 1;

        skip_recon_mb(pbi, xd);
        return;
    }

    if (xd->segmentation_enabled)
        mb_init_dequantizer(pbi, xd);

    /* do prediction */
    if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
    {
        RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_mbuv_s)(xd);

        if (mode != B_PRED)
        {
            RECON_INVOKE(&pbi->common.rtcd.recon,
                         build_intra_predictors_mby_s)(xd);
        } else {
            vp8_intra_prediction_down_copy(xd);
        }
    }
    else
    {
        vp8_build_inter_predictors_mb(xd);
    }
    /* When we have independent partitions we can apply residual even
     * though other partitions within the frame are corrupt.
     */
    throw_residual = (!pbi->independent_partitions &&
                      pbi->frame_corrupt_residual);
    throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc));

#if CONFIG_ERROR_CONCEALMENT
    if (pbi->ec_active &&
        (mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual))
    {
        /* MB with corrupt residuals or corrupt mode/motion vectors.
         * Better to use the predictor as reconstruction.
         */
        pbi->frame_corrupt_residual = 1;
        vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
        vp8_conceal_corrupt_mb(xd);
        return;
    }
#endif

    /* dequantization and idct */
    if (mode == B_PRED)
    {
        for (i = 0; i < 16; i++)
        {
            BLOCKD *b = &xd->block[i];
            int b_mode = xd->mode_info_context->bmi[i].as_mode;

            RECON_INVOKE(RTCD_VTABLE(recon), intra4x4_predict)
                          ( *(b->base_dst) + b->dst, b->dst_stride, b_mode,
                            *(b->base_dst) + b->dst, b->dst_stride );

            if (xd->eobs[i])
            {
                if (xd->eobs[i] > 1)
                {
                    DEQUANT_INVOKE(&pbi->dequant, idct_add)
                        (b->qcoeff, b->dequant,
                        *(b->base_dst) + b->dst, b->dst_stride);
                }
                else
                {
                    IDCT_INVOKE(RTCD_VTABLE(idct), idct1_scalar_add)
                        (b->qcoeff[0] * b->dequant[0],
                        *(b->base_dst) + b->dst, b->dst_stride,
                        *(b->base_dst) + b->dst, b->dst_stride);
                    ((int *)b->qcoeff)[0] = 0;
                }
            }
        }
    }
    else
    {
        short *DQC = xd->block[0].dequant;

        /* save the dc dequant constant in case it is overridden */
        short dc_dequant_temp = DQC[0];

        if (mode != SPLITMV)
        {
            BLOCKD *b = &xd->block[24];

            /* do 2nd order transform on the dc block */
            if (xd->eobs[24] > 1)
            {
                DEQUANT_INVOKE(&pbi->dequant, block)(b);

                IDCT_INVOKE(RTCD_VTABLE(idct), iwalsh16)(&b->dqcoeff[0],
                    xd->qcoeff);
                ((int *)b->qcoeff)[0] = 0;
                ((int *)b->qcoeff)[1] = 0;
                ((int *)b->qcoeff)[2] = 0;
                ((int *)b->qcoeff)[3] = 0;
                ((int *)b->qcoeff)[4] = 0;
                ((int *)b->qcoeff)[5] = 0;
                ((int *)b->qcoeff)[6] = 0;
                ((int *)b->qcoeff)[7] = 0;
            }
            else
            {
                b->dqcoeff[0] = b->qcoeff[0] * b->dequant[0];
                IDCT_INVOKE(RTCD_VTABLE(idct), iwalsh1)(&b->dqcoeff[0],
                    xd->qcoeff);
                ((int *)b->qcoeff)[0] = 0;
            }

            /* override the dc dequant constant */
            DQC[0] = 1;
        }

        DEQUANT_INVOKE (&pbi->dequant, idct_add_y_block)
                        (xd->qcoeff, xd->block[0].dequant,
                         xd->dst.y_buffer,
                         xd->dst.y_stride, xd->eobs);

        /* restore the dc dequant constant */
        DQC[0] = dc_dequant_temp;
    }

    DEQUANT_INVOKE (&pbi->dequant, idct_add_uv_block)
                    (xd->qcoeff+16*16, xd->block[16].dequant,
                     xd->dst.u_buffer, xd->dst.v_buffer,
                     xd->dst.uv_stride, xd->eobs+16);
}
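
The DQC[0] save/override/restore sequence above exists because the
second-order pass has already produced final DC values, so idct_add_y_block
must not dequantize the DC again. The newer decode_macroblock() in Examples
#2 and #5 gets the same effect with a separate dequant_y1_dc table whose DC
entry stays 1. A hedged sketch of building such a table; the 16-entry size is
an assumption:

/* Hypothetical setup for a dc-preserving dequant table: copy the luma
 * dequant values but pin the DC entry to 1 so the DC produced by the
 * 2nd-order inverse WHT passes through the block IDCT/add unscaled. */
static void build_dc_preserving_dequant(const short dequant_y1[16],
                                        short dequant_y1_dc[16])
{
    int i;

    for (i = 0; i < 16; i++)
        dequant_y1_dc[i] = dequant_y1[i];

    dequant_y1_dc[0] = 1;
}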