/* Inverse 4x4 transform for a single block into its diff buffer.
 * Blocks whose end-of-block marker indicates a lone DC coefficient
 * take the cheap single-coefficient IDCT path. */
void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch)
{
    if (b->eob <= 1)
        IDCT_INVOKE(rtcd, idct1)(b->dqcoeff, b->diff, pitch);
    else
        IDCT_INVOKE(rtcd, idct16)(b->dqcoeff, b->diff, pitch);
}
/* Encode one intra 4x4 luma block: predict, subtract, forward DCT,
 * quantize, then reconstruct directly into the destination frame. */
void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, int ib)
{
    BLOCKD *b = &x->e_mbd.block[ib];
    BLOCK *be = &x->block[ib];
    unsigned char *dst = *(b->base_dst) + b->dst;

    /* Form the intra prediction for this sub-block. */
    RECON_INVOKE(&rtcd->common->recon, intra4x4_predict)
        (dst, b->dst_stride, b->bmi.as_mode, b->predictor, 16);

    /* Residual, forward transform and quantization. */
    ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
    x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
    x->quantize_b(be, b);

    /* Reconstruct: DC-only blocks use the scalar-add shortcut. */
    if (*b->eob <= 1)
    {
        IDCT_INVOKE(IF_RTCD(&rtcd->common->idct), idct1_scalar_add)
            (b->dqcoeff[0], b->predictor, 16, dst, b->dst_stride);
    }
    else
    {
        IDCT_INVOKE(IF_RTCD(&rtcd->common->idct), idct16)
            (b->dqcoeff, b->predictor, 16, dst, b->dst_stride);
    }
}
/* Inverse 4x4 transform for a single block, adding the result of the
 * prediction (b->predictor) and residual directly into the destination
 * buffer.  A DC-only block takes the scalar-add fast path. */
void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch)
{
    unsigned char *dst = *(b->base_dst) + b->dst;

    if (*b->eob > 1)
    {
        IDCT_INVOKE(rtcd, idct16)
            (b->dqcoeff, b->predictor, pitch, dst, b->dst_stride);
    }
    else
    {
        IDCT_INVOKE(rtcd, idct1_scalar_add)
            (b->dqcoeff[0], b->predictor, pitch, dst, b->dst_stride);
    }
}
/* Inverse transform the 16 luma blocks of a macroblock.  The second
 * order (Walsh-Hadamard) block is inverted first and its outputs are
 * scattered back into the per-block DC positions by recon_dcblock(). */
void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
{
    int block;

    /* Undo the 2nd order transform on the DC coefficients. */
    IDCT_INVOKE(rtcd, iwalsh16)(x->block[24].dqcoeff, x->block[24].diff);
    recon_dcblock(x);

    for (block = 0; block < 16; block++)
        vp8_inverse_transform_b(rtcd, &x->block[block], 32);
}
/* Inverse transform the 16 luma blocks of a macroblock.  SPLITMV
 * macroblocks carry no second order block, so the inverse WHT is
 * applied only for the other modes. */
void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
{
    int block;

    if (x->mode_info_context->mbmi.mode != SPLITMV)
    {
        /* Undo the 2nd order transform on the DC block. */
        IDCT_INVOKE(rtcd, iwalsh16)(x->block[24].dqcoeff, x->dqcoeff);
    }

    for (block = 0; block < 16; block++)
        vp8_inverse_transform_b(rtcd, &x->block[block], 16);
}
/* Inverse transform a whole macroblock: optional 2nd order DC block,
 * then the 16 luma blocks (pitch 32) and 8 chroma blocks (pitch 16). */
void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
{
    int block;

    /* B_PRED and SPLITMV macroblocks carry no 2nd order DC block. */
    if (x->mbmi.mode != B_PRED && x->mbmi.mode != SPLITMV)
    {
        IDCT_INVOKE(rtcd, iwalsh16)(&x->block[24].dqcoeff[0], x->block[24].diff);
        recon_dcblock(x);
    }

    /* Luma. */
    for (block = 0; block < 16; block++)
        vp8_inverse_transform_b(rtcd, &x->block[block], 32);

    /* Chroma. */
    for (block = 16; block < 24; block++)
        vp8_inverse_transform_b(rtcd, &x->block[block], 16);
}
/* Decode one macroblock: read residual tokens, run prediction (intra or
 * inter), then dequantize/inverse-transform and add the residual into the
 * destination frame.  Handles the skip fast path and, when error
 * concealment is enabled, falls back to the predictor for corrupt MBs.
 *
 * pbi    - decoder instance (token decode state, dequant tables, RTCD).
 * xd     - per-macroblock decode context; reconstruction is written to
 *          xd->dst.
 * mb_idx - index of this MB in the frame, used for corruption checks. */
static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, unsigned int mb_idx)
{
    int eobtotal = 0;
    int throw_residual = 0;
    MB_PREDICTION_MODE mode;
    int i;

    if (xd->mode_info_context->mbmi.mb_skip_coeff)
    {
        /* Skipped MB: no residual tokens; reset token contexts instead. */
        vp8_reset_mb_tokens_context(xd);
    }
    else if (!vp8dx_bool_error(xd->current_bc))
    {
        /* Only decode tokens from an uncorrupted boolean decoder. */
        eobtotal = vp8_decode_mb_tokens(pbi, xd);
    }

    mode = xd->mode_info_context->mbmi.mode;

    if (eobtotal == 0 && mode != B_PRED && mode != SPLITMV &&
        !vp8dx_bool_error(xd->current_bc))
    {
        /* Special case: Force the loopfilter to skip when eobtotal and
         * mb_skip_coeff are zero.
         */
        xd->mode_info_context->mbmi.mb_skip_coeff = 1;
        skip_recon_mb(pbi, xd);
        return;
    }

    /* Segment-based quantizer selection may change per MB. */
    if (xd->segmentation_enabled)
        mb_init_dequantizer(pbi, xd);

    /* do prediction */
    if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
    {
        RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_mbuv_s)(xd);

        if (mode != B_PRED)
        {
            RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_mby_s)(xd);
        }
        else
        {
            /* B_PRED predicts per 4x4 block below; copy the above-right
             * edge pixels down so each sub-block's predictor is available. */
            vp8_intra_prediction_down_copy(xd);
        }
    }
    else
    {
        vp8_build_inter_predictors_mb(xd);
    }

    /* When we have independent partitions we can apply residual even
     * though other partitions within the frame are corrupt.
     */
    throw_residual = (!pbi->independent_partitions &&
                      pbi->frame_corrupt_residual);
    throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc));

#if CONFIG_ERROR_CONCEALMENT
    if (pbi->ec_active &&
        (mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual))
    {
        /* MB with corrupt residuals or corrupt mode/motion vectors.
         * Better to use the predictor as reconstruction.
         */
        pbi->frame_corrupt_residual = 1;
        vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
        vp8_conceal_corrupt_mb(xd);
        return;
    }
#endif

    /* dequantization and idct */
    if (mode == B_PRED)
    {
        /* B_PRED: predict and reconstruct each 4x4 luma block in place,
         * in raster order (later blocks predict from earlier output). */
        for (i = 0; i < 16; i++)
        {
            BLOCKD *b = &xd->block[i];
            int b_mode = xd->mode_info_context->bmi[i].as_mode;

            /* Predict directly into the destination buffer. */
            RECON_INVOKE(RTCD_VTABLE(recon), intra4x4_predict)
                (*(b->base_dst) + b->dst, b->dst_stride, b_mode,
                 *(b->base_dst) + b->dst, b->dst_stride);

            if (xd->eobs[i])
            {
                if (xd->eobs[i] > 1)
                {
                    /* Fused dequant + IDCT + add; also clears qcoeff. */
                    DEQUANT_INVOKE(&pbi->dequant, idct_add)
                        (b->qcoeff, b->dequant,
                         *(b->base_dst) + b->dst, b->dst_stride);
                }
                else
                {
                    /* DC-only block: scalar add of the dequantized DC. */
                    IDCT_INVOKE(RTCD_VTABLE(idct), idct1_scalar_add)
                        (b->qcoeff[0] * b->dequant[0],
                         *(b->base_dst) + b->dst, b->dst_stride,
                         *(b->base_dst) + b->dst, b->dst_stride);
                    /* NOTE(review): int-typed store to clear two shorts at
                     * once — relies on layout/aliasing assumptions; confirm
                     * against the project's supported compilers. */
                    ((int *)b->qcoeff)[0] = 0;
                }
            }
        }
    }
    else
    {
        short *DQC = xd->block[0].dequant;

        /* save the dc dequant constant in case it is overridden */
        short dc_dequant_temp = DQC[0];

        if (mode != SPLITMV)
        {
            BLOCKD *b = &xd->block[24];

            /* do 2nd order transform on the dc block */
            if (xd->eobs[24] > 1)
            {
                DEQUANT_INVOKE(&pbi->dequant, block)(b);
                /* Inverse WHT scatters the 16 DC values into the DC slot
                 * of each luma block inside xd->qcoeff. */
                IDCT_INVOKE(RTCD_VTABLE(idct), iwalsh16)(&b->dqcoeff[0], xd->qcoeff);
                /* Clear all 16 second-order coefficients (8 int stores). */
                ((int *)b->qcoeff)[0] = 0;
                ((int *)b->qcoeff)[1] = 0;
                ((int *)b->qcoeff)[2] = 0;
                ((int *)b->qcoeff)[3] = 0;
                ((int *)b->qcoeff)[4] = 0;
                ((int *)b->qcoeff)[5] = 0;
                ((int *)b->qcoeff)[6] = 0;
                ((int *)b->qcoeff)[7] = 0;
            }
            else
            {
                /* DC-only 2nd order block: dequantize the single value
                 * and broadcast it via the 1-coefficient inverse WHT. */
                b->dqcoeff[0] = b->qcoeff[0] * b->dequant[0];
                IDCT_INVOKE(RTCD_VTABLE(idct), iwalsh1)(&b->dqcoeff[0], xd->qcoeff);
                ((int *)b->qcoeff)[0] = 0;
            }

            /* override the dc dequant constant: luma DCs were already
             * dequantized by the 2nd order pass above, so the per-block
             * dequant must not scale them again.
             * NOTE(review): this writes through a pointer into the shared
             * dequant table and restores it below — presumably safe
             * single-threaded; verify for multithreaded decoding. */
            DQC[0] = 1;
        }

        DEQUANT_INVOKE (&pbi->dequant, idct_add_y_block)
            (xd->qcoeff, xd->block[0].dequant,
             xd->dst.y_buffer, xd->dst.y_stride, xd->eobs);

        /* restore the dc dequant constant */
        DQC[0] = dc_dequant_temp;
    }

    /* Chroma residual: dequant + IDCT + add for the 8 U/V blocks. */
    DEQUANT_INVOKE (&pbi->dequant, idct_add_uv_block)
        (xd->qcoeff+16*16, xd->block[16].dequant,
         xd->dst.u_buffer, xd->dst.v_buffer, xd->dst.uv_stride, xd->eobs+16);
}