// Tokenize (or, on a dry run, context-update only) all transform blocks of
// one superblock. On dry runs the token stream pointer is rewound so no
// tokens are actually emitted.
void vp9_tokenize_sb(VP9_COMP *cpi, TOKENEXTRA **t, int dry_run,
                     BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;
  TOKENEXTRA *const start = *t;  // rewind point for dry runs
  const int ctx = vp9_get_skip_context(xd);
  // Skip counts are only gathered for segments that do not force skip.
  const int skip_inc = !vp9_segfeature_active(&cm->seg, mbmi->segment_id,
                                              SEG_LVL_SKIP);
  struct tokenize_b_args arg = {cpi, xd, t, mbmi->tx_size,
                                cpi->mb.token_cache};

  if (mbmi->skip_coeff) {
    // Whole block is skipped: count it (real runs only), clear the entropy
    // contexts, and emit no tokens.
    if (!dry_run)
      cm->counts.mbskip[ctx][1] += skip_inc;
    reset_skip_context(xd, bsize);
    if (dry_run)
      *t = start;
    return;
  }

  if (dry_run) {
    // Trial encode: update per-block entropy contexts, then discard any
    // token-stream advancement.
    foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
    *t = start;
  } else {
    cm->counts.mbskip[ctx][0] += skip_inc;
    foreach_transformed_block(xd, bsize, tokenize_b, &arg);
  }
}
// Decode one intra-coded superblock: read residual tokens (unless the block
// is skipped), then reconstruct every transform block with intra prediction.
static void decode_sb_intra(VP9D_COMP *pbi, MACROBLOCKD *xd,
                            int mi_row, int mi_col, vp9_reader *r,
                            BLOCK_SIZE_TYPE bsize) {
  VP9_COMMON *const cm = &pbi->common;
  MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;

  if (mbmi->mb_skip_coeff) {
    // No coefficients coded: just clear the token entropy contexts.
    vp9_reset_sb_tokens_context(xd, bsize);
  } else {
    // Per-segment quantizers may differ; refresh before detokenizing.
    if (xd->segmentation_enabled)
      mb_init_dequantizer(cm, xd);
    if (!vp9_reader_has_error(r))
      vp9_decode_tokens(pbi, r, bsize);
  }

  // Predict + inverse-transform every transform block in the superblock.
  foreach_transformed_block(xd, bsize, decode_block_intra, xd);
}
// Decode one inter-coded superblock: build the inter prediction, read and
// apply residual tokens, and propagate the skip flag to all covered mode-info
// units when the block turns out to be all-zero (lets the loop filter treat
// it as skipped).
//
// Fix: the function's closing brace was missing in this chunk (the visible
// text closed the outer `else` but never the function body), which left the
// file syntactically invalid; it is restored here. No statements changed.
static void decode_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, int mi_row, int mi_col,
                      vp9_reader *r, BLOCK_SIZE_TYPE bsize) {
  const int bwl = mi_width_log2(bsize), bhl = mi_height_log2(bsize);
  const int bw = 1 << bwl, bh = 1 << bhl;  // block size in mode-info units
  int n, eobtotal;
  VP9_COMMON *const pc = &pbi->common;
  MODE_INFO *const mi = xd->mode_info_context;
  MB_MODE_INFO *const mbmi = &mi->mbmi;
  const int mis = pc->mode_info_stride;

  assert(mbmi->sb_type == bsize);
  assert(mbmi->ref_frame[0] != INTRA_FRAME);

  vp9_setup_interp_filters(xd, mbmi->interp_filter, pc);

  // generate prediction
  vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);

  if (mbmi->mb_skip_coeff) {
    vp9_reset_sb_tokens_context(xd, bsize);
  } else {
    // re-initialize macroblock dequantizer before detokenization
    if (xd->segmentation_enabled)
      mb_init_dequantizer(pc, xd);

    // dequantization and idct
    eobtotal = vp9_decode_tokens(pbi, r, bsize);
    if (eobtotal == 0) {  // skip loopfilter
      // All coefficients were zero: mark every in-frame mode-info unit of
      // this block as skipped so the loop filter treats it accordingly.
      for (n = 0; n < bw * bh; n++) {
        const int x_idx = n & (bw - 1), y_idx = n >> bwl;

        if (mi_col + x_idx < pc->mi_cols && mi_row + y_idx < pc->mi_rows)
          mi[y_idx * mis + x_idx].mbmi.mb_skip_coeff = 1;
      }
    } else {
      foreach_transformed_block(xd, bsize, decode_block, xd);
    }
  }
}
// Decode one small ("atom") inter-coded block: inter prediction, residual
// detokenization (unless skipped), then per-transform-block reconstruction.
static void decode_atom(VP9D_COMP *pbi, MACROBLOCKD *xd, int mi_row,
                        int mi_col, vp9_reader *r, BLOCK_SIZE_TYPE bsize) {
  VP9_COMMON *const cm = &pbi->common;
  MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;

  assert(mbmi->ref_frame[0] != INTRA_FRAME);
  vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);

  // Inter prediction for the whole block.
  vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);

  if (mbmi->mb_skip_coeff) {
    // No residual coded: only the token contexts need resetting.
    vp9_reset_sb_tokens_context(xd, bsize);
  } else {
    // Segment-specific quantizers must be loaded before detokenizing.
    if (xd->segmentation_enabled)
      mb_init_dequantizer(cm, xd);

    if (!vp9_reader_has_error(r))
      vp9_decode_tokens(pbi, r, bsize);

    foreach_transformed_block(xd, bsize, decode_block, xd);
  }
}
// Returns 1 when every transform block in the superblock has no nonzero
// coefficients; the is_skippable callback clears the flag otherwise.
static int sb_is_skippable(MACROBLOCK *x, BLOCK_SIZE bsize) {
  int skippable = 1;  // assume skippable until a block proves otherwise
  struct is_skippable_args args = {x, &skippable};

  foreach_transformed_block(&x->e_mbd, bsize, is_skippable, &args);
  return skippable;
}
// Public variant of the skippability check: 1 iff no transform block in the
// superblock carries nonzero coefficients (callback clears the flag).
int vp9_sb_is_skippable(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
  int skippable = 1;  // cleared by is_skippable on the first nonzero eob
  struct is_skippable_args args = {xd, &skippable};

  foreach_transformed_block(xd, bsize, is_skippable, &args);
  return skippable;
}