static int read_mv_component(vp9_reader *r,
                             const nmv_component *mvcomp, int usehp) {
  int mag, d, fr, hp;
  const int sign = vp9_read(r, mvcomp->sign);
  const int mv_class = vp9_read_tree(r, vp9_mv_class_tree, mvcomp->classes);
  const int class0 = mv_class == MV_CLASS_0;

  // Integer part
  if (class0) {
    d = vp9_read_tree(r, vp9_mv_class0_tree, mvcomp->class0);
  } else {
    int i;
    const int n = mv_class + CLASS0_BITS - 1;  // number of bits

    d = 0;
    for (i = 0; i < n; ++i)
      d |= vp9_read(r, mvcomp->bits[i]) << i;
  }

  // Fractional part
  fr = vp9_read_tree(r, vp9_mv_fp_tree,
                     class0 ? mvcomp->class0_fp[d] : mvcomp->fp);

  // High precision part (if hp is not used, hp defaults to 1)
  hp = usehp ? vp9_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp)
             : 1;

  // Result
  mag = vp9_get_mv_mag(mv_class, (d << 3) | (fr << 1) | hp) + 1;
  return sign ? -mag : mag;
}
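/* A minimal sketch of how the decoded fields above combine (an illustration,
 * not decoder code): magnitudes are in 1/8-pel units, d carries the
 * integer-pel part, fr indexes quarter-pel steps (0..3) and hp is the
 * eighth-pel bit; vp9_get_mv_mag() is assumed to add a per-class base offset
 * on top of this packed value. */
static INLINE int pack_mv_offset_sketch(int d, int fr, int hp) {
  // e.g. d = 1, fr = 2, hp = 1 -> (1 << 3) | (2 << 1) | 1 = 13;
  // read_mv_component() then adds the class base and 1, so an MV_CLASS_0
  // component would decode to 14 eighth-pels = 1.75 pel.
  return (d << 3) | (fr << 1) | hp;
}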
static void setup_txfm_mode(VP9_COMMON *pc, int lossless, vp9_reader *r) {
  if (lossless) {
    pc->txfm_mode = ONLY_4X4;
  } else {
    pc->txfm_mode = vp9_read_literal(r, 2);
    if (pc->txfm_mode == ALLOW_32X32)
      pc->txfm_mode += vp9_read_bit(r);

    if (pc->txfm_mode == TX_MODE_SELECT) {
      int i, j;
      for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
        for (j = 0; j < TX_SIZE_MAX_SB - 3; ++j) {
          if (vp9_read(r, VP9_MODE_UPDATE_PROB))
            pc->fc.tx_probs_8x8p[i][j] =
                vp9_read_prob_diff_update(r, pc->fc.tx_probs_8x8p[i][j]);
        }
      }
      for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
        for (j = 0; j < TX_SIZE_MAX_SB - 2; ++j) {
          if (vp9_read(r, VP9_MODE_UPDATE_PROB))
            pc->fc.tx_probs_16x16p[i][j] =
                vp9_read_prob_diff_update(r, pc->fc.tx_probs_16x16p[i][j]);
        }
      }
      for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
        for (j = 0; j < TX_SIZE_MAX_SB - 1; ++j) {
          if (vp9_read(r, VP9_MODE_UPDATE_PROB))
            pc->fc.tx_probs_32x32p[i][j] =
                vp9_read_prob_diff_update(r, pc->fc.tx_probs_32x32p[i][j]);
        }
      }
    }
  }
}
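/* Sketch of the mode signalling decoded above (assumes the usual VP9 enum
 * ordering ONLY_4X4 = 0 .. ALLOW_32X32 = 3, TX_MODE_SELECT = 4): a 2-bit
 * literal covers the first four modes, and when it lands on ALLOW_32X32 one
 * further bit promotes it to TX_MODE_SELECT. */
static int read_txfm_mode_sketch(vp9_reader *r) {
  int mode = vp9_read_literal(r, 2);  // 0..3
  if (mode == 3)                      // ALLOW_32X32 under the assumed ordering
    mode += vp9_read_bit(r);          // stays 3, or becomes TX_MODE_SELECT (4)
  return mode;
}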
// Read the reference frame
static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                            vp9_reader *r, int segment_id,
                            MV_REFERENCE_FRAME ref_frame[2]) {
  FRAME_CONTEXT *const fc = &cm->fc;
  FRAME_COUNTS *const counts = &cm->counts;

  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    ref_frame[0] = vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
    ref_frame[1] = NONE;
  } else {
    const int comp_ctx = vp9_get_pred_context_comp_inter_inter(cm, xd);
    int is_comp;

    if (cm->comp_pred_mode == HYBRID_PREDICTION) {
      is_comp = vp9_read(r, fc->comp_inter_prob[comp_ctx]);
      if (!cm->frame_parallel_decoding_mode)
        ++counts->comp_inter[comp_ctx][is_comp];
    } else {
      is_comp = cm->comp_pred_mode == COMP_PREDICTION_ONLY;
    }

    // FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding
    if (is_comp) {
      const int fix_ref_idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
      const int ref_ctx = vp9_get_pred_context_comp_ref_p(cm, xd);
      const int b = vp9_read(r, fc->comp_ref_prob[ref_ctx]);
      if (!cm->frame_parallel_decoding_mode)
        ++counts->comp_ref[ref_ctx][b];
      ref_frame[fix_ref_idx] = cm->comp_fixed_ref;
      ref_frame[!fix_ref_idx] = cm->comp_var_ref[b];
    } else {
      const int ctx0 = vp9_get_pred_context_single_ref_p1(xd);
      const int bit0 = vp9_read(r, fc->single_ref_prob[ctx0][0]);
      if (!cm->frame_parallel_decoding_mode)
        ++counts->single_ref[ctx0][0][bit0];
      if (bit0) {
        const int ctx1 = vp9_get_pred_context_single_ref_p2(xd);
        const int bit1 = vp9_read(r, fc->single_ref_prob[ctx1][1]);
        ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
        if (!cm->frame_parallel_decoding_mode)
          ++counts->single_ref[ctx1][1][bit1];
      } else {
        ref_frame[0] = LAST_FRAME;
      }
      ref_frame[1] = NONE;
    }
  }
}
static TX_SIZE read_selected_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd,
                                     TX_SIZE max_tx_size, vp9_reader *r) {
  const int ctx = vp9_get_tx_size_context(xd);
  const vp9_prob *tx_probs = get_tx_probs(max_tx_size, ctx, &cm->fc.tx_probs);
  TX_SIZE tx_size = vp9_read(r, tx_probs[0]);
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
    tx_size += vp9_read(r, tx_probs[1]);
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
      tx_size += vp9_read(r, tx_probs[2]);
  }

  if (!cm->frame_parallel_decoding_mode)
    ++get_tx_counts(max_tx_size, ctx, &cm->counts.tx)[tx_size];
  return tx_size;
}
static TX_SIZE read_selected_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd,
                                     TX_SIZE max_tx_size, vp9_reader *r) {
  FRAME_COUNTS *counts = xd->counts;
  const int ctx = vp9_get_tx_size_context(xd);
  const vp9_prob *tx_probs = get_tx_probs(max_tx_size, ctx, &cm->fc->tx_probs);
  int tx_size = vp9_read(r, tx_probs[0]);
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
    tx_size += vp9_read(r, tx_probs[1]);
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
      tx_size += vp9_read(r, tx_probs[2]);
  }

  if (counts)
    ++get_tx_counts(max_tx_size, ctx, &counts->tx)[tx_size];
  return (TX_SIZE)tx_size;
}
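/* An illustrative note on the two variants above: the reads walk a short
 * unary-style tree, so with max_tx_size == TX_32X32 the bit sequences
 * 0, 10, 110 and 111 decode to TX_4X4, TX_8X8, TX_16X16 and TX_32X32
 * respectively; a smaller max_tx_size simply cuts the tree short. */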
static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                                 int mi_row, int mi_col, vp9_reader *r) {
  struct segmentation *const seg = &cm->seg;
  MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  int predicted_segment_id, segment_id;

  if (!seg->enabled)
    return 0;  // Default for disabled segmentation

  predicted_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map,
                                            bsize, mi_row, mi_col);
  if (!seg->update_map)
    return predicted_segment_id;

  if (seg->temporal_update) {
    const vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
    mbmi->seg_id_predicted = vp9_read(r, pred_prob);
    segment_id = mbmi->seg_id_predicted ? predicted_segment_id
                                        : read_segment_id(r, seg);
  } else {
    segment_id = read_segment_id(r, seg);
  }

  set_segment_id(cm, bsize, mi_row, mi_col, segment_id);
  return segment_id;
}
static REFERENCE_MODE read_reference_mode(VP9_COMMON *cm,
                                          const MACROBLOCKD *xd,
                                          vp9_reader *r) {
  const int ctx = vp9_get_reference_mode_context(cm, xd);
  const int mode = vp9_read(r, cm->fc.comp_inter_prob[ctx]);
  if (!cm->frame_parallel_decoding_mode)
    ++cm->counts.comp_inter[ctx][mode];
  return mode;  // SINGLE_REFERENCE or COMPOUND_REFERENCE
}
// Read the reference frame
static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                            vp9_reader *r, int segment_id,
                            MV_REFERENCE_FRAME ref_frame[2]) {
  FRAME_CONTEXT *const fc = &cm->fc;
  FRAME_COUNTS *const counts = &cm->counts;

  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    ref_frame[0] = vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
    ref_frame[1] = NONE;
  } else {
    const REFERENCE_MODE mode = (cm->reference_mode == REFERENCE_MODE_SELECT)
                                    ? read_reference_mode(cm, xd, r)
                                    : cm->reference_mode;

    // FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding
    if (mode == COMPOUND_REFERENCE) {
      const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
      const int ctx = vp9_get_pred_context_comp_ref_p(cm, xd);
      const int bit = vp9_read(r, fc->comp_ref_prob[ctx]);
      if (!cm->frame_parallel_decoding_mode)
        ++counts->comp_ref[ctx][bit];
      ref_frame[idx] = cm->comp_fixed_ref;
      ref_frame[!idx] = cm->comp_var_ref[bit];
    } else if (mode == SINGLE_REFERENCE) {
      const int ctx0 = vp9_get_pred_context_single_ref_p1(xd);
      const int bit0 = vp9_read(r, fc->single_ref_prob[ctx0][0]);
      if (!cm->frame_parallel_decoding_mode)
        ++counts->single_ref[ctx0][0][bit0];
      if (bit0) {
        const int ctx1 = vp9_get_pred_context_single_ref_p2(xd);
        const int bit1 = vp9_read(r, fc->single_ref_prob[ctx1][1]);
        if (!cm->frame_parallel_decoding_mode)
          ++counts->single_ref[ctx1][1][bit1];
        ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
      } else {
        ref_frame[0] = LAST_FRAME;
      }
      ref_frame[1] = NONE;
    } else {
      assert(0 && "Invalid prediction mode.");
    }
  }
}
static uint8_t read_skip_coeff(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                               int segment_id, vp9_reader *r) {
  int skip_coeff = vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP);
  if (!skip_coeff) {
    const int ctx = vp9_get_pred_context_mbskip(xd);
    skip_coeff = vp9_read(r, vp9_get_pred_prob_mbskip(cm, xd));
    if (!cm->frame_parallel_decoding_mode)
      ++cm->counts.mbskip[ctx][skip_coeff];
  }
  return skip_coeff;
}
static int read_skip_coeff(VP9_COMMON *cm, const MACROBLOCKD *xd,
                           int segment_id, vp9_reader *r) {
  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int ctx = vp9_get_skip_context(xd);
    const int skip = vp9_read(r, cm->fc.mbskip_probs[ctx]);
    if (!cm->frame_parallel_decoding_mode)
      ++cm->counts.mbskip[ctx][skip];
    return skip;
  }
}
static int read_skip(VP9_COMMON *cm, const MACROBLOCKD *xd,
                     int segment_id, vp9_reader *r) {
  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int ctx = vp9_get_skip_context(xd);
    const int skip = vp9_read(r, cm->fc->skip_probs[ctx]);
    FRAME_COUNTS *counts = xd->counts;
    if (counts)
      ++counts->skip[ctx][skip];
    return skip;
  }
}
static int read_is_inter_block(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                               int segment_id, vp9_reader *r) {
  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    return vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) !=
           INTRA_FRAME;
  } else {
    const int ctx = vp9_get_intra_inter_context(xd);
    const int is_inter = vp9_read(r, cm->fc.intra_inter_prob[ctx]);
    if (!cm->frame_parallel_decoding_mode)
      ++cm->counts.intra_inter[ctx][is_inter];
    return is_inter;
  }
}
static int read_is_inter_block(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                               int segment_id, vp9_reader *r) {
  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    return vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) !=
           INTRA_FRAME;
  } else {
    const int ctx = vp9_get_intra_inter_context(xd);
    const int is_inter = vp9_read(r, cm->fc->intra_inter_prob[ctx]);
    FRAME_COUNTS *counts = xd->counts;
    if (counts)
      ++counts->intra_inter[ctx][is_inter];
    return is_inter;
  }
}
static REFERENCE_MODE read_block_reference_mode(VP9_COMMON *cm,
                                                const MACROBLOCKD *xd,
                                                vp9_reader *r) {
  if (cm->reference_mode == REFERENCE_MODE_SELECT) {
    const int ctx = vp9_get_reference_mode_context(cm, xd);
    const REFERENCE_MODE mode =
        (REFERENCE_MODE)vp9_read(r, cm->fc->comp_inter_prob[ctx]);
    FRAME_COUNTS *counts = xd->counts;
    if (counts)
      ++counts->comp_inter[ctx][mode];
    return mode;  // SINGLE_REFERENCE or COMPOUND_REFERENCE
  } else {
    return cm->reference_mode;
  }
}
static void update_mv(vp9_reader *r, vp9_prob *p) {
  if (vp9_read(r, NMV_UPDATE_PROB))
    *p = (vp9_read_literal(r, 7) << 1) | 1;
}
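/* A small illustration of the mapping above (not decoder code): the 7-bit
 * literal v in [0, 127] becomes 2*v + 1, so only the odd probabilities
 * 1..255 are representable and the updated probability can never be zero. */
static INLINE vp9_prob nmv_prob_from_literal_sketch(int v) {
  return (vp9_prob)((v << 1) | 1);  // v = 0 -> 1, v = 127 -> 255
}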
static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd,
                        FRAME_COUNTS *counts, PLANE_TYPE type,
                        tran_low_t *dqcoeff, TX_SIZE tx_size,
                        const int16_t *dq, int ctx, const int16_t *scan,
                        const int16_t *nb, vp9_reader *r) {
  const int max_eob = 16 << (tx_size << 1);
  const FRAME_CONTEXT *const fc = cm->fc;
  const int ref = is_inter_block(&xd->mi[0].src_mi->mbmi);
  int band, c = 0;
  const vp9_prob (*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
      fc->coef_probs[tx_size][type][ref];
  const vp9_prob *prob;
  unsigned int (*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1] =
      counts->coef[tx_size][type][ref];
  unsigned int (*eob_branch_count)[COEFF_CONTEXTS] =
      counts->eob_branch[tx_size][type][ref];
  uint8_t token_cache[32 * 32];
  const uint8_t *band_translate = get_band_translate(tx_size);
  const int dq_shift = (tx_size == TX_32X32);
  int v, token;
  int16_t dqv = dq[0];
  const uint8_t *cat1_prob;
  const uint8_t *cat2_prob;
  const uint8_t *cat3_prob;
  const uint8_t *cat4_prob;
  const uint8_t *cat5_prob;
  const uint8_t *cat6_prob;

#if CONFIG_VP9_HIGHBITDEPTH
  if (cm->use_highbitdepth) {
    if (cm->bit_depth == VPX_BITS_10) {
      cat1_prob = vp9_cat1_prob_high10;
      cat2_prob = vp9_cat2_prob_high10;
      cat3_prob = vp9_cat3_prob_high10;
      cat4_prob = vp9_cat4_prob_high10;
      cat5_prob = vp9_cat5_prob_high10;
      cat6_prob = vp9_cat6_prob_high10;
    } else {
      cat1_prob = vp9_cat1_prob_high12;
      cat2_prob = vp9_cat2_prob_high12;
      cat3_prob = vp9_cat3_prob_high12;
      cat4_prob = vp9_cat4_prob_high12;
      cat5_prob = vp9_cat5_prob_high12;
      cat6_prob = vp9_cat6_prob_high12;
    }
  } else {
    cat1_prob = vp9_cat1_prob;
    cat2_prob = vp9_cat2_prob;
    cat3_prob = vp9_cat3_prob;
    cat4_prob = vp9_cat4_prob;
    cat5_prob = vp9_cat5_prob;
    cat6_prob = vp9_cat6_prob;
  }
#else
  cat1_prob = vp9_cat1_prob;
  cat2_prob = vp9_cat2_prob;
  cat3_prob = vp9_cat3_prob;
  cat4_prob = vp9_cat4_prob;
  cat5_prob = vp9_cat5_prob;
  cat6_prob = vp9_cat6_prob;
#endif

  while (c < max_eob) {
    int val = -1;
    band = *band_translate++;
    prob = coef_probs[band][ctx];
    if (!cm->frame_parallel_decoding_mode)
      ++eob_branch_count[band][ctx];
    if (!vp9_read(r, prob[EOB_CONTEXT_NODE])) {
      INCREMENT_COUNT(EOB_MODEL_TOKEN);
      break;
    }

    while (!vp9_read(r, prob[ZERO_CONTEXT_NODE])) {
      INCREMENT_COUNT(ZERO_TOKEN);
      dqv = dq[1];
      token_cache[scan[c]] = 0;
      ++c;
      if (c >= max_eob)
        return c;  // zero tokens at the end (no eob token)
      ctx = get_coef_context(nb, token_cache, c);
      band = *band_translate++;
      prob = coef_probs[band][ctx];
    }

    if (!vp9_read(r, prob[ONE_CONTEXT_NODE])) {
      INCREMENT_COUNT(ONE_TOKEN);
      token = ONE_TOKEN;
      val = 1;
    } else {
      INCREMENT_COUNT(TWO_TOKEN);
      token = vp9_read_tree(r, coeff_subtree_high,
                            vp9_pareto8_full[prob[PIVOT_NODE] - 1]);
      switch (token) {
        case TWO_TOKEN:
        case THREE_TOKEN:
        case FOUR_TOKEN:
          val = token;
          break;
        case CATEGORY1_TOKEN:
          val = CAT1_MIN_VAL + read_coeff(cat1_prob, 1, r);
          break;
        case CATEGORY2_TOKEN:
          val = CAT2_MIN_VAL + read_coeff(cat2_prob, 2, r);
          break;
        case CATEGORY3_TOKEN:
          val = CAT3_MIN_VAL + read_coeff(cat3_prob, 3, r);
          break;
        case CATEGORY4_TOKEN:
          val = CAT4_MIN_VAL + read_coeff(cat4_prob, 4, r);
          break;
        case CATEGORY5_TOKEN:
          val = CAT5_MIN_VAL + read_coeff(cat5_prob, 5, r);
          break;
        case CATEGORY6_TOKEN:
#if CONFIG_VP9_HIGHBITDEPTH
          switch (cm->bit_depth) {
            case VPX_BITS_8:
              val = CAT6_MIN_VAL + read_coeff(cat6_prob, 14, r);
              break;
            case VPX_BITS_10:
              val = CAT6_MIN_VAL + read_coeff(cat6_prob, 16, r);
              break;
            case VPX_BITS_12:
              val = CAT6_MIN_VAL + read_coeff(cat6_prob, 18, r);
              break;
            default:
              assert(0);
              return -1;
          }
#else
          val = CAT6_MIN_VAL + read_coeff(cat6_prob, 14, r);
#endif
          break;
      }
    }
    v = (val * dqv) >> dq_shift;
#if CONFIG_COEFFICIENT_RANGE_CHECKING
#if CONFIG_VP9_HIGHBITDEPTH
    dqcoeff[scan[c]] = highbd_check_range((vp9_read_bit(r) ? -v : v),
                                          cm->bit_depth);
#else
    dqcoeff[scan[c]] = check_range(vp9_read_bit(r) ? -v : v);
#endif  // CONFIG_VP9_HIGHBITDEPTH
#else
    dqcoeff[scan[c]] = vp9_read_bit(r) ? -v : v;
#endif  // CONFIG_COEFFICIENT_RANGE_CHECKING
    token_cache[scan[c]] = vp9_pt_energy_class[token];
    ++c;
    ctx = get_coef_context(nb, token_cache, c);
    dqv = dq[1];
  }

  return c;
}
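/* Note on the dequantization above (an explanatory aside): dqv is the
 * quantizer step (dq[0] for the first coefficient, dq[1] afterwards), and
 * dq_shift halves the result for TX_32X32, which appears to compensate for
 * the extra scaling of the 32x32 transform, so v = (val * dqv) >> dq_shift
 * is the dequantized magnitude before the sign bit is applied. */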
static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd, vp9_reader *r,
                        int block_idx, PLANE_TYPE type, int seg_eob,
                        int16_t *qcoeff_ptr, TX_SIZE tx_size,
                        const int16_t *dq, int pt) {
  const FRAME_CONTEXT *const fc = &cm->fc;
  FRAME_COUNTS *const counts = &cm->counts;
  const int ref = is_inter_block(&xd->mi_8x8[0]->mbmi);
  int band, c = 0;
  const vp9_prob (*coef_probs)[PREV_COEF_CONTEXTS][UNCONSTRAINED_NODES] =
      fc->coef_probs[tx_size][type][ref];
  vp9_prob coef_probs_full[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
  uint8_t load_map[COEF_BANDS][PREV_COEF_CONTEXTS] = { { 0 } };
  const vp9_prob *prob;
  vp9_coeff_count_model *coef_counts = counts->coef[tx_size];
  const int16_t *scan, *nb;
  const uint8_t *const band_translate = get_band_translate(tx_size);
  uint8_t token_cache[1024];

  get_scan(xd, tx_size, type, block_idx, &scan, &nb);

  while (1) {
    int val;
    const uint8_t *cat6 = cat6_prob;

    if (c >= seg_eob)
      break;
    if (c)
      pt = get_coef_context(nb, token_cache, c);
    band = get_coef_band(band_translate, c);
    prob = coef_probs[band][pt];
    if (!cm->frame_parallel_decoding_mode)
      ++counts->eob_branch[tx_size][type][ref][band][pt];
    if (!vp9_read(r, prob[EOB_CONTEXT_NODE]))
      break;

  SKIP_START:
    if (c >= seg_eob)
      break;
    if (c)
      pt = get_coef_context(nb, token_cache, c);
    band = get_coef_band(band_translate, c);
    prob = coef_probs[band][pt];

    if (!vp9_read(r, prob[ZERO_CONTEXT_NODE])) {
      INCREMENT_COUNT(ZERO_TOKEN);
      ++c;
      goto SKIP_START;
    }

    // ONE_CONTEXT_NODE_0_
    if (!vp9_read(r, prob[ONE_CONTEXT_NODE])) {
      WRITE_COEF_CONTINUE(1, ONE_TOKEN);
    }

    // Load full probabilities if not already loaded
    if (!load_map[band][pt]) {
      vp9_model_to_full_probs(coef_probs[band][pt],
                              coef_probs_full[band][pt]);
      load_map[band][pt] = 1;
    }
    prob = coef_probs_full[band][pt];

    // LOW_VAL_CONTEXT_NODE_0_
    if (!vp9_read(r, prob[LOW_VAL_CONTEXT_NODE])) {
      if (!vp9_read(r, prob[TWO_CONTEXT_NODE])) {
        WRITE_COEF_CONTINUE(2, TWO_TOKEN);
      }
      if (!vp9_read(r, prob[THREE_CONTEXT_NODE])) {
        WRITE_COEF_CONTINUE(3, THREE_TOKEN);
      }
      WRITE_COEF_CONTINUE(4, FOUR_TOKEN);
    }
    // HIGH_LOW_CONTEXT_NODE_0_
    if (!vp9_read(r, prob[HIGH_LOW_CONTEXT_NODE])) {
      if (!vp9_read(r, prob[CAT_ONE_CONTEXT_NODE])) {
        val = CAT1_MIN_VAL;
        ADJUST_COEF(CAT1_PROB0, 0);
        WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY1);
      }
      val = CAT2_MIN_VAL;
      ADJUST_COEF(CAT2_PROB1, 1);
      ADJUST_COEF(CAT2_PROB0, 0);
      WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY2);
    }
    // CAT_THREEFOUR_CONTEXT_NODE_0_
    if (!vp9_read(r, prob[CAT_THREEFOUR_CONTEXT_NODE])) {
      if (!vp9_read(r, prob[CAT_THREE_CONTEXT_NODE])) {
        val = CAT3_MIN_VAL;
        ADJUST_COEF(CAT3_PROB2, 2);
        ADJUST_COEF(CAT3_PROB1, 1);
        ADJUST_COEF(CAT3_PROB0, 0);
        WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY3);
      }
      val = CAT4_MIN_VAL;
      ADJUST_COEF(CAT4_PROB3, 3);
      ADJUST_COEF(CAT4_PROB2, 2);
      ADJUST_COEF(CAT4_PROB1, 1);
      ADJUST_COEF(CAT4_PROB0, 0);
      WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY4);
    }
    // CAT_FIVE_CONTEXT_NODE_0_:
    if (!vp9_read(r, prob[CAT_FIVE_CONTEXT_NODE])) {
      val = CAT5_MIN_VAL;
      ADJUST_COEF(CAT5_PROB4, 4);
      ADJUST_COEF(CAT5_PROB3, 3);
      ADJUST_COEF(CAT5_PROB2, 2);
      ADJUST_COEF(CAT5_PROB1, 1);
      ADJUST_COEF(CAT5_PROB0, 0);
      WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY5);
    }
    val = 0;
    while (*cat6) {
      val = (val << 1) | vp9_read(r, *cat6++);
    }
    val += CAT6_MIN_VAL;
    WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY6);
  }

  if (c < seg_eob) {
    if (!cm->frame_parallel_decoding_mode)
      ++coef_counts[type][ref][band][pt][DCT_EOB_MODEL_TOKEN];
  }

  return c;
}
void vp9_diff_update_prob(vp9_reader *r, vp9_prob* p) {
  if (vp9_read(r, DIFF_UPDATE_PROB)) {
    const int delp = decode_term_subexp(r);
    *p = (vp9_prob)inv_remap_prob(delp, *p);
  }
}
static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd,
                        PLANE_TYPE type, int16_t *dqcoeff, TX_SIZE tx_size,
                        const int16_t *dq, int ctx, const int16_t *scan,
                        const int16_t *nb, vp9_reader *r) {
  const int max_eob = 16 << (tx_size << 1);
  const FRAME_CONTEXT *const fc = &cm->fc;
  FRAME_COUNTS *const counts = &cm->counts;
  const int ref = is_inter_block(&xd->mi[0]->mbmi);
  int band, c = 0;
  const vp9_prob (*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
      fc->coef_probs[tx_size][type][ref];
  const vp9_prob *prob;
  unsigned int (*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1] =
      counts->coef[tx_size][type][ref];
  unsigned int (*eob_branch_count)[COEFF_CONTEXTS] =
      counts->eob_branch[tx_size][type][ref];
  uint8_t token_cache[32 * 32];
  const uint8_t *band_translate = get_band_translate(tx_size);
  const int dq_shift = (tx_size == TX_32X32);
  int v;
  int16_t dqv = dq[0];

  while (c < max_eob) {
    int val;
    band = *band_translate++;
    prob = coef_probs[band][ctx];
    if (!cm->frame_parallel_decoding_mode)
      ++eob_branch_count[band][ctx];
    if (!vp9_read(r, prob[EOB_CONTEXT_NODE])) {
      INCREMENT_COUNT(EOB_MODEL_TOKEN);
      break;
    }

    while (!vp9_read(r, prob[ZERO_CONTEXT_NODE])) {
      INCREMENT_COUNT(ZERO_TOKEN);
      dqv = dq[1];
      token_cache[scan[c]] = 0;
      ++c;
      if (c >= max_eob)
        return c;  // zero tokens at the end (no eob token)
      ctx = get_coef_context(nb, token_cache, c);
      band = *band_translate++;
      prob = coef_probs[band][ctx];
    }

    // ONE_CONTEXT_NODE_0_
    if (!vp9_read(r, prob[ONE_CONTEXT_NODE])) {
      INCREMENT_COUNT(ONE_TOKEN);
      WRITE_COEF_CONTINUE(1, ONE_TOKEN);
    }

    INCREMENT_COUNT(TWO_TOKEN);

    prob = vp9_pareto8_full[prob[PIVOT_NODE] - 1];

    if (!vp9_read(r, prob[LOW_VAL_CONTEXT_NODE])) {
      if (!vp9_read(r, prob[TWO_CONTEXT_NODE])) {
        WRITE_COEF_CONTINUE(2, TWO_TOKEN);
      }
      if (!vp9_read(r, prob[THREE_CONTEXT_NODE])) {
        WRITE_COEF_CONTINUE(3, THREE_TOKEN);
      }
      WRITE_COEF_CONTINUE(4, FOUR_TOKEN);
    }

    if (!vp9_read(r, prob[HIGH_LOW_CONTEXT_NODE])) {
      if (!vp9_read(r, prob[CAT_ONE_CONTEXT_NODE])) {
        val = CAT1_MIN_VAL;
        ADJUST_COEF(vp9_cat1_prob[0], 0);
        WRITE_COEF_CONTINUE(val, CATEGORY1_TOKEN);
      }
      val = CAT2_MIN_VAL;
      ADJUST_COEF(vp9_cat2_prob[0], 1);
      ADJUST_COEF(vp9_cat2_prob[1], 0);
      WRITE_COEF_CONTINUE(val, CATEGORY2_TOKEN);
    }

    if (!vp9_read(r, prob[CAT_THREEFOUR_CONTEXT_NODE])) {
      if (!vp9_read(r, prob[CAT_THREE_CONTEXT_NODE])) {
        val = CAT3_MIN_VAL;
        ADJUST_COEF(vp9_cat3_prob[0], 2);
        ADJUST_COEF(vp9_cat3_prob[1], 1);
        ADJUST_COEF(vp9_cat3_prob[2], 0);
        WRITE_COEF_CONTINUE(val, CATEGORY3_TOKEN);
      }
      val = CAT4_MIN_VAL;
      ADJUST_COEF(vp9_cat4_prob[0], 3);
      ADJUST_COEF(vp9_cat4_prob[1], 2);
      ADJUST_COEF(vp9_cat4_prob[2], 1);
      ADJUST_COEF(vp9_cat4_prob[3], 0);
      WRITE_COEF_CONTINUE(val, CATEGORY4_TOKEN);
    }

    if (!vp9_read(r, prob[CAT_FIVE_CONTEXT_NODE])) {
      val = CAT5_MIN_VAL;
      ADJUST_COEF(vp9_cat5_prob[0], 4);
      ADJUST_COEF(vp9_cat5_prob[1], 3);
      ADJUST_COEF(vp9_cat5_prob[2], 2);
      ADJUST_COEF(vp9_cat5_prob[3], 1);
      ADJUST_COEF(vp9_cat5_prob[4], 0);
      WRITE_COEF_CONTINUE(val, CATEGORY5_TOKEN);
    }
    val = 0;
    val = (val << 1) | vp9_read(r, vp9_cat6_prob[0]);
    val = (val << 1) | vp9_read(r, vp9_cat6_prob[1]);
    val = (val << 1) | vp9_read(r, vp9_cat6_prob[2]);
    val = (val << 1) | vp9_read(r, vp9_cat6_prob[3]);
    val = (val << 1) | vp9_read(r, vp9_cat6_prob[4]);
    val = (val << 1) | vp9_read(r, vp9_cat6_prob[5]);
    val = (val << 1) | vp9_read(r, vp9_cat6_prob[6]);
    val = (val << 1) | vp9_read(r, vp9_cat6_prob[7]);
    val = (val << 1) | vp9_read(r, vp9_cat6_prob[8]);
    val = (val << 1) | vp9_read(r, vp9_cat6_prob[9]);
    val = (val << 1) | vp9_read(r, vp9_cat6_prob[10]);
    val = (val << 1) | vp9_read(r, vp9_cat6_prob[11]);
    val = (val << 1) | vp9_read(r, vp9_cat6_prob[12]);
    val = (val << 1) | vp9_read(r, vp9_cat6_prob[13]);
    val += CAT6_MIN_VAL;
    WRITE_COEF_CONTINUE(val, CATEGORY6_TOKEN);
  }

  return c;
}
static INLINE int read_coeff(const vp9_prob *probs, int n, vp9_reader *r) {
  int i, val = 0;
  for (i = 0; i < n; ++i)
    val = (val << 1) | vp9_read(r, probs[i]);
  return val;
}
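/* Usage sketch (mirrors the category-token branches in decode_coefs above):
 * the extra bits of a category token are read MSB-first against the
 * per-category probability table and added to the category's minimum value. */
static int read_cat2_value_sketch(vp9_reader *r) {
  return CAT2_MIN_VAL + read_coeff(vp9_cat2_prob, 2, r);  // two extra bits
}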