static int read_mv_component(vpx_reader *r, const nmv_component *mvcomp,
                             int usehp) {
  int mag, d, fr, hp;
  const int sign = vpx_read(r, mvcomp->sign);
  const int mv_class = vpx_read_tree(r, vp9_mv_class_tree, mvcomp->classes);
  const int class0 = mv_class == MV_CLASS_0;

  // Integer part
  if (class0) {
    d = vpx_read_tree(r, vp9_mv_class0_tree, mvcomp->class0);
    mag = 0;
  } else {
    int i;
    const int n = mv_class + CLASS0_BITS - 1;  // number of bits
    d = 0;
    for (i = 0; i < n; ++i) d |= vpx_read(r, mvcomp->bits[i]) << i;
    mag = CLASS0_SIZE << (mv_class + 2);
  }

  // Fractional part
  fr = vpx_read_tree(r, vp9_mv_fp_tree,
                     class0 ? mvcomp->class0_fp[d] : mvcomp->fp);

  // High precision part (if hp is not used, the default value of the hp is 1)
  hp = usehp ? vpx_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp) : 1;

  // Result
  mag += ((d << 3) | (fr << 1) | hp) + 1;
  return sign ? -mag : mag;
}
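To make the bit packing in the last step concrete, here is a small worked example with illustrative values (not taken from a real bitstream):

/*
 * Worked example (illustrative values only):
 *   class0 = 1, d = 1, fr = 2, hp = 1
 *   mag    = 0 + ((1 << 3) | (2 << 1) | 1) + 1 = 8 + 4 + 1 + 1 = 14
 * The decoded component is therefore +14 or -14 in 1/8-pel units,
 * depending on the sign bit read at the top of the function.
 */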
static TX_SIZE read_selected_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd,
                                     TX_SIZE max_tx_size, vpx_reader *r) {
  FRAME_COUNTS *counts = xd->counts;
  const int ctx = get_tx_size_context(xd);
  const vpx_prob *tx_probs = get_tx_probs(max_tx_size, ctx, &cm->fc->tx_probs);
  int tx_size = vpx_read(r, tx_probs[0]);
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
    tx_size += vpx_read(r, tx_probs[1]);
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
      tx_size += vpx_read(r, tx_probs[2]);
  }

  if (counts) ++get_tx_counts(max_tx_size, ctx, &counts->tx)[tx_size];
  return (TX_SIZE)tx_size;
}
// Read the reference frame
static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                            vpx_reader *r, int segment_id,
                            MV_REFERENCE_FRAME ref_frame[2]) {
  FRAME_CONTEXT *const fc = cm->fc;
  FRAME_COUNTS *counts = xd->counts;

  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    ref_frame[0] = (MV_REFERENCE_FRAME)get_segdata(&cm->seg, segment_id,
                                                   SEG_LVL_REF_FRAME);
    ref_frame[1] = NONE;
  } else {
    const REFERENCE_MODE mode = read_block_reference_mode(cm, xd, r);
    // FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding
    if (mode == COMPOUND_REFERENCE) {
      const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
      const int ctx = vp9_get_pred_context_comp_ref_p(cm, xd);
      const int bit = vpx_read(r, fc->comp_ref_prob[ctx]);
      if (counts) ++counts->comp_ref[ctx][bit];
      ref_frame[idx] = cm->comp_fixed_ref;
      ref_frame[!idx] = cm->comp_var_ref[bit];
    } else if (mode == SINGLE_REFERENCE) {
      const int ctx0 = vp9_get_pred_context_single_ref_p1(xd);
      const int bit0 = vpx_read(r, fc->single_ref_prob[ctx0][0]);
      if (counts) ++counts->single_ref[ctx0][0][bit0];
      if (bit0) {
        const int ctx1 = vp9_get_pred_context_single_ref_p2(xd);
        const int bit1 = vpx_read(r, fc->single_ref_prob[ctx1][1]);
        if (counts) ++counts->single_ref[ctx1][1][bit1];
        ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
      } else {
        ref_frame[0] = LAST_FRAME;
      }

      ref_frame[1] = NONE;
    } else {
      assert(0 && "Invalid prediction mode.");
    }
  }
}
static int read_is_inter_block(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                               int segment_id, vpx_reader *r) {
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    return get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) != INTRA_FRAME;
  } else {
    const int ctx = vp9_get_intra_inter_context(xd);
    const int is_inter = vpx_read(r, cm->fc->intra_inter_prob[ctx]);
    FRAME_COUNTS *counts = xd->counts;
    if (counts) ++counts->intra_inter[ctx][is_inter];
    return is_inter;
  }
}
static int read_skip(VP9_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
                     vpx_reader *r) {
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int ctx = vp9_get_skip_context(xd);
    const int skip = vpx_read(r, cm->fc->skip_probs[ctx]);
    FRAME_COUNTS *counts = xd->counts;
    if (counts) ++counts->skip[ctx][skip];
    return skip;
  }
}
static REFERENCE_MODE read_block_reference_mode(VP9_COMMON *cm,
                                                const MACROBLOCKD *xd,
                                                vpx_reader *r) {
  if (cm->reference_mode == REFERENCE_MODE_SELECT) {
    const int ctx = vp9_get_reference_mode_context(cm, xd);
    const REFERENCE_MODE mode =
        (REFERENCE_MODE)vpx_read(r, cm->fc->comp_inter_prob[ctx]);
    FRAME_COUNTS *counts = xd->counts;
    if (counts) ++counts->comp_inter[ctx][mode];
    return mode;  // SINGLE_REFERENCE or COMPOUND_REFERENCE
  } else {
    return cm->reference_mode;
  }
}
void encode_with_adaptive_probability() {
  memcpy(tmp, uncompressed, sizeof(uncompressed));
  (*transform)(tmp);  // currently a no-op, but it may be helpful for the EXERCISE
  DynProb encode;
  vpx_writer wri = {0};
  vpx_start_encode(&wri, tmp);
  for (size_t i = 0; i < sizeof(uncompressed); ++i) {
    for (int bit = 1; bit < 256; bit <<= 1) {
      bool cur_bit = !!(tmp[i] & bit);
      vpx_write(&wri, cur_bit, encode.prob);
      encode.record_bit(cur_bit);  // <-- this is a new line for lesson1 that lets the encoder adapt to the data
    }
  }
  vpx_stop_encode(&wri);
  printf("Buffer encoded with final prob(0) = %.2f results in %d size (%.2f%%)\n",
         encode.prob / 255., wri.pos,
         100 * wri.pos / float(sizeof(uncompressed)));

  DynProb decode;
  vpx_reader rea = {0};
  vpx_reader_init(&rea, wri.buffer, wri.pos);
  memset(roundtrip, 0, sizeof(roundtrip));
  for (size_t i = 0; i < sizeof(roundtrip); ++i) {
    for (int bit = 1; bit < 256; bit <<= 1) {
      if (vpx_read(&rea, decode.prob)) {
        roundtrip[i] |= bit;
        decode.record_bit(true);  // <-- this is a new line for lesson1
      } else {
        decode.record_bit(false);  // <-- this is a new line for lesson1
      }
    }
  }
  assert(vpx_reader_has_error(&rea) == 0);
  (*untransform)(uncompressed);  // this is, again, a no-op, but may be helpful for the EXERCISE
  assert(memcmp(uncompressed, roundtrip, sizeof(uncompressed)) == 0);
}
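The function above relies on a DynProb helper whose definition is not shown. A minimal sketch of what it could look like, assuming it simply counts zeros and ones and rescales the zero-probability into the 1..255 range that vpx_write()/vpx_read() expect; the prob field and record_bit() method match the usage above, while the zeros/ones counters and the update rule are assumptions:

#include <algorithm>
#include <cstdint>

struct DynProb {
  uint8_t prob = 128;   // probability of a zero bit, on the 1..255 scale used by vpx
  uint32_t zeros = 1;   // start both counts at 1 so prob never reaches 0 or 256
  uint32_t ones = 1;
  void record_bit(bool bit) {
    if (bit) ++ones; else ++zeros;
    uint32_t p = zeros * 256 / (zeros + ones);
    prob = (uint8_t)std::min<uint32_t>(255, std::max<uint32_t>(1, p));
  }
};

Because both the encoder and decoder call record_bit() with the same bit values in the same order, their probability estimates stay in lockstep and no side information needs to be transmitted.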
static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                                 int mi_row, int mi_col, vpx_reader *r) {
  struct segmentation *const seg = &cm->seg;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  int predicted_segment_id, segment_id;
  const int mi_offset = mi_row * cm->mi_cols + mi_col;
  const int bw = xd->plane[0].n4_w >> 1;
  const int bh = xd->plane[0].n4_h >> 1;

  // TODO(slavarnway): move x_mis, y_mis into xd ?????
  const int x_mis = MIN(cm->mi_cols - mi_col, bw);
  const int y_mis = MIN(cm->mi_rows - mi_row, bh);

  if (!seg->enabled) return 0;  // Default for disabled segmentation

  predicted_segment_id = cm->last_frame_seg_map
                             ? dec_get_segment_id(cm, cm->last_frame_seg_map,
                                                  mi_offset, x_mis, y_mis)
                             : 0;

  if (!seg->update_map) {
    copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map,
                    mi_offset, x_mis, y_mis);
    return predicted_segment_id;
  }

  if (seg->temporal_update) {
    const vpx_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
    mbmi->seg_id_predicted = vpx_read(r, pred_prob);
    segment_id = mbmi->seg_id_predicted ? predicted_segment_id
                                        : read_segment_id(r, seg);
  } else {
    segment_id = read_segment_id(r, seg);
  }
  set_segment_id(cm, mi_offset, x_mis, y_mis, segment_id);
  return segment_id;
}
#if CONFIG_AOM_QM
// CONFIG_AOM_QM builds also pass in the inverse quantization matrices
// (assumed signature, consistent with the iqm use in the body below).
static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
                        tran_low_t *dqcoeff, TX_SIZE tx_size, const int16_t *dq,
                        int ctx, const int16_t *scan, const int16_t *nb,
                        vpx_reader *r, const qm_val_t *iqm[2][TX_SIZES])
#else
static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
                        tran_low_t *dqcoeff, TX_SIZE tx_size, const int16_t *dq,
                        int ctx, const int16_t *scan, const int16_t *nb,
                        vpx_reader *r)
#endif
{
  FRAME_COUNTS *counts = xd->counts;
  const int max_eob = 16 << (tx_size << 1);
  const FRAME_CONTEXT *const fc = xd->fc;
  const int ref = is_inter_block(&xd->mi[0]->mbmi);
#if CONFIG_AOM_QM
  const qm_val_t *iqmatrix = iqm[!ref][tx_size];
#endif
  int band, c = 0;
  const vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
      fc->coef_probs[tx_size][type][ref];
  const vpx_prob *prob;
  unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
  unsigned int(*eob_branch_count)[COEFF_CONTEXTS];
  uint8_t token_cache[32 * 32];
  const uint8_t *band_translate = get_band_translate(tx_size);
  const int dq_shift = (tx_size == TX_32X32);
  int v, token;
  int16_t dqv = dq[0];
  const uint8_t *cat1_prob;
  const uint8_t *cat2_prob;
  const uint8_t *cat3_prob;
  const uint8_t *cat4_prob;
  const uint8_t *cat5_prob;
  const uint8_t *cat6_prob;

  if (counts) {
    coef_counts = counts->coef[tx_size][type][ref];
    eob_branch_count = counts->eob_branch[tx_size][type][ref];
  }

#if CONFIG_VPX_HIGHBITDEPTH
  if (xd->bd > VPX_BITS_8) {
    if (xd->bd == VPX_BITS_10) {
      cat1_prob = vp10_cat1_prob_high10;
      cat2_prob = vp10_cat2_prob_high10;
      cat3_prob = vp10_cat3_prob_high10;
      cat4_prob = vp10_cat4_prob_high10;
      cat5_prob = vp10_cat5_prob_high10;
      cat6_prob = vp10_cat6_prob_high10;
    } else {
      cat1_prob = vp10_cat1_prob_high12;
      cat2_prob = vp10_cat2_prob_high12;
      cat3_prob = vp10_cat3_prob_high12;
      cat4_prob = vp10_cat4_prob_high12;
      cat5_prob = vp10_cat5_prob_high12;
      cat6_prob = vp10_cat6_prob_high12;
    }
  } else {
    cat1_prob = vp10_cat1_prob;
    cat2_prob = vp10_cat2_prob;
    cat3_prob = vp10_cat3_prob;
    cat4_prob = vp10_cat4_prob;
    cat5_prob = vp10_cat5_prob;
    cat6_prob = vp10_cat6_prob;
  }
#else
  cat1_prob = vp10_cat1_prob;
  cat2_prob = vp10_cat2_prob;
  cat3_prob = vp10_cat3_prob;
  cat4_prob = vp10_cat4_prob;
  cat5_prob = vp10_cat5_prob;
  cat6_prob = vp10_cat6_prob;
#endif

  while (c < max_eob) {
    int val = -1;
    band = *band_translate++;
    prob = coef_probs[band][ctx];
    if (counts) ++eob_branch_count[band][ctx];
    if (!vpx_read(r, prob[EOB_CONTEXT_NODE])) {
      INCREMENT_COUNT(EOB_MODEL_TOKEN);
      break;
    }

    while (!vpx_read(r, prob[ZERO_CONTEXT_NODE])) {
      INCREMENT_COUNT(ZERO_TOKEN);
      dqv = dq[1];
      token_cache[scan[c]] = 0;
      ++c;
      if (c >= max_eob) return c;  // zero tokens at the end (no eob token)
      ctx = get_coef_context(nb, token_cache, c);
      band = *band_translate++;
      prob = coef_probs[band][ctx];
    }

    if (!vpx_read(r, prob[ONE_CONTEXT_NODE])) {
      INCREMENT_COUNT(ONE_TOKEN);
      token = ONE_TOKEN;
      val = 1;
    } else {
      INCREMENT_COUNT(TWO_TOKEN);
      token = vpx_read_tree(r, vp10_coef_con_tree,
                            vp10_pareto8_full[prob[PIVOT_NODE] - 1]);
      switch (token) {
        case TWO_TOKEN:
        case THREE_TOKEN:
        case FOUR_TOKEN: val = token; break;
        case CATEGORY1_TOKEN:
          val = CAT1_MIN_VAL + read_coeff(cat1_prob, 1, r);
          break;
        case CATEGORY2_TOKEN:
          val = CAT2_MIN_VAL + read_coeff(cat2_prob, 2, r);
          break;
        case CATEGORY3_TOKEN:
          val = CAT3_MIN_VAL + read_coeff(cat3_prob, 3, r);
          break;
        case CATEGORY4_TOKEN:
          val = CAT4_MIN_VAL + read_coeff(cat4_prob, 4, r);
          break;
        case CATEGORY5_TOKEN:
          val = CAT5_MIN_VAL + read_coeff(cat5_prob, 5, r);
          break;
        case CATEGORY6_TOKEN: {
#if CONFIG_MISC_FIXES
          const int skip_bits = TX_SIZES - 1 - tx_size;
#else
          const int skip_bits = 0;
#endif
          const uint8_t *cat6p = cat6_prob + skip_bits;
#if CONFIG_VPX_HIGHBITDEPTH
          switch (xd->bd) {
            case VPX_BITS_8:
              val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, r);
              break;
            case VPX_BITS_10:
              val = CAT6_MIN_VAL + read_coeff(cat6p, 16 - skip_bits, r);
              break;
            case VPX_BITS_12:
              val = CAT6_MIN_VAL + read_coeff(cat6p, 18 - skip_bits, r);
              break;
            default: assert(0); return -1;
          }
#else
          val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, r);
#endif
          break;
        }
      }
    }
#if CONFIG_AOM_QM
    dqv = ((iqmatrix[scan[c]] * (int)dqv) + (1 << (AOM_QM_BITS - 1))) >>
          AOM_QM_BITS;
#endif
    v = (val * dqv) >> dq_shift;
#if CONFIG_COEFFICIENT_RANGE_CHECKING
#if CONFIG_VPX_HIGHBITDEPTH
    dqcoeff[scan[c]] = highbd_check_range((vpx_read_bit(r) ? -v : v), xd->bd);
#else
    dqcoeff[scan[c]] = check_range(vpx_read_bit(r) ? -v : v);
#endif  // CONFIG_VPX_HIGHBITDEPTH
#else
    dqcoeff[scan[c]] = vpx_read_bit(r) ? -v : v;
#endif  // CONFIG_COEFFICIENT_RANGE_CHECKING
    token_cache[scan[c]] = vp10_pt_energy_class[token];
    ++c;
    ctx = get_coef_context(nb, token_cache, c);
    dqv = dq[1];
  }

  return c;
}
static INLINE int read_coeff(const vpx_prob *probs, int n, vpx_reader *r) {
  int i, val = 0;
  for (i = 0; i < n; ++i) val = (val << 1) | vpx_read(r, probs[i]);
  return val;
}
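read_coeff() pulls the extra magnitude bits of a category token MSB-first, each with its own probability. As a small illustration of how decode_coefs() below uses it: a CATEGORY2_TOKEN covers the magnitudes 7..10 (CAT2_MIN_VAL is 7 in VP9), so the two extension bits select one of four values. The helper name here is hypothetical; the constants and calls are the ones used in decode_coefs():

// Illustrative helper (not part of libvpx): reconstructing a CATEGORY2
// magnitude exactly as the corresponding case in decode_coefs() does.
static int example_read_cat2(vpx_reader *r) {
  const int extra = read_coeff(vp9_cat2_prob, 2, r);  // 0..3, read MSB first
  return CAT2_MIN_VAL + extra;                        // 7..10
}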
static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
                        tran_low_t *dqcoeff, TX_SIZE tx_size, const int16_t *dq,
                        int ctx, const int16_t *scan, const int16_t *nb,
                        vpx_reader *r) {
  FRAME_COUNTS *counts = xd->counts;
  const int max_eob = 16 << (tx_size << 1);
  const FRAME_CONTEXT *const fc = xd->fc;
  const int ref = is_inter_block(xd->mi[0]);
  int band, c = 0;
  const vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
      fc->coef_probs[tx_size][type][ref];
  const vpx_prob *prob;
  unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
  unsigned int(*eob_branch_count)[COEFF_CONTEXTS];
  uint8_t token_cache[32 * 32];
  const uint8_t *band_translate = get_band_translate(tx_size);
  const int dq_shift = (tx_size == TX_32X32);
  int v, token;
  int16_t dqv = dq[0];
  const uint8_t *const cat6_prob =
#if CONFIG_VP9_HIGHBITDEPTH
      (xd->bd == VPX_BITS_12) ? vp9_cat6_prob_high12
      : (xd->bd == VPX_BITS_10) ? vp9_cat6_prob_high12 + 2
      :
#endif  // CONFIG_VP9_HIGHBITDEPTH
      vp9_cat6_prob;
  const int cat6_bits =
#if CONFIG_VP9_HIGHBITDEPTH
      (xd->bd == VPX_BITS_12) ? 18
      : (xd->bd == VPX_BITS_10) ? 16
      :
#endif  // CONFIG_VP9_HIGHBITDEPTH
      14;

  if (counts) {
    coef_counts = counts->coef[tx_size][type][ref];
    eob_branch_count = counts->eob_branch[tx_size][type][ref];
  }

  while (c < max_eob) {
    int val = -1;
    band = *band_translate++;
    prob = coef_probs[band][ctx];
    if (counts) ++eob_branch_count[band][ctx];
    if (!vpx_read(r, prob[EOB_CONTEXT_NODE])) {
      INCREMENT_COUNT(EOB_MODEL_TOKEN);
      break;
    }

    while (!vpx_read(r, prob[ZERO_CONTEXT_NODE])) {
      INCREMENT_COUNT(ZERO_TOKEN);
      dqv = dq[1];
      token_cache[scan[c]] = 0;
      ++c;
      if (c >= max_eob) return c;  // zero tokens at the end (no eob token)
      ctx = get_coef_context(nb, token_cache, c);
      band = *band_translate++;
      prob = coef_probs[band][ctx];
    }

    if (!vpx_read(r, prob[ONE_CONTEXT_NODE])) {
      INCREMENT_COUNT(ONE_TOKEN);
      token = ONE_TOKEN;
      val = 1;
    } else {
      INCREMENT_COUNT(TWO_TOKEN);
      token = vpx_read_tree(r, vp9_coef_con_tree,
                            vp9_pareto8_full[prob[PIVOT_NODE] - 1]);
      switch (token) {
        case TWO_TOKEN:
        case THREE_TOKEN:
        case FOUR_TOKEN: val = token; break;
        case CATEGORY1_TOKEN:
          val = CAT1_MIN_VAL + read_coeff(vp9_cat1_prob, 1, r);
          break;
        case CATEGORY2_TOKEN:
          val = CAT2_MIN_VAL + read_coeff(vp9_cat2_prob, 2, r);
          break;
        case CATEGORY3_TOKEN:
          val = CAT3_MIN_VAL + read_coeff(vp9_cat3_prob, 3, r);
          break;
        case CATEGORY4_TOKEN:
          val = CAT4_MIN_VAL + read_coeff(vp9_cat4_prob, 4, r);
          break;
        case CATEGORY5_TOKEN:
          val = CAT5_MIN_VAL + read_coeff(vp9_cat5_prob, 5, r);
          break;
        case CATEGORY6_TOKEN:
          val = CAT6_MIN_VAL + read_coeff(cat6_prob, cat6_bits, r);
          break;
      }
    }
    v = (val * dqv) >> dq_shift;
#if CONFIG_COEFFICIENT_RANGE_CHECKING
#if CONFIG_VP9_HIGHBITDEPTH
    dqcoeff[scan[c]] = highbd_check_range((vpx_read_bit(r) ? -v : v), xd->bd);
#else
    dqcoeff[scan[c]] = check_range(vpx_read_bit(r) ? -v : v);
#endif  // CONFIG_VP9_HIGHBITDEPTH
#else
    dqcoeff[scan[c]] = vpx_read_bit(r) ? -v : v;
#endif  // CONFIG_COEFFICIENT_RANGE_CHECKING
    token_cache[scan[c]] = vp9_pt_energy_class[token];
    ++c;
    ctx = get_coef_context(nb, token_cache, c);
    dqv = dq[1];
  }

  return c;
}