static void update_skip_probs(VP9_COMMON *cm, vpx_writer *w,
                              FRAME_COUNTS *counts) {
  int k;

  for (k = 0; k < SKIP_CONTEXTS; ++k)
    vp9_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
}

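// Converts per-symbol counts into per-branch counts for the given tree, then
// conditionally signals a differential update for each of the n - 1 internal
// node probabilities.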
static void prob_diff_update(const vpx_tree_index *tree,
                             vpx_prob probs[/*n - 1*/],
                             const unsigned int counts[/*n - 1*/], int n,
                             vpx_writer *w) {
  int i;
  unsigned int branch_ct[32][2];

  // Assuming max number of probabilities <= 32
  assert(n <= 32);

  vp9_tree_probs_from_distribution(tree, branch_ct, counts);
  for (i = 0; i < n - 1; ++i)
    vp9_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
}

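// Signals the frame's transform mode as a 2-bit literal, with one extra bit
// to distinguish ALLOW_32X32 from TX_MODE_SELECT. When per-block selection is
// enabled, it also sends conditional differential updates for the tx-size
// probabilities of every context.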
static void encode_txfm_probs(VP9_COMMON *cm, vpx_writer *w,
                              FRAME_COUNTS *counts) {
  // Mode
  vpx_write_literal(w, VPXMIN(cm->tx_mode, ALLOW_32X32), 2);
  if (cm->tx_mode >= ALLOW_32X32)
    vpx_write_bit(w, cm->tx_mode == TX_MODE_SELECT);

  // Probabilities
  if (cm->tx_mode == TX_MODE_SELECT) {
    int i, j;
    unsigned int ct_8x8p[TX_SIZES - 3][2];
    unsigned int ct_16x16p[TX_SIZES - 2][2];
    unsigned int ct_32x32p[TX_SIZES - 1][2];

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], ct_8x8p);
      for (j = 0; j < TX_SIZES - 3; j++)
        vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p8x8[i][j], ct_8x8p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], ct_16x16p);
      for (j = 0; j < TX_SIZES - 2; j++)
        vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p16x16[i][j],
                                  ct_16x16p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], ct_32x32p);
      for (j = 0; j < TX_SIZES - 1; j++)
        vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p32x32[i][j],
                                  ct_32x32p[j]);
    }
  }
}

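// Writes the compressed header: forward probability updates for transform
// size, coefficients, and skip flags, plus (on frames that are not
// intra-only) inter-mode, interpolation-filter, intra/inter, reference-mode,
// reference-frame, y-mode, partition, and motion-vector probabilities.
// Returns the number of bytes produced by the boolean coder.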
static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  FRAME_CONTEXT *const fc = cm->fc;
  FRAME_COUNTS *counts = cpi->td.counts;
  vpx_writer header_bc;

  vpx_start_encode(&header_bc, data);

  if (xd->lossless)
    cm->tx_mode = ONLY_4X4;
  else
    encode_txfm_probs(cm, &header_bc, counts);

  update_coef_probs(cpi, &header_bc);
  update_skip_probs(cm, &header_bc, counts);

  if (!frame_is_intra_only(cm)) {
    int i;

    for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
      prob_diff_update(vp9_inter_mode_tree, cm->fc->inter_mode_probs[i],
                       counts->inter_mode[i], INTER_MODES, &header_bc);

    if (cm->interp_filter == SWITCHABLE)
      update_switchable_interp_probs(cm, &header_bc, counts);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
                                counts->intra_inter[i]);

    if (cpi->allow_comp_inter_inter) {
      const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
      const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;

      vpx_write_bit(&header_bc, use_compound_pred);
      if (use_compound_pred) {
        vpx_write_bit(&header_bc, use_hybrid_pred);
        if (use_hybrid_pred)
          for (i = 0; i < COMP_INTER_CONTEXTS; i++)
            vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                      counts->comp_inter[i]);
      }
    }

    if (cm->reference_mode != COMPOUND_REFERENCE) {
      for (i = 0; i < REF_CONTEXTS; i++) {
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
                                  counts->single_ref[i][0]);
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
                                  counts->single_ref[i][1]);
      }
    }

    if (cm->reference_mode != SINGLE_REFERENCE)
      for (i = 0; i < REF_CONTEXTS; i++)
        vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
                                  counts->comp_ref[i]);

    for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
      prob_diff_update(vp9_intra_mode_tree, cm->fc->y_mode_prob[i],
                       counts->y_mode[i], INTRA_MODES, &header_bc);

    for (i = 0; i < PARTITION_CONTEXTS; ++i)
      prob_diff_update(vp9_partition_tree, fc->partition_prob[i],
                       counts->partition[i], PARTITION_TYPES, &header_bc);

    vp9_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc,
                        &counts->mv);
  }

  vpx_stop_encode(&header_bc);
  assert(header_bc.pos <= 0xffff);

  return header_bc.pos;
}

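// A minimal sketch of the caller's side, assuming the usual libvpx packing
// flow (simplified; the real packer, vp9_pack_bitstream(), also clears FPU
// state and appends tile data). The return value is backfilled into the
// 16-bit header-size field of the uncompressed header, which is why
// write_compressed_header() asserts that header_bc.pos fits in 16 bits:
//
//   struct vpx_write_bit_buffer wb = { dest, 0 };
//   struct vpx_write_bit_buffer saved_wb;
//   size_t first_part_size;
//
//   write_uncompressed_header(cpi, &wb);
//   saved_wb = wb;
//   vpx_wb_write_literal(&wb, 0, 16);  // placeholder for the header size
//
//   first_part_size =
//       write_compressed_header(cpi, dest + vpx_wb_bytes_written(&wb));
//   vpx_wb_write_literal(&saved_wb, (int)first_part_size, 16);  // backfill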