#if CONFIG_AOM_QM
static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
                        tran_low_t *dqcoeff, TX_SIZE tx_size, const int16_t *dq,
                        int ctx, const int16_t *scan, const int16_t *nb,
                        vpx_reader *r, const qm_val_t *iqm[2][TX_SIZES])
#else
static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
                        tran_low_t *dqcoeff, TX_SIZE tx_size, const int16_t *dq,
                        int ctx, const int16_t *scan, const int16_t *nb,
                        vpx_reader *r)
#endif
{
  FRAME_COUNTS *counts = xd->counts;
  const int max_eob = 16 << (tx_size << 1);
  const FRAME_CONTEXT *const fc = xd->fc;
  const int ref = is_inter_block(&xd->mi[0]->mbmi);
#if CONFIG_AOM_QM
  const qm_val_t *iqmatrix = iqm[!ref][tx_size];
#endif
  int band, c = 0;
  const vpx_prob (*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
      fc->coef_probs[tx_size][type][ref];
  const vpx_prob *prob;
  unsigned int (*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
  unsigned int (*eob_branch_count)[COEFF_CONTEXTS];
  uint8_t token_cache[32 * 32];
  const uint8_t *band_translate = get_band_translate(tx_size);
  const int dq_shift = (tx_size == TX_32X32);
  int v, token;
  int16_t dqv = dq[0];
  const uint8_t *cat1_prob;
  const uint8_t *cat2_prob;
  const uint8_t *cat3_prob;
  const uint8_t *cat4_prob;
  const uint8_t *cat5_prob;
  const uint8_t *cat6_prob;

  if (counts) {
    coef_counts = counts->coef[tx_size][type][ref];
    eob_branch_count = counts->eob_branch[tx_size][type][ref];
  }

  // Select the extra-bit probability tables for the stream's bit depth.
#if CONFIG_VPX_HIGHBITDEPTH
  if (xd->bd > VPX_BITS_8) {
    if (xd->bd == VPX_BITS_10) {
      cat1_prob = vp10_cat1_prob_high10;
      cat2_prob = vp10_cat2_prob_high10;
      cat3_prob = vp10_cat3_prob_high10;
      cat4_prob = vp10_cat4_prob_high10;
      cat5_prob = vp10_cat5_prob_high10;
      cat6_prob = vp10_cat6_prob_high10;
    } else {
      cat1_prob = vp10_cat1_prob_high12;
      cat2_prob = vp10_cat2_prob_high12;
      cat3_prob = vp10_cat3_prob_high12;
      cat4_prob = vp10_cat4_prob_high12;
      cat5_prob = vp10_cat5_prob_high12;
      cat6_prob = vp10_cat6_prob_high12;
    }
  } else {
    cat1_prob = vp10_cat1_prob;
    cat2_prob = vp10_cat2_prob;
    cat3_prob = vp10_cat3_prob;
    cat4_prob = vp10_cat4_prob;
    cat5_prob = vp10_cat5_prob;
    cat6_prob = vp10_cat6_prob;
  }
#else
  cat1_prob = vp10_cat1_prob;
  cat2_prob = vp10_cat2_prob;
  cat3_prob = vp10_cat3_prob;
  cat4_prob = vp10_cat4_prob;
  cat5_prob = vp10_cat5_prob;
  cat6_prob = vp10_cat6_prob;
#endif

  while (c < max_eob) {
    int val = -1;
    band = *band_translate++;
    prob = coef_probs[band][ctx];
    if (counts) ++eob_branch_count[band][ctx];
    if (!vpx_read(r, prob[EOB_CONTEXT_NODE])) {
      INCREMENT_COUNT(EOB_MODEL_TOKEN);
      break;
    }

    while (!vpx_read(r, prob[ZERO_CONTEXT_NODE])) {
      INCREMENT_COUNT(ZERO_TOKEN);
      dqv = dq[1];
      token_cache[scan[c]] = 0;
      ++c;
      if (c >= max_eob) return c;  // zero tokens at the end (no eob token)
      ctx = get_coef_context(nb, token_cache, c);
      band = *band_translate++;
      prob = coef_probs[band][ctx];
    }

    if (!vpx_read(r, prob[ONE_CONTEXT_NODE])) {
      INCREMENT_COUNT(ONE_TOKEN);
      token = ONE_TOKEN;
      val = 1;
    } else {
      INCREMENT_COUNT(TWO_TOKEN);
      token = vpx_read_tree(r, vp10_coef_con_tree,
                            vp10_pareto8_full[prob[PIVOT_NODE] - 1]);
      switch (token) {
        case TWO_TOKEN:
        case THREE_TOKEN:
        case FOUR_TOKEN: val = token; break;
        case CATEGORY1_TOKEN:
          val = CAT1_MIN_VAL + read_coeff(cat1_prob, 1, r);
          break;
        case CATEGORY2_TOKEN:
          val = CAT2_MIN_VAL + read_coeff(cat2_prob, 2, r);
          break;
        case CATEGORY3_TOKEN:
          val = CAT3_MIN_VAL + read_coeff(cat3_prob, 3, r);
          break;
        case CATEGORY4_TOKEN:
          val = CAT4_MIN_VAL + read_coeff(cat4_prob, 4, r);
          break;
        case CATEGORY5_TOKEN:
          val = CAT5_MIN_VAL + read_coeff(cat5_prob, 5, r);
          break;
        case CATEGORY6_TOKEN: {
#if CONFIG_MISC_FIXES
          const int skip_bits = TX_SIZES - 1 - tx_size;
#else
          const int skip_bits = 0;
#endif
          const uint8_t *cat6p = cat6_prob + skip_bits;
#if CONFIG_VPX_HIGHBITDEPTH
          switch (xd->bd) {
            case VPX_BITS_8:
              val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, r);
              break;
            case VPX_BITS_10:
              val = CAT6_MIN_VAL + read_coeff(cat6p, 16 - skip_bits, r);
              break;
            case VPX_BITS_12:
              val = CAT6_MIN_VAL + read_coeff(cat6p, 18 - skip_bits, r);
              break;
            default:
              assert(0);
              return -1;
          }
#else
          val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, r);
#endif
          break;
        }
      }
    }
#if CONFIG_AOM_QM
    // Scale the base dequantizer by the inverse quant-matrix weight.
    dqv = ((iqmatrix[scan[c]] * (int)dqv) + (1 << (AOM_QM_BITS - 1))) >>
          AOM_QM_BITS;
#endif
    v = (val * dqv) >> dq_shift;
#if CONFIG_COEFFICIENT_RANGE_CHECKING
#if CONFIG_VPX_HIGHBITDEPTH
    dqcoeff[scan[c]] = highbd_check_range((vpx_read_bit(r) ? -v : v), xd->bd);
#else
    dqcoeff[scan[c]] = check_range(vpx_read_bit(r) ? -v : v);
#endif  // CONFIG_VPX_HIGHBITDEPTH
#else
    dqcoeff[scan[c]] = vpx_read_bit(r) ? -v : v;
#endif  // CONFIG_COEFFICIENT_RANGE_CHECKING
    token_cache[scan[c]] = vp10_pt_energy_class[token];
    ++c;
    ctx = get_coef_context(nb, token_cache, c);
    dqv = dq[1];
  }

  return c;
}
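/* read_coeff() and INCREMENT_COUNT are referenced above but not defined in
 * this excerpt. The sketches below follow the upstream libvpx/libaom
 * detokenizer: read_coeff() reads n extra-magnitude bits MSB-first, and
 * INCREMENT_COUNT bumps the per-band/context token counter only when counts
 * are being kept. Treat them as minimal sketches, not authoritative
 * definitions. */
static INLINE int read_coeff(const vpx_prob *probs, int n, vpx_reader *r) {
  int i, val = 0;
  for (i = 0; i < n; ++i) val = (val << 1) | vpx_read(r, probs[i]);
  return val;
}

#define INCREMENT_COUNT(token)                   \
  do {                                           \
    if (counts) ++coef_counts[band][ctx][token]; \
  } while (0)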
static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd, PLANE_TYPE type,
                        int16_t *dqcoeff, TX_SIZE tx_size, const int16_t *dq,
                        int ctx, const int16_t *scan, const int16_t *nb,
                        vp9_reader *r) {
  const int max_eob = 16 << (tx_size << 1);
  const FRAME_CONTEXT *const fc = &cm->fc;
  FRAME_COUNTS *const counts = &cm->counts;
  const int ref = is_inter_block(&xd->mi[0]->mbmi);
  int band, c = 0;
  const vp9_prob (*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
      fc->coef_probs[tx_size][type][ref];
  const vp9_prob *prob;
  unsigned int (*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1] =
      counts->coef[tx_size][type][ref];
  unsigned int (*eob_branch_count)[COEFF_CONTEXTS] =
      counts->eob_branch[tx_size][type][ref];
  uint8_t token_cache[32 * 32];
  const uint8_t *band_translate = get_band_translate(tx_size);
  const int dq_shift = (tx_size == TX_32X32);
  int v;
  int16_t dqv = dq[0];

  while (c < max_eob) {
    int val;
    band = *band_translate++;
    prob = coef_probs[band][ctx];
    if (!cm->frame_parallel_decoding_mode) ++eob_branch_count[band][ctx];
    if (!vp9_read(r, prob[EOB_CONTEXT_NODE])) {
      INCREMENT_COUNT(EOB_MODEL_TOKEN);
      break;
    }

    while (!vp9_read(r, prob[ZERO_CONTEXT_NODE])) {
      INCREMENT_COUNT(ZERO_TOKEN);
      dqv = dq[1];
      token_cache[scan[c]] = 0;
      ++c;
      if (c >= max_eob) return c;  // zero tokens at the end (no eob token)
      ctx = get_coef_context(nb, token_cache, c);
      band = *band_translate++;
      prob = coef_probs[band][ctx];
    }

    // ONE_CONTEXT_NODE_0_
    if (!vp9_read(r, prob[ONE_CONTEXT_NODE])) {
      INCREMENT_COUNT(ONE_TOKEN);
      WRITE_COEF_CONTINUE(1, ONE_TOKEN);
    }

    INCREMENT_COUNT(TWO_TOKEN);

    prob = vp9_pareto8_full[prob[PIVOT_NODE] - 1];

    if (!vp9_read(r, prob[LOW_VAL_CONTEXT_NODE])) {
      if (!vp9_read(r, prob[TWO_CONTEXT_NODE])) {
        WRITE_COEF_CONTINUE(2, TWO_TOKEN);
      }
      if (!vp9_read(r, prob[THREE_CONTEXT_NODE])) {
        WRITE_COEF_CONTINUE(3, THREE_TOKEN);
      }
      WRITE_COEF_CONTINUE(4, FOUR_TOKEN);
    }

    if (!vp9_read(r, prob[HIGH_LOW_CONTEXT_NODE])) {
      if (!vp9_read(r, prob[CAT_ONE_CONTEXT_NODE])) {
        val = CAT1_MIN_VAL;
        ADJUST_COEF(vp9_cat1_prob[0], 0);
        WRITE_COEF_CONTINUE(val, CATEGORY1_TOKEN);
      }
      val = CAT2_MIN_VAL;
      ADJUST_COEF(vp9_cat2_prob[0], 1);
      ADJUST_COEF(vp9_cat2_prob[1], 0);
      WRITE_COEF_CONTINUE(val, CATEGORY2_TOKEN);
    }

    if (!vp9_read(r, prob[CAT_THREEFOUR_CONTEXT_NODE])) {
      if (!vp9_read(r, prob[CAT_THREE_CONTEXT_NODE])) {
        val = CAT3_MIN_VAL;
        ADJUST_COEF(vp9_cat3_prob[0], 2);
        ADJUST_COEF(vp9_cat3_prob[1], 1);
        ADJUST_COEF(vp9_cat3_prob[2], 0);
        WRITE_COEF_CONTINUE(val, CATEGORY3_TOKEN);
      }
      val = CAT4_MIN_VAL;
      ADJUST_COEF(vp9_cat4_prob[0], 3);
      ADJUST_COEF(vp9_cat4_prob[1], 2);
      ADJUST_COEF(vp9_cat4_prob[2], 1);
      ADJUST_COEF(vp9_cat4_prob[3], 0);
      WRITE_COEF_CONTINUE(val, CATEGORY4_TOKEN);
    }

    if (!vp9_read(r, prob[CAT_FIVE_CONTEXT_NODE])) {
      val = CAT5_MIN_VAL;
      ADJUST_COEF(vp9_cat5_prob[0], 4);
      ADJUST_COEF(vp9_cat5_prob[1], 3);
      ADJUST_COEF(vp9_cat5_prob[2], 2);
      ADJUST_COEF(vp9_cat5_prob[3], 1);
      ADJUST_COEF(vp9_cat5_prob[4], 0);
      WRITE_COEF_CONTINUE(val, CATEGORY5_TOKEN);
    }

    // CATEGORY6: read the fourteen extra-magnitude bits MSB-first.
    {
      int i;
      val = 0;
      for (i = 0; i < 14; ++i)
        val = (val << 1) | vp9_read(r, vp9_cat6_prob[i]);
    }
    val += CAT6_MIN_VAL;
    WRITE_COEF_CONTINUE(val, CATEGORY6_TOKEN);
  }

  return c;
}
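/* Sketches of the macros the function above relies on, based on the
 * vp9/decoder/vp9_detokenize.c of this era; reproduced here for context as
 * assumptions rather than verbatim quotes. WRITE_COEF_CONTINUE dequantizes,
 * applies the sign bit, records the token's energy class, and restarts the
 * outer loop; ADJUST_COEF appends one extra-magnitude bit; this vintage of
 * INCREMENT_COUNT gates counting on frame-parallel decoding being off. */
#define WRITE_COEF_CONTINUE(val, token)                \
  {                                                    \
    v = (val * dqv) >> dq_shift;                       \
    dqcoeff[scan[c]] = vp9_read_bit(r) ? -v : v;       \
    token_cache[scan[c]] = vp9_pt_energy_class[token]; \
    ++c;                                               \
    ctx = get_coef_context(nb, token_cache, c);        \
    dqv = dq[1];                                       \
    continue;                                          \
  }

#define ADJUST_COEF(prob, bits_count)         \
  do {                                        \
    val += (vp9_read(r, prob) << bits_count); \
  } while (0)

#define INCREMENT_COUNT(token)                 \
  do {                                         \
    if (!cm->frame_parallel_decoding_mode)     \
      ++coef_counts[band][ctx][token];         \
  } while (0)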
static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
                        tran_low_t *dqcoeff, TX_SIZE tx_size, const int16_t *dq,
                        int ctx, const int16_t *scan, const int16_t *nb,
                        vpx_reader *r) {
  FRAME_COUNTS *counts = xd->counts;
  const int max_eob = 16 << (tx_size << 1);
  const FRAME_CONTEXT *const fc = xd->fc;
  const int ref = is_inter_block(xd->mi[0]);
  int band, c = 0;
  const vpx_prob (*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
      fc->coef_probs[tx_size][type][ref];
  const vpx_prob *prob;
  unsigned int (*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
  unsigned int (*eob_branch_count)[COEFF_CONTEXTS];
  uint8_t token_cache[32 * 32];
  const uint8_t *band_translate = get_band_translate(tx_size);
  const int dq_shift = (tx_size == TX_32X32);
  int v, token;
  int16_t dqv = dq[0];
  // The 10-bit table is the 12-bit table offset by two entries.
  const uint8_t *const cat6_prob =
#if CONFIG_VP9_HIGHBITDEPTH
      (xd->bd == VPX_BITS_12)
          ? vp9_cat6_prob_high12
          : (xd->bd == VPX_BITS_10) ? vp9_cat6_prob_high12 + 2 :
#endif  // CONFIG_VP9_HIGHBITDEPTH
                                    vp9_cat6_prob;
  const int cat6_bits =
#if CONFIG_VP9_HIGHBITDEPTH
      (xd->bd == VPX_BITS_12) ? 18 : (xd->bd == VPX_BITS_10) ? 16 :
#endif  // CONFIG_VP9_HIGHBITDEPTH
                                                             14;

  if (counts) {
    coef_counts = counts->coef[tx_size][type][ref];
    eob_branch_count = counts->eob_branch[tx_size][type][ref];
  }

  while (c < max_eob) {
    int val = -1;
    band = *band_translate++;
    prob = coef_probs[band][ctx];
    if (counts) ++eob_branch_count[band][ctx];
    if (!vpx_read(r, prob[EOB_CONTEXT_NODE])) {
      INCREMENT_COUNT(EOB_MODEL_TOKEN);
      break;
    }

    while (!vpx_read(r, prob[ZERO_CONTEXT_NODE])) {
      INCREMENT_COUNT(ZERO_TOKEN);
      dqv = dq[1];
      token_cache[scan[c]] = 0;
      ++c;
      if (c >= max_eob) return c;  // zero tokens at the end (no eob token)
      ctx = get_coef_context(nb, token_cache, c);
      band = *band_translate++;
      prob = coef_probs[band][ctx];
    }

    if (!vpx_read(r, prob[ONE_CONTEXT_NODE])) {
      INCREMENT_COUNT(ONE_TOKEN);
      token = ONE_TOKEN;
      val = 1;
    } else {
      INCREMENT_COUNT(TWO_TOKEN);
      token = vpx_read_tree(r, vp9_coef_con_tree,
                            vp9_pareto8_full[prob[PIVOT_NODE] - 1]);
      switch (token) {
        case TWO_TOKEN:
        case THREE_TOKEN:
        case FOUR_TOKEN: val = token; break;
        case CATEGORY1_TOKEN:
          val = CAT1_MIN_VAL + read_coeff(vp9_cat1_prob, 1, r);
          break;
        case CATEGORY2_TOKEN:
          val = CAT2_MIN_VAL + read_coeff(vp9_cat2_prob, 2, r);
          break;
        case CATEGORY3_TOKEN:
          val = CAT3_MIN_VAL + read_coeff(vp9_cat3_prob, 3, r);
          break;
        case CATEGORY4_TOKEN:
          val = CAT4_MIN_VAL + read_coeff(vp9_cat4_prob, 4, r);
          break;
        case CATEGORY5_TOKEN:
          val = CAT5_MIN_VAL + read_coeff(vp9_cat5_prob, 5, r);
          break;
        case CATEGORY6_TOKEN:
          val = CAT6_MIN_VAL + read_coeff(cat6_prob, cat6_bits, r);
          break;
      }
    }
    v = (val * dqv) >> dq_shift;
#if CONFIG_COEFFICIENT_RANGE_CHECKING
#if CONFIG_VP9_HIGHBITDEPTH
    dqcoeff[scan[c]] = highbd_check_range((vpx_read_bit(r) ? -v : v), xd->bd);
#else
    dqcoeff[scan[c]] = check_range(vpx_read_bit(r) ? -v : v);
#endif  // CONFIG_VP9_HIGHBITDEPTH
#else
    dqcoeff[scan[c]] = vpx_read_bit(r) ? -v : v;
#endif  // CONFIG_COEFFICIENT_RANGE_CHECKING
    token_cache[scan[c]] = vp9_pt_energy_class[token];
    ++c;
    ctx = get_coef_context(nb, token_cache, c);
    dqv = dq[1];
  }

  return c;
}
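/* get_band_translate() is not defined in this excerpt. In upstream libvpx it
 * maps each scan position to a coefficient band: a 16-entry table for 4x4
 * transforms and a 1024-entry table (vp9_coefband_trans_8x8plus, omitted here
 * for brevity) for all larger sizes. A sketch under that assumption: */
static const uint8_t vp9_coefband_trans_4x4[16] = {
  0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
};

static INLINE const uint8_t *get_band_translate(TX_SIZE tx_size) {
  return tx_size == TX_4X4 ? vp9_coefband_trans_4x4
                           : vp9_coefband_trans_8x8plus;
}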
static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd, vp9_reader *r,
                        int block_idx, PLANE_TYPE type, int seg_eob,
                        int16_t *qcoeff_ptr, TX_SIZE tx_size, const int16_t *dq,
                        int pt) {
  const FRAME_CONTEXT *const fc = &cm->fc;
  FRAME_COUNTS *const counts = &cm->counts;
  const int ref = is_inter_block(&xd->mi_8x8[0]->mbmi);
  int band, c = 0;
  const vp9_prob (*coef_probs)[PREV_COEF_CONTEXTS][UNCONSTRAINED_NODES] =
      fc->coef_probs[tx_size][type][ref];
  vp9_prob coef_probs_full[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
  uint8_t load_map[COEF_BANDS][PREV_COEF_CONTEXTS] = { { 0 } };
  const vp9_prob *prob;
  vp9_coeff_count_model *coef_counts = counts->coef[tx_size];
  const int16_t *scan, *nb;
  const uint8_t *const band_translate = get_band_translate(tx_size);
  uint8_t token_cache[1024];

  get_scan(xd, tx_size, type, block_idx, &scan, &nb);

  while (1) {
    int val;
    const uint8_t *cat6 = cat6_prob;

    if (c >= seg_eob) break;
    if (c) pt = get_coef_context(nb, token_cache, c);
    band = get_coef_band(band_translate, c);
    prob = coef_probs[band][pt];
    if (!cm->frame_parallel_decoding_mode)
      ++counts->eob_branch[tx_size][type][ref][band][pt];
    if (!vp9_read(r, prob[EOB_CONTEXT_NODE]))
      break;

  SKIP_START:
    if (c >= seg_eob) break;
    if (c) pt = get_coef_context(nb, token_cache, c);
    band = get_coef_band(band_translate, c);
    prob = coef_probs[band][pt];

    if (!vp9_read(r, prob[ZERO_CONTEXT_NODE])) {
      INCREMENT_COUNT(ZERO_TOKEN);
      ++c;
      goto SKIP_START;
    }

    // ONE_CONTEXT_NODE_0_
    if (!vp9_read(r, prob[ONE_CONTEXT_NODE])) {
      WRITE_COEF_CONTINUE(1, ONE_TOKEN);
    }

    // Load full probabilities if not already loaded
    if (!load_map[band][pt]) {
      vp9_model_to_full_probs(coef_probs[band][pt], coef_probs_full[band][pt]);
      load_map[band][pt] = 1;
    }
    prob = coef_probs_full[band][pt];

    // LOW_VAL_CONTEXT_NODE_0_
    if (!vp9_read(r, prob[LOW_VAL_CONTEXT_NODE])) {
      if (!vp9_read(r, prob[TWO_CONTEXT_NODE])) {
        WRITE_COEF_CONTINUE(2, TWO_TOKEN);
      }
      if (!vp9_read(r, prob[THREE_CONTEXT_NODE])) {
        WRITE_COEF_CONTINUE(3, THREE_TOKEN);
      }
      WRITE_COEF_CONTINUE(4, FOUR_TOKEN);
    }

    // HIGH_LOW_CONTEXT_NODE_0_
    if (!vp9_read(r, prob[HIGH_LOW_CONTEXT_NODE])) {
      if (!vp9_read(r, prob[CAT_ONE_CONTEXT_NODE])) {
        val = CAT1_MIN_VAL;
        ADJUST_COEF(CAT1_PROB0, 0);
        WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY1);
      }
      val = CAT2_MIN_VAL;
      ADJUST_COEF(CAT2_PROB1, 1);
      ADJUST_COEF(CAT2_PROB0, 0);
      WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY2);
    }

    // CAT_THREEFOUR_CONTEXT_NODE_0_
    if (!vp9_read(r, prob[CAT_THREEFOUR_CONTEXT_NODE])) {
      if (!vp9_read(r, prob[CAT_THREE_CONTEXT_NODE])) {
        val = CAT3_MIN_VAL;
        ADJUST_COEF(CAT3_PROB2, 2);
        ADJUST_COEF(CAT3_PROB1, 1);
        ADJUST_COEF(CAT3_PROB0, 0);
        WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY3);
      }
      val = CAT4_MIN_VAL;
      ADJUST_COEF(CAT4_PROB3, 3);
      ADJUST_COEF(CAT4_PROB2, 2);
      ADJUST_COEF(CAT4_PROB1, 1);
      ADJUST_COEF(CAT4_PROB0, 0);
      WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY4);
    }

    // CAT_FIVE_CONTEXT_NODE_0_:
    if (!vp9_read(r, prob[CAT_FIVE_CONTEXT_NODE])) {
      val = CAT5_MIN_VAL;
      ADJUST_COEF(CAT5_PROB4, 4);
      ADJUST_COEF(CAT5_PROB3, 3);
      ADJUST_COEF(CAT5_PROB2, 2);
      ADJUST_COEF(CAT5_PROB1, 1);
      ADJUST_COEF(CAT5_PROB0, 0);
      WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY5);
    }

    // CATEGORY6: cat6_prob is a zero-terminated array of extra-bit probs.
    val = 0;
    while (*cat6) {
      val = (val << 1) | vp9_read(r, *cat6++);
    }
    val += CAT6_MIN_VAL;
    WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY6);
  }

  if (c < seg_eob) {
    if (!cm->frame_parallel_decoding_mode)
      ++coef_counts[type][ref][band][pt][DCT_EOB_MODEL_TOKEN];
  }

  return c;
}
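/* The category constants used by the decoders above are not defined in this
 * excerpt. The minimum magnitudes below are the standard VP9 values (category
 * n spans CATn_MIN_VAL up to the start of the next category); the CATn_PROBk
 * and vp9_catn_prob entries are fixed 8-bit probabilities for the
 * extra-magnitude bits, whose values are omitted here. A sketch, assuming the
 * usual VP9 token alphabet: */
#define CAT1_MIN_VAL 5  /* 1 extra bit:  5..6   */
#define CAT2_MIN_VAL 7  /* 2 extra bits: 7..10  */
#define CAT3_MIN_VAL 11 /* 3 extra bits: 11..18 */
#define CAT4_MIN_VAL 19 /* 4 extra bits: 19..34 */
#define CAT5_MIN_VAL 35 /* 5 extra bits: 35..66 */
#define CAT6_MIN_VAL 67 /* 14 extra bits at 8bpp: 67..16450 */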
static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd,
                        FRAME_COUNTS *counts, PLANE_TYPE type,
                        tran_low_t *dqcoeff, TX_SIZE tx_size, const int16_t *dq,
                        int ctx, const int16_t *scan, const int16_t *nb,
                        vp9_reader *r) {
  const int max_eob = 16 << (tx_size << 1);
  const FRAME_CONTEXT *const fc = cm->fc;
  const int ref = is_inter_block(&xd->mi[0].src_mi->mbmi);
  int band, c = 0;
  const vp9_prob (*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
      fc->coef_probs[tx_size][type][ref];
  const vp9_prob *prob;
  unsigned int (*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1] =
      counts->coef[tx_size][type][ref];
  unsigned int (*eob_branch_count)[COEFF_CONTEXTS] =
      counts->eob_branch[tx_size][type][ref];
  uint8_t token_cache[32 * 32];
  const uint8_t *band_translate = get_band_translate(tx_size);
  const int dq_shift = (tx_size == TX_32X32);
  int v, token;
  int16_t dqv = dq[0];
  const uint8_t *cat1_prob;
  const uint8_t *cat2_prob;
  const uint8_t *cat3_prob;
  const uint8_t *cat4_prob;
  const uint8_t *cat5_prob;
  const uint8_t *cat6_prob;

#if CONFIG_VP9_HIGHBITDEPTH
  if (cm->use_highbitdepth) {
    if (cm->bit_depth == VPX_BITS_10) {
      cat1_prob = vp9_cat1_prob_high10;
      cat2_prob = vp9_cat2_prob_high10;
      cat3_prob = vp9_cat3_prob_high10;
      cat4_prob = vp9_cat4_prob_high10;
      cat5_prob = vp9_cat5_prob_high10;
      cat6_prob = vp9_cat6_prob_high10;
    } else {
      cat1_prob = vp9_cat1_prob_high12;
      cat2_prob = vp9_cat2_prob_high12;
      cat3_prob = vp9_cat3_prob_high12;
      cat4_prob = vp9_cat4_prob_high12;
      cat5_prob = vp9_cat5_prob_high12;
      cat6_prob = vp9_cat6_prob_high12;
    }
  } else {
    cat1_prob = vp9_cat1_prob;
    cat2_prob = vp9_cat2_prob;
    cat3_prob = vp9_cat3_prob;
    cat4_prob = vp9_cat4_prob;
    cat5_prob = vp9_cat5_prob;
    cat6_prob = vp9_cat6_prob;
  }
#else
  cat1_prob = vp9_cat1_prob;
  cat2_prob = vp9_cat2_prob;
  cat3_prob = vp9_cat3_prob;
  cat4_prob = vp9_cat4_prob;
  cat5_prob = vp9_cat5_prob;
  cat6_prob = vp9_cat6_prob;
#endif

  while (c < max_eob) {
    int val = -1;
    band = *band_translate++;
    prob = coef_probs[band][ctx];
    if (!cm->frame_parallel_decoding_mode) ++eob_branch_count[band][ctx];
    if (!vp9_read(r, prob[EOB_CONTEXT_NODE])) {
      INCREMENT_COUNT(EOB_MODEL_TOKEN);
      break;
    }

    while (!vp9_read(r, prob[ZERO_CONTEXT_NODE])) {
      INCREMENT_COUNT(ZERO_TOKEN);
      dqv = dq[1];
      token_cache[scan[c]] = 0;
      ++c;
      if (c >= max_eob) return c;  // zero tokens at the end (no eob token)
      ctx = get_coef_context(nb, token_cache, c);
      band = *band_translate++;
      prob = coef_probs[band][ctx];
    }

    if (!vp9_read(r, prob[ONE_CONTEXT_NODE])) {
      INCREMENT_COUNT(ONE_TOKEN);
      token = ONE_TOKEN;
      val = 1;
    } else {
      INCREMENT_COUNT(TWO_TOKEN);
      token = vp9_read_tree(r, coeff_subtree_high,
                            vp9_pareto8_full[prob[PIVOT_NODE] - 1]);
      switch (token) {
        case TWO_TOKEN:
        case THREE_TOKEN:
        case FOUR_TOKEN: val = token; break;
        case CATEGORY1_TOKEN:
          val = CAT1_MIN_VAL + read_coeff(cat1_prob, 1, r);
          break;
        case CATEGORY2_TOKEN:
          val = CAT2_MIN_VAL + read_coeff(cat2_prob, 2, r);
          break;
        case CATEGORY3_TOKEN:
          val = CAT3_MIN_VAL + read_coeff(cat3_prob, 3, r);
          break;
        case CATEGORY4_TOKEN:
          val = CAT4_MIN_VAL + read_coeff(cat4_prob, 4, r);
          break;
        case CATEGORY5_TOKEN:
          val = CAT5_MIN_VAL + read_coeff(cat5_prob, 5, r);
          break;
        case CATEGORY6_TOKEN:
#if CONFIG_VP9_HIGHBITDEPTH
          switch (cm->bit_depth) {
            case VPX_BITS_8:
              val = CAT6_MIN_VAL + read_coeff(cat6_prob, 14, r);
              break;
            case VPX_BITS_10:
              val = CAT6_MIN_VAL + read_coeff(cat6_prob, 16, r);
              break;
            case VPX_BITS_12:
              val = CAT6_MIN_VAL + read_coeff(cat6_prob, 18, r);
              break;
            default:
              assert(0);
              return -1;
          }
#else
          val = CAT6_MIN_VAL + read_coeff(cat6_prob, 14, r);
#endif
          break;
      }
    }
    v = (val * dqv) >> dq_shift;
#if CONFIG_COEFFICIENT_RANGE_CHECKING
#if CONFIG_VP9_HIGHBITDEPTH
    dqcoeff[scan[c]] =
        highbd_check_range((vp9_read_bit(r) ? -v : v), cm->bit_depth);
#else
    dqcoeff[scan[c]] = check_range(vp9_read_bit(r) ? -v : v);
#endif  // CONFIG_VP9_HIGHBITDEPTH
#else
    dqcoeff[scan[c]] = vp9_read_bit(r) ? -v : v;
#endif  // CONFIG_COEFFICIENT_RANGE_CHECKING
    token_cache[scan[c]] = vp9_pt_energy_class[token];
    ++c;
    ctx = get_coef_context(nb, token_cache, c);
    dqv = dq[1];
  }

  return c;
}
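/* check_range() and highbd_check_range() exist only when
 * CONFIG_COEFFICIENT_RANGE_CHECKING is enabled. A minimal sketch, assuming
 * the same bounds as libvpx's inverse-transform range checks, where the
 * legal coefficient range scales with bit depth as (1 << (7 + bd)) - 1
 * (i.e. the 16-bit range at 8-bit depth): */
static INLINE int check_range(int v) {
  assert(INT16_MIN <= v && v <= INT16_MAX);
  return v;
}

#if CONFIG_VP9_HIGHBITDEPTH
static INLINE int highbd_check_range(int v, int bd) {
  const int int_max = (1 << (7 + bd)) - 1; /* assumed bd-scaled bound */
  const int int_min = -int_max - 1;
  (void)int_min;
  (void)int_max; /* unused when NDEBUG is defined */
  assert(int_min <= v && v <= int_max);
  return v;
}
#endif  // CONFIG_VP9_HIGHBITDEPTH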
void *qt_hash_put_helper(qt_dictionary *h, qt_key_t key, void *value,
                         int put_choice) {
  uint64_t lkey;
  unsigned bucket;
  hash_entry *e;
  spine_element_t *child_id;
  spine_element_t child_val;
  spine_element_t *cur_id = NULL;
  spine_t *cur_spine = NULL;
  unsigned depth = 0;
  hash_entry *crt;

  assert(h);  // must hold before h is dereferenced below
  lkey = (uint64_t)(uintptr_t)(h->op_hash(key));
  HASH_KEY(lkey);

  bucket = BASE_SPINE_BUCKET(lkey);
  e = qt_malloc(sizeof(hash_entry));  // XXX: should be from a memory pool
  assert(e != NULL);
  child_id = &(h->base[bucket]);
  child_val = h->base[bucket];

  e->hashed_key = lkey;
  e->key = key;
  e->value = value;
  e->next = NULL;

  do {
    if (child_val.e == NULL) {
      // place the entry in the hash
      if ((child_val.e = CAS(&(child_id->e), NULL, e)) == NULL) {
        // put success: no potential colliding element was present
        return value;
      }
    } else if (SPINE_PTR_TEST(child_val)) {
      // descend one level into the spine tree
      INCREMENT_COUNT(child_id, child_val);
      if (cur_id) {
        DECREMENT_COUNT(cur_id);
      }
      cur_id = child_id;
      cur_spine = SPINE_PTR(h, child_val);
      depth++;
      assert(depth <= 11);  // otherwise, something has gone horribly wrong
      bucket = SPINE_BUCKET(lkey, depth);
      child_id = &cur_spine->elements[bucket];
      child_val = cur_spine->elements[bucket];
    } else if (child_val.e->hashed_key != lkey) {
      // upgrade to a spine
      spine_element_t newspine, cur;
      spine_t *realspine;
      unsigned bucket1 = SPINE_BUCKET(child_val.e->hashed_key, depth + 1);
      unsigned bucket2 = SPINE_BUCKET(lkey, depth + 1);

      newspine.s.id = allocate_spine(h, &realspine);
      realspine->parent = cur_id;
      realspine->elements[bucket1] = child_val;

      if (bucket1 != bucket2) {  // both elements will be in the new spine
        newspine.s.ctr = SPINE_COUNT(2);  // contains 2 elements
        realspine->elements[bucket2].e = e;
        if ((cur.e = CAS(&(child_id->e), child_val.e, newspine.e)) ==
            child_val.e) {
          // success!
          if (cur_id) {
            DECREMENT_COUNT(cur_id);
          }
          return value;
        } else {
          child_val = cur;
          deallocate_spine(h, newspine.s.id);
        }
      } else {  // collision in the new spine (unusual; wastes one CAS)
        newspine.s.ctr = SPINE_COUNT(1);  // contains 1 element (oldval)
        if ((cur.e = CAS(&(child_id->e), child_val.e, newspine.e)) ==
            child_val.e) {
          // success
          continue;
        } else {
          child_val = cur;
          deallocate_spine(h, newspine.s.id);
        }
      }
    } else {
      // use the real user-equals operation to differentiate subcases:
      // the element may or may not already be in the list
      hash_entry *head;
      do {
        head = child_id->e;
        e->next = head;
        crt = head;
        // find the entry, if it is in the list
        while (crt) {
          if (h->op_equals(crt->key, key)) {
            // already exists; update in place via CAS unless PUT_IF_ABSENT
            // (note: e allocated above is not released on this path; see the
            // XXX about pooling)
            if (put_choice != PUT_IF_ABSENT) {
              void **crt_val_adr = &(crt->value);
              void *crt_val = crt->value;
              while (qthread_cas_ptr(crt_val_adr, crt_val, value) != crt_val) {
                crt_val = crt->value;
              }
            }
            if (cur_id) {
              DECREMENT_COUNT(cur_id);
            }
            return crt->value;
          }
          crt = crt->next;
        }
        // and try to insert it at the head of the list;
        // if the list changed, redo the work
      } while (qthread_cas_ptr(&(child_id->e), head, e) != head);
      return e->value;
    }
  } while (1);
}
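/* CAS is not defined in this excerpt. A plausible definition, assuming it
 * wraps qthread_cas_ptr (already used directly above) and returns the value
 * observed at the address, so callers detect success by comparing the result
 * with the expected old value, as the retry loops above do: */
#define CAS(ADDR, OLDV, NEWV) \
  ((hash_entry *)qthread_cas_ptr((void **)(ADDR), (void *)(OLDV), (void *)(NEWV)))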