static void encode_block(int plane, int block, BLOCK_SIZE plane_bsize,
                         TX_SIZE tx_size, void *arg) {
  struct encode_b_args *const args = arg;
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct optimize_ctx *const ctx = args->ctx;
  struct macroblock_plane *const p = &x->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
  int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  int i, j;
  uint8_t *dst;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j);
  dst = &pd->dst.buf[4 * j * pd->dst.stride + 4 * i];

  // TODO(jingning): per transformed block zero forcing only enabled for
  // luma component. will integrate chroma components as well.
  if (x->zcoeff_blk[tx_size][block] && plane == 0) {
    p->eobs[block] = 0;
    ctx->ta[plane][i] = 0;
    ctx->tl[plane][j] = 0;
    return;
  }

  if (!x->skip_recode)
    vp9_xform_quant(x, plane, block, plane_bsize, tx_size);

  if (x->optimize && (!x->skip_recode || !x->skip_optimize)) {
    optimize_b(plane, block, plane_bsize, tx_size, x, ctx);
  } else {
    ctx->ta[plane][i] = p->eobs[block] > 0;
    ctx->tl[plane][j] = p->eobs[block] > 0;
  }

  if (p->eobs[block])
    *(args->skip) = 0;

  if (x->skip_encode || p->eobs[block] == 0)
    return;

  switch (tx_size) {
    case TX_32X32:
      vp9_idct32x32_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
      break;
    case TX_16X16:
      vp9_idct16x16_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
      break;
    case TX_8X8:
      vp9_idct8x8_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
      break;
    case TX_4X4:
      // this is like vp9_short_idct4x4 but has a special case around eob<=1
      // which is significant (not just an optimization) for the lossless
      // case.
      xd->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
      break;
    default:
      assert(0 && "Invalid transform size");
  }
}
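/*
 * Editor's sketch (not part of libvpx): every visitor in this listing receives
 * a `block` index counted in 4x4 units and converts it to a raster (x, y)
 * position with txfrm_block_to_raster_xy() before addressing pixel buffers.
 * The helper below illustrates that mapping under the assumption that blocks
 * are laid out in raster order of transform-sized units; the name
 * block_to_raster_xy_sketch and the demo values are hypothetical.
 */
#include <stdio.h>

static void block_to_raster_xy_sketch(int plane_width_4x4_log2, int tx_size,
                                      int block, int *x, int *y) {
  /* Number of transform blocks per row of the plane (in tx units). */
  const int tx_cols_log2 = plane_width_4x4_log2 - tx_size;
  /* Each transform block covers (1 << tx_size)^2 4x4 units. */
  const int raster_idx = block >> (tx_size << 1);
  *x = (raster_idx & ((1 << tx_cols_log2) - 1)) << tx_size;  /* in 4x4 units */
  *y = (raster_idx >> tx_cols_log2) << tx_size;              /* in 4x4 units */
}

int main(void) {
  int x, y;
  /* A 64x64 luma plane (16 4x4 units wide, log2 = 4) with 8x8 transforms
   * (tx_size = 1): block 20 is the 6th 8x8 block in raster order. */
  block_to_raster_xy_sketch(4, 1, 20, &x, &y);
  printf("block 20 -> (x, y) = (%d, %d) in 4x4 units\n", x, y);
  return 0;
}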
static void set_entropy_context_b(int plane, int block, BLOCK_SIZE plane_bsize,
                                  TX_SIZE tx_size, void *arg) {
  struct tokenize_b_args* const args = arg;
  MACROBLOCKD *const xd = args->xd;
  struct macroblock_plane *p = &args->cpi->mb.plane[plane];
  struct macroblockd_plane *pd = &xd->plane[plane];
  int aoff, loff;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);
  set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, aoff, loff);
}
void vp9_xform_quant(MACROBLOCK *x, int plane, int block,
                     BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const scan_order *const scan_order = &vp9_default_scan_orders[tx_size];
  int16_t *const coeff = BLOCK_OFFSET(p->coeff, block);
  int16_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  uint16_t *const eob = &p->eobs[block];
  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
  int i, j;
  const int16_t *src_diff;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j);
  src_diff = &p->src_diff[4 * (j * diff_stride + i)];

  switch (tx_size) {
    case TX_32X32:
      fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
      vp9_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,
                           p->quant, p->quant_shift, qcoeff, dqcoeff,
                           pd->dequant, p->zbin_extra, eob, scan_order->scan,
                           scan_order->iscan);
      break;
    case TX_16X16:
      vp9_fdct16x16(src_diff, coeff, diff_stride);
      vp9_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
                     p->quant, p->quant_shift, qcoeff, dqcoeff,
                     pd->dequant, p->zbin_extra, eob,
                     scan_order->scan, scan_order->iscan);
      break;
    case TX_8X8:
      vp9_fdct8x8(src_diff, coeff, diff_stride);
      vp9_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
                     p->quant, p->quant_shift, qcoeff, dqcoeff,
                     pd->dequant, p->zbin_extra, eob,
                     scan_order->scan, scan_order->iscan);
      break;
    case TX_4X4:
      x->fwd_txm4x4(src_diff, coeff, diff_stride);
      vp9_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
                     p->quant, p->quant_shift, qcoeff, dqcoeff,
                     pd->dequant, p->zbin_extra, eob,
                     scan_order->scan, scan_order->iscan);
      break;
    default:
      assert(0);
  }
}
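/*
 * Editor's sketch (not part of libvpx): vp9_quantize_b() and
 * vp9_quantize_b_32x32() are not shown in this excerpt. The function below is
 * a simplified illustration of a dead-zone quantizer with a zero bin, a
 * rounding offset and a dequant step, plus eob tracking in scan order. The
 * name quantize_block_sketch and the plain integer arithmetic are assumptions;
 * the real routines use a fixed-point quant/quant_shift pipeline.
 */
#include <stdint.h>
#include <stdlib.h>

static int quantize_block_sketch(const int16_t *coeff, int n,
                                 const int16_t *scan,
                                 int zbin, int round, int step,
                                 int16_t *qcoeff, int16_t *dqcoeff) {
  int i, eob = 0;
  for (i = 0; i < n; ++i) {
    const int rc = scan[i];
    const int v = coeff[rc];
    const int abs_v = abs(v);
    qcoeff[rc] = 0;
    dqcoeff[rc] = 0;
    if (abs_v >= zbin) {                     /* outside the dead zone */
      const int q = (abs_v + round) / step;  /* rounded division by the step */
      if (q) {
        qcoeff[rc] = (int16_t)(v < 0 ? -q : q);
        dqcoeff[rc] = (int16_t)(qcoeff[rc] * step);
        eob = i + 1;                         /* last nonzero, in scan order */
      }
    }
  }
  return eob;  /* same "count of coded coefficients" meaning as p->eobs[] */
}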
static void encode_block_pass1(int plane, int block, BLOCK_SIZE plane_bsize,
                               TX_SIZE tx_size, void *arg) {
  MACROBLOCK *const x = (MACROBLOCK *)arg;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = &x->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
  int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  int i, j;
  uint8_t *dst;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j);
  dst = &pd->dst.buf[4 * j * pd->dst.stride + 4 * i];

  vp9_xform_quant(x, plane, block, plane_bsize, tx_size);

  if (p->eobs[block] > 0)
    xd->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
}
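/*
 * Editor's sketch (not part of libvpx): encode_block, encode_block_pass1 and
 * set_entropy_context_b all share the (plane, block, plane_bsize, tx_size,
 * arg) visitor signature and are normally driven by a "for each transformed
 * block" walker. The loop below is a hypothetical, self-contained
 * illustration of such a driver: it steps the block index by the number of
 * 4x4 units one transform block covers. foreach_tx_block_sketch and its
 * parameters are assumptions, not libvpx API.
 */
typedef void (*tx_block_visitor)(int plane, int block, int plane_bsize,
                                 int tx_size, void *arg);

static void foreach_tx_block_sketch(int plane, int plane_bsize,
                                    int num_4x4_w, int num_4x4_h, int tx_size,
                                    tx_block_visitor visit, void *arg) {
  const int step = 1 << (tx_size << 1);     /* 4x4 units per transform block */
  const int total = num_4x4_w * num_4x4_h;  /* 4x4 units in the plane block */
  int block;
  for (block = 0; block < total; block += step)
    visit(plane, block, plane_bsize, tx_size, arg);
}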
int vp9_decode_block_tokens(VP9_COMMON *cm, MACROBLOCKD *xd,
                            int plane, int block, BLOCK_SIZE plane_bsize,
                            TX_SIZE tx_size, vp9_reader *r) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const int seg_eob = get_tx_eob(&cm->seg, xd->mi_8x8[0]->mbmi.segment_id,
                                 tx_size);
  int aoff, loff, eob, pt;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);
  pt = get_entropy_context(tx_size, pd->above_context + aoff,
                           pd->left_context + loff);

  eob = decode_coefs(cm, xd, r, block, pd->plane_type, seg_eob,
                     BLOCK_OFFSET(pd->qcoeff, block), tx_size,
                     pd->dequant, pt);

  set_contexts(xd, pd, plane_bsize, tx_size, eob > 0, aoff, loff);

  pd->eobs[block] = eob;
  return eob;
}
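/*
 * Editor's sketch (not part of libvpx): get_entropy_context() and
 * combine_entropy_contexts() are used above but not shown. The idea,
 * illustrated below with hypothetical names, is that the above/left
 * ENTROPY_CONTEXT arrays hold per-4x4-column (resp. per-4x4-row) "had nonzero
 * coefficients" flags; the flags covered by the transform block are OR-ed
 * together and the two resulting booleans summed into a context in {0, 1, 2}.
 */
static int entropy_context_sketch(const unsigned char *above,
                                  const unsigned char *left,
                                  int tx_size_in_4x4_log2) {
  const int n = 1 << tx_size_in_4x4_log2;  /* 4x4 units covered by the block */
  int i, a = 0, l = 0;
  for (i = 0; i < n; ++i) {
    a |= above[i];
    l |= left[i];
  }
  return (a != 0) + (l != 0);  /* 0, 1 or 2 */
}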
static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize,
                       TX_SIZE tx_size, void *arg) {
  struct tokenize_b_args* const args = arg;
  VP9_COMP *cpi = args->cpi;
  ThreadData *const td = args->td;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  TOKENEXTRA **tp = args->tp;
  uint8_t token_cache[32 * 32];
  struct macroblock_plane *p = &x->plane[plane];
  struct macroblockd_plane *pd = &xd->plane[plane];
  MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
  int pt; /* near block/prev token context index */
  int c;
  TOKENEXTRA *t = *tp;        /* store tokens starting here */
  int eob = p->eobs[block];
  const PLANE_TYPE type = pd->plane_type;
  const tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  const int segment_id = mbmi->segment_id;
  const int16_t *scan, *nb;
  const scan_order *so;
  const int ref = is_inter_block(mbmi);
  unsigned int (*const counts)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
      td->rd_counts.coef_counts[tx_size][type][ref];
  vp9_prob (*const coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
      cpi->common.fc->coef_probs[tx_size][type][ref];
  unsigned int (*const eob_branch)[COEFF_CONTEXTS] =
      td->counts->eob_branch[tx_size][type][ref];
  const uint8_t *const band = get_band_translate(tx_size);
  const int seg_eob = get_tx_eob(&cpi->common.seg, segment_id, tx_size);
  int16_t token;
  EXTRABIT extra;
  int aoff, loff;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);

  pt = get_entropy_context(tx_size, pd->above_context + aoff,
                           pd->left_context + loff);
  so = get_scan(xd, tx_size, type, block);
  scan = so->scan;
  nb = so->neighbors;
  c = 0;

  while (c < eob) {
    int v = 0;
    int skip_eob = 0;
    v = qcoeff[scan[c]];

    while (!v) {
      add_token_no_extra(&t, coef_probs[band[c]][pt], ZERO_TOKEN, skip_eob,
                         counts[band[c]][pt]);
      eob_branch[band[c]][pt] += !skip_eob;

      skip_eob = 1;
      token_cache[scan[c]] = 0;
      ++c;
      pt = get_coef_context(nb, token_cache, c);
      v = qcoeff[scan[c]];
    }

    vp9_get_token_extra(v, &token, &extra);

    add_token(&t, coef_probs[band[c]][pt], extra, (uint8_t)token,
              (uint8_t)skip_eob, counts[band[c]][pt]);
    eob_branch[band[c]][pt] += !skip_eob;

    token_cache[scan[c]] = vp9_pt_energy_class[token];
    ++c;
    pt = get_coef_context(nb, token_cache, c);
  }
  if (c < seg_eob) {
    add_token_no_extra(&t, coef_probs[band[c]][pt], EOB_TOKEN, 0,
                       counts[band[c]][pt]);
    ++eob_branch[band[c]][pt];
  }

  *tp = t;

  vp9_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, aoff, loff);
}
static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize,
                       TX_SIZE tx_size, void *arg) {
  struct tokenize_b_args* const args = arg;
  VP9_COMP *cpi = args->cpi;
  MACROBLOCKD *xd = args->xd;
  TOKENEXTRA **tp = args->tp;
  uint8_t *token_cache = args->token_cache;
  struct macroblock_plane *p = &cpi->mb.plane[plane];
  struct macroblockd_plane *pd = &xd->plane[plane];
  MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi;
  int pt; /* near block/prev token context index */
  int c = 0;
  TOKENEXTRA *t = *tp;        /* store tokens starting here */
  int eob = p->eobs[block];
  const PLANE_TYPE type = pd->plane_type;
  const int16_t *qcoeff_ptr = BLOCK_OFFSET(p->qcoeff, block);
  const int segment_id = mbmi->segment_id;
  const int16_t *scan, *nb;
  const scan_order *so;
  vp9_coeff_count *const counts = cpi->coef_counts[tx_size];
  vp9_coeff_probs_model *const coef_probs = cpi->common.fc.coef_probs[tx_size];
  const int ref = is_inter_block(mbmi);
  const uint8_t *const band = get_band_translate(tx_size);
  const int seg_eob = get_tx_eob(&cpi->common.seg, segment_id, tx_size);

  int aoff, loff;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);

  pt = get_entropy_context(tx_size, pd->above_context + aoff,
                           pd->left_context + loff);
  so = get_scan(xd, tx_size, type, block);
  scan = so->scan;
  nb = so->neighbors;
  c = 0;
  while (c < eob) {
    int v = 0;
    int skip_eob = 0;
    v = qcoeff_ptr[scan[c]];

    while (!v) {
      add_token(&t, coef_probs[type][ref][band[c]][pt], 0, ZERO_TOKEN, skip_eob,
                counts[type][ref][band[c]][pt]);

      cpi->common.counts.eob_branch[tx_size][type][ref][band[c]][pt] +=
          !skip_eob;

      skip_eob = 1;
      token_cache[scan[c]] = 0;
      ++c;
      pt = get_coef_context(nb, token_cache, c);
      v = qcoeff_ptr[scan[c]];
    }
    add_token(&t, coef_probs[type][ref][band[c]][pt],
              vp9_dct_value_tokens_ptr[v].extra,
              vp9_dct_value_tokens_ptr[v].token,
              skip_eob,
              counts[type][ref][band[c]][pt]);

    cpi->common.counts.eob_branch[tx_size][type][ref][band[c]][pt] += !skip_eob;

    token_cache[scan[c]] =
        vp9_pt_energy_class[vp9_dct_value_tokens_ptr[v].token];
    ++c;
    pt = get_coef_context(nb, token_cache, c);
  }
  if (c < seg_eob) {
    add_token(&t, coef_probs[type][ref][band[c]][pt], 0, EOB_TOKEN, 0,
              counts[type][ref][band[c]][pt]);
    ++cpi->common.counts.eob_branch[tx_size][type][ref][band[c]][pt];
  }

  *tp = t;

  set_contexts(xd, pd, plane_bsize, tx_size, c > 0, aoff, loff);
}
static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize,
                       TX_SIZE tx_size, void *arg) {
  struct tokenize_b_args* const args = arg;
  VP9_COMP *cpi = args->cpi;
  MACROBLOCKD *xd = args->xd;
  TOKENEXTRA **tp = args->tp;
  uint8_t token_cache[32 * 32];
  struct macroblock_plane *p = &cpi->mb.plane[plane];
  struct macroblockd_plane *pd = &xd->plane[plane];
  MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
  int pt; /* near block/prev token context index */
  int c;
  TOKENEXTRA *t = *tp;        /* store tokens starting here */
  int eob = p->eobs[block];
  const PLANE_TYPE type = pd->plane_type;
  const tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  const int segment_id = mbmi->segment_id;
  const int16_t *scan, *nb;
  const scan_order *so;
  const int ref = is_inter_block(mbmi);
  unsigned int (*const counts)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
      cpi->coef_counts[tx_size][type][ref];
  vp9_prob (*const coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
      cpi->common.fc.coef_probs[tx_size][type][ref];
  unsigned int (*const eob_branch)[COEFF_CONTEXTS] =
      cpi->common.counts.eob_branch[tx_size][type][ref];
  const uint8_t *const band = get_band_translate(tx_size);
  const int seg_eob = get_tx_eob(&cpi->common.seg, segment_id, tx_size);
  const TOKENVALUE *dct_value_tokens;

  int aoff, loff;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);

  pt = get_entropy_context(tx_size, pd->above_context + aoff,
                           pd->left_context + loff);
  so = get_scan(xd, tx_size, type, block);
  scan = so->scan;
  nb = so->neighbors;
  c = 0;
#if CONFIG_VP9_HIGHBITDEPTH
  if (cpi->common.profile >= PROFILE_2) {
    dct_value_tokens = (cpi->common.bit_depth == VPX_BITS_10 ?
                        vp9_dct_value_tokens_high10_ptr :
                        vp9_dct_value_tokens_high12_ptr);
  } else {
    dct_value_tokens = vp9_dct_value_tokens_ptr;
  }
#else
  dct_value_tokens = vp9_dct_value_tokens_ptr;
#endif

  while (c < eob) {
    int v = 0;
    int skip_eob = 0;
    v = qcoeff[scan[c]];

    while (!v) {
      add_token_no_extra(&t, coef_probs[band[c]][pt], ZERO_TOKEN, skip_eob,
                         counts[band[c]][pt]);
      eob_branch[band[c]][pt] += !skip_eob;

      skip_eob = 1;
      token_cache[scan[c]] = 0;
      ++c;
      pt = get_coef_context(nb, token_cache, c);
      v = qcoeff[scan[c]];
    }

    add_token(&t, coef_probs[band[c]][pt],
              dct_value_tokens[v].extra,
              (uint8_t)dct_value_tokens[v].token,
              (uint8_t)skip_eob,
              counts[band[c]][pt]);
    eob_branch[band[c]][pt] += !skip_eob;

    token_cache[scan[c]] = vp9_pt_energy_class[dct_value_tokens[v].token];
    ++c;
    pt = get_coef_context(nb, token_cache, c);
  }
  if (c < seg_eob) {
    add_token_no_extra(&t, coef_probs[band[c]][pt], EOB_TOKEN, 0,
                       counts[band[c]][pt]);
    ++eob_branch[band[c]][pt];
  }

  *tp = t;

  vp9_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, aoff, loff);
}
static void tokenize_b(int plane, int block, BLOCK_SIZE_TYPE plane_bsize,
                       TX_SIZE tx_size, void *arg) {
  struct tokenize_b_args* const args = arg;
  VP9_COMP *cpi = args->cpi;
  MACROBLOCKD *xd = args->xd;
  TOKENEXTRA **tp = args->tp;
  struct macroblockd_plane *pd = &xd->plane[plane];
  MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
  int pt; /* near block/prev token context index */
  int c = 0, rc = 0;
  TOKENEXTRA *t = *tp;        /* store tokens starting here */
  const int eob = pd->eobs[block];
  const PLANE_TYPE type = pd->plane_type;
  const int16_t *qcoeff_ptr = BLOCK_OFFSET(pd->qcoeff, block);
  int seg_eob;
  const int segment_id = mbmi->segment_id;
  const int16_t *scan, *nb;
  vp9_coeff_count *const counts = cpi->coef_counts[tx_size];
  vp9_coeff_probs_model *const coef_probs = cpi->common.fc.coef_probs[tx_size];
  const int ref = is_inter_block(mbmi);
  ENTROPY_CONTEXT above_ec, left_ec;
  uint8_t token_cache[1024];
  const uint8_t *band_translate;
  ENTROPY_CONTEXT *A, *L;
  int aoff, loff;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);

  A = pd->above_context + aoff;
  L = pd->left_context + loff;

  assert((!type && !plane) || (type && plane));

  switch (tx_size) {
    case TX_4X4:
      above_ec = A[0] != 0;
      left_ec = L[0] != 0;
      seg_eob = 16;
      scan = get_scan_4x4(get_tx_type_4x4(type, xd, block));
      band_translate = vp9_coefband_trans_4x4;
      break;
    case TX_8X8:
      above_ec = !!*(uint16_t *)A;
      left_ec = !!*(uint16_t *)L;
      seg_eob = 64;
      scan = get_scan_8x8(get_tx_type_8x8(type, xd));
      band_translate = vp9_coefband_trans_8x8plus;
      break;
    case TX_16X16:
      above_ec = !!*(uint32_t *)A;
      left_ec = !!*(uint32_t *)L;
      seg_eob = 256;
      scan = get_scan_16x16(get_tx_type_16x16(type, xd));
      band_translate = vp9_coefband_trans_8x8plus;
      break;
    case TX_32X32:
      above_ec = !!*(uint64_t *)A;
      left_ec = !!*(uint64_t *)L;
      seg_eob = 1024;
      scan = vp9_default_scan_32x32;
      band_translate = vp9_coefband_trans_8x8plus;
      break;
    default:
      assert(!"Invalid transform size");
  }

  pt = combine_entropy_contexts(above_ec, left_ec);
  nb = vp9_get_coef_neighbors_handle(scan);

  if (vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP))
    seg_eob = 0;

  c = 0;
  do {
    const int band = get_coef_band(band_translate, c);
    int token;
    int v = 0;
    rc = scan[c];
    if (c)
      pt = get_coef_context(nb, token_cache, c);
    if (c < eob) {
      v = qcoeff_ptr[rc];
      assert(-DCT_MAX_VALUE <= v && v < DCT_MAX_VALUE);

      t->extra = vp9_dct_value_tokens_ptr[v].extra;
      token = vp9_dct_value_tokens_ptr[v].token;
    } else {
      token = DCT_EOB_TOKEN;
    }

    t->token = token;
    t->context_tree = coef_probs[type][ref][band][pt];
    t->skip_eob_node = (c > 0) && (token_cache[scan[c - 1]] == 0);

    assert(vp9_coef_encodings[t->token].len - t->skip_eob_node > 0);

    ++counts[type][ref][band][pt][token];
    if (!t->skip_eob_node)
      ++cpi->common.counts.eob_branch[tx_size][type][ref][band][pt];

    token_cache[rc] = vp9_pt_energy_class[token];
    ++t;
  } while (c < eob && ++c < seg_eob);

  *tp = t;

  set_contexts(xd, pd, plane_bsize, tx_size, c > 0, aoff, loff);
}
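/*
 * Editor's sketch (not part of libvpx): all the tokenize_b() variants above
 * map a quantized coefficient value to a (token, extra bits) pair via
 * vp9_dct_value_tokens_ptr (or a per-bit-depth table in the high-bit-depth
 * build). The function below illustrates the usual VP8/VP9-style layout:
 * small magnitudes get dedicated tokens, larger ones fall into category
 * tokens whose remainder is carried in extra bits. The category boundaries
 * and token indices here are quoted from memory of that scheme and should be
 * treated as illustrative; token_for_value_sketch is a hypothetical name and
 * the sign is coded separately, so only the magnitude is classified.
 */
static int token_for_value_sketch(int v, int *extra_bits_value) {
  const int a = v < 0 ? -v : v;
  *extra_bits_value = 0;
  if (a <= 4) return a;                                    /* ZERO..FOUR */
  if (a <= 6)  { *extra_bits_value = a - 5;  return 5;  }  /* category 1 */
  if (a <= 10) { *extra_bits_value = a - 7;  return 6;  }  /* category 2 */
  if (a <= 18) { *extra_bits_value = a - 11; return 7;  }  /* category 3 */
  if (a <= 34) { *extra_bits_value = a - 19; return 8;  }  /* category 4 */
  if (a <= 66) { *extra_bits_value = a - 35; return 9;  }  /* category 5 */
  *extra_bits_value = a - 67;                return 10;    /* category 6 */
}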
static void encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
                               TX_SIZE tx_size, void *arg) {
  struct encode_b_args* const args = arg;
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
  struct macroblock_plane *const p = &x->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
  int16_t *coeff = BLOCK_OFFSET(p->coeff, block);
  int16_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  int16_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  const scan_order *scan_order;
  TX_TYPE tx_type;
  PREDICTION_MODE mode;
  const int bwl = b_width_log2(plane_bsize);
  const int diff_stride = 4 * (1 << bwl);
  uint8_t *src, *dst;
  int16_t *src_diff;
  uint16_t *eob = &p->eobs[block];
  const int src_stride = p->src.stride;
  const int dst_stride = pd->dst.stride;
  int i, j;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j);
  dst = &pd->dst.buf[4 * (j * dst_stride + i)];
  src = &p->src.buf[4 * (j * src_stride + i)];
  src_diff = &p->src_diff[4 * (j * diff_stride + i)];

  switch (tx_size) {
    case TX_32X32:
      scan_order = &vp9_default_scan_orders[TX_32X32];
      mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
      vp9_predict_intra_block(xd, block >> 6, bwl, TX_32X32, mode,
                              x->skip_encode ? src : dst,
                              x->skip_encode ? src_stride : dst_stride,
                              dst, dst_stride, i, j, plane);
      if (!x->skip_recode) {
        vp9_subtract_block(32, 32, src_diff, diff_stride,
                           src, src_stride, dst, dst_stride);
        fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
        vp9_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,
                             p->quant, p->quant_shift, qcoeff, dqcoeff,
                             pd->dequant, p->zbin_extra, eob, scan_order->scan,
                             scan_order->iscan);
      }
      if (!x->skip_encode && *eob)
        vp9_idct32x32_add(dqcoeff, dst, dst_stride, *eob);
      break;
    case TX_16X16:
      tx_type = get_tx_type(pd->plane_type, xd);
      scan_order = &vp9_scan_orders[TX_16X16][tx_type];
      mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
      vp9_predict_intra_block(xd, block >> 4, bwl, TX_16X16, mode,
                              x->skip_encode ? src : dst,
                              x->skip_encode ? src_stride : dst_stride,
                              dst, dst_stride, i, j, plane);
      if (!x->skip_recode) {
        vp9_subtract_block(16, 16, src_diff, diff_stride,
                           src, src_stride, dst, dst_stride);
        vp9_fht16x16(src_diff, coeff, diff_stride, tx_type);
        vp9_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
                       p->quant, p->quant_shift, qcoeff, dqcoeff,
                       pd->dequant, p->zbin_extra, eob, scan_order->scan,
                       scan_order->iscan);
      }
      if (!x->skip_encode && *eob)
        vp9_iht16x16_add(tx_type, dqcoeff, dst, dst_stride, *eob);
      break;
    case TX_8X8:
      tx_type = get_tx_type(pd->plane_type, xd);
      scan_order = &vp9_scan_orders[TX_8X8][tx_type];
      mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
      vp9_predict_intra_block(xd, block >> 2, bwl, TX_8X8, mode,
                              x->skip_encode ? src : dst,
                              x->skip_encode ? src_stride : dst_stride,
                              dst, dst_stride, i, j, plane);
      if (!x->skip_recode) {
        vp9_subtract_block(8, 8, src_diff, diff_stride,
                           src, src_stride, dst, dst_stride);
        vp9_fht8x8(src_diff, coeff, diff_stride, tx_type);
        vp9_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
                       p->quant, p->quant_shift, qcoeff, dqcoeff,
                       pd->dequant, p->zbin_extra, eob, scan_order->scan,
                       scan_order->iscan);
      }
      if (!x->skip_encode && *eob)
        vp9_iht8x8_add(tx_type, dqcoeff, dst, dst_stride, *eob);
      break;
    case TX_4X4:
      tx_type = get_tx_type_4x4(pd->plane_type, xd, block);
      scan_order = &vp9_scan_orders[TX_4X4][tx_type];
      mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
      vp9_predict_intra_block(xd, block, bwl, TX_4X4, mode,
                              x->skip_encode ? src : dst,
                              x->skip_encode ? src_stride : dst_stride,
                              dst, dst_stride, i, j, plane);

      if (!x->skip_recode) {
        vp9_subtract_block(4, 4, src_diff, diff_stride,
                           src, src_stride, dst, dst_stride);
        if (tx_type != DCT_DCT)
          vp9_fht4x4(src_diff, coeff, diff_stride, tx_type);
        else
          x->fwd_txm4x4(src_diff, coeff, diff_stride);
        vp9_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
                       p->quant, p->quant_shift, qcoeff, dqcoeff,
                       pd->dequant, p->zbin_extra, eob, scan_order->scan,
                       scan_order->iscan);
      }

      if (!x->skip_encode && *eob) {
        if (tx_type == DCT_DCT)
          // this is like vp9_short_idct4x4 but has a special case around eob<=1
          // which is significant (not just an optimization) for the lossless
          // case.
          xd->itxm_add(dqcoeff, dst, dst_stride, *eob);
        else
          vp9_iht4x4_16_add(dqcoeff, dst, dst_stride, tx_type);
      }
      break;
    default:
      assert(0);
  }
  if (*eob)
    *(args->skip) = 0;
}
static void optimize_b(int plane, int block, BLOCK_SIZE plane_bsize,
                       TX_SIZE tx_size, MACROBLOCK *mb,
                       struct optimize_ctx *ctx) {
  MACROBLOCKD *const xd = &mb->e_mbd;
  struct macroblock_plane *p = &mb->plane[plane];
  struct macroblockd_plane *pd = &xd->plane[plane];
  const int ref = is_inter_block(&xd->mi_8x8[0]->mbmi);
  vp9_token_state tokens[1025][2];
  unsigned best_index[1025][2];
  const int16_t *coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block);
  int16_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  int16_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  int eob = p->eobs[block], final_eob, sz = 0;
  const int i0 = 0;
  int rc, x, next, i;
  int64_t rdmult, rddiv, rd_cost0, rd_cost1;
  int rate0, rate1, error0, error1, t0, t1;
  int best, band, pt;
  PLANE_TYPE type = pd->plane_type;
  int err_mult = plane_rd_mult[type];
  const int default_eob = 16 << (tx_size << 1);
  const int mul = 1 + (tx_size == TX_32X32);
  uint8_t token_cache[1024];
  const int16_t *dequant_ptr = pd->dequant;
  const uint8_t *const band_translate = get_band_translate(tx_size);
  const scan_order *so = get_scan(xd, tx_size, type, block);
  const int16_t *scan = so->scan;
  const int16_t *nb = so->neighbors;
  ENTROPY_CONTEXT *a, *l;
  int tx_x, tx_y;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &tx_x, &tx_y);
  a = &ctx->ta[plane][tx_x];
  l = &ctx->tl[plane][tx_y];

  assert((!type && !plane) || (type && plane));
  assert(eob <= default_eob);

  /* Now set up a Viterbi trellis to evaluate alternative roundings. */
  rdmult = mb->rdmult * err_mult;
  if (!is_inter_block(&mb->e_mbd.mi_8x8[0]->mbmi))
    rdmult = (rdmult * 9) >> 4;
  rddiv = mb->rddiv;
  /* Initialize the sentinel node of the trellis. */
  tokens[eob][0].rate = 0;
  tokens[eob][0].error = 0;
  tokens[eob][0].next = default_eob;
  tokens[eob][0].token = EOB_TOKEN;
  tokens[eob][0].qc = 0;
  *(tokens[eob] + 1) = *(tokens[eob] + 0);
  next = eob;
  for (i = 0; i < eob; i++)
    token_cache[scan[i]] =
        vp9_pt_energy_class[vp9_dct_value_tokens_ptr[qcoeff[scan[i]]].token];

  for (i = eob; i-- > i0;) {
    int base_bits, d2, dx;
    rc = scan[i];
    x = qcoeff[rc];
    /* Only add a trellis state for non-zero coefficients. */
    if (x) {
      int shortcut = 0;
      error0 = tokens[next][0].error;
      error1 = tokens[next][1].error;
      /* Evaluate the first possibility for this state. */
      rate0 = tokens[next][0].rate;
      rate1 = tokens[next][1].rate;
      t0 = (vp9_dct_value_tokens_ptr + x)->token;
      /* Consider both possible successor states. */
      if (next < default_eob) {
        band = band_translate[i + 1];
        pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
        rate0 += mb->token_costs[tx_size][type][ref][band][0][pt]
                                [tokens[next][0].token];
        rate1 += mb->token_costs[tx_size][type][ref][band][0][pt]
                                [tokens[next][1].token];
      }
      UPDATE_RD_COST();
      /* And pick the best. */
      best = rd_cost1 < rd_cost0;
      base_bits = *(vp9_dct_value_cost_ptr + x);
      dx = mul * (dqcoeff[rc] - coeff[rc]);
      d2 = dx * dx;
      tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
      tokens[i][0].error = d2 + (best ? error1 : error0);
      tokens[i][0].next = next;
      tokens[i][0].token = t0;
      tokens[i][0].qc = x;
      best_index[i][0] = best;
      /* Evaluate the second possibility for this state. */
      rate0 = tokens[next][0].rate;
      rate1 = tokens[next][1].rate;

      if ((abs(x) * dequant_ptr[rc != 0] > abs(coeff[rc]) * mul) &&
          (abs(x) * dequant_ptr[rc != 0] < abs(coeff[rc]) * mul +
                                               dequant_ptr[rc != 0]))
        shortcut = 1;
      else
        shortcut = 0;

      if (shortcut) {
        sz = -(x < 0);
        x -= 2 * sz + 1;
      }

      /* Consider both possible successor states. */
      if (!x) {
        /* If we reduced this coefficient to zero, check to see if
         * we need to move the EOB back here.
         */
        t0 = tokens[next][0].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
        t1 = tokens[next][1].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
      } else {
        t0 = t1 = (vp9_dct_value_tokens_ptr + x)->token;
      }
      if (next < default_eob) {
        band = band_translate[i + 1];
        if (t0 != EOB_TOKEN) {
          pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
          rate0 += mb->token_costs[tx_size][type][ref][band][!x][pt]
                                  [tokens[next][0].token];
        }
        if (t1 != EOB_TOKEN) {
          pt = trellis_get_coeff_context(scan, nb, i, t1, token_cache);
          rate1 += mb->token_costs[tx_size][type][ref][band][!x][pt]
                                  [tokens[next][1].token];
        }
      }

      UPDATE_RD_COST();
      /* And pick the best. */
      best = rd_cost1 < rd_cost0;
      base_bits = *(vp9_dct_value_cost_ptr + x);

      if (shortcut) {
        dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
        d2 = dx * dx;
      }
      tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
      tokens[i][1].error = d2 + (best ? error1 : error0);
      tokens[i][1].next = next;
      tokens[i][1].token = best ? t1 : t0;
      tokens[i][1].qc = x;
      best_index[i][1] = best;
      /* Finally, make this the new head of the trellis. */
      next = i;
    } else {
      /* There's no choice to make for a zero coefficient, so we don't
       * add a new trellis node, but we do need to update the costs.
       */
      band = band_translate[i + 1];
      t0 = tokens[next][0].token;
      t1 = tokens[next][1].token;
      /* Update the cost of each path if we're past the EOB token. */
      if (t0 != EOB_TOKEN) {
        tokens[next][0].rate +=
            mb->token_costs[tx_size][type][ref][band][1][0][t0];
        tokens[next][0].token = ZERO_TOKEN;
      }
      if (t1 != EOB_TOKEN) {
        tokens[next][1].rate +=
            mb->token_costs[tx_size][type][ref][band][1][0][t1];
        tokens[next][1].token = ZERO_TOKEN;
      }
      best_index[i][0] = best_index[i][1] = 0;
      /* Don't update next, because we didn't add a new node. */
    }
  }

  /* Now pick the best path through the whole trellis. */
  band = band_translate[i + 1];
  pt = combine_entropy_contexts(*a, *l);
  rate0 = tokens[next][0].rate;
  rate1 = tokens[next][1].rate;
  error0 = tokens[next][0].error;
  error1 = tokens[next][1].error;
  t0 = tokens[next][0].token;
  t1 = tokens[next][1].token;
  rate0 += mb->token_costs[tx_size][type][ref][band][0][pt][t0];
  rate1 += mb->token_costs[tx_size][type][ref][band][0][pt][t1];
  UPDATE_RD_COST();
  best = rd_cost1 < rd_cost0;
  final_eob = i0 - 1;
  vpx_memset(qcoeff, 0, sizeof(*qcoeff) * (16 << (tx_size * 2)));
  vpx_memset(dqcoeff, 0, sizeof(*dqcoeff) * (16 << (tx_size * 2)));
  for (i = next; i < eob; i = next) {
    x = tokens[i][best].qc;
    if (x) {
      final_eob = i;
    }
    rc = scan[i];
    qcoeff[rc] = x;
    dqcoeff[rc] = (x * dequant_ptr[rc != 0]) / mul;

    next = tokens[i][best].next;
    best = best_index[i][best];
  }
  final_eob++;

  mb->plane[plane].eobs[block] = final_eob;
  *a = *l = (final_eob > 0);
}
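/*
 * Editor's sketch (not part of libvpx): the UPDATE_RD_COST() macro used by
 * optimize_b() is not shown in this excerpt. Conceptually it fills rd_cost0
 * and rd_cost1 with Lagrangian costs so that `best = rd_cost1 < rd_cost0`
 * picks the cheaper of the two trellis successor states. The helpers below
 * are a generic rate-distortion comparison under that assumption; the
 * fixed-point scaling of the real macro (rdmult/rddiv) is not reproduced,
 * and the names rd_cost_sketch / pick_best_sketch are hypothetical.
 */
#include <stdint.h>

static int64_t rd_cost_sketch(int64_t lambda, int rate, int distortion) {
  /* cost = lambda * rate + distortion, with lambda in arbitrary fixed-point
   * units; only the relative order of the two candidates matters. */
  return lambda * (int64_t)rate + (int64_t)distortion;
}

static int pick_best_sketch(int64_t lambda, int rate0, int error0,
                            int rate1, int error1) {
  /* Mirrors `best = rd_cost1 < rd_cost0` in optimize_b(). */
  return rd_cost_sketch(lambda, rate1, error1) <
         rd_cost_sketch(lambda, rate0, error0);
}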