// Invokes |visit| for every transform block of |plane| inside a block of
// size |bsize|, in raster order, passing the running coefficient-block
// index together with the (row, col) position in 4x4 units.
//
// Blocks that fall entirely inside the unrestricted motion vector (UMV)
// border are not visited: max_block_wide()/max_block_high() clamp the
// iteration range to the visible part of the frame.
void av1_foreach_transformed_block_in_plane(
    const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
    foreach_transformed_block_visitor visit, void *arg) {
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
  // The transform size differs per plane: chroma derives it from the luma
  // size and the plane's subsampling.
  const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
  const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
  // Width/height of one transform block, in 4x4 units.
  const uint8_t tx_cols = tx_size_wide_unit[tx_size];
  const uint8_t tx_rows = tx_size_high_unit[tx_size];
  // Each visited transform block advances the block index by its area.
  const int blk_step = tx_cols * tx_rows;
  // Clamp iteration to the part of the plane block that is inside the
  // visible frame (a negative mb_to_right/bottom_edge means the block
  // extends into the UMV border).
  const int cols_in_frame = max_block_wide(xd, plane_bsize, plane);
  const int rows_in_frame = max_block_high(xd, plane_bsize, plane);
  int blk_idx = 0;
  int row;

  for (row = 0; row < rows_in_frame; row += tx_rows) {
    int col;
    for (col = 0; col < cols_in_frame; col += tx_cols) {
      visit(plane, blk_idx, row, col, plane_bsize, tx_size, arg);
      blk_idx += blk_step;
    }
  }
}
// Invokes |visit| for every transform block of |plane| inside a block of
// size |bsize|, passing the running coefficient-block index |i|.
// NOTE(review): this definition is truncated in this excerpt — it ends at
// the opening of the else branch (the unclipped common case), whose body
// is not visible here.
void vp9_foreach_transformed_block_in_plane(
    const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
    foreach_transformed_block_visitor visit, void *arg) {
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const MB_MODE_INFO* mbmi = &xd->mi[0].src_mi->mbmi;
  // block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
  // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
  // transform size varies per plane, look it up in a common way.
  const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
  const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
  const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
  const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
  // 4x4-unit area of one transform block: tx_size is a log2 scale, so a
  // transform covers (1 << tx_size)^2 == 1 << (tx_size << 1) units.
  const int step = 1 << (tx_size << 1);
  int i;
  // If mb_to_right_edge is < 0 we are in a situation in which
  // the current block size extends into the UMV and we won't
  // visit the sub blocks that are wholly within the UMV.
  if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) {
    int r, c;
    int max_blocks_wide = num_4x4_w;
    int max_blocks_high = num_4x4_h;
    // xd->mb_to_right_edge is in units of pixels * 8. This converts
    // it to 4x4 block sizes.
    if (xd->mb_to_right_edge < 0)
      max_blocks_wide += (xd->mb_to_right_edge >> (5 + pd->subsampling_x));
    if (xd->mb_to_bottom_edge < 0)
      max_blocks_high += (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
    i = 0;
    // Unlike the normal case - in here we have to keep track of the
    // row and column of the blocks we use so that we know if we are in
    // the unrestricted motion border.
    for (r = 0; r < num_4x4_h; r += (1 << tx_size)) {
      for (c = 0; c < num_4x4_w; c += (1 << tx_size)) {
        if (r < max_blocks_high && c < max_blocks_wide)
          visit(plane, i, plane_bsize, tx_size, arg);
        // The block index still advances for skipped (out-of-frame) blocks
        // so that coefficient offsets match the unclipped layout.
        i += step;
      }
    }
  } else {
// Encodes all planes of the superblock: subtracts the prediction, and when
// RD trellis optimization is enabled primes the per-plane entropy contexts
// before walking every transform block with encode_block().
void vp9_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
  MACROBLOCKD *const xd = &x->e_mbd;
  struct optimize_ctx ctx;
  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
  struct encode_b_args arg = {x, &ctx, &mbmi->skip};
  int plane;

  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
    // Residual is only recomputed when recoding is not being skipped.
    if (!x->skip_recode) vp9_subtract_plane(x, bsize, plane);

    if (x->optimize && (!x->skip_recode || !x->skip_optimize)) {
      const struct macroblockd_plane *const pd = &xd->plane[plane];
      // Consistency fix: get_uv_tx_size() takes the plane descriptor as a
      // second argument at every other call site in this file; the previous
      // one-argument call did not match that signature.
      const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
      vp9_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane],
                               ctx.tl[plane]);
    }

    vp9_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
                                           &arg);
  }
}
// Visits the luma and chroma transform blocks of one block in interleaved
// order: after each luma TU, the next chroma TU (if any remain) is visited
// for both chroma planes (U then V) with the same offset. A trailing loop
// drains any chroma TUs left over when chroma has more TUs than luma
// (possible with 4:2:2 subsampling).
void av1_foreach_transformed_block_interleave(
    const MACROBLOCKD *const xd, BLOCK_SIZE bsize,
    foreach_transformed_block_visitor visit, void *arg) {
  const struct macroblockd_plane *const pd_y = &xd->plane[0];
  const struct macroblockd_plane *const pd_c = &xd->plane[1];
  const MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;

  // Transform sizes are log2 scales; tx_sz_* is the TU side in 4x4 units.
  const TX_SIZE tx_log2_y = mbmi->tx_size;
  const TX_SIZE tx_log2_c = get_uv_tx_size(mbmi, pd_c);
  const int tx_sz_y = (1 << tx_log2_y);
  const int tx_sz_c = (1 << tx_log2_c);

  const BLOCK_SIZE plane_bsize_y = get_plane_block_size(bsize, pd_y);
  const BLOCK_SIZE plane_bsize_c = get_plane_block_size(bsize, pd_c);

  const int num_4x4_w_y = num_4x4_blocks_wide_lookup[plane_bsize_y];
  const int num_4x4_w_c = num_4x4_blocks_wide_lookup[plane_bsize_c];
  const int num_4x4_h_y = num_4x4_blocks_high_lookup[plane_bsize_y];
  const int num_4x4_h_c = num_4x4_blocks_high_lookup[plane_bsize_c];

  // Coefficient-offset advance contributed by one TU (its area in 4x4 units).
  const int step_y = 1 << (tx_log2_y << 1);
  const int step_c = 1 << (tx_log2_c << 1);

  const int max_4x4_w_y =
      get_max_4x4_size(num_4x4_w_y, xd->mb_to_right_edge, pd_y->subsampling_x);
  const int max_4x4_h_y =
      get_max_4x4_size(num_4x4_h_y, xd->mb_to_bottom_edge, pd_y->subsampling_y);

  // Offset skipped per *TU row* because TUs beyond the right edge are not
  // visited; the multiplier applied to it below must therefore be the TU-row
  // index, not a 4x4-scaled row coordinate.
  const int extra_step_y = ((num_4x4_w_y - max_4x4_w_y) >> tx_log2_y) * step_y;

  const int max_4x4_w_c =
      get_max_4x4_size(num_4x4_w_c, xd->mb_to_right_edge, pd_c->subsampling_x);
  const int max_4x4_h_c =
      get_max_4x4_size(num_4x4_h_c, xd->mb_to_bottom_edge, pd_c->subsampling_y);
  const int extra_step_c = ((num_4x4_w_c - max_4x4_w_c) >> tx_log2_c) * step_c;

  // The max_4x4_w/h may be smaller than tx_sz under some corner cases,
  // i.e. when the SB is split by tile boundaries.
  const int tu_num_w_y = (max_4x4_w_y + tx_sz_y - 1) / tx_sz_y;
  const int tu_num_h_y = (max_4x4_h_y + tx_sz_y - 1) / tx_sz_y;
  const int tu_num_w_c = (max_4x4_w_c + tx_sz_c - 1) / tx_sz_c;
  const int tu_num_h_c = (max_4x4_h_c + tx_sz_c - 1) / tx_sz_c;
  const int tu_num_y = tu_num_w_y * tu_num_h_y;
  const int tu_num_c = tu_num_w_c * tu_num_h_c;

  int tu_idx_c = 0;
  int offset_y, row_y, col_y;
  int offset_c, row_c, col_c;

  for (row_y = 0; row_y < tu_num_h_y; row_y++) {
    for (col_y = 0; col_y < tu_num_w_y; col_y++) {
      // luma
      offset_y = (row_y * tu_num_w_y + col_y) * step_y + row_y * extra_step_y;
      visit(0, offset_y, row_y * tx_sz_y, col_y * tx_sz_y, plane_bsize_y,
            tx_log2_y, arg);
      // chroma
      if (tu_idx_c < tu_num_c) {
        row_c = (tu_idx_c / tu_num_w_c) * tx_sz_c;
        col_c = (tu_idx_c % tu_num_w_c) * tx_sz_c;
        offset_c = tu_idx_c * step_c + (tu_idx_c / tu_num_w_c) * extra_step_c;
        visit(1, offset_c, row_c, col_c, plane_bsize_c, tx_log2_c, arg);
        visit(2, offset_c, row_c, col_c, plane_bsize_c, tx_log2_c, arg);
        tu_idx_c++;
      }
    }
  }

  // In 422 case, it's possible that Chroma has more TUs than Luma.
  while (tu_idx_c < tu_num_c) {
    row_c = (tu_idx_c / tu_num_w_c) * tx_sz_c;
    col_c = (tu_idx_c % tu_num_w_c) * tx_sz_c;
    // Bug fix: multiply extra_step_c by the TU-row index, as the luma path
    // and the interleaved chroma path above do. The previous code used
    // row_c (the TU-row index scaled by tx_sz_c), over-advancing the
    // coefficient offset whenever tx_sz_c > 1.
    offset_c = tu_idx_c * step_c + (tu_idx_c / tu_num_w_c) * extra_step_c;
    visit(1, offset_c, row_c, col_c, plane_bsize_c, tx_log2_c, arg);
    visit(2, offset_c, row_c, col_c, plane_bsize_c, tx_log2_c, arg);
    tu_idx_c++;
  }
}