// Rough intra-cost estimate for the current 16x16 macroblock: build the
// prediction for every 16x16 intra mode (DC_PRED .. TM_PRED) and keep the
// mode whose prediction has the lowest SAD against the source block.
// Writes the winning mode to *pbest_mode (if non-NULL) and returns its SAD.
// 4x4 modes are intentionally skipped -- only a coarse estimate is wanted.
static int find_best_16x16_intra(VP9_COMP *cpi, PREDICTION_MODE *pbest_mode) {
  MACROBLOCK *const x = &cpi->td.mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  PREDICTION_MODE this_mode;
  PREDICTION_MODE lowest_mode = -1;
  unsigned int lowest_sad = INT_MAX;

  // calculate SATD for each intra prediction mode;
  // we're intentionally not doing 4x4, we just want a rough estimate
  for (this_mode = DC_PRED; this_mode <= TM_PRED; this_mode++) {
    unsigned int this_sad;

    xd->mi[0]->mbmi.mode = this_mode;
    vp9_predict_intra_block(xd, 0, 2, TX_16X16, this_mode,
                            x->plane[0].src.buf, x->plane[0].src.stride,
                            xd->plane[0].dst.buf, xd->plane[0].dst.stride,
                            0, 0, 0);
    this_sad = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                            xd->plane[0].dst.buf, xd->plane[0].dst.stride);

    // Keep only improvements over the running best.
    if (this_sad >= lowest_sad)
      continue;
    lowest_sad = this_sad;
    lowest_mode = this_mode;
  }

  if (pbest_mode != NULL)
    *pbest_mode = lowest_mode;

  return lowest_sad;
}
// Rough intra-cost estimate for the 16x16 macroblock at mb_y_offset in
// `buf` (legacy encoder API).  Each candidate mode's prediction is built
// into xd->predictor and scored with SAD against the source; the winning
// mode is written to *pbest_mode (if non-NULL) and its SAD returned.
// 4x4 modes are deliberately skipped -- only a coarse estimate is needed.
static int find_best_16x16_intra ( VP9_COMP *cpi, YV12_BUFFER_CONFIG *buf,
                                   int mb_y_offset,
                                   MB_PREDICTION_MODE *pbest_mode ) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_PREDICTION_MODE candidate;
  MB_PREDICTION_MODE winner = -1;
  unsigned int winner_sad = INT_MAX;

  // calculate SATD for each intra prediction mode;
  // we're intentionally not doing 4x4, we just want a rough estimate
  for (candidate = DC_PRED; candidate <= TM_PRED; candidate++) {
    unsigned int sad;

    xd->mode_info_context->mbmi.mode = candidate;
    vp9_build_intra_predictors_mby(xd);
    // The running best is passed as the final SAD argument -- presumably a
    // max-SAD early-out bound, matching the legacy vp9_sad16x16 signature.
    sad = vp9_sad16x16(xd->predictor, 16, buf->y_buffer + mb_y_offset,
                       buf->y_stride, winner_sad);

    if (sad < winner_sad) {
      winner_sad = sad;
      winner = candidate;
    }
  }

  if (pbest_mode != NULL)
    *pbest_mode = winner;

  return winner_sad;
}
// Full 16x16 motion estimation for the macroblock at buf_mb_y_offset in
// `buf`, searched against reference frame `ref` (legacy BLOCK/BLOCKD API).
// Scores the zero MV, a refined search seeded from *ref_mv, and -- when
// ref_mv is non-zero -- a refined search seeded from (0,0).  The cheapest
// MV is written to *dst_mv and its error returned.
static int do_16x16_motion_search ( VP9_COMP *cpi, int_mv *ref_mv,
                                    int_mv *dst_mv, YV12_BUFFER_CONFIG *buf,
                                    int buf_mb_y_offset,
                                    YV12_BUFFER_CONFIG *ref,
                                    int mb_y_offset ) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  unsigned int err, tmp_err;
  int_mv tmp_mv;
  int n;

  // Point each of the 16 4x4 sub-blocks at its source and reference
  // pixels: (n & 12) selects the 4-row group, (n & 3) * 4 the column.
  for (n = 0; n < 16; n++) {
    BLOCKD *d = &xd->block[n];
    BLOCK *b = &x->block[n];

    b->base_src = &buf->y_buffer;
    b->src_stride = buf->y_stride;
    b->src = buf->y_stride * (n & 12) + (n & 3) * 4 + buf_mb_y_offset;

    d->base_pre = &ref->y_buffer;
    d->pre_stride = ref->y_stride;
    d->pre = ref->y_stride * (n & 12) + (n & 3) * 4 + mb_y_offset;
  }

  // Try zero MV first
  // FIXME should really use something like near/nearest MV and/or MV prediction
  xd->pre.y_buffer = ref->y_buffer + mb_y_offset;
  xd->pre.y_stride = ref->y_stride;
  // NOTE(review): this scores the reference block against xd->dst rather
  // than the source pixels in `buf` -- presumably xd->dst holds the
  // current-frame data at this point; confirm against the caller's setup.
  err = vp9_sad16x16(ref->y_buffer + mb_y_offset, ref->y_stride,
                     xd->dst.y_buffer, xd->dst.y_stride, INT_MAX);
  dst_mv->as_int = 0;

  // Test last reference frame using the previous best mv as the
  // starting point (best reference) for the search
  tmp_err = do_16x16_motion_iteration(cpi, ref_mv, &tmp_mv);
  if (tmp_err < err) {
    err = tmp_err;
    dst_mv->as_int = tmp_mv.as_int;
  }

  // If the current best reference mv is not centred on 0,0 then do a 0,0 based search as well
  if (ref_mv->as_int) {
    unsigned int tmp_err;
    int_mv zero_ref_mv, tmp_mv;

    zero_ref_mv.as_int = 0;
    tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv);
    if (tmp_err < err) {
      dst_mv->as_int = tmp_mv.as_int;
      err = tmp_err;
    }
  }

  return err;
}
// Refine a 16x16 motion vector: run a hex search seeded from *ref_mv,
// then sub-pel refinement, then build the inter predictor with the
// refined MV and return its SAD against the source block.  The refined
// MV is written to *dst_mv.
static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, const MV *ref_mv,
                                              MV *dst_mv, int mb_row,
                                              int mb_col) {
  MACROBLOCK *const x = &cpi->td.mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv;
  const vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];

  // Save the MV search window so it can be restored before returning;
  // vp9_set_mv_search_range() clamps it around ref_mv below.
  const int tmp_col_min = x->mv_col_min;
  const int tmp_col_max = x->mv_col_max;
  const int tmp_row_min = x->mv_row_min;
  const int tmp_row_max = x->mv_row_max;
  MV ref_full;
  int cost_list[5];

  // Further step/diamond searches as necessary
  int step_param = mv_sf->reduce_first_step_size;
  step_param = MIN(step_param, MAX_MVSEARCH_STEPS - 2);

  vp9_set_mv_search_range(x, ref_mv);

  // Full-pel starting point (MVs are in 1/8-pel units).
  ref_full.col = ref_mv->col >> 3;
  ref_full.row = ref_mv->row >> 3;

  /*cpi->sf.search_method == HEX*/
  vp9_hex_search(x, &ref_full, step_param, x->errorperbit, 0,
                 cond_cost_list(cpi, cost_list), &v_fn_ptr, 0, ref_mv,
                 dst_mv);

  // Try sub-pixel MC
  // if (bestsme > error_thresh && bestsme < INT_MAX)
  {
    int distortion;
    unsigned int sse;
    // Return value ignored: the SAD of the final predictor below is the
    // error the caller compares against.
    cpi->find_fractional_mv_step(
        x, dst_mv, ref_mv, cpi->common.allow_high_precision_mv,
        x->errorperbit, &v_fn_ptr, 0, mv_sf->subpel_iters_per_step,
        cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL,
        0, 0);
  }

  // Build the inter predictor with the refined MV ...
  xd->mi[0]->mbmi.mode = NEWMV;
  xd->mi[0]->mbmi.mv[0].as_mv = *dst_mv;

  vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);

  /* restore UMV window */
  x->mv_col_min = tmp_col_min;
  x->mv_col_max = tmp_col_max;
  x->mv_row_min = tmp_row_min;
  x->mv_row_max = tmp_row_max;

  // ... and score it against the source block.
  return vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                      xd->plane[0].dst.buf, xd->plane[0].dst.stride);
}
// Refine a 16x16 motion vector (intermediate encoder API): hex search
// seeded from *ref_mv, then sub-pel refinement, then build the inter
// predictor and return its SAD against the source block.  The refined MV
// is written to *dst_mv.
static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, int_mv *ref_mv,
                                              int_mv *dst_mv, int mb_row,
                                              int mb_col) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
  unsigned int best_err;

  // Save the MV search window so it can be restored before returning;
  // vp9_clamp_mv_min_max() narrows it around ref_mv below.
  const int tmp_col_min = x->mv_col_min;
  const int tmp_col_max = x->mv_col_max;
  const int tmp_row_min = x->mv_row_min;
  const int tmp_row_max = x->mv_row_max;
  int_mv ref_full;

  // Further step/diamond searches as necessary
  int step_param = cpi->sf.reduce_first_step_size +
                   (cpi->speed < 8 ? (cpi->speed > 5 ? 1 : 0) : 2);
  step_param = MIN(step_param, (cpi->sf.max_step_search_steps - 2));

  vp9_clamp_mv_min_max(x, &ref_mv->as_mv);

  // Full-pel starting point (MVs are in 1/8-pel units).
  ref_full.as_mv.col = ref_mv->as_mv.col >> 3;
  ref_full.as_mv.row = ref_mv->as_mv.row >> 3;

  /*cpi->sf.search_method == HEX*/
  best_err = vp9_hex_search(x, &ref_full, step_param, x->errorperbit, 0,
                            &v_fn_ptr, 0, ref_mv, dst_mv);

  // Try sub-pixel MC
  // if (bestsme > error_thresh && bestsme < INT_MAX)
  {
    int distortion;
    unsigned int sse;
    // best_err is overwritten by the final predictor SAD below, so the
    // sub-pel cost stored here is only transient.
    best_err = cpi->find_fractional_mv_step(
        x, dst_mv, ref_mv, x->errorperbit, &v_fn_ptr, 0,
        cpi->sf.subpel_iters_per_step, NULL, NULL, &distortion, &sse);
  }

  // Build the inter predictor with the refined MV and score it against
  // the source block.
  vp9_set_mbmode_and_mvs(x, NEWMV, dst_mv);
  vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);
  best_err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                          xd->plane[0].dst.buf, xd->plane[0].dst.stride,
                          INT_MAX);

  /* restore UMV window */
  x->mv_col_min = tmp_col_min;
  x->mv_col_max = tmp_col_max;
  x->mv_row_min = tmp_row_min;
  x->mv_row_max = tmp_row_max;

  return best_err;
}
// Score the (0,0) motion vector for the current 16x16 block: the SAD
// between the source block and the co-located pixels of the first
// reference plane.  *dst_mv is set to the zero MV; the SAD is returned.
static int do_16x16_zerozero_search(VP9_COMP *cpi, int_mv *dst_mv) {
  MACROBLOCK *const x = &cpi->td.mb;
  MACROBLOCKD *const xd = &x->e_mbd;

  // Try zero MV first
  // FIXME should really use something like near/nearest MV and/or MV
  // prediction
  dst_mv->as_int = 0;

  return vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                      xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride);
}
// Full 16x16 motion estimation for the current macroblock.  Three
// candidates are scored: the zero MV, a refined search seeded from
// *ref_mv, and -- when ref_mv is non-zero -- a refined search seeded from
// (0,0).  The cheapest MV is written to *dst_mv and its error returned.
static int do_16x16_motion_search(VP9_COMP *cpi, const int_mv *ref_mv,
                                  int_mv *dst_mv, int mb_row, int mb_col) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  unsigned int best_err, iter_err;
  int_mv iter_mv;

  // Try zero MV first
  // FIXME should really use something like near/nearest MV and/or MV
  // prediction
  best_err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                          xd->plane[0].pre[0].buf,
                          xd->plane[0].pre[0].stride, INT_MAX);
  dst_mv->as_int = 0;

  // Test last reference frame using the previous best mv as the
  // starting point (best reference) for the search
  iter_err = do_16x16_motion_iteration(cpi, &ref_mv->as_mv, &iter_mv.as_mv,
                                       mb_row, mb_col);
  if (iter_err < best_err) {
    best_err = iter_err;
    dst_mv->as_int = iter_mv.as_int;
  }

  // If the current best reference mv is not centered on 0,0 then do a 0,0
  // based search as well.
  if (ref_mv->as_int) {
    int_mv zero_seed;

    zero_seed.as_int = 0;
    iter_err = do_16x16_motion_iteration(cpi, &zero_seed.as_mv,
                                         &iter_mv.as_mv, mb_row, mb_col);
    if (iter_err < best_err) {
      best_err = iter_err;
      dst_mv->as_int = iter_mv.as_int;
    }
  }

  return best_err;
}
// Score the (0,0) motion vector for the macroblock at buf_mb_y_offset in
// `buf` against reference frame `ref` (legacy BLOCK/BLOCKD API).  Sets
// *dst_mv to the zero MV and returns the SAD.
static int do_16x16_zerozero_search ( VP9_COMP *cpi, int_mv *dst_mv,
                                      YV12_BUFFER_CONFIG *buf,
                                      int buf_mb_y_offset,
                                      YV12_BUFFER_CONFIG *ref,
                                      int mb_y_offset ) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  unsigned int err;
  int n;

  // Point each of the 16 4x4 sub-blocks at its source and reference
  // pixels: (n & 12) selects the 4-row group, (n & 3) * 4 the column.
  for (n = 0; n < 16; n++) {
    BLOCKD *d = &xd->block[n];
    BLOCK *b = &x->block[n];

    b->base_src = &buf->y_buffer;
    b->src_stride = buf->y_stride;
    b->src = buf->y_stride * (n & 12) + (n & 3) * 4 + buf_mb_y_offset;

    d->base_pre = &ref->y_buffer;
    d->pre_stride = ref->y_stride;
    d->pre = ref->y_stride * (n & 12) + (n & 3) * 4 + mb_y_offset;
  }

  // Try zero MV first
  // FIXME should really use something like near/nearest MV and/or MV prediction
  xd->pre.y_buffer = ref->y_buffer + mb_y_offset;
  xd->pre.y_stride = ref->y_stride;
  // NOTE(review): this scores the reference block against xd->dst rather
  // than the source pixels in `buf` -- presumably xd->dst holds the
  // current-frame data at this point; confirm against the caller's setup.
  err = vp9_sad16x16(ref->y_buffer + mb_y_offset, ref->y_stride,
                     xd->dst.y_buffer, xd->dst.y_stride, INT_MAX);

  dst_mv->as_int = 0;

  return err;
}
// Refine a 16x16 motion vector (oldest BLOCK/BLOCKD API): hex search
// seeded from *ref_mv, then sub-pel refinement, then build the inter
// predictor into xd->predictor and return its SAD against the
// reconstruction buffer.  The refined MV is written to *dst_mv.
static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, int_mv *ref_mv,
                                              int_mv *dst_mv, int mb_row,
                                              int mb_col) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  BLOCK *b = &x->block[0];
  BLOCKD *d = &xd->block[0];
  vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
  unsigned int best_err;

  // Save the MV search window so it can be restored before returning;
  // vp9_clamp_mv_min_max() narrows it around ref_mv below.
  int tmp_col_min = x->mv_col_min;
  int tmp_col_max = x->mv_col_max;
  int tmp_row_min = x->mv_row_min;
  int tmp_row_max = x->mv_row_max;
  int_mv ref_full;

  // Further step/diamond searches as necessary
  int step_param = cpi->sf.first_step +
                   (cpi->Speed < 8 ? (cpi->Speed > 5 ? 1 : 0) : 2);

  vp9_clamp_mv_min_max(x, ref_mv);

  // Full-pel starting point (MVs are in 1/8-pel units).
  ref_full.as_mv.col = ref_mv->as_mv.col >> 3;
  ref_full.as_mv.row = ref_mv->as_mv.row >> 3;

  /*cpi->sf.search_method == HEX*/
  best_err = vp9_hex_search(
      x, b, d, &ref_full, dst_mv, step_param, x->errorperbit, &v_fn_ptr,
      NULL, NULL, NULL, NULL, ref_mv);

  // Try sub-pixel MC
  // if (bestsme > error_thresh && bestsme < INT_MAX)
  {
    int distortion;
    unsigned int sse;
    // best_err is overwritten by the final predictor SAD below, so the
    // sub-pel cost stored here is only transient.
    best_err = cpi->find_fractional_mv_step(
        x, b, d, dst_mv, ref_mv, x->errorperbit, &v_fn_ptr, NULL, NULL,
        &distortion, &sse);
  }

  // Build the inter predictor with the refined MV and score it.
  vp9_set_mbmode_and_mvs(x, NEWMV, dst_mv);
  vp9_build_inter16x16_predictors_mby(xd, xd->predictor, 16, mb_row, mb_col);
  best_err = vp9_sad16x16(xd->dst.y_buffer, xd->dst.y_stride,
                          xd->predictor, 16, INT_MAX);

  /* restore UMV window */
  x->mv_col_min = tmp_col_min;
  x->mv_col_max = tmp_col_max;
  x->mv_row_min = tmp_row_min;
  x->mv_row_max = tmp_row_max;

  return best_err;
}