// Decide whether the segment map should be coded with temporal prediction
// (reusing the previous frame's map) or coded from scratch, by measuring the
// actual bit cost of both methods over the whole frame. Sets
// seg->temporal_update accordingly.
void av1_choose_segmap_coding_method(AV1_COMMON *cm, MACROBLOCKD *xd) {
  struct segmentation *seg = &cm->seg;
  struct segmentation_probs *segp = &cm->fc->seg;
#if CONFIG_TILE_GROUPS
  const int probwt = cm->num_tg;
#else
  const int probwt = 1;
#endif
  unsigned (*temporal_predictor_count)[2] = cm->counts.seg.pred;
  unsigned *no_pred_segcounts = cm->counts.seg.tree_total;
  unsigned *t_unpred_seg_counts = cm->counts.seg.tree_mispred;
  aom_prob no_pred_tree[SEG_TREE_PROBS];
  aom_prob t_pred_tree[SEG_TREE_PROBS];
  aom_prob t_nopred_prob[PREDICTION_PROBS];
  int no_pred_cost;
  int t_pred_cost = INT_MAX;
  (void)xd;

  // Restart all segment-count accumulators before the full-frame scan.
  av1_zero(cm->counts.seg);

  // Pass 1: walk every superblock in every tile and gather statistics on how
  // well the previous frame's segment map predicts the current one.
  for (int tile_row = 0; tile_row < cm->tile_rows; tile_row++) {
    TileInfo tile_info;
    av1_tile_set_row(&tile_info, cm, tile_row);
    for (int tile_col = 0; tile_col < cm->tile_cols; tile_col++) {
      av1_tile_set_col(&tile_info, cm, tile_col);
      MODE_INFO **mi_ptr = cm->mi_grid_visible +
                           tile_info.mi_row_start * cm->mi_stride +
                           tile_info.mi_col_start;
      for (int mi_row = tile_info.mi_row_start; mi_row < tile_info.mi_row_end;
           mi_row += cm->mib_size, mi_ptr += cm->mib_size * cm->mi_stride) {
        MODE_INFO **mi = mi_ptr;
        for (int mi_col = tile_info.mi_col_start;
             mi_col < tile_info.mi_col_end;
             mi_col += cm->mib_size, mi += cm->mib_size) {
          count_segs_sb(cm, xd, &tile_info, mi, no_pred_segcounts,
                        temporal_predictor_count, t_unpred_seg_counts, mi_row,
                        mi_col, cm->sb_size);
        }
      }
    }
  }

  // Cost of coding every segment id explicitly (no temporal prediction).
  calc_segtree_probs(no_pred_segcounts, no_pred_tree, segp->tree_probs,
                     probwt);
  no_pred_cost = cost_segmap(no_pred_segcounts, no_pred_tree);

  // Temporal prediction is only available on inter frames that are not
  // error-resilient; key frames always code the map explicitly.
  if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
    // Cost of explicitly coding only the mispredicted segments.
    calc_segtree_probs(t_unpred_seg_counts, t_pred_tree, segp->tree_probs,
                       probwt);
    t_pred_cost = cost_segmap(t_unpred_seg_counts, t_pred_tree);

    // Add the per-context cost of signaling "predicted or not" flags.
    for (int i = 0; i < PREDICTION_PROBS; i++) {
      const int count0 = temporal_predictor_count[i][0];
      const int count1 = temporal_predictor_count[i][1];
      t_nopred_prob[i] = get_binary_prob(count0, count1);
      av1_prob_diff_update_savings_search(temporal_predictor_count[i],
                                          segp->pred_probs[i],
                                          &t_nopred_prob[i], DIFF_UPDATE_PROB,
                                          probwt);
      t_pred_cost += count0 * av1_cost_zero(t_nopred_prob[i]) +
                     count1 * av1_cost_one(t_nopred_prob[i]);
    }
  }

  // Pick the cheaper method. Temporal prediction can only win when it was
  // actually evaluated, which implies the frame is not error-resilient.
  const int use_temporal_update = t_pred_cost < no_pred_cost;
  if (use_temporal_update) {
    assert(!cm->error_resilient_mode);
  }
  seg->temporal_update = use_temporal_update;
}
/* Snapshot the decoder's per-frame and per-mode-info state into the
 * inspection structure for external tooling. Returns 1 on completion.
 *
 * TODO(negge): This function may be called by more than one thread when
 * using a multi-threaded decoder and this may cause a data race. */
int ifd_inspect(insp_frame_data *fd, void *decoder) {
  struct AV1Decoder *pbi = (struct AV1Decoder *)decoder;
  AV1_COMMON *const cm = &pbi->common;

  // (Re)allocate the MI grid whenever the frame dimensions change.
  if (fd->mi_rows != cm->mi_rows || fd->mi_cols != cm->mi_cols) {
    ifd_clear(fd);
    ifd_init_mi_rc(fd, cm->mi_rows, cm->mi_cols);
  }

  // Frame-level state.
  fd->show_frame = cm->show_frame;
  fd->frame_type = cm->frame_type;
  fd->base_qindex = cm->base_qindex;

  // Record the dimensions of the first tile only, until generic tile
  // support can be added.
  TileInfo tile_info;
  av1_tile_set_row(&tile_info, cm, 0);
  av1_tile_set_col(&tile_info, cm, 0);
  fd->tile_mi_cols = tile_info.mi_col_end - tile_info.mi_col_start;
  fd->tile_mi_rows = tile_info.mi_row_end - tile_info.mi_row_start;

  fd->delta_q_present_flag = cm->delta_q_present_flag;
  fd->delta_q_res = cm->delta_q_res;
#if CONFIG_ACCOUNTING
  fd->accounting = &pbi->accounting;
#endif
  // TODO(negge): copy per frame CDEF data

  // Per-segment dequantization tables for all three planes.
  for (int seg = 0; seg < MAX_SEGMENTS; seg++) {
    for (int q = 0; q < 2; q++) {
      fd->y_dequant[seg][q] = cm->y_dequant_QTX[seg][q];
      fd->u_dequant[seg][q] = cm->u_dequant_QTX[seg][q];
      fd->v_dequant[seg][q] = cm->v_dequant_QTX[seg][q];
    }
  }

  // Copy one inspection record per mode-info unit.
  for (int row = 0; row < cm->mi_rows; row++) {
    for (int col = 0; col < cm->mi_cols; col++) {
      const MB_MODE_INFO *mbmi = cm->mi_grid_visible[row * cm->mi_stride + col];
      insp_mi_data *mi = &fd->mi_grid[row * cm->mi_cols + col];
      // Segment
      mi->segment_id = mbmi->segment_id;
      // Motion vectors
      mi->mv[0].row = mbmi->mv[0].as_mv.row;
      mi->mv[0].col = mbmi->mv[0].as_mv.col;
      mi->mv[1].row = mbmi->mv[1].as_mv.row;
      mi->mv[1].col = mbmi->mv[1].as_mv.col;
      // Reference frames
      mi->ref_frame[0] = mbmi->ref_frame[0];
      mi->ref_frame[1] = mbmi->ref_frame[1];
      // Luma prediction mode
      mi->mode = mbmi->mode;
      // Chroma prediction mode is only meaningful for intra luma modes.
      mi->uv_mode = (mi->mode < INTRA_MODES) ? mbmi->uv_mode : UV_MODE_INVALID;
      // Block size
      mi->sb_type = mbmi->sb_type;
      // Skip flag
      mi->skip = mbmi->skip;
      // Interpolation filters (one per direction).
      mi->filter[0] = av1_extract_interp_filter(mbmi->interp_filters, 0);
      mi->filter[1] = av1_extract_interp_filter(mbmi->interp_filters, 1);
      mi->dual_filter_type = mi->filter[0] * 3 + mi->filter[1];
      // Transform
      // TODO(anyone): extract tx type info from mbmi->txk_type[].
      mi->tx_type = DCT_DCT;
      mi->tx_size = mbmi->tx_size;
      // Split the combined CDEF index into primary level and secondary
      // strength; a secondary strength of 3 is coded as 4.
      mi->cdef_level =
          cm->cdef_strengths[mbmi->cdef_strength] / CDEF_SEC_STRENGTHS;
      mi->cdef_strength =
          cm->cdef_strengths[mbmi->cdef_strength] % CDEF_SEC_STRENGTHS;
      mi->cdef_strength += mi->cdef_strength == 3;
      // CFL parameters are valid only when chroma uses CFL prediction.
      if (mbmi->uv_mode == UV_CFL_PRED) {
        mi->cfl_alpha_idx = mbmi->cfl_alpha_idx;
        mi->cfl_alpha_sign = mbmi->cfl_alpha_signs;
      } else {
        mi->cfl_alpha_idx = 0;
        mi->cfl_alpha_sign = 0;
      }
      // delta_q
      mi->current_qindex = mbmi->current_qindex;
    }
  }
  return 1;
}