static void count_segs_sb(VP9_COMP *cpi, MODE_INFO *mi,
                          int *no_pred_segcounts,
                          int (*temporal_predictor_count)[2],
                          int *t_unpred_seg_counts,
                          int mi_row, int mi_col, BLOCK_SIZE_TYPE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  const int mis = cm->mode_info_stride;
  int bwl, bhl;
  const int bsl = mi_width_log2(bsize), bs = 1 << (bsl - 1);

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  bwl = mi_width_log2(mi->mbmi.sb_type);
  bhl = mi_height_log2(mi->mbmi.sb_type);

  if (bwl == bsl && bhl == bsl) {
    count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count,
               t_unpred_seg_counts, 1 << bsl, 1 << bsl, mi_row, mi_col);
  } else if (bwl == bsl && bhl < bsl) {
    count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count,
               t_unpred_seg_counts, 1 << bsl, bs, mi_row, mi_col);
    count_segs(cpi, mi + bs * mis, no_pred_segcounts, temporal_predictor_count,
               t_unpred_seg_counts, 1 << bsl, bs, mi_row + bs, mi_col);
  } else if (bwl < bsl && bhl == bsl) {
    count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count,
               t_unpred_seg_counts, bs, 1 << bsl, mi_row, mi_col);
    count_segs(cpi, mi + bs, no_pred_segcounts, temporal_predictor_count,
               t_unpred_seg_counts, bs, 1 << bsl, mi_row, mi_col + bs);
  } else {
    BLOCK_SIZE_TYPE subsize;
    int n;

    assert(bwl < bsl && bhl < bsl);
    if (bsize == BLOCK_SIZE_SB64X64) {
      subsize = BLOCK_SIZE_SB32X32;
    } else if (bsize == BLOCK_SIZE_SB32X32) {
      subsize = BLOCK_SIZE_MB16X16;
    } else {
      assert(bsize == BLOCK_SIZE_MB16X16);
      subsize = BLOCK_SIZE_SB8X8;
    }

    for (n = 0; n < 4; n++) {
      const int y_idx = n >> 1, x_idx = n & 0x01;

      count_segs_sb(cpi, mi + y_idx * bs * mis + x_idx * bs,
                    no_pred_segcounts, temporal_predictor_count,
                    t_unpred_seg_counts,
                    mi_row + y_idx * bs, mi_col + x_idx * bs, subsize);
    }
  }
}
static void count_segs_sb(const VP9_COMMON *cm, MACROBLOCKD *xd,
                          const TileInfo *tile, MODE_INFO **mi,
                          int *no_pred_segcounts,
                          int (*temporal_predictor_count)[2],
                          int *t_unpred_seg_counts,
                          int mi_row, int mi_col, BLOCK_SIZE bsize) {
  const int mis = cm->mi_stride;
  int bw, bh;
  const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  bw = num_8x8_blocks_wide_lookup[mi[0]->mbmi.sb_type];
  bh = num_8x8_blocks_high_lookup[mi[0]->mbmi.sb_type];

  if (bw == bs && bh == bs) {
    count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
               t_unpred_seg_counts, bs, bs, mi_row, mi_col);
  } else if (bw == bs && bh < bs) {
    count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
               t_unpred_seg_counts, bs, hbs, mi_row, mi_col);
    count_segs(cm, xd, tile, mi + hbs * mis, no_pred_segcounts,
               temporal_predictor_count, t_unpred_seg_counts, bs, hbs,
               mi_row + hbs, mi_col);
  } else if (bw < bs && bh == bs) {
    count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
               t_unpred_seg_counts, hbs, bs, mi_row, mi_col);
    count_segs(cm, xd, tile, mi + hbs, no_pred_segcounts,
               temporal_predictor_count, t_unpred_seg_counts, hbs, bs,
               mi_row, mi_col + hbs);
  } else {
    const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
    int n;

    assert(bw < bs && bh < bs);

    for (n = 0; n < 4; n++) {
      const int mi_dc = hbs * (n & 1);
      const int mi_dr = hbs * (n >> 1);

      count_segs_sb(cm, xd, tile, &mi[mi_dr * mis + mi_dc], no_pred_segcounts,
                    temporal_predictor_count, t_unpred_seg_counts,
                    mi_row + mi_dr, mi_col + mi_dc, subsize);
    }
  }
}
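// Illustrative sketch (not part of the codec) of the quadtree walk that
// count_segs_sb performs over 8x8 mode-info units: a square region of `bs`
// units is either counted whole, counted as two half-height or half-width
// rectangles, or split into four quadrants of size hbs = bs / 2 and walked
// recursively.  visit_region and walk_quadtree are hypothetical stand-ins
// for count_segs and count_segs_sb; the coded block size is assumed uniform
// here, whereas the real code re-reads mi[0]->mbmi.sb_type at every level.
#include <stdio.h>

static void visit_region(int row, int col, int h, int w) {
  printf("count %dx%d region at mi (%d,%d)\n", 8 * h, 8 * w, row, col);
}

static void walk_quadtree(int row, int col, int bs, int coded_w, int coded_h) {
  const int hbs = bs / 2;
  if (coded_w == bs && coded_h == bs) {
    visit_region(row, col, bs, bs);            // block fills the region
  } else if (coded_w == bs && coded_h < bs) {  // horizontal split
    visit_region(row, col, hbs, bs);
    visit_region(row + hbs, col, hbs, bs);
  } else if (coded_w < bs && coded_h == bs) {  // vertical split
    visit_region(row, col, bs, hbs);
    visit_region(row, col + hbs, bs, hbs);
  } else {                                     // recurse into four quadrants
    int n;
    for (n = 0; n < 4; n++)
      walk_quadtree(row + hbs * (n >> 1), col + hbs * (n & 1), hbs,
                    coded_w, coded_h);
  }
}

int main(void) {
  // e.g. a 64x64 superblock (8 mode-info units) coded entirely in 16x16 blocks
  walk_quadtree(0, 0, 8, 2, 2);
  return 0;
}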
void av1_choose_segmap_coding_method(AV1_COMMON *cm, MACROBLOCKD *xd) {
  struct segmentation *seg = &cm->seg;
  struct segmentation_probs *segp = &cm->fc->seg;

  int no_pred_cost;
  int t_pred_cost = INT_MAX;

  int i, tile_col, tile_row, mi_row, mi_col;
#if CONFIG_TILE_GROUPS
  const int probwt = cm->num_tg;
#else
  const int probwt = 1;
#endif

  unsigned (*temporal_predictor_count)[2] = cm->counts.seg.pred;
  unsigned *no_pred_segcounts = cm->counts.seg.tree_total;
  unsigned *t_unpred_seg_counts = cm->counts.seg.tree_mispred;

  aom_prob no_pred_tree[SEG_TREE_PROBS];
  aom_prob t_pred_tree[SEG_TREE_PROBS];
  aom_prob t_nopred_prob[PREDICTION_PROBS];

  (void)xd;

  // We are about to recompute all the segment counts, so zero the
  // accumulators.
  av1_zero(cm->counts.seg);

  // First of all generate stats regarding how well the last segment map
  // predicts this one
  for (tile_row = 0; tile_row < cm->tile_rows; tile_row++) {
    TileInfo tile_info;
    av1_tile_set_row(&tile_info, cm, tile_row);
    for (tile_col = 0; tile_col < cm->tile_cols; tile_col++) {
      MODE_INFO **mi_ptr;
      av1_tile_set_col(&tile_info, cm, tile_col);
      mi_ptr = cm->mi_grid_visible + tile_info.mi_row_start * cm->mi_stride +
               tile_info.mi_col_start;
      for (mi_row = tile_info.mi_row_start; mi_row < tile_info.mi_row_end;
           mi_row += cm->mib_size, mi_ptr += cm->mib_size * cm->mi_stride) {
        MODE_INFO **mi = mi_ptr;
        for (mi_col = tile_info.mi_col_start; mi_col < tile_info.mi_col_end;
             mi_col += cm->mib_size, mi += cm->mib_size) {
          count_segs_sb(cm, xd, &tile_info, mi, no_pred_segcounts,
                        temporal_predictor_count, t_unpred_seg_counts, mi_row,
                        mi_col, cm->sb_size);
        }
      }
    }
  }

  // Work out probability tree for coding segments without prediction
  // and the cost.
  calc_segtree_probs(no_pred_segcounts, no_pred_tree, segp->tree_probs,
                     probwt);
  no_pred_cost = cost_segmap(no_pred_segcounts, no_pred_tree);

  // Key frames cannot use temporal prediction
  if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
    // Work out probability tree for coding those segments not
    // predicted using the temporal method and the cost.
    calc_segtree_probs(t_unpred_seg_counts, t_pred_tree, segp->tree_probs,
                       probwt);
    t_pred_cost = cost_segmap(t_unpred_seg_counts, t_pred_tree);

    // Add in the cost of the signaling for each prediction context.
    for (i = 0; i < PREDICTION_PROBS; i++) {
      const int count0 = temporal_predictor_count[i][0];
      const int count1 = temporal_predictor_count[i][1];

      t_nopred_prob[i] = get_binary_prob(count0, count1);
      av1_prob_diff_update_savings_search(temporal_predictor_count[i],
                                          segp->pred_probs[i],
                                          &t_nopred_prob[i], DIFF_UPDATE_PROB,
                                          probwt);

      // Add in the predictor signaling cost
      t_pred_cost += count0 * av1_cost_zero(t_nopred_prob[i]) +
                     count1 * av1_cost_one(t_nopred_prob[i]);
    }
  }

  // Now choose which coding method to use.
  if (t_pred_cost < no_pred_cost) {
    assert(!cm->error_resilient_mode);
    seg->temporal_update = 1;
  } else {
    seg->temporal_update = 0;
  }
}
static void count_segs_sb(const AV1_COMMON *cm, MACROBLOCKD *xd,
                          const TileInfo *tile, MODE_INFO **mi,
                          unsigned *no_pred_segcounts,
                          unsigned (*temporal_predictor_count)[2],
                          unsigned *t_unpred_seg_counts, int mi_row,
                          int mi_col, BLOCK_SIZE bsize) {
  const int mis = cm->mi_stride;
  const int bs = mi_size_wide[bsize], hbs = bs / 2;
#if CONFIG_EXT_PARTITION_TYPES
  PARTITION_TYPE partition;
#else
  int bw, bh;
#endif  // CONFIG_EXT_PARTITION_TYPES

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

#if CONFIG_EXT_PARTITION_TYPES
  if (bsize == BLOCK_8X8)
    partition = PARTITION_NONE;
  else
    partition = get_partition(cm, mi_row, mi_col, bsize);
  switch (partition) {
    case PARTITION_NONE:
      count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
                 t_unpred_seg_counts, bs, bs, mi_row, mi_col);
      break;
    case PARTITION_HORZ:
      count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
                 t_unpred_seg_counts, bs, hbs, mi_row, mi_col);
      count_segs(cm, xd, tile, mi + hbs * mis, no_pred_segcounts,
                 temporal_predictor_count, t_unpred_seg_counts, bs, hbs,
                 mi_row + hbs, mi_col);
      break;
    case PARTITION_VERT:
      count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
                 t_unpred_seg_counts, hbs, bs, mi_row, mi_col);
      count_segs(cm, xd, tile, mi + hbs, no_pred_segcounts,
                 temporal_predictor_count, t_unpred_seg_counts, hbs, bs,
                 mi_row, mi_col + hbs);
      break;
    case PARTITION_HORZ_A:
      count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
                 t_unpred_seg_counts, hbs, hbs, mi_row, mi_col);
      count_segs(cm, xd, tile, mi + hbs, no_pred_segcounts,
                 temporal_predictor_count, t_unpred_seg_counts, hbs, hbs,
                 mi_row, mi_col + hbs);
      count_segs(cm, xd, tile, mi + hbs * mis, no_pred_segcounts,
                 temporal_predictor_count, t_unpred_seg_counts, bs, hbs,
                 mi_row + hbs, mi_col);
      break;
    case PARTITION_HORZ_B:
      count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
                 t_unpred_seg_counts, bs, hbs, mi_row, mi_col);
      count_segs(cm, xd, tile, mi + hbs * mis, no_pred_segcounts,
                 temporal_predictor_count, t_unpred_seg_counts, hbs, hbs,
                 mi_row + hbs, mi_col);
      count_segs(cm, xd, tile, mi + hbs + hbs * mis, no_pred_segcounts,
                 temporal_predictor_count, t_unpred_seg_counts, hbs, hbs,
                 mi_row + hbs, mi_col + hbs);
      break;
    case PARTITION_VERT_A:
      count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
                 t_unpred_seg_counts, hbs, hbs, mi_row, mi_col);
      count_segs(cm, xd, tile, mi + hbs * mis, no_pred_segcounts,
                 temporal_predictor_count, t_unpred_seg_counts, hbs, hbs,
                 mi_row + hbs, mi_col);
      count_segs(cm, xd, tile, mi + hbs, no_pred_segcounts,
                 temporal_predictor_count, t_unpred_seg_counts, hbs, bs,
                 mi_row, mi_col + hbs);
      break;
    case PARTITION_VERT_B:
      count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
                 t_unpred_seg_counts, hbs, bs, mi_row, mi_col);
      count_segs(cm, xd, tile, mi + hbs, no_pred_segcounts,
                 temporal_predictor_count, t_unpred_seg_counts, hbs, hbs,
                 mi_row, mi_col + hbs);
      count_segs(cm, xd, tile, mi + hbs + hbs * mis, no_pred_segcounts,
                 temporal_predictor_count, t_unpred_seg_counts, hbs, hbs,
                 mi_row + hbs, mi_col + hbs);
      break;
    case PARTITION_SPLIT: {
      const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
      int n;

      assert(num_8x8_blocks_wide_lookup[mi[0]->mbmi.sb_type] < bs &&
             num_8x8_blocks_high_lookup[mi[0]->mbmi.sb_type] < bs);

      for (n = 0; n < 4; n++) {
        const int mi_dc = hbs * (n & 1);
        const int mi_dr = hbs * (n >> 1);

        count_segs_sb(cm, xd, tile, &mi[mi_dr * mis + mi_dc],
                      no_pred_segcounts, temporal_predictor_count,
                      t_unpred_seg_counts, mi_row + mi_dr, mi_col + mi_dc,
                      subsize);
      }
    } break;
    default: assert(0);
  }
#else
  bw = mi_size_wide[mi[0]->mbmi.sb_type];
  bh = mi_size_high[mi[0]->mbmi.sb_type];

  if (bw == bs && bh == bs) {
    count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
               t_unpred_seg_counts, bs, bs, mi_row, mi_col);
  } else if (bw == bs && bh < bs) {
    count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
               t_unpred_seg_counts, bs, hbs, mi_row, mi_col);
    count_segs(cm, xd, tile, mi + hbs * mis, no_pred_segcounts,
               temporal_predictor_count, t_unpred_seg_counts, bs, hbs,
               mi_row + hbs, mi_col);
  } else if (bw < bs && bh == bs) {
    count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
               t_unpred_seg_counts, hbs, bs, mi_row, mi_col);
    count_segs(cm, xd, tile, mi + hbs, no_pred_segcounts,
               temporal_predictor_count, t_unpred_seg_counts, hbs, bs, mi_row,
               mi_col + hbs);
  } else {
    const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
    int n;

    assert(bw < bs && bh < bs);

    for (n = 0; n < 4; n++) {
      const int mi_dc = hbs * (n & 1);
      const int mi_dr = hbs * (n >> 1);

      count_segs_sb(cm, xd, tile, &mi[mi_dr * mis + mi_dc], no_pred_segcounts,
                    temporal_predictor_count, t_unpred_seg_counts,
                    mi_row + mi_dr, mi_col + mi_dc, subsize);
    }
  }
#endif  // CONFIG_EXT_PARTITION_TYPES
}
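// Illustrative sketch (not codec code) of the sub-block geometry behind the
// CONFIG_EXT_PARTITION_TYPES switch above.  Each case covers a bs x bs region
// (in 8x8 mode-info units) with the rectangles listed here; the numbers mirror
// the (width, height, row, col) arguments passed to count_segs.  The enum
// names, print_rect and print_partition are hypothetical stand-ins.
#include <stdio.h>

typedef enum { NONE, HORZ, VERT, HORZ_A, HORZ_B, VERT_A, VERT_B } toy_partition;

static void print_rect(int w, int h, int row, int col) {
  printf("  %dx%d units at mi (%d,%d)\n", w, h, row, col);
}

static void print_partition(toy_partition p, int bs, int row, int col) {
  const int hbs = bs / 2;
  switch (p) {
    case NONE: print_rect(bs, bs, row, col); break;
    case HORZ:
      print_rect(bs, hbs, row, col);
      print_rect(bs, hbs, row + hbs, col);
      break;
    case VERT:
      print_rect(hbs, bs, row, col);
      print_rect(hbs, bs, row, col + hbs);
      break;
    case HORZ_A:  // top half split into two squares, bottom half whole
      print_rect(hbs, hbs, row, col);
      print_rect(hbs, hbs, row, col + hbs);
      print_rect(bs, hbs, row + hbs, col);
      break;
    case HORZ_B:  // top half whole, bottom half split into two squares
      print_rect(bs, hbs, row, col);
      print_rect(hbs, hbs, row + hbs, col);
      print_rect(hbs, hbs, row + hbs, col + hbs);
      break;
    case VERT_A:  // left half split into two squares, right half whole
      print_rect(hbs, hbs, row, col);
      print_rect(hbs, hbs, row + hbs, col);
      print_rect(hbs, bs, row, col + hbs);
      break;
    case VERT_B:  // left half whole, right half split into two squares
      print_rect(hbs, bs, row, col);
      print_rect(hbs, hbs, row, col + hbs);
      print_rect(hbs, hbs, row + hbs, col + hbs);
      break;
  }
}

int main(void) {
  // e.g. a 64x64 superblock is 8 mode-info units on a side
  printf("PARTITION_HORZ_A of an 8-unit block:\n");
  print_partition(HORZ_A, 8, 0, 0);
  return 0;
}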
void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;

  int no_pred_cost;
  int t_pred_cost = INT_MAX;

  int i;
  int tile_col, mi_row, mi_col;

  int temporal_predictor_count[PREDICTION_PROBS][2];
  int no_pred_segcounts[MAX_MB_SEGMENTS];
  int t_unpred_seg_counts[MAX_MB_SEGMENTS];

  vp9_prob no_pred_tree[MB_SEG_TREE_PROBS];
  vp9_prob t_pred_tree[MB_SEG_TREE_PROBS];
  vp9_prob t_nopred_prob[PREDICTION_PROBS];

  const int mis = cm->mode_info_stride;
  MODE_INFO *mi_ptr, *mi;

  // Set default state for the segment tree probabilities and the
  // temporal coding probabilities
  vpx_memset(xd->mb_segment_tree_probs, 255,
             sizeof(xd->mb_segment_tree_probs));
  vpx_memset(cm->segment_pred_probs, 255, sizeof(cm->segment_pred_probs));

  vpx_memset(no_pred_segcounts, 0, sizeof(no_pred_segcounts));
  vpx_memset(t_unpred_seg_counts, 0, sizeof(t_unpred_seg_counts));
  vpx_memset(temporal_predictor_count, 0, sizeof(temporal_predictor_count));

  // First of all generate stats regarding how well the last segment map
  // predicts this one
  for (tile_col = 0; tile_col < cm->tile_columns; tile_col++) {
    vp9_get_tile_col_offsets(cm, tile_col);
    mi_ptr = cm->mi + cm->cur_tile_mi_col_start;
    for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) {
      mi = mi_ptr;
      for (mi_col = cm->cur_tile_mi_col_start;
           mi_col < cm->cur_tile_mi_col_end; mi_col += 8, mi += 8) {
        count_segs_sb(cpi, mi, no_pred_segcounts, temporal_predictor_count,
                      t_unpred_seg_counts, mi_row, mi_col,
                      BLOCK_SIZE_SB64X64);
      }
    }
  }

  // Work out probability tree for coding segments without prediction
  // and the cost.
  calc_segtree_probs(xd, no_pred_segcounts, no_pred_tree);
  no_pred_cost = cost_segmap(xd, no_pred_segcounts, no_pred_tree);

  // Key frames cannot use temporal prediction
  if (cm->frame_type != KEY_FRAME) {
    // Work out probability tree for coding those segments not
    // predicted using the temporal method and the cost.
    calc_segtree_probs(xd, t_unpred_seg_counts, t_pred_tree);
    t_pred_cost = cost_segmap(xd, t_unpred_seg_counts, t_pred_tree);

    // Add in the cost of the signalling for each prediction context
    for (i = 0; i < PREDICTION_PROBS; i++) {
      const int count0 = temporal_predictor_count[i][0];
      const int count1 = temporal_predictor_count[i][1];

      t_nopred_prob[i] = get_binary_prob(count0, count1);

      // Add in the predictor signaling cost
      t_pred_cost += count0 * vp9_cost_zero(t_nopred_prob[i]) +
                     count1 * vp9_cost_one(t_nopred_prob[i]);
    }
  }

  // Now choose which coding method to use.
  if (t_pred_cost < no_pred_cost) {
    cm->temporal_update = 1;
    vpx_memcpy(xd->mb_segment_tree_probs, t_pred_tree, sizeof(t_pred_tree));
    vpx_memcpy(cm->segment_pred_probs, t_nopred_prob, sizeof(t_nopred_prob));
  } else {
    cm->temporal_update = 0;
    vpx_memcpy(xd->mb_segment_tree_probs, no_pred_tree, sizeof(no_pred_tree));
  }
}
void vp9_choose_segmap_coding_method(VP9_COMMON *cm, MACROBLOCKD *xd) {
  struct segmentation *seg = &cm->seg;

  int no_pred_cost;
  int t_pred_cost = INT_MAX;

  int i, tile_col, mi_row, mi_col;

  int temporal_predictor_count[PREDICTION_PROBS][2] = { { 0 } };
  int no_pred_segcounts[MAX_SEGMENTS] = { 0 };
  int t_unpred_seg_counts[MAX_SEGMENTS] = { 0 };

  vp9_prob no_pred_tree[SEG_TREE_PROBS];
  vp9_prob t_pred_tree[SEG_TREE_PROBS];
  vp9_prob t_nopred_prob[PREDICTION_PROBS];

  // Set default state for the segment tree probabilities and the
  // temporal coding probabilities
  memset(seg->tree_probs, 255, sizeof(seg->tree_probs));
  memset(seg->pred_probs, 255, sizeof(seg->pred_probs));

  // First of all generate stats regarding how well the last segment map
  // predicts this one
  for (tile_col = 0; tile_col < 1 << cm->log2_tile_cols; tile_col++) {
    TileInfo tile;
    MODE_INFO **mi_ptr;
    vp9_tile_init(&tile, cm, 0, tile_col);

    mi_ptr = cm->mi_grid_visible + tile.mi_col_start;
    for (mi_row = 0; mi_row < cm->mi_rows;
         mi_row += 8, mi_ptr += 8 * cm->mi_stride) {
      MODE_INFO **mi = mi_ptr;
      for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
           mi_col += 8, mi += 8)
        count_segs_sb(cm, xd, &tile, mi, no_pred_segcounts,
                      temporal_predictor_count, t_unpred_seg_counts, mi_row,
                      mi_col, BLOCK_64X64);
    }
  }

  // Work out probability tree for coding segments without prediction
  // and the cost.
  calc_segtree_probs(no_pred_segcounts, no_pred_tree);
  no_pred_cost = cost_segmap(no_pred_segcounts, no_pred_tree);

  // Key frames cannot use temporal prediction
  if (!frame_is_intra_only(cm)) {
    // Work out probability tree for coding those segments not
    // predicted using the temporal method and the cost.
    calc_segtree_probs(t_unpred_seg_counts, t_pred_tree);
    t_pred_cost = cost_segmap(t_unpred_seg_counts, t_pred_tree);

    // Add in the cost of the signaling for each prediction context.
    for (i = 0; i < PREDICTION_PROBS; i++) {
      const int count0 = temporal_predictor_count[i][0];
      const int count1 = temporal_predictor_count[i][1];

      t_nopred_prob[i] = get_binary_prob(count0, count1);

      // Add in the predictor signaling cost
      t_pred_cost += count0 * vp9_cost_zero(t_nopred_prob[i]) +
                     count1 * vp9_cost_one(t_nopred_prob[i]);
    }
  }

  // Now choose which coding method to use.
  if (t_pred_cost < no_pred_cost) {
    seg->temporal_update = 1;
    memcpy(seg->tree_probs, t_pred_tree, sizeof(t_pred_tree));
    memcpy(seg->pred_probs, t_nopred_prob, sizeof(t_nopred_prob));
  } else {
    seg->temporal_update = 0;
    memcpy(seg->tree_probs, no_pred_tree, sizeof(no_pred_tree));
  }
}
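// Illustrative sketch (not codec code) of the decision made at the end of
// vp9_choose_segmap_coding_method above: given, per prediction context, how
// often the previous frame's segment map predicted the current one correctly
// (count0) or incorrectly (count1), compare the estimated bit cost of the
// temporal method against coding every segment ID explicitly and pick the
// cheaper one.  toy_cost_bit approximates vp9_cost_zero/vp9_cost_one with
// floating-point entropy; explicit_cost and mispredicted_cost stand in for
// the cost_segmap() results and are hypothetical inputs.
#include <math.h>
#include <stdio.h>

// Approximate cost in bits of coding one binary symbol whose probability is p.
static double toy_cost_bit(double p) { return -log2(p); }

int main(void) {
  const double explicit_cost = 5400.0;      // ~ cost_segmap(no_pred_segcounts)
  const double mispredicted_cost = 1800.0;  // ~ cost_segmap(t_unpred_seg_counts)
  const int count0[3] = { 900, 650, 400 };  // temporal prediction correct
  const int count1[3] = { 100, 150, 300 };  // temporal prediction wrong
  double t_pred_cost = mispredicted_cost;
  int i;

  for (i = 0; i < 3; i++) {
    // Analogue of get_binary_prob(): probability the prediction flag is 0.
    const double p = (double)count0[i] / (count0[i] + count1[i]);
    // Signaling cost of the per-block prediction flags in this context.
    t_pred_cost +=
        count0[i] * toy_cost_bit(p) + count1[i] * toy_cost_bit(1.0 - p);
  }

  printf("explicit: %.0f bits, temporal: %.0f bits -> temporal_update = %d\n",
         explicit_cost, t_pred_cost, t_pred_cost < explicit_cost);
  return 0;
}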
void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd) {
  struct segmentation *seg = &cm->seg;
#if CONFIG_MISC_FIXES
  struct segmentation_probs *segp = &cm->fc->seg;
#else
  struct segmentation_probs *segp = &cm->segp;
#endif

  int no_pred_cost;
  int t_pred_cost = INT_MAX;

  int i, tile_col, mi_row, mi_col;

#if CONFIG_MISC_FIXES
  unsigned (*temporal_predictor_count)[2] = cm->counts.seg.pred;
  unsigned *no_pred_segcounts = cm->counts.seg.tree_total;
  unsigned *t_unpred_seg_counts = cm->counts.seg.tree_mispred;
#else
  unsigned temporal_predictor_count[PREDICTION_PROBS][2] = { { 0 } };
  unsigned no_pred_segcounts[MAX_SEGMENTS] = { 0 };
  unsigned t_unpred_seg_counts[MAX_SEGMENTS] = { 0 };
#endif

  vpx_prob no_pred_tree[SEG_TREE_PROBS];
  vpx_prob t_pred_tree[SEG_TREE_PROBS];
  vpx_prob t_nopred_prob[PREDICTION_PROBS];

#if CONFIG_MISC_FIXES
  (void)xd;
#else
  // Set default state for the segment tree probabilities and the
  // temporal coding probabilities
  memset(segp->tree_probs, 255, sizeof(segp->tree_probs));
  memset(segp->pred_probs, 255, sizeof(segp->pred_probs));
#endif

  // First of all generate stats regarding how well the last segment map
  // predicts this one
  for (tile_col = 0; tile_col < 1 << cm->log2_tile_cols; tile_col++) {
    TileInfo tile;
    MODE_INFO **mi_ptr;
    vp10_tile_init(&tile, cm, 0, tile_col);

    mi_ptr = cm->mi_grid_visible + tile.mi_col_start;
    for (mi_row = 0; mi_row < cm->mi_rows;
         mi_row += 8, mi_ptr += 8 * cm->mi_stride) {
      MODE_INFO **mi = mi_ptr;
      for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
           mi_col += 8, mi += 8)
        count_segs_sb(cm, xd, &tile, mi, no_pred_segcounts,
                      temporal_predictor_count, t_unpred_seg_counts, mi_row,
                      mi_col, BLOCK_64X64);
    }
  }

  // Work out probability tree for coding segments without prediction
  // and the cost.
  calc_segtree_probs(no_pred_segcounts, no_pred_tree, segp->tree_probs);
  no_pred_cost = cost_segmap(no_pred_segcounts, no_pred_tree);

  // Key frames cannot use temporal prediction
  if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
    // Work out probability tree for coding those segments not
    // predicted using the temporal method and the cost.
    calc_segtree_probs(t_unpred_seg_counts, t_pred_tree, segp->tree_probs);
    t_pred_cost = cost_segmap(t_unpred_seg_counts, t_pred_tree);

    // Add in the cost of the signaling for each prediction context.
    for (i = 0; i < PREDICTION_PROBS; i++) {
      const int count0 = temporal_predictor_count[i][0];
      const int count1 = temporal_predictor_count[i][1];

#if CONFIG_MISC_FIXES
      vp10_prob_diff_update_savings_search(temporal_predictor_count[i],
                                           segp->pred_probs[i],
                                           &t_nopred_prob[i],
                                           DIFF_UPDATE_PROB);
#else
      t_nopred_prob[i] = get_binary_prob(count0, count1);
#endif

      // Add in the predictor signaling cost
      t_pred_cost += count0 * vp10_cost_zero(t_nopred_prob[i]) +
                     count1 * vp10_cost_one(t_nopred_prob[i]);
    }
  }

  // Now choose which coding method to use.
  if (t_pred_cost < no_pred_cost) {
    assert(!cm->error_resilient_mode);
    seg->temporal_update = 1;
#if !CONFIG_MISC_FIXES
    memcpy(segp->tree_probs, t_pred_tree, sizeof(t_pred_tree));
    memcpy(segp->pred_probs, t_nopred_prob, sizeof(t_nopred_prob));
#endif
  } else {
    seg->temporal_update = 0;
#if !CONFIG_MISC_FIXES
    memcpy(segp->tree_probs, no_pred_tree, sizeof(no_pred_tree));
#endif
  }
}