// Packs the coded data of every tile into `data_ptr` and returns the total
// number of bytes written.  Every tile except the very last is preceded by a
// 4-byte big-endian length so the decoder can locate tile boundaries.
static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  vpx_writer residual_bc;
  size_t total_size = 0;
  int row, col;

  // The above-segmentation context row is shared across tiles; clear it once
  // before any tile is coded.
  memset(cm->above_seg_context, 0,
         sizeof(*cm->above_seg_context) * mi_cols_aligned_to_sb(cm->mi_cols));

  for (row = 0; row < tile_rows; row++) {
    for (col = 0; col < tile_cols; col++) {
      const int tile_idx = row * tile_cols + col;
      const int is_last_tile = (row == tile_rows - 1) && (col == tile_cols - 1);
      TOKENEXTRA *tok = cpi->tile_tok[row][col];
      TOKENEXTRA *const tok_end = tok + cpi->tok_count[row][col];

      // Non-final tiles reserve 4 bytes up front for the size prefix.
      vpx_start_encode(&residual_bc,
                       data_ptr + total_size + (is_last_tile ? 0 : 4));

      write_modes(cpi, &cpi->tile_data[tile_idx].tile_info, &residual_bc, &tok,
                  tok_end);
      assert(tok == tok_end);
      vpx_stop_encode(&residual_bc);

      if (!is_last_tile) {
        // size of this tile
        mem_put_be32(data_ptr + total_size, residual_bc.pos);
        total_size += 4;
      }

      total_size += residual_bc.pos;
    }
  }

  return total_size;
}
void encode_with_adaptive_probability() { memcpy(tmp, uncompressed, sizeof(uncompressed)); (*transform)(tmp); // this currently is a no-op but it may be helpful for the EXERCISE DynProb encode; vpx_writer wri ={0}; vpx_start_encode(&wri, tmp); for (size_t i = 0; i < sizeof(uncompressed); ++i) { for(int bit = 1; bit < 256; bit <<= 1) { bool cur_bit = !!(tmp[i] & bit); vpx_write(&wri, cur_bit, encode.prob); encode.record_bit(cur_bit); // <-- this a new line for lesson1 that lets the encoder adapt to data } } vpx_stop_encode(&wri); printf("Buffer encoded with final prob(0) = %.2f results in %d size (%.2f%%)\n", encode.prob / 255., wri.pos, 100 * wri.pos / float(sizeof(uncompressed))); DynProb decode; vpx_reader rea={0}; vpx_reader_init(&rea, wri.buffer, wri.pos); memset(roundtrip, 0, sizeof(roundtrip)); for (size_t i = 0; i < sizeof(roundtrip); ++i) { for(int bit = 1; bit < 256; bit <<= 1) { if (vpx_read(&rea, decode.prob)) { roundtrip[i] |= bit; decode.record_bit(true); // <-- this a new line for lesson1 } else { decode.record_bit(false); // <-- this a new line for lesson1 } } } assert(vpx_reader_has_error(&rea) == 0); (*untransform)(uncompressed); // this is, again a no-op, but may be helpful for the EXERCISE assert(memcmp(uncompressed, roundtrip, sizeof(uncompressed)) == 0); }
// Writes the VP9 compressed header (probability updates for transform sizes,
// coefficients, skip flags, and — for inter frames — mode, filter, reference
// and motion-vector probabilities) into `data` with the boolean coder.
// Returns the number of bytes written; the caller stores this 16-bit size in
// the uncompressed header, hence the <= 0xffff assertion at the end.
// NOTE: the write order below must match the decoder's read order exactly.
static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  FRAME_CONTEXT *const fc = cm->fc;
  FRAME_COUNTS *counts = cpi->td.counts;
  vpx_writer header_bc;

  vpx_start_encode(&header_bc, data);

  // Lossless frames are forced to 4x4 transforms and carry no tx-mode
  // probability updates; otherwise signal the tx probabilities.
  if (xd->lossless)
    cm->tx_mode = ONLY_4X4;
  else
    encode_txfm_probs(cm, &header_bc, counts);

  update_coef_probs(cpi, &header_bc);
  update_skip_probs(cm, &header_bc, counts);

  // Everything below only applies to frames that use inter prediction.
  if (!frame_is_intra_only(cm)) {
    int i;

    for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
      prob_diff_update(vp9_inter_mode_tree, cm->fc->inter_mode_probs[i],
                       counts->inter_mode[i], INTER_MODES, &header_bc);

    if (cm->interp_filter == SWITCHABLE)
      update_switchable_interp_probs(cm, &header_bc, counts);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
                                counts->intra_inter[i]);

    if (cpi->allow_comp_inter_inter) {
      // Signal whether compound prediction is used at all, and if so whether
      // the choice is made per-block (hybrid) rather than frame-wide.
      const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
      const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
      vpx_write_bit(&header_bc, use_compound_pred);
      if (use_compound_pred) {
        vpx_write_bit(&header_bc, use_hybrid_pred);
        if (use_hybrid_pred)
          for (i = 0; i < COMP_INTER_CONTEXTS; i++)
            vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                      counts->comp_inter[i]);
      }
    }

    // Single-reference probabilities (sent unless the frame is compound-only).
    if (cm->reference_mode != COMPOUND_REFERENCE) {
      for (i = 0; i < REF_CONTEXTS; i++) {
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
                                  counts->single_ref[i][0]);
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
                                  counts->single_ref[i][1]);
      }
    }

    // Compound-reference probabilities (sent unless the frame is single-only).
    if (cm->reference_mode != SINGLE_REFERENCE)
      for (i = 0; i < REF_CONTEXTS; i++)
        vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
                                  counts->comp_ref[i]);

    for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
      prob_diff_update(vp9_intra_mode_tree, cm->fc->y_mode_prob[i],
                       counts->y_mode[i], INTRA_MODES, &header_bc);

    for (i = 0; i < PARTITION_CONTEXTS; ++i)
      prob_diff_update(vp9_partition_tree, fc->partition_prob[i],
                       counts->partition[i], PARTITION_TYPES, &header_bc);

    vp9_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc,
                        &counts->mv);
  }

  vpx_stop_encode(&header_bc);
  assert(header_bc.pos <= 0xffff);
  return header_bc.pos;
}