/*
 * Pack the mode info for one keyframe (intra-only) block into the
 * bitstream.  Writes, in bitstream order: segment id (if the segment map
 * is being updated), the skip flag, the selected transform size (when
 * TX_MODE_SELECT and the block is >= 8x8), the luma intra mode(s), and
 * finally the chroma intra mode conditioned on the luma mode.
 *
 * cm     - common encoder/decoder state (segmentation, tx_mode).
 * xd     - macroblock descriptor; supplies above/left neighbor mode info
 *          used as context for the luma-mode probabilities.
 * mi_8x8 - mode-info grid; mi_8x8[0] is the block being coded.
 * w      - boolean-coder writer the syntax elements are emitted into.
 */
static void write_mb_modes_kf(const VP9_COMMON *cm, const MACROBLOCKD *xd, MODE_INFO **mi_8x8, vpx_writer *w) {
  const struct segmentation *const seg = &cm->seg;
  const MODE_INFO *const mi = mi_8x8[0];
  const MODE_INFO *const above_mi = xd->above_mi;
  const MODE_INFO *const left_mi = xd->left_mi;
  const BLOCK_SIZE bsize = mi->sb_type;
  /* Segment id is only coded when the segment map is being refreshed. */
  if (seg->update_map) write_segment_id(w, seg, mi->segment_id);
  write_skip(cm, xd, mi->segment_id, mi, w);
  /* Per-block tx size is only signaled for >= 8x8 blocks under
     TX_MODE_SELECT; otherwise the tx size is implied by tx_mode. */
  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT) write_selected_tx_size(cm, xd, w);
  if (bsize >= BLOCK_8X8) {
    /* One luma mode for the whole block; context from above/left. */
    write_intra_mode(w, mi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0));
  } else {
    /* Sub-8x8: one luma mode per 4x4 sub-block.  The stride of the
       idy/idx loops skips positions covered by wider/taller partitions
       (e.g. 4x8 codes two modes, not four). */
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
    int idx, idy;
    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int block = idy * 2 + idx;
        write_intra_mode(w, mi->bmi[block].as_mode, get_y_mode_probs(mi, above_mi, left_mi, block));
      }
    }
  }
  /* Chroma mode probabilities are conditioned on the (block-level) luma
     mode. */
  write_intra_mode(w, mi->uv_mode, vp9_kf_uv_mode_prob[mi->mode]);
}
/*
 * Drain up to SZ plain bytes from channel C's ring buffer to C->fdh->fd,
 * passing them through adb_encode() on the way out.  Because the encoder
 * can expand input (escaping), the function runs the encoder twice per
 * iteration: once to fill a staging buffer for the write, and once more
 * capped at the number of bytes the write actually accepted, to learn how
 * many *plain* input bytes were fully flushed.  Partial-escape state is
 * carried across calls in c->leftover_escape.
 *
 * Returns the number of plain (pre-encoding) bytes removed from the ring
 * buffer.  Dies via die_errno() only if the very first write fails;
 * a later failure just ends the loop with partial progress.
 */
static size_t channel_write_adb_hack(struct channel* c, size_t sz) {
  size_t nr_removed = 0;
  while (nr_removed < sz) {
    struct iovec iov[2];
    char encbuf[4096];
    char* enc;
    char* encend;
    unsigned state;
    /* Peek at the readable region(s) of the ring buffer — two iovecs
       because the readable span may wrap around the buffer end.  Nothing
       is consumed yet; removal happens below once we know how much the
       fd actually took. */
    ringbuf_readable_iov(c->rb, iov, sz - nr_removed);
    /* Pass 1: encode as much as fits into the staging buffer. */
    enc = encbuf;
    encend = enc + sizeof (encbuf);
    state = c->leftover_escape;
    for (int i = 0; i < ARRAYSIZE(iov); ++i) {
      const char* in = iov[i].iov_base;
      const char* inend = in + iov[i].iov_len;
      adb_encode(&state, &enc, encend, &in, inend);
    }
    // If we left a byte in the ringbuffer, don't actually write
    // its first half now (since we wrote it before), but pretend
    // we did.
    size_t skip = (c->leftover_escape != 0);
    ssize_t nr_written = write_skip(c->fdh->fd, encbuf, enc - encbuf, skip);
    if (nr_written < 0 && nr_removed == 0) die_errno("write");
    if (nr_written < 0) break;  /* later failure: return partial progress */
    /* Pass 2: re-run the encoder over the same input, but with the output
       limit set to what was actually written, to count how many plain
       input bytes were consumed by the successful portion. */
    size_t nr_encoded = 0;
    enc = encbuf;
    encend = enc + nr_written;
    state = c->leftover_escape;
    for (int i = 0; i < ARRAYSIZE(iov); ++i) {
      const char* in = iov[i].iov_base;
      const char* inend = in + iov[i].iov_len;
      adb_encode(&state, &enc, encend, &in, inend);
      nr_encoded += (in - (char*) iov[i].iov_base);
    }
    // If we wrote a partial encoded byte, leave the plain byte in
    // the ringbuf so that we know this channel still needs to
    // write.
    if (state != 0) {
      assert(nr_encoded > 0);
      nr_encoded -= 1;
    }
    ringbuf_note_removed(c->rb, nr_encoded);
    nr_removed += nr_encoded;
    /* Remember the mid-escape state for the next call. */
    c->leftover_escape = state;
  }
  return nr_removed;
}
/*
 * Pack the mode info for one inter-frame block into the bitstream.
 * Emits, in bitstream order: segment id (optionally temporally
 * predicted), skip flag, intra/inter flag, tx size, then either the
 * intra modes or the inter mode + reference frames + interpolation
 * filter + motion vectors (per 4x4 sub-block for sizes below 8x8).
 *
 * cpi - encoder instance (common state, frame contexts, mb info).
 * mi  - mode info of the block being coded.
 * w   - boolean-coder writer the syntax elements are emitted into.
 */
static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi, vpx_writer *w) {
  VP9_COMMON *const cm = &cpi->common;
  const nmv_context *nmvc = &cm->fc->nmvc;
  const MACROBLOCK *const x = &cpi->td.mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  const struct segmentation *const seg = &cm->seg;
  const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
  const PREDICTION_MODE mode = mi->mode;
  const int segment_id = mi->segment_id;
  const BLOCK_SIZE bsize = mi->sb_type;
  const int allow_hp = cm->allow_high_precision_mv;
  const int is_inter = is_inter_block(mi);
  const int is_compound = has_second_ref(mi);
  int skip, ref;
  if (seg->update_map) {
    if (seg->temporal_update) {
      /* Temporal segment prediction: code a "predicted" flag first; the
         explicit segment id is only coded when prediction failed. */
      const int pred_flag = mi->seg_id_predicted;
      vpx_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
      vpx_write(w, pred_flag, pred_prob);
      if (!pred_flag) write_segment_id(w, seg, segment_id);
    } else {
      write_segment_id(w, seg, segment_id);
    }
  }
  skip = write_skip(cm, xd, segment_id, mi, w);
  /* The intra/inter flag is implied when the segment forces a reference
     frame, so it is only coded otherwise. */
  if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
    vpx_write(w, is_inter, vp9_get_intra_inter_prob(cm, xd));
  /* Tx size is not coded for skipped inter blocks (no coefficients). */
  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
      !(is_inter && skip)) {
    write_selected_tx_size(cm, xd, w);
  }
  if (!is_inter) {
    if (bsize >= BLOCK_8X8) {
      write_intra_mode(w, mode, cm->fc->y_mode_prob[size_group_lookup[bsize]]);
    } else {
      /* Sub-8x8 intra: one luma mode per 4x4 sub-block; the loop stride
         skips positions covered by wider/taller partitions. */
      int idx, idy;
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
          write_intra_mode(w, b_mode, cm->fc->y_mode_prob[0]);
        }
      }
    }
    write_intra_mode(w, mi->uv_mode, cm->fc->uv_mode_prob[mode]);
  } else {
    /* Inter block: mode probabilities are selected by the mode context
       derived from the first reference frame's neighborhood. */
    const int mode_ctx = mbmi_ext->mode_context[mi->ref_frame[0]];
    const vpx_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
    write_ref_frames(cm, xd, w);
    // If segment skip is not enabled code the mode.
    if (!segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
      if (bsize >= BLOCK_8X8) {
        write_inter_mode(w, mode, inter_probs);
      }
    }
    if (cm->interp_filter == SWITCHABLE) {
      /* Per-block switchable interpolation filter; also bump the
         encoder-side selection counter for the chosen filter. */
      const int ctx = vp9_get_pred_context_switchable_interp(xd);
      vp9_write_token(w, vp9_switchable_interp_tree,
                      cm->fc->switchable_interp_prob[ctx],
                      &switchable_interp_encodings[mi->interp_filter]);
      ++cpi->interp_filter_selected[0][mi->interp_filter];
    } else {
      assert(mi->interp_filter == cm->interp_filter);
    }
    if (bsize < BLOCK_8X8) {
      /* Sub-8x8 inter: code a mode (and, for NEWMV, one MV per
         reference) for every 4x4 sub-block. */
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      int idx, idy;
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const int j = idy * 2 + idx;
          const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
          write_inter_mode(w, b_mode, inter_probs);
          if (b_mode == NEWMV) {
            /* MVs are coded as a delta from the best reference MV. */
            for (ref = 0; ref < 1 + is_compound; ++ref)
              vp9_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
                            &mbmi_ext->ref_mvs[mi->ref_frame[ref]][0].as_mv,
                            nmvc, allow_hp);
          }
        }
      }
    } else {
      if (mode == NEWMV) {
        for (ref = 0; ref < 1 + is_compound; ++ref)
          vp9_encode_mv(cpi, w, &mi->mv[ref].as_mv,
                        &mbmi_ext->ref_mvs[mi->ref_frame[ref]][0].as_mv,
                        nmvc, allow_hp);
      }
    }
  }
}
int shared_blk::write( void* p , int sz ) { int write_sz = std::min( sz , static_cast<int>(space())); memcpy( write_ptr() , p , write_sz ); write_skip( write_sz ); return write_sz; }