void vpx_yv12_copy_y_c(const YV12_BUFFER_CONFIG *src_ybc,
                       YV12_BUFFER_CONFIG *dst_ybc) {
  int row;
  const uint8_t *src = src_ybc->y_buffer;
  uint8_t *dst = dst_ybc->y_buffer;

#if CONFIG_VP9 && CONFIG_VP9_HIGHBITDEPTH
  if (src_ybc->flags & YV12_FLAG_HIGHBITDEPTH) {
    const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
    uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
    for (row = 0; row < src_ybc->y_height; ++row) {
      vpx_memcpy(dst16, src16, src_ybc->y_width * sizeof(uint16_t));
      src16 += src_ybc->y_stride;
      dst16 += dst_ybc->y_stride;
    }
    return;
  }
#endif

  for (row = 0; row < src_ybc->y_height; ++row) {
    vpx_memcpy(dst, src, src_ybc->y_width);
    src += src_ybc->y_stride;
    dst += dst_ybc->y_stride;
  }
}
static void yv12_extend_frame_bottom_c(YV12_BUFFER_CONFIG *ybf) {
  int i;
  unsigned char *src_ptr1, *src_ptr2;
  unsigned char *dest_ptr2;
  unsigned int Border;
  int plane_stride;
  int plane_height;

  /***********/
  /* Y Plane */
  /***********/
  Border = ybf->border;
  plane_stride = ybf->y_stride;
  plane_height = ybf->y_height;

  src_ptr1 = ybf->y_buffer - Border;
  src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
  dest_ptr2 = src_ptr2 + plane_stride;

  for (i = 0; i < (int)Border; i++) {
    vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
    dest_ptr2 += plane_stride;
  }

  /***********/
  /* U Plane */
  /***********/
  plane_stride = ybf->uv_stride;
  plane_height = ybf->uv_height;
  Border /= 2;

  src_ptr1 = ybf->u_buffer - Border;
  src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
  dest_ptr2 = src_ptr2 + plane_stride;

  for (i = 0; i < (int)Border; i++) {
    vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
    dest_ptr2 += plane_stride;
  }

  /***********/
  /* V Plane */
  /***********/
  src_ptr1 = ybf->v_buffer - Border;
  src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
  dest_ptr2 = src_ptr2 + plane_stride;

  for (i = 0; i < (int)Border; i++) {
    vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
    dest_ptr2 += plane_stride;
  }
}
void vp8_yv12_extend_frame_borders_yonly(YV12_BUFFER_CONFIG *ybf) {
  int i;
  unsigned char *src_ptr1, *src_ptr2;
  unsigned char *dest_ptr1, *dest_ptr2;
  unsigned int Border;
  int plane_stride;
  int plane_height;
  int plane_width;

  /***********/
  /* Y Plane */
  /***********/
  Border = ybf->border;
  plane_stride = ybf->y_stride;
  plane_height = ybf->y_height;
  plane_width = ybf->y_width;

  // copy the left and right most columns out
  src_ptr1 = ybf->y_buffer;
  src_ptr2 = src_ptr1 + plane_width - 1;
  dest_ptr1 = src_ptr1 - Border;
  dest_ptr2 = src_ptr2 + 1;

  for (i = 0; i < plane_height; i++) {
    vpx_memset(dest_ptr1, src_ptr1[0], Border);
    vpx_memset(dest_ptr2, src_ptr2[0], Border);
    src_ptr1 += plane_stride;
    src_ptr2 += plane_stride;
    dest_ptr1 += plane_stride;
    dest_ptr2 += plane_stride;
  }

  // Now copy the top and bottom source lines into each line of the
  // respective borders
  src_ptr1 = ybf->y_buffer - Border;
  src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
  dest_ptr1 = src_ptr1 - (Border * plane_stride);
  dest_ptr2 = src_ptr2 + plane_stride;

  for (i = 0; i < (int)Border; i++) {
    vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
    vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
    dest_ptr1 += plane_stride;
    dest_ptr2 += plane_stride;
  }

  plane_stride /= 2;
  plane_height /= 2;
  plane_width /= 2;
  Border /= 2;
}
void vp9_setup_inter_frame(VP9_COMP *cpi) {
  if (cpi->common.refresh_alt_ref_frame) {
    vpx_memcpy(&cpi->common.fc, &cpi->common.lfc_a, sizeof(cpi->common.fc));
  } else {
    vpx_memcpy(&cpi->common.fc, &cpi->common.lfc, sizeof(cpi->common.fc));
  }
}
static void yv12_extend_frame_top_c(YV12_BUFFER_CONFIG *ybf) {
  int i;
  unsigned char *src_ptr1;
  unsigned char *dest_ptr1;
  unsigned int Border;
  int plane_stride;

  /***********/
  /* Y Plane */
  /***********/
  Border = ybf->border;
  plane_stride = ybf->y_stride;

  src_ptr1 = ybf->y_buffer - Border;
  dest_ptr1 = src_ptr1 - (Border * plane_stride);

  for (i = 0; i < (int)Border; i++) {
    vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
    dest_ptr1 += plane_stride;
  }

  /***********/
  /* U Plane */
  /***********/
  plane_stride = ybf->uv_stride;
  Border /= 2;

  src_ptr1 = ybf->u_buffer - Border;
  dest_ptr1 = src_ptr1 - (Border * plane_stride);

  for (i = 0; i < (int)Border; i++) {
    vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
    dest_ptr1 += plane_stride;
  }

  /***********/
  /* V Plane */
  /***********/
  src_ptr1 = ybf->v_buffer - Border;
  dest_ptr1 = src_ptr1 - (Border * plane_stride);

  for (i = 0; i < (int)Border; i++) {
    vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
    dest_ptr1 += plane_stride;
  }
}
/* Generate the list of filtering values per priority level */
void vp8_loop_filter_build_filter_offsets(cl_int *filters, int level,
                                          cl_int *filter_levels,
                                          cl_int *dc_diffs,
                                          cl_int *mb_rows,
                                          cl_int *mb_cols) {
  int offset = block_offsets[level] * 4;
  int num_blocks = priority_num_blocks[level];

  if (num_blocks == 0)
    return;

  vpx_memcpy(&filters[offset], filter_levels, num_blocks * sizeof(cl_int));
  vpx_memcpy(&filters[offset + DC_DIFFS_LOCATION * num_blocks], dc_diffs,
             num_blocks * sizeof(cl_int));
  vpx_memcpy(&filters[offset + COLS_LOCATION * num_blocks], mb_cols,
             num_blocks * sizeof(cl_int));
  vpx_memcpy(&filters[offset + ROWS_LOCATION * num_blocks], mb_rows,
             num_blocks * sizeof(cl_int));
}
static void extend_plane(uint8_t *s,  /* source */
                         int sp,      /* source pitch */
                         int w,       /* width */
                         int h,       /* height */
                         int et,      /* extend top border */
                         int el,      /* extend left border */
                         int eb,      /* extend bottom border */
                         int er) {    /* extend right border */
  int i;
  uint8_t *src_ptr1, *src_ptr2;
  uint8_t *dest_ptr1, *dest_ptr2;
  int linesize;

  /* copy the left and right most columns out */
  src_ptr1 = s;
  src_ptr2 = s + w - 1;
  dest_ptr1 = s - el;
  dest_ptr2 = s + w;

  for (i = 0; i < h; i++) {
    vpx_memset(dest_ptr1, src_ptr1[0], el);
    vpx_memset(dest_ptr2, src_ptr2[0], er);
    src_ptr1 += sp;
    src_ptr2 += sp;
    dest_ptr1 += sp;
    dest_ptr2 += sp;
  }

  /* Now copy the top and bottom lines into each line of the respective
   * borders
   */
  src_ptr1 = s - el;
  src_ptr2 = s + sp * (h - 1) - el;
  dest_ptr1 = s + sp * (-et) - el;
  dest_ptr2 = s + sp * (h) - el;
  linesize = el + er + w;

  for (i = 0; i < et; i++) {
    vpx_memcpy(dest_ptr1, src_ptr1, linesize);
    dest_ptr1 += sp;
  }

  for (i = 0; i < eb; i++) {
    vpx_memcpy(dest_ptr2, src_ptr2, linesize);
    dest_ptr2 += sp;
  }
}
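/* Tiny self-contained illustration (not from the original source): extend a
 * 2x2 plane, stored inside a 4x4 buffer with a 1-pixel border, by 1 pixel on
 * every side. Edge pixels are replicated outward: side borders repeat the
 * nearest column, then the top/bottom borders copy whole extended rows. The
 * demo function name and buffer contents are illustrative assumptions. */
static void extend_plane_demo(void) {
  uint8_t buf[4 * 4] = { 0 };
  uint8_t *plane = buf + 4 + 1;   /* top-left sample of the 2x2 plane */
  plane[0] = 1;  plane[1] = 2;    /* first row                        */
  plane[4] = 3;  plane[5] = 4;    /* second row (stride 4)            */

  extend_plane(plane, 4, 2, 2, 1, 1, 1, 1);
  /* buf now holds, row by row: 1 1 2 2 / 1 1 2 2 / 3 3 4 4 / 3 3 4 4 */
}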
void vp9_save_coding_context(VP9_COMP *cpi) {
  CODING_CONTEXT *const cc = &cpi->coding_context;
  VP9_COMMON *cm = &cpi->common;
  MACROBLOCKD *xd = &cpi->mb.e_mbd;

  // Stores a snapshot of key state variables which can subsequently be
  // restored with a call to vp9_restore_coding_context. These functions are
  // intended for use in a re-code loop in vp9_compress_frame where the
  // quantizer value is adjusted between loop iterations.
  cc->nmvc = cm->fc.nmvc;
  vp9_copy(cc->nmvjointcost, cpi->mb.nmvjointcost);
  vp9_copy(cc->nmvcosts, cpi->mb.nmvcosts);
  vp9_copy(cc->nmvcosts_hp, cpi->mb.nmvcosts_hp);

  vp9_copy(cc->vp9_mode_contexts, cm->fc.vp9_mode_contexts);

  vp9_copy(cc->ymode_prob, cm->fc.ymode_prob);
#if CONFIG_SUPERBLOCKS
  vp9_copy(cc->sb_ymode_prob, cm->fc.sb_ymode_prob);
#endif
  vp9_copy(cc->bmode_prob, cm->fc.bmode_prob);
  vp9_copy(cc->uv_mode_prob, cm->fc.uv_mode_prob);
  vp9_copy(cc->i8x8_mode_prob, cm->fc.i8x8_mode_prob);
  vp9_copy(cc->sub_mv_ref_prob, cm->fc.sub_mv_ref_prob);
  vp9_copy(cc->mbsplit_prob, cm->fc.mbsplit_prob);

  // Stats
#ifdef MODE_STATS
  vp9_copy(cc->y_modes, y_modes);
  vp9_copy(cc->uv_modes, uv_modes);
  vp9_copy(cc->b_modes, b_modes);
  vp9_copy(cc->inter_y_modes, inter_y_modes);
  vp9_copy(cc->inter_uv_modes, inter_uv_modes);
  vp9_copy(cc->inter_b_modes, inter_b_modes);
#endif

  vp9_copy(cc->segment_pred_probs, cm->segment_pred_probs);
  vp9_copy(cc->ref_pred_probs_update, cpi->ref_pred_probs_update);
  vp9_copy(cc->ref_pred_probs, cm->ref_pred_probs);
  vp9_copy(cc->prob_comppred, cm->prob_comppred);

  vpx_memcpy(cpi->coding_context.last_frame_seg_map_copy,
             cm->last_frame_seg_map, (cm->mb_rows * cm->mb_cols));

  vp9_copy(cc->last_ref_lf_deltas, xd->last_ref_lf_deltas);
  vp9_copy(cc->last_mode_lf_deltas, xd->last_mode_lf_deltas);

  vp9_copy(cc->coef_probs, cm->fc.coef_probs);
  vp9_copy(cc->hybrid_coef_probs, cm->fc.hybrid_coef_probs);
  vp9_copy(cc->coef_probs_8x8, cm->fc.coef_probs_8x8);
  vp9_copy(cc->hybrid_coef_probs_8x8, cm->fc.hybrid_coef_probs_8x8);
  vp9_copy(cc->coef_probs_16x16, cm->fc.coef_probs_16x16);
  vp9_copy(cc->hybrid_coef_probs_16x16, cm->fc.hybrid_coef_probs_16x16);
  vp9_copy(cc->switchable_interp_prob, cm->fc.switchable_interp_prob);
#if CONFIG_COMP_INTERINTRA_PRED
  cc->interintra_prob = cm->fc.interintra_prob;
#endif
}
void vp9_restore_coding_context(VP9_COMP *cpi) {
  CODING_CONTEXT *const cc = &cpi->coding_context;
  VP9_COMMON *cm = &cpi->common;
  MACROBLOCKD *xd = &cpi->mb.e_mbd;

  // Restore key state variables to the snapshot state stored in the
  // previous call to vp9_save_coding_context.
  cm->fc.nmvc = cc->nmvc;
  vp9_copy(cpi->mb.nmvjointcost, cc->nmvjointcost);
  vp9_copy(cpi->mb.nmvcosts, cc->nmvcosts);
  vp9_copy(cpi->mb.nmvcosts_hp, cc->nmvcosts_hp);

  vp9_copy(cm->fc.vp9_mode_contexts, cc->vp9_mode_contexts);

  vp9_copy(cm->fc.ymode_prob, cc->ymode_prob);
#if CONFIG_SUPERBLOCKS
  vp9_copy(cm->fc.sb_ymode_prob, cc->sb_ymode_prob);
#endif
  vp9_copy(cm->fc.bmode_prob, cc->bmode_prob);
  vp9_copy(cm->fc.i8x8_mode_prob, cc->i8x8_mode_prob);
  vp9_copy(cm->fc.uv_mode_prob, cc->uv_mode_prob);
  vp9_copy(cm->fc.sub_mv_ref_prob, cc->sub_mv_ref_prob);
  vp9_copy(cm->fc.mbsplit_prob, cc->mbsplit_prob);

  // Stats
#ifdef MODE_STATS
  vp9_copy(y_modes, cc->y_modes);
  vp9_copy(uv_modes, cc->uv_modes);
  vp9_copy(b_modes, cc->b_modes);
  vp9_copy(inter_y_modes, cc->inter_y_modes);
  vp9_copy(inter_uv_modes, cc->inter_uv_modes);
  vp9_copy(inter_b_modes, cc->inter_b_modes);
#endif

  vp9_copy(cm->segment_pred_probs, cc->segment_pred_probs);
  vp9_copy(cpi->ref_pred_probs_update, cc->ref_pred_probs_update);
  vp9_copy(cm->ref_pred_probs, cc->ref_pred_probs);
  vp9_copy(cm->prob_comppred, cc->prob_comppred);

  vpx_memcpy(cm->last_frame_seg_map,
             cpi->coding_context.last_frame_seg_map_copy,
             (cm->mb_rows * cm->mb_cols));

  vp9_copy(xd->last_ref_lf_deltas, cc->last_ref_lf_deltas);
  vp9_copy(xd->last_mode_lf_deltas, cc->last_mode_lf_deltas);

  vp9_copy(cm->fc.coef_probs, cc->coef_probs);
  vp9_copy(cm->fc.hybrid_coef_probs, cc->hybrid_coef_probs);
  vp9_copy(cm->fc.coef_probs_8x8, cc->coef_probs_8x8);
  vp9_copy(cm->fc.hybrid_coef_probs_8x8, cc->hybrid_coef_probs_8x8);
  vp9_copy(cm->fc.coef_probs_16x16, cc->coef_probs_16x16);
  vp9_copy(cm->fc.hybrid_coef_probs_16x16, cc->hybrid_coef_probs_16x16);
  vp9_copy(cm->fc.switchable_interp_prob, cc->switchable_interp_prob);
#if CONFIG_COMP_INTERINTRA_PRED
  cm->fc.interintra_prob = cc->interintra_prob;
#endif
}
void vp9_setup_key_frame(VP9_COMP *cpi) {
  VP9_COMMON *cm = &cpi->common;

  // Setup for Key frame:
  vp9_default_coef_probs(&cpi->common);
  vp9_kf_default_bmode_probs(cpi->common.kf_bmode_prob);
  vp9_init_mbmode_probs(&cpi->common);
  vp9_default_bmode_probs(cm->fc.bmode_prob);

  if (cm->last_frame_seg_map)
    vpx_memset(cm->last_frame_seg_map, 0, (cm->mb_rows * cm->mb_cols));

  vp9_init_mv_probs(&cpi->common);

  // cpi->common.filter_level = 0;    // Reset every key frame.
  cpi->common.filter_level = cpi->common.base_qindex * 3 / 8;

  // interval before next GF
  cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;

  cpi->common.refresh_golden_frame = TRUE;
  cpi->common.refresh_alt_ref_frame = TRUE;

  vp9_init_mode_contexts(&cpi->common);
  vpx_memcpy(&cpi->common.lfc, &cpi->common.fc, sizeof(cpi->common.fc));
  vpx_memcpy(&cpi->common.lfc_a, &cpi->common.fc, sizeof(cpi->common.fc));

  vpx_memset(cm->prev_mip, 0,
             (cm->mb_cols + 1) * (cm->mb_rows + 1) * sizeof(MODE_INFO));
  vpx_memset(cm->mip, 0,
             (cm->mb_cols + 1) * (cm->mb_rows + 1) * sizeof(MODE_INFO));

  vp9_update_mode_info_border(cm, cm->mip);
  vp9_update_mode_info_in_image(cm, cm->mi);

#if CONFIG_NEW_MVREF
  if (1) {
    MACROBLOCKD *xd = &cpi->mb.e_mbd;

    // Default probabilities for encoding the MV ref id signal
    vpx_memset(xd->mb_mv_ref_probs, VP9_DEFAULT_MV_REF_PROB,
               sizeof(xd->mb_mv_ref_probs));
  }
#endif
}
/****************************************************************************
 *
 *  ROUTINE       : vp8_yv12_copy_frame
 *
 *  INPUTS        :
 *
 *  OUTPUTS       : None.
 *
 *  RETURNS       : void
 *
 *  FUNCTION      : Copies the source image into the destination image and
 *                  updates the destination's UMV borders.
 *
 *  SPECIAL NOTES : The frames are assumed to be identical in size.
 *
 ****************************************************************************/
void vp8_yv12_copy_frame_c(YV12_BUFFER_CONFIG *src_ybc,
                           YV12_BUFFER_CONFIG *dst_ybc) {
  int row;
  unsigned char *source, *dest;

#if 0
  /* These assertions are valid in the codec, but the libvpx-tester uses
   * this code slightly differently.
   */
  assert(src_ybc->y_width == dst_ybc->y_width);
  assert(src_ybc->y_height == dst_ybc->y_height);
#endif

  source = src_ybc->y_buffer;
  dest = dst_ybc->y_buffer;

  for (row = 0; row < src_ybc->y_height; row++) {
    vpx_memcpy(dest, source, src_ybc->y_width);
    source += src_ybc->y_stride;
    dest += dst_ybc->y_stride;
  }

  source = src_ybc->u_buffer;
  dest = dst_ybc->u_buffer;

  for (row = 0; row < src_ybc->uv_height; row++) {
    vpx_memcpy(dest, source, src_ybc->uv_width);
    source += src_ybc->uv_stride;
    dest += dst_ybc->uv_stride;
  }

  source = src_ybc->v_buffer;
  dest = dst_ybc->v_buffer;

  for (row = 0; row < src_ybc->uv_height; row++) {
    vpx_memcpy(dest, source, src_ybc->uv_width);
    source += src_ybc->uv_stride;
    dest += dst_ybc->uv_stride;
  }

  vp8_yv12_extend_frame_borders_c(dst_ybc);
}
static void extend_plane_high(uint8_t *const src8, int src_stride,
                              int width, int height,
                              int extend_top, int extend_left,
                              int extend_bottom, int extend_right) {
  int i;
  const int linesize = extend_left + extend_right + width;
  uint16_t *src = CONVERT_TO_SHORTPTR(src8);

  /* copy the left and right most columns out */
  uint16_t *src_ptr1 = src;
  uint16_t *src_ptr2 = src + width - 1;
  uint16_t *dst_ptr1 = src - extend_left;
  uint16_t *dst_ptr2 = src + width;

  for (i = 0; i < height; ++i) {
    vpx_memset16(dst_ptr1, src_ptr1[0], extend_left);
    vpx_memset16(dst_ptr2, src_ptr2[0], extend_right);
    src_ptr1 += src_stride;
    src_ptr2 += src_stride;
    dst_ptr1 += src_stride;
    dst_ptr2 += src_stride;
  }

  /* Now copy the top and bottom lines into each line of the respective
   * borders
   */
  src_ptr1 = src - extend_left;
  src_ptr2 = src + src_stride * (height - 1) - extend_left;
  dst_ptr1 = src + src_stride * -extend_top - extend_left;
  dst_ptr2 = src + src_stride * height - extend_left;

  for (i = 0; i < extend_top; ++i) {
    vpx_memcpy(dst_ptr1, src_ptr1, linesize * sizeof(uint16_t));
    dst_ptr1 += src_stride;
  }

  for (i = 0; i < extend_bottom; ++i) {
    vpx_memcpy(dst_ptr2, src_ptr2, linesize * sizeof(uint16_t));
    dst_ptr2 += src_stride;
  }
}
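/* Hedged sketch (assumed wrapper, not from the original source): how a
 * caller might dispatch between the 8-bit extend_plane and the
 * high-bitdepth extend_plane_high using the YV12_FLAG_HIGHBITDEPTH
 * convention seen in vpx_yv12_copy_y_c above. The wrapper name and the
 * uniform per-edge extension by ybf->border are illustrative assumptions. */
static void extend_y_borders_sketch(YV12_BUFFER_CONFIG *ybf) {
#if CONFIG_VP9 && CONFIG_VP9_HIGHBITDEPTH
  if (ybf->flags & YV12_FLAG_HIGHBITDEPTH) {
    extend_plane_high(ybf->y_buffer, ybf->y_stride, ybf->y_width,
                      ybf->y_height, ybf->border, ybf->border, ybf->border,
                      ybf->border);
    return;
  }
#endif
  extend_plane(ybf->y_buffer, ybf->y_stride, ybf->y_width, ybf->y_height,
               ybf->border, ybf->border, ybf->border, ybf->border);
}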
void vp9_set_segmentation_map(VP9_PTR ptr, unsigned char *segmentation_map) {
  VP9_COMP *cpi = (VP9_COMP *)(ptr);

  // Copy in the new segmentation map
  vpx_memcpy(cpi->segmentation_map, segmentation_map,
             (cpi->common.mi_rows * cpi->common.mi_cols));

  // Signal that the map should be updated.
  cpi->mb.e_mbd.update_mb_segmentation_map = 1;
  cpi->mb.e_mbd.update_mb_segmentation_data = 1;
}
// Copies the source image into the destination image and updates the
// destination's UMV borders.
// Note: The frames are assumed to be identical in size.
void vp8_yv12_copy_frame_c(const YV12_BUFFER_CONFIG *src_ybc,
                           YV12_BUFFER_CONFIG *dst_ybc) {
  int row;
  const uint8_t *src = src_ybc->y_buffer;
  uint8_t *dst = dst_ybc->y_buffer;

#if 0
  /* These assertions are valid in the codec, but the libvpx-tester uses
   * this code slightly differently.
   */
  assert(src_ybc->y_width == dst_ybc->y_width);
  assert(src_ybc->y_height == dst_ybc->y_height);
#endif

  for (row = 0; row < src_ybc->y_height; ++row) {
    vpx_memcpy(dst, src, src_ybc->y_width);
    src += src_ybc->y_stride;
    dst += dst_ybc->y_stride;
  }

  src = src_ybc->u_buffer;
  dst = dst_ybc->u_buffer;

  for (row = 0; row < src_ybc->uv_height; ++row) {
    vpx_memcpy(dst, src, src_ybc->uv_width);
    src += src_ybc->uv_stride;
    dst += dst_ybc->uv_stride;
  }

  src = src_ybc->v_buffer;
  dst = dst_ybc->v_buffer;

  for (row = 0; row < src_ybc->uv_height; ++row) {
    vpx_memcpy(dst, src, src_ybc->uv_width);
    src += src_ybc->uv_stride;
    dst += dst_ybc->uv_stride;
  }

  vp8_yv12_extend_frame_borders_c(dst_ybc);
}
void vp9_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride,
                         uint8_t *dst, ptrdiff_t dst_stride,
                         const int16_t *filter_x, int filter_x_stride,
                         const int16_t *filter_y, int filter_y_stride,
                         int w, int h) {
  int r;

  for (r = h; r > 0; --r) {
    vpx_memcpy(dst, src, w);
    src += src_stride;
    dst += dst_stride;
  }
}
/****************************************************************************
 *
 *  ROUTINE       : vp8_yv12_copy_frame
 *
 *  INPUTS        :
 *
 *  OUTPUTS       : None.
 *
 *  RETURNS       : void
 *
 *  FUNCTION      : Copies the source image into the destination image and
 *                  updates the destination's UMV borders.
 *
 *  SPECIAL NOTES : The frames are assumed to be identical in size.
 *
 ****************************************************************************/
void vp8_yv12_copy_frame(YV12_BUFFER_CONFIG *src_ybc,
                         YV12_BUFFER_CONFIG *dst_ybc) {
  int row;
  unsigned char *source, *dest;

  source = src_ybc->y_buffer;
  dest = dst_ybc->y_buffer;

  for (row = 0; row < src_ybc->y_height; row++) {
    vpx_memcpy(dest, source, src_ybc->y_width);
    source += src_ybc->y_stride;
    dest += dst_ybc->y_stride;
  }

  source = src_ybc->u_buffer;
  dest = dst_ybc->u_buffer;

  for (row = 0; row < src_ybc->uv_height; row++) {
    vpx_memcpy(dest, source, src_ybc->uv_width);
    source += src_ybc->uv_stride;
    dest += dst_ybc->uv_stride;
  }

  source = src_ybc->v_buffer;
  dest = dst_ybc->v_buffer;

  for (row = 0; row < src_ybc->uv_height; row++) {
    vpx_memcpy(dest, source, src_ybc->uv_width);
    source += src_ybc->uv_stride;
    dest += dst_ybc->uv_stride;
  }

  vp8_yv12_extend_frame_borders_ptr(dst_ybc);
}
void vp9_set_segment_data(VP9_PTR ptr, signed char *feature_data,
                          unsigned char abs_delta) {
  VP9_COMP *cpi = (VP9_COMP *)(ptr);

  cpi->mb.e_mbd.mb_segment_abs_delta = abs_delta;

  vpx_memcpy(cpi->mb.e_mbd.segment_feature_data, feature_data,
             sizeof(cpi->mb.e_mbd.segment_feature_data));

  // TBD ?? Set the feature mask
  // vpx_memcpy(cpi->mb.e_mbd.segment_feature_mask, 0,
  //            sizeof(cpi->mb.e_mbd.segment_feature_mask));
}
static void extend_plane(uint8_t *const src, int src_stride,
                         int width, int height,
                         int extend_top, int extend_left,
                         int extend_bottom, int extend_right) {
  int i;
  const int linesize = extend_left + extend_right + width;

  /* copy the left and right most columns out */
  uint8_t *src_ptr1 = src;
  uint8_t *src_ptr2 = src + width - 1;
  uint8_t *dst_ptr1 = src - extend_left;
  uint8_t *dst_ptr2 = src + width;

  for (i = 0; i < height; ++i) {
    vpx_memset(dst_ptr1, src_ptr1[0], extend_left);
    vpx_memset(dst_ptr2, src_ptr2[0], extend_right);
    src_ptr1 += src_stride;
    src_ptr2 += src_stride;
    dst_ptr1 += src_stride;
    dst_ptr2 += src_stride;
  }

  /* Now copy the top and bottom lines into each line of the respective
   * borders
   */
  src_ptr1 = src - extend_left;
  src_ptr2 = src + src_stride * (height - 1) - extend_left;
  dst_ptr1 = src + src_stride * -extend_top - extend_left;
  dst_ptr2 = src + src_stride * height - extend_left;

  for (i = 0; i < extend_top; ++i) {
    vpx_memcpy(dst_ptr1, src_ptr1, linesize);
    dst_ptr1 += src_stride;
  }

  for (i = 0; i < extend_bottom; ++i) {
    vpx_memcpy(dst_ptr2, src_ptr2, linesize);
    dst_ptr2 += src_stride;
  }
}
void vp8_yv12_copy_y_c(YV12_BUFFER_CONFIG *src_ybc,
                       YV12_BUFFER_CONFIG *dst_ybc) {
  int row;
  unsigned char *source, *dest;

  source = src_ybc->y_buffer;
  dest = dst_ybc->y_buffer;

  for (row = 0; row < src_ybc->y_height; row++) {
    vpx_memcpy(dest, source, src_ybc->y_width);
    source += src_ybc->y_stride;
    dest += dst_ybc->y_stride;
  }
}
static void extend_model_to_full_distribution(vp9_prob p,
                                              vp9_prob *tree_probs) {
  const int l = (p - 1) / 2;
  const vp9_prob (*model)[MODEL_NODES] = modelcoefprobs_pareto8;

  if (p & 1) {
    vpx_memcpy(tree_probs + UNCONSTRAINED_NODES, model[l],
               MODEL_NODES * sizeof(vp9_prob));
  } else {
    // interpolate
    int i;
    for (i = UNCONSTRAINED_NODES; i < ENTROPY_NODES; ++i)
      tree_probs[i] = (model[l][i - UNCONSTRAINED_NODES] +
                       model[l + 1][i - UNCONSTRAINED_NODES]) >> 1;
  }
}
void vp9_high_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride,
                              uint8_t *dst8, ptrdiff_t dst_stride,
                              const int16_t *filter_x, int filter_x_stride,
                              const int16_t *filter_y, int filter_y_stride,
                              int w, int h, int bd) {
  int r;
  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  (void)filter_x;
  (void)filter_y;
  (void)filter_x_stride;
  (void)filter_y_stride;
  (void)bd;

  for (r = h; r > 0; --r) {
    vpx_memcpy(dst, src, w * sizeof(uint16_t));
    src += src_stride;
    dst += dst_stride;
  }
}
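/* Note on the pointer convention above, with a small usage sketch (not from
 * the original source). High-bitdepth planes travel through the API as
 * uint8_t * handles and are converted back to real uint16_t * sample
 * pointers at the point of use via CONVERT_TO_SHORTPTR, which is why the
 * copy width is scaled by sizeof(uint16_t). A caller starting from an
 * ordinary uint16_t buffer would wrap it with the inverse macro,
 * CONVERT_TO_BYTEPTR; the buffer sizes, strides, and bit depth below are
 * illustrative assumptions. */
static void high_copy_sketch(void) {
  static uint16_t src16[64 * 64];  /* illustrative 10-bit source samples */
  static uint16_t dst16[64 * 64];  /* illustrative destination plane     */

  /* The filter arguments are unused by the copy kernel. */
  vp9_high_convolve_copy_c(CONVERT_TO_BYTEPTR(src16), 64,
                           CONVERT_TO_BYTEPTR(dst16), 64,
                           NULL, 0, NULL, 0, 64, 64, 10);
}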
void vp9_restore_coding_context(VP9_COMP *cpi) {
  CODING_CONTEXT *const cc = &cpi->coding_context;
  VP9_COMMON *cm = &cpi->common;

  // Restore key state variables to the snapshot state stored in the
  // previous call to vp9_save_coding_context.
  vp9_copy(cpi->mb.nmvjointcost, cc->nmvjointcost);
  vp9_copy(cpi->mb.nmvcosts, cc->nmvcosts);
  vp9_copy(cpi->mb.nmvcosts_hp, cc->nmvcosts_hp);

  vp9_copy(cm->seg.pred_probs, cc->segment_pred_probs);

  vpx_memcpy(cm->last_frame_seg_map,
             cpi->coding_context.last_frame_seg_map_copy,
             (cm->mi_rows * cm->mi_cols));

  vp9_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
  vp9_copy(cm->lf.last_mode_deltas, cc->last_mode_lf_deltas);

  cm->fc = cc->fc;
}
void vp9_save_coding_context(VP9_COMP *cpi) {
  CODING_CONTEXT *const cc = &cpi->coding_context;
  VP9_COMMON *cm = &cpi->common;

  // Stores a snapshot of key state variables which can subsequently be
  // restored with a call to vp9_restore_coding_context. These functions are
  // intended for use in a re-code loop in vp9_compress_frame where the
  // quantizer value is adjusted between loop iterations.
  vp9_copy(cc->nmvjointcost, cpi->mb.nmvjointcost);
  vp9_copy(cc->nmvcosts, cpi->mb.nmvcosts);
  vp9_copy(cc->nmvcosts_hp, cpi->mb.nmvcosts_hp);

  vp9_copy(cc->segment_pred_probs, cm->seg.pred_probs);

  vpx_memcpy(cpi->coding_context.last_frame_seg_map_copy,
             cm->last_frame_seg_map, (cm->mi_rows * cm->mi_cols));

  vp9_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
  vp9_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);

  cc->fc = cm->fc;
}
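/* Hedged sketch (loop structure simplified, helper logic hypothetical): the
 * re-code loop the save/restore pair above is designed for, per the comments
 * in vp9_save_coding_context. State is snapshotted before encoding at a
 * trial quantizer and rolled back whenever the frame must be re-encoded at a
 * different q. */
static void encode_with_recode_loop_sketch(VP9_COMP *cpi) {
  int loop;
  do {
    vp9_save_coding_context(cpi);

    /* ... set the trial quantizer and encode the frame ... */

    loop = 0;  /* in a real encoder this would come from a rate-control
                  check, e.g. whether the frame size overshot or undershot
                  its target */
    if (loop)
      vp9_restore_coding_context(cpi);  /* roll back and try a new q */
  } while (loop);
}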
static void setup_mbby_copy(MACROBLOCK *mbdst, MACROBLOCK *mbsrc) {
  MACROBLOCK *x = mbsrc;
  MACROBLOCK *z = mbdst;
  int i;

  z->ss = x->ss;
  z->ss_count = x->ss_count;
  z->searches_per_step = x->searches_per_step;
  z->errorperbit = x->errorperbit;
  z->sadperbit16 = x->sadperbit16;
  z->sadperbit4 = x->sadperbit4;

  /*
  z->mv_col_min = x->mv_col_min;
  z->mv_col_max = x->mv_col_max;
  z->mv_row_min = x->mv_row_min;
  z->mv_row_max = x->mv_row_max;
  */

  z->short_fdct4x4 = x->short_fdct4x4;
  z->short_fdct8x4 = x->short_fdct8x4;
  z->short_walsh4x4 = x->short_walsh4x4;
  z->quantize_b = x->quantize_b;
  z->quantize_b_pair = x->quantize_b_pair;
  z->optimize = x->optimize;

  /*
  z->mvc = x->mvc;
  z->src.y_buffer = x->src.y_buffer;
  z->src.u_buffer = x->src.u_buffer;
  z->src.v_buffer = x->src.v_buffer;
  */

  z->mvcost[0] = x->mvcost[0];
  z->mvcost[1] = x->mvcost[1];
  z->mvsadcost[0] = x->mvsadcost[0];
  z->mvsadcost[1] = x->mvsadcost[1];

  z->token_costs = x->token_costs;
  z->inter_bmode_costs = x->inter_bmode_costs;
  z->mbmode_cost = x->mbmode_cost;
  z->intra_uv_mode_cost = x->intra_uv_mode_cost;
  z->bmode_costs = x->bmode_costs;

  for (i = 0; i < 25; i++) {
    z->block[i].quant = x->block[i].quant;
    z->block[i].quant_fast = x->block[i].quant_fast;
    z->block[i].quant_shift = x->block[i].quant_shift;
    z->block[i].zbin = x->block[i].zbin;
    z->block[i].zrun_zbin_boost = x->block[i].zrun_zbin_boost;
    z->block[i].round = x->block[i].round;
    z->block[i].src_stride = x->block[i].src_stride;
  }

  z->q_index = x->q_index;
  z->act_zbin_adj = x->act_zbin_adj;
  z->last_act_zbin_adj = x->last_act_zbin_adj;

  {
    MACROBLOCKD *xd = &x->e_mbd;
    MACROBLOCKD *zd = &z->e_mbd;

    /*
    zd->mode_info_context = xd->mode_info_context;
    zd->mode_info = xd->mode_info;
    zd->mode_info_stride = xd->mode_info_stride;
    zd->frame_type = xd->frame_type;
    zd->up_available = xd->up_available;
    zd->left_available = xd->left_available;
    zd->left_context = xd->left_context;
    zd->last_frame_dc = xd->last_frame_dc;
    zd->last_frame_dccons = xd->last_frame_dccons;
    zd->gold_frame_dc = xd->gold_frame_dc;
    zd->gold_frame_dccons = xd->gold_frame_dccons;
    zd->mb_to_left_edge = xd->mb_to_left_edge;
    zd->mb_to_right_edge = xd->mb_to_right_edge;
    zd->mb_to_top_edge = xd->mb_to_top_edge;
    zd->mb_to_bottom_edge = xd->mb_to_bottom_edge;
    zd->gf_active_ptr = xd->gf_active_ptr;
    zd->frames_since_golden = xd->frames_since_golden;
    zd->frames_till_alt_ref_frame = xd->frames_till_alt_ref_frame;
    */
    zd->subpixel_predict = xd->subpixel_predict;
    zd->subpixel_predict8x4 = xd->subpixel_predict8x4;
    zd->subpixel_predict8x8 = xd->subpixel_predict8x8;
    zd->subpixel_predict16x16 = xd->subpixel_predict16x16;
    zd->segmentation_enabled = xd->segmentation_enabled;
    zd->mb_segement_abs_delta = xd->mb_segement_abs_delta;
    vpx_memcpy(zd->segment_feature_data, xd->segment_feature_data,
               sizeof(xd->segment_feature_data));

    vpx_memcpy(zd->dequant_y1_dc, xd->dequant_y1_dc,
               sizeof(xd->dequant_y1_dc));
    vpx_memcpy(zd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1));
    vpx_memcpy(zd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2));
    vpx_memcpy(zd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv));

#if 1
    /* TODO: Remove dequant from BLOCKD. This is a temporary solution until
     * the quantizer code uses a passed in pointer to the dequant constants.
     * This will also require modifications to the x86 and neon assembly.
     */
    for (i = 0; i < 16; i++)
      zd->block[i].dequant = zd->dequant_y1;
    for (i = 16; i < 24; i++)
      zd->block[i].dequant = zd->dequant_uv;
    zd->block[24].dequant = zd->dequant_y2;
#endif
  }
}
void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) {
  ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context;
  ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)x->left_context;
  int plane_type;
  int b;
  TOKENEXTRA *start = *t;
  TOKENEXTRA *tp = *t;

  x->mode_info_context->mbmi.dc_diff = 1;

#if 0
  if (x->mbmi.force_no_skip) {
    x->mbmi.mb_skip_coeff = 1;
    // reset for next mb.
    x->mbmi.force_no_skip = 0;
  }
#endif

#if 1
  x->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable(x);

  if (x->mode_info_context->mbmi.mb_skip_coeff) {
    cpi->skip_true_count++;

    if (!cpi->common.mb_no_coeff_skip)
      vp8_stuff_mb(cpi, x, t);
    else {
      vp8_fix_contexts(x);
    }

    if (x->mode_info_context->mbmi.mode != B_PRED &&
        x->mode_info_context->mbmi.mode != SPLITMV)
      x->mode_info_context->mbmi.dc_diff = 0;
    else
      x->mode_info_context->mbmi.dc_diff = 1;

    return;
  }

  cpi->skip_false_count++;
#endif

#if 0
  vpx_memcpy(cpi->coef_counts_backup, cpi->coef_counts,
             sizeof(cpi->coef_counts));
#endif

  if (x->mode_info_context->mbmi.mode == B_PRED ||
      x->mode_info_context->mbmi.mode == SPLITMV) {
    plane_type = 3;
  } else {
    tokenize2nd_order_b(x->block + 24, t, 1, x->frame_type,
                        A + vp8_block2above[24], L + vp8_block2left[24], cpi);
    plane_type = 0;
  }

  for (b = 0; b < 16; b++)
    tokenize1st_order_b(x->block + b, t, plane_type, x->frame_type,
                        A + vp8_block2above[b], L + vp8_block2left[b], cpi);

  for (b = 16; b < 24; b++)
    tokenize1st_order_b(x->block + b, t, 2, x->frame_type,
                        A + vp8_block2above[b], L + vp8_block2left[b], cpi);

#if 0
  if (cpi->common.mb_no_coeff_skip) {
    int skip = 1;

    while ((tp != *t) && skip) {
      skip = (skip && (tp->Token == DCT_EOB_TOKEN));
      tp++;
    }

    if (skip != x->mbmi.mb_skip_coeff)
      skip += 0;

    x->mbmi.mb_skip_coeff = skip;

    if (x->mbmi.mb_skip_coeff == 1) {
      x->mbmi.dc_diff = 0;
      // redo the counts
      vpx_memcpy(cpi->coef_counts, cpi->coef_counts_backup,
                 sizeof(cpi->coef_counts));

      *t = start;
      cpi->skip_true_count++;
    } else {
      cpi->skip_false_count++;
    }
  }
#endif
}
static void init_frame(VP8D_COMP *pbi) {
  VP8_COMMON *const pc = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;

  if (pc->frame_type == KEY_FRAME) {
    /* Various keyframe initializations */
    vpx_memcpy(pc->fc.mvc, vp8_default_mv_context,
               sizeof(vp8_default_mv_context));

    vp8_init_mbmode_probs(pc);

    vp8_default_coef_probs(pc);

    /* reset the segment feature data to 0 with delta coding (Default
     * state). */
    vpx_memset(xd->segment_feature_data, 0,
               sizeof(xd->segment_feature_data));
    xd->mb_segement_abs_delta = SEGMENT_DELTADATA;

    /* reset the mode ref deltas for loop filter */
    vpx_memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas));
    vpx_memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas));

    /* All buffers are implicitly updated on key frames. */
    pc->refresh_golden_frame = 1;
    pc->refresh_alt_ref_frame = 1;
    pc->copy_buffer_to_gf = 0;
    pc->copy_buffer_to_arf = 0;

    /* Note that Golden and Altref modes cannot be used on a key frame so
     * ref_frame_sign_bias[] is undefined and meaningless
     */
    pc->ref_frame_sign_bias[GOLDEN_FRAME] = 0;
    pc->ref_frame_sign_bias[ALTREF_FRAME] = 0;
  } else {
    /* To enable choice of different interpolation filters */
    if (!pc->use_bilinear_mc_filter) {
      xd->subpixel_predict      = vp8_sixtap_predict4x4;
      xd->subpixel_predict8x4   = vp8_sixtap_predict8x4;
      xd->subpixel_predict8x8   = vp8_sixtap_predict8x8;
      xd->subpixel_predict16x16 = vp8_sixtap_predict16x16;
    } else {
      xd->subpixel_predict      = vp8_bilinear_predict4x4;
      xd->subpixel_predict8x4   = vp8_bilinear_predict8x4;
      xd->subpixel_predict8x8   = vp8_bilinear_predict8x8;
      xd->subpixel_predict16x16 = vp8_bilinear_predict16x16;
    }

    if (pbi->decoded_key_frame && pbi->ec_enabled && !pbi->ec_active)
      pbi->ec_active = 1;
  }

  xd->left_context = &pc->left_context;
  xd->mode_info_context = pc->mi;
  xd->frame_type = pc->frame_type;
  xd->mode_info_context->mbmi.mode = DC_PRED;
  xd->mode_info_stride = pc->mode_info_stride;
  xd->corrupted = 0; /* init without corruption */

  xd->fullpixel_mask = 0xffffffff;

  if (pc->full_pixel)
    xd->fullpixel_mask = 0xfffffff8;
}
// This function searches the neighbourhood of a given MB/SB and populates a
// list of candidate reference vectors.
//
void vp9_find_mv_refs(MACROBLOCKD *xd, MODE_INFO *here, MODE_INFO *lf_here,
                      MV_REFERENCE_FRAME ref_frame, int_mv *mv_ref_list,
                      int *ref_sign_bias) {
  int i;
  MODE_INFO *candidate_mi;
  MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
  int_mv candidate_mvs[MAX_MV_REF_CANDIDATES];
  int_mv c_refmv;
  int_mv c2_refmv;
  MV_REFERENCE_FRAME c_ref_frame;
  MV_REFERENCE_FRAME c2_ref_frame;
  int candidate_scores[MAX_MV_REF_CANDIDATES];
  int index = 0;
  int split_count = 0;
  int (*mv_ref_search)[2];
  int *ref_distance_weight;

  // Blank the reference vector lists and other local structures.
  vpx_memset(mv_ref_list, 0, sizeof(int_mv) * MAX_MV_REF_CANDIDATES);
  vpx_memset(candidate_mvs, 0, sizeof(int_mv) * MAX_MV_REF_CANDIDATES);
  vpx_memset(candidate_scores, 0, sizeof(candidate_scores));

  if (mbmi->sb_type) {
    mv_ref_search = sb_mv_ref_search;
    ref_distance_weight = sb_ref_distance_weight;
  } else {
    mv_ref_search = mb_mv_ref_search;
    ref_distance_weight = mb_ref_distance_weight;
  }

  // We first scan for candidate vectors that match the current reference
  // frame. Look at nearest neighbours:
  for (i = 0; i < 2; ++i) {
    if (((mv_ref_search[i][0] << 7) >= xd->mb_to_left_edge) &&
        ((mv_ref_search[i][1] << 7) >= xd->mb_to_top_edge)) {
      candidate_mi = here + mv_ref_search[i][0] +
                     (mv_ref_search[i][1] * xd->mode_info_stride);

      if (get_matching_candidate(candidate_mi, ref_frame, &c_refmv)) {
        clamp_mv(xd, &c_refmv);
        addmv_and_shuffle(candidate_mvs, candidate_scores, &index, c_refmv,
                          ref_distance_weight[i] + 16);
      }
      split_count += (candidate_mi->mbmi.mode == SPLITMV);
    }
  }

  // Look in the last frame
  candidate_mi = lf_here;
  if (get_matching_candidate(candidate_mi, ref_frame, &c_refmv)) {
    clamp_mv(xd, &c_refmv);
    addmv_and_shuffle(candidate_mvs, candidate_scores, &index, c_refmv, 18);
  }

  // More distant neighbours
  for (i = 2; (i < MVREF_NEIGHBOURS) &&
              (index < (MAX_MV_REF_CANDIDATES - 1)); ++i) {
    if (((mv_ref_search[i][0] << 7) >= xd->mb_to_left_edge) &&
        ((mv_ref_search[i][1] << 7) >= xd->mb_to_top_edge)) {
      candidate_mi = here + mv_ref_search[i][0] +
                     (mv_ref_search[i][1] * xd->mode_info_stride);

      if (get_matching_candidate(candidate_mi, ref_frame, &c_refmv)) {
        clamp_mv(xd, &c_refmv);
        addmv_and_shuffle(candidate_mvs, candidate_scores, &index, c_refmv,
                          ref_distance_weight[i] + 16);
      }
    }
  }

  // If we have not found enough candidates, consider ones where the
  // reference frame does not match. Break out when we have
  // MAX_MV_REF_CANDIDATES candidates.
  // Look first at spatial neighbours
  if (index < (MAX_MV_REF_CANDIDATES - 1)) {
    for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
      if (((mv_ref_search[i][0] << 7) >= xd->mb_to_left_edge) &&
          ((mv_ref_search[i][1] << 7) >= xd->mb_to_top_edge)) {
        candidate_mi = here + mv_ref_search[i][0] +
                       (mv_ref_search[i][1] * xd->mode_info_stride);

        get_non_matching_candidates(candidate_mi, ref_frame,
                                    &c_ref_frame, &c_refmv,
                                    &c2_ref_frame, &c2_refmv);

        if (c_ref_frame != INTRA_FRAME) {
          scale_mv(xd, ref_frame, c_ref_frame, &c_refmv, ref_sign_bias);
          addmv_and_shuffle(candidate_mvs, candidate_scores, &index, c_refmv,
                            ref_distance_weight[i]);
        }

        if (c2_ref_frame != INTRA_FRAME) {
          scale_mv(xd, ref_frame, c2_ref_frame, &c2_refmv, ref_sign_bias);
          addmv_and_shuffle(candidate_mvs, candidate_scores, &index,
                            c2_refmv, ref_distance_weight[i]);
        }
      }

      if (index >= (MAX_MV_REF_CANDIDATES - 1)) {
        break;
      }
    }
  }

  // Look at the last frame
  if (index < (MAX_MV_REF_CANDIDATES - 1)) {
    candidate_mi = lf_here;

    get_non_matching_candidates(candidate_mi, ref_frame,
                                &c_ref_frame, &c_refmv,
                                &c2_ref_frame, &c2_refmv);

    if (c_ref_frame != INTRA_FRAME) {
      scale_mv(xd, ref_frame, c_ref_frame, &c_refmv, ref_sign_bias);
      addmv_and_shuffle(candidate_mvs, candidate_scores, &index, c_refmv, 2);
    }

    if (c2_ref_frame != INTRA_FRAME) {
      scale_mv(xd, ref_frame, c2_ref_frame, &c2_refmv, ref_sign_bias);
      addmv_and_shuffle(candidate_mvs, candidate_scores, &index, c2_refmv, 2);
    }
  }

  // Define inter mode coding context.
  // 0,0 was best
  if (candidate_mvs[0].as_int == 0) {
    // 0,0 is the only candidate
    if (index <= 1) {
      mbmi->mb_mode_context[ref_frame] = 0;
    // non zero candidates available
    } else if (split_count == 0) {
      mbmi->mb_mode_context[ref_frame] = 1;
    } else {
      mbmi->mb_mode_context[ref_frame] = 2;
    }
  // Non zero best, no split MV cases
  } else if (split_count == 0) {
    if (candidate_scores[0] >= 32) {
      mbmi->mb_mode_context[ref_frame] = 3;
    } else {
      mbmi->mb_mode_context[ref_frame] = 4;
    }
  // Non zero best, some split MV
  } else {
    if (candidate_scores[0] >= 32) {
      mbmi->mb_mode_context[ref_frame] = 5;
    } else {
      mbmi->mb_mode_context[ref_frame] = 6;
    }
  }

  // 0,0 is always a valid reference; make sure it is present in the list.
  for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) {
    if (candidate_mvs[i].as_int == 0)
      break;
  }
  if (i == MAX_MV_REF_CANDIDATES) {
    candidate_mvs[MAX_MV_REF_CANDIDATES - 1].as_int = 0;
  }

  // Copy over the candidate list.
  vpx_memcpy(mv_ref_list, candidate_mvs, sizeof(candidate_mvs));
}