/* Denoise one 16x16 macroblock.
 *
 * Decides between filtering the block against the running average
 * (FILTER_BLOCK) or copying the source block unchanged (COPY_BLOCK),
 * based on the best SSE and the motion-vector magnitude found by the
 * encoder's motion search.
 *
 * denoiser       - holds the per-reference running-average frames and the
 *                  motion-compensated running-average scratch frame.
 * x              - encoder macroblock context; best_* fields were filled in
 *                  by the preceding mode/motion search.
 * best_sse       - SSE of the best inter mode found.
 * zero_mv_sse    - SSE of the zero-MV mode against best_zeromv_reference_frame.
 * recon_yoffset  - byte offset of this MB into the Y plane.
 * recon_uvoffset - byte offset of this MB into the U/V planes.
 *
 * Side effects: may rewrite x->best_sse_inter_mode / x->best_sse_mv to
 * ZEROMV (affects denoising only; mode info is saved and restored), and
 * writes either the filtered or the copied block into
 * denoiser->yv12_running_avg[INTRA_FRAME].
 */
void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
                             MACROBLOCK *x,
                             unsigned int best_sse,
                             unsigned int zero_mv_sse,
                             int recon_yoffset,
                             int recon_uvoffset)
{
    int mv_row;
    int mv_col;
    unsigned int motion_magnitude2;

    MV_REFERENCE_FRAME frame = x->best_reference_frame;
    MV_REFERENCE_FRAME zero_frame = x->best_zeromv_reference_frame;

    enum vp8_denoiser_decision decision = FILTER_BLOCK;

    if (zero_frame)
    {
        YV12_BUFFER_CONFIG *src = &denoiser->yv12_running_avg[frame];
        YV12_BUFFER_CONFIG *dst = &denoiser->yv12_mc_running_avg;
        YV12_BUFFER_CONFIG saved_pre,saved_dst;
        MB_MODE_INFO saved_mbmi;
        MACROBLOCKD *filter_xd = &x->e_mbd;
        MB_MODE_INFO *mbmi = &filter_xd->mode_info_context->mbmi;
        /* Note: the original declared a second, shadowing mv_col/mv_row
         * pair here; the outer pair is used instead (it is unconditionally
         * reassigned after this scope, so behavior is unchanged). */
        int sse_diff = zero_mv_sse - best_sse;

        /* Save mode info so the changes below affect only the denoising. */
        saved_mbmi = *mbmi;

        /* Use the best MV for the compensation. */
        mbmi->ref_frame = x->best_reference_frame;
        mbmi->mode = x->best_sse_inter_mode;
        mbmi->mv = x->best_sse_mv;
        mbmi->need_to_clamp_mvs = x->need_to_clamp_best_mvs;
        mv_col = x->best_sse_mv.as_mv.col;
        mv_row = x->best_sse_mv.as_mv.row;

        if (frame == INTRA_FRAME ||
            ((unsigned int)(mv_row *mv_row + mv_col *mv_col)
              <= NOISE_MOTION_THRESHOLD &&
             sse_diff < (int)SSE_DIFF_THRESHOLD))
        {
            /*
             * Handle intra blocks as referring to last frame with zero motion
             * and let the absolute pixel difference affect the filter factor.
             * Also consider small amount of motion as being random walk due
             * to noise, if it doesn't mean that we get a much bigger error.
             * Note that any changes to the mode info only affects the
             * denoising.
             */
            mbmi->ref_frame = x->best_zeromv_reference_frame;

            src = &denoiser->yv12_running_avg[zero_frame];

            mbmi->mode = ZEROMV;
            mbmi->mv.as_int = 0;
            x->best_sse_inter_mode = ZEROMV;
            x->best_sse_mv.as_int = 0;
            best_sse = zero_mv_sse;
        }

        saved_pre = filter_xd->pre;
        saved_dst = filter_xd->dst;

        /* Compensate the running average. */
        filter_xd->pre.y_buffer = src->y_buffer + recon_yoffset;
        filter_xd->pre.u_buffer = src->u_buffer + recon_uvoffset;
        filter_xd->pre.v_buffer = src->v_buffer + recon_uvoffset;
        /* Write the compensated running average to the destination buffer. */
        filter_xd->dst.y_buffer = dst->y_buffer + recon_yoffset;
        filter_xd->dst.u_buffer = dst->u_buffer + recon_uvoffset;
        filter_xd->dst.v_buffer = dst->v_buffer + recon_uvoffset;

        if (!x->skip)
        {
            vp8_build_inter_predictors_mb(filter_xd);
        }
        else
        {
            vp8_build_inter16x16_predictors_mb(filter_xd,
                                               filter_xd->dst.y_buffer,
                                               filter_xd->dst.u_buffer,
                                               filter_xd->dst.v_buffer,
                                               filter_xd->dst.y_stride,
                                               filter_xd->dst.uv_stride);
        }

        /* Restore everything the compensation step modified. */
        filter_xd->pre = saved_pre;
        filter_xd->dst = saved_dst;
        *mbmi = saved_mbmi;
    }

    mv_row = x->best_sse_mv.as_mv.row;
    mv_col = x->best_sse_mv.as_mv.col;
    motion_magnitude2 = mv_row * mv_row + mv_col * mv_col;

    /* Too large an error or too much motion: filtering would smear detail,
     * so just copy the source block into the running average. */
    if (best_sse > SSE_THRESHOLD ||
        motion_magnitude2 > 8 * NOISE_MOTION_THRESHOLD)
    {
        decision = COPY_BLOCK;
    }

    if (decision == FILTER_BLOCK)
    {
        /* Filter. */
        decision = vp8_denoiser_filter(&denoiser->yv12_mc_running_avg,
                                       &denoiser->yv12_running_avg[INTRA_FRAME],
                                       x,
                                       motion_magnitude2,
                                       recon_yoffset, recon_uvoffset);
    }
    if (decision == COPY_BLOCK)
    {
        /* No filtering of this block; it differs too much from the predictor,
         * or the motion vector magnitude is considered too big.
         */
        vp8_copy_mem16x16(
                x->thismb, 16,
                denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset,
                denoiser->yv12_running_avg[INTRA_FRAME].y_stride);
    }
}
/* Decode (reconstruct) one macroblock: entropy-decode residual tokens,
 * build the intra or inter prediction, then dequantize and add the IDCT
 * residual into the destination frame buffers.
 *
 * pbi    - decoder instance.
 * xd     - per-macroblock decode context (mode info, coefficient buffers,
 *          destination pointers).
 * mb_idx - index of this MB in raster order; used by error concealment to
 *          decide whether motion vectors from this point on are corrupt.
 */
static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
                              unsigned int mb_idx)
{
    MB_PREDICTION_MODE mode;
    int i;
#if CONFIG_ERROR_CONCEALMENT
    /* Declared only when concealment is compiled in; it is set and read
     * exclusively inside CONFIG_ERROR_CONCEALMENT sections below. */
    int corruption_detected = 0;
#endif
    /* Initialized to 0: on the mb_skip_coeff path no tokens are decoded,
     * and eobtotal may still be passed to vp8_decode_macroblock_cl below.
     * Leaving it uninitialized there was undefined behavior; zero is the
     * correct "no coefficients" value for a skipped MB. */
    int eobtotal = 0;

    if (xd->mode_info_context->mbmi.mb_skip_coeff)
    {
        vp8_reset_mb_tokens_context(xd);
    }
    else if (!vp8dx_bool_error(xd->current_bc))
    {
        eobtotal = vp8_decode_mb_tokens(pbi, xd);

        /* Special case:  Force the loopfilter to skip when eobtotal is zero */
        xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal==0);
    }

    mode = xd->mode_info_context->mbmi.mode;

    if (xd->segmentation_enabled)
        mb_init_dequantizer(pbi, xd);

#if PROFILE_OUTPUT
    if (xd->frame_type == KEY_FRAME)
        printf("Intra-Coded MB\n");
    else{
        if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME){
            printf("Intra-Coded Inter-Frame MB\n");
        } else {
            printf("Inter-Coded MB\n");
        }
    }
#endif

#if CONFIG_OPENCL && (ENABLE_CL_IDCT_DEQUANT || ENABLE_CL_SUBPIXEL)
    //If OpenCL is enabled and initialized, use CL-specific decoder for remains
    //of MB decoding.
    if (cl_initialized == CL_SUCCESS){
        vp8_decode_macroblock_cl(pbi, xd, eobtotal);
        return;
    }
#endif

#if CONFIG_ERROR_CONCEALMENT
    if(pbi->ec_active)
    {
        int throw_residual;
        /* When we have independent partitions we can apply residual even
         * though other partitions within the frame are corrupt.
         */
        throw_residual = (!pbi->independent_partitions &&
                          pbi->frame_corrupt_residual);
        throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc));

        if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual))
        {
            /* MB with corrupt residuals or corrupt mode/motion vectors.
             * Better to use the predictor as reconstruction.
             */
            pbi->frame_corrupt_residual = 1;
            vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
            vp8_conceal_corrupt_mb(xd);

            corruption_detected = 1;

            /* force idct to be skipped for B_PRED and use the
             * prediction only for reconstruction
             * */
            vpx_memset(xd->eobs, 0, 25);
        }
    }
#endif

    /* do prediction */
    if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
    {
        vp8_build_intra_predictors_mbuv_s(xd);

        if (mode != B_PRED)
        {
            vp8_build_intra_predictors_mby_s(xd);
        }
        else
        {
            short *DQC = xd->dequant_y1;

            /* clear out residual eob info */
            if(xd->mode_info_context->mbmi.mb_skip_coeff)
                vpx_memset(xd->eobs, 0, 25);

            vp8_intra_prediction_down_copy(xd);

            /* Per-4x4-subblock intra predict + residual add. */
            for (i = 0; i < 16; i++)
            {
                BLOCKD *b = &xd->block[i];
                int b_mode = xd->mode_info_context->bmi[i].as_mode;

                vp8_intra4x4_predict ( *(b->base_dst) + b->dst,
                                       b->dst_stride, b_mode,
                                       *(b->base_dst) + b->dst,
                                       b->dst_stride );

                if (xd->eobs[i])
                {
                    if (xd->eobs[i] > 1)
                    {
                        vp8_dequant_idct_add (&b->qcoeff_base[b->qcoeff_offset],
                                              DQC,
                                              *(b->base_dst) + b->dst,
                                              b->dst_stride);
                    }
                    else
                    {
                        /* Single DC coefficient: cheaper scalar add path. */
                        vp8_dc_only_idct_add
                            (b->qcoeff_base[b->qcoeff_offset] * DQC[0],
                             *(b->base_dst) + b->dst, b->dst_stride,
                             *(b->base_dst) + b->dst, b->dst_stride);
                        ((int *)&b->qcoeff_base[b->qcoeff_offset])[0] = 0;
                    }
                }
            }
        }
    }
    else
    {
        vp8_build_inter_predictors_mb(xd);
    }

#if CONFIG_ERROR_CONCEALMENT
    if (corruption_detected)
    {
        /* Concealed MB: the predictor alone is the reconstruction. */
        return;
    }
#endif

    if(!xd->mode_info_context->mbmi.mb_skip_coeff)
    {
        /* dequantization and idct */
        if (mode != B_PRED)
        {
            short *DQC = xd->dequant_y1;

            if (mode != SPLITMV)
            {
                BLOCKD *b = &xd->block[24];
                short *qcoeff = &b->qcoeff_base[b->qcoeff_offset];

                /* do 2nd order transform on the dc block */
                if (xd->eobs[24] > 1)
                {
                    vp8_dequantize_b(b, xd->dequant_y2);
                    vp8_short_inv_walsh4x4(&b->dqcoeff_base[b->dqcoeff_offset],
                                           xd->qcoeff);
                    ((int *)qcoeff)[0] = 0;
                    ((int *)qcoeff)[1] = 0;
                    ((int *)qcoeff)[2] = 0;
                    ((int *)qcoeff)[3] = 0;
                    ((int *)qcoeff)[4] = 0;
                    ((int *)qcoeff)[5] = 0;
                    ((int *)qcoeff)[6] = 0;
                    ((int *)qcoeff)[7] = 0;
                }
                else
                {
                    b->dqcoeff_base[b->dqcoeff_offset] = qcoeff[0] *
                        xd->dequant_y2[0];
                    vp8_short_inv_walsh4x4_1(&b->dqcoeff_base[b->dqcoeff_offset],
                                             xd->qcoeff);
                    ((int *)qcoeff)[0] = 0;
                }

                /* override the dc dequant constant in order to preserve the
                 * dc components
                 */
                DQC = xd->dequant_y1_dc;
            }

            vp8_dequant_idct_add_y_block
                            (xd->qcoeff, DQC,
                             xd->dst.y_buffer,
                             xd->dst.y_stride, xd->eobs);
        }

        vp8_dequant_idct_add_uv_block
                        (xd->qcoeff+16*16, xd->dequant_uv,
                         xd->dst.u_buffer, xd->dst.v_buffer,
                         xd->dst.uv_stride, xd->eobs+16);
    }
}
/* Decode (reconstruct) one macroblock: entropy-decode residual tokens,
 * build the intra or inter prediction, then dequantize and add the IDCT
 * residual into the destination frame buffers.
 *
 * pbi    - decoder instance.
 * xd     - per-macroblock decode context (mode info, coefficient buffers,
 *          destination pointers, above/left reconstruction rows).
 * mb_idx - raster-order MB index; used by error concealment to decide
 *          whether this MB's motion vectors are past the corruption point.
 */
static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
                              unsigned int mb_idx)
{
    MB_PREDICTION_MODE mode;
    int i;
#if CONFIG_ERROR_CONCEALMENT
    int corruption_detected = 0;
#endif

    if (xd->mode_info_context->mbmi.mb_skip_coeff)
    {
        vp8_reset_mb_tokens_context(xd);
    }
    else if (!vp8dx_bool_error(xd->current_bc))
    {
        int eobtotal;
        eobtotal = vp8_decode_mb_tokens(pbi, xd);

        /* Special case:  Force the loopfilter to skip when eobtotal is zero */
        xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal==0);
    }

    mode = xd->mode_info_context->mbmi.mode;

    if (xd->segmentation_enabled)
        vp8_mb_init_dequantizer(pbi, xd);

#if CONFIG_ERROR_CONCEALMENT
    if(pbi->ec_active)
    {
        int throw_residual;
        /* When we have independent partitions we can apply residual even
         * though other partitions within the frame are corrupt.
         */
        throw_residual = (!pbi->independent_partitions &&
                          pbi->frame_corrupt_residual);
        throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc));

        if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual))
        {
            /* MB with corrupt residuals or corrupt mode/motion vectors.
             * Better to use the predictor as reconstruction.
             */
            pbi->frame_corrupt_residual = 1;
            vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
            vp8_conceal_corrupt_mb(xd);

            corruption_detected = 1;

            /* force idct to be skipped for B_PRED and use the
             * prediction only for reconstruction
             * */
            vpx_memset(xd->eobs, 0, 25);
        }
    }
#endif

    /* do prediction */
    if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
    {
        /* Chroma intra prediction from the above/left reconstruction rows,
         * written straight into the destination frame (the _s variants). */
        vp8_build_intra_predictors_mbuv_s(xd,
                                          xd->recon_above[1],
                                          xd->recon_above[2],
                                          xd->recon_left[1],
                                          xd->recon_left[2],
                                          xd->recon_left_stride[1],
                                          xd->dst.u_buffer, xd->dst.v_buffer,
                                          xd->dst.uv_stride);

        if (mode != B_PRED)
        {
            /* Whole-MB luma intra prediction. */
            vp8_build_intra_predictors_mby_s(xd,
                                             xd->recon_above[0],
                                             xd->recon_left[0],
                                             xd->recon_left_stride[0],
                                             xd->dst.y_buffer,
                                             xd->dst.y_stride);
        }
        else
        {
            short *DQC = xd->dequant_y1;
            int dst_stride = xd->dst.y_stride;

            /* clear out residual eob info */
            if(xd->mode_info_context->mbmi.mb_skip_coeff)
                vpx_memset(xd->eobs, 0, 25);

            intra_prediction_down_copy(xd, xd->recon_above[0] + 16);

            /* B_PRED: predict and reconstruct each 4x4 luma subblock in
             * order, since each prediction reads neighbors already
             * reconstructed this pass. */
            for (i = 0; i < 16; i++)
            {
                BLOCKD *b = &xd->block[i];
                unsigned char *dst = xd->dst.y_buffer + b->offset;
                B_PREDICTION_MODE b_mode =
                    xd->mode_info_context->bmi[i].as_mode;
                unsigned char *Above = dst - dst_stride;
                unsigned char *yleft = dst - 1;
                int left_stride = dst_stride;
                unsigned char top_left = Above[-1];

                vp8_intra4x4_predict(Above, yleft, left_stride, b_mode,
                                     dst, dst_stride, top_left);

                if (xd->eobs[i])
                {
                    if (xd->eobs[i] > 1)
                    {
                        vp8_dequant_idct_add(b->qcoeff, DQC, dst, dst_stride);
                    }
                    else
                    {
                        /* Only the DC coefficient is set: use the cheaper
                         * scalar add, then clear that coefficient. */
                        vp8_dc_only_idct_add
                            (b->qcoeff[0] * DQC[0], dst, dst_stride, dst,
                             dst_stride);
                        vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
                    }
                }
            }
        }
    }
    else
    {
        vp8_build_inter_predictors_mb(xd);
    }

#if CONFIG_ERROR_CONCEALMENT
    if (corruption_detected)
    {
        /* Concealed MB: the predictor alone is the reconstruction. */
        return;
    }
#endif

    if(!xd->mode_info_context->mbmi.mb_skip_coeff)
    {
        /* dequantization and idct */
        if (mode != B_PRED)
        {
            short *DQC = xd->dequant_y1;

            if (mode != SPLITMV)
            {
                BLOCKD *b = &xd->block[24];

                /* do 2nd order transform on the dc block */
                if (xd->eobs[24] > 1)
                {
                    vp8_dequantize_b(b, xd->dequant_y2);

                    vp8_short_inv_walsh4x4(&b->dqcoeff[0],
                        xd->qcoeff);
                    vpx_memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0]));
                }
                else
                {
                    b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0];
                    vp8_short_inv_walsh4x4_1(&b->dqcoeff[0],
                        xd->qcoeff);
                    vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
                }

                /* override the dc dequant constant in order to preserve the
                 * dc components
                 */
                DQC = xd->dequant_y1_dc;
            }

            vp8_dequant_idct_add_y_block
                            (xd->qcoeff, DQC,
                             xd->dst.y_buffer,
                             xd->dst.y_stride, xd->eobs);
        }

        vp8_dequant_idct_add_uv_block
                        (xd->qcoeff+16*16, xd->dequant_uv,
                         xd->dst.u_buffer, xd->dst.v_buffer,
                         xd->dst.uv_stride, xd->eobs+16);
    }
}
/* Decode (reconstruct) one macroblock using the runtime-dispatch (RTCD)
 * codepaths: entropy-decode residual tokens, build the intra or inter
 * prediction, then dequantize and add the IDCT residual.
 *
 * pbi    - decoder instance (carries the RTCD function tables).
 * xd     - per-macroblock decode context.
 * mb_idx - raster-order MB index; used by error concealment to decide
 *          whether this MB's motion vectors are past the corruption point.
 */
static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
                              unsigned int mb_idx)
{
    int eobtotal = 0;
    int throw_residual = 0;
    MB_PREDICTION_MODE mode;
    int i;

    if (xd->mode_info_context->mbmi.mb_skip_coeff)
    {
        vp8_reset_mb_tokens_context(xd);
    }
    else if (!vp8dx_bool_error(xd->current_bc))
    {
        eobtotal = vp8_decode_mb_tokens(pbi, xd);
    }

    mode = xd->mode_info_context->mbmi.mode;

    if (eobtotal == 0 && mode != B_PRED && mode != SPLITMV &&
        !vp8dx_bool_error(xd->current_bc))
    {
        /* Special case:  Force the loopfilter to skip when eobtotal and
         * mb_skip_coeff are zero.
         * */
        xd->mode_info_context->mbmi.mb_skip_coeff = 1;

        /* No residual at all: prediction-only reconstruction, done. */
        skip_recon_mb(pbi, xd);
        return;
    }

    if (xd->segmentation_enabled)
        mb_init_dequantizer(pbi, xd);

    /* do prediction */
    if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
    {
        RECON_INVOKE(&pbi->common.rtcd.recon,
                     build_intra_predictors_mbuv_s)(xd);

        if (mode != B_PRED)
        {
            RECON_INVOKE(&pbi->common.rtcd.recon,
                         build_intra_predictors_mby_s)(xd);
        }
        else
        {
            /* B_PRED: only copy the above-right row here; the per-subblock
             * prediction happens interleaved with the IDCT below. */
            vp8_intra_prediction_down_copy(xd);
        }
    }
    else
    {
        vp8_build_inter_predictors_mb(xd);
    }

    /* When we have independent partitions we can apply residual even
     * though other partitions within the frame are corrupt.
     */
    throw_residual = (!pbi->independent_partitions &&
                      pbi->frame_corrupt_residual);
    throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc));

#if CONFIG_ERROR_CONCEALMENT
    if (pbi->ec_active &&
        (mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual))
    {
        /* MB with corrupt residuals or corrupt mode/motion vectors.
         * Better to use the predictor as reconstruction.
         */
        pbi->frame_corrupt_residual = 1;
        vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
        vp8_conceal_corrupt_mb(xd);
        return;
    }
#endif

    /* dequantization and idct */
    if (mode == B_PRED)
    {
        /* Predict and reconstruct each 4x4 luma subblock in order, since
         * each prediction reads neighbors already reconstructed this pass. */
        for (i = 0; i < 16; i++)
        {
            BLOCKD *b = &xd->block[i];
            int b_mode = xd->mode_info_context->bmi[i].as_mode;

            RECON_INVOKE(RTCD_VTABLE(recon), intra4x4_predict)
                          (*(b->base_dst) + b->dst, b->dst_stride, b_mode,
                           *(b->base_dst) + b->dst, b->dst_stride);

            if (xd->eobs[i] )
            {
                if (xd->eobs[i] > 1)
                {
                    DEQUANT_INVOKE(&pbi->dequant, idct_add)
                        (b->qcoeff, b->dequant,
                         *(b->base_dst) + b->dst, b->dst_stride);
                }
                else
                {
                    /* Only the DC coefficient is set: cheaper scalar add,
                     * then clear that coefficient. */
                    IDCT_INVOKE(RTCD_VTABLE(idct), idct1_scalar_add)
                        (b->qcoeff[0] * b->dequant[0],
                         *(b->base_dst) + b->dst, b->dst_stride,
                         *(b->base_dst) + b->dst, b->dst_stride);
                    ((int *)b->qcoeff)[0] = 0;
                }
            }
        }
    }
    else
    {
        short *DQC = xd->block[0].dequant;

        /* save the dc dequant constant in case it is overridden */
        short dc_dequant_temp = DQC[0];

        if (mode != SPLITMV)
        {
            BLOCKD *b = &xd->block[24];

            /* do 2nd order transform on the dc block */
            if (xd->eobs[24] > 1)
            {
                DEQUANT_INVOKE(&pbi->dequant, block)(b);

                IDCT_INVOKE(RTCD_VTABLE(idct), iwalsh16)(&b->dqcoeff[0],
                    xd->qcoeff);
                ((int *)b->qcoeff)[0] = 0;
                ((int *)b->qcoeff)[1] = 0;
                ((int *)b->qcoeff)[2] = 0;
                ((int *)b->qcoeff)[3] = 0;
                ((int *)b->qcoeff)[4] = 0;
                ((int *)b->qcoeff)[5] = 0;
                ((int *)b->qcoeff)[6] = 0;
                ((int *)b->qcoeff)[7] = 0;
            }
            else
            {
                b->dqcoeff[0] = b->qcoeff[0] * b->dequant[0];
                IDCT_INVOKE(RTCD_VTABLE(idct), iwalsh1)(&b->dqcoeff[0],
                    xd->qcoeff);
                ((int *)b->qcoeff)[0] = 0;
            }

            /* override the dc dequant constant */
            DQC[0] = 1;
        }

        DEQUANT_INVOKE (&pbi->dequant, idct_add_y_block)
                        (xd->qcoeff, xd->block[0].dequant,
                         xd->dst.y_buffer,
                         xd->dst.y_stride, xd->eobs);

        /* restore the dc dequant constant */
        DQC[0] = dc_dequant_temp;
    }

    DEQUANT_INVOKE (&pbi->dequant, idct_add_uv_block)
                    (xd->qcoeff+16*16, xd->block[16].dequant,
                     xd->dst.u_buffer, xd->dst.v_buffer,
                     xd->dst.uv_stride, xd->eobs+16);
}