int vp8_denoiser_filter_c(YV12_BUFFER_CONFIG *mc_running_avg,
                          YV12_BUFFER_CONFIG *running_avg,
                          MACROBLOCK *signal, unsigned int motion_magnitude,
                          int y_offset, int uv_offset)
{
    unsigned char *sig = signal->thismb;
    int sig_stride = 16;
    unsigned char *mc_running_avg_y = mc_running_avg->y_buffer + y_offset;
    int mc_avg_y_stride = mc_running_avg->y_stride;
    unsigned char *running_avg_y = running_avg->y_buffer + y_offset;
    int avg_y_stride = running_avg->y_stride;
    int r, c, i;
    int sum_diff = 0;
    int adj_val[3] = {3, 4, 6};

    /* If motion_magnitude is small, make the denoiser more aggressive by
     * increasing the adjustment for each level.
     */
    if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
    {
        for (i = 0; i < 3; i++)
            adj_val[i] += 1;
    }

    for (r = 0; r < 16; ++r)
    {
        for (c = 0; c < 16; ++c)
        {
            int diff = 0;
            int adjustment = 0;
            int absdiff = 0;

            diff = mc_running_avg_y[c] - sig[c];
            absdiff = abs(diff);

            /* When |diff| < 4, use the pixel value from the motion-compensated
             * running average (the last denoised frame).
             */
            if (absdiff <= 3)
            {
                running_avg_y[c] = mc_running_avg_y[c];
                sum_diff += diff;
            }
            else
            {
                if (absdiff >= 4 && absdiff <= 7)
                    adjustment = adj_val[0];
                else if (absdiff >= 8 && absdiff <= 15)
                    adjustment = adj_val[1];
                else
                    adjustment = adj_val[2];

                if (diff > 0)
                {
                    if ((sig[c] + adjustment) > 255)
                        running_avg_y[c] = 255;
                    else
                        running_avg_y[c] = sig[c] + adjustment;

                    sum_diff += adjustment;
                }
                else
                {
                    if ((sig[c] - adjustment) < 0)
                        running_avg_y[c] = 0;
                    else
                        running_avg_y[c] = sig[c] - adjustment;

                    sum_diff -= adjustment;
                }
            }
        }

        /* Update pointers for next iteration. */
        sig += sig_stride;
        mc_running_avg_y += mc_avg_y_stride;
        running_avg_y += avg_y_stride;
    }

    if (abs(sum_diff) > SUM_DIFF_THRESHOLD)
        return COPY_BLOCK;

    vp8_copy_mem16x16(running_avg->y_buffer + y_offset, avg_y_stride,
                      signal->thismb, sig_stride);
    return FILTER_BLOCK;
}
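/* Illustrative sketch, not part of the original source: the per-pixel rule
 * above can be summarized as a small helper that maps |diff| and the
 * motion-dependent aggressiveness to the absolute adjustment applied to the
 * source pixel. The helper name is hypothetical; the thresholds mirror the
 * adj_val table and absdiff ranges used in vp8_denoiser_filter_c.
 */
static int denoiser_abs_adjustment(int absdiff, int aggressive)
{
    int adj_val[3] = {3, 4, 6};
    int i;

    /* aggressive != 0 corresponds to
     * motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD above. */
    if (aggressive)
        for (i = 0; i < 3; i++) adj_val[i] += 1;

    if (absdiff <= 3)  return 0;          /* copy the running average instead */
    if (absdiff <= 7)  return adj_val[0]; /* level 1 */
    if (absdiff <= 15) return adj_val[1]; /* level 2 */
    return adj_val[2];                    /* level 3 */
}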
void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
                             MACROBLOCK *x,
                             unsigned int best_sse,
                             unsigned int zero_mv_sse,
                             int recon_yoffset,
                             int recon_uvoffset)
{
    int mv_row;
    int mv_col;
    unsigned int motion_magnitude2;

    MV_REFERENCE_FRAME frame = x->best_reference_frame;
    MV_REFERENCE_FRAME zero_frame = x->best_zeromv_reference_frame;

    enum vp8_denoiser_decision decision = FILTER_BLOCK;

    if (zero_frame)
    {
        YV12_BUFFER_CONFIG *src = &denoiser->yv12_running_avg[frame];
        YV12_BUFFER_CONFIG *dst = &denoiser->yv12_mc_running_avg;
        YV12_BUFFER_CONFIG saved_pre, saved_dst;
        MB_MODE_INFO saved_mbmi;
        MACROBLOCKD *filter_xd = &x->e_mbd;
        MB_MODE_INFO *mbmi = &filter_xd->mode_info_context->mbmi;
        int mv_col;
        int mv_row;
        int sse_diff = zero_mv_sse - best_sse;

        saved_mbmi = *mbmi;

        /* Use the best MV for the compensation. */
        mbmi->ref_frame = x->best_reference_frame;
        mbmi->mode = x->best_sse_inter_mode;
        mbmi->mv = x->best_sse_mv;
        mbmi->need_to_clamp_mvs = x->need_to_clamp_best_mvs;
        mv_col = x->best_sse_mv.as_mv.col;
        mv_row = x->best_sse_mv.as_mv.row;

        if (frame == INTRA_FRAME ||
            ((unsigned int)(mv_row * mv_row + mv_col * mv_col) <=
                 NOISE_MOTION_THRESHOLD &&
             sse_diff < (int)SSE_DIFF_THRESHOLD))
        {
            /*
             * Handle intra blocks as referring to last frame with zero motion
             * and let the absolute pixel difference affect the filter factor.
             * Also consider small amount of motion as being random walk due
             * to noise, if it doesn't mean that we get a much bigger error.
             * Note that any changes to the mode info only affect the
             * denoising.
             */
            mbmi->ref_frame = x->best_zeromv_reference_frame;
            src = &denoiser->yv12_running_avg[zero_frame];
            mbmi->mode = ZEROMV;
            mbmi->mv.as_int = 0;
            x->best_sse_inter_mode = ZEROMV;
            x->best_sse_mv.as_int = 0;
            best_sse = zero_mv_sse;
        }

        saved_pre = filter_xd->pre;
        saved_dst = filter_xd->dst;

        /* Compensate the running average. */
        filter_xd->pre.y_buffer = src->y_buffer + recon_yoffset;
        filter_xd->pre.u_buffer = src->u_buffer + recon_uvoffset;
        filter_xd->pre.v_buffer = src->v_buffer + recon_uvoffset;
        /* Write the compensated running average to the destination buffer. */
        filter_xd->dst.y_buffer = dst->y_buffer + recon_yoffset;
        filter_xd->dst.u_buffer = dst->u_buffer + recon_uvoffset;
        filter_xd->dst.v_buffer = dst->v_buffer + recon_uvoffset;

        if (!x->skip)
        {
            vp8_build_inter_predictors_mb(filter_xd);
        }
        else
        {
            vp8_build_inter16x16_predictors_mb(filter_xd,
                                               filter_xd->dst.y_buffer,
                                               filter_xd->dst.u_buffer,
                                               filter_xd->dst.v_buffer,
                                               filter_xd->dst.y_stride,
                                               filter_xd->dst.uv_stride);
        }
        filter_xd->pre = saved_pre;
        filter_xd->dst = saved_dst;
        *mbmi = saved_mbmi;
    }

    mv_row = x->best_sse_mv.as_mv.row;
    mv_col = x->best_sse_mv.as_mv.col;
    motion_magnitude2 = mv_row * mv_row + mv_col * mv_col;
    if (best_sse > SSE_THRESHOLD ||
        motion_magnitude2 > 8 * NOISE_MOTION_THRESHOLD)
    {
        decision = COPY_BLOCK;
    }

    if (decision == FILTER_BLOCK)
    {
        /* Filter. */
        decision = vp8_denoiser_filter(&denoiser->yv12_mc_running_avg,
                                       &denoiser->yv12_running_avg[INTRA_FRAME],
                                       x,
                                       motion_magnitude2,
                                       recon_yoffset, recon_uvoffset);
    }
    if (decision == COPY_BLOCK)
    {
        /* No filtering of this block; it differs too much from the predictor,
         * or the motion vector magnitude is considered too big.
         */
        vp8_copy_mem16x16(
            x->thismb, 16,
            denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset,
            denoiser->yv12_running_avg[INTRA_FRAME].y_stride);
    }
}
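/* Illustrative sketch, not part of the original source: the block-level
 * gating above condensed into a hypothetical helper. A block is only
 * filtered if its best SSE and motion magnitude are modest; the per-pixel
 * filter can still veto the block afterwards by returning COPY_BLOCK.
 * The function name is illustrative; the thresholds are the macros used in
 * vp8_denoiser_denoise_mb.
 */
static enum vp8_denoiser_decision denoise_decision(unsigned int best_sse,
                                                   int mv_row, int mv_col)
{
    unsigned int motion_magnitude2 = mv_row * mv_row + mv_col * mv_col;

    if (best_sse > SSE_THRESHOLD ||
        motion_magnitude2 > 8 * NOISE_MOTION_THRESHOLD)
        return COPY_BLOCK;

    /* vp8_denoiser_filter() may still return COPY_BLOCK for this block. */
    return FILTER_BLOCK;
}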
static THREAD_FUNCTION thread_encoding_proc(void *p_data)
{
    int ithread = ((ENCODETHREAD_DATA *)p_data)->ithread;
    VP8_COMP *cpi = (VP8_COMP *)(((ENCODETHREAD_DATA *)p_data)->ptr1);
    MB_ROW_COMP *mbri = (MB_ROW_COMP *)(((ENCODETHREAD_DATA *)p_data)->ptr2);
    ENTROPY_CONTEXT_PLANES mb_row_left_context;

    while (1)
    {
        if (cpi->b_multi_threaded == 0)
            break;

        if (sem_wait(&cpi->h_event_start_encoding[ithread]) == 0)
        {
            const int nsync = cpi->mt_sync_range;
            VP8_COMMON *cm = &cpi->common;
            int mb_row;
            MACROBLOCK *x = &mbri->mb;
            MACROBLOCKD *xd = &x->e_mbd;
            TOKENEXTRA *tp;
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
            TOKENEXTRA *tp_start = cpi->tok + (1 + ithread) * (16 * 24);
            const int num_part = (1 << cm->multi_token_partition);
#endif

            int *segment_counts = mbri->segment_counts;
            int *totalrate = &mbri->totalrate;

            if (cpi->b_multi_threaded == 0) /* we're shutting down */
                break;

            for (mb_row = ithread + 1; mb_row < cm->mb_rows;
                 mb_row += (cpi->encoding_thread_count + 1))
            {
                int recon_yoffset, recon_uvoffset;
                int mb_col;
                int ref_fb_idx = cm->lst_fb_idx;
                int dst_fb_idx = cm->new_fb_idx;
                int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
                int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
                int map_index = (mb_row * cm->mb_cols);
                volatile const int *last_row_current_mb_col;
                volatile int *current_mb_col = &cpi->mt_current_mb_col[mb_row];

#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
                vp8_writer *w = &cpi->bc[1 + (mb_row % num_part)];
#else
                tp = cpi->tok + (mb_row * (cm->mb_cols * 16 * 24));
                cpi->tplist[mb_row].start = tp;
#endif

                last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];

                /* reset above block coeffs */
                xd->above_context = cm->above_context;
                xd->left_context = &mb_row_left_context;
                vp8_zero(mb_row_left_context);

                xd->up_available = (mb_row != 0);
                recon_yoffset = (mb_row * recon_y_stride * 16);
                recon_uvoffset = (mb_row * recon_uv_stride * 8);

                /* Set the mb activity pointer to the start of the row. */
                x->mb_activity_ptr = &cpi->mb_activity_map[map_index];

                /* for each macroblock col in image */
                for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
                {
                    *current_mb_col = mb_col - 1;

                    if ((mb_col & (nsync - 1)) == 0)
                    {
                        while (mb_col > (*last_row_current_mb_col - nsync))
                        {
                            x86_pause_hint();
                            thread_sleep(0);
                        }
                    }

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
                    tp = tp_start;
#endif

                    /* Distance of MB to the various image edges.
                     * These are specified in 1/8th pel units as they are
                     * always compared to values that are in 1/8th pel units.
                     */
                    xd->mb_to_left_edge = -((mb_col * 16) << 3);
                    xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
                    xd->mb_to_top_edge = -((mb_row * 16) << 3);
                    xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;

                    /* Set up limit values for motion vectors used to prevent
                     * them extending outside the UMV borders.
                     */
                    x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
                    x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16)
                                    + (VP8BORDERINPIXELS - 16);
                    x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
                    x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
                                    + (VP8BORDERINPIXELS - 16);

                    xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
                    xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
                    xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
                    xd->left_available = (mb_col != 0);

                    x->rddiv = cpi->RDDIV;
                    x->rdmult = cpi->RDMULT;

                    /* Copy current mb to a buffer */
                    vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);

                    if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
                        vp8_activity_masking(cpi, x);

                    /* Is segmentation enabled */
                    /* MB level adjustment to quantizer */
                    if (xd->segmentation_enabled)
                    {
                        /* Code to set segment id in xd->mbmi.segment_id for
                         * current MB (with range checking)
                         */
                        if (cpi->segmentation_map[map_index + mb_col] <= 3)
                            xd->mode_info_context->mbmi.segment_id =
                                cpi->segmentation_map[map_index + mb_col];
                        else
                            xd->mode_info_context->mbmi.segment_id = 0;

                        vp8cx_mb_init_quantizer(cpi, x, 1);
                    }
                    else
                        /* Set to Segment 0 by default */
                        xd->mode_info_context->mbmi.segment_id = 0;

                    x->active_ptr = cpi->active_map + map_index + mb_col;

                    if (cm->frame_type == KEY_FRAME)
                    {
                        *totalrate += vp8cx_encode_intra_macroblock(cpi, x, &tp);
#ifdef MODE_STATS
                        y_modes[xd->mbmi.mode]++;
#endif
                    }
                    else
                    {
                        *totalrate += vp8cx_encode_inter_macroblock(cpi, x, &tp,
                                                                    recon_yoffset,
                                                                    recon_uvoffset,
                                                                    mb_row, mb_col);

#ifdef MODE_STATS
                        inter_y_modes[xd->mbmi.mode]++;

                        if (xd->mbmi.mode == SPLITMV)
                        {
                            int b;

                            for (b = 0; b < xd->mbmi.partition_count; b++)
                            {
                                inter_b_modes[x->partition->bmi[b].mode]++;
                            }
                        }
#endif
                        /* Special case code for cyclic refresh.
                         * If cyclic update is enabled, copy
                         * xd->mbmi.segment_id (which may have been updated
                         * based on mode during vp8cx_encode_inter_macroblock())
                         * back into the global segmentation map.
                         */
                        if (cpi->cyclic_refresh_mode_enabled &&
                            xd->segmentation_enabled)
                        {
                            const MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
                            cpi->segmentation_map[map_index + mb_col] = mbmi->segment_id;

                            /* If the block has been refreshed mark it as clean
                             * (the magnitude of the -ve influences how long it
                             * will be before we consider another refresh).
                             * Else if it was coded (last frame 0,0) and has
                             * not already been refreshed then mark it as a
                             * candidate for cleanup next time (marked 0),
                             * else mark it as dirty (1).
                             */
                            if (mbmi->segment_id)
                                cpi->cyclic_refresh_map[map_index + mb_col] = -1;
                            else if ((mbmi->mode == ZEROMV) &&
                                     (mbmi->ref_frame == LAST_FRAME))
                            {
                                if (cpi->cyclic_refresh_map[map_index + mb_col] == 1)
                                    cpi->cyclic_refresh_map[map_index + mb_col] = 0;
                            }
                            else
                                cpi->cyclic_refresh_map[map_index + mb_col] = 1;
                        }
                    }

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
                    /* pack tokens for this MB */
                    {
                        int tok_count = tp - tp_start;
                        pack_tokens(w, tp_start, tok_count);
                    }
#else
                    cpi->tplist[mb_row].stop = tp;
#endif
                    /* Increment pointer into gf usage flags structure. */
                    x->gf_active_ptr++;

                    /* Increment the activity mask pointers. */
                    x->mb_activity_ptr++;

                    /* adjust to the next column of macroblocks */
                    x->src.y_buffer += 16;
                    x->src.u_buffer += 8;
                    x->src.v_buffer += 8;

                    recon_yoffset += 16;
                    recon_uvoffset += 8;

                    /* Keep track of segment usage */
                    segment_counts[xd->mode_info_context->mbmi.segment_id]++;

                    /* skip to next mb */
                    xd->mode_info_context++;
                    x->partition_info++;
                    xd->above_context++;
                }

                vp8_extend_mb_row(&cm->yv12_fb[dst_fb_idx],
                                  xd->dst.y_buffer + 16,
                                  xd->dst.u_buffer + 8,
                                  xd->dst.v_buffer + 8);

                *current_mb_col = mb_col + nsync;

                /* this is to account for the border */
                xd->mode_info_context++;
                x->partition_info++;

                x->src.y_buffer += 16 * x->src.y_stride *
                                   (cpi->encoding_thread_count + 1) -
                                   16 * cm->mb_cols;
                x->src.u_buffer += 8 * x->src.uv_stride *
                                   (cpi->encoding_thread_count + 1) -
                                   8 * cm->mb_cols;
                x->src.v_buffer += 8 * x->src.uv_stride *
                                   (cpi->encoding_thread_count + 1) -
                                   8 * cm->mb_cols;

                xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count;
                x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;
                x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count;

                if (mb_row == cm->mb_rows - 1)
                {
                    sem_post(&cpi->h_event_end_encoding); /* signal frame encoding end */
                }
            }
        }
    }

    /* printf("exit thread %d\n", ithread); */
    return 0;
}
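/* Illustrative sketch, not part of the original source: the row threads above
 * form a wavefront. A thread on row r may only advance its macroblock column
 * once the thread on row r-1 is at least nsync columns ahead. The simplified
 * model below expresses that handshake with C11 atomics instead of the
 * volatile counters, x86_pause_hint() and thread_sleep() used in the real
 * code; names and types here are assumptions for the sketch only.
 */
#include <stdatomic.h>

/* row_progress[r] holds the last column completed on row r. */
static void wait_for_row_above(atomic_int *row_progress,
                               int mb_row, int mb_col, int nsync)
{
    if (mb_row == 0)
        return; /* the top row has no dependency */

    if ((mb_col & (nsync - 1)) != 0)
        return; /* only re-check the dependency every nsync columns */

    while (mb_col > atomic_load(&row_progress[mb_row - 1]) - nsync)
    {
        /* spin; the real code inserts x86_pause_hint() and thread_sleep(0) */
    }
}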
int vp8_denoiser_filter_neon(YV12_BUFFER_CONFIG *mc_running_avg,
                             YV12_BUFFER_CONFIG *running_avg,
                             MACROBLOCK *signal, unsigned int motion_magnitude,
                             int y_offset, int uv_offset)
{
    /* If motion_magnitude is small, make the denoiser more aggressive by
     * increasing the adjustment for each level: the level 1 adjustment is
     * increased, the deltas stay the same.
     */
    const uint8x16_t v_level1_adjustment = vdupq_n_u8(
        (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 4 : 3);
    const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
    const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
    const uint8x16_t v_level1_threshold = vdupq_n_u8(4);
    const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
    const uint8x16_t v_level3_threshold = vdupq_n_u8(16);

    /* Local variables for array pointers and strides. */
    unsigned char *sig = signal->thismb;
    int sig_stride = 16;
    unsigned char *mc_running_avg_y = mc_running_avg->y_buffer + y_offset;
    int mc_running_avg_y_stride = mc_running_avg->y_stride;
    unsigned char *running_avg_y = running_avg->y_buffer + y_offset;
    int running_avg_y_stride = running_avg->y_stride;

    /* Go over lines. */
    int i;
    int sum_diff = 0;

    for (i = 0; i < 16; ++i)
    {
        int8x16_t v_sum_diff = vdupq_n_s8(0);
        uint8x16_t v_running_avg_y;

        /* Load inputs. */
        const uint8x16_t v_sig = vld1q_u8(sig);
        const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);

        /* Calculate absolute difference and sign masks. */
        const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
        const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
        const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);

        /* Figure out which level we are in. */
        const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff);
        const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff);
        const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff);

        /* Calculate absolute adjustments for level 1, 2 and 3. */
        const uint8x16_t v_level2_adjustment = vandq_u8(v_level2_mask,
                                                        v_delta_level_1_and_2);
        const uint8x16_t v_level3_adjustment = vandq_u8(v_level3_mask,
                                                        v_delta_level_2_and_3);
        const uint8x16_t v_level1and2_adjustment = vaddq_u8(v_level1_adjustment,
                                                            v_level2_adjustment);
        const uint8x16_t v_level1and2and3_adjustment = vaddq_u8(
            v_level1and2_adjustment, v_level3_adjustment);

        /* Figure out the adjustment's absolute value by selecting between the
         * absolute difference if in level 0, or the value for level 1, 2 and 3.
         */
        const uint8x16_t v_abs_adjustment = vbslq_u8(v_level1_mask,
            v_level1and2and3_adjustment, v_abs_diff);

        /* Calculate positive and negative adjustments. Apply them to the
         * signal and accumulate them. Adjustments are less than eight and the
         * maximum sum of them (7 * 16) can fit in a signed char.
         */
        const uint8x16_t v_pos_adjustment = vandq_u8(v_diff_pos_mask,
                                                     v_abs_adjustment);
        const uint8x16_t v_neg_adjustment = vandq_u8(v_diff_neg_mask,
                                                     v_abs_adjustment);
        v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment);
        v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment);
        v_sum_diff = vqaddq_s8(v_sum_diff,
                               vreinterpretq_s8_u8(v_pos_adjustment));
        v_sum_diff = vqsubq_s8(v_sum_diff,
                               vreinterpretq_s8_u8(v_neg_adjustment));

        /* Store results. */
        vst1q_u8(running_avg_y, v_running_avg_y);

        /* Sum all the accumulators to have the sum of all pixel differences
         * for this macroblock.
         */
        {
            int s0 = vgetq_lane_s8(v_sum_diff, 0) +
                     vgetq_lane_s8(v_sum_diff, 1) +
                     vgetq_lane_s8(v_sum_diff, 2) +
                     vgetq_lane_s8(v_sum_diff, 3);
            int s1 = vgetq_lane_s8(v_sum_diff, 4) +
                     vgetq_lane_s8(v_sum_diff, 5) +
                     vgetq_lane_s8(v_sum_diff, 6) +
                     vgetq_lane_s8(v_sum_diff, 7);
            int s2 = vgetq_lane_s8(v_sum_diff, 8) +
                     vgetq_lane_s8(v_sum_diff, 9) +
                     vgetq_lane_s8(v_sum_diff, 10) +
                     vgetq_lane_s8(v_sum_diff, 11);
            int s3 = vgetq_lane_s8(v_sum_diff, 12) +
                     vgetq_lane_s8(v_sum_diff, 13) +
                     vgetq_lane_s8(v_sum_diff, 14) +
                     vgetq_lane_s8(v_sum_diff, 15);
            sum_diff += s0 + s1 + s2 + s3;
        }

        /* Update pointers for next iteration. */
        sig += sig_stride;
        mc_running_avg_y += mc_running_avg_y_stride;
        running_avg_y += running_avg_y_stride;
    }

    /* Too much adjustment => copy block. */
    if (abs(sum_diff) > SUM_DIFF_THRESHOLD)
        return COPY_BLOCK;

    /* Tell the above level that the block was filtered. */
    vp8_copy_mem16x16(running_avg->y_buffer + y_offset, running_avg_y_stride,
                      signal->thismb, sig_stride);
    return FILTER_BLOCK;
}
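/* Illustrative sketch, not part of the original source: the lane-by-lane
 * extraction of v_sum_diff above is straightforward but verbose. An
 * equivalent horizontal reduction can be written with pairwise widening adds;
 * the helper below is a hypothetical alternative, not the code the file uses.
 */
#include <arm_neon.h>

/* Horizontal sum of an int8x16_t accumulator; equivalent to the sixteen
 * vgetq_lane_s8() calls in the loop above. */
static int horizontal_sum_s8(int8x16_t v)
{
    const int16x8_t a = vpaddlq_s8(v);   /* 16 x s8  -> 8 x s16 */
    const int32x4_t b = vpaddlq_s16(a);  /*  8 x s16 -> 4 x s32 */
    const int64x2_t c = vpaddlq_s32(b);  /*  4 x s32 -> 2 x s64 */
    return (int)(vgetq_lane_s64(c, 0) + vgetq_lane_s64(c, 1));
}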
static THREAD_FUNCTION thread_encoding_proc(void *p_data)
{
    int ithread = ((ENCODETHREAD_DATA *)p_data)->ithread;
    VP8_COMP *cpi = (VP8_COMP *)(((ENCODETHREAD_DATA *)p_data)->ptr1);
    MB_ROW_COMP *mbri = (MB_ROW_COMP *)(((ENCODETHREAD_DATA *)p_data)->ptr2);
    ENTROPY_CONTEXT_PLANES mb_row_left_context;

    const int nsync = cpi->mt_sync_range;

    //printf("Started thread %d\n", ithread);

    while (1)
    {
        if (cpi->b_multi_threaded == 0)
            break;

        //if(WaitForSingleObject(cpi->h_event_mbrencoding[ithread], INFINITE) == WAIT_OBJECT_0)
        if (sem_wait(&cpi->h_event_start_encoding[ithread]) == 0)
        {
            VP8_COMMON *cm = &cpi->common;
            int mb_row;
            MACROBLOCK *x = &mbri->mb;
            MACROBLOCKD *xd = &x->e_mbd;
            TOKENEXTRA *tp;

            int *segment_counts = mbri->segment_counts;
            int *totalrate = &mbri->totalrate;

            if (cpi->b_multi_threaded == 0) // we're shutting down
                break;

            for (mb_row = ithread + 1; mb_row < cm->mb_rows;
                 mb_row += (cpi->encoding_thread_count + 1))
            {
                int recon_yoffset, recon_uvoffset;
                int mb_col;
                int ref_fb_idx = cm->lst_fb_idx;
                int dst_fb_idx = cm->new_fb_idx;
                int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
                int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
                int map_index = (mb_row * cm->mb_cols);
                volatile int *last_row_current_mb_col;

                tp = cpi->tok + (mb_row * (cm->mb_cols * 16 * 24));

                last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];

                // reset above block coeffs
                xd->above_context = cm->above_context;
                xd->left_context = &mb_row_left_context;
                vp8_zero(mb_row_left_context);

                xd->up_available = (mb_row != 0);
                recon_yoffset = (mb_row * recon_y_stride * 16);
                recon_uvoffset = (mb_row * recon_uv_stride * 8);

                cpi->tplist[mb_row].start = tp;

                //printf("Thread mb_row = %d\n", mb_row);

                // Set the mb activity pointer to the start of the row.
                x->mb_activity_ptr = &cpi->mb_activity_map[map_index];

                // for each macroblock col in image
                for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
                {
                    if ((mb_col & (nsync - 1)) == 0)
                    {
                        while (mb_col > (*last_row_current_mb_col - nsync) &&
                               *last_row_current_mb_col != cm->mb_cols - 1)
                        {
                            x86_pause_hint();
                            thread_sleep(0);
                        }
                    }

                    // Distance of MB to the various image edges.
                    // These are specified in 1/8th pel units as they are always
                    // compared to values that are in 1/8th pel units.
                    xd->mb_to_left_edge = -((mb_col * 16) << 3);
                    xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
                    xd->mb_to_top_edge = -((mb_row * 16) << 3);
                    xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;

                    // Set up limit values for motion vectors used to prevent
                    // them extending outside the UMV borders.
                    x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
                    x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);
                    x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
                    x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);

                    xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
                    xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
                    xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
                    xd->left_available = (mb_col != 0);

                    x->rddiv = cpi->RDDIV;
                    x->rdmult = cpi->RDMULT;

                    // Copy current mb to a buffer
                    vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);

                    if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
                        vp8_activity_masking(cpi, x);

                    // Is segmentation enabled
                    // MB level adjustment to quantizer
                    if (xd->segmentation_enabled)
                    {
                        // Code to set segment id in xd->mbmi.segment_id for
                        // current MB (with range checking)
                        if (cpi->segmentation_map[map_index + mb_col] <= 3)
                            xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[map_index + mb_col];
                        else
                            xd->mode_info_context->mbmi.segment_id = 0;

                        vp8cx_mb_init_quantizer(cpi, x, 1);
                    }
                    else
                        xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default

                    x->active_ptr = cpi->active_map + map_index + mb_col;

                    if (cm->frame_type == KEY_FRAME)
                    {
                        *totalrate += vp8cx_encode_intra_macro_block(cpi, x, &tp);
#ifdef MODE_STATS
                        y_modes[xd->mbmi.mode]++;
#endif
                    }
                    else
                    {
                        *totalrate += vp8cx_encode_inter_macroblock(cpi, x, &tp, recon_yoffset, recon_uvoffset);

#ifdef MODE_STATS
                        inter_y_modes[xd->mbmi.mode]++;

                        if (xd->mbmi.mode == SPLITMV)
                        {
                            int b;

                            for (b = 0; b < xd->mbmi.partition_count; b++)
                            {
                                inter_b_modes[x->partition->bmi[b].mode]++;
                            }
                        }
#endif
                        // Count of last ref frame 0,0 usage
                        if ((xd->mode_info_context->mbmi.mode == ZEROMV) &&
                            (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
                            cpi->inter_zz_count++;

                        // Special case code for cyclic refresh.
                        // If cyclic update is enabled, copy xd->mbmi.segment_id
                        // (which may have been updated based on mode during
                        // vp8cx_encode_inter_macroblock()) back into the global
                        // segmentation map.
                        if (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)
                        {
                            const MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
                            cpi->segmentation_map[map_index + mb_col] = mbmi->segment_id;

                            // If the block has been refreshed mark it as clean
                            // (the magnitude of the -ve influences how long it
                            // will be before we consider another refresh).
                            // Else if it was coded (last frame 0,0) and has not
                            // already been refreshed then mark it as a candidate
                            // for cleanup next time (marked 0), else mark it as
                            // dirty (1).
                            if (mbmi->segment_id)
                                cpi->cyclic_refresh_map[map_index + mb_col] = -1;
                            else if ((mbmi->mode == ZEROMV) && (mbmi->ref_frame == LAST_FRAME))
                            {
                                if (cpi->cyclic_refresh_map[map_index + mb_col] == 1)
                                    cpi->cyclic_refresh_map[map_index + mb_col] = 0;
                            }
                            else
                                cpi->cyclic_refresh_map[map_index + mb_col] = 1;
                        }
                    }

                    cpi->tplist[mb_row].stop = tp;

                    // Increment pointer into gf usage flags structure.
                    x->gf_active_ptr++;

                    // Increment the activity mask pointers.
                    x->mb_activity_ptr++;

                    // adjust to the next column of macroblocks
                    x->src.y_buffer += 16;
                    x->src.u_buffer += 8;
                    x->src.v_buffer += 8;

                    recon_yoffset += 16;
                    recon_uvoffset += 8;

                    // Keep track of segment usage
                    segment_counts[xd->mode_info_context->mbmi.segment_id]++;

                    // skip to next mb
                    xd->mode_info_context++;
                    x->partition_info++;
                    xd->above_context++;

                    cpi->mt_current_mb_col[mb_row] = mb_col;
                }

                // extend the recon for intra prediction
                vp8_extend_mb_row(&cm->yv12_fb[dst_fb_idx],
                                  xd->dst.y_buffer + 16,
                                  xd->dst.u_buffer + 8,
                                  xd->dst.v_buffer + 8);

                // this is to account for the border
                xd->mode_info_context++;
                x->partition_info++;

                x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols;
                x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
                x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;

                xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count;
                x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;
                x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count;

                if (mb_row == cm->mb_rows - 1)
                {
                    //SetEvent(cpi->h_event_main);
                    sem_post(&cpi->h_event_end_encoding); /* signal frame encoding end */
                }
            }
        }
    }

    //printf("exit thread %d\n", ithread);
    return 0;
}
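// Illustrative sketch, not part of the original source: both versions of
// thread_encoding_proc above embed the same cyclic-refresh bookkeeping.
// Pulled out on its own as a hypothetical helper (the name and signature are
// illustrative), the per-macroblock map update is:
static int cyclic_refresh_map_update(int segment_id, MB_PREDICTION_MODE mode,
                                     MV_REFERENCE_FRAME ref_frame,
                                     int prev_map_value)
{
    if (segment_id)
        return -1;                          // refreshed: mark as clean
    if (mode == ZEROMV && ref_frame == LAST_FRAME)
        // coded as last-frame 0,0: candidate for cleanup next time
        return (prev_map_value == 1) ? 0 : prev_map_value;
    return 1;                               // otherwise mark as dirty
}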
int vp8_denoiser_filter_sse2(YV12_BUFFER_CONFIG *mc_running_avg,
                             YV12_BUFFER_CONFIG *running_avg,
                             MACROBLOCK *signal, unsigned int motion_magnitude,
                             int y_offset, int uv_offset)
{
    unsigned char *sig = signal->thismb;
    int sig_stride = 16;
    unsigned char *mc_running_avg_y = mc_running_avg->y_buffer + y_offset;
    int mc_avg_y_stride = mc_running_avg->y_stride;
    unsigned char *running_avg_y = running_avg->y_buffer + y_offset;
    int avg_y_stride = running_avg->y_stride;
    int r;
    (void)uv_offset;
    __m128i acc_diff = _mm_setzero_si128();
    const __m128i k_0 = _mm_setzero_si128();
    const __m128i k_4 = _mm_set1_epi8(4);
    const __m128i k_8 = _mm_set1_epi8(8);
    const __m128i k_16 = _mm_set1_epi8(16);
    /* Modify each level's adjustment according to motion_magnitude. */
    const __m128i l3 = _mm_set1_epi8(
        (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 : 6);
    /* Difference between level 3 and level 2 is 2. */
    const __m128i l32 = _mm_set1_epi8(2);
    /* Difference between level 2 and level 1 is 1. */
    const __m128i l21 = _mm_set1_epi8(1);

    for (r = 0; r < 16; ++r)
    {
        /* Calculate differences */
        const __m128i v_sig = _mm_loadu_si128((__m128i *)(&sig[0]));
        const __m128i v_mc_running_avg_y = _mm_loadu_si128(
            (__m128i *)(&mc_running_avg_y[0]));
        __m128i v_running_avg_y;
        const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
        const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
        /* Obtain the sign. FF if diff is negative. */
        const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
        /* Clamp absolute difference to 16 to be used to get mask. Doing this
         * allows us to use _mm_cmpgt_epi8, which operates on signed byte.
         */
        const __m128i clamped_absdiff = _mm_min_epu8(
            _mm_or_si128(pdiff, ndiff), k_16);
        /* Get masks for l2 l1 and l0 adjustments */
        const __m128i mask2 = _mm_cmpgt_epi8(k_16, clamped_absdiff);
        const __m128i mask1 = _mm_cmpgt_epi8(k_8, clamped_absdiff);
        const __m128i mask0 = _mm_cmpgt_epi8(k_4, clamped_absdiff);
        /* Get adjustments for l2, l1, and l0 */
        __m128i adj2 = _mm_and_si128(mask2, l32);
        const __m128i adj1 = _mm_and_si128(mask1, l21);
        const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff);
        __m128i adj, padj, nadj;

        /* Combine the adjustments and get absolute adjustments. */
        adj2 = _mm_add_epi8(adj2, adj1);
        adj = _mm_sub_epi8(l3, adj2);
        adj = _mm_andnot_si128(mask0, adj);
        adj = _mm_or_si128(adj, adj0);

        /* Restore the sign and get positive and negative adjustments. */
        padj = _mm_andnot_si128(diff_sign, adj);
        nadj = _mm_and_si128(diff_sign, adj);

        /* Calculate filtered value. */
        v_running_avg_y = _mm_adds_epu8(v_sig, padj);
        v_running_avg_y = _mm_subs_epu8(v_running_avg_y, nadj);
        _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y);

        /* Adjustments <= 7, and each element in acc_diff can fit in a signed
         * char.
         */
        acc_diff = _mm_adds_epi8(acc_diff, padj);
        acc_diff = _mm_subs_epi8(acc_diff, nadj);

        /* Update pointers for next iteration. */
        sig += sig_stride;
        mc_running_avg_y += mc_avg_y_stride;
        running_avg_y += avg_y_stride;
    }

    {
        /* Compute the sum of all pixel differences of this MB. */
        union sum_union s;
        int sum_diff = 0;
        s.v = acc_diff;
        sum_diff = s.e[0] + s.e[1] + s.e[2] + s.e[3] + s.e[4] + s.e[5] +
                   s.e[6] + s.e[7] + s.e[8] + s.e[9] + s.e[10] + s.e[11] +
                   s.e[12] + s.e[13] + s.e[14] + s.e[15];

        if (abs(sum_diff) > SUM_DIFF_THRESHOLD)
        {
            return COPY_BLOCK;
        }
    }

    vp8_copy_mem16x16(running_avg->y_buffer + y_offset, avg_y_stride,
                      signal->thismb, sig_stride);
    return FILTER_BLOCK;
}
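/* Note: sum_union is not defined in this excerpt. It is presumably a 16-byte
 * overlay that lets the signed-saturating accumulator acc_diff be read back
 * lane by lane, along the lines of the sketch below (__m128i comes from
 * <emmintrin.h>); this definition is an assumption, not copied from the file.
 */
union sum_union
{
    __m128i v;
    signed char e[16];
};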