Example no. 1
static INLINE void vp8_mbloop_filter_neon(
        uint8x16_t qblimit,  // mblimit
        uint8x16_t qlimit,   // limit
        uint8x16_t qthresh,  // thresh
        uint8x16_t q3,       // p3
        uint8x16_t q4,       // p2
        uint8x16_t q5,       // p1
        uint8x16_t q6,       // p0
        uint8x16_t q7,       // q0
        uint8x16_t q8,       // q1
        uint8x16_t q9,       // q2
        uint8x16_t q10,      // q3
        uint8x16_t *q4r,     // p2
        uint8x16_t *q5r,     // p1
        uint8x16_t *q6r,     // p0
        uint8x16_t *q7r,     // q0
        uint8x16_t *q8r,     // q1
        uint8x16_t *q9r) {   // q2
    uint8x16_t q0u8, q1u8, q11u8, q12u8, q13u8, q14u8, q15u8;
    int16x8_t q0s16, q2s16, q11s16, q12s16, q13s16, q14s16, q15s16;
    int8x16_t q1s8, q6s8, q7s8, q2s8, q11s8, q13s8;
    uint16x8_t q0u16, q11u16, q12u16, q13u16, q14u16, q15u16;
    int8x16_t q0s8, q12s8, q14s8, q15s8;
    int8x8_t d0, d1, d2, d3, d4, d5, d24, d25, d28, d29;

    q11u8 = vabdq_u8(q3, q4);
    q12u8 = vabdq_u8(q4, q5);
    q13u8 = vabdq_u8(q5, q6);
    q14u8 = vabdq_u8(q8, q7);
    q1u8  = vabdq_u8(q9, q8);
    q0u8  = vabdq_u8(q10, q9);

    q11u8 = vmaxq_u8(q11u8, q12u8);
    q12u8 = vmaxq_u8(q13u8, q14u8);
    q1u8  = vmaxq_u8(q1u8, q0u8);
    q15u8 = vmaxq_u8(q11u8, q12u8);

    q12u8 = vabdq_u8(q6, q7);

    // vp8_hevmask
    q13u8 = vcgtq_u8(q13u8, qthresh);
    q14u8 = vcgtq_u8(q14u8, qthresh);
    q15u8 = vmaxq_u8(q15u8, q1u8);

    q15u8 = vcgeq_u8(qlimit, q15u8);
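    // q15u8: 0xFF lanes where every neighboring-pixel difference is <= limit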

    q1u8 = vabdq_u8(q5, q8);
    q12u8 = vqaddq_u8(q12u8, q12u8);

    // vp8_filter() function
    // convert to signed
    q0u8 = vdupq_n_u8(0x80);
    q9 = veorq_u8(q9, q0u8);
    q8 = veorq_u8(q8, q0u8);
    q7 = veorq_u8(q7, q0u8);
    q6 = veorq_u8(q6, q0u8);
    q5 = veorq_u8(q5, q0u8);
    q4 = veorq_u8(q4, q0u8);

    q1u8 = vshrq_n_u8(q1u8, 1);
    q12u8 = vqaddq_u8(q12u8, q1u8);

    q14u8 = vorrq_u8(q13u8, q14u8);
    q12u8 = vcgeq_u8(qblimit, q12u8);
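    // q14u8: high edge variance (hev) mask; q12u8: 0xFF lanes where
    // 2 * |p0 - q0| + |p1 - q1| / 2 <= blimit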

    q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
                     vget_low_s8(vreinterpretq_s8_u8(q6)));
    q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
                      vget_high_s8(vreinterpretq_s8_u8(q6)));

    q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5),
                     vreinterpretq_s8_u8(q8));

    q11s16 = vdupq_n_s16(3);
    q2s16  = vmulq_s16(q2s16, q11s16);
    q13s16 = vmulq_s16(q13s16, q11s16);

    q15u8 = vandq_u8(q15u8, q12u8);
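    // q15u8: final filter mask (limit and blimit checks combined)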

    q2s16  = vaddw_s8(q2s16, vget_low_s8(q1s8));
    q13s16 = vaddw_s8(q13s16, vget_high_s8(q1s8));

    q12u8 = vdupq_n_u8(3);
    q11u8 = vdupq_n_u8(4);
    // vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0))
    d2 = vqmovn_s16(q2s16);
    d3 = vqmovn_s16(q13s16);
    q1s8 = vcombine_s8(d2, d3);
    q1s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q15u8));
    q13s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q14u8));

    q2s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q11u8));
    q13s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q12u8));
    q2s8 = vshrq_n_s8(q2s8, 3);
    q13s8 = vshrq_n_s8(q13s8, 3);
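    // q2s8 = Filter1 = (filter + 4) >> 3 and q13s8 = Filter2 = (filter + 3) >> 3,
    // both computed from the hev-masked filter value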

    q7s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q2s8);
    q6s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q13s8);

    q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8));

    q0u16 = q11u16 = q12u16 = q13u16 = q14u16 = q15u16 = vdupq_n_u16(63);
    d5 = vdup_n_s8(9);
    d4 = vdup_n_s8(18);
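    // Outer-tap adjustments below: (9 * filter + 63) >> 7 for p2/q2,
    // (18 * filter + 63) >> 7 for p1/q1 and (27 * filter + 63) >> 7 for p0/q0.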

    q0s16  = vmlal_s8(vreinterpretq_s16_u16(q0u16),  vget_low_s8(q1s8),  d5);
    q11s16 = vmlal_s8(vreinterpretq_s16_u16(q11u16), vget_high_s8(q1s8), d5);
    d5 = vdup_n_s8(27);
    q12s16 = vmlal_s8(vreinterpretq_s16_u16(q12u16), vget_low_s8(q1s8),  d4);
    q13s16 = vmlal_s8(vreinterpretq_s16_u16(q13u16), vget_high_s8(q1s8), d4);
    q14s16 = vmlal_s8(vreinterpretq_s16_u16(q14u16), vget_low_s8(q1s8),  d5);
    q15s16 = vmlal_s8(vreinterpretq_s16_u16(q15u16), vget_high_s8(q1s8), d5);

    d0  = vqshrn_n_s16(q0s16, 7);
    d1  = vqshrn_n_s16(q11s16, 7);
    d24 = vqshrn_n_s16(q12s16, 7);
    d25 = vqshrn_n_s16(q13s16, 7);
    d28 = vqshrn_n_s16(q14s16, 7);
    d29 = vqshrn_n_s16(q15s16, 7);

    q0s8  = vcombine_s8(d0, d1);
    q12s8 = vcombine_s8(d24, d25);
    q14s8 = vcombine_s8(d28, d29);

    q11s8 = vqsubq_s8(vreinterpretq_s8_u8(q9), q0s8);
    q0s8  = vqaddq_s8(vreinterpretq_s8_u8(q4), q0s8);
    q13s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q12s8);
    q12s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q12s8);
    q15s8 = vqsubq_s8(q7s8, q14s8);
    q14s8 = vqaddq_s8(q6s8, q14s8);

    q1u8 = vdupq_n_u8(0x80);
    *q9r = veorq_u8(vreinterpretq_u8_s8(q11s8), q1u8);
    *q8r = veorq_u8(vreinterpretq_u8_s8(q13s8), q1u8);
    *q7r = veorq_u8(vreinterpretq_u8_s8(q15s8), q1u8);
    *q6r = veorq_u8(vreinterpretq_u8_s8(q14s8), q1u8);
    *q5r = veorq_u8(vreinterpretq_u8_s8(q12s8), q1u8);
    *q4r = veorq_u8(vreinterpretq_u8_s8(q0s8), q1u8);
    return;
}
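
For orientation, a horizontal-edge wrapper in the style of libvpx's vp8_mbloop_filter_horizontal_edge_y_neon would broadcast the thresholds, load the eight rows straddling the edge, call the helper above and store the six filtered rows back. The following sketch only illustrates that calling pattern; the function name and layout are assumptions, not the upstream wrapper.

// Illustrative sketch (not the upstream libvpx wrapper): filter one
// 16-pixel-wide horizontal macroblock edge using vp8_mbloop_filter_neon.
static void mbloop_filter_horizontal_edge_sketch(unsigned char *src, int pitch,
                                                 unsigned char blimit,
                                                 unsigned char limit,
                                                 unsigned char thresh) {
    uint8x16_t qblimit, qlimit, qthresh;
    uint8x16_t q3, q4, q5, q6, q7, q8, q9, q10;
    uint8x16_t p2, p1, p0, q0n, q1n, q2n;
    unsigned char *s = src - 4 * pitch;

    qblimit = vdupq_n_u8(blimit);
    qlimit  = vdupq_n_u8(limit);
    qthresh = vdupq_n_u8(thresh);

    q3  = vld1q_u8(s); s += pitch;  // p3
    q4  = vld1q_u8(s); s += pitch;  // p2
    q5  = vld1q_u8(s); s += pitch;  // p1
    q6  = vld1q_u8(s); s += pitch;  // p0
    q7  = vld1q_u8(s); s += pitch;  // q0
    q8  = vld1q_u8(s); s += pitch;  // q1
    q9  = vld1q_u8(s); s += pitch;  // q2
    q10 = vld1q_u8(s);              // q3

    vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8,
                           q9, q10, &p2, &p1, &p0, &q0n, &q1n, &q2n);

    // Write back the six modified rows (p2..q2).
    s = src - 3 * pitch;
    vst1q_u8(s, p2);  s += pitch;
    vst1q_u8(s, p1);  s += pitch;
    vst1q_u8(s, p0);  s += pitch;
    vst1q_u8(s, q0n); s += pitch;
    vst1q_u8(s, q1n); s += pitch;
    vst1q_u8(s, q2n);
}
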
static INLINE void vp8_loop_filter_simple_vertical_edge_neon(
    unsigned char *s, int p, const unsigned char *blimit) {
  unsigned char *src1;
  uint8x16_t qblimit, q0u8;
  uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q11u8, q12u8, q14u8, q15u8;
  int16x8_t q2s16, q13s16, q11s16;
  int8x8_t d28s8, d29s8;
  int8x16_t q2s8, q3s8, q10s8, q11s8, q14s8;
  uint8x8x4_t d0u8x4;  // d6, d7, d8, d9
  uint8x8x4_t d1u8x4;  // d10, d11, d12, d13
  uint8x8x2_t d2u8x2;  // d12, d14
  uint8x8x2_t d3u8x2;  // d13, d15

  qblimit = vdupq_n_u8(*blimit);

  src1 = s - 2;
  d0u8x4 = read_4x8(src1, p);
  src1 += p * 8;
  d1u8x4 = read_4x8(src1, p);

  q3u8 = vcombine_u8(d0u8x4.val[0], d1u8x4.val[0]);  // d6 d10
  q4u8 = vcombine_u8(d0u8x4.val[2], d1u8x4.val[2]);  // d8 d12
  q5u8 = vcombine_u8(d0u8x4.val[1], d1u8x4.val[1]);  // d7 d11
  q6u8 = vcombine_u8(d0u8x4.val[3], d1u8x4.val[3]);  // d9 d13

  q15u8 = vabdq_u8(q5u8, q4u8);
  q14u8 = vabdq_u8(q3u8, q6u8);

  q15u8 = vqaddq_u8(q15u8, q15u8);
  q14u8 = vshrq_n_u8(q14u8, 1);
  q0u8 = vdupq_n_u8(0x80);
  q11s16 = vdupq_n_s16(3);
  q15u8 = vqaddq_u8(q15u8, q14u8);

  q3u8 = veorq_u8(q3u8, q0u8);
  q4u8 = veorq_u8(q4u8, q0u8);
  q5u8 = veorq_u8(q5u8, q0u8);
  q6u8 = veorq_u8(q6u8, q0u8);

  q15u8 = vcgeq_u8(qblimit, q15u8);
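  // q15u8: 0xFF lanes where 2 * |p0 - q0| + |p1 - q1| / 2 <= blimit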

  q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q4u8)),
                   vget_low_s8(vreinterpretq_s8_u8(q5u8)));
  q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q4u8)),
                    vget_high_s8(vreinterpretq_s8_u8(q5u8)));

  q14s8 = vqsubq_s8(vreinterpretq_s8_u8(q3u8), vreinterpretq_s8_u8(q6u8));

  q2s16 = vmulq_s16(q2s16, q11s16);
  q13s16 = vmulq_s16(q13s16, q11s16);

  q11u8 = vdupq_n_u8(3);
  q12u8 = vdupq_n_u8(4);

  q2s16 = vaddw_s8(q2s16, vget_low_s8(q14s8));
  q13s16 = vaddw_s8(q13s16, vget_high_s8(q14s8));

  d28s8 = vqmovn_s16(q2s16);
  d29s8 = vqmovn_s16(q13s16);
  q14s8 = vcombine_s8(d28s8, d29s8);

  q14s8 = vandq_s8(q14s8, vreinterpretq_s8_u8(q15u8));

  q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q11u8));
  q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q12u8));
  q2s8 = vshrq_n_s8(q2s8, 3);
  q14s8 = vshrq_n_s8(q3s8, 3);
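  // q2s8 = Filter2 = (filter + 3) >> 3, q14s8 = Filter1 = (filter + 4) >> 3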

  q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q5u8), q2s8);
  q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q4u8), q14s8);

  q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
  q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);

  d2u8x2.val[0] = vget_low_u8(q6u8);   // d12
  d2u8x2.val[1] = vget_low_u8(q7u8);   // d14
  d3u8x2.val[0] = vget_high_u8(q6u8);  // d13
  d3u8x2.val[1] = vget_high_u8(q7u8);  // d15

  src1 = s - 1;
  write_2x8(src1, p, d2u8x2, d3u8x2);
}
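
The function above vectorizes VP8's "simple" loop filter across a vertical edge: only p0 and q0 are modified. As a cross-check, a scalar sketch of the same per-column arithmetic is shown below; the helper names are illustrative, not the upstream C code.

/* Scalar sketch of the simple filter for one column (p1, p0, q0, q1).
 * clamp_s8() saturates to [-128, 127], mirroring the NEON saturating ops. */
static signed char clamp_s8(int v) {
  return (signed char)(v > 127 ? 127 : (v < -128 ? -128 : v));
}

static void simple_filter_sketch(unsigned char *op1, unsigned char *op0,
                                 unsigned char *oq0, unsigned char *oq1,
                                 unsigned char blimit) {
  /* Convert to signed by flipping the sign bit, as the NEON code does. */
  signed char p1 = (signed char)(*op1 ^ 0x80);
  signed char p0 = (signed char)(*op0 ^ 0x80);
  signed char q0 = (signed char)(*oq0 ^ 0x80);
  signed char q1 = (signed char)(*oq1 ^ 0x80);
  int mask = (abs(*op0 - *oq0) * 2 + abs(*op1 - *oq1) / 2) <= blimit;
  signed char filter, Filter1, Filter2;

  filter = clamp_s8(3 * (q0 - p0) + clamp_s8(p1 - q1));
  if (!mask) filter = 0;  /* edge too strong: leave the pixels untouched */

  Filter1 = clamp_s8(filter + 4) >> 3;  /* subtracted from q0 */
  Filter2 = clamp_s8(filter + 3) >> 3;  /* added to p0 */

  *oq0 = (unsigned char)(clamp_s8(q0 - Filter1) ^ 0x80);
  *op0 = (unsigned char)(clamp_s8(p0 + Filter2) ^ 0x80);
}
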
Example no. 3
int vp8_denoiser_filter_neon(YV12_BUFFER_CONFIG *mc_running_avg,
                             YV12_BUFFER_CONFIG *running_avg,
                             MACROBLOCK *signal, unsigned int motion_magnitude,
                             int y_offset, int uv_offset) {
    /* If motion_magnitude is small, make the denoiser more aggressive by
     * increasing the adjustment for each level: the level 1 base adjustment
     * is raised while the level deltas stay the same.
     */
    const uint8x16_t v_level1_adjustment = vdupq_n_u8(
        (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 4 : 3);
    const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
    const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
    const uint8x16_t v_level1_threshold = vdupq_n_u8(4);
    const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
    const uint8x16_t v_level3_threshold = vdupq_n_u8(16);

    /* Local variables for array pointers and strides. */
    unsigned char *sig = signal->thismb;
    int            sig_stride = 16;
    unsigned char *mc_running_avg_y = mc_running_avg->y_buffer + y_offset;
    int            mc_running_avg_y_stride = mc_running_avg->y_stride;
    unsigned char *running_avg_y = running_avg->y_buffer + y_offset;
    int            running_avg_y_stride = running_avg->y_stride;

    /* Go over lines. */
    int i;
    int sum_diff = 0;
    for (i = 0; i < 16; ++i) {
        int8x16_t v_sum_diff = vdupq_n_s8(0);
        uint8x16_t v_running_avg_y;

        /* Load inputs. */
        const uint8x16_t v_sig = vld1q_u8(sig);
        const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);

        /* Calculate absolute difference and sign masks. */
        const uint8x16_t v_abs_diff      = vabdq_u8(v_sig, v_mc_running_avg_y);
        const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
        const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);

        /* Figure out which level the absolute difference puts us in. */
        const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold,
                                                  v_abs_diff);
        const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold,
                                                  v_abs_diff);
        const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold,
                                                  v_abs_diff);

        /* Calculate absolute adjustments for level 1, 2 and 3. */
        const uint8x16_t v_level2_adjustment = vandq_u8(v_level2_mask,
                                                        v_delta_level_1_and_2);
        const uint8x16_t v_level3_adjustment = vandq_u8(v_level3_mask,
                                                        v_delta_level_2_and_3);
        const uint8x16_t v_level1and2_adjustment = vaddq_u8(v_level1_adjustment,
            v_level2_adjustment);
        const uint8x16_t v_level1and2and3_adjustment = vaddq_u8(
            v_level1and2_adjustment, v_level3_adjustment);

        /* Pick the adjustment's absolute value: the absolute difference itself
         * in level 0, otherwise the combined value for levels 1, 2 and 3.
         */
        const uint8x16_t v_abs_adjustment = vbslq_u8(v_level1_mask,
            v_level1and2and3_adjustment, v_abs_diff);

        /* Calculate positive and negative adjustments, apply them to the
         * signal and accumulate them. Each adjustment is less than eight, so
         * the saturating signed-char accumulation below cannot overflow.
         */
        const uint8x16_t v_pos_adjustment = vandq_u8(v_diff_pos_mask,
                                                     v_abs_adjustment);
        const uint8x16_t v_neg_adjustment = vandq_u8(v_diff_neg_mask,
                                                     v_abs_adjustment);
        v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment);
        v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment);
        v_sum_diff = vqaddq_s8(v_sum_diff, vreinterpretq_s8_u8(v_pos_adjustment));
        v_sum_diff = vqsubq_s8(v_sum_diff, vreinterpretq_s8_u8(v_neg_adjustment));

        /* Store results. */
        vst1q_u8(running_avg_y, v_running_avg_y);

        /* Sum all the accumulators to have the sum of all pixel differences
         * for this macroblock.
         */
        {
            int s0 = vgetq_lane_s8(v_sum_diff,  0) +
                     vgetq_lane_s8(v_sum_diff,  1) +
                     vgetq_lane_s8(v_sum_diff,  2) +
                     vgetq_lane_s8(v_sum_diff,  3);
            int s1 = vgetq_lane_s8(v_sum_diff,  4) +
                     vgetq_lane_s8(v_sum_diff,  5) +
                     vgetq_lane_s8(v_sum_diff,  6) +
                     vgetq_lane_s8(v_sum_diff,  7);
            int s2 = vgetq_lane_s8(v_sum_diff,  8) +
                     vgetq_lane_s8(v_sum_diff,  9) +
                     vgetq_lane_s8(v_sum_diff, 10) +
                     vgetq_lane_s8(v_sum_diff, 11);
            int s3 = vgetq_lane_s8(v_sum_diff, 12) +
                     vgetq_lane_s8(v_sum_diff, 13) +
                     vgetq_lane_s8(v_sum_diff, 14) +
                     vgetq_lane_s8(v_sum_diff, 15);
            sum_diff += s0 + s1 + s2 + s3;
        }

        /* Update pointers for next iteration. */
        sig += sig_stride;
        mc_running_avg_y += mc_running_avg_y_stride;
        running_avg_y += running_avg_y_stride;
    }

    /* Too many adjustments => copy the block. */
    if (abs(sum_diff) > SUM_DIFF_THRESHOLD)
        return COPY_BLOCK;

    /* Tell the level above that the block was filtered. */
    vp8_copy_mem16x16(running_avg->y_buffer + y_offset, running_avg_y_stride,
                      signal->thismb, sig_stride);
    return FILTER_BLOCK;
}
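
For reference, the per-pixel rule the vector code above implements can be written as a scalar sketch. The helper name is illustrative; level1_adj corresponds to the broadcast v_level1_adjustment (4 when motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD, otherwise 3).

/* Scalar sketch of the per-pixel adjustment computed by the NEON loop above.
 * Returns the signed adjustment; the caller would add it to sig with
 * unsigned saturation, as vqaddq_u8/vqsubq_u8 do in the vector code. */
static int denoiser_adjustment_sketch(int sig, int mc_avg, int level1_adj) {
    int diff = mc_avg - sig;
    int absdiff = abs(diff);
    int adjustment;

    if (absdiff < 4)            /* level 0: take the difference directly */
        adjustment = absdiff;
    else if (absdiff < 8)       /* level 1 */
        adjustment = level1_adj;
    else if (absdiff < 16)      /* level 2: level 1 plus delta 1 */
        adjustment = level1_adj + 1;
    else                        /* level 3: level 2 plus delta 2 */
        adjustment = level1_adj + 3;

    return diff > 0 ? adjustment : -adjustment;
}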