Ejemplo n.º 1
0
// Converts 16 packed ARGB pixels at a time into 8 U and 8 V samples by
// averaging horizontal pairs of pixels (2x1 chroma downsampling).
// When 'do_store' is non-zero the results are written directly; otherwise
// they are rounding-averaged with the values already present in u[]/v[]
// (used to complete a 2x2 average across two source rows).
// The scalar fallback handles the trailing (src_width % 16) pixels.
static void ConvertARGBToUV_NEON(const uint32_t* argb, uint8_t* u, uint8_t* v,
                                 int src_width, int do_store) {
  int i;
  for (i = 0; i + 16 <= src_width; i += 16, u += 8, v += 8) {
    // De-interleave the four byte channels of 16 packed 32-bit pixels.
    // NOTE(review): channel mapping (val[0]=B ... val[2]=R) assumes a
    // little-endian in-memory pixel layout — confirm against the caller.
    const uint8x16x4_t RGB = vld4q_u8((const uint8_t*)&argb[i]);
    const uint16x8_t R = vpaddlq_u8(RGB.val[2]);  // pair-wise adds
    const uint16x8_t G = vpaddlq_u8(RGB.val[1]);
    const uint16x8_t B = vpaddlq_u8(RGB.val[0]);
    int16x8_t U_tmp, V_tmp;
    // Fixed-point RGB->UV. The rounding argument '1' accounts for the
    // extra factor of two introduced by the pairwise additions above.
    CONVERT_RGB_TO_UV(R, G, B, 1, U_tmp, V_tmp);
    {
      // Narrow with rounding (effectively >> 1) and saturate to uint8.
      const uint8x8_t U = vqrshrun_n_s16(U_tmp, 1);
      const uint8x8_t V = vqrshrun_n_s16(V_tmp, 1);
      if (do_store) {
        vst1_u8(u, U);
        vst1_u8(v, V);
      } else {
        // Rounding halving-add with the previously stored row's values.
        const uint8x8_t prev_u = vld1_u8(u);
        const uint8x8_t prev_v = vld1_u8(v);
        vst1_u8(u, vrhadd_u8(U, prev_u));
        vst1_u8(v, vrhadd_u8(V, prev_v));
      }
    }
  }
  if (i < src_width) {  // left-over
    WebPConvertARGBToUV_C(argb + i, u, v, src_width - i, do_store);
  }
}
Ejemplo n.º 2
0
// Converts packed 16-bit RGBA samples (4 values per pixel) into 8-bit U/V,
// 8 pixels per NEON iteration, with a scalar loop for the remainder.
// NOTE(review): the widened inputs appear to be pre-accumulated sums
// (hence the rounding shift of 2 when narrowing, and YUV_HALF << 2 in the
// scalar path) — confirm against the caller's accumulation.
static void ConvertRGBA32ToUV_NEON(const uint16_t* rgb,
                                   uint8_t* u, uint8_t* v, int width) {
  int i;
  for (i = 0; i + 8 <= width; i += 8, rgb += 4 * 8) {
    // De-interleave R, G, B, A channels; A (val[3]) is loaded but unused.
    const uint16x8x4_t RGB = vld4q_u16((const uint16_t*)rgb);
    int16x8_t U, V;
    CONVERT_RGB_TO_UV(RGB.val[0], RGB.val[1], RGB.val[2], 2, U, V);
    // Narrow with rounding (>> 2) and saturate to [0, 255].
    vst1_u8(u + i, vqrshrun_n_s16(U, 2));
    vst1_u8(v + i, vqrshrun_n_s16(V, 2));
  }
  for (; i < width; i += 1, rgb += 4) {  // scalar left-over
    const int r = rgb[0], g = rgb[1], b = rgb[2];
    u[i] = VP8RGBToU(r, g, b, YUV_HALF << 2);
    v[i] = VP8RGBToV(r, g, b, YUV_HALF << 2);
  }
}
Ejemplo n.º 3
0
/* Compile-only check of the vqrshrun_n_s16 intrinsic: verifies that the
   int16x8_t -> uint8x8_t saturating rounding narrowing shift accepts an
   immediate shift count of 1 and yields the expected vector type.  The
   source operand is deliberately left uninitialized and the result is
   never inspected — only successful compilation matters. */
void test_vqRshrun_ns16 (void)
{
  int16x8_t wide_input;
  uint8x8_t narrowed;

  narrowed = vqrshrun_n_s16 (wide_input, 1);
}
Ejemplo n.º 4
0
// Vertical pass of a scaled convolution for a 4-pixel-wide column.
// For each of the 'h' output rows, the fractional phase (y_q4 & SUBPEL_MASK)
// selects an 8-tap filter applied across 8 consecutive source rows; a zero
// phase means the filter is a pure pass-through, so the center source row
// is copied directly. y_q4 advances by y_step_q4 per row, allowing
// arbitrary vertical scaling ratios.
static INLINE void scaledconvolve_vert_w4(
    const uint8_t *src, const ptrdiff_t src_stride, uint8_t *dst,
    const ptrdiff_t dst_stride, const InterpKernel *const y_filters,
    const int y0_q4, const int y_step_q4, const int w, const int h) {
  int y;
  int y_q4 = y0_q4;

  // Step back so the 8-tap window is centered on the output position.
  src -= src_stride * (SUBPEL_TAPS / 2 - 1);
  y = h;
  do {
    const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];

    if (y_q4 & SUBPEL_MASK) {
      const int16x8_t filters = vld1q_s16(y_filters[y_q4 & SUBPEL_MASK]);
      // The two middle taps are broadcast separately for convolve8_4.
      const int16x4_t filter3 = vdup_lane_s16(vget_low_s16(filters), 3);
      const int16x4_t filter4 = vdup_lane_s16(vget_high_s16(filters), 0);
      uint8x8_t s[8], d;
      int16x4_t t[8], tt;

      load_u8_8x8(src_y, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5],
                  &s[6], &s[7]);
      // Widen each 8-bit row to signed 16-bit; only the low 4 lanes are used.
      t[0] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[0])));
      t[1] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[1])));
      t[2] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[2])));
      t[3] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[3])));
      t[4] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[4])));
      t[5] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[5])));
      t[6] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[6])));
      t[7] = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(s[7])));

      tt = convolve8_4(t[0], t[1], t[2], t[3], t[4], t[5], t[6], t[7], filters,
                       filter3, filter4);
      // Round-shift by FILTER_BITS (7) with saturation, then store 4 bytes.
      d = vqrshrun_n_s16(vcombine_s16(tt, tt), 7);
      vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d), 0);
    } else {
      // Phase 0: identity filter — copy the center (4th) source row.
      memcpy(dst, &src_y[3 * src_stride], w);
    }

    dst += dst_stride;
    y_q4 += y_step_q4;
  } while (--y);
}
Ejemplo n.º 5
0
// Horizontal pass of a scaled convolution for 4-pixel-wide groups.
// Output is produced in 4x4 tiles: four horizontal positions (advancing
// x_q4 by x_step_q4 each) are filtered for four rows at once into a small
// aligned 'temp' buffer, then de-interleaved back out to 'dst' with vld4.
// A zero fractional phase (x_q4 & SUBPEL_MASK) means the filter is a
// pass-through, so the center source column is copied directly.
static INLINE void scaledconvolve_horiz_w4(
    const uint8_t *src, const ptrdiff_t src_stride, uint8_t *dst,
    const ptrdiff_t dst_stride, const InterpKernel *const x_filters,
    const int x0_q4, const int x_step_q4, const int w, const int h) {
  DECLARE_ALIGNED(16, uint8_t, temp[4 * 4]);
  int x, y, z;

  // Step back so the 8-tap window is centered on the output position.
  src -= SUBPEL_TAPS / 2 - 1;

  y = h;
  do {
    int x_q4 = x0_q4;
    x = 0;
    do {
      // process 4 src_x steps
      for (z = 0; z < 4; ++z) {
        const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
        if (x_q4 & SUBPEL_MASK) {
          const int16x8_t filters = vld1q_s16(x_filters[x_q4 & SUBPEL_MASK]);
          // The two middle taps are broadcast separately for convolve8_4.
          const int16x4_t filter3 = vdup_lane_s16(vget_low_s16(filters), 3);
          const int16x4_t filter4 = vdup_lane_s16(vget_high_s16(filters), 0);
          uint8x8_t s[8], d;
          int16x8_t ss[4];
          int16x4_t t[8], tt;

          // Load 4 rows of 8 pixels, then transpose so each t[] vector
          // holds one horizontal tap position across the 4 rows.
          load_u8_8x4(src_x, src_stride, &s[0], &s[1], &s[2], &s[3]);
          transpose_u8_8x4(&s[0], &s[1], &s[2], &s[3]);

          ss[0] = vreinterpretq_s16_u16(vmovl_u8(s[0]));
          ss[1] = vreinterpretq_s16_u16(vmovl_u8(s[1]));
          ss[2] = vreinterpretq_s16_u16(vmovl_u8(s[2]));
          ss[3] = vreinterpretq_s16_u16(vmovl_u8(s[3]));
          t[0] = vget_low_s16(ss[0]);
          t[1] = vget_low_s16(ss[1]);
          t[2] = vget_low_s16(ss[2]);
          t[3] = vget_low_s16(ss[3]);
          t[4] = vget_high_s16(ss[0]);
          t[5] = vget_high_s16(ss[1]);
          t[6] = vget_high_s16(ss[2]);
          t[7] = vget_high_s16(ss[3]);

          tt = convolve8_4(t[0], t[1], t[2], t[3], t[4], t[5], t[6], t[7],
                           filters, filter3, filter4);
          // Round-shift by FILTER_BITS (7) with saturation; stash the 4
          // results for this x position in the transposed temp buffer.
          d = vqrshrun_n_s16(vcombine_s16(tt, tt), 7);
          vst1_lane_u32((uint32_t *)&temp[4 * z], vreinterpret_u32_u8(d), 0);
        } else {
          // Phase 0: identity filter — copy the center (4th) column.
          int i;
          for (i = 0; i < 4; ++i) {
            temp[z * 4 + i] = src_x[i * src_stride + 3];
          }
        }
        x_q4 += x_step_q4;
      }

      // transpose the 4x4 filters values back to dst
      {
        const uint8x8x4_t d4 = vld4_u8(temp);
        vst1_lane_u32((uint32_t *)&dst[x + 0 * dst_stride],
                      vreinterpret_u32_u8(d4.val[0]), 0);
        vst1_lane_u32((uint32_t *)&dst[x + 1 * dst_stride],
                      vreinterpret_u32_u8(d4.val[1]), 0);
        vst1_lane_u32((uint32_t *)&dst[x + 2 * dst_stride],
                      vreinterpret_u32_u8(d4.val[2]), 0);
        vst1_lane_u32((uint32_t *)&dst[x + 3 * dst_stride],
                      vreinterpret_u32_u8(d4.val[3]), 0);
      }
      x += 4;
    } while (x < w);

    src += src_stride * 4;
    dst += dst_stride * 4;
    y -= 4;
  } while (y > 0);
}
Ejemplo n.º 6
0
// 16x16 six-tap sub-pixel prediction (VP8).
// Applies the VP8 6-tap interpolation filter horizontally (selected by
// 'xoffset') and/or vertically (selected by 'yoffset').  In the general
// case the horizontal pass filters 21 source rows (16 output rows plus 5
// rows of filter context) into the intermediate buffer 'tmp'
// (16 cols x 21 rows = 336 bytes), and the vertical pass then filters
// 'tmp' into 'dst_ptr'.  When one offset is zero, the corresponding pass
// is skipped entirely.  Filter taps are loaded as signed bytes; their
// absolute values are used with vmlal (positive taps) / vmlsl (negative
// taps) so all multiplies stay in unsigned 8x8->16 form.
void vp8_sixtap_predict16x16_neon(
    unsigned char *src_ptr,
    int src_pixels_per_line,
    int xoffset,
    int yoffset,
    unsigned char *dst_ptr,
    int dst_pitch) {
    unsigned char *src, *src_tmp, *dst, *tmpp;
    unsigned char tmp[336];
    int i, j;
    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8;
    uint8x8_t d10u8, d11u8, d12u8, d13u8, d14u8, d15u8, d18u8, d19u8;
    uint8x8_t d20u8, d21u8, d22u8, d23u8, d24u8, d25u8, d26u8, d27u8;
    uint8x8_t d28u8, d29u8, d30u8, d31u8;
    int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8;
    uint8x16_t q3u8, q4u8;
    uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16, q8u16, q9u16, q10u16;
    uint16x8_t q11u16, q12u16, q13u16, q15u16;
    int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16, q8s16, q9s16, q10s16;
    int16x8_t q11s16, q12s16, q13s16, q15s16;

    if (xoffset == 0) {  // secondpass_filter16x16_only: vertical filter only
        // load second_pass filter
        dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
        d0s8 = vdup_lane_s8(dtmps8, 0);
        d1s8 = vdup_lane_s8(dtmps8, 1);
        d2s8 = vdup_lane_s8(dtmps8, 2);
        d3s8 = vdup_lane_s8(dtmps8, 3);
        d4s8 = vdup_lane_s8(dtmps8, 4);
        d5s8 = vdup_lane_s8(dtmps8, 5);
        d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
        d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
        d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
        d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
        d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
        d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));

        // load src data
        // Process the 16-wide block as two 8-wide halves (i = 0, 1),
        // starting 2 rows above for filter context.
        src_tmp = src_ptr - src_pixels_per_line * 2;
        for (i = 0; i < 2; i++) {
            src = src_tmp + i * 8;
            dst = dst_ptr + i * 8;
            d18u8 = vld1_u8(src);
            src += src_pixels_per_line;
            d19u8 = vld1_u8(src);
            src += src_pixels_per_line;
            d20u8 = vld1_u8(src);
            src += src_pixels_per_line;
            d21u8 = vld1_u8(src);
            src += src_pixels_per_line;
            d22u8 = vld1_u8(src);
            src += src_pixels_per_line;
            for (j = 0; j < 4; j++) {  // 4 output rows per iteration
                d23u8 = vld1_u8(src);
                src += src_pixels_per_line;
                d24u8 = vld1_u8(src);
                src += src_pixels_per_line;
                d25u8 = vld1_u8(src);
                src += src_pixels_per_line;
                d26u8 = vld1_u8(src);
                src += src_pixels_per_line;

                // Apply the six taps: vmull/vmlal for positive taps,
                // vmlsl for negative taps (|tap| values in d0u8..d5u8).
                q3u16 = vmull_u8(d18u8, d0u8);
                q4u16 = vmull_u8(d19u8, d0u8);
                q5u16 = vmull_u8(d20u8, d0u8);
                q6u16 = vmull_u8(d21u8, d0u8);

                q3u16 = vmlsl_u8(q3u16, d19u8, d1u8);
                q4u16 = vmlsl_u8(q4u16, d20u8, d1u8);
                q5u16 = vmlsl_u8(q5u16, d21u8, d1u8);
                q6u16 = vmlsl_u8(q6u16, d22u8, d1u8);

                q3u16 = vmlsl_u8(q3u16, d22u8, d4u8);
                q4u16 = vmlsl_u8(q4u16, d23u8, d4u8);
                q5u16 = vmlsl_u8(q5u16, d24u8, d4u8);
                q6u16 = vmlsl_u8(q6u16, d25u8, d4u8);

                q3u16 = vmlal_u8(q3u16, d20u8, d2u8);
                q4u16 = vmlal_u8(q4u16, d21u8, d2u8);
                q5u16 = vmlal_u8(q5u16, d22u8, d2u8);
                q6u16 = vmlal_u8(q6u16, d23u8, d2u8);

                q3u16 = vmlal_u8(q3u16, d23u8, d5u8);
                q4u16 = vmlal_u8(q4u16, d24u8, d5u8);
                q5u16 = vmlal_u8(q5u16, d25u8, d5u8);
                q6u16 = vmlal_u8(q6u16, d26u8, d5u8);

                // Center tap accumulated separately in signed domain so
                // the saturating add below can clamp intermediate overflow.
                q7u16 = vmull_u8(d21u8, d3u8);
                q8u16 = vmull_u8(d22u8, d3u8);
                q9u16 = vmull_u8(d23u8, d3u8);
                q10u16 = vmull_u8(d24u8, d3u8);

                q3s16 = vreinterpretq_s16_u16(q3u16);
                q4s16 = vreinterpretq_s16_u16(q4u16);
                q5s16 = vreinterpretq_s16_u16(q5u16);
                q6s16 = vreinterpretq_s16_u16(q6u16);
                q7s16 = vreinterpretq_s16_u16(q7u16);
                q8s16 = vreinterpretq_s16_u16(q8u16);
                q9s16 = vreinterpretq_s16_u16(q9u16);
                q10s16 = vreinterpretq_s16_u16(q10u16);

                q7s16 = vqaddq_s16(q7s16, q3s16);
                q8s16 = vqaddq_s16(q8s16, q4s16);
                q9s16 = vqaddq_s16(q9s16, q5s16);
                q10s16 = vqaddq_s16(q10s16, q6s16);

                // Round-shift by 7 (VP8 filter precision) with saturation.
                d6u8 = vqrshrun_n_s16(q7s16, 7);
                d7u8 = vqrshrun_n_s16(q8s16, 7);
                d8u8 = vqrshrun_n_s16(q9s16, 7);
                d9u8 = vqrshrun_n_s16(q10s16, 7);

                // Slide the 5-row window of source rows down by 4.
                d18u8 = d22u8;
                d19u8 = d23u8;
                d20u8 = d24u8;
                d21u8 = d25u8;
                d22u8 = d26u8;

                vst1_u8(dst, d6u8);
                dst += dst_pitch;
                vst1_u8(dst, d7u8);
                dst += dst_pitch;
                vst1_u8(dst, d8u8);
                dst += dst_pitch;
                vst1_u8(dst, d9u8);
                dst += dst_pitch;
            }
        }
        return;
    }

    // load first_pass filter
    dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]);
    d0s8 = vdup_lane_s8(dtmps8, 0);
    d1s8 = vdup_lane_s8(dtmps8, 1);
    d2s8 = vdup_lane_s8(dtmps8, 2);
    d3s8 = vdup_lane_s8(dtmps8, 3);
    d4s8 = vdup_lane_s8(dtmps8, 4);
    d5s8 = vdup_lane_s8(dtmps8, 5);
    d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
    d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
    d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
    d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
    d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
    d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));

    // First pass: horizontal 6-tap filter
    if (yoffset == 0) {  // firstpass_filter16x16_only: horizontal filter only
        // Filter 16 rows (2 per iteration) straight to the destination.
        src = src_ptr - 2;
        dst = dst_ptr;
        for (i = 0; i < 8; i++) {
            d6u8 = vld1_u8(src);
            d7u8 = vld1_u8(src + 8);
            d8u8 = vld1_u8(src + 16);
            src += src_pixels_per_line;
            d9u8 = vld1_u8(src);
            d10u8 = vld1_u8(src + 8);
            d11u8 = vld1_u8(src + 16);
            src += src_pixels_per_line;

            __builtin_prefetch(src);
            __builtin_prefetch(src + src_pixels_per_line);

            q6u16 = vmull_u8(d6u8, d0u8);
            q7u16 = vmull_u8(d7u8, d0u8);
            q8u16 = vmull_u8(d9u8, d0u8);
            q9u16 = vmull_u8(d10u8, d0u8);

            // vext_u8(lo, hi, k) yields the row shifted left by k pixels,
            // i.e. the input for filter tap position k.
            d20u8 = vext_u8(d6u8, d7u8, 1);
            d21u8 = vext_u8(d9u8, d10u8, 1);
            d22u8 = vext_u8(d7u8, d8u8, 1);
            d23u8 = vext_u8(d10u8, d11u8, 1);
            d24u8 = vext_u8(d6u8, d7u8, 4);
            d25u8 = vext_u8(d9u8, d10u8, 4);
            d26u8 = vext_u8(d7u8, d8u8, 4);
            d27u8 = vext_u8(d10u8, d11u8, 4);
            d28u8 = vext_u8(d6u8, d7u8, 5);
            d29u8 = vext_u8(d9u8, d10u8, 5);

            q6u16 = vmlsl_u8(q6u16, d20u8, d1u8);
            q8u16 = vmlsl_u8(q8u16, d21u8, d1u8);
            q7u16 = vmlsl_u8(q7u16, d22u8, d1u8);
            q9u16 = vmlsl_u8(q9u16, d23u8, d1u8);
            q6u16 = vmlsl_u8(q6u16, d24u8, d4u8);
            q8u16 = vmlsl_u8(q8u16, d25u8, d4u8);
            q7u16 = vmlsl_u8(q7u16, d26u8, d4u8);
            q9u16 = vmlsl_u8(q9u16, d27u8, d4u8);
            q6u16 = vmlal_u8(q6u16, d28u8, d5u8);
            q8u16 = vmlal_u8(q8u16, d29u8, d5u8);

            d20u8 = vext_u8(d7u8, d8u8, 5);
            d21u8 = vext_u8(d10u8, d11u8, 5);
            d22u8 = vext_u8(d6u8, d7u8, 2);
            d23u8 = vext_u8(d9u8, d10u8, 2);
            d24u8 = vext_u8(d7u8, d8u8, 2);
            d25u8 = vext_u8(d10u8, d11u8, 2);
            d26u8 = vext_u8(d6u8, d7u8, 3);
            d27u8 = vext_u8(d9u8, d10u8, 3);
            d28u8 = vext_u8(d7u8, d8u8, 3);
            d29u8 = vext_u8(d10u8, d11u8, 3);

            q7u16 = vmlal_u8(q7u16, d20u8, d5u8);
            q9u16 = vmlal_u8(q9u16, d21u8, d5u8);
            q6u16 = vmlal_u8(q6u16, d22u8, d2u8);
            q8u16 = vmlal_u8(q8u16, d23u8, d2u8);
            q7u16 = vmlal_u8(q7u16, d24u8, d2u8);
            q9u16 = vmlal_u8(q9u16, d25u8, d2u8);

            // Center tap accumulated separately; saturating add clamps.
            q10u16 = vmull_u8(d26u8, d3u8);
            q11u16 = vmull_u8(d27u8, d3u8);
            q12u16 = vmull_u8(d28u8, d3u8);
            q15u16 = vmull_u8(d29u8, d3u8);

            q6s16 = vreinterpretq_s16_u16(q6u16);
            q7s16 = vreinterpretq_s16_u16(q7u16);
            q8s16 = vreinterpretq_s16_u16(q8u16);
            q9s16 = vreinterpretq_s16_u16(q9u16);
            q10s16 = vreinterpretq_s16_u16(q10u16);
            q11s16 = vreinterpretq_s16_u16(q11u16);
            q12s16 = vreinterpretq_s16_u16(q12u16);
            q15s16 = vreinterpretq_s16_u16(q15u16);

            q6s16 = vqaddq_s16(q6s16, q10s16);
            q8s16 = vqaddq_s16(q8s16, q11s16);
            q7s16 = vqaddq_s16(q7s16, q12s16);
            q9s16 = vqaddq_s16(q9s16, q15s16);

            // Round-shift by 7 with saturation, then store two 16-wide rows.
            d6u8 = vqrshrun_n_s16(q6s16, 7);
            d7u8 = vqrshrun_n_s16(q7s16, 7);
            d8u8 = vqrshrun_n_s16(q8s16, 7);
            d9u8 = vqrshrun_n_s16(q9s16, 7);

            q3u8 = vcombine_u8(d6u8, d7u8);
            q4u8 = vcombine_u8(d8u8, d9u8);
            vst1q_u8(dst, q3u8);
            dst += dst_pitch;
            vst1q_u8(dst, q4u8);
            dst += dst_pitch;
        }
        return;
    }

    // General case: first pass filters 21 rows (3 per iteration, 7
    // iterations) into 'tmp'; start 2 rows above / 2 cols left of src_ptr.
    src = src_ptr - 2 - src_pixels_per_line * 2;
    tmpp = tmp;
    for (i = 0; i < 7; i++) {
        d6u8 = vld1_u8(src);
        d7u8 = vld1_u8(src + 8);
        d8u8 = vld1_u8(src + 16);
        src += src_pixels_per_line;
        d9u8 = vld1_u8(src);
        d10u8 = vld1_u8(src + 8);
        d11u8 = vld1_u8(src + 16);
        src += src_pixels_per_line;
        d12u8 = vld1_u8(src);
        d13u8 = vld1_u8(src + 8);
        d14u8 = vld1_u8(src + 16);
        src += src_pixels_per_line;

        __builtin_prefetch(src);
        __builtin_prefetch(src + src_pixels_per_line);
        __builtin_prefetch(src + src_pixels_per_line * 2);

        q8u16 = vmull_u8(d6u8, d0u8);
        q9u16 = vmull_u8(d7u8, d0u8);
        q10u16 = vmull_u8(d9u8, d0u8);
        q11u16 = vmull_u8(d10u8, d0u8);
        q12u16 = vmull_u8(d12u8, d0u8);
        q13u16 = vmull_u8(d13u8, d0u8);

        // For each tap position k, build the shifted rows with vext and
        // accumulate (vmlsl for negative taps, vmlal for positive taps).
        d28u8 = vext_u8(d6u8, d7u8, 1);
        d29u8 = vext_u8(d9u8, d10u8, 1);
        d30u8 = vext_u8(d12u8, d13u8, 1);
        q8u16 = vmlsl_u8(q8u16, d28u8, d1u8);
        q10u16 = vmlsl_u8(q10u16, d29u8, d1u8);
        q12u16 = vmlsl_u8(q12u16, d30u8, d1u8);
        d28u8 = vext_u8(d7u8, d8u8, 1);
        d29u8 = vext_u8(d10u8, d11u8, 1);
        d30u8 = vext_u8(d13u8, d14u8, 1);
        q9u16  = vmlsl_u8(q9u16, d28u8, d1u8);
        q11u16 = vmlsl_u8(q11u16, d29u8, d1u8);
        q13u16 = vmlsl_u8(q13u16, d30u8, d1u8);

        d28u8 = vext_u8(d6u8, d7u8, 4);
        d29u8 = vext_u8(d9u8, d10u8, 4);
        d30u8 = vext_u8(d12u8, d13u8, 4);
        q8u16 = vmlsl_u8(q8u16, d28u8, d4u8);
        q10u16 = vmlsl_u8(q10u16, d29u8, d4u8);
        q12u16 = vmlsl_u8(q12u16, d30u8, d4u8);
        d28u8 = vext_u8(d7u8, d8u8, 4);
        d29u8 = vext_u8(d10u8, d11u8, 4);
        d30u8 = vext_u8(d13u8, d14u8, 4);
        q9u16 = vmlsl_u8(q9u16, d28u8, d4u8);
        q11u16 = vmlsl_u8(q11u16, d29u8, d4u8);
        q13u16 = vmlsl_u8(q13u16, d30u8, d4u8);

        d28u8 = vext_u8(d6u8, d7u8, 5);
        d29u8 = vext_u8(d9u8, d10u8, 5);
        d30u8 = vext_u8(d12u8, d13u8, 5);
        q8u16 = vmlal_u8(q8u16, d28u8, d5u8);
        q10u16 = vmlal_u8(q10u16, d29u8, d5u8);
        q12u16 = vmlal_u8(q12u16, d30u8, d5u8);
        d28u8 = vext_u8(d7u8, d8u8, 5);
        d29u8 = vext_u8(d10u8, d11u8, 5);
        d30u8 = vext_u8(d13u8, d14u8, 5);
        q9u16 = vmlal_u8(q9u16, d28u8, d5u8);
        q11u16 = vmlal_u8(q11u16, d29u8, d5u8);
        q13u16 = vmlal_u8(q13u16, d30u8, d5u8);

        d28u8 = vext_u8(d6u8, d7u8, 2);
        d29u8 = vext_u8(d9u8, d10u8, 2);
        d30u8 = vext_u8(d12u8, d13u8, 2);
        q8u16 = vmlal_u8(q8u16, d28u8, d2u8);
        q10u16 = vmlal_u8(q10u16, d29u8, d2u8);
        q12u16 = vmlal_u8(q12u16, d30u8, d2u8);
        d28u8 = vext_u8(d7u8, d8u8, 2);
        d29u8 = vext_u8(d10u8, d11u8, 2);
        d30u8 = vext_u8(d13u8, d14u8, 2);
        q9u16 = vmlal_u8(q9u16, d28u8, d2u8);
        q11u16 = vmlal_u8(q11u16, d29u8, d2u8);
        q13u16 = vmlal_u8(q13u16, d30u8, d2u8);

        // Center tap (position 3) accumulated separately in fresh
        // registers; the saturating add below clamps overflow.
        d28u8 = vext_u8(d6u8, d7u8, 3);
        d29u8 = vext_u8(d9u8, d10u8, 3);
        d30u8 = vext_u8(d12u8, d13u8, 3);
        d15u8 = vext_u8(d7u8, d8u8, 3);
        d31u8 = vext_u8(d10u8, d11u8, 3);
        d6u8  = vext_u8(d13u8, d14u8, 3);
        q4u16 = vmull_u8(d28u8, d3u8);
        q5u16 = vmull_u8(d29u8, d3u8);
        q6u16 = vmull_u8(d30u8, d3u8);
        q4s16 = vreinterpretq_s16_u16(q4u16);
        q5s16 = vreinterpretq_s16_u16(q5u16);
        q6s16 = vreinterpretq_s16_u16(q6u16);
        q8s16 = vreinterpretq_s16_u16(q8u16);
        q10s16 = vreinterpretq_s16_u16(q10u16);
        q12s16 = vreinterpretq_s16_u16(q12u16);
        q8s16 = vqaddq_s16(q8s16, q4s16);
        q10s16 = vqaddq_s16(q10s16, q5s16);
        q12s16 = vqaddq_s16(q12s16, q6s16);

        q6u16 = vmull_u8(d15u8, d3u8);
        q7u16 = vmull_u8(d31u8, d3u8);
        q3u16 = vmull_u8(d6u8, d3u8);
        q3s16 = vreinterpretq_s16_u16(q3u16);
        q6s16 = vreinterpretq_s16_u16(q6u16);
        q7s16 = vreinterpretq_s16_u16(q7u16);
        q9s16 = vreinterpretq_s16_u16(q9u16);
        q11s16 = vreinterpretq_s16_u16(q11u16);
        q13s16 = vreinterpretq_s16_u16(q13u16);
        q9s16 = vqaddq_s16(q9s16, q6s16);
        q11s16 = vqaddq_s16(q11s16, q7s16);
        q13s16 = vqaddq_s16(q13s16, q3s16);

        // Round-shift by 7 with saturation; store 3 rows of 16 into tmp.
        d6u8 = vqrshrun_n_s16(q8s16, 7);
        d7u8 = vqrshrun_n_s16(q9s16, 7);
        d8u8 = vqrshrun_n_s16(q10s16, 7);
        d9u8 = vqrshrun_n_s16(q11s16, 7);
        d10u8 = vqrshrun_n_s16(q12s16, 7);
        d11u8 = vqrshrun_n_s16(q13s16, 7);

        vst1_u8(tmpp, d6u8);
        tmpp += 8;
        vst1_u8(tmpp, d7u8);
        tmpp += 8;
        vst1_u8(tmpp, d8u8);
        tmpp += 8;
        vst1_u8(tmpp, d9u8);
        tmpp += 8;
        vst1_u8(tmpp, d10u8);
        tmpp += 8;
        vst1_u8(tmpp, d11u8);
        tmpp += 8;
    }

    // Second pass: 16x16
    dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
    d0s8 = vdup_lane_s8(dtmps8, 0);
    d1s8 = vdup_lane_s8(dtmps8, 1);
    d2s8 = vdup_lane_s8(dtmps8, 2);
    d3s8 = vdup_lane_s8(dtmps8, 3);
    d4s8 = vdup_lane_s8(dtmps8, 4);
    d5s8 = vdup_lane_s8(dtmps8, 5);
    d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
    d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
    d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
    d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
    d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
    d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));

    // Process the two 8-pixel-wide halves of 'tmp' (row stride 16).
    for (i = 0; i < 2; i++) {
        dst = dst_ptr + 8 * i;
        tmpp = tmp + 8 * i;
        d18u8 = vld1_u8(tmpp);
        tmpp += 16;
        d19u8 = vld1_u8(tmpp);
        tmpp += 16;
        d20u8 = vld1_u8(tmpp);
        tmpp += 16;
        d21u8 = vld1_u8(tmpp);
        tmpp += 16;
        d22u8 = vld1_u8(tmpp);
        tmpp += 16;
        for (j = 0; j < 4; j++) {  // 4 output rows per iteration
            d23u8 = vld1_u8(tmpp);
            tmpp += 16;
            d24u8 = vld1_u8(tmpp);
            tmpp += 16;
            d25u8 = vld1_u8(tmpp);
            tmpp += 16;
            d26u8 = vld1_u8(tmpp);
            tmpp += 16;

            q3u16 = vmull_u8(d18u8, d0u8);
            q4u16 = vmull_u8(d19u8, d0u8);
            q5u16 = vmull_u8(d20u8, d0u8);
            q6u16 = vmull_u8(d21u8, d0u8);

            q3u16 = vmlsl_u8(q3u16, d19u8, d1u8);
            q4u16 = vmlsl_u8(q4u16, d20u8, d1u8);
            q5u16 = vmlsl_u8(q5u16, d21u8, d1u8);
            q6u16 = vmlsl_u8(q6u16, d22u8, d1u8);

            q3u16 = vmlsl_u8(q3u16, d22u8, d4u8);
            q4u16 = vmlsl_u8(q4u16, d23u8, d4u8);
            q5u16 = vmlsl_u8(q5u16, d24u8, d4u8);
            q6u16 = vmlsl_u8(q6u16, d25u8, d4u8);

            q3u16 = vmlal_u8(q3u16, d20u8, d2u8);
            q4u16 = vmlal_u8(q4u16, d21u8, d2u8);
            q5u16 = vmlal_u8(q5u16, d22u8, d2u8);
            q6u16 = vmlal_u8(q6u16, d23u8, d2u8);

            q3u16 = vmlal_u8(q3u16, d23u8, d5u8);
            q4u16 = vmlal_u8(q4u16, d24u8, d5u8);
            q5u16 = vmlal_u8(q5u16, d25u8, d5u8);
            q6u16 = vmlal_u8(q6u16, d26u8, d5u8);

            q7u16 = vmull_u8(d21u8, d3u8);
            q8u16 = vmull_u8(d22u8, d3u8);
            q9u16 = vmull_u8(d23u8, d3u8);
            q10u16 = vmull_u8(d24u8, d3u8);

            q3s16 = vreinterpretq_s16_u16(q3u16);
            q4s16 = vreinterpretq_s16_u16(q4u16);
            q5s16 = vreinterpretq_s16_u16(q5u16);
            q6s16 = vreinterpretq_s16_u16(q6u16);
            q7s16 = vreinterpretq_s16_u16(q7u16);
            q8s16 = vreinterpretq_s16_u16(q8u16);
            q9s16 = vreinterpretq_s16_u16(q9u16);
            q10s16 = vreinterpretq_s16_u16(q10u16);

            q7s16 = vqaddq_s16(q7s16, q3s16);
            q8s16 = vqaddq_s16(q8s16, q4s16);
            q9s16 = vqaddq_s16(q9s16, q5s16);
            q10s16 = vqaddq_s16(q10s16, q6s16);

            d6u8 = vqrshrun_n_s16(q7s16, 7);
            d7u8 = vqrshrun_n_s16(q8s16, 7);
            d8u8 = vqrshrun_n_s16(q9s16, 7);
            d9u8 = vqrshrun_n_s16(q10s16, 7);

            // Slide the 5-row window of intermediate rows down by 4.
            d18u8 = d22u8;
            d19u8 = d23u8;
            d20u8 = d24u8;
            d21u8 = d25u8;
            d22u8 = d26u8;

            vst1_u8(dst, d6u8);
            dst += dst_pitch;
            vst1_u8(dst, d7u8);
            dst += dst_pitch;
            vst1_u8(dst, d8u8);
            dst += dst_pitch;
            vst1_u8(dst, d9u8);
            dst += dst_pitch;
        }
    }
    return;
}
Ejemplo n.º 7
0
void vp8_sixtap_predict8x8_neon(
    unsigned char *src_ptr,
    int src_pixels_per_line,
    int xoffset,
    int yoffset,
    unsigned char *dst_ptr,
    int dst_pitch) {
    unsigned char *src, *tmpp;
    unsigned char tmp[64];
    int i;
    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8;
    uint8x8_t d18u8, d19u8, d20u8, d21u8, d22u8, d23u8, d24u8, d25u8;
    uint8x8_t d26u8, d27u8, d28u8, d29u8, d30u8, d31u8;
    int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8;
    uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16;
    uint16x8_t q8u16, q9u16, q10u16, q11u16, q12u16;
    int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16;
    int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16;
    uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q9u8, q10u8, q11u8, q12u8;

    if (xoffset == 0) {  // secondpass_filter8x8_only
        // load second_pass filter
        dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
        d0s8 = vdup_lane_s8(dtmps8, 0);
        d1s8 = vdup_lane_s8(dtmps8, 1);
        d2s8 = vdup_lane_s8(dtmps8, 2);
        d3s8 = vdup_lane_s8(dtmps8, 3);
        d4s8 = vdup_lane_s8(dtmps8, 4);
        d5s8 = vdup_lane_s8(dtmps8, 5);
        d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
        d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
        d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
        d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
        d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
        d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));

        // load src data
        src = src_ptr - src_pixels_per_line * 2;
        d18u8 = vld1_u8(src);
        src += src_pixels_per_line;
        d19u8 = vld1_u8(src);
        src += src_pixels_per_line;
        d20u8 = vld1_u8(src);
        src += src_pixels_per_line;
        d21u8 = vld1_u8(src);
        src += src_pixels_per_line;
        d22u8 = vld1_u8(src);
        src += src_pixels_per_line;
        d23u8 = vld1_u8(src);
        src += src_pixels_per_line;
        d24u8 = vld1_u8(src);
        src += src_pixels_per_line;
        d25u8 = vld1_u8(src);
        src += src_pixels_per_line;
        d26u8 = vld1_u8(src);
        src += src_pixels_per_line;
        d27u8 = vld1_u8(src);
        src += src_pixels_per_line;
        d28u8 = vld1_u8(src);
        src += src_pixels_per_line;
        d29u8 = vld1_u8(src);
        src += src_pixels_per_line;
        d30u8 = vld1_u8(src);

        for (i = 2; i > 0; i--) {
            q3u16 = vmull_u8(d18u8, d0u8);
            q4u16 = vmull_u8(d19u8, d0u8);
            q5u16 = vmull_u8(d20u8, d0u8);
            q6u16 = vmull_u8(d21u8, d0u8);

            q3u16 = vmlsl_u8(q3u16, d19u8, d1u8);
            q4u16 = vmlsl_u8(q4u16, d20u8, d1u8);
            q5u16 = vmlsl_u8(q5u16, d21u8, d1u8);
            q6u16 = vmlsl_u8(q6u16, d22u8, d1u8);

            q3u16 = vmlsl_u8(q3u16, d22u8, d4u8);
            q4u16 = vmlsl_u8(q4u16, d23u8, d4u8);
            q5u16 = vmlsl_u8(q5u16, d24u8, d4u8);
            q6u16 = vmlsl_u8(q6u16, d25u8, d4u8);

            q3u16 = vmlal_u8(q3u16, d20u8, d2u8);
            q4u16 = vmlal_u8(q4u16, d21u8, d2u8);
            q5u16 = vmlal_u8(q5u16, d22u8, d2u8);
            q6u16 = vmlal_u8(q6u16, d23u8, d2u8);

            q3u16 = vmlal_u8(q3u16, d23u8, d5u8);
            q4u16 = vmlal_u8(q4u16, d24u8, d5u8);
            q5u16 = vmlal_u8(q5u16, d25u8, d5u8);
            q6u16 = vmlal_u8(q6u16, d26u8, d5u8);

            q7u16 = vmull_u8(d21u8, d3u8);
            q8u16 = vmull_u8(d22u8, d3u8);
            q9u16 = vmull_u8(d23u8, d3u8);
            q10u16 = vmull_u8(d24u8, d3u8);

            q3s16 = vreinterpretq_s16_u16(q3u16);
            q4s16 = vreinterpretq_s16_u16(q4u16);
            q5s16 = vreinterpretq_s16_u16(q5u16);
            q6s16 = vreinterpretq_s16_u16(q6u16);
            q7s16 = vreinterpretq_s16_u16(q7u16);
            q8s16 = vreinterpretq_s16_u16(q8u16);
            q9s16 = vreinterpretq_s16_u16(q9u16);
            q10s16 = vreinterpretq_s16_u16(q10u16);

            q7s16 = vqaddq_s16(q7s16, q3s16);
            q8s16 = vqaddq_s16(q8s16, q4s16);
            q9s16 = vqaddq_s16(q9s16, q5s16);
            q10s16 = vqaddq_s16(q10s16, q6s16);

            d6u8 = vqrshrun_n_s16(q7s16, 7);
            d7u8 = vqrshrun_n_s16(q8s16, 7);
            d8u8 = vqrshrun_n_s16(q9s16, 7);
            d9u8 = vqrshrun_n_s16(q10s16, 7);

            d18u8 = d22u8;
            d19u8 = d23u8;
            d20u8 = d24u8;
            d21u8 = d25u8;
            d22u8 = d26u8;
            d23u8 = d27u8;
            d24u8 = d28u8;
            d25u8 = d29u8;
            d26u8 = d30u8;

            vst1_u8(dst_ptr, d6u8);
            dst_ptr += dst_pitch;
            vst1_u8(dst_ptr, d7u8);
            dst_ptr += dst_pitch;
            vst1_u8(dst_ptr, d8u8);
            dst_ptr += dst_pitch;
            vst1_u8(dst_ptr, d9u8);
            dst_ptr += dst_pitch;
        }
        return;
    }

    // load first_pass filter
    dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]);
    d0s8 = vdup_lane_s8(dtmps8, 0);
    d1s8 = vdup_lane_s8(dtmps8, 1);
    d2s8 = vdup_lane_s8(dtmps8, 2);
    d3s8 = vdup_lane_s8(dtmps8, 3);
    d4s8 = vdup_lane_s8(dtmps8, 4);
    d5s8 = vdup_lane_s8(dtmps8, 5);
    d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
    d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
    d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
    d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
    d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
    d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));

    // First pass: output_height lines x output_width columns (9x4)
    if (yoffset == 0)  // firstpass_filter4x4_only
        src = src_ptr - 2;
    else
        src = src_ptr - 2 - (src_pixels_per_line * 2);

    tmpp = tmp;
    for (i = 2; i > 0; i--) {
        q3u8 = vld1q_u8(src);
        src += src_pixels_per_line;
        q4u8 = vld1q_u8(src);
        src += src_pixels_per_line;
        q5u8 = vld1q_u8(src);
        src += src_pixels_per_line;
        q6u8 = vld1q_u8(src);
        src += src_pixels_per_line;

        __builtin_prefetch(src);
        __builtin_prefetch(src + src_pixels_per_line);
        __builtin_prefetch(src + src_pixels_per_line * 2);

        // NOTE(review): this chunk starts mid-way through the first-pass
        // (horizontal) filter loop of a VP8 six-tap predictor; the loop
        // header, local declarations and coefficient setup are above this
        // view.  Four source rows (q3..q6) are filtered per iteration.

        // Tap 0: leftmost 8 pixels of each row times coefficient 0,
        // widened into 16-bit accumulators.
        q7u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
        q8u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
        q9u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
        q10u16 = vmull_u8(vget_low_u8(q6u8), d0u8);

        // Tap 1: pixels shifted left by 1.  The products are SUBTRACTED
        // (vmlsl) — presumably because taps 1 and 4 are negative in the
        // VP8 filter table and only their absolute values are held in
        // d1u8/d4u8 (the abs() happens before this chunk; see the second
        // pass below for the same pattern).
        d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
        d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
        d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
        d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1);

        q7u16 = vmlsl_u8(q7u16, d28u8, d1u8);
        q8u16 = vmlsl_u8(q8u16, d29u8, d1u8);
        q9u16 = vmlsl_u8(q9u16, d30u8, d1u8);
        q10u16 = vmlsl_u8(q10u16, d31u8, d1u8);

        // Tap 4 (shift 4): also subtracted.
        d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4);
        d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4);
        d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4);
        d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4);

        q7u16 = vmlsl_u8(q7u16, d28u8, d4u8);
        q8u16 = vmlsl_u8(q8u16, d29u8, d4u8);
        q9u16 = vmlsl_u8(q9u16, d30u8, d4u8);
        q10u16 = vmlsl_u8(q10u16, d31u8, d4u8);

        // Tap 2 (shift 2): added (vmlal).
        d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2);
        d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2);
        d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2);
        d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2);

        q7u16 = vmlal_u8(q7u16, d28u8, d2u8);
        q8u16 = vmlal_u8(q8u16, d29u8, d2u8);
        q9u16 = vmlal_u8(q9u16, d30u8, d2u8);
        q10u16 = vmlal_u8(q10u16, d31u8, d2u8);

        // Tap 5 (shift 5): added.
        d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
        d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
        d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
        d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);

        q7u16 = vmlal_u8(q7u16, d28u8, d5u8);
        q8u16 = vmlal_u8(q8u16, d29u8, d5u8);
        q9u16 = vmlal_u8(q9u16, d30u8, d5u8);
        q10u16 = vmlal_u8(q10u16, d31u8, d5u8);

        // Tap 3 (shift 3, the centre tap) is computed into separate
        // accumulators and merged with a saturating add below, so any
        // intermediate 16-bit overflow clamps instead of wrapping.
        d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3);
        d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3);
        d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3);
        d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3);

        q3u16 = vmull_u8(d28u8, d3u8);
        q4u16 = vmull_u8(d29u8, d3u8);
        q5u16 = vmull_u8(d30u8, d3u8);
        q6u16 = vmull_u8(d31u8, d3u8);

        q3s16 = vreinterpretq_s16_u16(q3u16);
        q4s16 = vreinterpretq_s16_u16(q4u16);
        q5s16 = vreinterpretq_s16_u16(q5u16);
        q6s16 = vreinterpretq_s16_u16(q6u16);
        q7s16 = vreinterpretq_s16_u16(q7u16);
        q8s16 = vreinterpretq_s16_u16(q8u16);
        q9s16 = vreinterpretq_s16_u16(q9u16);
        q10s16 = vreinterpretq_s16_u16(q10u16);

        // Saturating add of the centre-tap term onto the running sums.
        q7s16 = vqaddq_s16(q7s16, q3s16);
        q8s16 = vqaddq_s16(q8s16, q4s16);
        q9s16 = vqaddq_s16(q9s16, q5s16);
        q10s16 = vqaddq_s16(q10s16, q6s16);

        // Narrow back to u8: rounding right-shift by 7 (the VP8 filter
        // precision) with unsigned saturation.
        d22u8 = vqrshrun_n_s16(q7s16, 7);
        d23u8 = vqrshrun_n_s16(q8s16, 7);
        d24u8 = vqrshrun_n_s16(q9s16, 7);
        d25u8 = vqrshrun_n_s16(q10s16, 7);

        // yoffset == 0 means no vertical filtering is needed, so the
        // horizontal results go straight to the destination; otherwise
        // they are staged in the intermediate buffer for the second pass.
        if (yoffset == 0) {  // firstpass_filter8x4_only
            vst1_u8(dst_ptr, d22u8);
            dst_ptr += dst_pitch;
            vst1_u8(dst_ptr, d23u8);
            dst_ptr += dst_pitch;
            vst1_u8(dst_ptr, d24u8);
            dst_ptr += dst_pitch;
            vst1_u8(dst_ptr, d25u8);
            dst_ptr += dst_pitch;
        } else {
            vst1_u8(tmpp, d22u8);
            tmpp += 8;
            vst1_u8(tmpp, d23u8);
            tmpp += 8;
            vst1_u8(tmpp, d24u8);
            tmpp += 8;
            vst1_u8(tmpp, d25u8);
            tmpp += 8;
        }
    }
    // Horizontal-only case: all output rows were written in the loop above.
    if (yoffset == 0)
        return;

    // First Pass on rest 5-line data
    // A 6-tap vertical filter producing H output rows reads H + 5 source
    // rows, so five extra rows are horizontally filtered here (same tap
    // sequence as the loop above, applied to rows q3..q7).
    q3u8 = vld1q_u8(src);
    src += src_pixels_per_line;
    q4u8 = vld1q_u8(src);
    src += src_pixels_per_line;
    q5u8 = vld1q_u8(src);
    src += src_pixels_per_line;
    q6u8 = vld1q_u8(src);
    src += src_pixels_per_line;
    q7u8 = vld1q_u8(src);

    q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
    q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
    q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
    q11u16 = vmull_u8(vget_low_u8(q6u8), d0u8);
    q12u16 = vmull_u8(vget_low_u8(q7u8), d0u8);

    d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
    d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
    d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
    d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1);
    d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 1);

    q8u16 = vmlsl_u8(q8u16, d27u8, d1u8);
    q9u16 = vmlsl_u8(q9u16, d28u8, d1u8);
    q10u16 = vmlsl_u8(q10u16, d29u8, d1u8);
    q11u16 = vmlsl_u8(q11u16, d30u8, d1u8);
    q12u16 = vmlsl_u8(q12u16, d31u8, d1u8);

    d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4);
    d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4);
    d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4);
    d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4);
    d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 4);

    q8u16 = vmlsl_u8(q8u16, d27u8, d4u8);
    q9u16 = vmlsl_u8(q9u16, d28u8, d4u8);
    q10u16 = vmlsl_u8(q10u16, d29u8, d4u8);
    q11u16 = vmlsl_u8(q11u16, d30u8, d4u8);
    q12u16 = vmlsl_u8(q12u16, d31u8, d4u8);

    d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2);
    d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2);
    d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2);
    d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2);
    d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 2);

    q8u16 = vmlal_u8(q8u16, d27u8, d2u8);
    q9u16 = vmlal_u8(q9u16, d28u8, d2u8);
    q10u16 = vmlal_u8(q10u16, d29u8, d2u8);
    q11u16 = vmlal_u8(q11u16, d30u8, d2u8);
    q12u16 = vmlal_u8(q12u16, d31u8, d2u8);

    d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
    d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
    d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
    d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);
    d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 5);

    q8u16 = vmlal_u8(q8u16, d27u8, d5u8);
    q9u16 = vmlal_u8(q9u16, d28u8, d5u8);
    q10u16 = vmlal_u8(q10u16, d29u8, d5u8);
    q11u16 = vmlal_u8(q11u16, d30u8, d5u8);
    q12u16 = vmlal_u8(q12u16, d31u8, d5u8);

    d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3);
    d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3);
    d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3);
    d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3);
    d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 3);

    q3u16 = vmull_u8(d27u8, d3u8);
    q4u16 = vmull_u8(d28u8, d3u8);
    q5u16 = vmull_u8(d29u8, d3u8);
    q6u16 = vmull_u8(d30u8, d3u8);
    q7u16 = vmull_u8(d31u8, d3u8);

    q3s16 = vreinterpretq_s16_u16(q3u16);
    q4s16 = vreinterpretq_s16_u16(q4u16);
    q5s16 = vreinterpretq_s16_u16(q5u16);
    q6s16 = vreinterpretq_s16_u16(q6u16);
    q7s16 = vreinterpretq_s16_u16(q7u16);
    q8s16 = vreinterpretq_s16_u16(q8u16);
    q9s16 = vreinterpretq_s16_u16(q9u16);
    q10s16 = vreinterpretq_s16_u16(q10u16);
    q11s16 = vreinterpretq_s16_u16(q11u16);
    q12s16 = vreinterpretq_s16_u16(q12u16);

    q8s16 = vqaddq_s16(q8s16, q3s16);
    q9s16 = vqaddq_s16(q9s16, q4s16);
    q10s16 = vqaddq_s16(q10s16, q5s16);
    q11s16 = vqaddq_s16(q11s16, q6s16);
    q12s16 = vqaddq_s16(q12s16, q7s16);

    // Extra rows stay in registers d26..d30: they are consumed directly
    // by the second pass (see the d22u8..d26u8 window uses below) rather
    // than being stored to the intermediate buffer.
    d26u8 = vqrshrun_n_s16(q8s16, 7);
    d27u8 = vqrshrun_n_s16(q9s16, 7);
    d28u8 = vqrshrun_n_s16(q10s16, 7);
    d29u8 = vqrshrun_n_s16(q11s16, 7);
    d30u8 = vqrshrun_n_s16(q12s16, 7);

    // Second pass: 8x8
    // Load the six vertical taps for this sub-pel offset.  The table
    // entries are signed; absolute values are taken because the
    // negative taps (applied with vmlsl below) are subtracted rather
    // than multiplied with a signed coefficient.
    dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
    d0s8 = vdup_lane_s8(dtmps8, 0);
    d1s8 = vdup_lane_s8(dtmps8, 1);
    d2s8 = vdup_lane_s8(dtmps8, 2);
    d3s8 = vdup_lane_s8(dtmps8, 3);
    d4s8 = vdup_lane_s8(dtmps8, 4);
    d5s8 = vdup_lane_s8(dtmps8, 5);
    d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
    d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
    d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
    d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
    d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
    d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));

    // Reload the horizontally-filtered rows staged by the first pass
    // (8 bytes per row, so each 16-byte load covers two rows).
    tmpp = tmp;
    q9u8 = vld1q_u8(tmpp);
    tmpp += 16;
    q10u8 = vld1q_u8(tmpp);
    tmpp += 16;
    q11u8 = vld1q_u8(tmpp);
    tmpp += 16;
    q12u8 = vld1q_u8(tmpp);

    // d18..d25 now hold the first eight intermediate rows, one row per
    // d-register, forming a sliding window for the vertical filter.
    d18u8 = vget_low_u8(q9u8);
    d19u8 = vget_high_u8(q9u8);
    d20u8 = vget_low_u8(q10u8);
    d21u8 = vget_high_u8(q10u8);
    d22u8 = vget_low_u8(q11u8);
    d23u8 = vget_high_u8(q11u8);
    d24u8 = vget_low_u8(q12u8);
    d25u8 = vget_high_u8(q12u8);

    // Vertical 6-tap filter: two iterations of four output rows each.
    // Rows d18..d26 are the current 9-row window; the same
    // mull/mlsl/mlal tap pattern as the first pass is applied down the
    // columns instead of across a row.
    for (i = 2; i > 0; i--) {
        q3u16 = vmull_u8(d18u8, d0u8);
        q4u16 = vmull_u8(d19u8, d0u8);
        q5u16 = vmull_u8(d20u8, d0u8);
        q6u16 = vmull_u8(d21u8, d0u8);

        q3u16 = vmlsl_u8(q3u16, d19u8, d1u8);
        q4u16 = vmlsl_u8(q4u16, d20u8, d1u8);
        q5u16 = vmlsl_u8(q5u16, d21u8, d1u8);
        q6u16 = vmlsl_u8(q6u16, d22u8, d1u8);

        q3u16 = vmlsl_u8(q3u16, d22u8, d4u8);
        q4u16 = vmlsl_u8(q4u16, d23u8, d4u8);
        q5u16 = vmlsl_u8(q5u16, d24u8, d4u8);
        q6u16 = vmlsl_u8(q6u16, d25u8, d4u8);

        q3u16 = vmlal_u8(q3u16, d20u8, d2u8);
        q4u16 = vmlal_u8(q4u16, d21u8, d2u8);
        q5u16 = vmlal_u8(q5u16, d22u8, d2u8);
        q6u16 = vmlal_u8(q6u16, d23u8, d2u8);

        q3u16 = vmlal_u8(q3u16, d23u8, d5u8);
        q4u16 = vmlal_u8(q4u16, d24u8, d5u8);
        q5u16 = vmlal_u8(q5u16, d25u8, d5u8);
        q6u16 = vmlal_u8(q6u16, d26u8, d5u8);

        // Centre tap in separate accumulators, merged with vqaddq_s16
        // so the combination saturates (same scheme as the first pass).
        q7u16 = vmull_u8(d21u8, d3u8);
        q8u16 = vmull_u8(d22u8, d3u8);
        q9u16 = vmull_u8(d23u8, d3u8);
        q10u16 = vmull_u8(d24u8, d3u8);

        q3s16 = vreinterpretq_s16_u16(q3u16);
        q4s16 = vreinterpretq_s16_u16(q4u16);
        q5s16 = vreinterpretq_s16_u16(q5u16);
        q6s16 = vreinterpretq_s16_u16(q6u16);
        q7s16 = vreinterpretq_s16_u16(q7u16);
        q8s16 = vreinterpretq_s16_u16(q8u16);
        q9s16 = vreinterpretq_s16_u16(q9u16);
        q10s16 = vreinterpretq_s16_u16(q10u16);

        q7s16 = vqaddq_s16(q7s16, q3s16);
        q8s16 = vqaddq_s16(q8s16, q4s16);
        q9s16 = vqaddq_s16(q9s16, q5s16);
        q10s16 = vqaddq_s16(q10s16, q6s16);

        // Round, shift by 7 and saturate to u8: four finished rows.
        d6u8 = vqrshrun_n_s16(q7s16, 7);
        d7u8 = vqrshrun_n_s16(q8s16, 7);
        d8u8 = vqrshrun_n_s16(q9s16, 7);
        d9u8 = vqrshrun_n_s16(q10s16, 7);

        // Slide the row window down by four for the next iteration
        // (d26..d29 pull in the leftover first-pass rows, d30 refills
        // the window tail).
        d18u8 = d22u8;
        d19u8 = d23u8;
        d20u8 = d24u8;
        d21u8 = d25u8;
        d22u8 = d26u8;
        d23u8 = d27u8;
        d24u8 = d28u8;
        d25u8 = d29u8;
        d26u8 = d30u8;

        vst1_u8(dst_ptr, d6u8);
        dst_ptr += dst_pitch;
        vst1_u8(dst_ptr, d7u8);
        dst_ptr += dst_pitch;
        vst1_u8(dst_ptr, d8u8);
        dst_ptr += dst_pitch;
        vst1_u8(dst_ptr, d9u8);
        dst_ptr += dst_pitch;
    }
    return;
}