template<bool align, bool compensation> void ReduceGray3x3( const uint8_t* src, size_t srcWidth, size_t srcHeight, size_t srcStride, uint8_t* dst, size_t dstWidth, size_t dstHeight, size_t dstStride) { assert(srcWidth >= A && (srcWidth + 1)/2 == dstWidth && (srcHeight + 1)/2 == dstHeight); if(align) assert(Aligned(src) && Aligned(srcStride)); size_t lastOddCol = srcWidth - AlignLo(srcWidth, 2); size_t bodyWidth = AlignLo(srcWidth, A); for(size_t row = 0; row < srcHeight; row += 2, dst += dstStride, src += 2*srcStride) { const uint8_t * s1 = src; const uint8_t * s0 = s1 - (row ? srcStride : 0); const uint8_t * s2 = s1 + (row != srcHeight - 1 ? srcStride : 0); vst1_u8(dst, ReduceRow<compensation>(ReduceColNose<align>(s0), ReduceColNose<align>(s1), ReduceColNose<align>(s2))); for(size_t srcCol = A, dstCol = HA; srcCol < bodyWidth; srcCol += A, dstCol += HA) vst1_u8(dst + dstCol, ReduceRow<compensation>(ReduceColBody<align>(s0 + srcCol), ReduceColBody<align>(s1 + srcCol), ReduceColBody<align>(s2 + srcCol))); if(bodyWidth != srcWidth) { size_t srcCol = srcWidth - A - lastOddCol; size_t dstCol = dstWidth - HA - lastOddCol; vst1_u8(dst + dstCol, ReduceRow<compensation>(ReduceColBody<false>(s0 + srcCol), ReduceColBody<false>(s1 + srcCol), ReduceColBody<false>(s2 + srcCol))); if(lastOddCol) dst[dstWidth - 1] = Base::GaussianBlur3x3<compensation>(s0 + srcWidth, s1 + srcWidth, s2 + srcWidth, -2, -1, -1); } } }
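/* A scalar sketch (not Simd's actual helpers) of what one output pixel of
 * the 2:1 reduction above computes: a 3x3 [1 2 1] x [1 2 1] Gaussian summed
 * and divided by 16, where `compensation` presumably selects rounding (+8)
 * before the shift. s0/s1/s2 point at the center column of three source
 * rows; edge handling (row clamping, lastOddCol) is as in the function above. */
#include <stdint.h>
static uint8_t Reduce3x3PixelSketch(const uint8_t *s0, const uint8_t *s1,
                                    const uint8_t *s2, int compensation) {
    unsigned sum = 1 * s0[-1] + 2 * s0[0] + 1 * s0[1] +
                   2 * s1[-1] + 4 * s1[0] + 2 * s1[1] +
                   1 * s2[-1] + 2 * s2[0] + 1 * s2[1];
    return (uint8_t)((sum + (compensation ? 8 : 0)) >> 4);
}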
int main(void) { uint8_t v1_init[8] = {1, 1, 1, 1, 1, 1, 1, 1}; uint8_t v2_init[8] = {2, 2, 2, 2, 2, 2, 2, 2}; uint8x8_t v1 = vld1_u8 (v1_init); uint8x8_t v2 = vld1_u8 (v2_init); uint8x8x2_t vd1, vd2; union {uint8x8_t v; uint8_t buf[8];} d1, d2, d3, d4; int i; uint8_t odd, even; vd1 = vzip_u8(v1, vdup_n_u8(0)); vd2 = vzip_u8(v2, vdup_n_u8(0)); vst1_u8(d1.buf, vd1.val[0]); vst1_u8(d2.buf, vd1.val[1]); vst1_u8(d3.buf, vd2.val[0]); vst1_u8(d4.buf, vd2.val[1]); #ifdef __ARMEL__ odd = 1; even = 0; #else odd = 0; even = 1; #endif for (i = 0; i < 8; i++) if ((i % 2 == even && d4.buf[i] != 2) || (i % 2 == odd && d4.buf[i] != 0)) abort (); return 0; }
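/* For reference: vzip_u8 interleaves two 8-byte vectors lane by lane, so
 * zipping v2 = {2,...} with zeros gives alternating {2,0,2,0,...} in both
 * result halves; the even/odd check above only has to account for lane
 * numbering under the target's endianness (the __ARMEL__ switch). A minimal
 * standalone illustration: */
#include <arm_neon.h>
#include <stdio.h>
void demo_vzip(void) {
    uint8_t out[8];
    uint8x8x2_t z = vzip_u8(vdup_n_u8(2), vdup_n_u8(0));
    vst1_u8(out, z.val[1]);
    for (int i = 0; i < 8; i++) printf("%d ", out[i]); /* prints: 2 0 2 0 2 0 2 0 */
    printf("\n");
}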
static void ConvertARGBToUV_NEON(const uint32_t* argb, uint8_t* u, uint8_t* v, int src_width, int do_store) { int i; for (i = 0; i + 16 <= src_width; i += 16, u += 8, v += 8) { const uint8x16x4_t RGB = vld4q_u8((const uint8_t*)&argb[i]); const uint16x8_t R = vpaddlq_u8(RGB.val[2]); // pair-wise adds const uint16x8_t G = vpaddlq_u8(RGB.val[1]); const uint16x8_t B = vpaddlq_u8(RGB.val[0]); int16x8_t U_tmp, V_tmp; CONVERT_RGB_TO_UV(R, G, B, 1, U_tmp, V_tmp); { const uint8x8_t U = vqrshrun_n_s16(U_tmp, 1); const uint8x8_t V = vqrshrun_n_s16(V_tmp, 1); if (do_store) { vst1_u8(u, U); vst1_u8(v, V); } else { const uint8x8_t prev_u = vld1_u8(u); const uint8x8_t prev_v = vld1_u8(v); vst1_u8(u, vrhadd_u8(U, prev_u)); vst1_u8(v, vrhadd_u8(V, prev_v)); } } } if (i < src_width) { // left-over WebPConvertARGBToUV_C(argb + i, u, v, src_width - i, do_store); } }
template <bool even> void ReduceGray4x4(const uint8_t *src, size_t srcWidth, size_t srcHeight, size_t srcStride, uint8_t *dst, size_t dstWidth, size_t dstHeight, size_t dstStride) { assert((srcWidth + 1) / 2 == dstWidth && (srcHeight + 1) / 2 == dstHeight && srcWidth > A); size_t alignedDstWidth = Simd::AlignLo(dstWidth, HA); size_t srcTail = Simd::AlignHi(srcWidth - A, 2); Buffer buffer(Simd::AlignHi(dstWidth, A)); uint16x8_t tmp = ReduceColNose(src); Store<true>(buffer.src0, tmp); Store<true>(buffer.src1, tmp); size_t srcCol = A, dstCol = HA; for (; srcCol < srcWidth - A; srcCol += A, dstCol += HA) { tmp = ReduceColBody(src + srcCol); Store<true>(buffer.src0 + dstCol, tmp); Store<true>(buffer.src1 + dstCol, tmp); } tmp = ReduceColTail<even>(src + srcTail); Store<false>(buffer.src0 + dstWidth - HA, tmp); Store<false>(buffer.src1 + dstWidth - HA, tmp); for (size_t row = 0; row < srcHeight; row += 2, dst += dstStride) { const uint8_t *src2 = src + srcStride*(row + 1); const uint8_t *src3 = src2 + srcStride; if (row >= srcHeight - 2) { src2 = src + srcStride*(srcHeight - 1); src3 = src2; } Store<true>(buffer.src2, ReduceColNose(src2)); Store<true>(buffer.src3, ReduceColNose(src3)); size_t srcCol = A, dstCol = HA; for (; srcCol < srcWidth - A; srcCol += A, dstCol += HA) { Store<true>(buffer.src2 + dstCol, ReduceColBody(src2 + srcCol)); Store<true>(buffer.src3 + dstCol, ReduceColBody(src3 + srcCol)); } Store<false>(buffer.src2 + dstWidth - HA, ReduceColTail<even>(src2 + srcTail)); Store<false>(buffer.src3 + dstWidth - HA, ReduceColTail<even>(src3 + srcTail)); for (size_t col = 0; col < alignedDstWidth; col += HA) vst1_u8(dst + col, ReduceRow<true>(buffer, col)); if (alignedDstWidth != dstWidth) vst1_u8(dst + dstWidth - HA, ReduceRow<false>(buffer, dstWidth - HA)); Swap(buffer.src0, buffer.src2); Swap(buffer.src1, buffer.src3); } }
void vp9_lpf_horizontal_4_neon( unsigned char *src, int pitch, unsigned char *blimit, unsigned char *limit, unsigned char *thresh, int count) { int i; uint8_t *s, *psrc; uint8x8_t dblimit, dlimit, dthresh; uint8x8_t d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8; if (count == 0) // end_vp9_lf_h_edge return; dblimit = vld1_u8(blimit); dlimit = vld1_u8(limit); dthresh = vld1_u8(thresh); psrc = src - (pitch << 2); for (i = 0; i < count; i++) { s = psrc + i * 8; d3u8 = vld1_u8(s); s += pitch; d4u8 = vld1_u8(s); s += pitch; d5u8 = vld1_u8(s); s += pitch; d6u8 = vld1_u8(s); s += pitch; d7u8 = vld1_u8(s); s += pitch; d16u8 = vld1_u8(s); s += pitch; d17u8 = vld1_u8(s); s += pitch; d18u8 = vld1_u8(s); vp9_loop_filter_neon(dblimit, dlimit, dthresh, d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8, &d4u8, &d5u8, &d6u8, &d7u8); s -= (pitch * 5); vst1_u8(s, d4u8); s += pitch; vst1_u8(s, d5u8); s += pitch; vst1_u8(s, d6u8); s += pitch; vst1_u8(s, d7u8); } return; }
static INLINE void scaledconvolve_horiz_w8( const uint8_t *src, const ptrdiff_t src_stride, uint8_t *dst, const ptrdiff_t dst_stride, const InterpKernel *const x_filters, const int x0_q4, const int x_step_q4, const int w, const int h) { DECLARE_ALIGNED(16, uint8_t, temp[8 * 8]); int x, y, z; src -= SUBPEL_TAPS / 2 - 1; // This function processes 8x8 areas. The intermediate height is not always // a multiple of 8, so force it to be a multiple of 8 here. y = (h + 7) & ~7; do { int x_q4 = x0_q4; x = 0; do { uint8x8_t d[8]; // process 8 src_x steps for (z = 0; z < 8; ++z) { const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS]; if (x_q4 & SUBPEL_MASK) { const int16x8_t filters = vld1q_s16(x_filters[x_q4 & SUBPEL_MASK]); uint8x8_t s[8]; load_u8_8x8(src_x, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], &s[7]); transpose_u8_8x8(&s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], &s[7]); d[0] = scale_filter_8(s, filters); vst1_u8(&temp[8 * z], d[0]); } else { int i; for (i = 0; i < 8; ++i) { temp[z * 8 + i] = src_x[i * src_stride + 3]; } } x_q4 += x_step_q4; } // transpose the 8x8 filters values back to dst load_u8_8x8(temp, 8, &d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7]); transpose_u8_8x8(&d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7]); vst1_u8(&dst[x + 0 * dst_stride], d[0]); vst1_u8(&dst[x + 1 * dst_stride], d[1]); vst1_u8(&dst[x + 2 * dst_stride], d[2]); vst1_u8(&dst[x + 3 * dst_stride], d[3]); vst1_u8(&dst[x + 4 * dst_stride], d[4]); vst1_u8(&dst[x + 5 * dst_stride], d[5]); vst1_u8(&dst[x + 6 * dst_stride], d[6]); vst1_u8(&dst[x + 7 * dst_stride], d[7]); x += 8; } while (x < w); src += src_stride * 8; dst += dst_stride * 8; } while (y -= 8); }
void vp9_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { uint8x8_t d0u8 = vdup_n_u8(0); uint64x1_t d1u64 = vdup_n_u64(0); (void)above; d1u64 = vld1_u64((const uint64_t *)left); d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 0); vst1_u8(dst, d0u8); dst += stride; d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 1); vst1_u8(dst, d0u8); dst += stride; d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 2); vst1_u8(dst, d0u8); dst += stride; d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 3); vst1_u8(dst, d0u8); dst += stride; d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 4); vst1_u8(dst, d0u8); dst += stride; d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 5); vst1_u8(dst, d0u8); dst += stride; d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 6); vst1_u8(dst, d0u8); dst += stride; d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 7); vst1_u8(dst, d0u8); }
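/* The same prediction in scalar form (a reference sketch, not libvpx code):
 * each destination row of the 8x8 block is one left-column pixel broadcast
 * across the row, which is exactly what the vdup_lane_u8 calls above do. */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
static void h_predictor_8x8_ref(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *left) {
    for (int r = 0; r < 8; ++r, dst += stride)
        memset(dst, left[r], 8); /* row r = left[r] repeated 8 times */
}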
static void ConvertRGBA32ToUV_NEON(const uint16_t* rgb, uint8_t* u, uint8_t* v, int width) { int i; for (i = 0; i + 8 <= width; i += 8, rgb += 4 * 8) { const uint16x8x4_t RGB = vld4q_u16((const uint16_t*)rgb); int16x8_t U, V; CONVERT_RGB_TO_UV(RGB.val[0], RGB.val[1], RGB.val[2], 2, U, V); vst1_u8(u + i, vqrshrun_n_s16(U, 2)); vst1_u8(v + i, vqrshrun_n_s16(V, 2)); } for (; i < width; i += 1, rgb += 4) { const int r = rgb[0], g = rgb[1], b = rgb[2]; u[i] = VP8RGBToU(r, g, b, YUV_HALF << 2); v[i] = VP8RGBToV(r, g, b, YUV_HALF << 2); } }
static INLINE void scaledconvolve_vert_w8( const uint8_t *src, const ptrdiff_t src_stride, uint8_t *dst, const ptrdiff_t dst_stride, const InterpKernel *const y_filters, const int y0_q4, const int y_step_q4, const int w, const int h) { int y; int y_q4 = y0_q4; src -= src_stride * (SUBPEL_TAPS / 2 - 1); y = h; do { const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride]; if (y_q4 & SUBPEL_MASK) { const int16x8_t filters = vld1q_s16(y_filters[y_q4 & SUBPEL_MASK]); uint8x8_t s[8], d; load_u8_8x8(src_y, src_stride, &s[0], &s[1], &s[2], &s[3], &s[4], &s[5], &s[6], &s[7]); d = scale_filter_8(s, filters); vst1_u8(dst, d); } else { memcpy(dst, &src_y[3 * src_stride], w); } dst += dst_stride; y_q4 += y_step_q4; } while (--y); }
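/* load_u8_8x8 is a libvpx helper used above; a sketch of the behavior it is
 * assumed to have here (eight 8-byte loads from rows `p` bytes apart): */
#include <arm_neon.h>
#include <stddef.h>
static inline void load_u8_8x8_sketch(const uint8_t *s, ptrdiff_t p,
        uint8x8_t *s0, uint8x8_t *s1, uint8x8_t *s2, uint8x8_t *s3,
        uint8x8_t *s4, uint8x8_t *s5, uint8x8_t *s6, uint8x8_t *s7) {
    *s0 = vld1_u8(s); s += p; *s1 = vld1_u8(s); s += p;
    *s2 = vld1_u8(s); s += p; *s3 = vld1_u8(s); s += p;
    *s4 = vld1_u8(s); s += p; *s5 = vld1_u8(s); s += p;
    *s6 = vld1_u8(s); s += p; *s7 = vld1_u8(s);
}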
void test_vst1u8 (void) { uint8_t *arg0_uint8_t; uint8x8_t arg1_uint8x8_t; vst1_u8 (arg0_uint8_t, arg1_uint8x8_t); }
void ComputesRGBLuminanceMask_NEON(const uint8_t *aSourceData, int32_t aSourceStride, uint8_t *aDestData, int32_t aDestStride, const IntSize &aSize, float aOpacity) {
  int32_t redFactor = 55 * aOpacity;    // 255 * 0.2125 * opacity
  int32_t greenFactor = 183 * aOpacity; // 255 * 0.7154 * opacity
  int32_t blueFactor = 18 * aOpacity;   // 255 * 0.0721 * opacity
  const uint8_t *sourcePixel = aSourceData;
  int32_t sourceOffset = aSourceStride - 4 * aSize.width;
  uint8_t *destPixel = aDestData;
  int32_t destOffset = aDestStride - aSize.width;
  int32_t remainderWidth = aSize.width % 8;
  int32_t roundedWidth = aSize.width - remainderWidth;
  uint16x8_t temp;
  uint8x8_t gray;
  uint8x8_t redVector = vdup_n_u8(redFactor);
  uint8x8_t greenVector = vdup_n_u8(greenFactor);
  uint8x8_t blueVector = vdup_n_u8(blueFactor);
  uint8x8_t fullBitVector = vdup_n_u8(255);
  uint8x8_t oneVector = vdup_n_u8(1);
  for (int32_t y = 0; y < aSize.height; y++) {
    // Calculate luminance with NEON, 8 pixels per iteration
    for (int32_t x = 0; x < roundedWidth; x += 8) {
      uint8x8x4_t argb = vld4_u8(sourcePixel);
      temp = vmull_u8(argb.val[GFX_ARGB32_OFFSET_R], redVector);         // temp = red * redFactor
      temp = vmlal_u8(temp, argb.val[GFX_ARGB32_OFFSET_G], greenVector); // temp += green * greenFactor
      temp = vmlal_u8(temp, argb.val[GFX_ARGB32_OFFSET_B], blueVector);  // temp += blue * blueFactor
      gray = vshrn_n_u16(temp, 8);                                       // gray = temp >> 8
      // Zero out pixels whose alpha is zero
      uint8x8_t alphaVector = vtst_u8(argb.val[GFX_ARGB32_OFFSET_A], fullBitVector);
      gray = vmul_u8(gray, vand_u8(alphaVector, oneVector));
      // Store the result for the 8 pixels
      vst1_u8(destPixel, gray);
      sourcePixel += 8 * 4;
      destPixel += 8;
    }
    // Handle the remaining pixels of the line on the CPU
    for (int32_t x = 0; x < remainderWidth; x++) {
      if (sourcePixel[GFX_ARGB32_OFFSET_A] > 0) {
        *destPixel = (redFactor * sourcePixel[GFX_ARGB32_OFFSET_R] +
                      greenFactor * sourcePixel[GFX_ARGB32_OFFSET_G] +
                      blueFactor * sourcePixel[GFX_ARGB32_OFFSET_B]) >> 8;
      } else {
        *destPixel = 0;
      }
      sourcePixel += 4;
      destPixel++;
    }
    // Advance to the start of the next row (skip stride padding)
    sourcePixel += sourceOffset;
    destPixel += destOffset;
  }
}
void vpx_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int i; uint8x8_t d0u8 = vdup_n_u8(0); (void)left; d0u8 = vld1_u8(above); for (i = 0; i < 8; i++, dst += stride) vst1_u8(dst, d0u8); }
void vpx_d45_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { static const uint8_t shuffle1[8] = { 1, 2, 3, 4, 5, 6, 7, 7 }; static const uint8_t shuffle2[8] = { 2, 3, 4, 5, 6, 7, 7, 7 }; const uint8x8_t sh_12345677 = vld1_u8(shuffle1); const uint8x8_t sh_23456777 = vld1_u8(shuffle2); const uint8x8_t A0 = vld1_u8(above); // top row const uint8x8_t A1 = vtbl1_u8(A0, sh_12345677); const uint8x8_t A2 = vtbl1_u8(A0, sh_23456777); const uint8x8_t avg1 = vhadd_u8(A0, A2); uint8x8_t row = vrhadd_u8(avg1, A1); int i; (void)left; for (i = 0; i < 7; ++i) { vst1_u8(dst + i * stride, row); row = vtbl1_u8(row, sh_12345677); } vst1_u8(dst + i * stride, row); }
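/* vtbl1_u8(tbl, idx) returns tbl[idx[i]] per lane (and 0 for out-of-range
 * indices), so sh_12345677 shifts a row left by one pixel while repeating
 * the last one; combined with vhadd/vrhadd this approximates the usual d45
 * (a + 2*b + c + 2) >> 2 smoothing filter. The lookup in isolation: */
#include <arm_neon.h>
static uint8x8_t shift_left_repeat_last(uint8x8_t row) {
    static const uint8_t shuffle1[8] = { 1, 2, 3, 4, 5, 6, 7, 7 };
    return vtbl1_u8(row, vld1_u8(shuffle1)); /* {r1, r2, ..., r7, r7} */
}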
static void ConvertBGRAToBGR(const uint32_t* src, int num_pixels, uint8_t* dst) { const uint32_t* const end = src + (num_pixels & ~7); const uint8x8_t shuffle0 = vld1_u8(kBGRShuffle[0]); const uint8x8_t shuffle1 = vld1_u8(kBGRShuffle[1]); const uint8x8_t shuffle2 = vld1_u8(kBGRShuffle[2]); for (; src < end; src += 8) { uint8x8x4_t pixels; INIT_VECTOR4(pixels, vld1_u8((const uint8_t*)(src + 0)), vld1_u8((const uint8_t*)(src + 2)), vld1_u8((const uint8_t*)(src + 4)), vld1_u8((const uint8_t*)(src + 6))); vst1_u8(dst + 0, vtbl4_u8(pixels, shuffle0)); vst1_u8(dst + 8, vtbl4_u8(pixels, shuffle1)); vst1_u8(dst + 16, vtbl4_u8(pixels, shuffle2)); dst += 8 * 3; } VP8LConvertBGRAToBGR_C(src, num_pixels & 7, dst); // left-overs }
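/* kBGRShuffle is defined elsewhere in libwebp. Presumably it packs the
 * B, G, R bytes of 8 BGRA pixels (the 32-byte, 4-vector table) into 24
 * output bytes by skipping every fourth (alpha) byte, i.e. tables like: */
static const uint8_t kBGRShuffle_presumed[3][8] = {
    {  0,  1,  2,  4,  5,  6,  8,  9 },
    { 10, 12, 13, 14, 16, 17, 18, 20 },
    { 21, 22, 24, 25, 26, 28, 29, 30 },
};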
static void ConvertBGR24ToY_NEON(const uint8_t* bgr, uint8_t* y, int width) { int i; for (i = 0; i + 8 <= width; i += 8, bgr += 3 * 8) { const uint8x8x3_t BGR = vld3_u8(bgr); const uint8x8_t Y = ConvertRGBToY_NEON(BGR.val[2], BGR.val[1], BGR.val[0]); vst1_u8(y + i, Y); } for (; i < width; ++i, bgr += 3) { // left-over y[i] = VP8RGBToY(bgr[2], bgr[1], bgr[0], YUV_HALF); } }
static void ConvertBGRAToRGBA(const uint32_t* src, int num_pixels, uint8_t* dst) { const uint32_t* const end = src + (num_pixels & ~1); const uint8x8_t shuffle = vld1_u8(kRGBAShuffle); for (; src < end; src += 2) { const uint8x8_t pixels = vld1_u8((uint8_t*)src); vst1_u8(dst, vtbl1_u8(pixels, shuffle)); dst += 8; } VP8LConvertBGRAToRGBA_C(src, num_pixels & 1, dst); // left-overs }
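/* kRGBAShuffle is likewise defined elsewhere; for BGRA -> RGBA it only
 * needs to swap B and R within each 4-byte pixel, two pixels per 8-byte
 * vector, so presumably: */
static const uint8_t kRGBAShuffle_presumed[8] = { 2, 1, 0, 3, 6, 5, 4, 7 };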
static void ConvertRGB24ToY_NEON(const uint8_t* rgb, uint8_t* y, int width) { int i; for (i = 0; i + 8 <= width; i += 8, rgb += 3 * 8) { const uint8x8x3_t RGB = vld3_u8(rgb); const uint8x8_t Y = ConvertRGBToY_NEON(RGB.val[0], RGB.val[1], RGB.val[2]); vst1_u8(y + i, Y); } for (; i < width; ++i, rgb += 3) { // left-over y[i] = VP8RGBToY(rgb[0], rgb[1], rgb[2], YUV_HALF); } }
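/* ConvertRGBToY_NEON and VP8RGBToY are libwebp internals. As an
 * illustration of the fixed-point form such a conversion takes (generic
 * BT.601 coefficients in Q16 here, not necessarily libwebp's exact
 * constants or offset): */
#include <stdint.h>
static uint8_t rgb_to_y_sketch(int r, int g, int b) {
    const int half = 1 << 15; /* rounding term, cf. YUV_HALF above */
    return (uint8_t)((19595 * r + 38470 * g + 7471 * b + half) >> 16);
}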
void vp8_copy_mem8x8_neon(unsigned char *src, int src_stride, unsigned char *dst, int dst_stride) { uint8x8_t vtmp; int r; for (r = 0; r < 8; ++r) { vtmp = vld1_u8(src); vst1_u8(dst, vtmp); src += src_stride; dst += dst_stride; } }
static void ConvertARGBToY_NEON(const uint32_t* argb, uint8_t* y, int width) { int i; for (i = 0; i + 8 <= width; i += 8) { const uint8x8x4_t RGB = vld4_u8((const uint8_t*)&argb[i]); const uint8x8_t Y = ConvertRGBToY_NEON(RGB.val[2], RGB.val[1], RGB.val[0]); vst1_u8(y + i, Y); } for (; i < width; ++i) { // left-over const uint32_t p = argb[i]; y[i] = VP8RGBToY((p >> 16) & 0xff, (p >> 8) & 0xff, (p >> 0) & 0xff, YUV_HALF); } }
void vpx_lpf_horizontal_8_neon(uint8_t *src, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count) { int i; uint8_t *s, *psrc; uint8x8_t dblimit, dlimit, dthresh; uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8; uint8x8_t d16u8, d17u8, d18u8; if (count == 0) // end_vpx_mblf_h_edge return; dblimit = vld1_u8(blimit); dlimit = vld1_u8(limit); dthresh = vld1_u8(thresh); psrc = src - (pitch << 2); for (i = 0; i < count; i++) { s = psrc + i * 8; d3u8 = vld1_u8(s); s += pitch; d4u8 = vld1_u8(s); s += pitch; d5u8 = vld1_u8(s); s += pitch; d6u8 = vld1_u8(s); s += pitch; d7u8 = vld1_u8(s); s += pitch; d16u8 = vld1_u8(s); s += pitch; d17u8 = vld1_u8(s); s += pitch; d18u8 = vld1_u8(s); mbloop_filter_neon(dblimit, dlimit, dthresh, d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8, &d0u8, &d1u8, &d2u8, &d3u8, &d4u8, &d5u8); s -= (pitch * 6); vst1_u8(s, d0u8); s += pitch; vst1_u8(s, d1u8); s += pitch; vst1_u8(s, d2u8); s += pitch; vst1_u8(s, d3u8); s += pitch; vst1_u8(s, d4u8); s += pitch; vst1_u8(s, d5u8); } return; }
/**
 * @brief Thread function that performs the grayscale conversion. Uses NEON instructions.
 *
 * @param arg a struct of type _thread_data holding the data to be processed
 *
 * @return NULL
 */
static void *thread_calc(void *arg) {
  struct _thread_data *data = (struct _thread_data *)arg;
  uint8x8_t rfac = vdup_n_u8(76);
  uint8x8_t gfac = vdup_n_u8(151);
  uint8x8_t bfac = vdup_n_u8(29);
  int n = data->size / 8;
  int m = data->size % 8;
  int iTemp;
  unsigned char szTemp[8];
  unsigned int *data_in = data->data_in;
  unsigned int *data_out = data->data_out;
  // Convert 8 pixels (32 bytes) per loop iteration
  while (n--) {
    uint16x8_t temp;
    uint8x8x4_t rgb = vld4_u8((unsigned char *)data_in);
    uint8x8_t result;
    temp = vmull_u8(rgb.val[0], bfac);
    temp = vmlal_u8(temp, rgb.val[1], gfac);
    temp = vmlal_u8(temp, rgb.val[2], rfac);
    result = vshrn_n_u16(temp, 8);
    vst1_u8(szTemp, result);
    iTemp = szTemp[0]; *data_out = iTemp | (iTemp << 8) | (iTemp << 16) | (*data_in & 0xff000000); data_in++; data_out++;
    iTemp = szTemp[1]; *data_out = iTemp | (iTemp << 8) | (iTemp << 16) | (*data_in & 0xff000000); data_in++; data_out++;
    iTemp = szTemp[2]; *data_out = iTemp | (iTemp << 8) | (iTemp << 16) | (*data_in & 0xff000000); data_in++; data_out++;
    iTemp = szTemp[3]; *data_out = iTemp | (iTemp << 8) | (iTemp << 16) | (*data_in & 0xff000000); data_in++; data_out++;
    iTemp = szTemp[4]; *data_out = iTemp | (iTemp << 8) | (iTemp << 16) | (*data_in & 0xff000000); data_in++; data_out++;
    iTemp = szTemp[5]; *data_out = iTemp | (iTemp << 8) | (iTemp << 16) | (*data_in & 0xff000000); data_in++; data_out++;
    iTemp = szTemp[6]; *data_out = iTemp | (iTemp << 8) | (iTemp << 16) | (*data_in & 0xff000000); data_in++; data_out++;
    iTemp = szTemp[7]; *data_out = iTemp | (iTemp << 8) | (iTemp << 16) | (*data_in & 0xff000000); data_in++; data_out++;
  }
  if (m) {
    argb8888_to_gray(data_in, data_out, m);
  }
  return NULL;
}
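/* A hypothetical scalar fallback matching the NEON path above (weights
 * 76/151/29 are approximately 0.30/0.59/0.11 in Q8; the low byte of each
 * pixel is blue, as in the vld4 deinterleave). The real argb8888_to_gray is
 * defined elsewhere; this sketch only mirrors what the vector loop computes: */
static void argb8888_to_gray_sketch(const unsigned int *in, unsigned int *out, int n) {
    while (n--) {
        unsigned int p = *in++;
        int r = (p >> 16) & 0xff, g = (p >> 8) & 0xff, b = p & 0xff;
        int y = (76 * r + 151 * g + 29 * b) >> 8;
        *out++ = y | (y << 8) | (y << 16) | (p & 0xff000000);
    }
}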
void compare_neon_ge(float *psrc1, float src2, uchar *pdst, int size) {
  // Largest start index from which a full block of 8 floats can be loaded.
  int vec_end = size - 7;
  float32x4_t vsrc2 = vdupq_n_f32(src2);
  int i = 0;
  for (; i < vec_end; i += 8) {
    float32x4_t vsrc1_32x4 = vld1q_f32(psrc1 + i);
    float32x4_t vsrc2_32x4 = vld1q_f32(psrc1 + i + 4);
    uint32x4_t vdst1 = vcgeq_f32(vsrc1_32x4, vsrc2);
    uint32x4_t vdst2 = vcgeq_f32(vsrc2_32x4, vsrc2);
    // Narrow the two 32-bit masks (all-ones or zero per lane) to 8-bit 255/0.
    uint16x4_t vdst1_16x4 = vmovn_u32(vdst1);
    uint16x4_t vdst2_16x4 = vmovn_u32(vdst2);
    uint16x8_t vdst_16x8 = vcombine_u16(vdst1_16x4, vdst2_16x4);
    uint8x8_t vdst_8x8 = vmovn_u16(vdst_16x8);
    vst1_u8(pdst + i, vdst_8x8);
  }
  // Scalar tail for the last size % 8 elements.
  for (; i < size; i++) {
    pdst[i] = (psrc1[i] >= src2) ? 255 : 0;
  }
}
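/* Example use of compare_neon_ge: threshold 10 floats against 0.5, writing
 * 255 where psrc1[i] >= src2 and 0 otherwise (buffer contents illustrative;
 * `uchar` is assumed from the surrounding file). The vector loop covers the
 * first 8 elements, the scalar tail the remaining 2. */
#include <stdio.h>
void compare_demo(void) {
    float in[10] = { 0.1f, 0.9f, 0.5f, 0.4f, 0.6f, 1.0f, 0.0f, 0.5f, 0.2f, 0.8f };
    uchar out[10];
    compare_neon_ge(in, 0.5f, out, 10);
    for (int i = 0; i < 10; i++) printf("%d ", out[i]); /* 0 255 255 0 255 255 0 255 0 255 */
    printf("\n");
}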
// Process a block exactly 8 wide and any height. static void var_filter_block2d_bil_w8(const uint8_t *src_ptr, uint8_t *output_ptr, unsigned int src_pixels_per_line, int pixel_step, unsigned int output_height, const uint8_t *filter) { const uint8x8_t f0 = vdup_n_u8(filter[0]); const uint8x8_t f1 = vdup_n_u8(filter[1]); unsigned int i; for (i = 0; i < output_height; ++i) { const uint8x8_t src_0 = vld1_u8(&src_ptr[0]); const uint8x8_t src_1 = vld1_u8(&src_ptr[pixel_step]); const uint16x8_t a = vmull_u8(src_0, f0); const uint16x8_t b = vmlal_u8(a, src_1, f1); const uint8x8_t out = vrshrn_n_u16(b, FILTER_BITS); vst1_u8(output_ptr, out); src_ptr += src_pixels_per_line; output_ptr += 8; } }
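/* Scalar reference for the two-tap bilinear filter above (a sketch, not
 * libvpx's implementation), assuming FILTER_BITS == 7 so the rounding shift
 * adds 64: out = (src0 * f0 + src1 * f1 + 64) >> 7. */
#include <stdint.h>
static void bil_w8_ref(const uint8_t *src, uint8_t *out, unsigned src_stride,
                       int pixel_step, unsigned height, const uint8_t *filter) {
    for (unsigned i = 0; i < height; ++i, src += src_stride, out += 8)
        for (int j = 0; j < 8; ++j)
            out[j] = (uint8_t)((src[j] * filter[0] +
                                src[j + pixel_step] * filter[1] + 64) >> 7);
}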
static void var_filter_block2d_bil_w8(const uint8_t *src_ptr, uint8_t *output_ptr, unsigned int src_pixels_per_line, int pixel_step, unsigned int output_height, unsigned int output_width, const uint16_t *vpx_filter) { const uint8x8_t f0 = vmov_n_u8((uint8_t)vpx_filter[0]); const uint8x8_t f1 = vmov_n_u8((uint8_t)vpx_filter[1]); unsigned int i; for (i = 0; i < output_height; ++i) { const uint8x8_t src_0 = vld1_u8(&src_ptr[0]); const uint8x8_t src_1 = vld1_u8(&src_ptr[pixel_step]); const uint16x8_t a = vmull_u8(src_0, f0); const uint16x8_t b = vmlal_u8(a, src_1, f1); const uint8x8_t out = vrshrn_n_u16(b, FILTER_BITS); vst1_u8(&output_ptr[0], out); // Next row... src_ptr += src_pixels_per_line; output_ptr += output_width; } }
inline void vst1(u8 * ptr, const uint8x8_t & v) { return vst1_u8(ptr, v); }
void ne10_img_vresize_linear_neon (const int** src, unsigned char* dst, const short* beta, int width) { const int *S0 = src[0], *S1 = src[1]; int32x4_t qS0_0123, qS0_4567, qS1_0123, qS1_4567; int32x4_t qT_0123, qT_4567; int16x4_t dT_0123, dT_4567; uint16x8_t qT_01234567; uint8x8_t dT_01234567, dDst_01234567; int32x2_t dBeta; dBeta = vset_lane_s32 ( (int) (beta[0]), dBeta, 0); dBeta = vset_lane_s32 ( (int) (beta[1]), dBeta, 1); int32x4_t qDelta, qMin, qMax; qDelta = vdupq_n_s32 (DELTA); qMin = vdupq_n_s32 (0); qMax = vdupq_n_s32 (255); int x = 0; for (; x <= width - 8; x += 8) { qS0_0123 = vld1q_s32 (&S0[x]); qS0_4567 = vld1q_s32 (&S0[x + 4]); qS1_0123 = vld1q_s32 (&S1[x]); qS1_4567 = vld1q_s32 (&S1[x + 4]); qT_0123 = vmulq_lane_s32 (qS0_0123, dBeta, 0); qT_4567 = vmulq_lane_s32 (qS0_4567, dBeta, 0); qT_0123 = vmlaq_lane_s32 (qT_0123, qS1_0123, dBeta, 1); qT_4567 = vmlaq_lane_s32 (qT_4567, qS1_4567, dBeta, 1); qT_0123 = vaddq_s32 (qT_0123, qDelta); qT_4567 = vaddq_s32 (qT_4567, qDelta); qT_0123 = vshrq_n_s32 (qT_0123, BITS); qT_4567 = vshrq_n_s32 (qT_4567, BITS); qT_0123 = vmaxq_s32 (qT_0123, qMin); qT_4567 = vmaxq_s32 (qT_4567, qMin); qT_0123 = vminq_s32 (qT_0123, qMax); qT_4567 = vminq_s32 (qT_4567, qMax); dT_0123 = vmovn_s32 (qT_0123); dT_4567 = vmovn_s32 (qT_4567); qT_01234567 = vreinterpretq_u16_s16 (vcombine_s16 (dT_0123, dT_4567)); dT_01234567 = vmovn_u16 (qT_01234567); vst1_u8 (&dst[x], dT_01234567); } if (x < width) { uint8x8_t dMask; dMask = vld1_u8 ( (uint8_t *) (&ne10_img_vresize_linear_mask_residual_table[ (width - x - 1)])); dDst_01234567 = vld1_u8 (&dst[x]); qS0_0123 = vld1q_s32 (&S0[x]); qS0_4567 = vld1q_s32 (&S0[x + 4]); qS1_0123 = vld1q_s32 (&S1[x]); qS1_4567 = vld1q_s32 (&S1[x + 4]); qT_0123 = vmulq_lane_s32 (qS0_0123, dBeta, 0); qT_4567 = vmulq_lane_s32 (qS0_4567, dBeta, 0); qT_0123 = vmlaq_lane_s32 (qT_0123, qS1_0123, dBeta, 1); qT_4567 = vmlaq_lane_s32 (qT_4567, qS1_4567, dBeta, 1); qT_0123 = vaddq_s32 (qT_0123, qDelta); qT_4567 = vaddq_s32 (qT_4567, qDelta); qT_0123 = vshrq_n_s32 (qT_0123, BITS); qT_4567 = vshrq_n_s32 (qT_4567, BITS); qT_0123 = vmaxq_s32 (qT_0123, qMin); qT_4567 = vmaxq_s32 (qT_4567, qMin); qT_0123 = vminq_s32 (qT_0123, qMax); qT_4567 = vminq_s32 (qT_4567, qMax); dT_0123 = vmovn_s32 (qT_0123); dT_4567 = vmovn_s32 (qT_4567); qT_01234567 = vreinterpretq_u16_s16 (vcombine_s16 (dT_0123, dT_4567)); dT_01234567 = vmovn_u16 (qT_01234567); dMask = vbsl_u8 (dMask, dT_01234567, dDst_01234567); vst1_u8 (&dst[x], dMask); } }
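/* The per-element operation the vector code above implements, in scalar
 * form (DELTA is presumably the rounding half, 1 << (BITS - 1), as in the
 * surrounding Ne10 code): */
static unsigned char vresize_linear_one(int s0, int s1, int b0, int b1) {
    int t = (s0 * b0 + s1 * b1 + DELTA) >> BITS;
    if (t < 0) t = 0;
    if (t > 255) t = 255;
    return (unsigned char)t;
}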
void vp8_sixtap_predict8x8_neon( unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch) { unsigned char *src, *tmpp; unsigned char tmp[64]; int i; uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8; uint8x8_t d18u8, d19u8, d20u8, d21u8, d22u8, d23u8, d24u8, d25u8; uint8x8_t d26u8, d27u8, d28u8, d29u8, d30u8, d31u8; int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8; uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16; uint16x8_t q8u16, q9u16, q10u16, q11u16, q12u16; int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16; int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16; uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q9u8, q10u8, q11u8, q12u8; if (xoffset == 0) { // secondpass_filter8x8_only // load second_pass filter dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]); d0s8 = vdup_lane_s8(dtmps8, 0); d1s8 = vdup_lane_s8(dtmps8, 1); d2s8 = vdup_lane_s8(dtmps8, 2); d3s8 = vdup_lane_s8(dtmps8, 3); d4s8 = vdup_lane_s8(dtmps8, 4); d5s8 = vdup_lane_s8(dtmps8, 5); d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); // load src data src = src_ptr - src_pixels_per_line * 2; d18u8 = vld1_u8(src); src += src_pixels_per_line; d19u8 = vld1_u8(src); src += src_pixels_per_line; d20u8 = vld1_u8(src); src += src_pixels_per_line; d21u8 = vld1_u8(src); src += src_pixels_per_line; d22u8 = vld1_u8(src); src += src_pixels_per_line; d23u8 = vld1_u8(src); src += src_pixels_per_line; d24u8 = vld1_u8(src); src += src_pixels_per_line; d25u8 = vld1_u8(src); src += src_pixels_per_line; d26u8 = vld1_u8(src); src += src_pixels_per_line; d27u8 = vld1_u8(src); src += src_pixels_per_line; d28u8 = vld1_u8(src); src += src_pixels_per_line; d29u8 = vld1_u8(src); src += src_pixels_per_line; d30u8 = vld1_u8(src); for (i = 2; i > 0; i--) { q3u16 = vmull_u8(d18u8, d0u8); q4u16 = vmull_u8(d19u8, d0u8); q5u16 = vmull_u8(d20u8, d0u8); q6u16 = vmull_u8(d21u8, d0u8); q3u16 = vmlsl_u8(q3u16, d19u8, d1u8); q4u16 = vmlsl_u8(q4u16, d20u8, d1u8); q5u16 = vmlsl_u8(q5u16, d21u8, d1u8); q6u16 = vmlsl_u8(q6u16, d22u8, d1u8); q3u16 = vmlsl_u8(q3u16, d22u8, d4u8); q4u16 = vmlsl_u8(q4u16, d23u8, d4u8); q5u16 = vmlsl_u8(q5u16, d24u8, d4u8); q6u16 = vmlsl_u8(q6u16, d25u8, d4u8); q3u16 = vmlal_u8(q3u16, d20u8, d2u8); q4u16 = vmlal_u8(q4u16, d21u8, d2u8); q5u16 = vmlal_u8(q5u16, d22u8, d2u8); q6u16 = vmlal_u8(q6u16, d23u8, d2u8); q3u16 = vmlal_u8(q3u16, d23u8, d5u8); q4u16 = vmlal_u8(q4u16, d24u8, d5u8); q5u16 = vmlal_u8(q5u16, d25u8, d5u8); q6u16 = vmlal_u8(q6u16, d26u8, d5u8); q7u16 = vmull_u8(d21u8, d3u8); q8u16 = vmull_u8(d22u8, d3u8); q9u16 = vmull_u8(d23u8, d3u8); q10u16 = vmull_u8(d24u8, d3u8); q3s16 = vreinterpretq_s16_u16(q3u16); q4s16 = vreinterpretq_s16_u16(q4u16); q5s16 = vreinterpretq_s16_u16(q5u16); q6s16 = vreinterpretq_s16_u16(q6u16); q7s16 = vreinterpretq_s16_u16(q7u16); q8s16 = vreinterpretq_s16_u16(q8u16); q9s16 = vreinterpretq_s16_u16(q9u16); q10s16 = vreinterpretq_s16_u16(q10u16); q7s16 = vqaddq_s16(q7s16, q3s16); q8s16 = vqaddq_s16(q8s16, q4s16); q9s16 = vqaddq_s16(q9s16, q5s16); q10s16 = vqaddq_s16(q10s16, q6s16); d6u8 = vqrshrun_n_s16(q7s16, 7); d7u8 = vqrshrun_n_s16(q8s16, 7); d8u8 = vqrshrun_n_s16(q9s16, 7); d9u8 = vqrshrun_n_s16(q10s16, 7); d18u8 = d22u8; d19u8 = d23u8; d20u8 = d24u8; d21u8 = d25u8; d22u8 = d26u8; d23u8 = d27u8; d24u8 = d28u8; d25u8 = d29u8; d26u8 = d30u8; 
vst1_u8(dst_ptr, d6u8); dst_ptr += dst_pitch; vst1_u8(dst_ptr, d7u8); dst_ptr += dst_pitch; vst1_u8(dst_ptr, d8u8); dst_ptr += dst_pitch; vst1_u8(dst_ptr, d9u8); dst_ptr += dst_pitch; } return; } // load first_pass filter dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]); d0s8 = vdup_lane_s8(dtmps8, 0); d1s8 = vdup_lane_s8(dtmps8, 1); d2s8 = vdup_lane_s8(dtmps8, 2); d3s8 = vdup_lane_s8(dtmps8, 3); d4s8 = vdup_lane_s8(dtmps8, 4); d5s8 = vdup_lane_s8(dtmps8, 5); d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); // First pass: output_height lines x output_width columns (9x4) if (yoffset == 0) // firstpass_filter4x4_only src = src_ptr - 2; else src = src_ptr - 2 - (src_pixels_per_line * 2); tmpp = tmp; for (i = 2; i > 0; i--) { q3u8 = vld1q_u8(src); src += src_pixels_per_line; q4u8 = vld1q_u8(src); src += src_pixels_per_line; q5u8 = vld1q_u8(src); src += src_pixels_per_line; q6u8 = vld1q_u8(src); src += src_pixels_per_line; __builtin_prefetch(src); __builtin_prefetch(src + src_pixels_per_line); __builtin_prefetch(src + src_pixels_per_line * 2); q7u16 = vmull_u8(vget_low_u8(q3u8), d0u8); q8u16 = vmull_u8(vget_low_u8(q4u8), d0u8); q9u16 = vmull_u8(vget_low_u8(q5u8), d0u8); q10u16 = vmull_u8(vget_low_u8(q6u8), d0u8); d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1); d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1); d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1); d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1); q7u16 = vmlsl_u8(q7u16, d28u8, d1u8); q8u16 = vmlsl_u8(q8u16, d29u8, d1u8); q9u16 = vmlsl_u8(q9u16, d30u8, d1u8); q10u16 = vmlsl_u8(q10u16, d31u8, d1u8); d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4); d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4); d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4); d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4); q7u16 = vmlsl_u8(q7u16, d28u8, d4u8); q8u16 = vmlsl_u8(q8u16, d29u8, d4u8); q9u16 = vmlsl_u8(q9u16, d30u8, d4u8); q10u16 = vmlsl_u8(q10u16, d31u8, d4u8); d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2); d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2); d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2); d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2); q7u16 = vmlal_u8(q7u16, d28u8, d2u8); q8u16 = vmlal_u8(q8u16, d29u8, d2u8); q9u16 = vmlal_u8(q9u16, d30u8, d2u8); q10u16 = vmlal_u8(q10u16, d31u8, d2u8); d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5); d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5); d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5); d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5); q7u16 = vmlal_u8(q7u16, d28u8, d5u8); q8u16 = vmlal_u8(q8u16, d29u8, d5u8); q9u16 = vmlal_u8(q9u16, d30u8, d5u8); q10u16 = vmlal_u8(q10u16, d31u8, d5u8); d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3); d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3); d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3); d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3); q3u16 = vmull_u8(d28u8, d3u8); q4u16 = vmull_u8(d29u8, d3u8); q5u16 = vmull_u8(d30u8, d3u8); q6u16 = vmull_u8(d31u8, d3u8); q3s16 = vreinterpretq_s16_u16(q3u16); q4s16 = vreinterpretq_s16_u16(q4u16); q5s16 = vreinterpretq_s16_u16(q5u16); q6s16 = vreinterpretq_s16_u16(q6u16); q7s16 = vreinterpretq_s16_u16(q7u16); q8s16 = 
vreinterpretq_s16_u16(q8u16); q9s16 = vreinterpretq_s16_u16(q9u16); q10s16 = vreinterpretq_s16_u16(q10u16); q7s16 = vqaddq_s16(q7s16, q3s16); q8s16 = vqaddq_s16(q8s16, q4s16); q9s16 = vqaddq_s16(q9s16, q5s16); q10s16 = vqaddq_s16(q10s16, q6s16); d22u8 = vqrshrun_n_s16(q7s16, 7); d23u8 = vqrshrun_n_s16(q8s16, 7); d24u8 = vqrshrun_n_s16(q9s16, 7); d25u8 = vqrshrun_n_s16(q10s16, 7); if (yoffset == 0) { // firstpass_filter8x4_only vst1_u8(dst_ptr, d22u8); dst_ptr += dst_pitch; vst1_u8(dst_ptr, d23u8); dst_ptr += dst_pitch; vst1_u8(dst_ptr, d24u8); dst_ptr += dst_pitch; vst1_u8(dst_ptr, d25u8); dst_ptr += dst_pitch; } else { vst1_u8(tmpp, d22u8); tmpp += 8; vst1_u8(tmpp, d23u8); tmpp += 8; vst1_u8(tmpp, d24u8); tmpp += 8; vst1_u8(tmpp, d25u8); tmpp += 8; } } if (yoffset == 0) return; // First Pass on rest 5-line data q3u8 = vld1q_u8(src); src += src_pixels_per_line; q4u8 = vld1q_u8(src); src += src_pixels_per_line; q5u8 = vld1q_u8(src); src += src_pixels_per_line; q6u8 = vld1q_u8(src); src += src_pixels_per_line; q7u8 = vld1q_u8(src); q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8); q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8); q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8); q11u16 = vmull_u8(vget_low_u8(q6u8), d0u8); q12u16 = vmull_u8(vget_low_u8(q7u8), d0u8); d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1); d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1); d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1); d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1); d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 1); q8u16 = vmlsl_u8(q8u16, d27u8, d1u8); q9u16 = vmlsl_u8(q9u16, d28u8, d1u8); q10u16 = vmlsl_u8(q10u16, d29u8, d1u8); q11u16 = vmlsl_u8(q11u16, d30u8, d1u8); q12u16 = vmlsl_u8(q12u16, d31u8, d1u8); d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4); d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4); d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4); d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4); d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 4); q8u16 = vmlsl_u8(q8u16, d27u8, d4u8); q9u16 = vmlsl_u8(q9u16, d28u8, d4u8); q10u16 = vmlsl_u8(q10u16, d29u8, d4u8); q11u16 = vmlsl_u8(q11u16, d30u8, d4u8); q12u16 = vmlsl_u8(q12u16, d31u8, d4u8); d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2); d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2); d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2); d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2); d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 2); q8u16 = vmlal_u8(q8u16, d27u8, d2u8); q9u16 = vmlal_u8(q9u16, d28u8, d2u8); q10u16 = vmlal_u8(q10u16, d29u8, d2u8); q11u16 = vmlal_u8(q11u16, d30u8, d2u8); q12u16 = vmlal_u8(q12u16, d31u8, d2u8); d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5); d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5); d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5); d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5); d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 5); q8u16 = vmlal_u8(q8u16, d27u8, d5u8); q9u16 = vmlal_u8(q9u16, d28u8, d5u8); q10u16 = vmlal_u8(q10u16, d29u8, d5u8); q11u16 = vmlal_u8(q11u16, d30u8, d5u8); q12u16 = vmlal_u8(q12u16, d31u8, d5u8); d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3); d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3); d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3); d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3); d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 3); q3u16 = vmull_u8(d27u8, d3u8); 
q4u16 = vmull_u8(d28u8, d3u8); q5u16 = vmull_u8(d29u8, d3u8); q6u16 = vmull_u8(d30u8, d3u8); q7u16 = vmull_u8(d31u8, d3u8); q3s16 = vreinterpretq_s16_u16(q3u16); q4s16 = vreinterpretq_s16_u16(q4u16); q5s16 = vreinterpretq_s16_u16(q5u16); q6s16 = vreinterpretq_s16_u16(q6u16); q7s16 = vreinterpretq_s16_u16(q7u16); q8s16 = vreinterpretq_s16_u16(q8u16); q9s16 = vreinterpretq_s16_u16(q9u16); q10s16 = vreinterpretq_s16_u16(q10u16); q11s16 = vreinterpretq_s16_u16(q11u16); q12s16 = vreinterpretq_s16_u16(q12u16); q8s16 = vqaddq_s16(q8s16, q3s16); q9s16 = vqaddq_s16(q9s16, q4s16); q10s16 = vqaddq_s16(q10s16, q5s16); q11s16 = vqaddq_s16(q11s16, q6s16); q12s16 = vqaddq_s16(q12s16, q7s16); d26u8 = vqrshrun_n_s16(q8s16, 7); d27u8 = vqrshrun_n_s16(q9s16, 7); d28u8 = vqrshrun_n_s16(q10s16, 7); d29u8 = vqrshrun_n_s16(q11s16, 7); d30u8 = vqrshrun_n_s16(q12s16, 7); // Second pass: 8x8 dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]); d0s8 = vdup_lane_s8(dtmps8, 0); d1s8 = vdup_lane_s8(dtmps8, 1); d2s8 = vdup_lane_s8(dtmps8, 2); d3s8 = vdup_lane_s8(dtmps8, 3); d4s8 = vdup_lane_s8(dtmps8, 4); d5s8 = vdup_lane_s8(dtmps8, 5); d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); tmpp = tmp; q9u8 = vld1q_u8(tmpp); tmpp += 16; q10u8 = vld1q_u8(tmpp); tmpp += 16; q11u8 = vld1q_u8(tmpp); tmpp += 16; q12u8 = vld1q_u8(tmpp); d18u8 = vget_low_u8(q9u8); d19u8 = vget_high_u8(q9u8); d20u8 = vget_low_u8(q10u8); d21u8 = vget_high_u8(q10u8); d22u8 = vget_low_u8(q11u8); d23u8 = vget_high_u8(q11u8); d24u8 = vget_low_u8(q12u8); d25u8 = vget_high_u8(q12u8); for (i = 2; i > 0; i--) { q3u16 = vmull_u8(d18u8, d0u8); q4u16 = vmull_u8(d19u8, d0u8); q5u16 = vmull_u8(d20u8, d0u8); q6u16 = vmull_u8(d21u8, d0u8); q3u16 = vmlsl_u8(q3u16, d19u8, d1u8); q4u16 = vmlsl_u8(q4u16, d20u8, d1u8); q5u16 = vmlsl_u8(q5u16, d21u8, d1u8); q6u16 = vmlsl_u8(q6u16, d22u8, d1u8); q3u16 = vmlsl_u8(q3u16, d22u8, d4u8); q4u16 = vmlsl_u8(q4u16, d23u8, d4u8); q5u16 = vmlsl_u8(q5u16, d24u8, d4u8); q6u16 = vmlsl_u8(q6u16, d25u8, d4u8); q3u16 = vmlal_u8(q3u16, d20u8, d2u8); q4u16 = vmlal_u8(q4u16, d21u8, d2u8); q5u16 = vmlal_u8(q5u16, d22u8, d2u8); q6u16 = vmlal_u8(q6u16, d23u8, d2u8); q3u16 = vmlal_u8(q3u16, d23u8, d5u8); q4u16 = vmlal_u8(q4u16, d24u8, d5u8); q5u16 = vmlal_u8(q5u16, d25u8, d5u8); q6u16 = vmlal_u8(q6u16, d26u8, d5u8); q7u16 = vmull_u8(d21u8, d3u8); q8u16 = vmull_u8(d22u8, d3u8); q9u16 = vmull_u8(d23u8, d3u8); q10u16 = vmull_u8(d24u8, d3u8); q3s16 = vreinterpretq_s16_u16(q3u16); q4s16 = vreinterpretq_s16_u16(q4u16); q5s16 = vreinterpretq_s16_u16(q5u16); q6s16 = vreinterpretq_s16_u16(q6u16); q7s16 = vreinterpretq_s16_u16(q7u16); q8s16 = vreinterpretq_s16_u16(q8u16); q9s16 = vreinterpretq_s16_u16(q9u16); q10s16 = vreinterpretq_s16_u16(q10u16); q7s16 = vqaddq_s16(q7s16, q3s16); q8s16 = vqaddq_s16(q8s16, q4s16); q9s16 = vqaddq_s16(q9s16, q5s16); q10s16 = vqaddq_s16(q10s16, q6s16); d6u8 = vqrshrun_n_s16(q7s16, 7); d7u8 = vqrshrun_n_s16(q8s16, 7); d8u8 = vqrshrun_n_s16(q9s16, 7); d9u8 = vqrshrun_n_s16(q10s16, 7); d18u8 = d22u8; d19u8 = d23u8; d20u8 = d24u8; d21u8 = d25u8; d22u8 = d26u8; d23u8 = d27u8; d24u8 = d28u8; d25u8 = d29u8; d26u8 = d30u8; vst1_u8(dst_ptr, d6u8); dst_ptr += dst_pitch; vst1_u8(dst_ptr, d7u8); dst_ptr += dst_pitch; vst1_u8(dst_ptr, d8u8); dst_ptr += dst_pitch; vst1_u8(dst_ptr, d9u8); dst_ptr += dst_pitch; } return; }
void vp8_sixtap_predict16x16_neon( unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch) { unsigned char *src, *src_tmp, *dst, *tmpp; unsigned char tmp[336]; int i, j; uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8; uint8x8_t d10u8, d11u8, d12u8, d13u8, d14u8, d15u8, d18u8, d19u8; uint8x8_t d20u8, d21u8, d22u8, d23u8, d24u8, d25u8, d26u8, d27u8; uint8x8_t d28u8, d29u8, d30u8, d31u8; int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8; uint8x16_t q3u8, q4u8; uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16, q8u16, q9u16, q10u16; uint16x8_t q11u16, q12u16, q13u16, q15u16; int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16, q8s16, q9s16, q10s16; int16x8_t q11s16, q12s16, q13s16, q15s16; if (xoffset == 0) { // secondpass_filter8x8_only // load second_pass filter dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]); d0s8 = vdup_lane_s8(dtmps8, 0); d1s8 = vdup_lane_s8(dtmps8, 1); d2s8 = vdup_lane_s8(dtmps8, 2); d3s8 = vdup_lane_s8(dtmps8, 3); d4s8 = vdup_lane_s8(dtmps8, 4); d5s8 = vdup_lane_s8(dtmps8, 5); d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); // load src data src_tmp = src_ptr - src_pixels_per_line * 2; for (i = 0; i < 2; i++) { src = src_tmp + i * 8; dst = dst_ptr + i * 8; d18u8 = vld1_u8(src); src += src_pixels_per_line; d19u8 = vld1_u8(src); src += src_pixels_per_line; d20u8 = vld1_u8(src); src += src_pixels_per_line; d21u8 = vld1_u8(src); src += src_pixels_per_line; d22u8 = vld1_u8(src); src += src_pixels_per_line; for (j = 0; j < 4; j++) { d23u8 = vld1_u8(src); src += src_pixels_per_line; d24u8 = vld1_u8(src); src += src_pixels_per_line; d25u8 = vld1_u8(src); src += src_pixels_per_line; d26u8 = vld1_u8(src); src += src_pixels_per_line; q3u16 = vmull_u8(d18u8, d0u8); q4u16 = vmull_u8(d19u8, d0u8); q5u16 = vmull_u8(d20u8, d0u8); q6u16 = vmull_u8(d21u8, d0u8); q3u16 = vmlsl_u8(q3u16, d19u8, d1u8); q4u16 = vmlsl_u8(q4u16, d20u8, d1u8); q5u16 = vmlsl_u8(q5u16, d21u8, d1u8); q6u16 = vmlsl_u8(q6u16, d22u8, d1u8); q3u16 = vmlsl_u8(q3u16, d22u8, d4u8); q4u16 = vmlsl_u8(q4u16, d23u8, d4u8); q5u16 = vmlsl_u8(q5u16, d24u8, d4u8); q6u16 = vmlsl_u8(q6u16, d25u8, d4u8); q3u16 = vmlal_u8(q3u16, d20u8, d2u8); q4u16 = vmlal_u8(q4u16, d21u8, d2u8); q5u16 = vmlal_u8(q5u16, d22u8, d2u8); q6u16 = vmlal_u8(q6u16, d23u8, d2u8); q3u16 = vmlal_u8(q3u16, d23u8, d5u8); q4u16 = vmlal_u8(q4u16, d24u8, d5u8); q5u16 = vmlal_u8(q5u16, d25u8, d5u8); q6u16 = vmlal_u8(q6u16, d26u8, d5u8); q7u16 = vmull_u8(d21u8, d3u8); q8u16 = vmull_u8(d22u8, d3u8); q9u16 = vmull_u8(d23u8, d3u8); q10u16 = vmull_u8(d24u8, d3u8); q3s16 = vreinterpretq_s16_u16(q3u16); q4s16 = vreinterpretq_s16_u16(q4u16); q5s16 = vreinterpretq_s16_u16(q5u16); q6s16 = vreinterpretq_s16_u16(q6u16); q7s16 = vreinterpretq_s16_u16(q7u16); q8s16 = vreinterpretq_s16_u16(q8u16); q9s16 = vreinterpretq_s16_u16(q9u16); q10s16 = vreinterpretq_s16_u16(q10u16); q7s16 = vqaddq_s16(q7s16, q3s16); q8s16 = vqaddq_s16(q8s16, q4s16); q9s16 = vqaddq_s16(q9s16, q5s16); q10s16 = vqaddq_s16(q10s16, q6s16); d6u8 = vqrshrun_n_s16(q7s16, 7); d7u8 = vqrshrun_n_s16(q8s16, 7); d8u8 = vqrshrun_n_s16(q9s16, 7); d9u8 = vqrshrun_n_s16(q10s16, 7); d18u8 = d22u8; d19u8 = d23u8; d20u8 = d24u8; d21u8 = d25u8; d22u8 = d26u8; vst1_u8(dst, d6u8); dst += dst_pitch; vst1_u8(dst, d7u8); dst += dst_pitch; vst1_u8(dst, d8u8); dst += 
dst_pitch; vst1_u8(dst, d9u8); dst += dst_pitch; } } return; } // load first_pass filter dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]); d0s8 = vdup_lane_s8(dtmps8, 0); d1s8 = vdup_lane_s8(dtmps8, 1); d2s8 = vdup_lane_s8(dtmps8, 2); d3s8 = vdup_lane_s8(dtmps8, 3); d4s8 = vdup_lane_s8(dtmps8, 4); d5s8 = vdup_lane_s8(dtmps8, 5); d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); // First pass: output_height lines x output_width columns (9x4) if (yoffset == 0) { // firstpass_filter4x4_only src = src_ptr - 2; dst = dst_ptr; for (i = 0; i < 8; i++) { d6u8 = vld1_u8(src); d7u8 = vld1_u8(src + 8); d8u8 = vld1_u8(src + 16); src += src_pixels_per_line; d9u8 = vld1_u8(src); d10u8 = vld1_u8(src + 8); d11u8 = vld1_u8(src + 16); src += src_pixels_per_line; __builtin_prefetch(src); __builtin_prefetch(src + src_pixels_per_line); q6u16 = vmull_u8(d6u8, d0u8); q7u16 = vmull_u8(d7u8, d0u8); q8u16 = vmull_u8(d9u8, d0u8); q9u16 = vmull_u8(d10u8, d0u8); d20u8 = vext_u8(d6u8, d7u8, 1); d21u8 = vext_u8(d9u8, d10u8, 1); d22u8 = vext_u8(d7u8, d8u8, 1); d23u8 = vext_u8(d10u8, d11u8, 1); d24u8 = vext_u8(d6u8, d7u8, 4); d25u8 = vext_u8(d9u8, d10u8, 4); d26u8 = vext_u8(d7u8, d8u8, 4); d27u8 = vext_u8(d10u8, d11u8, 4); d28u8 = vext_u8(d6u8, d7u8, 5); d29u8 = vext_u8(d9u8, d10u8, 5); q6u16 = vmlsl_u8(q6u16, d20u8, d1u8); q8u16 = vmlsl_u8(q8u16, d21u8, d1u8); q7u16 = vmlsl_u8(q7u16, d22u8, d1u8); q9u16 = vmlsl_u8(q9u16, d23u8, d1u8); q6u16 = vmlsl_u8(q6u16, d24u8, d4u8); q8u16 = vmlsl_u8(q8u16, d25u8, d4u8); q7u16 = vmlsl_u8(q7u16, d26u8, d4u8); q9u16 = vmlsl_u8(q9u16, d27u8, d4u8); q6u16 = vmlal_u8(q6u16, d28u8, d5u8); q8u16 = vmlal_u8(q8u16, d29u8, d5u8); d20u8 = vext_u8(d7u8, d8u8, 5); d21u8 = vext_u8(d10u8, d11u8, 5); d22u8 = vext_u8(d6u8, d7u8, 2); d23u8 = vext_u8(d9u8, d10u8, 2); d24u8 = vext_u8(d7u8, d8u8, 2); d25u8 = vext_u8(d10u8, d11u8, 2); d26u8 = vext_u8(d6u8, d7u8, 3); d27u8 = vext_u8(d9u8, d10u8, 3); d28u8 = vext_u8(d7u8, d8u8, 3); d29u8 = vext_u8(d10u8, d11u8, 3); q7u16 = vmlal_u8(q7u16, d20u8, d5u8); q9u16 = vmlal_u8(q9u16, d21u8, d5u8); q6u16 = vmlal_u8(q6u16, d22u8, d2u8); q8u16 = vmlal_u8(q8u16, d23u8, d2u8); q7u16 = vmlal_u8(q7u16, d24u8, d2u8); q9u16 = vmlal_u8(q9u16, d25u8, d2u8); q10u16 = vmull_u8(d26u8, d3u8); q11u16 = vmull_u8(d27u8, d3u8); q12u16 = vmull_u8(d28u8, d3u8); q15u16 = vmull_u8(d29u8, d3u8); q6s16 = vreinterpretq_s16_u16(q6u16); q7s16 = vreinterpretq_s16_u16(q7u16); q8s16 = vreinterpretq_s16_u16(q8u16); q9s16 = vreinterpretq_s16_u16(q9u16); q10s16 = vreinterpretq_s16_u16(q10u16); q11s16 = vreinterpretq_s16_u16(q11u16); q12s16 = vreinterpretq_s16_u16(q12u16); q15s16 = vreinterpretq_s16_u16(q15u16); q6s16 = vqaddq_s16(q6s16, q10s16); q8s16 = vqaddq_s16(q8s16, q11s16); q7s16 = vqaddq_s16(q7s16, q12s16); q9s16 = vqaddq_s16(q9s16, q15s16); d6u8 = vqrshrun_n_s16(q6s16, 7); d7u8 = vqrshrun_n_s16(q7s16, 7); d8u8 = vqrshrun_n_s16(q8s16, 7); d9u8 = vqrshrun_n_s16(q9s16, 7); q3u8 = vcombine_u8(d6u8, d7u8); q4u8 = vcombine_u8(d8u8, d9u8); vst1q_u8(dst, q3u8); dst += dst_pitch; vst1q_u8(dst, q4u8); dst += dst_pitch; } return; } src = src_ptr - 2 - src_pixels_per_line * 2; tmpp = tmp; for (i = 0; i < 7; i++) { d6u8 = vld1_u8(src); d7u8 = vld1_u8(src + 8); d8u8 = vld1_u8(src + 16); src += src_pixels_per_line; d9u8 = vld1_u8(src); d10u8 = vld1_u8(src + 8); d11u8 = vld1_u8(src + 16); src += 
src_pixels_per_line; d12u8 = vld1_u8(src); d13u8 = vld1_u8(src + 8); d14u8 = vld1_u8(src + 16); src += src_pixels_per_line; __builtin_prefetch(src); __builtin_prefetch(src + src_pixels_per_line); __builtin_prefetch(src + src_pixels_per_line * 2); q8u16 = vmull_u8(d6u8, d0u8); q9u16 = vmull_u8(d7u8, d0u8); q10u16 = vmull_u8(d9u8, d0u8); q11u16 = vmull_u8(d10u8, d0u8); q12u16 = vmull_u8(d12u8, d0u8); q13u16 = vmull_u8(d13u8, d0u8); d28u8 = vext_u8(d6u8, d7u8, 1); d29u8 = vext_u8(d9u8, d10u8, 1); d30u8 = vext_u8(d12u8, d13u8, 1); q8u16 = vmlsl_u8(q8u16, d28u8, d1u8); q10u16 = vmlsl_u8(q10u16, d29u8, d1u8); q12u16 = vmlsl_u8(q12u16, d30u8, d1u8); d28u8 = vext_u8(d7u8, d8u8, 1); d29u8 = vext_u8(d10u8, d11u8, 1); d30u8 = vext_u8(d13u8, d14u8, 1); q9u16 = vmlsl_u8(q9u16, d28u8, d1u8); q11u16 = vmlsl_u8(q11u16, d29u8, d1u8); q13u16 = vmlsl_u8(q13u16, d30u8, d1u8); d28u8 = vext_u8(d6u8, d7u8, 4); d29u8 = vext_u8(d9u8, d10u8, 4); d30u8 = vext_u8(d12u8, d13u8, 4); q8u16 = vmlsl_u8(q8u16, d28u8, d4u8); q10u16 = vmlsl_u8(q10u16, d29u8, d4u8); q12u16 = vmlsl_u8(q12u16, d30u8, d4u8); d28u8 = vext_u8(d7u8, d8u8, 4); d29u8 = vext_u8(d10u8, d11u8, 4); d30u8 = vext_u8(d13u8, d14u8, 4); q9u16 = vmlsl_u8(q9u16, d28u8, d4u8); q11u16 = vmlsl_u8(q11u16, d29u8, d4u8); q13u16 = vmlsl_u8(q13u16, d30u8, d4u8); d28u8 = vext_u8(d6u8, d7u8, 5); d29u8 = vext_u8(d9u8, d10u8, 5); d30u8 = vext_u8(d12u8, d13u8, 5); q8u16 = vmlal_u8(q8u16, d28u8, d5u8); q10u16 = vmlal_u8(q10u16, d29u8, d5u8); q12u16 = vmlal_u8(q12u16, d30u8, d5u8); d28u8 = vext_u8(d7u8, d8u8, 5); d29u8 = vext_u8(d10u8, d11u8, 5); d30u8 = vext_u8(d13u8, d14u8, 5); q9u16 = vmlal_u8(q9u16, d28u8, d5u8); q11u16 = vmlal_u8(q11u16, d29u8, d5u8); q13u16 = vmlal_u8(q13u16, d30u8, d5u8); d28u8 = vext_u8(d6u8, d7u8, 2); d29u8 = vext_u8(d9u8, d10u8, 2); d30u8 = vext_u8(d12u8, d13u8, 2); q8u16 = vmlal_u8(q8u16, d28u8, d2u8); q10u16 = vmlal_u8(q10u16, d29u8, d2u8); q12u16 = vmlal_u8(q12u16, d30u8, d2u8); d28u8 = vext_u8(d7u8, d8u8, 2); d29u8 = vext_u8(d10u8, d11u8, 2); d30u8 = vext_u8(d13u8, d14u8, 2); q9u16 = vmlal_u8(q9u16, d28u8, d2u8); q11u16 = vmlal_u8(q11u16, d29u8, d2u8); q13u16 = vmlal_u8(q13u16, d30u8, d2u8); d28u8 = vext_u8(d6u8, d7u8, 3); d29u8 = vext_u8(d9u8, d10u8, 3); d30u8 = vext_u8(d12u8, d13u8, 3); d15u8 = vext_u8(d7u8, d8u8, 3); d31u8 = vext_u8(d10u8, d11u8, 3); d6u8 = vext_u8(d13u8, d14u8, 3); q4u16 = vmull_u8(d28u8, d3u8); q5u16 = vmull_u8(d29u8, d3u8); q6u16 = vmull_u8(d30u8, d3u8); q4s16 = vreinterpretq_s16_u16(q4u16); q5s16 = vreinterpretq_s16_u16(q5u16); q6s16 = vreinterpretq_s16_u16(q6u16); q8s16 = vreinterpretq_s16_u16(q8u16); q10s16 = vreinterpretq_s16_u16(q10u16); q12s16 = vreinterpretq_s16_u16(q12u16); q8s16 = vqaddq_s16(q8s16, q4s16); q10s16 = vqaddq_s16(q10s16, q5s16); q12s16 = vqaddq_s16(q12s16, q6s16); q6u16 = vmull_u8(d15u8, d3u8); q7u16 = vmull_u8(d31u8, d3u8); q3u16 = vmull_u8(d6u8, d3u8); q3s16 = vreinterpretq_s16_u16(q3u16); q6s16 = vreinterpretq_s16_u16(q6u16); q7s16 = vreinterpretq_s16_u16(q7u16); q9s16 = vreinterpretq_s16_u16(q9u16); q11s16 = vreinterpretq_s16_u16(q11u16); q13s16 = vreinterpretq_s16_u16(q13u16); q9s16 = vqaddq_s16(q9s16, q6s16); q11s16 = vqaddq_s16(q11s16, q7s16); q13s16 = vqaddq_s16(q13s16, q3s16); d6u8 = vqrshrun_n_s16(q8s16, 7); d7u8 = vqrshrun_n_s16(q9s16, 7); d8u8 = vqrshrun_n_s16(q10s16, 7); d9u8 = vqrshrun_n_s16(q11s16, 7); d10u8 = vqrshrun_n_s16(q12s16, 7); d11u8 = vqrshrun_n_s16(q13s16, 7); vst1_u8(tmpp, d6u8); tmpp += 8; vst1_u8(tmpp, d7u8); tmpp += 8; vst1_u8(tmpp, d8u8); tmpp += 8; vst1_u8(tmpp, d9u8); 
tmpp += 8; vst1_u8(tmpp, d10u8); tmpp += 8; vst1_u8(tmpp, d11u8); tmpp += 8; } // Second pass: 16x16 dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]); d0s8 = vdup_lane_s8(dtmps8, 0); d1s8 = vdup_lane_s8(dtmps8, 1); d2s8 = vdup_lane_s8(dtmps8, 2); d3s8 = vdup_lane_s8(dtmps8, 3); d4s8 = vdup_lane_s8(dtmps8, 4); d5s8 = vdup_lane_s8(dtmps8, 5); d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); for (i = 0; i < 2; i++) { dst = dst_ptr + 8 * i; tmpp = tmp + 8 * i; d18u8 = vld1_u8(tmpp); tmpp += 16; d19u8 = vld1_u8(tmpp); tmpp += 16; d20u8 = vld1_u8(tmpp); tmpp += 16; d21u8 = vld1_u8(tmpp); tmpp += 16; d22u8 = vld1_u8(tmpp); tmpp += 16; for (j = 0; j < 4; j++) { d23u8 = vld1_u8(tmpp); tmpp += 16; d24u8 = vld1_u8(tmpp); tmpp += 16; d25u8 = vld1_u8(tmpp); tmpp += 16; d26u8 = vld1_u8(tmpp); tmpp += 16; q3u16 = vmull_u8(d18u8, d0u8); q4u16 = vmull_u8(d19u8, d0u8); q5u16 = vmull_u8(d20u8, d0u8); q6u16 = vmull_u8(d21u8, d0u8); q3u16 = vmlsl_u8(q3u16, d19u8, d1u8); q4u16 = vmlsl_u8(q4u16, d20u8, d1u8); q5u16 = vmlsl_u8(q5u16, d21u8, d1u8); q6u16 = vmlsl_u8(q6u16, d22u8, d1u8); q3u16 = vmlsl_u8(q3u16, d22u8, d4u8); q4u16 = vmlsl_u8(q4u16, d23u8, d4u8); q5u16 = vmlsl_u8(q5u16, d24u8, d4u8); q6u16 = vmlsl_u8(q6u16, d25u8, d4u8); q3u16 = vmlal_u8(q3u16, d20u8, d2u8); q4u16 = vmlal_u8(q4u16, d21u8, d2u8); q5u16 = vmlal_u8(q5u16, d22u8, d2u8); q6u16 = vmlal_u8(q6u16, d23u8, d2u8); q3u16 = vmlal_u8(q3u16, d23u8, d5u8); q4u16 = vmlal_u8(q4u16, d24u8, d5u8); q5u16 = vmlal_u8(q5u16, d25u8, d5u8); q6u16 = vmlal_u8(q6u16, d26u8, d5u8); q7u16 = vmull_u8(d21u8, d3u8); q8u16 = vmull_u8(d22u8, d3u8); q9u16 = vmull_u8(d23u8, d3u8); q10u16 = vmull_u8(d24u8, d3u8); q3s16 = vreinterpretq_s16_u16(q3u16); q4s16 = vreinterpretq_s16_u16(q4u16); q5s16 = vreinterpretq_s16_u16(q5u16); q6s16 = vreinterpretq_s16_u16(q6u16); q7s16 = vreinterpretq_s16_u16(q7u16); q8s16 = vreinterpretq_s16_u16(q8u16); q9s16 = vreinterpretq_s16_u16(q9u16); q10s16 = vreinterpretq_s16_u16(q10u16); q7s16 = vqaddq_s16(q7s16, q3s16); q8s16 = vqaddq_s16(q8s16, q4s16); q9s16 = vqaddq_s16(q9s16, q5s16); q10s16 = vqaddq_s16(q10s16, q6s16); d6u8 = vqrshrun_n_s16(q7s16, 7); d7u8 = vqrshrun_n_s16(q8s16, 7); d8u8 = vqrshrun_n_s16(q9s16, 7); d9u8 = vqrshrun_n_s16(q10s16, 7); d18u8 = d22u8; d19u8 = d23u8; d20u8 = d24u8; d21u8 = d25u8; d22u8 = d26u8; vst1_u8(dst, d6u8); dst += dst_pitch; vst1_u8(dst, d7u8); dst += dst_pitch; vst1_u8(dst, d8u8); dst += dst_pitch; vst1_u8(dst, d9u8); dst += dst_pitch; } } return; }
inline void vnst(u8* dst, uint32x4_t v1, uint32x4_t v2) { vst1_u8(dst, vmovn_u16(vcombine_u16(vmovn_u32(v1), vmovn_u32(v2)))); }
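/* vnst narrows eight 32-bit lanes (v1 then v2) to bytes by truncation,
 * keeping only the low byte of each lane; a scalar equivalent sketch: */
#include <stdint.h>
static void vnst_ref(uint8_t *dst, const uint32_t v1[4], const uint32_t v2[4]) {
    for (int i = 0; i < 4; ++i) dst[i]     = (uint8_t)v1[i];
    for (int i = 0; i < 4; ++i) dst[4 + i] = (uint8_t)v2[i];
}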
void phase(const Size2D &size, const s16 * src0Base, ptrdiff_t src0Stride, const s16 * src1Base, ptrdiff_t src1Stride, u8 * dstBase, ptrdiff_t dstStride) { internal::assertSupportedConfiguration(); #ifdef CAROTENE_NEON FASTATAN2CONST(256.0f / 360.0f) size_t roiw16 = size.width >= 15 ? size.width - 15 : 0; size_t roiw8 = size.width >= 7 ? size.width - 7 : 0; float32x4_t v_05 = vdupq_n_f32(0.5f); for (size_t i = 0; i < size.height; ++i) { const s16 * src0 = internal::getRowPtr(src0Base, src0Stride, i); const s16 * src1 = internal::getRowPtr(src1Base, src1Stride, i); u8 * dst = internal::getRowPtr(dstBase, dstStride, i); size_t j = 0; for (; j < roiw16; j += 16) { internal::prefetch(src0 + j); internal::prefetch(src1 + j); int16x8_t v_src00 = vld1q_s16(src0 + j), v_src01 = vld1q_s16(src0 + j + 8); int16x8_t v_src10 = vld1q_s16(src1 + j), v_src11 = vld1q_s16(src1 + j + 8); // 0 float32x4_t v_src0_p = vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src00))); float32x4_t v_src1_p = vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src10))); float32x4_t v_dst32f0; FASTATAN2VECTOR(v_src1_p, v_src0_p, v_dst32f0) v_src0_p = vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src00))); v_src1_p = vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src10))); float32x4_t v_dst32f1; FASTATAN2VECTOR(v_src1_p, v_src0_p, v_dst32f1) uint16x8_t v_dst16s0 = vcombine_u16(vmovn_u32(vcvtq_u32_f32(vaddq_f32(v_dst32f0, v_05))), vmovn_u32(vcvtq_u32_f32(vaddq_f32(v_dst32f1, v_05)))); // 1 v_src0_p = vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src01))); v_src1_p = vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src11))); FASTATAN2VECTOR(v_src1_p, v_src0_p, v_dst32f0) v_src0_p = vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src01))); v_src1_p = vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src11))); FASTATAN2VECTOR(v_src1_p, v_src0_p, v_dst32f1) uint16x8_t v_dst16s1 = vcombine_u16(vmovn_u32(vcvtq_u32_f32(vaddq_f32(v_dst32f0, v_05))), vmovn_u32(vcvtq_u32_f32(vaddq_f32(v_dst32f1, v_05)))); vst1q_u8(dst + j, vcombine_u8(vmovn_u16(v_dst16s0), vmovn_u16(v_dst16s1))); } for (; j < roiw8; j += 8) { int16x8_t v_src0 = vld1q_s16(src0 + j); int16x8_t v_src1 = vld1q_s16(src1 + j); float32x4_t v_src0_p = vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src0))); float32x4_t v_src1_p = vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src1))); float32x4_t v_dst32f0; FASTATAN2VECTOR(v_src1_p, v_src0_p, v_dst32f0) v_src0_p = vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src0))); v_src1_p = vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src1))); float32x4_t v_dst32f1; FASTATAN2VECTOR(v_src1_p, v_src0_p, v_dst32f1) uint16x8_t v_dst = vcombine_u16(vmovn_u32(vcvtq_u32_f32(vaddq_f32(v_dst32f0, v_05))), vmovn_u32(vcvtq_u32_f32(vaddq_f32(v_dst32f1, v_05)))); vst1_u8(dst + j, vmovn_u16(v_dst)); } for (; j < size.width; j++) { f32 x = src0[j], y = src1[j]; f32 a; FASTATAN2SCALAR(y, x, a) dst[j] = (u8)(s32)floor(a + 0.5f); } } #else (void)size; (void)src0Base; (void)src0Stride; (void)src1Base; (void)src1Stride; (void)dstBase; (void)dstStride; #endif }