static void SharpYUVFilterRow_NEON(const int16_t* A, const int16_t* B, int len,
                                   const uint16_t* best_y, uint16_t* out) {
  int i;
  const int16x8_t max = vdupq_n_s16(MAX_Y);
  const int16x8_t zero = vdupq_n_s16(0);
  for (i = 0; i + 8 <= len; i += 8) {
    const int16x8_t a0 = vld1q_s16(A + i + 0);
    const int16x8_t a1 = vld1q_s16(A + i + 1);
    const int16x8_t b0 = vld1q_s16(B + i + 0);
    const int16x8_t b1 = vld1q_s16(B + i + 1);
    const int16x8_t a0b1 = vaddq_s16(a0, b1);
    const int16x8_t a1b0 = vaddq_s16(a1, b0);
    const int16x8_t a0a1b0b1 = vaddq_s16(a0b1, a1b0);  // A0+A1+B0+B1
    const int16x8_t a0b1_2 = vaddq_s16(a0b1, a0b1);    // 2*(A0+B1)
    const int16x8_t a1b0_2 = vaddq_s16(a1b0, a1b0);    // 2*(A1+B0)
    const int16x8_t c0 = vshrq_n_s16(vaddq_s16(a0b1_2, a0a1b0b1), 3);
    const int16x8_t c1 = vshrq_n_s16(vaddq_s16(a1b0_2, a0a1b0b1), 3);
    const int16x8_t d0 = vaddq_s16(c1, a0);
    const int16x8_t d1 = vaddq_s16(c0, a1);
    const int16x8_t e0 = vrshrq_n_s16(d0, 1);
    const int16x8_t e1 = vrshrq_n_s16(d1, 1);
    const int16x8x2_t f = vzipq_s16(e0, e1);
    const int16x8_t g0 = vreinterpretq_s16_u16(vld1q_u16(best_y + 2 * i + 0));
    const int16x8_t g1 = vreinterpretq_s16_u16(vld1q_u16(best_y + 2 * i + 8));
    const int16x8_t h0 = vaddq_s16(g0, f.val[0]);
    const int16x8_t h1 = vaddq_s16(g1, f.val[1]);
    const int16x8_t i0 = vmaxq_s16(vminq_s16(h0, max), zero);
    const int16x8_t i1 = vmaxq_s16(vminq_s16(h1, max), zero);
    vst1q_u16(out + 2 * i + 0, vreinterpretq_u16_s16(i0));
    vst1q_u16(out + 2 * i + 8, vreinterpretq_u16_s16(i1));
  }
  for (; i < len; ++i) {
    const int a0b1 = A[i + 0] + B[i + 1];
    const int a1b0 = A[i + 1] + B[i + 0];
    const int a0a1b0b1 = a0b1 + a1b0 + 8;
    const int v0 = (8 * A[i + 0] + 2 * a1b0 + a0a1b0b1) >> 4;
    const int v1 = (8 * A[i + 1] + 2 * a0b1 + a0a1b0b1) >> 4;
    out[2 * i + 0] = clip_y(best_y[2 * i + 0] + v0);
    out[2 * i + 1] = clip_y(best_y[2 * i + 1] + v1);
  }
}
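/* For cross-checking the NEON path: the scalar tail loop above, factored out
 * as a standalone reference routine. MAX_Y and clip_y come from the
 * surrounding file; the fallback definitions here are placeholders, not the
 * originals. */
#include <stdint.h>

#ifndef MAX_Y
#define MAX_Y 1023  /* placeholder; use the surrounding file's definition */
#endif

static uint16_t clip_y_ref(int v) {
  return (v < 0) ? 0 : (v > MAX_Y) ? (uint16_t)MAX_Y : (uint16_t)v;
}

static void SharpYUVFilterRow_C(const int16_t* A, const int16_t* B, int len,
                                const uint16_t* best_y, uint16_t* out) {
  int i;
  for (i = 0; i < len; ++i) {
    const int a0b1 = A[i + 0] + B[i + 1];
    const int a1b0 = A[i + 1] + B[i + 0];
    const int a0a1b0b1 = a0b1 + a1b0 + 8;
    const int v0 = (8 * A[i + 0] + 2 * a1b0 + a0a1b0b1) >> 4;
    const int v1 = (8 * A[i + 1] + 2 * a0b1 + a0a1b0b1) >> 4;
    out[2 * i + 0] = clip_y_ref(best_y[2 * i + 0] + v0);
    out[2 * i + 1] = clip_y_ref(best_y[2 * i + 1] + v1);
  }
}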
void yuv422rgb_neon_int(const unsigned char * sourcep, int source_byte_count,
                        unsigned char * destp)
{
  const unsigned char *source_endp;
  const unsigned char *vector_endp;
  int remainder;
  const int16x8_t u_coeff = {0, -22, 113, 0, 0, -22, 113, 0};
  const int16x8_t v_coeff = {90, -46, 0, 0, 90, -46, 0, 0};
  const uint8x8_t zeroalpha = {0x0, 0x0, 0x0, 0xFF, 0x0, 0x0, 0x0, 0xFF};
  const int16x8_t uvbias = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80};
  int16x8_t mp0_rgba;  /* macropixel 0's resulting RGBA RGBA pixels */
  int16x8_t mp1_rgba;  /* macropixel 1's resulting RGBA RGBA pixels */
  uint8x8_t rawpixels;      /* source pixels as {[YUYV]0 [YUYV]1} */
  uint8x8_t rgba0, rgba1;   /* rgba values as bytes */
  uint8x16_t bothrgba;
  uint8_t * destinationp;   /* pointer into output buffer destp */
  int16x8_t widerpixels;    /* rawpixels promoted to shorts per component */
  const uint8x8_t yselect = {0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00};

  /* each loop pass consumes two 4-byte macropixels (8 bytes), so round */
  /* the vector end point down to a multiple of 8 bytes */
  remainder = source_byte_count % 8;
  source_endp = sourcep + source_byte_count;
  vector_endp = source_endp - remainder;
  destinationp = (uint8_t *)destp;

  while (sourcep < vector_endp)
  {
    /* pull YUYV from 2 four byte macropixels starting at sourcep. */
    /* we'll increment sourcep as we go to save the array dereference */
    /* and separate increment instruction at the end of the loop */

    /* load rawpixels with {[YUYV]0 [YUYV]1 } with byte components */
    rawpixels = vld1_u8(sourcep);
    sourcep += sizeof(rawpixels);

    widerpixels = vreinterpretq_s16_u16(vmovl_u8(rawpixels));

    /* ---------- process macropixel 0 --------------- */
    /* take macropixel zero ([YUYV]0) from rawpixels and */
    /* compute the two RGBA pixels that come from it. store */
    /* those two pixels in mp0_rgba */
    {
      int16x8_t wider_yalpha;
      int16x8_t u_vec, v_vec, uv_vec;
      uint8x8_t narrow_yalpha;
      uint8x8_t y0_vec, y1_vec;
      int16x4_t yuyv;

      /* narrow_yalpha is drawn from [YUYV]0 and formed into */
      /* {Y0, Y0, Y0, alpha, Y1, Y1, Y1, alpha} */
      /* this would have been a nice place for vtbx1_u8, but I */
      /* can't get it to work. so I'll have to use vbsl_u8 instead. */
      y0_vec = vdup_lane_u8(rawpixels, MP0_Y0);
      y1_vec = vdup_lane_u8(rawpixels, MP0_Y1);
      narrow_yalpha = vbsl_u8(yselect, y0_vec, y1_vec);

      /* store ALPHA in elements 3 and 7 (after the RGB components) */
      narrow_yalpha = vset_lane_u8(ALPHA, narrow_yalpha, 3);
      narrow_yalpha = vset_lane_u8(ALPHA, narrow_yalpha, 7);

      /* use vmovl_u8 to go from being unsigned 8-bit to */
      /* unsigned 16-bit, then use vreinterpretq_s16_u16 to */
      /* change interpretation from unsigned 16-bit to signed */
      /* 16-bit. */
      wider_yalpha = vreinterpretq_s16_u16(vmovl_u8(narrow_yalpha));

      yuyv = vget_low_s16(widerpixels);

      /* form a vector of the U component from MP0 */
      u_vec = vdupq_lane_s16(yuyv, MP0_U);
      /* subtract uvbias from u_vec */
      u_vec = vsubq_s16(u_vec, uvbias);

      /* form a vector of the V component from MP0 */
      v_vec = vdupq_lane_s16(yuyv, MP0_V);
      /* subtract uvbias from v_vec */
      v_vec = vsubq_s16(v_vec, uvbias);

      /* Multiply eight 16-bit values in u_vec by eight 16-bit */
      /* values in u_coeff and store the results in u_vec. */
      u_vec = vmulq_s16(u_vec, u_coeff);

      /* likewise multiply eight 16-bit values in v_vec by */
      /* v_coeff and store the results in v_vec */
      v_vec = vmulq_s16(v_vec, v_coeff);

      /* form uv_vec as the sum of u_vec & v_vec, then shift 6 places */
      /* (dividing by 64) */
      uv_vec = vaddq_s16(u_vec, v_vec);
      uv_vec = vshrq_n_s16(uv_vec, 6);

      /* now mp0_rgba = y_vec + u_vec + v_vec */
      mp0_rgba = vaddq_s16(wider_yalpha, uv_vec);
    }

    /* ---------- process macropixel 1 --------------- */
    /* take macropixel one ([YUYV]1) from rawpixels and */
    /* compute the two RGBA pixels that come from it. store */
    /* those two pixels in mp1_rgba */
    {
      int16x8_t wider_yalpha;
      int16x8_t u_vec, v_vec, uv_vec;
      uint8x8_t narrow_yalpha;
      uint8x8_t y0_vec, y1_vec;
      int16x4_t yuyv;

      /* narrow_yalpha is drawn from [YUYV]1 and formed into */
      /* {Y0, Y0, Y0, alpha, Y1, Y1, Y1, alpha} */
      /* this would have been a nice place for vtbx1_u8, but I */
      /* can't get it to work. so I'll have to use vbsl_u8 instead. */
      y0_vec = vdup_lane_u8(rawpixels, MP1_Y0);
      y1_vec = vdup_lane_u8(rawpixels, MP1_Y1);
      narrow_yalpha = vbsl_u8(yselect, y0_vec, y1_vec);
      narrow_yalpha = vset_lane_u8(ALPHA, narrow_yalpha, 3);
      narrow_yalpha = vset_lane_u8(ALPHA, narrow_yalpha, 7);

      /* use vmovl_u8 to go from being unsigned 8-bit to */
      /* unsigned 16-bit, then use vreinterpretq_s16_u16 */
      wider_yalpha = vreinterpretq_s16_u16(vmovl_u8(narrow_yalpha));

      yuyv = vget_high_s16(widerpixels);
      u_vec = vdupq_lane_s16(yuyv, 1);
      u_vec = vsubq_s16(u_vec, uvbias);
      v_vec = vdupq_lane_s16(yuyv, 3);
      v_vec = vsubq_s16(v_vec, uvbias);

      /* Multiply eight 16-bit values in u_vec by eight 16-bit */
      /* values in u_coeff and store the results in u_vec. */
      u_vec = vmulq_s16(u_vec, u_coeff);

      /* likewise multiply eight 16-bit values in v_vec by */
      /* v_coeff and store the results in v_vec */
      v_vec = vmulq_s16(v_vec, v_coeff);

      /* form uv_vec as the sum of u_vec & v_vec, then shift 6 places */
      /* (dividing by 64) */
      uv_vec = vaddq_s16(u_vec, v_vec);
      uv_vec = vshrq_n_s16(uv_vec, 6);

      /* now mp1_rgba = y_vec + u_vec + v_vec */
      mp1_rgba = vaddq_s16(wider_yalpha, uv_vec);
    }

    /* turn mp0_rgba from a vector of shorts to a vector of */
    /* unsigned chars. this will saturate: clipping */
    /* the values between 0 and 255. */
    rgba0 = vqmovun_s16(mp0_rgba);
    rgba1 = vqmovun_s16(mp1_rgba);

    /* make it faster to copy these back out of vector registers into */
    /* memory by combining rgba0 and rgba1 into the larger bothrgba. */
    /* then store that back into memory at destinationp. */
    bothrgba = vcombine_u8(rgba0, rgba1);

    vst1q_u8(destinationp, bothrgba);
    destinationp += 16;
  }
}
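/* Standalone sanity check for the Q6 coefficients above: u_coeff/v_coeff plus
 * the final >> 6 give R += (90/64)*V', G += (-22/64)*U' + (-46/64)*V',
 * B += (113/64)*U' (with U' = U-128, V' = V-128), i.e. 6-bit approximations
 * of the BT.601 factors 1.402, -0.344/-0.714 and 1.772. Not part of the
 * converter; just a cross-check. */
#include <stdio.h>

static void yuv422_coeff_check(void)
{
  printf("R: V' * %.6f (vs  1.402)\n", 90 / 64.0);    /*  1.406250 */
  printf("G: U' * %.6f (vs -0.344)\n", -22 / 64.0);   /* -0.343750 */
  printf("G: V' * %.6f (vs -0.714)\n", -46 / 64.0);   /* -0.718750 */
  printf("B: U' * %.6f (vs  1.772)\n", 113 / 64.0);   /*  1.765625 */
}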
static void hadamard8x8_one_pass(int16x8_t *a0, int16x8_t *a1, int16x8_t *a2,
                                 int16x8_t *a3, int16x8_t *a4, int16x8_t *a5,
                                 int16x8_t *a6, int16x8_t *a7) {
  const int16x8_t b0 = vaddq_s16(*a0, *a1);
  const int16x8_t b1 = vsubq_s16(*a0, *a1);
  const int16x8_t b2 = vaddq_s16(*a2, *a3);
  const int16x8_t b3 = vsubq_s16(*a2, *a3);
  const int16x8_t b4 = vaddq_s16(*a4, *a5);
  const int16x8_t b5 = vsubq_s16(*a4, *a5);
  const int16x8_t b6 = vaddq_s16(*a6, *a7);
  const int16x8_t b7 = vsubq_s16(*a6, *a7);

  const int16x8_t c0 = vaddq_s16(b0, b2);
  const int16x8_t c1 = vaddq_s16(b1, b3);
  const int16x8_t c2 = vsubq_s16(b0, b2);
  const int16x8_t c3 = vsubq_s16(b1, b3);
  const int16x8_t c4 = vaddq_s16(b4, b6);
  const int16x8_t c5 = vaddq_s16(b5, b7);
  const int16x8_t c6 = vsubq_s16(b4, b6);
  const int16x8_t c7 = vsubq_s16(b5, b7);

  *a0 = vaddq_s16(c0, c4);
  *a1 = vsubq_s16(c2, c6);
  *a2 = vsubq_s16(c0, c4);
  *a3 = vaddq_s16(c2, c6);
  *a4 = vaddq_s16(c3, c7);
  *a5 = vsubq_s16(c3, c7);
  *a6 = vsubq_s16(c1, c5);
  *a7 = vaddq_s16(c1, c5);
}
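/* Per-lane scalar equivalent of one pass above: each int16x8_t holds one of
 * the eight values for eight independent columns, so the vector code is this
 * butterfly applied to all eight columns at once. Useful as a reference when
 * checking the output ordering (note the swizzled a1/a3 and a6/a7 results). */
#include <stdint.h>

static void hadamard8_one_pass_c(int16_t v[8]) {
  const int16_t b0 = v[0] + v[1], b1 = v[0] - v[1];
  const int16_t b2 = v[2] + v[3], b3 = v[2] - v[3];
  const int16_t b4 = v[4] + v[5], b5 = v[4] - v[5];
  const int16_t b6 = v[6] + v[7], b7 = v[6] - v[7];
  const int16_t c0 = b0 + b2, c1 = b1 + b3, c2 = b0 - b2, c3 = b1 - b3;
  const int16_t c4 = b4 + b6, c5 = b5 + b7, c6 = b4 - b6, c7 = b5 - b7;
  v[0] = c0 + c4; v[1] = c2 - c6; v[2] = c0 - c4; v[3] = c2 + c6;
  v[4] = c3 + c7; v[5] = c3 - c7; v[6] = c1 - c5; v[7] = c1 + c5;
}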
/* s16x8 mm mul */
void mw_neon_mm_mul_s16x8(short * A, int Row, int T, short * B, int Col, short * C)
{
    int i, k, j;
    int16x8_t neon_b, neon_c;
    int16x8_t neon_a0, neon_a1, neon_a2, neon_a3, neon_a4, neon_a5, neon_a6, neon_a7;
    int16x8_t neon_b0, neon_b1, neon_b2, neon_b3, neon_b4, neon_b5, neon_b6, neon_b7;

    for (i = 0; i < Row; i += 8)
    {
        for (k = 0; k < Col; k += 1)
        {
            neon_c = vmovq_n_s16(0);

            for (j = 0; j < T; j += 8)
            {
                int j_T = j * T + i;
                int k_Row = k * Row;

                /* eight consecutive columns of A for this row block */
                neon_a0 = vld1q_s16(A + j_T);
                j_T += Row;
                neon_a1 = vld1q_s16(A + j_T);
                j_T += Row;
                neon_a2 = vld1q_s16(A + j_T);
                j_T += Row;
                neon_a3 = vld1q_s16(A + j_T);
                j_T += Row;
                neon_a4 = vld1q_s16(A + j_T);
                j_T += Row;
                neon_a5 = vld1q_s16(A + j_T);
                j_T += Row;
                neon_a6 = vld1q_s16(A + j_T);
                j_T += Row;
                neon_a7 = vld1q_s16(A + j_T);

                /* broadcast each of the eight B entries for this block */
                neon_b = vld1q_s16(B + k_Row + j);
                neon_b0 = vdupq_n_s16(vgetq_lane_s16(neon_b, 0));
                neon_b1 = vdupq_n_s16(vgetq_lane_s16(neon_b, 1));
                neon_b2 = vdupq_n_s16(vgetq_lane_s16(neon_b, 2));
                neon_b3 = vdupq_n_s16(vgetq_lane_s16(neon_b, 3));
                neon_b4 = vdupq_n_s16(vgetq_lane_s16(neon_b, 4));
                neon_b5 = vdupq_n_s16(vgetq_lane_s16(neon_b, 5));
                neon_b6 = vdupq_n_s16(vgetq_lane_s16(neon_b, 6));
                neon_b7 = vdupq_n_s16(vgetq_lane_s16(neon_b, 7));

                neon_c = vaddq_s16(vmulq_s16(neon_a0, neon_b0), neon_c);
                neon_c = vaddq_s16(vmulq_s16(neon_a1, neon_b1), neon_c);
                neon_c = vaddq_s16(vmulq_s16(neon_a2, neon_b2), neon_c);
                neon_c = vaddq_s16(vmulq_s16(neon_a3, neon_b3), neon_c);
                neon_c = vaddq_s16(vmulq_s16(neon_a4, neon_b4), neon_c);
                neon_c = vaddq_s16(vmulq_s16(neon_a5, neon_b5), neon_c);
                neon_c = vaddq_s16(vmulq_s16(neon_a6, neon_b6), neon_c);
                neon_c = vaddq_s16(vmulq_s16(neon_a7, neon_b7), neon_c);

                /* store the running partial sums; the last j pass leaves the
                 * completed dot products in C (a single vst1q_s16 hoisted out
                 * of this loop would do the same work once) */
                vst1q_lane_s16(C + k_Row + i, neon_c, 0);
                vst1q_lane_s16(C + k_Row + i + 1, neon_c, 1);
                vst1q_lane_s16(C + k_Row + i + 2, neon_c, 2);
                vst1q_lane_s16(C + k_Row + i + 3, neon_c, 3);
                vst1q_lane_s16(C + k_Row + i + 4, neon_c, 4);
                vst1q_lane_s16(C + k_Row + i + 5, neon_c, 5);
                vst1q_lane_s16(C + k_Row + i + 6, neon_c, 6);
                vst1q_lane_s16(C + k_Row + i + 7, neon_c, 7);
            }
        }
    }
}
PRIM_STATIC pstatus_t neon_yCbCrToRGB_16s16s_P3P3(
    const INT16 *pSrc[3],
    int srcStep,
    INT16 *pDst[3],
    int dstStep,
    const prim_size_t *roi)  /* region of interest */
{
    /* TODO: If necessary, check alignments and call the general version. */
    int16x8_t zero = vdupq_n_s16(0);
    int16x8_t max = vdupq_n_s16(255);
    int16x8_t y_add = vdupq_n_s16(128);

    int16x8_t* y_buf  = (int16x8_t*) pSrc[0];
    int16x8_t* cb_buf = (int16x8_t*) pSrc[1];
    int16x8_t* cr_buf = (int16x8_t*) pSrc[2];
    int16x8_t* r_buf  = (int16x8_t*) pDst[0];
    int16x8_t* g_buf  = (int16x8_t*) pDst[1];
    int16x8_t* b_buf  = (int16x8_t*) pDst[2];

    int srcbump = srcStep / sizeof(int16x8_t);
    int dstbump = dstStep / sizeof(int16x8_t);
    int yp;
    int imax = roi->width * sizeof(INT16) / sizeof(int16x8_t);

    for (yp = 0; yp < roi->height; ++yp)
    {
        int i;
        for (i = 0; i < imax; i++)
        {
            int16x8_t y = vld1q_s16((INT16*) (y_buf + i));
            y = vaddq_s16(y, y_add);

            int16x8_t cr = vld1q_s16((INT16*) (cr_buf + i));

            /* r = between((y + cr + (cr >> 2) + (cr >> 3) + (cr >> 5)),
             *     0, 255); */
            int16x8_t r = vaddq_s16(y, cr);
            r = vaddq_s16(r, vshrq_n_s16(cr, 2));
            r = vaddq_s16(r, vshrq_n_s16(cr, 3));
            r = vaddq_s16(r, vshrq_n_s16(cr, 5));
            r = vminq_s16(vmaxq_s16(r, zero), max);
            vst1q_s16((INT16*) (r_buf + i), r);

            /* cb = cb_g_buf[i]; */
            int16x8_t cb = vld1q_s16((INT16*) (cb_buf + i));

            /* g = between(y - (cb >> 2) - (cb >> 4) - (cb >> 5) - (cr >> 1)
             *     - (cr >> 3) - (cr >> 4) - (cr >> 5), 0, 255); */
            int16x8_t g = vsubq_s16(y, vshrq_n_s16(cb, 2));
            g = vsubq_s16(g, vshrq_n_s16(cb, 4));
            g = vsubq_s16(g, vshrq_n_s16(cb, 5));
            g = vsubq_s16(g, vshrq_n_s16(cr, 1));
            g = vsubq_s16(g, vshrq_n_s16(cr, 3));
            g = vsubq_s16(g, vshrq_n_s16(cr, 4));
            g = vsubq_s16(g, vshrq_n_s16(cr, 5));
            g = vminq_s16(vmaxq_s16(g, zero), max);
            vst1q_s16((INT16*) (g_buf + i), g);

            /* b = between((y + cb + (cb >> 1) + (cb >> 2) + (cb >> 6)),
             *     0, 255); */
            int16x8_t b = vaddq_s16(y, cb);
            b = vaddq_s16(b, vshrq_n_s16(cb, 1));
            b = vaddq_s16(b, vshrq_n_s16(cb, 2));
            b = vaddq_s16(b, vshrq_n_s16(cb, 6));
            b = vminq_s16(vmaxq_s16(b, zero), max);
            vst1q_s16((INT16*) (b_buf + i), b);
        }
        y_buf  += srcbump;
        cb_buf += srcbump;
        cr_buf += srcbump;
        r_buf  += dstbump;
        g_buf  += dstbump;
        b_buf  += dstbump;
    }
    return PRIMITIVES_SUCCESS;
}
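/* Scalar sketch of the shift-and-add approximation above, one pixel at a
 * time (between() mirrors the clamping comments in the vector code, and
 * signed >> is arithmetic, matching vshrq_n_s16). The shift sums are 6-bit
 * approximations of the BT.601 factors:
 *   1 + 1/4 + 1/8 + 1/32       = 1.40625  ~  1.403  (Cr in R)
 *   1/4 + 1/16 + 1/32          = 0.34375  ~  0.344  (Cb in G)
 *   1/2 + 1/8 + 1/16 + 1/32    = 0.71875  ~  0.714  (Cr in G)
 *   1 + 1/2 + 1/4 + 1/64       = 1.765625 ~  1.770  (Cb in B) */
static int between_ref(int x, int lo, int hi)
{
    return x < lo ? lo : (x > hi ? hi : x);
}

static void ycbcr_to_rgb_px_ref(short yv, short cb, short cr,
                                short *r, short *g, short *b)
{
    const int y = yv + 128;
    *r = (short) between_ref(y + cr + (cr >> 2) + (cr >> 3) + (cr >> 5),
                             0, 255);
    *g = (short) between_ref(y - (cb >> 2) - (cb >> 4) - (cb >> 5)
                               - (cr >> 1) - (cr >> 3) - (cr >> 4) - (cr >> 5),
                             0, 255);
    *b = (short) between_ref(y + cb + (cb >> 1) + (cb >> 2) + (cb >> 6),
                             0, 255);
}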
inline int16x8_t vaddq(const int16x8_t & v0, const int16x8_t & v1)
{
    return vaddq_s16(v0, v1);
}
static INLINE void IADST8X8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
                               int16x8_t *q10s16, int16x8_t *q11s16,
                               int16x8_t *q12s16, int16x8_t *q13s16,
                               int16x8_t *q14s16, int16x8_t *q15s16) {
  int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
  int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
  int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
  int16x8_t q2s16, q4s16, q5s16, q6s16;
  int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q7s32, q8s32;
  int32x4_t q9s32, q10s32, q11s32, q12s32, q13s32, q14s32, q15s32;

  d16s16 = vget_low_s16(*q8s16);
  d17s16 = vget_high_s16(*q8s16);
  d18s16 = vget_low_s16(*q9s16);
  d19s16 = vget_high_s16(*q9s16);
  d20s16 = vget_low_s16(*q10s16);
  d21s16 = vget_high_s16(*q10s16);
  d22s16 = vget_low_s16(*q11s16);
  d23s16 = vget_high_s16(*q11s16);
  d24s16 = vget_low_s16(*q12s16);
  d25s16 = vget_high_s16(*q12s16);
  d26s16 = vget_low_s16(*q13s16);
  d27s16 = vget_high_s16(*q13s16);
  d28s16 = vget_low_s16(*q14s16);
  d29s16 = vget_high_s16(*q14s16);
  d30s16 = vget_low_s16(*q15s16);
  d31s16 = vget_high_s16(*q15s16);

  d14s16 = vdup_n_s16(cospi_2_64);
  d15s16 = vdup_n_s16(cospi_30_64);

  q1s32 = vmull_s16(d30s16, d14s16);
  q2s32 = vmull_s16(d31s16, d14s16);
  q3s32 = vmull_s16(d30s16, d15s16);
  q4s32 = vmull_s16(d31s16, d15s16);

  d30s16 = vdup_n_s16(cospi_18_64);
  d31s16 = vdup_n_s16(cospi_14_64);

  q1s32 = vmlal_s16(q1s32, d16s16, d15s16);
  q2s32 = vmlal_s16(q2s32, d17s16, d15s16);
  q3s32 = vmlsl_s16(q3s32, d16s16, d14s16);
  q4s32 = vmlsl_s16(q4s32, d17s16, d14s16);

  q5s32 = vmull_s16(d22s16, d30s16);
  q6s32 = vmull_s16(d23s16, d30s16);
  q7s32 = vmull_s16(d22s16, d31s16);
  q8s32 = vmull_s16(d23s16, d31s16);

  q5s32 = vmlal_s16(q5s32, d24s16, d31s16);
  q6s32 = vmlal_s16(q6s32, d25s16, d31s16);
  q7s32 = vmlsl_s16(q7s32, d24s16, d30s16);
  q8s32 = vmlsl_s16(q8s32, d25s16, d30s16);

  q11s32 = vaddq_s32(q1s32, q5s32);
  q12s32 = vaddq_s32(q2s32, q6s32);
  q1s32 = vsubq_s32(q1s32, q5s32);
  q2s32 = vsubq_s32(q2s32, q6s32);

  d22s16 = vqrshrn_n_s32(q11s32, 14);
  d23s16 = vqrshrn_n_s32(q12s32, 14);
  *q11s16 = vcombine_s16(d22s16, d23s16);

  q12s32 = vaddq_s32(q3s32, q7s32);
  q15s32 = vaddq_s32(q4s32, q8s32);
  q3s32 = vsubq_s32(q3s32, q7s32);
  q4s32 = vsubq_s32(q4s32, q8s32);

  d2s16 = vqrshrn_n_s32(q1s32, 14);
  d3s16 = vqrshrn_n_s32(q2s32, 14);
  d24s16 = vqrshrn_n_s32(q12s32, 14);
  d25s16 = vqrshrn_n_s32(q15s32, 14);
  d6s16 = vqrshrn_n_s32(q3s32, 14);
  d7s16 = vqrshrn_n_s32(q4s32, 14);
  *q12s16 = vcombine_s16(d24s16, d25s16);

  d0s16 = vdup_n_s16(cospi_10_64);
  d1s16 = vdup_n_s16(cospi_22_64);

  q4s32 = vmull_s16(d26s16, d0s16);
  q5s32 = vmull_s16(d27s16, d0s16);
  q2s32 = vmull_s16(d26s16, d1s16);
  q6s32 = vmull_s16(d27s16, d1s16);

  d30s16 = vdup_n_s16(cospi_26_64);
  d31s16 = vdup_n_s16(cospi_6_64);

  q4s32 = vmlal_s16(q4s32, d20s16, d1s16);
  q5s32 = vmlal_s16(q5s32, d21s16, d1s16);
  q2s32 = vmlsl_s16(q2s32, d20s16, d0s16);
  q6s32 = vmlsl_s16(q6s32, d21s16, d0s16);

  q0s32 = vmull_s16(d18s16, d30s16);
  q13s32 = vmull_s16(d19s16, d30s16);
  q0s32 = vmlal_s16(q0s32, d28s16, d31s16);
  q13s32 = vmlal_s16(q13s32, d29s16, d31s16);

  q10s32 = vmull_s16(d18s16, d31s16);
  q9s32 = vmull_s16(d19s16, d31s16);
  q10s32 = vmlsl_s16(q10s32, d28s16, d30s16);
  q9s32 = vmlsl_s16(q9s32, d29s16, d30s16);

  q14s32 = vaddq_s32(q2s32, q10s32);
  q15s32 = vaddq_s32(q6s32, q9s32);
  q2s32 = vsubq_s32(q2s32, q10s32);
  q6s32 = vsubq_s32(q6s32, q9s32);

  d28s16 = vqrshrn_n_s32(q14s32, 14);
  d29s16 = vqrshrn_n_s32(q15s32, 14);
  d4s16 = vqrshrn_n_s32(q2s32, 14);
  d5s16 = vqrshrn_n_s32(q6s32, 14);
  *q14s16 = vcombine_s16(d28s16, d29s16);

  q9s32 = vaddq_s32(q4s32, q0s32);
  q10s32 = vaddq_s32(q5s32, q13s32);
  q4s32 = vsubq_s32(q4s32, q0s32);
  q5s32 = vsubq_s32(q5s32, q13s32);

  d30s16 = vdup_n_s16(cospi_8_64);
  d31s16 = vdup_n_s16(cospi_24_64);

  d18s16 = vqrshrn_n_s32(q9s32, 14);
  d19s16 = vqrshrn_n_s32(q10s32, 14);
  d8s16 = vqrshrn_n_s32(q4s32, 14);
  d9s16 = vqrshrn_n_s32(q5s32, 14);
  *q9s16 = vcombine_s16(d18s16, d19s16);

  q5s32 = vmull_s16(d2s16, d30s16);
  q6s32 = vmull_s16(d3s16, d30s16);
  q7s32 = vmull_s16(d2s16, d31s16);
  q0s32 = vmull_s16(d3s16, d31s16);

  q5s32 = vmlal_s16(q5s32, d6s16, d31s16);
  q6s32 = vmlal_s16(q6s32, d7s16, d31s16);
  q7s32 = vmlsl_s16(q7s32, d6s16, d30s16);
  q0s32 = vmlsl_s16(q0s32, d7s16, d30s16);

  q1s32 = vmull_s16(d4s16, d30s16);
  q3s32 = vmull_s16(d5s16, d30s16);
  q10s32 = vmull_s16(d4s16, d31s16);
  q2s32 = vmull_s16(d5s16, d31s16);

  q1s32 = vmlsl_s16(q1s32, d8s16, d31s16);
  q3s32 = vmlsl_s16(q3s32, d9s16, d31s16);
  q10s32 = vmlal_s16(q10s32, d8s16, d30s16);
  q2s32 = vmlal_s16(q2s32, d9s16, d30s16);

  *q8s16 = vaddq_s16(*q11s16, *q9s16);
  *q11s16 = vsubq_s16(*q11s16, *q9s16);
  q4s16 = vaddq_s16(*q12s16, *q14s16);
  *q12s16 = vsubq_s16(*q12s16, *q14s16);

  q14s32 = vaddq_s32(q5s32, q1s32);
  q15s32 = vaddq_s32(q6s32, q3s32);
  q5s32 = vsubq_s32(q5s32, q1s32);
  q6s32 = vsubq_s32(q6s32, q3s32);

  d18s16 = vqrshrn_n_s32(q14s32, 14);
  d19s16 = vqrshrn_n_s32(q15s32, 14);
  d10s16 = vqrshrn_n_s32(q5s32, 14);
  d11s16 = vqrshrn_n_s32(q6s32, 14);
  *q9s16 = vcombine_s16(d18s16, d19s16);

  q1s32 = vaddq_s32(q7s32, q10s32);
  q3s32 = vaddq_s32(q0s32, q2s32);
  q7s32 = vsubq_s32(q7s32, q10s32);
  q0s32 = vsubq_s32(q0s32, q2s32);

  d28s16 = vqrshrn_n_s32(q1s32, 14);
  d29s16 = vqrshrn_n_s32(q3s32, 14);
  d14s16 = vqrshrn_n_s32(q7s32, 14);
  d15s16 = vqrshrn_n_s32(q0s32, 14);
  *q14s16 = vcombine_s16(d28s16, d29s16);

  d30s16 = vdup_n_s16(cospi_16_64);

  d22s16 = vget_low_s16(*q11s16);
  d23s16 = vget_high_s16(*q11s16);
  q2s32 = vmull_s16(d22s16, d30s16);
  q3s32 = vmull_s16(d23s16, d30s16);
  q13s32 = vmull_s16(d22s16, d30s16);
  q1s32 = vmull_s16(d23s16, d30s16);

  d24s16 = vget_low_s16(*q12s16);
  d25s16 = vget_high_s16(*q12s16);
  q2s32 = vmlal_s16(q2s32, d24s16, d30s16);
  q3s32 = vmlal_s16(q3s32, d25s16, d30s16);
  q13s32 = vmlsl_s16(q13s32, d24s16, d30s16);
  q1s32 = vmlsl_s16(q1s32, d25s16, d30s16);

  d4s16 = vqrshrn_n_s32(q2s32, 14);
  d5s16 = vqrshrn_n_s32(q3s32, 14);
  d24s16 = vqrshrn_n_s32(q13s32, 14);
  d25s16 = vqrshrn_n_s32(q1s32, 14);
  q2s16 = vcombine_s16(d4s16, d5s16);
  *q12s16 = vcombine_s16(d24s16, d25s16);

  q13s32 = vmull_s16(d10s16, d30s16);
  q1s32 = vmull_s16(d11s16, d30s16);
  q11s32 = vmull_s16(d10s16, d30s16);
  q0s32 = vmull_s16(d11s16, d30s16);

  q13s32 = vmlal_s16(q13s32, d14s16, d30s16);
  q1s32 = vmlal_s16(q1s32, d15s16, d30s16);
  q11s32 = vmlsl_s16(q11s32, d14s16, d30s16);
  q0s32 = vmlsl_s16(q0s32, d15s16, d30s16);

  d20s16 = vqrshrn_n_s32(q13s32, 14);
  d21s16 = vqrshrn_n_s32(q1s32, 14);
  d12s16 = vqrshrn_n_s32(q11s32, 14);
  d13s16 = vqrshrn_n_s32(q0s32, 14);
  *q10s16 = vcombine_s16(d20s16, d21s16);
  q6s16 = vcombine_s16(d12s16, d13s16);

  q5s16 = vdupq_n_s16(0);

  *q9s16 = vsubq_s16(q5s16, *q9s16);
  *q11s16 = vsubq_s16(q5s16, q2s16);
  *q13s16 = vsubq_s16(q5s16, q6s16);
  *q15s16 = vsubq_s16(q5s16, q4s16);
  return;
}
static void rfx_dwt_2d_decode_block_vert_NEON(sint16 * l, sint16 * h,
                                              sint16 * dst, int subband_width)
{
    int x, n;
    sint16 * l_ptr = l;
    sint16 * h_ptr = h;
    sint16 * dst_ptr = dst;
    int total_width = subband_width + subband_width;

    /* Even coefficients */
    for (n = 0; n < subband_width; n++)
    {
        for (x = 0; x < total_width; x += 8)
        {
            // dst[2n] = l[n] - ((h[n-1] + h[n] + 1) >> 1);
            int16x8_t l_n = vld1q_s16(l_ptr);
            int16x8_t h_n = vld1q_s16(h_ptr);
            int16x8_t tmp_n = vaddq_s16(h_n, vdupq_n_s16(1));

            if (n == 0)
                tmp_n = vaddq_s16(tmp_n, h_n);
            else
            {
                int16x8_t h_n_m = vld1q_s16(h_ptr - total_width);
                tmp_n = vaddq_s16(tmp_n, h_n_m);
            }
            tmp_n = vshrq_n_s16(tmp_n, 1);

            int16x8_t dst_n = vsubq_s16(l_n, tmp_n);
            vst1q_s16(dst_ptr, dst_n);

            l_ptr += 8;
            h_ptr += 8;
            dst_ptr += 8;
        }
        dst_ptr += total_width;
    }

    h_ptr = h;
    dst_ptr = dst + total_width;

    /* Odd coefficients */
    for (n = 0; n < subband_width; n++)
    {
        for (x = 0; x < total_width; x += 8)
        {
            // dst[2n + 1] = (h[n] << 1) + ((dst[2n] + dst[2n + 2]) >> 1);
            int16x8_t h_n = vld1q_s16(h_ptr);
            int16x8_t dst_n_m = vld1q_s16(dst_ptr - total_width);

            h_n = vshlq_n_s16(h_n, 1);

            int16x8_t tmp_n = dst_n_m;
            if (n == subband_width - 1)
                tmp_n = vaddq_s16(tmp_n, dst_n_m);
            else
            {
                int16x8_t dst_n_p = vld1q_s16(dst_ptr + total_width);
                tmp_n = vaddq_s16(tmp_n, dst_n_p);
            }
            tmp_n = vshrq_n_s16(tmp_n, 1);

            int16x8_t dst_n = vaddq_s16(tmp_n, h_n);
            vst1q_s16(dst_ptr, dst_n);

            h_ptr += 8;
            dst_ptr += 8;
        }
        dst_ptr += total_width;
    }
}
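/* Scalar form of the inverse lifting step both DWT blocks implement, using
 * the same boundary handling (h[-1] := h[0] at the start, dst[2n+2] := dst[2n]
 * at the end). The vertical version above walks this column-wise across rows
 * of vectors; the horizontal variant later in this file applies it within
 * each row. sint16 is the surrounding file's 16-bit typedef. */
static void rfx_idwt_lift_c(const sint16 * l, const sint16 * h,
                            sint16 * dst, int subband_width)
{
    int n;
    /* Even coefficients: dst[2n] = l[n] - ((h[n-1] + h[n] + 1) >> 1) */
    for (n = 0; n < subband_width; n++)
    {
        sint16 h_m = (n == 0) ? h[0] : h[n - 1];
        dst[2 * n] = l[n] - ((h_m + h[n] + 1) >> 1);
    }
    /* Odd coefficients: dst[2n+1] = (h[n] << 1) + ((dst[2n] + dst[2n+2]) >> 1) */
    for (n = 0; n < subband_width; n++)
    {
        sint16 d_p = (n == subband_width - 1) ? dst[2 * n] : dst[2 * n + 2];
        dst[2 * n + 1] = (h[n] << 1) + ((dst[2 * n] + d_p) >> 1);
    }
}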
void vpx_fdct4x4_neon(const int16_t *input, tran_low_t *final_output,
                      int stride) {
  int i;
  // input[M * stride] * 16
  int16x4_t input_0 = vshl_n_s16(vld1_s16(input + 0 * stride), 4);
  int16x4_t input_1 = vshl_n_s16(vld1_s16(input + 1 * stride), 4);
  int16x4_t input_2 = vshl_n_s16(vld1_s16(input + 2 * stride), 4);
  int16x4_t input_3 = vshl_n_s16(vld1_s16(input + 3 * stride), 4);

  // If the very first value != 0, then add 1.
  if (input[0] != 0) {
    const int16x4_t one = vreinterpret_s16_s64(vdup_n_s64(1));
    input_0 = vadd_s16(input_0, one);
  }

  for (i = 0; i < 2; ++i) {
    const int16x8_t input_01 = vcombine_s16(input_0, input_1);
    const int16x8_t input_32 = vcombine_s16(input_3, input_2);

    // in_0 +/- in_3, in_1 +/- in_2
    const int16x8_t s_01 = vaddq_s16(input_01, input_32);
    const int16x8_t s_32 = vsubq_s16(input_01, input_32);

    // step_0 +/- step_1, step_2 +/- step_3
    const int16x4_t s_0 = vget_low_s16(s_01);
    const int16x4_t s_1 = vget_high_s16(s_01);
    const int16x4_t s_2 = vget_high_s16(s_32);
    const int16x4_t s_3 = vget_low_s16(s_32);

    // (s_0 +/- s_1) * cospi_16_64
    // Must expand all elements to s32. See 'needs32' comment in fwd_txfm.c.
    const int32x4_t s_0_p_s_1 = vaddl_s16(s_0, s_1);
    const int32x4_t s_0_m_s_1 = vsubl_s16(s_0, s_1);
    const int32x4_t temp1 = vmulq_n_s32(s_0_p_s_1, cospi_16_64);
    const int32x4_t temp2 = vmulq_n_s32(s_0_m_s_1, cospi_16_64);

    // fdct_round_shift
    int16x4_t out_0 = vrshrn_n_s32(temp1, DCT_CONST_BITS);
    int16x4_t out_2 = vrshrn_n_s32(temp2, DCT_CONST_BITS);

    // s_3 * cospi_8_64 + s_2 * cospi_24_64
    // s_3 * cospi_24_64 - s_2 * cospi_8_64
    const int32x4_t s_3_cospi_8_64 = vmull_n_s16(s_3, cospi_8_64);
    const int32x4_t s_3_cospi_24_64 = vmull_n_s16(s_3, cospi_24_64);

    const int32x4_t temp3 = vmlal_n_s16(s_3_cospi_8_64, s_2, cospi_24_64);
    const int32x4_t temp4 = vmlsl_n_s16(s_3_cospi_24_64, s_2, cospi_8_64);

    // fdct_round_shift
    int16x4_t out_1 = vrshrn_n_s32(temp3, DCT_CONST_BITS);
    int16x4_t out_3 = vrshrn_n_s32(temp4, DCT_CONST_BITS);

    transpose_s16_4x4d(&out_0, &out_1, &out_2, &out_3);

    input_0 = out_0;
    input_1 = out_1;
    input_2 = out_2;
    input_3 = out_3;
  }

  {
    // Not quite a rounding shift. Only add 1 despite shifting by 2.
    const int16x8_t one = vdupq_n_s16(1);
    int16x8_t out_01 = vcombine_s16(input_0, input_1);
    int16x8_t out_23 = vcombine_s16(input_2, input_3);
    out_01 = vshrq_n_s16(vaddq_s16(out_01, one), 2);
    out_23 = vshrq_n_s16(vaddq_s16(out_23, one), 2);
    store_s16q_to_tran_low(final_output + 0 * 8, out_01);
    store_s16q_to_tran_low(final_output + 1 * 8, out_23);
  }
}
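/* The two roundings used above, as scalars. fdct_round_shift is the
 * round-to-nearest applied after each cospi multiply (DCT_CONST_BITS is 14
 * in libvpx, so vrshrn_n_s32(x, 14) computes exactly this); the final stage
 * deliberately adds only 1 before shifting by 2, hence the "not quite a
 * rounding shift" comment. */
#include <stdint.h>

static int16_t fdct_round_shift_ref(int32_t x) {
  return (int16_t)((x + (1 << 13)) >> 14);  /* matches vrshrn_n_s32(x, 14) */
}

static int16_t fdct_final_shift_ref(int16_t x) {
  return (int16_t)((x + 1) >> 2);  /* matches vshrq_n_s16(vaddq_s16(x, 1), 2) */
}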
pstatus_t neon_yCbCrToRGB_16s16s_P3P3(
    const INT16 *pSrc[3],
    int srcStep,
    INT16 *pDst[3],
    int dstStep,
    const prim_size_t *roi)  /* region of interest */
{
    /* TODO: If necessary, check alignments and call the general version. */
    int16x8_t zero = vdupq_n_s16(0);
    int16x8_t max = vdupq_n_s16(255);

    int16x8_t r_cr = vdupq_n_s16(22986);   //  1.403 << 14
    int16x8_t g_cb = vdupq_n_s16(-5636);   // -0.344 << 14
    int16x8_t g_cr = vdupq_n_s16(-11698);  // -0.714 << 14
    int16x8_t b_cb = vdupq_n_s16(28999);   //  1.770 << 14
    int16x8_t c4096 = vdupq_n_s16(4096);

    int16x8_t* y_buf  = (int16x8_t*) pSrc[0];
    int16x8_t* cb_buf = (int16x8_t*) pSrc[1];
    int16x8_t* cr_buf = (int16x8_t*) pSrc[2];
    int16x8_t* r_buf  = (int16x8_t*) pDst[0];
    int16x8_t* g_buf  = (int16x8_t*) pDst[1];
    int16x8_t* b_buf  = (int16x8_t*) pDst[2];

    int srcbump = srcStep / sizeof(int16x8_t);
    int dstbump = dstStep / sizeof(int16x8_t);
    int yp;
    int imax = roi->width * sizeof(INT16) / sizeof(int16x8_t);

    for (yp = 0; yp < roi->height; ++yp)
    {
        int i;
        for (i = 0; i < imax; i++)
        {
            /*
                In order to use NEON signed 16-bit integer multiplication we
                need to convert the floating point factors to signed int
                without losing information. The result of this multiplication
                is 32 bit and we have a NEON instruction that returns the hi
                word of the saturated double. Thus we will multiply the
                factors by the highest possible 2^n, take the upper 16 bits of
                the signed 32-bit result (vqdmulhq_s16 followed by a right
                shift by 1 to reverse the doubling) and correct this result by
                multiplying it by 2^(16-n).

                For the given factors in the conversion matrix the best
                possible n is 14.

                Example for calculating r:
                r = (y>>5) + 128 + (cr*1.403)>>5                  // base formula
                r = (y>>5) + 128 + (HIWORD(cr*(1.403<<14)<<2))>>5 // see above
                r = (y+4096)>>5 + (HIWORD(cr*22986)<<2)>>5        // simplification
                r = ((y+4096)>>2 + HIWORD(cr*22986)) >> 3
            */

            /* y = (y_buf[i] + 4096) >> 2 */
            int16x8_t y = vld1q_s16((INT16*) &y_buf[i]);
            y = vaddq_s16(y, c4096);
            y = vshrq_n_s16(y, 2);
            /* cb = cb_buf[i]; */
            int16x8_t cb = vld1q_s16((INT16*) &cb_buf[i]);
            /* cr = cr_buf[i]; */
            int16x8_t cr = vld1q_s16((INT16*) &cr_buf[i]);

            /* (y + HIWORD(cr*22986)) >> 3 */
            int16x8_t r = vaddq_s16(y, vshrq_n_s16(vqdmulhq_s16(cr, r_cr), 1));
            r = vshrq_n_s16(r, 3);
            /* r_buf[i] = MINMAX(r, 0, 255); */
            r = vminq_s16(vmaxq_s16(r, zero), max);
            vst1q_s16((INT16*) &r_buf[i], r);

            /* (y + HIWORD(cb*-5636) + HIWORD(cr*-11698)) >> 3 */
            int16x8_t g = vaddq_s16(y, vshrq_n_s16(vqdmulhq_s16(cb, g_cb), 1));
            g = vaddq_s16(g, vshrq_n_s16(vqdmulhq_s16(cr, g_cr), 1));
            g = vshrq_n_s16(g, 3);
            /* g_buf[i] = MINMAX(g, 0, 255); */
            g = vminq_s16(vmaxq_s16(g, zero), max);
            vst1q_s16((INT16*) &g_buf[i], g);

            /* (y + HIWORD(cb*28999)) >> 3 */
            int16x8_t b = vaddq_s16(y, vshrq_n_s16(vqdmulhq_s16(cb, b_cb), 1));
            b = vshrq_n_s16(b, 3);
            /* b_buf[i] = MINMAX(b, 0, 255); */
            b = vminq_s16(vmaxq_s16(b, zero), max);
            vst1q_s16((INT16*) &b_buf[i], b);
        }
        y_buf  += srcbump;
        cb_buf += srcbump;
        cr_buf += srcbump;
        r_buf  += dstbump;
        g_buf  += dstbump;
        b_buf  += dstbump;
    }
    return PRIMITIVES_SUCCESS;
}
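/* Scalar sketch of the Q14 trick documented in the comment above:
 * vqdmulhq_s16(a, c) yields (2*a*c) >> 16, so the following >> 1 leaves
 * HIWORD(a*c) (saturation only matters for INT16_MIN * INT16_MIN). One red
 * sample then follows the derived formula exactly. */
static short hiword_mul_ref(short a, short c)
{
    return (short)(((int) a * (int) c) >> 16);
}

static short r_sample_ref(short y, short cr)
{
    /* r = ((y+4096)>>2 + HIWORD(cr*22986)) >> 3, clamped to [0, 255] */
    int r = ((((int) y + 4096) >> 2) + hiword_mul_ref(cr, 22986)) >> 3;
    return (short)(r < 0 ? 0 : (r > 255 ? 255 : r));
}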
static void rfx_dwt_2d_decode_block_horiz_NEON(sint16 * l, sint16 * h,
                                               sint16 * dst, int subband_width)
{
    int y, n;
    sint16 * l_ptr = l;
    sint16 * h_ptr = h;
    sint16 * dst_ptr = dst;

    for (y = 0; y < subband_width; y++)
    {
        /* Even coefficients */
        for (n = 0; n < subband_width; n += 8)
        {
            // dst[2n] = l[n] - ((h[n-1] + h[n] + 1) >> 1);
            int16x8_t l_n = vld1q_s16(l_ptr);
            int16x8_t h_n = vld1q_s16(h_ptr);
            int16x8_t h_n_m = vld1q_s16(h_ptr - 1);

            if (n == 0)
            {
                int16_t first = vgetq_lane_s16(h_n_m, 1);
                h_n_m = vsetq_lane_s16(first, h_n_m, 0);
            }

            int16x8_t tmp_n = vaddq_s16(h_n, h_n_m);
            tmp_n = vaddq_s16(tmp_n, vdupq_n_s16(1));
            tmp_n = vshrq_n_s16(tmp_n, 1);

            int16x8_t dst_n = vsubq_s16(l_n, tmp_n);
            vst1q_s16(l_ptr, dst_n);

            l_ptr += 8;
            h_ptr += 8;
        }
        l_ptr -= subband_width;
        h_ptr -= subband_width;

        /* Odd coefficients */
        for (n = 0; n < subband_width; n += 8)
        {
            // dst[2n + 1] = (h[n] << 1) + ((dst[2n] + dst[2n + 2]) >> 1);
            int16x8_t h_n = vld1q_s16(h_ptr);
            h_n = vshlq_n_s16(h_n, 1);

            int16x8x2_t dst_n;
            dst_n.val[0] = vld1q_s16(l_ptr);
            int16x8_t dst_n_p = vld1q_s16(l_ptr + 1);

            if (n == subband_width - 8)
            {
                int16_t last = vgetq_lane_s16(dst_n_p, 6);
                dst_n_p = vsetq_lane_s16(last, dst_n_p, 7);
            }

            dst_n.val[1] = vaddq_s16(dst_n_p, dst_n.val[0]);
            dst_n.val[1] = vshrq_n_s16(dst_n.val[1], 1);
            dst_n.val[1] = vaddq_s16(dst_n.val[1], h_n);

            vst2q_s16(dst_ptr, dst_n);

            l_ptr += 8;
            h_ptr += 8;
            dst_ptr += 16;
        }
    }
}
void vp8_short_fdct8x4_neon(int16_t *input, int16_t *output, int pitch) {
    int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
    int16x4_t d16s16, d17s16, d26s16, d27s16, d28s16, d29s16;
    uint16x4_t d28u16, d29u16;
    uint16x8_t q14u16;
    int16x8_t q0s16, q1s16, q2s16, q3s16;
    int16x8_t q11s16, q12s16, q13s16, q14s16, q15s16, qEmptys16;
    int32x4_t q9s32, q10s32, q11s32, q12s32;
    int16x8x2_t v2tmp0, v2tmp1;
    int32x4x2_t v2tmp2, v2tmp3;

    d16s16 = vdup_n_s16(5352);
    d17s16 = vdup_n_s16(2217);
    q9s32 = vdupq_n_s32(14500);
    q10s32 = vdupq_n_s32(7500);

    // Part one
    pitch >>= 1;
    q0s16 = vld1q_s16(input);
    input += pitch;
    q1s16 = vld1q_s16(input);
    input += pitch;
    q2s16 = vld1q_s16(input);
    input += pitch;
    q3s16 = vld1q_s16(input);

    v2tmp2 = vtrnq_s32(vreinterpretq_s32_s16(q0s16),
                       vreinterpretq_s32_s16(q2s16));
    v2tmp3 = vtrnq_s32(vreinterpretq_s32_s16(q1s16),
                       vreinterpretq_s32_s16(q3s16));
    v2tmp0 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[0]),   // q0
                       vreinterpretq_s16_s32(v2tmp3.val[0]));  // q1
    v2tmp1 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[1]),   // q2
                       vreinterpretq_s16_s32(v2tmp3.val[1]));  // q3

    q11s16 = vaddq_s16(v2tmp0.val[0], v2tmp1.val[1]);
    q12s16 = vaddq_s16(v2tmp0.val[1], v2tmp1.val[0]);
    q13s16 = vsubq_s16(v2tmp0.val[1], v2tmp1.val[0]);
    q14s16 = vsubq_s16(v2tmp0.val[0], v2tmp1.val[1]);

    q11s16 = vshlq_n_s16(q11s16, 3);
    q12s16 = vshlq_n_s16(q12s16, 3);
    q13s16 = vshlq_n_s16(q13s16, 3);
    q14s16 = vshlq_n_s16(q14s16, 3);

    q0s16 = vaddq_s16(q11s16, q12s16);
    q2s16 = vsubq_s16(q11s16, q12s16);

    q11s32 = q9s32;
    q12s32 = q10s32;

    d26s16 = vget_low_s16(q13s16);
    d27s16 = vget_high_s16(q13s16);
    d28s16 = vget_low_s16(q14s16);
    d29s16 = vget_high_s16(q14s16);

    q9s32 = vmlal_s16(q9s32, d28s16, d16s16);
    q10s32 = vmlal_s16(q10s32, d28s16, d17s16);
    q11s32 = vmlal_s16(q11s32, d29s16, d16s16);
    q12s32 = vmlal_s16(q12s32, d29s16, d17s16);

    q9s32 = vmlal_s16(q9s32, d26s16, d17s16);
    q10s32 = vmlsl_s16(q10s32, d26s16, d16s16);
    q11s32 = vmlal_s16(q11s32, d27s16, d17s16);
    q12s32 = vmlsl_s16(q12s32, d27s16, d16s16);

    d2s16 = vshrn_n_s32(q9s32, 12);
    d6s16 = vshrn_n_s32(q10s32, 12);
    d3s16 = vshrn_n_s32(q11s32, 12);
    d7s16 = vshrn_n_s32(q12s32, 12);
    q1s16 = vcombine_s16(d2s16, d3s16);
    q3s16 = vcombine_s16(d6s16, d7s16);

    // Part two
    q9s32 = vdupq_n_s32(12000);
    q10s32 = vdupq_n_s32(51000);

    v2tmp2 = vtrnq_s32(vreinterpretq_s32_s16(q0s16),
                       vreinterpretq_s32_s16(q2s16));
    v2tmp3 = vtrnq_s32(vreinterpretq_s32_s16(q1s16),
                       vreinterpretq_s32_s16(q3s16));
    v2tmp0 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[0]),   // q0
                       vreinterpretq_s16_s32(v2tmp3.val[0]));  // q1
    v2tmp1 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[1]),   // q2
                       vreinterpretq_s16_s32(v2tmp3.val[1]));  // q3

    q11s16 = vaddq_s16(v2tmp0.val[0], v2tmp1.val[1]);
    q12s16 = vaddq_s16(v2tmp0.val[1], v2tmp1.val[0]);
    q13s16 = vsubq_s16(v2tmp0.val[1], v2tmp1.val[0]);
    q14s16 = vsubq_s16(v2tmp0.val[0], v2tmp1.val[1]);

    q15s16 = vdupq_n_s16(7);
    q11s16 = vaddq_s16(q11s16, q15s16);
    q0s16 = vaddq_s16(q11s16, q12s16);
    q1s16 = vsubq_s16(q11s16, q12s16);

    q11s32 = q9s32;
    q12s32 = q10s32;

    d0s16 = vget_low_s16(q0s16);
    d1s16 = vget_high_s16(q0s16);
    d2s16 = vget_low_s16(q1s16);
    d3s16 = vget_high_s16(q1s16);

    d0s16 = vshr_n_s16(d0s16, 4);
    d4s16 = vshr_n_s16(d1s16, 4);
    d2s16 = vshr_n_s16(d2s16, 4);
    d6s16 = vshr_n_s16(d3s16, 4);

    d26s16 = vget_low_s16(q13s16);
    d27s16 = vget_high_s16(q13s16);
    d28s16 = vget_low_s16(q14s16);
    d29s16 = vget_high_s16(q14s16);

    q9s32 = vmlal_s16(q9s32, d28s16, d16s16);
    q10s32 = vmlal_s16(q10s32, d28s16, d17s16);
    q11s32 = vmlal_s16(q11s32, d29s16, d16s16);
    q12s32 = vmlal_s16(q12s32, d29s16, d17s16);

    q9s32 = vmlal_s16(q9s32, d26s16, d17s16);
    q10s32 = vmlsl_s16(q10s32, d26s16, d16s16);
    q11s32 = vmlal_s16(q11s32, d27s16, d17s16);
    q12s32 = vmlsl_s16(q12s32, d27s16, d16s16);

    d1s16 = vshrn_n_s32(q9s32, 16);
    d3s16 = vshrn_n_s32(q10s32, 16);
    d5s16 = vshrn_n_s32(q11s32, 16);
    d7s16 = vshrn_n_s32(q12s32, 16);

    qEmptys16 = vdupq_n_s16(0);
    q14u16 = vceqq_s16(q14s16, qEmptys16);
    q14u16 = vmvnq_u16(q14u16);

    d28u16 = vget_low_u16(q14u16);
    d29u16 = vget_high_u16(q14u16);
    d1s16 = vsub_s16(d1s16, vreinterpret_s16_u16(d28u16));
    d5s16 = vsub_s16(d5s16, vreinterpret_s16_u16(d29u16));

    q0s16 = vcombine_s16(d0s16, d1s16);
    q1s16 = vcombine_s16(d2s16, d3s16);
    q2s16 = vcombine_s16(d4s16, d5s16);
    q3s16 = vcombine_s16(d6s16, d7s16);

    vst1q_s16(output, q0s16);
    vst1q_s16(output + 8, q1s16);
    vst1q_s16(output + 16, q2s16);
    vst1q_s16(output + 24, q3s16);
    return;
}
void vpx_idct4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
                             int dest_stride) {
  uint8x8_t d26u8, d27u8;
  uint32x2_t d26u32, d27u32;
  uint16x8_t q8u16, q9u16;
  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16;
  int16x4_t d22s16, d23s16, d24s16, d26s16, d27s16, d28s16, d29s16;
  int16x8_t q8s16, q9s16, q13s16, q14s16;
  int32x4_t q1s32, q13s32, q14s32, q15s32;
  int16x4x2_t d0x2s16, d1x2s16;
  int32x4x2_t q0x2s32;
  uint8_t *d;

  d26u32 = d27u32 = vdup_n_u32(0);

  q8s16 = load_tran_low_to_s16(input);
  q9s16 = load_tran_low_to_s16(input + 8);

  d16s16 = vget_low_s16(q8s16);
  d17s16 = vget_high_s16(q8s16);
  d18s16 = vget_low_s16(q9s16);
  d19s16 = vget_high_s16(q9s16);

  d0x2s16 = vtrn_s16(d16s16, d17s16);
  d1x2s16 = vtrn_s16(d18s16, d19s16);
  q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]);
  q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]);

  d20s16 = vdup_n_s16((int16_t)cospi_8_64);
  d21s16 = vdup_n_s16((int16_t)cospi_16_64);

  q0x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q8s16),
                      vreinterpretq_s32_s16(q9s16));
  d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
  d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
  d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
  d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));

  d22s16 = vdup_n_s16((int16_t)cospi_24_64);

  // stage 1
  d23s16 = vadd_s16(d16s16, d18s16);
  d24s16 = vsub_s16(d16s16, d18s16);

  q15s32 = vmull_s16(d17s16, d22s16);
  q1s32 = vmull_s16(d17s16, d20s16);
  q13s32 = vmull_s16(d23s16, d21s16);
  q14s32 = vmull_s16(d24s16, d21s16);

  q15s32 = vmlsl_s16(q15s32, d19s16, d20s16);
  q1s32 = vmlal_s16(q1s32, d19s16, d22s16);

  d26s16 = vqrshrn_n_s32(q13s32, 14);
  d27s16 = vqrshrn_n_s32(q14s32, 14);
  d29s16 = vqrshrn_n_s32(q15s32, 14);
  d28s16 = vqrshrn_n_s32(q1s32, 14);
  q13s16 = vcombine_s16(d26s16, d27s16);
  q14s16 = vcombine_s16(d28s16, d29s16);

  // stage 2
  q8s16 = vaddq_s16(q13s16, q14s16);
  q9s16 = vsubq_s16(q13s16, q14s16);

  d16s16 = vget_low_s16(q8s16);
  d17s16 = vget_high_s16(q8s16);
  d18s16 = vget_high_s16(q9s16);  // vswp d18 d19
  d19s16 = vget_low_s16(q9s16);

  d0x2s16 = vtrn_s16(d16s16, d17s16);
  d1x2s16 = vtrn_s16(d18s16, d19s16);
  q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]);
  q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]);

  q0x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q8s16),
                      vreinterpretq_s32_s16(q9s16));
  d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
  d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0]));
  d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
  d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));

  // do the transform on columns
  // stage 1
  d23s16 = vadd_s16(d16s16, d18s16);
  d24s16 = vsub_s16(d16s16, d18s16);

  q15s32 = vmull_s16(d17s16, d22s16);
  q1s32 = vmull_s16(d17s16, d20s16);
  q13s32 = vmull_s16(d23s16, d21s16);
  q14s32 = vmull_s16(d24s16, d21s16);

  q15s32 = vmlsl_s16(q15s32, d19s16, d20s16);
  q1s32 = vmlal_s16(q1s32, d19s16, d22s16);

  d26s16 = vqrshrn_n_s32(q13s32, 14);
  d27s16 = vqrshrn_n_s32(q14s32, 14);
  d29s16 = vqrshrn_n_s32(q15s32, 14);
  d28s16 = vqrshrn_n_s32(q1s32, 14);
  q13s16 = vcombine_s16(d26s16, d27s16);
  q14s16 = vcombine_s16(d28s16, d29s16);

  // stage 2
  q8s16 = vaddq_s16(q13s16, q14s16);
  q9s16 = vsubq_s16(q13s16, q14s16);

  q8s16 = vrshrq_n_s16(q8s16, 4);
  q9s16 = vrshrq_n_s16(q9s16, 4);

  d = dest;
  d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 0);
  d += dest_stride;
  d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 1);
  d += dest_stride;
  d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 1);
  d += dest_stride;
  d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 0);

  q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u32(d26u32));
  q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u32(d27u32));

  d26u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
  d27u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));

  d = dest;
  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 0);
  d += dest_stride;
  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 1);
  d += dest_stride;
  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 1);
  d += dest_stride;
  vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 0);
}
void vp9_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
  int j, k;
  uint16x8_t q0u16, q3u16, q8u16, q9u16, q10u16, q11u16;
  uint8x16_t q0u8, q1u8, q2u8;
  int16x8_t q12s16, q13s16, q14s16, q15s16;
  uint16x4_t d6u16;
  uint8x8_t d0u8, d1u8, d2u8, d3u8, d26u8;

  q0u8 = vld1q_dup_u8(above - 1);
  q1u8 = vld1q_u8(above);
  q2u8 = vld1q_u8(above + 16);
  q8u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q0u8));
  q9u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q0u8));
  q10u16 = vsubl_u8(vget_low_u8(q2u8), vget_low_u8(q0u8));
  q11u16 = vsubl_u8(vget_high_u8(q2u8), vget_high_u8(q0u8));
  for (k = 0; k < 4; k++, left += 8) {
    d26u8 = vld1_u8(left);
    q3u16 = vmovl_u8(d26u8);
    d6u16 = vget_low_u16(q3u16);
    for (j = 0; j < 2; j++, d6u16 = vget_high_u16(q3u16)) {
      q0u16 = vdupq_lane_u16(d6u16, 0);
      q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                         vreinterpretq_s16_u16(q8u16));
      q13s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                         vreinterpretq_s16_u16(q9u16));
      q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                         vreinterpretq_s16_u16(q10u16));
      q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                         vreinterpretq_s16_u16(q11u16));
      d0u8 = vqmovun_s16(q12s16);
      d1u8 = vqmovun_s16(q13s16);
      d2u8 = vqmovun_s16(q14s16);
      d3u8 = vqmovun_s16(q15s16);
      q0u8 = vcombine_u8(d0u8, d1u8);
      q1u8 = vcombine_u8(d2u8, d3u8);
      vst1q_u64((uint64_t *)dst, vreinterpretq_u64_u8(q0u8));
      vst1q_u64((uint64_t *)(dst + 16), vreinterpretq_u64_u8(q1u8));
      dst += stride;

      q0u16 = vdupq_lane_u16(d6u16, 1);
      q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                         vreinterpretq_s16_u16(q8u16));
      q13s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                         vreinterpretq_s16_u16(q9u16));
      q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                         vreinterpretq_s16_u16(q10u16));
      q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                         vreinterpretq_s16_u16(q11u16));
      d0u8 = vqmovun_s16(q12s16);
      d1u8 = vqmovun_s16(q13s16);
      d2u8 = vqmovun_s16(q14s16);
      d3u8 = vqmovun_s16(q15s16);
      q0u8 = vcombine_u8(d0u8, d1u8);
      q1u8 = vcombine_u8(d2u8, d3u8);
      vst1q_u64((uint64_t *)dst, vreinterpretq_u64_u8(q0u8));
      vst1q_u64((uint64_t *)(dst + 16), vreinterpretq_u64_u8(q1u8));
      dst += stride;

      q0u16 = vdupq_lane_u16(d6u16, 2);
      q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                         vreinterpretq_s16_u16(q8u16));
      q13s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                         vreinterpretq_s16_u16(q9u16));
      q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                         vreinterpretq_s16_u16(q10u16));
      q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                         vreinterpretq_s16_u16(q11u16));
      d0u8 = vqmovun_s16(q12s16);
      d1u8 = vqmovun_s16(q13s16);
      d2u8 = vqmovun_s16(q14s16);
      d3u8 = vqmovun_s16(q15s16);
      q0u8 = vcombine_u8(d0u8, d1u8);
      q1u8 = vcombine_u8(d2u8, d3u8);
      vst1q_u64((uint64_t *)dst, vreinterpretq_u64_u8(q0u8));
      vst1q_u64((uint64_t *)(dst + 16), vreinterpretq_u64_u8(q1u8));
      dst += stride;

      q0u16 = vdupq_lane_u16(d6u16, 3);
      q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                         vreinterpretq_s16_u16(q8u16));
      q13s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                         vreinterpretq_s16_u16(q9u16));
      q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                         vreinterpretq_s16_u16(q10u16));
      q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                         vreinterpretq_s16_u16(q11u16));
      d0u8 = vqmovun_s16(q12s16);
      d1u8 = vqmovun_s16(q13s16);
      d2u8 = vqmovun_s16(q14s16);
      d3u8 = vqmovun_s16(q15s16);
      q0u8 = vcombine_u8(d0u8, d1u8);
      q1u8 = vcombine_u8(d2u8, d3u8);
      vst1q_u64((uint64_t *)dst, vreinterpretq_u64_u8(q0u8));
      vst1q_u64((uint64_t *)(dst + 16), vreinterpretq_u64_u8(q1u8));
      dst += stride;
    }
  }
}
void vp9_tm_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
  int j, k;
  uint16x8_t q0u16, q2u16, q3u16, q8u16, q10u16;
  uint8x16_t q0u8, q1u8;
  int16x8_t q0s16, q1s16, q8s16, q11s16;
  uint16x4_t d20u16;
  uint8x8_t d2u8, d3u8, d18u8, d22u8, d23u8;

  q0u8 = vld1q_dup_u8(above - 1);
  q1u8 = vld1q_u8(above);
  q2u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q0u8));
  q3u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q0u8));
  for (k = 0; k < 2; k++, left += 8) {
    d18u8 = vld1_u8(left);
    q10u16 = vmovl_u8(d18u8);
    d20u16 = vget_low_u16(q10u16);
    for (j = 0; j < 2; j++, d20u16 = vget_high_u16(q10u16)) {
      q0u16 = vdupq_lane_u16(d20u16, 0);
      q8u16 = vdupq_lane_u16(d20u16, 1);
      q1s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                        vreinterpretq_s16_u16(q2u16));
      q0s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                        vreinterpretq_s16_u16(q3u16));
      q11s16 = vaddq_s16(vreinterpretq_s16_u16(q8u16),
                         vreinterpretq_s16_u16(q2u16));
      q8s16 = vaddq_s16(vreinterpretq_s16_u16(q8u16),
                        vreinterpretq_s16_u16(q3u16));
      d2u8 = vqmovun_s16(q1s16);
      d3u8 = vqmovun_s16(q0s16);
      d22u8 = vqmovun_s16(q11s16);
      d23u8 = vqmovun_s16(q8s16);
      vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d2u8));
      vst1_u64((uint64_t *)(dst + 8), vreinterpret_u64_u8(d3u8));
      dst += stride;
      vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d22u8));
      vst1_u64((uint64_t *)(dst + 8), vreinterpret_u64_u8(d23u8));
      dst += stride;

      q0u16 = vdupq_lane_u16(d20u16, 2);
      q8u16 = vdupq_lane_u16(d20u16, 3);
      q1s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                        vreinterpretq_s16_u16(q2u16));
      q0s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
                        vreinterpretq_s16_u16(q3u16));
      q11s16 = vaddq_s16(vreinterpretq_s16_u16(q8u16),
                         vreinterpretq_s16_u16(q2u16));
      q8s16 = vaddq_s16(vreinterpretq_s16_u16(q8u16),
                        vreinterpretq_s16_u16(q3u16));
      d2u8 = vqmovun_s16(q1s16);
      d3u8 = vqmovun_s16(q0s16);
      d22u8 = vqmovun_s16(q11s16);
      d23u8 = vqmovun_s16(q8s16);
      vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d2u8));
      vst1_u64((uint64_t *)(dst + 8), vreinterpret_u64_u8(d3u8));
      dst += stride;
      vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d22u8));
      vst1_u64((uint64_t *)(dst + 8), vreinterpret_u64_u8(d23u8));
      dst += stride;
    }
  }
}
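/* Scalar statement of the TrueMotion rule both predictors above vectorize:
 * pred[row][col] = clip8(left[row] + above[col] - above[-1]). The NEON
 * versions hoist the (above[col] - above[-1]) differences out of the row
 * loop and re-add each left[row] with saturating narrowing (vqmovun_s16). */
#include <stddef.h>
#include <stdint.h>

static void tm_predictor_c(uint8_t *dst, ptrdiff_t stride, int bs,
                           const uint8_t *above, const uint8_t *left) {
  int r, c;
  for (r = 0; r < bs; r++, dst += stride) {
    for (c = 0; c < bs; c++) {
      const int v = left[r] + above[c] - above[-1];
      dst[c] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }
  }
}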
void vp9_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
                          int skip_block, const int16_t *zbin_ptr,
                          const int16_t *round_ptr, const int16_t *quant_ptr,
                          const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
                          int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
                          int zbin_oq_value, uint16_t *eob_ptr,
                          const int16_t *scan, const int16_t *iscan) {
  // TODO(jingning) Decide the need of these arguments after the
  // quantization process is completed.
  (void)zbin_ptr;
  (void)quant_shift_ptr;
  (void)zbin_oq_value;
  (void)scan;

  if (!skip_block) {
    // Quantization pass: All coefficients with index >= zero_flag are
    // skippable. Note: zero_flag can be zero.
    int i;
    const int16x8_t v_zero = vdupq_n_s16(0);
    const int16x8_t v_one = vdupq_n_s16(1);
    int16x8_t v_eobmax_76543210 = vdupq_n_s16(-1);
    int16x8_t v_round = vmovq_n_s16(round_ptr[1]);
    int16x8_t v_quant = vmovq_n_s16(quant_ptr[1]);
    int16x8_t v_dequant = vmovq_n_s16(dequant_ptr[1]);

    // adjust for dc
    v_round = vsetq_lane_s16(round_ptr[0], v_round, 0);
    v_quant = vsetq_lane_s16(quant_ptr[0], v_quant, 0);
    v_dequant = vsetq_lane_s16(dequant_ptr[0], v_dequant, 0);

    // process dc and the first seven ac coeffs
    {
      const int16x8_t v_iscan = vld1q_s16(&iscan[0]);
      const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[0]);
      const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
      const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
      const int32x4_t v_tmp_lo =
          vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant));
      const int32x4_t v_tmp_hi =
          vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant));
      const int16x8_t v_tmp2 =
          vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));
      const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
      const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
      const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
      const int16x8_t v_qcoeff_a = veorq_s16(v_tmp2, v_coeff_sign);
      const int16x8_t v_qcoeff = vsubq_s16(v_qcoeff_a, v_coeff_sign);
      const int16x8_t v_dqcoeff = vmulq_s16(v_qcoeff, v_dequant);

      v_eobmax_76543210 = vmaxq_s16(v_eobmax_76543210, v_nz_iscan);

      vst1q_s16(&qcoeff_ptr[0], v_qcoeff);
      vst1q_s16(&dqcoeff_ptr[0], v_dqcoeff);

      v_round = vmovq_n_s16(round_ptr[1]);
      v_quant = vmovq_n_s16(quant_ptr[1]);
      v_dequant = vmovq_n_s16(dequant_ptr[1]);
    }

    // now process the rest of the ac coeffs
    for (i = 8; i < count; i += 8) {
      const int16x8_t v_iscan = vld1q_s16(&iscan[i]);
      const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[i]);
      const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
      const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
      const int32x4_t v_tmp_lo =
          vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant));
      const int32x4_t v_tmp_hi =
          vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant));
      const int16x8_t v_tmp2 =
          vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));
      const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
      const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
      const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
      const int16x8_t v_qcoeff_a = veorq_s16(v_tmp2, v_coeff_sign);
      const int16x8_t v_qcoeff = vsubq_s16(v_qcoeff_a, v_coeff_sign);
      const int16x8_t v_dqcoeff = vmulq_s16(v_qcoeff, v_dequant);

      v_eobmax_76543210 = vmaxq_s16(v_eobmax_76543210, v_nz_iscan);

      vst1q_s16(&qcoeff_ptr[i], v_qcoeff);
      vst1q_s16(&dqcoeff_ptr[i], v_dqcoeff);
    }

    {
      const int16x4_t v_eobmax_3210 = vmax_s16(
          vget_low_s16(v_eobmax_76543210), vget_high_s16(v_eobmax_76543210));
      const int64x1_t v_eobmax_xx32 =
          vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32);
      const int16x4_t v_eobmax_tmp =
          vmax_s16(v_eobmax_3210, vreinterpret_s16_s64(v_eobmax_xx32));
      const int64x1_t v_eobmax_xxx3 =
          vshr_n_s64(vreinterpret_s64_s16(v_eobmax_tmp), 16);
      const int16x4_t v_eobmax_final =
          vmax_s16(v_eobmax_tmp, vreinterpret_s16_s64(v_eobmax_xxx3));

      *eob_ptr = (uint16_t)vget_lane_s16(v_eobmax_final, 0);
    }
  } else {
    vpx_memset(qcoeff_ptr, 0, count * sizeof(int16_t));
    vpx_memset(dqcoeff_ptr, 0, count * sizeof(int16_t));
    *eob_ptr = 0;
  }
}
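/* Scalar sketch of the per-coefficient "fp" quantizer vectorized above: bias
 * by round, scale by quant keeping the high 16 bits, restore the sign,
 * dequantize, and remember the largest scan position holding a nonzero value
 * (the vector code tracks this with a running vmaxq over iscan+1, then
 * reduces it to the final eob). */
#include <stdint.h>

static void quantize_fp_one_ref(int16_t coeff, int16_t round, int16_t quant,
                                int16_t dequant, int16_t iscan,
                                int16_t *qcoeff, int16_t *dqcoeff, int *eob) {
  const int abs_coeff = coeff < 0 ? -coeff : coeff;
  const int tmp = ((abs_coeff + round) * quant) >> 16;  /* vshrn_n_s32(.., 16) */
  *qcoeff = (int16_t)(coeff < 0 ? -tmp : tmp);
  *dqcoeff = (int16_t)(*qcoeff * dequant);
  if (tmp != 0 && iscan + 1 > *eob)
    *eob = iscan + 1;
}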