/* Compile-time test of vminq_s32 on int32x4_t operands; the arguments are
   deliberately left uninitialized since only code generation is exercised. */
void test_vminQs32 (void)
{
  int32x4_t out_int32x4_t;
  int32x4_t arg0_int32x4_t;
  int32x4_t arg1_int32x4_t;

  out_int32x4_t = vminq_s32 (arg0_int32x4_t, arg1_int32x4_t);
}
static inline void yuv2rgb_4x2(const uint8_t *y1, const uint8_t *y2,
                               const uint8_t *u, const uint8_t *v,
                               int16_t *r1, int16_t *g1, int16_t *b1,
                               int16_t *r2, int16_t *g2, int16_t *b2)
{
	int32x4_t ry1;
	int32x4_t ry2;
	int32x4_t rvug;
	int32x4_t rvr;
	int32x4_t rub;
	int32x4_t rr1, rg1, rb1, rr2, rg2, rb2;
	int32x4_t max;

	/* Load the premultiplied luma and chroma contributions (macros defined elsewhere). */
	LOAD_Y_PREMULTS(0)
	LOAD_Y_PREMULTS(1)
	LOAD_Y_PREMULTS(2)
	LOAD_Y_PREMULTS(3)
	LOAD_UV_PREMULTS(0)
	LOAD_UV_PREMULTS(1)

	/* The clamp below runs before the >>13 narrowing shift, so the bound loaded
	   from yuvmax has to be 255 expressed at that Q13 fixed-point scale; that is
	   why the plain vdupq_n_s32(255) below does not work. */
	max = vld1q_s32(yuvmax);
	//max = vdupq_n_s32(255);

	/* R/G/B terms: luma plus the corresponding chroma premultiplies. */
	rr1 = vaddq_s32(ry1, rvr);
	rr2 = vaddq_s32(ry2, rvr);
	rg1 = vaddq_s32(ry1, rvug);
	rg2 = vaddq_s32(ry2, rvug);
	rb1 = vaddq_s32(ry1, rub);
	rb2 = vaddq_s32(ry2, rub);

	/* Clamp |x| to max while still in Q13, then saturating-narrow to int16. */
	rr1 = vminq_s32(vabsq_s32(rr1), max);
	rr2 = vminq_s32(vabsq_s32(rr2), max);
	rg1 = vminq_s32(vabsq_s32(rg1), max);
	rg2 = vminq_s32(vabsq_s32(rg2), max);
	rb1 = vminq_s32(vabsq_s32(rb1), max);
	rb2 = vminq_s32(vabsq_s32(rb2), max);

	vst1_s16(r1, vqshrn_n_s32(rr1, 13));
	vst1_s16(r2, vqshrn_n_s32(rr2, 13));
	vst1_s16(g1, vqshrn_n_s32(rg1, 13));
	vst1_s16(g2, vqshrn_n_s32(rg2, 13));
	vst1_s16(b1, vqshrn_n_s32(rb1, 13));
	vst1_s16(b2, vqshrn_n_s32(rb2, 13));
}
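The following is a minimal standalone sketch, not code from the snippet above (the function, input values, and the assumption that yuvmax holds 255 << 13 are mine): it shows why the clamp bound must live at the Q13 scale when vminq_s32 is applied before the saturating narrowing shift vqshrn_n_s32(..., 13).

/* Clamp-then-narrow pattern on concrete Q13 values (illustrative sketch). */
#include <arm_neon.h>
#include <stdio.h>

int main(void)
{
    /* Q13 inputs: 100<<13, an over-range 300<<13, 0, and exactly 255<<13. */
    const int32_t in[4] = { 100 << 13, 300 << 13, 0, 255 << 13 };
    const int32_t bound = 255 << 13;            /* assumed: 255 at Q13 scale */

    int32x4_t v   = vld1q_s32(in);
    int32x4_t max = vdupq_n_s32(bound);

    v = vminq_s32(vabsq_s32(v), max);           /* clamp while still in Q13  */
    int16x4_t out = vqshrn_n_s32(v, 13);        /* shift back to pixel range */

    int16_t res[4];
    vst1_s16(res, out);
    printf("%d %d %d %d\n", res[0], res[1], res[2], res[3]);  /* 100 255 0 255 */
    return 0;
}

Clamping with vdupq_n_s32(255) instead would compare raw Q13 values against 255, so every nonzero pixel would collapse to 0 after the shift.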
void ne10_img_vresize_linear_neon (const int** src, unsigned char* dst, const short* beta, int width)
{
    const int *S0 = src[0], *S1 = src[1];

    int32x4_t qS0_0123, qS0_4567, qS1_0123, qS1_4567;
    int32x4_t qT_0123, qT_4567;
    int16x4_t dT_0123, dT_4567;
    uint16x8_t qT_01234567;
    uint8x8_t dT_01234567, dDst_01234567;

    /* Pack the two filter taps into one 32x2 vector.  Initializing with vdup
       before vset_lane avoids reading an uninitialized vector, which the
       original declare-then-set-lane sequence did. */
    int32x2_t dBeta = vdup_n_s32 ((int) (beta[0]));
    dBeta = vset_lane_s32 ((int) (beta[1]), dBeta, 1);

    int32x4_t qDelta, qMin, qMax;
    qDelta = vdupq_n_s32 (DELTA);
    qMin = vdupq_n_s32 (0);
    qMax = vdupq_n_s32 (255);

    int x = 0;
    for (; x <= width - 8; x += 8)
    {
        /* Load eight 32-bit samples from each source row. */
        qS0_0123 = vld1q_s32 (&S0[x]);
        qS0_4567 = vld1q_s32 (&S0[x + 4]);
        qS1_0123 = vld1q_s32 (&S1[x]);
        qS1_4567 = vld1q_s32 (&S1[x + 4]);

        /* Weighted sum of the two rows: T = S0 * beta[0] + S1 * beta[1]. */
        qT_0123 = vmulq_lane_s32 (qS0_0123, dBeta, 0);
        qT_4567 = vmulq_lane_s32 (qS0_4567, dBeta, 0);
        qT_0123 = vmlaq_lane_s32 (qT_0123, qS1_0123, dBeta, 1);
        qT_4567 = vmlaq_lane_s32 (qT_4567, qS1_4567, dBeta, 1);

        /* Round, shift back to pixel range, and clamp to [0, 255]. */
        qT_0123 = vaddq_s32 (qT_0123, qDelta);
        qT_4567 = vaddq_s32 (qT_4567, qDelta);
        qT_0123 = vshrq_n_s32 (qT_0123, BITS);
        qT_4567 = vshrq_n_s32 (qT_4567, BITS);
        qT_0123 = vmaxq_s32 (qT_0123, qMin);
        qT_4567 = vmaxq_s32 (qT_4567, qMin);
        qT_0123 = vminq_s32 (qT_0123, qMax);
        qT_4567 = vminq_s32 (qT_4567, qMax);

        /* Narrow 32 -> 16 -> 8 bits and store eight output pixels. */
        dT_0123 = vmovn_s32 (qT_0123);
        dT_4567 = vmovn_s32 (qT_4567);
        qT_01234567 = vreinterpretq_u16_s16 (vcombine_s16 (dT_0123, dT_4567));
        dT_01234567 = vmovn_u16 (qT_01234567);
        vst1_u8 (&dst[x], dT_01234567);
    }

    if (x < width)
    {
        /* Residual tail: compute a full 8-wide block, then blend only the
           remaining (width - x) pixels into dst through a byte mask. */
        uint8x8_t dMask;
        dMask = vld1_u8 ((uint8_t *) (&ne10_img_vresize_linear_mask_residual_table[ (width - x - 1)]));
        dDst_01234567 = vld1_u8 (&dst[x]);

        qS0_0123 = vld1q_s32 (&S0[x]);
        qS0_4567 = vld1q_s32 (&S0[x + 4]);
        qS1_0123 = vld1q_s32 (&S1[x]);
        qS1_4567 = vld1q_s32 (&S1[x + 4]);

        qT_0123 = vmulq_lane_s32 (qS0_0123, dBeta, 0);
        qT_4567 = vmulq_lane_s32 (qS0_4567, dBeta, 0);
        qT_0123 = vmlaq_lane_s32 (qT_0123, qS1_0123, dBeta, 1);
        qT_4567 = vmlaq_lane_s32 (qT_4567, qS1_4567, dBeta, 1);

        qT_0123 = vaddq_s32 (qT_0123, qDelta);
        qT_4567 = vaddq_s32 (qT_4567, qDelta);
        qT_0123 = vshrq_n_s32 (qT_0123, BITS);
        qT_4567 = vshrq_n_s32 (qT_4567, BITS);
        qT_0123 = vmaxq_s32 (qT_0123, qMin);
        qT_4567 = vmaxq_s32 (qT_4567, qMin);
        qT_0123 = vminq_s32 (qT_0123, qMax);
        qT_4567 = vminq_s32 (qT_4567, qMax);

        dT_0123 = vmovn_s32 (qT_0123);
        dT_4567 = vmovn_s32 (qT_4567);
        qT_01234567 = vreinterpretq_u16_s16 (vcombine_s16 (dT_0123, dT_4567));
        dT_01234567 = vmovn_u16 (qT_01234567);

        /* Keep new pixels where the mask is set, existing dst bytes elsewhere. */
        dMask = vbsl_u8 (dMask, dT_01234567, dDst_01234567);
        vst1_u8 (&dst[x], dMask);
    }
}
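Below is a self-contained sketch of the masked-tail technique used above. The table layout is my assumption, not necessarily how ne10_img_vresize_linear_mask_residual_table is laid out: a table indexed by (remaining - 1) yields an 8-byte mask, and vbsl_u8 keeps the freshly computed bytes where the mask is 0xFF while preserving the old dst bytes elsewhere.

/* Masked partial store via vbsl_u8 (illustrative; table layout assumed). */
#include <arm_neon.h>
#include <stdio.h>

/* mask_table[n-1] selects the first n of 8 lanes. */
static const uint8_t mask_table[7][8] = {
    {0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
    {0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00},
    {0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00},
    {0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00},
    {0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00},
    {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00},
    {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00},
};

int main(void)
{
    uint8_t dst[8]  = {9, 9, 9, 9, 9, 9, 9, 9};  /* existing output bytes   */
    uint8_t new8[8] = {1, 2, 3, 4, 5, 6, 7, 8};  /* freshly computed block  */
    int remaining = 3;                            /* only 3 pixels are valid */

    uint8x8_t mask  = vld1_u8(mask_table[remaining - 1]);
    uint8x8_t blend = vbsl_u8(mask, vld1_u8(new8), vld1_u8(dst));
    vst1_u8(dst, blend);

    for (int i = 0; i < 8; i++)
        printf("%d ", dst[i]);                    /* 1 2 3 9 9 9 9 9 */
    printf("\n");
    return 0;
}

The read-modify-write of the destination avoids a scalar cleanup loop at the cost of one extra 8-byte load; note that the source reads in the tail still fetch a full 8-wide block, so the input rows must remain readable past the last valid pixel.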
/* Type-overloaded convenience wrapper that forwards to the signed 32-bit variant. */
inline int32x4_t vminq(const int32x4_t & v0, const int32x4_t & v1)
{
    return vminq_s32(v0, v1);
}
/* Returns the lane-wise minimum of two int32x4_t vectors. */
int32x4_t test_vminq_s32 (int32x4_t __a, int32x4_t __b)
{
    return vminq_s32(__a, __b);
}
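For completeness, here is a small runnable sanity check of the intrinsic itself (the test values are mine): vminq_s32 compares the two vectors lane by lane and returns the smaller element of each pair.

/* Lane-wise minimum demonstration (illustrative values). */
#include <arm_neon.h>
#include <stdio.h>

int main(void)
{
    const int32_t a[4] = { -5, 10, 7, 2 };
    const int32_t b[4] = {  3, -4, 7, 9 };
    int32_t r[4];

    vst1q_s32(r, vminq_s32(vld1q_s32(a), vld1q_s32(b)));
    printf("%d %d %d %d\n", r[0], r[1], r[2], r[3]);  /* -5 -4 7 2 */
    return 0;
}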