void test_vmovns16 (void)
{
  int8x8_t out_int8x8_t;
  int16x8_t arg0_int16x8_t;

  out_int8x8_t = vmovn_s16 (arg0_int16x8_t);
}
JL_DLLEXPORT struct_aa64_3 test_aa64_vec_2(struct_aa64_3 v1, struct_aa64_4 v2)
{
    struct_aa64_3 x = {v1.v1 + vmovn_s16(v2.v1), v1.v2 - v2.v2};
    return x;
}
// type-dispatched wrapper: picks the s16 narrowing intrinsic from the argument type
inline int8x8_t vmovn(const int16x8_t & v) { return vmovn_s16(v); }
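/*
 * A minimal standalone sketch (assumes an ARM target with NEON and
 * <arm_neon.h>): vmovn_s16 narrows each of the eight 16-bit lanes to its
 * low 8 bits, truncating without saturation (vqmovn_s16 is the saturating
 * variant).
 */
#include <arm_neon.h>
#include <stdio.h>

int main(void)
{
    const int16_t in[8] = { 1, -2, 127, 128, 255, 256, -129, 300 };
    int8_t out[8];

    int16x8_t wide   = vld1q_s16(in);    /* load eight 16-bit lanes        */
    int8x8_t  narrow = vmovn_s16(wide);  /* keep the low byte of each lane */
    vst1_s8(out, narrow);                /* store eight 8-bit lanes        */

    for (int i = 0; i < 8; ++i)
        printf("%4d -> %4d\n", in[i], out[i]);
    return 0;
}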
void qt_blend_argb32_on_argb32_neon(uchar *destPixels, int dbpl,
                                    const uchar *srcPixels, int sbpl,
                                    int w, int h,
                                    int const_alpha)
{
    const uint *src = (const uint *) srcPixels;
    uint *dst = (uint *) destPixels;
    int16x8_t half = vdupq_n_s16(0x80);
    int16x8_t full = vdupq_n_s16(0xff);
    if (const_alpha == 256) {
        for (int y = 0; y < h; ++y) {
            int x = 0;
            for (; x < w - 3; x += 4) {
                int32x4_t src32 = vld1q_s32((int32_t *)&src[x]);
                if ((src[x] & src[x+1] & src[x+2] & src[x+3]) >= 0xff000000) {
                    // all opaque
                    vst1q_s32((int32_t *)&dst[x], src32);
                } else if (src[x] | src[x+1] | src[x+2] | src[x+3]) {
                    int32x4_t dst32 = vld1q_s32((int32_t *)&dst[x]);

                    const uint8x16_t src8 = vreinterpretq_u8_s32(src32);
                    const uint8x16_t dst8 = vreinterpretq_u8_s32(dst32);

                    const uint8x8_t src8_low = vget_low_u8(src8);
                    const uint8x8_t dst8_low = vget_low_u8(dst8);

                    const uint8x8_t src8_high = vget_high_u8(src8);
                    const uint8x8_t dst8_high = vget_high_u8(dst8);

                    // widen each byte channel to 16 bits
                    const int16x8_t src16_low = vreinterpretq_s16_u16(vmovl_u8(src8_low));
                    const int16x8_t dst16_low = vreinterpretq_s16_u16(vmovl_u8(dst8_low));

                    const int16x8_t src16_high = vreinterpretq_s16_u16(vmovl_u8(src8_high));
                    const int16x8_t dst16_high = vreinterpretq_s16_u16(vmovl_u8(dst8_high));

                    const int16x8_t result16_low = qvsource_over_s16(src16_low, dst16_low, half, full);
                    const int16x8_t result16_high = qvsource_over_s16(src16_high, dst16_high, half, full);

                    // narrow the blended 16-bit channels back to bytes
                    const int32x2_t result32_low = vreinterpret_s32_s8(vmovn_s16(result16_low));
                    const int32x2_t result32_high = vreinterpret_s32_s8(vmovn_s16(result16_high));
                    vst1q_s32((int32_t *)&dst[x], vcombine_s32(result32_low, result32_high));
                }
            }
            // scalar tail for the remaining 0-3 pixels
            for (; x < w; ++x) {
                uint s = src[x];
                if (s >= 0xff000000)
                    dst[x] = s;
                else if (s != 0)
                    dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s));
            }
            dst = (quint32 *)(((uchar *) dst) + dbpl);
            src = (const quint32 *)(((const uchar *) src) + sbpl);
        }
    } else if (const_alpha != 0) {
        const_alpha = (const_alpha * 255) >> 8;
        int16x8_t const_alpha16 = vdupq_n_s16(const_alpha);
        for (int y = 0; y < h; ++y) {
            int x = 0;
            for (; x < w - 3; x += 4) {
                if (src[x] | src[x+1] | src[x+2] | src[x+3]) {
                    int32x4_t src32 = vld1q_s32((int32_t *)&src[x]);
                    int32x4_t dst32 = vld1q_s32((int32_t *)&dst[x]);

                    const uint8x16_t src8 = vreinterpretq_u8_s32(src32);
                    const uint8x16_t dst8 = vreinterpretq_u8_s32(dst32);

                    const uint8x8_t src8_low = vget_low_u8(src8);
                    const uint8x8_t dst8_low = vget_low_u8(dst8);

                    const uint8x8_t src8_high = vget_high_u8(src8);
                    const uint8x8_t dst8_high = vget_high_u8(dst8);

                    const int16x8_t src16_low = vreinterpretq_s16_u16(vmovl_u8(src8_low));
                    const int16x8_t dst16_low = vreinterpretq_s16_u16(vmovl_u8(dst8_low));

                    const int16x8_t src16_high = vreinterpretq_s16_u16(vmovl_u8(src8_high));
                    const int16x8_t dst16_high = vreinterpretq_s16_u16(vmovl_u8(dst8_high));

                    // scale the source by const_alpha before blending
                    const int16x8_t srcalpha16_low = qvbyte_mul_s16(src16_low, const_alpha16, half);
                    const int16x8_t srcalpha16_high = qvbyte_mul_s16(src16_high, const_alpha16, half);

                    const int16x8_t result16_low = qvsource_over_s16(srcalpha16_low, dst16_low, half, full);
                    const int16x8_t result16_high = qvsource_over_s16(srcalpha16_high, dst16_high, half, full);

                    const int32x2_t result32_low = vreinterpret_s32_s8(vmovn_s16(result16_low));
                    const int32x2_t result32_high = vreinterpret_s32_s8(vmovn_s16(result16_high));
                    vst1q_s32((int32_t *)&dst[x], vcombine_s32(result32_low, result32_high));
                }
            }
            // scalar tail for the remaining 0-3 pixels
            for (; x < w; ++x) {
                uint s = src[x];
                if (s != 0) {
                    s = BYTE_MUL(s, const_alpha);
                    dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s));
                }
            }
            dst = (quint32 *)(((uchar *) dst) + dbpl);
            src = (const quint32 *)(((const uchar *) src) + sbpl);
        }
    }
}
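/*
 * A scalar sketch of the blend the NEON loops above vectorize (an
 * illustration, not Qt's actual helpers): with premultiplied ARGB32,
 * "source over" is dst = src + dst * (255 - alpha(src)) / 255, applied per
 * byte channel. byte_mul() here is a hypothetical stand-in for Qt's
 * BYTE_MUL and approximates the division by 255 with rounding.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t byte_mul(uint32_t x, uint32_t a)
{
    /* multiply the two even byte channels (B and R) by a/255 with rounding */
    uint32_t t = (x & 0x00ff00ffu) * a;
    t = (t + ((t >> 8) & 0x00ff00ffu) + 0x00800080u) >> 8;
    t &= 0x00ff00ffu;

    /* same for the two odd byte channels (G and A) */
    uint32_t u = ((x >> 8) & 0x00ff00ffu) * a;
    u = u + ((u >> 8) & 0x00ff00ffu) + 0x00800080u;
    u &= 0xff00ff00u;

    return t | u;
}

static uint32_t source_over(uint32_t src, uint32_t dst)
{
    return src + byte_mul(dst, 255u - (src >> 24));
}

int main(void)
{
    /* half-transparent red over opaque blue (premultiplied ARGB) */
    printf("0x%08x\n", source_over(0x80800000u, 0xff0000ffu));
    return 0;
}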