Example #1
0
/* Copy len bytes from src to des: 16-byte NEON chunks for the aligned-length
   prefix, then a scalar loop for the remaining len % 16 bytes.
   Buffers must not overlap (like memcpy, not memmove).
   BUG FIX: the original advanced the destination with `des1 =+ 16;`, i.e.
   `des1 = +16;`, which assigned the address 16 to the pointer instead of
   stepping it forward -- every NEON store after the first wrote to the
   wrong (and invalid) location. */
void memcpy_neon(char *src, char *des, int len)
{
	int8_t *src1 = (int8_t*)src;
	int8_t *des1 = (int8_t*)des;
	int8x16_t vec;
	int i;
	int temp = len % 16;    /* scalar tail length */
	int len1 = len - temp;  /* NEON-handled prefix length (multiple of 16) */
	/*use neon */
	for(i = 0; i < len1; i += 16)
	{
		vec = vld1q_s8(src1);
		vst1q_s8(des1, vec);
		src1 += 16;
		des1 += 16;  /* was `des1 =+ 16;` */
	}
	/*duplicate the rest characters*/
	while(temp != 0)
	{
		*(char*)des1 = *(char*)src1;
		des1++;
		src1++;
		temp--;
	}
}
Example #2
0
/* s8x16 add */
/* Element-wise C = A + B over a Row x Col matrix of signed 8-bit values.
   The aligned-length prefix is processed 16 lanes at a time with NEON;
   the remaining total % 16 elements are added with a scalar loop.
   Addition wraps modulo 256, exactly as vaddq_s8 does. */
void mw_neon_mm_add_s8x16(signed char * A, int Row, int Col, signed char * B, signed char * C)
{
	const int total = Row * Col;
	const int vec_end = total - (total % 16);  /* elements handled by NEON */
	int idx;

	/* Vectorized prefix: 16 additions per iteration. */
	for (idx = 0; idx < vec_end; idx += 16)
	{
		int8x16_t va = vld1q_s8(A + idx);
		int8x16_t vb = vld1q_s8(B + idx);
		vst1q_s8(C + idx, vaddq_s8(va, vb));
	}

	/* Scalar tail for the leftover elements. */
	for (idx = vec_end; idx < total; ++idx)
	{
		C[idx] = A[idx] + B[idx];
	}
}
Example #3
0
/* Checks that reinterpreting an int8x16_t as float64x2_t preserves the raw
   bits.  Implicit-int return (testsuite style): 0 on success, 1 on mismatch.
   PI_F64, E_F64, DOUBLE_EQUALS and wrap_vreinterpretq_f64_s8 are presumably
   supplied by the surrounding test harness -- not visible here.  */
test_vreinterpretq_f64_s8 ()
{
  int8x16_t a;
  float64x2_t b;
  /* Raw little-endian byte images of 0x400921FB54442D18 (pi) and
     0x4005BF0A8B145769 (e), matching d[] below bit-for-bit.  */
  int8_t c[16] = { 0x18, 0x2D, 0x44, 0x54, 0xFB, 0x21, 0x09, 0x40,
		   0x69, 0x57, 0x14, 0x8B, 0x0A, 0xBF, 0x05, 0x40 };
  float64_t d[2] = { PI_F64, E_F64 };
  float64_t e[2];
  int i;

  a = vld1q_s8 (c);
  b = wrap_vreinterpretq_f64_s8 (a);  /* bit-level cast, no value conversion */
  vst1q_f64 (e, b);
  /* Both reinterpreted lanes must equal the expected doubles within
     one epsilon.  */
  for (i = 0; i < 2; i++)
    if (!DOUBLE_EQUALS (d[i], e[i], __DBL_EPSILON__))
      return 1;
  return 0;
};
Example #4
0
/* Presumably a compile-only intrinsics test (GCC testsuite style): verifies
   that vld1q_s8 accepts a pointer argument and yields an int8x16_t.
   It loads through address 0, so it must never actually be executed.  */
void test_vld1Qs8 (void)
{
  int8x16_t out_int8x16_t;

  out_int8x16_t = vld1q_s8 (0);
}
Example #5
0
/* s8x16 mv mul */
/* Matrix-vector product C = A * B on signed 8-bit elements (arithmetic wraps
   modulo 256, as the NEON s8 multiply/add do).
   Indexing implies A is column-major with leading dimension Row: each of the
   16 loads per k-step advances by Row, i.e. one column at a time.
   NOTE(review): the column base is computed as k * T + i, yet columns are
   Row elements apart; unless T == Row these disagree, and k * Row + i looks
   intended -- verify against callers before relying on non-square inputs.
   Assumes Row and T are both multiples of 16: there is no scalar tail, so
   any remainder elements would be read/written out of bounds -- TODO confirm
   callers guarantee this. */
void mw_neon_mv_mul_s8x16(signed char * A, int Row, int T, signed char * B, signed char * C)
{
	int i = 0;
	int k = 0;

	/* neon_c accumulates 16 output elements; neon_a*/neon_b* hold one
	   16-column block of A and the 16 broadcast B values per k-step. */
	int8x16_t neon_b, neon_c;
	int8x16_t neon_a0, neon_a1, neon_a2, neon_a3, neon_a4, neon_a5, neon_a6, neon_a7;
	int8x16_t neon_a8, neon_a9, neon_a10, neon_a11, neon_a12, neon_a13, neon_a14, neon_a15;
	int8x16_t neon_b0, neon_b1, neon_b2, neon_b3, neon_b4, neon_b5, neon_b6, neon_b7;
	int8x16_t neon_b8, neon_b9, neon_b10, neon_b11, neon_b12, neon_b13, neon_b14, neon_b15;

	/* 16 output rows (C[i..i+15]) per outer iteration. */
	for (i = 0; i < Row; i+=16)
	{
		neon_c = vmovq_n_s8(0);

		/* Consume 16 columns of A and 16 entries of B per step. */
		for (k = 0; k < T; k+=16)
		{
			/* Base offset of column k, rows i..i+15 (see NOTE above). */
			int j = k * T + i;

			/* Load 16 consecutive columns; each is Row elements apart. */
			neon_a0 = vld1q_s8(A + j);
			j+=Row;
			neon_a1 = vld1q_s8(A + j);
			j+=Row;
			neon_a2 = vld1q_s8(A + j);
			j+=Row;
			neon_a3 = vld1q_s8(A + j);
			j+=Row;
			neon_a4 = vld1q_s8(A + j);
			j+=Row;
			neon_a5 = vld1q_s8(A + j);
			j+=Row;
			neon_a6 = vld1q_s8(A + j);
			j+=Row;
			neon_a7 = vld1q_s8(A + j);
			j+=Row;
			neon_a8 = vld1q_s8(A + j);
			j+=Row;
			neon_a9 = vld1q_s8(A + j);
			j+=Row;
			neon_a10 = vld1q_s8(A + j);
			j+=Row;
			neon_a11 = vld1q_s8(A + j);
			j+=Row;
			neon_a12 = vld1q_s8(A + j);
			j+=Row;
			neon_a13 = vld1q_s8(A + j);
			j+=Row;
			neon_a14 = vld1q_s8(A + j);
			j+=Row;
			neon_a15 = vld1q_s8(A + j);

			/* Broadcast each of B[k..k+15] across all 16 lanes. */
			neon_b = vld1q_s8(B + k);
			neon_b0 = vdupq_n_s8(vgetq_lane_s8(neon_b, 0));
			neon_b1 = vdupq_n_s8(vgetq_lane_s8(neon_b, 1));
			neon_b2 = vdupq_n_s8(vgetq_lane_s8(neon_b, 2));
			neon_b3 = vdupq_n_s8(vgetq_lane_s8(neon_b, 3));
			neon_b4 = vdupq_n_s8(vgetq_lane_s8(neon_b, 4));
			neon_b5 = vdupq_n_s8(vgetq_lane_s8(neon_b, 5));
			neon_b6 = vdupq_n_s8(vgetq_lane_s8(neon_b, 6));
			neon_b7 = vdupq_n_s8(vgetq_lane_s8(neon_b, 7));
			neon_b8 = vdupq_n_s8(vgetq_lane_s8(neon_b, 8));
			neon_b9 = vdupq_n_s8(vgetq_lane_s8(neon_b, 9));
			neon_b10 = vdupq_n_s8(vgetq_lane_s8(neon_b, 10));
			neon_b11 = vdupq_n_s8(vgetq_lane_s8(neon_b, 11));
			neon_b12 = vdupq_n_s8(vgetq_lane_s8(neon_b, 12));
			neon_b13 = vdupq_n_s8(vgetq_lane_s8(neon_b, 13));
			neon_b14 = vdupq_n_s8(vgetq_lane_s8(neon_b, 14));
			neon_b15 = vdupq_n_s8(vgetq_lane_s8(neon_b, 15));

			/* neon_c += column_m * B[k+m] for m = 0..15
			   (8-bit multiply and add, both wrapping). */
			neon_c = vaddq_s8(vmulq_s8(neon_a0, neon_b0), neon_c);
			neon_c = vaddq_s8(vmulq_s8(neon_a1, neon_b1), neon_c);
			neon_c = vaddq_s8(vmulq_s8(neon_a2, neon_b2), neon_c);
			neon_c = vaddq_s8(vmulq_s8(neon_a3, neon_b3), neon_c);
			neon_c = vaddq_s8(vmulq_s8(neon_a4, neon_b4), neon_c);
			neon_c = vaddq_s8(vmulq_s8(neon_a5, neon_b5), neon_c);
			neon_c = vaddq_s8(vmulq_s8(neon_a6, neon_b6), neon_c);
			neon_c = vaddq_s8(vmulq_s8(neon_a7, neon_b7), neon_c);
			neon_c = vaddq_s8(vmulq_s8(neon_a8, neon_b8), neon_c);
			neon_c = vaddq_s8(vmulq_s8(neon_a9, neon_b9), neon_c);
			neon_c = vaddq_s8(vmulq_s8(neon_a10, neon_b10), neon_c);
			neon_c = vaddq_s8(vmulq_s8(neon_a11, neon_b11), neon_c);
			neon_c = vaddq_s8(vmulq_s8(neon_a12, neon_b12), neon_c);
			neon_c = vaddq_s8(vmulq_s8(neon_a13, neon_b13), neon_c);
			neon_c = vaddq_s8(vmulq_s8(neon_a14, neon_b14), neon_c);
			neon_c = vaddq_s8(vmulq_s8(neon_a15, neon_b15), neon_c);

		}

		/* Write the 16 accumulated outputs. */
		vst1q_s8(C + i, neon_c);
	}
}
Example #6
0
/* Type-dispatch overload: routes an s8 pointer to the NEON s8 load intrinsic,
   so generic code can call vld1q() uniformly across element types. */
inline   int8x16_t vld1q(const s8  * ptr) { return  vld1q_s8(ptr); }
Example #7
0
/* Dot product of two s8 2-D arrays, returned as f64.
   Strides are in bytes (one byte per s8 element).  The integer accumulation
   is exact: products are widened to s16 by vmull_s8, pairwise-folded into
   s32 and then s64 accumulators, and only the per-row totals are added into
   the f64 result.  Without CAROTENE_NEON the function is a stub returning 0.
   BUG FIX: the block-size bound referenced DOT_UINT_BLOCKSIZE, a macro this
   function never defines; it now uses the DOT_INT_BLOCKSIZE defined below,
   which is the limit the overflow comment actually derives. */
f64 dotProduct(const Size2D &_size,
               const s8 * src0Base, ptrdiff_t src0Stride,
               const s8 * src1Base, ptrdiff_t src1Stride)
{
    internal::assertSupportedConfiguration();
#ifdef CAROTENE_NEON
    Size2D size(_size);
    /* Rows stored back-to-back with identical strides collapse into one
       long row, so the vector loop can run across row boundaries. */
    if (src0Stride == src1Stride &&
        src0Stride == (ptrdiff_t)(size.width))
    {
        size.width *= size.height;
        size.height = 1;
    }

// It is possible to accumulate up to 131071 schar multiplication results in sint32 without overflow
// We process 16 elements and accumulate two new elements per step. So we could handle 131071/2*16 elements
#define DOT_INT_BLOCKSIZE 131070*8
    f64 result = 0.0;
    for (size_t row = 0; row < size.height; ++row)
    {
        const s8 * src0 = internal::getRowPtr(src0Base, src0Stride, row);
        const s8 * src1 = internal::getRowPtr(src1Base, src1Stride, row);

        size_t i = 0;
        int64x2_t ws = vmovq_n_s64(0);

        while(i + 16 <= size.width)
        {
            /* Bound each s32 accumulation run so it cannot overflow
               (was DOT_UINT_BLOCKSIZE, which is not defined here). */
            size_t lim = std::min(i + DOT_INT_BLOCKSIZE, size.width) - 16;

            int32x4_t s1 = vmovq_n_s32(0);
            int32x4_t s2 = vmovq_n_s32(0);

            for (; i <= lim; i += 16)
            {
                internal::prefetch(src0 + i);
                internal::prefetch(src1 + i);

                int8x16_t vs1 = vld1q_s8(src0 + i);
                int8x16_t vs2 = vld1q_s8(src1 + i);

                /* Widening multiplies: 16 s8*s8 products -> two s16x8. */
                int16x8_t vdot1 = vmull_s8(vget_low_s8(vs1), vget_low_s8(vs2));
                int16x8_t vdot2 = vmull_s8(vget_high_s8(vs1), vget_high_s8(vs2));

                s1 = vpadalq_s16(s1, vdot1);
                s2 = vpadalq_s16(s2, vdot2);
            }

            /* Drain the s32 partial sums into the s64 accumulator. */
            ws = vpadalq_s32(ws, s1);
            ws = vpadalq_s32(ws, s2);
        }

        /* One 8-wide step when at least half a vector remains. */
        if(i + 8 <= size.width)
        {
            int8x8_t vs1 = vld1_s8(src0 + i);
            int8x8_t vs2 = vld1_s8(src1 + i);

            ws = vpadalq_s32(ws, vpaddlq_s16(vmull_s8(vs1, vs2)));
            i += 8;
        }

        /* Horizontal add of the two s64 lanes, then fold into the f64 sum. */
        result += (double)vget_lane_s64(vadd_s64(vget_low_s64(ws), vget_high_s64(ws)), 0);

        /* Scalar tail: fewer than 8 elements left in this row. */
        for (; i < size.width; ++i)
            result += s32(src0[i]) * s32(src1[i]);
    }
    return result;
#else
    (void)_size;
    (void)src0Base;
    (void)src0Stride;
    (void)src1Base;
    (void)src1Stride;

    return 0;
#endif
}