Example 1
#include <arm_neon.h>

void
foo ()
{
  int8x8_t val14;
  int8x8_t val15;
  uint8x8_t val16;
  uint32x4_t val40;
  val14 = vcreate_s8 (0xff0080f6807f807fUL);
  val15 = vcreate_s8 (0x10807fff7f808080UL);
  /* Lane-wise signed compare: 0xff where val14 > val15, 0x00 elsewhere.  */
  val16 = vcgt_s8 (val14, val15);
  /* Broadcast the comparison result, viewed as one 64-bit lane, across a
     128-bit vector and reinterpret it as four 32-bit lanes.  */
  val40 = vreinterpretq_u32_u64 (
    vdupq_n_u64 (
      (uint64_t) vget_lane_s64 (
        vreinterpret_s64_u8 (val16), 0)));
}
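For reference, the 64-bit constant handed to vcreate_s8 is interpreted little-endian: lane 0 receives the least significant byte. A minimal standalone sketch (not taken from any of the projects above) that prints the lane order:

#include <arm_neon.h>
#include <stdio.h>

int main (void)
{
  /* Lanes should read 0 1 2 3 4 5 6 7 on an AArch64 target.  */
  int8x8_t v = vcreate_s8 (0x0706050403020100UL);
  printf ("%d %d %d %d %d %d %d %d\n",
          vget_lane_s8 (v, 0), vget_lane_s8 (v, 1),
          vget_lane_s8 (v, 2), vget_lane_s8 (v, 3),
          vget_lane_s8 (v, 4), vget_lane_s8 (v, 5),
          vget_lane_s8 (v, 6), vget_lane_s8 (v, 7));
  return 0;
}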
Example 2
inline int v_signmask(const v_uint8x16& a)
{
    // Per-lane shift amounts 0..7, so each sign bit lands in its own bit position.
    int8x8_t m0 = vcreate_s8(CV_BIG_UINT(0x0706050403020100));
    uint8x16_t v0 = vshlq_u8(vshrq_n_u8(a.val, 7), vcombine_s8(m0, m0));
    // Widening pairwise adds collapse each 8-lane half into one byte of mask bits.
    uint64x2_t v1 = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(v0)));
    return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 8);
}
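As a reading aid, a scalar reference for the same sign mask, assuming buf holds the 16 lanes of a.val in lane order (hypothetical helper, not part of OpenCV):

/* Bit i of the result is the sign (top) bit of lane i.  */
static int signmask_scalar (const unsigned char buf[16])
{
  int mask = 0;
  for (int i = 0; i < 16; i++)
    mask |= (buf[i] >> 7) << i;
  return mask;
}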
Example 3
int8x8_t test_vcreate_s8(uint64_t v1) {
  // CHECK: test_vcreate_s8
  return vcreate_s8(v1);
  // CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
}
Example 4
inline v_int32x4 v_load_expand_q(const schar* ptr)
{
    // Load 4 signed bytes into the low lanes of a 64-bit register...
    int8x8_t v0 = vcreate_s8(*(unsigned*)ptr);
    // ...then sign-extend twice: 8-bit -> 16-bit -> 32-bit.
    int16x4_t v1 = vget_low_s16(vmovl_s8(v0));
    return v_int32x4(vmovl_s16(v1));
}
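A rough plain-NEON equivalent without the OpenCV wrapper types (an assumed sketch, not OpenCV code), using memcpy instead of the unaligned pointer cast:

#include <arm_neon.h>
#include <string.h>

/* Load 4 signed bytes and sign-extend them to four 32-bit lanes.  */
static int32x4_t load_expand_q_sketch (const signed char *ptr)
{
  unsigned u;
  memcpy (&u, ptr, sizeof u);       /* the 4 bytes land in the low lanes */
  int8x8_t  v0 = vcreate_s8 (u);
  int16x4_t v1 = vget_low_s16 (vmovl_s8 (v0));
  return vmovl_s16 (v1);
}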