Example #1
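/* Return true if the target can expand a VEC_COND_EXPR whose result has
   vector type VALUE_TYPE from a comparison of vectors of type
   CMP_OP_TYPE.  */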
bool
expand_vec_cond_expr_p (tree value_type, tree cmp_op_type)
{
  machine_mode value_mode = TYPE_MODE (value_type);
  machine_mode cmp_op_mode = TYPE_MODE (cmp_op_type);
  if (VECTOR_BOOLEAN_TYPE_P (cmp_op_type))
    return get_vcond_mask_icode (value_mode, cmp_op_mode) != CODE_FOR_nothing;
  if (GET_MODE_SIZE (value_mode) != GET_MODE_SIZE (cmp_op_mode)
      || GET_MODE_NUNITS (value_mode) != GET_MODE_NUNITS (cmp_op_mode)
      || get_vcond_icode (value_mode, cmp_op_mode,
			  TYPE_UNSIGNED (cmp_op_type)) == CODE_FOR_nothing)
    return false;
  return true;
}
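A minimal standalone sketch of the same capability check, with hypothetical stand-ins (target_has_vcond_mask, target_has_vcond) for the optab queries: a boolean-vector comparison only needs a vcond_mask pattern, while a full comparison additionally requires the value and comparison vectors to agree in byte size and lane count.

#include <stdbool.h>
#include <stdio.h>

struct vec_type { int size_bytes; int nunits; bool is_bool_vector; };

/* Hypothetical target queries standing in for get_vcond_mask_icode and
   get_vcond_icode comparing against CODE_FOR_nothing.  */
static bool
target_has_vcond_mask (struct vec_type value, struct vec_type cmp)
{
  return value.nunits == cmp.nunits;
}

static bool
target_has_vcond (struct vec_type value, struct vec_type cmp)
{
  (void) value; (void) cmp;
  return true;
}

static bool
can_expand_vec_cond (struct vec_type value, struct vec_type cmp)
{
  if (cmp.is_bool_vector)
    return target_has_vcond_mask (value, cmp);
  if (value.size_bytes != cmp.size_bytes
      || value.nunits != cmp.nunits
      || !target_has_vcond (value, cmp))
    return false;
  return true;
}

int
main (void)
{
  struct vec_type v4sf = { 16, 4, false }, v4si = { 16, 4, false };
  printf ("%d\n", can_expand_vec_cond (v4sf, v4si));	/* prints 1 */
  return 0;
}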
Example #2
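/* Return TYPE's natural machine mode: for an 8- or 16-byte vector type
   whose TYPE_MODE is not a vector mode, search the vector modes for one
   with the matching inner mode and number of units.  */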
static enum machine_mode
type_natural_mode (tree type)
{
  enum machine_mode mode = TYPE_MODE (type);

  if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      if ((size == 8 || size == 16)
	  /* ??? Generic code allows us to create width 1 vectors.  Ignore.  */
	  && TYPE_VECTOR_SUBPARTS (type) > 1)
	{
	  enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));

	  if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
	    mode = MIN_MODE_VECTOR_FLOAT;
	  else
	    mode = MIN_MODE_VECTOR_INT;

	  /* Get the mode which has this inner mode and number of units.  */
	  for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
	    if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
		&& GET_MODE_INNER (mode) == innermode)
	      return mode;

	  gcc_unreachable ();
	}
    }
  return mode;
}
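A minimal standalone sketch of the search loop above, assuming a hypothetical mode table in place of GCC's machmode machinery: walk the vector modes from narrowest to widest and return the first whose inner mode and lane count match.

#include <stdio.h>

/* Hypothetical stand-in for the GET_MODE_WIDER_MODE chain of integer
   vector modes, ordered narrowest to widest.  */
struct mode_info { const char *name; int inner_bits; int nunits; };

static const struct mode_info vector_int_modes[] = {
  { "V8QI", 8, 8 },   { "V4HI", 16, 4 }, { "V2SI", 32, 2 },
  { "V16QI", 8, 16 }, { "V8HI", 16, 8 }, { "V4SI", 32, 4 }, { "V2DI", 64, 2 },
};

/* Mirror of the for-loop in type_natural_mode: find the mode with the
   requested inner width and number of units.  */
static const char *
find_vector_mode (int inner_bits, int nunits)
{
  for (size_t i = 0; i < sizeof vector_int_modes / sizeof *vector_int_modes; i++)
    if (vector_int_modes[i].inner_bits == inner_bits
	&& vector_int_modes[i].nunits == nunits)
      return vector_int_modes[i].name;
  return NULL;	/* the real code reaches gcc_unreachable () instead.  */
}

int
main (void)
{
  printf ("%s\n", find_vector_mode (16, 8));	/* prints V8HI */
  return 0;
}

Example #3
/* Finalize the encoding and return the constant vector as an rtx,
   reusing a cached constant when one already exists.  */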
rtx
rtx_vector_builder::build ()
{
  finalize ();

  rtx x = find_cached_value ();
  if (x)
    return x;

  unsigned int nelts;
  if (!GET_MODE_NUNITS (m_mode).is_constant (&nelts))
    nelts = encoded_nelts ();
  rtvec v = rtvec_alloc (nelts);
  for (unsigned int i = 0; i < nelts; ++i)
    RTVEC_ELT (v, i) = elt (i);
  x = gen_rtx_raw_CONST_VECTOR (m_mode, v);
  CONST_VECTOR_NPATTERNS (x) = npatterns ();
  CONST_VECTOR_NELTS_PER_PATTERN (x) = nelts_per_pattern ();
  return x;
}
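The builder materializes one rtx per lane with elt (i), which decodes the npatterns x nelts_per_pattern encoding stamped on the CONST_VECTOR. A standalone sketch of that decoding, with simplified names and plain integers standing in for rtx elements:

#include <stdio.h>

/* Element I of a vector encoded as NPATTERNS interleaved patterns of
   NELTS_PER_PATTERN values each: within the encoded prefix, read the
   value directly; beyond it, repeat the final value or, for 3-element
   patterns, continue the series with a constant step.  */
static long
encoded_elt (const long *encoded, int npatterns, int nelts_per_pattern, int i)
{
  int pattern = i % npatterns;
  int index = i / npatterns;
  if (index < nelts_per_pattern)
    return encoded[index * npatterns + pattern];
  long last = encoded[(nelts_per_pattern - 1) * npatterns + pattern];
  if (nelts_per_pattern < 3)
    return last;
  long step = last - encoded[(nelts_per_pattern - 2) * npatterns + pattern];
  return last + (index - (nelts_per_pattern - 1)) * step;
}

int
main (void)
{
  long series[] = { 0, 1, 2 };	/* one 3-element pattern: a linear series.  */
  for (int i = 0; i < 8; i++)
    printf ("%ld ", encoded_elt (series, 1, 3, i));	/* 0 1 2 3 4 5 6 7 */
  printf ("\n");
  return 0;
}

Example #4
/* Generated from gcc/config/i386/predicates.md: return nonzero if OP is
   a CONST_VECTOR of the right width whose elements are all constm1_rtx
   (all bits set).  */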
static inline int
vector_all_ones_operand_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
#line 769 "../.././gcc/config/i386/predicates.md"
{
  int nunits = GET_MODE_NUNITS (mode);

  if (GET_CODE (op) == CONST_VECTOR
      && CONST_VECTOR_NUNITS (op) == nunits)
    {
      int i;
      for (i = 0; i < nunits; ++i)
        {
          rtx x = CONST_VECTOR_ELT (op, i);
          if (x != constm1_rtx)
            return 0;
        }
      return 1;
    }

  return 0;
}
Example #5
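/* Return the maximum vectorization factor the target supports, or 1 when
   vectorization is disabled.  */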
int
omp_max_vf (void)
{
  if (!optimize
      || optimize_debug
      || !flag_tree_loop_optimize
      || (!flag_tree_loop_vectorize
	  && (global_options_set.x_flag_tree_loop_vectorize
	      || global_options_set.x_flag_tree_vectorize)))
    return 1;

  int vf = 1;
  int vs = targetm.vectorize.autovectorize_vector_sizes ();
  if (vs)
    vf = 1 << floor_log2 (vs);
  else
    {
      machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
      if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
	vf = GET_MODE_NUNITS (vqimode);
    }
  return vf;
}
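The bitmask-to-VF step is the only subtle part: autovectorize_vector_sizes returns a mask of supported vector byte sizes, and 1 << floor_log2 (vs) rounds it down to the largest single power of two. A standalone sketch with a simplified floor_log2:

#include <stdio.h>

/* Position of the highest set bit, -1 for zero (simplified stand-in
   for GCC's floor_log2).  */
static int
floor_log2_sketch (unsigned int x)
{
  int n = -1;
  while (x)
    {
      n++;
      x >>= 1;
    }
  return n;
}

int
main (void)
{
  unsigned int vs = 16 | 8;	/* target advertises 16- and 8-byte vectors.  */
  printf ("%d\n", 1 << floor_log2_sketch (vs));	/* prints 16 */
  return 0;
}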
Example #6
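/* Expand an AArch64 SIMD builtin: legalize each call argument against the
   instruction's operand predicates, then emit the generated pattern.  */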
static rtx
aarch64_simd_expand_args (rtx target, int icode, int have_retval,
			  tree exp, builtin_simd_arg *args)
{
  rtx pat;
  tree arg[SIMD_MAX_BUILTIN_ARGS];
  rtx op[SIMD_MAX_BUILTIN_ARGS];
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode[SIMD_MAX_BUILTIN_ARGS];
  int argc = 0;

  if (have_retval
      && (!target
	  || GET_MODE (target) != tmode
	  || !(*insn_data[icode].operand[0].predicate) (target, tmode)))
    target = gen_reg_rtx (tmode);

  for (;;)
    {
      builtin_simd_arg thisarg = args[argc];

      if (thisarg == SIMD_ARG_STOP)
	break;
      else
	{
	  arg[argc] = CALL_EXPR_ARG (exp, argc);
	  op[argc] = expand_normal (arg[argc]);
	  mode[argc] = insn_data[icode].operand[argc + have_retval].mode;

	  switch (thisarg)
	    {
	    case SIMD_ARG_COPY_TO_REG:
	      if (POINTER_TYPE_P (TREE_TYPE (arg[argc])))
		op[argc] = convert_memory_address (Pmode, op[argc]);
	      /*gcc_assert (GET_MODE (op[argc]) == mode[argc]); */
	      if (!(*insn_data[icode].operand[argc + have_retval].predicate)
		  (op[argc], mode[argc]))
		op[argc] = copy_to_mode_reg (mode[argc], op[argc]);
	      break;

	    case SIMD_ARG_LANE_INDEX:
	      /* Must be a previous operand into which this is an index.  */
	      gcc_assert (argc > 0);
	      if (CONST_INT_P (op[argc]))
		{
		  enum machine_mode vmode = mode[argc - 1];
		  aarch64_simd_lane_bounds (op[argc],
					    0, GET_MODE_NUNITS (vmode));
		  /* Keep to GCC-vector-extension lane indices in the RTL.  */
		  op[argc] = GEN_INT (ENDIAN_LANE_N (vmode, INTVAL (op[argc])));
		}
	      /* Fall through - if the lane index isn't a constant then
		 the next case will error.  */
	    case SIMD_ARG_CONSTANT:
	      if (!(*insn_data[icode].operand[argc + have_retval].predicate)
		  (op[argc], mode[argc]))
	      {
		error_at (EXPR_LOCATION (exp),
			  "incompatible type for argument %d, "
			  "expected %<const int%>", argc + 1);
		return const0_rtx;
	      }
	      break;

	    case SIMD_ARG_STOP:
	      gcc_unreachable ();
	    }

	  argc++;
	}
    }

  if (have_retval)
    switch (argc)
      {
      case 1:
	pat = GEN_FCN (icode) (target, op[0]);
	break;

      case 2:
	pat = GEN_FCN (icode) (target, op[0], op[1]);
	break;

      case 3:
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
	break;

      case 4:
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
	break;

      case 5:
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
	break;

      default:
	gcc_unreachable ();
      }
  else
    switch (argc)
      {
      case 1:
	pat = GEN_FCN (icode) (op[0]);
	break;

      case 2:
	pat = GEN_FCN (icode) (op[0], op[1]);
	break;

      case 3:
	pat = GEN_FCN (icode) (op[0], op[1], op[2]);
	break;

      case 4:
	pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
	break;

      case 5:
	pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
	break;

      default:
	gcc_unreachable ();
      }

  if (!pat)
    return NULL_RTX;

  emit_insn (pat);

  return target;
}
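The heart of the expander is the per-argument legalization loop: each expanded operand is tested against the instruction's predicate and copied into a fresh register when it does not match. A standalone sketch, with a toy predicate and a hypothetical force_legal standing in for insn_data[].operand[].predicate and copy_to_mode_reg:

#include <stdbool.h>
#include <stdio.h>

typedef bool (*operand_pred) (int);

static bool
is_even (int op)
{
  return (op & 1) == 0;
}

/* Stand-in for copy_to_mode_reg: "legalize" a rejected operand.  */
static int
force_legal (int op)
{
  printf ("copying %d into a register\n", op);
  return op;
}

static void
legalize_args (int *ops, int nops, operand_pred pred)
{
  for (int i = 0; i < nops; i++)
    if (!pred (ops[i]))
      ops[i] = force_legal (ops[i]);
}

int
main (void)
{
  int ops[] = { 2, 3, 4 };
  legalize_args (ops, 3, is_even);	/* only the odd operand is copied.  */
  return 0;
}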
Example #7
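/* Register the AdvSIMD vector types (int8x8_t through float64x2_t, plus
   the poly types) and the large OI/EI/CI/XI integer types used for
   multi-register values.  */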
static void
aarch64_init_simd_builtin_types (void)
{
  int i;
  int nelts = sizeof (aarch64_simd_types) / sizeof (aarch64_simd_types[0]);
  tree tdecl;

  /* Init all the element types built by the front-end.  */
  aarch64_simd_types[Int8x8_t].eltype = intQI_type_node;
  aarch64_simd_types[Int8x16_t].eltype = intQI_type_node;
  aarch64_simd_types[Int16x4_t].eltype = intHI_type_node;
  aarch64_simd_types[Int16x8_t].eltype = intHI_type_node;
  aarch64_simd_types[Int32x2_t].eltype = intSI_type_node;
  aarch64_simd_types[Int32x4_t].eltype = intSI_type_node;
  aarch64_simd_types[Int64x1_t].eltype = intDI_type_node;
  aarch64_simd_types[Int64x2_t].eltype = intDI_type_node;
  aarch64_simd_types[Uint8x8_t].eltype = unsigned_intQI_type_node;
  aarch64_simd_types[Uint8x16_t].eltype = unsigned_intQI_type_node;
  aarch64_simd_types[Uint16x4_t].eltype = unsigned_intHI_type_node;
  aarch64_simd_types[Uint16x8_t].eltype = unsigned_intHI_type_node;
  aarch64_simd_types[Uint32x2_t].eltype = unsigned_intSI_type_node;
  aarch64_simd_types[Uint32x4_t].eltype = unsigned_intSI_type_node;
  aarch64_simd_types[Uint64x1_t].eltype = unsigned_intDI_type_node;
  aarch64_simd_types[Uint64x2_t].eltype = unsigned_intDI_type_node;

  /* Poly types are a world of their own.  */
  aarch64_simd_types[Poly8_t].eltype = aarch64_simd_types[Poly8_t].itype =
    build_distinct_type_copy (unsigned_intQI_type_node);
  aarch64_simd_types[Poly16_t].eltype = aarch64_simd_types[Poly16_t].itype =
    build_distinct_type_copy (unsigned_intHI_type_node);
  aarch64_simd_types[Poly64_t].eltype = aarch64_simd_types[Poly64_t].itype =
    build_distinct_type_copy (unsigned_intDI_type_node);
  aarch64_simd_types[Poly128_t].eltype = aarch64_simd_types[Poly128_t].itype =
    build_distinct_type_copy (unsigned_intTI_type_node);
  /* Init poly vector element types with scalar poly types.  */
  aarch64_simd_types[Poly8x8_t].eltype = aarch64_simd_types[Poly8_t].itype;
  aarch64_simd_types[Poly8x16_t].eltype = aarch64_simd_types[Poly8_t].itype;
  aarch64_simd_types[Poly16x4_t].eltype = aarch64_simd_types[Poly16_t].itype;
  aarch64_simd_types[Poly16x8_t].eltype = aarch64_simd_types[Poly16_t].itype;
  aarch64_simd_types[Poly64x1_t].eltype = aarch64_simd_types[Poly64_t].itype;
  aarch64_simd_types[Poly64x2_t].eltype = aarch64_simd_types[Poly64_t].itype;

  /* Continue with standard types.  */
  aarch64_simd_types[Float32x2_t].eltype = float_type_node;
  aarch64_simd_types[Float32x4_t].eltype = float_type_node;
  aarch64_simd_types[Float64x1_t].eltype = double_type_node;
  aarch64_simd_types[Float64x2_t].eltype = double_type_node;

  for (i = 0; i < nelts; i++)
    {
      tree eltype = aarch64_simd_types[i].eltype;
      enum machine_mode mode = aarch64_simd_types[i].mode;

      if (aarch64_simd_types[i].itype == NULL)
	aarch64_simd_types[i].itype =
	  build_distinct_type_copy
	    (build_vector_type (eltype, GET_MODE_NUNITS (mode)));

      tdecl = add_builtin_type (aarch64_simd_types[i].name,
				aarch64_simd_types[i].itype);
      TYPE_NAME (aarch64_simd_types[i].itype) = tdecl;
      SET_TYPE_STRUCTURAL_EQUALITY (aarch64_simd_types[i].itype);
    }

#define AARCH64_BUILD_SIGNED_TYPE(mode)  \
  make_signed_type (GET_MODE_PRECISION (mode));
  aarch64_simd_intOI_type_node = AARCH64_BUILD_SIGNED_TYPE (OImode);
  aarch64_simd_intEI_type_node = AARCH64_BUILD_SIGNED_TYPE (EImode);
  aarch64_simd_intCI_type_node = AARCH64_BUILD_SIGNED_TYPE (CImode);
  aarch64_simd_intXI_type_node = AARCH64_BUILD_SIGNED_TYPE (XImode);
#undef AARCH64_BUILD_SIGNED_TYPE

  tdecl = add_builtin_type
	    ("__builtin_aarch64_simd_ei" , aarch64_simd_intEI_type_node);
  TYPE_NAME (aarch64_simd_intEI_type_node) = tdecl;
  tdecl = add_builtin_type
	    ("__builtin_aarch64_simd_oi" , aarch64_simd_intOI_type_node);
  TYPE_NAME (aarch64_simd_intOI_type_node) = tdecl;
  tdecl = add_builtin_type
	    ("__builtin_aarch64_simd_ci" , aarch64_simd_intCI_type_node);
  TYPE_NAME (aarch64_simd_intCI_type_node) = tdecl;
  tdecl = add_builtin_type
	    ("__builtin_aarch64_simd_xi" , aarch64_simd_intXI_type_node);
  TYPE_NAME (aarch64_simd_intXI_type_node) = tdecl;
}
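The function is an instance of table-driven type registration: element types are filled in first, then one loop derives and registers every vector type from its (element type, lane count) pair. A standalone sketch with a simplified, hypothetical miniature of the table:

#include <stdio.h>

struct simd_type { const char *name; const char *eltype; int nunits; };

/* Hypothetical miniature of aarch64_simd_types.  */
static const struct simd_type simd_types[] = {
  { "int8x8_t",    "int8_t",  8 },
  { "int16x4_t",   "int16_t", 4 },
  { "float32x2_t", "float",   2 },
};

int
main (void)
{
  /* One pass registers every entry, mirroring the nelts loop above.  */
  for (size_t i = 0; i < sizeof simd_types / sizeof *simd_types; i++)
    printf ("register %s = vector of %d x %s\n",
	    simd_types[i].name, simd_types[i].nunits, simd_types[i].eltype);
  return 0;
}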
Example #8
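/* Expand an AArch64 SIMD builtin: collect the target and call arguments
   into op[], legalize them against the instruction's operand predicates,
   then emit the generated pattern.  */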
static rtx
aarch64_simd_expand_args (rtx target, int icode, int have_retval,
			  tree exp, builtin_simd_arg *args)
{
  rtx pat;
  rtx op[SIMD_MAX_BUILTIN_ARGS + 1]; /* First element for result operand.  */
  int opc = 0;

  if (have_retval)
    {
      machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
	  || GET_MODE (target) != tmode
	  || !(*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);
      op[opc++] = target;
    }

  for (;;)
    {
      builtin_simd_arg thisarg = args[opc - have_retval];

      if (thisarg == SIMD_ARG_STOP)
	break;
      else
	{
	  tree arg = CALL_EXPR_ARG (exp, opc - have_retval);
	  enum machine_mode mode = insn_data[icode].operand[opc].mode;
	  op[opc] = expand_normal (arg);

	  switch (thisarg)
	    {
	    case SIMD_ARG_COPY_TO_REG:
	      if (POINTER_TYPE_P (TREE_TYPE (arg)))
		op[opc] = convert_memory_address (Pmode, op[opc]);
	      /*gcc_assert (GET_MODE (op[opc]) == mode); */
	      if (!(*insn_data[icode].operand[opc].predicate)
		  (op[opc], mode))
		op[opc] = copy_to_mode_reg (mode, op[opc]);
	      break;

	    case SIMD_ARG_LANE_INDEX:
	      /* Must be a previous operand into which this is an index.  */
	      gcc_assert (opc > 0);
	      if (CONST_INT_P (op[opc]))
		{
		  machine_mode vmode = insn_data[icode].operand[opc - 1].mode;
		  aarch64_simd_lane_bounds (op[opc],
					    0, GET_MODE_NUNITS (vmode), exp);
		  /* Keep to GCC-vector-extension lane indices in the RTL.  */
		  op[opc] = GEN_INT (ENDIAN_LANE_N (vmode, INTVAL (op[opc])));
		}
	      /* Fall through - if the lane index isn't a constant then
		 the next case will error.  */
	    case SIMD_ARG_CONSTANT:
	      if (!(*insn_data[icode].operand[opc].predicate)
		  (op[opc], mode))
	      {
		error ("%Kargument %d must be a constant immediate",
		       exp, opc + 1 - have_retval);
		return const0_rtx;
	      }
	      break;

	    case SIMD_ARG_STOP:
	      gcc_unreachable ();
	    }

	  opc++;
	}
    }

  switch (opc)
    {
    case 1:
      pat = GEN_FCN (icode) (op[0]);
      break;

    case 2:
      pat = GEN_FCN (icode) (op[0], op[1]);
      break;

    case 3:
      pat = GEN_FCN (icode) (op[0], op[1], op[2]);
      break;

    case 4:
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
      break;

    case 5:
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
      break;

    case 6:
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
      break;

    default:
      gcc_unreachable ();
    }

  if (!pat)
    return NULL_RTX;

  emit_insn (pat);

  return target;
}
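Compared with Example #6, this revision folds the return value into op[0], so a single switch on the total operand count serves both the value-returning and void expansions. A standalone sketch of that dispatch, with hypothetical generator signatures standing in for GEN_FCN:

#include <stdio.h>

typedef void (*gen_fn) (void);

/* Call GEN through the signature matching NOPS collected operands.  */
static int
dispatch (gen_fn gen, int nops, const int *op)
{
  switch (nops)
    {
    case 1:
      return ((int (*) (int)) gen) (op[0]);
    case 2:
      return ((int (*) (int, int)) gen) (op[0], op[1]);
    case 3:
      return ((int (*) (int, int, int)) gen) (op[0], op[1], op[2]);
    default:
      return -1;	/* the real code calls gcc_unreachable ().  */
    }
}

static int
add2 (int a, int b)
{
  return a + b;
}

int
main (void)
{
  int op[] = { 3, 4 };
  printf ("%d\n", dispatch ((gen_fn) add2, 2, op));	/* prints 7 */
  return 0;
}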