Example #1
tree
type_promotes_to (tree type)
{
  tree promoted_type;

  if (type == error_mark_node)
    return error_mark_node;

  type = TYPE_MAIN_VARIANT (type);

  /* Check for promotions of target-defined types first.  */
  promoted_type = targetm.promoted_type (type);
  if (promoted_type)
    return promoted_type;

  /* bool always promotes to int (not unsigned), even if it's the same
     size.  */
  if (TREE_CODE (type) == BOOLEAN_TYPE)
    type = integer_type_node;

  /* Normally convert enums to int, but convert wide enums to something
     wider.  */
  else if (TREE_CODE (type) == ENUMERAL_TYPE
	   || type == char16_type_node
	   || type == char32_type_node
	   || type == wchar_type_node)
    {
      int precision = MAX (TYPE_PRECISION (type),
			   TYPE_PRECISION (integer_type_node));
      tree totype = c_common_type_for_size (precision, 0);
      if (TREE_CODE (type) == ENUMERAL_TYPE)
	type = ENUM_UNDERLYING_TYPE (type);
      if (TYPE_UNSIGNED (type)
	  && ! int_fits_type_p (TYPE_MAX_VALUE (type), totype))
	type = c_common_type_for_size (precision, 1);
      else
	type = totype;
    }
  else if (c_promoting_integer_type_p (type))
    {
      /* Retain unsignedness if really not getting bigger.  */
      if (TYPE_UNSIGNED (type)
	  && TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node))
	type = unsigned_type_node;
      else
	type = integer_type_node;
    }
  else if (type == float_type_node)
    type = double_type_node;

  return type;
}
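
The BOOLEAN_TYPE and c_promoting_integer_type_p branches above implement C's value-preserving promotions: a narrow type promotes to signed int whenever int can represent all of its values, and keeps its unsignedness only when it is already as wide as int. A minimal standalone sketch of the observable effect (ordinary C, not GCC internals; it assumes the usual case where int is wider than short):

#include <stdio.h>

int
main (void)
{
  unsigned short us = 0;
  unsigned int ui = 0;

  /* us promotes to (signed) int, so us - 1 is the int value -1.  */
  printf ("%d\n", us - 1);   /* prints -1 */

  /* ui is already int-sized, so it stays unsigned and wraps around.  */
  printf ("%u\n", ui - 1);   /* prints UINT_MAX, e.g. 4294967295 */

  return 0;
}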
Example #2
File: tree-chrec.c  Project: woepaul/gcc
static tree
tree_fold_binomial (tree type, tree n, unsigned int k)
{
  double_int num, denom, idx, di_res;
  bool overflow;
  unsigned int i;
  tree res;

  /* Handle the most frequent cases.  */
  if (k == 0)
    return build_int_cst (type, 1);
  if (k == 1)
    return fold_convert (type, n);

  /* Numerator = n.  */
  num = TREE_INT_CST (n);

  /* Check that k <= n.  */
  if (num.ult (double_int::from_uhwi (k)))
    return NULL_TREE;

  /* Denominator = 2.  */
  denom = double_int::from_uhwi (2);

  /* Index = Numerator-1.  */
  idx = num - double_int_one;

  /* Numerator = Numerator*Index = n*(n-1).  */
  num = num.mul_with_sign (idx, false, &overflow);
  if (overflow)
    return NULL_TREE;

  for (i = 3; i <= k; i++)
    {
      /* Index--.  */
      --idx;

      /* Numerator *= Index.  */
      num = num.mul_with_sign (idx, false, &overflow);
      if (overflow)
	return NULL_TREE;

      /* Denominator *= i.  */
      denom *= double_int::from_uhwi (i);
    }

  /* Result = Numerator / Denominator.  */
  di_res = num.div (denom, true, EXACT_DIV_EXPR);
  res = build_int_cst_wide (type, di_res.low, di_res.high);
  return int_fits_type_p (res, type) ? res : NULL_TREE;
}
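
tree_fold_binomial computes the binomial coefficient C(n, k) incrementally: the numerator accumulates n*(n-1)*...*(n-k+1), the denominator accumulates k!, and the final division is exact. A sketch of the same scheme over plain unsigned long long instead of GCC's double_int, using the GCC/Clang builtin __builtin_mul_overflow for the overflow checks (illustrative only, not GCC code):

#include <stdio.h>

static unsigned long long
binomial (unsigned long long n, unsigned int k)
{
  unsigned long long num, denom, idx;
  unsigned int i;

  /* Handle the most frequent cases.  */
  if (k == 0)
    return 1;
  if (k == 1)
    return n;

  /* Check that k <= n; C(n, k) is 0 otherwise.  */
  if (n < k)
    return 0;

  num = n;        /* Numerator = n.  */
  denom = 2;      /* Denominator = 2.  */
  idx = n - 1;    /* Index = n - 1.  */

  /* Numerator = n * (n - 1); bail out on overflow, as the original does.  */
  if (__builtin_mul_overflow (num, idx, &num))
    return 0;

  for (i = 3; i <= k; i++)
    {
      --idx;
      if (__builtin_mul_overflow (num, idx, &num))
	return 0;
      /* If the numerator fits, so does the denominator: the running
	 numerator dominates the running i! term by term.  */
      denom *= i;
    }

  /* n*(n-1)*...*(n-k+1) is always exactly divisible by k!.  */
  return num / denom;
}

int
main (void)
{
  printf ("%llu\n", binomial (10, 3));   /* prints 120 */
  return 0;
}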
Example #3
static tree 
chrec_convert_1 (tree type, tree chrec, tree at_stmt,
		 bool use_overflow_semantics)
{
  tree ct, res;
  tree base, step;
  struct loop *loop;

  if (automatically_generated_chrec_p (chrec))
    return chrec;
  
  ct = chrec_type (chrec);
  if (ct == type)
    return chrec;

  if (!evolution_function_is_affine_p (chrec))
    goto keep_cast;

  loop = current_loops->parray[CHREC_VARIABLE (chrec)];
  base = CHREC_LEFT (chrec);
  step = CHREC_RIGHT (chrec);

  if (convert_affine_scev (loop, type, &base, &step, at_stmt,
			   use_overflow_semantics))
    return build_polynomial_chrec (loop->num, base, step);

  /* If we cannot propagate the cast inside the chrec, just keep the cast.  */
keep_cast:
  res = fold_convert (type, chrec);

  /* Don't propagate overflows.  */
  if (CONSTANT_CLASS_P (res))
    {
      TREE_CONSTANT_OVERFLOW (res) = 0;
      TREE_OVERFLOW (res) = 0;
    }

  /* But reject constants that don't fit in their type after conversion.
     This can happen if TYPE_MIN_VALUE or TYPE_MAX_VALUE are not the
     natural values associated with TYPE_PRECISION and TYPE_UNSIGNED,
     and can cause problems later when computing niters of loops.  Note
     that we don't do the check before converting because we don't want
     to reject conversions of negative chrecs to unsigned types.  */
  if (TREE_CODE (res) == INTEGER_CST
      && TREE_CODE (type) == INTEGER_TYPE
      && !int_fits_type_p (res, type))
    res = chrec_dont_know;

  return res;
}
Example #4
tree 
chrec_convert (tree type, 
	       tree chrec)
{
  tree ct;
  
  if (automatically_generated_chrec_p (chrec))
    return chrec;
  
  ct = chrec_type (chrec);
  if (ct == type)
    return chrec;

  if (TYPE_PRECISION (ct) < TYPE_PRECISION (type))
    return count_ev_in_wider_type (type, chrec);

  switch (TREE_CODE (chrec))
    {
    case POLYNOMIAL_CHREC:
      return build_polynomial_chrec (CHREC_VARIABLE (chrec),
				     chrec_convert (type,
						    CHREC_LEFT (chrec)),
				     chrec_convert (type,
						    CHREC_RIGHT (chrec)));

    default:
      {
	tree res = fold_convert (type, chrec);

	/* Don't propagate overflows.  */
	TREE_OVERFLOW (res) = 0;
	if (CONSTANT_CLASS_P (res))
	  TREE_CONSTANT_OVERFLOW (res) = 0;

	/* But reject constants that don't fit in their type after conversion.
	   This can happen if TYPE_MIN_VALUE or TYPE_MAX_VALUE are not the
	   natural values associated with TYPE_PRECISION and TYPE_UNSIGNED,
	   and can cause problems later when computing niters of loops.  Note
	   that we don't do the check before converting because we don't want
	   to reject conversions of negative chrecs to unsigned types.  */
	if (TREE_CODE (res) == INTEGER_CST
	    && TREE_CODE (type) == INTEGER_TYPE
	    && !int_fits_type_p (res, type))
	  res = chrec_dont_know;

	return res;
      }
    }
}
Example #5
static tree
tree_fold_binomial (tree type, tree n, unsigned int k)
{
  bool overflow;
  unsigned int i;
  tree res;

  /* Handle the most frequent cases.  */
  if (k == 0)
    return build_int_cst (type, 1);
  if (k == 1)
    return fold_convert (type, n);

  /* Check that k <= n.  */
  if (wi::ltu_p (n, k))
    return NULL_TREE;

  /* Denominator = 2.  */
  wide_int denom = wi::two (TYPE_PRECISION (TREE_TYPE (n)));

  /* Index = Numerator-1.  */
  wide_int idx = wi::sub (n, 1);

  /* Numerator = Numerator*Index = n*(n-1).  */
  wide_int num = wi::smul (n, idx, &overflow);
  if (overflow)
    return NULL_TREE;

  for (i = 3; i <= k; i++)
    {
      /* Index--.  */
      --idx;

      /* Numerator *= Index.  */
      num = wi::smul (num, idx, &overflow);
      if (overflow)
	return NULL_TREE;

      /* Denominator *= i.  */
      denom *= i;
    }

  /* Result = Numerator / Denominator.  */
  wide_int di_res = wi::udiv_trunc (num, denom);
  res = wide_int_to_tree (type, di_res);
  return int_fits_type_p (res, type) ? res : NULL_TREE;
}
Example #6
static tree
chrec_convert_1 (tree type, tree chrec, gimple *at_stmt,
		 bool use_overflow_semantics)
{
  tree ct, res;
  tree base, step;
  struct loop *loop;

  if (automatically_generated_chrec_p (chrec))
    return chrec;

  ct = chrec_type (chrec);
  if (useless_type_conversion_p (type, ct))
    return chrec;

  if (!evolution_function_is_affine_p (chrec))
    goto keep_cast;

  loop = get_chrec_loop (chrec);
  base = CHREC_LEFT (chrec);
  step = CHREC_RIGHT (chrec);

  if (convert_affine_scev (loop, type, &base, &step, at_stmt,
			   use_overflow_semantics))
    return build_polynomial_chrec (loop->num, base, step);

  /* If we cannot propagate the cast inside the chrec, just keep the cast.  */
keep_cast:
  /* Fold will not canonicalize (long)(i - 1) to (long)i - 1 because that
     may be more expensive.  We do want to perform this optimization here
     though for canonicalization reasons.  */
  if (use_overflow_semantics
      && (TREE_CODE (chrec) == PLUS_EXPR
	  || TREE_CODE (chrec) == MINUS_EXPR)
      && TREE_CODE (type) == INTEGER_TYPE
      && TREE_CODE (ct) == INTEGER_TYPE
      && TYPE_PRECISION (type) > TYPE_PRECISION (ct)
      && TYPE_OVERFLOW_UNDEFINED (ct))
    res = fold_build2 (TREE_CODE (chrec), type,
		       fold_convert (type, TREE_OPERAND (chrec, 0)),
		       fold_convert (type, TREE_OPERAND (chrec, 1)));
  /* Similarly, perform the trick that (signed char)((int)x + 2) can be
     narrowed to (signed char)((unsigned char)x + 2).  */
  else if (use_overflow_semantics
	   && TREE_CODE (chrec) == POLYNOMIAL_CHREC
	   && TREE_CODE (ct) == INTEGER_TYPE
	   && TREE_CODE (type) == INTEGER_TYPE
	   && TYPE_OVERFLOW_UNDEFINED (type)
	   && TYPE_PRECISION (type) < TYPE_PRECISION (ct))
    {
      tree utype = unsigned_type_for (type);
      res = build_polynomial_chrec (CHREC_VARIABLE (chrec),
				    fold_convert (utype,
						  CHREC_LEFT (chrec)),
				    fold_convert (utype,
						  CHREC_RIGHT (chrec)));
      res = chrec_convert_1 (type, res, at_stmt, use_overflow_semantics);
    }
  else
    res = fold_convert (type, chrec);

  /* Don't propagate overflows.  */
  if (CONSTANT_CLASS_P (res))
    TREE_OVERFLOW (res) = 0;

  /* But reject constants that don't fit in their type after conversion.
     This can happen if TYPE_MIN_VALUE or TYPE_MAX_VALUE are not the
     natural values associated with TYPE_PRECISION and TYPE_UNSIGNED,
     and can cause problems later when computing niters of loops.  Note
     that we don't do the check before converting because we don't want
     to reject conversions of negative chrecs to unsigned types.  */
  if (TREE_CODE (res) == INTEGER_CST
      && TREE_CODE (type) == INTEGER_TYPE
      && !int_fits_type_p (res, type))
    res = chrec_dont_know;

  return res;
}
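
The second keep_cast branch above relies on the identity that a narrowing conversion commutes with addition performed in a wider type, because the narrowing reduces the result modulo 2^N either way. A standalone sketch (not GCC code) that checks the identity exhaustively for signed char; it assumes GCC's documented behaviour of reducing out-of-range values modulo 2^N on conversion to a signed integer type:

#include <assert.h>
#include <stdio.h>

int
main (void)
{
  int v;
  for (v = -128; v <= 127; v++)
    {
      signed char x = (signed char) v;
      /* Add in int, then narrow...  */
      signed char wide = (signed char) ((int) x + 2);
      /* ...versus reduce to unsigned char first, add, then narrow:
	 both end up reducing the sum modulo 2^8.  */
      signed char narrow = (signed char) ((unsigned char) x + 2);
      assert (wide == narrow);
    }
  printf ("identity holds for every signed char value\n");
  return 0;
}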
Example #7
static tree
tree_fold_binomial (tree type, tree n, unsigned int k)
{
  unsigned HOST_WIDE_INT lidx, lnum, ldenom, lres, ldum;
  HOST_WIDE_INT hidx, hnum, hdenom, hres, hdum;
  unsigned int i;
  tree res;

  /* Handle the most frequent cases.  */
  if (k == 0)
    return build_int_cst (type, 1);
  if (k == 1)
    return fold_convert (type, n);

  /* Check that k <= n.  */
  if (TREE_INT_CST_HIGH (n) == 0
      && TREE_INT_CST_LOW (n) < k)
    return NULL_TREE;

  /* Numerator = n.  */
  lnum = TREE_INT_CST_LOW (n);
  hnum = TREE_INT_CST_HIGH (n);

  /* Denominator = 2.  */
  ldenom = 2;
  hdenom = 0;

  /* Index = Numerator-1.  */
  if (lnum == 0)
    {
      hidx = hnum - 1;
      lidx = ~ (unsigned HOST_WIDE_INT) 0;
    }
  else
    {
      hidx = hnum;
      lidx = lnum - 1;
    }

  /* Numerator = Numerator*Index = n*(n-1).  */
  if (mul_double (lnum, hnum, lidx, hidx, &lnum, &hnum))
    return NULL_TREE;

  for (i = 3; i <= k; i++)
    {
      /* Index--.  */
      if (lidx == 0)
	{
	  hidx--;
	  lidx = ~ (unsigned HOST_WIDE_INT) 0;
	}
      else
        lidx--;

      /* Numerator *= Index.  */
      if (mul_double (lnum, hnum, lidx, hidx, &lnum, &hnum))
	return NULL_TREE;

      /* Denominator *= i.  */
      mul_double (ldenom, hdenom, i, 0, &ldenom, &hdenom);
    }

  /* Result = Numerator / Denominator.  */
  div_and_round_double (EXACT_DIV_EXPR, 1, lnum, hnum, ldenom, hdenom,
			&lres, &hres, &ldum, &hdum);

  res = build_int_cst_wide (type, lres, hres);
  return int_fits_type_p (res, type) ? res : NULL_TREE;
}
Example #8
File: s390-c.c  Project: WojciechMigda/gcc
/* Return a tree expression for a call to the overloaded builtin
   function OB_FNDECL at LOC with arguments PASSED_ARGLIST.  */
tree
s390_resolve_overloaded_builtin (location_t loc,
				 tree ob_fndecl,
				 void *passed_arglist)
{
  vec<tree, va_gc> *arglist = static_cast<vec<tree, va_gc> *> (passed_arglist);
  unsigned int in_args_num = vec_safe_length (arglist);
  unsigned int ob_args_num = 0;
  unsigned int ob_fcode = DECL_FUNCTION_CODE (ob_fndecl);
  enum s390_overloaded_builtin_vars bindex;
  unsigned int i;
  int last_match_type = INT_MAX;
  int last_match_index = -1;
  unsigned int all_op_flags;
  int num_matches = 0;
  tree target_builtin_decl, b_arg_chain, return_type;
  enum s390_builtin_ov_type_index last_match_fntype_index;

  if (TARGET_DEBUG_ARG)
    fprintf (stderr,
      "s390_resolve_overloaded_builtin, code = %4d, %s - %s overloaded\n",
      (int)ob_fcode, IDENTIFIER_POINTER (DECL_NAME (ob_fndecl)),
     ob_fcode < S390_BUILTIN_MAX ? "not" : "");

  /* 0...S390_BUILTIN_MAX-1 is for non-overloaded builtins.  */
  if (ob_fcode < S390_BUILTIN_MAX)
    {
      if (bflags_for_builtin(ob_fcode) & B_INT)
	{
	  error_at (loc,
		    "Builtin %qF is for GCC internal use only.",
		    ob_fndecl);
	  return error_mark_node;
	}
      return NULL_TREE;
    }

  ob_fcode -= S390_BUILTIN_MAX;

  for (b_arg_chain = TYPE_ARG_TYPES (TREE_TYPE (ob_fndecl));
       !VOID_TYPE_P (TREE_VALUE (b_arg_chain));
       b_arg_chain = TREE_CHAIN (b_arg_chain))
    ob_args_num++;

  if (ob_args_num != in_args_num)
    {
      error_at (loc,
		"Mismatch in number of arguments for builtin %qF. "
		"Expected: %d got %d", ob_fndecl,
		ob_args_num, in_args_num);
      return error_mark_node;
    }

  for (i = 0; i < in_args_num; i++)
    if ((*arglist)[i] == error_mark_node)
      return error_mark_node;

  /* Overloaded builtins without any variants are directly expanded here.  */
  if (desc_start_for_overloaded_builtin[ob_fcode] ==
      S390_OVERLOADED_BUILTIN_VAR_MAX)
    return s390_expand_overloaded_builtin (loc, ob_fcode, arglist, NULL_TREE);

  for (bindex = desc_start_for_overloaded_builtin[ob_fcode];
       bindex <= desc_end_for_overloaded_builtin[ob_fcode];
       bindex = (enum s390_overloaded_builtin_vars)((int)bindex + 1))
  {
    int match_type;
    enum s390_builtin_ov_type_index type_index =
      type_for_overloaded_builtin_var[bindex];

    if (TARGET_DEBUG_ARG)
      fprintf (stderr, "checking variant number: %d", (int)bindex);

    match_type = s390_fn_types_compatible (type_index, arglist);

    if (match_type == INT_MAX)
      continue;

    if (TARGET_DEBUG_ARG)
      fprintf (stderr,
	       " %s match score: %d\n", match_type == 0 ? "perfect" : "imperfect",
	       match_type);

    if (match_type < last_match_type)
      {
	num_matches = 1;
	last_match_type = match_type;
	last_match_fntype_index = type_index;
	last_match_index = bindex;
      }
    else if (match_type == last_match_type)
      num_matches++;
  }

  if (last_match_type == INT_MAX)
    {
      error_at (loc, "invalid parameter combination for intrinsic %qs",
		IDENTIFIER_POINTER (DECL_NAME (ob_fndecl)));
      return error_mark_node;
    }
  else if (num_matches > 1)
    {
      error_at (loc, "ambiguous overload for intrinsic %qs",
		IDENTIFIER_POINTER (DECL_NAME (ob_fndecl)));
      return error_mark_node;
    }

  if (bt_for_overloaded_builtin_var[last_match_index] == S390_BUILTIN_MAX)
    target_builtin_decl = ob_fndecl;
  else
    target_builtin_decl = s390_builtin_decls[bt_for_overloaded_builtin_var[last_match_index]];

  all_op_flags = opflags_overloaded_builtin_var[last_match_index];
  return_type = s390_builtin_types[s390_builtin_ov_types[last_match_fntype_index][0]];

  /* Check for the operand flags in the overloaded builtin variant.  */
  for (i = 0; i < ob_args_num; i++)
    {
      unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);
      tree arg = (*arglist)[i];
      tree type = s390_builtin_types[s390_builtin_ov_types[last_match_fntype_index][i + 1]];

      all_op_flags = all_op_flags >> O_SHIFT;

      if (op_flags == O_ELEM)
	{
	  int n_elem = s390_vec_n_elem (target_builtin_decl);
	  gcc_assert (n_elem > 0);
	  gcc_assert (type == integer_type_node);
	  (*arglist)[i] = build2 (BIT_AND_EXPR, integer_type_node,
				  fold_convert (integer_type_node, arg),
				  build_int_cst (NULL_TREE, n_elem - 1));
	}

      if (TREE_CODE (arg) != INTEGER_CST || !O_IMM_P (op_flags))
	continue;

      if ((TYPE_UNSIGNED (type)
	   && !int_fits_type_p (arg, c_common_unsigned_type (type)))
	  || (!TYPE_UNSIGNED (type)
	      && !int_fits_type_p (arg, c_common_signed_type (type))))
	{
	  error("constant argument %d for builtin %qF is out "
		"of range for target type",
		i + 1, target_builtin_decl);
	  return error_mark_node;
	}

      if (TREE_CODE (arg) == INTEGER_CST
	  && !s390_const_operand_ok (arg, i + 1, op_flags, target_builtin_decl))
	return error_mark_node;
    }

  /* Handle builtins we expand directly - without mapping them to a low
     level builtin.  */
  if (bt_for_overloaded_builtin_var[last_match_index] == S390_BUILTIN_MAX)
    return s390_expand_overloaded_builtin (loc, ob_fcode, arglist, return_type);

  s390_adjust_builtin_arglist (ob_fcode, target_builtin_decl, &arglist);

  if (VOID_TYPE_P (return_type))
    return build_function_call_vec (loc, vNULL, target_builtin_decl,
				    arglist, NULL);
  else
    return fully_fold_convert (return_type,
			       build_function_call_vec (loc, vNULL, target_builtin_decl,
							arglist, NULL));
}
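
The variant-selection loop above is a best-score search with ambiguity detection: INT_MAX marks a variant that does not match at all, a strictly better (lower) score resets the tie counter to one, and an equal score increments it. A self-contained sketch of just that skeleton, with hypothetical names (pick_variant is invented, not part of the s390 backend):

#include <limits.h>
#include <stdio.h>

static int
pick_variant (const int *scores, int n_variants, int *ambiguous)
{
  int best_score = INT_MAX, best_index = -1, num_matches = 0, i;

  for (i = 0; i < n_variants; i++)
    {
      if (scores[i] == INT_MAX)   /* INT_MAX means "does not match".  */
	continue;
      if (scores[i] < best_score)
	{
	  best_score = scores[i];
	  best_index = i;
	  num_matches = 1;
	}
      else if (scores[i] == best_score)
	num_matches++;
    }

  *ambiguous = num_matches > 1;
  return best_index;              /* -1 if nothing matched.  */
}

int
main (void)
{
  int scores[] = { INT_MAX, 2, 0, 2 };
  int ambiguous;
  printf ("variant %d\n", pick_variant (scores, 4, &ambiguous));
  /* prints "variant 2"; ambiguous is 0 because the score 0 is unique.  */
  return 0;
}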
Example #9
File: cvt.c  Project: h4ck3rm1k3/gcc
tree
ocp_convert (tree type, tree expr, int convtype, int flags)
{
  tree e = expr;
  enum tree_code code = TREE_CODE (type);
  const char *invalid_conv_diag;

  if (error_operand_p (e) || type == error_mark_node)
    return error_mark_node;

  complete_type (type);
  complete_type (TREE_TYPE (expr));

  if ((invalid_conv_diag
       = targetm.invalid_conversion (TREE_TYPE (expr), type)))
    {
      error (invalid_conv_diag);
      return error_mark_node;
    }

  e = integral_constant_value (e);

  if (MAYBE_CLASS_TYPE_P (type) && (convtype & CONV_FORCE_TEMP))
    /* We need a new temporary; don't take this shortcut.  */;
  else if (same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (e)))
    {
      if (same_type_p (type, TREE_TYPE (e)))
	/* The call to fold will not always remove the NOP_EXPR as
	   might be expected, since if one of the types is a typedef, the
	   the comparison in fold is just equality of pointers, not a
	   call to comptypes.  We don't call fold in this case because
	   that can result in infinite recursion; fold will call
	   convert, which will call ocp_convert, etc.  */
	return e;
      /* For complex data types, we need to perform componentwise
	 conversion.  */
      else if (TREE_CODE (type) == COMPLEX_TYPE)
	return fold_if_not_in_template (convert_to_complex (type, e));
      else if (TREE_CODE (e) == TARGET_EXPR)
	{
	  /* Don't build a NOP_EXPR of class type.  Instead, change the
	     type of the temporary.  */
	  TREE_TYPE (e) = TREE_TYPE (TARGET_EXPR_SLOT (e)) = type;
	  return e;
	}
      else
	{
	  /* We shouldn't be treating objects of ADDRESSABLE type as
	     rvalues.  */
	  gcc_assert (!TREE_ADDRESSABLE (type));
	  return fold_if_not_in_template (build_nop (type, e));
	}
    }

  if (code == VOID_TYPE && (convtype & CONV_STATIC))
    {
      e = convert_to_void (e, /*implicit=*/NULL, tf_warning_or_error);
      return e;
    }

  if (INTEGRAL_CODE_P (code))
    {
      tree intype = TREE_TYPE (e);

      if (TREE_CODE (type) == ENUMERAL_TYPE)
	{
	  /* enum = enum, enum = int, enum = float, (enum)pointer are all
	     errors.  */
	  if (((INTEGRAL_OR_ENUMERATION_TYPE_P (intype)
		|| TREE_CODE (intype) == REAL_TYPE)
	       && ! (convtype & CONV_STATIC))
	      || TREE_CODE (intype) == POINTER_TYPE)
	    {
	      if (flags & LOOKUP_COMPLAIN)
		permerror (input_location, "conversion from %q#T to %q#T", intype, type);

	      if (!flag_permissive)
		return error_mark_node;
	    }

	  /* [expr.static.cast]

	     8. A value of integral or enumeration type can be explicitly
	     converted to an enumeration type. The value is unchanged if
	     the original value is within the range of the enumeration
	     values. Otherwise, the resulting enumeration value is
	     unspecified.  */
	  if (TREE_CODE (expr) == INTEGER_CST && !int_fits_type_p (expr, type))
	    warning (OPT_Wconversion, 
		     "the result of the conversion is unspecified because "
		     "%qE is outside the range of type %qT",
		     expr, type);
	}
      if (MAYBE_CLASS_TYPE_P (intype))
	{
	  tree rval;
	  rval = build_type_conversion (type, e);
	  if (rval)
	    return rval;
	  if (flags & LOOKUP_COMPLAIN)
	    error ("%q#T used where a %qT was expected", intype, type);
	  return error_mark_node;
	}
      if (code == BOOLEAN_TYPE)
	return cp_truthvalue_conversion (e);

      return fold_if_not_in_template (convert_to_integer (type, e));
    }
  if (POINTER_TYPE_P (type) || TYPE_PTR_TO_MEMBER_P (type))
    return fold_if_not_in_template (cp_convert_to_pointer (type, e));
  if (code == VECTOR_TYPE)
    {
      tree in_vtype = TREE_TYPE (e);
      if (MAYBE_CLASS_TYPE_P (in_vtype))
	{
	  tree ret_val;
	  ret_val = build_type_conversion (type, e);
	  if (ret_val)
	    return ret_val;
	  if (flags & LOOKUP_COMPLAIN)
	    error ("%q#T used where a %qT was expected", in_vtype, type);
	  return error_mark_node;
	}
      return fold_if_not_in_template (convert_to_vector (type, e));
    }
  if (code == REAL_TYPE || code == COMPLEX_TYPE)
    {
      if (MAYBE_CLASS_TYPE_P (TREE_TYPE (e)))
	{
	  tree rval;
	  rval = build_type_conversion (type, e);
	  if (rval)
	    return rval;
	  else
	    if (flags & LOOKUP_COMPLAIN)
	      error ("%q#T used where a floating point value was expected",
			TREE_TYPE (e));
	}
      if (code == REAL_TYPE)
	return fold_if_not_in_template (convert_to_real (type, e));
      else if (code == COMPLEX_TYPE)
	return fold_if_not_in_template (convert_to_complex (type, e));
    }

  /* New C++ semantics:  since assignment is now based on
     memberwise copying,  if the rhs type is derived from the
     lhs type, then we may still do a conversion.  */
  if (RECORD_OR_UNION_CODE_P (code))
    {
      tree dtype = TREE_TYPE (e);
      tree ctor = NULL_TREE;

      dtype = TYPE_MAIN_VARIANT (dtype);

      /* Conversion between aggregate types.  New C++ semantics allow
	 objects of derived type to be cast to objects of base type.
	 Old semantics only allowed this between pointers.

	 There may be some ambiguity between using a constructor
	 vs. using a type conversion operator when both apply.  */

      ctor = e;

      if (abstract_virtuals_error (NULL_TREE, type))
	return error_mark_node;

      if (BRACE_ENCLOSED_INITIALIZER_P (ctor))
	ctor = perform_implicit_conversion (type, ctor, tf_warning_or_error);
      else if ((flags & LOOKUP_ONLYCONVERTING)
	       && ! (CLASS_TYPE_P (dtype) && DERIVED_FROM_P (type, dtype)))
	/* For copy-initialization, first we create a temp of the proper type
	   with a user-defined conversion sequence, then we direct-initialize
	   the target with the temp (see [dcl.init]).  */
	ctor = build_user_type_conversion (type, ctor, flags);
      else
	ctor = build_special_member_call (NULL_TREE,
					  complete_ctor_identifier,
					  build_tree_list (NULL_TREE, ctor),
					  type, flags,
                                          tf_warning_or_error);
      if (ctor)
	return build_cplus_new (type, ctor);
    }

  if (flags & LOOKUP_COMPLAIN)
    error ("conversion from %qT to non-scalar type %qT requested",
	   TREE_TYPE (expr), type);
  return error_mark_node;
}
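
Every example on this page gates a conversion on int_fits_type_p, which asks whether a constant lies in the range implied by the target type's precision and signedness. A hypothetical standalone helper sketching that range check (fits_in_precision is an invented name, not a GCC function; it assumes 1 <= precision <= 64):

#include <stdbool.h>
#include <stdio.h>

static bool
fits_in_precision (long long value, unsigned int precision, bool is_unsigned)
{
  if (is_unsigned)
    {
      /* Range is 0 .. 2^precision - 1.  */
      unsigned long long max
	= precision >= 64 ? ~0ULL : (1ULL << precision) - 1;
      return value >= 0 && (unsigned long long) value <= max;
    }
  else
    {
      /* Range is -2^(precision-1) .. 2^(precision-1) - 1.  */
      long long max
	= precision >= 64 ? (long long) (~0ULL >> 1)
			  : (long long) ((1ULL << (precision - 1)) - 1);
      return value >= -max - 1 && value <= max;
    }
}

int
main (void)
{
  /* 128 is out of range for an 8-bit signed type (-128 .. 127) but in
     range for an 8-bit unsigned type (0 .. 255).  */
  printf ("%d %d\n",
	  fits_in_precision (128, 8, false),
	  fits_in_precision (128, 8, true));   /* prints 0 1 */
  return 0;
}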
Example #10
static bool
factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
				   tree arg0, tree arg1)
{
  gimple arg0_def_stmt = NULL, arg1_def_stmt = NULL, new_stmt;
  tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
  tree temp, result;
  gphi *newphi;
  gimple_stmt_iterator gsi, gsi_for_def;
  source_location locus = gimple_location (phi);
  enum tree_code convert_code;

  /* Handle only PHI statements with two arguments.  TODO: If all
     other arguments to PHI are INTEGER_CST or if their defining
     statement have the same unary operation, we can handle more
     than two arguments too.  */
  if (gimple_phi_num_args (phi) != 2)
    return false;

  /* First canonicalize to simplify tests.  */
  if (TREE_CODE (arg0) != SSA_NAME)
    {
      std::swap (arg0, arg1);
      std::swap (e0, e1);
    }

  if (TREE_CODE (arg0) != SSA_NAME
      || (TREE_CODE (arg1) != SSA_NAME
	  && TREE_CODE (arg1) != INTEGER_CST))
    return false;

  /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
     a conversion.  */
  arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
  if (!is_gimple_assign (arg0_def_stmt)
      || !gimple_assign_cast_p (arg0_def_stmt))
    return false;

  /* Use the RHS as new_arg0.  */
  convert_code = gimple_assign_rhs_code (arg0_def_stmt);
  new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
  if (convert_code == VIEW_CONVERT_EXPR)
    new_arg0 = TREE_OPERAND (new_arg0, 0);

  if (TREE_CODE (arg1) == SSA_NAME)
    {
      /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
	 is a conversion.  */
      arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
      if (!is_gimple_assign (arg1_def_stmt)
	  || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
	return false;

      /* Use the RHS as new_arg1.  */
      new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
      if (convert_code == VIEW_CONVERT_EXPR)
	new_arg1 = TREE_OPERAND (new_arg1, 0);
    }
  else
    {
      /* If arg1 is an INTEGER_CST, fold it to new type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
	  && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
	{
	  if (gimple_assign_cast_p (arg0_def_stmt))
	    new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
	  else
	    return false;
	}
      else
	return false;
    }

  /*  If arg0/arg1 have > 1 use, then this transformation actually increases
      the number of expressions evaluated at runtime.  */
  if (!has_single_use (arg0)
      || (arg1_def_stmt && !has_single_use (arg1)))
    return false;

  /* If the types of new_arg0 and new_arg1 are different, bail out.  */
  if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
    return false;

  /* Create a new PHI stmt.  */
  result = PHI_RESULT (phi);
  temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
  newphi = create_phi_node (temp, gimple_bb (phi));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI ");
      print_generic_expr (dump_file, gimple_phi_result (phi), 0);
      fprintf (dump_file,
	       " changed to factor conversion out from COND_EXPR.\n");
      fprintf (dump_file, "New stmt with CAST that defines ");
      print_generic_expr (dump_file, result, 0);
      fprintf (dump_file, ".\n");
    }

  /* Remove the old cast(s) that have a single use.  */
  gsi_for_def = gsi_for_stmt (arg0_def_stmt);
  gsi_remove (&gsi_for_def, true);
  if (arg1_def_stmt)
    {
      gsi_for_def = gsi_for_stmt (arg1_def_stmt);
      gsi_remove (&gsi_for_def, true);
    }

  add_phi_arg (newphi, new_arg0, e0, locus);
  add_phi_arg (newphi, new_arg1, e1, locus);

  /* Create the conversion stmt and insert it.  */
  if (convert_code == VIEW_CONVERT_EXPR)
    {
      temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
      new_stmt = gimple_build_assign (result, temp);
    }
  else
    new_stmt = gimple_build_assign (result, convert_code, temp);
  gsi = gsi_after_labels (gimple_bb (phi));
  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);

  /* Remove the original PHI stmt.  */
  gsi = gsi_for_stmt (phi);
  gsi_remove (&gsi, true);
  return true;
}
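
At the source level, the transformation this function performs factors a conversion applied on both arms of a conditional out of the PHI node: the merge is done in the narrow type and a single cast follows it. An illustrative before/after sketch in plain C (not GCC code):

/* Before: the cast appears once per arm, feeding the PHI.  */
long
before (int cond, int a, int b)
{
  return cond ? (long) a : (long) b;
}

/* After: the PHI merges the narrow values and one cast converts the
   result; with single-use casts this evaluates fewer expressions.  */
long
after (int cond, int a, int b)
{
  int t = cond ? a : b;
  return (long) t;
}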