Example #1
tree
chrec_convert (tree type, tree chrec, gimple *at_stmt,
	       bool use_overflow_semantics)
{
  return chrec_convert_1 (type, chrec, at_stmt, use_overflow_semantics);
}
Example #2
tree 
chrec_convert (tree type, tree chrec, gimple at_stmt)
{
  return chrec_convert_1 (type, chrec, at_stmt, true);
}
Example #3
static tree
chrec_convert_1 (tree type, tree chrec, gimple *at_stmt,
		 bool use_overflow_semantics)
{
  tree ct, res;
  tree base, step;
  struct loop *loop;

  if (automatically_generated_chrec_p (chrec))
    return chrec;

  ct = chrec_type (chrec);
  if (useless_type_conversion_p (type, ct))
    return chrec;

  if (!evolution_function_is_affine_p (chrec))
    goto keep_cast;

  loop = get_chrec_loop (chrec);
  base = CHREC_LEFT (chrec);
  step = CHREC_RIGHT (chrec);

  if (convert_affine_scev (loop, type, &base, &step, at_stmt,
			   use_overflow_semantics))
    return build_polynomial_chrec (loop->num, base, step);

  /* If we cannot propagate the cast inside the chrec, just keep the cast.  */
keep_cast:
  /* Fold will not canonicalize (long)(i - 1) to (long)i - 1 because that
     may be more expensive.  We do want to perform this optimization here
     though for canonicalization reasons.  */
  if (use_overflow_semantics
      && (TREE_CODE (chrec) == PLUS_EXPR
	  || TREE_CODE (chrec) == MINUS_EXPR)
      && TREE_CODE (type) == INTEGER_TYPE
      && TREE_CODE (ct) == INTEGER_TYPE
      && TYPE_PRECISION (type) > TYPE_PRECISION (ct)
      && TYPE_OVERFLOW_UNDEFINED (ct))
    res = fold_build2 (TREE_CODE (chrec), type,
		       fold_convert (type, TREE_OPERAND (chrec, 0)),
		       fold_convert (type, TREE_OPERAND (chrec, 1)));
  /* Similarly, perform the trick that (signed char)((int)x + 2) can be
     narrowed to (signed char)((unsigned char)x + 2).  */
  else if (use_overflow_semantics
	   && TREE_CODE (chrec) == POLYNOMIAL_CHREC
	   && TREE_CODE (ct) == INTEGER_TYPE
	   && TREE_CODE (type) == INTEGER_TYPE
	   && TYPE_OVERFLOW_UNDEFINED (type)
	   && TYPE_PRECISION (type) < TYPE_PRECISION (ct))
    {
      tree utype = unsigned_type_for (type);
      res = build_polynomial_chrec (CHREC_VARIABLE (chrec),
				    fold_convert (utype,
						  CHREC_LEFT (chrec)),
				    fold_convert (utype,
						  CHREC_RIGHT (chrec)));
      res = chrec_convert_1 (type, res, at_stmt, use_overflow_semantics);
    }
  else
    res = fold_convert (type, chrec);

  /* Don't propagate overflows.  */
  if (CONSTANT_CLASS_P (res))
    TREE_OVERFLOW (res) = 0;

  /* But reject constants that don't fit in their type after conversion.
     This can happen if TYPE_MIN_VALUE or TYPE_MAX_VALUE are not the
     natural values associated with TYPE_PRECISION and TYPE_UNSIGNED,
     and can cause problems later when computing niters of loops.  Note
     that we don't do the check before converting because we don't want
     to reject conversions of negative chrecs to unsigned types.  */
  if (TREE_CODE (res) == INTEGER_CST
      && TREE_CODE (type) == INTEGER_TYPE
      && !int_fits_type_p (res, type))
    res = chrec_dont_know;

  return res;
}
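The keep_cast path above relies on two algebraic facts that are easy to check outside of GCC. The following is a minimal standalone sketch in plain C (no GCC internals; the concrete values are illustrative assumptions) showing that, for signed types with undefined overflow, (long)(i - 1) equals (long)i - 1, and that a narrowing cast of a wider sum can go through an unsigned intermediate type.

/* Standalone sketch (not GCC code): checks the two keep_cast rewrites.  */
#include <stdio.h>

int
main (void)
{
  /* 1) Widening with undefined signed overflow: (long)(i - 1) may be
	rewritten as (long)i - 1.  */
  int i = 42;
  printf ("%ld %ld\n", (long) (i - 1), (long) i - 1);	/* 41 41 */

  /* 2) Narrowing through an unsigned intermediate type:
	(signed char)((int)x + 2) equals (signed char)((unsigned char)x + 2),
	since only the low bits survive the final truncation.  The value
	-127 assumes the usual two's-complement behaviour of the
	out-of-range conversion.  */
  signed char x = 127;
  signed char a = (signed char) ((int) x + 2);
  signed char b = (signed char) ((unsigned char) x + 2);
  printf ("%d %d\n", a, b);	/* -127 -127 */

  return 0;
}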
Example #4
bool
convert_affine_scev (struct loop *loop, tree type,
		     tree *base, tree *step, gimple at_stmt,
		     bool use_overflow_semantics)
{
  tree ct = TREE_TYPE (*step);
  bool enforce_overflow_semantics;
  bool must_check_src_overflow, must_check_rslt_overflow;
  tree new_base, new_step;
  tree step_type = POINTER_TYPE_P (type) ? sizetype : type;

  /* If we cannot perform arithmetic in TYPE, avoid creating an scev.  */
  if (avoid_arithmetics_in_type_p (type))
    return false;

  /* In general,
     (TYPE) (BASE + STEP * i) = (TYPE) BASE + (TYPE -- sign extend) STEP * i,
     but we must check some assumptions.
     
     1) If [BASE, +, STEP] wraps, the equation is not valid when precision
        of CT is smaller than the precision of TYPE.  For example, when we
	cast unsigned char [254, +, 1] to unsigned, the values on left side
	are 254, 255, 0, 1, ..., but those on the right side are
	254, 255, 256, 257, ...
     2) In case that we must also preserve the fact that signed ivs do not
        overflow, we must additionally check that the new iv does not wrap.
	For example, unsigned char [125, +, 1] cast to signed char could
	become a wrapping variable with values 125, 126, 127, -128, -127, ...,
	which would confuse optimizers that assume that this does not
	happen.  */
  must_check_src_overflow = TYPE_PRECISION (ct) < TYPE_PRECISION (type);

  enforce_overflow_semantics = (use_overflow_semantics
				&& nowrap_type_p (type));
  if (enforce_overflow_semantics)
    {
      /* We can avoid checking whether the result overflows in the following
	 cases:

	 -- must_check_src_overflow is true, and the range of TYPE is superset
	    of the range of CT -- i.e., in all cases except if CT signed and
	    TYPE unsigned.
         -- both CT and TYPE have the same precision and signedness, and we
	    verify instead that the source does not overflow (this may be
	    easier than verifying it for the result, as we may use the
	    information about the semantics of overflow in CT).  */
      if (must_check_src_overflow)
	{
	  if (TYPE_UNSIGNED (type) && !TYPE_UNSIGNED (ct))
	    must_check_rslt_overflow = true;
	  else
	    must_check_rslt_overflow = false;
	}
      else if (TYPE_UNSIGNED (ct) == TYPE_UNSIGNED (type)
	       && TYPE_PRECISION (ct) == TYPE_PRECISION (type))
	{
	  must_check_rslt_overflow = false;
	  must_check_src_overflow = true;
	}
      else
	must_check_rslt_overflow = true;
    }
  else
    must_check_rslt_overflow = false;

  if (must_check_src_overflow
      && scev_probably_wraps_p (*base, *step, at_stmt, loop,
				use_overflow_semantics))
    return false;

  new_base = chrec_convert_1 (type, *base, at_stmt,
			      use_overflow_semantics);
  /* The step must be sign extended, regardless of the signedness
     of CT and TYPE.  This only needs to be handled specially when
     CT is unsigned -- to avoid e.g. unsigned char [100, +, 255]
     (with values 100, 99, 98, ...) from becoming signed or unsigned
     [100, +, 255] with values 100, 355, ...; the sign-extension is 
     performed by default when CT is signed.  */
  new_step = *step;
  if (TYPE_PRECISION (step_type) > TYPE_PRECISION (ct) && TYPE_UNSIGNED (ct))
    new_step = chrec_convert_1 (signed_type_for (ct), new_step, at_stmt,
				use_overflow_semantics);
  new_step = chrec_convert_1 (step_type, new_step, at_stmt, use_overflow_semantics);

  if (automatically_generated_chrec_p (new_base)
      || automatically_generated_chrec_p (new_step))
    return false;

  if (must_check_rslt_overflow
      /* Note that in this case we cannot use the fact that signed variables
	 do not overflow, as this is what we are verifying for the new iv.  */
      && scev_probably_wraps_p (new_base, new_step, at_stmt, loop, false))
    return false;

  *base = new_base;
  *step = new_step;
  return true;
}
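The two hazards documented in the comments of convert_affine_scev (a narrow iv that wraps before widening, and an unsigned step that must be sign-extended) can be reproduced with this minimal standalone sketch in plain C. It uses no GCC internals; the values 254, 100 and 255 come straight from the comments above, everything else is an illustrative assumption.

/* Standalone sketch (not GCC code): reproduces the two hazards above.  */
#include <stdio.h>

int
main (void)
{
  /* 1) Source wrap: unsigned char [254, +, 1] widened to unsigned.
	The narrow iv wraps (254, 255, 0, 1) while the widened
	expression keeps counting (254, 255, 256, 257).  */
  for (unsigned i = 0; i < 4; i++)
    printf ("narrow=%d wide=%u\n",
	    (unsigned char) (254 + i), 254u + i);

  /* 2) Step sign extension: unsigned char [100, +, 255] steps downwards
	(100, 99, 98, ...), so the step must be treated as -1 when the
	chrec is widened; zero-extending 255 would instead give
	100, 355, 610.  The -1 assumes the usual two's-complement
	conversion.  */
  printf ("signed step = %d\n", (signed char) (unsigned char) 255);
  for (unsigned i = 0; i < 3; i++)
    printf ("iv value %u: %d\n", i, (unsigned char) (100 + 255u * i));

  return 0;
}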