Example #1
static inline tree
chrec_fold_multiply_poly_poly (tree type,
			       tree poly0,
			       tree poly1)
{
  tree t0, t1, t2;
  int var;
  struct loop *loop0 = get_chrec_loop (poly0);
  struct loop *loop1 = get_chrec_loop (poly1);

  gcc_assert (poly0);
  gcc_assert (poly1);
  gcc_assert (TREE_CODE (poly0) == POLYNOMIAL_CHREC);
  gcc_assert (TREE_CODE (poly1) == POLYNOMIAL_CHREC);
  gcc_checking_assert (useless_type_conversion_p (type, chrec_type (poly0))
		       && useless_type_conversion_p (type, chrec_type (poly1)));

  /* {a, +, b}_1 * {c, +, d}_2  ->  {c*{a, +, b}_1, +, d}_2,
     {a, +, b}_2 * {c, +, d}_1  ->  {a*{c, +, d}_1, +, b}_2,
     {a, +, b}_x * {c, +, d}_x  ->  {a*c, +, a*d + b*c + b*d, +, 2*b*d}_x.  */
  if (flow_loop_nested_p (loop0, loop1))
    /* poly0 is a constant wrt. poly1.  */
    return build_polynomial_chrec
      (CHREC_VARIABLE (poly1),
       chrec_fold_multiply (type, CHREC_LEFT (poly1), poly0),
       CHREC_RIGHT (poly1));

  if (flow_loop_nested_p (loop1, loop0))
    /* poly1 is a constant wrt. poly0.  */
    return build_polynomial_chrec
      (CHREC_VARIABLE (poly0),
       chrec_fold_multiply (type, CHREC_LEFT (poly0), poly1),
       CHREC_RIGHT (poly0));

  gcc_assert (loop0 == loop1);

  /* poly0 and poly1 are two polynomials in the same variable,
     {a, +, b}_x * {c, +, d}_x  ->  {a*c, +, a*d + b*c + b*d, +, 2*b*d}_x.  */

  /* "a*c".  */
  t0 = chrec_fold_multiply (type, CHREC_LEFT (poly0), CHREC_LEFT (poly1));

  /* "a*d + b*c".  */
  t1 = chrec_fold_multiply (type, CHREC_LEFT (poly0), CHREC_RIGHT (poly1));
  t1 = chrec_fold_plus (type, t1, chrec_fold_multiply (type,
						       CHREC_RIGHT (poly0),
						       CHREC_LEFT (poly1)));
  /* "b*d".  */
  t2 = chrec_fold_multiply (type, CHREC_RIGHT (poly0), CHREC_RIGHT (poly1));
  /* "a*d + b*c + b*d".  */
  t1 = chrec_fold_plus (type, t1, t2);
  /* "2*b*d".  */
  t2 = chrec_fold_multiply (type, SCALAR_FLOAT_TYPE_P (type)
			    ? build_real (type, dconst2)
			    : build_int_cst (type, 2), t2);

  var = CHREC_VARIABLE (poly0);
  return build_polynomial_chrec (var, t0,
				 build_polynomial_chrec (var, t1, t2));
}
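
A short derivation of the same-loop rule used above, assuming the usual
binomial-basis reading of chrecs ({t0, +, t1, +, t2}_x evaluates to
t0 + t1*n + t2*n*(n-1)/2 at iteration n of loop x, so {a, +, b}_x is a + b*n):

  (a + b*n) * (c + d*n) = a*c + (a*d + b*c)*n + b*d*n^2
  n^2 = n + 2*(n*(n-1)/2)
  => a*c + (a*d + b*c + b*d)*n + 2*b*d*(n*(n-1)/2)
  => {a*c, +, a*d + b*c + b*d, +, 2*b*d}_x

which is exactly the t0, t1 and t2 computed above.
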
Example #2
tree
build_real_imag_expr (location_t location, enum tree_code code, tree arg)
{
  tree ret;
  tree arg_type = TREE_TYPE (arg);

  gcc_assert (code == REALPART_EXPR || code == IMAGPART_EXPR);

  if (TREE_CODE (arg_type) == COMPLEX_TYPE)
    {
      ret = build1 (code, TREE_TYPE (TREE_TYPE (arg)), arg);
      SET_EXPR_LOCATION (ret, location);
    }
  else if (INTEGRAL_TYPE_P (arg_type) || SCALAR_FLOAT_TYPE_P (arg_type))
    {
      ret = (code == REALPART_EXPR
	     ? arg
	     : omit_one_operand_loc (location, arg_type,
				     integer_zero_node, arg));
    }
  else
    {
      error_at (location, "wrong type argument to %s",
		code == REALPART_EXPR ? "__real" : "__imag");
      ret = error_mark_node;
    }

  return ret;
}
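
A minimal source-level sketch of the non-complex branch above (GNU C
__real__/__imag__ extension; the function f below is hypothetical and used
only for illustration):

double f (void);

/* Same value as f ().  */
double r (void) { return __real__ f (); }

/* Always 0.0, but f () is still evaluated for its side effects,
   which is what the omit_one_operand_loc call above preserves.  */
double i (void) { return __imag__ f (); }
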
Example #3
tree
UI_To_gnu (Uint Input, tree type)
{
  /* We might have a TYPE with biased representation and be passed an unbiased
     value that doesn't fit.  We always use an unbiased type to be able to hold
     any such possible value for intermediate computations and then rely on a
     conversion back to TYPE to perform the bias adjustment when need be.  */
  tree comp_type
    = TREE_CODE (type) == INTEGER_TYPE && TYPE_BIASED_REPRESENTATION_P (type)
      ? get_base_type (type) : type;
  tree gnu_ret;

  if (Input <= Uint_Direct_Last)
    gnu_ret = build_cst_from_int (comp_type, Input - Uint_Direct_Bias);
  else
    {
      Int Idx = Uints_Ptr[Input].Loc;
      Pos Length = Uints_Ptr[Input].Length;
      Int First = Udigits_Ptr[Idx];
      tree gnu_base;

      gcc_assert (Length > 0);

      /* The computations we perform below always require a type at least as
	 large as an integer not to overflow.  FP types are always fine, but
	 INTEGER or ENUMERAL types we are handed may be too short.  We use a
	 base integer type node for the computations in this case and will
	 convert the final result back to the incoming type later on.  */
      if (!SCALAR_FLOAT_TYPE_P (comp_type) && TYPE_PRECISION (comp_type) < 32)
	comp_type = gnat_type_for_size (32, 0);

      gnu_base = build_cst_from_int (comp_type, Base);

      gnu_ret = build_cst_from_int (comp_type, First);
      if (First < 0)
	for (Idx++, Length--; Length; Idx++, Length--)
	  gnu_ret = fold_build2 (MINUS_EXPR, comp_type,
				 fold_build2 (MULT_EXPR, comp_type,
					      gnu_ret, gnu_base),
				 build_cst_from_int (comp_type,
						     Udigits_Ptr[Idx]));
      else
	for (Idx++, Length--; Length; Idx++, Length--)
	  gnu_ret = fold_build2 (PLUS_EXPR, comp_type,
				 fold_build2 (MULT_EXPR, comp_type,
					      gnu_ret, gnu_base),
				 build_cst_from_int (comp_type,
						     Udigits_Ptr[Idx]));
    }

  gnu_ret = convert (type, gnu_ret);

  /* We don't need any NOP_EXPR or NON_LVALUE_EXPR on GNU_RET.  */
  while ((TREE_CODE (gnu_ret) == NOP_EXPR
	  || TREE_CODE (gnu_ret) == NON_LVALUE_EXPR)
	 && TREE_TYPE (TREE_OPERAND (gnu_ret, 0)) == TREE_TYPE (gnu_ret))
    gnu_ret = TREE_OPERAND (gnu_ret, 0);

  return gnu_ret;
}
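
The two digit loops above are Horner's scheme in base Base. Writing the
digits as d0 = First, d1, ..., dk (a sketch, assuming the GNAT convention
that the trailing digits of a negative Uint are accumulated by subtraction,
as the MINUS_EXPR branch shows):

  First >= 0:  value = (...((d0*Base + d1)*Base + d2)...)*Base + dk
  First <  0:  value = (...((d0*Base - d1)*Base - d2)...)*Base - dk
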
Example #4
static tree
build_cst_from_int (tree type, HOST_WIDE_INT low)
{
  if (SCALAR_FLOAT_TYPE_P (type))
    return convert (type, build_int_cst (gnat_type_for_size (32, 0), low));
  else
    return build_int_cst_type (type, low);
}
Example #5
tree
c_finish_omp_atomic (location_t loc, enum tree_code code, tree lhs, tree rhs)
{
  tree x, type, addr;

  if (lhs == error_mark_node || rhs == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }

  /* ??? Validate that rhs does not overlap lhs.  */

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
  if (addr == error_mark_node)
    return error_mark_node;
  addr = save_expr (addr);
  if (TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || TREE_CODE (TREE_OPERAND (addr, 0)) != VAR_DECL))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr), NULL);
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  lhs = build_indirect_ref (loc, addr, RO_NULL);

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  x = build_modify_expr (input_location, lhs, NULL_TREE, code,
      			 input_location, rhs, NULL_TREE);
  if (x == error_mark_node)
    return error_mark_node;
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* Punt the actual generation of atomic operations to common code.  */
  x = build2 (OMP_ATOMIC, void_type_node, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  return x;
}
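
A minimal OpenMP C use of the statement form this function lowers
(hypothetical names; requires -fopenmp):

/* The pragma is turned into an OMP_ATOMIC built around the address of
   counter, as in the function above.  */
int counter;

void
bump (int n)
{
#pragma omp atomic
  counter += n;
}
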
Example #6
tree 
chrec_apply (unsigned var,
	     tree chrec, 
	     tree x)
{
  tree type = chrec_type (chrec);
  tree res = chrec_dont_know;

  if (automatically_generated_chrec_p (chrec)
      || automatically_generated_chrec_p (x)

      /* When the symbols are defined in an outer loop, it is possible
	 to symbolically compute the apply, since the symbols are
	 constants with respect to the varying loop.  */
      || chrec_contains_symbols_defined_in_loop (chrec, var))
    return chrec_dont_know;
 
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "(chrec_apply \n");

  if (TREE_CODE (x) == INTEGER_CST && SCALAR_FLOAT_TYPE_P (type))
    x = build_real_from_int_cst (type, x);

  if (evolution_function_is_affine_p (chrec))
    {
      /* "{a, +, b} (x)"  ->  "a + b*x".  */
      x = chrec_convert (type, x, NULL_TREE);
      res = chrec_fold_multiply (type, CHREC_RIGHT (chrec), x);
      if (!integer_zerop (CHREC_LEFT (chrec)))
	res = chrec_fold_plus (type, CHREC_LEFT (chrec), res);
    }
  
  else if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
    res = chrec;
  
  else if (TREE_CODE (x) == INTEGER_CST
	   && tree_int_cst_sgn (x) == 1)
    /* testsuite/.../ssa-chrec-38.c.  */
    res = chrec_evaluate (var, chrec, x, 0);
  else
    res = chrec_dont_know;
  
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  (varying_loop = %d\n", var);
      fprintf (dump_file, ")\n  (chrec = ");
      print_generic_expr (dump_file, chrec, 0);
      fprintf (dump_file, ")\n  (x = ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, ")\n  (res = ");
      print_generic_expr (dump_file, res, 0);
      fprintf (dump_file, "))\n");
    }
  
  return res;
}
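
A worked instance of the affine case above, with made-up values: applying
{3, +, 4}_1 at x = 5 computes CHREC_RIGHT * x = 4*5 = 20 and then adds
CHREC_LEFT, giving 3 + 20 = 23, i.e. the value of the evolution at iteration
number x = 5 (iteration 0 being the initial value a = 3).
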
Example #7
static inline tree 
chrec_fold_multiply_poly_poly (tree type, 
			       tree poly0, 
			       tree poly1)
{
  tree t0, t1, t2;
  int var;

  gcc_assert (poly0);
  gcc_assert (poly1);
  gcc_assert (TREE_CODE (poly0) == POLYNOMIAL_CHREC);
  gcc_assert (TREE_CODE (poly1) == POLYNOMIAL_CHREC);
  gcc_assert (chrec_type (poly0) == chrec_type (poly1));
  gcc_assert (type == chrec_type (poly0));
  
  /* {a, +, b}_1 * {c, +, d}_2  ->  {c*{a, +, b}_1, +, d}_2,
     {a, +, b}_2 * {c, +, d}_1  ->  {a*{c, +, d}_1, +, b}_2,
     {a, +, b}_x * {c, +, d}_x  ->  {a*c, +, a*d + b*c + b*d, +, 2*b*d}_x.  */
  if (CHREC_VARIABLE (poly0) < CHREC_VARIABLE (poly1))
    /* poly0 is a constant wrt. poly1.  */
    return build_polynomial_chrec 
      (CHREC_VARIABLE (poly1), 
       chrec_fold_multiply (type, CHREC_LEFT (poly1), poly0),
       CHREC_RIGHT (poly1));
  
  if (CHREC_VARIABLE (poly1) < CHREC_VARIABLE (poly0))
    /* poly1 is a constant wrt. poly0.  */
    return build_polynomial_chrec 
      (CHREC_VARIABLE (poly0), 
       chrec_fold_multiply (type, CHREC_LEFT (poly0), poly1),
       CHREC_RIGHT (poly0));
  
  /* poly0 and poly1 are two polynomials in the same variable,
     {a, +, b}_x * {c, +, d}_x  ->  {a*c, +, a*d + b*c + b*d, +, 2*b*d}_x.  */
      
  /* "a*c".  */
  t0 = chrec_fold_multiply (type, CHREC_LEFT (poly0), CHREC_LEFT (poly1));

  /* "a*d + b*c + b*d".  */
  t1 = chrec_fold_multiply (type, CHREC_LEFT (poly0), CHREC_RIGHT (poly1));
  t1 = chrec_fold_plus (type, t1, chrec_fold_multiply (type,
						       CHREC_RIGHT (poly0),
						       CHREC_LEFT (poly1)));
  t1 = chrec_fold_plus (type, t1, chrec_fold_multiply (type,
						       CHREC_RIGHT (poly0),
						       CHREC_RIGHT (poly1)));
  /* "2*b*d".  */
  t2 = chrec_fold_multiply (type, CHREC_RIGHT (poly0), CHREC_RIGHT (poly1));
  t2 = chrec_fold_multiply (type, SCALAR_FLOAT_TYPE_P (type)
			    ? build_real (type, dconst2)
			    : build_int_cst (type, 2), t2);

  var = CHREC_VARIABLE (poly0);
  return build_polynomial_chrec (var, t0,
				 build_polynomial_chrec (var, t1, t2));
}
Example #8
/* Classify an invariant tree into integer, float, or other, so that
   we can sort them to be near other constants of the same type.  */
static inline int
constant_type (tree t)
{
  if (INTEGRAL_TYPE_P (TREE_TYPE (t)))
    return INTEGER_CONST_TYPE;
  else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (t)))
    return FLOAT_CONST_TYPE;
  else
    return OTHER_CONST_TYPE;
}
Example #9
bool
useless_type_conversion_p (tree outer_type, tree inner_type)
{
  /* Do the following before stripping toplevel qualifiers.  */
  if (POINTER_TYPE_P (inner_type)
      && POINTER_TYPE_P (outer_type))
    {
      /* Do not lose casts between pointers to different address spaces.  */
      if (TYPE_ADDR_SPACE (TREE_TYPE (outer_type))
	  != TYPE_ADDR_SPACE (TREE_TYPE (inner_type)))
	return false;
      /* Do not lose casts to function pointer types.  */
      if ((TREE_CODE (TREE_TYPE (outer_type)) == FUNCTION_TYPE
	   || TREE_CODE (TREE_TYPE (outer_type)) == METHOD_TYPE)
	  && !(TREE_CODE (TREE_TYPE (inner_type)) == FUNCTION_TYPE
	       || TREE_CODE (TREE_TYPE (inner_type)) == METHOD_TYPE))
	return false;
    }

  /* From now on qualifiers on value types do not matter.  */
  inner_type = TYPE_MAIN_VARIANT (inner_type);
  outer_type = TYPE_MAIN_VARIANT (outer_type);

  if (inner_type == outer_type)
    return true;

  /* Changes in machine mode are never useless conversions because the RTL
     middle-end expects explicit conversions between modes.  */
  if (TYPE_MODE (inner_type) != TYPE_MODE (outer_type))
    return false;

  /* If both the inner and outer types are integral types, then the
     conversion is not necessary if they have the same mode and
     signedness and precision, and both or neither are boolean.  */
  if (INTEGRAL_TYPE_P (inner_type)
      && INTEGRAL_TYPE_P (outer_type))
    {
      /* Preserve changes in signedness or precision.  */
      if (TYPE_UNSIGNED (inner_type) != TYPE_UNSIGNED (outer_type)
	  || TYPE_PRECISION (inner_type) != TYPE_PRECISION (outer_type))
	return false;

      /* Preserve conversions to/from BOOLEAN_TYPE if types are not
	 of precision one.  */
      if (((TREE_CODE (inner_type) == BOOLEAN_TYPE)
	   != (TREE_CODE (outer_type) == BOOLEAN_TYPE))
	  && TYPE_PRECISION (outer_type) != 1)
	return false;

      /* We don't need to preserve changes in the types minimum or
	 maximum value in general as these do not generate code
	 unless the types precisions are different.  */
      return true;
    }

  /* Scalar floating point types with the same mode are compatible.  */
  else if (SCALAR_FLOAT_TYPE_P (inner_type)
	   && SCALAR_FLOAT_TYPE_P (outer_type))
    return true;

  /* Fixed point types with the same mode are compatible.  */
  else if (FIXED_POINT_TYPE_P (inner_type)
	   && FIXED_POINT_TYPE_P (outer_type))
    return true;

  /* We need to take special care recursing to pointed-to types.  */
  else if (POINTER_TYPE_P (inner_type)
	   && POINTER_TYPE_P (outer_type))
    {
      /* We do not care for const qualification of the pointed-to types
	 as const qualification has no semantic value to the middle-end.  */

      /* Otherwise pointers/references are equivalent.  */
      return true;
    }

  /* Recurse for complex types.  */
  else if (TREE_CODE (inner_type) == COMPLEX_TYPE
	   && TREE_CODE (outer_type) == COMPLEX_TYPE)
    return useless_type_conversion_p (TREE_TYPE (outer_type),
				      TREE_TYPE (inner_type));

  /* Recurse for vector types with the same number of subparts.  */
  else if (TREE_CODE (inner_type) == VECTOR_TYPE
	   && TREE_CODE (outer_type) == VECTOR_TYPE
	   && TYPE_PRECISION (inner_type) == TYPE_PRECISION (outer_type))
    return useless_type_conversion_p (TREE_TYPE (outer_type),
				      TREE_TYPE (inner_type));

  else if (TREE_CODE (inner_type) == ARRAY_TYPE
	   && TREE_CODE (outer_type) == ARRAY_TYPE)
    {
      /* Preserve various attributes.  */
      if (TYPE_REVERSE_STORAGE_ORDER (inner_type)
	  != TYPE_REVERSE_STORAGE_ORDER (outer_type))
	return false;
      if (TYPE_STRING_FLAG (inner_type) != TYPE_STRING_FLAG (outer_type))
	return false;

      /* Conversions from array types with unknown extent to
	 array types with known extent are not useless.  */
      if (!TYPE_DOMAIN (inner_type) && TYPE_DOMAIN (outer_type))
	return false;

      /* Nor are conversions from array types with non-constant size to
         array types with constant size or to different size.  */
      if (TYPE_SIZE (outer_type)
	  && TREE_CODE (TYPE_SIZE (outer_type)) == INTEGER_CST
	  && (!TYPE_SIZE (inner_type)
	      || TREE_CODE (TYPE_SIZE (inner_type)) != INTEGER_CST
	      || !tree_int_cst_equal (TYPE_SIZE (outer_type),
				      TYPE_SIZE (inner_type))))
	return false;

      /* Check conversions between arrays with partially known extents.
	 If the array min/max values are constant they have to match.
	 Otherwise allow conversions to unknown and variable extents.
	 In particular this declares conversions that may change the
	 mode to BLKmode as useless.  */
      if (TYPE_DOMAIN (inner_type)
	  && TYPE_DOMAIN (outer_type)
	  && TYPE_DOMAIN (inner_type) != TYPE_DOMAIN (outer_type))
	{
	  tree inner_min = TYPE_MIN_VALUE (TYPE_DOMAIN (inner_type));
	  tree outer_min = TYPE_MIN_VALUE (TYPE_DOMAIN (outer_type));
	  tree inner_max = TYPE_MAX_VALUE (TYPE_DOMAIN (inner_type));
	  tree outer_max = TYPE_MAX_VALUE (TYPE_DOMAIN (outer_type));

	  /* After gimplification a variable min/max value carries no
	     additional information compared to a NULL value.  All that
	     matters has been lowered to be part of the IL.  */
	  if (inner_min && TREE_CODE (inner_min) != INTEGER_CST)
	    inner_min = NULL_TREE;
	  if (outer_min && TREE_CODE (outer_min) != INTEGER_CST)
	    outer_min = NULL_TREE;
	  if (inner_max && TREE_CODE (inner_max) != INTEGER_CST)
	    inner_max = NULL_TREE;
	  if (outer_max && TREE_CODE (outer_max) != INTEGER_CST)
	    outer_max = NULL_TREE;

	  /* Conversions NULL / variable <- cst are useless, but not
	     the other way around.  */
	  if (outer_min
	      && (!inner_min
		  || !tree_int_cst_equal (inner_min, outer_min)))
	    return false;
	  if (outer_max
	      && (!inner_max
		  || !tree_int_cst_equal (inner_max, outer_max)))
	    return false;
	}

      /* Recurse on the element check.  */
      return useless_type_conversion_p (TREE_TYPE (outer_type),
					TREE_TYPE (inner_type));
    }

  else if ((TREE_CODE (inner_type) == FUNCTION_TYPE
	    || TREE_CODE (inner_type) == METHOD_TYPE)
	   && TREE_CODE (inner_type) == TREE_CODE (outer_type))
    {
      tree outer_parm, inner_parm;

      /* If the return types are not compatible bail out.  */
      if (!useless_type_conversion_p (TREE_TYPE (outer_type),
				      TREE_TYPE (inner_type)))
	return false;

      /* Method types should belong to a compatible base class.  */
      if (TREE_CODE (inner_type) == METHOD_TYPE
	  && !useless_type_conversion_p (TYPE_METHOD_BASETYPE (outer_type),
					 TYPE_METHOD_BASETYPE (inner_type)))
	return false;

      /* A conversion to an unprototyped argument list is ok.  */
      if (!prototype_p (outer_type))
	return true;

      /* If the unqualified argument types are compatible the conversion
	 is useless.  */
      if (TYPE_ARG_TYPES (outer_type) == TYPE_ARG_TYPES (inner_type))
	return true;

      for (outer_parm = TYPE_ARG_TYPES (outer_type),
	   inner_parm = TYPE_ARG_TYPES (inner_type);
	   outer_parm && inner_parm;
	   outer_parm = TREE_CHAIN (outer_parm),
	   inner_parm = TREE_CHAIN (inner_parm))
	if (!useless_type_conversion_p
	       (TYPE_MAIN_VARIANT (TREE_VALUE (outer_parm)),
		TYPE_MAIN_VARIANT (TREE_VALUE (inner_parm))))
	  return false;

      /* If there is a mismatch in the number of arguments the functions
	 are not compatible.  */
      if (outer_parm || inner_parm)
	return false;

      /* Defer to the target if necessary.  */
      if (TYPE_ATTRIBUTES (inner_type) || TYPE_ATTRIBUTES (outer_type))
	return comp_type_attributes (outer_type, inner_type) != 0;

      return true;
    }

  /* For aggregates we rely on TYPE_CANONICAL exclusively and require
     explicit conversions for types involving to be structurally
     compared types.  */
  else if (AGGREGATE_TYPE_P (inner_type)
	   && TREE_CODE (inner_type) == TREE_CODE (outer_type))
    return TYPE_CANONICAL (inner_type)
	   && TYPE_CANONICAL (inner_type) == TYPE_CANONICAL (outer_type);

  else if (TREE_CODE (inner_type) == OFFSET_TYPE
	   && TREE_CODE (outer_type) == OFFSET_TYPE)
    return useless_type_conversion_p (TREE_TYPE (outer_type),
				      TREE_TYPE (inner_type))
	   && useless_type_conversion_p
	        (TYPE_OFFSET_BASETYPE (outer_type),
		 TYPE_OFFSET_BASETYPE (inner_type));

  return false;
}
Example #10
static inline tree
chrec_fold_plus_poly_poly (enum tree_code code,
			   tree type,
			   tree poly0,
			   tree poly1)
{
  tree left, right;
  struct loop *loop0 = get_chrec_loop (poly0);
  struct loop *loop1 = get_chrec_loop (poly1);
  tree rtype = code == POINTER_PLUS_EXPR ? chrec_type (poly1) : type;

  gcc_assert (poly0);
  gcc_assert (poly1);
  gcc_assert (TREE_CODE (poly0) == POLYNOMIAL_CHREC);
  gcc_assert (TREE_CODE (poly1) == POLYNOMIAL_CHREC);
  if (POINTER_TYPE_P (chrec_type (poly0)))
    gcc_checking_assert (ptrofftype_p (chrec_type (poly1))
			 && useless_type_conversion_p (type, chrec_type (poly0)));
  else
    gcc_checking_assert (useless_type_conversion_p (type, chrec_type (poly0))
			 && useless_type_conversion_p (type, chrec_type (poly1)));

  /*
    {a, +, b}_1 + {c, +, d}_2  ->  {{a, +, b}_1 + c, +, d}_2,
    {a, +, b}_2 + {c, +, d}_1  ->  {{c, +, d}_1 + a, +, b}_2,
    {a, +, b}_x + {c, +, d}_x  ->  {a+c, +, b+d}_x.  */
  if (flow_loop_nested_p (loop0, loop1))
    {
      if (code == PLUS_EXPR || code == POINTER_PLUS_EXPR)
	return build_polynomial_chrec
	  (CHREC_VARIABLE (poly1),
	   chrec_fold_plus (type, poly0, CHREC_LEFT (poly1)),
	   CHREC_RIGHT (poly1));
      else
	return build_polynomial_chrec
	  (CHREC_VARIABLE (poly1),
	   chrec_fold_minus (type, poly0, CHREC_LEFT (poly1)),
	   chrec_fold_multiply (type, CHREC_RIGHT (poly1),
				SCALAR_FLOAT_TYPE_P (type)
				? build_real (type, dconstm1)
				: build_int_cst_type (type, -1)));
    }

  if (flow_loop_nested_p (loop1, loop0))
    {
      if (code == PLUS_EXPR || code == POINTER_PLUS_EXPR)
	return build_polynomial_chrec
	  (CHREC_VARIABLE (poly0),
	   chrec_fold_plus (type, CHREC_LEFT (poly0), poly1),
	   CHREC_RIGHT (poly0));
      else
	return build_polynomial_chrec
	  (CHREC_VARIABLE (poly0),
	   chrec_fold_minus (type, CHREC_LEFT (poly0), poly1),
	   CHREC_RIGHT (poly0));
    }

  /* This function should never be called for chrecs of loops that
     do not belong to the same loop nest.  */
  gcc_assert (loop0 == loop1);

  if (code == PLUS_EXPR || code == POINTER_PLUS_EXPR)
    {
      left = chrec_fold_plus
	(type, CHREC_LEFT (poly0), CHREC_LEFT (poly1));
      right = chrec_fold_plus
	(rtype, CHREC_RIGHT (poly0), CHREC_RIGHT (poly1));
    }
  else
    {
      left = chrec_fold_minus
	(type, CHREC_LEFT (poly0), CHREC_LEFT (poly1));
      right = chrec_fold_minus
	(type, CHREC_RIGHT (poly0), CHREC_RIGHT (poly1));
    }

  if (chrec_zerop (right))
    return left;
  else
    return build_polynomial_chrec
      (CHREC_VARIABLE (poly0), left, right);
}
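
The same-loop case above folds coordinate-wise, since chrecs over the same
loop add pointwise in each coefficient:

  (a + b*n) +/- (c + d*n) = (a +/- c) + (b +/- d)*n  ->  {a +/- c, +, b +/- d}_x

which is what the left/right computation builds; the result degenerates to
left alone when right folds to zero.
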
Example #11
static tree
chrec_fold_plus_1 (enum tree_code code, tree type,
		   tree op0, tree op1)
{
  if (automatically_generated_chrec_p (op0)
      || automatically_generated_chrec_p (op1))
    return chrec_fold_automatically_generated_operands (op0, op1);

  switch (TREE_CODE (op0))
    {
    case POLYNOMIAL_CHREC:
      gcc_checking_assert
	(!chrec_contains_symbols_defined_in_loop (op0, CHREC_VARIABLE (op0)));
      switch (TREE_CODE (op1))
	{
	case POLYNOMIAL_CHREC:
	  gcc_checking_assert
	    (!chrec_contains_symbols_defined_in_loop (op1,
						      CHREC_VARIABLE (op1)));
	  return chrec_fold_plus_poly_poly (code, type, op0, op1);

	CASE_CONVERT:
	  if (tree_contains_chrecs (op1, NULL))
	    return chrec_dont_know;
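	  /* No chrec inside the conversion: fall through and handle op1
	     like any other operand.  */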

	default:
	  if (code == PLUS_EXPR || code == POINTER_PLUS_EXPR)
	    return build_polynomial_chrec
	      (CHREC_VARIABLE (op0),
	       chrec_fold_plus (type, CHREC_LEFT (op0), op1),
	       CHREC_RIGHT (op0));
	  else
	    return build_polynomial_chrec
	      (CHREC_VARIABLE (op0),
	       chrec_fold_minus (type, CHREC_LEFT (op0), op1),
	       CHREC_RIGHT (op0));
	}

    CASE_CONVERT:
      if (tree_contains_chrecs (op0, NULL))
	return chrec_dont_know;
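      /* No chrec inside the conversion: fall through and handle op0
	 like any other operand.  */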

    default:
      switch (TREE_CODE (op1))
	{
	case POLYNOMIAL_CHREC:
	  gcc_checking_assert
	    (!chrec_contains_symbols_defined_in_loop (op1,
						      CHREC_VARIABLE (op1)));
	  if (code == PLUS_EXPR || code == POINTER_PLUS_EXPR)
	    return build_polynomial_chrec
	      (CHREC_VARIABLE (op1),
	       chrec_fold_plus (type, op0, CHREC_LEFT (op1)),
	       CHREC_RIGHT (op1));
	  else
	    return build_polynomial_chrec
	      (CHREC_VARIABLE (op1),
	       chrec_fold_minus (type, op0, CHREC_LEFT (op1)),
	       chrec_fold_multiply (type, CHREC_RIGHT (op1),
				    SCALAR_FLOAT_TYPE_P (type)
				    ? build_real (type, dconstm1)
				    : build_int_cst_type (type, -1)));

	CASE_CONVERT:
	  if (tree_contains_chrecs (op1, NULL))
	    return chrec_dont_know;
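	  /* No chrec inside the conversion: fall through and handle op1
	     like any other operand.  */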

	default:
	  {
	    int size = 0;
	    if ((tree_contains_chrecs (op0, &size)
		 || tree_contains_chrecs (op1, &size))
		&& size < PARAM_VALUE (PARAM_SCEV_MAX_EXPR_SIZE))
	      return build2 (code, type, op0, op1);
	    else if (size < PARAM_VALUE (PARAM_SCEV_MAX_EXPR_SIZE))
	      {
		if (code == POINTER_PLUS_EXPR)
		  return fold_build_pointer_plus (fold_convert (type, op0),
						  op1);
		else
		  return fold_build2 (code, type,
				      fold_convert (type, op0),
				      fold_convert (type, op1));
	      }
	    else
	      return chrec_dont_know;
	  }
	}
    }
}
Example #12
tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
		     enum tree_code opcode, tree lhs, tree rhs,
		     tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst)
{
  tree x, type, addr, pre = NULL_TREE;

  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }

  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ??? Validate that rhs does not overlap lhs.  */

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
  if (addr == error_mark_node)
    return error_mark_node;
  addr = save_expr (addr);
  if (TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || !VAR_P (TREE_OPERAND (addr, 0))))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  lhs = build_indirect_ref (loc, addr, RO_NULL);

  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_SEQ_CST (x) = seq_cst;
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
				loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (swapped)
    {
      rhs = build_binary_op (loc, opcode, rhs, lhs, 1);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_SEQ_CST (x) = seq_cst;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && VAR_P (rhs1)
      && VAR_P (lhs)
      && rhs1 != lhs)
    {
      if (code == OMP_ATOMIC)
	error_at (loc, "%<#pragma omp atomic update%> uses two different variables for memory");
      else
	error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
      return error_mark_node;
    }

  if (code != OMP_ATOMIC)
    {
      /* Generally it is hard to prove lhs1 and lhs are the same memory
	 location, just diagnose different variables.  */
      if (lhs1 && VAR_P (lhs1) && VAR_P (lhs))
	{
	  if (lhs1 != lhs)
	    {
	      error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
	      return error_mark_node;
	    }
	}
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
			     loc, x, NULL_TREE);
      if (rhs1 && rhs1 != lhs)
	{
	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
	  if (rhs1addr == error_mark_node)
	    return error_mark_node;
	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
	}
      if (lhs1 && lhs1 != lhs)
	{
	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, 0);
	  if (lhs1addr == error_mark_node)
	    return error_mark_node;
	  if (code == OMP_ATOMIC_CAPTURE_OLD)
	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
	  else
	    {
	      x = save_expr (x);
	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
	    }
	}
    }
  else if (rhs1 && rhs1 != lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
      if (rhs1addr == error_mark_node)
	return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}
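
A minimal OpenMP C use of the capture form this newer variant also handles
(hypothetical names; requires -fopenmp and OpenMP 3.1 atomic capture):

/* The structured block maps to an OMP_ATOMIC_CAPTURE_OLD: the old value of
   counter is stored into old while counter is updated atomically.  */
int counter;

int
fetch_and_add (int n)
{
  int old;
#pragma omp atomic capture
  { old = counter; counter += n; }
  return old;
}
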
Example #13
static inline tree 
chrec_fold_plus_poly_poly (enum tree_code code, 
			   tree type, 
			   tree poly0, 
			   tree poly1)
{
  tree left, right;

  gcc_assert (poly0);
  gcc_assert (poly1);
  gcc_assert (TREE_CODE (poly0) == POLYNOMIAL_CHREC);
  gcc_assert (TREE_CODE (poly1) == POLYNOMIAL_CHREC);
  gcc_assert (chrec_type (poly0) == chrec_type (poly1));
  gcc_assert (type == chrec_type (poly0));
  
  /*
    {a, +, b}_1 + {c, +, d}_2  ->  {{a, +, b}_1 + c, +, d}_2,
    {a, +, b}_2 + {c, +, d}_1  ->  {{c, +, d}_1 + a, +, b}_2,
    {a, +, b}_x + {c, +, d}_x  ->  {a+c, +, b+d}_x.  */
  if (CHREC_VARIABLE (poly0) < CHREC_VARIABLE (poly1))
    {
      if (code == PLUS_EXPR)
	return build_polynomial_chrec 
	  (CHREC_VARIABLE (poly1), 
	   chrec_fold_plus (type, poly0, CHREC_LEFT (poly1)),
	   CHREC_RIGHT (poly1));
      else
	return build_polynomial_chrec 
	  (CHREC_VARIABLE (poly1), 
	   chrec_fold_minus (type, poly0, CHREC_LEFT (poly1)),
	   chrec_fold_multiply (type, CHREC_RIGHT (poly1), 
				SCALAR_FLOAT_TYPE_P (type)
				? build_real (type, dconstm1)
				: build_int_cst_type (type, -1)));
    }
  
  if (CHREC_VARIABLE (poly0) > CHREC_VARIABLE (poly1))
    {
      if (code == PLUS_EXPR)
	return build_polynomial_chrec 
	  (CHREC_VARIABLE (poly0), 
	   chrec_fold_plus (type, CHREC_LEFT (poly0), poly1),
	   CHREC_RIGHT (poly0));
      else
	return build_polynomial_chrec 
	  (CHREC_VARIABLE (poly0), 
	   chrec_fold_minus (type, CHREC_LEFT (poly0), poly1),
	   CHREC_RIGHT (poly0));
    }
  
  if (code == PLUS_EXPR)
    {
      left = chrec_fold_plus 
	(type, CHREC_LEFT (poly0), CHREC_LEFT (poly1));
      right = chrec_fold_plus 
	(type, CHREC_RIGHT (poly0), CHREC_RIGHT (poly1));
    }
  else
    {
      left = chrec_fold_minus 
	(type, CHREC_LEFT (poly0), CHREC_LEFT (poly1));
      right = chrec_fold_minus 
	(type, CHREC_RIGHT (poly0), CHREC_RIGHT (poly1));
    }

  if (chrec_zerop (right))
    return left;
  else
    return build_polynomial_chrec 
      (CHREC_VARIABLE (poly0), left, right); 
}