Example #1
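/* Pack into bitpack BP the scalar fields of type EXPR that belong to the
   TS_TYPE_COMMON structure: mode, flag bits, precision, alignment, and a
   marker for alias-set zero.  */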
static void
pack_ts_type_common_value_fields (struct bitpack_d *bp, tree expr)
{
  bp_pack_enum (bp, machine_mode, MAX_MACHINE_MODE, TYPE_MODE (expr));
  bp_pack_value (bp, TYPE_STRING_FLAG (expr), 1);
  bp_pack_value (bp, TYPE_NO_FORCE_BLK (expr), 1);
  bp_pack_value (bp, TYPE_NEEDS_CONSTRUCTING (expr), 1);
  if (RECORD_OR_UNION_TYPE_P (expr))
    {
      bp_pack_value (bp, TYPE_TRANSPARENT_AGGR (expr), 1);
      bp_pack_value (bp, TYPE_FINAL_P (expr), 1);
    }
  else if (TREE_CODE (expr) == ARRAY_TYPE)
    bp_pack_value (bp, TYPE_NONALIASED_COMPONENT (expr), 1);
  bp_pack_value (bp, TYPE_PACKED (expr), 1);
  bp_pack_value (bp, TYPE_RESTRICT (expr), 1);
  bp_pack_value (bp, TYPE_USER_ALIGN (expr), 1);
  bp_pack_value (bp, TYPE_READONLY (expr), 1);
  bp_pack_var_len_unsigned (bp, TYPE_PRECISION (expr));
  bp_pack_var_len_unsigned (bp, TYPE_ALIGN (expr));
  /* Make sure to preserve whether the frontend would assign
     alias-set zero to this type.  */
  bp_pack_var_len_int (bp, (TYPE_ALIAS_SET (expr) == 0
			    || (!in_lto_p
				&& get_alias_set (expr) == 0)) ? 0 : -1);
}
Example #2
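/* Record conflicts between stack variables whose aggregate-ness differs or
   whose alias sets do not conflict, so that such variables are never
   coalesced into the same stack slot.  */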
static void
add_alias_set_conflicts (void)
{
  size_t i, j, n = stack_vars_num;

  for (i = 0; i < n; ++i)
    {
      bool aggr_i = AGGREGATE_TYPE_P (TREE_TYPE (stack_vars[i].decl));
      HOST_WIDE_INT set_i = get_alias_set (stack_vars[i].decl);

      for (j = 0; j < i; ++j)
	{
	  bool aggr_j = AGGREGATE_TYPE_P (TREE_TYPE (stack_vars[j].decl));
	  HOST_WIDE_INT set_j = get_alias_set (stack_vars[j].decl);
	  if (aggr_i != aggr_j || !alias_sets_conflict_p (set_i, set_j))
	    add_stack_var_conflict (i, j);
	}
    }
}
Example #3
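/* Return the alias set to be used for the C++ type T, handling the as-base
   variant of a class and pointer-to-member-function types specially.  */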
HOST_WIDE_INT
cxx_get_alias_set (tree t)
{
  if (IS_FAKE_BASE_TYPE (t))
    /* The base variant of a type must be in the same alias set as the
       complete type.  */
    return get_alias_set (TYPE_CONTEXT (t));

  /* Punt on PMFs until we canonicalize functions properly.  */
  if (TYPE_PTRMEMFUNC_P (t))
    return 0;

  return c_common_get_alias_set (t);
}
Example #4
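/* Return the alias set for the C++ type T, punting (alias set 0) on classes
   that are not yet safe to disambiguate and on pointer-to-member-function
   types.  */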
static HOST_WIDE_INT
cxx_get_alias_set (tree t)
{
  if (TREE_CODE (t) == RECORD_TYPE
      && TYPE_CONTEXT (t) && CLASS_TYPE_P (TYPE_CONTEXT (t))
      && CLASSTYPE_AS_BASE (TYPE_CONTEXT (t)) == t)
    /* The base variant of a type must be in the same alias set as the
       complete type.  */
    return get_alias_set (TYPE_CONTEXT (t));

  if (/* It's not yet safe to use alias sets for some classes in C++.  */
      !ok_to_generate_alias_set_for_type (t)
      /* Nor is it safe to use alias sets for pointers-to-member
         functions, due to the fact that there may be more than one
         RECORD_TYPE type corresponding to the same pointer-to-member
         type.  */
      || TYPE_PTRMEMFUNC_P (t))
    return 0;

  return c_common_get_alias_set (t);
}
Example #5
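/* Return true if ORIG may be copy-propagated into DEST, i.e. if replacing
   uses of DEST with ORIG is safe.  */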
bool
may_propagate_copy (tree dest, tree orig)
{
  tree type_d = TREE_TYPE (dest);
  tree type_o = TREE_TYPE (orig);

  /* Do not copy between types for which we *do* need a conversion.  */
  if (!tree_ssa_useless_type_conversion_1 (type_d, type_o))
    return false;

  /* FIXME.  GIMPLE is allowing pointer assignments and comparisons of
     pointers that have different alias sets.  This means that these
     pointers will have different memory tags associated to them.

     If we allow copy propagation in these cases, statements de-referencing
     the new pointer will now have a reference to a different memory tag
     with potentially incorrect SSA information.

     This was showing up in libjava/java/util/zip/ZipFile.java with code
     like:

     	struct java.io.BufferedInputStream *T.660;
	struct java.io.BufferedInputStream *T.647;
	struct java.io.InputStream *is;
	struct java.io.InputStream *is.662;
	[ ... ]
	T.660 = T.647;
	is = T.660;	<-- This ought to be type-casted
	is.662 = is;

     Also, f/name.c exposed a similar problem with a COND_EXPR predicate
     that was causing DOM to generate an equivalence with two pointers of
     alias-incompatible types:

     	struct _ffename_space *n;
	struct _ffename *ns;
	[ ... ]
	if (n == ns)
	  goto lab;
	...
	lab:
	return n;

     I think that GIMPLE should emit the appropriate type-casts.  For the
     time being, blocking copy-propagation in these cases is the safe thing
     to do.  */
  if (TREE_CODE (dest) == SSA_NAME
      && TREE_CODE (orig) == SSA_NAME
      && POINTER_TYPE_P (type_d)
      && POINTER_TYPE_P (type_o))
    {
      tree mt_dest = var_ann (SSA_NAME_VAR (dest))->type_mem_tag;
      tree mt_orig = var_ann (SSA_NAME_VAR (orig))->type_mem_tag;
      if (mt_dest && mt_orig && mt_dest != mt_orig)
	return false;
      else if (!lang_hooks.types_compatible_p (type_d, type_o))
	return false;
      else if (get_alias_set (TREE_TYPE (type_d)) != 
	       get_alias_set (TREE_TYPE (type_o)))
	return false;
    }

  /* If the destination is a SSA_NAME for a virtual operand, then we have
     some special cases to handle.  */
  if (TREE_CODE (dest) == SSA_NAME && !is_gimple_reg (dest))
    {
      /* If both operands are SSA_NAMEs referring to virtual operands, then
	 we can always propagate.  */
      if (TREE_CODE (orig) == SSA_NAME
	  && !is_gimple_reg (orig))
	return true;

      /* We have a "copy" from something like a constant into a virtual
	 operand.  Reject these.  */
      return false;
    }

  /* If ORIG flows in from an abnormal edge, it cannot be propagated.  */
  if (TREE_CODE (orig) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig))
    return false;

  /* If DEST is an SSA_NAME that flows from an abnormal edge, then it
     cannot be replaced.  */
  if (TREE_CODE (dest) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (dest))
    return false;

  /* Anything else is OK.  */
  return true;
}
Example #6
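/* Fragment: merge the alias information (type memory tags) of the pointer
   SSA names ORIG and NEW after NEW has been propagated into uses of ORIG.  */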
  tree new_sym = SSA_NAME_VAR (new);
  tree orig_sym = SSA_NAME_VAR (orig);
  var_ann_t new_ann = var_ann (new_sym);
  var_ann_t orig_ann = var_ann (orig_sym);

  gcc_assert (POINTER_TYPE_P (TREE_TYPE (orig)));
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (new)));

#if defined ENABLE_CHECKING
  gcc_assert (lang_hooks.types_compatible_p (TREE_TYPE (orig),
					     TREE_TYPE (new)));

  /* If the pointed-to alias sets are different, these two pointers
     would never have the same memory tag.  In this case, NEW should
     not have been propagated into ORIG.  */
  gcc_assert (get_alias_set (TREE_TYPE (TREE_TYPE (new_sym)))
	      == get_alias_set (TREE_TYPE (TREE_TYPE (orig_sym))));
#endif

  /* Synchronize the type tags.  If both pointers had a tag and they
     are different, then something has gone wrong.  Type tags can
     always be merged because they are flow insensitive; all the SSA
     names of the same base DECL share the same type tag.  */
  if (new_ann->type_mem_tag == NULL_TREE)
    new_ann->type_mem_tag = orig_ann->type_mem_tag;
  else if (orig_ann->type_mem_tag == NULL_TREE)
    orig_ann->type_mem_tag = new_ann->type_mem_tag;
  else
    gcc_assert (new_ann->type_mem_tag == orig_ann->type_mem_tag);

  /* Check that flow-sensitive information is compatible.  ...  */
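Example #7
/* Helper for forward_propagate_addr_expr: try to forward propagate the
   ADDR_EXPR DEF_RHS, which defines the SSA name NAME, into the statement
   at *USE_STMT_GSI.  SINGLE_USE_P says whether NAME is used only once.  */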
static bool
forward_propagate_addr_expr_1 (tree name, tree def_rhs,
			       gimple_stmt_iterator *use_stmt_gsi,
			       bool single_use_p)
{
  tree lhs, rhs, rhs2, array_ref;
  tree *rhsp, *lhsp;
  gimple use_stmt = gsi_stmt (*use_stmt_gsi);
  enum tree_code rhs_code;
  bool res = true;

  gcc_assert (TREE_CODE (def_rhs) == ADDR_EXPR);

  lhs = gimple_assign_lhs (use_stmt);
  rhs_code = gimple_assign_rhs_code (use_stmt);
  rhs = gimple_assign_rhs1 (use_stmt);

  /* Trivial cases.  The use statement could be a trivial copy or a
     useless conversion.  Recurse to the uses of the lhs as copyprop does
     not copy through different variant pointers and FRE does not catch
     all useless conversions.  Treat the case of a single-use name and
     a conversion to def_rhs type separately, though.  */
  if (TREE_CODE (lhs) == SSA_NAME
      && ((rhs_code == SSA_NAME && rhs == name)
	  || CONVERT_EXPR_CODE_P (rhs_code)))
    {
      /* Only recurse if we don't deal with a single use or we cannot
	 do the propagation to the current statement.  In particular
	 we can end up with a conversion needed for a non-invariant
	 address which we cannot do in a single statement.  */
      if (!single_use_p
	  || (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (def_rhs))
	      && (!is_gimple_min_invariant (def_rhs)
		  || (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
		      && POINTER_TYPE_P (TREE_TYPE (def_rhs))
		      && (TYPE_PRECISION (TREE_TYPE (lhs))
			  > TYPE_PRECISION (TREE_TYPE (def_rhs)))))))
	return forward_propagate_addr_expr (lhs, def_rhs);

      gimple_assign_set_rhs1 (use_stmt, unshare_expr (def_rhs));
      if (useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (def_rhs)))
	gimple_assign_set_rhs_code (use_stmt, TREE_CODE (def_rhs));
      else
	gimple_assign_set_rhs_code (use_stmt, NOP_EXPR);
      return true;
    }

  /* Now strip away any outer COMPONENT_REF/ARRAY_REF nodes from the LHS.
     ADDR_EXPR will not appear on the LHS.  */
  lhsp = gimple_assign_lhs_ptr (use_stmt);
  while (handled_component_p (*lhsp))
    lhsp = &TREE_OPERAND (*lhsp, 0);
  lhs = *lhsp;

  /* Now see if the LHS node is an INDIRECT_REF using NAME.  If so,
     propagate the ADDR_EXPR into the use of NAME and fold the result.  */
  if (TREE_CODE (lhs) == INDIRECT_REF
      && TREE_OPERAND (lhs, 0) == name)
    {
      if (may_propagate_address_into_dereference (def_rhs, lhs)
	  && (lhsp != gimple_assign_lhs_ptr (use_stmt)
	      || useless_type_conversion_p
	           (TREE_TYPE (TREE_OPERAND (def_rhs, 0)), TREE_TYPE (rhs))))
	{
	  *lhsp = unshare_expr (TREE_OPERAND (def_rhs, 0));
	  fold_stmt_inplace (use_stmt);
	  tidy_after_forward_propagate_addr (use_stmt);

	  /* Continue propagating into the RHS if this was not the only use.  */
	  if (single_use_p)
	    return true;
	}
      else
	/* We can have a struct assignment dereferencing our name twice.
	   Note that we didn't propagate into the lhs to not falsely
	   claim we did when propagating into the rhs.  */
	res = false;
    }

  /* Strip away any outer COMPONENT_REF, ARRAY_REF or ADDR_EXPR
     nodes from the RHS.  */
  rhsp = gimple_assign_rhs1_ptr (use_stmt);
  while (handled_component_p (*rhsp)
	 || TREE_CODE (*rhsp) == ADDR_EXPR)
    rhsp = &TREE_OPERAND (*rhsp, 0);
  rhs = *rhsp;

  /* Now see if the RHS node is an INDIRECT_REF using NAME.  If so,
     propagate the ADDR_EXPR into the use of NAME and fold the result.  */
  if (TREE_CODE (rhs) == INDIRECT_REF
      && TREE_OPERAND (rhs, 0) == name
      && may_propagate_address_into_dereference (def_rhs, rhs))
    {
      *rhsp = unshare_expr (TREE_OPERAND (def_rhs, 0));
      fold_stmt_inplace (use_stmt);
      tidy_after_forward_propagate_addr (use_stmt);
      return res;
    }

  /* Now see if the RHS node is an INDIRECT_REF using NAME.  If so,
     propagate the ADDR_EXPR into the use of NAME and try to
     create a VCE and fold the result.  */
  if (TREE_CODE (rhs) == INDIRECT_REF
      && TREE_OPERAND (rhs, 0) == name
      && TYPE_SIZE (TREE_TYPE (rhs))
      && TYPE_SIZE (TREE_TYPE (TREE_OPERAND (def_rhs, 0)))
      /* Function decls should not be used for VCE either as it could be a
         function descriptor that we want and not the actual function code.  */
      && TREE_CODE (TREE_OPERAND (def_rhs, 0)) != FUNCTION_DECL
      /* We should not convert volatile loads to non-volatile loads.  */
      && !TYPE_VOLATILE (TREE_TYPE (rhs))
      && !TYPE_VOLATILE (TREE_TYPE (TREE_OPERAND (def_rhs, 0)))
      && operand_equal_p (TYPE_SIZE (TREE_TYPE (rhs)),
			  TYPE_SIZE (TREE_TYPE (TREE_OPERAND (def_rhs, 0))), 0)
      /* Make sure we only do TBAA compatible replacements.  */
      && get_alias_set (TREE_OPERAND (def_rhs, 0)) == get_alias_set (rhs))
   {
     tree def_rhs_base, new_rhs = unshare_expr (TREE_OPERAND (def_rhs, 0));
     new_rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (rhs), new_rhs);
     if (TREE_CODE (new_rhs) != VIEW_CONVERT_EXPR)
       {
	 /* If we have folded the VIEW_CONVERT_EXPR then the result is only
	    valid if we can replace the whole rhs of the use statement.  */
	 if (rhs != gimple_assign_rhs1 (use_stmt))
	   return false;
	 new_rhs = force_gimple_operand_gsi (use_stmt_gsi, new_rhs, true, NULL,
					     true, GSI_NEW_STMT);
	 gimple_assign_set_rhs1 (use_stmt, new_rhs);
	 tidy_after_forward_propagate_addr (use_stmt);
	 return res;
       }
     /* If the defining rhs comes from an indirect reference, then do not
        convert into a VIEW_CONVERT_EXPR.  */
     def_rhs_base = TREE_OPERAND (def_rhs, 0);
     while (handled_component_p (def_rhs_base))
       def_rhs_base = TREE_OPERAND (def_rhs_base, 0);
     if (!INDIRECT_REF_P (def_rhs_base))
       {
	 /* We may have arbitrary VIEW_CONVERT_EXPRs in a nested component
	    reference.  Place it there and fold the thing.  */
	 *rhsp = new_rhs;
	 fold_stmt_inplace (use_stmt);
	 tidy_after_forward_propagate_addr (use_stmt);
	 return res;
       }
   }

  /* If the use of the ADDR_EXPR is not a POINTER_PLUS_EXPR, there
     is nothing to do. */
  if (gimple_assign_rhs_code (use_stmt) != POINTER_PLUS_EXPR
      || gimple_assign_rhs1 (use_stmt) != name)
    return false;

  /* The remaining cases are all for turning pointer arithmetic into
     array indexing.  They only apply when we have the address of
     element zero in an array.  If that is not the case then there
     is nothing to do.  */
  array_ref = TREE_OPERAND (def_rhs, 0);
  if (TREE_CODE (array_ref) != ARRAY_REF
      || TREE_CODE (TREE_TYPE (TREE_OPERAND (array_ref, 0))) != ARRAY_TYPE
      || TREE_CODE (TREE_OPERAND (array_ref, 1)) != INTEGER_CST)
    return false;

  rhs2 = gimple_assign_rhs2 (use_stmt);
  /* Try to optimize &x[C1] p+ C2 where C2 is a multiple of the size
     of the elements in X into &x[C1 + C2/element size].  */
  if (TREE_CODE (rhs2) == INTEGER_CST)
    {
      tree new_rhs = maybe_fold_stmt_addition (gimple_location (use_stmt),
	  				       TREE_TYPE (def_rhs),
					       def_rhs, rhs2);
      if (new_rhs)
	{
	  tree type = TREE_TYPE (gimple_assign_lhs (use_stmt));
	  new_rhs = unshare_expr (new_rhs);
	  if (!useless_type_conversion_p (type, TREE_TYPE (new_rhs)))
	    {
	      if (!is_gimple_min_invariant (new_rhs))
		new_rhs = force_gimple_operand_gsi (use_stmt_gsi, new_rhs,
						    true, NULL_TREE,
						    true, GSI_SAME_STMT);
	      new_rhs = fold_convert (type, new_rhs);
	    }
	  gimple_assign_set_rhs_from_tree (use_stmt_gsi, new_rhs);
	  use_stmt = gsi_stmt (*use_stmt_gsi);
	  update_stmt (use_stmt);
	  tidy_after_forward_propagate_addr (use_stmt);
	  return true;
	}
    }

  /* Try to optimize &x[0] p+ OFFSET where OFFSET is defined by
     converting a multiplication of an index by the size of the
     array elements, then the result is converted into the proper
     type for the arithmetic.  */
  if (TREE_CODE (rhs2) == SSA_NAME
      && integer_zerop (TREE_OPERAND (array_ref, 1))
      && useless_type_conversion_p (TREE_TYPE (name), TREE_TYPE (def_rhs))
      /* Avoid problems with IVopts creating PLUS_EXPRs with a
	 different type than their operands.  */
      && useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (def_rhs)))
    return forward_propagate_addr_into_variable_array_index (rhs2, def_rhs,
							     use_stmt_gsi);
  return false;
}
Example #8
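/* Check whether TYPE is compatible with PREVAILING_TYPE and return a bitmask
   of detected mismatches: bit 0 for an ordinary type incompatibility, bit 1
   for a C++ ODR violation, bit 2 for a TBAA (alias set) mismatch.
   COMMON_OR_EXTERN indicates that one of the declarations being merged is
   COMMON or extern.  */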
static int
warn_type_compatibility_p (tree prevailing_type, tree type,
			   bool common_or_extern)
{
  int lev = 0;
  bool odr_p = odr_or_derived_type_p (prevailing_type)
	       && odr_or_derived_type_p (type);

  if (prevailing_type == type)
    return 0;

  /* C++ provides a robust way to check for type compatibility via the ODR
     rule.  */
  if (odr_p && !odr_types_equivalent_p (prevailing_type, type))
    lev |= 2;

  /* Function types need special care, because types_compatible_p never
     considers a prototype compatible with a non-prototype.  */
  if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
    {
      if (TREE_CODE (type) != TREE_CODE (prevailing_type))
	lev |= 1;
      lev |= warn_type_compatibility_p (TREE_TYPE (prevailing_type),
				        TREE_TYPE (type), false);
      if (TREE_CODE (type) == METHOD_TYPE
	  && TREE_CODE (prevailing_type) == METHOD_TYPE)
	lev |= warn_type_compatibility_p (TYPE_METHOD_BASETYPE (prevailing_type),
					  TYPE_METHOD_BASETYPE (type), false);
      if (prototype_p (prevailing_type) && prototype_p (type)
	  && TYPE_ARG_TYPES (prevailing_type) != TYPE_ARG_TYPES (type))
	{
	  tree parm1, parm2;
	  for (parm1 = TYPE_ARG_TYPES (prevailing_type),
	       parm2 = TYPE_ARG_TYPES (type);
	       parm1 && parm2;
	       parm1 = TREE_CHAIN (parm1),
	       parm2 = TREE_CHAIN (parm2))
	    lev |= warn_type_compatibility_p (TREE_VALUE (parm1),
					      TREE_VALUE (parm2), false);
	  if (parm1 || parm2)
	    lev |= odr_p ? 3 : 1;
	}
      if (comp_type_attributes (prevailing_type, type) == 0)
	lev |= 1;
      return lev;
    }

  /* Get complete type.  */
  prevailing_type = TYPE_MAIN_VARIANT (prevailing_type);
  type = TYPE_MAIN_VARIANT (type);

  /* We cannot use types_compatible_p because we permit some changes
     across types.  For example, unsigned size_t and "signed size_t" may be
     compatible when merging C and Fortran types.  */
  if (COMPLETE_TYPE_P (prevailing_type)
      && COMPLETE_TYPE_P (type)
      /* While global declarations are never variadic, we can recurse here
	 for function parameter types.  */
      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
      && TREE_CODE (TYPE_SIZE (prevailing_type)) == INTEGER_CST
      && !tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prevailing_type)))
    {
       /* As a special case do not warn about merging
	  int a[];
	  and
	  int a[]={1,2,3};
	  here the first declaration is COMMON or EXTERN
	  and sizeof(a) == sizeof (int).  */
       if (!common_or_extern
	   || TREE_CODE (type) != ARRAY_TYPE
	   || TYPE_SIZE (type) != TYPE_SIZE (TREE_TYPE (type)))
         lev |= 1;
    }

  /* Verify TBAA compatibility.  Take care of alias set 0 and the fact that
     we make ptr_type_node TBAA compatible with every other type.  */
  if (type_with_alias_set_p (type) && type_with_alias_set_p (prevailing_type))
    {
      alias_set_type set1 = get_alias_set (type);
      alias_set_type set2 = get_alias_set (prevailing_type);

      if (set1 && set2 && set1 != set2 
          && (!POINTER_TYPE_P (type) || !POINTER_TYPE_P (prevailing_type)
	      || (set1 != TYPE_ALIAS_SET (ptr_type_node)
		  && set2 != TYPE_ALIAS_SET (ptr_type_node))))
        lev |= 5;
    }

  return lev;
}