Example #1
/* Return true if LAT1 and LAT2 are equal.  */
static inline bool
ipcp_lats_are_equal (struct ipcp_lattice *lat1, struct ipcp_lattice *lat2)
{
  gcc_assert (ipcp_lat_is_const (lat1) && ipcp_lat_is_const (lat2));
  if (lat1->type != lat2->type)
    return false;

  if (TREE_CODE (lat1->constant) == ADDR_EXPR
      && TREE_CODE (lat2->constant) == ADDR_EXPR
      && TREE_CODE (TREE_OPERAND (lat1->constant, 0)) == CONST_DECL
      && TREE_CODE (TREE_OPERAND (lat2->constant, 0)) == CONST_DECL)
    return operand_equal_p (DECL_INITIAL (TREE_OPERAND (lat1->constant, 0)),
			    DECL_INITIAL (TREE_OPERAND (lat2->constant, 0)), 0);
  else
    return operand_equal_p (lat1->constant, lat2->constant, 0);
}
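
The pattern above reduces to structural equality on expression trees: equal codes first, then equal operands, with special cases handled up front. A minimal, self-contained sketch of that idea (toy types only, not the GCC tree API):

/* Toy structural equality in the spirit of operand_equal_p.  */
#include <stdbool.h>
#include <stdio.h>

enum toy_code { TOY_CST, TOY_PLUS, TOY_MULT };

struct toy_expr {
  enum toy_code code;
  long value;                    /* Used when code == TOY_CST.  */
  const struct toy_expr *op[2];  /* Used for the binary codes.  */
};

static bool
toy_operand_equal_p (const struct toy_expr *a, const struct toy_expr *b)
{
  if (a == b)
    return true;                 /* Pointer equality is the fast path.  */
  if (!a || !b || a->code != b->code)
    return false;
  if (a->code == TOY_CST)
    return a->value == b->value;
  return toy_operand_equal_p (a->op[0], b->op[0])
	 && toy_operand_equal_p (a->op[1], b->op[1]);
}

int
main (void)
{
  struct toy_expr one  = { TOY_CST, 1, { NULL, NULL } };
  struct toy_expr one2 = { TOY_CST, 1, { NULL, NULL } };
  struct toy_expr sum1 = { TOY_PLUS, 0, { &one, &one } };
  struct toy_expr sum2 = { TOY_PLUS, 0, { &one2, &one } };
  printf ("%d\n", toy_operand_equal_p (&sum1, &sum2));  /* Prints 1.  */
  return 0;
}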
Example #2
  /* Walk the CONSTRUCTOR elements and bail out with NULL_TREE as soon
     as two of them differ; PREV tracks the first value seen.  */
  FOR_EACH_VEC_ELT (constructor_elt, vec, i, elt)
    {
      if (!prev)
	prev = elt->value;
      else if (!operand_equal_p (elt->value, prev, OEP_ONLY_CONST))
	return NULL_TREE;
    }
Example #3
static bool
equal_mem_array_ref_p (tree t0, tree t1)
{
  if (TREE_CODE (t0) != MEM_REF && ! handled_component_p (t0))
    return false;
  if (TREE_CODE (t1) != MEM_REF && ! handled_component_p (t1))
    return false;

  if (!types_compatible_p (TREE_TYPE (t0), TREE_TYPE (t1)))
    return false;
  bool rev0;
  HOST_WIDE_INT off0, sz0, max0;
  tree base0 = get_ref_base_and_extent (t0, &off0, &sz0, &max0, &rev0);
  if (sz0 == -1
      || sz0 != max0)
    return false;

  bool rev1;
  HOST_WIDE_INT off1, sz1, max1;
  tree base1 = get_ref_base_and_extent (t1, &off1, &sz1, &max1, &rev1);
  if (sz1 == -1
      || sz1 != max1)
    return false;

  if (rev0 != rev1)
    return false;

  /* Types were compatible, so this is a sanity check.  */
  gcc_assert (sz0 == sz1);

  return (off0 == off1) && operand_equal_p (base0, base1, 0);
}
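
Here each reference is reduced to a (base, constant offset, constant size) description and the descriptions are compared field-wise; get_ref_base_and_extent does the reduction on real trees. The same comparison over a hypothetical descriptor struct:

#include <stdbool.h>

struct toy_ref {
  int base_id;               /* Stands in for the base tree.  */
  long off, size, max_size;  /* size == -1 means variable-sized.  */
};

static bool
toy_equal_ref_p (const struct toy_ref *r0, const struct toy_ref *r1)
{
  /* Reject references whose extent is unknown or variable, exactly as
     the sz == -1 and sz != max checks above do.  */
  if (r0->size == -1 || r0->size != r0->max_size)
    return false;
  if (r1->size == -1 || r1->size != r1->max_size)
    return false;
  return r0->size == r1->size
	 && r0->off == r1->off
	 && r0->base_id == r1->base_id;
}

int
main (void)
{
  struct toy_ref a = { 1, 0, 32, 32 }, b = { 1, 0, 32, 32 };
  return !toy_equal_ref_p (&a, &b);  /* Exit status 0: equal.  */
}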
Example #4
bool 
eq_evolutions_p (tree chrec0, 
		 tree chrec1)
{
  if (chrec0 == NULL_TREE
      || chrec1 == NULL_TREE
      || TREE_CODE (chrec0) != TREE_CODE (chrec1))
    return false;

  if (chrec0 == chrec1)
    return true;

  switch (TREE_CODE (chrec0))
    {
    case INTEGER_CST:
      return operand_equal_p (chrec0, chrec1, 0);

    case POLYNOMIAL_CHREC:
      return (CHREC_VARIABLE (chrec0) == CHREC_VARIABLE (chrec1)
	      && eq_evolutions_p (CHREC_LEFT (chrec0), CHREC_LEFT (chrec1))
	      && eq_evolutions_p (CHREC_RIGHT (chrec0), CHREC_RIGHT (chrec1)));
    default:
      return false;
    }  
}
Example #5
bool
expressions_equal_p (tree e1, tree e2)
{
  tree te1, te2;
  
  if (e1 == e2)
    return true;

  te1 = TREE_TYPE (e1);
  te2 = TREE_TYPE (e2);

  if (TREE_CODE (e1) == TREE_LIST && TREE_CODE (e2) == TREE_LIST)
    {
      tree lop1 = e1;
      tree lop2 = e2;
      for (lop1 = e1, lop2 = e2;
	   lop1 || lop2;
	   lop1 = TREE_CHAIN (lop1), lop2 = TREE_CHAIN (lop2))
	{
	  if (!lop1 || !lop2)
	    return false;
	  if (!expressions_equal_p (TREE_VALUE (lop1), TREE_VALUE (lop2)))
	    return false;
	}
      return true;

    }
  else if (TREE_CODE (e1) == TREE_CODE (e2) 
	   && (te1 == te2 || lang_hooks.types_compatible_p (te1, te2))
	   && operand_equal_p (e1, e2, OEP_PURE_SAME))
    return true;

  return false;
}
Example #6
static void
move_hint_to_base (tree type, struct mem_address *parts, tree base_hint,
		   aff_tree *addr)
{
  unsigned i;
  tree val = NULL_TREE;
  int qual;

  for (i = 0; i < addr->n; i++)
    {
      if (!double_int_one_p (addr->elts[i].coef))
	continue;

      val = addr->elts[i].val;
      if (operand_equal_p (val, base_hint, 0))
	break;
    }

  if (i == addr->n)
    return;

  /* Cast value to appropriate pointer type.  We cannot use a pointer
     to TYPE directly, as the back-end will assume registers of pointer
     type are aligned, and just the base itself may not actually be.
     We use void pointer to the type's address space instead.  */
  qual = ENCODE_QUAL_ADDR_SPACE (TYPE_ADDR_SPACE (type));
  type = build_qualified_type (void_type_node, qual);
  parts->base = fold_convert (build_pointer_type (type), val);
  aff_combination_remove_elt (addr, i);
}
Example #7
static struct mem_ref_group *
find_or_create_group (struct mem_ref_group **groups, tree base,
		      HOST_WIDE_INT step)
{
  struct mem_ref_group *group;

  for (; *groups; groups = &(*groups)->next)
    {
      if ((*groups)->step == step
	  && operand_equal_p ((*groups)->base, base, 0))
	return *groups;

      /* Keep the list of groups sorted by decreasing step.  */
      if ((*groups)->step < step)
	break;
    }

  group = XNEW (struct mem_ref_group);
  group->base = base;
  group->step = step;
  group->refs = NULL;
  group->next = *groups;
  *groups = group;

  return group;
}
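
Note the pointer-to-pointer walk: because GROUPS always points at the link to patch, insertion at the head, middle, or tail needs no special cases. The same "find or create in a sorted singly linked list" pattern, self-contained:

#include <stdio.h>
#include <stdlib.h>

struct group { long step; struct group *next; };

static struct group *
find_or_create (struct group **groups, long step)
{
  struct group *g;

  for (; *groups; groups = &(*groups)->next)
    {
      if ((*groups)->step == step)
	return *groups;
      /* Keep the list sorted by decreasing step.  */
      if ((*groups)->step < step)
	break;
    }

  g = (struct group *) malloc (sizeof *g);
  g->step = step;
  g->next = *groups;
  *groups = g;
  return g;
}

int
main (void)
{
  struct group *head = NULL, *g;
  find_or_create (&head, 4);
  find_or_create (&head, 16);
  find_or_create (&head, 8);
  for (g = head; g; g = g->next)
    printf ("%ld ", g->step);  /* Prints: 16 8 4  */
  printf ("\n");
  return 0;
}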
Example #8
tree
degenerate_phi_result (gimple phi)
{
  tree lhs = gimple_phi_result (phi);
  tree val = NULL;
  size_t i;

  /* Ignoring arguments which are the same as LHS, if all the remaining
     arguments are the same, then the PHI is a degenerate and has the
     value of that common argument.  */
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);

      if (arg == lhs)
	continue;
      else if (!arg)
	break;
      else if (!val)
	val = arg;
      else if (arg == val)
	continue;
      /* We bring in some of operand_equal_p not only to speed things
	 up, but also to avoid crashing when dereferencing the type of
	 a released SSA name.  */
      else if (TREE_CODE (val) != TREE_CODE (arg)
	       || TREE_CODE (val) == SSA_NAME
	       || !operand_equal_p (arg, val, 0))
	break;
    }
  return (i == gimple_phi_num_args (phi) ? val : NULL);
}
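
The scan in miniature: skip arguments equal to the LHS, remember the first remaining value, and fail on the first disagreement. With plain ints standing in for trees (0 playing the role of NULL):

#include <stdio.h>

static int
degenerate_result (int lhs, const int *args, int n)
{
  int val = 0, i;

  for (i = 0; i < n; i++)
    {
      if (args[i] == lhs)
	continue;            /* Self-reference, ignore.  */
      else if (!val)
	val = args[i];       /* First candidate value.  */
      else if (args[i] != val)
	return 0;            /* Two distinct values: not degenerate.  */
    }
  return val;
}

int
main (void)
{
  int args[] = { 7, 3, 7, 7 };  /* PHI <7, lhs, 7, 7> with lhs == 3.  */
  printf ("%d\n", degenerate_result (3, args, 4));  /* Prints 7.  */
  return 0;
}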
Example #9
bool
eq_evolutions_p (const_tree chrec0, const_tree chrec1)
{
  if (chrec0 == NULL_TREE
      || chrec1 == NULL_TREE
      || TREE_CODE (chrec0) != TREE_CODE (chrec1))
    return false;

  if (chrec0 == chrec1)
    return true;

  switch (TREE_CODE (chrec0))
    {
    case INTEGER_CST:
      return operand_equal_p (chrec0, chrec1, 0);

    case POLYNOMIAL_CHREC:
      return (CHREC_VARIABLE (chrec0) == CHREC_VARIABLE (chrec1)
	      && eq_evolutions_p (CHREC_LEFT (chrec0), CHREC_LEFT (chrec1))
	      && eq_evolutions_p (CHREC_RIGHT (chrec0), CHREC_RIGHT (chrec1)));

    case PLUS_EXPR:
    case MULT_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      return eq_evolutions_p (TREE_OPERAND (chrec0, 0),
			      TREE_OPERAND (chrec1, 0))
	  && eq_evolutions_p (TREE_OPERAND (chrec0, 1),
			      TREE_OPERAND (chrec1, 1));

    default:
      return false;
    }
}
Example #10
void
aff_combination_add_elt (aff_tree *comb, tree elt, const widest_int &scale_in)
{
  unsigned i;
  tree type;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  if (scale == 0)
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
	widest_int new_coef
	  = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb->type);
	if (new_coef != 0)
	  {
	    comb->elts[i].coef = new_coef;
	    return;
	  }

	comb->n--;
	comb->elts[i] = comb->elts[comb->n];

	if (comb->rest)
	  {
	    gcc_assert (comb->n == MAX_AFF_ELTS - 1);
	    comb->elts[comb->n].coef = 1;
	    comb->elts[comb->n].val = comb->rest;
	    comb->rest = NULL_TREE;
	    comb->n++;
	  }
	return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale == 1)
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
		       fold_convert (type, elt),
		       wide_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest,
			      elt);
  else
    comb->rest = elt;
}
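
An aff_tree is a bounded array of (coefficient, value) terms plus a catch-all "rest" expression. Adding a term folds into an existing entry when operand_equal_p finds the same value, and drops the entry when the coefficients cancel. A toy version of that bookkeeping, with the "rest" overflow handling omitted:

#include <stdio.h>

#define MAX_ELTS 8

struct toy_comb { unsigned n; long coef[MAX_ELTS]; int val[MAX_ELTS]; };

static void
toy_add_elt (struct toy_comb *c, int val, long scale)
{
  unsigned i;

  if (scale == 0)
    return;
  for (i = 0; i < c->n; i++)
    if (c->val[i] == val)
      {
	c->coef[i] += scale;
	if (c->coef[i] != 0)
	  return;
	/* Coefficient cancelled: overwrite the slot with the last entry.  */
	c->n--;
	c->coef[i] = c->coef[c->n];
	c->val[i] = c->val[c->n];
	return;
      }
  if (c->n < MAX_ELTS)
    {
      c->coef[c->n] = scale;
      c->val[c->n] = val;
      c->n++;
    }
  /* A full version would fold further terms into a "rest" tree here.  */
}

int
main (void)
{
  struct toy_comb c = { 0 };
  toy_add_elt (&c, 42, 3);
  toy_add_elt (&c, 42, -3);  /* Cancels the entry again.  */
  printf ("%u\n", c.n);      /* Prints 0.  */
  return 0;
}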
Example #11
static bool
ptr_deref_may_alias_decl_p (tree ptr, tree decl)
{
  struct ptr_info_def *pi;

  gcc_assert ((TREE_CODE (ptr) == SSA_NAME
	       || TREE_CODE (ptr) == ADDR_EXPR
	       || TREE_CODE (ptr) == INTEGER_CST)
	      && (TREE_CODE (decl) == VAR_DECL
		  || TREE_CODE (decl) == PARM_DECL
		  || TREE_CODE (decl) == RESULT_DECL));

  /* Non-aliased variables cannot be pointed to.  */
  if (!may_be_aliased (decl))
    return false;

  /* ADDR_EXPR pointers either just offset another pointer or directly
     specify the pointed-to set.  */
  if (TREE_CODE (ptr) == ADDR_EXPR)
    {
      tree base = get_base_address (TREE_OPERAND (ptr, 0));
      if (base
	  && INDIRECT_REF_P (base))
	ptr = TREE_OPERAND (base, 0);
      else if (base
	       && SSA_VAR_P (base))
	return operand_equal_p (base, decl, 0);
      else if (base
	       && CONSTANT_CLASS_P (base))
	return false;
      else
	return true;
    }

  /* We can end up with dereferencing constant pointers.
     Just bail out in this case.  */
  if (TREE_CODE (ptr) == INTEGER_CST)
    return true;

  /* If we do not have useful points-to information for this pointer
     we cannot disambiguate anything else.  */
  pi = SSA_NAME_PTR_INFO (ptr);
  if (!pi)
    return true;

  /* If the decl can be used as a restrict tag and we have a restrict
     pointer and that pointer's points-to set doesn't contain this decl
     then they can't alias.  */
  if (DECL_RESTRICTED_P (decl)
      && TYPE_RESTRICT (TREE_TYPE (ptr))
      && pi->pt.vars_contains_restrict)
    return bitmap_bit_p (pi->pt.vars, DECL_UID (decl));

  return pt_solution_includes (&pi->pt, decl);
}
Example #12
/* Callback for walk_stmt_load_store_ops.

   Return TRUE if OP will dereference the tree stored in DATA, FALSE
   otherwise.

   This routine only makes a superficial check for a dereference.  Thus,
   it must only be used if it is safe to return a false negative.  */
static bool
check_loadstore (gimple stmt, tree op, tree, void *data)
{
  if ((TREE_CODE (op) == MEM_REF || TREE_CODE (op) == TARGET_MEM_REF)
      && operand_equal_p (TREE_OPERAND (op, 0), (tree)data, 0))
    {
      TREE_THIS_VOLATILE (op) = 1;
      TREE_SIDE_EFFECTS (op) = 1;
      update_stmt (stmt);
      return true;
    }
  return false;
}
Example #13
static struct aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, val, 0))
      {
	if (idx)
	  *idx = i;

	return &comb->elts[i];
      }

  return NULL;
}
Example #14
static inline bool
set_copy_of_val (tree var, tree val)
{
  unsigned int ver = SSA_NAME_VERSION (var);
  tree old;

  /* Set FIRST to be the first link in COPY_OF[DEST].  If that
     changed, return true.  */
  old = copy_of[ver].value;
  copy_of[ver].value = val;

  if (old != val
      || (val && !operand_equal_p (old, val, 0)))
    return true;

  return false;
}
Example #15
static bool
same_phi_args_p (basic_block bb1, basic_block bb2, basic_block dest)
{
  edge e1 = find_edge (bb1, dest);
  edge e2 = find_edge (bb2, dest);
  gimple_stmt_iterator gsi;
  gimple phi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      phi = gsi_stmt (gsi);
      if (!operand_equal_p (PHI_ARG_DEF_FROM_EDGE (phi, e1),
			    PHI_ARG_DEF_FROM_EDGE (phi, e2), 0))
        return false;
    }

  return true;
}
Example #16
unsigned int find_arg_number_tree(const_tree arg, const_tree func)
{
	tree var;
	unsigned int argnum = 1;

	if (DECL_ARGUMENTS(func) == NULL_TREE)
		return CANNOT_FIND_ARG;

	if (TREE_CODE(arg) == SSA_NAME)
		arg = SSA_NAME_VAR(arg);

	for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var), argnum++) {
		if (!operand_equal_p(arg, var, 0) && strcmp(DECL_NAME_POINTER(var), DECL_NAME_POINTER(arg)))
			continue;
		if (!skip_types(var))
			return argnum;
	}

	return CANNOT_FIND_ARG;
}
Example #17
bool
expressions_equal_p (tree e1, tree e2)
{
  tree te1, te2;

  /* The obvious case.  */
  if (e1 == e2)
    return true;

  /* If only one of them is null, they cannot be equal.  */
  if (!e1 || !e2)
    return false;

  /* Recurse on elements of lists.  */
  if (TREE_CODE (e1) == TREE_LIST && TREE_CODE (e2) == TREE_LIST)
    {
      tree lop1 = e1;
      tree lop2 = e2;
      for (lop1 = e1, lop2 = e2;
	   lop1 || lop2;
	   lop1 = TREE_CHAIN (lop1), lop2 = TREE_CHAIN (lop2))
	{
	  if (!lop1 || !lop2)
	    return false;
	  if (!expressions_equal_p (TREE_VALUE (lop1), TREE_VALUE (lop2)))
	    return false;
	}
      return true;
    }

  te1 = TREE_TYPE (e1);
  te2 = TREE_TYPE (e2);

  /* Now perform the actual comparison.  */
  if (TREE_CODE (e1) == TREE_CODE (e2)
      && (te1 == te2 || types_compatible_p (te1, te2))
      && operand_equal_p (e1, e2, OEP_PURE_SAME))
    return true;

  return false;
}
Example #18
static void
move_variant_to_index (struct mem_address *parts, aff_tree *addr, tree v)
{
  unsigned i;
  tree val = NULL_TREE;

  gcc_assert (!parts->index);
  for (i = 0; i < addr->n; i++)
    {
      val = addr->elts[i].val;
      if (operand_equal_p (val, v, 0))
	break;
    }

  if (i == addr->n)
    return;

  parts->index = fold_convert (sizetype, val);
  parts->step = double_int_to_tree (sizetype, addr->elts[i].coef);
  aff_combination_remove_elt (addr, i);
}
Example #19
static void
compare_and_warn (gimple stmt, tree lhs, tree rhs)
{
  if (operand_equal_p (lhs, rhs, OEP_PURE_SAME))
    {
      location_t location;
      location = (gimple_has_location (stmt)
                  ? gimple_location (stmt)
                  : (DECL_P (lhs)
                     ? DECL_SOURCE_LOCATION (lhs)
                     : input_location));
      /* If LHS contains any tree node not currently supported by
         get_non_ssa_expr, simply emit a generic warning without
         specifying LHS in the message.  */
      lhs = get_non_ssa_expr (lhs);
      if (lhs)
        warning_at (location, 0, G_("%qE is assigned to itself"), lhs);
      else
        warning_at (location, 0, G_("self-assignment detected"));
    }
}
Example #20
tree
chrec_merge (tree chrec1, 
	     tree chrec2)
{
  if (chrec1 == chrec_dont_know
      || chrec2 == chrec_dont_know)
    return chrec_dont_know;

  if (chrec1 == chrec_known 
      || chrec2 == chrec_known)
    return chrec_known;

  if (chrec1 == chrec_not_analyzed_yet)
    return chrec2;
  if (chrec2 == chrec_not_analyzed_yet)
    return chrec1;

  if (operand_equal_p (chrec1, chrec2, 0))
    return chrec1;

  return chrec_dont_know;
}
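
chrec_merge is a join on a small lattice: chrec_dont_know absorbs everything, chrec_not_analyzed_yet is the identity, and two operand-equal chrecs merge to themselves. The same shape with an enum standing in for the tree values:

#include <stdio.h>

enum toy_chrec { NOT_ANALYZED, KNOWN, DONT_KNOW, VAL_A, VAL_B };

static enum toy_chrec
toy_merge (enum toy_chrec a, enum toy_chrec b)
{
  if (a == DONT_KNOW || b == DONT_KNOW)
    return DONT_KNOW;
  if (a == KNOWN || b == KNOWN)
    return KNOWN;
  if (a == NOT_ANALYZED)
    return b;
  if (b == NOT_ANALYZED)
    return a;
  return a == b ? a : DONT_KNOW;
}

int
main (void)
{
  printf ("%d\n", toy_merge (VAL_A, NOT_ANALYZED) == VAL_A);  /* Prints 1.  */
  printf ("%d\n", toy_merge (VAL_A, VAL_B) == DONT_KNOW);     /* Prints 1.  */
  return 0;
}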
Example #21
static bool
dse_possible_dead_store_p (gimple stmt, gimple *use_stmt)
{
  gimple temp;
  unsigned cnt = 0;

  *use_stmt = NULL;

  /* Self-assignments are zombies.  */
  if (operand_equal_p (gimple_assign_rhs1 (stmt), gimple_assign_lhs (stmt), 0))
    {
      *use_stmt = stmt;
      return true;
    }

  /* Find the first dominated statement that clobbers (part of) the
     memory stmt stores to with no intermediate statement that may use
     part of the memory stmt stores.  That is, find a store that may
     prove stmt to be a dead store.  */
  temp = stmt;
  do
    {
      gimple use_stmt, defvar_def;
      imm_use_iterator ui;
      bool fail = false;
      tree defvar;

      /* Limit stmt walking to be linear in the number of possibly
         dead stores.  */
      if (++cnt > 256)
	return false;

      if (gimple_code (temp) == GIMPLE_PHI)
	defvar = PHI_RESULT (temp);
      else
	defvar = gimple_vdef (temp);
      defvar_def = temp;
      temp = NULL;
      FOR_EACH_IMM_USE_STMT (use_stmt, ui, defvar)
	{
	  cnt++;

	  /* If we ever reach our DSE candidate stmt again fail.  We
	     cannot handle dead stores in loops.  */
	  if (use_stmt == stmt)
	    {
	      fail = true;
	      BREAK_FROM_IMM_USE_STMT (ui);
	    }
	  /* In simple cases we can look through PHI nodes, but we
	     have to be careful with loops and with memory references
	     containing operands that are also operands of PHI nodes.
	     See gcc.c-torture/execute/20051110-*.c.  */
	  else if (gimple_code (use_stmt) == GIMPLE_PHI)
	    {
	      if (temp
		  /* Make sure we are not in a loop latch block.  */
		  || gimple_bb (stmt) == gimple_bb (use_stmt)
		  || dominated_by_p (CDI_DOMINATORS,
				     gimple_bb (stmt), gimple_bb (use_stmt))
		  /* We can look through PHIs to regions post-dominating
		     the DSE candidate stmt.  */
		  || !dominated_by_p (CDI_POST_DOMINATORS,
				      gimple_bb (stmt), gimple_bb (use_stmt)))
		{
		  fail = true;
		  BREAK_FROM_IMM_USE_STMT (ui);
		}
	      /* Do not consider the PHI as use if it dominates the 
	         stmt defining the virtual operand we are processing,
		 we have processed it already in this case.  */
	      if (gimple_bb (defvar_def) != gimple_bb (use_stmt)
		  && !dominated_by_p (CDI_DOMINATORS,
				      gimple_bb (defvar_def),
				      gimple_bb (use_stmt)))
		temp = use_stmt;
	    }
	  /* If the statement is a use the store is not dead.  */
	  else if (ref_maybe_used_by_stmt_p (use_stmt,
					     gimple_assign_lhs (stmt)))
	    {
	      fail = true;
	      BREAK_FROM_IMM_USE_STMT (ui);
	    }
	  /* If this is a store, remember it or bail out if we have
	     multiple ones (they will be in different CFG parts then).  */
	  else if (gimple_vdef (use_stmt))
	    {
	      if (temp)
		{
		  fail = true;
		  BREAK_FROM_IMM_USE_STMT (ui);
		}
	      temp = use_stmt;
	    }
	}

      if (fail)
	return false;

      /* If we didn't find any definition this means the store is dead
         if it isn't a store to globally reachable memory.  In this case
	 just pretend the stmt makes itself dead.  Otherwise fail.  */
      if (!temp)
	{
	  if (stmt_may_clobber_global_p (stmt))
	    return false;

	  temp = stmt;
	  break;
	}
    }
  /* Continue walking until we reach a kill.  */
  while (!stmt_kills_ref_p (temp, gimple_assign_lhs (stmt)));

  *use_stmt = temp;

  return true;
}
Example #22
static enum ssa_prop_result
copy_prop_visit_phi_node (gimple phi)
{
  enum ssa_prop_result retval;
  unsigned i;
  prop_value_t phi_val = { NULL_TREE };

  tree lhs = gimple_phi_result (phi);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      prop_value_t *arg_val;
      tree arg_value;
      tree arg = gimple_phi_arg_def (phi, i);
      edge e = gimple_phi_arg_edge (phi, i);

      /* We don't care about values flowing through non-executable
	 edges.  */
      if (!(e->flags & EDGE_EXECUTABLE))
	continue;

      /* Names that flow through abnormal edges cannot be used to
	 derive copies.  */
      if (TREE_CODE (arg) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (arg))
	{
	  phi_val.value = lhs;
	  break;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "\tArgument #%d: ", i);
	  dump_copy_of (dump_file, arg);
	  fprintf (dump_file, "\n");
	}

      if (TREE_CODE (arg) == SSA_NAME)
	{
	  arg_val = get_copy_of_val (arg);

	  /* If we didn't visit the definition of arg yet treat it as
	     UNDEFINED.  This also handles PHI arguments that are the
	     same as lhs.  We'll come here again.  */
	  if (!arg_val->value)
	    continue;

	  arg_value = arg_val->value;
	}
      else
	arg_value = valueize_val (arg);

      /* Avoid copy propagation from an inner into an outer loop.
	 Otherwise, this may move loop variant variables outside of
	 their loops and prevent coalescing opportunities.  If the
	 value was loop invariant, it will be hoisted by LICM and
	 exposed for copy propagation.
	 ???  The value will always be loop invariant.
	 In loop-closed SSA form do not copy-propagate through
	 PHI nodes in blocks with a loop exit edge predecessor.  */
      if (current_loops
	  && TREE_CODE (arg_value) == SSA_NAME
	  && (loop_depth_of_name (arg_value) > loop_depth_of_name (lhs)
	      || (loops_state_satisfies_p (LOOP_CLOSED_SSA)
		  && loop_exit_edge_p (e->src->loop_father, e))))
	{
	  phi_val.value = lhs;
	  break;
	}

      /* If the LHS didn't have a value yet, make it a copy of the
	 first argument we find.   */
      if (phi_val.value == NULL_TREE)
	{
	  phi_val.value = arg_value;
	  continue;
	}

      /* If PHI_VAL and ARG don't have a common copy-of chain, then
	 this PHI node cannot be a copy operation.  */
      if (phi_val.value != arg_value
	  && !operand_equal_p (phi_val.value, arg_value, 0))
	{
	  phi_val.value = lhs;
	  break;
	}
    }

  if (phi_val.value
      && may_propagate_copy (lhs, phi_val.value)
      && set_copy_of_val (lhs, phi_val.value))
    retval = (phi_val.value != lhs) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
  else
    retval = SSA_PROP_NOT_INTERESTING;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI node ");
      dump_copy_of (dump_file, lhs);
      fprintf (dump_file, "\nTelling the propagator to ");
      if (retval == SSA_PROP_INTERESTING)
	fprintf (dump_file, "add SSA edges out of this PHI and continue.");
      else if (retval == SSA_PROP_VARYING)
	fprintf (dump_file, "add SSA edges out of this PHI and never visit again.");
      else
	fprintf (dump_file, "do nothing with SSA edges and keep iterating.");
      fprintf (dump_file, "\n\n");
    }

  return retval;
}
Example #23
static inline bool equal_keys (const_tree ref1, const_tree ref2)
{
  return operand_equal_p (ref1, ref2, 0);
}
Example #24
static bool
ifcombine_iforif (basic_block inner_cond_bb, basic_block outer_cond_bb)
{
  gimple inner_cond, outer_cond;
  tree name1, name2, bits1, bits2;

  inner_cond = last_stmt (inner_cond_bb);
  if (!inner_cond
      || gimple_code (inner_cond) != GIMPLE_COND)
    return false;

  outer_cond = last_stmt (outer_cond_bb);
  if (!outer_cond
      || gimple_code (outer_cond) != GIMPLE_COND)
    return false;

  /* See if we have two bit tests of the same name in both tests.
     In that case remove the outer test and change the inner one to
     test for name & (bits1 | bits2) != 0.  */
  if (recognize_bits_test (inner_cond, &name1, &bits1)
      && recognize_bits_test (outer_cond, &name2, &bits2))
    {
      gimple_stmt_iterator gsi;
      tree t;

      /* Find the common name which is bit-tested.  */
      if (name1 == name2)
	;
      else if (bits1 == bits2)
	{
	  t = name2;
	  name2 = bits2;
	  bits2 = t;
	  t = name1;
	  name1 = bits1;
	  bits1 = t;
	}
      else if (name1 == bits2)
	{
	  t = name2;
	  name2 = bits2;
	  bits2 = t;
	}
      else if (bits1 == name2)
	{
	  t = name1;
	  name1 = bits1;
	  bits1 = t;
	}
      else
	return false;

      /* As we strip non-widening conversions in finding a common
         name that is tested make sure to end up with an integral
	 type for building the bit operations.  */
      if (TYPE_PRECISION (TREE_TYPE (bits1))
	  >= TYPE_PRECISION (TREE_TYPE (bits2)))
	{
	  bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
	  name1 = fold_convert (TREE_TYPE (bits1), name1);
	  bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
	  bits2 = fold_convert (TREE_TYPE (bits1), bits2);
	}
      else
	{
	  bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
	  name1 = fold_convert (TREE_TYPE (bits2), name1);
	  bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
	  bits1 = fold_convert (TREE_TYPE (bits2), bits1);
	}

      /* Do it.  */
      gsi = gsi_for_stmt (inner_cond);
      t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), bits1, bits2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t = fold_build2 (NE_EXPR, boolean_type_node, t,
		       build_int_cst (TREE_TYPE (t), 0));
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond, boolean_false_node);
      update_stmt (outer_cond);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing bits or bits test to ");
	  print_generic_expr (dump_file, name1, 0);
	  fprintf (dump_file, " & T != 0\nwith temporary T = ");
	  print_generic_expr (dump_file, bits1, 0);
	  fprintf (dump_file, " | ");
	  print_generic_expr (dump_file, bits2, 0);
	  fprintf (dump_file, "\n");
	}

      return true;
    }

  /* See if we have two comparisons that we can merge into one.
     This happens for C++ operator overloading where for example
     GE_EXPR is implemented as GT_EXPR || EQ_EXPR.  */
  else if (TREE_CODE_CLASS (gimple_cond_code (inner_cond)) == tcc_comparison
	   && TREE_CODE_CLASS (gimple_cond_code (outer_cond)) == tcc_comparison
	   && operand_equal_p (gimple_cond_lhs (inner_cond),
			       gimple_cond_lhs (outer_cond), 0)
	   && operand_equal_p (gimple_cond_rhs (inner_cond),
			       gimple_cond_rhs (outer_cond), 0))
    {
      enum tree_code code1 = gimple_cond_code (inner_cond);
      enum tree_code code2 = gimple_cond_code (outer_cond);
      enum tree_code code;
      tree t;

#define CHK(a,b) ((code1 == a ## _EXPR && code2 == b ## _EXPR) \
		  || (code2 == a ## _EXPR && code1 == b ## _EXPR))
      /* Merge the two condition codes if possible.  */
      if (code1 == code2)
	code = code1;
      else if (CHK (EQ, LT))
	code = LE_EXPR;
      else if (CHK (EQ, GT))
	code = GE_EXPR;
      else if (CHK (LT, LE))
	code = LE_EXPR;
      else if (CHK (GT, GE))
	code = GE_EXPR;
      else if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (inner_cond)))
	       || flag_unsafe_math_optimizations)
	{
	  if (CHK (LT, GT))
	    code = NE_EXPR;
	  else if (CHK (LT, NE))
	    code = NE_EXPR;
	  else if (CHK (GT, NE))
	    code = NE_EXPR;
	  else
	    return false;
	}
      /* We could check for combinations leading to trivial true/false.  */
      else
	return false;
#undef CHK

      /* Do it.  */
      t = fold_build2 (code, boolean_type_node, gimple_cond_lhs (outer_cond),
		       gimple_cond_rhs (outer_cond));
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond, boolean_false_node);
      update_stmt (outer_cond);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing two comparisons to ");
	  print_generic_expr (dump_file, t, 0);
	  fprintf (dump_file, "\n");
	}

      return true;
    }

  return false;
}
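
The first transformation depends on the identity (name & bits1) != 0 || (name & bits2) != 0 being the same as (name & (bits1 | bits2)) != 0; the second folds two comparisons of the same operands into one. The bit identity is easy to check exhaustively over small values:

#include <stdio.h>

int
main (void)
{
  unsigned name, b1, b2;

  for (name = 0; name < 16; name++)
    for (b1 = 0; b1 < 16; b1++)
      for (b2 = 0; b2 < 16; b2++)
	{
	  int separate = ((name & b1) != 0) || ((name & b2) != 0);
	  int merged = (name & (b1 | b2)) != 0;
	  if (separate != merged)
	    {
	      printf ("mismatch\n");
	      return 1;
	    }
	}
  printf ("identity holds\n");  /* This is what gets printed.  */
  return 0;
}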
Example #25
static void
add_type_duplicate (odr_type val, tree type)
{
  if (!val->types_set)
    val->types_set = pointer_set_create ();

  /* See if this duplicate is new.  */
  if (!pointer_set_insert (val->types_set, type))
    {
      bool merge = true;
      bool base_mismatch = false;
      gcc_assert (in_lto_p);
      vec_safe_push (val->types, type);
      unsigned int i,j;

      /* First we compare memory layout.  */
      if (!types_compatible_p (val->type, type))
	{
	  merge = false;
	  if (BINFO_VTABLE (TYPE_BINFO (val->type))
	      && warning_at (DECL_SOURCE_LOCATION (TYPE_NAME (type)), 0,
			     "type %qD violates one definition rule  ",
			     type))
	    inform (DECL_SOURCE_LOCATION (TYPE_NAME (val->type)),
		    "a type with the same name but different layout is "
		    "defined in another translation unit");
	  if (cgraph_dump_file)
	    {
	      fprintf (cgraph_dump_file, "ODR violation or merging or ODR type bug?\n");
	    
	      print_node (cgraph_dump_file, "", val->type, 0);
	      putc ('\n',cgraph_dump_file);
	      print_node (cgraph_dump_file, "", type, 0);
	      putc ('\n',cgraph_dump_file);
	    }
	}

      /* Next sanity check that bases are the same.  If not, we will end
	 up producing wrong answers.  */
      for (j = 0, i = 0; i < BINFO_N_BASE_BINFOS (TYPE_BINFO (type)); i++)
	if (polymorphic_type_binfo_p (BINFO_BASE_BINFO (TYPE_BINFO (type), i)))
	  {
	    odr_type base = get_odr_type
			       (BINFO_TYPE
				  (BINFO_BASE_BINFO (TYPE_BINFO (type),
						     i)),
				true);
	    if (val->bases.length () <= j || val->bases[j] != base)
	      base_mismatch = true;
	    j++;
	  }
      if (base_mismatch)
	{
	  merge = false;

	  if (warning_at (DECL_SOURCE_LOCATION (TYPE_NAME (type)), 0,
			  "type %qD violates one definition rule  ",
			  type))
	    inform (DECL_SOURCE_LOCATION (TYPE_NAME (val->type)),
		    "a type with the same name but different bases is "
		    "defined in another translation unit");
	  if (cgraph_dump_file)
	    {
	      fprintf (cgraph_dump_file, "ODR bse violation or merging bug?\n");
	    
	      print_node (cgraph_dump_file, "", val->type, 0);
	      putc ('\n',cgraph_dump_file);
	      print_node (cgraph_dump_file, "", type, 0);
	      putc ('\n',cgraph_dump_file);
	    }
	}

      /* Regularize things a little.  During LTO same types may come with
	 different BINFOs.  Either because their virtual table was
	 not merged by tree merging and only later at decl merging or
	 because one type comes with external vtable, while other
	 with internal.  We want to merge equivalent binfos to conserve
	 memory and streaming overhead.

	 The external vtables are more harmful: they contain references
	 to external declarations of methods that may be defined in the
	 merged LTO unit.  For this reason we absolutely need to remove
	 them and replace by internal variants. Not doing so will lead
         to incomplete answers from possible_polymorphic_call_targets.  */
      if (!flag_ltrans && merge)
	{
	  tree master_binfo = TYPE_BINFO (val->type);
	  tree v1 = BINFO_VTABLE (master_binfo);
	  tree v2 = BINFO_VTABLE (TYPE_BINFO (type));

	  if (TREE_CODE (v1) == POINTER_PLUS_EXPR)
	    {
	      gcc_assert (TREE_CODE (v2) == POINTER_PLUS_EXPR
			  && operand_equal_p (TREE_OPERAND (v1, 1),
					      TREE_OPERAND (v2, 1), 0));
	      v1 = TREE_OPERAND (TREE_OPERAND (v1, 0), 0);
	      v2 = TREE_OPERAND (TREE_OPERAND (v2, 0), 0);
	    }
	  gcc_assert (DECL_ASSEMBLER_NAME (v1)
		      == DECL_ASSEMBLER_NAME (v2));

	  if (DECL_EXTERNAL (v1) && !DECL_EXTERNAL (v2))
	    {
	      unsigned int i;

	      TYPE_BINFO (val->type) = TYPE_BINFO (type);
	      for (i = 0; i < val->types->length (); i++)
		{
		  if (TYPE_BINFO ((*val->types)[i])
		      == master_binfo)
		    TYPE_BINFO ((*val->types)[i]) = TYPE_BINFO (type);
		}
	    }
	  else
	    TYPE_BINFO (type) = master_binfo;
	}
    }
}
Example #26
static bool
forward_propagate_addr_expr_1 (tree name, tree def_rhs,
			       gimple_stmt_iterator *use_stmt_gsi,
			       bool single_use_p)
{
  tree lhs, rhs, rhs2, array_ref;
  tree *rhsp, *lhsp;
  gimple use_stmt = gsi_stmt (*use_stmt_gsi);
  enum tree_code rhs_code;

  gcc_assert (TREE_CODE (def_rhs) == ADDR_EXPR);

  lhs = gimple_assign_lhs (use_stmt);
  rhs_code = gimple_assign_rhs_code (use_stmt);
  rhs = gimple_assign_rhs1 (use_stmt);

  /* Trivial cases.  The use statement could be a trivial copy or a
     useless conversion.  Recurse to the uses of the lhs as copyprop does
     not copy through different variant pointers and FRE does not catch
     all useless conversions.  Treat the case of a single-use name and
     a conversion to def_rhs type separate, though.  */
  if (TREE_CODE (lhs) == SSA_NAME
      && ((rhs_code == SSA_NAME && rhs == name)
	  || CONVERT_EXPR_CODE_P (rhs_code)))
    {
      /* Only recurse if we don't deal with a single use or we cannot
	 do the propagation to the current statement.  In particular
	 we can end up with a conversion needed for a non-invariant
	 address which we cannot do in a single statement.  */
      if (!single_use_p
	  || (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (def_rhs))
	      && !is_gimple_min_invariant (def_rhs)))
	return forward_propagate_addr_expr (lhs, def_rhs);

      gimple_assign_set_rhs1 (use_stmt, unshare_expr (def_rhs));
      if (useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (def_rhs)))
	gimple_assign_set_rhs_code (use_stmt, TREE_CODE (def_rhs));
      else
	gimple_assign_set_rhs_code (use_stmt, NOP_EXPR);
      return true;
    }

  /* Now strip away any outer COMPONENT_REF/ARRAY_REF nodes from the LHS. 
     ADDR_EXPR will not appear on the LHS.  */
  lhsp = gimple_assign_lhs_ptr (use_stmt);
  while (handled_component_p (*lhsp))
    lhsp = &TREE_OPERAND (*lhsp, 0);
  lhs = *lhsp;

  /* Now see if the LHS node is an INDIRECT_REF using NAME.  If so, 
     propagate the ADDR_EXPR into the use of NAME and fold the result.  */
  if (TREE_CODE (lhs) == INDIRECT_REF
      && TREE_OPERAND (lhs, 0) == name
      && may_propagate_address_into_dereference (def_rhs, lhs)
      && (lhsp != gimple_assign_lhs_ptr (use_stmt)
	  || useless_type_conversion_p (TREE_TYPE (TREE_OPERAND (def_rhs, 0)),
					TREE_TYPE (rhs))))
    {
      *lhsp = unshare_expr (TREE_OPERAND (def_rhs, 0));
      fold_stmt_inplace (use_stmt);
      tidy_after_forward_propagate_addr (use_stmt);

      /* Continue propagating into the RHS if this was not the only use.  */
      if (single_use_p)
	return true;
    }

  /* Strip away any outer COMPONENT_REF, ARRAY_REF or ADDR_EXPR
     nodes from the RHS.  */
  rhsp = gimple_assign_rhs1_ptr (use_stmt);
  while (handled_component_p (*rhsp)
	 || TREE_CODE (*rhsp) == ADDR_EXPR)
    rhsp = &TREE_OPERAND (*rhsp, 0);
  rhs = *rhsp;

  /* Now see if the RHS node is an INDIRECT_REF using NAME.  If so,
     propagate the ADDR_EXPR into the use of NAME and fold the result.  */
  if (TREE_CODE (rhs) == INDIRECT_REF
      && TREE_OPERAND (rhs, 0) == name
      && may_propagate_address_into_dereference (def_rhs, rhs))
    {
      *rhsp = unshare_expr (TREE_OPERAND (def_rhs, 0));
      fold_stmt_inplace (use_stmt);
      tidy_after_forward_propagate_addr (use_stmt);
      return true;
    }

  /* Now see if the RHS node is an INDIRECT_REF using NAME.  If so, 
     propagate the ADDR_EXPR into the use of NAME and try to
     create a VCE and fold the result.  */
  if (TREE_CODE (rhs) == INDIRECT_REF
      && TREE_OPERAND (rhs, 0) == name
      && TYPE_SIZE (TREE_TYPE (rhs))
      && TYPE_SIZE (TREE_TYPE (TREE_OPERAND (def_rhs, 0)))
      /* Function decls should not be used for VCE either as it could be a
         function descriptor that we want and not the actual function code.  */
      && TREE_CODE (TREE_OPERAND (def_rhs, 0)) != FUNCTION_DECL
      /* We should not convert volatile loads to non-volatile loads. */
      && !TYPE_VOLATILE (TREE_TYPE (rhs))
      && !TYPE_VOLATILE (TREE_TYPE (TREE_OPERAND (def_rhs, 0)))
      && operand_equal_p (TYPE_SIZE (TREE_TYPE (rhs)),
			  TYPE_SIZE (TREE_TYPE (TREE_OPERAND (def_rhs, 0))), 0)) 
   {
     tree def_rhs_base, new_rhs = unshare_expr (TREE_OPERAND (def_rhs, 0));
     new_rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (rhs), new_rhs);
     if (TREE_CODE (new_rhs) != VIEW_CONVERT_EXPR)
       {
	 /* If we have folded the VIEW_CONVERT_EXPR then the result is only
	    valid if we can replace the whole rhs of the use statement.  */
	 if (rhs != gimple_assign_rhs1 (use_stmt))
	   return false;
	 new_rhs = force_gimple_operand_gsi (use_stmt_gsi, new_rhs, true, NULL,
					     true, GSI_NEW_STMT);
	 gimple_assign_set_rhs1 (use_stmt, new_rhs);
	 tidy_after_forward_propagate_addr (use_stmt);
	 return true;
       }
     /* If the defining rhs comes from an indirect reference, then do not
        convert into a VIEW_CONVERT_EXPR.  */
     def_rhs_base = TREE_OPERAND (def_rhs, 0);
     while (handled_component_p (def_rhs_base))
       def_rhs_base = TREE_OPERAND (def_rhs_base, 0);
     if (!INDIRECT_REF_P (def_rhs_base))
       {
	 /* We may have arbitrary VIEW_CONVERT_EXPRs in a nested component
	    reference.  Place it there and fold the thing.  */
	 *rhsp = new_rhs;
	 fold_stmt_inplace (use_stmt);
	 tidy_after_forward_propagate_addr (use_stmt);
	 return true;
       }
   }

  /* If the use of the ADDR_EXPR is not a POINTER_PLUS_EXPR, there
     is nothing to do. */
  if (gimple_assign_rhs_code (use_stmt) != POINTER_PLUS_EXPR
      || gimple_assign_rhs1 (use_stmt) != name)
    return false;

  /* The remaining cases are all for turning pointer arithmetic into
     array indexing.  They only apply when we have the address of
     element zero in an array.  If that is not the case then there
     is nothing to do.  */
  array_ref = TREE_OPERAND (def_rhs, 0);
  if (TREE_CODE (array_ref) != ARRAY_REF
      || TREE_CODE (TREE_TYPE (TREE_OPERAND (array_ref, 0))) != ARRAY_TYPE
      || !integer_zerop (TREE_OPERAND (array_ref, 1)))
    return false;

  rhs2 = gimple_assign_rhs2 (use_stmt);
  /* Try to optimize &x[0] p+ C where C is a multiple of the size
     of the elements in X into &x[C/element size].  */
  if (TREE_CODE (rhs2) == INTEGER_CST)
    {
      tree new_rhs = maybe_fold_stmt_addition (gimple_expr_type (use_stmt),
					       array_ref, rhs2);
      if (new_rhs)
	{
	  gimple_assign_set_rhs_from_tree (use_stmt_gsi, new_rhs);
	  use_stmt = gsi_stmt (*use_stmt_gsi);
	  update_stmt (use_stmt);
	  tidy_after_forward_propagate_addr (use_stmt);
	  return true;
	}
    }

  /* Try to optimize &x[0] p+ OFFSET where OFFSET is defined by
     converting a multiplication of an index by the size of the
     array elements, then the result is converted into the proper
     type for the arithmetic.  */
  if (TREE_CODE (rhs2) == SSA_NAME
      /* Avoid problems with IVopts creating PLUS_EXPRs with a
	 different type than their operands.  */
      && useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (def_rhs)))
    return forward_propagate_addr_into_variable_array_index (rhs2, def_rhs,
							     use_stmt_gsi);
  return false;
}
Example #27
tree
c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
		  tree orig_declv, tree initv, tree condv, tree incrv,
		  tree body, tree pre_body)
{
  location_t elocus;
  bool fail = false;
  int i;

  if ((code == CILK_SIMD || code == CILK_FOR)
      && !c_check_cilk_loop (locus, TREE_VEC_ELT (declv, 0)))
    fail = true;

  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
	  && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
	{
	  error_at (elocus, "invalid type for iteration variable %qE", decl);
	  fail = true;
	}

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
	 have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
	{
	  elocus = DECL_SOURCE_LOCATION (decl);

	  init = DECL_INITIAL (decl);
	  if (init == NULL)
	    {
	      error_at (elocus, "%qE is not initialized", decl);
	      init = integer_zero_node;
	      fail = true;
	    }
	  DECL_INITIAL (decl) = NULL_TREE;

	  init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
	      			    /* FIXME diagnostics: This should
				       be the location of the INIT.  */
	      			    elocus,
				    init,
				    NULL_TREE);
	}
      if (init != error_mark_node)
	{
	  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
	  gcc_assert (TREE_OPERAND (init, 0) == decl);
	}

      if (cond == NULL_TREE)
	{
	  error_at (elocus, "missing controlling predicate");
	  fail = true;
	}
      else
	{
	  bool cond_ok = false;

	  if (EXPR_HAS_LOCATION (cond))
	    elocus = EXPR_LOCATION (cond);

	  if (TREE_CODE (cond) == LT_EXPR
	      || TREE_CODE (cond) == LE_EXPR
	      || TREE_CODE (cond) == GT_EXPR
	      || TREE_CODE (cond) == GE_EXPR
	      || TREE_CODE (cond) == NE_EXPR
	      || TREE_CODE (cond) == EQ_EXPR)
	    {
	      tree op0 = TREE_OPERAND (cond, 0);
	      tree op1 = TREE_OPERAND (cond, 1);

	      /* 2.5.1.  The comparison in the condition is computed in
		 the type of DECL, otherwise the behavior is undefined.

		 For example:
		 long n; int i;
		 i < n;

		 according to ISO will be evaluated as:
		 (long)i < n;

		 We want to force:
		 i < (int)n;  */
	      if (TREE_CODE (op0) == NOP_EXPR
		  && decl == TREE_OPERAND (op0, 0))
		{
		  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
		  TREE_OPERAND (cond, 1)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				   TREE_OPERAND (cond, 1));
		}
	      else if (TREE_CODE (op1) == NOP_EXPR
		       && decl == TREE_OPERAND (op1, 0))
		{
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
		  TREE_OPERAND (cond, 0)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				   TREE_OPERAND (cond, 0));
		}

	      if (decl == TREE_OPERAND (cond, 0))
		cond_ok = true;
	      else if (decl == TREE_OPERAND (cond, 1))
		{
		  TREE_SET_CODE (cond,
				 swap_tree_comparison (TREE_CODE (cond)));
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
		  TREE_OPERAND (cond, 0) = decl;
		  cond_ok = true;
		}

	      if (TREE_CODE (cond) == NE_EXPR
		  || TREE_CODE (cond) == EQ_EXPR)
		{
		  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
		    {
		      if (code != CILK_SIMD && code != CILK_FOR)
			cond_ok = false;
		    }
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MIN_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? GT_EXPR : LE_EXPR);
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MAX_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? LT_EXPR : GE_EXPR);
		  else if (code != CILK_SIMD && code != CILK_FOR)
		    cond_ok = false;
		}
	    }

	  if (!cond_ok)
	    {
	      error_at (elocus, "invalid controlling predicate");
	      fail = true;
	    }
	}

      if (incr == NULL_TREE)
	{
	  error_at (elocus, "missing increment expression");
	  fail = true;
	}
      else
	{
	  bool incr_ok = false;

	  if (EXPR_HAS_LOCATION (incr))
	    elocus = EXPR_LOCATION (incr);

	  /* Check all the valid increment expressions: v++, v--, ++v, --v,
	     v = v + incr, v = incr + v and v = v - incr.  */
	  switch (TREE_CODE (incr))
	    {
	    case POSTINCREMENT_EXPR:
	    case PREINCREMENT_EXPR:
	    case POSTDECREMENT_EXPR:
	    case PREDECREMENT_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;

	      incr_ok = true;
	      incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
	      break;

	    case COMPOUND_EXPR:
	      if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
		  || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
		break;
	      incr = TREE_OPERAND (incr, 1);
	      /* FALLTHRU */
	    case MODIFY_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;
	      if (TREE_OPERAND (incr, 1) == decl)
		break;
	      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
		  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
		      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
		incr_ok = true;
	      else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
			|| (TREE_CODE (TREE_OPERAND (incr, 1))
			    == POINTER_PLUS_EXPR))
		       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
		incr_ok = true;
	      else
		{
		  tree t = check_omp_for_incr_expr (elocus,
						    TREE_OPERAND (incr, 1),
						    decl);
		  if (t != error_mark_node)
		    {
		      incr_ok = true;
		      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
		      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
		    }
		}
	      break;

	    default:
	      break;
	    }
	  if (!incr_ok)
	    {
	      error_at (elocus, "invalid increment expression");
	      fail = true;
	    }
	}

      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (code);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;
      if (code == OMP_FOR)
	OMP_FOR_ORIG_DECLS (t) = orig_declv;

      SET_EXPR_LOCATION (t, locus);
      return add_stmt (t);
    }
}
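
The NE_EXPR/EQ_EXPR rewrite above relies on the iteration variable never leaving its type's range: i != TYPE_MIN_VALUE is equivalent to i > TYPE_MIN_VALUE, and i != TYPE_MAX_VALUE to i < TYPE_MAX_VALUE. Checked exhaustively for signed char:

#include <limits.h>
#include <stdio.h>

int
main (void)
{
  int i;

  for (i = SCHAR_MIN; i <= SCHAR_MAX; i++)
    if ((i != SCHAR_MIN) != (i > SCHAR_MIN)
	|| (i != SCHAR_MAX) != (i < SCHAR_MAX))
      {
	printf ("mismatch at %d\n", i);
	return 1;
      }
  printf ("equivalences hold\n");
  return 0;
}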
Example #28
void
aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale)
{
  unsigned i;
  tree type;

  scale = double_int_ext_for_comb (scale, comb);
  if (scale.is_zero ())
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
	double_int new_coef;

	new_coef = comb->elts[i].coef + scale;
	new_coef = double_int_ext_for_comb (new_coef, comb);
	if (!new_coef.is_zero ())
	  {
	    comb->elts[i].coef = new_coef;
	    return;
	  }

	comb->n--;
	comb->elts[i] = comb->elts[comb->n];

	if (comb->rest)
	  {
	    gcc_assert (comb->n == MAX_AFF_ELTS - 1);
	    comb->elts[comb->n].coef = double_int_one;
	    comb->elts[comb->n].val = comb->rest;
	    comb->rest = NULL_TREE;
	    comb->n++;
	  }
	return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale.is_one ())
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
		       fold_convert (type, elt),
		       double_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest,
			      elt);
  else
    comb->rest = elt;
}
Example #29
void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree cst, core, toffset;
  poly_int64 bitpos, bitsize, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    case PLUS_EXPR:
    case MINUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), type, &tmp);
      if (code == MINUS_EXPR)
	aff_combination_scale (&tmp, -1);
      aff_combination_add (comb, &tmp);
      return;

    case MULT_EXPR:
      cst = TREE_OPERAND (expr, 1);
      if (TREE_CODE (cst) != INTEGER_CST)
	break;
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, wi::to_widest (cst));
      return;

    case NEGATE_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, -1);
      return;

    case BIT_NOT_EXPR:
      /* ~x = -x - 1 */
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, -1);
      aff_combination_add_cst (comb, -1);
      return;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
	{
	  expr = TREE_OPERAND (expr, 0);
	  tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
	  tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
	  aff_combination_add (comb, &tmp);
	  return;
	}
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
				  &toffset, &mode, &unsignedp, &reversep,
				  &volatilep);
      if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
	break;
      aff_combination_const (comb, type, bytepos);
      if (TREE_CODE (core) == MEM_REF)
	{
	  tree mem_offset = TREE_OPERAND (core, 1);
	  aff_combination_add_cst (comb, wi::to_poly_widest (mem_offset));
	  core = TREE_OPERAND (core, 0);
	}
      else
	core = build_fold_addr_expr (core);

      if (TREE_CODE (core) == ADDR_EXPR)
	aff_combination_add_elt (comb, core, 1);
      else
	{
	  tree_to_aff_combination (core, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      if (toffset)
	{
	  tree_to_aff_combination (toffset, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      return;

    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
	tree_to_aff_combination (TREE_OPERAND (TREE_OPERAND (expr, 0), 0),
				 type, comb);
      else if (integer_zerop (TREE_OPERAND (expr, 1)))
	{
	  aff_combination_elt (comb, type, expr);
	  return;
	}
      else
	aff_combination_elt (comb, type,
			     build2 (MEM_REF, TREE_TYPE (expr),
				     TREE_OPERAND (expr, 0),
				     build_int_cst
				      (TREE_TYPE (TREE_OPERAND (expr, 1)), 0)));
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    CASE_CONVERT:
      {
	tree otype = TREE_TYPE (expr);
	tree inner = TREE_OPERAND (expr, 0);
	tree itype = TREE_TYPE (inner);
	enum tree_code icode = TREE_CODE (inner);

	/* In principle this is a valid folding, but it isn't necessarily
	   an optimization, so do it here and not in fold_unary.  */
	if ((icode == PLUS_EXPR || icode == MINUS_EXPR || icode == MULT_EXPR)
	    && TREE_CODE (itype) == INTEGER_TYPE
	    && TREE_CODE (otype) == INTEGER_TYPE
	    && TYPE_PRECISION (otype) > TYPE_PRECISION (itype))
	  {
	    tree op0 = TREE_OPERAND (inner, 0), op1 = TREE_OPERAND (inner, 1);

	    /* If inner type has undefined overflow behavior, fold conversion
	       for below two cases:
		 (T1)(X *+- CST) -> (T1)X *+- (T1)CST
		 (T1)(X + X)     -> (T1)X + (T1)X.  */
	    if (TYPE_OVERFLOW_UNDEFINED (itype)
		&& (TREE_CODE (op1) == INTEGER_CST
		    || (icode == PLUS_EXPR && operand_equal_p (op0, op1, 0))))
	      {
		op0 = fold_convert (otype, op0);
		op1 = fold_convert (otype, op1);
		expr = fold_build2 (icode, otype, op0, op1);
		tree_to_aff_combination (expr, type, comb);
		return;
	      }
	    wide_int minv, maxv;
	    /* If inner type has wrapping overflow behavior, fold conversion
	       for below case:
		 (T1)(X - CST) -> (T1)X - (T1)CST
	       if X - CST doesn't overflow by range information.  Also handle
	       (T1)(X + CST) as (T1)(X - (-CST)).  */
	    if (TYPE_UNSIGNED (itype)
		&& TYPE_OVERFLOW_WRAPS (itype)
		&& TREE_CODE (op0) == SSA_NAME
		&& TREE_CODE (op1) == INTEGER_CST
		&& icode != MULT_EXPR
		&& get_range_info (op0, &minv, &maxv) == VR_RANGE)
	      {
		if (icode == PLUS_EXPR)
		  op1 = wide_int_to_tree (itype, -wi::to_wide (op1));
		if (wi::geu_p (minv, wi::to_wide (op1)))
		  {
		    op0 = fold_convert (otype, op0);
		    op1 = fold_convert (otype, op1);
		    expr = fold_build2 (MINUS_EXPR, otype, op0, op1);
		    tree_to_aff_combination (expr, type, comb);
		    return;
		  }
	      }
	  }
      }
      break;

    default:
      {
	if (poly_int_tree_p (expr))
	  {
	    aff_combination_const (comb, type, wi::to_poly_widest (expr));
	    return;
	  }
	break;
      }
    }

  aff_combination_elt (comb, type, expr);
}
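
The BIT_NOT_EXPR case uses the identity ~x == -x - 1, which holds in two's complement arithmetic. A quick check over a range of values:

#include <stdio.h>

int
main (void)
{
  int x;

  for (x = -1000; x <= 1000; x++)
    if (~x != -x - 1)
      {
	printf ("mismatch at %d\n", x);
	return 1;
      }
  printf ("identity holds\n");
  return 0;
}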
Example #30
static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
		       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If the two types don't have the same signedness, precision, and
     mode, then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
	  || TREE_CODE (type1) == ERROR_MARK
	  || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
	  || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
	  || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
	return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
			   expr1->ops.binary.opnd0, 0)
	  && operand_equal_p (expr0->ops.binary.opnd1,
			      expr1->ops.binary.opnd1, 0))
	return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
	      && operand_equal_p (expr0->ops.binary.opnd0,
				  expr1->ops.binary.opnd1, 0)
	      && operand_equal_p (expr0->ops.binary.opnd1,
				  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
	  || !operand_equal_p (expr0->ops.ternary.opnd2,
			       expr1->ops.ternary.opnd2, 0))
	return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
			   expr1->ops.ternary.opnd0, 0)
	  && operand_equal_p (expr0->ops.ternary.opnd1,
			      expr1->ops.ternary.opnd1, 0))
	return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
	      && operand_equal_p (expr0->ops.ternary.opnd0,
				  expr1->ops.ternary.opnd1, 0)
	      && operand_equal_p (expr0->ops.ternary.opnd1,
				  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

	if (stmt_could_throw_p (expr0->ops.call.fn_from))
	  {
	    int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
	    int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
	    if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
	      return false;
	  }

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}
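
The binary and ternary cases above accept either operand order when the operation is commutative: first try the operands as given, then the swapped order. The core of that check in miniature, with ints standing in for operand trees:

#include <stdbool.h>
#include <stdio.h>

static bool
toy_binary_equal_p (bool commutative, int a0, int a1, int b0, int b1)
{
  if (a0 == b0 && a1 == b1)
    return true;
  /* For commutative ops, allow the other order.  */
  return commutative && a0 == b1 && a1 == b0;
}

int
main (void)
{
  printf ("%d\n", toy_binary_equal_p (true, 1, 2, 2, 1));   /* Prints 1.  */
  printf ("%d\n", toy_binary_equal_p (false, 1, 2, 2, 1));  /* Prints 0.  */
  return 0;
}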