Example #1
static void
prop_phis (basic_block b)
{
  gimple_stmt_iterator psi;
  gimple_seq phis = phi_nodes (b);

  for (psi = gsi_start (phis); !gsi_end_p (psi); )
    {
      gimple phi = gsi_stmt (psi);
      tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0);

      gcc_assert (gimple_phi_num_args (phi) == 1);

      if (!is_gimple_reg (def))
	{
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  gimple stmt;

	  FOR_EACH_IMM_USE_STMT (stmt, iter, def)
	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	      SET_USE (use_p, use);
	}
      else
	replace_uses_by (def, use);

      remove_phi_node (&psi, true);
    }
}
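Note: the loop above has no gsi_next in its increment clause because remove_phi_node advances the iterator itself. A minimal sketch of the same idiom (not GCC code; remove_single_arg_phis is a hypothetical helper that tolerates multi-argument PHIs instead of asserting):

static void
remove_single_arg_phis (basic_block bb)
{
  /* The iterator is advanced either by remove_phi_node or by an
     explicit gsi_next, never by the for-loop header.  */
  for (gphi_iterator psi = gsi_start_phis (bb); !gsi_end_p (psi); )
    {
      gphi *phi = psi.phi ();
      if (gimple_phi_num_args (phi) == 1)
	{
	  /* Rewrite all uses of the PHI result to its only argument,
	     then delete the PHI and release its SSA name.  */
	  replace_uses_by (gimple_phi_result (phi),
			   gimple_phi_arg_def (phi, 0));
	  remove_phi_node (&psi, true);
	}
      else
	gsi_next (&psi);
    }
}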
Example #2
edge
ssa_redirect_edge (edge e, basic_block dest)
{
  gphi_iterator gsi;
  gphi *phi;

  redirect_edge_var_map_clear (e);

  /* Remove the appropriate PHI arguments in E's destination block.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree def;
      source_location locus;

      phi = gsi.phi ();
      def = gimple_phi_arg_def (phi, e->dest_idx);
      locus = gimple_phi_arg_location (phi, e->dest_idx);

      if (def == NULL_TREE)
	continue;

      redirect_edge_var_map_add (e, gimple_phi_result (phi), def, locus);
    }

  e = redirect_edge_succ_nodup (e, dest);

  return e;
}
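A sketch of the typical caller pattern, assuming the usual pairing with flush_pending_stmts, which replays the arguments saved by redirect_edge_var_map_add onto the PHIs of the new destination (redirect_edge_and_flush is a hypothetical wrapper):

static edge
redirect_edge_and_flush (edge e, basic_block dest)
{
  /* Save the PHI arguments for E, redirect it, then re-add the
     saved arguments on the PHIs of the new destination.  */
  e = ssa_redirect_edge (e, dest);
  flush_pending_stmts (e);
  return e;
}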
Example #3
tree
degenerate_phi_result (gimple phi)
{
  tree lhs = gimple_phi_result (phi);
  tree val = NULL;
  size_t i;

  /* Ignoring arguments which are the same as LHS, if all the remaining
     arguments are the same, then the PHI is a degenerate and has the
     value of that common argument.  */
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);

      if (arg == lhs)
	continue;
      else if (!arg)
	break;
      else if (!val)
	val = arg;
      else if (arg == val)
	continue;
      /* We bring in some of operand_equal_p not only to speed things
	 up, but also to avoid crashing when dereferencing the type of
	 a released SSA name.  */
      else if (TREE_CODE (val) != TREE_CODE (arg)
	       || TREE_CODE (val) == SSA_NAME
	       || !operand_equal_p (arg, val, 0))
	break;
    }
  return (i == gimple_phi_num_args (phi) ? val : NULL);
}
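A sketch of how a caller might act on the returned value (try_fold_degenerate_phi is a hypothetical helper; it reuses may_propagate_copy, replace_uses_by and remove_phi_node as seen in the other examples):

static bool
try_fold_degenerate_phi (gimple_stmt_iterator *psi)
{
  gimple phi = gsi_stmt (*psi);
  tree lhs = gimple_phi_result (phi);
  tree val = degenerate_phi_result (phi);

  /* Nothing to do unless all non-LHS arguments agree and the copy
     is legal to propagate.  */
  if (val == NULL_TREE || !may_propagate_copy (lhs, val))
    return false;

  replace_uses_by (lhs, val);
  remove_phi_node (psi, true);
  return true;
}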
Example #4
static void
output_phi (struct output_block *ob, gimple phi)
{
  unsigned i, len = gimple_phi_num_args (phi);

  streamer_write_record_start (ob, lto_gimple_code_to_tag (GIMPLE_PHI));
  streamer_write_uhwi (ob, SSA_NAME_VERSION (PHI_RESULT (phi)));

  for (i = 0; i < len; i++)
    {
      stream_write_tree (ob, gimple_phi_arg_def (phi, i), true);
      streamer_write_uhwi (ob, gimple_phi_arg_edge (phi, i)->src->index);
      lto_output_location (ob, gimple_phi_arg_location (phi, i));
    }
}
Example #5
static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;
  if (gimple_seq_singleton_p (seq))
    return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));
      /* If the PHI arguments are equal then we can skip this PHI. */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
				       gimple_phi_arg_def (p, e1->dest_idx)))
	continue;

      /* If we already found a PHI whose arguments for these two edges
	 differ, then there is more than one such PHI; return NULL.  */
      if (phi)
	return NULL;

      phi = p;
    }
  return phi;
}
static bool
check_final_bb (void)
{
  gimple_stmt_iterator gsi;

  info.phi_count = 0;
  for (gsi = gsi_start_phis (info.final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      unsigned int i;

      info.phi_count++;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
	{
	  basic_block bb = gimple_phi_arg_edge (phi, i)->src;

	  if (bb == info.switch_bb
	      || (single_pred_p (bb) && single_pred (bb) == info.switch_bb))
	    {
	      tree reloc, val;

	      val = gimple_phi_arg_def (phi, i);
	      if (!is_gimple_ip_invariant (val))
		{
		  info.reason = "   Non-invariant value from a case\n";
		  return false; /* Non-invariant argument.  */
		}
	      reloc = initializer_constant_valid_p (val, TREE_TYPE (val));
	      if ((flag_pic && reloc != null_pointer_node)
		  || (!flag_pic && reloc == NULL_TREE))
		{
		  if (reloc)
		    info.reason
		      = "   Value from a case would need runtime relocations\n";
		  else
		    info.reason
		      = "   Value from a case is not a valid initializer\n";
		  return false;
		}
	    }
	}
    }

  return true;
}
Example #7
DEBUG_FUNCTION void
verify_ssaname_freelists (struct function *fun)
{
  if (!gimple_in_ssa_p (fun))
    return;

  bitmap names_in_il = BITMAP_ALLOC (NULL);

  /* Walk the entire IL noting every SSA_NAME we see.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    {
      tree t;
      /* First note the result and arguments of PHI nodes.  */
      for (gphi_iterator gsi = gsi_start_phis (bb);
	   !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  t = gimple_phi_result (phi);
	  bitmap_set_bit (names_in_il, SSA_NAME_VERSION (t));

	  for (unsigned int i = 0; i < gimple_phi_num_args (phi); i++)
	    {
	      t = gimple_phi_arg_def (phi, i);
	      if (TREE_CODE (t) == SSA_NAME)
		bitmap_set_bit (names_in_il, SSA_NAME_VERSION (t));
	    }
	}

      /* Then note the operands of each statement.  */
      for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
	   !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  ssa_op_iter iter;
	  gimple *stmt = gsi_stmt (gsi);
	  FOR_EACH_SSA_TREE_OPERAND (t, stmt, iter, SSA_OP_ALL_OPERANDS)
	    bitmap_set_bit (names_in_il, SSA_NAME_VERSION (t));
	}
    }
Example #8
static enum ssa_prop_result
copy_prop_visit_phi_node (gimple phi)
{
  enum ssa_prop_result retval;
  unsigned i;
  prop_value_t phi_val = { NULL_TREE };

  tree lhs = gimple_phi_result (phi);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      prop_value_t *arg_val;
      tree arg_value;
      tree arg = gimple_phi_arg_def (phi, i);
      edge e = gimple_phi_arg_edge (phi, i);

      /* We don't care about values flowing through non-executable
	 edges.  */
      if (!(e->flags & EDGE_EXECUTABLE))
	continue;

      /* Names that flow through abnormal edges cannot be used to
	 derive copies.  */
      if (TREE_CODE (arg) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (arg))
	{
	  phi_val.value = lhs;
	  break;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "\tArgument #%d: ", i);
	  dump_copy_of (dump_file, arg);
	  fprintf (dump_file, "\n");
	}

      if (TREE_CODE (arg) == SSA_NAME)
	{
	  arg_val = get_copy_of_val (arg);

	  /* If we didn't visit the definition of arg yet treat it as
	     UNDEFINED.  This also handles PHI arguments that are the
	     same as lhs.  We'll come here again.  */
	  if (!arg_val->value)
	    continue;

	  arg_value = arg_val->value;
	}
      else
	arg_value = valueize_val (arg);

      /* Avoid copy propagation from an inner into an outer loop.
	 Otherwise, this may move loop variant variables outside of
	 their loops and prevent coalescing opportunities.  If the
	 value was loop invariant, it will be hoisted by LICM and
	 exposed for copy propagation.
	 ???  The value will always be loop invariant.
	 In loop-closed SSA form do not copy-propagate through
	 PHI nodes in blocks with a loop exit edge predecessor.  */
      if (current_loops
	  && TREE_CODE (arg_value) == SSA_NAME
	  && (loop_depth_of_name (arg_value) > loop_depth_of_name (lhs)
	      || (loops_state_satisfies_p (LOOP_CLOSED_SSA)
		  && loop_exit_edge_p (e->src->loop_father, e))))
	{
	  phi_val.value = lhs;
	  break;
	}

      /* If the LHS didn't have a value yet, make it a copy of the
	 first argument we find.   */
      if (phi_val.value == NULL_TREE)
	{
	  phi_val.value = arg_value;
	  continue;
	}

      /* If PHI_VAL and ARG don't have a common copy-of chain, then
	 this PHI node cannot be a copy operation.  */
      if (phi_val.value != arg_value
	  && !operand_equal_p (phi_val.value, arg_value, 0))
	{
	  phi_val.value = lhs;
	  break;
	}
    }

  if (phi_val.value
      && may_propagate_copy (lhs, phi_val.value)
      && set_copy_of_val (lhs, phi_val.value))
    retval = (phi_val.value != lhs) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
  else
    retval = SSA_PROP_NOT_INTERESTING;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "PHI node ");
      dump_copy_of (dump_file, lhs);
      fprintf (dump_file, "\nTelling the propagator to ");
      if (retval == SSA_PROP_INTERESTING)
	fprintf (dump_file, "add SSA edges out of this PHI and continue.");
      else if (retval == SSA_PROP_VARYING)
	fprintf (dump_file, "add SSA edges out of this PHI and never visit again.");
      else
	fprintf (dump_file, "do nothing with SSA edges and keep iterating.");
      fprintf (dump_file, "\n\n");
    }

  return retval;
}
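The per-argument handling above is a meet operation over a copy-of lattice. A standalone sketch of that meet rule, not taken from GCC, with UNDEFINED represented by NULL_TREE and VARYING by the PHI result itself:

static tree
meet_copy_of (tree lhs, tree cur, tree arg_value)
{
  /* UNDEFINED meets anything to that thing.  */
  if (cur == NULL_TREE)
    return arg_value;
  /* Two agreeing copy sources stay a copy.  */
  if (cur == arg_value || operand_equal_p (cur, arg_value, 0))
    return cur;
  /* Conflicting sources: the PHI is not a copy (VARYING).  */
  return lhs;
}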
Example #9
static enum ssa_prop_result
copy_prop_visit_phi_node (gimple phi)
{
  enum ssa_prop_result retval;
  unsigned i;
  prop_value_t phi_val = { 0, NULL_TREE };

  tree lhs = gimple_phi_result (phi);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
      fprintf (dump_file, "\n\n");
    }

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      prop_value_t *arg_val;
      tree arg = gimple_phi_arg_def (phi, i);
      edge e = gimple_phi_arg_edge (phi, i);

      /* We don't care about values flowing through non-executable
	 edges.  */
      if (!(e->flags & EDGE_EXECUTABLE))
	continue;

      /* Constants in the argument list never generate a useful copy.
	 Similarly, names that flow through abnormal edges cannot be
	 used to derive copies.  */
      if (TREE_CODE (arg) != SSA_NAME || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (arg))
	{
	  phi_val.value = lhs;
	  break;
	}

      /* Avoid copy propagation from an inner into an outer loop.
	 Otherwise, this may move loop variant variables outside of
	 their loops and prevent coalescing opportunities.  If the
	 value was loop invariant, it will be hoisted by LICM and
	 exposed for copy propagation.  Not a problem for virtual
	 operands though.  */
      if (is_gimple_reg (lhs)
	  && loop_depth_of_name (arg) > loop_depth_of_name (lhs))
	{
	  phi_val.value = lhs;
	  break;
	}

      /* If the LHS appears in the argument list, ignore it.  It is
	 irrelevant as a copy.  */
      if (arg == lhs || get_last_copy_of (arg) == lhs)
	continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "\tArgument #%d: ", i);
	  dump_copy_of (dump_file, arg);
	  fprintf (dump_file, "\n");
	}

      arg_val = get_copy_of_val (arg);

      /* If the LHS didn't have a value yet, make it a copy of the
	 first argument we find.  Notice that while we make the LHS be
	 a copy of the argument itself, we take the memory reference
	 from the argument's value so that we can compare it to the
	 memory reference of all the other arguments.  */
      if (phi_val.value == NULL_TREE)
	{
	  phi_val.value = arg_val->value ? arg_val->value : arg;
	  continue;
	}

      /* If PHI_VAL and ARG don't have a common copy-of chain, then
	 this PHI node cannot be a copy operation.  Also, if we are
	 copy propagating stores and these two arguments came from
	 different memory references, they cannot be considered
	 copies.  */
      if (get_last_copy_of (phi_val.value) != get_last_copy_of (arg))
	{
	  phi_val.value = lhs;
	  break;
	}
    }

  if (phi_val.value && may_propagate_copy (lhs, phi_val.value)
      && set_copy_of_val (lhs, phi_val.value))
    retval = (phi_val.value != lhs) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
  else
    retval = SSA_PROP_NOT_INTERESTING;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nPHI node ");
      dump_copy_of (dump_file, lhs);
      fprintf (dump_file, "\nTelling the propagator to ");
      if (retval == SSA_PROP_INTERESTING)
	fprintf (dump_file, "add SSA edges out of this PHI and continue.");
      else if (retval == SSA_PROP_VARYING)
	fprintf (dump_file, "add SSA edges out of this PHI and never visit again.");
      else
	fprintf (dump_file, "do nothing with SSA edges and keep iterating.");
      fprintf (dump_file, "\n\n");
    }

  return retval;
}
Example #10
expr_hash_elt::expr_hash_elt (gimple *stmt, tree orig_lhs)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = this->expr ();

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
	  expr->kind = EXPR_SINGLE;
	  expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
	  expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
	  break;
        case GIMPLE_UNARY_RHS:
	  expr->kind = EXPR_UNARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  if (CONVERT_EXPR_CODE_P (subcode))
	    subcode = NOP_EXPR;
	  expr->ops.unary.op = subcode;
	  expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
	  break;
        case GIMPLE_BINARY_RHS:
	  expr->kind = EXPR_BINARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  expr->ops.binary.op = subcode;
	  expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
	  expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
	  break;
        case GIMPLE_TERNARY_RHS:
	  expr->kind = EXPR_TERNARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  expr->ops.ternary.op = subcode;
	  expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
	  expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
	  expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
	  break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
    {
      size_t nargs = gimple_call_num_args (call_stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (call_stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (call_stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = call_stmt;

      if (gimple_call_flags (call_stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (call_stmt, i);
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    {
      expr->type = TREE_TYPE (gimple_switch_index (swtch_stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (swtch_stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  m_lhs = orig_lhs;
  m_vop = gimple_vuse (stmt);
  m_hash = avail_expr_hash (this);
  m_stamp = this;
}
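For the GIMPLE_PHI case, two hash elements compare equal when their argument vectors match pairwise. A hypothetical sketch of that comparison (the real check lives in the pass's expression-equality code, so this only illustrates the idea):

static bool
phi_ops_equal_p (tree *args0, size_t nargs0, tree *args1, size_t nargs1)
{
  if (nargs0 != nargs1)
    return false;
  /* Compare the argument vectors element by element.  */
  for (size_t i = 0; i < nargs0; i++)
    if (!operand_equal_p (args0[i], args1[i], 0))
      return false;
  return true;
}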
/* Look for PHI nodes which feed statements in the same block where
   the value of the PHI node implies the statement is erroneous.

   For example, a NULL PHI arg value which then feeds a pointer
   dereference.

   When found, isolate and optimize the path associated with the PHI
   argument feeding the erroneous statement.  */
static void
find_implicit_erroneous_behaviour (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator si;

      /* Out of an abundance of caution, do not isolate paths to a
	 block where the block has any abnormal outgoing edges.

	 We might be able to relax this in the future.  We have to detect
	 when we have to split the block with the NULL dereference and
	 the trap we insert.  We have to preserve abnormal edges out
	 of the isolated block which in turn means updating PHIs at
	 the targets of those abnormal outgoing edges.  */
      if (has_abnormal_or_eh_outgoing_edge_p (bb))
	continue;

      /* First look for a PHI which sets a pointer to NULL and which
 	 is then dereferenced within BB.  This is somewhat overly
	 conservative, but probably catches most of the interesting
	 cases.   */
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple phi = gsi_stmt (si);
	  tree lhs = gimple_phi_result (phi);

	  /* If the result is not a pointer, then there is no need to
 	     examine the arguments.  */
	  if (!POINTER_TYPE_P (TREE_TYPE (lhs)))
	    continue;

	  /* PHI produces a pointer result.  See if any of the PHI's
	     arguments are NULL.

	     When we remove an edge, we want to reprocess the current
	     index, hence the ugly way we update I for each iteration.  */
	  basic_block duplicate = NULL;
	  for (unsigned i = 0, next_i = 0;
	       i < gimple_phi_num_args (phi);
	       i = next_i)
	    {
	      tree op = gimple_phi_arg_def (phi, i);

	      next_i = i + 1;

	      if (!integer_zerop (op))
		continue;

	      edge e = gimple_phi_arg_edge (phi, i);
	      imm_use_iterator iter;
	      gimple use_stmt;

	      /* We've got a NULL PHI argument.  Now see if the
 	         PHI's result is dereferenced within BB.  */
	      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	        {
	          /* We only care about uses in BB.  Catching cases in
		     other blocks would require more complex path
		     isolation code.  */
		  if (gimple_bb (use_stmt) != bb)
		    continue;

		  if (infer_nonnull_range (use_stmt, lhs,
					   flag_isolate_erroneous_paths_dereference,
					   flag_isolate_erroneous_paths_attribute))

		    {
		      duplicate = isolate_path (bb, duplicate,
						e, use_stmt, lhs);

		      /* When we remove an incoming edge, we need to
			 reprocess the Ith element.  */
		      next_i = i;
		      cfg_altered = true;
		    }
		}
	    }
	}
    }
}
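A hedged sketch of the predicate used above; the real has_abnormal_or_eh_outgoing_edge_p is a static helper in the same GCC file, so this only approximates its intent:

static bool
has_abnormal_or_eh_outgoing_edge_sketch (basic_block bb)
{
  edge e;
  edge_iterator ei;

  /* True if any outgoing edge of BB is abnormal or an EH edge.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    if (e->flags & (EDGE_ABNORMAL | EDGE_EH))
      return true;
  return false;
}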
Example #12
static void
shrink_wrap_one_built_in_call_with_conds (gcall *bi_call, vec <gimple *> conds,
					  unsigned int nconds)
{
  gimple_stmt_iterator bi_call_bsi;
  basic_block bi_call_bb, join_tgt_bb, guard_bb;
  edge join_tgt_in_edge_from_call, join_tgt_in_edge_fall_thru;
  edge bi_call_in_edge0, guard_bb_in_edge;
  unsigned tn_cond_stmts;
  unsigned ci;
  gimple *cond_expr = NULL;
  gimple *cond_expr_start;

  /* The cfg we want to create looks like this:

	   [guard n-1]         <- guard_bb (old block)
	     |    \
	     | [guard n-2]                   }
	     |    / \                        }
	     |   /  ...                      } new blocks
	     |  /  [guard 0]                 }
	     | /    /   |                    }
	    [ call ]    |     <- bi_call_bb  }
	     | \        |
	     |  \       |
	     |   [ join ]     <- join_tgt_bb (old iff call must end bb)
	     |
	 possible EH edges (only if [join] is old)

     When [join] is new, the immediate dominators for these blocks are:

     1. [guard n-1]: unchanged
     2. [call]: [guard n-1]
     3. [guard m]: [guard m+1] for 0 <= m <= n-2
     4. [join]: [guard n-1]

     We punt for the more complex case of [join] being old and
     simply free the dominance info.  We also punt on postdominators,
     which aren't expected to be available at this point anyway.  */
  bi_call_bb = gimple_bb (bi_call);

  /* Now find the join target bb -- split bi_call_bb if needed.  */
  if (stmt_ends_bb_p (bi_call))
    {
      /* We checked that there was a fallthrough edge in
	 can_guard_call_p.  */
      join_tgt_in_edge_from_call = find_fallthru_edge (bi_call_bb->succs);
      gcc_assert (join_tgt_in_edge_from_call);
      /* We don't want to handle PHIs.  */
      if (EDGE_COUNT (join_tgt_in_edge_from_call->dest->preds) > 1)
	join_tgt_bb = split_edge (join_tgt_in_edge_from_call);
      else
	{
	  join_tgt_bb = join_tgt_in_edge_from_call->dest;
	  /* We may have degenerate PHIs in the destination.  Propagate
	     those out.  */
	  for (gphi_iterator i = gsi_start_phis (join_tgt_bb); !gsi_end_p (i);)
	    {
	      gphi *phi = i.phi ();
	      replace_uses_by (gimple_phi_result (phi),
			       gimple_phi_arg_def (phi, 0));
	      remove_phi_node (&i, true);
	    }
	}
    }
  else
    {
      join_tgt_in_edge_from_call = split_block (bi_call_bb, bi_call);
      join_tgt_bb = join_tgt_in_edge_from_call->dest;
    }

  bi_call_bsi = gsi_for_stmt (bi_call);

  /* Now it is time to insert the first conditional expression
     into bi_call_bb and split this bb so that bi_call is
     shrink-wrapped.  */
  tn_cond_stmts = conds.length ();
  cond_expr = NULL;
  cond_expr_start = conds[0];
  for (ci = 0; ci < tn_cond_stmts; ci++)
    {
      gimple *c = conds[ci];
      gcc_assert (c || ci != 0);
      if (!c)
        break;
      gsi_insert_before (&bi_call_bsi, c, GSI_SAME_STMT);
      cond_expr = c;
    }
  ci++;
  gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND);

  typedef std::pair<edge, edge> edge_pair;
  auto_vec<edge_pair, 8> edges;

  bi_call_in_edge0 = split_block (bi_call_bb, cond_expr);
  bi_call_in_edge0->flags &= ~EDGE_FALLTHRU;
  bi_call_in_edge0->flags |= EDGE_FALSE_VALUE;
  guard_bb = bi_call_bb;
  bi_call_bb = bi_call_in_edge0->dest;
  join_tgt_in_edge_fall_thru = make_edge (guard_bb, join_tgt_bb,
                                          EDGE_TRUE_VALUE);

  edges.reserve (nconds);
  edges.quick_push (edge_pair (bi_call_in_edge0, join_tgt_in_edge_fall_thru));

  /* Code generation for the rest of the conditions.  */
  for (unsigned int i = 1; i < nconds; ++i)
    {
      unsigned ci0;
      edge bi_call_in_edge;
      gimple_stmt_iterator guard_bsi = gsi_for_stmt (cond_expr_start);
      ci0 = ci;
      cond_expr_start = conds[ci0];
      for (; ci < tn_cond_stmts; ci++)
        {
	  gimple *c = conds[ci];
          gcc_assert (c || ci != ci0);
          if (!c)
            break;
          gsi_insert_before (&guard_bsi, c, GSI_SAME_STMT);
          cond_expr = c;
        }
      ci++;
      gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND);
      guard_bb_in_edge = split_block (guard_bb, cond_expr);
      guard_bb_in_edge->flags &= ~EDGE_FALLTHRU;
      guard_bb_in_edge->flags |= EDGE_TRUE_VALUE;

      bi_call_in_edge = make_edge (guard_bb, bi_call_bb, EDGE_FALSE_VALUE);
      edges.quick_push (edge_pair (bi_call_in_edge, guard_bb_in_edge));
    }

  /* Now update the probability and profile information, processing the
     guards in order of execution.

     There are two approaches we could take here.  On the one hand we
     could assign a probability of X to the call block and distribute
     that probability among its incoming edges.  On the other hand we
     could assign a probability of X to each individual call edge.

     The choice only affects calls that have more than one condition.
     In those cases, the second approach would give the call block
     a greater probability than the first.  However, the difference
     is only small, and our chosen X is a pure guess anyway.

     Here we take the second approach because it's slightly simpler
     and because it's easy to see that it doesn't lose profile counts.  */
  bi_call_bb->count = profile_count::zero ();
  while (!edges.is_empty ())
    {
      edge_pair e = edges.pop ();
      edge call_edge = e.first;
      edge nocall_edge = e.second;
      basic_block src_bb = call_edge->src;
      gcc_assert (src_bb == nocall_edge->src);

      call_edge->probability = profile_probability::very_unlikely ();
      nocall_edge->probability = profile_probability::always ()
				 - call_edge->probability;

      bi_call_bb->count += call_edge->count ();

      if (nocall_edge->dest != join_tgt_bb)
	nocall_edge->dest->count = src_bb->count - bi_call_bb->count;
    }

  if (dom_info_available_p (CDI_DOMINATORS))
    {
      /* The split_blocks leave [guard 0] as the immediate dominator
	 of [call] and [call] as the immediate dominator of [join].
	 Fix them up.  */
      set_immediate_dominator (CDI_DOMINATORS, bi_call_bb, guard_bb);
      set_immediate_dominator (CDI_DOMINATORS, join_tgt_bb, guard_bb);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      location_t loc;
      loc = gimple_location (bi_call);
      fprintf (dump_file,
               "%s:%d: note: function call is shrink-wrapped"
               " into error conditions.\n",
               LOCATION_FILE (loc), LOCATION_LINE (loc));
    }
}
Example #13
static bool
is_feasible_trace (basic_block bb)
{
    basic_block pred1 = EDGE_PRED (bb, 0)->src;
    basic_block pred2 = EDGE_PRED (bb, 1)->src;
    int num_stmts_in_join = count_stmts_in_block (bb);
    int num_stmts_in_pred1 = count_stmts_in_block (pred1);
    int num_stmts_in_pred2 = count_stmts_in_block (pred2);

    /* This is meant to catch cases that are likely opportunities for
       if-conversion.  Essentially we look for the case where
       BB's predecessors are both single statement blocks where
       the output of that statement feeds the same PHI in BB.  */
    if (num_stmts_in_pred1 == 1 && num_stmts_in_pred2 == 1)
    {
        gimple *stmt1 = last_and_only_stmt (pred1);
        gimple *stmt2 = last_and_only_stmt (pred2);

        if (stmt1 && stmt2
                && gimple_code (stmt1) == GIMPLE_ASSIGN
                && gimple_code (stmt2) == GIMPLE_ASSIGN)
        {
            enum tree_code code1 = gimple_assign_rhs_code (stmt1);
            enum tree_code code2 = gimple_assign_rhs_code (stmt2);

            if (!poor_ifcvt_candidate_code (code1)
                    && !poor_ifcvt_candidate_code (code2))
            {
                tree lhs1 = gimple_assign_lhs (stmt1);
                tree lhs2 = gimple_assign_lhs (stmt2);
                gimple_stmt_iterator gsi;
                for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
                {
                    gimple *phi = gsi_stmt (gsi);
                    if ((gimple_phi_arg_def (phi, 0) == lhs1
                            && gimple_phi_arg_def (phi, 1) == lhs2)
                            || (gimple_phi_arg_def (phi, 1) == lhs1
                                && gimple_phi_arg_def (phi, 0) == lhs2))
                    {
                        if (dump_file && (dump_flags & TDF_DETAILS))
                            fprintf (dump_file,
                                     "Block %d appears to be a join point for "
                                     "if-convertable diamond.\n",
                                     bb->index);
                        return false;
                    }
                }
            }
        }
    }

    /* We may want something here which looks at dataflow and tries
       to guess if duplication of BB is likely to result in simplification
       of instructions in BB in either the original or the duplicate.  */

    /* Hard upper limit on the number of statements to copy.  */
    if (num_stmts_in_join
            >= PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS))
        return false;

    return true;
}
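A hedged sketch of the count_stmts_in_block helper the function relies on; the real one is a static function elsewhere in the same GCC file, and this approximation simply skips labels and debug statements:

static int
count_stmts_in_block_sketch (basic_block bb)
{
  int num_stmts = 0;

  for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      /* Ignore labels and debug statements when sizing the block.  */
      if (!is_gimple_debug (stmt) && gimple_code (stmt) != GIMPLE_LABEL)
	num_stmts++;
    }
  return num_stmts;
}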
/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.  */
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim)
{
    basic_block bb;
    basic_block *bb_order;
    unsigned n, i;
    bool cfgchanged = false;
    struct pointer_set_t *nontrap = 0;

    if (do_store_elim)
    {
        condstoretemp = NULL_TREE;
        /* Calculate the set of non-trapping memory accesses.  */
        nontrap = get_non_trapping ();
    }

    /* Search every basic block for COND_EXPR we may be able to optimize.

       We walk the blocks in order that guarantees that a block with
       a single predecessor is processed before the predecessor.
       This ensures that we collapse inner ifs before visiting the
       outer ones, and also that we do not try to visit a removed
       block.  */
    bb_order = blocks_in_phiopt_order ();
    n = n_basic_blocks - NUM_FIXED_BLOCKS;

    for (i = 0; i < n; i++)
    {
        gimple cond_stmt, phi;
        basic_block bb1, bb2;
        edge e1, e2;
        tree arg0, arg1;

        bb = bb_order[i];

        cond_stmt = last_stmt (bb);
        /* Check to see if the last statement is a GIMPLE_COND.  */
        if (!cond_stmt
                || gimple_code (cond_stmt) != GIMPLE_COND)
            continue;

        e1 = EDGE_SUCC (bb, 0);
        bb1 = e1->dest;
        e2 = EDGE_SUCC (bb, 1);
        bb2 = e2->dest;

        /* We cannot do the optimization on abnormal edges.  */
        if ((e1->flags & EDGE_ABNORMAL) != 0
                || (e2->flags & EDGE_ABNORMAL) != 0)
            continue;

        /* Give up if bb1 has no successors, if bb2 does not exist, or
           if bb2 has no successors.  */
        if (EDGE_COUNT (bb1->succs) == 0
                || bb2 == NULL
                || EDGE_COUNT (bb2->succs) == 0)
            continue;

        /* Find the bb which is the fall through to the other.  */
        if (EDGE_SUCC (bb1, 0)->dest == bb2)
            ;
        else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
            basic_block bb_tmp = bb1;
            edge e_tmp = e1;
            bb1 = bb2;
            bb2 = bb_tmp;
            e1 = e2;
            e2 = e_tmp;
        }
        else
            continue;

        e1 = EDGE_SUCC (bb1, 0);

        /* Make sure that bb1 is just a fall through.  */
        if (!single_succ_p (bb1)
                || (e1->flags & EDGE_FALLTHRU) == 0)
            continue;

        /* Also make sure that bb1 has only one predecessor and that it
           is bb.  */
        if (!single_pred_p (bb1)
                || single_pred (bb1) != bb)
            continue;

        if (do_store_elim)
        {
            /* bb1 is the middle block, bb2 the join block, bb the split block,
               e1 the fallthrough edge from bb1 to bb2.  We can't do the
               optimization if the join block has more than two predecessors.  */
            if (EDGE_COUNT (bb2->preds) > 2)
                continue;
            if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
                cfgchanged = true;
        }
        else
        {
            gimple_seq phis = phi_nodes (bb2);

            /* Check to make sure that there is only one PHI node.
               TODO: we could do it with more than one iff the other PHI nodes
               have the same elements for these two edges.  */
            if (! gimple_seq_singleton_p (phis))
                continue;

            phi = gsi_stmt (gsi_start (phis));
            arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
            arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

            /* Something is wrong if we cannot find the arguments in the PHI
               node.  */
            gcc_assert (arg0 != NULL && arg1 != NULL);

            /* Do the replacement of conditional if it can be done.  */
            if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
                cfgchanged = true;
            else if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
                cfgchanged = true;
            else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
                cfgchanged = true;
            else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
                cfgchanged = true;
        }
    }

    free (bb_order);

    if (do_store_elim)
        pointer_set_destroy (nontrap);
    /* If the CFG has changed, we should cleanup the CFG.  */
    if (cfgchanged && do_store_elim)
    {
        /* In cond-store replacement we have added some loads on edges
           and new VOPS (as we moved the store, and created a load).  */
        gsi_commit_edge_inserts ();
        return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
    else if (cfgchanged)
        return TODO_cleanup_cfg;
    return 0;
}
Example #15
/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* Search every basic block for COND_EXPR we may be able to optimize.

     We walk the blocks in order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple cond_stmt;
      gphi *phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
          || gimple_code (cond_stmt) != GIMPLE_COND)
        continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)
       continue;

      /* Give up if bb1 has no successors, if bb2 does not exist, or
	 if bb2 has no successors.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || bb2 == NULL
	  || EDGE_COUNT (bb2->succs) == 0)
        continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
	  std::swap (bb1, bb2);
	  std::swap (e1, e2);
	}
      else if (do_store_elim
	       && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!single_succ_p (bb1)
	      || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
	      || !single_succ_p (bb2)
	      || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
	      || EDGE_COUNT (bb3->preds) != 2)
	    continue;
	  if (cond_if_else_store_replacement (bb1, bb2, bb3))
	    cfgchanged = true;
	  continue;
	}
      else if (do_hoist_loads
		 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
	      && single_succ_p (bb1)
	      && single_succ_p (bb2)
	      && single_pred_p (bb1)
	      && single_pred_p (bb2)
	      && EDGE_COUNT (bb->succs) == 2
	      && EDGE_COUNT (bb3->preds) == 2
	      /* If one edge or the other is dominant, a conditional move
		 is likely to perform worse than the well-predicted branch.  */
	      && !predictable_edge_p (EDGE_SUCC (bb, 0))
	      && !predictable_edge_p (EDGE_SUCC (bb, 1)))
	    hoist_adjacent_loads (bb, bb1, bb2, bb3);
	  continue;
	}
      else
	continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
	  || (e1->flags & EDGE_FALLTHRU) == 0)
        continue;

      /* Also make sure that bb1 has only one predecessor and that it
	 is bb.  */
      if (!single_pred_p (bb1)
          || single_pred (bb1) != bb)
	continue;

      if (do_store_elim)
	{
	  /* bb1 is the middle block, bb2 the join block, bb the split block,
	     e1 the fallthrough edge from bb1 to bb2.  We can't do the
	     optimization if the join block has more than two predecessors.  */
	  if (EDGE_COUNT (bb2->preds) > 2)
	    continue;
	  if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
	    cfgchanged = true;
	}
      else
	{
	  gimple_seq phis = phi_nodes (bb2);
	  gimple_stmt_iterator gsi;
	  bool candorest = true;

	  /* Value replacement can work with more than one PHI
	     so try that first. */
	  for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      phi = as_a <gphi *> (gsi_stmt (gsi));
	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
	      if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
		{
		  candorest = false;
	          cfgchanged = true;
		  break;
		}
	    }

	  if (!candorest)
	    continue;

	  phi = single_non_singleton_phi_for_edges (phis, e1, e2);
	  if (!phi)
	    continue;

	  arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	  arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

	  /* Something is wrong if we cannot find the arguments in the PHI
	     node.  */
	  gcc_assert (arg0 != NULL && arg1 != NULL);

	  if (factor_out_conditional_conversion (e1, e2, phi, arg0, arg1))
	    {
	      /* factor_out_conditional_conversion may create a new PHI in
		 BB2 and eliminate an existing PHI in BB2.  Recompute values
		 that may be affected by that change.  */
	      phis = phi_nodes (bb2);
	      phi = single_non_singleton_phi_for_edges (phis, e1, e2);
	      gcc_assert (phi);
	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
	      gcc_assert (arg0 != NULL && arg1 != NULL);
	    }

	  /* Do the replacement of conditional if it can be done.  */
	  if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	}
    }

  free (bb_order);

  if (do_store_elim)
    delete nontrap;
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
         and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}
Example #16
static inline bool
same_close_phi_node (gphi *p1, gphi *p2)
{
  return operand_equal_p (gimple_phi_arg_def (p1, 0),
			  gimple_phi_arg_def (p2, 0), 0);
}
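A sketch of how such a predicate might be used to drop a duplicate close-PHI (remove_duplicate_close_phi_sketch is a hypothetical helper following the replace-and-remove idiom from the earlier examples):

static bool
remove_duplicate_close_phi_sketch (gphi *keep, gphi_iterator *psi)
{
  gphi *dup = psi->phi ();

  if (!same_close_phi_node (keep, dup))
    return false;

  /* The duplicate computes the same value; rewrite its uses to the
     PHI we keep and delete it.  */
  replace_uses_by (gimple_phi_result (dup), gimple_phi_result (keep));
  remove_phi_node (psi, true);
  return true;
}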