Example #1
void
output_bb (struct output_block *ob, basic_block bb, struct function *fn)
{
  gimple_stmt_iterator bsi = gsi_start_bb (bb);

  streamer_write_record_start (ob,
			       (!gsi_end_p (bsi)) || phi_nodes (bb)
			        ? LTO_bb1
				: LTO_bb0);

  streamer_write_uhwi (ob, bb->index);
  streamer_write_gcov_count (ob, bb->count);
  streamer_write_hwi (ob, bb->frequency);
  streamer_write_hwi (ob, bb->flags);

  if (!gsi_end_p (bsi) || phi_nodes (bb))
    {
      /* Output the statements.  The list of statements is terminated
	 with a zero.  */
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  int region;
	  gimple stmt = gsi_stmt (bsi);

	  output_gimple_stmt (ob, stmt);

	  /* Emit the EH region holding STMT.  */
	  region = lookup_stmt_eh_lp_fn (fn, stmt);
	  if (region != 0)
	    {
	      streamer_write_record_start (ob, LTO_eh_region);
	      streamer_write_hwi (ob, region);
	    }
	  else
	    streamer_write_record_start (ob, LTO_null);
	}

      streamer_write_record_start (ob, LTO_null);

      for (gphi_iterator psi = gsi_start_phis (bb);
	   !gsi_end_p (psi);
	   gsi_next (&psi))
	{
	  gphi *phi = psi.phi ();

	  /* Only emit PHIs for gimple registers.  PHI nodes for .MEM
	     will be filled in on reading when the SSA form is
	     updated.  */
	  if (!virtual_operand_p (gimple_phi_result (phi)))
	    output_phi (ob, phi);
	}

      streamer_write_record_start (ob, LTO_null);
    }
}
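A sketch of the record layout output_bb emits, reconstructed from the writer above (the tags and fields are the ones used in the code; the layout summary itself is my reading, not quoted from GCC documentation):

/* LTO_bb1 or LTO_bb0                          record tag
   uhwi  bb->index
   gcov  bb->count
   hwi   bb->frequency
   hwi   bb->flags
   -- the rest is present only under LTO_bb1 --
   { stmt, (LTO_eh_region + hwi region | LTO_null) }*
   LTO_null                                    end of statements
   { phi }*                                    non-virtual PHIs only
   LTO_null                                    end of PHIs  */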
Example #2
void 
add_phi_node_to_bb (gimple phi, basic_block bb)
{
  gimple_stmt_iterator gsi;
  /* Add the new PHI node to the list of PHI nodes for block BB.  */
  if (phi_nodes (bb) == NULL)
    set_phi_nodes (bb, gimple_seq_alloc ());

  gsi = gsi_last (phi_nodes (bb));
  gsi_insert_after (&gsi, phi, GSI_NEW_STMT);

  /* Associate BB to the PHI node.  */
  gimple_set_bb (phi, bb);

}
Example #3
static void
tree_ssa_phiopt (void)
{
  basic_block bb;
  bool removed_phis = false;

  /* Search every basic block for PHI nodes we may be able to optimize.  */
  FOR_EACH_BB (bb)
    {
      tree arg0, arg1, phi;

      /* We're searching for blocks with one PHI node which has two
	 arguments.  */
      phi = phi_nodes (bb);
      if (phi && PHI_CHAIN (phi) == NULL
	  && PHI_NUM_ARGS (phi) == 2)
	{
	  arg0 = PHI_ARG_DEF (phi, 0);
	  arg1 = PHI_ARG_DEF (phi, 1);

	  /* Do the replacement of conditional if it can be done.  */
	  if (conditional_replacement (bb, phi, arg0, arg1)
	      || value_replacement (bb, phi, arg0, arg1)
	      || abs_replacement (bb, phi, arg0, arg1))
	    {
	      /* We have done the replacement so we need to rebuild the
		 cfg when this pass is complete.  */
	      removed_phis = true;
	    }
	}
    }
}
Example #4
void
cgraph_rebuild_references (void)
{
  basic_block bb;
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  gimple_stmt_iterator gsi;

  ipa_remove_all_references (&node->ref_list);

  node->count = ENTRY_BLOCK_PTR->count;

  FOR_EACH_BB (bb)
    {
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);

	  walk_stmt_load_store_addr_ops (stmt, node, mark_load,
					 mark_store, mark_address);

	}
      for (gsi = gsi_start (phi_nodes (bb)); !gsi_end_p (gsi); gsi_next (&gsi))
	walk_stmt_load_store_addr_ops (gsi_stmt (gsi), node,
				       mark_load, mark_store, mark_address);
    }
  record_eh_tables (node, cfun);
}
Example #5
edge
ssa_redirect_edge (edge e, basic_block dest)
{
  tree phi;
  tree list = NULL, *last = &list;
  tree src, dst, node;

  /* Remove the appropriate PHI arguments in E's destination block.  */
  for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi))
    {
      if (PHI_ARG_DEF (phi, e->dest_idx) == NULL_TREE)
	continue;

      src = PHI_ARG_DEF (phi, e->dest_idx);
      dst = PHI_RESULT (phi);
      node = build_tree_list (dst, src);
      *last = node;
      last = &TREE_CHAIN (node);
    }

  e = redirect_edge_succ_nodup (e, dest);
  PENDING_STMT (e) = list;

  return e;
}
Example #6
static void
prop_phis (basic_block b)
{
  gimple_stmt_iterator psi;
  gimple_seq phis = phi_nodes (b);

  for (psi = gsi_start (phis); !gsi_end_p (psi); )
    {
      gimple phi = gsi_stmt (psi);
      tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0);

      gcc_assert (gimple_phi_num_args (phi) == 1);

      if (!is_gimple_reg (def))
	{
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  gimple stmt;

	  FOR_EACH_IMM_USE_STMT (stmt, iter, def)
	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	      SET_USE (use_p, use);
	}
      else
	replace_uses_by (def, use);

      remove_phi_node (&psi, true);
    }
}
Example #7
void
add_phi_node_to_bb (gimple phi, basic_block bb)
{
  gimple_seq seq = phi_nodes (bb);
  /* Add the new PHI node to the list of PHI nodes for block BB.  */
  if (seq == NULL)
    set_phi_nodes (bb, gimple_seq_alloc_with_stmt (phi));
  else
    {
      gimple_seq_add_stmt (&seq, phi);
      gcc_assert (seq == phi_nodes (bb));
    }

  /* Associate BB to the PHI node.  */
  gimple_set_bb (phi, bb);

}
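A minimal call-site sketch for the helper above, assuming phi was already allocated with one argument slot per edge in bb->preds:

  add_phi_node_to_bb (phi, bb);
  /* The helper has both appended PHI to BB's PHI sequence and set its
     block, so the association can be checked with gimple_bb.  */
  gcc_assert (gimple_bb (phi) == bb);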
Example #8
void
remove_phi_args (edge e)
{
  tree phi;

  for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi))
    remove_phi_arg_num (phi, e->dest_idx);
}
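remove_phi_args is the PHI half of deleting an incoming edge. A sketch of the typical pairing, with the wrapper name being illustrative (remove_edge is the standard CFG routine):

static void
delete_edge_and_phi_args (edge e)
{
  /* Drop E's argument slot from every PHI in E->dest first; removing
     the edge afterwards renumbers the surviving dest_idx values.  */
  remove_phi_args (e);
  remove_edge (e);
}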
Example #9
static void
rewrite_add_phi_arguments (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      tree phi;

      /* Look up the reaching definition for the variable behind each
	 PHI result in the destination block.  */
      for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi))
	get_reaching_def (SSA_NAME_VAR (PHI_RESULT (phi)));
    }
}
Example #10
tree
create_phi_node (tree var, basic_block bb)
{
  tree phi;

  phi = make_phi_node (var, EDGE_COUNT (bb->preds));

  /* Add the new PHI node to the list of PHI nodes for block BB.  */
  PHI_CHAIN (phi) = phi_nodes (bb);
  bb->phi_nodes = phi;

  /* Associate BB to the PHI node.  */
  set_bb_for_stmt (phi, bb);

  return phi;
}
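An illustrative caller for this tree-era create_phi_node, filling in one argument per incoming edge with the add_phi_arg seen in Example #13; reaching_def_for_edge is a hypothetical helper standing in for whatever computes the incoming value:

  tree phi = create_phi_node (var, bb);
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    add_phi_arg (phi, reaching_def_for_edge (e) /* hypothetical */, e);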
Example #11
static bool
candidate_bb_for_phi_optimization (basic_block bb,
				   basic_block *cond_block_p,
				   basic_block *other_block_p)
{
  tree last0, last1;
  basic_block cond_block, other_block;

  /* One of the alternatives must come from a block ending with
     a COND_EXPR.  */
  last0 = last_stmt (EDGE_PRED (bb, 0)->src);
  last1 = last_stmt (EDGE_PRED (bb, 1)->src);
  if (last0 && TREE_CODE (last0) == COND_EXPR)
    {
      cond_block = EDGE_PRED (bb, 0)->src;
      other_block = EDGE_PRED (bb, 1)->src;
    }
  else if (last1 && TREE_CODE (last1) == COND_EXPR)
    {
      other_block = EDGE_PRED (bb, 0)->src;
      cond_block = EDGE_PRED (bb, 1)->src;
    }
  else
    return false;
  
  /* COND_BLOCK must have precisely two successors.  We indirectly
     verify that those successors are BB and OTHER_BLOCK.  */
  if (EDGE_COUNT (cond_block->succs) != 2
      || (EDGE_SUCC (cond_block, 0)->flags & EDGE_ABNORMAL) != 0
      || (EDGE_SUCC (cond_block, 1)->flags & EDGE_ABNORMAL) != 0)
    return false;
  
  /* OTHER_BLOCK must have a single predecessor which is COND_BLOCK,
     OTHER_BLOCK must have a single successor which is BB and
     OTHER_BLOCK must have no PHI nodes.  */
  if (EDGE_COUNT (other_block->preds) != 1
      || EDGE_PRED (other_block, 0)->src != cond_block
      || EDGE_COUNT (other_block->succs) != 1
      || EDGE_SUCC (other_block, 0)->dest != bb
      || phi_nodes (other_block))
    return false;
  
  *cond_block_p = cond_block;
  *other_block_p = other_block;
  /* Everything looks OK.  */
  return true;
}
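How a caller consumes the two out-parameters, as a sketch (the half-diamond reading follows the checks in the function body; do_phi_optimization is a hypothetical stand-in for the actual replacement routines):

  basic_block cond_block, other_block;

  if (candidate_bb_for_phi_optimization (bb, &cond_block, &other_block))
    {
      /* BB is the join point: COND_BLOCK ends in the COND_EXPR with two
	 normal successors, and OTHER_BLOCK is the PHI-free arm that
	 falls through to BB.  */
      do_phi_optimization (bb, cond_block, other_block);
    }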
Example #12
unsigned int
rebuild_cgraph_edges (void)
{
  basic_block bb;
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  gimple_stmt_iterator gsi;

  cgraph_node_remove_callees (node);
  ipa_remove_all_references (&node->ref_list);

  node->count = ENTRY_BLOCK_PTR->count;

  FOR_EACH_BB (bb)
    {
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree decl;

	  if (is_gimple_call (stmt))
	    {
	      int freq = compute_call_stmt_bb_frequency (current_function_decl,
							 bb);
	      decl = gimple_call_fndecl (stmt);
	      if (decl)
		cgraph_create_edge (node, cgraph_get_create_node (decl), stmt,
				    bb->count, freq);
	      else
		cgraph_create_indirect_edge (node, stmt,
					     gimple_call_flags (stmt),
					     bb->count, freq);
	    }
	  walk_stmt_load_store_addr_ops (stmt, node, mark_load,
					 mark_store, mark_address);

	}
      for (gsi = gsi_start (phi_nodes (bb)); !gsi_end_p (gsi); gsi_next (&gsi))
	walk_stmt_load_store_addr_ops (gsi_stmt (gsi), node,
				       mark_load, mark_store, mark_address);
    }
  record_eh_tables (node, cfun);
  gcc_assert (!node->global.inlined_to);

  return 0;
}
Example #13
void
flush_pending_stmts (edge e)
{
  tree phi, arg;

  if (!PENDING_STMT (e))
    return;

  for (phi = phi_nodes (e->dest), arg = PENDING_STMT (e);
       phi;
       phi = PHI_CHAIN (phi), arg = TREE_CHAIN (arg))
    {
      tree def = TREE_VALUE (arg);
      add_phi_arg (phi, def, e);
    }

  PENDING_STMT (e) = NULL;
}
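This is the consumer side of the PENDING_STMT list that ssa_redirect_edge (Example #5) queues up. A sketch of the usual two-step sequence when retargeting an edge, assuming the PHIs in new_dest line up with the queued arguments:

  edge e2 = ssa_redirect_edge (e, new_dest);

  /* E2 now points at NEW_DEST and carries the old PHI arguments as a
     TREE_LIST on PENDING_STMT; replay them as arguments of NEW_DEST's
     PHIs.  */
  flush_pending_stmts (e2);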
Example #14
static bool
init_dont_simulate_again (void)
{
  basic_block bb;
  block_stmt_iterator bsi;
  tree phi;
  bool saw_a_complex_op = false;

  FOR_EACH_BB (bb)
    {
      for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
	DONT_SIMULATE_AGAIN (phi) = !is_complex_reg (PHI_RESULT (phi));

      for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
	{
	  tree orig_stmt, stmt, rhs = NULL;
	  bool dsa;

	  orig_stmt = stmt = bsi_stmt (bsi);

	  /* Most control-altering statements must be initially 
	     simulated, else we won't cover the entire cfg.  */
	  dsa = !stmt_ends_bb_p (stmt);

	  switch (TREE_CODE (stmt))
	    {
	    case RETURN_EXPR:
	      /* We don't care what the lattice value of <retval> is,
		 since it's never used as an input to another computation.  */
	      dsa = true;
	      stmt = TREE_OPERAND (stmt, 0);
	      if (!stmt || TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
		break;
	      /* FALLTHRU */

	    case GIMPLE_MODIFY_STMT:
	      dsa = !is_complex_reg (GIMPLE_STMT_OPERAND (stmt, 0));
	      rhs = GIMPLE_STMT_OPERAND (stmt, 1);
	      break;

	    case COND_EXPR:
	      rhs = TREE_OPERAND (stmt, 0);
	      break;

	    default:
	      break;
	    }

	  if (rhs)
	    switch (TREE_CODE (rhs))
	      {
	      case EQ_EXPR:
	      case NE_EXPR:
		rhs = TREE_OPERAND (rhs, 0);
		/* FALLTHRU */

	      case PLUS_EXPR:
	      case MINUS_EXPR:
	      case MULT_EXPR:
	      case TRUNC_DIV_EXPR:
	      case CEIL_DIV_EXPR:
	      case FLOOR_DIV_EXPR:
	      case ROUND_DIV_EXPR:
	      case RDIV_EXPR:
	      case NEGATE_EXPR:
	      case CONJ_EXPR:
		if (TREE_CODE (TREE_TYPE (rhs)) == COMPLEX_TYPE)
		  saw_a_complex_op = true;
		break;

	      case REALPART_EXPR:
	      case IMAGPART_EXPR:
		/* The total store transformation performed during
		   gimplification creates such uninitialized loads
		   and we need to lower the statement to be able
		   to fix things up.  */
		if (TREE_CODE (TREE_OPERAND (rhs, 0)) == SSA_NAME
		    && ssa_undefined_value_p (TREE_OPERAND (rhs, 0)))
		  saw_a_complex_op = true;
		break;

	      default:
		break;
	      }

	  DONT_SIMULATE_AGAIN (orig_stmt) = dsa;
	}
    }

  return saw_a_complex_op;
}
Example #15
static bool
gimple_find_edge_insert_loc (edge e, gimple_stmt_iterator *gsi,
			     basic_block *new_bb)
{
  basic_block dest, src;
  gimple *tmp;

  dest = e->dest;

  /* If the destination has one predecessor which has no PHI nodes,
     insert there.  Except for the exit block.

     The requirement for no PHI nodes could be relaxed.  Basically we
     would have to examine the PHIs to prove that none of them used
     the value set by the statement we want to insert on E.  That
     hardly seems worth the effort.  */
 restart:
  if (single_pred_p (dest)
      && gimple_seq_empty_p (phi_nodes (dest))
      && dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      *gsi = gsi_start_bb (dest);
      if (gsi_end_p (*gsi))
	return true;

      /* Make sure we insert after any leading labels.  */
      tmp = gsi_stmt (*gsi);
      while (gimple_code (tmp) == GIMPLE_LABEL)
	{
	  gsi_next (gsi);
	  if (gsi_end_p (*gsi))
	    break;
	  tmp = gsi_stmt (*gsi);
	}

      if (gsi_end_p (*gsi))
	{
	  *gsi = gsi_last_bb (dest);
	  return true;
	}
      else
	return false;
    }

  /* If the source has one successor, the edge is not abnormal and
     the last statement does not end a basic block, insert there.
     Except for the entry block.  */
  src = e->src;
  if ((e->flags & EDGE_ABNORMAL) == 0
      && single_succ_p (src)
      && src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
    {
      *gsi = gsi_last_bb (src);
      if (gsi_end_p (*gsi))
	return true;

      tmp = gsi_stmt (*gsi);
      if (!stmt_ends_bb_p (tmp))
	return true;

      switch (gimple_code (tmp))
	{
	case GIMPLE_RETURN:
	case GIMPLE_RESX:
	  return false;
	default:
	  break;
        }
    }

  /* Otherwise, create a new basic block, and split this edge.  */
  dest = split_edge (e);
  if (new_bb)
    *new_bb = dest;
  e = single_pred_edge (dest);
  goto restart;
}
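A sketch of how this location finder is typically exercised: gsi_insert_on_edge queues a statement on an edge, and gsi_commit_edge_inserts (visible in Example #17) later flushes the queue, deciding placement with a routine like the one above:

  /* Queue STMT on edge E; nothing in the IL changes yet.  */
  gsi_insert_on_edge (e, stmt);

  /* Materialize all queued insertions, splitting edges when no
     suitable existing block is found.  */
  gsi_commit_edge_inserts ();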
Example #16
static bool
init_dont_simulate_again (void)
{
  basic_block bb;
  block_stmt_iterator bsi;
  tree phi;
  bool saw_a_complex_op = false;

  FOR_EACH_BB (bb)
    {
      for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
	DONT_SIMULATE_AGAIN (phi) = !is_complex_reg (PHI_RESULT (phi));

      for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
	{
	  tree orig_stmt, stmt, rhs = NULL;
	  bool dsa;

	  orig_stmt = stmt = bsi_stmt (bsi);

	  /* Most control-altering statements must be initially 
	     simulated, else we won't cover the entire cfg.  */
	  dsa = !stmt_ends_bb_p (stmt);

	  switch (TREE_CODE (stmt))
	    {
	    case RETURN_EXPR:
	      /* We don't care what the lattice value of <retval> is,
		 since it's never used as an input to another computation.  */
	      dsa = true;
	      stmt = TREE_OPERAND (stmt, 0);
	      if (!stmt || TREE_CODE (stmt) != MODIFY_EXPR)
		break;
	      /* FALLTHRU */

	    case MODIFY_EXPR:
	      dsa = !is_complex_reg (TREE_OPERAND (stmt, 0));
	      rhs = TREE_OPERAND (stmt, 1);
	      break;

	    case COND_EXPR:
	      rhs = TREE_OPERAND (stmt, 0);
	      break;

	    default:
	      break;
	    }

	  if (rhs)
	    switch (TREE_CODE (rhs))
	      {
	      case EQ_EXPR:
	      case NE_EXPR:
		rhs = TREE_OPERAND (rhs, 0);
		/* FALLTHRU */

	      case PLUS_EXPR:
	      case MINUS_EXPR:
	      case MULT_EXPR:
	      case TRUNC_DIV_EXPR:
	      case CEIL_DIV_EXPR:
	      case FLOOR_DIV_EXPR:
	      case ROUND_DIV_EXPR:
	      case RDIV_EXPR:
	      case NEGATE_EXPR:
	      case CONJ_EXPR:
		if (TREE_CODE (TREE_TYPE (rhs)) == COMPLEX_TYPE)
		  saw_a_complex_op = true;
		break;

	      default:
		break;
	      }

	  DONT_SIMULATE_AGAIN (orig_stmt) = dsa;
	}
    }

  return saw_a_complex_op;
}
Example #17
/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* Search every basic block for COND_EXPR we may be able to optimize.

     We walk the blocks in an order that guarantees that a block with
     a single predecessor is processed before its predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple cond_stmt;
      gphi *phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
          || gimple_code (cond_stmt) != GIMPLE_COND)
        continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)
       continue;

      /* Give up if bb1 has no successors, bb2 is missing, or bb2 has
	 no successors.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || bb2 == NULL
	  || EDGE_COUNT (bb2->succs) == 0)
        continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
	  std::swap (bb1, bb2);
	  std::swap (e1, e2);
	}
      else if (do_store_elim
	       && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!single_succ_p (bb1)
	      || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
	      || !single_succ_p (bb2)
	      || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
	      || EDGE_COUNT (bb3->preds) != 2)
	    continue;
	  if (cond_if_else_store_replacement (bb1, bb2, bb3))
	    cfgchanged = true;
	  continue;
	}
      else if (do_hoist_loads
		 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
	      && single_succ_p (bb1)
	      && single_succ_p (bb2)
	      && single_pred_p (bb1)
	      && single_pred_p (bb2)
	      && EDGE_COUNT (bb->succs) == 2
	      && EDGE_COUNT (bb3->preds) == 2
	      /* If one edge or the other is dominant, a conditional move
		 is likely to perform worse than the well-predicted branch.  */
	      && !predictable_edge_p (EDGE_SUCC (bb, 0))
	      && !predictable_edge_p (EDGE_SUCC (bb, 1)))
	    hoist_adjacent_loads (bb, bb1, bb2, bb3);
	  continue;
	}
      else
	continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
	  || (e1->flags & EDGE_FALLTHRU) == 0)
        continue;

      /* Also make sure that bb1 has only one predecessor and that it
	 is bb.  */
      if (!single_pred_p (bb1)
          || single_pred (bb1) != bb)
	continue;

      if (do_store_elim)
	{
	  /* bb1 is the middle block, bb2 the join block, bb the split block,
	     e1 the fallthrough edge from bb1 to bb2.  We can't do the
	     optimization if the join block has more than two predecessors.  */
	  if (EDGE_COUNT (bb2->preds) > 2)
	    continue;
	  if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
	    cfgchanged = true;
	}
      else
	{
	  gimple_seq phis = phi_nodes (bb2);
	  gimple_stmt_iterator gsi;
	  bool candorest = true;

	  /* Value replacement can work with more than one PHI
	     so try that first. */
	  for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      phi = as_a <gphi *> (gsi_stmt (gsi));
	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
	      if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
		{
		  candorest = false;
	          cfgchanged = true;
		  break;
		}
	    }

	  if (!candorest)
	    continue;

	  phi = single_non_singleton_phi_for_edges (phis, e1, e2);
	  if (!phi)
	    continue;

	  arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	  arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

	  /* Something is wrong if we cannot find the arguments in the PHI
	     node.  */
	  gcc_assert (arg0 != NULL && arg1 != NULL);

	  if (factor_out_conditional_conversion (e1, e2, phi, arg0, arg1))
	    {
	      /* factor_out_conditional_conversion may create a new PHI in
		 BB2 and eliminate an existing PHI in BB2.  Recompute values
		 that may be affected by that change.  */
	      phis = phi_nodes (bb2);
	      phi = single_non_singleton_phi_for_edges (phis, e1, e2);
	      gcc_assert (phi);
	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
	      gcc_assert (arg0 != NULL && arg1 != NULL);
	    }

	  /* Do the replacement of conditional if it can be done.  */
	  if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	}
    }

  free (bb_order);

  if (do_store_elim)
    delete nontrap;
  /* If the CFG has changed, we should clean up the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
         and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}
Example #18
gimple_stmt_iterator
gsi_start_phis (basic_block bb)
{
  return gsi_start (phi_nodes (bb));
}
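The canonical PHI walk this one-liner enables; it is functionally the same loop the cgraph examples spell out as gsi_start (phi_nodes (bb)):

  gimple_stmt_iterator psi;

  for (psi = gsi_start_phis (bb); !gsi_end_p (psi); gsi_next (&psi))
    {
      gimple phi = gsi_stmt (psi);
      /* Inspect or rewrite PHI here.  */
    }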
Example #19
/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.  */
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim)
{
    basic_block bb;
    basic_block *bb_order;
    unsigned n, i;
    bool cfgchanged = false;
    struct pointer_set_t *nontrap = 0;

    if (do_store_elim)
    {
        condstoretemp = NULL_TREE;
        /* Calculate the set of non-trapping memory accesses.  */
        nontrap = get_non_trapping ();
    }

    /* Search every basic block for COND_EXPR we may be able to optimize.

       We walk the blocks in an order that guarantees that a block with
       a single predecessor is processed before its predecessor.
       This ensures that we collapse inner ifs before visiting the
       outer ones, and also that we do not try to visit a removed
       block.  */
    bb_order = blocks_in_phiopt_order ();
    n = n_basic_blocks - NUM_FIXED_BLOCKS;

    for (i = 0; i < n; i++)
    {
        gimple cond_stmt, phi;
        basic_block bb1, bb2;
        edge e1, e2;
        tree arg0, arg1;

        bb = bb_order[i];

        cond_stmt = last_stmt (bb);
        /* Check to see if the last statement is a GIMPLE_COND.  */
        if (!cond_stmt
                || gimple_code (cond_stmt) != GIMPLE_COND)
            continue;

        e1 = EDGE_SUCC (bb, 0);
        bb1 = e1->dest;
        e2 = EDGE_SUCC (bb, 1);
        bb2 = e2->dest;

        /* We cannot do the optimization on abnormal edges.  */
        if ((e1->flags & EDGE_ABNORMAL) != 0
                || (e2->flags & EDGE_ABNORMAL) != 0)
            continue;

        /* Give up if bb1 has no successors, bb2 is missing, or bb2
           has no successors.  */
        if (EDGE_COUNT (bb1->succs) == 0
                || bb2 == NULL
                || EDGE_COUNT (bb2->succs) == 0)
            continue;

        /* Find the bb which is the fall through to the other.  */
        if (EDGE_SUCC (bb1, 0)->dest == bb2)
            ;
        else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
            basic_block bb_tmp = bb1;
            edge e_tmp = e1;
            bb1 = bb2;
            bb2 = bb_tmp;
            e1 = e2;
            e2 = e_tmp;
        }
        else
            continue;

        e1 = EDGE_SUCC (bb1, 0);

        /* Make sure that bb1 is just a fall through.  */
        if (!single_succ_p (bb1)
                || (e1->flags & EDGE_FALLTHRU) == 0)
            continue;

        /* Also make sure that bb1 has only one predecessor and that
           it is bb.  */
        if (!single_pred_p (bb1)
                || single_pred (bb1) != bb)
            continue;

        if (do_store_elim)
        {
            /* bb1 is the middle block, bb2 the join block, bb the split block,
               e1 the fallthrough edge from bb1 to bb2.  We can't do the
               optimization if the join block has more than two predecessors.  */
            if (EDGE_COUNT (bb2->preds) > 2)
                continue;
            if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
                cfgchanged = true;
        }
        else
        {
            gimple_seq phis = phi_nodes (bb2);

            /* Check to make sure that there is only one PHI node.
               TODO: we could do it with more than one iff the other PHI nodes
               have the same elements for these two edges.  */
            if (! gimple_seq_singleton_p (phis))
                continue;

            phi = gsi_stmt (gsi_start (phis));
            arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
            arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

            /* Something is wrong if we cannot find the arguments in the PHI
               node.  */
            gcc_assert (arg0 != NULL && arg1 != NULL);

            /* Do the replacement of conditional if it can be done.  */
            if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
                cfgchanged = true;
            else if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
                cfgchanged = true;
            else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
                cfgchanged = true;
            else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
                cfgchanged = true;
        }
    }

    free (bb_order);

    if (do_store_elim)
        pointer_set_destroy (nontrap);
    /* If the CFG has changed, we should clean up the CFG.  */
    if (cfgchanged && do_store_elim)
    {
        /* In cond-store replacement we have added some loads on edges
           and new VOPS (as we moved the store, and created a load).  */
        gsi_commit_edge_inserts ();
        return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
    else if (cfgchanged)
        return TODO_cleanup_cfg;
    return 0;
}
Example #20
static unsigned int
build_cgraph_edges (void)
{
  basic_block bb;
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  struct pointer_set_t *visited_nodes = pointer_set_create ();
  gimple_stmt_iterator gsi;
  tree decl;
  unsigned ix;

  /* Create the callgraph edges and record the nodes referenced by the
     function body.  */
  FOR_EACH_BB (bb)
    {
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree decl;

	  if (is_gimple_call (stmt))
	    {
	      int freq = compute_call_stmt_bb_frequency (current_function_decl,
							 bb);
	      decl = gimple_call_fndecl (stmt);
	      if (decl)
		cgraph_create_edge (node, cgraph_get_create_node (decl),
				    stmt, bb->count, freq);
	      else
		cgraph_create_indirect_edge (node, stmt,
					     gimple_call_flags (stmt),
					     bb->count, freq);
	    }
	  walk_stmt_load_store_addr_ops (stmt, node, mark_load,
					 mark_store, mark_address);
	  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
	      && gimple_omp_parallel_child_fn (stmt))
	    {
	      tree fn = gimple_omp_parallel_child_fn (stmt);
	      ipa_record_reference (node, NULL, cgraph_get_create_node (fn),
				    NULL, IPA_REF_ADDR, stmt);
	    }
	  if (gimple_code (stmt) == GIMPLE_OMP_TASK)
	    {
	      tree fn = gimple_omp_task_child_fn (stmt);
	      if (fn)
		ipa_record_reference (node, NULL, cgraph_get_create_node (fn),
				      NULL, IPA_REF_ADDR, stmt);
	      fn = gimple_omp_task_copy_fn (stmt);
	      if (fn)
		ipa_record_reference (node, NULL, cgraph_get_create_node (fn),
				      NULL, IPA_REF_ADDR, stmt);
	    }
	}
      for (gsi = gsi_start (phi_nodes (bb)); !gsi_end_p (gsi); gsi_next (&gsi))
	walk_stmt_load_store_addr_ops (gsi_stmt (gsi), node,
				       mark_load, mark_store, mark_address);
   }

  /* Look for initializers of constant variables and private statics.  */
  FOR_EACH_LOCAL_DECL (cfun, ix, decl)
    if (TREE_CODE (decl) == VAR_DECL
	&& (TREE_STATIC (decl) && !DECL_EXTERNAL (decl)))
      varpool_finalize_decl (decl);
  record_eh_tables (node, cfun);

  pointer_set_destroy (visited_nodes);
  return 0;
}
Example #21
static unsigned int
tree_ssa_phiopt (void)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;

  /* Search every basic block for COND_EXPR we may be able to optimize.

     We walk the blocks in an order that guarantees that a block with
     a single predecessor is processed before its predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = blocks_in_phiopt_order ();
  n = n_basic_blocks - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++) 
    {
      tree cond_expr;
      tree phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_expr = last_stmt (bb);
      /* Check to see if the last statement is a COND_EXPR.  */
      if (!cond_expr
          || TREE_CODE (cond_expr) != COND_EXPR)
        continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)
       continue;

      /* Give up if bb1 has no successors, bb2 is missing, or bb2 has
	 no successors.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || bb2 == NULL
          || EDGE_COUNT (bb2->succs) == 0)
        continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
          basic_block bb_tmp = bb1;
          edge e_tmp = e1;
          bb1 = bb2;
          bb2 = bb_tmp;
          e1 = e2;
          e2 = e_tmp;
        }
      else
        continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
          || (e1->flags & EDGE_FALLTHRU) == 0)
        continue;

      /* Also make sure that bb1 has only one predecessor and that it
         is bb.  */
      if (!single_pred_p (bb1)
          || single_pred (bb1) != bb)
        continue;

      phi = phi_nodes (bb2);

      /* Check to make sure that there is only one PHI node.
         TODO: we could do it with more than one iff the other PHI nodes
         have the same elements for these two edges.  */
      if (!phi || PHI_CHAIN (phi) != NULL)
        continue;

      arg0 = PHI_ARG_DEF_TREE (phi, e1->dest_idx);
      arg1 = PHI_ARG_DEF_TREE (phi, e2->dest_idx);

      /* Something is wrong if we cannot find the arguments in the PHI
         node.  */
      gcc_assert (arg0 != NULL && arg1 != NULL);

      /* Do the replacement of conditional if it can be done.  */
      if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
        cfgchanged = true;
      else if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
        cfgchanged = true;
      else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
        cfgchanged = true;
      else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
        cfgchanged = true;
    }

  free (bb_order);
  
  /* If the CFG has changed, we should clean up the CFG.  */
  return cfgchanged ? TODO_cleanup_cfg : 0;
}