Example #1
File: cfgloop.c  Project: BelyyVedmak/gcc
int
flow_loops_find (struct loops *loops, int flags)
{
  int i;
  int b;
  int num_loops;
  edge e;
  sbitmap headers;
  int *dfs_order;
  int *rc_order;
  basic_block header;
  basic_block bb;

  /* This function cannot be repeatedly called with different
     flags to build up the loop information.  The loop tree
     must always be built if this function is called.  */
  if (! (flags & LOOP_TREE))
    abort ();

  memset (loops, 0, sizeof *loops);

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == 0)
    return 0;

  dfs_order = NULL;
  rc_order = NULL;

  /* Join loops with shared headers.  */
  canonicalize_loop_headers ();

  /* Compute the dominators.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* Count the number of loop headers.  This should be the
     same as the number of natural loops.  */
  headers = sbitmap_alloc (last_basic_block);
  sbitmap_zero (headers);

  num_loops = 0;
  FOR_EACH_BB (header)
    {
      int more_latches = 0;

      header->loop_depth = 0;

      /* If we have an abnormal predecessor, do not consider the
	 loop (not worth the problems).  */
      for (e = header->pred; e; e = e->pred_next)
	if (e->flags & EDGE_ABNORMAL)
	  break;
      if (e)
	continue;

      for (e = header->pred; e; e = e->pred_next)
	{
	  basic_block latch = e->src;

	  if (e->flags & EDGE_ABNORMAL)
	    abort ();

	  /* Look for back edges where a predecessor is dominated
	     by this block.  A natural loop has a single entry
	     node (header) that dominates all the nodes in the
	     loop.  It also has single back edge to the header
	     from a latch node.  */
	  if (latch != ENTRY_BLOCK_PTR
	      && dominated_by_p (CDI_DOMINATORS, latch, header))
	    {
	      /* Shared headers should be eliminated by now.  */
	      if (more_latches)
		abort ();
	      more_latches = 1;
	      SET_BIT (headers, header->index);
	      num_loops++;
	    }
	}
    }

  /* Allocate loop structures.  */
  loops->parray = xcalloc (num_loops + 1, sizeof (struct loop *));

  /* Dummy loop containing whole function.  */
  loops->parray[0] = xcalloc (1, sizeof (struct loop));
  loops->parray[0]->next = NULL;
  loops->parray[0]->inner = NULL;
  loops->parray[0]->outer = NULL;
  loops->parray[0]->depth = 0;
  loops->parray[0]->pred = NULL;
  loops->parray[0]->num_nodes = n_basic_blocks + 2;
  loops->parray[0]->latch = EXIT_BLOCK_PTR;
  loops->parray[0]->header = ENTRY_BLOCK_PTR;
  ENTRY_BLOCK_PTR->loop_father = loops->parray[0];
  EXIT_BLOCK_PTR->loop_father = loops->parray[0];

  loops->tree_root = loops->parray[0];

  /* Find and record information about all the natural loops
     in the CFG.  */
  loops->num = 1;
  FOR_EACH_BB (bb)
    bb->loop_father = loops->tree_root;

  if (num_loops)
    {
      /* Compute depth first search order of the CFG so that outer
	 natural loops will be found before inner natural loops.  */
      dfs_order = xmalloc (n_basic_blocks * sizeof (int));
      rc_order = xmalloc (n_basic_blocks * sizeof (int));
      flow_depth_first_order_compute (dfs_order, rc_order);

      /* Save CFG derived information to avoid recomputing it.  */
      loops->cfg.dfs_order = dfs_order;
      loops->cfg.rc_order = rc_order;

      num_loops = 1;

      for (b = 0; b < n_basic_blocks; b++)
	{
	  struct loop *loop;

	  /* Search the nodes of the CFG in reverse completion order
	     so that we can find outer loops first.  */
	  if (!TEST_BIT (headers, rc_order[b]))
	    continue;

	  header = BASIC_BLOCK (rc_order[b]);

	  loop = loops->parray[num_loops] = xcalloc (1, sizeof (struct loop));

	  loop->header = header;
	  loop->num = num_loops;
	  num_loops++;

	  /* Look for the latch for this header block.  */
	  for (e = header->pred; e; e = e->pred_next)
	    {
	      basic_block latch = e->src;

	      if (latch != ENTRY_BLOCK_PTR
		  && dominated_by_p (CDI_DOMINATORS, latch, header))
		{
		  loop->latch = latch;
		  break;
		}
	    }

	  flow_loop_tree_node_add (header->loop_father, loop);
	  loop->num_nodes = flow_loop_nodes_find (loop->header, loop);
	}

      /* Assign the loop nesting depth and enclosed loop level for each
	 loop.  */
      loops->levels = flow_loops_level_compute (loops);

      /* Scan the loops.  */
      for (i = 1; i < num_loops; i++)
	flow_loop_scan (loops->parray[i], flags);

      loops->num = num_loops;
    }
  else
    {
      free_dominance_info (CDI_DOMINATORS);
    }

  sbitmap_free (headers);

  loops->state = 0;
#ifdef ENABLE_CHECKING
  verify_flow_info ();
  verify_loop_structure (loops);
#endif

  return loops->num;
}
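The test driving this function is the natural-loop definition itself: an edge latch->header is a back edge exactly when header dominates latch. Below is a minimal standalone sketch of that test, using a hypothetical hard-coded CFG and naive iterative dominator bitsets rather than GCC's dominance machinery:

#include <stdio.h>

#define N 5                       /* number of basic blocks, block 0 = entry */
static const int edges[][2] = {   /* toy CFG: 0->1, 1->2, 2->1, 2->3, 1->4 */
  {0, 1}, {1, 2}, {2, 1}, {2, 3}, {1, 4}
};
#define NEDGES ((int) (sizeof edges / sizeof edges[0]))

int
main (void)
{
  unsigned dom[N];              /* dom[b] = bitset of blocks dominating b */
  int b, e, changed;

  dom[0] = 1u << 0;             /* the entry dominates only itself */
  for (b = 1; b < N; b++)
    dom[b] = (1u << N) - 1;     /* everything else starts at "all blocks" */

  do                            /* iterate to a fixed point */
    {
      changed = 0;
      for (b = 1; b < N; b++)
	{
	  unsigned in = (1u << N) - 1;
	  for (e = 0; e < NEDGES; e++)
	    if (edges[e][1] == b)
	      in &= dom[edges[e][0]];   /* intersect predecessors' sets */
	  in |= 1u << b;                /* a block dominates itself */
	  if (in != dom[b])
	    dom[b] = in, changed = 1;
	}
    }
  while (changed);

  /* An edge latch->header is a back edge iff header dominates latch.  */
  for (e = 0; e < NEDGES; e++)
    if (dom[edges[e][0]] & (1u << edges[e][1]))
      printf ("back edge %d->%d: block %d is a natural loop header\n",
	      edges[e][0], edges[e][1], edges[e][1]);
  return 0;
}

On this CFG the only back edge is 2->1, so block 1 is the single loop header, matching what the header-counting pass in flow_loops_find would record.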
Example #2
File: cfgloop.c  Project: BelyyVedmak/gcc
/* Takes care of merging natural loops with shared headers.  */
static void
canonicalize_loop_headers (void)
{
  basic_block header;
  edge e;

  /* Compute the dominators.  */
  calculate_dominance_info (CDI_DOMINATORS);

  alloc_aux_for_blocks (sizeof (int));
  alloc_aux_for_edges (sizeof (int));

  /* Split blocks so that each loop has only single latch.  */
  FOR_EACH_BB (header)
    {
      int num_latches = 0;
      int have_abnormal_edge = 0;

      for (e = header->pred; e; e = e->pred_next)
	{
	  basic_block latch = e->src;

	  if (e->flags & EDGE_ABNORMAL)
	    have_abnormal_edge = 1;

	  if (latch != ENTRY_BLOCK_PTR
	      && dominated_by_p (CDI_DOMINATORS, latch, header))
	    {
	      num_latches++;
	      LATCH_EDGE (e) = 1;
	    }
	}
      if (have_abnormal_edge)
	HEADER_BLOCK (header) = 0;
      else
	HEADER_BLOCK (header) = num_latches;
    }

  free_dominance_info (CDI_DOMINATORS);

  if (HEADER_BLOCK (ENTRY_BLOCK_PTR->succ->dest))
    {
      basic_block bb;

      /* We could not redirect edges freely here. On the other hand,
	 we can simply split the edge from entry block.  */
      bb = split_edge (ENTRY_BLOCK_PTR->succ);

      alloc_aux_for_edge (bb->succ, sizeof (int));
      LATCH_EDGE (bb->succ) = 0;
      alloc_aux_for_block (bb, sizeof (int));
      HEADER_BLOCK (bb) = 0;
    }

  FOR_EACH_BB (header)
    {
      int num_latch;
      int want_join_latch;
      int max_freq, is_heavy;
      edge heavy;

      if (!HEADER_BLOCK (header))
	continue;

      num_latch = HEADER_BLOCK (header);

      want_join_latch = (num_latch > 1);

      if (!want_join_latch)
	continue;

      /* Find a heavy edge.  */
      is_heavy = 1;
      heavy = NULL;
      max_freq = 0;
      for (e = header->pred; e; e = e->pred_next)
	if (LATCH_EDGE (e) &&
	    EDGE_FREQUENCY (e) > max_freq)
	  max_freq = EDGE_FREQUENCY (e);
      for (e = header->pred; e; e = e->pred_next)
	if (LATCH_EDGE (e) &&
	    EDGE_FREQUENCY (e) >= max_freq / HEAVY_EDGE_RATIO)
	  {
	    if (heavy)
	      {
		is_heavy = 0;
		break;
	      }
	    else
	      heavy = e;
	  }

      if (is_heavy)
	{
	  basic_block new_header =
	    make_forwarder_block (header, true, true, heavy, 0);
	  if (num_latch > 2)
	    make_forwarder_block (new_header, true, false, NULL, 1);
	}
      else
	make_forwarder_block (header, true, false, NULL, 1);
    }

  free_aux_for_blocks ();
  free_aux_for_edges ();
}
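The heavy-edge heuristic above splits the header around one dominant latch edge only when a single latch clearly stands out in the frequency profile. Here is a small sketch of just that selection rule; HEAVY_EDGE_RATIO is assumed to be the small constant cfgloop.c defines, and the frequencies are made up:

#include <stdio.h>

#define HEAVY_EDGE_RATIO 8      /* assumed value; see cfgloop.c */

/* Return the index of the unique latch edge whose frequency is within
   a factor of HEAVY_EDGE_RATIO of the maximum, or -1 if there is no
   single such edge.  */
static int
pick_heavy_edge (const int *freq, int n)
{
  int i, max_freq = 0, heavy = -1;

  for (i = 0; i < n; i++)
    if (freq[i] > max_freq)
      max_freq = freq[i];

  for (i = 0; i < n; i++)
    if (freq[i] >= max_freq / HEAVY_EDGE_RATIO)
      {
	if (heavy >= 0)
	  return -1;            /* two candidates: no single heavy edge */
	heavy = i;
      }
  return heavy;
}

int
main (void)
{
  int a[] = { 1000, 10, 5 };    /* one dominant latch: edge 0 is heavy */
  int b[] = { 500, 400 };       /* two comparable latches: none is heavy */

  printf ("heavy (a) = %d\n", pick_heavy_edge (a, 3));
  printf ("heavy (b) = %d\n", pick_heavy_edge (b, 2));
  return 0;
}

When a heavy edge exists, canonicalize_loop_headers keeps it on the header and moves the remaining latches to a forwarder block; otherwise all latches are joined behind a single forwarder.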
Example #3
static bool
tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel,
			 struct loop_size *size, int upper_bound)
{
  basic_block *body = get_loop_body (loop);
  gimple_stmt_iterator gsi;
  unsigned int i;
  bool after_exit;
  vec<basic_block> path = get_loop_hot_path (loop);

  size->overall = 0;
  size->eliminated_by_peeling = 0;
  size->last_iteration = 0;
  size->last_iteration_eliminated_by_peeling = 0;
  size->num_pure_calls_on_hot_path = 0;
  size->num_non_pure_calls_on_hot_path = 0;
  size->non_call_stmts_on_hot_path = 0;
  size->num_branches_on_hot_path = 0;
  size->constant_iv = 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num);
  for (i = 0; i < loop->num_nodes; i++)
    {
      if (edge_to_cancel && body[i] != edge_to_cancel->src
	  && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src))
	after_exit = true;
      else
	after_exit = false;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index, after_exit);

      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  int num = estimate_num_insns (stmt, &eni_size_weights);
	  bool likely_eliminated = false;
	  bool likely_eliminated_last = false;
	  bool likely_eliminated_peeled = false;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "  size: %3i ", num);
	      print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0);
	    }

	  /* Look for reasons why we might optimize this stmt away. */

	  if (gimple_has_side_effects (stmt))
	    ;
	  /* Exit conditional.  */
	  else if (exit && body[i] == exit->src
		   && stmt == last_stmt (exit->src))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
	        fprintf (dump_file, "   Exit condition will be eliminated "
			 "in peeled copies.\n");
	      likely_eliminated_peeled = true;
	    }
	  else if (edge_to_cancel && body[i] == edge_to_cancel->src
		   && stmt == last_stmt (edge_to_cancel->src))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
	        fprintf (dump_file, "   Exit condition will be eliminated "
			 "in last copy.\n");
	      likely_eliminated_last = true;
	    }
	  /* Sets of IV variables.  */
	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
	      && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
	        fprintf (dump_file, "   Induction variable computation will"
			 " be folded away.\n");
	      likely_eliminated = true;
	    }
	  /* Assignments of IV variables.  */
	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
		   && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
		   && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt, loop)
		   && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
		       || constant_after_peeling (gimple_assign_rhs2 (stmt),
		       				  stmt, loop)))
	    {
	      size->constant_iv = true;
	      if (dump_file && (dump_flags & TDF_DETAILS))
	        fprintf (dump_file, "   Constant expression will be folded away.\n");
	      likely_eliminated = true;
	    }
	  /* Conditionals.  */
	  else if ((gimple_code (stmt) == GIMPLE_COND
		    && constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
		    && constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop)
		    /* We don't simplify all constant compares so make sure
		       they are not both constant already.  See PR70288.  */
		    && (! is_gimple_min_invariant (gimple_cond_lhs (stmt))
			|| ! is_gimple_min_invariant (gimple_cond_rhs (stmt))))
		   || (gimple_code (stmt) == GIMPLE_SWITCH
		       && constant_after_peeling (gimple_switch_index (
						    as_a <gswitch *> (stmt)),
						  stmt, loop)
		       && ! is_gimple_min_invariant (gimple_switch_index (
						       as_a <gswitch *> (stmt)))))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
	        fprintf (dump_file, "   Constant conditional.\n");
	      likely_eliminated = true;
	    }

	  size->overall += num;
	  if (likely_eliminated || likely_eliminated_peeled)
	    size->eliminated_by_peeling += num;
	  if (!after_exit)
	    {
	      size->last_iteration += num;
	      if (likely_eliminated || likely_eliminated_last)
		size->last_iteration_eliminated_by_peeling += num;
	    }
	  if ((size->overall * 3 / 2 - size->eliminated_by_peeling
	      - size->last_iteration_eliminated_by_peeling) > upper_bound)
	    {
              free (body);
	      path.release ();
	      return true;
	    }
	}
    }
  while (path.length ())
    {
      basic_block bb = path.pop ();
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  if (gimple_code (stmt) == GIMPLE_CALL)
	    {
	      int flags = gimple_call_flags (stmt);
	      tree decl = gimple_call_fndecl (stmt);

	      if (decl && DECL_IS_BUILTIN (decl)
		  && is_inexpensive_builtin (decl))
		;
	      else if (flags & (ECF_PURE | ECF_CONST))
		size->num_pure_calls_on_hot_path++;
	      else
		size->num_non_pure_calls_on_hot_path++;
	      size->num_branches_on_hot_path++;
	    }
	  else if (gimple_code (stmt) != GIMPLE_CALL
		   && gimple_code (stmt) != GIMPLE_DEBUG)
	    size->non_call_stmts_on_hot_path++;
	  if (((gimple_code (stmt) == GIMPLE_COND
	        && (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
		    || !constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop)))
	       || (gimple_code (stmt) == GIMPLE_SWITCH
		   && !constant_after_peeling (gimple_switch_index (
						 as_a <gswitch *> (stmt)),
					       stmt, loop)))
	      && (!exit || bb != exit->src))
	    size->num_branches_on_hot_path++;
	}
    }
  path.release ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
    	     size->eliminated_by_peeling, size->last_iteration,
	     size->last_iteration_eliminated_by_peeling);

  free (body);
  return false;
}
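The early exit in the statement walk above gives up as soon as a conservative size estimate, 3/2 of the statements counted so far minus those expected to fold away, exceeds the caller's upper_bound. A toy sketch of that arithmetic with made-up numbers:

#include <stdio.h>

int
main (void)
{
  int overall = 40;                     /* statements counted so far */
  int eliminated_by_peeling = 10;       /* expected to fold in peeled copies */
  int last_iteration_eliminated = 5;    /* expected to fold in the last copy */
  int upper_bound = 60;

  int estimate = overall * 3 / 2
		 - eliminated_by_peeling
		 - last_iteration_eliminated;

  printf ("estimate %d %s bound %d\n", estimate,
	  estimate > upper_bound ? "exceeds" : "is within", upper_bound);
  return 0;
}

Here the estimate is 45, within the bound of 60, so the walk would continue; the moment the inequality flips, tree_estimate_loop_size returns true and the caller treats the loop as too big to unroll.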
Example #4
static bool
dse_possible_dead_store_p (ao_ref *ref, gimple *stmt, gimple **use_stmt)
{
  gimple *temp;
  unsigned cnt = 0;

  *use_stmt = NULL;

  /* Find the first dominated statement that clobbers (part of) the
     memory stmt stores to with no intermediate statement that may use
     part of the memory stmt stores.  That is, find a store that may
     prove stmt to be a dead store.  */
  temp = stmt;
  do
    {
      gimple *use_stmt, *defvar_def;
      imm_use_iterator ui;
      bool fail = false;
      tree defvar;

      /* Limit stmt walking to be linear in the number of possibly
         dead stores.  */
      if (++cnt > 256)
	return false;

      if (gimple_code (temp) == GIMPLE_PHI)
	defvar = PHI_RESULT (temp);
      else
	defvar = gimple_vdef (temp);
      defvar_def = temp;
      temp = NULL;
      FOR_EACH_IMM_USE_STMT (use_stmt, ui, defvar)
	{
	  cnt++;

	  /* If we ever reach our DSE candidate stmt again fail.  We
	     cannot handle dead stores in loops.  */
	  if (use_stmt == stmt)
	    {
	      fail = true;
	      BREAK_FROM_IMM_USE_STMT (ui);
	    }
	  /* In simple cases we can look through PHI nodes, but we
	     have to be careful with loops and with memory references
	     containing operands that are also operands of PHI nodes.
	     See gcc.c-torture/execute/20051110-*.c.  */
	  else if (gimple_code (use_stmt) == GIMPLE_PHI)
	    {
	      if (temp
		  /* Make sure we are not in a loop latch block.  */
		  || gimple_bb (stmt) == gimple_bb (use_stmt)
		  || dominated_by_p (CDI_DOMINATORS,
				     gimple_bb (stmt), gimple_bb (use_stmt))
		  /* We can look through PHIs to regions post-dominating
		     the DSE candidate stmt.  */
		  || !dominated_by_p (CDI_POST_DOMINATORS,
				      gimple_bb (stmt), gimple_bb (use_stmt)))
		{
		  fail = true;
		  BREAK_FROM_IMM_USE_STMT (ui);
		}
	      /* Do not consider the PHI as use if it dominates the 
	         stmt defining the virtual operand we are processing,
		 we have processed it already in this case.  */
	      if (gimple_bb (defvar_def) != gimple_bb (use_stmt)
		  && !dominated_by_p (CDI_DOMINATORS,
				      gimple_bb (defvar_def),
				      gimple_bb (use_stmt)))
		temp = use_stmt;
	    }
	  /* If the statement is a use the store is not dead.  */
	  else if (ref_maybe_used_by_stmt_p (use_stmt, ref))
	    {
	      fail = true;
	      BREAK_FROM_IMM_USE_STMT (ui);
	    }
	  /* If this is a store, remember it or bail out if we have
	     multiple ones (they will be in different CFG parts then).  */
	  else if (gimple_vdef (use_stmt))
	    {
	      if (temp)
		{
		  fail = true;
		  BREAK_FROM_IMM_USE_STMT (ui);
		}
	      temp = use_stmt;
	    }
	}

      if (fail)
	return false;

      /* If we didn't find any definition this means the store is dead
         if it isn't a store to globally reachable memory.  In this case
	 just pretend the stmt makes itself dead.  Otherwise fail.  */
      if (!temp)
	{
	  if (ref_may_alias_global_p (ref))
	    return false;

	  temp = stmt;
	  break;
	}
    }
  /* Continue walking until we reach a kill.  */
  while (!stmt_kills_ref_p (temp, ref));

  *use_stmt = temp;

  return true;
}
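The walk is easier to see on a straight-line program: a store is dead when the chain of statements after it reaches a full overwrite of the same location before any read of it. A toy sketch of that rule, with invented statement kinds standing in for the alias-oracle queries:

#include <stdio.h>

enum kind { NONE, READS_LOC, CLOBBERS_LOC };

/* Classify a store by scanning the statements that follow it.  */
static const char *
store_is_dead (const enum kind *after, int n)
{
  int i;
  for (i = 0; i < n; i++)
    {
      if (after[i] == READS_LOC)
	return "live (read before overwrite)";
      if (after[i] == CLOBBERS_LOC)
	return "dead (overwritten before any read)";
    }
  return "live (value reaches function exit)";
}

int
main (void)
{
  enum kind a[] = { NONE, CLOBBERS_LOC };      /* *p = 1; x = 2; *p = 3; */
  enum kind b[] = { READS_LOC, CLOBBERS_LOC }; /* *p = 1; y = *p; *p = 3; */

  printf ("a: %s\n", store_is_dead (a, 2));
  printf ("b: %s\n", store_is_dead (b, 2));
  return 0;
}

dse_possible_dead_store_p does the same thing over the virtual def-use web, with extra care around PHI nodes, loops, and the 256-statement walk limit seen above.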
Example #5
static bool
verify_use (basic_block bb, basic_block def_bb, use_operand_p use_p,
	    tree stmt, bool check_abnormal, bool is_virtual,
	    bitmap names_defined_in_bb)
{
  bool err = false;
  tree ssa_name = USE_FROM_PTR (use_p);

  err = verify_ssa_name (ssa_name, is_virtual);

  if (!TREE_VISITED (ssa_name))
    if (verify_imm_links (stderr, ssa_name))
      err = true;

  TREE_VISITED (ssa_name) = 1;

  if (IS_EMPTY_STMT (SSA_NAME_DEF_STMT (ssa_name))
      && default_def (SSA_NAME_VAR (ssa_name)) == ssa_name)
    ; /* Default definitions have empty statements.  Nothing to do.  */
  else if (!def_bb)
    {
      error ("missing definition");
      err = true;
    }
  else if (bb != def_bb
	   && !dominated_by_p (CDI_DOMINATORS, bb, def_bb))
    {
      error ("definition in block %i does not dominate use in block %i",
	     def_bb->index, bb->index);
      err = true;
    }
  else if (bb == def_bb
	   && names_defined_in_bb != NULL
	   && !bitmap_bit_p (names_defined_in_bb, SSA_NAME_VERSION (ssa_name)))
    {
      error ("definition in block %i follows the use", def_bb->index);
      err = true;
    }

  if (check_abnormal
      && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (ssa_name))
    {
      error ("SSA_NAME_OCCURS_IN_ABNORMAL_PHI should be set");
      err = true;
    }

  /* Make sure the use is in an appropriate list by checking the previous 
     element to make sure it's the same.  */
  if (use_p->prev == NULL)
    {
      error ("no immediate_use list");
      err = true;
    }
  else
    {
      tree listvar;
      if (use_p->prev->use == NULL)
	listvar = use_p->prev->stmt;
      else
	listvar = USE_FROM_PTR (use_p->prev);
      if (listvar != ssa_name)
        {
	  error ("wrong immediate use list");
	  err = true;
	}
    }

  if (err)
    {
      fprintf (stderr, "for SSA_NAME: ");
      print_generic_expr (stderr, ssa_name, TDF_VOPS);
      fprintf (stderr, " in statement:\n");
      print_generic_stmt (stderr, stmt, TDF_VOPS);
    }

  return err;
}
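The same-block case above depends on statement order: a use in the definition's own block is valid only if the name was defined earlier in that block, which the verifier tracks with names_defined_in_bb. A toy sketch of that check using a plain bitmap; the SSA versions and statements are made up:

#include <stdio.h>

int
main (void)
{
  unsigned defined = 0;            /* bitmap of SSA versions seen so far */
  static const int stmts[][2] = {  /* { defines version, uses version } */
    { 1, 0 },                      /* v1 = ...                            */
    { 2, 1 },                      /* v2 = f (v1)   ok: v1 already set    */
    { 3, 4 },                      /* v3 = f (v4)   bad: v4 defined later */
    { 4, 0 },                      /* v4 = ...                            */
  };
  int i;

  for (i = 0; i < 4; i++)
    {
      int use = stmts[i][1];
      if (use && !(defined & (1u << use)))
	printf ("definition of v%d follows the use\n", use);
      defined |= 1u << stmts[i][0];
    }
  return 0;
}

This mirrors the "definition in block %i follows the use" diagnostic: dominance handles the cross-block case, and the bitmap handles ordering within a single block.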