Example #1
unsigned int
execute_free_datastructures (void)
{
  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);

  /* And get rid of annotations we no longer need.  */
  delete_tree_cfg_annotations ();

  return 0;
}
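
A minimal sketch of the lifecycle every example below follows, with a hypothetical pass name; only calculate_dominance_info, free_dominance_info, dominated_by_p and the CDI_* enums are real GCC API here:

static unsigned int
execute_example_pass (void)
{
  /* Build the dominator tree, or reuse the cached one.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* ... work that queries dominators, e.g.
     dominated_by_p (CDI_DOMINATORS, a, b) ... */

  /* Once the CFG may have changed, the cached tree is stale;
     drop it so later passes cannot trust it by accident.  */
  free_dominance_info (CDI_DOMINATORS);
  return 0;
}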
Example #2
static void
tree_profiling (void)
{
  branch_prob ();
  if (flag_branch_probabilities
      && flag_profile_values
      && flag_value_profile_transformations)
    value_profile_transformations ();
  /* The above could hose dominator info.  Currently there is
     none coming in, this is a safety valve.  It should be
     easy to adjust it, if and when there is some.  */
  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
}
Example #3
static unsigned int
tree_call_cdce (void)
{
  basic_block bb;
  gimple_stmt_iterator i;
  bool something_changed = false;
  vec<gimple> cond_dead_built_in_calls = vNULL;

  FOR_EACH_BB (bb)
    {
      /* Collect dead call candidates.  */
      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
        {
          gimple stmt = gsi_stmt (i);
          if (is_gimple_call (stmt)
              && is_call_dce_candidate (stmt))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  fprintf (dump_file, "Found conditional dead call: ");
                  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
                  fprintf (dump_file, "\n");
                }
              if (!cond_dead_built_in_calls.exists ())
                cond_dead_built_in_calls.create (64);
              cond_dead_built_in_calls.safe_push (stmt);
            }
        }
    }

  if (!cond_dead_built_in_calls.exists ())
    return 0;

  something_changed
    = shrink_wrap_conditional_dead_built_in_calls (cond_dead_built_in_calls);

  cond_dead_built_in_calls.release ();

  if (something_changed)
    {
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);
      /* As we introduced new control-flow we need to insert PHI-nodes
         for the call-clobbers of the remaining call.  */
      mark_virtual_operands_for_renaming (cfun);
      return TODO_update_ssa;
    }

  return 0;
}
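
The vector above follows vec.h's lazy-creation idiom. A hedged, self-contained sketch; the helper name, the is_gimple_call filter, and the initial capacity of 64 are illustrative, not from the original:

/* Hypothetical helper showing the lazy vec<> idiom: no allocation
   until the first candidate, .exists () doubling as the "did we find
   anything?" test, and an explicit release of the storage.  */
static unsigned int
collect_calls (void)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  vec<gimple> worklist = vNULL;         /* no heap allocation yet */

  FOR_EACH_BB (bb)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      if (is_gimple_call (gsi_stmt (gsi)))
        {
          if (!worklist.exists ())
            worklist.create (64);       /* reserve space on first use */
          worklist.safe_push (gsi_stmt (gsi));
        }

  if (!worklist.exists ())
    return 0;                           /* nothing found, nothing to free */

  /* ... process the collected calls ... */

  worklist.release ();                  /* free the backing storage */
  return 0;
}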
Example #4
struct loops *
loop_optimizer_init (FILE *dumpfile)
{
  struct loops *loops = xcalloc (1, sizeof (struct loops));
  edge e;

  /* Initialize structures for layout changes.  */
  cfg_layout_initialize (0);

  /* Avoid annoying special cases of edges going to exit
     block.  */
  for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
    if ((e->flags & EDGE_FALLTHRU) && e->src->succ->succ_next)
      split_edge (e);

  /* Find the loops.  */
  if (flow_loops_find (loops, LOOP_TREE) <= 1)
    {
      basic_block bb;

      /* No loops.  */
      flow_loops_free (loops);
      free_dominance_info (CDI_DOMINATORS);
      free (loops);

      /* Make chain.  */
      FOR_EACH_BB (bb)
        if (bb->next_bb != EXIT_BLOCK_PTR)
          bb->rbi->next = bb->next_bb;
      cfg_layout_finalize ();
      return NULL;
    }
Example #5
/* Finalization of the RTL loop passes.  */
static unsigned int
rtl_loop_done (void)
{
  basic_block bb;

  if (current_loops)
    loop_optimizer_finalize (current_loops);

  free_dominance_info (CDI_DOMINATORS);

  /* Finalize layout changes.  */
  FOR_EACH_BB (bb)
    if (bb->next_bb != EXIT_BLOCK_PTR)
      bb->aux = bb->next_bb;
  cfg_layout_finalize ();

  cleanup_cfg (CLEANUP_EXPENSIVE);
  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  reg_scan (get_insns (), max_reg_num ());
  if (dump_file)
    dump_flow_info (dump_file, dump_flags);

  current_loops = NULL;
  return 0;
}
Example #6
File: tracer.c  Project: Lao16/gcc
static unsigned int
tracer (void)
{
  bool changed;

  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
    return 0;

  mark_dfs_back_edges ();
  if (dump_file)
    brief_dump_cfg (dump_file, dump_flags);

  /* Trace formation is done on the fly inside tail_duplicate */
  changed = tail_duplicate ();
  if (changed)
    {
      free_dominance_info (CDI_DOMINATORS);
      calculate_dominance_info (CDI_DOMINATORS);
      if (current_loops)
	fix_loop_structure (NULL);
    }

  if (dump_file)
    brief_dump_cfg (dump_file, dump_flags);

  return changed ? TODO_cleanup_cfg : 0;
}
Example #7
static unsigned int
tracer (void)
{
  bool changed;

  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
    return 0;

  mark_dfs_back_edges ();
  if (dump_file)
    brief_dump_cfg (dump_file, dump_flags);

  /* Trace formation is done on the fly inside tail_duplicate */
  changed = tail_duplicate ();
  if (changed)
    {
      free_dominance_info (CDI_DOMINATORS);
      /* If we changed the CFG, schedule loops for fixup by cleanup_cfg.  */
      if (current_loops)
	loops_state_set (LOOPS_NEED_FIXUP);
    }

  if (dump_file)
    brief_dump_cfg (dump_file, dump_flags);

  return changed ? TODO_cleanup_cfg : 0;
}
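
Taken together, Examples #6 and #7 show the two usual ways to recover after a CFG-changing transform. A condensed sketch of both, lifted from the bodies above:

/* Strategy A (Example #6): recompute dominators eagerly and repair
   the loop tree in place.  */
free_dominance_info (CDI_DOMINATORS);
calculate_dominance_info (CDI_DOMINATORS);
if (current_loops)
  fix_loop_structure (NULL);

/* Strategy B (Example #7): just invalidate, mark the loop tree as
   needing fixup, and let a later cleanup_cfg repair it.  */
free_dominance_info (CDI_DOMINATORS);
if (current_loops)
  loops_state_set (LOOPS_NEED_FIXUP);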
Example #8
void
tree_lowering_passes (tree fn)
{
  tree saved_current_function_decl = current_function_decl;

  current_function_decl = fn;
  push_cfun (DECL_STRUCT_FUNCTION (fn));
  gimple_register_cfg_hooks ();
  bitmap_obstack_initialize (NULL);
  execute_pass_list (all_lowering_passes);
  if (optimize && cgraph_global_info_ready)
    execute_pass_list (pass_early_local_passes.pass.sub);
  free_dominance_info (CDI_POST_DOMINATORS);
  free_dominance_info (CDI_DOMINATORS);
  compact_blocks ();
  current_function_decl = saved_current_function_decl;
  bitmap_obstack_release (NULL);
  pop_cfun ();
}
Example #9
static unsigned int
execute_split_paths ()
{
  /* If we don't have at least 2 real blocks and backedges in the
     CFG, then there's no point in trying to perform path splitting.  */
  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
      || !mark_dfs_back_edges ())
    return 0;

  bool changed = split_paths ();
  if (changed)
    free_dominance_info (CDI_DOMINATORS);

  return changed ? TODO_cleanup_cfg : 0;
}
Example #10
static unsigned int
tracer (void)
{
  gcc_assert (current_ir_type () == IR_GIMPLE);

  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
    return 0;

  mark_dfs_back_edges ();
  if (dump_file)
    dump_flow_info (dump_file, dump_flags);

  /* Trace formation is done on the fly inside tail_duplicate */
  tail_duplicate ();

  /* FIXME: We really only need to do this when we know tail duplication
            has altered the CFG. */
  free_dominance_info (CDI_DOMINATORS);
  if (dump_file)
    dump_flow_info (dump_file, dump_flags);

  return 0;
}
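
The FIXME above is addressed in the variants of Examples #6 and #7. A minimal sketch of the guarded form, assuming the later tail_duplicate that returns whether anything changed:

/* Hypothetical guarded variant per the FIXME: invalidate dominance
   info only when tail duplication actually altered the CFG.  */
static unsigned int
tracer_guarded (void)
{
  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
    return 0;

  mark_dfs_back_edges ();
  bool changed = tail_duplicate ();     /* bool-returning variant assumed */
  if (changed)
    free_dominance_info (CDI_DOMINATORS);
  return changed ? TODO_cleanup_cfg : 0;
}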
Example #11
static unsigned int
tree_profiling (void)
{
  struct cgraph_node *node;

  /* This is a small-ipa pass that gets called only once, from
     cgraphunit.c:ipa_passes().  */
  gcc_assert (symtab->state == IPA_SSA);

  init_node_map (true);

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      if (!gimple_has_body_p (node->decl))
	continue;

      /* Don't profile functions produced for builtin stuff.  */
      if (DECL_SOURCE_LOCATION (node->decl) == BUILTINS_LOCATION)
	continue;

      /* Do not instrument extern inline functions when testing coverage.
	 While this is not perfectly consistent (early inlined extern inlines
	 will get accounted), the testsuite expects that.  */
      if (DECL_EXTERNAL (node->decl)
	  && flag_test_coverage)
	continue;

      push_cfun (DECL_STRUCT_FUNCTION (node->decl));

      /* Local pure-const may imply need to fixup the cfg.  */
      if (execute_fixup_cfg () & TODO_cleanup_cfg)
	cleanup_tree_cfg ();

      branch_prob ();

      if (! flag_branch_probabilities
	  && flag_profile_values)
	gimple_gen_ic_func_profiler ();

      if (flag_branch_probabilities
	  && flag_profile_values
	  && flag_value_profile_transformations)
	gimple_value_profile_transformations ();

      /* The above could hose dominator info.  Currently there is
	 none coming in, this is a safety valve.  It should be
	 easy to adjust it, if and when there is some.  */
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);
      pop_cfun ();
    }

  /* Drop pure/const flags from instrumented functions.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      if (!gimple_has_body_p (node->decl)
	  || !(!node->clone_of
	  || node->decl != node->clone_of->decl))
	continue;

      /* Don't profile functions produced for builtin stuff.  */
      if (DECL_SOURCE_LOCATION (node->decl) == BUILTINS_LOCATION)
	continue;

      node->set_const_flag (false, false);
      node->set_pure_flag (false, false);
    }

  /* Update call statements and rebuild the cgraph.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      basic_block bb;

      if (!gimple_has_body_p (node->decl)
	  || !(!node->clone_of
	  || node->decl != node->clone_of->decl))
	continue;

      /* Don't profile functions produced for builtin stuff.  */
      if (DECL_SOURCE_LOCATION (node->decl) == BUILTINS_LOCATION)
	continue;

      push_cfun (DECL_STRUCT_FUNCTION (node->decl));

      FOR_EACH_BB_FN (bb, cfun)
	{
	  gimple_stmt_iterator gsi;
	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple stmt = gsi_stmt (gsi);
	      if (is_gimple_call (stmt))
		update_stmt (stmt);
	    }
	}

      /* re-merge split blocks.  */
      cleanup_tree_cfg ();
      update_ssa (TODO_update_ssa);

      cgraph_edge::rebuild_edges ();

      pop_cfun ();
    }
Example #12
bool
cgraph_process_new_functions (void)
{
  bool output = false;
  tree fndecl;
  struct cgraph_node *node;

  /* Note that this queue may grow as it's being processed, as the new
     functions may generate new ones.  */
  while (cgraph_new_nodes)
    {
      node = cgraph_new_nodes;
      fndecl = node->decl;
      cgraph_new_nodes = cgraph_new_nodes->next_needed;
      switch (cgraph_state)
	{
	case CGRAPH_STATE_CONSTRUCTION:
	  /* At construction time we just need to finalize the function and
	     move it into the reachable functions list.  */

	  node->next_needed = NULL;
	  cgraph_finalize_function (fndecl, false);
	  cgraph_mark_reachable_node (node);
	  output = true;
	  break;

	case CGRAPH_STATE_IPA:
	case CGRAPH_STATE_IPA_SSA:
	  /* When IPA optimization has already started, do all the essential
	     transformations that have already been performed on the whole
	     cgraph but not on this function.  */

	  gimple_register_cfg_hooks ();
	  if (!node->analyzed)
	    cgraph_analyze_function (node);
	  push_cfun (DECL_STRUCT_FUNCTION (fndecl));
	  current_function_decl = fndecl;
	  compute_inline_parameters (node);
	  if ((cgraph_state == CGRAPH_STATE_IPA_SSA
	      && !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (fndecl)))
	      /* When not optimizing, be sure we run early local passes anyway
		 to expand OMP.  */
	      || !optimize)
	    execute_pass_list (pass_early_local_passes.pass.sub);
	  free_dominance_info (CDI_POST_DOMINATORS);
	  free_dominance_info (CDI_DOMINATORS);
	  pop_cfun ();
	  current_function_decl = NULL;
	  break;

	case CGRAPH_STATE_EXPANSION:
	  /* Functions created during expansion shall be compiled
	     directly.  */
	  node->output = 0;
	  cgraph_expand_function (node);
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
      cgraph_call_function_insertion_hooks (node);
    }
  return output;
}
Example #13
int
flow_loops_find (struct loops *loops, int flags)
{
  int i;
  int b;
  int num_loops;
  edge e;
  sbitmap headers;
  int *dfs_order;
  int *rc_order;
  basic_block header;
  basic_block bb;

  /* This function cannot be repeatedly called with different
     flags to build up the loop information.  The loop tree
     must always be built if this function is called.  */
  if (! (flags & LOOP_TREE))
    abort ();

  memset (loops, 0, sizeof *loops);

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == 0)
    return 0;

  dfs_order = NULL;
  rc_order = NULL;

  /* Join loops with shared headers.  */
  canonicalize_loop_headers ();

  /* Compute the dominators.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* Count the number of loop headers.  This should be the
     same as the number of natural loops.  */
  headers = sbitmap_alloc (last_basic_block);
  sbitmap_zero (headers);

  num_loops = 0;
  FOR_EACH_BB (header)
    {
      int more_latches = 0;

      header->loop_depth = 0;

      /* If we have an abnormal predecessor, do not consider the
	 loop (not worth the problems).  */
      for (e = header->pred; e; e = e->pred_next)
	if (e->flags & EDGE_ABNORMAL)
	  break;
      if (e)
	continue;

      for (e = header->pred; e; e = e->pred_next)
	{
	  basic_block latch = e->src;

	  if (e->flags & EDGE_ABNORMAL)
	    abort ();

	  /* Look for back edges where a predecessor is dominated
	     by this block.  A natural loop has a single entry
	     node (header) that dominates all the nodes in the
	     loop.  It also has a single back edge to the header
	     from a latch node.  */
	  if (latch != ENTRY_BLOCK_PTR
	      && dominated_by_p (CDI_DOMINATORS, latch, header))
	    {
	      /* Shared headers should be eliminated by now.  */
	      if (more_latches)
		abort ();
	      more_latches = 1;
	      SET_BIT (headers, header->index);
	      num_loops++;
	    }
	}
    }

  /* Allocate loop structures.  */
  loops->parray = xcalloc (num_loops + 1, sizeof (struct loop *));

  /* Dummy loop containing whole function.  */
  loops->parray[0] = xcalloc (1, sizeof (struct loop));
  loops->parray[0]->next = NULL;
  loops->parray[0]->inner = NULL;
  loops->parray[0]->outer = NULL;
  loops->parray[0]->depth = 0;
  loops->parray[0]->pred = NULL;
  loops->parray[0]->num_nodes = n_basic_blocks + 2;
  loops->parray[0]->latch = EXIT_BLOCK_PTR;
  loops->parray[0]->header = ENTRY_BLOCK_PTR;
  ENTRY_BLOCK_PTR->loop_father = loops->parray[0];
  EXIT_BLOCK_PTR->loop_father = loops->parray[0];

  loops->tree_root = loops->parray[0];

  /* Find and record information about all the natural loops
     in the CFG.  */
  loops->num = 1;
  FOR_EACH_BB (bb)
    bb->loop_father = loops->tree_root;

  if (num_loops)
    {
      /* Compute depth first search order of the CFG so that outer
	 natural loops will be found before inner natural loops.  */
      dfs_order = xmalloc (n_basic_blocks * sizeof (int));
      rc_order = xmalloc (n_basic_blocks * sizeof (int));
      flow_depth_first_order_compute (dfs_order, rc_order);

      /* Save CFG derived information to avoid recomputing it.  */
      loops->cfg.dfs_order = dfs_order;
      loops->cfg.rc_order = rc_order;

      num_loops = 1;

      for (b = 0; b < n_basic_blocks; b++)
	{
	  struct loop *loop;

	  /* Search the nodes of the CFG in reverse completion order
	     so that we can find outer loops first.  */
	  if (!TEST_BIT (headers, rc_order[b]))
	    continue;

	  header = BASIC_BLOCK (rc_order[b]);

	  loop = loops->parray[num_loops] = xcalloc (1, sizeof (struct loop));

	  loop->header = header;
	  loop->num = num_loops;
	  num_loops++;

	  /* Look for the latch for this header block.  */
	  for (e = header->pred; e; e = e->pred_next)
	    {
	      basic_block latch = e->src;

	      if (latch != ENTRY_BLOCK_PTR
		  && dominated_by_p (CDI_DOMINATORS, latch, header))
		{
		  loop->latch = latch;
		  break;
		}
	    }

	  flow_loop_tree_node_add (header->loop_father, loop);
	  loop->num_nodes = flow_loop_nodes_find (loop->header, loop);
	}

      /* Assign the loop nesting depth and enclosed loop level for each
	 loop.  */
      loops->levels = flow_loops_level_compute (loops);

      /* Scan the loops.  */
      for (i = 1; i < num_loops; i++)
	flow_loop_scan (loops->parray[i], flags);

      loops->num = num_loops;
    }
  else
    {
      free_dominance_info (CDI_DOMINATORS);
    }

  sbitmap_free (headers);

  loops->state = 0;
#ifdef ENABLE_CHECKING
  verify_flow_info ();
  verify_loop_structure (loops);
#endif

  return loops->num;
}
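
flow_loops_find applies the same dominance test twice to recognize back edges. Factored into a standalone helper for clarity; the helper name is hypothetical:

/* An edge is a natural-loop back edge (source = latch, destination =
   header) when its source is dominated by its destination.  Mirrors
   the test used twice above.  */
static bool
latch_edge_p (edge e)
{
  return e->src != ENTRY_BLOCK_PTR
	 && dominated_by_p (CDI_DOMINATORS, e->src, e->dest);
}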
Example #14
/* Takes care of merging natural loops with shared headers.  */
static void
canonicalize_loop_headers (void)
{
  basic_block header;
  edge e;

  /* Compute the dominators.  */
  calculate_dominance_info (CDI_DOMINATORS);

  alloc_aux_for_blocks (sizeof (int));
  alloc_aux_for_edges (sizeof (int));

  /* Split blocks so that each loop has only a single latch.  */
  FOR_EACH_BB (header)
    {
      int num_latches = 0;
      int have_abnormal_edge = 0;

      for (e = header->pred; e; e = e->pred_next)
	{
	  basic_block latch = e->src;

	  if (e->flags & EDGE_ABNORMAL)
	    have_abnormal_edge = 1;

	  if (latch != ENTRY_BLOCK_PTR
	      && dominated_by_p (CDI_DOMINATORS, latch, header))
	    {
	      num_latches++;
	      LATCH_EDGE (e) = 1;
	    }
	}
      if (have_abnormal_edge)
	HEADER_BLOCK (header) = 0;
      else
	HEADER_BLOCK (header) = num_latches;
    }

  free_dominance_info (CDI_DOMINATORS);

  if (HEADER_BLOCK (ENTRY_BLOCK_PTR->succ->dest))
    {
      basic_block bb;

      /* We cannot redirect edges freely here.  On the other hand,
	 we can simply split the edge from the entry block.  */
      bb = split_edge (ENTRY_BLOCK_PTR->succ);

      alloc_aux_for_edge (bb->succ, sizeof (int));
      LATCH_EDGE (bb->succ) = 0;
      alloc_aux_for_block (bb, sizeof (int));
      HEADER_BLOCK (bb) = 0;
    }

  FOR_EACH_BB (header)
    {
      int num_latch;
      int want_join_latch;
      int max_freq, is_heavy;
      edge heavy;

      if (!HEADER_BLOCK (header))
	continue;

      num_latch = HEADER_BLOCK (header);

      want_join_latch = (num_latch > 1);

      if (!want_join_latch)
	continue;

      /* Find a heavy edge.  */
      is_heavy = 1;
      heavy = NULL;
      max_freq = 0;
      for (e = header->pred; e; e = e->pred_next)
	if (LATCH_EDGE (e) &&
	    EDGE_FREQUENCY (e) > max_freq)
	  max_freq = EDGE_FREQUENCY (e);
      for (e = header->pred; e; e = e->pred_next)
	if (LATCH_EDGE (e) &&
	    EDGE_FREQUENCY (e) >= max_freq / HEAVY_EDGE_RATIO)
	  {
	    if (heavy)
	      {
		is_heavy = 0;
		break;
	      }
	    else
	      heavy = e;
	  }

      if (is_heavy)
	{
	  basic_block new_header =
	    make_forwarder_block (header, true, true, heavy, 0);
	  if (num_latch > 2)
	    make_forwarder_block (new_header, true, false, NULL, 1);
	}
      else
	make_forwarder_block (header, true, false, NULL, 1);
    }

  free_aux_for_blocks ();
  free_aux_for_edges ();
}
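
The two-pass heavy-edge scan in canonicalize_loop_headers reads more easily as a standalone predicate. A hedged factoring; the helper name is hypothetical, everything else mirrors the loop above:

/* Return the unique latch edge whose frequency is within a factor of
   HEAVY_EDGE_RATIO of the maximum latch frequency, or NULL when no
   single edge dominates the profile.  */
static edge
find_heavy_latch_edge (basic_block header)
{
  edge e, heavy = NULL;
  int max_freq = 0;

  /* Pass 1: find the maximum frequency among latch edges.  */
  for (e = header->pred; e; e = e->pred_next)
    if (LATCH_EDGE (e) && EDGE_FREQUENCY (e) > max_freq)
      max_freq = EDGE_FREQUENCY (e);

  /* Pass 2: accept only a single edge above the threshold.  */
  for (e = header->pred; e; e = e->pred_next)
    if (LATCH_EDGE (e) && EDGE_FREQUENCY (e) >= max_freq / HEAVY_EDGE_RATIO)
      {
	if (heavy)
	  return NULL;		/* two candidates: no heavy edge */
	heavy = e;
      }
  return heavy;
}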
Example #15
static unsigned int
tree_profiling (void)
{
  struct cgraph_node *node;

  /* This is a small-ipa pass that gets called only once, from
     cgraphunit.c:ipa_passes().  */
  gcc_assert (cgraph_state == CGRAPH_STATE_IPA_SSA);

  init_node_map();

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      if (!gimple_has_body_p (node->symbol.decl))
	continue;

      /* Don't profile functions produced for builtin stuff.  */
      if (DECL_SOURCE_LOCATION (node->symbol.decl) == BUILTINS_LOCATION)
	continue;

      push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl));

      /* Local pure-const may imply need to fixup the cfg.  */
      if (execute_fixup_cfg () & TODO_cleanup_cfg)
	cleanup_tree_cfg ();

      branch_prob ();

      if (! flag_branch_probabilities
	  && flag_profile_values)
	gimple_gen_ic_func_profiler ();

      if (flag_branch_probabilities
	  && flag_profile_values
	  && flag_value_profile_transformations)
	gimple_value_profile_transformations ();

      /* The above could hose dominator info.  Currently there is
	 none coming in, this is a safety valve.  It should be
	 easy to adjust it, if and when there is some.  */
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);
      pop_cfun ();
    }

  /* Drop pure/const flags from instrumented functions.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      if (!gimple_has_body_p (node->symbol.decl)
	  || !(!node->clone_of
	  || node->symbol.decl != node->clone_of->symbol.decl))
	continue;

      /* Don't profile functions produced for builtin stuff.  */
      if (DECL_SOURCE_LOCATION (node->symbol.decl) == BUILTINS_LOCATION)
	continue;

      cgraph_set_const_flag (node, false, false);
      cgraph_set_pure_flag (node, false, false);
    }

  /* Update call statements and rebuild the cgraph.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      basic_block bb;

      if (!gimple_has_body_p (node->symbol.decl)
	  || !(!node->clone_of
	  || node->symbol.decl != node->clone_of->symbol.decl))
	continue;

      /* Don't profile functions produced for builtin stuff.  */
      if (DECL_SOURCE_LOCATION (node->symbol.decl) == BUILTINS_LOCATION)
	continue;

      push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl));

      FOR_EACH_BB (bb)
	{
	  gimple_stmt_iterator gsi;
	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple stmt = gsi_stmt (gsi);
	      if (is_gimple_call (stmt))
		update_stmt (stmt);
	    }
	}

      update_ssa (TODO_update_ssa);

      rebuild_cgraph_edges ();

      pop_cfun ();
    }

  del_node_map();
  return 0;
}
Example #16
static unsigned int
tree_profiling (void)
{
  struct cgraph_node *node;

  /* Don't profile functions produced at destruction time, particularly
     the gcov datastructure initializer.  Don't profile if it has been
     already instrumented either (when OpenMP expansion creates
     child function from already instrumented body).  */
  if (cgraph_state == CGRAPH_STATE_FINISHED)
    return 0;

  init_node_map();

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->analyzed
	  || !gimple_has_body_p (node->decl))
	continue;

      /* Don't profile functions produced for builtin stuff.  */
      if (DECL_SOURCE_LOCATION (node->decl) == BUILTINS_LOCATION
	  || DECL_STRUCT_FUNCTION (node->decl)->after_tree_profile)
	continue;

      push_cfun (DECL_STRUCT_FUNCTION (node->decl));
      current_function_decl = node->decl;

      /* Re-set global shared temporary variable for edge-counters.  */
      gcov_type_tmp_var = NULL_TREE;

      /* Local pure-const may imply need to fixup the cfg.  */
      execute_fixup_cfg ();
      branch_prob ();

      if (! flag_branch_probabilities
	  && flag_profile_values)
	gimple_gen_ic_func_profiler ();

      if (flag_branch_probabilities
	  && flag_profile_values
	  && flag_value_profile_transformations)
	gimple_value_profile_transformations ();

      /* The above could hose dominator info.  Currently there is
	 none coming in, this is a safety valve.  It should be
	 easy to adjust it, if and when there is some.  */
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);

      current_function_decl = NULL;
      pop_cfun ();
    }

  /* Drop pure/const flags from instrumented functions.  */
  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->analyzed
	  || !gimple_has_body_p (node->decl)
	  || !(!node->clone_of || node->decl != node->clone_of->decl))
	continue;

      /* Don't profile functions produced for builtin stuff.  */
      if (DECL_SOURCE_LOCATION (node->decl) == BUILTINS_LOCATION
	  || DECL_STRUCT_FUNCTION (node->decl)->after_tree_profile)
	continue;

      cgraph_set_const_flag (node, false, false);
      cgraph_set_pure_flag (node, false, false);
    }

  /* Update call statements and rebuild the cgraph.  */
  for (node = cgraph_nodes; node; node = node->next)
    {
      basic_block bb;

      if (!node->analyzed
	  || !gimple_has_body_p (node->decl)
	  || !(!node->clone_of || node->decl != node->clone_of->decl))
	continue;

      /* Don't profile functions produced for builtin stuff.  */
      if (DECL_SOURCE_LOCATION (node->decl) == BUILTINS_LOCATION
	  || DECL_STRUCT_FUNCTION (node->decl)->after_tree_profile)
	continue;

      push_cfun (DECL_STRUCT_FUNCTION (node->decl));
      current_function_decl = node->decl;

      FOR_EACH_BB (bb)
	{
	  gimple_stmt_iterator gsi;
	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple stmt = gsi_stmt (gsi);
	      if (is_gimple_call (stmt))
		update_stmt (stmt);
	    }
	}

      cfun->after_tree_profile = 1;
      update_ssa (TODO_update_ssa);

      rebuild_cgraph_edges ();

      current_function_decl = NULL;
      pop_cfun ();
    }

  del_node_map();
  return 0;
}
Example #17
static bool
tree_if_conversion (struct loop *loop, bool for_vectorizer)
{
  basic_block bb;
  block_stmt_iterator itr;
  unsigned int i;

  ifc_bbs = NULL;

  /* If-conversion is not appropriate for all loops.  First, check
     whether the loop is if-convertible or not.  */
  if (!if_convertible_loop_p (loop, for_vectorizer))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,"-------------------------\n");
      if (ifc_bbs)
	{
	  free (ifc_bbs);
	  ifc_bbs = NULL;
	}
      free_dominance_info (CDI_POST_DOMINATORS);
      return false;
    }

  /* Do actual work now.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      tree cond;

      bb = ifc_bbs [i];

      /* Update condition using predicate list.  */
      cond = bb->aux;

      /* Process all statements in this basic block.
	 Remove conditional expression, if any, and annotate
	 destination basic block(s) appropriately.  */
      for (itr = bsi_start (bb); !bsi_end_p (itr); /* empty */)
	{
	  tree t = bsi_stmt (itr);
	  cond = tree_if_convert_stmt (loop, t, cond, &itr);
	  if (!bsi_end_p (itr))
	    bsi_next (&itr);
	}

      /* If current bb has only one successor, then consider it as an
	 unconditional goto.  */
      if (single_succ_p (bb))
	{
	  basic_block bb_n = single_succ (bb);
	  if (cond != NULL_TREE)
	    add_to_predicate_list (bb_n, cond);
	}
    }

  /* Now, all statements are if-converted and basic blocks are
     annotated appropriately.  Combine all basic blocks into one huge
     basic block.  */
  combine_blocks (loop);

  /* Clean up.  */
  clean_predicate_lists (loop);
  free (ifc_bbs);
  ifc_bbs = NULL;

  return true;
}