Example #1
void
clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
		     bool update_original, int *overall_size)
{
  if (duplicate)
    {
      /* We may eliminate the need for an out-of-line copy to be output.
	 In that case just go ahead and re-use it.  This is not just a
	 memory optimization.  Making the offline copy of the function
	 disappear from the program will improve future decisions on
	 inlining.  */
      if (!e->callee->callers->next_caller
	  /* Recursive inlining never wants the master clone to
	     be overwritten.  */
	  && update_original
	  && can_remove_node_now_p (e->callee, e))
	{
	  /* TODO: When callee is in a comdat group, we could remove all of it,
	     including all inline clones inlined into it.  That would however
	     need small function inlining to register edge removal hook to
	     maintain the priority queue.

	     For now we keep the other functions in the group in the program until
	     cgraph_remove_unreachable_functions gets rid of them.  */
	  gcc_assert (!e->callee->global.inlined_to);
          symtab_dissolve_same_comdat_group_list ((symtab_node) e->callee);
	  if (e->callee->analyzed && !DECL_EXTERNAL (e->callee->symbol.decl))
	    {
	      if (overall_size)
	        *overall_size -= inline_summary (e->callee)->size;
	      nfunctions_inlined++;
	    }
	  duplicate = false;
	  e->callee->symbol.externally_visible = false;
          update_noncloned_frequencies (e->callee, e->frequency);
	}
      else
	{
	  struct cgraph_node *n;
	  n = cgraph_clone_node (e->callee, e->callee->symbol.decl,
				 e->count, e->frequency,
				 update_original, vNULL, true);
	  cgraph_redirect_edge_callee (e, n);
	}
    }
  else
    symtab_dissolve_same_comdat_group_list ((symtab_node) e->callee);

  if (e->caller->global.inlined_to)
    e->callee->global.inlined_to = e->caller->global.inlined_to;
  else
    e->callee->global.inlined_to = e->caller;

  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      clone_inlined_nodes (e, duplicate, update_original, overall_size);
}
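For context, the only caller of this helper in the excerpts collected here is inline_call (Example #5 below); a minimal sketch of that call site, reusing the names from that example, is:

  /* Sketch of the call in inline_call (Example #5): the edge being inlined
     is passed with duplicate = true, so the callee body is either reused in
     place or cloned, and *overall_size accumulates the unit-size change.  */
  clone_inlined_nodes (e, true, update_original, overall_size);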
Example #2
struct cgraph_node *
cgraph_clone_node (struct cgraph_node *n, tree decl, gcov_type count, int freq,
		   bool update_original,
		   vec<cgraph_edge_p> redirect_callers,
		   bool call_duplication_hook,
		   struct cgraph_node *new_inlined_to)
{
  struct cgraph_node *new_node = cgraph_create_empty_node ();
  struct cgraph_edge *e;
  gcov_type count_scale;
  unsigned i;

  new_node->decl = decl;
  symtab_register_node (new_node);
  new_node->origin = n->origin;
  new_node->lto_file_data = n->lto_file_data;
  if (new_node->origin)
    {
      new_node->next_nested = new_node->origin->nested;
      new_node->origin->nested = new_node;
    }
  new_node->analyzed = n->analyzed;
  new_node->definition = n->definition;
  new_node->local = n->local;
  new_node->externally_visible = false;
  new_node->local.local = true;
  new_node->global = n->global;
  new_node->global.inlined_to = new_inlined_to;
  new_node->rtl = n->rtl;
  new_node->count = count;
  new_node->frequency = n->frequency;
  new_node->clone = n->clone;
  new_node->clone.tree_map = NULL;
  new_node->tp_first_run = n->tp_first_run;
  if (n->count)
    {
      if (new_node->count > n->count)
        count_scale = REG_BR_PROB_BASE;
      else
        count_scale = GCOV_COMPUTE_SCALE (new_node->count, n->count);
    }
  else
    count_scale = 0;
  if (update_original)
    {
      n->count -= count;
      if (n->count < 0)
	n->count = 0;
    }

  FOR_EACH_VEC_ELT (redirect_callers, i, e)
    {
      /* Redirect calls to the old version node to point to its new
	 version.  */
      cgraph_redirect_edge_callee (e, new_node);
    }
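A hedged sketch of how a caller might drive this cloning API; collect_callers_of_node and the concrete argument values below are assumptions for illustration, not taken from the excerpts above.

  /* Hypothetical caller: gather the edges that should target the clone and
     hand them to cgraph_clone_node as redirect_callers; the clone keeps the
     original decl, inherits its profile count, and is not an inline clone
     (new_inlined_to == NULL).  */
  vec<cgraph_edge_p> redirect_callers = collect_callers_of_node (node);  /* assumed helper */
  struct cgraph_node *clone
    = cgraph_clone_node (node, node->decl, node->count, CGRAPH_FREQ_BASE,
			 false /* update_original */, redirect_callers,
			 true /* call_duplication_hook */, NULL);
  redirect_callers.release ();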
Example #3
static void
lto_cgraph_replace_node (struct cgraph_node *node,
			 struct cgraph_node *prevailing_node)
{
  struct cgraph_edge *e, *next;
  bool compatible_p;

  if (cgraph_dump_file)
    {
      fprintf (cgraph_dump_file, "Replacing cgraph node %s/%i by %s/%i"
 	       " for symbol %s\n",
	       xstrdup (cgraph_node_name (node)), node->uid,
	       xstrdup (cgraph_node_name (prevailing_node)),
	       prevailing_node->uid,
	       IDENTIFIER_POINTER ((*targetm.asm_out.mangle_assembler_name)
		 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl)))));
    }

  /* Merge node flags.  */
  if (node->needed)
    cgraph_mark_needed_node (prevailing_node);
  if (node->reachable)
    cgraph_mark_reachable_node (prevailing_node);
  if (node->address_taken)
    {
      gcc_assert (!prevailing_node->global.inlined_to);
      cgraph_mark_address_taken_node (prevailing_node);
    }

  /* Redirect all incoming edges.  */
  compatible_p
    = types_compatible_p (TREE_TYPE (TREE_TYPE (prevailing_node->decl)),
			  TREE_TYPE (TREE_TYPE (node->decl)));
  for (e = node->callers; e; e = next)
    {
      next = e->next_caller;
      cgraph_redirect_edge_callee (e, prevailing_node);
      /* If there is a mismatch between the supposed callee return type and
	 the real one do not attempt to inline this function.
	 ???  We really need a way to match function signatures for ABI
	 compatibility and perform related promotions at inlining time.  */
      if (!compatible_p)
	e->call_stmt_cannot_inline_p = 1;
    }
  /* Redirect incoming references.  */
  ipa_clone_refering (prevailing_node, NULL, &node->ref_list);

  /* Finally remove the replaced node.  */
  cgraph_remove_node (node);
}
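The compatible_p guard above compares only the return types of the prevailing and replaced declarations; a hedged illustration of the cross-TU mismatch it is meant to catch (hypothetical prototypes, not from the source):

/* Hypothetical LTO scenario: two translation units see the same symbol with
   different return types, so TREE_TYPE (TREE_TYPE (decl)) differs between
   the two nodes and every redirected call edge gets
   call_stmt_cannot_inline_p set.  */

/* tu1.c -- the prevailing definition.  */
int process (void) { return 0; }

/* tu2.c -- a stale declaration of the same symbol in another unit.  */
extern void process (void);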
Example #4
/* Fix the call sites and the call graph after function cloning has been done.  */
static void
ipcp_update_callgraph (void)
{
  struct cgraph_node *node;

  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed && ipcp_node_is_clone (node))
      {
	bitmap args_to_skip = NULL;
	struct cgraph_node *orig_node = ipcp_get_orig_node (node);
        struct ipa_node_params *info = IPA_NODE_REF (orig_node);
        int i, count = ipa_get_param_count (info);
        struct cgraph_edge *cs, *next;

	if (node->local.can_change_signature)
	  {
	    args_to_skip = BITMAP_ALLOC (NULL);
	    for (i = 0; i < count; i++)
	      {
		struct ipcp_lattice *lat = ipcp_get_lattice (info, i);

		/* We can proactively remove obviously unused arguments.  */
		if (!ipa_is_param_used (info, i))
		  {
		    bitmap_set_bit (args_to_skip, i);
		    continue;
		  }

		if (lat->type == IPA_CONST_VALUE)
		  bitmap_set_bit (args_to_skip, i);
	      }
	  }
	for (cs = node->callers; cs; cs = next)
	  {
	    next = cs->next_caller;
	    if (!ipcp_node_is_clone (cs->caller) && ipcp_need_redirect_p (cs))
	      {
		if (dump_file)
		  fprintf (dump_file, "Redirecting edge %s/%i -> %s/%i "
			   "back to %s/%i.",
			   cgraph_node_name (cs->caller), cs->caller->uid,
			   cgraph_node_name (cs->callee), cs->callee->uid,
			   cgraph_node_name (orig_node), orig_node->uid);
		cgraph_redirect_edge_callee (cs, orig_node);
	      }
	  }
      }
}
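As a concrete illustration of the args_to_skip logic above, consider a hypothetical three-parameter callee (not from the source) where parameter 0 is unused and parameter 1 has a propagated constant; the loop would set the first two bits and leave the third clear:

  /* Hypothetical result of the loop above for
       void f (int unused_arg, int always_42, int varying);
     bit 0: !ipa_is_param_used (info, 0)   -> dead argument, skipped
     bit 1: lat->type == IPA_CONST_VALUE   -> constant propagated, skipped
     bit 2: used and varying               -> kept in the clone's signature.  */
  bitmap args_to_skip = BITMAP_ALLOC (NULL);
  bitmap_set_bit (args_to_skip, 0);
  bitmap_set_bit (args_to_skip, 1);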
Example #5
bool
inline_call (struct cgraph_edge *e, bool update_original,
	     vec<cgraph_edge_p> *new_edges,
	     int *overall_size, bool update_overall_summary)
{
  int old_size = 0, new_size = 0;
  struct cgraph_node *to = NULL;
  struct cgraph_edge *curr = e;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
  bool new_edges_found = false;

#ifdef ENABLE_CHECKING
  int estimated_growth = estimate_edge_growth (e);
  bool predicated = inline_edge_summary (e)->predicate != NULL;
#endif

  /* Don't inline inlined edges.  */
  gcc_assert (e->inline_failed);
  /* Don't even think of inlining an inline clone.  */
  gcc_assert (!callee->global.inlined_to);

  e->inline_failed = CIF_OK;
  DECL_POSSIBLY_INLINED (callee->symbol.decl) = true;

  to = e->caller;
  if (to->global.inlined_to)
    to = to->global.inlined_to;

  /* If aliases are involved, redirect edge to the actual destination and
     possibly remove the aliases.  */
  if (e->callee != callee)
    {
      struct cgraph_node *alias = e->callee, *next_alias;
      cgraph_redirect_edge_callee (e, callee);
      while (alias && alias != callee)
	{
	  if (!alias->callers
	      && can_remove_node_now_p (alias, e))
	    {
	      next_alias = cgraph_alias_target (alias);
	      cgraph_remove_node (alias);
	      alias = next_alias;
	    }
	  else
	    break;
	}
    }

  clone_inlined_nodes (e, true, update_original, overall_size);

  gcc_assert (curr->callee->global.inlined_to == to);

  old_size = inline_summary (to)->size;
  inline_merge_summary (e);
  if (optimize)
    new_edges_found = ipa_propagate_indirect_call_infos (curr, new_edges);
  if (update_overall_summary)
   inline_update_overall_summary (to);
  new_size = inline_summary (to)->size;

#ifdef ENABLE_CHECKING
  /* Verify that the estimated growth matches the real growth.  Allow an
     off-by-one error due to INLINE_SIZE_SCALE roundoff errors.  */
  gcc_assert (!update_overall_summary || !overall_size || new_edges_found
	      || abs (estimated_growth - (new_size - old_size)) <= 1
	      /* FIXME: a hack.  Edges with false predicate are accounted
		 wrong, we should remove them from callgraph.  */
	      || predicated);
#endif

  /* Account the change of overall unit size; external functions will be
     removed and are thus not accounted.  */
  if (overall_size
      && !DECL_EXTERNAL (to->symbol.decl))
    *overall_size += new_size - old_size;
  ncalls_inlined++;

  /* This must happen after inline_merge_summary, which relies on the jump
     functions of the callee not having been updated yet.  */
  return new_edges_found;
}
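Finally, a hedged sketch of how an inlining pass might drive inline_call; the surrounding variables and consider_new_edge () are placeholders for illustration, not code taken from the excerpts above.

  /* Hypothetical driver step: inline one candidate edge, then queue any
     newly discovered indirect-call edges for later consideration, while
     overall_size keeps the unit-size accounting.  */
  vec<cgraph_edge_p> new_edges = vNULL;
  if (inline_call (edge, true, &new_edges, &overall_size, true))
    {
      struct cgraph_edge *ne;
      unsigned i;
      FOR_EACH_VEC_ELT (new_edges, i, ne)
	consider_new_edge (ne);		/* hypothetical follow-up */
    }
  new_edges.release ();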