Example #1
static void 
merge_callee_local_info (struct cgraph_node *target, 
                         struct cgraph_node *x)
{
  struct cgraph_edge *e;
  ipa_reference_local_vars_info_t x_l = 
    get_reference_vars_info_from_cgraph (target)->local;

  /* Make the world safe for tail recursion.  */
  struct ipa_dfs_info *node_info = x->aux;
  
  if (node_info->aux) 
    return;

  node_info->aux = x;

  for (e = x->callees; e; e = e->next_callee) 
    {
      struct cgraph_node *y = e->callee;
      if (y->global.inlined_to) 
        {
          ipa_reference_vars_info_t y_info;
          ipa_reference_local_vars_info_t y_l;
          struct cgraph_node* orig_y = y;
         
          y = cgraph_master_clone (y);
          if (y)
            {
              y_info = get_reference_vars_info_from_cgraph (y);
              y_l = y_info->local;
              if (x_l != y_l)
                {
                  bitmap_ior_into (x_l->statics_read,
                                   y_l->statics_read);
                  bitmap_ior_into (x_l->statics_written,
                                   y_l->statics_written);
                }
              x_l->calls_read_all |= y_l->calls_read_all;
              x_l->calls_write_all |= y_l->calls_write_all;
              merge_callee_local_info (target, y);
            }
          else 
            {
              fprintf (stderr, "suspect inlining of ");
              dump_cgraph_node (stderr, orig_y);
              fprintf (stderr, "\ninto ");
              dump_cgraph_node (stderr, target);
              dump_cgraph (stderr);
              gcc_assert (false);
            }
        }
    }

  node_info->aux = NULL;
}
Example #2
static void 
process_replaceable (temp_expr_table_p tab, gimple stmt)
{
  tree var, def, basevar;
  int version;
  ssa_op_iter iter;
  bitmap def_vars, use_vars;

#ifdef ENABLE_CHECKING
  gcc_assert (is_replaceable_p (stmt));
#endif

  def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF);
  version = SSA_NAME_VERSION (def);
  basevar = SSA_NAME_VAR (def);
  def_vars = BITMAP_ALLOC (NULL);

  bitmap_set_bit (def_vars, DECL_UID (basevar));

  /* Add this expression to the dependency list for each use partition.  */
  FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_USE)
    {
      int var_version = SSA_NAME_VERSION (var);

      use_vars = tab->expr_decl_uids[var_version];
      add_dependence (tab, version, var);
      if (use_vars)
        {
	  bitmap_ior_into (def_vars, use_vars);
	  BITMAP_FREE (tab->expr_decl_uids[var_version]);
	}
      else
	bitmap_set_bit (def_vars, DECL_UID (SSA_NAME_VAR (var)));
    }
  tab->expr_decl_uids[version] = def_vars;

  /* If there are VUSES, add a dependence on virtual defs.  */
  if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_VUSE))
    {
      make_dependent_on_partition (tab, version, VIRTUAL_PARTITION (tab));
      add_to_partition_kill_list (tab, VIRTUAL_PARTITION (tab), version);
    }
}
Example #3
static void
add_dependence (temp_expr_table_p tab, int version, tree var)
{
  int i;
  bitmap_iterator bi;
  unsigned x;

  i = SSA_NAME_VERSION (var);
  if (version_to_be_replaced_p (tab, i))
    {
      if (!bitmap_empty_p (tab->new_replaceable_dependencies))
        {
	  /* Version will now be killed by a write to any partition the 
	     substituted expression would have been killed by.  */
	  EXECUTE_IF_SET_IN_BITMAP (tab->new_replaceable_dependencies, 0, x, bi)
	    add_to_partition_kill_list (tab, x, version);

	  /* Rather than set partition_dependencies and in_use lists bit by 
	     bit, simply OR in the new_replaceable_dependencies bits.  */
	  if (!tab->partition_dependencies[version])
	    tab->partition_dependencies[version] = BITMAP_ALLOC (NULL);
	  bitmap_ior_into (tab->partition_dependencies[version], 
			   tab->new_replaceable_dependencies);
	  bitmap_ior_into (tab->partition_in_use, 
			   tab->new_replaceable_dependencies);
	  /* It is only necessary to add these once.  */
	  bitmap_clear (tab->new_replaceable_dependencies);
	}
    }
  else
    {
      i = var_to_partition (tab->map, var);
#ifdef ENABLE_CHECKING
      gcc_assert (i != NO_PARTITION);
      gcc_assert (tab->num_in_part[i] != 0);
#endif
      /* Only dependencies on ssa_names which are coalesced with something need
         to be tracked.  Partitions containing only a single SSA_NAME
         *cannot* have their value changed.  */
      if (tab->num_in_part[i] > 1)
        {
	  add_to_partition_kill_list (tab, i, version);
	  make_dependent_on_partition (tab, version, i);
	}
    }
}
Example #4
void
ebb_compute_jump_reg_dependencies (rtx insn, regset used)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, b->succs)
    if ((e->flags & EDGE_FALLTHRU) == 0)
      bitmap_ior_into (used, df_get_live_in (e->dest));
}
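
Every example in this listing revolves around the same primitive: bitmap_ior_into (dst, src) forms the in-place union dst |= src and returns true if dst changed. The sketch below illustrates just that semantics in self-contained C; simple_bitmap is a hypothetical fixed-width stand-in, not GCC's actual sparse, obstack-backed bitmap type.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SB_WORDS 4			/* 4 * 64 = 256 bits of capacity.  */

typedef struct { uint64_t words[SB_WORDS]; } simple_bitmap;

static void
simple_set_bit (simple_bitmap *b, unsigned int bit)
{
  b->words[bit / 64] |= (uint64_t) 1 << (bit % 64);
}

/* In-place union: OR every word of SRC into DST.  Return true if DST
   changed, mirroring the boolean result of GCC's bitmap_ior_into.  */
static bool
simple_ior_into (simple_bitmap *dst, const simple_bitmap *src)
{
  bool changed = false;
  int i;

  for (i = 0; i < SB_WORDS; i++)
    {
      uint64_t merged = dst->words[i] | src->words[i];

      changed |= merged != dst->words[i];
      dst->words[i] = merged;
    }
  return changed;
}

int
main (void)
{
  simple_bitmap used = { { 0 } }, live_in = { { 0 } };

  simple_set_bit (&used, 3);
  simple_set_bit (&live_in, 3);
  simple_set_bit (&live_in, 70);
  /* Accumulate LIVE_IN into USED, as Example #4 accumulates the live-in
     sets of non-fallthru successor blocks.  */
  printf ("changed: %d\n", simple_ior_into (&used, &live_in));
  return 0;
}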
Example #5
/* Set up USED_PSEUDOS_BITMAP, and update LR_BITMAP (a BB live info
   bitmap).  */
static void
update_live_info (bitmap lr_bitmap)
{
  unsigned int j;
  bitmap_iterator bi;

  bitmap_clear (&used_pseudos_bitmap);
  EXECUTE_IF_AND_IN_BITMAP (&coalesced_pseudos_bitmap, lr_bitmap,
			    FIRST_PSEUDO_REGISTER, j, bi)
    bitmap_set_bit (&used_pseudos_bitmap, first_coalesced_pseudo[j]);
  if (! bitmap_empty_p (&used_pseudos_bitmap))
    {
      bitmap_and_compl_into (lr_bitmap, &coalesced_pseudos_bitmap);
      bitmap_ior_into (lr_bitmap, &used_pseudos_bitmap);
    }
}
Example #6
void
ebb_compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used,
				   regset set)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, b->succs)
    if (e->flags & EDGE_FALLTHRU)
      /* The jump may be a by-product of a branch that has been merged
	 in the main codepath after being conditionalized.  Therefore
	 it may guard the fallthrough block from using a value that has
	 conditionally overwritten that of the main codepath.  So we
	 consider that it restores the value of the main codepath.  */
      bitmap_and (set, df_get_live_in (e->dest), cond_set);
    else
      bitmap_ior_into (used, df_get_live_in (e->dest));
}
Example #7
/* Spill pseudos which are assigned to hard registers in SET.  Add
   affected insns for processing in the subsequent constraint
   pass.  */
static void
spill_pseudos (HARD_REG_SET set)
{
  int i;
  bitmap_head to_process;
  rtx_insn *insn;

  if (hard_reg_set_empty_p (set))
    return;
  if (lra_dump_file != NULL)
    {
      fprintf (lra_dump_file, "	   Spilling non-eliminable hard regs:");
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	if (TEST_HARD_REG_BIT (set, i))
	  fprintf (lra_dump_file, " %d", i);
      fprintf (lra_dump_file, "\n");
    }
  bitmap_initialize (&to_process, &reg_obstack);
  for (i = FIRST_PSEUDO_REGISTER; i < max_reg_num (); i++)
    if (lra_reg_info[i].nrefs != 0 && reg_renumber[i] >= 0
	&& overlaps_hard_reg_set_p (set,
				    PSEUDO_REGNO_MODE (i), reg_renumber[i]))
      {
	if (lra_dump_file != NULL)
	  fprintf (lra_dump_file, "	 Spilling r%d(%d)\n",
		   i, reg_renumber[i]);
	reg_renumber[i] = -1;
	bitmap_ior_into (&to_process, &lra_reg_info[i].insn_bitmap);
      }
  IOR_HARD_REG_SET (lra_no_alloc_regs, set);
  for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
    if (bitmap_bit_p (&to_process, INSN_UID (insn)))
      {
	lra_push_insn (insn);
	lra_set_used_insn_alternative (insn, -1);
      }
  bitmap_clear (&to_process);
}
Example #8
/* The major function for aggressive coalescing of pseudos: coalesce a
   move only if both pseudos were spilled and are not special reload
   pseudos.  */
bool
lra_coalesce (void)
{
  basic_block bb;
  rtx mv, set, insn, next, *sorted_moves;
  int i, mv_num, sregno, dregno;
  int coalesced_moves;
  int max_regno = max_reg_num ();
  bitmap_head involved_insns_bitmap;

  timevar_push (TV_LRA_COALESCE);

  if (lra_dump_file != NULL)
    fprintf (lra_dump_file,
	     "\n********** Pseudos coalescing #%d: **********\n\n",
	     ++lra_coalesce_iter);
  first_coalesced_pseudo = XNEWVEC (int, max_regno);
  next_coalesced_pseudo = XNEWVEC (int, max_regno);
  for (i = 0; i < max_regno; i++)
    first_coalesced_pseudo[i] = next_coalesced_pseudo[i] = i;
  sorted_moves = XNEWVEC (rtx, get_max_uid ());
  mv_num = 0;
  /* Collect moves.  */
  coalesced_moves = 0;
  FOR_EACH_BB (bb)
    {
      FOR_BB_INSNS_SAFE (bb, insn, next)
	if (INSN_P (insn)
	    && (set = single_set (insn)) != NULL_RTX
	    && REG_P (SET_DEST (set)) && REG_P (SET_SRC (set))
	    && (sregno = REGNO (SET_SRC (set))) >= FIRST_PSEUDO_REGISTER
	    && (dregno = REGNO (SET_DEST (set))) >= FIRST_PSEUDO_REGISTER
	    && mem_move_p (sregno, dregno)
	    && coalescable_pseudo_p (sregno) && coalescable_pseudo_p (dregno)
	    && ! side_effects_p (set)
	    && !(lra_intersected_live_ranges_p
		 (lra_reg_info[sregno].live_ranges,
		  lra_reg_info[dregno].live_ranges)))
	  sorted_moves[mv_num++] = insn;
    }
  qsort (sorted_moves, mv_num, sizeof (rtx), move_freq_compare_func);
  /* Coalesce copies, most frequently executed first.	*/
  bitmap_initialize (&coalesced_pseudos_bitmap, &reg_obstack);
  bitmap_initialize (&involved_insns_bitmap, &reg_obstack);
  for (i = 0; i < mv_num; i++)
    {
      mv = sorted_moves[i];
      set = single_set (mv);
      lra_assert (set != NULL && REG_P (SET_SRC (set))
		  && REG_P (SET_DEST (set)));
      sregno = REGNO (SET_SRC (set));
      dregno = REGNO (SET_DEST (set));
      if (first_coalesced_pseudo[sregno] == first_coalesced_pseudo[dregno])
	{
	  coalesced_moves++;
	  if (lra_dump_file != NULL)
	    fprintf
	      (lra_dump_file, "      Coalescing move %i:r%d-r%d (freq=%d)\n",
	       INSN_UID (mv), sregno, dregno,
	       BLOCK_FOR_INSN (mv)->frequency);
	  /* We updated involved_insns_bitmap when doing the merge.  */
	}
      else if (!(lra_intersected_live_ranges_p
		 (lra_reg_info[first_coalesced_pseudo[sregno]].live_ranges,
		  lra_reg_info[first_coalesced_pseudo[dregno]].live_ranges)))
	{
	  coalesced_moves++;
	  if (lra_dump_file != NULL)
	    fprintf
	      (lra_dump_file,
	       "  Coalescing move %i:r%d(%d)-r%d(%d) (freq=%d)\n",
	       INSN_UID (mv), sregno, ORIGINAL_REGNO (SET_SRC (set)),
	       dregno, ORIGINAL_REGNO (SET_DEST (set)),
	       BLOCK_FOR_INSN (mv)->frequency);
	  bitmap_ior_into (&involved_insns_bitmap,
			   &lra_reg_info[sregno].insn_bitmap);
	  bitmap_ior_into (&involved_insns_bitmap,
			   &lra_reg_info[dregno].insn_bitmap);
	  merge_pseudos (sregno, dregno);
	}
    }
  bitmap_initialize (&used_pseudos_bitmap, &reg_obstack);
  FOR_EACH_BB (bb)
    {
      update_live_info (df_get_live_in (bb));
      update_live_info (df_get_live_out (bb));
      FOR_BB_INSNS_SAFE (bb, insn, next)
	if (INSN_P (insn)
	    && bitmap_bit_p (&involved_insns_bitmap, INSN_UID (insn)))
	  {
	    if (! substitute (&insn))
	      continue;
	    lra_update_insn_regno_info (insn);
	    if ((set = single_set (insn)) != NULL_RTX && set_noop_p (set))
	      {
		/* Coalesced move.  */
		if (lra_dump_file != NULL)
		  fprintf (lra_dump_file, "	 Removing move %i (freq=%d)\n",
			 INSN_UID (insn), BLOCK_FOR_INSN (insn)->frequency);
		lra_set_insn_deleted (insn);
	      }
	  }
    }
  bitmap_clear (&used_pseudos_bitmap);
  bitmap_clear (&involved_insns_bitmap);
  bitmap_clear (&coalesced_pseudos_bitmap);
  if (lra_dump_file != NULL && coalesced_moves != 0)
    fprintf (lra_dump_file, "Coalesced Moves = %d\n", coalesced_moves);
  free (sorted_moves);
  free (next_coalesced_pseudo);
  free (first_coalesced_pseudo);
  timevar_pop (TV_LRA_COALESCE);
  return coalesced_moves != 0;
}
Example #9
static void
propagate_bits (struct cgraph_node *x)
{
  ipa_reference_vars_info_t x_info = get_reference_vars_info_from_cgraph (x);
  ipa_reference_global_vars_info_t x_global = x_info->global;

  struct cgraph_edge *e;
  for (e = x->callees; e; e = e->next_callee) 
    {
      struct cgraph_node *y = e->callee;

      /* Only look at the master nodes and skip external nodes.  */
      y = cgraph_master_clone (y);
      if (y)
        {
          if (get_reference_vars_info_from_cgraph (y))
            {
              ipa_reference_vars_info_t y_info = get_reference_vars_info_from_cgraph (y);
              ipa_reference_global_vars_info_t y_global = y_info->global;
              
              if (x_global->statics_read
                  != all_module_statics)
                {
                  if (y_global->statics_read 
                      == all_module_statics)
                    {
                      BITMAP_FREE (x_global->statics_read);
                      x_global->statics_read 
                        = all_module_statics;
                    }
                  /* Skip bitmaps that are pointer equal to node's bitmap
                     (no reason to spin within the cycle).  */
                  else if (x_global->statics_read 
                           != y_global->statics_read)
                    bitmap_ior_into (x_global->statics_read,
                                     y_global->statics_read);
                }
              
              if (x_global->statics_written 
                  != all_module_statics)
                {
                  if (y_global->statics_written 
                      == all_module_statics)
                    {
                      BITMAP_FREE (x_global->statics_written);
                      x_global->statics_written 
                        = all_module_statics;
                    }
                  /* Skip bitmaps that are pointer equal to node's bitmap
                     (no reason to spin within the cycle).  */
                  else if (x_global->statics_written 
                           != y_global->statics_written)
                    bitmap_ior_into (x_global->statics_written,
                                     y_global->statics_written);
                }
            }
          else 
            {
              gcc_unreachable ();
            }
        }
    }
}
Example #10
/* Update all offsets and the possibility of elimination for eliminable
   registers.  Spill pseudos assigned to registers which are not
   eliminable, update LRA_NO_ALLOC_REGS and ELIMINABLE_REG_SET.  Add
   insns to INSNS_WITH_CHANGED_OFFSETS containing eliminable hard
   registers whose offsets should be changed.  Return true if any
   elimination offset changed.  */
static bool
update_reg_eliminate (bitmap insns_with_changed_offsets)
{
  bool prev, result;
  struct lra_elim_table *ep, *ep1;
  HARD_REG_SET temp_hard_reg_set;

  /* Clear self elimination offsets.  */
  for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
    self_elim_offsets[ep->from] = 0;
  for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
    {
      /* If it is the currently used elimination, update the previous
	 offset.  */
      if (elimination_map[ep->from] == ep)
	ep->previous_offset = ep->offset;

      prev = ep->prev_can_eliminate;
      setup_can_eliminate (ep, targetm.can_eliminate (ep->from, ep->to));
      if (ep->can_eliminate && ! prev)
	{
	  /* It is possible that a previously non-eliminable register
	     becomes eliminable because we took other reasons into account
	     when setting up eliminable regs in the initial setup.  Just
	     ignore such newly eliminable registers.  */
	  setup_can_eliminate (ep, false);
	  continue;
	}
      if (ep->can_eliminate != prev && elimination_map[ep->from] == ep)
	{
	  /* We cannot use this elimination anymore -- find another
	     one.  */
	  if (lra_dump_file != NULL)
	    fprintf (lra_dump_file,
		     "	Elimination %d to %d is not possible anymore\n",
		     ep->from, ep->to);
	  /* If, after processing the RTL, we decide that SP can be used as
	     the result of an elimination, it cannot be changed.  */
	  gcc_assert ((ep->to_rtx != stack_pointer_rtx)
		      || (ep->from < FIRST_PSEUDO_REGISTER
			  && fixed_regs [ep->from]));
	  /* Mark that it is not eliminable anymore.  */
	  elimination_map[ep->from] = NULL;
	  for (ep1 = ep + 1; ep1 < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep1++)
	    if (ep1->can_eliminate && ep1->from == ep->from)
	      break;
	  if (ep1 < &reg_eliminate[NUM_ELIMINABLE_REGS])
	    {
	      if (lra_dump_file != NULL)
		fprintf (lra_dump_file, "    Using elimination %d to %d now\n",
			 ep1->from, ep1->to);
	      lra_assert (ep1->previous_offset == 0);
	      ep1->previous_offset = ep->offset;
	    }
	  else
	    {
	      /* There is no elimination anymore; just use the hard
		 register `from' itself.  Set up the self-elimination
		 offset to restore the original offset values.  */
	      if (lra_dump_file != NULL)
		fprintf (lra_dump_file, "    %d is not eliminable at all\n",
			 ep->from);
	      self_elim_offsets[ep->from] = -ep->offset;
	      if (ep->offset != 0)
		bitmap_ior_into (insns_with_changed_offsets,
				 &lra_reg_info[ep->from].insn_bitmap);
	    }
	}

      INITIAL_ELIMINATION_OFFSET (ep->from, ep->to, ep->offset);
    }
  setup_elimination_map ();
  result = false;
  CLEAR_HARD_REG_SET (temp_hard_reg_set);
  for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
    if (elimination_map[ep->from] == NULL)
      SET_HARD_REG_BIT (temp_hard_reg_set, ep->from);
    else if (elimination_map[ep->from] == ep)
      {
	/* Prevent the hard register into which we eliminate from
	   being used for pseudos.  */
        if (ep->from != ep->to)
	  SET_HARD_REG_BIT (temp_hard_reg_set, ep->to);
	if (ep->previous_offset != ep->offset)
	  {
	    bitmap_ior_into (insns_with_changed_offsets,
			     &lra_reg_info[ep->from].insn_bitmap);

	    /* Update the offset when the elimination offset has
	       changed.  */
	    lra_update_reg_val_offset (lra_reg_info[ep->from].val,
				       ep->offset - ep->previous_offset);
	    result = true;
	  }
      }
  IOR_HARD_REG_SET (lra_no_alloc_regs, temp_hard_reg_set);
  AND_COMPL_HARD_REG_SET (eliminable_regset, temp_hard_reg_set);
  spill_pseudos (temp_hard_reg_set);
  return result;
}
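
Examples #7, #8, and #10 share an accumulate-then-sweep idiom: OR the per-register insn bitmaps of every affected register into a single work set, then make one pass over the insn stream testing membership, so each insn is reprocessed at most once however many registers touch it. A condensed, self-contained sketch of that flow follows; the dense bool arrays are hypothetical stand-ins for lra_reg_info[i].insn_bitmap and the insn list.

#include <stdbool.h>
#include <stdio.h>

#define NUM_REGS  4
#define MAX_INSNS 16

/* Hypothetical dense stand-ins; real LRA keeps one sparse bitmap of insn
   UIDs per register in lra_reg_info[i].insn_bitmap.  */
static bool reg_insns[NUM_REGS][MAX_INSNS];	/* reg -> insns using it.  */
static bool spilled[NUM_REGS];

int
main (void)
{
  bool to_process[MAX_INSNS] = { false };
  int r, uid;

  reg_insns[1][3] = reg_insns[1][7] = true;
  reg_insns[3][7] = reg_insns[3][12] = true;
  spilled[1] = spilled[3] = true;

  /* Accumulate: union the insn sets of all spilled registers, the role
     bitmap_ior_into (&to_process, ...) plays in spill_pseudos above.  */
  for (r = 0; r < NUM_REGS; r++)
    if (spilled[r])
      for (uid = 0; uid < MAX_INSNS; uid++)
	to_process[uid] |= reg_insns[r][uid];

  /* Sweep: one pass over all insns, touching only members of the set,
     like the get_insns () loop guarded by bitmap_bit_p.  */
  for (uid = 0; uid < MAX_INSNS; uid++)
    if (to_process[uid])
      printf ("re-process insn %d\n", uid);	/* insns 3, 7, 12 */
  return 0;
}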