Example #1
File: fwprop.c  Project: ollie314/gcc
static void
build_single_def_use_links (void)
{
  /* We use the multiple definitions problem to compute our restricted
     use-def chains.  */
  df_set_flags (DF_EQ_NOTES);
  df_md_add_problem ();
  df_note_add_problem ();
  df_analyze ();
  df_maybe_reorganize_use_refs (DF_REF_ORDER_BY_INSN_WITH_NOTES);

  use_def_ref.create (DF_USES_TABLE_SIZE ());
  use_def_ref.safe_grow_cleared (DF_USES_TABLE_SIZE ());

  reg_defs.create (max_reg_num ());
  reg_defs.safe_grow_cleared (max_reg_num ());

  reg_defs_stack.create (n_basic_blocks_for_fn (cfun) * 10);
  local_md = BITMAP_ALLOC (NULL);
  local_lr = BITMAP_ALLOC (NULL);

  /* Walk the dominator tree looking for single reaching definitions
     dominating the uses.  This is similar to how SSA form is built.  */
  single_def_use_dom_walker (CDI_DOMINATORS)
    .walk (cfun->cfg->x_entry_block_ptr);

  BITMAP_FREE (local_lr);
  BITMAP_FREE (local_md);
  reg_defs.release ();
  reg_defs_stack.release ();
}
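The walker constructed just above is fwprop.c's own dom_walker subclass, whose definition is not part of this snippet. As a hedged sketch of what such a class looks like against GCC's domwalk.h interface (method names follow recent releases; before_dom_children returned void in older ones, and the real single_def_use_dom_walker does the actual def/use bookkeeping):

class sketch_def_use_dom_walker : public dom_walker
{
public:
  sketch_def_use_dom_walker (cdi_direction direction)
    : dom_walker (direction) {}

  /* Visit BB before its dominated children: record the definitions it
     makes and link each use to its single dominating definition.  */
  virtual edge before_dom_children (basic_block bb)
  {
    (void) bb;   /* Illustrative stub; fwprop.c walks BB's insns here.  */
    return NULL;
  }

  /* Visit BB after its dominated children: pop the definitions that
     were recorded in before_dom_children.  */
  virtual void after_dom_children (basic_block bb)
  {
    (void) bb;   /* Illustrative stub.  */
  }
};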
Example #2
static basic_block
split_block_and_df_analyze (basic_block bb, rtx insn)
{
  basic_block next;
  next = split_block (bb, insn)->dest;
  df_analyze ();
  return next;
}
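A hedged usage sketch (the caller and variable names below are hypothetical): the helper is convenient when a pass wants everything after a given insn isolated in its own block, with dataflow already recomputed for the new CFG.

/* Hypothetical caller: cut BB right after INSN and keep transforming the
   block that now begins with the insn following INSN.  */
static void
sketch_isolate_tail (basic_block bb, rtx insn)
{
  basic_block tail = split_block_and_df_analyze (bb, insn);

  /* ... work on TAIL with up-to-date df information ... */
  (void) tail;
}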
Example #3
static unsigned int
rest_of_handle_stack_adjustments (void)
{
  df_note_add_problem ();
  df_analyze ();
  combine_stack_adjustments ();
  return 0;
}
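In-tree, a rest_of_handle_* function like the one above is normally the execute callback of an rtl_opt_pass. The following is only a rough sketch against the C++ pass interface of recent GCC; the pass_data field list and values are illustrative and have changed between releases.

namespace {

const pass_data pass_data_csa_sketch =
{
  RTL_PASS,       /* type */
  "csa_sketch",   /* name (illustrative) */
  OPTGROUP_NONE,  /* optinfo_flags */
  TV_NONE,        /* tv_id (illustrative) */
  0,              /* properties_required */
  0,              /* properties_provided */
  0,              /* properties_destroyed */
  0,              /* todo_flags_start */
  0,              /* todo_flags_finish */
};

class pass_csa_sketch : public rtl_opt_pass
{
public:
  pass_csa_sketch (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_csa_sketch, ctxt) {}

  virtual unsigned int execute (function *)
  {
    return rest_of_handle_stack_adjustments ();
  }
};

} /* anonymous namespace */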
Example #4
File: web.c  Project: 0mp/freebsd
static void
web_main (void)
{
  struct df *df;
  struct web_entry *def_entry;
  struct web_entry *use_entry;
  unsigned int i;
  int max = max_reg_num ();
  char *used;

  df = df_init (DF_EQUIV_NOTES);
  df_chain_add_problem (df, DF_UD_CHAIN);
  df_analyze (df);
  df_reorganize_refs (&df->def_info);
  df_reorganize_refs (&df->use_info);

  def_entry = XCNEWVEC (struct web_entry, DF_DEFS_SIZE (df));
  use_entry = XCNEWVEC (struct web_entry, DF_USES_SIZE (df));
  used = XCNEWVEC (char, max);

  if (dump_file)
    df_dump (df, dump_file);

  /* Produce the web.  */
  for (i = 0; i < DF_USES_SIZE (df); i++)
    union_defs (df, DF_USES_GET (df, i), def_entry, use_entry, unionfind_union);

  /* Update the instruction stream, allocating new registers for split pseudos
     in progress.  */
  for (i = 0; i < DF_USES_SIZE (df); i++)
    replace_ref (DF_USES_GET (df, i), 
		 entry_register (use_entry + i, DF_USES_GET (df, i), used));
  for (i = 0; i < DF_DEFS_SIZE (df); i++)
    replace_ref (DF_DEFS_GET (df, i), 
		 entry_register (def_entry + i, DF_DEFS_GET (df, i), used));

  /* Dataflow information is corrupt here, but it can be easily updated
     by creating new entries for new registers and updates or calling
     df_insns_modify.  */
  free (def_entry);
  free (use_entry);
  free (used);
  df_finish (df);
  df = NULL;
}
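The union_defs calls above merge uses and defs into webs through a union-find over web_entry records. The real unionfind_root/unionfind_union live in web.c; the following is a generic union-find sketch with path compression, using assumed field and function names, just to make the merging step concrete.

#include <stddef.h>

/* Illustrative union-find sketch; field and function names are
   assumptions, not the actual web.c declarations.  */
struct sketch_web_entry
{
  struct sketch_web_entry *pred;   /* NULL when this entry is a root.  */
};

static struct sketch_web_entry *
sketch_unionfind_root (struct sketch_web_entry *e)
{
  struct sketch_web_entry *root = e, *next;

  while (root->pred)
    root = root->pred;

  /* Path compression: repoint everything on the walk at the root.  */
  while (e->pred)
    {
      next = e->pred;
      e->pred = root;
      e = next;
    }
  return root;
}

/* Merge the webs containing A and B; return nonzero if they were
   previously distinct.  */
static int
sketch_unionfind_union (struct sketch_web_entry *a,
			struct sketch_web_entry *b)
{
  a = sketch_unionfind_root (a);
  b = sketch_unionfind_root (b);
  if (a == b)
    return 0;
  b->pred = a;
  return 1;
}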
Example #5
static unsigned int
rest_of_handle_stack_adjustments (void)
{
    cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);

    /* This is kind of a heuristic.  We need to run combine_stack_adjustments
       even for machines with possibly nonzero TARGET_RETURN_POPS_ARGS
       and ACCUMULATE_OUTGOING_ARGS.  We expect that only ports having
       push instructions will have popping returns.  */
#ifndef PUSH_ROUNDING
    if (!ACCUMULATE_OUTGOING_ARGS)
#endif
    {
        df_note_add_problem ();
        df_analyze ();
        combine_stack_adjustments ();
    }
    return 0;
}
Example #6
File: web.c  Project: AhmadTux/DragonFlyBSD
void
web_main (void)
{
  struct df *df;
  struct web_entry *def_entry;
  struct web_entry *use_entry;
  unsigned int i;
  int max = max_reg_num ();
  char *used;

  df = df_init ();
  df_analyze (df, 0, DF_UD_CHAIN | DF_EQUIV_NOTES);

  def_entry = xcalloc (df->n_defs, sizeof (struct web_entry));
  use_entry = xcalloc (df->n_uses, sizeof (struct web_entry));
  used = xcalloc (max, sizeof (char));

  if (dump_file)
    df_dump (df, DF_UD_CHAIN | DF_DU_CHAIN, dump_file);

  /* Produce the web.  */
  for (i = 0; i < df->n_uses; i++)
    union_defs (df, df->uses[i], def_entry, use_entry);

  /* Update the instruction stream, allocating new registers for split pseudos
     in progress.  */
  for (i = 0; i < df->n_uses; i++)
    replace_ref (df->uses[i], entry_register (use_entry + i, df->uses[i],
					      used));
  for (i = 0; i < df->n_defs; i++)
    replace_ref (df->defs[i], entry_register (def_entry + i, df->defs[i],
					      used));

  /* Dataflow information is corrupt here, but it can be easily updated
     by creating new entries for new registers and updates or calling
     df_insns_modify.  */
  free (def_entry);
  free (use_entry);
  free (used);
  df_finish (df);
}
Example #7
static void
rtl_seqabstr (void)
{
  int iter;
  df_set_flags (DF_LR_RUN_DCE);
  df_analyze ();

  /* Create a hash list for COLLECT_PATTERN_SEQS.  */
  hash_buckets = htab_create (HASH_INIT, htab_hash_bucket, htab_eq_bucket,
                              htab_del_bucket);
  fill_hash_bucket ();

  /* Compute the common cost of abstraction.  */
  compute_init_costs ();

  /* Build an initial set of pattern sequences from the current function.  */
  collect_pattern_seqs ();
  dump_pattern_seqs ();

  /* Iterate until there are no sequences to abstract.  */
  for (iter = 1;; iter++)
    {
      /* Recompute gain for sequences if necessary and select sequence with
         biggest gain.  */
      recompute_gain ();
      if (!pattern_seqs)
        break;
      dump_best_pattern_seq (iter);
      /* Update the cached info of the other sequences and force gain
         recomputation where needed.  */
      update_pattern_seqs ();
      /* Turn best sequences into pseudo-functions and -calls.  */
      abstract_best_seq ();
    }

  /* Cleanup hash tables.  */
  htab_delete (hash_buckets);
}
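htab_create here takes the usual libiberty hashtab.h callback triple (hash, equality, destructor). The real htab_hash_bucket/htab_eq_bucket/htab_del_bucket hash on the collected sequences and are defined elsewhere in this pass; a minimal sketch of callbacks with the expected shapes, falling back to libiberty's pointer helpers, might look like this.

#include <stdlib.h>
#include "hashtab.h"

/* Illustrative destructor with the htab_del shape; the real
   htab_del_bucket also frees the bucket's own members.  */
static void
sketch_del_bucket (void *p)
{
  free (p);
}

static htab_t
sketch_create_buckets (void)
{
  /* htab_hash_pointer/htab_eq_pointer are provided by libiberty; the
     real pass hashes on bucket contents instead.  */
  return htab_create (31, htab_hash_pointer, htab_eq_pointer,
		      sketch_del_bucket);
}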
Example #8
static int
optimize_mode_switching (void)
{
  int e;
  basic_block bb;
  bool need_commit = false;
  static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
#define N_ENTITIES ARRAY_SIZE (num_modes)
  int entity_map[N_ENTITIES];
  struct bb_info *bb_info[N_ENTITIES];
  int i, j;
  int n_entities = 0;
  int max_num_modes = 0;
  bool emitted ATTRIBUTE_UNUSED = false;
  basic_block post_entry = 0;
  basic_block pre_exit = 0;
  struct edge_list *edge_list = 0;

  /* These bitmaps are used for the LCM algorithm.  */
  sbitmap *kill, *del, *insert, *antic, *transp, *comp;
  sbitmap *avin, *avout;

  for (e = N_ENTITIES - 1; e >= 0; e--)
    if (OPTIMIZE_MODE_SWITCHING (e))
      {
	int entry_exit_extra = 0;

	/* Create the list of segments within each basic block.
	   If NORMAL_MODE is defined, allow for two extra
	   blocks split from the entry and exit block.  */
	if (targetm.mode_switching.entry && targetm.mode_switching.exit)
	  entry_exit_extra = 3;

	bb_info[n_entities]
	  = XCNEWVEC (struct bb_info,
		      last_basic_block_for_fn (cfun) + entry_exit_extra);
	entity_map[n_entities++] = e;
	if (num_modes[e] > max_num_modes)
	  max_num_modes = num_modes[e];
      }

  if (! n_entities)
    return 0;

  /* Make sure if MODE_ENTRY is defined MODE_EXIT is defined.  */
  gcc_assert ((targetm.mode_switching.entry && targetm.mode_switching.exit)
	      || (!targetm.mode_switching.entry
		  && !targetm.mode_switching.exit));

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    {
      /* Split the edge from the entry block, so that we can note that
	 NORMAL_MODE is supplied there.  */
      post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
    }

  df_analyze ();

  /* Create the bitmap vectors.  */
  antic = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				n_entities * max_num_modes);
  transp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				 n_entities * max_num_modes);
  comp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);
  avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);
  avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				n_entities * max_num_modes);
  kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);

  bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (antic, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));

  for (j = n_entities - 1; j >= 0; j--)
    {
      int e = entity_map[j];
      int no_mode = num_modes[e];
      struct bb_info *info = bb_info[j];
      rtx_insn *insn;

      /* Determine what mode (if any) the first use of entity E in this block needs.
	 This will be the mode that is anticipatable for this block.
	 Also compute the initial transparency settings.  */
      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct seginfo *ptr;
	  int last_mode = no_mode;
	  bool any_set_required = false;
	  HARD_REG_SET live_now;

	  info[bb->index].mode_out = info[bb->index].mode_in = no_mode;

	  REG_SET_TO_HARD_REG_SET (live_now, df_get_live_in (bb));

	  /* Pretend the mode is clobbered across abnormal edges.  */
	  {
	    edge_iterator ei;
	    edge eg;
	    FOR_EACH_EDGE (eg, ei, bb->preds)
	      if (eg->flags & EDGE_COMPLEX)
		break;
	    if (eg)
	      {
		rtx_insn *ins_pos = BB_HEAD (bb);
		if (LABEL_P (ins_pos))
		  ins_pos = NEXT_INSN (ins_pos);
		gcc_assert (NOTE_INSN_BASIC_BLOCK_P (ins_pos));
		if (ins_pos != BB_END (bb))
		  ins_pos = NEXT_INSN (ins_pos);
		ptr = new_seginfo (no_mode, ins_pos, bb->index, live_now);
		add_seginfo (info + bb->index, ptr);
		for (i = 0; i < no_mode; i++)
		  clear_mode_bit (transp[bb->index], j, i);
	      }
	  }

	  FOR_BB_INSNS (bb, insn)
	    {
	      if (INSN_P (insn))
		{
		  int mode = targetm.mode_switching.needed (e, insn);
		  rtx link;

		  if (mode != no_mode && mode != last_mode)
		    {
		      any_set_required = true;
		      last_mode = mode;
		      ptr = new_seginfo (mode, insn, bb->index, live_now);
		      add_seginfo (info + bb->index, ptr);
		      for (i = 0; i < no_mode; i++)
			clear_mode_bit (transp[bb->index], j, i);
		    }

		  if (targetm.mode_switching.after)
		    last_mode = targetm.mode_switching.after (e, last_mode,
							      insn);

		  /* Update LIVE_NOW.  */
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_DEAD)
		      reg_dies (XEXP (link, 0), &live_now);

		  note_stores (PATTERN (insn), reg_becomes_live, &live_now);
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_UNUSED)
		      reg_dies (XEXP (link, 0), &live_now);
		}
	    }

	  info[bb->index].computing = last_mode;
	  /* Check for blocks without ANY mode requirements.
	     N.B. because of MODE_AFTER, last_mode might still
	     be different from no_mode, in which case we need to
	     mark the block as nontransparent.  */
	  if (!any_set_required)
	    {
	      ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now);
	      add_seginfo (info + bb->index, ptr);
	      if (last_mode != no_mode)
		for (i = 0; i < no_mode; i++)
		  clear_mode_bit (transp[bb->index], j, i);
	    }
	}
      if (targetm.mode_switching.entry && targetm.mode_switching.exit)
	{
	  int mode = targetm.mode_switching.entry (e);

	  info[post_entry->index].mode_out =
	    info[post_entry->index].mode_in = no_mode;
	  if (pre_exit)
	    {
	      info[pre_exit->index].mode_out =
		info[pre_exit->index].mode_in = no_mode;
	    }

	  if (mode != no_mode)
	    {
	      bb = post_entry;

	      /* By always making this nontransparent, we save
		 an extra check in make_preds_opaque.  We also
		 need this to avoid confusing pre_edge_lcm when
		 antic is cleared but transp and comp are set.  */
	      for (i = 0; i < no_mode; i++)
		clear_mode_bit (transp[bb->index], j, i);

	      /* Insert a fake computing definition of MODE into entry
		 blocks which compute no mode. This represents the mode on
		 entry.  */
	      info[bb->index].computing = mode;

	      if (pre_exit)
		info[pre_exit->index].seginfo->mode =
		  targetm.mode_switching.exit (e);
	    }
	}

      /* Set the anticipatable and computing arrays.  */
      for (i = 0; i < no_mode; i++)
	{
	  int m = targetm.mode_switching.priority (entity_map[j], i);

	  FOR_EACH_BB_FN (bb, cfun)
	    {
	      if (info[bb->index].seginfo->mode == m)
		set_mode_bit (antic[bb->index], j, m);

	      if (info[bb->index].computing == m)
		set_mode_bit (comp[bb->index], j, m);
	    }
	}
    }

  /* Calculate the optimal locations for the placement of mode
     switches to modes with priority I.  */

  FOR_EACH_BB_FN (bb, cfun)
    bitmap_not (kill[bb->index], transp[bb->index]);

  edge_list = pre_edge_lcm_avs (n_entities * max_num_modes, transp, comp, antic,
				kill, avin, avout, &insert, &del);

  for (j = n_entities - 1; j >= 0; j--)
    {
      int no_mode = num_modes[entity_map[j]];

      /* Insert all mode sets that have been inserted by lcm.  */

      for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
	{
	  edge eg = INDEX_EDGE (edge_list, ed);

	  eg->aux = (void *)(intptr_t)-1;

	  for (i = 0; i < no_mode; i++)
	    {
	      int m = targetm.mode_switching.priority (entity_map[j], i);
	      if (mode_bit_p (insert[ed], j, m))
		{
		  eg->aux = (void *)(intptr_t)m;
		  break;
		}
	    }
	}

      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct bb_info *info = bb_info[j];
	  int last_mode = no_mode;

	  /* Initialize mode out availability for bb from AVOUT.  */
	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (avout[bb->index], j, i))
	      {
		if (last_mode == no_mode)
		  last_mode = i;
		if (last_mode != i)
		  {
		    last_mode = no_mode;
		    break;
		  }
	      }
	  info[bb->index].mode_out = last_mode;

	  /* Initialize mode in availability for bb from AVIN.  */
	  last_mode = no_mode;
	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (avin[bb->index], j, i))
	      {
		if (last_mode == no_mode)
		  last_mode = i;
		if (last_mode != i)
		  {
		    last_mode = no_mode;
		    break;
		  }
	      }
	  info[bb->index].mode_in = last_mode;

	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (del[bb->index], j, i))
	      info[bb->index].seginfo->mode = no_mode;
	}

      /* Now output the remaining mode sets in all the segments.  */

      /* In case no mode was inserted, the mode information on the edge
	 might not be complete.
	 Update mode info on edges and commit pending mode sets.  */
      need_commit |= commit_mode_sets (edge_list, entity_map[j], bb_info[j]);

      /* Reset modes for next entity.  */
      clear_aux_for_edges ();

      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct seginfo *ptr, *next;
	  int cur_mode = bb_info[j][bb->index].mode_in;

	  for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next)
	    {
	      next = ptr->next;
	      if (ptr->mode != no_mode)
		{
		  rtx_insn *mode_set;

		  rtl_profile_for_bb (bb);
		  start_sequence ();

		  targetm.mode_switching.emit (entity_map[j], ptr->mode,
					       cur_mode, ptr->regs_live);
		  mode_set = get_insns ();
		  end_sequence ();

		  /* modes kill each other inside a basic block.  */
		  cur_mode = ptr->mode;

		  /* Insert MODE_SET only if it is nonempty.  */
		  if (mode_set != NULL_RTX)
		    {
		      emitted = true;
		      if (NOTE_INSN_BASIC_BLOCK_P (ptr->insn_ptr))
			/* We need to emit the insns in a FIFO-like manner,
			   i.e. the first to be emitted at our insertion
			   point ends up first in the instruction stream.
			   Because we made sure that NOTE_INSN_BASIC_BLOCK is
			   only used for initially empty basic blocks, we
			   can achieve this by appending at the end of
			   the block.  */
			emit_insn_after
			  (mode_set, BB_END (NOTE_BASIC_BLOCK (ptr->insn_ptr)));
		      else
			emit_insn_before (mode_set, ptr->insn_ptr);
		    }

		  default_rtl_profile ();
		}

	      free (ptr);
	    }
	}

      free (bb_info[j]);
    }

  free_edge_list (edge_list);

  /* Finished. Free up all the things we've allocated.  */
  sbitmap_vector_free (del);
  sbitmap_vector_free (insert);
  sbitmap_vector_free (kill);
  sbitmap_vector_free (antic);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);
  sbitmap_vector_free (avin);
  sbitmap_vector_free (avout);

  if (need_commit)
    commit_edge_insertions ();

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    cleanup_cfg (CLEANUP_NO_INSN_DEL);
  else if (!need_commit && !emitted)
    return 0;

  return 1;
}
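Each per-block row of the transp/comp/antic/avin/avout/kill vectors above packs all (entity, mode) pairs into n_entities * max_num_modes bits, and set_mode_bit/clear_mode_bit/mode_bit_p index into that packing. A hedged sketch of the indexing the call sites imply (the real helpers are macros in mode-switching.c and their exact definitions may differ; max_num_modes here is the value computed above):

/* Illustrative only: entity slot J, mode M maps to bit
   J * max_num_modes + M of a block's row in the LCM bitmap vectors.  */
#define sketch_mode_bit_idx(j, m)       ((j) * max_num_modes + (m))
#define sketch_set_mode_bit(b, j, m)    bitmap_set_bit ((b), sketch_mode_bit_idx (j, m))
#define sketch_clear_mode_bit(b, j, m)  bitmap_clear_bit ((b), sketch_mode_bit_idx (j, m))
#define sketch_mode_bit_p(b, j, m)      bitmap_bit_p ((b), sketch_mode_bit_idx (j, m))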
Example #9
static unsigned int
copyprop_hardreg_forward (void)
{
  struct value_data *all_vd;
  basic_block bb;
  sbitmap visited;
  bool analyze_called = false;

  all_vd = XNEWVEC (struct value_data, last_basic_block);

  visited = sbitmap_alloc (last_basic_block);
  bitmap_clear (visited);

  if (MAY_HAVE_DEBUG_INSNS)
    debug_insn_changes_pool
      = create_alloc_pool ("debug insn changes pool",
			   sizeof (struct queued_debug_insn_change), 256);

  FOR_EACH_BB (bb)
    {
      bitmap_set_bit (visited, bb->index);

      /* If a block has a single predecessor that we've already
	 processed, begin with the value data that was live at
	 the end of the predecessor block.  */
      /* ??? Ought to use more intelligent queuing of blocks.  */
      if (single_pred_p (bb)
	  && bitmap_bit_p (visited, single_pred (bb)->index)
	  && ! (single_pred_edge (bb)->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)))
	{
	  all_vd[bb->index] = all_vd[single_pred (bb)->index];
	  if (all_vd[bb->index].n_debug_insn_changes)
	    {
	      unsigned int regno;

	      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
		{
		  if (all_vd[bb->index].e[regno].debug_insn_changes)
		    {
		      all_vd[bb->index].e[regno].debug_insn_changes = NULL;
		      if (--all_vd[bb->index].n_debug_insn_changes == 0)
			break;
		    }
		}
	    }
	}
      else
	init_value_data (all_vd + bb->index);

      copyprop_hardreg_forward_1 (bb, all_vd + bb->index);
    }

  if (MAY_HAVE_DEBUG_INSNS)
    {
      FOR_EACH_BB (bb)
	if (bitmap_bit_p (visited, bb->index)
	    && all_vd[bb->index].n_debug_insn_changes)
	  {
	    unsigned int regno;
	    bitmap live;

	    if (!analyze_called)
	      {
		df_analyze ();
		analyze_called = true;
	      }
	    live = df_get_live_out (bb);
	    for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
	      if (all_vd[bb->index].e[regno].debug_insn_changes)
		{
		  if (REGNO_REG_SET_P (live, regno))
		    apply_debug_insn_changes (all_vd + bb->index, regno);
		  if (all_vd[bb->index].n_debug_insn_changes == 0)
		    break;
		}
	  }

      free_alloc_pool (debug_insn_changes_pool);
    }

  sbitmap_free (visited);
  free (all_vd);
  return 0;
}
Example #10
File: init-regs.c  Project: alcides/gcc
static void
initialize_uninitialized_regs (void)
{
  basic_block bb;
  bitmap already_genned = BITMAP_ALLOC (NULL);

  if (optimize == 1)
    {
      df_live_add_problem ();
      df_live_set_all_dirty ();
    }

  df_analyze ();

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;
      bitmap lr = DF_LR_IN (bb);
      bitmap ur = DF_LIVE_IN (bb);
      bitmap_clear (already_genned);

      FOR_BB_INSNS (bb, insn)
	{
	  df_ref use;
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  FOR_EACH_INSN_USE (use, insn)
	    {
	      unsigned int regno = DF_REF_REGNO (use);

	      /* Only do this for the pseudos.  */
	      if (regno < FIRST_PSEUDO_REGISTER)
		continue;

	      /* Do not generate multiple moves for the same regno.
		 This is common for sequences of subreg operations.
		 They would be deleted during combine but there is no
		 reason to churn the system.  */
	      if (bitmap_bit_p (already_genned, regno))
		continue;

	      /* A use is MUST uninitialized if it reaches the top of
		 the block from the inside of the block (the lr test)
		 and no def for it reaches the top of the block from
		 outside of the block (the ur test).  */
	      if (bitmap_bit_p (lr, regno)
		  && (!bitmap_bit_p (ur, regno)))
		{
		  rtx_insn *move_insn;
		  rtx reg = DF_REF_REAL_REG (use);

		  bitmap_set_bit (already_genned, regno);

		  start_sequence ();
		  emit_move_insn (reg, CONST0_RTX (GET_MODE (reg)));
		  move_insn = get_insns ();
		  end_sequence ();
		  emit_insn_before (move_insn, insn);
		  if (dump_file)
		    fprintf (dump_file,
			     "adding initialization in %s of reg %d at in block %d for insn %d.\n",
			     current_function_name (), regno, bb->index,
			     INSN_UID (insn));
		}
	    }