Example #1
0
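/* Return the nearest active insn before INSN in basic block BB, or
   NULL_RTX if there is none.  */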
static rtx
prev_active_insn_bb (basic_block bb, rtx insn)
{
  for (insn = PREV_INSN (insn);
       insn != PREV_INSN (BB_HEAD (bb));
       insn = PREV_INSN (insn))
    if (active_insn_p (insn))
      return insn;
  return NULL_RTX;
}
Example #2
0
/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx insn, rtx last)
{
  sched_rgn_n_insns++;

  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case, where we have a partially dead
	 instruction scheduled after the last control flow instruction.
	 In this case we can create a new basic block.  It is
	 always exactly one basic block, the last in the sequence.  */

      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
			   && !IS_SPECULATION_CHECK_P (insn)
			   && BB_HEAD (last_bb) != insn
			   && BB_END (last_bb) == insn);

      {
	rtx x;

	x = NEXT_INSN (insn);
	if (e)
	  gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
	else
	  gcc_checking_assert (BARRIER_P (x));
      }

      if (e)
	{
	  bb = split_edge (e);
	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
	}
      else
	/* Create an empty unreachable block after the INSN.  */
	bb = create_basic_block (NEXT_INSN (insn), NULL_RTX, last_bb);

      /* split_edge () creates BB before E->DEST.  Keep in mind that
	 this operation extends the scheduling region to the end of BB.
	 Hence, we need to shift NEXT_TAIL so that haifa-sched.c won't go
	 out of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}
Example #3
0
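/* Unlink the insns from FIRST to LAST inclusive from the insn chain,
   patching up the neighbouring insns and the function's first/last insn
   pointers, and return FIRST.  */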
rtx
unlink_insn_chain (rtx first, rtx last)
{
  rtx prevfirst = PREV_INSN (first);
  rtx nextlast = NEXT_INSN (last);

  PREV_INSN (first) = NULL;
  NEXT_INSN (last) = NULL;
  if (prevfirst)
    NEXT_INSN (prevfirst) = nextlast;
  if (nextlast)
    PREV_INSN (nextlast) = prevfirst;
  else
    set_last_insn (prevfirst);
  if (!prevfirst)
    set_first_insn (nextlast);
  return first;
}
Example #4
0
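/* Return true if SET, the body of INSN, is a non-volatile load from a
   memory location already recorded in the htab_load hash table and a move
   can still be inserted before the recorded kill insn; store the hash
   table slot in *LOAD.  */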
static bool
interesting_second_load (rtx set, struct load ***load, rtx insn)
{
  rtx mem, reg;

  if (!set)
    return false;

  mem = SET_SRC (set);
  reg = SET_DEST (set);
  if (!MEM_P (mem) || MEM_VOLATILE_P (mem) || !REG_P (reg))
    return false;

  *load = (struct load **)
    htab_find_slot_with_hash (htab_load, mem, load_rtx_hash (mem),
			      NO_INSERT);
  if (!*load)
    return false;

  /* Don't work on cases that never happen: if there is no kill, we
     would have inherited the reload; if the store and load regs are
     the same, we would need to find an available register.  If the
     kill insn was already replaced by a move, this information is
     stale; disregard it.  */
  if (rtx_equal_p (reg, (**load)->reg)
      || !(**load)->reg_kill
      || INSN_DELETED_P ((**load)->reg_kill)
      || reg_used_between_p (reg, PREV_INSN ((**load)->reg_kill),
			     NEXT_INSN (insn))
      || reg_set_between_p (reg, PREV_INSN ((**load)->reg_kill), insn))
    {
      if (dump_file)
	{
	  fputs ("\nCan't insert the move before the kill for this load:\n  ",
		 dump_file);
	  print_inline_rtx (dump_file, insn, 2);
	  fputs ("\n\n", dump_file);
	}
      return false;
    }

  return true;
}
Example #5
0
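/* Remove from *REGS every hard register that is live immediately after
   INSN or at some point within the sequence of LENGTH insns that ends
   at INSN.  */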
static void
clear_regs_live_in_seq (HARD_REG_SET * regs, rtx insn, int length)
{
  basic_block bb;
  regset_head live;
  HARD_REG_SET hlive;
  rtx x;
  int i;

  /* Initialize liveness propagation.  */
  bb = BLOCK_FOR_INSN (insn);
  INIT_REG_SET (&live);
  bitmap_copy (&live, DF_LR_OUT (bb));
  df_simulate_initialize_backwards (bb, &live);

  /* Propagate until INSN is found.  */
  for (x = BB_END (bb); x != insn; x = PREV_INSN (x))
    df_simulate_one_insn_backwards (bb, x, &live);

  /* Clear registers live after INSN.  */
  renumbered_reg_set_to_hard_reg_set (&hlive, &live);
  AND_COMPL_HARD_REG_SET (*regs, hlive);

  /* Clear registers live in and before the sequence.  */
  for (i = 0; i < length;)
    {
      rtx prev = PREV_INSN (x);
      df_simulate_one_insn_backwards (bb, x, &live);

      if (INSN_P (x))
        {
          renumbered_reg_set_to_hard_reg_set (&hlive, &live);
          AND_COMPL_HARD_REG_SET (*regs, hlive);
          i++;
        }

      x = prev;
    }

  /* Free unused data.  */
  CLEAR_REG_SET (&live);
}
Example #6
0
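/* Sanity-check the insn chain: walking forwards, every insn's PREV_INSN
   must point to the insn just seen; walking backwards, every insn's
   NEXT_INSN must do likewise; and both walks must count the same number
   of insns.  */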
void
verify_insn_chain (void)
{
  rtx x, prevx, nextx;
  int insn_cnt1, insn_cnt2;

  for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
       x != 0;
       prevx = x, insn_cnt1++, x = NEXT_INSN (x))
    gcc_assert (PREV_INSN (x) == prevx);

  gcc_assert (prevx == get_last_insn ());

  for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
       x != 0;
       nextx = x, insn_cnt2++, x = PREV_INSN (x))
    gcc_assert (NEXT_INSN (x) == nextx);

  gcc_assert (insn_cnt1 == insn_cnt2);
}
Example #7
0
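/* Detach from the insn chain everything that lies outside basic blocks:
   insns before the first block become cfg_layout_function_header, insns
   between two blocks become the footer of the preceding block or the
   header of the following one, and trailing insns become
   cfg_layout_function_footer.  */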
static void
record_effective_endpoints (void)
{
  rtx next_insn;
  basic_block bb;
  rtx insn;

  for (insn = get_insns ();
       insn
       && NOTE_P (insn)
       && NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK;
       insn = NEXT_INSN (insn))
    continue;
  /* No basic blocks at all?  */
  gcc_assert (insn);

  if (PREV_INSN (insn))
    cfg_layout_function_header =
	    unlink_insn_chain (get_insns (), PREV_INSN (insn));
  else
    cfg_layout_function_header = NULL_RTX;

  next_insn = get_insns ();
  FOR_EACH_BB (bb)
    {
      rtx end;

      if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb))
	bb->il.rtl->header = unlink_insn_chain (next_insn,
					      PREV_INSN (BB_HEAD (bb)));
      end = skip_insns_after_block (bb);
      if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end)
	bb->il.rtl->footer = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
      next_insn = NEXT_INSN (BB_END (bb));
    }

  cfg_layout_function_footer = next_insn;
  if (cfg_layout_function_footer)
    cfg_layout_function_footer = unlink_insn_chain (cfg_layout_function_footer, get_last_insn ());
}
Example #8
0
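/* Return nonzero if the call INSN effectively ends the block finishing at
   END, ignoring a copy of the return value, a stack adjustment, USEs and
   CLOBBERs of the return value, and a final jump.  */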
static int
call_ends_block_p (rtx insn, rtx end)
{
  rtx new_insn;
  /* END might be a note, so get the last nonnote insn of the block.  */
  end = next_nonnote_insn (PREV_INSN (end));

  /* If the call was the end of the block, then we're OK.  */
  if (insn == end)
    return 1;

  /* Skip over copying from the call's return value pseudo into
     this function's hard return register and if that's the end
     of the block, we're OK.  */
  new_insn = skip_copy_to_return_value (insn);

  /* In case we return value in pseudo, we must set the pseudo to
     return value of called function, otherwise we are returning
     something else.  */
  if (return_value_pseudo && insn == new_insn)
    return 0;
  insn = new_insn;

  if (insn == end)
    return 1;

  /* Skip any stack adjustment.  */
  insn = skip_stack_adjustment (insn);
  if (insn == end)
    return 1;

  /* Skip over a CLOBBER of the return value as a hard reg.  */
  insn = skip_use_of_return_value (insn, CLOBBER);
  if (insn == end)
    return 1;

  /* Skip over an unreturned value.  */
  insn = skip_unreturned_value (insn);
  if (insn == end)
    return 1;

  /* Skip over a USE of the return value (as a hard reg).  */
  insn = skip_use_of_return_value (insn, USE);
  if (insn == end)
    return 1;

  /* Skip over a JUMP_INSN at the end of the block.  If that doesn't end the
     block, the original CALL_INSN didn't.  */
  insn = skip_jump_insn (insn);
  return insn == end;
}
Example #9
0
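/* Check that the doubly linked insn chain is consistent in both
   directions and that forward and backward walks count the same number
   of insns.  */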
void
verify_insn_chain (void)
{
  rtx x, prevx, nextx;
  int insn_cnt1, insn_cnt2;

  for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
       x != 0;
       prevx = x, insn_cnt1++, x = NEXT_INSN (x))
    if (PREV_INSN (x) != prevx)
      abort ();

  if (prevx != get_last_insn ())
    abort ();

  for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
       x != 0;
       nextx = x, insn_cnt2++, x = PREV_INSN (x))
    if (NEXT_INSN (x) != nextx)
      abort ();

  if (insn_cnt1 != insn_cnt2)
    abort ();
}
Example #10
0
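/* Print N insns of the chain starting at X to stderr (a single insn if N
   is zero); if N is negative, print a window of -N insns with X roughly
   in the middle.  */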
void
debug_rtx_list (rtx x, int n)
{
  int i, count;
  rtx insn;

  count = n == 0 ? 1 : n < 0 ? -n : n;

  /* If we are printing a window, back up to the start.  */

  if (n < 0)
    for (i = count / 2; i > 0; i--)
      {
        if (PREV_INSN (x) == 0)
          break;
        x = PREV_INSN (x);
      }

  for (i = count, insn = x; i > 0 && insn != 0; i--, insn = NEXT_INSN (insn))
    {
      debug_rtx (insn);
      fprintf (stderr, "\n");
    }
}
Example #11
0
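/* Return the previous real insn (in the INSN_P sense) before INSN within
   its basic block, or NULL_RTX if there is none or INSN is not inside a
   basic block.  */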
static rtx
prev_insn_in_block (rtx insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);

  if (!bb)
    return NULL_RTX;

  while (insn != BB_HEAD (bb))
    {
      insn = PREV_INSN (insn);
      if (INSN_P (insn))
        return insn;
    }
  return NULL_RTX;
}
Example #12
0
/* Some old code expects exactly one BARRIER as the NEXT_INSN of a
   non-fallthru insn.  This is not generally true, as multiple barriers
   may have crept in, or the BARRIER may be separated from the last
   real insn by one or more NOTEs.

   This simple pass moves barriers and removes duplicates so that the
   old code is happy.
 */
unsigned int
cleanup_barriers (void)
{
  rtx insn, next, prev;
  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);
      if (BARRIER_P (insn))
	{
	  prev = prev_nonnote_insn (insn);
	  if (BARRIER_P (prev))
	    delete_insn (insn);
	  else if (prev != PREV_INSN (insn))
	    reorder_insns (insn, insn, prev);
	}
    }
  return 0;
}
Example #13
0
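/* Split BB just after its basic block note, turning the original head
   into a forwarder block.  Incoming edges either stay on the forwarder or
   are redirected straight to the new body according to REDIRECT_LATCH,
   REDIRECT_NONLATCH and EXCEPT; the connecting fallthru edge gets the
   latch flag CONN_LATCH.  Return the forwarder block.  */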
static basic_block
make_forwarder_block (basic_block bb, int redirect_latch, int redirect_nonlatch, edge except, int conn_latch)
{
  edge e, next_e, fallthru;
  basic_block dummy;
  rtx insn;

  insn = PREV_INSN (first_insn_after_basic_block_note (bb));

  /* For empty block split_block will return NULL.  */
  if (BB_END (bb) == insn)
    emit_note_after (NOTE_INSN_DELETED, insn);

  fallthru = split_block (bb, insn);
  dummy = fallthru->src;
  bb = fallthru->dest;

  bb->aux = xmalloc (sizeof (int));
  HEADER_BLOCK (dummy) = 0;
  HEADER_BLOCK (bb) = 1;

  /* Redirect back edges we want to keep.  */
  for (e = dummy->pred; e; e = next_e)
    {
      next_e = e->pred_next;
      if (e == except
	  || !((redirect_latch && LATCH_EDGE (e))
	       || (redirect_nonlatch && !LATCH_EDGE (e))))
	{
	  dummy->frequency -= EDGE_FREQUENCY (e);
	  dummy->count -= e->count;
	  if (dummy->frequency < 0)
	    dummy->frequency = 0;
	  if (dummy->count < 0)
	    dummy->count = 0;
	  redirect_edge_with_latch_update (e, bb);
	}
    }

  alloc_aux_for_edge (fallthru, sizeof (int));
  LATCH_EDGE (fallthru) = conn_latch;

  return dummy;
}
Example #14
0
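/* Scan all insns of the current function and accumulate in target_units
   the function units they may be issued on.  */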
void
init_target_units (void)
{
  rtx insn;
  int unit;

  for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
    {
      if (! INSN_P (insn))
	continue;

      unit = insn_unit (insn);

      if (unit < 0)
	target_units |= ~unit;
      else
	target_units |= (1 << unit);
    }
}
Example #15
0
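/* Initialize the ready list: hand every insn between prev_head and
   next_tail of the current scheduling region to try_ready and check that
   their number matches rgn_n_insns.  */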
static void
init_ready_list (void)
{
  int n = 0;
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx insn;

  sched_rgn_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == rgn_n_insns);
}
Example #16
0
/* Do transform 2) on INSN if applicable.  */
static bool
mod_pow2_value_transform (rtx insn)
{
  rtx set, set_src, set_dest, op1, op2, value, histogram;
  enum rtx_code code;
  enum machine_mode mode;
  gcov_type wrong_values, count;
  edge e;
  int i, all, prob;

  set = single_set (insn);
  if (!set)
    return false;

  set_src = SET_SRC (set);
  set_dest = SET_DEST (set);
  code = GET_CODE (set_src);
  mode = GET_MODE (set_dest);
  
  if (code != UMOD)
    return false;
  op1 = XEXP (set_src, 0);
  op2 = XEXP (set_src, 1);

  for (histogram = REG_NOTES (insn);
       histogram;
       histogram = XEXP (histogram, 1))
    if (REG_NOTE_KIND (histogram) == REG_VALUE_PROFILE
	&& XEXP (XEXP (histogram, 0), 0) == GEN_INT (HIST_TYPE_POW2))
      break;

  if (!histogram)
    return false;

  histogram = XEXP (XEXP (histogram, 0), 1);
  value = XEXP (histogram, 0);
  histogram = XEXP (histogram, 1);
  wrong_values = INTVAL (XEXP (histogram, 0));
  histogram = XEXP (histogram, 1);

  count = 0;
  for (i = 0; i < GET_MODE_BITSIZE (mode); i++)
    {
      count += INTVAL (XEXP (histogram, 0));
      histogram = XEXP (histogram, 1);
    }

  if (!rtx_equal_p (op2, value))
    return false;

  /* We require that we hit a power of two in at least half of all
     evaluations.  */
  if (count < wrong_values)
    return false;

  if (dump_file)
    fprintf (dump_file, "Mod power of 2 transformation on insn %d\n",
	     INSN_UID (insn));

  /* Compute probability of taking the optimal path.  */
  all = count + wrong_values;
  prob = (count * REG_BR_PROB_BASE + all / 2) / all;

  e = split_block (BLOCK_FOR_INSN (insn), PREV_INSN (insn));
  delete_insn (insn);
  
  insert_insn_on_edge (
	gen_mod_pow2 (mode, code, set_dest, op1, op2, prob), e);

  return true;
}
Example #17
0
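/* Expand STMT, the last statement of BB, which contains a tail call.  If
   a sibcall insn is actually emitted, prune the outgoing edges, attach an
   abnormal sibcall edge to the exit block, set *CAN_FALLTHRU accordingly
   and return the resulting last basic block; otherwise set *CAN_FALLTHRU
   to true and return NULL.  */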
static basic_block
expand_gimple_tailcall (basic_block bb, tree stmt, bool *can_fallthru)
{
  rtx last2, last;
  edge e;
  edge_iterator ei;
  int probability;
  gcov_type count;

  last2 = last = get_last_insn ();

  expand_expr_stmt (stmt);

  for (last = NEXT_INSN (last); last; last = NEXT_INSN (last))
    if (CALL_P (last) && SIBLING_CALL_P (last))
      goto found;

  maybe_dump_rtl_for_tree_stmt (stmt, last2);

  *can_fallthru = true;
  return NULL;

 found:
  /* ??? Wouldn't it be better to just reset any pending stack adjust?
     Any instructions emitted here are about to be deleted.  */
  do_pending_stack_adjust ();

  /* Remove any non-eh, non-abnormal edges that don't go to exit.  */
  /* ??? I.e. the fallthrough edge.  HOWEVER!  If there were to be
     EH or abnormal edges, we shouldn't have created a tail call in
     the first place.  So it seems to me we should just be removing
     all edges here, or redirecting the existing fallthru edge to
     the exit block.  */

  probability = 0;
  count = 0;

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (!(e->flags & (EDGE_ABNORMAL | EDGE_EH)))
	{
	  if (e->dest != EXIT_BLOCK_PTR)
	    {
	      e->dest->count -= e->count;
	      e->dest->frequency -= EDGE_FREQUENCY (e);
	      if (e->dest->count < 0)
	        e->dest->count = 0;
	      if (e->dest->frequency < 0)
	        e->dest->frequency = 0;
	    }
	  count += e->count;
	  probability += e->probability;
	  remove_edge (e);
	}
      else
	ei_next (&ei);
    }

  /* This is somewhat ugly: the call_expr expander often emits instructions
     after the sibcall (to perform the function return).  These confuse the
     find_sub_basic_blocks code, so we need to get rid of these.  */
  last = NEXT_INSN (last);
  gcc_assert (BARRIER_P (last));

  *can_fallthru = false;
  while (NEXT_INSN (last))
    {
      /* For instance, the sqrt builtin expander expands an if statement
	 with a sibcall in the then arm and a label for the else arm.  */
      if (LABEL_P (NEXT_INSN (last)))
	{
	  *can_fallthru = true;
	  break;
	}
      delete_insn (NEXT_INSN (last));
    }

  e = make_edge (bb, EXIT_BLOCK_PTR, EDGE_ABNORMAL | EDGE_SIBCALL);
  e->probability += probability;
  e->count += count;
  BB_END (bb) = last;
  update_bb_for_insn (bb);

  if (NEXT_INSN (last))
    {
      bb = create_basic_block (NEXT_INSN (last), get_last_insn (), bb);

      last = BB_END (bb);
      if (BARRIER_P (last))
	BB_END (bb) = PREV_INSN (last);
    }

  maybe_dump_rtl_for_tree_stmt (stmt, last2);

  return bb;
}
Example #18
0
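/* Expand COND_EXPR STMT, which ends BB, into RTL jumps.  If one arm is
   empty, a single conditional jump with a fallthru edge suffices and NULL
   is returned; otherwise the code for the else arm is placed in a new
   basic block, which is returned.  */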
static basic_block
expand_gimple_cond_expr (basic_block bb, tree stmt)
{
  basic_block new_bb, dest;
  edge new_edge;
  edge true_edge;
  edge false_edge;
  tree pred = COND_EXPR_COND (stmt);
  tree then_exp = COND_EXPR_THEN (stmt);
  tree else_exp = COND_EXPR_ELSE (stmt);
  rtx last2, last;

  last2 = last = get_last_insn ();

  extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
  if (EXPR_LOCUS (stmt))
    {
      emit_line_note (*(EXPR_LOCUS (stmt)));
      record_block_change (TREE_BLOCK (stmt));
    }

  /* These flags have no purpose in RTL land.  */
  true_edge->flags &= ~EDGE_TRUE_VALUE;
  false_edge->flags &= ~EDGE_FALSE_VALUE;

  /* We can either have a pure conditional jump with one fallthru edge or
     two-way jump that needs to be decomposed into two basic blocks.  */
  if (TREE_CODE (then_exp) == GOTO_EXPR && IS_EMPTY_STMT (else_exp))
    {
      jumpif (pred, label_rtx (GOTO_DESTINATION (then_exp)));
      add_reg_br_prob_note (dump_file, last, true_edge->probability);
      maybe_dump_rtl_for_tree_stmt (stmt, last);
      if (EXPR_LOCUS (then_exp))
	emit_line_note (*(EXPR_LOCUS (then_exp)));
      return NULL;
    }
  if (TREE_CODE (else_exp) == GOTO_EXPR && IS_EMPTY_STMT (then_exp))
    {
      jumpifnot (pred, label_rtx (GOTO_DESTINATION (else_exp)));
      add_reg_br_prob_note (dump_file, last, false_edge->probability);
      maybe_dump_rtl_for_tree_stmt (stmt, last);
      if (EXPR_LOCUS (else_exp))
	emit_line_note (*(EXPR_LOCUS (else_exp)));
      return NULL;
    }
  gcc_assert (TREE_CODE (then_exp) == GOTO_EXPR
	      && TREE_CODE (else_exp) == GOTO_EXPR);

  jumpif (pred, label_rtx (GOTO_DESTINATION (then_exp)));
  add_reg_br_prob_note (dump_file, last, true_edge->probability);
  last = get_last_insn ();
  expand_expr (else_exp, const0_rtx, VOIDmode, 0);

  BB_END (bb) = last;
  if (BARRIER_P (BB_END (bb)))
    BB_END (bb) = PREV_INSN (BB_END (bb));
  update_bb_for_insn (bb);

  new_bb = create_basic_block (NEXT_INSN (last), get_last_insn (), bb);
  dest = false_edge->dest;
  redirect_edge_succ (false_edge, new_bb);
  false_edge->flags |= EDGE_FALLTHRU;
  new_bb->count = false_edge->count;
  new_bb->frequency = EDGE_FREQUENCY (false_edge);
  new_edge = make_edge (new_bb, dest, 0);
  new_edge->probability = REG_BR_PROB_BASE;
  new_edge->count = new_bb->count;
  if (BARRIER_P (BB_END (new_bb)))
    BB_END (new_bb) = PREV_INSN (BB_END (new_bb));
  update_bb_for_insn (new_bb);

  maybe_dump_rtl_for_tree_stmt (stmt, last2);
  
  if (EXPR_LOCUS (else_exp))
    emit_line_note (*(EXPR_LOCUS (else_exp)));

  return new_bb;
}
Example #19
0
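/* Expand the statements of the GIMPLE basic block BB into RTL insns,
   creating additional basic blocks as conditional expressions and tail
   calls require, and return the last basic block generated for BB.  */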
static basic_block
expand_gimple_basic_block (basic_block bb, FILE * dump_file)
{
  block_stmt_iterator bsi = bsi_start (bb);
  tree stmt = NULL;
  rtx note, last;
  edge e;
  edge_iterator ei;

  if (dump_file)
    {
      fprintf (dump_file,
	       "\n;; Generating RTL for tree basic block %d\n",
	       bb->index);
    }

  if (!bsi_end_p (bsi))
    stmt = bsi_stmt (bsi);

  if (stmt && TREE_CODE (stmt) == LABEL_EXPR)
    {
      last = get_last_insn ();

      expand_expr_stmt (stmt);

      /* Java emits line number notes at the top of labels.
         ??? Make this go away once line number notes are obsoleted.  */
      BB_HEAD (bb) = NEXT_INSN (last);
      if (NOTE_P (BB_HEAD (bb)))
	BB_HEAD (bb) = NEXT_INSN (BB_HEAD (bb));
      bsi_next (&bsi);
      note = emit_note_after (NOTE_INSN_BASIC_BLOCK, BB_HEAD (bb));

      maybe_dump_rtl_for_tree_stmt (stmt, last);
    }
  else
    note = BB_HEAD (bb) = emit_note (NOTE_INSN_BASIC_BLOCK);

  NOTE_BASIC_BLOCK (note) = bb;

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      /* Clear EDGE_EXECUTABLE.  This flag is never used in the backend.  */
      e->flags &= ~EDGE_EXECUTABLE;

      /* At the moment not all abnormal edges match the RTL representation.
         It is safe to remove them here as find_sub_basic_blocks will
         rediscover them.  In the future we should get this fixed properly.  */
      if (e->flags & EDGE_ABNORMAL)
	remove_edge (e);
      else
	ei_next (&ei);
    }

  for (; !bsi_end_p (bsi); bsi_next (&bsi))
    {
      tree stmt = bsi_stmt (bsi);
      basic_block new_bb;

      if (!stmt)
	continue;

      /* Expand this statement, then evaluate the resulting RTL and
	 fixup the CFG accordingly.  */
      if (TREE_CODE (stmt) == COND_EXPR)
	{
	  new_bb = expand_gimple_cond_expr (bb, stmt);
	  if (new_bb)
	    return new_bb;
	}
      else
	{
	  tree call = get_call_expr_in (stmt);
	  if (call && CALL_EXPR_TAILCALL (call))
	    {
	      bool can_fallthru;
	      new_bb = expand_gimple_tailcall (bb, stmt, &can_fallthru);
	      if (new_bb)
		{
		  if (can_fallthru)
		    bb = new_bb;
		  else
		    return new_bb;
		}
	    }
	  else
	    {
	      last = get_last_insn ();
	      expand_expr_stmt (stmt);
	      maybe_dump_rtl_for_tree_stmt (stmt, last);
	    }
	}
    }

  do_pending_stack_adjust ();

  /* Find the block tail.  The last insn in the block is the insn
     before a barrier and/or table jump insn.  */
  last = get_last_insn ();
  if (BARRIER_P (last))
    last = PREV_INSN (last);
  if (JUMP_TABLE_DATA_P (last))
    last = PREV_INSN (PREV_INSN (last));
  BB_END (bb) = last;

  update_bb_for_insn (bb);

  return bb;
}
Example #20
0
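/* Examine the normal call sequence hanging off CP: find the CALL insn,
   the hard register that is set to its return value and the pseudo that
   value is copied into.  Return nonzero on success, storing the registers
   in *P_HARD_RETURN and *P_SOFT_RETURN.  */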
static int
identify_call_return_value (rtx cp, rtx *p_hard_return, rtx *p_soft_return)
{
  rtx insn, set, hard, soft;

  insn = XEXP (cp, 0);
  /* Search backward through the "normal" call sequence to the CALL insn.  */
  while (NEXT_INSN (insn))
    insn = NEXT_INSN (insn);
  while (GET_CODE (insn) != CALL_INSN)
    insn = PREV_INSN (insn);

  /* Assume the pattern is (set (dest) (call ...)), or that the first
     member of a parallel is.  This is the hard return register used
     by the function.  */
  if (GET_CODE (PATTERN (insn)) == SET
      && GET_CODE (SET_SRC (PATTERN (insn))) == CALL)
    hard = SET_DEST (PATTERN (insn));
  else if (GET_CODE (PATTERN (insn)) == PARALLEL
	   && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
	   && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == CALL)
    hard = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
  else
    return 0;

  /* If we didn't get a single hard register (e.g. a parallel), give up.  */
  if (GET_CODE (hard) != REG)
    return 0;

  /* Stack adjustment done after call may appear here.  */
  insn = skip_stack_adjustment (insn);
  if (! insn)
    return 0;

  /* Restore of GP register may appear here.  */
  insn = skip_pic_restore (insn);
  if (! insn)
    return 0;

  /* If there's nothing after, there's no soft return value.  */
  insn = NEXT_INSN (insn);
  if (! insn)
    return 0;

  /* We're looking for a source of the hard return register.  */
  set = single_set (insn);
  if (! set || SET_SRC (set) != hard)
    return 0;

  soft = SET_DEST (set);
  insn = NEXT_INSN (insn);

  /* Allow this first destination to be copied to a second register,
     as might happen if the first register wasn't the particular pseudo
     we'd been expecting.  */
  if (insn
      && (set = single_set (insn)) != NULL_RTX
      && SET_SRC (set) == soft)
    {
      soft = SET_DEST (set);
      insn = NEXT_INSN (insn);
    }

  /* Don't fool with anything but pseudo registers.  */
  if (GET_CODE (soft) != REG || REGNO (soft) < FIRST_PSEUDO_REGISTER)
    return 0;

  /* This value must not be modified before the end of the sequence.  */
  if (reg_set_between_p (soft, insn, NULL_RTX))
    return 0;

  *p_hard_return = hard;
  *p_soft_return = soft;

  return 1;
}
Example #21
0
/* Do transforms 3) and 4) on INSN if applicable.  */
static bool
mod_subtract_transform (rtx insn)
{
  rtx set, set_src, set_dest, op1, op2, value, histogram;
  enum rtx_code code;
  enum machine_mode mode;
  gcov_type wrong_values, counts[2], count, all;
  edge e;
  int i, prob1, prob2;

  set = single_set (insn);
  if (!set)
    return false;

  set_src = SET_SRC (set);
  set_dest = SET_DEST (set);
  code = GET_CODE (set_src);
  mode = GET_MODE (set_dest);
  
  if (code != UMOD)
    return false;
  op1 = XEXP (set_src, 0);
  op2 = XEXP (set_src, 1);

  for (histogram = REG_NOTES (insn);
       histogram;
       histogram = XEXP (histogram, 1))
    if (REG_NOTE_KIND (histogram) == REG_VALUE_PROFILE
	&& XEXP (XEXP (histogram, 0), 0) == GEN_INT (HIST_TYPE_INTERVAL))
      break;

  if (!histogram)
    return false;

  histogram = XEXP (XEXP (histogram, 0), 1);
  value = XEXP (histogram, 0);
  histogram = XEXP (histogram, 1);

  all = 0;
  for (i = 0; i < 2; i++)
    {
      counts[i] = INTVAL (XEXP (histogram, 0));
      all += counts[i];
      histogram = XEXP (histogram, 1);
    }
  wrong_values = INTVAL (XEXP (histogram, 0));
  histogram = XEXP (histogram, 1);
  wrong_values += INTVAL (XEXP (histogram, 0));
  all += wrong_values;

  /* We require that we use just subtractions in at least 50% of all
     evaluations.  */
  count = 0;
  for (i = 0; i < 2; i++)
    {
      count += counts[i];
      if (count * 2 >= all)
	break;
    }
  
  if (i == 2)
    return false;

  if (dump_file)
    fprintf (dump_file, "Mod subtract transformation on insn %d\n",
	     INSN_UID (insn));

  /* Compute probability of taking the optimal path(s).  */
  prob1 = (counts[0] * REG_BR_PROB_BASE + all / 2) / all;
  prob2 = (counts[1] * REG_BR_PROB_BASE + all / 2) / all;

  e = split_block (BLOCK_FOR_INSN (insn), PREV_INSN (insn));
  delete_insn (insn);
  
  insert_insn_on_edge (
	gen_mod_subtract (mode, code, set_dest,
			  op1, op2, i, prob1, prob2), e);

  return true;
}
Example #22
0
/* Do transform 1) on INSN if applicable.  */
static bool
divmod_fixed_value_transform (rtx insn)
{
  rtx set, set_src, set_dest, op1, op2, value, histogram;
  enum rtx_code code;
  enum machine_mode mode;
  gcov_type val, count, all;
  edge e;
  int prob;

  set = single_set (insn);
  if (!set)
    return false;

  set_src = SET_SRC (set);
  set_dest = SET_DEST (set);
  code = GET_CODE (set_src);
  mode = GET_MODE (set_dest);
  
  if (code != DIV && code != MOD && code != UDIV && code != UMOD)
    return false;
  op1 = XEXP (set_src, 0);
  op2 = XEXP (set_src, 1);

  for (histogram = REG_NOTES (insn);
       histogram;
       histogram = XEXP (histogram, 1))
    if (REG_NOTE_KIND (histogram) == REG_VALUE_PROFILE
	&& XEXP (XEXP (histogram, 0), 0) == GEN_INT (HIST_TYPE_SINGLE_VALUE))
      break;

  if (!histogram)
    return false;

  histogram = XEXP (XEXP (histogram, 0), 1);
  value = XEXP (histogram, 0);
  histogram = XEXP (histogram, 1);
  val = INTVAL (XEXP (histogram, 0));
  histogram = XEXP (histogram, 1);
  count = INTVAL (XEXP (histogram, 0));
  histogram = XEXP (histogram, 1);
  all = INTVAL (XEXP (histogram, 0));

  /* We require that count be at least half of all; this means
     that for the transformation to fire the value must be constant
     at least 50% of the time (and 75% gives the guarantee of usage).  */
  if (!rtx_equal_p (op2, value) || 2 * count < all)
    return false;

  if (dump_file)
    fprintf (dump_file, "Div/mod by constant transformation on insn %d\n",
	     INSN_UID (insn));

  /* Compute probability of taking the optimal path.  */
  prob = (count * REG_BR_PROB_BASE + all / 2) / all;

  e = split_block (BLOCK_FOR_INSN (insn), PREV_INSN (insn));
  delete_insn (insn);
  
  insert_insn_on_edge (
	gen_divmod_fixed_value (mode, code, set_dest,
				op1, op2, val, prob), e);

  return true;
}
Example #23
0
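/* Transform INSN if applicable: when its value histogram shows that the
   address of the memory reference in INSN usually changes by a constant
   delta large enough to make prefetching worthwhile, emit a speculative
   prefetch on the edge before INSN.  Return true if the transformation
   was done.  */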
static bool
speculative_prefetching_transform (rtx insn)
{
  rtx histogram, value;
  gcov_type val, count, all;
  edge e;
  rtx mem, address;
  int write;

  if (!maybe_hot_bb_p (BLOCK_FOR_INSN (insn)))
    return false;

  if (!find_mem_reference (insn, &mem, &write))
    return false;

  address = XEXP (mem, 0);
  if (side_effects_p (address))
    return false;
      
  if (CONSTANT_P (address))
    return false;

  for (histogram = REG_NOTES (insn);
       histogram;
       histogram = XEXP (histogram, 1))
    if (REG_NOTE_KIND (histogram) == REG_VALUE_PROFILE
	&& XEXP (XEXP (histogram, 0), 0) == GEN_INT (HIST_TYPE_CONST_DELTA))
      break;

  if (!histogram)
    return false;

  histogram = XEXP (XEXP (histogram, 0), 1);
  value = XEXP (histogram, 0);
  histogram = XEXP (histogram, 1);
  /* Skip last value referenced.  */
  histogram = XEXP (histogram, 1);
  val = INTVAL (XEXP (histogram, 0));
  histogram = XEXP (histogram, 1);
  count = INTVAL (XEXP (histogram, 0));
  histogram = XEXP (histogram, 1);
  all = INTVAL (XEXP (histogram, 0));

  /* With so few executions we do not really have a reason to optimize the
     statement, and more importantly, the data about differences of addresses
     are spoiled by the first item that had no previous value to compare
     with.  */
  if (all < 4)
    return false;

  /* We require that count be at least half of all; this means
     that for the transformation to fire the value must be constant
     at least 50% of the time (and 75% gives the guarantee of usage).  */
  if (!rtx_equal_p (address, value) || 2 * count < all)
    return false;

  /* If the difference is too small, it does not make too much sense to
     prefetch, as the memory is probably already in cache.  */
  if (val >= NOPREFETCH_RANGE_MIN && val <= NOPREFETCH_RANGE_MAX)
    return false;

  if (dump_file)
    fprintf (dump_file, "Speculative prefetching for insn %d\n",
	     INSN_UID (insn));

  e = split_block (BLOCK_FOR_INSN (insn), PREV_INSN (insn));
  
  insert_insn_on_edge (gen_speculative_prefetch (address, val, write), e);

  return true;
}
Example #24
0
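/* Given the reordered chain of basic blocks, relink the insn chain to
   match it and then add or redirect jumps so that every block's outgoing
   edges remain correct.  */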
static void
fixup_reorder_chain (void)
{
  basic_block bb, prev_bb;
  int index;
  rtx insn = NULL;

  if (cfg_layout_function_header)
    {
      set_first_insn (cfg_layout_function_header);
      insn = cfg_layout_function_header;
      while (NEXT_INSN (insn))
	insn = NEXT_INSN (insn);
    }

  /* First do the bulk reordering -- rechain the blocks without regard to
     the needed changes to jumps and labels.  */

  for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0;
       bb != 0;
       bb = bb->rbi->next, index++)
    {
      if (bb->rbi->header)
	{
	  if (insn)
	    NEXT_INSN (insn) = bb->rbi->header;
	  else
	    set_first_insn (bb->rbi->header);
	  PREV_INSN (bb->rbi->header) = insn;
	  insn = bb->rbi->header;
	  while (NEXT_INSN (insn))
	    insn = NEXT_INSN (insn);
	}
      if (insn)
	NEXT_INSN (insn) = BB_HEAD (bb);
      else
	set_first_insn (BB_HEAD (bb));
      PREV_INSN (BB_HEAD (bb)) = insn;
      insn = BB_END (bb);
      if (bb->rbi->footer)
	{
	  NEXT_INSN (insn) = bb->rbi->footer;
	  PREV_INSN (bb->rbi->footer) = insn;
	  while (NEXT_INSN (insn))
	    insn = NEXT_INSN (insn);
	}
    }

  if (index != n_basic_blocks)
    abort ();

  NEXT_INSN (insn) = cfg_layout_function_footer;
  if (cfg_layout_function_footer)
    PREV_INSN (cfg_layout_function_footer) = insn;

  while (NEXT_INSN (insn))
    insn = NEXT_INSN (insn);

  set_last_insn (insn);
#ifdef ENABLE_CHECKING
  verify_insn_chain ();
#endif
  delete_dead_jumptables ();

  /* Now add jumps and labels as needed to match the blocks new
     outgoing edges.  */

  for (bb = ENTRY_BLOCK_PTR->next_bb; bb ; bb = bb->rbi->next)
    {
      edge e_fall, e_taken, e;
      rtx bb_end_insn;
      basic_block nb;

      if (bb->succ == NULL)
	continue;

      /* Find the old fallthru edge, and another non-EH edge for
	 a taken jump.  */
      e_taken = e_fall = NULL;
      for (e = bb->succ; e ; e = e->succ_next)
	if (e->flags & EDGE_FALLTHRU)
	  e_fall = e;
	else if (! (e->flags & EDGE_EH))
	  e_taken = e;

      bb_end_insn = BB_END (bb);
      if (GET_CODE (bb_end_insn) == JUMP_INSN)
	{
	  if (any_condjump_p (bb_end_insn))
	    {
	      /* If the old fallthru is still next, nothing to do.  */
	      if (bb->rbi->next == e_fall->dest
	          || (!bb->rbi->next
		      && e_fall->dest == EXIT_BLOCK_PTR))
		continue;

	      /* The degenerate case of a conditional jump jumping to the next
		 instruction can happen on targets having jumps with side
		 effects.

		 Temporarily create a duplicated edge representing the branch.
		 It will be removed again by force_nonfallthru_and_redirect,
		 which would otherwise get confused by a fallthru edge not
		 pointing to the next basic block.  */
	      if (!e_taken)
		{
		  rtx note;
		  edge e_fake;

		  e_fake = unchecked_make_edge (bb, e_fall->dest, 0);

		  if (!redirect_jump (BB_END (bb), block_label (bb), 0))
		    abort ();
		  note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
		  if (note)
		    {
		      int prob = INTVAL (XEXP (note, 0));

		      e_fake->probability = prob;
		      e_fake->count = e_fall->count * prob / REG_BR_PROB_BASE;
		      e_fall->probability -= e_fake->probability;
		      e_fall->count -= e_fake->count;
		      if (e_fall->probability < 0)
			e_fall->probability = 0;
		      if (e_fall->count < 0)
			e_fall->count = 0;
		    }
		}
	      /* There is one special case: if *neither* block is next,
		 such as happens at the very end of a function, then we'll
		 need to add a new unconditional jump.  Choose the taken
		 edge based on known or assumed probability.  */
	      else if (bb->rbi->next != e_taken->dest)
		{
		  rtx note = find_reg_note (bb_end_insn, REG_BR_PROB, 0);

		  if (note
		      && INTVAL (XEXP (note, 0)) < REG_BR_PROB_BASE / 2
		      && invert_jump (bb_end_insn,
				      label_for_bb (e_fall->dest), 0))
		    {
		      e_fall->flags &= ~EDGE_FALLTHRU;
		      e_taken->flags |= EDGE_FALLTHRU;
		      update_br_prob_note (bb);
		      e = e_fall, e_fall = e_taken, e_taken = e;
		    }
		}

	      /* Otherwise we can try to invert the jump.  This will
		 basically never fail, however, keep up the pretense.  */
	      else if (invert_jump (bb_end_insn,
				    label_for_bb (e_fall->dest), 0))
		{
		  e_fall->flags &= ~EDGE_FALLTHRU;
		  e_taken->flags |= EDGE_FALLTHRU;
		  update_br_prob_note (bb);
		  continue;
		}
	    }
	  else if (returnjump_p (bb_end_insn))
	    continue;
	  else
	    {
	      /* Otherwise we have some switch or computed jump.  In the
		 99% case, there should not have been a fallthru edge.  */
	      if (! e_fall)
		continue;

#ifdef CASE_DROPS_THROUGH
	      /* Except for VAX.  Since we didn't have predication for the
		 tablejump, the fallthru block should not have moved.  */
	      if (bb->rbi->next == e_fall->dest)
		continue;
	      bb_end_insn = skip_insns_after_block (bb);
#else
	      abort ();
#endif
	    }
	}
      else
	{
	  /* No fallthru implies a noreturn function with EH edges, or
	     something similarly bizarre.  In any case, we don't need to
	     do anything.  */
	  if (! e_fall)
	    continue;

	  /* If the fallthru block is still next, nothing to do.  */
	  if (bb->rbi->next == e_fall->dest)
	    continue;

	  /* A fallthru to exit block.  */
	  if (!bb->rbi->next && e_fall->dest == EXIT_BLOCK_PTR)
	    continue;
	}

      /* We got here if we need to add a new jump insn.  */
      nb = force_nonfallthru (e_fall);
      if (nb)
	{
	  cfg_layout_initialize_rbi (nb);
	  nb->rbi->visited = 1;
	  nb->rbi->next = bb->rbi->next;
	  bb->rbi->next = nb;
	  /* Don't process this new block.  */
	  bb = nb;
	}
    }

  /* Put basic_block_info in the new order.  */

  if (rtl_dump_file)
    {
      fprintf (rtl_dump_file, "Reordered sequence:\n");
      for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0; bb; bb = bb->rbi->next, index ++)
	{
	  fprintf (rtl_dump_file, " %i ", index);
	  if (bb->rbi->original)
	    fprintf (rtl_dump_file, "duplicate of %i ",
		     bb->rbi->original->index);
	  else if (forwarder_block_p (bb) && GET_CODE (BB_HEAD (bb)) != CODE_LABEL)
	    fprintf (rtl_dump_file, "compensation ");
	  else
	    fprintf (rtl_dump_file, "bb %i ", bb->index);
	  fprintf (rtl_dump_file, " [%i]\n", bb->frequency);
	}
    }

  prev_bb = ENTRY_BLOCK_PTR;
  bb = ENTRY_BLOCK_PTR->next_bb;
  index = 0;

  for (; bb; prev_bb = bb, bb = bb->rbi->next, index ++)
    {
      bb->index = index;
      BASIC_BLOCK (index) = bb;

      bb->prev_bb = prev_bb;
      prev_bb->next_bb = bb;
    }
  prev_bb->next_bb = EXIT_BLOCK_PTR;
  EXIT_BLOCK_PTR->prev_bb = prev_bb;

  /* Annoying special case - jump around dead jumptables left in the code.  */
  FOR_EACH_BB (bb)
    {
      edge e;
      for (e = bb->succ; e && !(e->flags & EDGE_FALLTHRU); e = e->succ_next)
	continue;
      if (e && !can_fallthru (e->src, e->dest))
	force_nonfallthru (e);
    }
}
Example #25
0
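/* Try to match every abstractable insn with every other insn in the same
   hash bucket and collect the matching sequences in pattern_seqs, first
   marking as ineligible any insn across which a stack register is live or
   which sets or uses cc0.  */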
static void
collect_pattern_seqs (void)
{
  htab_iterator hti0, hti1, hti2;
  p_hash_bucket hash_bucket;
  p_hash_elem e0, e1;
#if defined STACK_REGS || defined HAVE_cc0
  basic_block bb;
  bitmap_head dont_collect;

  /* Extra initialization step to ensure that no stack registers (if present)
     or cc0 code (if present) are live across abnormal edges.
     Set a flag in DONT_COLLECT for an insn if a stack register is live
     after the insn or the insn is cc0 setter or user.  */
  bitmap_initialize (&dont_collect, NULL);

#ifdef STACK_REGS
  FOR_EACH_BB (bb)
  {
    regset_head live;
    rtx insn;
    rtx prev;

    /* Initialize liveness propagation.  */
    INIT_REG_SET (&live);
    bitmap_copy (&live, DF_LR_OUT (bb));
    df_simulate_initialize_backwards (bb, &live);

    /* Propagate liveness info and mark insns where a stack reg is live.  */
    for (insn = BB_END (bb); ; insn = prev)
      {
	prev = PREV_INSN (insn);
	if (INSN_P (insn))
	  {
	    int reg;
	    for (reg = FIRST_STACK_REG; reg <= LAST_STACK_REG; reg++)
	      {
		if (REGNO_REG_SET_P (&live, reg))
		  {
		    bitmap_set_bit (&dont_collect, INSN_UID (insn));
		    break;
		  }
	      }
	    
	  }
	if (insn == BB_HEAD (bb))
	  break;
	df_simulate_one_insn_backwards (bb, insn, &live);
      }

    /* Free unused data.  */
    CLEAR_REG_SET (&live);
  }
#endif

#ifdef HAVE_cc0
  /* Mark CC0 setters and users as ineligible for collection into sequences.
     This is an over-conservative fix, since it is OK to include
     a cc0_setter, but only if we also include the corresponding cc0_user,
     and vice versa.  */
  FOR_EACH_BB (bb)
  {
    rtx insn;
    rtx next_tail;

    next_tail = NEXT_INSN (BB_END (bb));

    for (insn = BB_HEAD (bb); insn != next_tail; insn = NEXT_INSN (insn))
      {
	if (INSN_P (insn) && reg_mentioned_p (cc0_rtx, PATTERN (insn)))
	  bitmap_set_bit (&dont_collect, INSN_UID (insn));
      }
  }
#endif

#endif /* defined STACK_REGS || defined HAVE_cc0 */

  /* Initialize PATTERN_SEQS to empty.  */
  pattern_seqs = 0;

  /* Try to match every abstractable insn with every other insn in the same
     HASH_BUCKET.  */

  FOR_EACH_HTAB_ELEMENT (hash_buckets, hash_bucket, p_hash_bucket, hti0)
    if (htab_elements (hash_bucket->seq_candidates) > 1)
      FOR_EACH_HTAB_ELEMENT (hash_bucket->seq_candidates, e0, p_hash_elem, hti1)
        FOR_EACH_HTAB_ELEMENT (hash_bucket->seq_candidates, e1, p_hash_elem,
                               hti2)
          if (e0 != e1
#if defined STACK_REGS || defined HAVE_cc0
              && !bitmap_bit_p (&dont_collect, INSN_UID (e0->insn))
              && !bitmap_bit_p (&dont_collect, INSN_UID (e1->insn))
#endif
             )
            match_seqs (e0, e1);
#if defined STACK_REGS || defined HAVE_cc0
  /* Free unused data.  */
  bitmap_clear (&dont_collect);
#endif
}
Example #26
0
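/* Given the reordered chain of basic blocks, relink the insn chain to
   match it and then add or redirect jumps so that every block's outgoing
   edges remain correct.  */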
static void
fixup_reorder_chain (void)
{
  basic_block bb, prev_bb;
  int index;
  rtx insn = NULL;

  if (cfg_layout_function_header)
    {
      set_first_insn (cfg_layout_function_header);
      insn = cfg_layout_function_header;
      while (NEXT_INSN (insn))
	insn = NEXT_INSN (insn);
    }

  /* First do the bulk reordering -- rechain the blocks without regard to
     the needed changes to jumps and labels.  */

  for (bb = ENTRY_BLOCK_PTR->next_bb, index = NUM_FIXED_BLOCKS;
       bb != 0;
       bb = bb->aux, index++)
    {
      if (bb->il.rtl->header)
	{
	  if (insn)
	    NEXT_INSN (insn) = bb->il.rtl->header;
	  else
	    set_first_insn (bb->il.rtl->header);
	  PREV_INSN (bb->il.rtl->header) = insn;
	  insn = bb->il.rtl->header;
	  while (NEXT_INSN (insn))
	    insn = NEXT_INSN (insn);
	}
      if (insn)
	NEXT_INSN (insn) = BB_HEAD (bb);
      else
	set_first_insn (BB_HEAD (bb));
      PREV_INSN (BB_HEAD (bb)) = insn;
      insn = BB_END (bb);
      if (bb->il.rtl->footer)
	{
	  NEXT_INSN (insn) = bb->il.rtl->footer;
	  PREV_INSN (bb->il.rtl->footer) = insn;
	  while (NEXT_INSN (insn))
	    insn = NEXT_INSN (insn);
	}
    }

  gcc_assert (index == n_basic_blocks);

  NEXT_INSN (insn) = cfg_layout_function_footer;
  if (cfg_layout_function_footer)
    PREV_INSN (cfg_layout_function_footer) = insn;

  while (NEXT_INSN (insn))
    insn = NEXT_INSN (insn);

  set_last_insn (insn);
#ifdef ENABLE_CHECKING
  verify_insn_chain ();
#endif
  delete_dead_jumptables ();

  /* Now add jumps and labels as needed to match the blocks new
     outgoing edges.  */

  for (bb = ENTRY_BLOCK_PTR->next_bb; bb ; bb = bb->aux)
    {
      edge e_fall, e_taken, e;
      rtx bb_end_insn;
      basic_block nb;
      edge_iterator ei;

      if (EDGE_COUNT (bb->succs) == 0)
	continue;

      /* Find the old fallthru edge, and another non-EH edge for
	 a taken jump.  */
      e_taken = e_fall = NULL;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->flags & EDGE_FALLTHRU)
	  e_fall = e;
	else if (! (e->flags & EDGE_EH))
	  e_taken = e;

      bb_end_insn = BB_END (bb);
      if (JUMP_P (bb_end_insn))
	{
	  if (any_condjump_p (bb_end_insn))
	    {
	      /* If the old fallthru is still next, nothing to do.  */
	      if (bb->aux == e_fall->dest
		  || e_fall->dest == EXIT_BLOCK_PTR)
		continue;

	      /* The degenerate case of a conditional jump jumping to the next
		 instruction can happen for jumps with side effects.  We need
		 to construct a forwarder block, and this will be done just
		 fine by force_nonfallthru below.  */
	      if (!e_taken)
		;

	      /* There is another special case: if *neither* block is next,
		 such as happens at the very end of a function, then we'll
		 need to add a new unconditional jump.  Choose the taken
		 edge based on known or assumed probability.  */
	      else if (bb->aux != e_taken->dest)
		{
		  rtx note = find_reg_note (bb_end_insn, REG_BR_PROB, 0);

		  if (note
		      && INTVAL (XEXP (note, 0)) < REG_BR_PROB_BASE / 2
		      && invert_jump (bb_end_insn,
				      (e_fall->dest == EXIT_BLOCK_PTR
				       ? NULL_RTX
				       : label_for_bb (e_fall->dest)), 0))
		    {
		      e_fall->flags &= ~EDGE_FALLTHRU;
#ifdef ENABLE_CHECKING
		      gcc_assert (could_fall_through
				  (e_taken->src, e_taken->dest));
#endif
		      e_taken->flags |= EDGE_FALLTHRU;
		      update_br_prob_note (bb);
		      e = e_fall, e_fall = e_taken, e_taken = e;
		    }
		}

	      /* If the "jumping" edge is a crossing edge, and the fall
		 through edge is non-crossing, leave things as they are.  */
	      else if ((e_taken->flags & EDGE_CROSSING)
		       && !(e_fall->flags & EDGE_CROSSING))
		continue;

	      /* Otherwise we can try to invert the jump.  This will
		 basically never fail, however, keep up the pretense.  */
	      else if (invert_jump (bb_end_insn,
				    (e_fall->dest == EXIT_BLOCK_PTR
				     ? NULL_RTX
				     : label_for_bb (e_fall->dest)), 0))
		{
		  e_fall->flags &= ~EDGE_FALLTHRU;
#ifdef ENABLE_CHECKING
		  gcc_assert (could_fall_through
			      (e_taken->src, e_taken->dest));
#endif
		  e_taken->flags |= EDGE_FALLTHRU;
		  update_br_prob_note (bb);
		  continue;
		}
	    }
	  else
	    {
	      /* Otherwise we have some return, switch or computed
		 jump.  In the 99% case, there should not have been a
		 fallthru edge.  */
	      gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
	      continue;
	    }
	}
      else
	{
	  /* No fallthru implies a noreturn function with EH edges, or
	     something similarly bizarre.  In any case, we don't need to
	     do anything.  */
	  if (! e_fall)
	    continue;

	  /* If the fallthru block is still next, nothing to do.  */
	  if (bb->aux == e_fall->dest)
	    continue;

	  /* A fallthru to exit block.  */
	  if (e_fall->dest == EXIT_BLOCK_PTR)
	    continue;
	}

      /* We got here if we need to add a new jump insn.  */
      nb = force_nonfallthru (e_fall);
      if (nb)
	{
	  nb->il.rtl->visited = 1;
	  nb->aux = bb->aux;
	  bb->aux = nb;
	  /* Don't process this new block.  */
	  bb = nb;

	  /* Make sure new bb is tagged for correct section (same as the
	     fall-thru source, since you cannot fall through across
	     section boundaries).  */
	  BB_COPY_PARTITION (e_fall->src, single_pred (bb));
	  if (flag_reorder_blocks_and_partition
	      && targetm.have_named_sections
	      && JUMP_P (BB_END (bb))
	      && !any_condjump_p (BB_END (bb))
	      && (EDGE_SUCC (bb, 0)->flags & EDGE_CROSSING))
	    REG_NOTES (BB_END (bb)) = gen_rtx_EXPR_LIST
	      (REG_CROSSING_JUMP, NULL_RTX, REG_NOTES (BB_END (bb)));
	}
    }

  /* Put basic_block_info in the new order.  */

  if (dump_file)
    {
      fprintf (dump_file, "Reordered sequence:\n");
      for (bb = ENTRY_BLOCK_PTR->next_bb, index = NUM_FIXED_BLOCKS;
	   bb;
	   bb = bb->aux, index++)
	{
	  fprintf (dump_file, " %i ", index);
	  if (get_bb_original (bb))
	    fprintf (dump_file, "duplicate of %i ",
		     get_bb_original (bb)->index);
	  else if (forwarder_block_p (bb)
		   && !LABEL_P (BB_HEAD (bb)))
	    fprintf (dump_file, "compensation ");
	  else
	    fprintf (dump_file, "bb %i ", bb->index);
	  fprintf (dump_file, " [%i]\n", bb->frequency);
	}
    }

  prev_bb = ENTRY_BLOCK_PTR;
  bb = ENTRY_BLOCK_PTR->next_bb;
  index = NUM_FIXED_BLOCKS;

  for (; bb; prev_bb = bb, bb = bb->aux, index ++)
    {
      bb->index = index;
      SET_BASIC_BLOCK (index, bb);

      bb->prev_bb = prev_bb;
      prev_bb->next_bb = bb;
    }
  prev_bb->next_bb = EXIT_BLOCK_PTR;
  EXIT_BLOCK_PTR->prev_bb = prev_bb;

  /* Annoying special case - jump around dead jumptables left in the code.  */
  FOR_EACH_BB (bb)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->flags & EDGE_FALLTHRU)
	  break;

      if (e && !can_fallthru (e->src, e->dest))
	force_nonfallthru (e);
    }
}
Example #27
0
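/* Split the fallthrough edge to the exit block and return the resulting
   pre-exit block, in which the final mode switch can be emitted.  If the
   function copies its return value into a hard register at the end, split
   before that copy so the mode switch ends up ahead of it.  */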
static basic_block
create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
{
  edge eg;
  edge_iterator ei;
  basic_block pre_exit;

  /* The only non-call predecessor at this stage is a block with a
     fallthrough edge; there can be at most one, but there could be
     none at all, e.g. when exit is called.  */
  pre_exit = 0;
  FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    if (eg->flags & EDGE_FALLTHRU)
      {
	basic_block src_bb = eg->src;
	rtx_insn *last_insn;
	rtx ret_reg;

	gcc_assert (!pre_exit);
	/* If this function returns a value at the end, we have to
	   insert the final mode switch before the return value copy
	   to its hard register.  */
	if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
	    && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
	    && GET_CODE (PATTERN (last_insn)) == USE
	    && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
	  {
	    int ret_start = REGNO (ret_reg);
	    int nregs = hard_regno_nregs[ret_start][GET_MODE (ret_reg)];
	    int ret_end = ret_start + nregs;
	    bool short_block = false;
	    bool multi_reg_return = false;
	    bool forced_late_switch = false;
	    rtx_insn *before_return_copy;

	    do
	      {
		rtx_insn *return_copy = PREV_INSN (last_insn);
		rtx return_copy_pat, copy_reg;
		int copy_start, copy_num;
		int j;

		if (NONDEBUG_INSN_P (return_copy))
		  {
		    /* When using SJLJ exceptions, the call to the
		       unregister function is inserted between the
		       clobber of the return value and the copy.
		       We do not want to split the block before this
		       or any other call; if we have not found the
		       copy yet, the copy must have been deleted.  */
		    if (CALL_P (return_copy))
		      {
			short_block = true;
			break;
		      }
		    return_copy_pat = PATTERN (return_copy);
		    switch (GET_CODE (return_copy_pat))
		      {
		      case USE:
			/* Skip USEs of multiple return registers.
			   __builtin_apply pattern is also handled here.  */
			if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
			    && (targetm.calls.function_value_regno_p
				(REGNO (XEXP (return_copy_pat, 0)))))
			  {
			    multi_reg_return = true;
			    last_insn = return_copy;
			    continue;
			  }
			break;

		      case ASM_OPERANDS:
			/* Skip barrier insns.  */
			if (!MEM_VOLATILE_P (return_copy_pat))
			  break;

			/* Fall through.  */

		      case ASM_INPUT:
		      case UNSPEC_VOLATILE:
			last_insn = return_copy;
			continue;

		      default:
			break;
		      }

		    /* If the return register is not (in its entirety)
		       likely spilled, the return copy might be
		       partially or completely optimized away.  */
		    return_copy_pat = single_set (return_copy);
		    if (!return_copy_pat)
		      {
			return_copy_pat = PATTERN (return_copy);
			if (GET_CODE (return_copy_pat) != CLOBBER)
			  break;
			else if (!optimize)
			  {
			    /* This might be (clobber (reg [<result>]))
			       when not optimizing.  Then check if
			       the previous insn is the clobber for
			       the return register.  */
			    copy_reg = SET_DEST (return_copy_pat);
			    if (GET_CODE (copy_reg) == REG
				&& !HARD_REGISTER_NUM_P (REGNO (copy_reg)))
			      {
				if (INSN_P (PREV_INSN (return_copy)))
				  {
				    return_copy = PREV_INSN (return_copy);
				    return_copy_pat = PATTERN (return_copy);
				    if (GET_CODE (return_copy_pat) != CLOBBER)
				      break;
				  }
			      }
			  }
		      }
		    copy_reg = SET_DEST (return_copy_pat);
		    if (GET_CODE (copy_reg) == REG)
		      copy_start = REGNO (copy_reg);
		    else if (GET_CODE (copy_reg) == SUBREG
			     && GET_CODE (SUBREG_REG (copy_reg)) == REG)
		      copy_start = REGNO (SUBREG_REG (copy_reg));
		    else
		      {
			/* When control reaches the end of a non-void
			   function, there are no return copy insns at all.
			   This avoids an ICE on that invalid function.  */
			if (ret_start + nregs == ret_end)
			  short_block = true;
			break;
		      }
		    if (!targetm.calls.function_value_regno_p (copy_start))
		      copy_num = 0;
		    else
		      copy_num
			= hard_regno_nregs[copy_start][GET_MODE (copy_reg)];

		    /* If the return register is not likely spilled, as is
		       the case for floating point on SH4, then it might
		       be set by an arithmetic operation that needs a
		       different mode than the exit block.  */
		    for (j = n_entities - 1; j >= 0; j--)
		      {
			int e = entity_map[j];
			int mode =
			  targetm.mode_switching.needed (e, return_copy);

			if (mode != num_modes[e]
			    && mode != targetm.mode_switching.exit (e))
			  break;
		      }
		    if (j >= 0)
		      {
			/* __builtin_return emits a sequence of loads to all
			   return registers.  One of them might require
			   another mode than MODE_EXIT, even if it is
			   unrelated to the return value, so we want to put
			   the final mode switch after it.  */
			if (multi_reg_return
			    && targetm.calls.function_value_regno_p
			        (copy_start))
			  forced_late_switch = true;

			/* For the SH4, floating point loads depend on fpscr,
			   thus we might need to put the final mode switch
			   after the return value copy.  That is still OK,
			   because a floating point return value does not
			   conflict with address reloads.  */
			if (copy_start >= ret_start
			    && copy_start + copy_num <= ret_end
			    && OBJECT_P (SET_SRC (return_copy_pat)))
			  forced_late_switch = true;
			break;
		      }
		    if (copy_num == 0)
		      {
			last_insn = return_copy;
			continue;
		      }

		    if (copy_start >= ret_start
			&& copy_start + copy_num <= ret_end)
		      nregs -= copy_num;
		    else if (!multi_reg_return
			     || !targetm.calls.function_value_regno_p
				 (copy_start))
		      break;
		    last_insn = return_copy;
		  }
		/* ??? Exception handling can lead to the return value
		   copy being already separated from the return value use,
		   as in unwind-dw2.c.
		   Similarly, conditionally returning without a value,
		   and conditionally using builtin_return can lead to an
		   isolated use.  */
		if (return_copy == BB_HEAD (src_bb))
		  {
		    short_block = true;
		    break;
		  }
		last_insn = return_copy;
	      }
	    while (nregs);

	    /* If we didn't see a full return value copy, verify that there
	       is a plausible reason for this.  If some, but not all of the
	       return register is likely spilled, we can expect that there
	       is a copy for the likely spilled part.  */
	    gcc_assert (!nregs
			|| forced_late_switch
			|| short_block
			|| !(targetm.class_likely_spilled_p
			     (REGNO_REG_CLASS (ret_start)))
			|| (nregs
			    != hard_regno_nregs[ret_start][GET_MODE (ret_reg)])
			/* For multi-hard-register floating point
		   	   values, sometimes the likely-spilled part
		   	   is ordinarily copied first, then the other
		   	   part is set with an arithmetic operation.
		   	   This doesn't actually cause reload
		   	   failures, so let it pass.  */
			|| (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
			    && nregs != 1));

	    if (!NOTE_INSN_BASIC_BLOCK_P (last_insn))
	      {
		before_return_copy
		  = emit_note_before (NOTE_INSN_DELETED, last_insn);
		/* Instructions preceding LAST_INSN in the same block might
		   require a different mode than MODE_EXIT, so if we might
		   have such instructions, keep them in a separate block
		   from pre_exit.  */
		src_bb = split_block (src_bb,
				      PREV_INSN (before_return_copy))->dest;
	      }
	    else
	      before_return_copy = last_insn;
	    pre_exit = split_block (src_bb, before_return_copy)->src;
	  }
	else
	  {
	    pre_exit = split_edge (eg);
	  }
      }

  return pre_exit;
}
Example #28
0
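/* Return the last of the barriers, jump table data and end-of-block/loop
   notes following BB_END (bb) that still belong to BB, stopping before
   the next block's head; if there are none, BB_END (bb) itself is
   returned.  Notes of other kinds found in that span are moved after the
   returned insn.  */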
static rtx
skip_insns_after_block (basic_block bb)
{
  rtx insn, last_insn, next_head, prev;

  next_head = NULL_RTX;
  if (bb->next_bb != EXIT_BLOCK_PTR)
    next_head = BB_HEAD (bb->next_bb);

  for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
    {
      if (insn == next_head)
	break;

      switch (GET_CODE (insn))
	{
	case BARRIER:
	  last_insn = insn;
	  continue;

	case NOTE:
	  switch (NOTE_LINE_NUMBER (insn))
	    {
	    case NOTE_INSN_LOOP_END:
	    case NOTE_INSN_BLOCK_END:
	      last_insn = insn;
	      continue;
	    case NOTE_INSN_DELETED:
	    case NOTE_INSN_DELETED_LABEL:
	      continue;

	    default:
	      continue;
	      break;
	    }
	  break;

	case CODE_LABEL:
	  if (NEXT_INSN (insn)
	      && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
	      && (GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_VEC
	          || GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_DIFF_VEC))
	    {
	      insn = NEXT_INSN (insn);
	      last_insn = insn;
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }

  /* It is possible to hit a contradictory sequence.  For instance:

     jump_insn
     NOTE_INSN_LOOP_BEG
     barrier

     where the barrier belongs to the jump_insn, but the note does not.
     This can be created by removing the basic block originally following
     NOTE_INSN_LOOP_BEG.  In such a case, reorder the notes.  */

  for (insn = last_insn; insn != BB_END (bb); insn = prev)
    {
      prev = PREV_INSN (insn);
      if (GET_CODE (insn) == NOTE)
	switch (NOTE_LINE_NUMBER (insn))
	  {
	  case NOTE_INSN_LOOP_END:
	  case NOTE_INSN_BLOCK_END:
	  case NOTE_INSN_DELETED:
	  case NOTE_INSN_DELETED_LABEL:
	    continue;
	  default:
	    reorder_insns (insn, insn, last_insn);
	  }
    }

  return last_insn;
}