Example #1
/* Return true if INSN requires the stack frame to be set up.
   PROLOGUE_USED contains the hard registers used in the function
   prologue.  SET_UP_BY_PROLOGUE is the set of registers we expect the
   prologue to set up for the function.  */
bool
requires_stack_frame_p (rtx_insn *insn, HARD_REG_SET prologue_used,
			HARD_REG_SET set_up_by_prologue)
{
  df_ref def, use;
  HARD_REG_SET hardregs;
  unsigned regno;

  if (CALL_P (insn))
    return !SIBLING_CALL_P (insn);

  /* We need a frame to get the unique CFA expected by the unwinder.  */
  if (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
    return true;

  CLEAR_HARD_REG_SET (hardregs);
  FOR_EACH_INSN_DEF (def, insn)
    {
      rtx dreg = DF_REF_REG (def);

      if (!REG_P (dreg))
	continue;

      add_to_hard_reg_set (&hardregs, GET_MODE (dreg),
			   REGNO (dreg));
    }
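
All of these examples center on GCC's CALL_P and SIBLING_CALL_P predicates over RTL insns. As a minimal sketch of that shared idiom, using only the internal rtl.h API already visible above (count_bb_calls is a hypothetical name, not a GCC function), a pass could count the ordinary calls in a block like this:

/* Hypothetical sketch of the CALL_P idiom: count the call insns in
   BB, ignoring sibling (tail) calls, which behave more like returns
   than like ordinary calls.  */
static int
count_bb_calls (basic_block bb)
{
  rtx_insn *insn;
  int n_calls = 0;

  FOR_BB_INSNS (bb, insn)
    if (CALL_P (insn) && !SIBLING_CALL_P (insn))
      n_calls++;
  return n_calls;
}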
Example #2
static unsigned int
postreload_load (void)
{
  basic_block bb;

  init_alias_analysis ();

  FOR_EACH_BB (bb)
    {
      rtx insn;

      htab_load = htab_create (10, load_htab_hash, load_htab_eq, NULL);

      FOR_BB_INSNS (bb, insn)
	{
	  rtx set;
	  struct load **load;

	  /* Set reg_kill, invalidate entries if there is an
	     aliasing store or if the registers making up the address
	     change.  */
	  htab_traverse_noresize
	    (htab_load, find_reg_kill_and_mem_invalidate, insn);	

	  set = single_set (insn);
	  if (interesting_second_load (set, &load, insn))
	    {
	      rtx move;

	      move = gen_move_insn (SET_DEST (set), (*load)->reg);
	      /* Make sure we can generate a move.  */
	      extract_insn (move);
	      if (! constrain_operands (1))
		continue;

	      move = emit_insn_before (move, (*load)->reg_kill);
	      delete_insn (insn);

	      if (dump_file)
		{
		  fputs ("Replaced this load:\n  ", dump_file);
		  print_inline_rtx (dump_file, insn, 2);
		  fputs ("\n  with this move:\n  ", dump_file);
		  print_inline_rtx (dump_file, move, 2);
		  fputs ("\n\n", dump_file);
		}
	    }
	  else if (interesting_load (set))
	    alloc_load (set);
	  else if (CALL_P (insn))
	    htab_empty (htab_load);
	}

      htab_empty (htab_load);
    }
Example #3
File: dce.c Project: AHelper/gcc
static void
mark_insn (rtx_insn *insn, bool fast)
{
  if (!marked_insn_p (insn))
    {
      if (!fast)
	worklist.safe_push (insn);
      bitmap_set_bit (marked, INSN_UID (insn));
      if (dump_file)
	fprintf (dump_file, "  Adding insn %d to worklist\n", INSN_UID (insn));
      if (CALL_P (insn)
	  && !df_in_progress
	  && !SIBLING_CALL_P (insn)
	  && (RTL_CONST_OR_PURE_CALL_P (insn)
	      && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn)))
	find_call_stack_args (as_a <rtx_call_insn *> (insn), true, fast, NULL);
    }
}
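
mark_insn guards against re-queueing with marked_insn_p before setting the bit in the pass-level marked bitmap. As an aside (a sketch only, not code from dce.c): in GCC trees of this vintage bitmap_set_bit reports whether the bit was newly set, so the membership test and the update could in principle be fused (mark_insn_once is a hypothetical name):

/* Hypothetical sketch: record INSN's UID in the pass-level MARKED
   bitmap and report whether it was newly marked, fusing the
   marked_insn_p test with the bitmap update.  */
static bool
mark_insn_once (rtx_insn *insn)
{
  return bitmap_set_bit (marked, INSN_UID (insn));
}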
Example #4
static basic_block
create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
{
  edge eg;
  edge_iterator ei;
  basic_block pre_exit;

  /* The only non-call predecessor at this stage is a block with a
     fallthrough edge; there can be at most one, but there could be
     none at all, e.g. when exit is called.  */
  pre_exit = 0;
  FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    if (eg->flags & EDGE_FALLTHRU)
      {
	basic_block src_bb = eg->src;
	rtx_insn *last_insn;
	rtx ret_reg;

	gcc_assert (!pre_exit);
	/* If this function returns a value at the end, we have to
	   insert the final mode switch before the return value copy
	   to its hard register.  */
	if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
	    && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
	    && GET_CODE (PATTERN (last_insn)) == USE
	    && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
	  {
	    int ret_start = REGNO (ret_reg);
	    int nregs = hard_regno_nregs[ret_start][GET_MODE (ret_reg)];
	    int ret_end = ret_start + nregs;
	    bool short_block = false;
	    bool multi_reg_return = false;
	    bool forced_late_switch = false;
	    rtx_insn *before_return_copy;

	    do
	      {
		rtx_insn *return_copy = PREV_INSN (last_insn);
		rtx return_copy_pat, copy_reg;
		int copy_start, copy_num;
		int j;

		if (NONDEBUG_INSN_P (return_copy))
		  {
		    /* When using SJLJ exceptions, the call to the
		       unregister function is inserted between the
		       clobber of the return value and the copy.
		       We do not want to split the block before this
		       or any other call; if we have not found the
		       copy yet, the copy must have been deleted.  */
		    if (CALL_P (return_copy))
		      {
			short_block = true;
			break;
		      }
		    return_copy_pat = PATTERN (return_copy);
		    switch (GET_CODE (return_copy_pat))
		      {
		      case USE:
			/* Skip USEs of multiple return registers.
			   __builtin_apply pattern is also handled here.  */
			if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
			    && (targetm.calls.function_value_regno_p
				(REGNO (XEXP (return_copy_pat, 0)))))
			  {
			    multi_reg_return = true;
			    last_insn = return_copy;
			    continue;
			  }
			break;

		      case ASM_OPERANDS:
			/* Skip barrier insns.  */
			if (!MEM_VOLATILE_P (return_copy_pat))
			  break;

			/* Fall through.  */

		      case ASM_INPUT:
		      case UNSPEC_VOLATILE:
			last_insn = return_copy;
			continue;

		      default:
			break;
		      }

		    /* If the return register is not (in its entirety)
		       likely spilled, the return copy might be
		       partially or completely optimized away.  */
		    return_copy_pat = single_set (return_copy);
		    if (!return_copy_pat)
		      {
			return_copy_pat = PATTERN (return_copy);
			if (GET_CODE (return_copy_pat) != CLOBBER)
			  break;
			else if (!optimize)
			  {
			    /* This might be (clobber (reg [<result>]))
			       when not optimizing.  Then check if
			       the previous insn is the clobber for
			       the return register.  */
			    copy_reg = SET_DEST (return_copy_pat);
			    if (GET_CODE (copy_reg) == REG
				&& !HARD_REGISTER_NUM_P (REGNO (copy_reg)))
			      {
				if (INSN_P (PREV_INSN (return_copy)))
				  {
				    return_copy = PREV_INSN (return_copy);
				    return_copy_pat = PATTERN (return_copy);
				    if (GET_CODE (return_copy_pat) != CLOBBER)
				      break;
				  }
			      }
			  }
		      }
		    copy_reg = SET_DEST (return_copy_pat);
		    if (GET_CODE (copy_reg) == REG)
		      copy_start = REGNO (copy_reg);
		    else if (GET_CODE (copy_reg) == SUBREG
			     && GET_CODE (SUBREG_REG (copy_reg)) == REG)
		      copy_start = REGNO (SUBREG_REG (copy_reg));
		    else
		      {
			/* When control reaches the end of a non-void
			   function, there are no return copy insns at
			   all.  This avoids an ICE on that invalid
			   function.  */
			if (ret_start + nregs == ret_end)
			  short_block = true;
			break;
		      }
		    if (!targetm.calls.function_value_regno_p (copy_start))
		      copy_num = 0;
		    else
		      copy_num
			= hard_regno_nregs[copy_start][GET_MODE (copy_reg)];

		    /* If the return register is not likely spilled (as is
		       the case for floating point on SH4), then it might
		       be set by an arithmetic operation that needs a
		       different mode than the exit block.  */
		    for (j = n_entities - 1; j >= 0; j--)
		      {
			int e = entity_map[j];
			int mode =
			  targetm.mode_switching.needed (e, return_copy);

			if (mode != num_modes[e]
			    && mode != targetm.mode_switching.exit (e))
			  break;
		      }
		    if (j >= 0)
		      {
			/* __builtin_return emits a sequence of loads to all
			   return registers.  One of them might require
			   another mode than MODE_EXIT, even if it is
			   unrelated to the return value, so we want to put
			   the final mode switch after it.  */
			if (multi_reg_return
			    && targetm.calls.function_value_regno_p
			        (copy_start))
			  forced_late_switch = true;

			/* For the SH4, floating point loads depend on fpscr,
			   thus we might need to put the final mode switch
			   after the return value copy.  That is still OK,
			   because a floating point return value does not
			   conflict with address reloads.  */
			if (copy_start >= ret_start
			    && copy_start + copy_num <= ret_end
			    && OBJECT_P (SET_SRC (return_copy_pat)))
			  forced_late_switch = true;
			break;
		      }
		    if (copy_num == 0)
		      {
			last_insn = return_copy;
			continue;
		      }

		    if (copy_start >= ret_start
			&& copy_start + copy_num <= ret_end)
		      nregs -= copy_num;
		    else if (!multi_reg_return
			     || !targetm.calls.function_value_regno_p
				 (copy_start))
		      break;
		    last_insn = return_copy;
		  }
		/* ??? Exception handling can lead to the return value
		   copy being already separated from the return value use,
		   as in unwind-dw2.c.
		   Similarly, conditionally returning without a value,
		   and conditionally using builtin_return can lead to an
		   isolated use.  */
		if (return_copy == BB_HEAD (src_bb))
		  {
		    short_block = true;
		    break;
		  }
		last_insn = return_copy;
	      }
	    while (nregs);

	    /* If we didn't see a full return value copy, verify that there
	       is a plausible reason for this.  If some, but not all of the
	       return register is likely spilled, we can expect that there
	       is a copy for the likely spilled part.  */
	    gcc_assert (!nregs
			|| forced_late_switch
			|| short_block
			|| !(targetm.class_likely_spilled_p
			     (REGNO_REG_CLASS (ret_start)))
			|| (nregs
			    != hard_regno_nregs[ret_start][GET_MODE (ret_reg)])
			/* For multi-hard-register floating point
			   values, sometimes the likely-spilled part
			   is ordinarily copied first, then the other
			   part is set with an arithmetic operation.
			   This doesn't actually cause reload
			   failures, so let it pass.  */
			|| (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
			    && nregs != 1));

	    if (!NOTE_INSN_BASIC_BLOCK_P (last_insn))
	      {
		before_return_copy
		  = emit_note_before (NOTE_INSN_DELETED, last_insn);
		/* Instructions preceding LAST_INSN in the same block might
		   require a different mode than MODE_EXIT, so if we might
		   have such instructions, keep them in a separate block
		   from pre_exit.  */
		src_bb = split_block (src_bb,
				      PREV_INSN (before_return_copy))->dest;
	      }
	    else
	      before_return_copy = last_insn;
	    pre_exit = split_block (src_bb, before_return_copy)->src;
	  }
	else
	  {
	    pre_exit = split_edge (eg);
	  }
      }

  return pre_exit;
}
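
create_pre_exit calls split_block twice, once keeping ->dest of the result and once ->src. split_block splits after the given insn and returns the edge connecting the two halves; a sketch of that convention (split_after is a hypothetical wrapper, not part of GCC):

/* Hypothetical sketch: split_block (bb, insn) splits BB after INSN
   and returns the connecting edge, so e->src is the half that now
   ends with INSN and e->dest is the half starting at the insn that
   follows it.  */
static basic_block
split_after (basic_block bb, rtx_insn *insn)
{
  edge e = split_block (bb, insn);
  return e->dest;
}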
Example #5
static void
recompute_gain_for_pattern_seq (pattern_seq pseq)
{
  matching_seq mseq;
  rtx x;
  int i;
  int hascall;
  HARD_REG_SET linkregs;

  /* Initialize data.  */
  SET_HARD_REG_SET (linkregs);
  pseq->link_reg = NULL_RTX;
  pseq->abstracted_length = 0;

  pseq->gain = -(seq_call_cost - seq_jump_cost + seq_return_cost);

  /* Determine ABSTRACTED_LENGTH and COST for matching sequences of PSEQ.
     ABSTRACTED_LENGTH may be less than MATCHING_LENGTH if sequences in the
     same block overlap. */

  for (mseq = pseq->matching_seqs; mseq; mseq = mseq->next_matching_seq)
    {
      /* Determine ABSTRACTED_LENGTH.  */
      if (mseq->next_matching_seq)
        mseq->abstracted_length = (int)(mseq->next_matching_seq->idx -
                                        mseq->idx);
      else
        mseq->abstracted_length = mseq->matching_length;

      if (mseq->abstracted_length > mseq->matching_length)
        mseq->abstracted_length = mseq->matching_length;

      /* Compute the cost of the sequence.  */
      RECOMPUTE_COST (mseq);

      /* If COST is big enough, registers live in this matching sequence
         should not be used as a link register.  Also set
         ABSTRACTED_LENGTH of PSEQ.  */
      if (mseq->cost > seq_call_cost)
        {
          clear_regs_live_in_seq (&linkregs, mseq->insn,
                                  mseq->abstracted_length);
          if (mseq->abstracted_length > pseq->abstracted_length)
            pseq->abstracted_length = mseq->abstracted_length;
        }
    }

  /* Modify ABSTRACTED_LENGTH of PSEQ if pattern sequence overlaps with one
     of the matching sequences.  */
  for (mseq = pseq->matching_seqs; mseq; mseq = mseq->next_matching_seq)
    {
      x = pseq->insn;
      for (i = 0; (i < pseq->abstracted_length) && (x != mseq->insn); i++)
        x = prev_insn_in_block (x);
      pseq->abstracted_length = i;
    }

  /* Compute the cost of the pattern sequence.  */
  RECOMPUTE_COST (pseq);

  /* No gain if COST is too small.  */
  if (pseq->cost <= seq_call_cost)
    {
      pseq->gain = -1;
      return;
    }

  /* Ensure that no matching sequence is longer than the pattern sequence.  */
  for (mseq = pseq->matching_seqs; mseq; mseq = mseq->next_matching_seq)
    {
      if (mseq->abstracted_length > pseq->abstracted_length)
        {
          mseq->abstracted_length = pseq->abstracted_length;
          RECOMPUTE_COST (mseq);
        }
      /* Once the length has stabilized, the gain can be calculated.  */
      if (mseq->cost > seq_call_cost)
        pseq->gain += mseq->cost - seq_call_cost;
    }

  /* No need to do further work if there is no gain.  */
  if (pseq->gain <= 0)
    return;

  /* Do not use registers live in the pattern sequence as the link
     register.  */
  clear_regs_live_in_seq (&linkregs, pseq->insn, pseq->abstracted_length);

  /* Determine whether pattern sequence contains a call_insn.  */
  hascall = 0;
  x = pseq->insn;
  for (i = 0; i < pseq->abstracted_length; i++)
    {
      if (CALL_P (x))
        {
          hascall = 1;
          break;
        }
      x = prev_insn_in_block (x);
    }

  /* Should not use a register as a link register if:
     - it is a fixed register, or
     - the sequence contains a call insn and the register is a call
       used register, or
     - the register needs to be saved if used in a function but was not
       used before (since saving it can invalidate already computed
       frame pointer offsets), or
     - the register cannot be used as a base register.  */

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (fixed_regs[i]
#ifdef REGNO_OK_FOR_INDIRECT_JUMP_P
        || (!REGNO_OK_FOR_INDIRECT_JUMP_P (i, Pmode))
#else
        || (!ok_for_base_p_1 (i, Pmode, MEM, SCRATCH))
        || (!reg_class_subset_p (REGNO_REG_CLASS (i),
				 base_reg_class (VOIDmode, MEM, SCRATCH)))
#endif
        || (hascall && call_used_regs[i])
        || (!call_used_regs[i] && !df_regs_ever_live_p (i)))
      CLEAR_HARD_REG_BIT (linkregs, i);

  /* Find an appropriate register to be used as the link register.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (linkregs, i))
      {
        pseq->link_reg = gen_rtx_REG (Pmode, i);
        break;
      }

  /* Abstraction is not possible if no link register is available, so set
     gain to 0.  */
  if (!pseq->link_reg)
    pseq->gain = 0;
}
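
recompute_gain_for_pattern_seq treats linkregs as a candidate mask: start from SET_HARD_REG_SET (everything allowed), clear the unusable registers, then take the first survivor. The final scan, pulled out as a sketch (first_allowed_reg is a hypothetical name):

/* Hypothetical sketch of the candidate-mask scan above: return the
   lowest hard register number still set in ALLOWED, or -1 if the
   mask is empty.  */
static int
first_allowed_reg (HARD_REG_SET allowed)
{
  int i;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (allowed, i))
      return i;
  return -1;
}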
Example #6
File: dce.c Project: AHelper/gcc
static bool
find_call_stack_args (rtx_call_insn *call_insn, bool do_mark, bool fast,
		      bitmap arg_stores)
{
  rtx p;
  rtx_insn *insn, *prev_insn;
  bool ret;
  HOST_WIDE_INT min_sp_off, max_sp_off;
  bitmap sp_bytes;

  gcc_assert (CALL_P (call_insn));
  if (!ACCUMULATE_OUTGOING_ARGS)
    return true;

  if (!do_mark)
    {
      gcc_assert (arg_stores);
      bitmap_clear (arg_stores);
    }

  min_sp_off = INTTYPE_MAXIMUM (HOST_WIDE_INT);
  max_sp_off = 0;

  /* First determine the minimum and maximum offset from sp for
     stored arguments.  */
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
	&& MEM_P (XEXP (XEXP (p, 0), 0)))
      {
	rtx mem = XEXP (XEXP (p, 0), 0), addr;
	HOST_WIDE_INT off = 0, size;
	if (!MEM_SIZE_KNOWN_P (mem))
	  return false;
	size = MEM_SIZE (mem);
	addr = XEXP (mem, 0);
	if (GET_CODE (addr) == PLUS
	    && REG_P (XEXP (addr, 0))
	    && CONST_INT_P (XEXP (addr, 1)))
	  {
	    off = INTVAL (XEXP (addr, 1));
	    addr = XEXP (addr, 0);
	  }
	if (addr != stack_pointer_rtx)
	  {
	    if (!REG_P (addr))
	      return false;
	    /* If not fast, use chains to see if addr wasn't set to
	       sp + offset.  */
	    if (!fast)
	      {
		df_ref use;
		struct df_link *defs;
		rtx set;

		FOR_EACH_INSN_USE (use, call_insn)
		  if (rtx_equal_p (addr, DF_REF_REG (use)))
		    break;

		if (use == NULL)
		  return false;

		for (defs = DF_REF_CHAIN (use); defs; defs = defs->next)
		  if (! DF_REF_IS_ARTIFICIAL (defs->ref))
		    break;

		if (defs == NULL)
		  return false;

		set = single_set (DF_REF_INSN (defs->ref));
		if (!set)
		  return false;

		if (GET_CODE (SET_SRC (set)) != PLUS
		    || XEXP (SET_SRC (set), 0) != stack_pointer_rtx
		    || !CONST_INT_P (XEXP (SET_SRC (set), 1)))
		  return false;

		off += INTVAL (XEXP (SET_SRC (set), 1));
	      }
	    else
	      return false;
Example #7
File: dce.c Project: AHelper/gcc
static bool
deletable_insn_p (rtx_insn *insn, bool fast, bitmap arg_stores)
{
  rtx body, x;
  int i;
  df_ref def;

  if (CALL_P (insn)
      /* We cannot delete calls inside of the recursive dce because
	 this may cause basic blocks to be deleted and this messes up
	 the rest of the stack of optimization passes.  */
      && (!df_in_progress)
      /* We cannot delete pure or const sibling calls because it is
	 hard to see the result.  */
      && (!SIBLING_CALL_P (insn))
      /* We can delete dead const or pure calls as long as they do not
         loop infinitely.  */
      && (RTL_CONST_OR_PURE_CALL_P (insn)
	  && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn)))
    return find_call_stack_args (as_a <rtx_call_insn *> (insn), false,
				 fast, arg_stores);

  /* Don't delete jumps, notes and the like.  */
  if (!NONJUMP_INSN_P (insn))
    return false;

  /* Don't delete insns that may throw if we cannot do so.  */
  if (!(cfun->can_delete_dead_exceptions && can_alter_cfg)
      && !insn_nothrow_p (insn))
    return false;

  /* If INSN sets a global_reg, leave it untouched.  */
  FOR_EACH_INSN_DEF (def, insn)
    if (HARD_REGISTER_NUM_P (DF_REF_REGNO (def))
	&& global_regs[DF_REF_REGNO (def)])
      return false;
    /* Initialization of pseudo PIC register should never be removed.  */
    else if (DF_REF_REG (def) == pic_offset_table_rtx
	     && REGNO (pic_offset_table_rtx) >= FIRST_PSEUDO_REGISTER)
      return false;

  body = PATTERN (insn);
  switch (GET_CODE (body))
    {
    case USE:
    case VAR_LOCATION:
      return false;

    case CLOBBER:
      if (fast)
	{
	  /* A CLOBBER of a dead pseudo register serves no purpose.
	     That is not necessarily true for hard registers until
	     after reload.  */
	  x = XEXP (body, 0);
	  return REG_P (x) && (!HARD_REGISTER_P (x) || reload_completed);
	}
      else
	/* Because of the way that use-def chains are built, it is not
	   possible to tell if the clobber is dead because it can
	   never be the target of a use-def chain.  */
	return false;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	if (!deletable_insn_p_1 (XVECEXP (body, 0, i)))
	  return false;
      return true;

    default:
      return deletable_insn_p_1 (body);
    }
}
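
deletable_insn_p walks the insn's definitions through the df machinery. Pulled out as a minimal sketch (assuming df is initialized for the current function; writes_global_reg_p is a hypothetical name), the global_regs check above reduces to:

/* Hypothetical sketch of the FOR_EACH_INSN_DEF idiom above: true if
   INSN writes any hard register the user declared global.  */
static bool
writes_global_reg_p (rtx_insn *insn)
{
  df_ref def;

  FOR_EACH_INSN_DEF (def, insn)
    if (HARD_REGISTER_NUM_P (DF_REF_REGNO (def))
	&& global_regs[DF_REF_REGNO (def)])
      return true;
  return false;
}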
Example #8
static void
combine_stack_adjustments_for_block (basic_block bb)
{
    HOST_WIDE_INT last_sp_adjust = 0;
    rtx last_sp_set = NULL_RTX;
    rtx last2_sp_set = NULL_RTX;
    struct csa_reflist *reflist = NULL;
    rtx insn, next, set;
    struct record_stack_refs_data data;
    bool end_of_block = false;

    for (insn = BB_HEAD (bb); !end_of_block ; insn = next)
    {
        end_of_block = insn == BB_END (bb);
        next = NEXT_INSN (insn);

        if (! INSN_P (insn))
            continue;

        set = single_set_for_csa (insn);
        if (set)
        {
            rtx dest = SET_DEST (set);
            rtx src = SET_SRC (set);

            /* Find constant additions to the stack pointer.  */
            if (dest == stack_pointer_rtx
                    && GET_CODE (src) == PLUS
                    && XEXP (src, 0) == stack_pointer_rtx
                    && CONST_INT_P (XEXP (src, 1)))
            {
                HOST_WIDE_INT this_adjust = INTVAL (XEXP (src, 1));

                /* If we've not seen an adjustment previously, record
                   it now and continue.  */
                if (! last_sp_set)
                {
                    last_sp_set = insn;
                    last_sp_adjust = this_adjust;
                    continue;
                }

                /* If not all recorded refs can be adjusted, or the
                   adjustment is now too large for a constant addition,
                   we cannot merge the two stack adjustments.

                   Also we need to be careful not to move the stack
                   pointer such that we create stack accesses outside
                   the allocated area.  We can combine an allocation
                   into the first insn, or a deallocation into the
                   second insn.  We cannot combine an allocation
                   followed by a deallocation.

                   The only somewhat frequent occurrence of the latter
                   is when a function allocates a stack frame but does
                   not use it.  For this case, we would need to analyze
                   the rtl stream to be sure that the allocated area is
                   really unused.  This means not only checking the
                   memory references, but also all registers or global
                   memory references possibly containing a stack frame
                   address.

                   Perhaps the best way to address this problem is to
                   teach gcc not to allocate stack for objects never
                   used.  */

                /* Combine an allocation into the first instruction.  */
                if (STACK_GROWS_DOWNWARD ? this_adjust <= 0 : this_adjust >= 0)
                {
                    if (try_apply_stack_adjustment (last_sp_set, reflist,
                                                    last_sp_adjust + this_adjust,
                                                    this_adjust))
                    {
                        /* It worked!  */
                        maybe_move_args_size_note (last_sp_set, insn, false);
                        delete_insn (insn);
                        last_sp_adjust += this_adjust;
                        continue;
                    }
                }

                /* Otherwise we have a deallocation.  Do not combine
                   with a previous allocation.  Combine into the second
                   insn.  */
                else if (STACK_GROWS_DOWNWARD
                         ? last_sp_adjust >= 0 : last_sp_adjust <= 0)
                {
                    if (try_apply_stack_adjustment (insn, reflist,
                                                    last_sp_adjust + this_adjust,
                                                    -last_sp_adjust))
                    {
                        /* It worked!  */
                        maybe_move_args_size_note (insn, last_sp_set, true);
                        delete_insn (last_sp_set);
                        last_sp_set = insn;
                        last_sp_adjust += this_adjust;
                        free_csa_reflist (reflist);
                        reflist = NULL;
                        continue;
                    }
                }

                /* Combination failed.  Restart processing from here.
                   If deallocation+allocation conspired to cancel, we
                   can delete the old deallocation insn.  */
                if (last_sp_set)
                {
                    if (last_sp_adjust == 0)
                    {
                        maybe_move_args_size_note (insn, last_sp_set, true);
                        delete_insn (last_sp_set);
                    }
                    else
                        last2_sp_set = last_sp_set;
                }
                free_csa_reflist (reflist);
                reflist = NULL;
                last_sp_set = insn;
                last_sp_adjust = this_adjust;
                continue;
            }

            /* Find a store with pre-(dec|inc)rement or pre-modify of exactly
               the previous adjustment and turn it into a simple store.  This
               is equivalent to anticipating the stack adjustment so this must
               be an allocation.  */
            if (MEM_P (dest)
                    && ((STACK_GROWS_DOWNWARD
                         ? (GET_CODE (XEXP (dest, 0)) == PRE_DEC
                            && last_sp_adjust
                            == (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (dest)))
                         : (GET_CODE (XEXP (dest, 0)) == PRE_INC
                            && last_sp_adjust
                            == -(HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (dest))))
                        || ((STACK_GROWS_DOWNWARD
                             ? last_sp_adjust >= 0 : last_sp_adjust <= 0)
                            && GET_CODE (XEXP (dest, 0)) == PRE_MODIFY
                            && GET_CODE (XEXP (XEXP (dest, 0), 1)) == PLUS
                            && XEXP (XEXP (XEXP (dest, 0), 1), 0)
                            == stack_pointer_rtx
                            && GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
                            == CONST_INT
                            && INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1))
                            == -last_sp_adjust))
                    && XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx
                    && !reg_mentioned_p (stack_pointer_rtx, src)
                    && memory_address_p (GET_MODE (dest), stack_pointer_rtx)
                    && try_apply_stack_adjustment (insn, reflist, 0,
                                                   -last_sp_adjust))
            {
                if (last2_sp_set)
                    maybe_move_args_size_note (last2_sp_set, last_sp_set, false);
                else
                    maybe_move_args_size_note (insn, last_sp_set, true);
                delete_insn (last_sp_set);
                free_csa_reflist (reflist);
                reflist = NULL;
                last_sp_set = NULL_RTX;
                last_sp_adjust = 0;
                continue;
            }
        }

        data.insn = insn;
        data.reflist = reflist;
        if (!CALL_P (insn) && last_sp_set
                && !for_each_rtx (&PATTERN (insn), record_stack_refs, &data))
        {
            reflist = data.reflist;
            continue;
        }
        reflist = data.reflist;

        /* Otherwise, we were not able to process the instruction.
           Do not continue collecting data across such a one.  */
        if (last_sp_set
                && (CALL_P (insn)
                    || reg_mentioned_p (stack_pointer_rtx, PATTERN (insn))))
        {
            if (last_sp_set && last_sp_adjust == 0)
            {
                force_move_args_size_note (bb, last2_sp_set, last_sp_set);
                delete_insn (last_sp_set);
            }
            free_csa_reflist (reflist);
            reflist = NULL;
            last2_sp_set = NULL_RTX;
            last_sp_set = NULL_RTX;
            last_sp_adjust = 0;
        }
    }

    if (last_sp_set && last_sp_adjust == 0)
    {
        force_move_args_size_note (bb, last2_sp_set, last_sp_set);
        delete_insn (last_sp_set);
    }

    if (reflist)
        free_csa_reflist (reflist);
}
Example #9
static void
force_move_args_size_note (basic_block bb, rtx prev, rtx insn)
{
    rtx note, test, next_candidate, prev_candidate;

    /* If PREV exists, tail-call to the logic in the other function.  */
    if (prev)
    {
        maybe_move_args_size_note (prev, insn, false);
        return;
    }

    /* First, make sure there's anything that needs doing.  */
    note = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);
    if (note == NULL)
        return;

    /* We need to find a spot between the previous and next exception points
       where we can place the note and "properly" deallocate the arguments.  */
    next_candidate = prev_candidate = NULL;

    /* It is often the case that we have insns in the order:
           call
           add sp (previous deallocation)
           sub sp (align for next arglist)
           push arg
       and the add/sub cancel.  Therefore we begin by searching forward.  */

    test = insn;
    while ((test = next_active_insn_bb (bb, test)) != NULL)
    {
        /* Found an existing note: nothing to do.  */
        if (find_reg_note (test, REG_ARGS_SIZE, NULL_RTX))
            return;
        /* Found something that affects unwinding.  Stop searching.  */
        if (CALL_P (test) || !insn_nothrow_p (test))
            break;
        if (next_candidate == NULL)
            next_candidate = test;
    }

    test = insn;
    while ((test = prev_active_insn_bb (bb, test)) != NULL)
    {
        rtx tnote;
        /* Found a place that seems logical to adjust the stack.  */
        tnote = find_reg_note (test, REG_ARGS_SIZE, NULL_RTX);
        if (tnote)
        {
            XEXP (tnote, 0) = XEXP (note, 0);
            return;
        }
        if (prev_candidate == NULL)
            prev_candidate = test;
        /* Found something that affects unwinding.  Stop searching.  */
        if (CALL_P (test) || !insn_nothrow_p (test))
            break;
    }

    if (prev_candidate)
        test = prev_candidate;
    else if (next_candidate)
        test = next_candidate;
    else
    {
        /* ??? We *must* have a place, lest we ICE on the lost adjustment.
           Options are: dummy clobber insn, nop, or prevent the removal of
           the sp += 0 insn.  */
        /* TODO: Find another way to indicate to the dwarf2 code that we
           have not in fact lost an adjustment.  */
        test = emit_insn_before (gen_rtx_CLOBBER (VOIDmode, const0_rtx), insn);
    }
    add_reg_note (test, REG_ARGS_SIZE, XEXP (note, 0));
}
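
Both stack-adjustment functions above shuttle REG_ARGS_SIZE notes around with find_reg_note and add_reg_note. A sketch of reading such a note (args_size_at is a hypothetical helper; the note's operand is a CONST_INT holding the pending argument-area size):

/* Hypothetical sketch: return the REG_ARGS_SIZE value recorded on
   INSN, or -1 if INSN carries no such note.  */
static HOST_WIDE_INT
args_size_at (rtx insn)
{
  rtx note = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);

  return note ? INTVAL (XEXP (note, 0)) : -1;
}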
Example #10
/* If INSN cannot be used for rematerialization, return a negative
   value.  If INSN can be considered as a candidate for
   rematerialization, return the operand number of the pseudo for
   which the insn can be used for rematerialization.  Here we
   consider only insns without any memory reference, spilled pseudo
   (except for the rematerialization pseudo), or dying or unused
   regs.  */
static int
operand_to_remat (rtx_insn *insn)
{
  lra_insn_recog_data_t id = lra_get_insn_recog_data (insn);
  struct lra_static_insn_data *static_id = id->insn_static_data;
  struct lra_insn_reg *reg, *found_reg = NULL;

  /* Don't rematerialize insns which can change PC.  */
  if (JUMP_P (insn) || CALL_P (insn))
    return -1;
  /* First find a pseudo which can be rematerialized.  */
  for (reg = id->regs; reg != NULL; reg = reg->next)
    /* FRAME_POINTER_NEEDED might be true because we cannot follow
       changing sp offsets, e.g. when alloca is used.  If the insn
       contains the stack pointer in such a case, we cannot
       rematerialize it, as we cannot know the sp offset at the
       rematerialization place.  */
    if (reg->regno == STACK_POINTER_REGNUM && frame_pointer_needed)
      return -1;
    else if (reg->type == OP_OUT && ! reg->subreg_p
	     && find_regno_note (insn, REG_UNUSED, reg->regno) == NULL)
      {
	/* We permit only one spilled reg.  */
	if (found_reg != NULL)
	  return -1;
	found_reg = reg;
      }
    /* IRA calculates conflicts separately for the subregs of a
       two-word pseudo.  Even if the pseudo is live, e.g. because one
       of its subregs is used later, the hard register of another
       subreg can already be in use for something else.  In such a
       case, it is not safe to rematerialize the insn.  */
    else if (reg->type == OP_IN && reg->subreg_p
	     && reg->regno >= FIRST_PSEUDO_REGISTER
	     && (GET_MODE_SIZE (PSEUDO_REGNO_MODE (reg->regno))
		 == 2 * UNITS_PER_WORD))
      return -1;
  if (found_reg == NULL)
    return -1;
  if (found_reg->regno < FIRST_PSEUDO_REGISTER)
    return -1;
  if (bad_for_rematerialization_p (PATTERN (insn)))
    return -1;
  /* Check that the other regs are not spilled.  */
  for (reg = id->regs; reg != NULL; reg = reg->next)
    if (found_reg == reg)
      continue;
    else if (reg->type == OP_INOUT)
      return -1;
    else if (reg->regno >= FIRST_PSEUDO_REGISTER
	     && reg_renumber[reg->regno] < 0)
      /* Another spilled reg.  */
      return -1;
    else if (reg->type == OP_IN)
      {
	if (find_regno_note (insn, REG_DEAD, reg->regno) != NULL)
	  /* We don't want to make live ranges longer.  */
	  return -1;
	/* Check that the input reg is not also an output reg.  */
	for (struct lra_insn_reg *reg2 = id->regs;
	     reg2 != NULL;
	     reg2 = reg2->next)
	  if (reg2->type == OP_OUT && reg->regno == reg2->regno)
	    return -1;
	if (reg->regno < FIRST_PSEUDO_REGISTER)
	  for (struct lra_insn_reg *reg2 = static_id->hard_regs;
	       reg2 != NULL;
	       reg2 = reg2->next)
	    if (reg2->type == OP_OUT
		&& reg->regno <= reg2->regno
		&& (reg2->regno
		    < (reg->regno
		       + hard_regno_nregs[reg->regno][reg->biggest_mode])))
	      return -1;
      }
  /* Find the rematerialization operand.  */
  int nop = static_id->n_operands;
  for (int i = 0; i < nop; i++)
    if (REG_P (*id->operand_loc[i])
	&& (int) REGNO (*id->operand_loc[i]) == found_reg->regno)
      return i;
  return -1;
}
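
operand_to_remat detects a spilled pseudo with reg_renumber[regno] < 0: a pseudo to which the allocator assigned no hard register lives in a stack slot. The same test as a sketch (spilled_pseudo_p is a hypothetical wrapper, not part of GCC):

/* Hypothetical wrapper for the spill test used above.  */
static bool
spilled_pseudo_p (int regno)
{
  return regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0;
}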
Example #11
static bool
copyprop_hardreg_forward_1 (basic_block bb, struct value_data *vd)
{
  bool anything_changed = false;
  rtx insn;

  for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
    {
      int n_ops, i, alt, predicated;
      bool is_asm, any_replacements;
      rtx set;
      rtx link;
      bool replaced[MAX_RECOG_OPERANDS];
      bool changed = false;
      struct kill_set_value_data ksvd;

      if (!NONDEBUG_INSN_P (insn))
	{
	  if (DEBUG_INSN_P (insn))
	    {
	      rtx loc = INSN_VAR_LOCATION_LOC (insn);
	      if (!VAR_LOC_UNKNOWN_P (loc))
		replace_oldest_value_addr (&INSN_VAR_LOCATION_LOC (insn),
					   ALL_REGS, GET_MODE (loc),
					   ADDR_SPACE_GENERIC, insn, vd);
	    }

	  if (insn == BB_END (bb))
	    break;
	  else
	    continue;
	}

      set = single_set (insn);
      extract_insn (insn);
      if (! constrain_operands (1))
	fatal_insn_not_found (insn);
      preprocess_constraints ();
      alt = which_alternative;
      n_ops = recog_data.n_operands;
      is_asm = asm_noperands (PATTERN (insn)) >= 0;

      /* Simplify the code below by rewriting things to reflect
	 matching constraints.  Also promote OP_OUT to OP_INOUT
	 in predicated instructions.  */

      predicated = GET_CODE (PATTERN (insn)) == COND_EXEC;
      for (i = 0; i < n_ops; ++i)
	{
	  int matches = recog_op_alt[i][alt].matches;
	  if (matches >= 0)
	    recog_op_alt[i][alt].cl = recog_op_alt[matches][alt].cl;
	  if (matches >= 0 || recog_op_alt[i][alt].matched >= 0
	      || (predicated && recog_data.operand_type[i] == OP_OUT))
	    recog_data.operand_type[i] = OP_INOUT;
	}

      /* Apply changes to earlier DEBUG_INSNs if possible.  */
      if (vd->n_debug_insn_changes)
	note_uses (&PATTERN (insn), cprop_find_used_regs, vd);

      /* For each earlyclobber operand, zap the value data.  */
      for (i = 0; i < n_ops; i++)
	if (recog_op_alt[i][alt].earlyclobber)
	  kill_value (recog_data.operand[i], vd);

      /* Within asms, a clobber cannot overlap inputs or outputs.
	 I wouldn't think this were true for regular insns, but
	 scan_rtx treats them like that...  */
      note_stores (PATTERN (insn), kill_clobbered_value, vd);

      /* Kill all auto-incremented values.  */
      /* ??? REG_INC is useless, since stack pushes aren't done that way.  */
      for_each_rtx (&PATTERN (insn), kill_autoinc_value, vd);

      /* Kill all early-clobbered operands.  */
      for (i = 0; i < n_ops; i++)
	if (recog_op_alt[i][alt].earlyclobber)
	  kill_value (recog_data.operand[i], vd);

      /* If we have dead sets in the insn, then we need to note these
	 as we would note clobbers.  */
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
	{
	  if (REG_NOTE_KIND (link) == REG_UNUSED)
	    {
	      kill_value (XEXP (link, 0), vd);
	      /* Furthermore, if the insn looked like a single-set,
		 but the dead store kills the source value of that
		 set, then we can no longer use the plain move
		 special case below.  */
	      if (set
		  && reg_overlap_mentioned_p (XEXP (link, 0), SET_SRC (set)))
		set = NULL;
	    }
	}

      /* Special-case plain move instructions, since we may well
	 be able to do the move from a different register class.  */
      if (set && REG_P (SET_SRC (set)))
	{
	  rtx src = SET_SRC (set);
	  unsigned int regno = REGNO (src);
	  enum machine_mode mode = GET_MODE (src);
	  unsigned int i;
	  rtx new_rtx;

	  /* If we are accessing SRC in some mode other than what we
	     set it in, make sure that the replacement is valid.  */
	  if (mode != vd->e[regno].mode)
	    {
	      if (hard_regno_nregs[regno][mode]
		  > hard_regno_nregs[regno][vd->e[regno].mode])
		goto no_move_special_case;

	      /* Likewise, if we are narrowing on a big-endian target,
		 the transformation is also invalid.  */
	      if (hard_regno_nregs[regno][mode]
		  < hard_regno_nregs[regno][vd->e[regno].mode]
		  && (GET_MODE_SIZE (vd->e[regno].mode) > UNITS_PER_WORD
		      ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN))
		goto no_move_special_case;
	    }

	  /* If the destination is also a register, try to find a source
	     register in the same class.  */
	  if (REG_P (SET_DEST (set)))
	    {
	      new_rtx = find_oldest_value_reg (REGNO_REG_CLASS (regno), src, vd);
	      if (new_rtx && validate_change (insn, &SET_SRC (set), new_rtx, 0))
		{
		  if (dump_file)
		    fprintf (dump_file,
			     "insn %u: replaced reg %u with %u\n",
			     INSN_UID (insn), regno, REGNO (new_rtx));
		  changed = true;
		  goto did_replacement;
		}
	      /* We need to re-extract as validate_change clobbers
		 recog_data.  */
	      extract_insn (insn);
	      if (! constrain_operands (1))
		fatal_insn_not_found (insn);
	      preprocess_constraints ();
	    }

	  /* Otherwise, try all valid registers and see if the change
	     is valid.  */
	  for (i = vd->e[regno].oldest_regno; i != regno;
	       i = vd->e[i].next_regno)
	    {
	      new_rtx = maybe_mode_change (vd->e[i].mode, vd->e[regno].mode,
				       mode, i, regno);
	      if (new_rtx != NULL_RTX)
		{
		  if (validate_change (insn, &SET_SRC (set), new_rtx, 0))
		    {
		      ORIGINAL_REGNO (new_rtx) = ORIGINAL_REGNO (src);
		      REG_ATTRS (new_rtx) = REG_ATTRS (src);
		      REG_POINTER (new_rtx) = REG_POINTER (src);
		      if (dump_file)
			fprintf (dump_file,
				 "insn %u: replaced reg %u with %u\n",
				 INSN_UID (insn), regno, REGNO (new_rtx));
		      changed = true;
		      goto did_replacement;
		    }
		  /* We need to re-extract as validate_change clobbers
		     recog_data.  */
		  extract_insn (insn);
		  if (! constrain_operands (1))
		    fatal_insn_not_found (insn);
		  preprocess_constraints ();
		}
	    }
	}
      no_move_special_case:

      any_replacements = false;

      /* For each input operand, replace a hard register with the
	 eldest live copy that's in an appropriate register class.  */
      for (i = 0; i < n_ops; i++)
	{
	  replaced[i] = false;

	  /* Don't scan match_operand here, since we've no reg class
	     information to pass down.  Any operands that we could
	     substitute in will be represented elsewhere.  */
	  if (recog_data.constraints[i][0] == '\0')
	    continue;

	  /* Don't replace in asms intentionally referencing hard regs.  */
	  if (is_asm && REG_P (recog_data.operand[i])
	      && (REGNO (recog_data.operand[i])
		  == ORIGINAL_REGNO (recog_data.operand[i])))
	    continue;

	  if (recog_data.operand_type[i] == OP_IN)
	    {
	      if (recog_op_alt[i][alt].is_address)
		replaced[i]
		  = replace_oldest_value_addr (recog_data.operand_loc[i],
					       recog_op_alt[i][alt].cl,
					       VOIDmode, ADDR_SPACE_GENERIC,
					       insn, vd);
	      else if (REG_P (recog_data.operand[i]))
		replaced[i]
		  = replace_oldest_value_reg (recog_data.operand_loc[i],
					      recog_op_alt[i][alt].cl,
					      insn, vd);
	      else if (MEM_P (recog_data.operand[i]))
		replaced[i] = replace_oldest_value_mem (recog_data.operand[i],
							insn, vd);
	    }
	  else if (MEM_P (recog_data.operand[i]))
	    replaced[i] = replace_oldest_value_mem (recog_data.operand[i],
						    insn, vd);

	  /* If we performed any replacement, update match_dups.  */
	  if (replaced[i])
	    {
	      int j;
	      rtx new_rtx;

	      new_rtx = *recog_data.operand_loc[i];
	      recog_data.operand[i] = new_rtx;
	      for (j = 0; j < recog_data.n_dups; j++)
		if (recog_data.dup_num[j] == i)
		  validate_unshare_change (insn, recog_data.dup_loc[j], new_rtx, 1);

	      any_replacements = true;
	    }
	}

      if (any_replacements)
	{
	  if (! apply_change_group ())
	    {
	      for (i = 0; i < n_ops; i++)
		if (replaced[i])
		  {
		    rtx old = *recog_data.operand_loc[i];
		    recog_data.operand[i] = old;
		  }

	      if (dump_file)
		fprintf (dump_file,
			 "insn %u: reg replacements not verified\n",
			 INSN_UID (insn));
	    }
	  else
	    changed = true;
	}

    did_replacement:
      if (changed)
	{
	  anything_changed = true;

	  /* If something changed, perhaps further changes to earlier
	     DEBUG_INSNs can be applied.  */
	  if (vd->n_debug_insn_changes)
	    note_uses (&PATTERN (insn), cprop_find_used_regs, vd);
	}

      ksvd.vd = vd;
      ksvd.ignore_set_reg = NULL_RTX;

      /* Clobber call-clobbered registers.  */
      if (CALL_P (insn))
	{
	  unsigned int set_regno = INVALID_REGNUM;
	  unsigned int set_nregs = 0;
	  unsigned int regno;
	  rtx exp;

	  for (exp = CALL_INSN_FUNCTION_USAGE (insn); exp; exp = XEXP (exp, 1))
	    {
	      rtx x = XEXP (exp, 0);
	      if (GET_CODE (x) == SET)
		{
		  rtx dest = SET_DEST (x);
		  kill_value (dest, vd);
		  set_value_regno (REGNO (dest), GET_MODE (dest), vd);
		  copy_value (dest, SET_SRC (x), vd);
		  ksvd.ignore_set_reg = dest;
		  set_regno = REGNO (dest);
		  set_nregs
		    = hard_regno_nregs[set_regno][GET_MODE (dest)];
		  break;
		}
	    }

	  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
	    if ((TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)
		 || HARD_REGNO_CALL_PART_CLOBBERED (regno, vd->e[regno].mode))
		&& (regno < set_regno || regno >= set_regno + set_nregs))
	      kill_value_regno (regno, 1, vd);

	  /* If SET was seen in CALL_INSN_FUNCTION_USAGE, and SET_SRC
	     of the SET isn't in regs_invalidated_by_call hard reg set,
	     but instead among CLOBBERs on the CALL_INSN, we could wrongly
	     assume the value in it is still live.  */
	  if (ksvd.ignore_set_reg)
	    {
	      note_stores (PATTERN (insn), kill_clobbered_value, vd);
	      for (exp = CALL_INSN_FUNCTION_USAGE (insn);
		   exp;
		   exp = XEXP (exp, 1))
		{
		  rtx x = XEXP (exp, 0);
		  if (GET_CODE (x) == CLOBBER)
		    kill_value (SET_DEST (x), vd);
		}
	    }
	}

      /* Notice stores.  */
      note_stores (PATTERN (insn), kill_set_value, &ksvd);

      /* Notice copies.  */
      if (set && REG_P (SET_DEST (set)) && REG_P (SET_SRC (set)))
	copy_value (SET_DEST (set), SET_SRC (set), vd);

      if (insn == BB_END (bb))
	break;
    }

  return anything_changed;
}
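
copyprop_hardreg_forward_1 leans heavily on note_stores, which invokes a callback on every SET and CLOBBER destination in a pattern. A sketch of the callback shape (count_reg_stores is a hypothetical example with the same signature as kill_set_value above):

/* Hypothetical note_stores callback: DEST is the stored-to rtx, SET
   is the enclosing SET or CLOBBER, and DATA is the user pointer
   passed to note_stores.  Count register destinations.  */
static void
count_reg_stores (rtx dest, const_rtx set ATTRIBUTE_UNUSED, void *data)
{
  if (REG_P (dest))
    ++*(int *) data;
}

It would be invoked as note_stores (PATTERN (insn), count_reg_stores, &n_stores).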
Example #12
static basic_block
expand_gimple_tailcall (basic_block bb, tree stmt, bool *can_fallthru)
{
  rtx last2, last;
  edge e;
  edge_iterator ei;
  int probability;
  gcov_type count;

  last2 = last = get_last_insn ();

  expand_expr_stmt (stmt);

  for (last = NEXT_INSN (last); last; last = NEXT_INSN (last))
    if (CALL_P (last) && SIBLING_CALL_P (last))
      goto found;

  maybe_dump_rtl_for_tree_stmt (stmt, last2);

  *can_fallthru = true;
  return NULL;

 found:
  /* ??? Wouldn't it be better to just reset any pending stack adjust?
     Any instructions emitted here are about to be deleted.  */
  do_pending_stack_adjust ();

  /* Remove any non-eh, non-abnormal edges that don't go to exit.  */
  /* ??? I.e. the fallthrough edge.  HOWEVER!  If there were to be
     EH or abnormal edges, we shouldn't have created a tail call in
     the first place.  So it seems to me we should just be removing
     all edges here, or redirecting the existing fallthru edge to
     the exit block.  */

  probability = 0;
  count = 0;

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (!(e->flags & (EDGE_ABNORMAL | EDGE_EH)))
	{
	  if (e->dest != EXIT_BLOCK_PTR)
	    {
	      e->dest->count -= e->count;
	      e->dest->frequency -= EDGE_FREQUENCY (e);
	      if (e->dest->count < 0)
	        e->dest->count = 0;
	      if (e->dest->frequency < 0)
	        e->dest->frequency = 0;
	    }
	  count += e->count;
	  probability += e->probability;
	  remove_edge (e);
	}
      else
	ei_next (&ei);
    }

  /* This is somewhat ugly: the call_expr expander often emits instructions
     after the sibcall (to perform the function return).  These confuse the
     find_sub_basic_blocks code, so we need to get rid of them.  */
  last = NEXT_INSN (last);
  gcc_assert (BARRIER_P (last));

  *can_fallthru = false;
  while (NEXT_INSN (last))
    {
      /* For instance, an sqrt builtin expander may expand an if with a
	 sibcall in the `then' branch and a label for the `else' branch.  */
      if (LABEL_P (NEXT_INSN (last)))
	{
	  *can_fallthru = true;
	  break;
	}
      delete_insn (NEXT_INSN (last));
    }

  e = make_edge (bb, EXIT_BLOCK_PTR, EDGE_ABNORMAL | EDGE_SIBCALL);
  e->probability += probability;
  e->count += count;
  BB_END (bb) = last;
  update_bb_for_insn (bb);

  if (NEXT_INSN (last))
    {
      bb = create_basic_block (NEXT_INSN (last), get_last_insn (), bb);

      last = BB_END (bb);
      if (BARRIER_P (last))
	BB_END (bb) = PREV_INSN (last);
    }

  maybe_dump_rtl_for_tree_stmt (stmt, last2);

  return bb;
}
Example #13
static bool
doloop_valid_p (struct loop *loop, struct niter_desc *desc)
{
    basic_block *body = get_loop_body (loop), bb;
    rtx insn;
    unsigned i;
    bool result = true;

    /* Check for loops that may not terminate under special conditions.  */
    if (!desc->simple_p
            || desc->assumptions
            || desc->infinite)
    {
        /* There are some cases that would require special attention.
           For example if the comparison is LEU and the comparison value
           is UINT_MAX then the loop will not terminate.  Similarly, if
           the comparison code is GEU and the comparison value is 0, the
           loop will not terminate.

           If the absolute increment is not 1, the loop can be infinite
           even with LTU/GTU, e.g. for (i = 3; i > 0; i -= 2)

           APPLE LOCAL begin lno
           Note that with LE and GE, the loop behavior is undefined
           (C++ standard section 5 clause 5) if an overflow occurs, say
           between INT_MAX and INT_MAX + 1.  We thus don't have to worry
           about these two cases.
           APPLE LOCAL end lno

           ??? We could compute these conditions at run-time and have an
           additional jump around the loop to ensure an infinite loop.
           However, it is very unlikely that this is the intended
           behavior of the loop, and checking for these rare boundary
           conditions would pessimize all other code.

           If the loop is executed only a few times, an extra check to
           restart the loop could use up most of the benefits of using a
           count register loop.  Note however that normally this
           restart branch would never execute, so it could be predicted
           well by the CPU.  We should generate the pessimistic code by
           default, and have an option, e.g. -funsafe-loops, that would
           enable count-register loops in this case.  */
        if (dump_file)
            fprintf (dump_file, "Doloop: Possible infinite iteration case.\n");
        result = false;
        goto cleanup;
    }

    for (i = 0; i < loop->num_nodes; i++)
    {
        bb = body[i];

        for (insn = BB_HEAD (bb);
                insn != NEXT_INSN (BB_END (bb));
                insn = NEXT_INSN (insn))
        {
            /* A called function may clobber any special registers
               required for low-overhead looping.  */
            if (CALL_P (insn))
            {
                if (dump_file)
                    fprintf (dump_file, "Doloop: Function call in loop.\n");
                result = false;
                goto cleanup;
            }

            /* Some targets (e.g., PPC) use the count register for branch
               on table instructions.  ??? This should be a target
               specific check.  */
            if (JUMP_P (insn)
                    && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
                        || GET_CODE (PATTERN (insn)) == ADDR_VEC))
            {
                if (dump_file)
                    fprintf (dump_file, "Doloop: Computed branch in the loop.\n");
                result = false;
                goto cleanup;
            }
        }
    }
    result = true;

cleanup:
    free (body);

    return result;
}
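
The jump-table check inside the loop above is a recurring predicate. Pulled out as a sketch (jump_table_branch_p is a hypothetical name; this form matches the vintage of the code above, where dispatch tables still live in the jump insn's pattern):

/* Hypothetical helper matching the test in doloop_valid_p: a jump
   whose pattern is a dispatch table (branch on table).  */
static bool
jump_table_branch_p (rtx insn)
{
  return JUMP_P (insn)
	 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
	     || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC);
}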