Code example #1
File: lra-eliminations.c Project: paranoiacblack/gcc
/* Function for initialization of elimination once per function.  It
   sets up sp offset for each insn.  */
static void
init_elimination (void)
{
  bool stop_to_sp_elimination_p;
  basic_block bb;
  rtx_insn *insn;
  struct lra_elim_table *ep;

  init_elim_table ();
  FOR_EACH_BB_FN (bb, cfun)
    {
      curr_sp_change = 0;
      stop_to_sp_elimination_p = false;
      FOR_BB_INSNS (bb, insn)
	if (INSN_P (insn))
	  {
	    lra_get_insn_recog_data (insn)->sp_offset = curr_sp_change;
	    if (NONDEBUG_INSN_P (insn))
	      {
		mark_not_eliminable (PATTERN (insn), VOIDmode);
		if (curr_sp_change != 0
		    && find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX))
		  stop_to_sp_elimination_p = true;
	      }
	  }
      if (! frame_pointer_needed
	  && (curr_sp_change != 0 || stop_to_sp_elimination_p)
	  && bb->succs && bb->succs->length () != 0)
	for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
	  if (ep->to == STACK_POINTER_REGNUM)
	    setup_can_eliminate (ep, false);
    }
}
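Example #1 scans every insn to accumulate the running stack-pointer offset and uses find_reg_note to detect REG_LABEL_OPERAND notes that force sp elimination to be turned off. For reference, a minimal sketch of the underlying find_reg_note idiom (the helper name is hypothetical): find_reg_note returns the EXPR_LIST entry of the given kind from the insn's REG_NOTES, or NULL_RTX, and the payload lives in XEXP (note, 0).

/* Hypothetical helper: true iff INSN carries a REG_LABEL_OPERAND note.
   find_reg_note returns the matching note, or NULL_RTX if none.  */
static bool
insn_has_label_operand_p (rtx_insn *insn)
{
  return find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX) != NULL_RTX;
}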
Code example #2
File: combine-stack-adj.c Project: FilipinOTech/gcc
static void
adjust_frame_related_expr (rtx last_sp_set, rtx insn,
			   HOST_WIDE_INT this_adjust)
{
  rtx note = find_reg_note (last_sp_set, REG_FRAME_RELATED_EXPR, NULL_RTX);
  rtx new_expr = NULL_RTX;

  if (note == NULL_RTX && RTX_FRAME_RELATED_P (insn))
    return;

  if (note
      && GET_CODE (XEXP (note, 0)) == SEQUENCE
      && XVECLEN (XEXP (note, 0), 0) >= 2)
    {
      rtx expr = XEXP (note, 0);
      rtx last = XVECEXP (expr, 0, XVECLEN (expr, 0) - 1);
      int i;

      if (GET_CODE (last) == SET
	  && RTX_FRAME_RELATED_P (last) == RTX_FRAME_RELATED_P (insn)
	  && SET_DEST (last) == stack_pointer_rtx
	  && GET_CODE (SET_SRC (last)) == PLUS
	  && XEXP (SET_SRC (last), 0) == stack_pointer_rtx
	  && CONST_INT_P (XEXP (SET_SRC (last), 1)))
	{
	  XEXP (SET_SRC (last), 1)
	    = GEN_INT (INTVAL (XEXP (SET_SRC (last), 1)) + this_adjust);
	  return;
	}

      new_expr = gen_rtx_SEQUENCE (VOIDmode,
				   rtvec_alloc (XVECLEN (expr, 0) + 1));
      for (i = 0; i < XVECLEN (expr, 0); i++)
	XVECEXP (new_expr, 0, i) = XVECEXP (expr, 0, i);
    }
  else
    {
      new_expr = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (2));
      if (note)
	XVECEXP (new_expr, 0, 0) = XEXP (note, 0);
      else
	{
	  rtx expr = copy_rtx (single_set_for_csa (last_sp_set));

	  XEXP (SET_SRC (expr), 1)
	    = GEN_INT (INTVAL (XEXP (SET_SRC (expr), 1)) - this_adjust);
	  RTX_FRAME_RELATED_P (expr) = 1;
	  XVECEXP (new_expr, 0, 0) = expr;
	}
    }

  XVECEXP (new_expr, 0, XVECLEN (new_expr, 0) - 1)
    = copy_rtx (single_set_for_csa (insn));
  RTX_FRAME_RELATED_P (XVECEXP (new_expr, 0, XVECLEN (new_expr, 0) - 1))
    = RTX_FRAME_RELATED_P (insn);
  if (note)
    XEXP (note, 0) = new_expr;
  else
    add_reg_note (last_sp_set, REG_FRAME_RELATED_EXPR, new_expr);
}
Code example #3
static void
maybe_move_args_size_note (rtx last, rtx insn, bool after)
{
    rtx note, last_note;

    note = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);
    if (note == NULL)
        return;

    last_note = find_reg_note (last, REG_ARGS_SIZE, NULL_RTX);
    if (last_note)
    {
        /* The ARGS_SIZE notes are *not* cumulative.  They represent an
           absolute value, and the "most recent" note wins.  */
        if (!after)
            XEXP (last_note, 0) = XEXP (note, 0);
    }
    else
        add_reg_note (last, REG_ARGS_SIZE, XEXP (note, 0));
}
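Because REG_ARGS_SIZE notes carry an absolute value, updating one is just a matter of overwriting the payload, as example #3 does. A hedged sketch of that update-or-create pattern (the helper name is hypothetical):

/* Hypothetical helper: set INSN's REG_ARGS_SIZE note to ARGS_SIZE,
   adding the note if it is missing.  The notes are absolute, not
   cumulative, so overwriting the payload suffices.  */
static void
set_args_size_note (rtx_insn *insn, rtx args_size)
{
  rtx note = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);
  if (note)
    XEXP (note, 0) = args_size;
  else
    add_reg_note (insn, REG_ARGS_SIZE, args_size);
}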
Code example #4
File: rtl-factoring.c Project: AhmadTux/DragonFlyBSD
static void
split_pattern_seq (void)
{
  rtx insn;
  basic_block bb;
  rtx retlabel, retjmp, saveinsn;
  int i;
  seq_block sb;

  insn = pattern_seqs->insn;
  bb = BLOCK_FOR_INSN (insn);

  /* Get the label after the sequence.  This will be the return address.  The
     label will be referenced using a symbol_ref, so protect it from being
     deleted.  */
  retlabel = block_label_after (insn);
  LABEL_PRESERVE_P (retlabel) = 1;

  /* Emit an indirect jump via the link register after the sequence acting
     as the return insn.  Also emit a barrier and update the basic block.  */
  if (!find_reg_note (BB_END (bb), REG_NORETURN, NULL))
    retjmp = emit_jump_insn_after (gen_indirect_jump (pattern_seqs->link_reg),
                                   BB_END (bb));
  emit_barrier_after (BB_END (bb));

  /* Replace all outgoing edges with a new one to the block of RETLABEL.  */
  while (EDGE_COUNT (bb->succs) != 0)
    remove_edge (EDGE_SUCC (bb, 0));
  make_edge (bb, BLOCK_FOR_INSN (retlabel), EDGE_ABNORMAL);

  /* Split the sequence according to SEQ_BLOCKS and cache the label of the
     resulting basic blocks.  */
  i = 0;
  for (sb = seq_blocks; sb; sb = sb->next_seq_block)
    {
      for (; i < sb->length; i++)
        insn = prev_insn_in_block (insn);

      sb->label = block_label (split_block_and_df_analyze (bb, insn));
    }

  /* Emit an insn saving the return address to the link register before the
     sequence.  */
  saveinsn = emit_insn_after (gen_move_insn (pattern_seqs->link_reg,
                              gen_symbol_ref_rtx_for_label
                              (retlabel)), BB_END (bb));
  /* Update liveness info.  */
  SET_REGNO_REG_SET (df_get_live_out (bb),
                     REGNO (pattern_seqs->link_reg));
}
Code example #5
File: cfgexpand.c Project: seguljac/higpu
/* Verify that there is exactly one jump instruction since LAST and attach
   a REG_BR_PROB note specifying the probability.
   ??? We really ought to pass the probability down to RTL expanders and let it
   re-distribute it when the conditional expands into multiple conditionals.
   This is, however, difficult to do.  */
static void
add_reg_br_prob_note (FILE *dump_file, rtx last, int probability)
{
  if (profile_status == PROFILE_ABSENT)
    return;
  for (last = NEXT_INSN (last); last && NEXT_INSN (last); last = NEXT_INSN (last))
    if (GET_CODE (last) == JUMP_INSN)
      {
	/* It is common to emit a condjump-around-jump sequence when we don't
	   know how to reverse the conditional.  Special-case this.  */
	if (!any_condjump_p (last)
	    || GET_CODE (NEXT_INSN (last)) != JUMP_INSN
	    || !simplejump_p (NEXT_INSN (last))
	    || GET_CODE (NEXT_INSN (NEXT_INSN (last))) != BARRIER
	    || GET_CODE (NEXT_INSN (NEXT_INSN (NEXT_INSN (last)))) != CODE_LABEL
	    || NEXT_INSN (NEXT_INSN (NEXT_INSN (NEXT_INSN (last)))))
	  goto failed;
	if (find_reg_note (last, REG_BR_PROB, 0))
	  abort ();
	REG_NOTES (last)
	  = gen_rtx_EXPR_LIST (REG_BR_PROB,
			       GEN_INT (REG_BR_PROB_BASE - probability),
			       REG_NOTES (last));
	return;
      }
  if (!last || GET_CODE (last) != JUMP_INSN || !any_condjump_p (last))
      goto failed;
  if (find_reg_note (last, REG_BR_PROB, 0))
    abort ();
  REG_NOTES (last)
    = gen_rtx_EXPR_LIST (REG_BR_PROB,
			 GEN_INT (probability), REG_NOTES (last));
  return;
failed:
  if (dump_file)
    fprintf (dump_file, "Failed to add probability note\n");
}
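Note the complemented probability in the condjump-around-jump case: the emitted conditional branch tests the opposite condition, so its note must carry the complementary value. Branch probabilities here are fixed-point values scaled by REG_BR_PROB_BASE (10000), so the complement is simply:

/* Illustrative only: complement a branch probability that is scaled
   by REG_BR_PROB_BASE.  */
static int
invert_br_prob (int prob)
{
  return REG_BR_PROB_BASE - prob;
}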
Code example #6
File: value-prof.c Project: DJHartley/iphone-dev
static bool
rtl_value_profile_transformations (void)
{
  rtx insn, next;
  int changed = false;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (!INSN_P (insn))
	continue;

      /* Scan for insn carrying a histogram.  */
      if (!find_reg_note (insn, REG_VALUE_PROFILE, 0))
	continue;

      /* Ignore cold areas -- we are growing the code.  */
      if (!maybe_hot_bb_p (BLOCK_FOR_INSN (insn)))
	continue;

      if (dump_file)
	{
	  fprintf (dump_file, "Trying transformations on insn %d\n",
		   INSN_UID (insn));
	  print_rtl_single (dump_file, insn);
	}

      /* Transformations:  */
      if (flag_value_profile_transformations
	  && (mod_subtract_transform (insn)
	      || divmod_fixed_value_transform (insn)
	      || mod_pow2_value_transform (insn)))
	changed = true;
#ifdef HAVE_prefetch
      if (flag_speculative_prefetching
	  && speculative_prefetching_transform (insn))
	changed = true;
#endif
    }

  if (changed)
    {
      commit_edge_insertions ();
      allocate_reg_info (max_reg_num (), FALSE, FALSE);
    }

  return changed;
}
Code example #7
static void
maybe_propagate_label_ref (rtx jump_insn, rtx prev_nonjump_insn)
{
  rtx label_note, pc, pc_src;

  pc = pc_set (jump_insn);
  pc_src = pc != NULL ? SET_SRC (pc) : NULL;
  label_note = find_reg_note (prev_nonjump_insn, REG_LABEL_OPERAND, NULL);

  /* If the previous non-jump insn sets something to a label,
     something that this jump insn uses, make that label the primary
     target of this insn if we don't yet have any.  That previous
     insn must be a single_set and not refer to more than one label.
     The jump insn must not refer to other labels as jump targets
     and must be a plain (set (pc) ...), maybe in a parallel, and
     may refer to the item being set only directly or as one of the
     arms in an IF_THEN_ELSE.  */

  if (label_note != NULL && pc_src != NULL)
    {
      rtx label_set = single_set (prev_nonjump_insn);
      rtx label_dest = label_set != NULL ? SET_DEST (label_set) : NULL;

      if (label_set != NULL
	  /* The source must be the direct LABEL_REF, not a
	     PLUS, UNSPEC, IF_THEN_ELSE etc.  */
	  && GET_CODE (SET_SRC (label_set)) == LABEL_REF
	  && (rtx_equal_p (label_dest, pc_src)
	      || (GET_CODE (pc_src) == IF_THEN_ELSE
		  && (rtx_equal_p (label_dest, XEXP (pc_src, 1))
		      || rtx_equal_p (label_dest, XEXP (pc_src, 2))))))
	{
	  /* The CODE_LABEL referred to in the note must be the
	     CODE_LABEL in the LABEL_REF of the "set".  We can
	     conveniently use it for the marker function, which
	     requires a LABEL_REF wrapping.  */
	  gcc_assert (XEXP (label_note, 0) == XEXP (SET_SRC (label_set), 0));

	  mark_jump_label_1 (label_set, jump_insn, false, true);

	  gcc_assert (JUMP_LABEL (jump_insn) == XEXP (label_note, 0));
	}
    }
}
Code example #8
File: sibcall.c Project: robinsonb5/zpugcc
static void
purge_reg_equiv_notes (void)
{
  rtx insn;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      while (1)
	{
	  rtx note = find_reg_note (insn, REG_EQUIV, 0);
	  if (note)
	    {
	      /* Remove the note and keep looking at the notes for
		 this insn.  */
	      remove_note (insn, note);
	      continue;
	    }
	  break;
	}
    }
}
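The lookup in example #8 can be folded into the loop condition; a behaviorally equivalent sketch of the inner purge:

  rtx note;

  /* Strip every REG_EQUIV note attached to INSN.  */
  while ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL_RTX)
    remove_note (insn, note);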
Code example #9
File: compare-elim.c Project: jtramm/gcc
void
find_comparison_dom_walker::before_dom_children (basic_block bb)
{
  struct comparison *last_cmp;
  rtx_insn *insn, *next, *last_clobber;
  bool last_cmp_valid;
  bool need_purge = false;
  bitmap killed;

  killed = BITMAP_ALLOC (NULL);

  /* The last comparison that was made.  Will be reset to NULL
     once the flags are clobbered.  */
  last_cmp = NULL;

  /* True iff the last comparison has not been clobbered, nor
     have its inputs.  Used to eliminate duplicate compares.  */
  last_cmp_valid = false;

  /* The last insn that clobbered the flags, if that insn is of
     a form that may be valid for eliminating a following compare.
     To be reset to NULL once the flags are set otherwise.  */
  last_clobber = NULL;

  /* Propagate the last live comparison throughout the extended basic block. */
  if (single_pred_p (bb))
    {
      last_cmp = (struct comparison *) single_pred (bb)->aux;
      if (last_cmp)
	last_cmp_valid = last_cmp->inputs_valid;
    }

  for (insn = BB_HEAD (bb); insn; insn = next)
    {
      rtx src;

      next = (insn == BB_END (bb) ? NULL : NEXT_INSN (insn));
      if (!NONDEBUG_INSN_P (insn))
	continue;

      /* Compute the set of registers modified by this instruction.  */
      bitmap_clear (killed);
      df_simulate_find_defs (insn, killed);

      src = conforming_compare (insn);
      if (src)
	{
	  rtx eh_note = NULL;

	  if (cfun->can_throw_non_call_exceptions)
	    eh_note = find_reg_note (insn, REG_EH_REGION, NULL);

	  if (last_cmp_valid && can_eliminate_compare (src, eh_note, last_cmp))
	    {
	      if (eh_note)
		need_purge = true;
	      delete_insn (insn);
	      continue;
	    }

	  last_cmp = XCNEW (struct comparison);
	  last_cmp->insn = insn;
	  last_cmp->prev_clobber = last_clobber;
	  last_cmp->in_a = XEXP (src, 0);
	  last_cmp->in_b = XEXP (src, 1);
	  last_cmp->eh_note = eh_note;
	  last_cmp->orig_mode = GET_MODE (src);
	  all_compares.safe_push (last_cmp);

	  /* It's unusual, but be prepared for comparison patterns that
	     also clobber an input, or perhaps a scratch.  */
	  last_clobber = NULL;
	  last_cmp_valid = true;
	}

      /* Notice if this instruction kills the flags register.  */
      else if (bitmap_bit_p (killed, targetm.flags_regnum))
	{
	  /* See if this insn could be the "clobber" that eliminates
	     a future comparison.   */
	  last_clobber = (arithmetic_flags_clobber_p (insn) ? insn : NULL);

	  /* In either case, the previous compare is no longer valid.  */
	  last_cmp = NULL;
	  last_cmp_valid = false;
	}

      /* Notice if this instruction uses the flags register.  */
      else if (last_cmp)
	find_flags_uses_in_insn (last_cmp, insn);

      /* Notice if any of the inputs to the comparison have changed.  */
      if (last_cmp_valid
	  && (bitmap_bit_p (killed, REGNO (last_cmp->in_a))
	      || (REG_P (last_cmp->in_b)
		  && bitmap_bit_p (killed, REGNO (last_cmp->in_b)))))
	last_cmp_valid = false;
    }
Code example #10
static void
mark_all_labels (rtx f)
{
  rtx insn;
  rtx prev_nonjump_insn = NULL;

  for (insn = f; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
	mark_jump_label (PATTERN (insn), insn, 0);

	/* If the previous non-jump insn sets something to a label,
	   something that this jump insn uses, make that label the primary
	   target of this insn if we don't yet have any.  That previous
	   insn must be a single_set and not refer to more than one label.
	   The jump insn must not refer to other labels as jump targets
	   and must be a plain (set (pc) ...), maybe in a parallel, and
	   may refer to the item being set only directly or as one of the
	   arms in an IF_THEN_ELSE.  */
	if (! INSN_DELETED_P (insn)
	    && JUMP_P (insn)
	    && JUMP_LABEL (insn) == NULL)
	  {
	    rtx label_note = NULL;
	    rtx pc = pc_set (insn);
	    rtx pc_src = pc != NULL ? SET_SRC (pc) : NULL;

	    if (prev_nonjump_insn != NULL)
	      label_note
		= find_reg_note (prev_nonjump_insn, REG_LABEL_OPERAND, NULL);

	    if (label_note != NULL && pc_src != NULL)
	      {
		rtx label_set = single_set (prev_nonjump_insn);
		rtx label_dest
		  = label_set != NULL ? SET_DEST (label_set) : NULL;

		if (label_set != NULL
		    /* The source must be the direct LABEL_REF, not a
		       PLUS, UNSPEC, IF_THEN_ELSE etc.  */
		    && GET_CODE (SET_SRC (label_set)) == LABEL_REF
		    && (rtx_equal_p (label_dest, pc_src)
			|| (GET_CODE (pc_src) == IF_THEN_ELSE
			    && (rtx_equal_p (label_dest, XEXP (pc_src, 1))
				|| rtx_equal_p (label_dest,
						XEXP (pc_src, 2))))))

		  {
		    /* The CODE_LABEL referred to in the note must be the
		       CODE_LABEL in the LABEL_REF of the "set".  We can
		       conveniently use it for the marker function, which
		       requires a LABEL_REF wrapping.  */
		    gcc_assert (XEXP (label_note, 0)
				== XEXP (SET_SRC (label_set), 0));

		    mark_jump_label_1 (label_set, insn, false, true);
		    gcc_assert (JUMP_LABEL (insn)
				== XEXP (SET_SRC (label_set), 0));
		  }
	      }
	  }
	else if (! INSN_DELETED_P (insn))
	  prev_nonjump_insn = insn;
      }
    else if (LABEL_P (insn))
      prev_nonjump_insn = NULL;

  /* If we are in cfglayout mode, there may be non-insns between the
     basic blocks.  If those non-insns represent tablejump data, they
     contain label references that we must record.  */
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    {
      basic_block bb;
      rtx insn;
      FOR_EACH_BB (bb)
	{
	  for (insn = bb->il.rtl->header; insn; insn = NEXT_INSN (insn))
	    if (INSN_P (insn))
	      {
		gcc_assert (JUMP_TABLE_DATA_P (insn));
		mark_jump_label (PATTERN (insn), insn, 0);
	      }

	  for (insn = bb->il.rtl->footer; insn; insn = NEXT_INSN (insn))
	    if (INSN_P (insn))
	      {
		gcc_assert (JUMP_TABLE_DATA_P (insn));
		mark_jump_label (PATTERN (insn), insn, 0);
	      }
	}
    }
}
Code example #11
File: dojump.c Project: FilipinOTech/gcc
void
do_compare_rtx_and_jump (rtx op0, rtx op1, enum rtx_code code, int unsignedp,
			 enum machine_mode mode, rtx size, rtx if_false_label,
			 rtx if_true_label, int prob)
{
  rtx tem;
  rtx dummy_label = NULL_RTX;
  rtx last;

  /* Reverse the comparison if that is safe and we want to jump if it is
     false.  Also convert to the reverse comparison if the target can
     implement it.  */
  if ((! if_true_label
       || ! can_compare_p (code, mode, ccp_jump))
      && (! FLOAT_MODE_P (mode)
	  || code == ORDERED || code == UNORDERED
	  || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
	  || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
    {
      enum rtx_code rcode;
      if (FLOAT_MODE_P (mode))
        rcode = reverse_condition_maybe_unordered (code);
      else
        rcode = reverse_condition (code);

      /* Canonicalize to UNORDERED for the libcall.  */
      if (can_compare_p (rcode, mode, ccp_jump)
	  || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
	{
          tem = if_true_label;
          if_true_label = if_false_label;
          if_false_label = tem;
	  code = rcode;
	  prob = inv (prob);
	}
    }

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  do_pending_stack_adjust ();

  code = unsignedp ? unsigned_condition (code) : code;
  if (0 != (tem = simplify_relational_operation (code, mode, VOIDmode,
						 op0, op1)))
    {
      if (CONSTANT_P (tem))
	{
	  rtx label = (tem == const0_rtx || tem == CONST0_RTX (mode))
		      ? if_false_label : if_true_label;
	  if (label)
	    emit_jump (label);
	  return;
	}

      code = GET_CODE (tem);
      mode = GET_MODE (tem);
      op0 = XEXP (tem, 0);
      op1 = XEXP (tem, 1);
      unsignedp = (code == GTU || code == LTU || code == GEU || code == LEU);
    }

  if (! if_true_label)
    dummy_label = if_true_label = gen_label_rtx ();

  if (GET_MODE_CLASS (mode) == MODE_INT
      && ! can_compare_p (code, mode, ccp_jump))
    {
      switch (code)
	{
	case LTU:
	  do_jump_by_parts_greater_rtx (mode, 1, op1, op0,
					if_false_label, if_true_label, prob);
	  break;

	case LEU:
	  do_jump_by_parts_greater_rtx (mode, 1, op0, op1,
					if_true_label, if_false_label,
					inv (prob));
	  break;

	case GTU:
	  do_jump_by_parts_greater_rtx (mode, 1, op0, op1,
					if_false_label, if_true_label, prob);
	  break;

	case GEU:
	  do_jump_by_parts_greater_rtx (mode, 1, op1, op0,
					if_true_label, if_false_label,
					inv (prob));
	  break;

	case LT:
	  do_jump_by_parts_greater_rtx (mode, 0, op1, op0,
					if_false_label, if_true_label, prob);
	  break;

	case LE:
	  do_jump_by_parts_greater_rtx (mode, 0, op0, op1,
					if_true_label, if_false_label,
					inv (prob));
	  break;

	case GT:
	  do_jump_by_parts_greater_rtx (mode, 0, op0, op1,
					if_false_label, if_true_label, prob);
	  break;

	case GE:
	  do_jump_by_parts_greater_rtx (mode, 0, op1, op0,
					if_true_label, if_false_label,
					inv (prob));
	  break;

	case EQ:
	  do_jump_by_parts_equality_rtx (mode, op0, op1, if_false_label,
					 if_true_label, prob);
	  break;

	case NE:
	  do_jump_by_parts_equality_rtx (mode, op0, op1, if_true_label,
					 if_false_label, inv (prob));
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      if (GET_MODE_CLASS (mode) == MODE_FLOAT
	  && ! can_compare_p (code, mode, ccp_jump)
	  && can_compare_p (swap_condition (code), mode, ccp_jump))
	{
	  rtx tmp;
	  code = swap_condition (code);
	  tmp = op0;
	  op0 = op1;
	  op1 = tmp;
	}

      else if (GET_MODE_CLASS (mode) == MODE_FLOAT
	       && ! can_compare_p (code, mode, ccp_jump)

	       /* Never split ORDERED and UNORDERED.  These must be implemented.  */
	       && (code != ORDERED && code != UNORDERED)

               /* Split a floating-point comparison if we can jump on other
	          conditions...  */
	       && (have_insn_for (COMPARE, mode)

	           /* ... or if there is no libcall for it.  */
	           || code_to_optab[code] == NULL))
        {
	  enum rtx_code first_code;
	  bool and_them = split_comparison (code, mode, &first_code, &code);

	  /* If there are no NaNs, the first comparison should always fall
	     through.  */
	  if (!HONOR_NANS (mode))
	    gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));

	  else
	    {
	      if (and_them)
		{
		  rtx dest_label;
		  /* If we only jump if true, just bypass the second jump.  */
		  if (! if_false_label)
		    {
		      if (! dummy_label)
		        dummy_label = gen_label_rtx ();
		      dest_label = dummy_label;
		    }
		  else
		    dest_label = if_false_label;
                  do_compare_rtx_and_jump (op0, op1, first_code, unsignedp, mode,
					   size, dest_label, NULL_RTX, prob);
		}
              else
                do_compare_rtx_and_jump (op0, op1, first_code, unsignedp, mode,
					 size, NULL_RTX, if_true_label, prob);
	    }
	}

      last = get_last_insn ();
      emit_cmp_and_jump_insns (op0, op1, code, size, mode, unsignedp,
			       if_true_label);
      if (prob != -1 && profile_status != PROFILE_ABSENT)
	{
	  for (last = NEXT_INSN (last);
	       last && NEXT_INSN (last);
	       last = NEXT_INSN (last))
	    if (JUMP_P (last))
	      break;
	  if (!last
	      || !JUMP_P (last)
	      || NEXT_INSN (last)
	      || !any_condjump_p (last))
	    {
	      if (dump_file)
		fprintf (dump_file, "Failed to add probability note\n");
	    }
	  else
	    {
	      gcc_assert (!find_reg_note (last, REG_BR_PROB, 0));
	      add_reg_note (last, REG_BR_PROB, GEN_INT (prob));
	    }
	}
    }

  if (if_false_label)
    emit_jump (if_false_label);
  if (dummy_label)
    emit_label (dummy_label);
}
Code example #12
static void
force_move_args_size_note (basic_block bb, rtx prev, rtx insn)
{
    rtx note, test, next_candidate, prev_candidate;

    /* If PREV exists, tail-call to the logic in the other function.  */
    if (prev)
    {
        maybe_move_args_size_note (prev, insn, false);
        return;
    }

    /* First, make sure there's anything that needs doing.  */
    note = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);
    if (note == NULL)
        return;

    /* We need to find a spot between the previous and next exception points
       where we can place the note and "properly" deallocate the arguments.  */
    next_candidate = prev_candidate = NULL;

    /* It is often the case that we have insns in the order:
         call
         add sp (previous deallocation)
         sub sp (align for next arglist)
         push arg
       and the add/sub cancel.  Therefore we begin by searching forward.  */

    test = insn;
    while ((test = next_active_insn_bb (bb, test)) != NULL)
    {
        /* Found an existing note: nothing to do.  */
        if (find_reg_note (test, REG_ARGS_SIZE, NULL_RTX))
            return;
        /* Found something that affects unwinding.  Stop searching.  */
        if (CALL_P (test) || !insn_nothrow_p (test))
            break;
        if (next_candidate == NULL)
            next_candidate = test;
    }

    test = insn;
    while ((test = prev_active_insn_bb (bb, test)) != NULL)
    {
        rtx tnote;
        /* Found a place that seems logical to adjust the stack.  */
        tnote = find_reg_note (test, REG_ARGS_SIZE, NULL_RTX);
        if (tnote)
        {
            XEXP (tnote, 0) = XEXP (note, 0);
            return;
        }
        if (prev_candidate == NULL)
            prev_candidate = test;
        /* Found something that affects unwinding.  Stop searching.  */
        if (CALL_P (test) || !insn_nothrow_p (test))
            break;
    }

    if (prev_candidate)
        test = prev_candidate;
    else if (next_candidate)
        test = next_candidate;
    else
    {
        /* ??? We *must* have a place, lest we ICE on the lost adjustment.
           Options are: dummy clobber insn, nop, or prevent the removal of
           the sp += 0 insn.  */
        /* TODO: Find another way to indicate to the dwarf2 code that we
           have not in fact lost an adjustment.  */
        test = emit_insn_before (gen_rtx_CLOBBER (VOIDmode, const0_rtx), insn);
    }
    add_reg_note (test, REG_ARGS_SIZE, XEXP (note, 0));
}
Code example #13
File: internal-fn.c Project: Alexpux/GCC
void
ubsan_expand_si_overflow_neg_check (gimple stmt)
{
  rtx res, op1;
  tree lhs, fn, arg1;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  lhs = gimple_call_lhs (stmt);
  arg1 = gimple_call_arg (stmt, 1);
  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
        }
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      /* Compute the operation.  On the RTL level, the negation is always
	 unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      emit_cmp_and_jump_insns (op1, minv, NE, NULL_RTX, mode, false,
			       done_label, PROB_VERY_LIKELY);
    }

  emit_label (do_error);
  /* Expand the ubsan builtin call.  */
  push_temp_slots ();
  fn = ubsan_build_overflow_builtin (NEGATE_EXPR, gimple_location (stmt),
				     TREE_TYPE (arg1), arg1, NULL_TREE);
  expand_normal (fn);
  pop_temp_slots ();
  do_pending_stack_adjust ();

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    emit_move_insn (target, res);
}
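The maybe_expand_insn path above attaches a REG_BR_PROB note to the branch-to-error jump only when the expander did not already supply one. That guard is a recurring pattern in these examples; a hedged sketch (the helper name is hypothetical):

/* Hypothetical helper: attach PROB (scaled by REG_BR_PROB_BASE) as a
   REG_BR_PROB note on JUMP unless one is already present.  */
static void
maybe_add_br_prob_note (rtx_insn *jump, int prob)
{
  if (JUMP_P (jump)
      && any_condjump_p (jump)
      && !find_reg_note (jump, REG_BR_PROB, NULL_RTX))
    add_int_reg_note (jump, REG_BR_PROB, prob);
}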
Code example #14
File: dce.c Project: MaxKellermann/gcc
static bool
deletable_insn_p (rtx_insn *insn, bool fast, bitmap arg_stores)
{
  rtx body, x;
  int i;
  df_ref def;

  if (CALL_P (insn)
      /* We cannot delete calls inside of the recursive dce because
	 this may cause basic blocks to be deleted and this messes up
	 the rest of the stack of optimization passes.  */
      && (!df_in_progress)
      /* We cannot delete pure or const sibling calls because it is
	 hard to see the result.  */
      && (!SIBLING_CALL_P (insn))
      /* We can delete dead const or pure calls as long as they do not
         infinite loop.  */
      && (RTL_CONST_OR_PURE_CALL_P (insn)
	  && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn)))
    return find_call_stack_args (as_a <rtx_call_insn *> (insn), false,
				 fast, arg_stores);

  /* Don't delete jumps, notes and the like.  */
  if (!NONJUMP_INSN_P (insn))
    return false;

  /* Don't delete insns that may throw if we cannot do so.  */
  if (!(cfun->can_delete_dead_exceptions && can_alter_cfg)
      && !insn_nothrow_p (insn))
    return false;

  /* If INSN sets a global_reg, leave it untouched.  */
  FOR_EACH_INSN_DEF (def, insn)
    if (HARD_REGISTER_NUM_P (DF_REF_REGNO (def))
	&& global_regs[DF_REF_REGNO (def)])
      return false;
    /* Initialization of pseudo PIC register should never be removed.  */
    else if (DF_REF_REG (def) == pic_offset_table_rtx
	     && REGNO (pic_offset_table_rtx) >= FIRST_PSEUDO_REGISTER)
      return false;

  /* Callee-save restores are needed.  */
  if (RTX_FRAME_RELATED_P (insn)
      && crtl->shrink_wrapped_separate
      && find_reg_note (insn, REG_CFA_RESTORE, NULL))
    return false;

  body = PATTERN (insn);
  switch (GET_CODE (body))
    {
    case USE:
    case VAR_LOCATION:
      return false;

    case CLOBBER:
      if (fast)
	{
	  /* A CLOBBER of a dead pseudo register serves no purpose.
	     That is not necessarily true for hard registers until
	     after reload.  */
	  x = XEXP (body, 0);
	  return REG_P (x) && (!HARD_REGISTER_P (x) || reload_completed);
	}
      else
	/* Because of the way that use-def chains are built, it is not
	   possible to tell if the clobber is dead because it can
	   never be the target of a use-def chain.  */
	return false;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	if (!deletable_insn_p_1 (XVECEXP (body, 0, i)))
	  return false;
      return true;

    default:
      return deletable_insn_p_1 (body);
    }
}
Code example #15
static void
fixup_reorder_chain (void)
{
  basic_block bb, prev_bb;
  int index;
  rtx insn = NULL;

  if (cfg_layout_function_header)
    {
      set_first_insn (cfg_layout_function_header);
      insn = cfg_layout_function_header;
      while (NEXT_INSN (insn))
	insn = NEXT_INSN (insn);
    }

  /* First do the bulk reordering -- rechain the blocks without regard to
     the needed changes to jumps and labels.  */

  for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0;
       bb != 0;
       bb = bb->rbi->next, index++)
    {
      if (bb->rbi->header)
	{
	  if (insn)
	    NEXT_INSN (insn) = bb->rbi->header;
	  else
	    set_first_insn (bb->rbi->header);
	  PREV_INSN (bb->rbi->header) = insn;
	  insn = bb->rbi->header;
	  while (NEXT_INSN (insn))
	    insn = NEXT_INSN (insn);
	}
      if (insn)
	NEXT_INSN (insn) = BB_HEAD (bb);
      else
	set_first_insn (BB_HEAD (bb));
      PREV_INSN (BB_HEAD (bb)) = insn;
      insn = BB_END (bb);
      if (bb->rbi->footer)
	{
	  NEXT_INSN (insn) = bb->rbi->footer;
	  PREV_INSN (bb->rbi->footer) = insn;
	  while (NEXT_INSN (insn))
	    insn = NEXT_INSN (insn);
	}
    }

  if (index != n_basic_blocks)
    abort ();

  NEXT_INSN (insn) = cfg_layout_function_footer;
  if (cfg_layout_function_footer)
    PREV_INSN (cfg_layout_function_footer) = insn;

  while (NEXT_INSN (insn))
    insn = NEXT_INSN (insn);

  set_last_insn (insn);
#ifdef ENABLE_CHECKING
  verify_insn_chain ();
#endif
  delete_dead_jumptables ();

  /* Now add jumps and labels as needed to match the blocks' new
     outgoing edges.  */

  for (bb = ENTRY_BLOCK_PTR->next_bb; bb ; bb = bb->rbi->next)
    {
      edge e_fall, e_taken, e;
      rtx bb_end_insn;
      basic_block nb;

      if (bb->succ == NULL)
	continue;

      /* Find the old fallthru edge, and another non-EH edge for
	 a taken jump.  */
      e_taken = e_fall = NULL;
      for (e = bb->succ; e ; e = e->succ_next)
	if (e->flags & EDGE_FALLTHRU)
	  e_fall = e;
	else if (! (e->flags & EDGE_EH))
	  e_taken = e;

      bb_end_insn = BB_END (bb);
      if (GET_CODE (bb_end_insn) == JUMP_INSN)
	{
	  if (any_condjump_p (bb_end_insn))
	    {
	      /* If the old fallthru is still next, nothing to do.  */
	      if (bb->rbi->next == e_fall->dest
	          || (!bb->rbi->next
		      && e_fall->dest == EXIT_BLOCK_PTR))
		continue;

	      /* The degenerate case of a conditional jump jumping to the next
		 instruction can happen on targets that have jumps with side
		 effects.

		 Temporarily create a duplicated edge representing the branch.
		 It will be cleaned up by force_nonfallthru_and_redirect,
		 which would otherwise be confused by a fallthru edge not
		 pointing to the next basic block.  */
	      if (!e_taken)
		{
		  rtx note;
		  edge e_fake;

		  e_fake = unchecked_make_edge (bb, e_fall->dest, 0);

		  if (!redirect_jump (BB_END (bb), block_label (bb), 0))
		    abort ();
		  note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
		  if (note)
		    {
		      int prob = INTVAL (XEXP (note, 0));

		      e_fake->probability = prob;
		      e_fake->count = e_fall->count * prob / REG_BR_PROB_BASE;
		      e_fall->probability -= e_fall->probability;
		      e_fall->count -= e_fake->count;
		      if (e_fall->probability < 0)
			e_fall->probability = 0;
		      if (e_fall->count < 0)
			e_fall->count = 0;
		    }
		}
	      /* There is one special case: if *neither* block is next,
		 such as happens at the very end of a function, then we'll
		 need to add a new unconditional jump.  Choose the taken
		 edge based on known or assumed probability.  */
	      else if (bb->rbi->next != e_taken->dest)
		{
		  rtx note = find_reg_note (bb_end_insn, REG_BR_PROB, 0);

		  if (note
		      && INTVAL (XEXP (note, 0)) < REG_BR_PROB_BASE / 2
		      && invert_jump (bb_end_insn,
				      label_for_bb (e_fall->dest), 0))
		    {
		      e_fall->flags &= ~EDGE_FALLTHRU;
		      e_taken->flags |= EDGE_FALLTHRU;
		      update_br_prob_note (bb);
		      e = e_fall, e_fall = e_taken, e_taken = e;
		    }
		}

	      /* Otherwise we can try to invert the jump.  This will
		 basically never fail; however, keep up the pretense.  */
	      else if (invert_jump (bb_end_insn,
				    label_for_bb (e_fall->dest), 0))
		{
		  e_fall->flags &= ~EDGE_FALLTHRU;
		  e_taken->flags |= EDGE_FALLTHRU;
		  update_br_prob_note (bb);
		  continue;
		}
	    }
	  else if (returnjump_p (bb_end_insn))
	    continue;
	  else
	    {
	      /* Otherwise we have some switch or computed jump.  In the
		 99% case, there should not have been a fallthru edge.  */
	      if (! e_fall)
		continue;

#ifdef CASE_DROPS_THROUGH
	      /* Except for VAX.  Since we didn't have predication for the
		 tablejump, the fallthru block should not have moved.  */
	      if (bb->rbi->next == e_fall->dest)
		continue;
	      bb_end_insn = skip_insns_after_block (bb);
#else
	      abort ();
#endif
	    }
	}
      else
	{
	  /* No fallthru implies a noreturn function with EH edges, or
	     something similarly bizarre.  In any case, we don't need to
	     do anything.  */
	  if (! e_fall)
	    continue;

	  /* If the fallthru block is still next, nothing to do.  */
	  if (bb->rbi->next == e_fall->dest)
	    continue;

	  /* A fallthru to exit block.  */
	  if (!bb->rbi->next && e_fall->dest == EXIT_BLOCK_PTR)
	    continue;
	}

      /* We got here if we need to add a new jump insn.  */
      nb = force_nonfallthru (e_fall);
      if (nb)
	{
	  cfg_layout_initialize_rbi (nb);
	  nb->rbi->visited = 1;
	  nb->rbi->next = bb->rbi->next;
	  bb->rbi->next = nb;
	  /* Don't process this new block.  */
	  bb = nb;
	}
    }

  /* Put basic_block_info in the new order.  */

  if (rtl_dump_file)
    {
      fprintf (rtl_dump_file, "Reordered sequence:\n");
      for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0; bb; bb = bb->rbi->next, index ++)
	{
	  fprintf (rtl_dump_file, " %i ", index);
	  if (bb->rbi->original)
	    fprintf (rtl_dump_file, "duplicate of %i ",
		     bb->rbi->original->index);
	  else if (forwarder_block_p (bb) && GET_CODE (BB_HEAD (bb)) != CODE_LABEL)
	    fprintf (rtl_dump_file, "compensation ");
	  else
	    fprintf (rtl_dump_file, "bb %i ", bb->index);
	  fprintf (rtl_dump_file, " [%i]\n", bb->frequency);
	}
    }

  prev_bb = ENTRY_BLOCK_PTR;
  bb = ENTRY_BLOCK_PTR->next_bb;
  index = 0;

  for (; bb; prev_bb = bb, bb = bb->rbi->next, index ++)
    {
      bb->index = index;
      BASIC_BLOCK (index) = bb;

      bb->prev_bb = prev_bb;
      prev_bb->next_bb = bb;
    }
  prev_bb->next_bb = EXIT_BLOCK_PTR;
  EXIT_BLOCK_PTR->prev_bb = prev_bb;

  /* Annoying special case - jump around dead jumptables left in the code.  */
  FOR_EACH_BB (bb)
    {
      edge e;
      for (e = bb->succ; e && !(e->flags & EDGE_FALLTHRU); e = e->succ_next)
	continue;
      if (e && !can_fallthru (e->src, e->dest))
	force_nonfallthru (e);
    }
}
Code example #16
static void
fixup_reorder_chain (void)
{
  basic_block bb, prev_bb;
  int index;
  rtx insn = NULL;

  if (cfg_layout_function_header)
    {
      set_first_insn (cfg_layout_function_header);
      insn = cfg_layout_function_header;
      while (NEXT_INSN (insn))
	insn = NEXT_INSN (insn);
    }

  /* First do the bulk reordering -- rechain the blocks without regard to
     the needed changes to jumps and labels.  */

  for (bb = ENTRY_BLOCK_PTR->next_bb, index = NUM_FIXED_BLOCKS;
       bb != 0;
       bb = bb->aux, index++)
    {
      if (bb->il.rtl->header)
	{
	  if (insn)
	    NEXT_INSN (insn) = bb->il.rtl->header;
	  else
	    set_first_insn (bb->il.rtl->header);
	  PREV_INSN (bb->il.rtl->header) = insn;
	  insn = bb->il.rtl->header;
	  while (NEXT_INSN (insn))
	    insn = NEXT_INSN (insn);
	}
      if (insn)
	NEXT_INSN (insn) = BB_HEAD (bb);
      else
	set_first_insn (BB_HEAD (bb));
      PREV_INSN (BB_HEAD (bb)) = insn;
      insn = BB_END (bb);
      if (bb->il.rtl->footer)
	{
	  NEXT_INSN (insn) = bb->il.rtl->footer;
	  PREV_INSN (bb->il.rtl->footer) = insn;
	  while (NEXT_INSN (insn))
	    insn = NEXT_INSN (insn);
	}
    }

  gcc_assert (index == n_basic_blocks);

  NEXT_INSN (insn) = cfg_layout_function_footer;
  if (cfg_layout_function_footer)
    PREV_INSN (cfg_layout_function_footer) = insn;

  while (NEXT_INSN (insn))
    insn = NEXT_INSN (insn);

  set_last_insn (insn);
#ifdef ENABLE_CHECKING
  verify_insn_chain ();
#endif
  delete_dead_jumptables ();

  /* Now add jumps and labels as needed to match the blocks' new
     outgoing edges.  */

  for (bb = ENTRY_BLOCK_PTR->next_bb; bb ; bb = bb->aux)
    {
      edge e_fall, e_taken, e;
      rtx bb_end_insn;
      basic_block nb;
      edge_iterator ei;

      if (EDGE_COUNT (bb->succs) == 0)
	continue;

      /* Find the old fallthru edge, and another non-EH edge for
	 a taken jump.  */
      e_taken = e_fall = NULL;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->flags & EDGE_FALLTHRU)
	  e_fall = e;
	else if (! (e->flags & EDGE_EH))
	  e_taken = e;

      bb_end_insn = BB_END (bb);
      if (JUMP_P (bb_end_insn))
	{
	  if (any_condjump_p (bb_end_insn))
	    {
	      /* If the old fallthru is still next, nothing to do.  */
	      if (bb->aux == e_fall->dest
		  || e_fall->dest == EXIT_BLOCK_PTR)
		continue;

	      /* The degenerate case of a conditional jump jumping to the next
		 instruction can happen for jumps with side effects.  We need
		 to construct a forwarder block, and this will be done just
		 fine by force_nonfallthru below.  */
	      if (!e_taken)
		;

	      /* There is another special case: if *neither* block is next,
		 such as happens at the very end of a function, then we'll
		 need to add a new unconditional jump.  Choose the taken
		 edge based on known or assumed probability.  */
	      else if (bb->aux != e_taken->dest)
		{
		  rtx note = find_reg_note (bb_end_insn, REG_BR_PROB, 0);

		  if (note
		      && INTVAL (XEXP (note, 0)) < REG_BR_PROB_BASE / 2
		      && invert_jump (bb_end_insn,
				      (e_fall->dest == EXIT_BLOCK_PTR
				       ? NULL_RTX
				       : label_for_bb (e_fall->dest)), 0))
		    {
		      e_fall->flags &= ~EDGE_FALLTHRU;
#ifdef ENABLE_CHECKING
		      gcc_assert (could_fall_through
				  (e_taken->src, e_taken->dest));
#endif
		      e_taken->flags |= EDGE_FALLTHRU;
		      update_br_prob_note (bb);
		      e = e_fall, e_fall = e_taken, e_taken = e;
		    }
		}

	      /* If the "jumping" edge is a crossing edge, and the fall
		 through edge is non-crossing, leave things as they are.  */
	      else if ((e_taken->flags & EDGE_CROSSING)
		       && !(e_fall->flags & EDGE_CROSSING))
		continue;

	      /* Otherwise we can try to invert the jump.  This will
		 basically never fail; however, keep up the pretense.  */
	      else if (invert_jump (bb_end_insn,
				    (e_fall->dest == EXIT_BLOCK_PTR
				     ? NULL_RTX
				     : label_for_bb (e_fall->dest)), 0))
		{
		  e_fall->flags &= ~EDGE_FALLTHRU;
#ifdef ENABLE_CHECKING
		  gcc_assert (could_fall_through
			      (e_taken->src, e_taken->dest));
#endif
		  e_taken->flags |= EDGE_FALLTHRU;
		  update_br_prob_note (bb);
		  continue;
		}
	    }
	  else
	    {
	      /* Otherwise we have some return, switch or computed
		 jump.  In the 99% case, there should not have been a
		 fallthru edge.  */
	      gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
	      continue;
	    }
	}
      else
	{
	  /* No fallthru implies a noreturn function with EH edges, or
	     something similarly bizarre.  In any case, we don't need to
	     do anything.  */
	  if (! e_fall)
	    continue;

	  /* If the fallthru block is still next, nothing to do.  */
	  if (bb->aux == e_fall->dest)
	    continue;

	  /* A fallthru to exit block.  */
	  if (e_fall->dest == EXIT_BLOCK_PTR)
	    continue;
	}

      /* We got here if we need to add a new jump insn.  */
      nb = force_nonfallthru (e_fall);
      if (nb)
	{
	  nb->il.rtl->visited = 1;
	  nb->aux = bb->aux;
	  bb->aux = nb;
	  /* Don't process this new block.  */
	  bb = nb;

	  /* Make sure the new bb is tagged for the correct section (same as
	     the fall-thru source, since you cannot fall through across
	     section boundaries).  */
	  BB_COPY_PARTITION (e_fall->src, single_pred (bb));
	  if (flag_reorder_blocks_and_partition
	      && targetm.have_named_sections
	      && JUMP_P (BB_END (bb))
	      && !any_condjump_p (BB_END (bb))
	      && (EDGE_SUCC (bb, 0)->flags & EDGE_CROSSING))
	    REG_NOTES (BB_END (bb)) = gen_rtx_EXPR_LIST
	      (REG_CROSSING_JUMP, NULL_RTX, REG_NOTES (BB_END (bb)));
	}
    }

  /* Put basic_block_info in the new order.  */

  if (dump_file)
    {
      fprintf (dump_file, "Reordered sequence:\n");
      for (bb = ENTRY_BLOCK_PTR->next_bb, index = NUM_FIXED_BLOCKS;
	   bb;
	   bb = bb->aux, index++)
	{
	  fprintf (dump_file, " %i ", index);
	  if (get_bb_original (bb))
	    fprintf (dump_file, "duplicate of %i ",
		     get_bb_original (bb)->index);
	  else if (forwarder_block_p (bb)
		   && !LABEL_P (BB_HEAD (bb)))
	    fprintf (dump_file, "compensation ");
	  else
	    fprintf (dump_file, "bb %i ", bb->index);
	  fprintf (dump_file, " [%i]\n", bb->frequency);
	}
    }

  prev_bb = ENTRY_BLOCK_PTR;
  bb = ENTRY_BLOCK_PTR->next_bb;
  index = NUM_FIXED_BLOCKS;

  for (; bb; prev_bb = bb, bb = bb->aux, index ++)
    {
      bb->index = index;
      SET_BASIC_BLOCK (index, bb);

      bb->prev_bb = prev_bb;
      prev_bb->next_bb = bb;
    }
  prev_bb->next_bb = EXIT_BLOCK_PTR;
  EXIT_BLOCK_PTR->prev_bb = prev_bb;

  /* Annoying special case - jump around dead jumptables left in the code.  */
  FOR_EACH_BB (bb)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->flags & EDGE_FALLTHRU)
	  break;

      if (e && !can_fallthru (e->src, e->dest))
	force_nonfallthru (e);
    }
}
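Examples #15 and #16 read the note's payload rather than merely testing for its presence: INTVAL (XEXP (note, 0)) yields the recorded probability, which decides whether inverting the jump is profitable. A sketch of that read, matching the older GCC convention used here where the payload is a CONST_INT (the helper name is hypothetical):

/* Hypothetical helper: return the branch probability recorded on JUMP,
   or -1 if no REG_BR_PROB note is attached.  */
static int
recorded_br_prob (rtx jump)
{
  rtx note = find_reg_note (jump, REG_BR_PROB, NULL_RTX);
  return note ? (int) INTVAL (XEXP (note, 0)) : -1;
}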
Code example #17
File: internal-fn.c Project: Alexpux/GCC
void
ubsan_expand_si_overflow_addsub_check (tree_code code, gimple stmt)
{
  rtx res, op0, op1;
  tree lhs, fn, arg0, arg1;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  lhs = gimple_call_lhs (stmt);
  arg0 = gimple_call_arg (stmt, 0);
  arg1 = gimple_call_arg (stmt, 1);
  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

  enum insn_code icode
    = optab_handler (code == PLUS_EXPR ? addv4_optab : subv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
        }
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      rtx_code_label *sub_check = gen_label_rtx ();
      int pos_neg = 3;

      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);

      /* If we can prove one of the arguments (for MINUS_EXPR only
	 the second operand, as subtraction is not commutative) is always
	 non-negative or always negative, we can do just one comparison
	 and conditional jump instead of 2 at runtime, 3 present in the
	 emitted code.  If one of the arguments is CONST_INT, all we
	 need is to make sure it is op1, then the first
	 emit_cmp_and_jump_insns will be just folded.  Otherwise try
	 to use range info if available.  */
      if (code == PLUS_EXPR && CONST_INT_P (op0))
	{
	  rtx tem = op0;
	  op0 = op1;
	  op1 = tem;
	}
      else if (CONST_INT_P (op1))
	;
      else if (code == PLUS_EXPR && TREE_CODE (arg0) == SSA_NAME)
	{
	  wide_int arg0_min, arg0_max;
	  if (get_range_info (arg0, &arg0_min, &arg0_max) == VR_RANGE)
	    {
	      if (!wi::neg_p (arg0_min, TYPE_SIGN (TREE_TYPE (arg0))))
		pos_neg = 1;
	      else if (wi::neg_p (arg0_max, TYPE_SIGN (TREE_TYPE (arg0))))
		pos_neg = 2;
	    }
	  if (pos_neg != 3)
	    {
	      rtx tem = op0;
	      op0 = op1;
	      op1 = tem;
	    }
	}
      if (pos_neg == 3 && !CONST_INT_P (op1) && TREE_CODE (arg1) == SSA_NAME)
	{
	  wide_int arg1_min, arg1_max;
	  if (get_range_info (arg1, &arg1_min, &arg1_max) == VR_RANGE)
	    {
	      if (!wi::neg_p (arg1_min, TYPE_SIGN (TREE_TYPE (arg1))))
		pos_neg = 1;
	      else if (wi::neg_p (arg1_max, TYPE_SIGN (TREE_TYPE (arg1))))
		pos_neg = 2;
	    }
	}

      /* If the op1 is negative, we have to use a different check.  */
      if (pos_neg == 3)
	emit_cmp_and_jump_insns (op1, const0_rtx, LT, NULL_RTX, mode,
				 false, sub_check, PROB_EVEN);

      /* Compare the result of the operation with one of the operands.  */
      if (pos_neg & 1)
	emit_cmp_and_jump_insns (res, op0, code == PLUS_EXPR ? GE : LE,
				 NULL_RTX, mode, false, done_label,
				 PROB_VERY_LIKELY);

      /* If we get here, we have to print the error.  */
      if (pos_neg == 3)
	{
	  emit_jump (do_error);

	  emit_label (sub_check);
	}

      /* We have k = a + b for b < 0 here.  k <= a must hold.  */
      if (pos_neg & 2)
	emit_cmp_and_jump_insns (res, op0, code == PLUS_EXPR ? LE : GE,
				 NULL_RTX, mode, false, done_label,
				 PROB_VERY_LIKELY);
    }

  emit_label (do_error);
  /* Expand the ubsan builtin call.  */
  push_temp_slots ();
  fn = ubsan_build_overflow_builtin (code, gimple_location (stmt),
				     TREE_TYPE (arg0), arg0, arg1);
  expand_normal (fn);
  pop_temp_slots ();
  do_pending_stack_adjust ();

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    emit_move_insn (target, res);
}
Code example #18
File: compare-elim.c Project: TinoSM/gcc
void
find_comparison_dom_walker::before_dom_children (basic_block bb)
{
  struct comparison *last_cmp;
  rtx insn, next, last_clobber;
  bool last_cmp_valid;
  bool need_purge = false;
  bitmap killed;

  killed = BITMAP_ALLOC (NULL);

  /* The last comparison that was made.  Will be reset to NULL
     once the flags are clobbered.  */
  last_cmp = NULL;

  /* True iff the last comparison has not been clobbered, nor
     have its inputs.  Used to eliminate duplicate compares.  */
  last_cmp_valid = false;

  /* The last insn that clobbered the flags, if that insn is of
     a form that may be valid for eliminating a following compare.
     To be reset to NULL once the flags are set otherwise.  */
  last_clobber = NULL;

  /* Propagate the last live comparison throughout the extended basic block. */
  if (single_pred_p (bb))
    {
      last_cmp = (struct comparison *) single_pred (bb)->aux;
      if (last_cmp)
	last_cmp_valid = last_cmp->inputs_valid;
    }

  for (insn = BB_HEAD (bb); insn; insn = next)
    {
      rtx src;

      next = (insn == BB_END (bb) ? NULL_RTX : NEXT_INSN (insn));
      if (!NONDEBUG_INSN_P (insn))
	continue;

      /* Compute the set of registers modified by this instruction.  */
      bitmap_clear (killed);
      df_simulate_find_defs (insn, killed);

      src = conforming_compare (insn);
      if (src)
	{
	  enum machine_mode src_mode = GET_MODE (src);
	  rtx eh_note = NULL;

	  if (flag_non_call_exceptions)
	    eh_note = find_reg_note (insn, REG_EH_REGION, NULL);

	  if (!last_cmp_valid)
	    goto dont_delete;

	  /* Take care that it's in the same EH region.  */
	  if (flag_non_call_exceptions
	      && !rtx_equal_p (eh_note, last_cmp->eh_note))
	    goto dont_delete;

	  /* Make sure the compare is redundant with the previous.  */
	  if (!rtx_equal_p (last_cmp->in_a, XEXP (src, 0))
	      || !rtx_equal_p (last_cmp->in_b, XEXP (src, 1)))
	    goto dont_delete;

	  /* New mode must be compatible with the previous compare mode.  */
	  {
	    enum machine_mode new_mode
	      = targetm.cc_modes_compatible (last_cmp->orig_mode, src_mode);
	    if (new_mode == VOIDmode)
	      goto dont_delete;

	    if (new_mode != last_cmp->orig_mode)
	      {
		rtx x, flags = gen_rtx_REG (src_mode, targetm.flags_regnum);

		/* Generate new comparison for substitution.  */
		x = gen_rtx_COMPARE (new_mode, XEXP (src, 0), XEXP (src, 1));
		x = gen_rtx_SET (VOIDmode, flags, x);

		if (!validate_change (last_cmp->insn,
				      &PATTERN (last_cmp->insn), x, false))
		  goto dont_delete;

		last_cmp->orig_mode = new_mode;
	      }
	  }

	  /* All tests and substitutions succeeded!  */
	  if (eh_note)
	    need_purge = true;
	  delete_insn (insn);
	  continue;

	dont_delete:
	  last_cmp = XCNEW (struct comparison);
	  last_cmp->insn = insn;
	  last_cmp->prev_clobber = last_clobber;
	  last_cmp->in_a = XEXP (src, 0);
	  last_cmp->in_b = XEXP (src, 1);
	  last_cmp->eh_note = eh_note;
	  last_cmp->orig_mode = src_mode;
	  all_compares.safe_push (last_cmp);

	  /* It's unusual, but be prepared for comparison patterns that
	     also clobber an input, or perhaps a scratch.  */
	  last_clobber = NULL;
	  last_cmp_valid = true;
	}

      /* Notice if this instruction kills the flags register.  */
      else if (bitmap_bit_p (killed, targetm.flags_regnum))
	{
	  /* See if this insn could be the "clobber" that eliminates
	     a future comparison.   */
	  last_clobber = (arithmetic_flags_clobber_p (insn) ? insn : NULL);

	  /* In either case, the previous compare is no longer valid.  */
	  last_cmp = NULL;
	  last_cmp_valid = false;
	  continue;
	}

      /* Notice if this instruction uses the flags register.  */
      else if (last_cmp)
	find_flags_uses_in_insn (last_cmp, insn);

      /* Notice if any of the inputs to the comparison have changed.  */
      if (last_cmp_valid
	  && (bitmap_bit_p (killed, REGNO (last_cmp->in_a))
	      || (REG_P (last_cmp->in_b)
		  && bitmap_bit_p (killed, REGNO (last_cmp->in_b)))))
	last_cmp_valid = false;
    }
Code example #19
File: internal-fn.c Project: Alexpux/GCC
void
ubsan_expand_si_overflow_mul_check (gimple stmt)
{
  rtx res, op0, op1;
  tree lhs, fn, arg0, arg1;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  lhs = gimple_call_lhs (stmt);
  arg0 = gimple_call_arg (stmt, 0);
  arg1 = gimple_call_arg (stmt, 1);
  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

  enum insn_code icode = optab_handler (mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
        }
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      struct separate_ops ops;
      machine_mode hmode
	= mode_for_size (GET_MODE_PRECISION (mode) / 2, MODE_INT, 1);
      ops.op0 = arg0;
      ops.op1 = arg1;
      ops.op2 = NULL_TREE;
      ops.location = gimple_location (stmt);
      if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
	  && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
	{
	  machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type
	    = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), 0);

	  res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
	  rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res,
				     GET_MODE_PRECISION (mode), NULL_RTX, 0);
	  hipart = gen_lowpart (mode, hipart);
	  res = gen_lowpart (mode, res);
	  rtx signbit = expand_shift (RSHIFT_EXPR, mode, res,
				      GET_MODE_PRECISION (mode) - 1,
				      NULL_RTX, 0);
	  /* RES is low half of the double width result, HIPART
	     the high half.  There was overflow if
	     HIPART is different from RES < 0 ? -1 : 0.  */
	  emit_cmp_and_jump_insns (signbit, hipart, EQ, NULL_RTX, mode,
				   false, done_label, PROB_VERY_LIKELY);
	}
      else if (hmode != BLKmode
	       && 2 * GET_MODE_PRECISION (hmode) == GET_MODE_PRECISION (mode))
	{
	  rtx_code_label *large_op0 = gen_label_rtx ();
	  rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
	  rtx_code_label *one_small_one_large = gen_label_rtx ();
	  rtx_code_label *both_ops_large = gen_label_rtx ();
	  rtx_code_label *after_hipart_neg = gen_label_rtx ();
	  rtx_code_label *after_lopart_neg = gen_label_rtx ();
	  rtx_code_label *do_overflow = gen_label_rtx ();
	  rtx_code_label *hipart_different = gen_label_rtx ();

	  unsigned int hprec = GET_MODE_PRECISION (hmode);
	  rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
				      NULL_RTX, 0);
	  hipart0 = gen_lowpart (hmode, hipart0);
	  rtx lopart0 = gen_lowpart (hmode, op0);
	  rtx signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
				       NULL_RTX, 0);
	  rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
				      NULL_RTX, 0);
	  hipart1 = gen_lowpart (hmode, hipart1);
	  rtx lopart1 = gen_lowpart (hmode, op1);
	  rtx signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
				       NULL_RTX, 0);

	  res = gen_reg_rtx (mode);

	  /* True if op0 resp. op1 are known to be in the range of
	     halfstype.  */
	  bool op0_small_p = false;
	  bool op1_small_p = false;
	  /* True if op0 resp. op1 are known to have all zeros or all ones
	     in the upper half of bits, but are not known to be
	     op{0,1}_small_p.  */
	  bool op0_medium_p = false;
	  bool op1_medium_p = false;
	  /* -1 if op{0,1} is known to be negative, 0 if it is known to be
	     nonnegative, 1 if unknown.  */
	  int op0_sign = 1;
	  int op1_sign = 1;

	  if (TREE_CODE (arg0) == SSA_NAME)
	    {
	      wide_int arg0_min, arg0_max;
	      if (get_range_info (arg0, &arg0_min, &arg0_max) == VR_RANGE)
		{
		  unsigned int mprec0 = wi::min_precision (arg0_min, SIGNED);
		  unsigned int mprec1 = wi::min_precision (arg0_max, SIGNED);
		  if (mprec0 <= hprec && mprec1 <= hprec)
		    op0_small_p = true;
		  else if (mprec0 <= hprec + 1 && mprec1 <= hprec + 1)
		    op0_medium_p = true;
		  if (!wi::neg_p (arg0_min, TYPE_SIGN (TREE_TYPE (arg0))))
		    op0_sign = 0;
		  else if (wi::neg_p (arg0_max, TYPE_SIGN (TREE_TYPE (arg0))))
		    op0_sign = -1;
		}
	    }
	  if (TREE_CODE (arg1) == SSA_NAME)
	    {
	      wide_int arg1_min, arg1_max;
	      if (get_range_info (arg1, &arg1_min, &arg1_max) == VR_RANGE)
		{
		  unsigned int mprec0 = wi::min_precision (arg1_min, SIGNED);
		  unsigned int mprec1 = wi::min_precision (arg1_max, SIGNED);
		  if (mprec0 <= hprec && mprec1 <= hprec)
		    op1_small_p = true;
		  else if (mprec0 <= hprec + 1 && mprec1 <= hprec + 1)
		    op1_medium_p = true;
		  if (!wi::neg_p (arg1_min, TYPE_SIGN (TREE_TYPE (arg1))))
		    op1_sign = 0;
		  else if (wi::neg_p (arg1_max, TYPE_SIGN (TREE_TYPE (arg1))))
		    op1_sign = -1;
		}
	    }

	  int smaller_sign = 1;
	  int larger_sign = 1;
	  if (op0_small_p)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op1_sign;
	    }
	  else if (op1_small_p)
	    {
	      smaller_sign = op1_sign;
	      larger_sign = op0_sign;
	    }
	  else if (op0_sign == op1_sign)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op0_sign;
	    }

	  if (!op0_small_p)
	    emit_cmp_and_jump_insns (signbit0, hipart0, NE, NULL_RTX, hmode,
				     false, large_op0, PROB_UNLIKELY);

	  if (!op1_small_p)
	    emit_cmp_and_jump_insns (signbit1, hipart1, NE, NULL_RTX, hmode,
				     false, small_op0_large_op1,
				     PROB_UNLIKELY);

	  /* If both op0 and op1 are sign extended from hmode to mode,
	     the multiplication will never overflow.  We can do just one
	     hmode x hmode => mode widening multiplication.  */
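	  /* This point is only reached when both operands passed the
	     small-operand checks above, so their low-part SUBREGs can
	     be marked as already sign-extended from hmode; the widening
	     multiply expansion can then skip redundant extensions.  */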
	  if (GET_CODE (lopart0) == SUBREG)
	    {
	      SUBREG_PROMOTED_VAR_P (lopart0) = 1;
	      SUBREG_PROMOTED_SET (lopart0, 0);
	    }
	  if (GET_CODE (lopart1) == SUBREG)
	    {
	      SUBREG_PROMOTED_VAR_P (lopart1) = 1;
	      SUBREG_PROMOTED_SET (lopart1, 0);
	    }
	  tree halfstype = build_nonstandard_integer_type (hprec, 0);
	  ops.op0 = make_tree (halfstype, lopart0);
	  ops.op1 = make_tree (halfstype, lopart1);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type = TREE_TYPE (arg0);
	  rtx thisres
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, thisres);
	  emit_jump (done_label);

	  emit_label (small_op0_large_op1);

	  /* If op0 is sign extended from hmode to mode, but op1 is not,
	     just swap the arguments and handle it as op1 sign extended,
	     op0 not.  */
	  rtx larger = gen_reg_rtx (mode);
	  rtx hipart = gen_reg_rtx (hmode);
	  rtx lopart = gen_reg_rtx (hmode);
	  emit_move_insn (larger, op1);
	  emit_move_insn (hipart, hipart1);
	  emit_move_insn (lopart, lopart0);
	  emit_jump (one_small_one_large);

	  emit_label (large_op0);

	  if (!op1_small_p)
	    emit_cmp_and_jump_insns (signbit1, hipart1, NE, NULL_RTX, hmode,
				     false, both_ops_large, PROB_UNLIKELY);

	  /* If op1 is sign extended from hmode to mode, but op0 is not,
	     prepare larger, hipart and lopart pseudos and handle it together
	     with small_op0_large_op1.  */
	  emit_move_insn (larger, op0);
	  emit_move_insn (hipart, hipart0);
	  emit_move_insn (lopart, lopart1);

	  emit_label (one_small_one_large);

	  /* lopart is the low part of the operand that is sign extended
	     to mode, larger is the other operand, hipart is the
	     high part of larger and lopart0 and lopart1 are the low parts
	     of both operands.
	     We perform lopart0 * lopart1 and lopart * hipart widening
	     multiplications.  */
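	  /* Both widening products are computed on the unsigned low
	     parts; the conditional subtractions below convert the
	     result back to signed arithmetic: a negative hipart makes
	     (unsigned) hipart too large by 2^hprec, and a negative
	     lopart is compensated by subtracting larger.  */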
	  tree halfutype = build_nonstandard_integer_type (hprec, 1);
	  ops.op0 = make_tree (halfutype, lopart0);
	  ops.op1 = make_tree (halfutype, lopart1);
	  rtx lo0xlo1
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);

	  ops.op0 = make_tree (halfutype, lopart);
	  ops.op1 = make_tree (halfutype, hipart);
	  rtx loxhi = gen_reg_rtx (mode);
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (loxhi, tem);

	  /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
	  if (larger_sign == 0)
	    emit_jump (after_hipart_neg);
	  else if (larger_sign != -1)
	    emit_cmp_and_jump_insns (hipart, const0_rtx, GE, NULL_RTX, hmode,
				     false, after_hipart_neg, PROB_EVEN);

	  tem = convert_modes (mode, hmode, lopart, 1);
	  tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
	  tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  emit_label (after_hipart_neg);

	  /* if (lopart < 0) loxhi -= larger;  */
	  if (smaller_sign == 0)
	    emit_jump (after_lopart_neg);
	  else if (smaller_sign != -1)
	    emit_cmp_and_jump_insns (lopart, const0_rtx, GE, NULL_RTX, hmode,
				     false, after_lopart_neg, PROB_EVEN);

	  tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  emit_label (after_lopart_neg);

	  /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
	  tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
	  tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  /* if (loxhi >> (bitsize / 2)
		 == (hmode) loxhi >> (bitsize / 2 - 1))  */
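	  /* I.e. loxhi must be sign-extendable from hmode, otherwise
	     the final result cannot be represented in mode.  */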
	  rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
					  NULL_RTX, 0);
	  hipartloxhi = gen_lowpart (hmode, hipartloxhi);
	  rtx lopartloxhi = gen_lowpart (hmode, loxhi);
	  rtx signbitloxhi = expand_shift (RSHIFT_EXPR, hmode, lopartloxhi,
					   hprec - 1, NULL_RTX, 0);

	  emit_cmp_and_jump_insns (signbitloxhi, hipartloxhi, NE, NULL_RTX,
				   hmode, false, do_overflow,
				   PROB_VERY_UNLIKELY);

	  /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
	  rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
					   NULL_RTX, 1);
	  tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);

	  tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
				     1, OPTAB_DIRECT);
	  if (tem != res)
	    emit_move_insn (res, tem);
	  emit_jump (done_label);

	  emit_label (both_ops_large);

	  /* If both operands are large (not sign extended from hmode),
	     then perform the full multiplication which will be the result
	     of the operation.  The only cases which don't overflow are
	     some cases where both hipart0 and hipart1 are 0 or -1.  */
	  ops.code = MULT_EXPR;
	  ops.op0 = make_tree (TREE_TYPE (arg0), op0);
	  ops.op1 = make_tree (TREE_TYPE (arg0), op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);

	  if (!op0_medium_p)
	    {
	      tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
					 NULL_RTX, 1, OPTAB_DIRECT);
	      emit_cmp_and_jump_insns (tem, const1_rtx, GTU, NULL_RTX, hmode,
				       true, do_error, PROB_VERY_UNLIKELY);
	    }

	  if (!op1_medium_p)
	    {
	      tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
					 NULL_RTX, 1, OPTAB_DIRECT);
	      emit_cmp_and_jump_insns (tem, const1_rtx, GTU, NULL_RTX, hmode,
				       true, do_error, PROB_VERY_UNLIKELY);
	    }

	  /* At this point hipart{0,1} are both in [-1, 0].  If they are the
	     same, overflow happened if res is negative, if they are different,
	     overflow happened if res is positive.  */
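	  /* Worked example (illustrative, hprec == 16): op0 == op1 ==
	     -40000 has hipart == -1 yet is not sign-extendable from
	     hmode; the hiparts agree and res == 1600000000 is
	     nonnegative, so there is no overflow.  For op0 == op1 ==
	     -50000 the true product 2500000000 wraps to a negative res,
	     which is caught below.  */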
	  if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
	    emit_jump (hipart_different);
	  else if (op0_sign == 1 || op1_sign == 1)
	    emit_cmp_and_jump_insns (hipart0, hipart1, NE, NULL_RTX, hmode,
				     true, hipart_different, PROB_EVEN);

	  emit_cmp_and_jump_insns (res, const0_rtx, LT, NULL_RTX, mode, false,
				   do_error, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);

	  emit_label (hipart_different);

	  emit_cmp_and_jump_insns (res, const0_rtx, GE, NULL_RTX, mode, false,
				   do_error, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);

	  emit_label (do_overflow);

	  /* Overflow, do full multiplication and fallthru into do_error.  */
	  ops.op0 = make_tree (TREE_TYPE (arg0), op0);
	  ops.op1 = make_tree (TREE_TYPE (arg0), op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);
	}
      else
	{
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg0);
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_jump (done_label);
	}
    }

  emit_label (do_error);
  /* Expand the ubsan builtin call.  */
  push_temp_slots ();
  fn = ubsan_build_overflow_builtin (MULT_EXPR, gimple_location (stmt),
				     TREE_TYPE (arg0), arg0, arg1);
  expand_normal (fn);
  pop_temp_slots ();
  do_pending_stack_adjust ();

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    emit_move_insn (target, res);
}
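For context, this routine expands GCC's checked signed multiplication, as used for example by -fsanitize=signed-integer-overflow: the fast paths above verify that the high half of the product matches the sign extension of the low half, and the slow path calls the ubsan overflow builtin expanded at do_error. A minimal source-level sketch (an assumed example program, not part of the snippet) that exercises this code:

/* Compile with: gcc -O2 -fsanitize=signed-integer-overflow mul.c  */
#include <stdio.h>

static int
mul (int a, int b)
{
  /* Expanded through the overflow-checking sequence above.  */
  return a * b;
}

int
main (void)
{
  volatile int a = 0x10000, b = 0x10000;  /* Defeat constant folding.  */
  printf ("%d\n", mul (a, b));  /* 2^32 does not fit in int: runtime error.  */
  return 0;
}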
Code example #20
0
File: value-prof.c Project: DJHartley/iphone-dev
/* Find the list of values for which we want to measure histograms.  */
static void
rtl_find_values_to_profile (histogram_values *values)
{
  rtx insn;
  unsigned i, libcall_level;

  life_analysis (NULL, PROP_DEATH_NOTES);

  *values = VEC_alloc (histogram_value, 0);
  libcall_level = 0;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
	libcall_level++;

      /* Do not instrument values inside libcalls (we are going to split block
	 due to instrumentation, and libcall blocks should be local to a single
	 basic block).  */
      if (!libcall_level)
	insn_values_to_profile (insn, values);

      if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
	{
	  gcc_assert (libcall_level > 0);
	  libcall_level--;
	}
    }
  gcc_assert (libcall_level == 0);

  for (i = 0; i < VEC_length (histogram_value, *values); i++)
    {
      histogram_value hist = VEC_index (histogram_value, *values, i);

      switch (hist->type)
	{
	case HIST_TYPE_INTERVAL:
	  if (dump_file)
	    fprintf (dump_file,
		     "Interval counter for insn %d, range %d -- %d.\n",
		     INSN_UID ((rtx)hist->insn),
		     hist->hdata.intvl.int_start,
		     (hist->hdata.intvl.int_start
		      + hist->hdata.intvl.steps - 1));
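	  /* E.g. (illustrative values) steps == 4 with both may_be_less
	     and may_be_more set needs 4 + 1 + 1 == 6 counters.  */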
	  hist->n_counters = hist->hdata.intvl.steps
			     + (hist->hdata.intvl.may_be_less ? 1 : 0)
			     + (hist->hdata.intvl.may_be_more ? 1 : 0);
	  break;

	case HIST_TYPE_POW2:
	  if (dump_file)
	    fprintf (dump_file,
		     "Pow2 counter for insn %d.\n",
		     INSN_UID ((rtx)hist->insn));
	  hist->n_counters = GET_MODE_BITSIZE (hist->mode)
			     + (hist->hdata.pow2.may_be_other ? 1 : 0);
	  break;

	case HIST_TYPE_SINGLE_VALUE:
	  if (dump_file)
	    fprintf (dump_file,
		     "Single value counter for insn %d.\n",
		     INSN_UID ((rtx)hist->insn));
	  hist->n_counters = 3;
	  break;

	case HIST_TYPE_CONST_DELTA:
	  if (dump_file)
	    fprintf (dump_file,
		     "Constant delta counter for insn %d.\n",
		     INSN_UID ((rtx)hist->insn));
	  hist->n_counters = 4;
	  break;

	default:
	  abort ();
	}
    }
  allocate_reg_info (max_reg_num (), FALSE, FALSE);
}