Code example #1
File: lra-eliminations.c  Project: paranoiacblack/gcc
/* Transform (subreg (plus reg const)) to (plus (subreg reg) const)
   when it is possible.  Return X or the transformation result if the
   transformation is done.  */
static rtx
move_plus_up (rtx x)
{
  rtx subreg_reg;
  enum machine_mode x_mode, subreg_reg_mode;
  
  if (GET_CODE (x) != SUBREG || !subreg_lowpart_p (x))
    return x;
  subreg_reg = SUBREG_REG (x);
  x_mode = GET_MODE (x);
  subreg_reg_mode = GET_MODE (subreg_reg);
  if (GET_CODE (x) == SUBREG && GET_CODE (subreg_reg) == PLUS
      && GET_MODE_SIZE (x_mode) <= GET_MODE_SIZE (subreg_reg_mode)
      && CONSTANT_P (XEXP (subreg_reg, 1))
      && GET_MODE_CLASS (x_mode) == MODE_INT
      && GET_MODE_CLASS (subreg_reg_mode) == MODE_INT)
    {
      rtx cst = simplify_subreg (x_mode, XEXP (subreg_reg, 1), subreg_reg_mode,
				 subreg_lowpart_offset (x_mode,
							subreg_reg_mode));
      if (cst && CONSTANT_P (cst))
	return gen_rtx_PLUS (x_mode, lowpart_subreg (x_mode,
						     XEXP (subreg_reg, 0),
						     subreg_reg_mode), cst);
    }
  return x;
}
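
For concreteness, a hedged illustration (not part of the source) of the rewrite move_plus_up performs, traced against its own checks:

/* Hypothetical input, assuming a DImode pseudo r100 on a target where
   byte offset 0 is the lowpart:

     (subreg:SI (plus:DI (reg:DI 100) (const_int 4)) 0)

   Both modes are MODE_INT, SImode is no wider than DImode, and the
   constant simplifies to a valid SImode constant, so move_plus_up
   returns the equivalent

     (plus:SI (subreg:SI (reg:DI 100) 0) (const_int 4))  */
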
Code example #2
int
crx_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
  /* CC can only hold CCmode values.  */
  if (regno == CC_REGNUM)
    return GET_MODE_CLASS (mode) == MODE_CC;
  if (GET_MODE_CLASS (mode) == MODE_CC)
    return 0;
  /* HILO registers can only hold SImode and DImode values.  */
  if (HILO_REGNO_P (regno))
    return mode == SImode || mode == DImode;
  return 1;
}
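
Ports of this vintage typically export such a predicate through the HARD_REGNO_MODE_OK target macro; a minimal sketch of the assumed wiring (the actual crx.h definition is not shown in this snippet):

#define HARD_REGNO_MODE_OK(REGNO, MODE) crx_hard_regno_mode_ok (REGNO, MODE)
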
Code example #3
rtx
rtx_vector_builder::find_cached_value ()
{
  if (encoded_nelts () != 1)
    return NULL_RTX;

  rtx elt = (*this)[0];

  if (GET_MODE_CLASS (m_mode) == MODE_VECTOR_BOOL)
    {
      if (elt == const1_rtx || elt == constm1_rtx)
	return CONST1_RTX (m_mode);
      else if (elt == const0_rtx)
	return CONST0_RTX (m_mode);
      else
	gcc_unreachable ();
    }

  /* We can be called before the global vector constants are set up,
     but in that case we'll just return null.  */
  scalar_mode inner_mode = GET_MODE_INNER (m_mode);
  if (elt == CONST0_RTX (inner_mode))
    return CONST0_RTX (m_mode);
  else if (elt == CONST1_RTX (inner_mode))
    return CONST1_RTX (m_mode);
  else if (elt == CONSTM1_RTX (inner_mode))
    return CONSTM1_RTX (m_mode);

  return NULL_RTX;
}
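
A hedged usage sketch, assuming the rtx_vector_builder interface from rtx-vector-builder.h: a splat of 1 encodes as a single element, so build () can hand back the shared constant that find_cached_value locates.

rtx_vector_builder builder (V4SImode, 1, 1);  /* 1 pattern, 1 elt per pattern */
builder.quick_push (const1_rtx);              /* the duplicated element */
rtx vec = builder.build ();                   /* == CONST1_RTX (V4SImode) */
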
Code example #4
// llvm_mips_should_not_return_complex_in_memory -  Return true if TYPE 
// should be returned using multiple value return instruction. This 
// implementation is based on mips_function_value in mips.c
bool llvm_mips_should_not_return_complex_in_memory(tree type) {

  enum machine_mode mode = TYPE_MODE(type);

  if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
      && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE * 2)
    return true;

  return false;
}
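
A worked example under an assumed configuration (not stated in the source):

/* Assuming 64-bit FPRs, UNITS_PER_HWFPVALUE == 8, so the test is
   GET_MODE_SIZE (mode) <= 16: SCmode (8 bytes) and DCmode (16 bytes)
   complex floats are returned in registers, while a 32-byte TCmode
   value would fail the test and be returned in memory.  */
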
Code example #5
rtx
compare_and_jump_seq (rtx op0, rtx op1, enum rtx_code comp, rtx label, int prob,
		      rtx cinsn)
{
  rtx seq, jump, cond;
  enum machine_mode mode;

  mode = GET_MODE (op0);
  if (mode == VOIDmode)
    mode = GET_MODE (op1);

  start_sequence ();
  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      /* A hack -- there seems to be no easy generic way to make a
	 conditional jump from a ccmode comparison.  */
      gcc_assert (cinsn);
      cond = XEXP (SET_SRC (pc_set (cinsn)), 0);
      gcc_assert (GET_CODE (cond) == comp);
      gcc_assert (rtx_equal_p (op0, XEXP (cond, 0)));
      gcc_assert (rtx_equal_p (op1, XEXP (cond, 1)));
      emit_jump_insn (copy_insn (PATTERN (cinsn)));
      jump = get_last_insn ();
      gcc_assert (JUMP_P (jump));
      JUMP_LABEL (jump) = JUMP_LABEL (cinsn);
      LABEL_NUSES (JUMP_LABEL (jump))++;
      redirect_jump (jump, label, 0);
    }
  else
    {
      gcc_assert (!cinsn);

      op0 = force_operand (op0, NULL_RTX);
      op1 = force_operand (op1, NULL_RTX);
      do_compare_rtx_and_jump (op0, op1, comp, 0,
			       mode, NULL_RTX, NULL_RTX, label, -1);
      jump = get_last_insn ();
      gcc_assert (JUMP_P (jump));
      JUMP_LABEL (jump) = label;
      LABEL_NUSES (label)++;
    }
  add_reg_note (jump, REG_BR_PROB, GEN_INT (prob));

  seq = get_insns ();
  end_sequence ();

  return seq;
}
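
A hedged caller sketch (niter_reg, skip_label and entry_edge are hypothetical): emit an "if (niter == 0) goto skip" sequence and queue it on a loop entry edge.

rtx seq = compare_and_jump_seq (niter_reg, const0_rtx, EQ, skip_label,
                                REG_BR_PROB_BASE / 2, NULL_RTX);
insert_insn_on_edge (seq, entry_edge);
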
Code example #6
File: omp-general.c  Project: WojciechMigda/gcc
int
omp_max_vf (void)
{
  if (!optimize
      || optimize_debug
      || !flag_tree_loop_optimize
      || (!flag_tree_loop_vectorize
	  && (global_options_set.x_flag_tree_loop_vectorize
	      || global_options_set.x_flag_tree_vectorize)))
    return 1;

  int vf = 1;
  int vs = targetm.vectorize.autovectorize_vector_sizes ();
  if (vs)
    vf = 1 << floor_log2 (vs);
  else
    {
      machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
      if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
	vf = GET_MODE_NUNITS (vqimode);
    }
  return vf;
}
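
A worked example of the size selection (assumed target data):

/* If autovectorize_vector_sizes () returns 48 == 0b110000 (32- and
   16-byte vector sizes advertised), floor_log2 (48) == 5, so
   vf = 1 << 5 == 32, i.e. the largest supported size in bytes.  */
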
Code example #7
File: dojump.c  Project: FilipinOTech/gcc
void
do_compare_rtx_and_jump (rtx op0, rtx op1, enum rtx_code code, int unsignedp,
			 enum machine_mode mode, rtx size, rtx if_false_label,
			 rtx if_true_label, int prob)
{
  rtx tem;
  rtx dummy_label = NULL_RTX;
  rtx last;

  /* Reverse the comparison if that is safe and we want to jump if it is
     false.  Also convert to the reverse comparison if the target can
     implement it.  */
  if ((! if_true_label
       || ! can_compare_p (code, mode, ccp_jump))
      && (! FLOAT_MODE_P (mode)
	  || code == ORDERED || code == UNORDERED
	  || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
	  || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
    {
      enum rtx_code rcode;
      if (FLOAT_MODE_P (mode))
        rcode = reverse_condition_maybe_unordered (code);
      else
        rcode = reverse_condition (code);

      /* Canonicalize to UNORDERED for the libcall.  */
      if (can_compare_p (rcode, mode, ccp_jump)
	  || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
	{
          tem = if_true_label;
          if_true_label = if_false_label;
          if_false_label = tem;
	  code = rcode;
	  prob = inv (prob);
	}
    }

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  do_pending_stack_adjust ();

  code = unsignedp ? unsigned_condition (code) : code;
  if (0 != (tem = simplify_relational_operation (code, mode, VOIDmode,
						 op0, op1)))
    {
      if (CONSTANT_P (tem))
	{
	  rtx label = (tem == const0_rtx || tem == CONST0_RTX (mode))
		      ? if_false_label : if_true_label;
	  if (label)
	    emit_jump (label);
	  return;
	}

      code = GET_CODE (tem);
      mode = GET_MODE (tem);
      op0 = XEXP (tem, 0);
      op1 = XEXP (tem, 1);
      unsignedp = (code == GTU || code == LTU || code == GEU || code == LEU);
    }

  if (! if_true_label)
    dummy_label = if_true_label = gen_label_rtx ();

  if (GET_MODE_CLASS (mode) == MODE_INT
      && ! can_compare_p (code, mode, ccp_jump))
    {
      switch (code)
	{
	case LTU:
	  do_jump_by_parts_greater_rtx (mode, 1, op1, op0,
					if_false_label, if_true_label, prob);
	  break;

	case LEU:
	  do_jump_by_parts_greater_rtx (mode, 1, op0, op1,
					if_true_label, if_false_label,
					inv (prob));
	  break;

	case GTU:
	  do_jump_by_parts_greater_rtx (mode, 1, op0, op1,
					if_false_label, if_true_label, prob);
	  break;

	case GEU:
	  do_jump_by_parts_greater_rtx (mode, 1, op1, op0,
					if_true_label, if_false_label,
					inv (prob));
	  break;

	case LT:
	  do_jump_by_parts_greater_rtx (mode, 0, op1, op0,
					if_false_label, if_true_label, prob);
	  break;

	case LE:
	  do_jump_by_parts_greater_rtx (mode, 0, op0, op1,
					if_true_label, if_false_label,
					inv (prob));
	  break;

	case GT:
	  do_jump_by_parts_greater_rtx (mode, 0, op0, op1,
					if_false_label, if_true_label, prob);
	  break;

	case GE:
	  do_jump_by_parts_greater_rtx (mode, 0, op1, op0,
					if_true_label, if_false_label,
					inv (prob));
	  break;

	case EQ:
	  do_jump_by_parts_equality_rtx (mode, op0, op1, if_false_label,
					 if_true_label, prob);
	  break;

	case NE:
	  do_jump_by_parts_equality_rtx (mode, op0, op1, if_true_label,
					 if_false_label, inv (prob));
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      if (GET_MODE_CLASS (mode) == MODE_FLOAT
	  && ! can_compare_p (code, mode, ccp_jump)
	  && can_compare_p (swap_condition (code), mode, ccp_jump))
	{
	  rtx tmp;
	  code = swap_condition (code);
	  tmp = op0;
	  op0 = op1;
	  op1 = tmp;
	}

      else if (GET_MODE_CLASS (mode) == MODE_FLOAT
	       && ! can_compare_p (code, mode, ccp_jump)

	       /* Never split ORDERED and UNORDERED.  These must be implemented.  */
	       && (code != ORDERED && code != UNORDERED)

               /* Split a floating-point comparison if we can jump on other
	          conditions...  */
	       && (have_insn_for (COMPARE, mode)

	           /* ... or if there is no libcall for it.  */
	           || code_to_optab[code] == NULL))
        {
	  enum rtx_code first_code;
	  bool and_them = split_comparison (code, mode, &first_code, &code);

	  /* If there are no NaNs, the first comparison should always fall
	     through.  */
	  if (!HONOR_NANS (mode))
	    gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));

	  else
	    {
	      if (and_them)
		{
		  rtx dest_label;
		  /* If we only jump if true, just bypass the second jump.  */
		  if (! if_false_label)
		    {
		      if (! dummy_label)
		        dummy_label = gen_label_rtx ();
		      dest_label = dummy_label;
		    }
		  else
		    dest_label = if_false_label;
                  do_compare_rtx_and_jump (op0, op1, first_code, unsignedp, mode,
					   size, dest_label, NULL_RTX, prob);
		}
              else
                do_compare_rtx_and_jump (op0, op1, first_code, unsignedp, mode,
					 size, NULL_RTX, if_true_label, prob);
	    }
	}

      last = get_last_insn ();
      emit_cmp_and_jump_insns (op0, op1, code, size, mode, unsignedp,
			       if_true_label);
      if (prob != -1 && profile_status != PROFILE_ABSENT)
	{
	  for (last = NEXT_INSN (last);
	       last && NEXT_INSN (last);
	       last = NEXT_INSN (last))
	    if (JUMP_P (last))
	      break;
	  if (!last
	      || !JUMP_P (last)
	      || NEXT_INSN (last)
	      || !any_condjump_p (last))
	    {
	      if (dump_file)
		fprintf (dump_file, "Failed to add probability note\n");
	    }
	  else
	    {
	      gcc_assert (!find_reg_note (last, REG_BR_PROB, 0));
	      add_reg_note (last, REG_BR_PROB, GEN_INT (prob));
	    }
	}
    }

  if (if_false_label)
    emit_jump (if_false_label);
  if (dummy_label)
    emit_label (dummy_label);
}
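
A hedged call sketch against this signature (reg0 and label are hypothetical): branch to label when the pseudo is nonzero, leaving the probability unknown.

do_compare_rtx_and_jump (reg0, const0_rtx, NE, 0, GET_MODE (reg0),
                         NULL_RTX, NULL_RTX, label, -1);
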
Code example #8
File: dojump.c  Project: FilipinOTech/gcc
void
do_jump_1 (enum tree_code code, tree op0, tree op1,
	   rtx if_false_label, rtx if_true_label, int prob)
{
  enum machine_mode mode;
  rtx drop_through_label = 0;

  switch (code)
    {
    case EQ_EXPR:
      {
        tree inner_type = TREE_TYPE (op0);

        gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
		    != MODE_COMPLEX_FLOAT);
	gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
		    != MODE_COMPLEX_INT);

        if (integer_zerop (op1))
	  do_jump (op0, if_true_label, if_false_label, inv (prob));
        else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
                 && !can_compare_p (EQ, TYPE_MODE (inner_type), ccp_jump))
	  do_jump_by_parts_equality (op0, op1, if_false_label, if_true_label,
				     prob);
        else
	  do_compare_and_jump (op0, op1, EQ, EQ, if_false_label, if_true_label,
			       prob);
        break;
      }

    case NE_EXPR:
      {
        tree inner_type = TREE_TYPE (op0);

        gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
		    != MODE_COMPLEX_FLOAT);
	gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
		    != MODE_COMPLEX_INT);

        if (integer_zerop (op1))
	  do_jump (op0, if_false_label, if_true_label, prob);
        else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
           && !can_compare_p (NE, TYPE_MODE (inner_type), ccp_jump))
	  do_jump_by_parts_equality (op0, op1, if_true_label, if_false_label,
				     inv (prob));
        else
	  do_compare_and_jump (op0, op1, NE, NE, if_false_label, if_true_label,
			       prob);
        break;
      }

    case LT_EXPR:
      mode = TYPE_MODE (TREE_TYPE (op0));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (LT, mode, ccp_jump))
	do_jump_by_parts_greater (op0, op1, 1, if_false_label, if_true_label,
				  prob);
      else
	do_compare_and_jump (op0, op1, LT, LTU, if_false_label, if_true_label,
			     prob);
      break;

    case LE_EXPR:
      mode = TYPE_MODE (TREE_TYPE (op0));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (LE, mode, ccp_jump))
	do_jump_by_parts_greater (op0, op1, 0, if_true_label, if_false_label,
				  inv (prob));
      else
	do_compare_and_jump (op0, op1, LE, LEU, if_false_label, if_true_label,
			     prob);
      break;

    case GT_EXPR:
      mode = TYPE_MODE (TREE_TYPE (op0));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (GT, mode, ccp_jump))
	do_jump_by_parts_greater (op0, op1, 0, if_false_label, if_true_label,
				  prob);
      else
	do_compare_and_jump (op0, op1, GT, GTU, if_false_label, if_true_label,
			     prob);
      break;

    case GE_EXPR:
      mode = TYPE_MODE (TREE_TYPE (op0));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (GE, mode, ccp_jump))
	do_jump_by_parts_greater (op0, op1, 1, if_true_label, if_false_label,
				  inv (prob));
      else
	do_compare_and_jump (op0, op1, GE, GEU, if_false_label, if_true_label,
			     prob);
      break;

    case ORDERED_EXPR:
      do_compare_and_jump (op0, op1, ORDERED, ORDERED,
			   if_false_label, if_true_label, prob);
      break;

    case UNORDERED_EXPR:
      do_compare_and_jump (op0, op1, UNORDERED, UNORDERED,
			   if_false_label, if_true_label, prob);
      break;

    case UNLT_EXPR:
      do_compare_and_jump (op0, op1, UNLT, UNLT, if_false_label, if_true_label,
			   prob);
      break;

    case UNLE_EXPR:
      do_compare_and_jump (op0, op1, UNLE, UNLE, if_false_label, if_true_label,
			   prob);
      break;

    case UNGT_EXPR:
      do_compare_and_jump (op0, op1, UNGT, UNGT, if_false_label, if_true_label,
			   prob);
      break;

    case UNGE_EXPR:
      do_compare_and_jump (op0, op1, UNGE, UNGE, if_false_label, if_true_label,
			   prob);
      break;

    case UNEQ_EXPR:
      do_compare_and_jump (op0, op1, UNEQ, UNEQ, if_false_label, if_true_label,
			   prob);
      break;

    case LTGT_EXPR:
      do_compare_and_jump (op0, op1, LTGT, LTGT, if_false_label, if_true_label,
			   prob);
      break;

    case TRUTH_ANDIF_EXPR:
      if (if_false_label == NULL_RTX)
        {
	  drop_through_label = gen_label_rtx ();
	  do_jump (op0, drop_through_label, NULL_RTX, prob);
	  do_jump (op1, NULL_RTX, if_true_label, prob);
	}
      else
	{
	  do_jump (op0, if_false_label, NULL_RTX, prob);
	  do_jump (op1, if_false_label, if_true_label, prob);
	}
      break;

    case TRUTH_ORIF_EXPR:
      if (if_true_label == NULL_RTX)
	{
          drop_through_label = gen_label_rtx ();
	  do_jump (op0, NULL_RTX, drop_through_label, prob);
	  do_jump (op1, if_false_label, NULL_RTX, prob);
	}
      else
	{
	  do_jump (op0, NULL_RTX, if_true_label, prob);
	  do_jump (op1, if_false_label, if_true_label, prob);
	}
      break;

    default:
      gcc_unreachable ();
    }

  if (drop_through_label)
    {
      do_pending_stack_adjust ();
      emit_label (drop_through_label);
    }
}
Code example #9
File: dojump.c  Project: Abioy/gccxml
void
do_compare_rtx_and_jump (rtx op0, rtx op1, enum rtx_code code, int unsignedp,
                         enum machine_mode mode, rtx size, rtx if_false_label,
                         rtx if_true_label)
{
  rtx tem;
  int dummy_true_label = 0;

  /* Reverse the comparison if that is safe and we want to jump if it is
     false.  */
  if (! if_true_label && ! FLOAT_MODE_P (mode))
    {
      if_true_label = if_false_label;
      if_false_label = 0;
      code = reverse_condition (code);
    }

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  do_pending_stack_adjust ();

  code = unsignedp ? unsigned_condition (code) : code;
  if (0 != (tem = simplify_relational_operation (code, mode, VOIDmode,
                                                 op0, op1)))
    {
      if (CONSTANT_P (tem))
        {
          rtx label = (tem == const0_rtx || tem == CONST0_RTX (mode))
                      ? if_false_label : if_true_label;
          if (label)
            emit_jump (label);
          return;
        }

      code = GET_CODE (tem);
      mode = GET_MODE (tem);
      op0 = XEXP (tem, 0);
      op1 = XEXP (tem, 1);
      unsignedp = (code == GTU || code == LTU || code == GEU || code == LEU);
    }


  if (! if_true_label)
    {
      dummy_true_label = 1;
      if_true_label = gen_label_rtx ();
    }

  if (GET_MODE_CLASS (mode) == MODE_INT
      && ! can_compare_p (code, mode, ccp_jump))
    {
      switch (code)
        {
        case LTU:
          do_jump_by_parts_greater_rtx (mode, 1, op1, op0,
                                        if_false_label, if_true_label);
          break;

        case LEU:
          do_jump_by_parts_greater_rtx (mode, 1, op0, op1,
                                        if_true_label, if_false_label);
          break;

        case GTU:
          do_jump_by_parts_greater_rtx (mode, 1, op0, op1,
                                        if_false_label, if_true_label);
          break;

        case GEU:
          do_jump_by_parts_greater_rtx (mode, 1, op1, op0,
                                        if_true_label, if_false_label);
          break;

        case LT:
          do_jump_by_parts_greater_rtx (mode, 0, op1, op0,
                                        if_false_label, if_true_label);
          break;

        case LE:
          do_jump_by_parts_greater_rtx (mode, 0, op0, op1,
                                        if_true_label, if_false_label);
          break;

        case GT:
          do_jump_by_parts_greater_rtx (mode, 0, op0, op1,
                                        if_false_label, if_true_label);
          break;

        case GE:
          do_jump_by_parts_greater_rtx (mode, 0, op1, op0,
                                        if_true_label, if_false_label);
          break;

        case EQ:
          do_jump_by_parts_equality_rtx (mode, op0, op1, if_false_label,
                                         if_true_label);
          break;

        case NE:
          do_jump_by_parts_equality_rtx (mode, op0, op1, if_true_label,
                                         if_false_label);
          break;

        default:
          gcc_unreachable ();
        }
    }
  else
    emit_cmp_and_jump_insns (op0, op1, code, size, mode, unsignedp,
                             if_true_label);

  if (if_false_label)
    emit_jump (if_false_label);
  if (dummy_true_label)
    emit_label (if_true_label);
}
Code example #10
File: dojump.c  Project: Abioy/gccxml
void
do_jump (tree exp, rtx if_false_label, rtx if_true_label)
{
  enum tree_code code = TREE_CODE (exp);
  rtx temp;
  int i;
  tree type;
  enum machine_mode mode;
  rtx drop_through_label = 0;

  switch (code)
    {
    case ERROR_MARK:
      break;

    case INTEGER_CST:
      temp = integer_zerop (exp) ? if_false_label : if_true_label;
      if (temp)
        emit_jump (temp);
      break;

#if 0
      /* This is not true with #pragma weak  */
    case ADDR_EXPR:
      /* The address of something can never be zero.  */
      if (if_true_label)
        emit_jump (if_true_label);
      break;
#endif

    case NOP_EXPR:
      if (TREE_CODE (TREE_OPERAND (exp, 0)) == COMPONENT_REF
          || TREE_CODE (TREE_OPERAND (exp, 0)) == BIT_FIELD_REF
          || TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_REF
          || TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_RANGE_REF)
        goto normal;
    case CONVERT_EXPR:
      /* If we are narrowing the operand, we have to do the compare in the
         narrower mode.  */
      if ((TYPE_PRECISION (TREE_TYPE (exp))
           < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0)))))
        goto normal;
    case NON_LVALUE_EXPR:
    case ABS_EXPR:
    case NEGATE_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      /* These cannot change zero->nonzero or vice versa.  */
      do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label);
      break;

    case BIT_AND_EXPR:
      /* fold_single_bit_test() converts (X & (1 << C)) into (X >> C) & 1.
         See if the former is preferred for jump tests and restore it
         if so.  */
      if (integer_onep (TREE_OPERAND (exp, 1)))
        {
          tree exp0 = TREE_OPERAND (exp, 0);
          rtx set_label, clr_label;

          /* Strip narrowing integral type conversions.  */
          while ((TREE_CODE (exp0) == NOP_EXPR
                  || TREE_CODE (exp0) == CONVERT_EXPR
                  || TREE_CODE (exp0) == NON_LVALUE_EXPR)
                 && TREE_OPERAND (exp0, 0) != error_mark_node
                 && TYPE_PRECISION (TREE_TYPE (exp0))
                    <= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp0, 0))))
            exp0 = TREE_OPERAND (exp0, 0);

          /* "exp0 ^ 1" inverts the sense of the single bit test.  */
          if (TREE_CODE (exp0) == BIT_XOR_EXPR
              && integer_onep (TREE_OPERAND (exp0, 1)))
            {
              exp0 = TREE_OPERAND (exp0, 0);
              clr_label = if_true_label;
              set_label = if_false_label;
            }
          else
            {
              clr_label = if_false_label;
              set_label = if_true_label;
            }

          if (TREE_CODE (exp0) == RSHIFT_EXPR)
            {
              tree arg = TREE_OPERAND (exp0, 0);
              tree shift = TREE_OPERAND (exp0, 1);
              tree argtype = TREE_TYPE (arg);
              if (TREE_CODE (shift) == INTEGER_CST
                  && compare_tree_int (shift, 0) >= 0
                  && compare_tree_int (shift, HOST_BITS_PER_WIDE_INT) < 0
                  && prefer_and_bit_test (TYPE_MODE (argtype),
                                          TREE_INT_CST_LOW (shift)))
                {
                  HOST_WIDE_INT mask = (HOST_WIDE_INT) 1
                                       << TREE_INT_CST_LOW (shift);
                  do_jump (build2 (BIT_AND_EXPR, argtype, arg,
                                   build_int_cst_type (argtype, mask)),
                           clr_label, set_label);
                  break;
                }
            }
        }

      /* If we are AND'ing with a small constant, do this comparison in the
         smallest type that fits.  If the machine doesn't have comparisons
         that small, it will be converted back to the wider comparison.
         This helps if we are testing the sign bit of a narrower object.
         combine can't do this for us because it can't know whether a
         ZERO_EXTRACT or a compare in a smaller mode exists, but we do.  */

      if (! SLOW_BYTE_ACCESS
          && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
          && TYPE_PRECISION (TREE_TYPE (exp)) <= HOST_BITS_PER_WIDE_INT
          && (i = tree_floor_log2 (TREE_OPERAND (exp, 1))) >= 0
          && (mode = mode_for_size (i + 1, MODE_INT, 0)) != BLKmode
          && (type = lang_hooks.types.type_for_mode (mode, 1)) != 0
          && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
          && (cmp_optab->handlers[(int) TYPE_MODE (type)].insn_code
              != CODE_FOR_nothing))
        {
          do_jump (fold_convert (type, exp), if_false_label, if_true_label);
          break;
        }
      goto normal;

    case TRUTH_NOT_EXPR:
      do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label);
      break;

    case COND_EXPR:
      {
        rtx label1 = gen_label_rtx ();
        if (!if_true_label || !if_false_label)
          {
            drop_through_label = gen_label_rtx ();
            if (!if_true_label)
              if_true_label = drop_through_label;
            if (!if_false_label)
              if_false_label = drop_through_label;
          }

        do_pending_stack_adjust ();
        do_jump (TREE_OPERAND (exp, 0), label1, NULL_RTX);
        do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label);
        emit_label (label1);
        do_jump (TREE_OPERAND (exp, 2), if_false_label, if_true_label);
        break;
      }

    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
    case COMPOUND_EXPR:
      /* Lowered by gimplify.c.  */
      gcc_unreachable ();

    case COMPONENT_REF:
    case BIT_FIELD_REF:
    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      {
        HOST_WIDE_INT bitsize, bitpos;
        int unsignedp;
        enum machine_mode mode;
        tree type;
        tree offset;
        int volatilep = 0;

        /* Get description of this reference.  We don't actually care
           about the underlying object here.  */
        get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode,
                             &unsignedp, &volatilep, false);

        type = lang_hooks.types.type_for_size (bitsize, unsignedp);
        if (! SLOW_BYTE_ACCESS
            && type != 0 && bitsize >= 0
            && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
            && (cmp_optab->handlers[(int) TYPE_MODE (type)].insn_code
                != CODE_FOR_nothing))
          {
            do_jump (fold_convert (type, exp), if_false_label, if_true_label);
            break;
          }
        goto normal;
      }

    case EQ_EXPR:
      {
        tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));

        gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
                    != MODE_COMPLEX_FLOAT);
        gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
                    != MODE_COMPLEX_INT);
        
        if (integer_zerop (TREE_OPERAND (exp, 1)))
          do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label);
        else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
                 && !can_compare_p (EQ, TYPE_MODE (inner_type), ccp_jump))
          do_jump_by_parts_equality (exp, if_false_label, if_true_label);
        else
          do_compare_and_jump (exp, EQ, EQ, if_false_label, if_true_label);
        break;
      }

    case MINUS_EXPR:
      /* Nonzero iff operands of minus differ.  */
      exp = build2 (NE_EXPR, TREE_TYPE (exp),
                    TREE_OPERAND (exp, 0),
                    TREE_OPERAND (exp, 1));
      /* FALLTHRU */
    case NE_EXPR:
      {
        tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));

        gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
                    != MODE_COMPLEX_FLOAT);
        gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
                    != MODE_COMPLEX_INT);
        
        if (integer_zerop (TREE_OPERAND (exp, 1)))
          do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label);
        else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
           && !can_compare_p (NE, TYPE_MODE (inner_type), ccp_jump))
          do_jump_by_parts_equality (exp, if_true_label, if_false_label);
        else
          do_compare_and_jump (exp, NE, NE, if_false_label, if_true_label);
        break;
      }

    case LT_EXPR:
      mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (LT, mode, ccp_jump))
        do_jump_by_parts_greater (exp, 1, if_false_label, if_true_label);
      else
        do_compare_and_jump (exp, LT, LTU, if_false_label, if_true_label);
      break;

    case LE_EXPR:
      mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (LE, mode, ccp_jump))
        do_jump_by_parts_greater (exp, 0, if_true_label, if_false_label);
      else
        do_compare_and_jump (exp, LE, LEU, if_false_label, if_true_label);
      break;

    case GT_EXPR:
      mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (GT, mode, ccp_jump))
        do_jump_by_parts_greater (exp, 0, if_false_label, if_true_label);
      else
        do_compare_and_jump (exp, GT, GTU, if_false_label, if_true_label);
      break;

    case GE_EXPR:
      mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (GE, mode, ccp_jump))
        do_jump_by_parts_greater (exp, 1, if_true_label, if_false_label);
      else
        do_compare_and_jump (exp, GE, GEU, if_false_label, if_true_label);
      break;

    case UNORDERED_EXPR:
    case ORDERED_EXPR:
      {
        enum rtx_code cmp, rcmp;
        int do_rev;

        if (code == UNORDERED_EXPR)
          cmp = UNORDERED, rcmp = ORDERED;
        else
          cmp = ORDERED, rcmp = UNORDERED;
        mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));

        do_rev = 0;
        if (! can_compare_p (cmp, mode, ccp_jump)
            && (can_compare_p (rcmp, mode, ccp_jump)
          /* If the target doesn't provide either UNORDERED or ORDERED
             comparisons, canonicalize on UNORDERED for the library.  */
          || rcmp == UNORDERED))
          do_rev = 1;

        if (! do_rev)
          do_compare_and_jump (exp, cmp, cmp, if_false_label, if_true_label);
        else
          do_compare_and_jump (exp, rcmp, rcmp, if_true_label, if_false_label);
      }
      break;

    {
      enum rtx_code rcode1;
      enum tree_code tcode1, tcode2;

      case UNLT_EXPR:
        rcode1 = UNLT;
        tcode1 = UNORDERED_EXPR;
        tcode2 = LT_EXPR;
        goto unordered_bcc;
      case UNLE_EXPR:
        rcode1 = UNLE;
        tcode1 = UNORDERED_EXPR;
        tcode2 = LE_EXPR;
        goto unordered_bcc;
      case UNGT_EXPR:
        rcode1 = UNGT;
        tcode1 = UNORDERED_EXPR;
        tcode2 = GT_EXPR;
        goto unordered_bcc;
      case UNGE_EXPR:
        rcode1 = UNGE;
        tcode1 = UNORDERED_EXPR;
        tcode2 = GE_EXPR;
        goto unordered_bcc;
      case UNEQ_EXPR:
        rcode1 = UNEQ;
        tcode1 = UNORDERED_EXPR;
        tcode2 = EQ_EXPR;
        goto unordered_bcc;
      case LTGT_EXPR:
        /* It is ok for LTGT_EXPR to trap when the result is unordered,
           so expand to (a < b) || (a > b).  */
        rcode1 = LTGT;
        tcode1 = LT_EXPR;
        tcode2 = GT_EXPR;
        goto unordered_bcc;

      unordered_bcc:
        mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
        if (can_compare_p (rcode1, mode, ccp_jump))
          do_compare_and_jump (exp, rcode1, rcode1, if_false_label,
                               if_true_label);
        else
          {
            tree op0 = save_expr (TREE_OPERAND (exp, 0));
            tree op1 = save_expr (TREE_OPERAND (exp, 1));
            tree cmp0, cmp1;

            /* If the target doesn't support combined unordered
               compares, decompose into two comparisons.  */
            if (if_true_label == 0)
              drop_through_label = if_true_label = gen_label_rtx ();
              
            cmp0 = fold_build2 (tcode1, TREE_TYPE (exp), op0, op1);
            cmp1 = fold_build2 (tcode2, TREE_TYPE (exp), op0, op1);
            do_jump (cmp0, 0, if_true_label);
            do_jump (cmp1, if_false_label, if_true_label);
          }
      }
      break;

    case TRUTH_AND_EXPR:
      /* High branch cost, expand as the bitwise AND of the conditions.
         Do the same if the RHS has side effects, because we're effectively
         turning a TRUTH_AND_EXPR into a TRUTH_ANDIF_EXPR.  */
      if (BRANCH_COST >= 4 || TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 1)))
        goto normal;

      if (if_false_label == NULL_RTX)
        {
          drop_through_label = gen_label_rtx ();
          do_jump (TREE_OPERAND (exp, 0), drop_through_label, NULL_RTX);
          do_jump (TREE_OPERAND (exp, 1), NULL_RTX, if_true_label);
        }
      else
        {
          do_jump (TREE_OPERAND (exp, 0), if_false_label, NULL_RTX);
          do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label);
        }
      break;

    case TRUTH_OR_EXPR:
      /* High branch cost, expand as the bitwise OR of the conditions.
         Do the same if the RHS has side effects, because we're effectively
         turning a TRUTH_OR_EXPR into a TRUTH_ORIF_EXPR.  */
      if (BRANCH_COST >= 4 || TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 1)))
        goto normal;

      if (if_true_label == NULL_RTX)
        {
          drop_through_label = gen_label_rtx ();
          do_jump (TREE_OPERAND (exp, 0), NULL_RTX, drop_through_label);
          do_jump (TREE_OPERAND (exp, 1), if_false_label, NULL_RTX);
        }
      else
        {
          do_jump (TREE_OPERAND (exp, 0), NULL_RTX, if_true_label);
          do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label);
        }
      break;

      /* Special case:
          __builtin_expect (<test>, 0)        and
          __builtin_expect (<test>, 1)

         We need to do this here, so that <test> is not converted to a SCC
         operation on machines that use condition code registers and COMPARE
         like the PowerPC, and then the jump is done based on whether the SCC
         operation produced a 1 or 0.  */
    case CALL_EXPR:
      /* Check for a built-in function.  */
      {
        tree fndecl = get_callee_fndecl (exp);
        tree arglist = TREE_OPERAND (exp, 1);

        if (fndecl
            && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
            && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_EXPECT
            && arglist != NULL_TREE
            && TREE_CHAIN (arglist) != NULL_TREE)
          {
            rtx seq = expand_builtin_expect_jump (exp, if_false_label,
                                                  if_true_label);

            if (seq != NULL_RTX)
              {
                emit_insn (seq);
                return;
              }
          }
      }
 
      /* Fall through and generate the normal code.  */
    default:
    normal:
      temp = expand_normal (exp);
      do_pending_stack_adjust ();
      /* The RTL optimizers prefer comparisons against pseudos.  */
      if (GET_CODE (temp) == SUBREG)
        {
          /* Compare promoted variables in their promoted mode.  */
          if (SUBREG_PROMOTED_VAR_P (temp)
              && REG_P (XEXP (temp, 0)))
            temp = XEXP (temp, 0);
          else
            temp = copy_to_reg (temp);
        }
      do_compare_rtx_and_jump (temp, CONST0_RTX (GET_MODE (temp)),
                               NE, TYPE_UNSIGNED (TREE_TYPE (exp)),
                               GET_MODE (temp), NULL_RTX,
                               if_false_label, if_true_label);
    }

  if (drop_through_label)
    {
      do_pending_stack_adjust ();
      emit_label (drop_through_label);
    }
}
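
A hedged call sketch (cond is a hypothetical boolean tree, label an rtx code label): evaluate cond and jump to label when it is nonzero, falling through otherwise.

do_jump (cond, NULL_RTX, label);
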
Code example #11
File: dojump.c  Project: VincentLorquet/gcc
void
do_jump_1 (enum tree_code code, tree op0, tree op1,
	   rtx if_false_label, rtx if_true_label, int prob)
{
  enum machine_mode mode;
  rtx drop_through_label = 0;

  switch (code)
    {
    case EQ_EXPR:
      {
        tree inner_type = TREE_TYPE (op0);

        gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
		    != MODE_COMPLEX_FLOAT);
	gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
		    != MODE_COMPLEX_INT);

        if (integer_zerop (op1))
	  do_jump (op0, if_true_label, if_false_label, inv (prob));
        else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
                 && !can_compare_p (EQ, TYPE_MODE (inner_type), ccp_jump))
	  do_jump_by_parts_equality (op0, op1, if_false_label, if_true_label,
				     prob);
        else
	  do_compare_and_jump (op0, op1, EQ, EQ, if_false_label, if_true_label,
			       prob);
        break;
      }

    case NE_EXPR:
      {
        tree inner_type = TREE_TYPE (op0);

        gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
		    != MODE_COMPLEX_FLOAT);
	gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
		    != MODE_COMPLEX_INT);

        if (integer_zerop (op1))
	  do_jump (op0, if_false_label, if_true_label, prob);
        else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
           && !can_compare_p (NE, TYPE_MODE (inner_type), ccp_jump))
	  do_jump_by_parts_equality (op0, op1, if_true_label, if_false_label,
				     inv (prob));
        else
	  do_compare_and_jump (op0, op1, NE, NE, if_false_label, if_true_label,
			       prob);
        break;
      }

    case LT_EXPR:
      mode = TYPE_MODE (TREE_TYPE (op0));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (LT, mode, ccp_jump))
	do_jump_by_parts_greater (op0, op1, 1, if_false_label, if_true_label,
				  prob);
      else
	do_compare_and_jump (op0, op1, LT, LTU, if_false_label, if_true_label,
			     prob);
      break;

    case LE_EXPR:
      mode = TYPE_MODE (TREE_TYPE (op0));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (LE, mode, ccp_jump))
	do_jump_by_parts_greater (op0, op1, 0, if_true_label, if_false_label,
				  inv (prob));
      else
	do_compare_and_jump (op0, op1, LE, LEU, if_false_label, if_true_label,
			     prob);
      break;

    case GT_EXPR:
      mode = TYPE_MODE (TREE_TYPE (op0));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (GT, mode, ccp_jump))
	do_jump_by_parts_greater (op0, op1, 0, if_false_label, if_true_label,
				  prob);
      else
	do_compare_and_jump (op0, op1, GT, GTU, if_false_label, if_true_label,
			     prob);
      break;

    case GE_EXPR:
      mode = TYPE_MODE (TREE_TYPE (op0));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (GE, mode, ccp_jump))
	do_jump_by_parts_greater (op0, op1, 1, if_true_label, if_false_label,
				  inv (prob));
      else
	do_compare_and_jump (op0, op1, GE, GEU, if_false_label, if_true_label,
			     prob);
      break;

    case ORDERED_EXPR:
      do_compare_and_jump (op0, op1, ORDERED, ORDERED,
			   if_false_label, if_true_label, prob);
      break;

    case UNORDERED_EXPR:
      do_compare_and_jump (op0, op1, UNORDERED, UNORDERED,
			   if_false_label, if_true_label, prob);
      break;

    case UNLT_EXPR:
      do_compare_and_jump (op0, op1, UNLT, UNLT, if_false_label, if_true_label,
			   prob);
      break;

    case UNLE_EXPR:
      do_compare_and_jump (op0, op1, UNLE, UNLE, if_false_label, if_true_label,
			   prob);
      break;

    case UNGT_EXPR:
      do_compare_and_jump (op0, op1, UNGT, UNGT, if_false_label, if_true_label,
			   prob);
      break;

    case UNGE_EXPR:
      do_compare_and_jump (op0, op1, UNGE, UNGE, if_false_label, if_true_label,
			   prob);
      break;

    case UNEQ_EXPR:
      do_compare_and_jump (op0, op1, UNEQ, UNEQ, if_false_label, if_true_label,
			   prob);
      break;

    case LTGT_EXPR:
      do_compare_and_jump (op0, op1, LTGT, LTGT, if_false_label, if_true_label,
			   prob);
      break;

    case TRUTH_ANDIF_EXPR:
      {
        /* Spread the probability that the expression is false evenly between
           the two conditions. So the first condition is false half the total
           probability of being false. The second condition is false the other
           half of the total probability of being false, so its jump has a false
           probability of half the total, relative to the probability we
           reached it (i.e. the first condition was true).  */
        int op0_prob = -1;
        int op1_prob = -1;
        if (prob != -1)
          {
            int false_prob = inv (prob);
            int op0_false_prob = false_prob / 2;
            int op1_false_prob = GCOV_COMPUTE_SCALE ((false_prob / 2),
                                                     inv (op0_false_prob));
            /* Get the probability that each jump below is true.  */
            op0_prob = inv (op0_false_prob);
            op1_prob = inv (op1_false_prob);
          }
        if (if_false_label == NULL_RTX)
          {
            drop_through_label = gen_label_rtx ();
            do_jump (op0, drop_through_label, NULL_RTX, op0_prob);
            do_jump (op1, NULL_RTX, if_true_label, op1_prob);
          }
        else
          {
            do_jump (op0, if_false_label, NULL_RTX, op0_prob);
            do_jump (op1, if_false_label, if_true_label, op1_prob);
          }
        break;
      }

    case TRUTH_ORIF_EXPR:
      {
        /* Spread the probability evenly between the two conditions. So
           the first condition has half the total probability of being true.
           The second condition has the other half of the total probability,
           so its jump has a probability of half the total, relative to
           the probability we reached it (i.e. the first condition was false).  */
        int op0_prob = -1;
        int op1_prob = -1;
        if (prob != -1)
          {
            op0_prob = prob / 2;
            op1_prob = GCOV_COMPUTE_SCALE ((prob / 2), inv (op0_prob));
          }
        if (if_true_label == NULL_RTX)
          {
            drop_through_label = gen_label_rtx ();
            do_jump (op0, NULL_RTX, drop_through_label, op0_prob);
            do_jump (op1, if_false_label, NULL_RTX, op1_prob);
          }
        else
          {
            do_jump (op0, NULL_RTX, if_true_label, op0_prob);
            do_jump (op1, if_false_label, if_true_label, op1_prob);
          }
        break;
      }

    default:
      gcc_unreachable ();
    }

  if (drop_through_label)
    {
      do_pending_stack_adjust ();
      emit_label (drop_through_label);
    }
}
Code example #12
void
vax_notice_update_cc (rtx exp, rtx insn ATTRIBUTE_UNUSED)
{
  if (GET_CODE (exp) == SET)
    {
      if (GET_CODE (SET_SRC (exp)) == CALL)
	CC_STATUS_INIT;
      else if (GET_CODE (SET_DEST (exp)) != ZERO_EXTRACT
	       && GET_CODE (SET_DEST (exp)) != PC)
	{
	  cc_status.flags = 0;
	  /* The integer operations below don't set carry or
	     set it in an incompatible way.  That's ok though
	     as the Z bit is all we need when doing unsigned
	     comparisons on the result of these insns (since
	     they're always with 0).  Set CC_NO_OVERFLOW to
	     generate the correct unsigned branches.  */
	  switch (GET_CODE (SET_SRC (exp)))
	    {
	    case NEG:
	      if (GET_MODE_CLASS (GET_MODE (exp)) == MODE_FLOAT)
		break;
	    case AND:
	    case IOR:
	    case XOR:
	    case NOT:
	    case MEM:
	    case REG:
	      cc_status.flags = CC_NO_OVERFLOW;
	      break;
	    default:
	      break;
	    }
	  cc_status.value1 = SET_DEST (exp);
	  cc_status.value2 = SET_SRC (exp);
	}
    }
  else if (GET_CODE (exp) == PARALLEL
	   && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
    {
      if (GET_CODE (SET_SRC (XVECEXP (exp, 0, 0))) == CALL)
	CC_STATUS_INIT;
      else if (GET_CODE (SET_DEST (XVECEXP (exp, 0, 0))) != PC)
	{
	  cc_status.flags = 0;
	  cc_status.value1 = SET_DEST (XVECEXP (exp, 0, 0));
	  cc_status.value2 = SET_SRC (XVECEXP (exp, 0, 0));
	}
      else
	/* PARALLELs whose first element sets the PC are aob,
	   sob insns.  They do change the cc's.  */
	CC_STATUS_INIT;
    }
  else
    CC_STATUS_INIT;
  if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
      && cc_status.value2
      && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
    cc_status.value2 = 0;
  if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM
      && cc_status.value2
      && GET_CODE (cc_status.value2) == MEM)
    cc_status.value2 = 0;
  /* Actual condition, one line up, should be that value2's address
     depends on value1, but that is too much of a pain.  */
}
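
On cc0 targets of this era the function is reached through the NOTICE_UPDATE_CC macro; a minimal sketch of the assumed vax.h wiring (not shown in this snippet):

#define NOTICE_UPDATE_CC(EXP, INSN) vax_notice_update_cc (EXP, INSN)
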
Code example #13
static bool
vax_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  enum machine_mode mode = GET_MODE (x);
  int i = 0;				   /* may be modified in switch */
  const char *fmt = GET_RTX_FORMAT (code); /* may be modified in switch */

  switch (code)
    {
      /* On a VAX, constants from 0..63 are cheap because they can use the
	 1 byte literal constant format.  Compare to -1 should be made cheap
	 so that decrement-and-branch insns can be formed more easily (if
	 the value -1 is copied to a register some decrement-and-branch
	 patterns will not match).  */
    case CONST_INT:
      if (INTVAL (x) == 0)
	return true;
      if (outer_code == AND)
	{
          *total = ((unsigned HOST_WIDE_INT) ~INTVAL (x) <= 077) ? 1 : 2;
	  return true;
	}
      if ((unsigned HOST_WIDE_INT) INTVAL (x) <= 077
	  || (outer_code == COMPARE
	      && INTVAL (x) == -1)
	  || ((outer_code == PLUS || outer_code == MINUS)
	      && (unsigned HOST_WIDE_INT) -INTVAL (x) <= 077))
	{
	  *total = 1;
	  return true;
	}
      /* FALLTHRU */

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 3;
      return true;

    case CONST_DOUBLE:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = vax_float_literal (x) ? 5 : 8;
      else
        *total = ((CONST_DOUBLE_HIGH (x) == 0
		   && (unsigned HOST_WIDE_INT) CONST_DOUBLE_LOW (x) < 64)
		  || (outer_code == PLUS
		      && CONST_DOUBLE_HIGH (x) == -1
		      && (unsigned HOST_WIDE_INT)-CONST_DOUBLE_LOW (x) < 64))
		 ? 2 : 5;
      return true;

    case POST_INC:
      *total = 2;
      return true;		/* Implies register operand.  */

    case PRE_DEC:
      *total = 3;
      return true;		/* Implies register operand.  */

    case MULT:
      switch (mode)
	{
	case DFmode:
	  *total = 16;		/* 4 on VAX 9000 */
	  break;
	case SFmode:
	  *total = 9;		/* 4 on VAX 9000, 12 on VAX 2 */
	  break;
	case DImode:
	  *total = 16;		/* 6 on VAX 9000, 28 on VAX 2 */
	  break;
	case SImode:
	case HImode:
	case QImode:
	  *total = 10;		/* 3-4 on VAX 9000, 20-28 on VAX 2 */
	  break;
	default:
	  *total = MAX_COST;	/* Mode is not supported.  */
	  return true;
	}
      break;

    case UDIV:
      if (mode != SImode)
	{
	  *total = MAX_COST;	/* Mode is not supported.  */
	  return true;
	}
      *total = 17;
      break;

    case DIV:
      if (mode == DImode)
	*total = 30;		/* Highly variable.  */
      else if (mode == DFmode)
	/* divide takes 28 cycles if the result is not zero, 13 otherwise */
	*total = 24;
      else
	*total = 11;		/* 25 on VAX 2 */
      break;

    case MOD:
      *total = 23;
      break;

    case UMOD:
      if (mode != SImode)
	{
	  *total = MAX_COST;	/* Mode is not supported.  */
	  return true;
	}
      *total = 29;
      break;

    case FLOAT:
      *total = (6		/* 4 on VAX 9000 */
		+ (mode == DFmode) + (GET_MODE (XEXP (x, 0)) != SImode));
      break;

    case FIX:
      *total = 7;		/* 17 on VAX 2 */
      break;

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
      if (mode == DImode)
	*total = 12;
      else
	*total = 10;		/* 6 on VAX 9000 */
      break;

    case ROTATE:
    case ROTATERT:
      *total = 6;		/* 5 on VAX 2, 4 on VAX 9000 */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	fmt = "e"; 		/* all constant rotate counts are short */
      break;

    case PLUS:
    case MINUS:
      *total = (mode == DFmode) ? 13 : 8; /* 6/8 on VAX 9000, 16/15 on VAX 2 */
      /* Small integer operands can use subl2 and addl2.  */
      if ((GET_CODE (XEXP (x, 1)) == CONST_INT)
	  && (unsigned HOST_WIDE_INT)(INTVAL (XEXP (x, 1)) + 63) < 127)
	fmt = "e";
      break;

    case IOR:
    case XOR:
      *total = 3;
      break;

    case AND:
      /* AND is special because the first operand is complemented.  */
      *total = 3;
      if (GET_CODE (XEXP (x, 0)) == CONST_INT)
	{
	  if ((unsigned HOST_WIDE_INT)~INTVAL (XEXP (x, 0)) > 63)
	    *total = 4;
	  fmt = "e";
	  i = 1;
	}
      break;

    case NEG:
      if (mode == DFmode)
	*total = 9;
      else if (mode == SFmode)
	*total = 6;
      else if (mode == DImode)
	*total = 4;
      else
	*total = 2;
      break;

    case NOT:
      *total = 2;
      break;

    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      *total = 15;
      break;

    case MEM:
      if (mode == DImode || mode == DFmode)
	*total = 5;		/* 7 on VAX 2 */
      else
	*total = 3;		/* 4 on VAX 2 */
      x = XEXP (x, 0);
      if (GET_CODE (x) != REG && GET_CODE (x) != POST_INC)
	*total += vax_address_cost_1 (x);
      return true;

    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
    case TRUNCATE:
      *total = 3;		/* FIXME: Costs need to be checked  */
      break;

    default:
      return false;
    }

  /* Now look inside the expression.  Operands which are not registers or
     short constants add to the cost.

     FMT and I may have been adjusted in the switch above for instructions
     which require special handling.  */

  while (*fmt++ == 'e')
    {
      rtx op = XEXP (x, i);

      i += 1;
      code = GET_CODE (op);

      /* A NOT is likely to be found as the first operand of an AND
	 (in which case the relevant cost is of the operand inside
	 the not) and not likely to be found anywhere else.  */
      if (code == NOT)
	op = XEXP (op, 0), code = GET_CODE (op);

      switch (code)
	{
	case CONST_INT:
	  if ((unsigned HOST_WIDE_INT)INTVAL (op) > 63
	      && GET_MODE (x) != QImode)
	    *total += 1;	/* 2 on VAX 2 */
	  break;
	case CONST:
	case LABEL_REF:
	case SYMBOL_REF:
	  *total += 1;		/* 2 on VAX 2 */
	  break;
	case CONST_DOUBLE:
	  if (GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT)
	    {
	      /* Registers are faster than floating point constants -- even
		 those constants which can be encoded in a single byte.  */
	      if (vax_float_literal (op))
		*total += 1;
	      else
		*total += (GET_MODE (x) == DFmode) ? 3 : 2;
	    }
	  else
	    {
	      if (CONST_DOUBLE_HIGH (op) != 0
		  || (unsigned)CONST_DOUBLE_LOW (op) > 63)
		*total += 2;
	    }
	  break;
	case MEM:
	  *total += 1;		/* 2 on VAX 2 */
	  if (GET_CODE (XEXP (op, 0)) != REG)
	    *total += vax_address_cost_1 (XEXP (op, 0));
	  break;
	case REG:
	case SUBREG:
	  break;
	default:
	  *total += 1;
	  break;
	}
    }
  return true;
}
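
A worked cost example (hypothetical operand):

/* For (plus:SI (reg:SI 1) (const_int 200)): the switch sets *total = 8;
   the small-operand test fails (200 + 63 >= 127), so both operands are
   walked.  The REG adds nothing, and the CONST_INT adds 1 because 200
   does not fit the 6-bit short literal form -- final cost 9.  */
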
Code example #14
static rtx
may_unswitch_on (basic_block bb, struct loop *loop, rtx *cinsn)
{
  rtx test, at, op[2], stest;
  struct rtx_iv iv;
  unsigned i;
  enum machine_mode mode;

  /* BB must end in a simple conditional jump.  */
  if (EDGE_COUNT (bb->succs) != 2)
    return NULL_RTX;
  if (!any_condjump_p (BB_END (bb)))
    return NULL_RTX;

  /* Both branch targets must lie inside the loop.  */
  if (!flow_bb_inside_loop_p (loop, EDGE_SUCC (bb, 0)->dest)
      || !flow_bb_inside_loop_p (loop, EDGE_SUCC (bb, 1)->dest))
    return NULL_RTX;

  /* It must be executed just once each iteration (because otherwise we
     are unable to update dominator/irreducible loop information correctly).  */
  if (!just_once_each_iteration_p (loop, bb))
    return NULL_RTX;

  /* Condition must be invariant.  */
  test = get_condition (BB_END (bb), &at, true, false);
  if (!test)
    return NULL_RTX;

  for (i = 0; i < 2; i++)
    {
      op[i] = XEXP (test, i);

      if (CONSTANT_P (op[i]))
	continue;

      if (!iv_analyze (at, op[i], &iv))
	return NULL_RTX;
      if (iv.step != const0_rtx
	  || iv.first_special)
	return NULL_RTX;

      op[i] = get_iv_value (&iv, const0_rtx);
    }

  mode = GET_MODE (op[0]);
  if (mode == VOIDmode)
    mode = GET_MODE (op[1]);
  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      if (at != BB_END (bb))
	return NULL_RTX;

      if (!rtx_equal_p (op[0], XEXP (test, 0))
	  || !rtx_equal_p (op[1], XEXP (test, 1)))
	return NULL_RTX;

      *cinsn = BB_END (bb);
      return test;
    }

  stest = simplify_gen_relational (GET_CODE (test), SImode,
				   mode, op[0], op[1]);
  if (stest == const0_rtx
      || stest == const_true_rtx)
    return stest;

  return canon_condition (gen_rtx_fmt_ee (GET_CODE (test), SImode,
					  op[0], op[1]));
}
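
A hedged illustration (hypothetical loop): a test on a loop-invariant pseudo qualifies because iv_analyze reports a zero step.

/* For "if (flag > 0)" inside the loop body, where flag is assigned
   before the loop, both operands analyze as invariant
   (iv.step == const0_rtx), so the function returns the canonicalized
   SImode condition built from (gt (reg:SI flag) (const_int 0)) as an
   unswitching candidate.  */
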
Code example #15
File: dojump.c  Project: WojciechMigda/gcc
void
do_compare_rtx_and_jump (rtx op0, rtx op1, enum rtx_code code, int unsignedp,
			 machine_mode mode, rtx size,
			 rtx_code_label *if_false_label,
			 rtx_code_label *if_true_label, int prob)
{
  rtx tem;
  rtx_code_label *dummy_label = NULL;

  /* Reverse the comparison if that is safe and we want to jump if it is
     false.  Also convert to the reverse comparison if the target can
     implement it.  */
  if ((! if_true_label
       || ! can_compare_p (code, mode, ccp_jump))
      && (! FLOAT_MODE_P (mode)
	  || code == ORDERED || code == UNORDERED
	  || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
	  || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
    {
      enum rtx_code rcode;
      if (FLOAT_MODE_P (mode))
        rcode = reverse_condition_maybe_unordered (code);
      else
        rcode = reverse_condition (code);

      /* Canonicalize to UNORDERED for the libcall.  */
      if (can_compare_p (rcode, mode, ccp_jump)
	  || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
	{
	  std::swap (if_true_label, if_false_label);
	  code = rcode;
	  prob = inv (prob);
	}
    }

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  do_pending_stack_adjust ();

  code = unsignedp ? unsigned_condition (code) : code;
  if (0 != (tem = simplify_relational_operation (code, mode, VOIDmode,
						 op0, op1)))
    {
      if (CONSTANT_P (tem))
	{
	  rtx_code_label *label = (tem == const0_rtx
				   || tem == CONST0_RTX (mode))
					? if_false_label : if_true_label;
	  if (label)
	    emit_jump (label);
	  return;
	}

      code = GET_CODE (tem);
      mode = GET_MODE (tem);
      op0 = XEXP (tem, 0);
      op1 = XEXP (tem, 1);
      unsignedp = (code == GTU || code == LTU || code == GEU || code == LEU);
    }

  if (! if_true_label)
    dummy_label = if_true_label = gen_label_rtx ();

  if (GET_MODE_CLASS (mode) == MODE_INT
      && ! can_compare_p (code, mode, ccp_jump))
    {
      switch (code)
	{
	case LTU:
	  do_jump_by_parts_greater_rtx (mode, 1, op1, op0,
					if_false_label, if_true_label, prob);
	  break;

	case LEU:
	  do_jump_by_parts_greater_rtx (mode, 1, op0, op1,
					if_true_label, if_false_label,
					inv (prob));
	  break;

	case GTU:
	  do_jump_by_parts_greater_rtx (mode, 1, op0, op1,
					if_false_label, if_true_label, prob);
	  break;

	case GEU:
	  do_jump_by_parts_greater_rtx (mode, 1, op1, op0,
					if_true_label, if_false_label,
					inv (prob));
	  break;

	case LT:
	  do_jump_by_parts_greater_rtx (mode, 0, op1, op0,
					if_false_label, if_true_label, prob);
	  break;

	case LE:
	  do_jump_by_parts_greater_rtx (mode, 0, op0, op1,
					if_true_label, if_false_label,
					inv (prob));
	  break;

	case GT:
	  do_jump_by_parts_greater_rtx (mode, 0, op0, op1,
					if_false_label, if_true_label, prob);
	  break;

	case GE:
	  do_jump_by_parts_greater_rtx (mode, 0, op1, op0,
					if_true_label, if_false_label,
					inv (prob));
	  break;

	case EQ:
	  do_jump_by_parts_equality_rtx (mode, op0, op1, if_false_label,
					 if_true_label, prob);
	  break;

	case NE:
	  do_jump_by_parts_equality_rtx (mode, op0, op1, if_true_label,
					 if_false_label, inv (prob));
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      if (SCALAR_FLOAT_MODE_P (mode)
	  && ! can_compare_p (code, mode, ccp_jump)
	  && can_compare_p (swap_condition (code), mode, ccp_jump))
	{
	  code = swap_condition (code);
	  std::swap (op0, op1);
	}
      else if (SCALAR_FLOAT_MODE_P (mode)
	       && ! can_compare_p (code, mode, ccp_jump)
	       /* Never split ORDERED and UNORDERED.
		  These must be implemented.  */
	       && (code != ORDERED && code != UNORDERED)
               /* Split a floating-point comparison if
		  we can jump on other conditions...  */
	       && (have_insn_for (COMPARE, mode)
	           /* ... or if there is no libcall for it.  */
	           || code_to_optab (code) == unknown_optab))
        {
	  enum rtx_code first_code;
	  bool and_them = split_comparison (code, mode, &first_code, &code);

	  /* If there are no NaNs, the first comparison should always fall
	     through.  */
	  if (!HONOR_NANS (mode))
	    gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));

	  else
	    {
	      int first_prob = prob;
	      if (first_code == UNORDERED)
		first_prob = REG_BR_PROB_BASE / 100;
	      else if (first_code == ORDERED)
		first_prob = REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100;
	      if (and_them)
		{
		  rtx_code_label *dest_label;
		  /* If we only jump if true, just bypass the second jump.  */
		  if (! if_false_label)
		    {
		      if (! dummy_label)
		        dummy_label = gen_label_rtx ();
		      dest_label = dummy_label;
		    }
		  else
		    dest_label = if_false_label;
                  do_compare_rtx_and_jump (op0, op1, first_code, unsignedp, mode,
					   size, dest_label, NULL, first_prob);
		}
              else
                do_compare_rtx_and_jump (op0, op1, first_code, unsignedp, mode,
					 size, NULL, if_true_label, first_prob);
	    }
	}

      emit_cmp_and_jump_insns (op0, op1, code, size, mode, unsignedp,
			       if_true_label, prob);
    }

  if (if_false_label)
    emit_jump (if_false_label);
  if (dummy_label)
    emit_label (dummy_label);
}
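
The fragment above falls back to do_jump_by_parts_greater_rtx when the
target has no conditional-jump pattern for the whole integer mode: the
comparison is emitted one word at a time, most significant word first,
and only that leading word uses the signed flavor of the condition (the
0/1 second argument in the calls above); the remaining words always
compare unsigned.  A minimal standalone sketch of the same idea in
plain C (struct dword, dword_gtu, and the 32-bit word width are
illustrative assumptions, not GCC internals):

#include <stdint.h>
#include <stdio.h>

/* Model a double-word value as two 32-bit words.  */
struct dword { uint32_t hi, lo; };

/* Unsigned A > B, most significant word first: the high words decide
   unless they are equal, in which case the low words decide.  A
   signed version would compare only the high words as signed; lower
   words are always compared unsigned.  */
static int
dword_gtu (struct dword a, struct dword b)
{
  if (a.hi != b.hi)
    return a.hi > b.hi;  /* like jumping to if_true/if_false_label */
  return a.lo > b.lo;    /* high words equal: the last word decides */
}

int
main (void)
{
  struct dword a = { 1, 0 };            /* 2^32     */
  struct dword b = { 0, 0xffffffffu };  /* 2^32 - 1 */
  printf ("%d\n", dword_gtu (a, b));    /* prints 1 */
  return 0;
}

The LEU/GEU/LE/GE cases above reuse the same helper with the operands
swapped and the label roles inverted, which is why they pass inv (prob)
as the branch probability.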
Code example #16
0
File: mode-switching.c Project: groessler/gcc-6502
static basic_block
create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
{
  edge eg;
  edge_iterator ei;
  basic_block pre_exit;

  /* The only non-call predecessor at this stage is a block with a
     fallthrough edge; there can be at most one, but there could be
     none at all, e.g. when exit is called.  */
  pre_exit = 0;
  FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    if (eg->flags & EDGE_FALLTHRU)
      {
	basic_block src_bb = eg->src;
	rtx_insn *last_insn;
	rtx ret_reg;

	gcc_assert (!pre_exit);
	/* If this function returns a value at the end, we have to
	   insert the final mode switch before the return value copy
	   to its hard register.  */
	if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
	    && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
	    && GET_CODE (PATTERN (last_insn)) == USE
	    && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
	  {
	    int ret_start = REGNO (ret_reg);
	    int nregs = hard_regno_nregs[ret_start][GET_MODE (ret_reg)];
	    int ret_end = ret_start + nregs;
	    bool short_block = false;
	    bool multi_reg_return = false;
	    bool forced_late_switch = false;
	    rtx_insn *before_return_copy;

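	    /* Scan backwards from the end of the block looking for the
	       insns that copy the return value into its hard registers,
	       counting how many of those registers each copy covers.  */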
	    do
	      {
		rtx_insn *return_copy = PREV_INSN (last_insn);
		rtx return_copy_pat, copy_reg;
		int copy_start, copy_num;
		int j;

		if (NONDEBUG_INSN_P (return_copy))
		  {
		    /* When using SJLJ exceptions, the call to the
		       unregister function is inserted between the
		       clobber of the return value and the copy.
		       We do not want to split the block before this
		       or any other call; if we have not found the
		       copy yet, the copy must have been deleted.  */
		    if (CALL_P (return_copy))
		      {
			short_block = true;
			break;
		      }
		    return_copy_pat = PATTERN (return_copy);
		    switch (GET_CODE (return_copy_pat))
		      {
		      case USE:
			/* Skip USEs of multiple return registers.
			   __builtin_apply pattern is also handled here.  */
			if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
			    && (targetm.calls.function_value_regno_p
				(REGNO (XEXP (return_copy_pat, 0)))))
			  {
			    multi_reg_return = true;
			    last_insn = return_copy;
			    continue;
			  }
			break;

		      case ASM_OPERANDS:
			/* Skip barrier insns.  */
			if (!MEM_VOLATILE_P (return_copy_pat))
			  break;

			/* Fall through.  */

		      case ASM_INPUT:
		      case UNSPEC_VOLATILE:
			last_insn = return_copy;
			continue;

		      default:
			break;
		      }

		    /* If the return register is not (in its entirety)
		       likely spilled, the return copy might be
		       partially or completely optimized away.  */
		    return_copy_pat = single_set (return_copy);
		    if (!return_copy_pat)
		      {
			return_copy_pat = PATTERN (return_copy);
			if (GET_CODE (return_copy_pat) != CLOBBER)
			  break;
			else if (!optimize)
			  {
			    /* This might be (clobber (reg [<result>]))
			       when not optimizing.  Then check if
			       the previous insn is the clobber for
			       the return register.  */
			    copy_reg = SET_DEST (return_copy_pat);
			    if (GET_CODE (copy_reg) == REG
				&& !HARD_REGISTER_NUM_P (REGNO (copy_reg)))
			      {
				if (INSN_P (PREV_INSN (return_copy)))
				  {
				    return_copy = PREV_INSN (return_copy);
				    return_copy_pat = PATTERN (return_copy);
				    if (GET_CODE (return_copy_pat) != CLOBBER)
				      break;
				  }
			      }
			  }
		      }
		    copy_reg = SET_DEST (return_copy_pat);
		    if (GET_CODE (copy_reg) == REG)
		      copy_start = REGNO (copy_reg);
		    else if (GET_CODE (copy_reg) == SUBREG
			     && GET_CODE (SUBREG_REG (copy_reg)) == REG)
		      copy_start = REGNO (SUBREG_REG (copy_reg));
		    else
		      {
			/* When control reaches the end of a non-void
			   function, there are no return copy insns at
			   all.  This check avoids an ICE on such an
			   invalid function.  */
			if (ret_start + nregs == ret_end)
			  short_block = true;
			break;
		      }
		    if (!targetm.calls.function_value_regno_p (copy_start))
		      copy_num = 0;
		    else
		      copy_num
			= hard_regno_nregs[copy_start][GET_MODE (copy_reg)];

		    /* If the return register is not likely spilled (as
		       is the case for floating point on SH4), it might
		       be set by an arithmetic operation that needs a
		       different mode than the exit block.  */
		    for (j = n_entities - 1; j >= 0; j--)
		      {
			int e = entity_map[j];
			int mode =
			  targetm.mode_switching.needed (e, return_copy);

			if (mode != num_modes[e]
			    && mode != targetm.mode_switching.exit (e))
			  break;
		      }
		    if (j >= 0)
		      {
			/* __builtin_return emits a sequence of loads to all
			   return registers.  One of them might require
			   another mode than MODE_EXIT, even if it is
			   unrelated to the return value, so we want to put
			   the final mode switch after it.  */
			if (multi_reg_return
			    && targetm.calls.function_value_regno_p
			        (copy_start))
			  forced_late_switch = true;

			/* For the SH4, floating point loads depend on fpscr,
			   thus we might need to put the final mode switch
			   after the return value copy.  That is still OK,
			   because a floating point return value does not
			   conflict with address reloads.  */
			if (copy_start >= ret_start
			    && copy_start + copy_num <= ret_end
			    && OBJECT_P (SET_SRC (return_copy_pat)))
			  forced_late_switch = true;
			break;
		      }
		    if (copy_num == 0)
		      {
			last_insn = return_copy;
			continue;
		      }

		    if (copy_start >= ret_start
			&& copy_start + copy_num <= ret_end)
		      nregs -= copy_num;
		    else if (!multi_reg_return
			     || !targetm.calls.function_value_regno_p
				 (copy_start))
		      break;
		    last_insn = return_copy;
		  }
		/* ??? Exception handling can lead to the return value
		   copy being already separated from the return value use,
		   as in unwind-dw2.c.
		   Similarly, conditionally returning without a value,
		   and conditionally using builtin_return can lead to an
		   isolated use.  */
		if (return_copy == BB_HEAD (src_bb))
		  {
		    short_block = true;
		    break;
		  }
		last_insn = return_copy;
	      }
	    while (nregs);

	    /* If we didn't see a full return value copy, verify that there
	       is a plausible reason for this.  If some, but not all of the
	       return register is likely spilled, we can expect that there
	       is a copy for the likely spilled part.  */
	    gcc_assert (!nregs
			|| forced_late_switch
			|| short_block
			|| !(targetm.class_likely_spilled_p
			     (REGNO_REG_CLASS (ret_start)))
			|| (nregs
			    != hard_regno_nregs[ret_start][GET_MODE (ret_reg)])
			/* For multi-hard-register floating point
			   values, sometimes the likely-spilled part
			   is ordinarily copied first, then the other
			   part is set with an arithmetic operation.
			   This doesn't actually cause reload
			   failures, so let it pass.  */
			|| (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
			    && nregs != 1));

	    if (!NOTE_INSN_BASIC_BLOCK_P (last_insn))
	      {
		before_return_copy
		  = emit_note_before (NOTE_INSN_DELETED, last_insn);
		/* Instructions preceding LAST_INSN in the same block might
		   require a different mode than MODE_EXIT, so if we might
		   have such instructions, keep them in a separate block
		   from pre_exit.  */
		src_bb = split_block (src_bb,
				      PREV_INSN (before_return_copy))->dest;
	      }
	    else
	      before_return_copy = last_insn;
	    pre_exit = split_block (src_bb, before_return_copy)->src;
	  }
	else
	  {
	    pre_exit = split_edge (eg);
	  }
      }

  return pre_exit;
}
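
The backward scan in create_pre_exit is, at its core, interval
bookkeeping: nregs starts as the width of the return value in hard
registers, every copy insn whose destination falls inside
[ret_start, ret_end) subtracts its width, and the scan stops once the
whole value is covered (or one of the documented escape reasons
applies).  A toy model of just that accounting, assuming a flat array
of copies instead of GCC's insn chain (all names here are
illustrative):

#include <stdio.h>

/* One register copy: destination REGNO and width in hard regs.  */
struct copy { int start, num; };

/* Walk the copies from last to first, as the real scan walks
   PREV_INSN, until the return value's registers are all covered.
   Return the index of the earliest copy that is still part of the
   return value, or -1 if the value was never fully covered.  */
static int
first_return_copy (const struct copy *copies, int n,
                   int ret_start, int ret_end)
{
  int nregs = ret_end - ret_start;
  int i;

  for (i = n - 1; i >= 0 && nregs > 0; i--)
    if (copies[i].start >= ret_start
        && copies[i].start + copies[i].num <= ret_end)
      nregs -= copies[i].num;  /* this copy covers part of the value */
  return nregs == 0 ? i + 1 : -1;
}

int
main (void)
{
  /* A DImode value in regs 0..1, copied in two SImode halves that
     follow one unrelated copy into reg 5.  */
  struct copy insns[] = { { 5, 1 }, { 0, 1 }, { 1, 1 } };
  printf ("%d\n", first_return_copy (insns, 3, 0, 2)); /* prints 1 */
  return 0;
}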
Code example #17
0
File: dojump.c Project: Fokycnuk/gcc
void
do_jump (tree exp, rtx if_false_label, rtx if_true_label)
{
  enum tree_code code = TREE_CODE (exp);
  /* Some cases need to create a label to jump to
     in order to properly fall through.
     These cases set DROP_THROUGH_LABEL nonzero.  */
  rtx drop_through_label = 0;
  rtx temp;
  int i;
  tree type;
  enum machine_mode mode;

  emit_queue ();

  switch (code)
    {
    case ERROR_MARK:
      break;

    case INTEGER_CST:
      temp = integer_zerop (exp) ? if_false_label : if_true_label;
      if (temp)
        emit_jump (temp);
      break;

#if 0
      /* This is not true with #pragma weak  */
    case ADDR_EXPR:
      /* The address of something can never be zero.  */
      if (if_true_label)
        emit_jump (if_true_label);
      break;
#endif

    case UNSAVE_EXPR:
      do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label);
      TREE_OPERAND (exp, 0)
	= (*lang_hooks.unsave_expr_now) (TREE_OPERAND (exp, 0));
      break;

    case NOP_EXPR:
      if (TREE_CODE (TREE_OPERAND (exp, 0)) == COMPONENT_REF
          || TREE_CODE (TREE_OPERAND (exp, 0)) == BIT_FIELD_REF
          || TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_REF
          || TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_RANGE_REF)
        goto normal;
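      /* Fall through.  */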
    case CONVERT_EXPR:
      /* If we are narrowing the operand, we have to do the compare in the
         narrower mode.  */
      if ((TYPE_PRECISION (TREE_TYPE (exp))
           < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0)))))
        goto normal;
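      /* Fall through.  */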
    case NON_LVALUE_EXPR:
    case REFERENCE_EXPR:
    case ABS_EXPR:
    case NEGATE_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      /* These cannot change zero->nonzero or vice versa.  */
      do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label);
      break;

    case WITH_RECORD_EXPR:
      /* Put the object on the placeholder list, recurse through our first
         operand, and pop the list.  */
      placeholder_list = tree_cons (TREE_OPERAND (exp, 1), NULL_TREE,
                                    placeholder_list);
      do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label);
      placeholder_list = TREE_CHAIN (placeholder_list);
      break;

#if 0
      /* This is never less insns than evaluating the PLUS_EXPR followed by
         a test and can be longer if the test is eliminated.  */
    case PLUS_EXPR:
      /* Reduce to minus.  */
      exp = build (MINUS_EXPR, TREE_TYPE (exp),
                   TREE_OPERAND (exp, 0),
                   fold (build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (exp, 1)),
                                 TREE_OPERAND (exp, 1))));
      /* Process as MINUS.  */
#endif

    case MINUS_EXPR:
      /* Nonzero iff operands of minus differ.  */
      do_compare_and_jump (build (NE_EXPR, TREE_TYPE (exp),
                                  TREE_OPERAND (exp, 0),
                                  TREE_OPERAND (exp, 1)),
                           NE, NE, if_false_label, if_true_label);
      break;

    case BIT_AND_EXPR:
      /* fold_single_bit_test() converts (X & (1 << C)) into (X >> C) & 1.
	 See if the former is preferred for jump tests and restore it
	 if so.  */
      if (integer_onep (TREE_OPERAND (exp, 1)))
	{
	  tree exp0 = TREE_OPERAND (exp, 0);
	  rtx set_label, clr_label;

	  /* Strip narrowing integral type conversions.  */
	  while ((TREE_CODE (exp0) == NOP_EXPR
		  || TREE_CODE (exp0) == CONVERT_EXPR
		  || TREE_CODE (exp0) == NON_LVALUE_EXPR)
		 && TREE_OPERAND (exp0, 0) != error_mark_node
		 && TYPE_PRECISION (TREE_TYPE (exp0))
		    <= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp0, 0))))
	    exp0 = TREE_OPERAND (exp0, 0);

	  /* "exp0 ^ 1" inverts the sense of the single bit test.  */
	  if (TREE_CODE (exp0) == BIT_XOR_EXPR
	      && integer_onep (TREE_OPERAND (exp0, 1)))
	    {
	      exp0 = TREE_OPERAND (exp0, 0);
	      clr_label = if_true_label;
	      set_label = if_false_label;
	    }
	  else
	    {
	      clr_label = if_false_label;
	      set_label = if_true_label;
	    }

	  if (TREE_CODE (exp0) == RSHIFT_EXPR)
	    {
	      tree arg = TREE_OPERAND (exp0, 0);
	      tree shift = TREE_OPERAND (exp0, 1);
	      tree argtype = TREE_TYPE (arg);
	      if (TREE_CODE (shift) == INTEGER_CST
		  && compare_tree_int (shift, 0) >= 0
		  && compare_tree_int (shift, HOST_BITS_PER_WIDE_INT) < 0
		  && prefer_and_bit_test (TYPE_MODE (argtype),
					  TREE_INT_CST_LOW (shift)))
		{
		  HOST_WIDE_INT mask = (HOST_WIDE_INT) 1
				       << TREE_INT_CST_LOW (shift);
		  tree t = build_int_2 (mask, 0);
		  TREE_TYPE (t) = argtype;
		  do_jump (build (BIT_AND_EXPR, argtype, arg, t),
			   clr_label, set_label);
		  break;
		}
	    }
	}

      /* If we are AND'ing with a small constant, do this comparison in the
         smallest type that fits.  If the machine doesn't have comparisons
         that small, it will be converted back to the wider comparison.
         This helps if we are testing the sign bit of a narrower object.
         combine can't do this for us because it can't know whether a
         ZERO_EXTRACT or a compare in a smaller mode exists, but we do.  */

      if (! SLOW_BYTE_ACCESS
          && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
          && TYPE_PRECISION (TREE_TYPE (exp)) <= HOST_BITS_PER_WIDE_INT
          && (i = tree_floor_log2 (TREE_OPERAND (exp, 1))) >= 0
          && (mode = mode_for_size (i + 1, MODE_INT, 0)) != BLKmode
          && (type = (*lang_hooks.types.type_for_mode) (mode, 1)) != 0
          && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
          && (cmp_optab->handlers[(int) TYPE_MODE (type)].insn_code
              != CODE_FOR_nothing))
        {
          do_jump (convert (type, exp), if_false_label, if_true_label);
          break;
        }
      goto normal;

    case TRUTH_NOT_EXPR:
      do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label);
      break;

    case TRUTH_ANDIF_EXPR:
      if (if_false_label == 0)
        if_false_label = drop_through_label = gen_label_rtx ();
      do_jump (TREE_OPERAND (exp, 0), if_false_label, NULL_RTX);
      start_cleanup_deferral ();
      do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label);
      end_cleanup_deferral ();
      break;

    case TRUTH_ORIF_EXPR:
      if (if_true_label == 0)
        if_true_label = drop_through_label = gen_label_rtx ();
      do_jump (TREE_OPERAND (exp, 0), NULL_RTX, if_true_label);
      start_cleanup_deferral ();
      do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label);
      end_cleanup_deferral ();
      break;

    case COMPOUND_EXPR:
      push_temp_slots ();
      expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, 0);
      preserve_temp_slots (NULL_RTX);
      free_temp_slots ();
      pop_temp_slots ();
      emit_queue ();
      do_pending_stack_adjust ();
      do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label);
      break;

    case COMPONENT_REF:
    case BIT_FIELD_REF:
    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      {
        HOST_WIDE_INT bitsize, bitpos;
        int unsignedp;
        enum machine_mode mode;
        tree type;
        tree offset;
        int volatilep = 0;

        /* Get description of this reference.  We don't actually care
           about the underlying object here.  */
        get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode,
                             &unsignedp, &volatilep);

        type = (*lang_hooks.types.type_for_size) (bitsize, unsignedp);
        if (! SLOW_BYTE_ACCESS
            && type != 0 && bitsize >= 0
            && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
            && (cmp_optab->handlers[(int) TYPE_MODE (type)].insn_code
                != CODE_FOR_nothing))
          {
            do_jump (convert (type, exp), if_false_label, if_true_label);
            break;
          }
        goto normal;
      }

    case COND_EXPR:
      /* Do (a ? 1 : 0) and (a ? 0 : 1) as special cases.  */
      if (integer_onep (TREE_OPERAND (exp, 1))
          && integer_zerop (TREE_OPERAND (exp, 2)))
        do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label);

      else if (integer_zerop (TREE_OPERAND (exp, 1))
               && integer_onep (TREE_OPERAND (exp, 2)))
        do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label);

      else
      {
        rtx label1 = gen_label_rtx ();
        drop_through_label = gen_label_rtx ();

        do_jump (TREE_OPERAND (exp, 0), label1, NULL_RTX);

        start_cleanup_deferral ();
        /* Now the THEN-expression.  */
        do_jump (TREE_OPERAND (exp, 1),
                 if_false_label ? if_false_label : drop_through_label,
                 if_true_label ? if_true_label : drop_through_label);
        /* In case the do_jump just above never jumps.  */
        do_pending_stack_adjust ();
        emit_label (label1);

        /* Now the ELSE-expression.  */
        do_jump (TREE_OPERAND (exp, 2),
                 if_false_label ? if_false_label : drop_through_label,
                 if_true_label ? if_true_label : drop_through_label);
        end_cleanup_deferral ();
      }
      break;

    case EQ_EXPR:
      {
        tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));

        if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_COMPLEX_FLOAT
            || GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_COMPLEX_INT)
          {
            tree exp0 = save_expr (TREE_OPERAND (exp, 0));
            tree exp1 = save_expr (TREE_OPERAND (exp, 1));
            do_jump
              (fold
               (build (TRUTH_ANDIF_EXPR, TREE_TYPE (exp),
                 fold (build (EQ_EXPR, TREE_TYPE (exp),
                  fold (build1 (REALPART_EXPR,
                    TREE_TYPE (inner_type),
                    exp0)),
                  fold (build1 (REALPART_EXPR,
                    TREE_TYPE (inner_type),
                    exp1)))),
                 fold (build (EQ_EXPR, TREE_TYPE (exp),
                  fold (build1 (IMAGPART_EXPR,
                    TREE_TYPE (inner_type),
                    exp0)),
                  fold (build1 (IMAGPART_EXPR,
                    TREE_TYPE (inner_type),
                    exp1)))))),
               if_false_label, if_true_label);
          }

        else if (integer_zerop (TREE_OPERAND (exp, 1)))
          do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label);

        else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
                 && !can_compare_p (EQ, TYPE_MODE (inner_type), ccp_jump))
          do_jump_by_parts_equality (exp, if_false_label, if_true_label);
        else
          do_compare_and_jump (exp, EQ, EQ, if_false_label, if_true_label);
        break;
      }

    case NE_EXPR:
      {
        tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));

        if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_COMPLEX_FLOAT
            || GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_COMPLEX_INT)
          {
            tree exp0 = save_expr (TREE_OPERAND (exp, 0));
            tree exp1 = save_expr (TREE_OPERAND (exp, 1));
            do_jump
              (fold
               (build (TRUTH_ORIF_EXPR, TREE_TYPE (exp),
                 fold (build (NE_EXPR, TREE_TYPE (exp),
                  fold (build1 (REALPART_EXPR,
                    TREE_TYPE (inner_type),
                    exp0)),
                  fold (build1 (REALPART_EXPR,
                    TREE_TYPE (inner_type),
                    exp1)))),
                 fold (build (NE_EXPR, TREE_TYPE (exp),
                    fold (build1 (IMAGPART_EXPR,
                      TREE_TYPE (inner_type),
                      exp0)),
                    fold (build1 (IMAGPART_EXPR,
                      TREE_TYPE (inner_type),
                      exp1)))))),
               if_false_label, if_true_label);
          }

        else if (integer_zerop (TREE_OPERAND (exp, 1)))
          do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label);

        else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
           && !can_compare_p (NE, TYPE_MODE (inner_type), ccp_jump))
          do_jump_by_parts_equality (exp, if_true_label, if_false_label);
        else
          do_compare_and_jump (exp, NE, NE, if_false_label, if_true_label);
        break;
      }

    case LT_EXPR:
      mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (LT, mode, ccp_jump))
        do_jump_by_parts_greater (exp, 1, if_false_label, if_true_label);
      else
        do_compare_and_jump (exp, LT, LTU, if_false_label, if_true_label);
      break;

    case LE_EXPR:
      mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (LE, mode, ccp_jump))
        do_jump_by_parts_greater (exp, 0, if_true_label, if_false_label);
      else
        do_compare_and_jump (exp, LE, LEU, if_false_label, if_true_label);
      break;

    case GT_EXPR:
      mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (GT, mode, ccp_jump))
        do_jump_by_parts_greater (exp, 0, if_false_label, if_true_label);
      else
        do_compare_and_jump (exp, GT, GTU, if_false_label, if_true_label);
      break;

    case GE_EXPR:
      mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (GE, mode, ccp_jump))
        do_jump_by_parts_greater (exp, 1, if_true_label, if_false_label);
      else
        do_compare_and_jump (exp, GE, GEU, if_false_label, if_true_label);
      break;

    case UNORDERED_EXPR:
    case ORDERED_EXPR:
      {
        enum rtx_code cmp, rcmp;
        int do_rev;

        if (code == UNORDERED_EXPR)
          cmp = UNORDERED, rcmp = ORDERED;
        else
          cmp = ORDERED, rcmp = UNORDERED;
        mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));

        do_rev = 0;
        if (! can_compare_p (cmp, mode, ccp_jump)
            && (can_compare_p (rcmp, mode, ccp_jump)
                /* If the target doesn't provide either UNORDERED or
                   ORDERED comparisons, canonicalize on UNORDERED for
                   the library.  */
                || rcmp == UNORDERED))
          do_rev = 1;

        if (! do_rev)
          do_compare_and_jump (exp, cmp, cmp, if_false_label, if_true_label);
        else
          do_compare_and_jump (exp, rcmp, rcmp, if_true_label, if_false_label);
      }
      break;

    {
      enum rtx_code rcode1;
      enum tree_code tcode2;

      case UNLT_EXPR:
        rcode1 = UNLT;
        tcode2 = LT_EXPR;
        goto unordered_bcc;
      case UNLE_EXPR:
        rcode1 = UNLE;
        tcode2 = LE_EXPR;
        goto unordered_bcc;
      case UNGT_EXPR:
        rcode1 = UNGT;
        tcode2 = GT_EXPR;
        goto unordered_bcc;
      case UNGE_EXPR:
        rcode1 = UNGE;
        tcode2 = GE_EXPR;
        goto unordered_bcc;
      case UNEQ_EXPR:
        rcode1 = UNEQ;
        tcode2 = EQ_EXPR;
        goto unordered_bcc;

      unordered_bcc:
        mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
        if (can_compare_p (rcode1, mode, ccp_jump))
          do_compare_and_jump (exp, rcode1, rcode1, if_false_label,
                               if_true_label);
        else
          {
            tree op0 = save_expr (TREE_OPERAND (exp, 0));
            tree op1 = save_expr (TREE_OPERAND (exp, 1));
            tree cmp0, cmp1;

            /* If the target doesn't support combined unordered
               compares, decompose into UNORDERED + comparison.  */
            cmp0 = fold (build (UNORDERED_EXPR, TREE_TYPE (exp), op0, op1));
            cmp1 = fold (build (tcode2, TREE_TYPE (exp), op0, op1));
            exp = build (TRUTH_ORIF_EXPR, TREE_TYPE (exp), cmp0, cmp1);
            do_jump (exp, if_false_label, if_true_label);
          }
      }
      break;

      /* Special case:
          __builtin_expect (<test>, 0)	and
          __builtin_expect (<test>, 1)

         We need to do this here, so that <test> is not converted to a SCC
         operation on machines that use condition code registers and COMPARE
         like the PowerPC, and then the jump is done based on whether the SCC
         operation produced a 1 or 0.  */
    case CALL_EXPR:
      /* Check for a built-in function.  */
      {
	tree fndecl = get_callee_fndecl (exp);
	tree arglist = TREE_OPERAND (exp, 1);

	if (fndecl
	    && DECL_BUILT_IN (fndecl)
	    && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_EXPECT
	    && arglist != NULL_TREE
	    && TREE_CHAIN (arglist) != NULL_TREE)
	  {
	    rtx seq = expand_builtin_expect_jump (exp, if_false_label,
						  if_true_label);

	    if (seq != NULL_RTX)
	      {
		emit_insn (seq);
		return;
	      }
	  }
      }
      /* Fall through and generate the normal code.  */

    default:
    normal:
      temp = expand_expr (exp, NULL_RTX, VOIDmode, 0);
#if 0
      /* This is not needed any more and causes poor code since it causes
         comparisons and tests from non-SI objects to have different code
         sequences.  */
      /* Copy to register to avoid generating bad insns by cse
         from (set (mem ...) (arithop))  (set (cc0) (mem ...)).  */
      if (!cse_not_expected && GET_CODE (temp) == MEM)
        temp = copy_to_reg (temp);
#endif
      do_pending_stack_adjust ();
      /* Do any postincrements in the expression that was tested.  */
      emit_queue ();

      if (GET_CODE (temp) == CONST_INT
          || (GET_CODE (temp) == CONST_DOUBLE && GET_MODE (temp) == VOIDmode)
          || GET_CODE (temp) == LABEL_REF)
        {
          rtx target = temp == const0_rtx ? if_false_label : if_true_label;
          if (target)
            emit_jump (target);
        }
      else if (GET_MODE_CLASS (GET_MODE (temp)) == MODE_INT
               && ! can_compare_p (NE, GET_MODE (temp), ccp_jump))
        /* Note swapping the labels gives us not-equal.  */
        do_jump_by_parts_equality_rtx (temp, if_true_label, if_false_label);
      else if (GET_MODE (temp) != VOIDmode)
	{
	  /* The RTL optimizers prefer comparisons against pseudos.  */
	  if (GET_CODE (temp) == SUBREG)
	    {
	      /* Compare promoted variables in their promoted mode.  */
	      if (SUBREG_PROMOTED_VAR_P (temp)
		  && GET_CODE (XEXP (temp, 0)) == REG)
		temp = XEXP (temp, 0);
	      else
		temp = copy_to_reg (temp);
	    }
	  do_compare_rtx_and_jump (temp, CONST0_RTX (GET_MODE (temp)),
				   NE, TREE_UNSIGNED (TREE_TYPE (exp)),
				   GET_MODE (temp), NULL_RTX,
				   if_false_label, if_true_label);
	}
      else
        abort ();
    }

  if (drop_through_label)
    {
      /* If do_jump produces code that might be jumped around,
         do any stack adjusts from that code, before the place
         where control merges in.  */
      do_pending_stack_adjust ();
      emit_label (drop_through_label);
    }
}
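
The DROP_THROUGH_LABEL mechanics above are easiest to see on
TRUTH_ANDIF_EXPR: when the caller supplied no false label, one is
invented so that a failed first operand can bypass the second test, and
control merges at the label emitted at the end of do_jump.  A sketch of
the generated control-flow shape using plain C gotos (the function and
its labels are illustrative, not emitted RTL):

#include <stdio.h>

/* Shape of the code do_jump emits for "if (a && b) ..." when only a
   true label exists: both tests branch to the invented drop-through
   label on failure.  */
static void
jump_if_and (int a, int b)
{
  if (!a)
    goto drop_through;  /* do_jump (op0, drop_through_label, NULL) */
  if (!b)
    goto drop_through;  /* do_jump (op1, drop_through_label, true) */
  printf ("if_true_label reached\n");
  return;
 drop_through:
  printf ("fell through: short-circuited\n");
}

int
main (void)
{
  jump_if_and (1, 1);  /* if_true_label reached */
  jump_if_and (1, 0);  /* fell through */
  jump_if_and (0, 1);  /* fell through after the first test alone */
  return 0;
}

The EQ_EXPR and NE_EXPR cases similarly decompose a complex-operand
comparison into part-wise tests before any jump is emitted; in C99
terms the folded tree corresponds to something like this compilable
sketch (complex_eq is an illustrative name, not a GCC function):

#include <complex.h>

/* Complex equality as the EQ_EXPR case folds it: real parts equal
   AND imaginary parts equal.  NE_EXPR builds the OR of the part-wise
   inequalities instead.  */
int
complex_eq (double complex a, double complex b)
{
  return creal (a) == creal (b) && cimag (a) == cimag (b);
}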