Example #1
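/* Expand a call to a CRC32 builtin.  FCODE is the builtin's function code,
   EXP the CALL_EXPR for the call, and TARGET a suggested place for the
   result.  */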
rtx
aarch64_crc32_expand_builtin (int fcode, tree exp, rtx target)
{
  rtx pat;
  aarch64_crc_builtin_datum *d
    = &aarch64_crc_builtin_data[fcode - (AARCH64_CRC32_BUILTIN_BASE + 1)];
  enum insn_code icode = d->icode;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (! target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
	      && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (!pat)
    return NULL_RTX;

  emit_insn (pat);
  return target;
}
Example #2
/* Expand a builtin function for the
   '[(unspec_volatile [(reg) (imm)])]' pattern.  */
static rtx
nds32_expand_builtin_null_ftype_reg_imm (enum insn_code icode,
					 tree exp, rtx target)
{
  /* Mapping:
       ops[0] <--> value0 <--> arg0
       ops[1] <--> value1 <--> arg1 */
  struct expand_operand ops[2];
  tree arg0, arg1;
  rtx value0, value1;

  /* Grab the incoming arguments and extract their rtx values.  */
  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  value0 = expand_normal (arg0);
  value1 = expand_normal (arg1);

  /* Create operands.  */
  create_input_operand (&ops[0], value0, TYPE_MODE (TREE_TYPE (arg0)));
  create_input_operand (&ops[1], value1, TYPE_MODE (TREE_TYPE (arg1)));

  /* Emit new instruction.  */
  if (!maybe_expand_insn (icode, 2, ops))
    error ("invalid argument to built-in function");

  return target;
}
Example #3
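/* Expand the operands of the equality comparison EXP and compare them a
   word at a time, jumping to IF_FALSE_LABEL or IF_TRUE_LABEL as
   appropriate.  */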
static void
do_jump_by_parts_equality (tree exp, rtx if_false_label, rtx if_true_label)
{
  rtx op0 = expand_normal (TREE_OPERAND (exp, 0));
  rtx op1 = expand_normal (TREE_OPERAND (exp, 1));
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
  do_jump_by_parts_equality_rtx (mode, op0, op1, if_false_label,
                                 if_true_label);
}
Example #4
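/* Compare TREEOP0 and TREEOP1 for equality a word at a time, jumping to
   IF_FALSE_LABEL or IF_TRUE_LABEL as appropriate.  PROB is the branch
   probability forwarded to do_jump_by_parts_equality_rtx.  */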
static void
do_jump_by_parts_equality (tree treeop0, tree treeop1, rtx if_false_label,
			   rtx if_true_label, int prob)
{
  rtx op0 = expand_normal (treeop0);
  rtx op1 = expand_normal (treeop1);
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (treeop0));
  do_jump_by_parts_equality_rtx (mode, op0, op1, if_false_label,
				 if_true_label, prob);
}
Example #5
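/* Expand the ordering comparison EXP and compare its operands a word at a
   time; nonzero SWAP means the operands are compared in reverse order.
   Jump to IF_FALSE_LABEL or IF_TRUE_LABEL as appropriate.  */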
static void
do_jump_by_parts_greater (tree exp, int swap, rtx if_false_label,
                          rtx if_true_label)
{
  rtx op0 = expand_normal (TREE_OPERAND (exp, swap));
  rtx op1 = expand_normal (TREE_OPERAND (exp, !swap));
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
  int unsignedp = TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0)));

  do_jump_by_parts_greater_rtx (mode, unsignedp, op0, op1, if_false_label,
                                if_true_label);
}
Example #6
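/* Compare TREEOP0 and TREEOP1 a word at a time, swapping the operands
   first if SWAP is nonzero, and jump to IF_FALSE_LABEL or IF_TRUE_LABEL
   as appropriate.  PROB is the branch probability forwarded to
   do_jump_by_parts_greater_rtx.  */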
static void
do_jump_by_parts_greater (tree treeop0, tree treeop1, int swap,
			  rtx if_false_label, rtx if_true_label, int prob)
{
  rtx op0 = expand_normal (swap ? treeop1 : treeop0);
  rtx op1 = expand_normal (swap ? treeop0 : treeop1);
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (treeop0));
  int unsignedp = TYPE_UNSIGNED (TREE_TYPE (treeop0));

  do_jump_by_parts_greater_rtx (mode, unsignedp, op0, op1, if_false_label,
				if_true_label, prob);
}
Example #7
/* Expand an AArch64 AdvSIMD builtin (intrinsic).  */
rtx
aarch64_simd_expand_builtin (int fcode, tree exp, rtx target)
{
  aarch64_simd_builtin_datum *d =
		&aarch64_simd_builtin_data[fcode - (AARCH64_SIMD_BUILTIN_BASE + 1)];
  enum insn_code icode = d->code;
  builtin_simd_arg args[SIMD_MAX_BUILTIN_ARGS];
  int num_args = insn_data[d->code].n_operands;
  int is_void = 0;
  int k;

  is_void = !!(d->qualifiers[0] & qualifier_void);

  num_args += is_void;

  for (k = 1; k < num_args; k++)
    {
      /* We have four arrays of data, each indexed in a different fashion.
	 qualifiers - element 0 always describes the function return type.
	 operands - element 0 is either the operand for return value (if
	   the function has a non-void return type) or the operand for the
	   first argument.
	 expr_args - element 0 always holds the first argument.
	 args - element 0 is always used for the return type.  */
      int qualifiers_k = k;
      int operands_k = k - is_void;
      int expr_args_k = k - 1;

      if (d->qualifiers[qualifiers_k] & qualifier_lane_index)
	args[k] = SIMD_ARG_LANE_INDEX;
      else if (d->qualifiers[qualifiers_k] & qualifier_immediate)
	args[k] = SIMD_ARG_CONSTANT;
      else if (d->qualifiers[qualifiers_k] & qualifier_maybe_immediate)
	{
	  rtx arg
	    = expand_normal (CALL_EXPR_ARG (exp,
					    (expr_args_k)));
	  /* Handle constants only if the predicate allows it.  */
	  bool op_const_int_p =
	    (CONST_INT_P (arg)
	     && (*insn_data[icode].operand[operands_k].predicate)
		(arg, insn_data[icode].operand[operands_k].mode));
	  args[k] = op_const_int_p ? SIMD_ARG_CONSTANT : SIMD_ARG_COPY_TO_REG;
	}
      else
	args[k] = SIMD_ARG_COPY_TO_REG;

    }
  args[k] = SIMD_ARG_STOP;

  /* The interface to aarch64_simd_expand_args expects a 0 if
     the function is void, and a 1 if it is not.  */
  return aarch64_simd_expand_args
	  (target, icode, !is_void, exp, &args[1]);
}
Example #8
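/* Expand the MASK_STORE internal call STMT: build the destination MEM_REF
   from the base and offset arguments and emit the target's maskstore
   pattern to store the value argument under control of the mask.  */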
static void
expand_MASK_STORE (gimple stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, reg, mask;

  maskt = gimple_call_arg (stmt, 2);
  rhs = gimple_call_arg (stmt, 3);
  type = TREE_TYPE (rhs);
  lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
		     gimple_call_arg (stmt, 1));

  mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  reg = expand_normal (rhs);
  create_fixed_operand (&ops[0], mem);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskstore_optab, TYPE_MODE (type)), 3, ops);
}
Example #9
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient.  */
rtx
aarch64_expand_builtin (tree exp,
		     rtx target,
		     rtx subtarget ATTRIBUTE_UNUSED,
		     machine_mode mode ATTRIBUTE_UNUSED,
		     int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  int fcode = DECL_FUNCTION_CODE (fndecl);
  int icode;
  rtx pat, op0;
  tree arg0;

  switch (fcode)
    {
    case AARCH64_BUILTIN_GET_FPCR:
    case AARCH64_BUILTIN_SET_FPCR:
    case AARCH64_BUILTIN_GET_FPSR:
    case AARCH64_BUILTIN_SET_FPSR:
      if ((fcode == AARCH64_BUILTIN_GET_FPCR)
	  || (fcode == AARCH64_BUILTIN_GET_FPSR))
	{
	  icode = (fcode == AARCH64_BUILTIN_GET_FPSR) ?
	    CODE_FOR_get_fpsr : CODE_FOR_get_fpcr;
	  target = gen_reg_rtx (SImode);
	  pat = GEN_FCN (icode) (target);
	}
      else
	{
	  target = NULL_RTX;
	  icode = (fcode == AARCH64_BUILTIN_SET_FPSR) ?
	    CODE_FOR_set_fpsr : CODE_FOR_set_fpcr;
	  arg0 = CALL_EXPR_ARG (exp, 0);
	  op0 = expand_normal (arg0);
	  pat = GEN_FCN (icode) (op0);
	}
      emit_insn (pat);
      return target;
    }

  if (fcode >= AARCH64_SIMD_BUILTIN_BASE && fcode <= AARCH64_SIMD_BUILTIN_MAX)
    return aarch64_simd_expand_builtin (fcode, exp, target);
  else if (fcode >= AARCH64_CRC32_BUILTIN_BASE && fcode <= AARCH64_CRC32_BUILTIN_MAX)
    return aarch64_crc32_expand_builtin (fcode, exp, target);

  gcc_unreachable ();
}
Example #10
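/* Expand the STORE_LANES internal call STMT by emitting the target's
   vec_store_lanes pattern for the call's vector argument.  */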
static void
expand_STORE_LANES (gimple stmt)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, reg;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);

  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));

  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, vec_store_lanes_optab), 2, ops);
}
Example #11
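/* Expand the MASK_LOAD internal call STMT: build the source MEM_REF from
   the base and offset arguments and emit the target's maskload pattern to
   load into the call's lhs under control of the mask.  */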
static void
expand_MASK_LOAD (gimple stmt)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt;
  rtx mem, target, mask;

  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  if (lhs == NULL_TREE)
    return;
  type = TREE_TYPE (lhs);
  rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
		     gimple_call_arg (stmt, 1));

  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (optab_handler (maskload_optab, TYPE_MODE (type)), 3, ops);
}
Example #12
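/* Expand the arguments of an AArch64 AdvSIMD builtin call EXP for
   instruction ICODE and emit the instruction.  ARGS describes how each
   argument is to be handled; HAVE_RETVAL is nonzero if the builtin
   returns a value in TARGET.  */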
static rtx
aarch64_simd_expand_args (rtx target, int icode, int have_retval,
			  tree exp, builtin_simd_arg *args)
{
  rtx pat;
  tree arg[SIMD_MAX_BUILTIN_ARGS];
  rtx op[SIMD_MAX_BUILTIN_ARGS];
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode[SIMD_MAX_BUILTIN_ARGS];
  int argc = 0;

  if (have_retval
      && (!target
	  || GET_MODE (target) != tmode
	  || !(*insn_data[icode].operand[0].predicate) (target, tmode)))
    target = gen_reg_rtx (tmode);

  for (;;)
    {
      builtin_simd_arg thisarg = args[argc];

      if (thisarg == SIMD_ARG_STOP)
	break;
      else
	{
	  arg[argc] = CALL_EXPR_ARG (exp, argc);
	  op[argc] = expand_normal (arg[argc]);
	  mode[argc] = insn_data[icode].operand[argc + have_retval].mode;

	  switch (thisarg)
	    {
	    case SIMD_ARG_COPY_TO_REG:
	      if (POINTER_TYPE_P (TREE_TYPE (arg[argc])))
		op[argc] = convert_memory_address (Pmode, op[argc]);
	      /*gcc_assert (GET_MODE (op[argc]) == mode[argc]); */
	      if (!(*insn_data[icode].operand[argc + have_retval].predicate)
		  (op[argc], mode[argc]))
		op[argc] = copy_to_mode_reg (mode[argc], op[argc]);
	      break;

	    case SIMD_ARG_CONSTANT:
	      if (!(*insn_data[icode].operand[argc + have_retval].predicate)
		  (op[argc], mode[argc]))
	      {
		error_at (EXPR_LOCATION (exp), "incompatible type for argument %d, "
		       "expected %<const int%>", argc + 1);
		return const0_rtx;
	      }
	      break;

	    case SIMD_ARG_STOP:
	      gcc_unreachable ();
	    }

	  argc++;
	}
    }

  if (have_retval)
    switch (argc)
      {
      case 1:
	pat = GEN_FCN (icode) (target, op[0]);
	break;

      case 2:
	pat = GEN_FCN (icode) (target, op[0], op[1]);
	break;

      case 3:
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
	break;

      case 4:
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
	break;

      case 5:
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
	break;

      default:
	gcc_unreachable ();
      }
  else
    switch (argc)
      {
      case 1:
	pat = GEN_FCN (icode) (op[0]);
	break;

      case 2:
	pat = GEN_FCN (icode) (op[0], op[1]);
	break;

      case 3:
	pat = GEN_FCN (icode) (op[0], op[1], op[2]);
	break;

      case 4:
	pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
	break;

      case 5:
	pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
	break;

      default:
	gcc_unreachable ();
      }

  if (!pat)
    return NULL_RTX;

  emit_insn (pat);

  return target;
}
Example #13
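/* Expand the signed-multiplication overflow check described by the
   internal call STMT: compute the product, calling the ubsan report
   builtin on overflow, and store the result into the call's lhs if
   there is one.  */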
void
ubsan_expand_si_overflow_mul_check (gimple stmt)
{
  rtx res, op0, op1;
  tree lhs, fn, arg0, arg1;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  lhs = gimple_call_lhs (stmt);
  arg0 = gimple_call_arg (stmt, 0);
  arg1 = gimple_call_arg (stmt, 1);
  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

  enum insn_code icode = optab_handler (mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
        }
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      struct separate_ops ops;
      machine_mode hmode
	= mode_for_size (GET_MODE_PRECISION (mode) / 2, MODE_INT, 1);
      ops.op0 = arg0;
      ops.op1 = arg1;
      ops.op2 = NULL_TREE;
      ops.location = gimple_location (stmt);
      if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
	  && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
	{
	  machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type
	    = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), 0);

	  res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
	  rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res,
				     GET_MODE_PRECISION (mode), NULL_RTX, 0);
	  hipart = gen_lowpart (mode, hipart);
	  res = gen_lowpart (mode, res);
	  rtx signbit = expand_shift (RSHIFT_EXPR, mode, res,
				      GET_MODE_PRECISION (mode) - 1,
				      NULL_RTX, 0);
	  /* RES is low half of the double width result, HIPART
	     the high half.  There was overflow if
	     HIPART is different from RES < 0 ? -1 : 0.  */
	  emit_cmp_and_jump_insns (signbit, hipart, EQ, NULL_RTX, mode,
				   false, done_label, PROB_VERY_LIKELY);
	}
      else if (hmode != BLKmode
	       && 2 * GET_MODE_PRECISION (hmode) == GET_MODE_PRECISION (mode))
	{
	  rtx_code_label *large_op0 = gen_label_rtx ();
	  rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
	  rtx_code_label *one_small_one_large = gen_label_rtx ();
	  rtx_code_label *both_ops_large = gen_label_rtx ();
	  rtx_code_label *after_hipart_neg = gen_label_rtx ();
	  rtx_code_label *after_lopart_neg = gen_label_rtx ();
	  rtx_code_label *do_overflow = gen_label_rtx ();
	  rtx_code_label *hipart_different = gen_label_rtx ();

	  unsigned int hprec = GET_MODE_PRECISION (hmode);
	  rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
				      NULL_RTX, 0);
	  hipart0 = gen_lowpart (hmode, hipart0);
	  rtx lopart0 = gen_lowpart (hmode, op0);
	  rtx signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
				       NULL_RTX, 0);
	  rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
				      NULL_RTX, 0);
	  hipart1 = gen_lowpart (hmode, hipart1);
	  rtx lopart1 = gen_lowpart (hmode, op1);
	  rtx signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
				       NULL_RTX, 0);

	  res = gen_reg_rtx (mode);

	  /* True if op0 resp. op1 are known to be in the range of
	     halfstype.  */
	  bool op0_small_p = false;
	  bool op1_small_p = false;
	  /* True if op0 resp. op1 are known to have all zeros or all ones
	     in the upper half of bits, but are not known to be
	     op{0,1}_small_p.  */
	  bool op0_medium_p = false;
	  bool op1_medium_p = false;
	  /* -1 if op{0,1} is known to be negative, 0 if it is known to be
	     nonnegative, 1 if unknown.  */
	  int op0_sign = 1;
	  int op1_sign = 1;

	  if (TREE_CODE (arg0) == SSA_NAME)
	    {
	      wide_int arg0_min, arg0_max;
	      if (get_range_info (arg0, &arg0_min, &arg0_max) == VR_RANGE)
		{
		  unsigned int mprec0 = wi::min_precision (arg0_min, SIGNED);
		  unsigned int mprec1 = wi::min_precision (arg0_max, SIGNED);
		  if (mprec0 <= hprec && mprec1 <= hprec)
		    op0_small_p = true;
		  else if (mprec0 <= hprec + 1 && mprec1 <= hprec + 1)
		    op0_medium_p = true;
		  if (!wi::neg_p (arg0_min, TYPE_SIGN (TREE_TYPE (arg0))))
		    op0_sign = 0;
		  else if (wi::neg_p (arg0_max, TYPE_SIGN (TREE_TYPE (arg0))))
		    op0_sign = -1;
		}
	    }
	  if (TREE_CODE (arg1) == SSA_NAME)
	    {
	      wide_int arg1_min, arg1_max;
	      if (get_range_info (arg1, &arg1_min, &arg1_max) == VR_RANGE)
		{
		  unsigned int mprec0 = wi::min_precision (arg1_min, SIGNED);
		  unsigned int mprec1 = wi::min_precision (arg1_max, SIGNED);
		  if (mprec0 <= hprec && mprec1 <= hprec)
		    op1_small_p = true;
		  else if (mprec0 <= hprec + 1 && mprec1 <= hprec + 1)
		    op1_medium_p = true;
		  if (!wi::neg_p (arg1_min, TYPE_SIGN (TREE_TYPE (arg1))))
		    op1_sign = 0;
		  else if (wi::neg_p (arg1_max, TYPE_SIGN (TREE_TYPE (arg1))))
		    op1_sign = -1;
		}
	    }

	  int smaller_sign = 1;
	  int larger_sign = 1;
	  if (op0_small_p)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op1_sign;
	    }
	  else if (op1_small_p)
	    {
	      smaller_sign = op1_sign;
	      larger_sign = op0_sign;
	    }
	  else if (op0_sign == op1_sign)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op0_sign;
	    }

	  if (!op0_small_p)
	    emit_cmp_and_jump_insns (signbit0, hipart0, NE, NULL_RTX, hmode,
				     false, large_op0, PROB_UNLIKELY);

	  if (!op1_small_p)
	    emit_cmp_and_jump_insns (signbit1, hipart1, NE, NULL_RTX, hmode,
				     false, small_op0_large_op1,
				     PROB_UNLIKELY);

	  /* If both op0 and op1 are sign extended from hmode to mode,
	     the multiplication will never overflow.  We can do just one
	     hmode x hmode => mode widening multiplication.  */
	  if (GET_CODE (lopart0) == SUBREG)
	    {
	      SUBREG_PROMOTED_VAR_P (lopart0) = 1;
	      SUBREG_PROMOTED_SET (lopart0, 0);
	    }
	  if (GET_CODE (lopart1) == SUBREG)
	    {
	      SUBREG_PROMOTED_VAR_P (lopart1) = 1;
	      SUBREG_PROMOTED_SET (lopart1, 0);
	    }
	  tree halfstype = build_nonstandard_integer_type (hprec, 0);
	  ops.op0 = make_tree (halfstype, lopart0);
	  ops.op1 = make_tree (halfstype, lopart1);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type = TREE_TYPE (arg0);
	  rtx thisres
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, thisres);
	  emit_jump (done_label);

	  emit_label (small_op0_large_op1);

	  /* If op0 is sign extended from hmode to mode, but op1 is not,
	     just swap the arguments and handle it as op1 sign extended,
	     op0 not.  */
	  rtx larger = gen_reg_rtx (mode);
	  rtx hipart = gen_reg_rtx (hmode);
	  rtx lopart = gen_reg_rtx (hmode);
	  emit_move_insn (larger, op1);
	  emit_move_insn (hipart, hipart1);
	  emit_move_insn (lopart, lopart0);
	  emit_jump (one_small_one_large);

	  emit_label (large_op0);

	  if (!op1_small_p)
	    emit_cmp_and_jump_insns (signbit1, hipart1, NE, NULL_RTX, hmode,
				     false, both_ops_large, PROB_UNLIKELY);

	  /* If op1 is sign extended from hmode to mode, but op0 is not,
	     prepare larger, hipart and lopart pseudos and handle it together
	     with small_op0_large_op1.  */
	  emit_move_insn (larger, op0);
	  emit_move_insn (hipart, hipart0);
	  emit_move_insn (lopart, lopart1);

	  emit_label (one_small_one_large);

	  /* lopart is the low part of the operand that is sign extended
	     to mode, larger is the other operand, hipart is the
	     high part of larger and lopart0 and lopart1 are the low parts
	     of both operands.
	     We perform lopart0 * lopart1 and lopart * hipart widening
	     multiplications.  */
	  tree halfutype = build_nonstandard_integer_type (hprec, 1);
	  ops.op0 = make_tree (halfutype, lopart0);
	  ops.op1 = make_tree (halfutype, lopart1);
	  rtx lo0xlo1
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);

	  ops.op0 = make_tree (halfutype, lopart);
	  ops.op1 = make_tree (halfutype, hipart);
	  rtx loxhi = gen_reg_rtx (mode);
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (loxhi, tem);

	  /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
	  if (larger_sign == 0)
	    emit_jump (after_hipart_neg);
	  else if (larger_sign != -1)
	    emit_cmp_and_jump_insns (hipart, const0_rtx, GE, NULL_RTX, hmode,
				     false, after_hipart_neg, PROB_EVEN);

	  tem = convert_modes (mode, hmode, lopart, 1);
	  tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
	  tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  emit_label (after_hipart_neg);

	  /* if (lopart < 0) loxhi -= larger;  */
	  if (smaller_sign == 0)
	    emit_jump (after_lopart_neg);
	  else if (smaller_sign != -1)
	    emit_cmp_and_jump_insns (lopart, const0_rtx, GE, NULL_RTX, hmode,
				     false, after_lopart_neg, PROB_EVEN);

	  tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  emit_label (after_lopart_neg);

	  /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
	  tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
	  tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  /* if (loxhi >> (bitsize / 2)
		 == (hmode) loxhi >> (bitsize / 2 - 1))  */
	  rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
					  NULL_RTX, 0);
	  hipartloxhi = gen_lowpart (hmode, hipartloxhi);
	  rtx lopartloxhi = gen_lowpart (hmode, loxhi);
	  rtx signbitloxhi = expand_shift (RSHIFT_EXPR, hmode, lopartloxhi,
					   hprec - 1, NULL_RTX, 0);

	  emit_cmp_and_jump_insns (signbitloxhi, hipartloxhi, NE, NULL_RTX,
				   hmode, false, do_overflow,
				   PROB_VERY_UNLIKELY);

	  /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
	  rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
					   NULL_RTX, 1);
	  tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);

	  tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
				     1, OPTAB_DIRECT);
	  if (tem != res)
	    emit_move_insn (res, tem);
	  emit_jump (done_label);

	  emit_label (both_ops_large);

	  /* If both operands are large (not sign extended from hmode),
	     then perform the full multiplication which will be the result
	     of the operation.  The only cases which don't overflow are
	     some cases where both hipart0 and hipart1 are 0 or -1.  */
	  ops.code = MULT_EXPR;
	  ops.op0 = make_tree (TREE_TYPE (arg0), op0);
	  ops.op1 = make_tree (TREE_TYPE (arg0), op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);

	  if (!op0_medium_p)
	    {
	      tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
					 NULL_RTX, 1, OPTAB_DIRECT);
	      emit_cmp_and_jump_insns (tem, const1_rtx, GTU, NULL_RTX, hmode,
				       true, do_error, PROB_VERY_UNLIKELY);
	    }

	  if (!op1_medium_p)
	    {
	      tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
					 NULL_RTX, 1, OPTAB_DIRECT);
	      emit_cmp_and_jump_insns (tem, const1_rtx, GTU, NULL_RTX, hmode,
				       true, do_error, PROB_VERY_UNLIKELY);
	    }

	  /* At this point hipart{0,1} are both in [-1, 0].  If they are the
	     same, overflow happened if res is negative; if they are different,
	     overflow happened if res is positive.  */
	  if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
	    emit_jump (hipart_different);
	  else if (op0_sign == 1 || op1_sign == 1)
	    emit_cmp_and_jump_insns (hipart0, hipart1, NE, NULL_RTX, hmode,
				     true, hipart_different, PROB_EVEN);

	  emit_cmp_and_jump_insns (res, const0_rtx, LT, NULL_RTX, mode, false,
				   do_error, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);

	  emit_label (hipart_different);

	  emit_cmp_and_jump_insns (res, const0_rtx, GE, NULL_RTX, mode, false,
				   do_error, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);

	  emit_label (do_overflow);

	  /* Overflow, do full multiplication and fallthru into do_error.  */
	  ops.op0 = make_tree (TREE_TYPE (arg0), op0);
	  ops.op1 = make_tree (TREE_TYPE (arg0), op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);
	}
      else
	{
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg0);
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_jump (done_label);
	}
    }

  emit_label (do_error);
  /* Expand the ubsan builtin call.  */
  push_temp_slots ();
  fn = ubsan_build_overflow_builtin (MULT_EXPR, gimple_location (stmt),
				     TREE_TYPE (arg0), arg0, arg1);
  expand_normal (fn);
  pop_temp_slots ();
  do_pending_stack_adjust ();

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    emit_move_insn (target, res);
}
Example #14
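/* Expand the signed-negation overflow check described by the internal
   call STMT: negate the operand, calling the ubsan report builtin if it
   is the most negative value, and store the result into the call's lhs
   if there is one.  */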
void
ubsan_expand_si_overflow_neg_check (gimple stmt)
{
  rtx res, op1;
  tree lhs, fn, arg1;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  lhs = gimple_call_lhs (stmt);
  arg1 = gimple_call_arg (stmt, 1);
  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
        }
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      /* Compute the operation.  On RTL level, the negation is always
	 unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      emit_cmp_and_jump_insns (op1, minv, NE, NULL_RTX, mode, false,
			       done_label, PROB_VERY_LIKELY);
    }

  emit_label (do_error);
  /* Expand the ubsan builtin call.  */
  push_temp_slots ();
  fn = ubsan_build_overflow_builtin (NEGATE_EXPR, gimple_location (stmt),
				     TREE_TYPE (arg1), arg1, NULL_TREE);
  expand_normal (fn);
  pop_temp_slots ();
  do_pending_stack_adjust ();

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    emit_move_insn (target, res);
}
Example #15
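/* Expand the signed addition or subtraction overflow check described by
   the internal call STMT; CODE is PLUS_EXPR or MINUS_EXPR.  The ubsan
   report builtin is called on overflow, and the result is stored into
   the call's lhs if there is one.  */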
void
ubsan_expand_si_overflow_addsub_check (tree_code code, gimple stmt)
{
  rtx res, op0, op1;
  tree lhs, fn, arg0, arg1;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  lhs = gimple_call_lhs (stmt);
  arg0 = gimple_call_arg (stmt, 0);
  arg1 = gimple_call_arg (stmt, 1);
  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

  enum insn_code icode
    = optab_handler (code == PLUS_EXPR ? addv4_optab : subv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
        }
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      rtx_code_label *sub_check = gen_label_rtx ();
      int pos_neg = 3;

      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);

      /* If we can prove one of the arguments (for MINUS_EXPR only
	 the second operand, as subtraction is not commutative) is always
	 non-negative or always negative, we can do just one comparison
	 and conditional jump instead of 2 at runtime, 3 present in the
	 emitted code.  If one of the arguments is CONST_INT, all we
	 need is to make sure it is op1, then the first
	 emit_cmp_and_jump_insns will be just folded.  Otherwise try
	 to use range info if available.  */
      if (code == PLUS_EXPR && CONST_INT_P (op0))
	{
	  rtx tem = op0;
	  op0 = op1;
	  op1 = tem;
	}
      else if (CONST_INT_P (op1))
	;
      else if (code == PLUS_EXPR && TREE_CODE (arg0) == SSA_NAME)
	{
	  wide_int arg0_min, arg0_max;
	  if (get_range_info (arg0, &arg0_min, &arg0_max) == VR_RANGE)
	    {
	      if (!wi::neg_p (arg0_min, TYPE_SIGN (TREE_TYPE (arg0))))
		pos_neg = 1;
	      else if (wi::neg_p (arg0_max, TYPE_SIGN (TREE_TYPE (arg0))))
		pos_neg = 2;
	    }
	  if (pos_neg != 3)
	    {
	      rtx tem = op0;
	      op0 = op1;
	      op1 = tem;
	    }
	}
      if (pos_neg == 3 && !CONST_INT_P (op1) && TREE_CODE (arg1) == SSA_NAME)
	{
	  wide_int arg1_min, arg1_max;
	  if (get_range_info (arg1, &arg1_min, &arg1_max) == VR_RANGE)
	    {
	      if (!wi::neg_p (arg1_min, TYPE_SIGN (TREE_TYPE (arg1))))
		pos_neg = 1;
	      else if (wi::neg_p (arg1_max, TYPE_SIGN (TREE_TYPE (arg1))))
		pos_neg = 2;
	    }
	}

      /* If the op1 is negative, we have to use a different check.  */
      if (pos_neg == 3)
	emit_cmp_and_jump_insns (op1, const0_rtx, LT, NULL_RTX, mode,
				 false, sub_check, PROB_EVEN);

      /* Compare the result of the operation with one of the operands.  */
      if (pos_neg & 1)
	emit_cmp_and_jump_insns (res, op0, code == PLUS_EXPR ? GE : LE,
				 NULL_RTX, mode, false, done_label,
				 PROB_VERY_LIKELY);

      /* If we get here, we have to print the error.  */
      if (pos_neg == 3)
	{
	  emit_jump (do_error);

	  emit_label (sub_check);
	}

      /* We have k = a + b for b < 0 here.  k <= a must hold.  */
      if (pos_neg & 2)
	emit_cmp_and_jump_insns (res, op0, code == PLUS_EXPR ? LE : GE,
				 NULL_RTX, mode, false, done_label,
				 PROB_VERY_LIKELY);
    }

  emit_label (do_error);
  /* Expand the ubsan builtin call.  */
  push_temp_slots ();
  fn = ubsan_build_overflow_builtin (code, gimple_location (stmt),
				     TREE_TYPE (arg0), arg0, arg1);
  expand_normal (fn);
  pop_temp_slots ();
  do_pending_stack_adjust ();

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    emit_move_insn (target, res);
}
Example #16
/* Expand an AArch64 AdvSIMD builtin (intrinsic).  */
rtx
aarch64_simd_expand_builtin (int fcode, tree exp, rtx target)
{
  if (fcode == AARCH64_SIMD_BUILTIN_LANE_CHECK)
    {
      rtx totalsize = expand_normal (CALL_EXPR_ARG (exp, 0));
      rtx elementsize = expand_normal (CALL_EXPR_ARG (exp, 1));
      if (CONST_INT_P (totalsize) && CONST_INT_P (elementsize)
	  && UINTVAL (elementsize) != 0
	  && UINTVAL (totalsize) != 0)
	{
	  rtx lane_idx = expand_normal (CALL_EXPR_ARG (exp, 2));
          if (CONST_INT_P (lane_idx))
	    aarch64_simd_lane_bounds (lane_idx, 0,
				      UINTVAL (totalsize)
				       / UINTVAL (elementsize),
				      exp);
          else
	    error ("%Klane index must be a constant immediate", exp);
	}
      else
	error ("%Ktotal size and element size must be a non-zero constant immediate", exp);
      /* Don't generate any RTL.  */
      return const0_rtx;
    }
  aarch64_simd_builtin_datum *d =
		&aarch64_simd_builtin_data[fcode - AARCH64_SIMD_PATTERN_START];
  enum insn_code icode = d->code;
  builtin_simd_arg args[SIMD_MAX_BUILTIN_ARGS + 1];
  int num_args = insn_data[d->code].n_operands;
  int is_void = 0;
  int k;

  is_void = !!(d->qualifiers[0] & qualifier_void);

  num_args += is_void;

  for (k = 1; k < num_args; k++)
    {
      /* We have four arrays of data, each indexed in a different fashion.
	 qualifiers - element 0 always describes the function return type.
	 operands - element 0 is either the operand for return value (if
	   the function has a non-void return type) or the operand for the
	   first argument.
	 expr_args - element 0 always holds the first argument.
	 args - element 0 is always used for the return type.  */
      int qualifiers_k = k;
      int operands_k = k - is_void;
      int expr_args_k = k - 1;

      if (d->qualifiers[qualifiers_k] & qualifier_lane_index)
	args[k] = SIMD_ARG_LANE_INDEX;
      else if (d->qualifiers[qualifiers_k] & qualifier_immediate)
	args[k] = SIMD_ARG_CONSTANT;
      else if (d->qualifiers[qualifiers_k] & qualifier_maybe_immediate)
	{
	  rtx arg
	    = expand_normal (CALL_EXPR_ARG (exp,
					    (expr_args_k)));
	  /* Handle constants only if the predicate allows it.  */
	  bool op_const_int_p =
	    (CONST_INT_P (arg)
	     && (*insn_data[icode].operand[operands_k].predicate)
		(arg, insn_data[icode].operand[operands_k].mode));
	  args[k] = op_const_int_p ? SIMD_ARG_CONSTANT : SIMD_ARG_COPY_TO_REG;
	}
      else
	args[k] = SIMD_ARG_COPY_TO_REG;

    }
  args[k] = SIMD_ARG_STOP;

  /* The interface to aarch64_simd_expand_args expects a 0 if
     the function is void, and a 1 if it is not.  */
  return aarch64_simd_expand_args
	  (target, icode, !is_void, exp, &args[1]);
}
Example #17
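/* Expand the arguments of an AArch64 AdvSIMD builtin call EXP for
   instruction ICODE and emit the instruction.  ARGS describes how each
   argument is to be handled (register, constant, lane index or stop
   marker); HAVE_RETVAL is nonzero if the builtin returns a value in
   TARGET.  */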
static rtx
aarch64_simd_expand_args (rtx target, int icode, int have_retval,
			  tree exp, builtin_simd_arg *args)
{
  rtx pat;
  tree arg[SIMD_MAX_BUILTIN_ARGS];
  rtx op[SIMD_MAX_BUILTIN_ARGS];
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode[SIMD_MAX_BUILTIN_ARGS];
  int argc = 0;

  if (have_retval
      && (!target
	  || GET_MODE (target) != tmode
	  || !(*insn_data[icode].operand[0].predicate) (target, tmode)))
    target = gen_reg_rtx (tmode);

  for (;;)
    {
      builtin_simd_arg thisarg = args[argc];

      if (thisarg == SIMD_ARG_STOP)
	break;
      else
	{
	  arg[argc] = CALL_EXPR_ARG (exp, argc);
	  op[argc] = expand_normal (arg[argc]);
	  mode[argc] = insn_data[icode].operand[argc + have_retval].mode;

	  switch (thisarg)
	    {
	    case SIMD_ARG_COPY_TO_REG:
	      if (POINTER_TYPE_P (TREE_TYPE (arg[argc])))
		op[argc] = convert_memory_address (Pmode, op[argc]);
	      /*gcc_assert (GET_MODE (op[argc]) == mode[argc]); */
	      if (!(*insn_data[icode].operand[argc + have_retval].predicate)
		  (op[argc], mode[argc]))
		op[argc] = copy_to_mode_reg (mode[argc], op[argc]);
	      break;

	    case SIMD_ARG_LANE_INDEX:
	      /* Must be a previous operand into which this is an index.  */
	      gcc_assert (argc > 0);
	      if (CONST_INT_P (op[argc]))
		{
		  enum machine_mode vmode = mode[argc - 1];
		  aarch64_simd_lane_bounds (op[argc],
					    0, GET_MODE_NUNITS (vmode));
		  /* Keep to GCC-vector-extension lane indices in the RTL.  */
		  op[argc] = GEN_INT (ENDIAN_LANE_N (vmode, INTVAL (op[argc])));
		}
	      /* Fall through - if the lane index isn't a constant then
		 the next case will error.  */
	    case SIMD_ARG_CONSTANT:
	      if (!(*insn_data[icode].operand[argc + have_retval].predicate)
		  (op[argc], mode[argc]))
	      {
		error_at (EXPR_LOCATION (exp), "incompatible type for argument %d, "
		       "expected %<const int%>", argc + 1);
		return const0_rtx;
	      }
	      break;

	    case SIMD_ARG_STOP:
	      gcc_unreachable ();
	    }

	  argc++;
	}
    }

  if (have_retval)
    switch (argc)
      {
      case 1:
	pat = GEN_FCN (icode) (target, op[0]);
	break;

      case 2:
	pat = GEN_FCN (icode) (target, op[0], op[1]);
	break;

      case 3:
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
	break;

      case 4:
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
	break;

      case 5:
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
	break;

      default:
	gcc_unreachable ();
      }
  else
    switch (argc)
      {
      case 1:
	pat = GEN_FCN (icode) (op[0]);
	break;

      case 2:
	pat = GEN_FCN (icode) (op[0], op[1]);
	break;

      case 3:
	pat = GEN_FCN (icode) (op[0], op[1], op[2]);
	break;

      case 4:
	pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
	break;

      case 5:
	pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
	break;

      default:
	gcc_unreachable ();
      }

  if (!pat)
    return NULL_RTX;

  emit_insn (pat);

  return target;
}
Example #18
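/* Generate code to evaluate EXP and jump to IF_FALSE_LABEL if the value
   is zero, or to IF_TRUE_LABEL if the value is nonzero.  Either label may
   be NULL_RTX, meaning to fall through.  */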
void
do_jump (tree exp, rtx if_false_label, rtx if_true_label)
{
  enum tree_code code = TREE_CODE (exp);
  rtx temp;
  int i;
  tree type;
  enum machine_mode mode;
  rtx drop_through_label = 0;

  switch (code)
    {
    case ERROR_MARK:
      break;

    case INTEGER_CST:
      temp = integer_zerop (exp) ? if_false_label : if_true_label;
      if (temp)
        emit_jump (temp);
      break;

#if 0
      /* This is not true with #pragma weak  */
    case ADDR_EXPR:
      /* The address of something can never be zero.  */
      if (if_true_label)
        emit_jump (if_true_label);
      break;
#endif

    case NOP_EXPR:
      if (TREE_CODE (TREE_OPERAND (exp, 0)) == COMPONENT_REF
          || TREE_CODE (TREE_OPERAND (exp, 0)) == BIT_FIELD_REF
          || TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_REF
          || TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_RANGE_REF)
        goto normal;
    case CONVERT_EXPR:
      /* If we are narrowing the operand, we have to do the compare in the
         narrower mode.  */
      if ((TYPE_PRECISION (TREE_TYPE (exp))
           < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0)))))
        goto normal;
    case NON_LVALUE_EXPR:
    case ABS_EXPR:
    case NEGATE_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      /* These cannot change zero->nonzero or vice versa.  */
      do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label);
      break;

    case BIT_AND_EXPR:
      /* fold_single_bit_test() converts (X & (1 << C)) into (X >> C) & 1.
         See if the former is preferred for jump tests and restore it
         if so.  */
      if (integer_onep (TREE_OPERAND (exp, 1)))
        {
          tree exp0 = TREE_OPERAND (exp, 0);
          rtx set_label, clr_label;

          /* Strip narrowing integral type conversions.  */
          while ((TREE_CODE (exp0) == NOP_EXPR
                  || TREE_CODE (exp0) == CONVERT_EXPR
                  || TREE_CODE (exp0) == NON_LVALUE_EXPR)
                 && TREE_OPERAND (exp0, 0) != error_mark_node
                 && TYPE_PRECISION (TREE_TYPE (exp0))
                    <= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp0, 0))))
            exp0 = TREE_OPERAND (exp0, 0);

          /* "exp0 ^ 1" inverts the sense of the single bit test.  */
          if (TREE_CODE (exp0) == BIT_XOR_EXPR
              && integer_onep (TREE_OPERAND (exp0, 1)))
            {
              exp0 = TREE_OPERAND (exp0, 0);
              clr_label = if_true_label;
              set_label = if_false_label;
            }
          else
            {
              clr_label = if_false_label;
              set_label = if_true_label;
            }

          if (TREE_CODE (exp0) == RSHIFT_EXPR)
            {
              tree arg = TREE_OPERAND (exp0, 0);
              tree shift = TREE_OPERAND (exp0, 1);
              tree argtype = TREE_TYPE (arg);
              if (TREE_CODE (shift) == INTEGER_CST
                  && compare_tree_int (shift, 0) >= 0
                  && compare_tree_int (shift, HOST_BITS_PER_WIDE_INT) < 0
                  && prefer_and_bit_test (TYPE_MODE (argtype),
                                          TREE_INT_CST_LOW (shift)))
                {
                  HOST_WIDE_INT mask = (HOST_WIDE_INT) 1
                                       << TREE_INT_CST_LOW (shift);
                  do_jump (build2 (BIT_AND_EXPR, argtype, arg,
                                   build_int_cst_type (argtype, mask)),
                           clr_label, set_label);
                  break;
                }
            }
        }

      /* If we are AND'ing with a small constant, do this comparison in the
         smallest type that fits.  If the machine doesn't have comparisons
         that small, it will be converted back to the wider comparison.
         This helps if we are testing the sign bit of a narrower object.
         combine can't do this for us because it can't know whether a
         ZERO_EXTRACT or a compare in a smaller mode exists, but we do.  */

      if (! SLOW_BYTE_ACCESS
          && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
          && TYPE_PRECISION (TREE_TYPE (exp)) <= HOST_BITS_PER_WIDE_INT
          && (i = tree_floor_log2 (TREE_OPERAND (exp, 1))) >= 0
          && (mode = mode_for_size (i + 1, MODE_INT, 0)) != BLKmode
          && (type = lang_hooks.types.type_for_mode (mode, 1)) != 0
          && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
          && (cmp_optab->handlers[(int) TYPE_MODE (type)].insn_code
              != CODE_FOR_nothing))
        {
          do_jump (fold_convert (type, exp), if_false_label, if_true_label);
          break;
        }
      goto normal;

    case TRUTH_NOT_EXPR:
      do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label);
      break;

    case COND_EXPR:
      {
        rtx label1 = gen_label_rtx ();
        if (!if_true_label || !if_false_label)
          {
            drop_through_label = gen_label_rtx ();
            if (!if_true_label)
              if_true_label = drop_through_label;
            if (!if_false_label)
              if_false_label = drop_through_label;
          }

        do_pending_stack_adjust ();
        do_jump (TREE_OPERAND (exp, 0), label1, NULL_RTX);
        do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label);
        emit_label (label1);
        do_jump (TREE_OPERAND (exp, 2), if_false_label, if_true_label);
        break;
      }

    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
    case COMPOUND_EXPR:
      /* Lowered by gimplify.c.  */
      gcc_unreachable ();

    case COMPONENT_REF:
    case BIT_FIELD_REF:
    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      {
        HOST_WIDE_INT bitsize, bitpos;
        int unsignedp;
        enum machine_mode mode;
        tree type;
        tree offset;
        int volatilep = 0;

        /* Get description of this reference.  We don't actually care
           about the underlying object here.  */
        get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode,
                             &unsignedp, &volatilep, false);

        type = lang_hooks.types.type_for_size (bitsize, unsignedp);
        if (! SLOW_BYTE_ACCESS
            && type != 0 && bitsize >= 0
            && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
            && (cmp_optab->handlers[(int) TYPE_MODE (type)].insn_code
                != CODE_FOR_nothing))
          {
            do_jump (fold_convert (type, exp), if_false_label, if_true_label);
            break;
          }
        goto normal;
      }

    case EQ_EXPR:
      {
        tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));

        gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
                    != MODE_COMPLEX_FLOAT);
        gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
                    != MODE_COMPLEX_INT);
        
        if (integer_zerop (TREE_OPERAND (exp, 1)))
          do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label);
        else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
                 && !can_compare_p (EQ, TYPE_MODE (inner_type), ccp_jump))
          do_jump_by_parts_equality (exp, if_false_label, if_true_label);
        else
          do_compare_and_jump (exp, EQ, EQ, if_false_label, if_true_label);
        break;
      }

    case MINUS_EXPR:
      /* Nonzero iff operands of minus differ.  */
      exp = build2 (NE_EXPR, TREE_TYPE (exp),
                    TREE_OPERAND (exp, 0),
                    TREE_OPERAND (exp, 1));
      /* FALLTHRU */
    case NE_EXPR:
      {
        tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));

        gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
                    != MODE_COMPLEX_FLOAT);
        gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
                    != MODE_COMPLEX_INT);
        
        if (integer_zerop (TREE_OPERAND (exp, 1)))
          do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label);
        else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
           && !can_compare_p (NE, TYPE_MODE (inner_type), ccp_jump))
          do_jump_by_parts_equality (exp, if_true_label, if_false_label);
        else
          do_compare_and_jump (exp, NE, NE, if_false_label, if_true_label);
        break;
      }

    case LT_EXPR:
      mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (LT, mode, ccp_jump))
        do_jump_by_parts_greater (exp, 1, if_false_label, if_true_label);
      else
        do_compare_and_jump (exp, LT, LTU, if_false_label, if_true_label);
      break;

    case LE_EXPR:
      mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (LE, mode, ccp_jump))
        do_jump_by_parts_greater (exp, 0, if_true_label, if_false_label);
      else
        do_compare_and_jump (exp, LE, LEU, if_false_label, if_true_label);
      break;

    case GT_EXPR:
      mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (GT, mode, ccp_jump))
        do_jump_by_parts_greater (exp, 0, if_false_label, if_true_label);
      else
        do_compare_and_jump (exp, GT, GTU, if_false_label, if_true_label);
      break;

    case GE_EXPR:
      mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (GE, mode, ccp_jump))
        do_jump_by_parts_greater (exp, 1, if_true_label, if_false_label);
      else
        do_compare_and_jump (exp, GE, GEU, if_false_label, if_true_label);
      break;

    case UNORDERED_EXPR:
    case ORDERED_EXPR:
      {
        enum rtx_code cmp, rcmp;
        int do_rev;

        if (code == UNORDERED_EXPR)
          cmp = UNORDERED, rcmp = ORDERED;
        else
          cmp = ORDERED, rcmp = UNORDERED;
        mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));

        do_rev = 0;
        if (! can_compare_p (cmp, mode, ccp_jump)
            && (can_compare_p (rcmp, mode, ccp_jump)
          /* If the target doesn't provide either UNORDERED or ORDERED
             comparisons, canonicalize on UNORDERED for the library.  */
          || rcmp == UNORDERED))
          do_rev = 1;

        if (! do_rev)
          do_compare_and_jump (exp, cmp, cmp, if_false_label, if_true_label);
        else
          do_compare_and_jump (exp, rcmp, rcmp, if_true_label, if_false_label);
      }
      break;

    {
      enum rtx_code rcode1;
      enum tree_code tcode1, tcode2;

      case UNLT_EXPR:
        rcode1 = UNLT;
        tcode1 = UNORDERED_EXPR;
        tcode2 = LT_EXPR;
        goto unordered_bcc;
      case UNLE_EXPR:
        rcode1 = UNLE;
        tcode1 = UNORDERED_EXPR;
        tcode2 = LE_EXPR;
        goto unordered_bcc;
      case UNGT_EXPR:
        rcode1 = UNGT;
        tcode1 = UNORDERED_EXPR;
        tcode2 = GT_EXPR;
        goto unordered_bcc;
      case UNGE_EXPR:
        rcode1 = UNGE;
        tcode1 = UNORDERED_EXPR;
        tcode2 = GE_EXPR;
        goto unordered_bcc;
      case UNEQ_EXPR:
        rcode1 = UNEQ;
        tcode1 = UNORDERED_EXPR;
        tcode2 = EQ_EXPR;
        goto unordered_bcc;
      case LTGT_EXPR:
        /* It is ok for LTGT_EXPR to trap when the result is unordered,
           so expand to (a < b) || (a > b).  */
        rcode1 = LTGT;
        tcode1 = LT_EXPR;
        tcode2 = GT_EXPR;
        goto unordered_bcc;

      unordered_bcc:
        mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
        if (can_compare_p (rcode1, mode, ccp_jump))
          do_compare_and_jump (exp, rcode1, rcode1, if_false_label,
                               if_true_label);
        else
          {
            tree op0 = save_expr (TREE_OPERAND (exp, 0));
            tree op1 = save_expr (TREE_OPERAND (exp, 1));
            tree cmp0, cmp1;

            /* If the target doesn't support combined unordered
               compares, decompose into two comparisons.  */
            if (if_true_label == 0)
              drop_through_label = if_true_label = gen_label_rtx ();
              
            cmp0 = fold_build2 (tcode1, TREE_TYPE (exp), op0, op1);
            cmp1 = fold_build2 (tcode2, TREE_TYPE (exp), op0, op1);
            do_jump (cmp0, 0, if_true_label);
            do_jump (cmp1, if_false_label, if_true_label);
          }
      }
      break;

    case TRUTH_AND_EXPR:
      /* High branch cost, expand as the bitwise AND of the conditions.
         Do the same if the RHS has side effects, because we're effectively
         turning a TRUTH_AND_EXPR into a TRUTH_ANDIF_EXPR.  */
      if (BRANCH_COST >= 4 || TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 1)))
        goto normal;

      if (if_false_label == NULL_RTX)
        {
          drop_through_label = gen_label_rtx ();
          do_jump (TREE_OPERAND (exp, 0), drop_through_label, NULL_RTX);
          do_jump (TREE_OPERAND (exp, 1), NULL_RTX, if_true_label);
        }
      else
        {
          do_jump (TREE_OPERAND (exp, 0), if_false_label, NULL_RTX);
          do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label);
        }
      break;

    case TRUTH_OR_EXPR:
      /* High branch cost, expand as the bitwise OR of the conditions.
         Do the same if the RHS has side effects, because we're effectively
         turning a TRUTH_OR_EXPR into a TRUTH_ORIF_EXPR.  */
      if (BRANCH_COST >= 4 || TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 1)))
        goto normal;

      if (if_true_label == NULL_RTX)
        {
          drop_through_label = gen_label_rtx ();
          do_jump (TREE_OPERAND (exp, 0), NULL_RTX, drop_through_label);
          do_jump (TREE_OPERAND (exp, 1), if_false_label, NULL_RTX);
        }
      else
        {
          do_jump (TREE_OPERAND (exp, 0), NULL_RTX, if_true_label);
          do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label);
        }
      break;

      /* Special case:
          __builtin_expect (<test>, 0)        and
          __builtin_expect (<test>, 1)

         We need to do this here, so that <test> is not converted to a SCC
         operation on machines that use condition code registers and COMPARE
         like the PowerPC, and then the jump is done based on whether the SCC
         operation produced a 1 or 0.  */
    case CALL_EXPR:
      /* Check for a built-in function.  */
      {
        tree fndecl = get_callee_fndecl (exp);
        tree arglist = TREE_OPERAND (exp, 1);

        if (fndecl
            && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
            && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_EXPECT
            && arglist != NULL_TREE
            && TREE_CHAIN (arglist) != NULL_TREE)
          {
            rtx seq = expand_builtin_expect_jump (exp, if_false_label,
                                                  if_true_label);

            if (seq != NULL_RTX)
              {
                emit_insn (seq);
                return;
              }
          }
      }
 
      /* Fall through and generate the normal code.  */
    default:
    normal:
      temp = expand_normal (exp);
      do_pending_stack_adjust ();
      /* The RTL optimizers prefer comparisons against pseudos.  */
      if (GET_CODE (temp) == SUBREG)
        {
          /* Compare promoted variables in their promoted mode.  */
          if (SUBREG_PROMOTED_VAR_P (temp)
              && REG_P (XEXP (temp, 0)))
            temp = XEXP (temp, 0);
          else
            temp = copy_to_reg (temp);
        }
      do_compare_rtx_and_jump (temp, CONST0_RTX (GET_MODE (temp)),
                               NE, TYPE_UNSIGNED (TREE_TYPE (exp)),
                               GET_MODE (temp), NULL_RTX,
                               if_false_label, if_true_label);
    }

  if (drop_through_label)
    {
      do_pending_stack_adjust ();
      emit_label (drop_through_label);
    }
}
Example #19
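/* Expand the arguments of an AArch64 AdvSIMD builtin call EXP for
   instruction ICODE and emit the instruction.  ARGS describes how each
   argument is to be handled; HAVE_RETVAL is nonzero if the builtin
   returns a value in TARGET.  */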
static rtx
aarch64_simd_expand_args (rtx target, int icode, int have_retval,
			  tree exp, builtin_simd_arg *args)
{
  rtx pat;
  rtx op[SIMD_MAX_BUILTIN_ARGS + 1]; /* First element for result operand.  */
  int opc = 0;

  if (have_retval)
    {
      machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
	  || GET_MODE (target) != tmode
	  || !(*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);
      op[opc++] = target;
    }

  for (;;)
    {
      builtin_simd_arg thisarg = args[opc - have_retval];

      if (thisarg == SIMD_ARG_STOP)
	break;
      else
	{
	  tree arg = CALL_EXPR_ARG (exp, opc - have_retval);
	  enum machine_mode mode = insn_data[icode].operand[opc].mode;
	  op[opc] = expand_normal (arg);

	  switch (thisarg)
	    {
	    case SIMD_ARG_COPY_TO_REG:
	      if (POINTER_TYPE_P (TREE_TYPE (arg)))
		op[opc] = convert_memory_address (Pmode, op[opc]);
	      /*gcc_assert (GET_MODE (op[opc]) == mode); */
	      if (!(*insn_data[icode].operand[opc].predicate)
		  (op[opc], mode))
		op[opc] = copy_to_mode_reg (mode, op[opc]);
	      break;

	    case SIMD_ARG_LANE_INDEX:
	      /* Must be a previous operand into which this is an index.  */
	      gcc_assert (opc > 0);
	      if (CONST_INT_P (op[opc]))
		{
		  machine_mode vmode = insn_data[icode].operand[opc - 1].mode;
		  aarch64_simd_lane_bounds (op[opc],
					    0, GET_MODE_NUNITS (vmode), exp);
		  /* Keep to GCC-vector-extension lane indices in the RTL.  */
		  op[opc] = GEN_INT (ENDIAN_LANE_N (vmode, INTVAL (op[opc])));
		}
	      /* Fall through - if the lane index isn't a constant then
		 the next case will error.  */
	    case SIMD_ARG_CONSTANT:
	      if (!(*insn_data[icode].operand[opc].predicate)
		  (op[opc], mode))
	      {
		error ("%Kargument %d must be a constant immediate",
		       exp, opc + 1 - have_retval);
		return const0_rtx;
	      }
	      break;

	    case SIMD_ARG_STOP:
	      gcc_unreachable ();
	    }

	  opc++;
	}
    }

  switch (opc)
    {
    case 1:
      pat = GEN_FCN (icode) (op[0]);
      break;

    case 2:
      pat = GEN_FCN (icode) (op[0], op[1]);
      break;

    case 3:
      pat = GEN_FCN (icode) (op[0], op[1], op[2]);
      break;

    case 4:
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
      break;

    case 5:
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
      break;

    case 6:
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
      break;

    default:
      gcc_unreachable ();
    }

  if (!pat)
    return NULL_RTX;

  emit_insn (pat);

  return target;
}
Example #20
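/* Expand a comparison between TREEOP0 and TREEOP1 and emit a conditional
   jump to IF_FALSE_LABEL or IF_TRUE_LABEL.  SIGNED_CODE and UNSIGNED_CODE
   are the rtx comparison codes to use for signed and unsigned operands;
   PROB is the branch probability forwarded to do_compare_rtx_and_jump.  */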
static void
do_compare_and_jump (tree treeop0, tree treeop1, enum rtx_code signed_code,
		     enum rtx_code unsigned_code, rtx if_false_label,
		     rtx if_true_label, int prob)
{
  rtx op0, op1;
  tree type;
  enum machine_mode mode;
  int unsignedp;
  enum rtx_code code;

  /* Don't crash if the comparison was erroneous.  */
  op0 = expand_normal (treeop0);
  if (TREE_CODE (treeop0) == ERROR_MARK)
    return;

  op1 = expand_normal (treeop1);
  if (TREE_CODE (treeop1) == ERROR_MARK)
    return;

  type = TREE_TYPE (treeop0);
  mode = TYPE_MODE (type);
  if (TREE_CODE (treeop0) == INTEGER_CST
      && (TREE_CODE (treeop1) != INTEGER_CST
          || (GET_MODE_BITSIZE (mode)
              > GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (treeop1))))))
    {
      /* op0 might have been replaced by promoted constant, in which
         case the type of second argument should be used.  */
      type = TREE_TYPE (treeop1);
      mode = TYPE_MODE (type);
    }
  unsignedp = TYPE_UNSIGNED (type);
  code = unsignedp ? unsigned_code : signed_code;

#ifdef HAVE_canonicalize_funcptr_for_compare
  /* If function pointers need to be "canonicalized" before they can
     be reliably compared, then canonicalize them.
     Only do this if *both* sides of the comparison are function pointers.
     If one side isn't, we want a noncanonicalized comparison.  See PR
     middle-end/17564.  */
  if (HAVE_canonicalize_funcptr_for_compare
      && TREE_CODE (TREE_TYPE (treeop0)) == POINTER_TYPE
      && TREE_CODE (TREE_TYPE (TREE_TYPE (treeop0)))
          == FUNCTION_TYPE
      && TREE_CODE (TREE_TYPE (treeop1)) == POINTER_TYPE
      && TREE_CODE (TREE_TYPE (TREE_TYPE (treeop1)))
          == FUNCTION_TYPE)
    {
      rtx new_op0 = gen_reg_rtx (mode);
      rtx new_op1 = gen_reg_rtx (mode);

      emit_insn (gen_canonicalize_funcptr_for_compare (new_op0, op0));
      op0 = new_op0;

      emit_insn (gen_canonicalize_funcptr_for_compare (new_op1, op1));
      op1 = new_op1;
    }
#endif

  do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode,
                           ((mode == BLKmode)
                            ? expr_size (treeop0) : NULL_RTX),
			   if_false_label, if_true_label, prob);
}
Example #21
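/* Evaluate the boolean expression EXP and jump to IF_FALSE_LABEL if the
   result is zero, or to IF_TRUE_LABEL if it is nonzero.  Either label may
   be NULL_RTX, meaning to fall through in that case.  PROB is the
   probability of jumping to IF_TRUE_LABEL (-1 if no estimate).  */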
void
do_jump (tree exp, rtx if_false_label, rtx if_true_label, int prob)
{
  enum tree_code code = TREE_CODE (exp);
  rtx temp;
  int i;
  tree type;
  enum machine_mode mode;
  rtx drop_through_label = 0;

  switch (code)
    {
    case ERROR_MARK:
      break;

    case INTEGER_CST:
      temp = integer_zerop (exp) ? if_false_label : if_true_label;
      if (temp)
        emit_jump (temp);
      break;

#if 0
      /* This is not true with #pragma weak  */
    case ADDR_EXPR:
      /* The address of something can never be zero.  */
      if (if_true_label)
        emit_jump (if_true_label);
      break;
#endif

    case NOP_EXPR:
      if (TREE_CODE (TREE_OPERAND (exp, 0)) == COMPONENT_REF
          || TREE_CODE (TREE_OPERAND (exp, 0)) == BIT_FIELD_REF
          || TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_REF
          || TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_RANGE_REF)
        goto normal;
    case CONVERT_EXPR:
      /* If we are narrowing the operand, we have to do the compare in the
         narrower mode.  */
      if ((TYPE_PRECISION (TREE_TYPE (exp))
           < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0)))))
        goto normal;
    case NON_LVALUE_EXPR:
    case ABS_EXPR:
    case NEGATE_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      /* These cannot change zero->nonzero or vice versa.  */
      do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label, prob);
      break;

    case TRUTH_NOT_EXPR:
      do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label,
	       inv (prob));
      break;

    case COND_EXPR:
      {
	rtx label1 = gen_label_rtx ();
	if (!if_true_label || !if_false_label)
	  {
	    drop_through_label = gen_label_rtx ();
	    if (!if_true_label)
	      if_true_label = drop_through_label;
	    if (!if_false_label)
	      if_false_label = drop_through_label;
	  }

        do_pending_stack_adjust ();
	do_jump (TREE_OPERAND (exp, 0), label1, NULL_RTX, -1);
	do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label, prob);
        emit_label (label1);
	do_jump (TREE_OPERAND (exp, 2), if_false_label, if_true_label, prob);
	break;
      }

    case COMPOUND_EXPR:
      /* Lowered by gimplify.c.  */
      gcc_unreachable ();

    case COMPONENT_REF:
    case BIT_FIELD_REF:
    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      {
        HOST_WIDE_INT bitsize, bitpos;
        int unsignedp;
        enum machine_mode mode;
        tree type;
        tree offset;
        int volatilep = 0;

        /* Get description of this reference.  We don't actually care
           about the underlying object here.  */
        get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode,
                             &unsignedp, &volatilep, false);

        type = lang_hooks.types.type_for_size (bitsize, unsignedp);
        if (! SLOW_BYTE_ACCESS
            && type != 0 && bitsize >= 0
            && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
            && have_insn_for (COMPARE, TYPE_MODE (type)))
          {
	    do_jump (fold_convert (type, exp), if_false_label, if_true_label,
		     prob);
            break;
          }
        goto normal;
      }

    case MINUS_EXPR:
      /* Nonzero iff operands of minus differ.  */
      code = NE_EXPR;

      /* FALLTHRU */
    case EQ_EXPR:
    case NE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case ORDERED_EXPR:
    case UNORDERED_EXPR:
    case UNLT_EXPR:
    case UNLE_EXPR:
    case UNGT_EXPR:
    case UNGE_EXPR:
    case UNEQ_EXPR:
    case LTGT_EXPR:
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
    other_code:
      do_jump_1 (code, TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1),
		 if_false_label, if_true_label, prob);
      break;

    case BIT_AND_EXPR:
      /* fold_single_bit_test() converts (X & (1 << C)) into (X >> C) & 1.
	 See if the former is preferred for jump tests and restore it
	 if so.  */
      if (integer_onep (TREE_OPERAND (exp, 1)))
	{
	  tree exp0 = TREE_OPERAND (exp, 0);
	  rtx set_label, clr_label;
	  int setclr_prob = prob;

	  /* Strip narrowing integral type conversions.  */
	  while (CONVERT_EXPR_P (exp0)
		 && TREE_OPERAND (exp0, 0) != error_mark_node
		 && TYPE_PRECISION (TREE_TYPE (exp0))
		    <= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp0, 0))))
	    exp0 = TREE_OPERAND (exp0, 0);

	  /* "exp0 ^ 1" inverts the sense of the single bit test.  */
	  if (TREE_CODE (exp0) == BIT_XOR_EXPR
	      && integer_onep (TREE_OPERAND (exp0, 1)))
	    {
	      exp0 = TREE_OPERAND (exp0, 0);
	      clr_label = if_true_label;
	      set_label = if_false_label;
	      setclr_prob = inv (prob);
	    }
	  else
	    {
	      clr_label = if_false_label;
	      set_label = if_true_label;
	    }

	  if (TREE_CODE (exp0) == RSHIFT_EXPR)
	    {
	      tree arg = TREE_OPERAND (exp0, 0);
	      tree shift = TREE_OPERAND (exp0, 1);
	      tree argtype = TREE_TYPE (arg);
	      if (TREE_CODE (shift) == INTEGER_CST
		  && compare_tree_int (shift, 0) >= 0
		  && compare_tree_int (shift, HOST_BITS_PER_WIDE_INT) < 0
		  && prefer_and_bit_test (TYPE_MODE (argtype),
					  TREE_INT_CST_LOW (shift)))
		{
		  unsigned HOST_WIDE_INT mask
		    = (unsigned HOST_WIDE_INT) 1 << TREE_INT_CST_LOW (shift);
		  do_jump (build2 (BIT_AND_EXPR, argtype, arg,
				   build_int_cstu (argtype, mask)),
			   clr_label, set_label, setclr_prob);
		  break;
		}
	    }
	}

      /* If we are AND'ing with a small constant, do this comparison in the
         smallest type that fits.  If the machine doesn't have comparisons
         that small, it will be converted back to the wider comparison.
         This helps if we are testing the sign bit of a narrower object.
         combine can't do this for us because it can't know whether a
         ZERO_EXTRACT or a compare in a smaller mode exists, but we do.  */

      if (! SLOW_BYTE_ACCESS
          && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
          && TYPE_PRECISION (TREE_TYPE (exp)) <= HOST_BITS_PER_WIDE_INT
          && (i = tree_floor_log2 (TREE_OPERAND (exp, 1))) >= 0
          && (mode = mode_for_size (i + 1, MODE_INT, 0)) != BLKmode
          && (type = lang_hooks.types.type_for_mode (mode, 1)) != 0
          && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
          && have_insn_for (COMPARE, TYPE_MODE (type)))
        {
	  do_jump (fold_convert (type, exp), if_false_label, if_true_label,
		   prob);
          break;
        }

      if (TYPE_PRECISION (TREE_TYPE (exp)) > 1
	  || TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST)
	goto normal;

      /* Boolean comparisons can be compiled as TRUTH_AND_EXPR.  */

    case TRUTH_AND_EXPR:
      /* High branch cost, expand as the bitwise AND of the conditions.
	 Do the same if the RHS has side effects, because we're effectively
	 turning a TRUTH_AND_EXPR into a TRUTH_ANDIF_EXPR.  */
      if (BRANCH_COST (optimize_insn_for_speed_p (),
		       false) >= 4
	  || TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 1)))
	goto normal;
      code = TRUTH_ANDIF_EXPR;
      goto other_code;

    case BIT_IOR_EXPR:
    case TRUTH_OR_EXPR:
      /* High branch cost, expand as the bitwise OR of the conditions.
	 Do the same if the RHS has side effects, because we're effectively
	 turning a TRUTH_OR_EXPR into a TRUTH_ORIF_EXPR.  */
      if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 4
	  || TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 1)))
	goto normal;
      code = TRUTH_ORIF_EXPR;
      goto other_code;

      /* Fall through and generate the normal code.  */
    default:
    normal:
      temp = expand_normal (exp);
      do_pending_stack_adjust ();
      /* The RTL optimizers prefer comparisons against pseudos.  */
      if (GET_CODE (temp) == SUBREG)
	{
	  /* Compare promoted variables in their promoted mode.  */
	  if (SUBREG_PROMOTED_VAR_P (temp)
	      && REG_P (XEXP (temp, 0)))
	    temp = XEXP (temp, 0);
	  else
	    temp = copy_to_reg (temp);
	}
      do_compare_rtx_and_jump (temp, CONST0_RTX (GET_MODE (temp)),
			       NE, TYPE_UNSIGNED (TREE_TYPE (exp)),
			       GET_MODE (temp), NULL_RTX,
			       if_false_label, if_true_label, prob);
    }

  if (drop_through_label)
    {
      do_pending_stack_adjust ();
      emit_label (drop_through_label);
    }
}