Example #1
static bool
prefer_and_bit_test (enum machine_mode mode, int bitnum)
{
  bool speed_p;
  wide_int mask = wi::set_bit_in_zero (bitnum, GET_MODE_PRECISION (mode));

  if (and_test == 0)
    {
      /* Set up rtxes for the two variations.  Use NULL as a placeholder
	 for the BITNUM-based constants.  */
      and_reg = gen_rtx_REG (mode, FIRST_PSEUDO_REGISTER);
      and_test = gen_rtx_AND (mode, and_reg, NULL);
      shift_test = gen_rtx_AND (mode, gen_rtx_ASHIFTRT (mode, and_reg, NULL),
				const1_rtx);
    }
  else
    {
      /* Change the mode of the previously-created rtxes.  */
      PUT_MODE (and_reg, mode);
      PUT_MODE (and_test, mode);
      PUT_MODE (shift_test, mode);
      PUT_MODE (XEXP (shift_test, 0), mode);
    }

  /* Fill in the integers.  */
  XEXP (and_test, 1) = immed_wide_int_const (mask, mode);
  XEXP (XEXP (shift_test, 0), 1) = GEN_INT (bitnum);

  speed_p = optimize_insn_for_speed_p ();
  return (rtx_cost (and_test, IF_THEN_ELSE, 0, speed_p)
	  <= rtx_cost (shift_test, IF_THEN_ELSE, 0, speed_p));
}
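
For reference, a worked instance of the two candidate patterns (illustrative
only; the register number is whatever FIRST_PSEUDO_REGISTER is on the
target): with mode == SImode and bitnum == 3, the rtxes whose costs are
compared are

  (and:SI (reg:SI N) (const_int 8))               ;; x & (1 << 3)
  (and:SI (ashiftrt:SI (reg:SI N) (const_int 3))
          (const_int 1))                          ;; (x >> 3) & 1

so the function returns true when the single AND is no more expensive than
the shift-then-AND sequence.
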
Example #2
enum value_range_type
get_range_info (tree name, double_int *min, double_int *max)
{
  enum value_range_type range_type;
  gcc_assert (!POINTER_TYPE_P (TREE_TYPE (name)));
  gcc_assert (min && max);
  range_info_def *ri = SSA_NAME_RANGE_INFO (name);

  /* Return VR_VARYING for SSA_NAMEs with NULL RANGE_INFO, or for
     SSA_NAMEs whose integral type has a mode wider than
     2 * HOST_BITS_PER_WIDE_INT bits of precision.  */
  if (!ri || (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (name)))
	      > 2 * HOST_BITS_PER_WIDE_INT))
    return VR_VARYING;

  /* If min > max, it is VR_ANTI_RANGE.  */
  if (ri->min.cmp (ri->max, TYPE_UNSIGNED (TREE_TYPE (name))) == 1)
    {
      /* VR_ANTI_RANGE ~[min, max] is encoded as [max + 1, min - 1].  */
      range_type = VR_ANTI_RANGE;
      *min = ri->max + double_int_one;
      *max = ri->min - double_int_one;
    }
  else
    {
      /* Otherwise (when min <= max), it is VR_RANGE.  */
      range_type = VR_RANGE;
      *min = ri->min;
      *max = ri->max;
    }
  return range_type;
}
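
A minimal caller sketch (hypothetical helper, not from the source; it assumes
the same double_int-era API) showing how the return value and the
out-parameters combine:

  /* Hypothetical: true if NAME is known to lie in a plain range whose
     minimum is non-negative when read as a signed value.  */
  static bool
  known_nonnegative_p (tree name)
  {
    double_int min, max;
    if (get_range_info (name, &min, &max) != VR_RANGE)
      return false;
    return !min.is_negative ();
  }
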
Example #3
void
fixed_to_decimal (char *str, const FIXED_VALUE_TYPE *f_orig,
		  size_t buf_size)
{
  REAL_VALUE_TYPE real_value, base_value, fixed_value;

  signop sgn = UNSIGNED_FIXED_POINT_MODE_P (f_orig->mode) ? UNSIGNED : SIGNED;
  real_2expN (&base_value, GET_MODE_FBIT (f_orig->mode), VOIDmode);
  real_from_integer (&real_value, VOIDmode,
		     wide_int::from (f_orig->data,
				     GET_MODE_PRECISION (f_orig->mode), sgn),
		     sgn);
  real_arithmetic (&fixed_value, RDIV_EXPR, &real_value, &base_value);
  real_to_decimal (str, &fixed_value, buf_size, 0, 1);
}
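
Worked arithmetic for the conversion above (illustrative): for a 16-bit
signed _Fract value (FBIT == 15) whose payload f_orig->data is 0x4000, the
routine divides 16384 by 2^15 and prints the decimal rendering of 0.5.  A
hypothetical call, with f a FIXED_VALUE_TYPE set up elsewhere:

  char buf[64];
  fixed_to_decimal (buf, &f, sizeof buf);
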
Example #4
enum value_range_type
get_range_info (const_tree name, double_int *min, double_int *max)
{
  gcc_assert (!POINTER_TYPE_P (TREE_TYPE (name)));
  gcc_assert (min && max);
  range_info_def *ri = SSA_NAME_RANGE_INFO (name);

  /* Return VR_VARYING for SSA_NAMEs with NULL RANGE_INFO, or for
     SSA_NAMEs whose integral type has a mode wider than
     2 * HOST_BITS_PER_WIDE_INT bits of precision.  */
  if (!ri || (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (name)))
	      > 2 * HOST_BITS_PER_WIDE_INT))
    return VR_VARYING;

  *min = ri->min;
  *max = ri->max;
  return SSA_NAME_RANGE_TYPE (name);
}
Example #5
static unsigned int arm_pertask_ssp_rtl_execute(void)
{
	rtx_insn *insn;

	for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
		const char *sym;
		rtx body;
		rtx mask, masked_sp;

		/*
		 * Find a SET insn involving a SYMBOL_REF to __stack_chk_guard
		 */
		if (!INSN_P(insn))
			continue;
		body = PATTERN(insn);
		if (GET_CODE(body) != SET ||
		    GET_CODE(SET_SRC(body)) != SYMBOL_REF)
			continue;
		sym = XSTR(SET_SRC(body), 0);
		if (strcmp(sym, "__stack_chk_guard"))
			continue;

		/*
		 * Replace the source of the SET insn with an expression that
		 * produces the address of the copy of the stack canary value
		 * stored in struct thread_info
		 */
		mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode)));
		masked_sp = gen_reg_rtx(Pmode);

		emit_insn_before(gen_rtx_set(masked_sp,
					     gen_rtx_AND(Pmode,
							 stack_pointer_rtx,
							 mask)),
				 insn);

		SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp,
					     GEN_INT(canary_offset));
	}
	return 0;
}
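
The net effect of the rewrite, in C-like pseudocode (sp_mask and
canary_offset are plugin state defined elsewhere in the file):

	/* before: rN = &__stack_chk_guard (a global symbol) */
	masked_sp = sp & sp_mask;	/* round sp down to thread_info */
	rN = masked_sp + canary_offset;	/* per-task copy of the canary */
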
Example #6
void
fixed_from_string (FIXED_VALUE_TYPE *f, const char *str, machine_mode mode)
{
  REAL_VALUE_TYPE real_value, fixed_value, base_value;
  unsigned int fbit;
  enum fixed_value_range_code temp;
  bool fail;

  f->mode = mode;
  fbit = GET_MODE_FBIT (mode);

  real_from_string (&real_value, str);
  temp = check_real_for_fixed_mode (&real_value, f->mode);
  /* We don't want to warn in the case where the _Fract value is 1.0.  */
  if (temp == FIXED_UNDERFLOW
      || temp == FIXED_GT_MAX_EPS
      || (temp == FIXED_MAX_EPS && ALL_ACCUM_MODE_P (f->mode)))
    warning (OPT_Woverflow,
	     "large fixed-point constant implicitly truncated to fixed-point type");
  real_2expN (&base_value, fbit, VOIDmode);
  real_arithmetic (&fixed_value, MULT_EXPR, &real_value, &base_value);
  wide_int w = real_to_integer (&fixed_value, &fail,
				GET_MODE_PRECISION (mode));
  f->data.low = w.ulow ();
  f->data.high = w.elt (1);

  if (temp == FIXED_MAX_EPS && ALL_FRACT_MODE_P (f->mode))
    {
      /* From the spec, we need to evaluate 1 to the maximal value.  */
      f->data.low = -1;
      f->data.high = -1;
      f->data = f->data.zext (GET_MODE_FBIT (f->mode)
				+ GET_MODE_IBIT (f->mode));
    }
  else
    f->data = f->data.ext (SIGNED_FIXED_POINT_MODE_P (f->mode)
			      + GET_MODE_FBIT (f->mode)
			      + GET_MODE_IBIT (f->mode),
			      UNSIGNED_FIXED_POINT_MODE_P (f->mode));
}
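
Worked instance (illustrative): for str == "0.5" in a mode with FBIT == 15
(HQmode on typical targets), real_value is 0.5 and base_value is 2^15, so
fixed_value is 16384.0 and the stored payload f->data ends up as 0x4000
after the final extension.
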
Example #7
double_int
get_nonzero_bits (const_tree name)
{
  if (POINTER_TYPE_P (TREE_TYPE (name)))
    {
      struct ptr_info_def *pi = SSA_NAME_PTR_INFO (name);
      if (pi && pi->align)
	{
	  double_int al = double_int::from_uhwi (pi->align - 1);
	  return ((double_int::mask (TYPE_PRECISION (TREE_TYPE (name))) & ~al)
		  | double_int::from_uhwi (pi->misalign));
	}
      return double_int_minus_one;
    }

  range_info_def *ri = SSA_NAME_RANGE_INFO (name);
  if (!ri || (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (name)))
	      > 2 * HOST_BITS_PER_WIDE_INT))
    return double_int_minus_one;

  return ri->nonzero_bits;
}
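
Worked instance for the pointer path (arithmetic only): with pi->align == 8,
pi->misalign == 0 and a 32-bit pointer type, al is 7, so the function
returns 0xfffffff8; the three low bits are known to be zero and every other
bit is assumed possibly set.
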
Example #8
void
ubsan_expand_si_overflow_mul_check (gimple stmt)
{
  rtx res, op0, op1;
  tree lhs, fn, arg0, arg1;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  lhs = gimple_call_lhs (stmt);
  arg0 = gimple_call_arg (stmt, 0);
  arg1 = gimple_call_arg (stmt, 1);
  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

  enum insn_code icode = optab_handler (mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
        }
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      struct separate_ops ops;
      machine_mode hmode
	= mode_for_size (GET_MODE_PRECISION (mode) / 2, MODE_INT, 1);
      ops.op0 = arg0;
      ops.op1 = arg1;
      ops.op2 = NULL_TREE;
      ops.location = gimple_location (stmt);
      if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
	  && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
	{
	  machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type
	    = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), 0);

	  res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
	  rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res,
				     GET_MODE_PRECISION (mode), NULL_RTX, 0);
	  hipart = gen_lowpart (mode, hipart);
	  res = gen_lowpart (mode, res);
	  rtx signbit = expand_shift (RSHIFT_EXPR, mode, res,
				      GET_MODE_PRECISION (mode) - 1,
				      NULL_RTX, 0);
	  /* RES is low half of the double width result, HIPART
	     the high half.  There was overflow if
	     HIPART is different from RES < 0 ? -1 : 0.  */
	  emit_cmp_and_jump_insns (signbit, hipart, EQ, NULL_RTX, mode,
				   false, done_label, PROB_VERY_LIKELY);
	}
      else if (hmode != BLKmode
	       && 2 * GET_MODE_PRECISION (hmode) == GET_MODE_PRECISION (mode))
	{
	  rtx_code_label *large_op0 = gen_label_rtx ();
	  rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
	  rtx_code_label *one_small_one_large = gen_label_rtx ();
	  rtx_code_label *both_ops_large = gen_label_rtx ();
	  rtx_code_label *after_hipart_neg = gen_label_rtx ();
	  rtx_code_label *after_lopart_neg = gen_label_rtx ();
	  rtx_code_label *do_overflow = gen_label_rtx ();
	  rtx_code_label *hipart_different = gen_label_rtx ();

	  unsigned int hprec = GET_MODE_PRECISION (hmode);
	  rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
				      NULL_RTX, 0);
	  hipart0 = gen_lowpart (hmode, hipart0);
	  rtx lopart0 = gen_lowpart (hmode, op0);
	  rtx signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
				       NULL_RTX, 0);
	  rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
				      NULL_RTX, 0);
	  hipart1 = gen_lowpart (hmode, hipart1);
	  rtx lopart1 = gen_lowpart (hmode, op1);
	  rtx signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
				       NULL_RTX, 0);

	  res = gen_reg_rtx (mode);

	  /* True if op0 resp. op1 are known to be in the range of
	     halfstype.  */
	  bool op0_small_p = false;
	  bool op1_small_p = false;
	  /* True if op0 resp. op1 are known to have all zeros or all ones
	     in the upper half of bits, but are not known to be
	     op{0,1}_small_p.  */
	  bool op0_medium_p = false;
	  bool op1_medium_p = false;
	  /* -1 if op{0,1} is known to be negative, 0 if it is known to be
	     nonnegative, 1 if unknown.  */
	  int op0_sign = 1;
	  int op1_sign = 1;

	  if (TREE_CODE (arg0) == SSA_NAME)
	    {
	      wide_int arg0_min, arg0_max;
	      if (get_range_info (arg0, &arg0_min, &arg0_max) == VR_RANGE)
		{
		  unsigned int mprec0 = wi::min_precision (arg0_min, SIGNED);
		  unsigned int mprec1 = wi::min_precision (arg0_max, SIGNED);
		  if (mprec0 <= hprec && mprec1 <= hprec)
		    op0_small_p = true;
		  else if (mprec0 <= hprec + 1 && mprec1 <= hprec + 1)
		    op0_medium_p = true;
		  if (!wi::neg_p (arg0_min, TYPE_SIGN (TREE_TYPE (arg0))))
		    op0_sign = 0;
		  else if (wi::neg_p (arg0_max, TYPE_SIGN (TREE_TYPE (arg0))))
		    op0_sign = -1;
		}
	    }
	  if (TREE_CODE (arg1) == SSA_NAME)
	    {
	      wide_int arg1_min, arg1_max;
	      if (get_range_info (arg1, &arg1_min, &arg1_max) == VR_RANGE)
		{
		  unsigned int mprec0 = wi::min_precision (arg1_min, SIGNED);
		  unsigned int mprec1 = wi::min_precision (arg1_max, SIGNED);
		  if (mprec0 <= hprec && mprec1 <= hprec)
		    op1_small_p = true;
		  else if (mprec0 <= hprec + 1 && mprec1 <= hprec + 1)
		    op1_medium_p = true;
		  if (!wi::neg_p (arg1_min, TYPE_SIGN (TREE_TYPE (arg1))))
		    op1_sign = 0;
		  else if (wi::neg_p (arg1_max, TYPE_SIGN (TREE_TYPE (arg1))))
		    op1_sign = -1;
		}
	    }

	  int smaller_sign = 1;
	  int larger_sign = 1;
	  if (op0_small_p)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op1_sign;
	    }
	  else if (op1_small_p)
	    {
	      smaller_sign = op1_sign;
	      larger_sign = op0_sign;
	    }
	  else if (op0_sign == op1_sign)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op0_sign;
	    }

	  if (!op0_small_p)
	    emit_cmp_and_jump_insns (signbit0, hipart0, NE, NULL_RTX, hmode,
				     false, large_op0, PROB_UNLIKELY);

	  if (!op1_small_p)
	    emit_cmp_and_jump_insns (signbit1, hipart1, NE, NULL_RTX, hmode,
				     false, small_op0_large_op1,
				     PROB_UNLIKELY);

	  /* If both op0 and op1 are sign extended from hmode to mode,
	     the multiplication will never overflow.  We can do just one
	     hmode x hmode => mode widening multiplication.  */
	  if (GET_CODE (lopart0) == SUBREG)
	    {
	      SUBREG_PROMOTED_VAR_P (lopart0) = 1;
	      SUBREG_PROMOTED_SET (lopart0, 0);
	    }
	  if (GET_CODE (lopart1) == SUBREG)
	    {
	      SUBREG_PROMOTED_VAR_P (lopart1) = 1;
	      SUBREG_PROMOTED_SET (lopart1, 0);
	    }
	  tree halfstype = build_nonstandard_integer_type (hprec, 0);
	  ops.op0 = make_tree (halfstype, lopart0);
	  ops.op1 = make_tree (halfstype, lopart1);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type = TREE_TYPE (arg0);
	  rtx thisres
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, thisres);
	  emit_jump (done_label);

	  emit_label (small_op0_large_op1);

	  /* If op0 is sign extended from hmode to mode, but op1 is not,
	     just swap the arguments and handle it as op1 sign extended,
	     op0 not.  */
	  rtx larger = gen_reg_rtx (mode);
	  rtx hipart = gen_reg_rtx (hmode);
	  rtx lopart = gen_reg_rtx (hmode);
	  emit_move_insn (larger, op1);
	  emit_move_insn (hipart, hipart1);
	  emit_move_insn (lopart, lopart0);
	  emit_jump (one_small_one_large);

	  emit_label (large_op0);

	  if (!op1_small_p)
	    emit_cmp_and_jump_insns (signbit1, hipart1, NE, NULL_RTX, hmode,
				     false, both_ops_large, PROB_UNLIKELY);

	  /* If op1 is sign extended from hmode to mode, but op0 is not,
	     prepare larger, hipart and lopart pseudos and handle it together
	     with small_op0_large_op1.  */
	  emit_move_insn (larger, op0);
	  emit_move_insn (hipart, hipart0);
	  emit_move_insn (lopart, lopart1);

	  emit_label (one_small_one_large);

	  /* lopart is the low part of the operand that is sign extended
	     to mode, larger is the other operand, hipart is the
	     high part of larger and lopart0 and lopart1 are the low parts
	     of both operands.
	     We perform lopart0 * lopart1 and lopart * hipart widening
	     multiplications.  */
	  tree halfutype = build_nonstandard_integer_type (hprec, 1);
	  ops.op0 = make_tree (halfutype, lopart0);
	  ops.op1 = make_tree (halfutype, lopart1);
	  rtx lo0xlo1
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);

	  ops.op0 = make_tree (halfutype, lopart);
	  ops.op1 = make_tree (halfutype, hipart);
	  rtx loxhi = gen_reg_rtx (mode);
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (loxhi, tem);

	  /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
	  if (larger_sign == 0)
	    emit_jump (after_hipart_neg);
	  else if (larger_sign != -1)
	    emit_cmp_and_jump_insns (hipart, const0_rtx, GE, NULL_RTX, hmode,
				     false, after_hipart_neg, PROB_EVEN);

	  tem = convert_modes (mode, hmode, lopart, 1);
	  tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
	  tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  emit_label (after_hipart_neg);

	  /* if (lopart < 0) loxhi -= larger;  */
	  if (smaller_sign == 0)
	    emit_jump (after_lopart_neg);
	  else if (smaller_sign != -1)
	    emit_cmp_and_jump_insns (lopart, const0_rtx, GE, NULL_RTX, hmode,
				     false, after_lopart_neg, PROB_EVEN);

	  tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  emit_label (after_lopart_neg);

	  /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
	  tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
	  tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  /* if (loxhi >> (bitsize / 2)
		 == (hmode) loxhi >> (bitsize / 2 - 1))  */
	  rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
					  NULL_RTX, 0);
	  hipartloxhi = gen_lowpart (hmode, hipartloxhi);
	  rtx lopartloxhi = gen_lowpart (hmode, loxhi);
	  rtx signbitloxhi = expand_shift (RSHIFT_EXPR, hmode, lopartloxhi,
					   hprec - 1, NULL_RTX, 0);

	  emit_cmp_and_jump_insns (signbitloxhi, hipartloxhi, NE, NULL_RTX,
				   hmode, false, do_overflow,
				   PROB_VERY_UNLIKELY);

	  /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
	  rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
					   NULL_RTX, 1);
	  tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);

	  tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
				     1, OPTAB_DIRECT);
	  if (tem != res)
	    emit_move_insn (res, tem);
	  emit_jump (done_label);

	  emit_label (both_ops_large);

	  /* If both operands are large (not sign extended from hmode),
	     then perform the full multiplication which will be the result
	     of the operation.  The only cases which don't overflow are
	     some cases where both hipart0 and hipart1 are 0 or -1.  */
	  ops.code = MULT_EXPR;
	  ops.op0 = make_tree (TREE_TYPE (arg0), op0);
	  ops.op1 = make_tree (TREE_TYPE (arg0), op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);

	  if (!op0_medium_p)
	    {
	      tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
					 NULL_RTX, 1, OPTAB_DIRECT);
	      emit_cmp_and_jump_insns (tem, const1_rtx, GTU, NULL_RTX, hmode,
				       true, do_error, PROB_VERY_UNLIKELY);
	    }

	  if (!op1_medium_p)
	    {
	      tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
					 NULL_RTX, 1, OPTAB_DIRECT);
	      emit_cmp_and_jump_insns (tem, const1_rtx, GTU, NULL_RTX, hmode,
				       true, do_error, PROB_VERY_UNLIKELY);
	    }

	  /* At this point hipart{0,1} are both in [-1, 0].  If they are the
	     same, overflow happened if res is negative; if they are different,
	     overflow happened if res is positive.  */
	  if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
	    emit_jump (hipart_different);
	  else if (op0_sign == 1 || op1_sign == 1)
	    emit_cmp_and_jump_insns (hipart0, hipart1, NE, NULL_RTX, hmode,
				     true, hipart_different, PROB_EVEN);

	  emit_cmp_and_jump_insns (res, const0_rtx, LT, NULL_RTX, mode, false,
				   do_error, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);

	  emit_label (hipart_different);

	  emit_cmp_and_jump_insns (res, const0_rtx, GE, NULL_RTX, mode, false,
				   do_error, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);

	  emit_label (do_overflow);

	  /* Overflow, do full multiplication and fallthru into do_error.  */
	  ops.op0 = make_tree (TREE_TYPE (arg0), op0);
	  ops.op1 = make_tree (TREE_TYPE (arg0), op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);
	}
      else
	{
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg0);
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_jump (done_label);
	}
    }

  emit_label (do_error);
  /* Expand the ubsan builtin call.  */
  push_temp_slots ();
  fn = ubsan_build_overflow_builtin (MULT_EXPR, gimple_location (stmt),
				     TREE_TYPE (arg0), arg0, arg1);
  expand_normal (fn);
  pop_temp_slots ();
  do_pending_stack_adjust ();

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    emit_move_insn (target, res);
}
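
Compact statement of the halves scheme used above, with h == hprec and the
smaller operand sign extended from hmode (this only restates the code's own
comments):

  res = (loxhi << h) | (hmode) lo0xlo1

where lo0xlo1 is the unsigned widening product lopart0 * lopart1, and loxhi
starts as the unsigned widening product lopart * hipart, is corrected by
subtracting lopart << h when hipart < 0 and by subtracting larger when
lopart < 0, and then absorbs the carry via loxhi += (unsigned) lo0xlo1 >> h.
Overflow is signalled when the high half of loxhi differs from the sign
extension of its low half.
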
Example #9
static bool
process_assignment (gimple stmt, gimple_stmt_iterator call, tree *m,
		    tree *a, tree *ass_var)
{
  tree op0, op1 = NULL_TREE, non_ass_var = NULL_TREE;
  tree dest = gimple_assign_lhs (stmt);
  enum tree_code code = gimple_assign_rhs_code (stmt);
  enum gimple_rhs_class rhs_class = get_gimple_rhs_class (code);
  tree src_var = gimple_assign_rhs1 (stmt);

  /* See if this is a simple copy operation of an SSA name to the function
     result.  In that case we may have a simple tail call.  Ignore type
     conversions that can never produce extra code between the function
     call and the function return.  */
  if ((rhs_class == GIMPLE_SINGLE_RHS || gimple_assign_cast_p (stmt))
      && (TREE_CODE (src_var) == SSA_NAME))
    {
      /* Reject a tailcall if the type conversion might need
	 additional code.  */
      if (gimple_assign_cast_p (stmt))
	{
	  if (TYPE_MODE (TREE_TYPE (dest)) != TYPE_MODE (TREE_TYPE (src_var)))
	    return false;

	  /* Even if the type modes are the same, if the precision of the
	     type is smaller than mode's precision,
	     reduce_to_bit_field_precision would generate additional code.  */
	  if (INTEGRAL_TYPE_P (TREE_TYPE (dest))
	      && (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (dest)))
		  > TYPE_PRECISION (TREE_TYPE (dest))))
	    return false;
	}

      if (src_var != *ass_var)
	return false;

      *ass_var = dest;
      return true;
    }

  switch (rhs_class)
    {
    case GIMPLE_BINARY_RHS:
      op1 = gimple_assign_rhs2 (stmt);

      /* Fall through.  */

    case GIMPLE_UNARY_RHS:
      op0 = gimple_assign_rhs1 (stmt);
      break;

    default:
      return false;
    }

  /* Accumulator optimizations will reverse the order of operations.
     We can only do that for floating-point types if we're assuming
     that addition and multiplication are associative.  */
  if (!flag_associative_math)
    if (FLOAT_TYPE_P (TREE_TYPE (DECL_RESULT (current_function_decl))))
      return false;

  if (rhs_class == GIMPLE_UNARY_RHS)
    ;
  else if (op0 == *ass_var
	   && (non_ass_var = independent_of_stmt_p (op1, stmt, call)))
    ;
  else if (op1 == *ass_var
	   && (non_ass_var = independent_of_stmt_p (op0, stmt, call)))
    ;
  else
    return false;

  switch (code)
    {
    case PLUS_EXPR:
      *a = non_ass_var;
      *ass_var = dest;
      return true;

    case POINTER_PLUS_EXPR:
      if (op0 != *ass_var)
	return false;
      *a = non_ass_var;
      *ass_var = dest;
      return true;

    case MULT_EXPR:
      *m = non_ass_var;
      *ass_var = dest;
      return true;

    case NEGATE_EXPR:
      *m = build_minus_one_cst (TREE_TYPE (op0));
      *ass_var = dest;
      return true;

    case MINUS_EXPR:
      if (*ass_var == op0)
        *a = fold_build1 (NEGATE_EXPR, TREE_TYPE (non_ass_var), non_ass_var);
      else
        {
	  *m = build_minus_one_cst (TREE_TYPE (non_ass_var));
          *a = fold_build1 (NEGATE_EXPR, TREE_TYPE (non_ass_var), non_ass_var);
        }

      *ass_var = dest;
      return true;

      /* TODO -- Handle POINTER_PLUS_EXPR.  */

    default:
      return false;
    }
}
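
Illustrative GIMPLE instance: for a statement acc_2 = acc_1 + t_3 sitting
between the recursive call and the return, where acc_1 is the current
*ass_var and t_3 is independent of the call, the PLUS_EXPR case stores t_3
in *a and advances *ass_var to acc_2.  A subsequent x_4 = -acc_2 would take
the NEGATE_EXPR case and set *m to -1, folding the negation into the
multiplicative accumulator.
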
Example #10
static void
instrument_bool_enum_load (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree rhs = gimple_assign_rhs1 (stmt);
  tree type = TREE_TYPE (rhs);
  tree minv = NULL_TREE, maxv = NULL_TREE;

  if (TREE_CODE (type) == BOOLEAN_TYPE && (flag_sanitize & SANITIZE_BOOL))
    {
      minv = boolean_false_node;
      maxv = boolean_true_node;
    }
  else if (TREE_CODE (type) == ENUMERAL_TYPE
	   && (flag_sanitize & SANITIZE_ENUM)
	   && TREE_TYPE (type) != NULL_TREE
	   && TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE
	   && (TYPE_PRECISION (TREE_TYPE (type))
	       < GET_MODE_PRECISION (TYPE_MODE (type))))
    {
      minv = TYPE_MIN_VALUE (TREE_TYPE (type));
      maxv = TYPE_MAX_VALUE (TREE_TYPE (type));
    }
  else
    return;

  int modebitsize = GET_MODE_BITSIZE (TYPE_MODE (type));
  HOST_WIDE_INT bitsize, bitpos;
  tree offset;
  enum machine_mode mode;
  int volatilep = 0, unsignedp = 0;
  tree base = get_inner_reference (rhs, &bitsize, &bitpos, &offset, &mode,
				   &unsignedp, &volatilep, false);
  tree utype = build_nonstandard_integer_type (modebitsize, 1);

  if ((TREE_CODE (base) == VAR_DECL && DECL_HARD_REGISTER (base))
      || (bitpos % modebitsize) != 0
      || bitsize != modebitsize
      || GET_MODE_BITSIZE (TYPE_MODE (utype)) != modebitsize
      || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return;

  location_t loc = gimple_location (stmt);
  tree ptype = build_pointer_type (TREE_TYPE (rhs));
  tree atype = reference_alias_ptr_type (rhs);
  gimple g = gimple_build_assign (make_ssa_name (ptype, NULL),
				  build_fold_addr_expr (rhs));
  gimple_set_location (g, loc);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  tree mem = build2 (MEM_REF, utype, gimple_assign_lhs (g),
		     build_int_cst (atype, 0));
  tree urhs = make_ssa_name (utype, NULL);
  g = gimple_build_assign (urhs, mem);
  gimple_set_location (g, loc);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  minv = fold_convert (utype, minv);
  maxv = fold_convert (utype, maxv);
  if (!integer_zerop (minv))
    {
      g = gimple_build_assign_with_ops (MINUS_EXPR,
					make_ssa_name (utype, NULL),
					urhs, minv);
      gimple_set_location (g, loc);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  gimple_stmt_iterator gsi2 = *gsi;
  basic_block then_bb, fallthru_bb;
  *gsi = create_cond_insert_point (gsi, true, false, true,
				   &then_bb, &fallthru_bb);
  g = gimple_build_cond (GT_EXPR, gimple_assign_lhs (g),
			 int_const_binop (MINUS_EXPR, maxv, minv),
			 NULL_TREE, NULL_TREE);
  gimple_set_location (g, loc);
  gsi_insert_after (gsi, g, GSI_NEW_STMT);

  gimple_assign_set_rhs_with_ops (&gsi2, NOP_EXPR, urhs, NULL_TREE);
  update_stmt (stmt);

  gsi2 = gsi_after_labels (then_bb);
  if (flag_sanitize_undefined_trap_on_error)
    g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
  else
    {
      tree data = ubsan_create_data ("__ubsan_invalid_value_data", &loc, NULL,
				     ubsan_type_descriptor (type), NULL_TREE);
      data = build_fold_addr_expr_loc (loc, data);
      enum built_in_function bcode
	= flag_sanitize_recover
	  ? BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE
	  : BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE_ABORT;
      tree fn = builtin_decl_explicit (bcode);

      tree val = force_gimple_operand_gsi (&gsi2, ubsan_encode_value (urhs),
					   true, NULL_TREE, true,
					   GSI_SAME_STMT);
      g = gimple_build_call (fn, 2, data, val);
    }
  gimple_set_location (g, loc);
  gsi_insert_before (&gsi2, g, GSI_SAME_STMT);
}
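
The emitted range check relies on the usual unsigned-subtraction trick:
after biasing the loaded value by minv, the single unsigned comparison

  (unsigned) (u - minv) > (unsigned) (maxv - minv)

holds exactly when u < minv or u > maxv.  For bool (minv 0, maxv 1) the
subtraction is skipped, and any loaded value other than 0 or 1 branches to
then_bb, which calls the __ubsan_handle_load_invalid_value handler or, with
-fsanitize-undefined-trap-on-error, __builtin_trap.
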
void
gfc_init_kinds (void)
{
  enum machine_mode mode;
  int i_index, r_index;
  bool saw_i4 = false, saw_i8 = false;
  bool saw_r4 = false, saw_r8 = false, saw_r16 = false;

  for (i_index = 0, mode = MIN_MODE_INT; mode <= MAX_MODE_INT; mode++)
    {
      int kind, bitsize;

      if (!targetm.scalar_mode_supported_p (mode))
	continue;

      /* The middle end doesn't support constants larger than 2*HWI.
	 Perhaps the target hook shouldn't have accepted these either,
	 but just to be safe...  */
      bitsize = GET_MODE_BITSIZE (mode);
      if (bitsize > 2*HOST_BITS_PER_WIDE_INT)
	continue;

      gcc_assert (i_index != MAX_INT_KINDS);

      /* Let the kind equal the bit size divided by 8.  This insulates the
	 programmer from the underlying byte size.  */
      kind = bitsize / 8;

      if (kind == 4)
	saw_i4 = true;
      if (kind == 8)
	saw_i8 = true;

      gfc_integer_kinds[i_index].kind = kind;
      gfc_integer_kinds[i_index].radix = 2;
      gfc_integer_kinds[i_index].digits = bitsize - 1;
      gfc_integer_kinds[i_index].bit_size = bitsize;

      gfc_logical_kinds[i_index].kind = kind;
      gfc_logical_kinds[i_index].bit_size = bitsize;

      i_index += 1;
    }

  /* Set the maximum integer kind.  Used with at least BOZ constants.  */
  gfc_max_integer_kind = gfc_integer_kinds[i_index - 1].kind;

  for (r_index = 0, mode = MIN_MODE_FLOAT; mode <= MAX_MODE_FLOAT; mode++)
    {
      const struct real_format *fmt = REAL_MODE_FORMAT (mode);
      int kind;

      if (fmt == NULL)
	continue;
      if (!targetm.scalar_mode_supported_p (mode))
	continue;

      /* Let the kind equal the precision divided by 8, rounding up.  Again,
	 this insulates the programmer from the underlying byte size.

	 Also, it effectively deals with IEEE extended formats.  There, the
	 total size of the type may equal 16, but it's got 6 bytes of padding
	 and the increased size can get in the way of a real IEEE quad format
	 which may also be supported by the target.

	 We round up so as to handle IA-64 __floatreg (RFmode), which is an
	 82 bit type.  Not to be confused with __float80 (XFmode), which is
	 an 80 bit type also supported by IA-64.  So XFmode should come out
	 to be kind=10, and RFmode should come out to be kind=11.  Egads.  */

      kind = (GET_MODE_PRECISION (mode) + 7) / 8;

      if (kind == 4)
	saw_r4 = true;
      if (kind == 8)
	saw_r8 = true;
      if (kind == 16)
	saw_r16 = true;

      /* Careful we don't stumble over a weird internal mode.  */
      gcc_assert (r_index <= 0 || gfc_real_kinds[r_index-1].kind != kind);
      /* Or have too many modes for the allocated space.  */
      gcc_assert (r_index != MAX_REAL_KINDS);

      gfc_real_kinds[r_index].kind = kind;
      gfc_real_kinds[r_index].radix = fmt->b;
      gfc_real_kinds[r_index].digits = fmt->p;
      gfc_real_kinds[r_index].min_exponent = fmt->emin;
      gfc_real_kinds[r_index].max_exponent = fmt->emax;
      gfc_real_kinds[r_index].mode_precision = GET_MODE_PRECISION (mode);
      r_index += 1;
    }

  /* Choose the default integer kind.  We choose 4 unless the user
     directs us otherwise.  */
  if (gfc_option.flag_default_integer)
    {
      if (!saw_i8)
	fatal_error ("integer kind=8 not available for -fdefault-integer-8 option");
      gfc_default_integer_kind = 8;
    }
  else if (saw_i4)
    gfc_default_integer_kind = 4;
  else
    gfc_default_integer_kind = gfc_integer_kinds[i_index - 1].kind;

  /* Choose the default real kind.  Again, we choose 4 when possible.  */
  if (gfc_option.flag_default_real)
    {
      if (!saw_r8)
	fatal_error ("real kind=8 not available for -fdefault-real-8 option");
      gfc_default_real_kind = 8;
    }
  else if (saw_r4)
    gfc_default_real_kind = 4;
  else
    gfc_default_real_kind = gfc_real_kinds[0].kind;

  /* Choose the default double kind.  If -fdefault-real and -fdefault-double 
     are specified, we use kind=8, if it's available.  If -fdefault-real is
     specified without -fdefault-double, we use kind=16, if it's available.
     Otherwise we do not change anything.  */
  if (gfc_option.flag_default_double && !gfc_option.flag_default_real)
    fatal_error ("Use of -fdefault-double-8 requires -fdefault-real-8");

  if (gfc_option.flag_default_real && gfc_option.flag_default_double && saw_r8)
    gfc_default_double_kind = 8;
  else if (gfc_option.flag_default_real && saw_r16)
    gfc_default_double_kind = 16;
  else if (saw_r4 && saw_r8)
    gfc_default_double_kind = 8;
  else
    {
      /* F95 14.6.3.1: A nonpointer scalar object of type double precision
	 real ... occupies two contiguous numeric storage units.

	 Therefore we must be supplied a kind twice as large as we chose
	 for single precision.  There are loopholes, in that double
	 precision must *occupy* two storage units, though it doesn't have
	 to *use* two storage units.  Which means that you can make this
	 kind artificially wide by padding it.  But at present there are
	 no GCC targets for which a two-word type does not exist, so we
	 just let gfc_validate_kind abort and tell us if something breaks.  */

      gfc_default_double_kind
	= gfc_validate_kind (BT_REAL, gfc_default_real_kind * 2, false);
    }

  /* The default logical kind is constrained to be the same as the
     default integer kind.  Similarly with complex and real.  */
  gfc_default_logical_kind = gfc_default_integer_kind;
  gfc_default_complex_kind = gfc_default_real_kind;

  /* Choose the smallest integer kind for our default character.  */
  gfc_default_character_kind = gfc_integer_kinds[0].kind;

  /* Choose the integer kind the same size as "void*" for our index kind.  */
  gfc_index_integer_kind = POINTER_SIZE / 8;
  /* Pick a kind the same size as the C "int" type.  */
  gfc_c_int_kind = INT_TYPE_SIZE / 8;
}
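
Worked instances of the kind mapping (arithmetic only): integer modes of 8,
16, 32, 64 and 128 bits give kinds 1, 2, 4, 8 and 16; for reals, x86 XFmode
has GET_MODE_PRECISION == 80, so kind = (80 + 7) / 8 = 10, and IA-64 RFmode
(82 bits) gives (82 + 7) / 8 = 11, matching the comment in the loop above.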