Example #1
File: ubsan.c  Project: rgmabs19357/gcc
tree
ubsan_encode_value (tree t)
{
    tree type = TREE_TYPE (t);
    switch (TREE_CODE (type))
    {
    case INTEGER_TYPE:
        if (TYPE_PRECISION (type) <= POINTER_SIZE)
            return fold_build1 (NOP_EXPR, pointer_sized_int_node, t);
        else
            return build_fold_addr_expr (t);
    case REAL_TYPE:
    {
        unsigned int bitsize = GET_MODE_BITSIZE (TYPE_MODE (type));
        if (bitsize <= POINTER_SIZE)
        {
            tree itype = build_nonstandard_integer_type (bitsize, true);
            t = fold_build1 (VIEW_CONVERT_EXPR, itype, t);
            return fold_convert (pointer_sized_int_node, t);
        }
        else
        {
            if (!TREE_ADDRESSABLE (t))
            {
                /* The reason for this is that we don't want to pessimize
                   code by making vars unnecessarily addressable.  */
                tree var = create_tmp_var (TREE_TYPE (t), NULL);
                tree tem = build2 (MODIFY_EXPR, void_type_node, var, t);
                t = build_fold_addr_expr (var);
                return build2 (COMPOUND_EXPR, TREE_TYPE (t), tem, t);
            }
            else
                return build_fold_addr_expr (t);
        }
    }
    default:
        gcc_unreachable ();
    }
}
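
Note: the pattern above -- pass the value itself when it fits in a pointer-sized word, otherwise pass its address -- can be illustrated outside of GCC. The following is a minimal, hypothetical C sketch of that idea, not GCC code; the name encode_value and the use of memcpy are illustrative simplifications (the real code sign/zero-extends integers via NOP_EXPR rather than bit-copying them).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical analogue of ubsan_encode_value: the runtime handler gets
   either the value packed into a pointer-sized word, or, when the value
   is wider than a pointer, the address of the value.  */
static uintptr_t
encode_value (const void *val, size_t size)
{
  if (size <= sizeof (uintptr_t))
    {
      /* Small value: bit-copy it into a pointer-sized integer
	 (the VIEW_CONVERT_EXPR / NOP_EXPR path above).  */
      uintptr_t word = 0;
      memcpy (&word, val, size);
      return word;
    }
  /* Large value: pass its address (the build_fold_addr_expr path).  */
  return (uintptr_t) val;
}

int
main (void)
{
  int i = 42;
  long double ld = 3.14L;	/* usually wider than a pointer */
  printf ("%#lx\n", (unsigned long) encode_value (&i, sizeof i));
  printf ("%#lx\n", (unsigned long) encode_value (&ld, sizeof ld));
  return 0;
}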
Example #2
bool
convert_affine_scev (struct loop *loop, tree type,
		     tree *base, tree *step, gimple *at_stmt,
		     bool use_overflow_semantics)
{
  tree ct = TREE_TYPE (*step);
  bool enforce_overflow_semantics;
  bool must_check_src_overflow, must_check_rslt_overflow;
  tree new_base, new_step;
  tree step_type = POINTER_TYPE_P (type) ? sizetype : type;

  /* In general,
     (TYPE) (BASE + STEP * i) = (TYPE) BASE + (TYPE -- sign extend) STEP * i,
     but we must check some assumptions.

     1) If [BASE, +, STEP] wraps, the equation is not valid when precision
        of CT is smaller than the precision of TYPE.  For example, when we
	cast unsigned char [254, +, 1] to unsigned, the values on left side
	are 254, 255, 0, 1, ..., but those on the right side are
	254, 255, 256, 257, ...
     2) In case that we must also preserve the fact that signed ivs do not
        overflow, we must additionally check that the new iv does not wrap.
	For example, unsigned char [125, +, 1] cast to signed char could
	become a wrapping variable with values 125, 126, 127, -128, -127, ...,
	which would confuse optimizers that assume that this does not
	happen.  */
  must_check_src_overflow = TYPE_PRECISION (ct) < TYPE_PRECISION (type);

  enforce_overflow_semantics = (use_overflow_semantics
				&& nowrap_type_p (type));
  if (enforce_overflow_semantics)
    {
      /* We can avoid checking whether the result overflows in the following
	 cases:

	 -- must_check_src_overflow is true, and the range of TYPE is superset
	    of the range of CT -- i.e., in all cases except if CT signed and
	    TYPE unsigned.
         -- both CT and TYPE have the same precision and signedness, and we
	    verify instead that the source does not overflow (this may be
	    easier than verifying it for the result, as we may use the
	    information about the semantics of overflow in CT).  */
      if (must_check_src_overflow)
	{
	  if (TYPE_UNSIGNED (type) && !TYPE_UNSIGNED (ct))
	    must_check_rslt_overflow = true;
	  else
	    must_check_rslt_overflow = false;
	}
      else if (TYPE_UNSIGNED (ct) == TYPE_UNSIGNED (type)
	       && TYPE_PRECISION (ct) == TYPE_PRECISION (type))
	{
	  must_check_rslt_overflow = false;
	  must_check_src_overflow = true;
	}
      else
	must_check_rslt_overflow = true;
    }
  else
    must_check_rslt_overflow = false;

  if (must_check_src_overflow
      && scev_probably_wraps_p (*base, *step, at_stmt, loop,
				use_overflow_semantics))
    return false;

  new_base = chrec_convert (type, *base, at_stmt, use_overflow_semantics);
  /* The step must be sign extended, regardless of the signedness
     of CT and TYPE.  This only needs to be handled specially when
     CT is unsigned -- to prevent e.g. unsigned char [100, +, 255]
     (with values 100, 99, 98, ...) from becoming signed or unsigned
     [100, +, 255] with values 100, 355, ...; the sign-extension is
     performed by default when CT is signed.  */
  new_step = *step;
  if (TYPE_PRECISION (step_type) > TYPE_PRECISION (ct) && TYPE_UNSIGNED (ct))
    {
      tree signed_ct = build_nonstandard_integer_type (TYPE_PRECISION (ct), 0);
      new_step = chrec_convert (signed_ct, new_step, at_stmt,
                                use_overflow_semantics);
    }
  new_step = chrec_convert (step_type, new_step, at_stmt,
			    use_overflow_semantics);

  if (automatically_generated_chrec_p (new_base)
      || automatically_generated_chrec_p (new_step))
    return false;

  if (must_check_rslt_overflow
      /* Note that in this case we cannot use the fact that signed variables
	 do not overflow, as this is what we are verifying for the new iv.  */
      && scev_probably_wraps_p (new_base, new_step, at_stmt, loop, false))
    return false;

  *base = new_base;
  *step = new_step;
  return true;
}
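
Note: the wrap-around caveat in point 1) of the comment above is easy to reproduce outside of GCC. A minimal standalone C demonstration (plain host C, not GCC internals) of why unsigned char [254, +, 1] cannot simply be converted to unsigned [254, +, 1]:

#include <stdio.h>

int
main (void)
{
  /* The chrec unsigned char [254, +, 1] wraps: 254, 255, 0, 1, ...
     Converting it naively to unsigned [254, +, 1] would instead give
     254, 255, 256, 257, ..., so the conversion is only valid when the
     source is known not to wrap.  */
  unsigned char base = 254;
  for (int i = 0; i < 4; i++)
    printf ("(unsigned) iv = %3u   converted chrec = %3u\n",
	    (unsigned) (unsigned char) (base + i), 254u + i);
  return 0;
}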
Example #3
File: internal-fn.c  Project: Alexpux/GCC
void
ubsan_expand_si_overflow_mul_check (gimple stmt)
{
  rtx res, op0, op1;
  tree lhs, fn, arg0, arg1;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  lhs = gimple_call_lhs (stmt);
  arg0 = gimple_call_arg (stmt, 0);
  arg1 = gimple_call_arg (stmt, 1);
  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

  enum insn_code icode = optab_handler (mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
        }
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      struct separate_ops ops;
      machine_mode hmode
	= mode_for_size (GET_MODE_PRECISION (mode) / 2, MODE_INT, 1);
      ops.op0 = arg0;
      ops.op1 = arg1;
      ops.op2 = NULL_TREE;
      ops.location = gimple_location (stmt);
      if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
	  && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
	{
	  machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type
	    = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), 0);

	  res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
	  rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res,
				     GET_MODE_PRECISION (mode), NULL_RTX, 0);
	  hipart = gen_lowpart (mode, hipart);
	  res = gen_lowpart (mode, res);
	  rtx signbit = expand_shift (RSHIFT_EXPR, mode, res,
				      GET_MODE_PRECISION (mode) - 1,
				      NULL_RTX, 0);
	  /* RES is low half of the double width result, HIPART
	     the high half.  There was overflow if
	     HIPART is different from RES < 0 ? -1 : 0.  */
	  emit_cmp_and_jump_insns (signbit, hipart, EQ, NULL_RTX, mode,
				   false, done_label, PROB_VERY_LIKELY);
	}
      else if (hmode != BLKmode
	       && 2 * GET_MODE_PRECISION (hmode) == GET_MODE_PRECISION (mode))
	{
	  rtx_code_label *large_op0 = gen_label_rtx ();
	  rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
	  rtx_code_label *one_small_one_large = gen_label_rtx ();
	  rtx_code_label *both_ops_large = gen_label_rtx ();
	  rtx_code_label *after_hipart_neg = gen_label_rtx ();
	  rtx_code_label *after_lopart_neg = gen_label_rtx ();
	  rtx_code_label *do_overflow = gen_label_rtx ();
	  rtx_code_label *hipart_different = gen_label_rtx ();

	  unsigned int hprec = GET_MODE_PRECISION (hmode);
	  rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
				      NULL_RTX, 0);
	  hipart0 = gen_lowpart (hmode, hipart0);
	  rtx lopart0 = gen_lowpart (hmode, op0);
	  rtx signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
				       NULL_RTX, 0);
	  rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
				      NULL_RTX, 0);
	  hipart1 = gen_lowpart (hmode, hipart1);
	  rtx lopart1 = gen_lowpart (hmode, op1);
	  rtx signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
				       NULL_RTX, 0);

	  res = gen_reg_rtx (mode);

	  /* True if op0 resp. op1 are known to be in the range of
	     halfstype.  */
	  bool op0_small_p = false;
	  bool op1_small_p = false;
	  /* True if op0 resp. op1 are known to have all zeros or all ones
	     in the upper half of bits, but are not known to be
	     op{0,1}_small_p.  */
	  bool op0_medium_p = false;
	  bool op1_medium_p = false;
	  /* -1 if op{0,1} is known to be negative, 0 if it is known to be
	     nonnegative, 1 if unknown.  */
	  int op0_sign = 1;
	  int op1_sign = 1;

	  if (TREE_CODE (arg0) == SSA_NAME)
	    {
	      wide_int arg0_min, arg0_max;
	      if (get_range_info (arg0, &arg0_min, &arg0_max) == VR_RANGE)
		{
		  unsigned int mprec0 = wi::min_precision (arg0_min, SIGNED);
		  unsigned int mprec1 = wi::min_precision (arg0_max, SIGNED);
		  if (mprec0 <= hprec && mprec1 <= hprec)
		    op0_small_p = true;
		  else if (mprec0 <= hprec + 1 && mprec1 <= hprec + 1)
		    op0_medium_p = true;
		  if (!wi::neg_p (arg0_min, TYPE_SIGN (TREE_TYPE (arg0))))
		    op0_sign = 0;
		  else if (wi::neg_p (arg0_max, TYPE_SIGN (TREE_TYPE (arg0))))
		    op0_sign = -1;
		}
	    }
	  if (TREE_CODE (arg1) == SSA_NAME)
	    {
	      wide_int arg1_min, arg1_max;
	      if (get_range_info (arg1, &arg1_min, &arg1_max) == VR_RANGE)
		{
		  unsigned int mprec0 = wi::min_precision (arg1_min, SIGNED);
		  unsigned int mprec1 = wi::min_precision (arg1_max, SIGNED);
		  if (mprec0 <= hprec && mprec1 <= hprec)
		    op1_small_p = true;
		  else if (mprec0 <= hprec + 1 && mprec1 <= hprec + 1)
		    op1_medium_p = true;
		  if (!wi::neg_p (arg1_min, TYPE_SIGN (TREE_TYPE (arg1))))
		    op1_sign = 0;
		  else if (wi::neg_p (arg1_max, TYPE_SIGN (TREE_TYPE (arg1))))
		    op1_sign = -1;
		}
	    }

	  int smaller_sign = 1;
	  int larger_sign = 1;
	  if (op0_small_p)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op1_sign;
	    }
	  else if (op1_small_p)
	    {
	      smaller_sign = op1_sign;
	      larger_sign = op0_sign;
	    }
	  else if (op0_sign == op1_sign)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op0_sign;
	    }

	  if (!op0_small_p)
	    emit_cmp_and_jump_insns (signbit0, hipart0, NE, NULL_RTX, hmode,
				     false, large_op0, PROB_UNLIKELY);

	  if (!op1_small_p)
	    emit_cmp_and_jump_insns (signbit1, hipart1, NE, NULL_RTX, hmode,
				     false, small_op0_large_op1,
				     PROB_UNLIKELY);

	  /* If both op0 and op1 are sign extended from hmode to mode,
	     the multiplication will never overflow.  We can do just one
	     hmode x hmode => mode widening multiplication.  */
	  if (GET_CODE (lopart0) == SUBREG)
	    {
	      SUBREG_PROMOTED_VAR_P (lopart0) = 1;
	      SUBREG_PROMOTED_SET (lopart0, 0);
	    }
	  if (GET_CODE (lopart1) == SUBREG)
	    {
	      SUBREG_PROMOTED_VAR_P (lopart1) = 1;
	      SUBREG_PROMOTED_SET (lopart1, 0);
	    }
	  tree halfstype = build_nonstandard_integer_type (hprec, 0);
	  ops.op0 = make_tree (halfstype, lopart0);
	  ops.op1 = make_tree (halfstype, lopart1);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type = TREE_TYPE (arg0);
	  rtx thisres
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, thisres);
	  emit_jump (done_label);

	  emit_label (small_op0_large_op1);

	  /* If op0 is sign extended from hmode to mode, but op1 is not,
	     just swap the arguments and handle it as op1 sign extended,
	     op0 not.  */
	  rtx larger = gen_reg_rtx (mode);
	  rtx hipart = gen_reg_rtx (hmode);
	  rtx lopart = gen_reg_rtx (hmode);
	  emit_move_insn (larger, op1);
	  emit_move_insn (hipart, hipart1);
	  emit_move_insn (lopart, lopart0);
	  emit_jump (one_small_one_large);

	  emit_label (large_op0);

	  if (!op1_small_p)
	    emit_cmp_and_jump_insns (signbit1, hipart1, NE, NULL_RTX, hmode,
				     false, both_ops_large, PROB_UNLIKELY);

	  /* If op1 is sign extended from hmode to mode, but op0 is not,
	     prepare larger, hipart and lopart pseudos and handle it together
	     with small_op0_large_op1.  */
	  emit_move_insn (larger, op0);
	  emit_move_insn (hipart, hipart0);
	  emit_move_insn (lopart, lopart1);

	  emit_label (one_small_one_large);

	  /* lopart is the low part of the operand that is sign extended
	     to mode, larger is the other operand, hipart is the
	     high part of larger and lopart0 and lopart1 are the low parts
	     of both operands.
	     We perform lopart0 * lopart1 and lopart * hipart widening
	     multiplications.  */
	  tree halfutype = build_nonstandard_integer_type (hprec, 1);
	  ops.op0 = make_tree (halfutype, lopart0);
	  ops.op1 = make_tree (halfutype, lopart1);
	  rtx lo0xlo1
	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);

	  ops.op0 = make_tree (halfutype, lopart);
	  ops.op1 = make_tree (halfutype, hipart);
	  rtx loxhi = gen_reg_rtx (mode);
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (loxhi, tem);

	  /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
	  if (larger_sign == 0)
	    emit_jump (after_hipart_neg);
	  else if (larger_sign != -1)
	    emit_cmp_and_jump_insns (hipart, const0_rtx, GE, NULL_RTX, hmode,
				     false, after_hipart_neg, PROB_EVEN);

	  tem = convert_modes (mode, hmode, lopart, 1);
	  tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
	  tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  emit_label (after_hipart_neg);

	  /* if (lopart < 0) loxhi -= larger;  */
	  if (smaller_sign == 0)
	    emit_jump (after_lopart_neg);
	  else if (smaller_sign != -1)
	    emit_cmp_and_jump_insns (lopart, const0_rtx, GE, NULL_RTX, hmode,
				     false, after_lopart_neg, PROB_EVEN);

	  tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  emit_label (after_lopart_neg);

	  /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
	  tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
	  tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
				     1, OPTAB_DIRECT);
	  emit_move_insn (loxhi, tem);

	  /* if (loxhi >> (bitsize / 2)
		 == (hmode) loxhi >> (bitsize / 2 - 1))  */
	  rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
					  NULL_RTX, 0);
	  hipartloxhi = gen_lowpart (hmode, hipartloxhi);
	  rtx lopartloxhi = gen_lowpart (hmode, loxhi);
	  rtx signbitloxhi = expand_shift (RSHIFT_EXPR, hmode, lopartloxhi,
					   hprec - 1, NULL_RTX, 0);

	  emit_cmp_and_jump_insns (signbitloxhi, hipartloxhi, NE, NULL_RTX,
				   hmode, false, do_overflow,
				   PROB_VERY_UNLIKELY);

	  /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
	  rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
					   NULL_RTX, 1);
	  tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);

	  tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
				     1, OPTAB_DIRECT);
	  if (tem != res)
	    emit_move_insn (res, tem);
	  emit_jump (done_label);

	  emit_label (both_ops_large);

	  /* If both operands are large (not sign extended from hmode),
	     then perform the full multiplication which will be the result
	     of the operation.  The only cases which don't overflow are
	     some cases where both hipart0 and hipart1 are 0 or -1.  */
	  ops.code = MULT_EXPR;
	  ops.op0 = make_tree (TREE_TYPE (arg0), op0);
	  ops.op1 = make_tree (TREE_TYPE (arg0), op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);

	  if (!op0_medium_p)
	    {
	      tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
					 NULL_RTX, 1, OPTAB_DIRECT);
	      emit_cmp_and_jump_insns (tem, const1_rtx, GTU, NULL_RTX, hmode,
				       true, do_error, PROB_VERY_UNLIKELY);
	    }

	  if (!op1_medium_p)
	    {
	      tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
					 NULL_RTX, 1, OPTAB_DIRECT);
	      emit_cmp_and_jump_insns (tem, const1_rtx, GTU, NULL_RTX, hmode,
				       true, do_error, PROB_VERY_UNLIKELY);
	    }

	  /* At this point hipart{0,1} are both in [-1, 0].  If they are the
	     same, overflow happened if res is negative, if they are different,
	     overflow happened if res is positive.  */
	  if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
	    emit_jump (hipart_different);
	  else if (op0_sign == 1 || op1_sign == 1)
	    emit_cmp_and_jump_insns (hipart0, hipart1, NE, NULL_RTX, hmode,
				     true, hipart_different, PROB_EVEN);

	  emit_cmp_and_jump_insns (res, const0_rtx, LT, NULL_RTX, mode, false,
				   do_error, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);

	  emit_label (hipart_different);

	  emit_cmp_and_jump_insns (res, const0_rtx, GE, NULL_RTX, mode, false,
				   do_error, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);

	  emit_label (do_overflow);

	  /* Overflow, do full multiplication and fallthru into do_error.  */
	  ops.op0 = make_tree (TREE_TYPE (arg0), op0);
	  ops.op1 = make_tree (TREE_TYPE (arg0), op1);
	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_move_insn (res, tem);
	}
      else
	{
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg0);
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  emit_jump (done_label);
	}
    }

  emit_label (do_error);
  /* Expand the ubsan builtin call.  */
  push_temp_slots ();
  fn = ubsan_build_overflow_builtin (MULT_EXPR, gimple_location (stmt),
				     TREE_TYPE (arg0), arg0, arg1);
  expand_normal (fn);
  pop_temp_slots ();
  do_pending_stack_adjust ();

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    emit_move_insn (target, res);
}
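
Note: the overflow test used throughout this expansion -- there was overflow iff the high half of the double-width product differs from RES < 0 ? -1 : 0 -- can be checked with ordinary arithmetic. A minimal standalone C sketch; the helper name mul_overflows is made up, and the narrowing casts assume the two's-complement behaviour GCC targets provide:

#include <stdint.h>
#include <stdio.h>

/* Signed multiplication with overflow detection via a double-width
   product: overflow happened iff the high half of the product differs
   from the sign extension of the low half.  */
static int
mul_overflows (int32_t a, int32_t b, int32_t *res)
{
  int64_t wide = (int64_t) a * (int64_t) b;
  int32_t lo = (int32_t) wide;		/* low half -- the result  */
  int32_t hi = (int32_t) (wide >> 32);	/* high half               */
  *res = lo;
  return hi != (lo < 0 ? -1 : 0);
}

int
main (void)
{
  int32_t r;
  printf ("46341 * 46341 overflows: %d\n", mul_overflows (46341, 46341, &r));
  printf ("-1000 * 1000  overflows: %d\n", mul_overflows (-1000, 1000, &r));
  return 0;
}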
Example #4
void
omp_extract_for_data (gomp_for *for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
		    == GF_OMP_FOR_KIND_DISTRIBUTE;
  bool taskloop = gimple_omp_for_kind (for_stmt)
		  == GF_OMP_FOR_KIND_TASKLOOP;
  tree iterv, countv;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  if (gimple_omp_for_collapse (for_stmt) > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->collapse = 1;
  fd->ordered = 0;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->sched_modifiers = 0;
  fd->chunk_size = NULL_TREE;
  fd->simd_schedule = false;
  if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	if (OMP_CLAUSE_ORDERED_EXPR (t))
	  fd->ordered = tree_to_shwi (OMP_CLAUSE_ORDERED_EXPR (t));
	break;
      case OMP_CLAUSE_SCHEDULE:
	gcc_assert (!distribute && !taskloop);
	fd->sched_kind
	  = (enum omp_clause_schedule_kind)
	    (OMP_CLAUSE_SCHEDULE_KIND (t) & OMP_CLAUSE_SCHEDULE_MASK);
	fd->sched_modifiers = (OMP_CLAUSE_SCHEDULE_KIND (t)
			       & ~OMP_CLAUSE_SCHEDULE_MASK);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	fd->simd_schedule = OMP_CLAUSE_SCHEDULE_SIMD (t);
	break;
      case OMP_CLAUSE_DIST_SCHEDULE:
	gcc_assert (distribute);
	fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	fd->collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (t));
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }
  if (fd->ordered && fd->collapse == 1 && loops != NULL)
    {
      fd->loops = loops;
      iterv = NULL_TREE;
      countv = NULL_TREE;
      collapse_iter = &iterv;
      collapse_count = &countv;
    }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (taskloop)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_RUNTIME;
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  int cnt = fd->ordered ? fd->ordered : fd->collapse;
  for (i = 0; i < cnt; i++)
    {
      if (i == 0 && fd->collapse == 1 && (fd->ordered == 0 || loops == NULL))
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      gcc_assert (loop->cond_code != NE_EXPR
		  || gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_CILKSIMD
		  || gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_CILKFOR);
      omp_adjust_for_condition (loc, &loop->cond_code, &loop->n2);

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      loop->step = omp_get_for_step_from_incr (loc, t);

      if (simd
	  || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	      && !fd->have_ordered))
	{
	  if (fd->collapse == 1)
	    iter_type = TREE_TYPE (loop->v);
	  else if (i == 0
		   || TYPE_PRECISION (iter_type)
		      < TYPE_PRECISION (TREE_TYPE (loop->v)))
	    iter_type
	      = build_nonstandard_integer_type
		  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
	}
      else if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				 PLUS_EXPR, TREE_TYPE (loop->v),
				 loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
				    PLUS_EXPR, TREE_TYPE (loop->v),
				    loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
				    MINUS_EXPR, TREE_TYPE (loop->v),
				    loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (i >= fd->collapse)
	continue;

      if (collapse_count && *collapse_count == NULL)
	{
	  t = fold_binary (loop->cond_code, boolean_type_node,
			   fold_convert (TREE_TYPE (loop->v), loop->n1),
			   fold_convert (TREE_TYPE (loop->v), loop->n2));
	  if (t && integer_zerop (t))
	    count = build_zero_cst (long_long_unsigned_type_node);
	  else if ((i == 0 || count != NULL_TREE)
		   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		   && TREE_CONSTANT (loop->n1)
		   && TREE_CONSTANT (loop->n2)
		   && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
			       PLUS_EXPR, itype,
			       fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
			       fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
			       fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				 fold_build1_loc (loc, NEGATE_EXPR, itype,
					      fold_convert_loc (loc, itype,
								loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				 fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
				     MULT_EXPR, long_long_unsigned_type_node,
				     count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else if (count && !integer_zerop (count))
	    count = NULL_TREE;
	}
    }

  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1 || (fd->ordered && loops))
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
  else if (loops)
    loops[0] = fd->loop;
}
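
Note: for an LT_EXPR condition the iteration count built above reduces to the ceiling division (n2 - n1 + step - 1) / step. A small standalone check of that arithmetic (plain C; the function name trip_count is made up for illustration):

#include <stdio.h>

/* Iteration count of  for (v = n1; v < n2; v += step)  computed as in
   omp_extract_for_data: (step - 1 + n2 - n1) / step, i.e. a ceiling
   division.  Assumes step > 0 and n1 <= n2.  */
static long long
trip_count (long long n1, long long n2, long long step)
{
  return (step - 1 + n2 - n1) / step;
}

int
main (void)
{
  printf ("%lld\n", trip_count (0, 10, 3));	/* 0, 3, 6, 9  -> 4  */
  printf ("%lld\n", trip_count (5, 5, 2));	/* empty loop  -> 0  */
  printf ("%lld\n", trip_count (1, 100, 7));	/* ceil (99/7) -> 15 */
  return 0;
}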
Example #5
File: ubsan.c  Project: acoxepochlabs/gcc
static void
instrument_bool_enum_load (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree rhs = gimple_assign_rhs1 (stmt);
  tree type = TREE_TYPE (rhs);
  tree minv = NULL_TREE, maxv = NULL_TREE;

  if (TREE_CODE (type) == BOOLEAN_TYPE && (flag_sanitize & SANITIZE_BOOL))
    {
      minv = boolean_false_node;
      maxv = boolean_true_node;
    }
  else if (TREE_CODE (type) == ENUMERAL_TYPE
	   && (flag_sanitize & SANITIZE_ENUM)
	   && TREE_TYPE (type) != NULL_TREE
	   && TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE
	   && (TYPE_PRECISION (TREE_TYPE (type))
	       < GET_MODE_PRECISION (TYPE_MODE (type))))
    {
      minv = TYPE_MIN_VALUE (TREE_TYPE (type));
      maxv = TYPE_MAX_VALUE (TREE_TYPE (type));
    }
  else
    return;

  int modebitsize = GET_MODE_BITSIZE (TYPE_MODE (type));
  HOST_WIDE_INT bitsize, bitpos;
  tree offset;
  enum machine_mode mode;
  int volatilep = 0, unsignedp = 0;
  tree base = get_inner_reference (rhs, &bitsize, &bitpos, &offset, &mode,
				   &unsignedp, &volatilep, false);
  tree utype = build_nonstandard_integer_type (modebitsize, 1);

  if ((TREE_CODE (base) == VAR_DECL && DECL_HARD_REGISTER (base))
      || (bitpos % modebitsize) != 0
      || bitsize != modebitsize
      || GET_MODE_BITSIZE (TYPE_MODE (utype)) != modebitsize
      || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return;

  location_t loc = gimple_location (stmt);
  tree ptype = build_pointer_type (TREE_TYPE (rhs));
  tree atype = reference_alias_ptr_type (rhs);
  gimple g = gimple_build_assign (make_ssa_name (ptype, NULL),
				  build_fold_addr_expr (rhs));
  gimple_set_location (g, loc);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  tree mem = build2 (MEM_REF, utype, gimple_assign_lhs (g),
		     build_int_cst (atype, 0));
  tree urhs = make_ssa_name (utype, NULL);
  g = gimple_build_assign (urhs, mem);
  gimple_set_location (g, loc);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  minv = fold_convert (utype, minv);
  maxv = fold_convert (utype, maxv);
  if (!integer_zerop (minv))
    {
      g = gimple_build_assign_with_ops (MINUS_EXPR,
					make_ssa_name (utype, NULL),
					urhs, minv);
      gimple_set_location (g, loc);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  gimple_stmt_iterator gsi2 = *gsi;
  basic_block then_bb, fallthru_bb;
  *gsi = create_cond_insert_point (gsi, true, false, true,
				   &then_bb, &fallthru_bb);
  g = gimple_build_cond (GT_EXPR, gimple_assign_lhs (g),
			 int_const_binop (MINUS_EXPR, maxv, minv),
			 NULL_TREE, NULL_TREE);
  gimple_set_location (g, loc);
  gsi_insert_after (gsi, g, GSI_NEW_STMT);

  gimple_assign_set_rhs_with_ops (&gsi2, NOP_EXPR, urhs, NULL_TREE);
  update_stmt (stmt);

  gsi2 = gsi_after_labels (then_bb);
  if (flag_sanitize_undefined_trap_on_error)
    g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
  else
    {
      tree data = ubsan_create_data ("__ubsan_invalid_value_data", &loc, NULL,
				     ubsan_type_descriptor (type), NULL_TREE);
      data = build_fold_addr_expr_loc (loc, data);
      enum built_in_function bcode
	= flag_sanitize_recover
	  ? BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE
	  : BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE_ABORT;
      tree fn = builtin_decl_explicit (bcode);

      tree val = force_gimple_operand_gsi (&gsi2, ubsan_encode_value (urhs),
					   true, NULL_TREE, true,
					   GSI_SAME_STMT);
      g = gimple_build_call (fn, 2, data, val);
    }
  gimple_set_location (g, loc);
  gsi_insert_before (&gsi2, g, GSI_SAME_STMT);
}
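
Note: the emitted check -- subtract minv, then a single unsigned comparison against maxv - minv -- is the standard one-branch test for membership in [minv, maxv]. A minimal standalone C illustration (the enum and the helper valid_color are hypothetical):

#include <stdio.h>

/* Hypothetical enum whose valid underlying range is [MIN_COLOR, MAX_COLOR].  */
enum color { RED = 2, GREEN = 3, BLUE = 4 };
#define MIN_COLOR 2u
#define MAX_COLOR 4u

/* The same shape of test the instrumentation emits: bias by the minimum
   and compare unsigned, so values below the minimum wrap around and fail
   the single comparison together with values above the maximum.  */
static int
valid_color (unsigned v)
{
  return v - MIN_COLOR <= MAX_COLOR - MIN_COLOR;
}

int
main (void)
{
  for (unsigned v = 0; v <= 6; v++)
    printf ("%u -> %s\n", v, valid_color (v) ? "valid" : "invalid");
  return 0;
}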