Example #1
static tree
chrec_convert_1 (tree type, tree chrec, gimple *at_stmt,
		 bool use_overflow_semantics)
{
  tree ct, res;
  tree base, step;
  struct loop *loop;

  if (automatically_generated_chrec_p (chrec))
    return chrec;

  ct = chrec_type (chrec);
  if (useless_type_conversion_p (type, ct))
    return chrec;

  if (!evolution_function_is_affine_p (chrec))
    goto keep_cast;

  loop = get_chrec_loop (chrec);
  base = CHREC_LEFT (chrec);
  step = CHREC_RIGHT (chrec);

  if (convert_affine_scev (loop, type, &base, &step, at_stmt,
			   use_overflow_semantics))
    return build_polynomial_chrec (loop->num, base, step);

  /* If we cannot propagate the cast inside the chrec, just keep the cast.  */
keep_cast:
  /* Fold will not canonicalize (long)(i - 1) to (long)i - 1 because that
     may be more expensive.  We do want to perform this optimization here
     though for canonicalization reasons.  */
  if (use_overflow_semantics
      && (TREE_CODE (chrec) == PLUS_EXPR
	  || TREE_CODE (chrec) == MINUS_EXPR)
      && TREE_CODE (type) == INTEGER_TYPE
      && TREE_CODE (ct) == INTEGER_TYPE
      && TYPE_PRECISION (type) > TYPE_PRECISION (ct)
      && TYPE_OVERFLOW_UNDEFINED (ct))
    res = fold_build2 (TREE_CODE (chrec), type,
		       fold_convert (type, TREE_OPERAND (chrec, 0)),
		       fold_convert (type, TREE_OPERAND (chrec, 1)));
  /* Similarly, perform the trick that (signed char)((int)x + 2) can be
     narrowed to (signed char)((unsigned char)x + 2).  */
  else if (use_overflow_semantics
	   && TREE_CODE (chrec) == POLYNOMIAL_CHREC
	   && TREE_CODE (ct) == INTEGER_TYPE
	   && TREE_CODE (type) == INTEGER_TYPE
	   && TYPE_OVERFLOW_UNDEFINED (type)
	   && TYPE_PRECISION (type) < TYPE_PRECISION (ct))
    {
      tree utype = unsigned_type_for (type);
      res = build_polynomial_chrec (CHREC_VARIABLE (chrec),
				    fold_convert (utype,
						  CHREC_LEFT (chrec)),
				    fold_convert (utype,
						  CHREC_RIGHT (chrec)));
      res = chrec_convert_1 (type, res, at_stmt, use_overflow_semantics);
    }
  else
    res = fold_convert (type, chrec);

  /* Don't propagate overflows.  */
  if (CONSTANT_CLASS_P (res))
    TREE_OVERFLOW (res) = 0;

  /* But reject constants that don't fit in their type after conversion.
     This can happen if TYPE_MIN_VALUE or TYPE_MAX_VALUE are not the
     natural values associated with TYPE_PRECISION and TYPE_UNSIGNED,
     and can cause problems later when computing niters of loops.  Note
     that we don't do the check before converting because we don't want
     to reject conversions of negative chrecs to unsigned types.  */
  if (TREE_CODE (res) == INTEGER_CST
      && TREE_CODE (type) == INTEGER_TYPE
      && !int_fits_type_p (res, type))
    res = chrec_dont_know;

  return res;
}
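The two keep_cast rewrites are easier to see at the source level. Below is a hypothetical plain-C illustration (not GCC code) of the transformations; both rely on signed overflow being undefined in the inner type:

/* Trick 1: widening a PLUS_EXPR/MINUS_EXPR.  Because overflow of the
   narrower signed type is undefined, the cast may be pushed inside:
   (long)(i - 1) becomes (long)i - 1.  */
long
widen (int i)
{
  return (long) (i - 1);	/* canonicalized to (long) i - 1 */
}

/* Trick 2: narrowing a polynomial chrec.  The left and right parts are
   first converted to the unsigned variant of the target type, then the
   conversion is retried: (signed char)((int)x + 2) becomes
   (signed char)((unsigned char)x + 2).  */
signed char
narrow (signed char x)
{
  return (signed char) ((int) x + 2);
}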
Example #2
static void
emit_case_bit_tests (gimple swtch, tree index_expr,
		     tree minval, tree range)
{
  struct case_bit_test test[MAX_CASE_BIT_TESTS];
  unsigned int i, j, k;
  unsigned int count;

  basic_block switch_bb = gimple_bb (swtch);
  basic_block default_bb, new_default_bb, new_bb;
  edge default_edge;
  bool update_dom = dom_info_available_p (CDI_DOMINATORS);

  vec<basic_block> bbs_to_fix_dom = vNULL;

  tree index_type = TREE_TYPE (index_expr);
  tree unsigned_index_type = unsigned_type_for (index_type);
  unsigned int branch_num = gimple_switch_num_labels (swtch);

  gimple_stmt_iterator gsi;
  gimple shift_stmt;

  tree idx, tmp, csui;
  tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
  tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
  tree word_mode_one = fold_convert (word_type_node, integer_one_node);

  memset (&test, 0, sizeof (test));

  /* Get the edge for the default case.  */
  tmp = gimple_switch_default_label (swtch);
  default_bb = label_to_block (CASE_LABEL (tmp));
  default_edge = find_edge (switch_bb, default_bb);

  /* Go through all case labels, and collect the case labels, profile
     counts, and other information we need to build the branch tests.  */
  count = 0;
  for (i = 1; i < branch_num; i++)
    {
      unsigned int lo, hi;
      tree cs = gimple_switch_label (swtch, i);
      tree label = CASE_LABEL (cs);
      edge e = find_edge (switch_bb, label_to_block (label));
      for (k = 0; k < count; k++)
	if (e == test[k].target_edge)
	  break;

      if (k == count)
	{
	  gcc_checking_assert (count < MAX_CASE_BIT_TESTS);
	  test[k].hi = 0;
	  test[k].lo = 0;
	  test[k].target_edge = e;
	  test[k].label = label;
	  test[k].bits = 1;
	  count++;
	}
      else
        test[k].bits++;

      lo = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					  CASE_LOW (cs), minval));
      if (CASE_HIGH (cs) == NULL_TREE)
	hi = lo;
      else
	hi = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					    CASE_HIGH (cs), minval));

      for (j = lo; j <= hi; j++)
        if (j >= HOST_BITS_PER_WIDE_INT)
	  test[k].hi |= (HOST_WIDE_INT) 1 << (j - HOST_BITS_PER_WIDE_INT);
	else
	  test[k].lo |= (HOST_WIDE_INT) 1 << j;
    }

  qsort (test, count, sizeof (*test), case_bit_test_cmp);

  /* We generate two jumps to the default case label.
     Split the default edge, so that we don't have to do any PHI node
     updating.  */
  new_default_bb = split_edge (default_edge);

  if (update_dom)
    {
      bbs_to_fix_dom.create (10);
      bbs_to_fix_dom.quick_push (switch_bb);
      bbs_to_fix_dom.quick_push (default_bb);
      bbs_to_fix_dom.quick_push (new_default_bb);
    }

  /* Now build the test-and-branch code.  */

  gsi = gsi_last_bb (switch_bb);

  /* idx = (unsigned)x - minval.  */
  idx = fold_convert (unsigned_index_type, index_expr);
  idx = fold_build2 (MINUS_EXPR, unsigned_index_type, idx,
		     fold_convert (unsigned_index_type, minval));
  idx = force_gimple_operand_gsi (&gsi, idx,
				  /*simple=*/true, NULL_TREE,
				  /*before=*/true, GSI_SAME_STMT);

  /* if (idx > range) goto default */
  range = force_gimple_operand_gsi (&gsi,
				    fold_convert (unsigned_index_type, range),
				    /*simple=*/true, NULL_TREE,
				    /*before=*/true, GSI_SAME_STMT);
  tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
  new_bb = hoist_edge_and_branch_if_true (&gsi, tmp, default_edge, update_dom);
  if (update_dom)
    bbs_to_fix_dom.quick_push (new_bb);
  gcc_assert (gimple_bb (swtch) == new_bb);
  gsi = gsi_last_bb (new_bb);

  /* Any blocks dominated by the GIMPLE_SWITCH, but that are not successors
     of NEW_BB, are still immediately dominated by SWITCH_BB.  Make it so.  */
  if (update_dom)
    {
      vec<basic_block> dom_bbs;
      basic_block dom_son;

      dom_bbs = get_dominated_by (CDI_DOMINATORS, new_bb);
      FOR_EACH_VEC_ELT (dom_bbs, i, dom_son)
	{
	  edge e = find_edge (new_bb, dom_son);
	  if (e && single_pred_p (e->dest))
	    continue;
	  set_immediate_dominator (CDI_DOMINATORS, dom_son, switch_bb);
	  bbs_to_fix_dom.safe_push (dom_son);
	}
      dom_bbs.release ();
    }
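For orientation, here is a hypothetical before/after sketch (plain C, not actual GCC output) of the shape of code this lowering produces: subtract minval, range-check against the default, then test one word-mode mask per target edge.

/* Before: several sparse cases sharing two targets.  */
int
before (unsigned x)
{
  switch (x)
    {
    case 0: case 3: case 5: return 1;
    case 1: case 4:	    return 2;
    default:		    return 0;
    }
}

/* After, conceptually.  The masks 0x29 and 0x12 have bits 0/3/5 and
   1/4 set, one mask per target edge collected in test[].  */
int
after (unsigned x)
{
  unsigned idx = x - 0;		    /* idx = (unsigned) x - minval */
  if (idx > 5)			    /* if (idx > range) goto default */
    return 0;
  unsigned long csui = 1UL << idx;  /* csui = (word) 1 << idx */
  if (csui & 0x29)
    return 1;
  if (csui & 0x12)
    return 2;
  return 0;
}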
Example #3
static void
emit_case_bit_tests (gswitch *swtch, tree index_expr,
		     tree minval, tree range, tree maxval)
{
  struct case_bit_test test[MAX_CASE_BIT_TESTS];
  unsigned int i, j, k;
  unsigned int count;

  basic_block switch_bb = gimple_bb (swtch);
  basic_block default_bb, new_default_bb, new_bb;
  edge default_edge;
  bool update_dom = dom_info_available_p (CDI_DOMINATORS);

  vec<basic_block> bbs_to_fix_dom = vNULL;

  tree index_type = TREE_TYPE (index_expr);
  tree unsigned_index_type = unsigned_type_for (index_type);
  unsigned int branch_num = gimple_switch_num_labels (swtch);

  gimple_stmt_iterator gsi;
  gassign *shift_stmt;

  tree idx, tmp, csui;
  tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
  tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
  tree word_mode_one = fold_convert (word_type_node, integer_one_node);
  int prec = TYPE_PRECISION (word_type_node);
  wide_int wone = wi::one (prec);

  memset (&test, 0, sizeof (test));

  /* Get the edge for the default case.  */
  tmp = gimple_switch_default_label (swtch);
  default_bb = label_to_block (CASE_LABEL (tmp));
  default_edge = find_edge (switch_bb, default_bb);

  /* Go through all case labels, and collect the case labels, profile
     counts, and other information we need to build the branch tests.  */
  count = 0;
  for (i = 1; i < branch_num; i++)
    {
      unsigned int lo, hi;
      tree cs = gimple_switch_label (swtch, i);
      tree label = CASE_LABEL (cs);
      edge e = find_edge (switch_bb, label_to_block (label));
      for (k = 0; k < count; k++)
	if (e == test[k].target_edge)
	  break;

      if (k == count)
	{
	  gcc_checking_assert (count < MAX_CASE_BIT_TESTS);
	  test[k].mask = wi::zero (prec);
	  test[k].target_edge = e;
	  test[k].label = label;
	  test[k].bits = 1;
	  count++;
	}
      else
        test[k].bits++;

      lo = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					  CASE_LOW (cs), minval));
      if (CASE_HIGH (cs) == NULL_TREE)
	hi = lo;
      else
	hi = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					    CASE_HIGH (cs), minval));

      for (j = lo; j <= hi; j++)
	test[k].mask |= wi::lshift (wone, j);
    }

  qsort (test, count, sizeof (*test), case_bit_test_cmp);

  /* If all values are in the 0 .. BITS_PER_WORD-1 range, we can get rid of
     the minval subtractions, but it might make the mask constants more
     expensive.  So, compare the costs.  */
  if (compare_tree_int (minval, 0) > 0
      && compare_tree_int (maxval, GET_MODE_BITSIZE (word_mode)) < 0)
    {
      int cost_diff;
      HOST_WIDE_INT m = tree_to_uhwi (minval);
      rtx reg = gen_raw_REG (word_mode, 10000);
      bool speed_p = optimize_bb_for_speed_p (gimple_bb (swtch));
      cost_diff = set_rtx_cost (gen_rtx_PLUS (word_mode, reg,
					      GEN_INT (-m)), speed_p);
      for (i = 0; i < count; i++)
	{
	  rtx r = immed_wide_int_const (test[i].mask, word_mode);
	  cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     word_mode, speed_p);
	  r = immed_wide_int_const (wi::lshift (test[i].mask, m), word_mode);
	  cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     word_mode, speed_p);
	}
      if (cost_diff > 0)
	{
	  for (i = 0; i < count; i++)
	    test[i].mask = wi::lshift (test[i].mask, m);
	  minval = build_zero_cst (TREE_TYPE (minval));
	  range = maxval;
	}
    }
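  /* A hypothetical source-level view of the trade-off just computed:
     with cases {10, 12}, so minval 10,

	 idx = x - 10; if (idx > 2) goto default;
	 csui = 1 << idx; if (csui & 0x5) ...

     becomes, when the pre-shifted masks are no more expensive,

	 if (x > 12) goto default;
	 csui = 1 << x; if (csui & (0x5 << 10)) ...

     dropping the runtime subtraction at the cost of wider mask
     constants.  */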

  /* We generate two jumps to the default case label.
     Split the default edge, so that we don't have to do any PHI node
     updating.  */
  new_default_bb = split_edge (default_edge);

  if (update_dom)
    {
      bbs_to_fix_dom.create (10);
      bbs_to_fix_dom.quick_push (switch_bb);
      bbs_to_fix_dom.quick_push (default_bb);
      bbs_to_fix_dom.quick_push (new_default_bb);
    }

  /* Now build the test-and-branch code.  */

  gsi = gsi_last_bb (switch_bb);

  /* idx = (unsigned)x - minval.  */
  idx = fold_convert (unsigned_index_type, index_expr);
  idx = fold_build2 (MINUS_EXPR, unsigned_index_type, idx,
		     fold_convert (unsigned_index_type, minval));
  idx = force_gimple_operand_gsi (&gsi, idx,
				  /*simple=*/true, NULL_TREE,
				  /*before=*/true, GSI_SAME_STMT);

  /* if (idx > range) goto default */
  range = force_gimple_operand_gsi (&gsi,
				    fold_convert (unsigned_index_type, range),
				    /*simple=*/true, NULL_TREE,
				    /*before=*/true, GSI_SAME_STMT);
  tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
  new_bb = hoist_edge_and_branch_if_true (&gsi, tmp, default_edge, update_dom);
  if (update_dom)
    bbs_to_fix_dom.quick_push (new_bb);
  gcc_assert (gimple_bb (swtch) == new_bb);
  gsi = gsi_last_bb (new_bb);

  /* Any blocks dominated by the GIMPLE_SWITCH, but that are not successors
     of NEW_BB, are still immediately dominated by SWITCH_BB.  Make it so.  */
  if (update_dom)
    {
      vec<basic_block> dom_bbs;
      basic_block dom_son;

      dom_bbs = get_dominated_by (CDI_DOMINATORS, new_bb);
      FOR_EACH_VEC_ELT (dom_bbs, i, dom_son)
	{
	  edge e = find_edge (new_bb, dom_son);
	  if (e && single_pred_p (e->dest))
	    continue;
	  set_immediate_dominator (CDI_DOMINATORS, dom_son, switch_bb);
	  bbs_to_fix_dom.safe_push (dom_son);
	}
      dom_bbs.release ();
    }
Example #4
static bool
ifcombine_iforif (basic_block inner_cond_bb, basic_block outer_cond_bb)
{
  gimple inner_cond, outer_cond;
  tree name1, name2, bits1, bits2;

  inner_cond = last_stmt (inner_cond_bb);
  if (!inner_cond
      || gimple_code (inner_cond) != GIMPLE_COND)
    return false;

  outer_cond = last_stmt (outer_cond_bb);
  if (!outer_cond
      || gimple_code (outer_cond) != GIMPLE_COND)
    return false;

  /* See if we have two bit tests of the same name in both tests.
     In that case remove the outer test and change the inner one to
     test for name & (bits1 | bits2) != 0.  */
  if (recognize_bits_test (inner_cond, &name1, &bits1)
      && recognize_bits_test (outer_cond, &name2, &bits2))
    {
      gimple_stmt_iterator gsi;
      tree t;

      /* Find the common name which is bit-tested.  */
      if (name1 == name2)
	;
      else if (bits1 == bits2)
	{
	  t = name2;
	  name2 = bits2;
	  bits2 = t;
	  t = name1;
	  name1 = bits1;
	  bits1 = t;
	}
      else if (name1 == bits2)
	{
	  t = name2;
	  name2 = bits2;
	  bits2 = t;
	}
      else if (bits1 == name2)
	{
	  t = name1;
	  name1 = bits1;
	  bits1 = t;
	}
      else
	return false;

      /* As we strip non-widening conversions when finding a common
	 name that is tested, make sure to end up with an integral
	 type for building the bit operations.  */
      if (TYPE_PRECISION (TREE_TYPE (bits1))
	  >= TYPE_PRECISION (TREE_TYPE (bits2)))
	{
	  bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
	  name1 = fold_convert (TREE_TYPE (bits1), name1);
	  bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
	  bits2 = fold_convert (TREE_TYPE (bits1), bits2);
	}
      else
	{
	  bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
	  name1 = fold_convert (TREE_TYPE (bits2), name1);
	  bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
	  bits1 = fold_convert (TREE_TYPE (bits2), bits1);
	}

      /* Do it.  */
      gsi = gsi_for_stmt (inner_cond);
      t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), bits1, bits2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t = fold_build2 (NE_EXPR, boolean_type_node, t,
		       build_int_cst (TREE_TYPE (t), 0));
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond, boolean_false_node);
      update_stmt (outer_cond);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing bits or bits test to ");
	  print_generic_expr (dump_file, name1, 0);
	  fprintf (dump_file, " & T != 0\nwith temporary T = ");
	  print_generic_expr (dump_file, bits1, 0);
	  fprintf (dump_file, " | ");
	  print_generic_expr (dump_file, bits2, 0);
	  fprintf (dump_file, "\n");
	}

      return true;
    }

  /* See if we have two comparisons that we can merge into one.
     This happens for C++ operator overloading where for example
     GE_EXPR is implemented as GT_EXPR || EQ_EXPR.  */
  else if (TREE_CODE_CLASS (gimple_cond_code (inner_cond)) == tcc_comparison
	   && TREE_CODE_CLASS (gimple_cond_code (outer_cond)) == tcc_comparison
	   && operand_equal_p (gimple_cond_lhs (inner_cond),
			       gimple_cond_lhs (outer_cond), 0)
	   && operand_equal_p (gimple_cond_rhs (inner_cond),
			       gimple_cond_rhs (outer_cond), 0))
    {
      enum tree_code code1 = gimple_cond_code (inner_cond);
      enum tree_code code2 = gimple_cond_code (outer_cond);
      enum tree_code code;
      tree t;

#define CHK(a,b) ((code1 == a ## _EXPR && code2 == b ## _EXPR) \
		  || (code2 == a ## _EXPR && code1 == b ## _EXPR))
      /* Merge the two condition codes if possible.  */
      if (code1 == code2)
	code = code1;
      else if (CHK (EQ, LT))
	code = LE_EXPR;
      else if (CHK (EQ, GT))
	code = GE_EXPR;
      else if (CHK (LT, LE))
	code = LE_EXPR;
      else if (CHK (GT, GE))
	code = GE_EXPR;
      else if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (inner_cond)))
	       || flag_unsafe_math_optimizations)
	{
	  if (CHK (LT, GT))
	    code = NE_EXPR;
	  else if (CHK (LT, NE))
	    code = NE_EXPR;
	  else if (CHK (GT, NE))
	    code = NE_EXPR;
	  else
	    return false;
	}
      /* We could check for combinations leading to trivial true/false.  */
      else
	return false;
#undef CHK

      /* Do it.  */
      t = fold_build2 (code, boolean_type_node, gimple_cond_lhs (outer_cond),
		       gimple_cond_rhs (outer_cond));
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond, boolean_false_node);
      update_stmt (outer_cond);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing two comparisons to ");
	  print_generic_expr (dump_file, t, 0);
	  fprintf (dump_file, "\n");
	}

      return true;
    }

  return false;
}
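Both merges are visible at the source level; a hypothetical plain-C sketch (names and constants are illustrative):

/* Bits-or-bits: two bit tests of one name fold into a single test
   of the OR'ed masks, name & (bits1 | bits2) != 0.  */
int
bits_test (unsigned a)
{
  if ((a & 0x0f) != 0 || (a & 0xf0) != 0)	/* becomes (a & 0xff) != 0 */
    return 1;
  return 0;
}

/* Comparison merge: the C++ operator-overloading pattern
   GT_EXPR || EQ_EXPR collapses to GE_EXPR.  */
int
cmp_test (int a, int b)
{
  if (a > b || a == b)				/* becomes a >= b */
    return 1;
  return 0;
}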
Example #5
tree
ubsan_instrument_shift (location_t loc, enum tree_code code,
			tree op0, tree op1)
{
  tree t, tt = NULL_TREE;
  tree type0 = TREE_TYPE (op0);
  tree type1 = TREE_TYPE (op1);
  if (!INTEGRAL_TYPE_P (type0))
    return NULL_TREE;

  tree op1_utype = unsigned_type_for (type1);
  HOST_WIDE_INT op0_prec = TYPE_PRECISION (type0);
  tree uprecm1 = build_int_cst (op1_utype, op0_prec - 1);

  op0 = unshare_expr (op0);
  op1 = unshare_expr (op1);

  t = fold_convert_loc (loc, op1_utype, op1);
  t = fold_build2 (GT_EXPR, boolean_type_node, t, uprecm1);

  /* If this is not a signed operation, don't perform overflow checks.
     Also punt on bit-fields.  */
  if (TYPE_OVERFLOW_WRAPS (type0)
      || GET_MODE_BITSIZE (TYPE_MODE (type0)) != TYPE_PRECISION (type0)
      || (flag_sanitize & SANITIZE_SHIFT_BASE) == 0)
    ;

  /* For signed x << y, in C99/C11, the following:
     (unsigned) x >> (uprecm1 - y)
     if non-zero, is undefined.  */
  else if (code == LSHIFT_EXPR && flag_isoc99 && cxx_dialect < cxx11)
    {
      tree x = fold_build2 (MINUS_EXPR, op1_utype, uprecm1,
			    fold_convert (op1_utype, unshare_expr (op1)));
      tt = fold_convert_loc (loc, unsigned_type_for (type0), op0);
      tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
      tt = fold_build2 (NE_EXPR, boolean_type_node, tt,
			build_int_cst (TREE_TYPE (tt), 0));
    }

  /* For signed x << y, in C++11 and later, the following:
     x < 0 || ((unsigned) x >> (uprecm1 - y))
     if > 1, is undefined.  */
  else if (code == LSHIFT_EXPR && cxx_dialect >= cxx11)
    {
      tree x = fold_build2 (MINUS_EXPR, op1_utype, uprecm1,
			    fold_convert (op1_utype, unshare_expr (op1)));
      tt = fold_convert_loc (loc, unsigned_type_for (type0),
			     unshare_expr (op0));
      tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
      tt = fold_build2 (GT_EXPR, boolean_type_node, tt,
			build_int_cst (TREE_TYPE (tt), 1));
      x = fold_build2 (LT_EXPR, boolean_type_node, unshare_expr (op0),
		       build_int_cst (type0, 0));
      tt = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, x, tt);
    }

  /* If the condition was folded to 0, no need to instrument
     this expression.  */
  if (integer_zerop (t) && (tt == NULL_TREE || integer_zerop (tt)))
    return NULL_TREE;

  /* In case we have a SAVE_EXPR in a conditional context, we need to
     make sure it gets evaluated before the condition.  */
  t = fold_build2 (COMPOUND_EXPR, TREE_TYPE (t), unshare_expr (op0), t);

  enum sanitize_code recover_kind = SANITIZE_SHIFT_EXPONENT;
  tree else_t = void_node;
  if (tt)
    {
      if ((flag_sanitize & SANITIZE_SHIFT_EXPONENT) == 0)
	{
	  t = fold_build1 (TRUTH_NOT_EXPR, boolean_type_node, t);
	  t = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, t, tt);
	  recover_kind = SANITIZE_SHIFT_BASE;
	}
      else
	{
	  if (flag_sanitize_undefined_trap_on_error
	      || ((!(flag_sanitize_recover & SANITIZE_SHIFT_EXPONENT))
		  == (!(flag_sanitize_recover & SANITIZE_SHIFT_BASE))))
	    t = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, t, tt);
	  else
	    else_t = tt;
	}
    }

  if (flag_sanitize_undefined_trap_on_error)
    tt = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TRAP), 0);
  else
    {
      tree data = ubsan_create_data ("__ubsan_shift_data", 1, &loc,
				     ubsan_type_descriptor (type0),
				     ubsan_type_descriptor (type1), NULL_TREE,
				     NULL_TREE);
      data = build_fold_addr_expr_loc (loc, data);

      enum built_in_function bcode
	= (flag_sanitize_recover & recover_kind)
	  ? BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS
	  : BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS_ABORT;
      tt = builtin_decl_explicit (bcode);
      op0 = unshare_expr (op0);
      op1 = unshare_expr (op1);
      tt = build_call_expr_loc (loc, tt, 3, data, ubsan_encode_value (op0),
				ubsan_encode_value (op1));
      if (else_t != void_node)
	{
	  bcode = (flag_sanitize_recover & SANITIZE_SHIFT_BASE)
		  ? BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS
		  : BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS_ABORT;
	  tree else_tt = builtin_decl_explicit (bcode);
	  op0 = unshare_expr (op0);
	  op1 = unshare_expr (op1);
	  else_tt = build_call_expr_loc (loc, else_tt, 3, data,
					 ubsan_encode_value (op0),
					 ubsan_encode_value (op1));
	  else_t = fold_build3 (COND_EXPR, void_type_node, else_t,
				else_tt, void_node);
	}
    }
  t = fold_build3 (COND_EXPR, void_type_node, t, tt, else_t);

  return t;
}
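As a cross-check on what the two conditions test, here is a minimal hypothetical sketch (plain C, not GCC trees) of the predicate evaluated for a 32-bit int under the C++11 rule; uprecm1 is 31:

/* Returns nonzero iff `x << y' is undefined for 32-bit int.  */
int
shift_is_undefined (int x, int y)
{
  /* Exponent check (SANITIZE_SHIFT_EXPONENT), `t' above.  */
  if ((unsigned) y > 31u)	/* (unsigned) y > uprecm1 */
    return 1;

  /* Base check (SANITIZE_SHIFT_BASE), `tt' above: x is negative, or
     a set bit is shifted past the sign bit.  Shifting a 1 exactly
     into the sign bit leaves the quotient equal to 1, which the
     C++11 rule still allows, hence the > 1 test.  */
  if (x < 0 || ((unsigned) x >> (31u - (unsigned) y)) > 1u)
    return 1;

  return 0;
}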
Example #6
static bool
ifcombine_ifandif (basic_block inner_cond_bb, bool inner_inv,
		   basic_block outer_cond_bb, bool outer_inv, bool result_inv)
{
  gimple_stmt_iterator gsi;
  gimple inner_stmt, outer_stmt;
  gcond *inner_cond, *outer_cond;
  tree name1, name2, bit1, bit2, bits1, bits2;

  inner_stmt = last_stmt (inner_cond_bb);
  if (!inner_stmt
      || gimple_code (inner_stmt) != GIMPLE_COND)
    return false;
  inner_cond = as_a <gcond *> (inner_stmt);

  outer_stmt = last_stmt (outer_cond_bb);
  if (!outer_stmt
      || gimple_code (outer_stmt) != GIMPLE_COND)
    return false;
  outer_cond = as_a <gcond *> (outer_stmt);

  /* See if we test a single bit of the same name in both tests.  In
     that case remove the outer test, merging both else edges,
     and change the inner one to test for
     name & (bit1 | bit2) == (bit1 | bit2).  */
  if (recognize_single_bit_test (inner_cond, &name1, &bit1, inner_inv)
      && recognize_single_bit_test (outer_cond, &name2, &bit2, outer_inv)
      && name1 == name2)
    {
      tree t, t2;

      /* Do it.  */
      gsi = gsi_for_stmt (inner_cond);
      t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1),
		       build_int_cst (TREE_TYPE (name1), 1), bit1);
      t2 = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1),
		        build_int_cst (TREE_TYPE (name1), 1), bit2);
      t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), t, t2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t2 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
      t2 = force_gimple_operand_gsi (&gsi, t2, true, NULL_TREE,
				     true, GSI_SAME_STMT);
      t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR,
		       boolean_type_node, t2, t);
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing double bit test to ");
	  print_generic_expr (dump_file, name1, 0);
	  fprintf (dump_file, " & T == T\nwith temporary T = (1 << ");
	  print_generic_expr (dump_file, bit1, 0);
	  fprintf (dump_file, ") | (1 << ");
	  print_generic_expr (dump_file, bit2, 0);
	  fprintf (dump_file, ")\n");
	}

      return true;
    }

  /* See if we have two bit tests of the same name in both tests.
     In that case remove the outer test and change the inner one to
     test for name & (bits1 | bits2) != 0.  */
  else if (recognize_bits_test (inner_cond, &name1, &bits1, !inner_inv)
      && recognize_bits_test (outer_cond, &name2, &bits2, !outer_inv))
    {
      gimple_stmt_iterator gsi;
      tree t;

      /* Find the common name which is bit-tested.  */
      if (name1 == name2)
	;
      else if (bits1 == bits2)
	{
	  t = name2;
	  name2 = bits2;
	  bits2 = t;
	  t = name1;
	  name1 = bits1;
	  bits1 = t;
	}
      else if (name1 == bits2)
	{
	  t = name2;
	  name2 = bits2;
	  bits2 = t;
	}
      else if (bits1 == name2)
	{
	  t = name1;
	  name1 = bits1;
	  bits1 = t;
	}
      else
	return false;

      /* As we strip non-widening conversions when finding a common
	 name that is tested, make sure to end up with an integral
	 type for building the bit operations.  */
      if (TYPE_PRECISION (TREE_TYPE (bits1))
	  >= TYPE_PRECISION (TREE_TYPE (bits2)))
	{
	  bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
	  name1 = fold_convert (TREE_TYPE (bits1), name1);
	  bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
	  bits2 = fold_convert (TREE_TYPE (bits1), bits2);
	}
      else
	{
	  bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
	  name1 = fold_convert (TREE_TYPE (bits2), name1);
	  bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
	  bits1 = fold_convert (TREE_TYPE (bits2), bits1);
	}

      /* Do it.  */
      gsi = gsi_for_stmt (inner_cond);
      t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), bits1, bits2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR, boolean_type_node, t,
		       build_int_cst (TREE_TYPE (t), 0));
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing bits or bits test to ");
	  print_generic_expr (dump_file, name1, 0);
	  fprintf (dump_file, " & T != 0\nwith temporary T = ");
	  print_generic_expr (dump_file, bits1, 0);
	  fprintf (dump_file, " | ");
	  print_generic_expr (dump_file, bits2, 0);
	  fprintf (dump_file, "\n");
	}

      return true;
    }

  /* See if we have two comparisons that we can merge into one.  */
  else if (TREE_CODE_CLASS (gimple_cond_code (inner_cond)) == tcc_comparison
	   && TREE_CODE_CLASS (gimple_cond_code (outer_cond)) == tcc_comparison)
    {
      tree t;
      enum tree_code inner_cond_code = gimple_cond_code (inner_cond);
      enum tree_code outer_cond_code = gimple_cond_code (outer_cond);

      /* Invert comparisons if necessary (and possible).  */
      if (inner_inv)
	inner_cond_code = invert_tree_comparison (inner_cond_code,
	  HONOR_NANS (gimple_cond_lhs (inner_cond)));
      if (inner_cond_code == ERROR_MARK)
	return false;
      if (outer_inv)
	outer_cond_code = invert_tree_comparison (outer_cond_code,
	  HONOR_NANS (gimple_cond_lhs (outer_cond)));
      if (outer_cond_code == ERROR_MARK)
	return false;
      /* Don't return false so fast, try maybe_fold_or_comparisons?  */

      if (!(t = maybe_fold_and_comparisons (inner_cond_code,
					    gimple_cond_lhs (inner_cond),
					    gimple_cond_rhs (inner_cond),
					    outer_cond_code,
					    gimple_cond_lhs (outer_cond),
					    gimple_cond_rhs (outer_cond))))
	{
	  tree t1, t2;
	  gimple_stmt_iterator gsi;
	  if (!LOGICAL_OP_NON_SHORT_CIRCUIT)
	    return false;
	  /* Only do this optimization if the inner bb contains only the conditional. */
	  if (!gsi_one_before_end_p (gsi_start_nondebug_after_labels_bb (inner_cond_bb)))
	    return false;
	  t1 = fold_build2_loc (gimple_location (inner_cond),
				inner_cond_code,
				boolean_type_node,
				gimple_cond_lhs (inner_cond),
				gimple_cond_rhs (inner_cond));
	  t2 = fold_build2_loc (gimple_location (outer_cond),
				outer_cond_code,
				boolean_type_node,
				gimple_cond_lhs (outer_cond),
				gimple_cond_rhs (outer_cond));
	  t = fold_build2_loc (gimple_location (inner_cond), 
			       TRUTH_AND_EXPR, boolean_type_node, t1, t2);
	  if (result_inv)
	    {
	      t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t);
	      result_inv = false;
	    }
	  gsi = gsi_for_stmt (inner_cond);
	  t = force_gimple_operand_gsi_1 (&gsi, t, is_gimple_condexpr, NULL, true,
					  GSI_SAME_STMT);
        }
      if (result_inv)
	t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t);
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing two comparisons to ");
	  print_generic_expr (dump_file, t, 0);
	  fprintf (dump_file, "\n");
	}

      return true;
    }

  return false;
}
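The first branch's transformation, at the source level (hypothetical sketch; recognize_single_bit_test guarantees the shift counts are in range):

/* Two single-bit tests of one name merge into a masked compare:
   name & T == T with T = (1 << bit1) | (1 << bit2).  */
int
both_bits (unsigned a, unsigned bit1, unsigned bit2)
{
  if ((a >> bit1) & 1)
    if ((a >> bit2) & 1)
      return 1;		/* becomes (a & T) == T */
  return 0;
}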
Example #7
File: c-ubsan.c Project: hnaik/gcc
tree
ubsan_instrument_shift (location_t loc, enum tree_code code,
                        tree op0, tree op1)
{
    tree t, tt = NULL_TREE;
    tree type0 = TREE_TYPE (op0);
    tree type1 = TREE_TYPE (op1);
    tree op1_utype = unsigned_type_for (type1);
    HOST_WIDE_INT op0_prec = TYPE_PRECISION (type0);
    tree uprecm1 = build_int_cst (op1_utype, op0_prec - 1);
    tree precm1 = build_int_cst (type1, op0_prec - 1);

    t = fold_convert_loc (loc, op1_utype, op1);
    t = fold_build2 (GT_EXPR, boolean_type_node, t, uprecm1);

    /* For signed x << y, in C99/C11, the following:
       (unsigned) x >> (precm1 - y)
       if non-zero, is undefined.  */
    if (code == LSHIFT_EXPR
            && !TYPE_UNSIGNED (type0)
            && flag_isoc99)
    {
        tree x = fold_build2 (MINUS_EXPR, integer_type_node, precm1, op1);
        tt = fold_convert_loc (loc, unsigned_type_for (type0), op0);
        tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
        tt = fold_build2 (NE_EXPR, boolean_type_node, tt,
                          build_int_cst (TREE_TYPE (tt), 0));
    }

    /* For signed x << y, in C++11/C++14, the following:
       x < 0 || ((unsigned) x >> (precm1 - y))
       if > 1, is undefined.  */
    if (code == LSHIFT_EXPR
            && !TYPE_UNSIGNED (TREE_TYPE (op0))
            && (cxx_dialect == cxx11 || cxx_dialect == cxx1y))
    {
        tree x = fold_build2 (MINUS_EXPR, integer_type_node, precm1, op1);
        tt = fold_convert_loc (loc, unsigned_type_for (type0), op0);
        tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
        tt = fold_build2 (GT_EXPR, boolean_type_node, tt,
                          build_int_cst (TREE_TYPE (tt), 1));
        x = fold_build2 (LT_EXPR, boolean_type_node, op0,
                         build_int_cst (type0, 0));
        tt = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, x, tt);
    }

    /* In case we have a SAVE_EXPR in a conditional context, we need to
       make sure it gets evaluated before the condition.  */
    t = fold_build2 (COMPOUND_EXPR, TREE_TYPE (t), op0, t);
    tree data = ubsan_create_data ("__ubsan_shift_data",
                                   loc, ubsan_type_descriptor (type0),
                                   ubsan_type_descriptor (type1), NULL_TREE);

    data = build_fold_addr_expr_loc (loc, data);

    t = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, t,
                     tt ? tt : integer_zero_node);
    tt = builtin_decl_explicit (BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS);
    tt = build_call_expr_loc (loc, tt, 3, data, ubsan_encode_value (op0),
                              ubsan_encode_value (op1));
    t = fold_build3 (COND_EXPR, void_type_node, t, tt, void_zero_node);

    return t;
}
Example #8
tree
ubsan_instrument_shift (location_t loc, enum tree_code code,
			tree op0, tree op1)
{
  tree t, tt = NULL_TREE;
  tree type0 = TREE_TYPE (op0);
  tree type1 = TREE_TYPE (op1);
  tree op1_utype = unsigned_type_for (type1);
  HOST_WIDE_INT op0_prec = TYPE_PRECISION (type0);
  tree uprecm1 = build_int_cst (op1_utype, op0_prec - 1);

  t = fold_convert_loc (loc, op1_utype, op1);
  t = fold_build2 (GT_EXPR, boolean_type_node, t, uprecm1);

  /* For signed x << y, in C99/C11, the following:
     (unsigned) x >> (uprecm1 - y)
     if non-zero, is undefined.  */
  if (code == LSHIFT_EXPR
      && !TYPE_UNSIGNED (type0)
      && flag_isoc99)
    {
      tree x = fold_build2 (MINUS_EXPR, unsigned_type_node, uprecm1,
			    fold_convert (op1_utype, op1));
      tt = fold_convert_loc (loc, unsigned_type_for (type0), op0);
      tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
      tt = fold_build2 (NE_EXPR, boolean_type_node, tt,
			build_int_cst (TREE_TYPE (tt), 0));
    }

  /* For signed x << y, in C++11 and later, the following:
     x < 0 || ((unsigned) x >> (uprecm1 - y))
     if > 1, is undefined.  */
  if (code == LSHIFT_EXPR
      && !TYPE_UNSIGNED (TREE_TYPE (op0))
      && (cxx_dialect >= cxx11))
    {
      tree x = fold_build2 (MINUS_EXPR, unsigned_type_node, uprecm1,
			    fold_convert (op1_utype, op1));
      tt = fold_convert_loc (loc, unsigned_type_for (type0), op0);
      tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
      tt = fold_build2 (GT_EXPR, boolean_type_node, tt,
			build_int_cst (TREE_TYPE (tt), 1));
      x = fold_build2 (LT_EXPR, boolean_type_node, op0,
		       build_int_cst (type0, 0));
      tt = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, x, tt);
    }

  /* If the condition was folded to 0, no need to instrument
     this expression.  */
  if (integer_zerop (t) && (tt == NULL_TREE || integer_zerop (tt)))
    return NULL_TREE;

  /* In case we have a SAVE_EXPR in a conditional context, we need to
     make sure it gets evaluated before the condition.  If the OP0 is
     an instrumented array reference, mark it as having side effects so
     it's not folded away.  */
  if (flag_sanitize & SANITIZE_BOUNDS)
    {
      tree xop0 = op0;
      while (CONVERT_EXPR_P (xop0))
	xop0 = TREE_OPERAND (xop0, 0);
      if (TREE_CODE (xop0) == ARRAY_REF)
	{
	  TREE_SIDE_EFFECTS (xop0) = 1;
	  TREE_SIDE_EFFECTS (op0) = 1;
	}
    }
  t = fold_build2 (COMPOUND_EXPR, TREE_TYPE (t), op0, t);
  t = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, t,
		   tt ? tt : integer_zero_node);

  if (flag_sanitize_undefined_trap_on_error)
    tt = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TRAP), 0);
  else
    {
      tree data = ubsan_create_data ("__ubsan_shift_data", 1, &loc,
				     ubsan_type_descriptor (type0),
				     ubsan_type_descriptor (type1), NULL_TREE,
				     NULL_TREE);
      data = build_fold_addr_expr_loc (loc, data);

      enum built_in_function bcode
	= (flag_sanitize_recover & SANITIZE_SHIFT)
	  ? BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS
	  : BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS_ABORT;
      tt = builtin_decl_explicit (bcode);
      tt = build_call_expr_loc (loc, tt, 3, data, ubsan_encode_value (op0),
				ubsan_encode_value (op1));
    }
  t = fold_build3 (COND_EXPR, void_type_node, t, tt, void_node);

  return t;
}
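For reference, a hypothetical sketch of the three expansions the final COND_EXPR can take, depending on the flags; the handler prototypes are simplified stand-ins, not the ubsan runtime's actual signatures:

struct shift_data;	/* stand-in for the __ubsan_shift_data record */
void __ubsan_handle_shift_out_of_bounds (struct shift_data *,
					 unsigned long, unsigned long);
void __ubsan_handle_shift_out_of_bounds_abort (struct shift_data *,
					       unsigned long, unsigned long);

void
expanded_check (int bad, struct shift_data *data,
		unsigned long x, unsigned long y)
{
  if (bad)
    {
#if defined TRAP_ON_ERROR	/* -fsanitize-undefined-trap-on-error */
      __builtin_trap ();
#elif defined RECOVER		/* flag_sanitize_recover & SANITIZE_SHIFT */
      __ubsan_handle_shift_out_of_bounds (data, x, y);
#else				/* fatal: the _abort variant */
      __ubsan_handle_shift_out_of_bounds_abort (data, x, y);
#endif
    }
}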