Example #1
static void
addr_to_parts (tree type, aff_tree *addr, tree iv_cand,
	       tree base_hint, struct mem_address *parts,
               bool speed)
{
  tree part;
  unsigned i;

  parts->symbol = NULL_TREE;
  parts->base = NULL_TREE;
  parts->index = NULL_TREE;
  parts->step = NULL_TREE;

  if (addr->offset != 0)
    parts->offset = wide_int_to_tree (sizetype, addr->offset);
  else
    parts->offset = NULL_TREE;

  /* Try to find a symbol.  */
  move_fixed_address_to_symbol (parts, addr);

  /* No need to do address parts reassociation if the number of parts
     is <= 2 -- in that case, no loop invariant code motion can be
     exposed.  */

  if (!base_hint && (addr->n > 2))
    move_variant_to_index (parts, addr, iv_cand);

  /* First move the most expensive feasible multiplication
     to index.  */
  if (!parts->index)
    most_expensive_mult_to_index (type, parts, addr, speed);

  /* Try to find a base of the reference.  Since at the moment
     there is no reliable way to distinguish between a pointer and its
     offset, this is just a guess.  */
  if (!parts->symbol && base_hint)
    move_hint_to_base (type, parts, base_hint, addr);
  if (!parts->symbol && !parts->base)
    move_pointer_to_base (parts, addr);

  /* Then try to process the remaining elements.  */
  for (i = 0; i < addr->n; i++)
    {
      part = fold_convert (sizetype, addr->elts[i].val);
      if (addr->elts[i].coef != 1)
	part = fold_build2 (MULT_EXPR, sizetype, part,
			    wide_int_to_tree (sizetype, addr->elts[i].coef));
      add_to_parts (parts, part);
    }
  if (addr->rest)
    add_to_parts (parts, fold_convert (sizetype, addr->rest));
}
Example #2
void
aff_combination_add_elt (aff_tree *comb, tree elt, const widest_int &scale_in)
{
  unsigned i;
  tree type;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  if (scale == 0)
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
	widest_int new_coef
	  = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb->type);
	if (new_coef != 0)
	  {
	    comb->elts[i].coef = new_coef;
	    return;
	  }

	comb->n--;
	comb->elts[i] = comb->elts[comb->n];

	if (comb->rest)
	  {
	    gcc_assert (comb->n == MAX_AFF_ELTS - 1);
	    comb->elts[comb->n].coef = 1;
	    comb->elts[comb->n].val = comb->rest;
	    comb->rest = NULL_TREE;
	    comb->n++;
	  }
	return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale == 1)
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
		       fold_convert (type, elt),
		       wide_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest,
			      elt);
  else
    comb->rest = elt;
}
Example #3
static tree
gmp_cst_to_tree (tree type, mpz_t val)
{
  tree t = type ? type : integer_type_node;
  mpz_t tmp;

  mpz_init (tmp);
  mpz_set (tmp, val);
  wide_int wi = wi::from_mpz (t, tmp, true);
  mpz_clear (tmp);

  return wide_int_to_tree (t, wi);
}
Example #4
static tree
add_elt_to_tree (tree expr, tree type, tree elt, const widest_int &scale_in)
{
  enum tree_code code;

  widest_int scale = wide_int_ext_for_comb (scale_in, type);

  elt = fold_convert (type, elt);
  if (scale == 1)
    {
      if (!expr)
	return elt;

      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (scale == -1)
    {
      if (!expr)
	return fold_build1 (NEGATE_EXPR, type, elt);

      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));

  if (wi::neg_p (scale))
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));
  return fold_build2 (code, type, expr, elt);
}
Example #5
static void
aff_combination_add_product (aff_tree *c, const widest_int &coef, tree val,
			     aff_tree *r)
{
  unsigned i;
  tree aval, type;

  for (i = 0; i < c->n; i++)
    {
      aval = c->elts[i].val;
      if (val)
	{
	  type = TREE_TYPE (aval);
	  aval = fold_build2 (MULT_EXPR, type, aval,
			      fold_convert (type, val));
	}

      aff_combination_add_elt (r, aval, coef * c->elts[i].coef);
    }

  if (c->rest)
    {
      aval = c->rest;
      if (val)
	{
	  type = TREE_TYPE (aval);
	  aval = fold_build2 (MULT_EXPR, type, aval,
			      fold_convert (type, val));
	}

      aff_combination_add_elt (r, aval, coef);
    }

  if (val)
    {
      if (c->offset.is_constant ())
	/* Access coeffs[0] directly, for efficiency.  */
	aff_combination_add_elt (r, val, coef * c->offset.coeffs[0]);
      else
	{
	  /* c->offset is polynomial, so multiply VAL rather than COEF
	     by it.  */
	  tree offset = wide_int_to_tree (TREE_TYPE (val), c->offset);
	  val = fold_build2 (MULT_EXPR, TREE_TYPE (val), val, offset);
	  aff_combination_add_elt (r, val, coef);
	}
    }
  else
    aff_combination_add_cst (r, coef * c->offset);
}
Example #6
tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type, base = NULL_TREE, expr = NULL_TREE;
  unsigned i;
  poly_widest_int off;
  int sgn;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  i = 0;
  if (POINTER_TYPE_P (type))
    {
      type = sizetype;
      if (comb->n > 0 && comb->elts[0].coef == 1
	  && POINTER_TYPE_P (TREE_TYPE (comb->elts[0].val)))
	{
	  base = comb->elts[0].val;
	  ++i;
	}
    }

  for (; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, 1);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (known_lt (comb->offset, 0))
    {
      off = -comb->offset;
      sgn = -1;
    }
  else
    {
      off = comb->offset;
      sgn = 1;
    }
  expr = add_elt_to_tree (expr, type, wide_int_to_tree (type, off), sgn);

  if (base)
    return fold_build_pointer_plus (base, expr);
  else
    return fold_convert (comb->type, expr);
}
Example #7
void
aff_combination_scale (aff_tree *comb, const widest_int &scale_in)
{
  unsigned i, j;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  if (scale == 1)
    return;

  if (scale == 0)
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb->type);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      widest_int new_coef
	= wide_int_ext_for_comb (scale * comb->elts[i].coef, comb->type);
      /* A coefficient may become zero due to overflow.  Remove the zero
	 elements.  */
      if (new_coef == 0)
	continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
	type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
	{
	  comb->elts[comb->n].coef = scale;
	  comb->elts[comb->n].val = comb->rest;
	  comb->rest = NULL_TREE;
	  comb->n++;
	}
      else
	comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
				  wide_int_to_tree (type, scale));
    }
}
Example #8
static tree
tree_fold_binomial (tree type, tree n, unsigned int k)
{
  bool overflow;
  unsigned int i;
  tree res;

  /* Handle the most frequent cases.  */
  if (k == 0)
    return build_int_cst (type, 1);
  if (k == 1)
    return fold_convert (type, n);

  /* Check that k <= n.  */
  if (wi::ltu_p (n, k))
    return NULL_TREE;

  /* Denominator = 2.  */
  wide_int denom = wi::two (TYPE_PRECISION (TREE_TYPE (n)));

  /* Index = Numerator-1.  */
  wide_int idx = wi::sub (n, 1);

  /* Numerator = Numerator*Index = n*(n-1).  */
  wide_int num = wi::smul (n, idx, &overflow);
  if (overflow)
    return NULL_TREE;

  for (i = 3; i <= k; i++)
    {
      /* Index--.  */
      --idx;

      /* Numerator *= Index.  */
      num = wi::smul (num, idx, &overflow);
      if (overflow)
	return NULL_TREE;

      /* Denominator *= i.  */
      denom *= i;
    }

  /* Result = Numerator / Denominator.  */
  wide_int di_res = wi::udiv_trunc (num, denom);
  res = wide_int_to_tree (type, di_res);
  return int_fits_type_p (res, type) ? res : NULL_TREE;
}
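The step-by-step comments in tree_fold_binomial describe the usual incremental scheme for C(n, k): build up the numerator n*(n-1)*...*(n-k+1) and the denominator k! in parallel, then divide once at the end.  As a rough standalone sketch (plain C with a hypothetical helper name, no wide_int and no overflow checking, so not a substitute for the GCC routine), the same loop structure looks like this:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical standalone helper mirroring the loop structure of
   tree_fold_binomial: numerator and denominator are accumulated
   incrementally; the division happens once at the end.  */
static uint64_t
binomial (uint64_t n, unsigned k)
{
  if (k == 0)
    return 1;
  if (k == 1)
    return n;
  if (n < k)			/* Check that k <= n.  */
    return 0;

  uint64_t denom = 2;		/* Denominator = 2.  */
  uint64_t idx = n - 1;		/* Index = Numerator-1.  */
  uint64_t num = n * idx;	/* Numerator = n*(n-1).  */

  for (unsigned i = 3; i <= k; i++)
    {
      --idx;			/* Index--.  */
      num *= idx;		/* Numerator *= Index.  */
      denom *= i;		/* Denominator *= i.  */
    }

  return num / denom;		/* Result = Numerator / Denominator.  */
}

int
main (void)
{
  /* C(10, 3) == 120.  */
  printf ("%llu\n", (unsigned long long) binomial (10, 3));
  return 0;
}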
Example #9
static tree
cp_ubsan_instrument_vptr (location_t loc, tree op, tree type, bool is_addr,
			  enum ubsan_null_ckind ckind)
{
  type = TYPE_MAIN_VARIANT (type);
  const char *mangled = mangle_type_string (type);
  hashval_t str_hash1 = htab_hash_string (mangled);
  hashval_t str_hash2 = iterative_hash (mangled, strlen (mangled), 0);
  tree str_hash = wide_int_to_tree (uint64_type_node,
				    wi::uhwi (((uint64_t) str_hash1 << 32)
					      | str_hash2, 64));
  if (!is_addr)
    op = build_fold_addr_expr_loc (loc, op);
  op = save_expr (op);
  tree vptr = fold_build3_loc (loc, COMPONENT_REF,
			       TREE_TYPE (TYPE_VFIELD (type)),
			       build_fold_indirect_ref_loc (loc, op),
			       TYPE_VFIELD (type), NULL_TREE);
  vptr = fold_convert_loc (loc, pointer_sized_int_node, vptr);
  vptr = fold_convert_loc (loc, uint64_type_node, vptr);
  if (ckind == UBSAN_DOWNCAST_POINTER)
    {
      tree cond = build2_loc (loc, NE_EXPR, boolean_type_node, op,
			      build_zero_cst (TREE_TYPE (op)));
      /* This is a compiler-generated comparison; don't emit
	 e.g. a -Wnonnull-compare warning for it.  */
      TREE_NO_WARNING (cond) = 1;
      vptr = build3_loc (loc, COND_EXPR, uint64_type_node, cond,
			 vptr, build_int_cst (uint64_type_node, 0));
    }
  tree ti_decl = get_tinfo_decl (type);
  mark_used (ti_decl);
  tree ptype = build_pointer_type (type);
  tree call
    = build_call_expr_internal_loc (loc, IFN_UBSAN_VPTR,
				    void_type_node, 5, op, vptr, str_hash,
				    build_address (ti_decl),
				    build_int_cst (ptype, ckind));
  TREE_SIDE_EFFECTS (call) = 1;
  return fold_build2 (COMPOUND_EXPR, TREE_TYPE (op), call, op);
}
Example #10
static void
move_variant_to_index (struct mem_address *parts, aff_tree *addr, tree v)
{
  unsigned i;
  tree val = NULL_TREE;

  gcc_assert (!parts->index);
  for (i = 0; i < addr->n; i++)
    {
      val = addr->elts[i].val;
      if (operand_equal_p (val, v, 0))
	break;
    }

  if (i == addr->n)
    return;

  parts->index = fold_convert (sizetype, val);
  parts->step = wide_int_to_tree (sizetype, addr->elts[i].coef);
  aff_combination_remove_elt (addr, i);
}
Example #11
void
aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
{
  unsigned i;
  gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));

  aff_combination_zero (r, c1->type);

  for (i = 0; i < c2->n; i++)
    aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
  if (c2->rest)
    aff_combination_add_product (c1, 1, c2->rest, r);
  if (c2->offset.is_constant ())
    /* Access coeffs[0] directly, for efficiency.  */
    aff_combination_add_product (c1, c2->offset.coeffs[0], NULL, r);
  else
    {
      /* c2->offset is polynomial, so do the multiplication in tree form.  */
      tree offset = wide_int_to_tree (c2->type, c2->offset);
      aff_combination_add_product (c1, 1, offset, r);
    }
}
Example #12
void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree cst, core, toffset;
  poly_int64 bitpos, bitsize, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    case PLUS_EXPR:
    case MINUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), type, &tmp);
      if (code == MINUS_EXPR)
	aff_combination_scale (&tmp, -1);
      aff_combination_add (comb, &tmp);
      return;

    case MULT_EXPR:
      cst = TREE_OPERAND (expr, 1);
      if (TREE_CODE (cst) != INTEGER_CST)
	break;
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, wi::to_widest (cst));
      return;

    case NEGATE_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, -1);
      return;

    case BIT_NOT_EXPR:
      /* ~x = -x - 1 */
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, -1);
      aff_combination_add_cst (comb, -1);
      return;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
	{
	  expr = TREE_OPERAND (expr, 0);
	  tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
	  tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
	  aff_combination_add (comb, &tmp);
	  return;
	}
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
				  &toffset, &mode, &unsignedp, &reversep,
				  &volatilep);
      if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
	break;
      aff_combination_const (comb, type, bytepos);
      if (TREE_CODE (core) == MEM_REF)
	{
	  tree mem_offset = TREE_OPERAND (core, 1);
	  aff_combination_add_cst (comb, wi::to_poly_widest (mem_offset));
	  core = TREE_OPERAND (core, 0);
	}
      else
	core = build_fold_addr_expr (core);

      if (TREE_CODE (core) == ADDR_EXPR)
	aff_combination_add_elt (comb, core, 1);
      else
	{
	  tree_to_aff_combination (core, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      if (toffset)
	{
	  tree_to_aff_combination (toffset, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      return;

    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
	tree_to_aff_combination (TREE_OPERAND (TREE_OPERAND (expr, 0), 0),
				 type, comb);
      else if (integer_zerop (TREE_OPERAND (expr, 1)))
	{
	  aff_combination_elt (comb, type, expr);
	  return;
	}
      else
	aff_combination_elt (comb, type,
			     build2 (MEM_REF, TREE_TYPE (expr),
				     TREE_OPERAND (expr, 0),
				     build_int_cst
				      (TREE_TYPE (TREE_OPERAND (expr, 1)), 0)));
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    CASE_CONVERT:
      {
	tree otype = TREE_TYPE (expr);
	tree inner = TREE_OPERAND (expr, 0);
	tree itype = TREE_TYPE (inner);
	enum tree_code icode = TREE_CODE (inner);

	/* In principle this is a valid folding, but it isn't necessarily
	   an optimization, so do it here and not in fold_unary.  */
	if ((icode == PLUS_EXPR || icode == MINUS_EXPR || icode == MULT_EXPR)
	    && TREE_CODE (itype) == INTEGER_TYPE
	    && TREE_CODE (otype) == INTEGER_TYPE
	    && TYPE_PRECISION (otype) > TYPE_PRECISION (itype))
	  {
	    tree op0 = TREE_OPERAND (inner, 0), op1 = TREE_OPERAND (inner, 1);

	    /* If the inner type has undefined overflow behavior, fold the
	       conversion for the two cases below:
		 (T1)(X *+- CST) -> (T1)X *+- (T1)CST
		 (T1)(X + X)     -> (T1)X + (T1)X.  */
	    if (TYPE_OVERFLOW_UNDEFINED (itype)
		&& (TREE_CODE (op1) == INTEGER_CST
		    || (icode == PLUS_EXPR && operand_equal_p (op0, op1, 0))))
	      {
		op0 = fold_convert (otype, op0);
		op1 = fold_convert (otype, op1);
		expr = fold_build2 (icode, otype, op0, op1);
		tree_to_aff_combination (expr, type, comb);
		return;
	      }
	    wide_int minv, maxv;
	    /* If the inner type has wrapping overflow behavior, fold the
	       conversion for the case below:
		 (T1)(X - CST) -> (T1)X - (T1)CST
	       if range information shows that X - CST does not overflow.
	       Also handle (T1)(X + CST) as (T1)(X - (-CST)).  */
	    if (TYPE_UNSIGNED (itype)
		&& TYPE_OVERFLOW_WRAPS (itype)
		&& TREE_CODE (op0) == SSA_NAME
		&& TREE_CODE (op1) == INTEGER_CST
		&& icode != MULT_EXPR
		&& get_range_info (op0, &minv, &maxv) == VR_RANGE)
	      {
		if (icode == PLUS_EXPR)
		  op1 = wide_int_to_tree (itype, -wi::to_wide (op1));
		if (wi::geu_p (minv, wi::to_wide (op1)))
		  {
		    op0 = fold_convert (otype, op0);
		    op1 = fold_convert (otype, op1);
		    expr = fold_build2 (MINUS_EXPR, otype, op0, op1);
		    tree_to_aff_combination (expr, type, comb);
		    return;
		  }
	      }
	  }
      }
      break;

    default:
      {
	if (poly_int_tree_p (expr))
	  {
	    aff_combination_const (comb, type, wi::to_poly_widest (expr));
	    return;
	  }
	break;
      }
    }

  aff_combination_elt (comb, type, expr);
}
Example #13
tree
gfc_conv_mpz_to_tree (mpz_t i, int kind)
{
  wide_int val = wi::from_mpz (gfc_get_int_type (kind), i, true);
  return wide_int_to_tree (gfc_get_int_type (kind), val);
}
Example #14
/* Return the marking bitmap for the class TYPE.  For now this is a
   single word describing the type.  */
tree
get_boehm_type_descriptor (tree type)
{
  unsigned int count, log2_size, ubit;
  int bit;
  int all_bits_set = 1;
  int last_set_index = 0;
  HOST_WIDE_INT last_view_index = -1;
  int pointer_after_end = 0;
  tree field, value, value_type;

  /* If the GC wasn't requested, just use a null pointer.  */
  if (! flag_use_boehm_gc)
    return null_pointer_node;

  value_type = java_type_for_mode (ptr_mode, 1);
  wide_int mask = wi::zero (TYPE_PRECISION (value_type));

  /* If we have a type of unknown size, use a proc.  */
  if (int_size_in_bytes (type) == -1)
    goto procedure_object_descriptor;

  bit = POINTER_SIZE / BITS_PER_UNIT;
  /* The size of this node has to be known.  And, we only support 32
     and 64 bit targets, so we need to know that the log2 is one of
     our values.  */
  log2_size = exact_log2 (bit);
  if (bit == -1 || (log2_size != 2 && log2_size != 3))
    {
      /* This means the GC isn't supported.  We should probably
	 abort or give an error.  Instead, for now, we just silently
	 revert.  FIXME.  */
      return null_pointer_node;
    }
  bit *= BITS_PER_UNIT;

  /* Warning avoidance.  */
  ubit = (unsigned int) bit;

  if (type == class_type_node)
    goto procedure_object_descriptor;

  field = TYPE_FIELDS (type);
  mark_reference_fields (field, &mask, ubit,
			 &pointer_after_end, &all_bits_set,
			 &last_set_index, &last_view_index);

  /* If the object is all pointers, or if the part with pointers fits
     in our bitmap, then we are ok.  Otherwise we have to allocate it
     a different way.  */
  if (all_bits_set != -1 || (pointer_after_end && flag_reduced_reflection))
    {
      /* In this case the initial part of the object is all reference
	 fields, and the end of the object is all non-reference
	 fields.  We represent the mark as a count of the fields,
	 shifted.  In the GC the computation looks something like
	 this:
	 value = DS_LENGTH | WORDS_TO_BYTES (last_set_index + 1);
	 DS_LENGTH is 0.
	 WORDS_TO_BYTES shifts by log2(bytes-per-pointer).

         If flag_reduced_reflection is set and the bitmap would
         overflow, we tell the GC that the object is all pointers so
         that we don't have to emit reflection data for run-time
         marking.  */
      count = 0;
      mask = wi::zero (TYPE_PRECISION (value_type));
      ++last_set_index;
      while (last_set_index)
	{
	  if ((last_set_index & 1))
	    mask = wi::set_bit (mask, log2_size + count);
	  last_set_index >>= 1;
	  ++count;
	}
      value = wide_int_to_tree (value_type, mask);
    }
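The bit-copy loop above realises the formula from the comment, value = DS_LENGTH | WORDS_TO_BYTES (last_set_index + 1) with DS_LENGTH == 0: each set bit of last_set_index + 1 is copied into the mask shifted left by log2_size.  A rough standalone sketch of that equivalence (plain C with a hypothetical helper name, not GCC code):

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper: rebuild the descriptor word the same way the
   loop above does, by copying the bits of (last_set_index + 1) into
   the mask starting at bit position log2_size.  */
static uint64_t
descriptor_from_count (uint64_t last_set_index, unsigned log2_size)
{
  uint64_t mask = 0;
  unsigned count = 0;

  ++last_set_index;
  while (last_set_index)
    {
      if (last_set_index & 1)
	mask |= (uint64_t) 1 << (log2_size + count);
      last_set_index >>= 1;
      ++count;
    }

  /* Equivalent to (original last_set_index + 1) << log2_size.  */
  return mask;
}

int
main (void)
{
  /* E.g. five reference words on a 64-bit target (log2_size == 3).  */
  assert (descriptor_from_count (4, 3) == (uint64_t) 5 << 3);
  return 0;
}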
Example #15
static tree
compute_object_offset (const_tree expr, const_tree var)
{
  enum tree_code code = PLUS_EXPR;
  tree base, off, t;

  if (expr == var)
    return size_zero_node;

  switch (TREE_CODE (expr))
    {
    case COMPONENT_REF:
      base = compute_object_offset (TREE_OPERAND (expr, 0), var);
      if (base == error_mark_node)
	return base;

      t = TREE_OPERAND (expr, 1);
      off = size_binop (PLUS_EXPR, DECL_FIELD_OFFSET (t),
			size_int (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (t))
				  / BITS_PER_UNIT));
      break;

    case REALPART_EXPR:
    CASE_CONVERT:
    case VIEW_CONVERT_EXPR:
    case NON_LVALUE_EXPR:
      return compute_object_offset (TREE_OPERAND (expr, 0), var);

    case IMAGPART_EXPR:
      base = compute_object_offset (TREE_OPERAND (expr, 0), var);
      if (base == error_mark_node)
	return base;

      off = TYPE_SIZE_UNIT (TREE_TYPE (expr));
      break;

    case ARRAY_REF:
      base = compute_object_offset (TREE_OPERAND (expr, 0), var);
      if (base == error_mark_node)
	return base;

      t = TREE_OPERAND (expr, 1);
      if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) < 0)
	{
	  code = MINUS_EXPR;
	  t = fold_build1 (NEGATE_EXPR, TREE_TYPE (t), t);
	}
      t = fold_convert (sizetype, t);
      off = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (TREE_TYPE (expr)), t);
      break;

    case MEM_REF:
      gcc_assert (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR);
      return wide_int_to_tree (sizetype, mem_ref_offset (expr));

    default:
      return error_mark_node;
    }

  return size_binop (code, base, off);
}
Example #16
static tree
fold_const_call_1 (built_in_function fn, tree type, tree arg)
{
  machine_mode mode = TYPE_MODE (type);
  machine_mode arg_mode = TYPE_MODE (TREE_TYPE (arg));

  if (integer_cst_p (arg))
    {
      if (SCALAR_INT_MODE_P (mode))
	{
	  wide_int result;
	  if (fold_const_call_ss (&result, fn, arg, TYPE_PRECISION (type),
				  TREE_TYPE (arg)))
	    return wide_int_to_tree (type, result);
	}
      return NULL_TREE;
    }

  if (real_cst_p (arg))
    {
      gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg_mode));
      if (mode == arg_mode)
	{
	  /* real -> real.  */
	  REAL_VALUE_TYPE result;
	  if (fold_const_call_ss (&result, fn, TREE_REAL_CST_PTR (arg),
				  REAL_MODE_FORMAT (mode)))
	    return build_real (type, result);
	}
      else if (COMPLEX_MODE_P (mode)
	       && GET_MODE_INNER (mode) == arg_mode)
	{
	  /* real -> complex real.  */
	  REAL_VALUE_TYPE result_real, result_imag;
	  if (fold_const_call_cs (&result_real, &result_imag, fn,
				  TREE_REAL_CST_PTR (arg),
				  REAL_MODE_FORMAT (arg_mode)))
	    return build_complex (type,
				  build_real (TREE_TYPE (type), result_real),
				  build_real (TREE_TYPE (type), result_imag));
	}
      else if (INTEGRAL_TYPE_P (type))
	{
	  /* real -> int.  */
	  wide_int result;
	  if (fold_const_call_ss (&result, fn,
				  TREE_REAL_CST_PTR (arg),
				  TYPE_PRECISION (type),
				  REAL_MODE_FORMAT (arg_mode)))
	    return wide_int_to_tree (type, result);
	}
      return NULL_TREE;
    }

  if (complex_cst_p (arg))
    {
      gcc_checking_assert (COMPLEX_MODE_P (arg_mode));
      machine_mode inner_mode = GET_MODE_INNER (arg_mode);
      tree argr = TREE_REALPART (arg);
      tree argi = TREE_IMAGPART (arg);
      if (mode == arg_mode
	  && real_cst_p (argr)
	  && real_cst_p (argi))
	{
	  /* complex real -> complex real.  */
	  REAL_VALUE_TYPE result_real, result_imag;
	  if (fold_const_call_cc (&result_real, &result_imag, fn,
				  TREE_REAL_CST_PTR (argr),
				  TREE_REAL_CST_PTR (argi),
				  REAL_MODE_FORMAT (inner_mode)))
	    return build_complex (type,
				  build_real (TREE_TYPE (type), result_real),
				  build_real (TREE_TYPE (type), result_imag));
	}
      if (mode == inner_mode
	  && real_cst_p (argr)
	  && real_cst_p (argi))
	{
	  /* complex real -> real.  */
	  REAL_VALUE_TYPE result;
	  if (fold_const_call_sc (&result, fn,
				  TREE_REAL_CST_PTR (argr),
				  TREE_REAL_CST_PTR (argi),
				  REAL_MODE_FORMAT (inner_mode)))
	    return build_real (type, result);
	}
      return NULL_TREE;
    }

  return NULL_TREE;
}
Example #17
static void
most_expensive_mult_to_index (tree type, struct mem_address *parts,
			      aff_tree *addr, bool speed)
{
  addr_space_t as = TYPE_ADDR_SPACE (type);
  machine_mode address_mode = targetm.addr_space.address_mode (as);
  HOST_WIDE_INT coef;
  unsigned best_mult_cost = 0, acost;
  tree mult_elt = NULL_TREE, elt;
  unsigned i, j;
  enum tree_code op_code;

  offset_int best_mult = 0;
  for (i = 0; i < addr->n; i++)
    {
      if (!wi::fits_shwi_p (addr->elts[i].coef))
	continue;

      coef = addr->elts[i].coef.to_shwi ();
      if (coef == 1
	  || !multiplier_allowed_in_address_p (coef, TYPE_MODE (type), as))
	continue;

      acost = mult_by_coeff_cost (coef, address_mode, speed);

      if (acost > best_mult_cost)
	{
	  best_mult_cost = acost;
	  best_mult = offset_int::from (addr->elts[i].coef, SIGNED);
	}
    }

  if (!best_mult_cost)
    return;

  /* Collect elements multiplied by best_mult.  */
  for (i = j = 0; i < addr->n; i++)
    {
      offset_int amult = offset_int::from (addr->elts[i].coef, SIGNED);
      offset_int amult_neg = -wi::sext (amult, TYPE_PRECISION (addr->type));

      if (amult == best_mult)
	op_code = PLUS_EXPR;
      else if (amult_neg == best_mult)
	op_code = MINUS_EXPR;
      else
	{
	  addr->elts[j] = addr->elts[i];
	  j++;
	  continue;
	}

      elt = fold_convert (sizetype, addr->elts[i].val);
      if (mult_elt)
	mult_elt = fold_build2 (op_code, sizetype, mult_elt, elt);
      else if (op_code == PLUS_EXPR)
	mult_elt = elt;
      else
	mult_elt = fold_build1 (NEGATE_EXPR, sizetype, elt);
    }
  addr->n = j;

  parts->index = mult_elt;
  parts->step = wide_int_to_tree (sizetype, best_mult);
}