Example #1
void
backprop::optimize_assign (gassign *assign, tree lhs, const usage_info *info)
{
  switch (gimple_assign_rhs_code (assign))
    {
    case MULT_EXPR:
    case RDIV_EXPR:
      /* If the sign of the result doesn't matter, strip sign operations
	 from both inputs.  */
      if (info->flags.ignore_sign)
	replace_assign_rhs (assign, lhs,
			    strip_sign_op (gimple_assign_rhs1 (assign)),
			    strip_sign_op (gimple_assign_rhs2 (assign)),
			    NULL_TREE);
      break;

    case COND_EXPR:
      /* If the sign of A ? B : C doesn't matter, strip sign operations
	 from both B and C.  */
      if (info->flags.ignore_sign)
	replace_assign_rhs (assign, lhs,
			    NULL_TREE,
			    strip_sign_op (gimple_assign_rhs2 (assign)),
			    strip_sign_op (gimple_assign_rhs3 (assign)));
      break;

    default:
      break;
    }
}
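
A minimal test case of my own (not part of the scraped listing) for the
shape this handles: because fabs discards the sign of its argument, the
backprop pass can mark the product's sign as ignorable and strip the
negation from one input.

#include <math.h>

/* Sign of the product is irrelevant under fabs, so the MULT_EXPR case
   above may rewrite a * -b as a * b.  */
double
f (double a, double b)
{
  return fabs (a * -b);
}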
Example #2
static bool
forward_propagate_addr_into_variable_array_index (tree offset,
						  tree def_rhs,
						  gimple_stmt_iterator *use_stmt_gsi)
{
  tree index;
  gimple offset_def, use_stmt = gsi_stmt (*use_stmt_gsi);

  /* Get the offset's defining statement.  */
  offset_def = SSA_NAME_DEF_STMT (offset);

  /* Try to find an expression for a proper index.  This is either a
     multiplication expression by the element size or just the SSA name
     we came along with in case the element size is one.  In that case,
     however, we do not allow multiplications because they can be
     computing an index into a higher-level dimension (PR 37861).  */
  if (integer_onep (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (def_rhs)))))
    {
      if (is_gimple_assign (offset_def)
	  && gimple_assign_rhs_code (offset_def) == MULT_EXPR)
	return false;

      index = offset;
    }
  else
    {
      /* The statement which defines OFFSET before type conversion
         must be a simple GIMPLE_ASSIGN.  */
      if (!is_gimple_assign (offset_def))
	return false;

      /* The RHS of the statement which defines OFFSET must be a
	 multiplication of an object by the size of the array elements. 
	 This implicitly verifies that the size of the array elements
	 is constant.  */
     offset = gimple_assign_rhs1 (offset_def);
     if (gimple_assign_rhs_code (offset_def) != MULT_EXPR
	 || TREE_CODE (gimple_assign_rhs2 (offset_def)) != INTEGER_CST
	 || !simple_cst_equal (gimple_assign_rhs2 (offset_def),
			       TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (def_rhs)))))
	return false;

      /* The first operand to the MULT_EXPR is the desired index.  */
      index = offset;
    }

  /* Replace the pointer addition with array indexing.  */
  gimple_assign_set_rhs_from_tree (use_stmt_gsi, unshare_expr (def_rhs));
  use_stmt = gsi_stmt (*use_stmt_gsi);
  TREE_OPERAND (TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0), 1)
    = index;

  /* That should have created gimple, so there is no need to
     record information to undo the propagation.  */
  fold_stmt_inplace (use_stmt);
  tidy_after_forward_propagate_addr (use_stmt);
  return true;
}
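
A compilable sketch (mine, with hypothetical names) of the source shape
this targets: a POINTER_PLUS_EXPR whose variable offset is an index
multiplied by the element size, rewritten into array indexing.

int a[16];

/* &a[0] + i * sizeof (int) matches the MULT_EXPR case above and is
   turned into &a[i].  */
int *
elt_addr (long i)
{
  return (int *) ((char *) &a[0] + i * sizeof (int));
}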
Example #3
/* Return whether USE_STMT is a floating-point division by DEF.  */
static inline bool
is_division_by (gimple use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def;
}
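
For context, a hedged sketch of a caller-visible pattern: with
-O2 -freciprocal-math, the pass that calls is_division_by replaces
repeated divisions by the same SSA name with one reciprocal and
multiplications; x / x is deliberately rejected above.

/* Both divisions by d are candidates: the pass can compute
   t = 1.0 / d once and multiply.  */
double
f (double x, double y, double d)
{
  return x / d + y / d;
}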
Example #4
static tree
rhs_to_tree (tree type, gimple stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
    return fold_build2 (code, type, gimple_assign_rhs1 (stmt),
			gimple_assign_rhs2 (stmt));
  else if (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS)
    return build1 (code, type, gimple_assign_rhs1 (stmt));
  else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    return gimple_assign_rhs1 (stmt);
  else
    gcc_unreachable ();
}
Example #5
static void
instrument_si_overflow (gimple_stmt_iterator gsi)
{
  gimple stmt = gsi_stmt (gsi);
  tree_code code = gimple_assign_rhs_code (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  tree lhstype = TREE_TYPE (lhs);
  tree a, b;
  gimple g;

  /* If this is not a signed operation, don't instrument anything here.
     Also punt on bit-fields.  */
  if (!INTEGRAL_TYPE_P (lhstype)
      || TYPE_OVERFLOW_WRAPS (lhstype)
      || GET_MODE_BITSIZE (TYPE_MODE (lhstype)) != TYPE_PRECISION (lhstype))
    return;

  switch (code)
    {
    case MINUS_EXPR:
    case PLUS_EXPR:
    case MULT_EXPR:
      /* Transform
	 i = u {+,-,*} 5;
	 into
	 i = UBSAN_CHECK_{ADD,SUB,MUL} (u, 5);  */
      a = gimple_assign_rhs1 (stmt);
      b = gimple_assign_rhs2 (stmt);
      g = gimple_build_call_internal (code == PLUS_EXPR
				      ? IFN_UBSAN_CHECK_ADD
				      : code == MINUS_EXPR
				      ? IFN_UBSAN_CHECK_SUB
				      : IFN_UBSAN_CHECK_MUL, 2, a, b);
      gimple_call_set_lhs (g, lhs);
      gsi_replace (&gsi, g, false);
      break;
    case NEGATE_EXPR:
      /* Represent i = -u;
	 as
	 i = UBSAN_CHECK_SUB (0, u);  */
      a = build_int_cst (lhstype, 0);
      b = gimple_assign_rhs1 (stmt);
      g = gimple_build_call_internal (IFN_UBSAN_CHECK_SUB, 2, a, b);
      gimple_call_set_lhs (g, lhs);
      gsi_replace (&gsi, g, false);
      break;
    default:
      break;
    }
}
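
A minimal trigger of my own: compiled with
-fsanitize=signed-integer-overflow, the operations below are rewritten
into the internal UBSAN_CHECK_* calls described in the comments above.

/* u + 5 becomes UBSAN_CHECK_ADD (u, 5); the outer negation becomes
   UBSAN_CHECK_SUB (0, t).  */
int
f (int u)
{
  return -(u + 5);
}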
Example #6
/* PREV is the CC flag from previous compares.  The function expands the
   next compare based on G, whose result is combined with the previous
   compare via CODE.
   PREP_SEQ returns all insns to prepare operands for the compare.
   GEN_SEQ returns all compare insns.  */
static rtx
expand_ccmp_next (gimple *g, enum tree_code code, rtx prev,
		  rtx *prep_seq, rtx *gen_seq)
{
  enum rtx_code rcode;
  int unsignedp = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (g)));

  gcc_assert (code == BIT_AND_EXPR || code == BIT_IOR_EXPR);

  rcode = get_rtx_code (gimple_assign_rhs_code (g), unsignedp);

  return targetm.gen_ccmp_next (prep_seq, gen_seq, prev, rcode,
				gimple_assign_rhs1 (g),
				gimple_assign_rhs2 (g),
				get_rtx_code (code, 0));
}
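
A sketch of a source shape that exercises the ccmp hooks on targets
that provide them (e.g. AArch64): two comparisons joined by
BIT_AND_EXPR expand to a compare plus a conditional compare instead of
two branches.

int
both (int a, int b, int c, int d)
{
  /* (a < b) & (c < d): one CMP and one CCMP on a ccmp target.  */
  return (a < b) & (c < d);
}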
Example #7
static bool
recognize_bits_test (gimple cond, tree *name, tree *bits)
{
  gimple stmt;

  /* Get at the definition of the result of the bit test.  */
  if (gimple_cond_code (cond) != NE_EXPR
      || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
      || !integer_zerop (gimple_cond_rhs (cond)))
    return false;
  stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
  if (!is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR)
    return false;

  *name = get_name_for_bit_test (gimple_assign_rhs1 (stmt));
  *bits = gimple_assign_rhs2 (stmt);

  return true;
}
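
A minimal example (my sketch) of the GIMPLE shape recognized here: an
SSA name defined by BIT_AND_EXPR, compared against zero with NE_EXPR.

int
has_bits (unsigned x, unsigned bits)
{
  /* _1 = x & bits;  if (_1 != 0)  matches recognize_bits_test.  */
  if ((x & bits) != 0)
    return 1;
  return 0;
}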
Example #8
static bool
recognize_single_bit_test (gimple cond, tree *name, tree *bit)
{
  gimple stmt;

  /* Get at the definition of the result of the bit test.  */
  if (gimple_cond_code (cond) != NE_EXPR
      || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
      || !integer_zerop (gimple_cond_rhs (cond)))
    return false;
  stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
  if (!is_gimple_assign (stmt))
    return false;

  /* Look at which bit is tested.  One form to recognize is
     D.1985_5 = state_3(D) >> control1_4(D);
     D.1986_6 = (int) D.1985_5;
     D.1987_7 = op0 & 1;
     if (D.1987_7 != 0)  */
  if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
      && integer_onep (gimple_assign_rhs2 (stmt))
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
    {
      tree orig_name = gimple_assign_rhs1 (stmt);

      /* Look through copies and conversions to eventually
	 find the stmt that computes the shift.  */
      stmt = SSA_NAME_DEF_STMT (orig_name);

      while (is_gimple_assign (stmt)
	     && ((CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt))
		  && (TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (stmt)))
		      <= TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))))
		 || gimple_assign_ssa_name_copy_p (stmt)))
	stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));

      /* If we found such, decompose it.  */
      if (is_gimple_assign (stmt)
	  && gimple_assign_rhs_code (stmt) == RSHIFT_EXPR)
	{
	  /* op0 & (1 << op1) */
	  *bit = gimple_assign_rhs2 (stmt);
	  *name = gimple_assign_rhs1 (stmt);
	}
      else
	{
	  /* t & 1 */
	  *bit = integer_zero_node;
	  *name = get_name_for_bit_test (orig_name);
	}

      return true;
    }

  /* Another form is
     D.1987_7 = op0 & (1 << CST)
     if (D.1987_7 != 0)  */
  if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
      && integer_pow2p (gimple_assign_rhs2 (stmt)))
    {
      *name = gimple_assign_rhs1 (stmt);
      *bit = build_int_cst (integer_type_node,
			    tree_log2 (gimple_assign_rhs2 (stmt)));
      return true;
    }

  /* Another form is
     D.1986_6 = 1 << control1_4(D)
     D.1987_7 = op0 & D.1986_6
     if (D.1987_7 != 0)  */
  if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
      && TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME)
    {
      gimple tmp;

      /* Both arguments of the BIT_AND_EXPR can be the single-bit
	 specifying expression.  */
      tmp = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      if (is_gimple_assign (tmp)
	  && gimple_assign_rhs_code (tmp) == LSHIFT_EXPR
	  && integer_onep (gimple_assign_rhs1 (tmp)))
	{
	  *name = gimple_assign_rhs2 (stmt);
	  *bit = gimple_assign_rhs2 (tmp);
	  return true;
	}

      tmp = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
      if (is_gimple_assign (tmp)
	  && gimple_assign_rhs_code (tmp) == LSHIFT_EXPR
	  && integer_onep (gimple_assign_rhs1 (tmp)))
	{
	  *name = gimple_assign_rhs1 (stmt);
	  *bit = gimple_assign_rhs2 (tmp);
	  return true;
	}
    }

  return false;
}
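
The three recognized forms side by side, as a compilable sketch of mine
(function names are hypothetical):

/* Form 1: (state >> c) & 1 -- the right shift defines the bit.  */
int f1 (unsigned state, unsigned c) { return ((state >> c) & 1) != 0; }

/* Form 2: op0 & (1 << CST) -- a power-of-two constant mask.  */
int f2 (unsigned op0) { return (op0 & (1u << 5)) != 0; }

/* Form 3: op0 & (1 << c) -- the mask is an SSA name defined by a
   left shift of one.  */
int f3 (unsigned op0, unsigned c) { return (op0 & (1u << c)) != 0; }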
Example #9
/* Expand conditional compare gimple G.  A typical CCMP sequence is like:

     CC0 = CMP (a, b);
     CC1 = CCMP (NE (CC0, 0), CMP (e, f));
     ...
     CCn = CCMP (NE (CCn-1, 0), CMP (...));

   hook gen_ccmp_first is used to expand the first compare.
   hook gen_ccmp_next is used to expand the following CCMP.
   PREP_SEQ returns all insns to prepare operands.
   GEN_SEQ returns all compare insns.  */
static rtx
expand_ccmp_expr_1 (gimple *g, rtx *prep_seq, rtx *gen_seq)
{
  tree exp = gimple_assign_rhs_to_tree (g);
  enum tree_code code = TREE_CODE (exp);
  gimple *gs0 = get_gimple_for_ssa_name (TREE_OPERAND (exp, 0));
  gimple *gs1 = get_gimple_for_ssa_name (TREE_OPERAND (exp, 1));
  rtx tmp;
  enum tree_code code0 = gimple_assign_rhs_code (gs0);
  enum tree_code code1 = gimple_assign_rhs_code (gs1);

  gcc_assert (code == BIT_AND_EXPR || code == BIT_IOR_EXPR);
  gcc_assert (gs0 && gs1 && is_gimple_assign (gs0) && is_gimple_assign (gs1));

  if (TREE_CODE_CLASS (code0) == tcc_comparison)
    {
      if (TREE_CODE_CLASS (code1) == tcc_comparison)
	{
	  int unsignedp0;
	  enum rtx_code rcode0;

	  unsignedp0 = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (gs0)));
	  rcode0 = get_rtx_code (code0, unsignedp0);

	  tmp = targetm.gen_ccmp_first (prep_seq, gen_seq, rcode0,
					gimple_assign_rhs1 (gs0),
					gimple_assign_rhs2 (gs0));
	  if (!tmp)
	    return NULL_RTX;

	  return expand_ccmp_next (gs1, code, tmp, prep_seq, gen_seq);
	}
      else
	{
	  tmp = expand_ccmp_expr_1 (gs1, prep_seq, gen_seq);
	  if (!tmp)
	    return NULL_RTX;

	  return expand_ccmp_next (gs0, code, tmp, prep_seq, gen_seq);
	}
    }
  else
    {
      gcc_assert (gimple_assign_rhs_code (gs0) == BIT_AND_EXPR
                  || gimple_assign_rhs_code (gs0) == BIT_IOR_EXPR);

      if (TREE_CODE_CLASS (gimple_assign_rhs_code (gs1)) == tcc_comparison)
	{
	  tmp = expand_ccmp_expr_1 (gs0, prep_seq, gen_seq);
	  if (!tmp)
	    return NULL_RTX;

	  return expand_ccmp_next (gs1, code, tmp, prep_seq, gen_seq);
	}
      else
	{
	  gcc_assert (gimple_assign_rhs_code (gs1) == BIT_AND_EXPR
		      || gimple_assign_rhs_code (gs1) == BIT_IOR_EXPR);
	}
    }

  return NULL_RTX;
}
Example #10
static bool
forward_propagate_addr_expr_1 (tree name, tree def_rhs,
			       gimple_stmt_iterator *use_stmt_gsi,
			       bool single_use_p)
{
  tree lhs, rhs, rhs2, array_ref;
  tree *rhsp, *lhsp;
  gimple use_stmt = gsi_stmt (*use_stmt_gsi);
  enum tree_code rhs_code;

  gcc_assert (TREE_CODE (def_rhs) == ADDR_EXPR);

  lhs = gimple_assign_lhs (use_stmt);
  rhs_code = gimple_assign_rhs_code (use_stmt);
  rhs = gimple_assign_rhs1 (use_stmt);

  /* Trivial cases.  The use statement could be a trivial copy or a
     useless conversion.  Recurse to the uses of the lhs as copyprop does
     not copy through different variant pointers and FRE does not catch
     all useless conversions.  Treat the case of a single-use name and
     a conversion to def_rhs type separate, though.  */
  if (TREE_CODE (lhs) == SSA_NAME
      && ((rhs_code == SSA_NAME && rhs == name)
	  || CONVERT_EXPR_CODE_P (rhs_code)))
    {
      /* Only recurse if we don't deal with a single use or we cannot
	 do the propagation to the current statement.  In particular
	 we can end up with a conversion needed for a non-invariant
	 address which we cannot do in a single statement.  */
      if (!single_use_p
	  || (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (def_rhs))
	      && !is_gimple_min_invariant (def_rhs)))
	return forward_propagate_addr_expr (lhs, def_rhs);

      gimple_assign_set_rhs1 (use_stmt, unshare_expr (def_rhs));
      if (useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (def_rhs)))
	gimple_assign_set_rhs_code (use_stmt, TREE_CODE (def_rhs));
      else
	gimple_assign_set_rhs_code (use_stmt, NOP_EXPR);
      return true;
    }

  /* Now strip away any outer COMPONENT_REF/ARRAY_REF nodes from the LHS. 
     ADDR_EXPR will not appear on the LHS.  */
  lhsp = gimple_assign_lhs_ptr (use_stmt);
  while (handled_component_p (*lhsp))
    lhsp = &TREE_OPERAND (*lhsp, 0);
  lhs = *lhsp;

  /* Now see if the LHS node is an INDIRECT_REF using NAME.  If so, 
     propagate the ADDR_EXPR into the use of NAME and fold the result.  */
  if (TREE_CODE (lhs) == INDIRECT_REF
      && TREE_OPERAND (lhs, 0) == name
      && may_propagate_address_into_dereference (def_rhs, lhs)
      && (lhsp != gimple_assign_lhs_ptr (use_stmt)
	  || useless_type_conversion_p (TREE_TYPE (TREE_OPERAND (def_rhs, 0)),
					TREE_TYPE (rhs))))
    {
      *lhsp = unshare_expr (TREE_OPERAND (def_rhs, 0));
      fold_stmt_inplace (use_stmt);
      tidy_after_forward_propagate_addr (use_stmt);

      /* Continue propagating into the RHS if this was not the only use.  */
      if (single_use_p)
	return true;
    }

  /* Strip away any outer COMPONENT_REF, ARRAY_REF or ADDR_EXPR
     nodes from the RHS.  */
  rhsp = gimple_assign_rhs1_ptr (use_stmt);
  while (handled_component_p (*rhsp)
	 || TREE_CODE (*rhsp) == ADDR_EXPR)
    rhsp = &TREE_OPERAND (*rhsp, 0);
  rhs = *rhsp;

  /* Now see if the RHS node is an INDIRECT_REF using NAME.  If so,
     propagate the ADDR_EXPR into the use of NAME and fold the result.  */
  if (TREE_CODE (rhs) == INDIRECT_REF
      && TREE_OPERAND (rhs, 0) == name
      && may_propagate_address_into_dereference (def_rhs, rhs))
    {
      *rhsp = unshare_expr (TREE_OPERAND (def_rhs, 0));
      fold_stmt_inplace (use_stmt);
      tidy_after_forward_propagate_addr (use_stmt);
      return true;
    }

  /* Now see if the RHS node is an INDIRECT_REF using NAME.  If so, 
     propagate the ADDR_EXPR into the use of NAME and try to
     create a VCE and fold the result.  */
  if (TREE_CODE (rhs) == INDIRECT_REF
      && TREE_OPERAND (rhs, 0) == name
      && TYPE_SIZE (TREE_TYPE (rhs))
      && TYPE_SIZE (TREE_TYPE (TREE_OPERAND (def_rhs, 0)))
      /* Function decls should not be used for VCE either as it could be a
         function descriptor that we want and not the actual function code.  */
      && TREE_CODE (TREE_OPERAND (def_rhs, 0)) != FUNCTION_DECL
      /* We should not convert volatile loads to non-volatile loads. */
      && !TYPE_VOLATILE (TREE_TYPE (rhs))
      && !TYPE_VOLATILE (TREE_TYPE (TREE_OPERAND (def_rhs, 0)))
      && operand_equal_p (TYPE_SIZE (TREE_TYPE (rhs)),
			  TYPE_SIZE (TREE_TYPE (TREE_OPERAND (def_rhs, 0))), 0)) 
   {
     tree def_rhs_base, new_rhs = unshare_expr (TREE_OPERAND (def_rhs, 0));
     new_rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (rhs), new_rhs);
     if (TREE_CODE (new_rhs) != VIEW_CONVERT_EXPR)
       {
	 /* If we have folded the VIEW_CONVERT_EXPR then the result is only
	    valid if we can replace the whole rhs of the use statement.  */
	 if (rhs != gimple_assign_rhs1 (use_stmt))
	   return false;
	 new_rhs = force_gimple_operand_gsi (use_stmt_gsi, new_rhs, true, NULL,
					     true, GSI_NEW_STMT);
	 gimple_assign_set_rhs1 (use_stmt, new_rhs);
	 tidy_after_forward_propagate_addr (use_stmt);
	 return true;
       }
     /* If the defining rhs comes from an indirect reference, then do not
        convert into a VIEW_CONVERT_EXPR.  */
     def_rhs_base = TREE_OPERAND (def_rhs, 0);
     while (handled_component_p (def_rhs_base))
       def_rhs_base = TREE_OPERAND (def_rhs_base, 0);
     if (!INDIRECT_REF_P (def_rhs_base))
       {
	 /* We may have arbitrary VIEW_CONVERT_EXPRs in a nested component
	    reference.  Place it there and fold the thing.  */
	 *rhsp = new_rhs;
	 fold_stmt_inplace (use_stmt);
	 tidy_after_forward_propagate_addr (use_stmt);
	 return true;
       }
   }

  /* If the use of the ADDR_EXPR is not a POINTER_PLUS_EXPR, there
     is nothing to do. */
  if (gimple_assign_rhs_code (use_stmt) != POINTER_PLUS_EXPR
      || gimple_assign_rhs1 (use_stmt) != name)
    return false;

  /* The remaining cases are all for turning pointer arithmetic into
     array indexing.  They only apply when we have the address of
     element zero in an array.  If that is not the case then there
     is nothing to do.  */
  array_ref = TREE_OPERAND (def_rhs, 0);
  if (TREE_CODE (array_ref) != ARRAY_REF
      || TREE_CODE (TREE_TYPE (TREE_OPERAND (array_ref, 0))) != ARRAY_TYPE
      || !integer_zerop (TREE_OPERAND (array_ref, 1)))
    return false;

  rhs2 = gimple_assign_rhs2 (use_stmt);
  /* Try to optimize &x[0] p+ C where C is a multiple of the size
     of the elements in X into &x[C/element size].  */
  if (TREE_CODE (rhs2) == INTEGER_CST)
    {
      tree new_rhs = maybe_fold_stmt_addition (gimple_expr_type (use_stmt),
					       array_ref, rhs2);
      if (new_rhs)
	{
	  gimple_assign_set_rhs_from_tree (use_stmt_gsi, new_rhs);
	  use_stmt = gsi_stmt (*use_stmt_gsi);
	  update_stmt (use_stmt);
	  tidy_after_forward_propagate_addr (use_stmt);
	  return true;
	}
    }

  /* Try to optimize &x[0] p+ OFFSET where OFFSET is defined by
     converting a multiplication of an index by the size of the
     array elements, then the result is converted into the proper
     type for the arithmetic.  */
  if (TREE_CODE (rhs2) == SSA_NAME
      /* Avoid problems with IVopts creating PLUS_EXPRs with a
	 different type than their operands.  */
      && useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (def_rhs)))
    return forward_propagate_addr_into_variable_array_index (rhs2, def_rhs,
							     use_stmt_gsi);
  return false;
}
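
A small sketch (mine) of the dereference case handled in the middle of
this function: the address of a local is propagated through the pointer
into the memory accesses and folded away.

int
set (void)
{
  int v;
  int *p = &v;   /* p_1 = &v */
  *p = 1;        /* the INDIRECT_REF store folds to v = 1 */
  return *p;     /* and the load to v */
}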
Example #11
static bool
process_assignment (gimple stmt, gimple_stmt_iterator call, tree *m,
		    tree *a, tree *ass_var)
{
  tree op0, op1 = NULL_TREE, non_ass_var = NULL_TREE;
  tree dest = gimple_assign_lhs (stmt);
  enum tree_code code = gimple_assign_rhs_code (stmt);
  enum gimple_rhs_class rhs_class = get_gimple_rhs_class (code);
  tree src_var = gimple_assign_rhs1 (stmt);

  /* See if this is a simple copy operation of an SSA name to the function
     result.  In that case we may have a simple tail call.  Ignore type
     conversions that can never produce extra code between the function
     call and the function return.  */
  if ((rhs_class == GIMPLE_SINGLE_RHS || gimple_assign_cast_p (stmt))
      && (TREE_CODE (src_var) == SSA_NAME))
    {
      /* Reject a tailcall if the type conversion might need
	 additional code.  */
      if (gimple_assign_cast_p (stmt))
	{
	  if (TYPE_MODE (TREE_TYPE (dest)) != TYPE_MODE (TREE_TYPE (src_var)))
	    return false;

	  /* Even if the type modes are the same, if the precision of the
	     type is smaller than mode's precision,
	     reduce_to_bit_field_precision would generate additional code.  */
	  if (INTEGRAL_TYPE_P (TREE_TYPE (dest))
	      && (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (dest)))
		  > TYPE_PRECISION (TREE_TYPE (dest))))
	    return false;
	}

      if (src_var != *ass_var)
	return false;

      *ass_var = dest;
      return true;
    }

  switch (rhs_class)
    {
    case GIMPLE_BINARY_RHS:
      op1 = gimple_assign_rhs2 (stmt);

      /* Fall through.  */

    case GIMPLE_UNARY_RHS:
      op0 = gimple_assign_rhs1 (stmt);
      break;

    default:
      return false;
    }

  /* Accumulator optimizations will reverse the order of operations.
     We can only do that for floating-point types if we're assuming
     that addition and multiplication are associative.  */
  if (!flag_associative_math)
    if (FLOAT_TYPE_P (TREE_TYPE (DECL_RESULT (current_function_decl))))
      return false;

  if (rhs_class == GIMPLE_UNARY_RHS)
    ;
  else if (op0 == *ass_var
	   && (non_ass_var = independent_of_stmt_p (op1, stmt, call)))
    ;
  else if (op1 == *ass_var
	   && (non_ass_var = independent_of_stmt_p (op0, stmt, call)))
    ;
  else
    return false;

  switch (code)
    {
    case PLUS_EXPR:
      *a = non_ass_var;
      *ass_var = dest;
      return true;

    case POINTER_PLUS_EXPR:
      if (op0 != *ass_var)
	return false;
      *a = non_ass_var;
      *ass_var = dest;
      return true;

    case MULT_EXPR:
      *m = non_ass_var;
      *ass_var = dest;
      return true;

    case NEGATE_EXPR:
      *m = build_minus_one_cst (TREE_TYPE (op0));
      *ass_var = dest;
      return true;

    case MINUS_EXPR:
      if (*ass_var == op0)
        *a = fold_build1 (NEGATE_EXPR, TREE_TYPE (non_ass_var), non_ass_var);
      else
        {
	  *m = build_minus_one_cst (TREE_TYPE (non_ass_var));
          *a = fold_build1 (NEGATE_EXPR, TREE_TYPE (non_ass_var), non_ass_var);
        }

      *ass_var = dest;
      return true;

    default:
      return false;
    }
}
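
A classic caller-side shape (my sketch) that feeds this function: the
multiplication applied to the recursive call's result becomes the *m
accumulator, letting the tail-recursion pass turn the recursion into a
loop at -O2.

int
fact (int n)
{
  if (n <= 1)
    return 1;
  /* t = fact (n - 1); return n * t;  hits the MULT_EXPR case above
     with *m = n.  */
  return n * fact (n - 1);
}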
Example #12
File: ccmp.c Project: kraj/gcc
/* Expand conditional compare gimple G.  A typical CCMP sequence is like:

     CC0 = CMP (a, b);
     CC1 = CCMP (NE (CC0, 0), CMP (e, f));
     ...
     CCn = CCMP (NE (CCn-1, 0), CMP (...));

   hook gen_ccmp_first is used to expand the first compare.
   hook gen_ccmp_next is used to expand the following CCMP.
   PREP_SEQ returns all insns to prepare operands.
   GEN_SEQ returns all compare insns.  */
static rtx
expand_ccmp_expr_1 (gimple *g, rtx_insn **prep_seq, rtx_insn **gen_seq)
{
    tree exp = gimple_assign_rhs_to_tree (g);
    tree_code code = TREE_CODE (exp);
    gimple *gs0 = get_gimple_for_ssa_name (TREE_OPERAND (exp, 0));
    gimple *gs1 = get_gimple_for_ssa_name (TREE_OPERAND (exp, 1));
    rtx tmp;
    tree_code code0 = gimple_assign_rhs_code (gs0);
    tree_code code1 = gimple_assign_rhs_code (gs1);

    gcc_assert (code == BIT_AND_EXPR || code == BIT_IOR_EXPR);
    gcc_assert (gs0 && gs1 && is_gimple_assign (gs0) && is_gimple_assign (gs1));

    if (TREE_CODE_CLASS (code0) == tcc_comparison)
    {
        if (TREE_CODE_CLASS (code1) == tcc_comparison)
        {
            int unsignedp0, unsignedp1;
            rtx_code rcode0, rcode1;
            int speed_p = optimize_insn_for_speed_p ();
            rtx tmp2 = NULL_RTX, ret = NULL_RTX, ret2 = NULL_RTX;
            unsigned cost1 = MAX_COST;
            unsigned cost2 = MAX_COST;

            unsignedp0 = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (gs0)));
            unsignedp1 = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (gs1)));
            rcode0 = get_rtx_code (code0, unsignedp0);
            rcode1 = get_rtx_code (code1, unsignedp1);

            rtx_insn *prep_seq_1, *gen_seq_1;
            tmp = targetm.gen_ccmp_first (&prep_seq_1, &gen_seq_1, rcode0,
                                          gimple_assign_rhs1 (gs0),
                                          gimple_assign_rhs2 (gs0));

            if (tmp != NULL)
            {
                ret = expand_ccmp_next (gs1, code, tmp, &prep_seq_1, &gen_seq_1);
                cost1 = seq_cost (prep_seq_1, speed_p);
                cost1 += seq_cost (gen_seq_1, speed_p);
            }

            /* FIXME: Temporary workaround for PR69619.
               Avoid exponential compile time due to expanding gs0 and gs1 twice.
               If gs0 and gs1 are complex, the cost will be high, so avoid
               reevaluation if above an arbitrary threshold.  */
            rtx_insn *prep_seq_2, *gen_seq_2;
            if (tmp == NULL || cost1 < COSTS_N_INSNS (25))
                tmp2 = targetm.gen_ccmp_first (&prep_seq_2, &gen_seq_2, rcode1,
                                               gimple_assign_rhs1 (gs1),
                                               gimple_assign_rhs2 (gs1));

            if (!tmp && !tmp2)
                return NULL_RTX;

            if (tmp2 != NULL)
            {
                ret2 = expand_ccmp_next (gs0, code, tmp2, &prep_seq_2,
                                         &gen_seq_2);
                cost2 = seq_cost (prep_seq_2, speed_p);
                cost2 += seq_cost (gen_seq_2, speed_p);
            }

            if (cost2 < cost1)
            {
                *prep_seq = prep_seq_2;
                *gen_seq = gen_seq_2;
                return ret2;
            }

            *prep_seq = prep_seq_1;
            *gen_seq = gen_seq_1;
            return ret;
        }
        else
        {
            tmp = expand_ccmp_expr_1 (gs1, prep_seq, gen_seq);
            if (!tmp)
                return NULL_RTX;

            return expand_ccmp_next (gs0, code, tmp, prep_seq, gen_seq);
        }
    }
    else
    {
        gcc_assert (gimple_assign_rhs_code (gs0) == BIT_AND_EXPR
                    || gimple_assign_rhs_code (gs0) == BIT_IOR_EXPR);

        if (TREE_CODE_CLASS (gimple_assign_rhs_code (gs1)) == tcc_comparison)
        {
            tmp = expand_ccmp_expr_1 (gs0, prep_seq, gen_seq);
            if (!tmp)
                return NULL_RTX;

            return expand_ccmp_next (gs1, code, tmp, prep_seq, gen_seq);
        }
        else
        {
            gcc_assert (gimple_assign_rhs_code (gs1) == BIT_AND_EXPR
                        || gimple_assign_rhs_code (gs1) == BIT_IOR_EXPR);
        }
    }

    return NULL_RTX;
}
Example #13
static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
                    edge e0, edge e1, gimple phi,
                    tree arg0, tree arg1)
{
    tree result, type;
    gimple cond, new_stmt;
    edge true_edge, false_edge;
    enum tree_code cmp, minmax, ass_code;
    tree smaller, larger, arg_true, arg_false;
    gimple_stmt_iterator gsi, gsi_from;

    type = TREE_TYPE (PHI_RESULT (phi));

    /* The optimization may be unsafe due to NaNs.  */
    if (HONOR_NANS (TYPE_MODE (type)))
        return false;

    cond = last_stmt (cond_bb);
    cmp = gimple_cond_code (cond);
    result = PHI_RESULT (phi);

    /* This transformation is only valid for order comparisons.  Record which
       operand is smaller/larger if the result of the comparison is true.  */
    if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
        smaller = gimple_cond_lhs (cond);
        larger = gimple_cond_rhs (cond);
    }
    else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
        smaller = gimple_cond_rhs (cond);
        larger = gimple_cond_lhs (cond);
    }
    else
        return false;

    /* We need to know which is the true edge and which is the false
       edge so that we know if we have abs or negative abs.  */
    extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

    /* Forward the edges over the middle basic block.  */
    if (true_edge->dest == middle_bb)
        true_edge = EDGE_SUCC (true_edge->dest, 0);
    if (false_edge->dest == middle_bb)
        false_edge = EDGE_SUCC (false_edge->dest, 0);

    if (true_edge == e0)
    {
        gcc_assert (false_edge == e1);
        arg_true = arg0;
        arg_false = arg1;
    }
    else
    {
        gcc_assert (false_edge == e0);
        gcc_assert (true_edge == e1);
        arg_true = arg1;
        arg_false = arg0;
    }

    if (empty_block_p (middle_bb))
    {
        if (operand_equal_for_phi_arg_p (arg_true, smaller)
                && operand_equal_for_phi_arg_p (arg_false, larger))
        {
            /* Case

               if (smaller < larger)
               rslt = smaller;
               else
               rslt = larger;  */
            minmax = MIN_EXPR;
        }
        else if (operand_equal_for_phi_arg_p (arg_false, smaller)
                 && operand_equal_for_phi_arg_p (arg_true, larger))
            minmax = MAX_EXPR;
        else
            return false;
    }
    else
    {
        /* Recognize the following case, assuming d <= u:

        if (a <= u)
           b = MAX (a, d);
         x = PHI <b, u>

         This is equivalent to

         b = MAX (a, d);
         x = MIN (b, u);  */

        gimple assign = last_and_only_stmt (middle_bb);
        tree lhs, op0, op1, bound;

        if (!assign
                || gimple_code (assign) != GIMPLE_ASSIGN)
            return false;

        lhs = gimple_assign_lhs (assign);
        ass_code = gimple_assign_rhs_code (assign);
        if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
            return false;
        op0 = gimple_assign_rhs1 (assign);
        op1 = gimple_assign_rhs2 (assign);

        if (true_edge->src == middle_bb)
        {
            /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
            if (!operand_equal_for_phi_arg_p (lhs, arg_true))
                return false;

            if (operand_equal_for_phi_arg_p (arg_false, larger))
            {
                /* Case

                if (smaller < larger)
                   {
                     r' = MAX_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
                if (ass_code != MAX_EXPR)
                    return false;

                minmax = MIN_EXPR;
                if (operand_equal_for_phi_arg_p (op0, smaller))
                    bound = op1;
                else if (operand_equal_for_phi_arg_p (op1, smaller))
                    bound = op0;
                else
                    return false;

                /* We need BOUND <= LARGER.  */
                if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                    bound, larger)))
                    return false;
            }
            else if (operand_equal_for_phi_arg_p (arg_false, smaller))
            {
                /* Case

                if (smaller < larger)
                   {
                     r' = MIN_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
                if (ass_code != MIN_EXPR)
                    return false;

                minmax = MAX_EXPR;
                if (operand_equal_for_phi_arg_p (op0, larger))
                    bound = op1;
                else if (operand_equal_for_phi_arg_p (op1, larger))
                    bound = op0;
                else
                    return false;

                /* We need BOUND >= SMALLER.  */
                if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                    bound, smaller)))
                    return false;
            }
            else
                return false;
        }
        else
        {
            /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
            if (!operand_equal_for_phi_arg_p (lhs, arg_false))
                return false;

            if (operand_equal_for_phi_arg_p (arg_true, larger))
            {
                /* Case

                if (smaller > larger)
                   {
                     r' = MIN_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
                if (ass_code != MIN_EXPR)
                    return false;

                minmax = MAX_EXPR;
                if (operand_equal_for_phi_arg_p (op0, smaller))
                    bound = op1;
                else if (operand_equal_for_phi_arg_p (op1, smaller))
                    bound = op0;
                else
                    return false;

                /* We need BOUND >= LARGER.  */
                if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                    bound, larger)))
                    return false;
            }
            else if (operand_equal_for_phi_arg_p (arg_true, smaller))
            {
                /* Case

                if (smaller > larger)
                   {
                     r' = MAX_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
                if (ass_code != MAX_EXPR)
                    return false;

                minmax = MIN_EXPR;
                if (operand_equal_for_phi_arg_p (op0, larger))
                    bound = op1;
                else if (operand_equal_for_phi_arg_p (op1, larger))
                    bound = op0;
                else
                    return false;

                /* We need BOUND <= SMALLER.  */
                if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                    bound, smaller)))
                    return false;
            }
            else
                return false;
        }

        /* Move the statement from the middle block.  */
        gsi = gsi_last_bb (cond_bb);
        gsi_from = gsi_last_bb (middle_bb);
        gsi_move_before (&gsi_from, &gsi);
    }

    /* Emit the statement to compute min/max.  */
    result = duplicate_ssa_name (PHI_RESULT (phi), NULL);
    new_stmt = gimple_build_assign_with_ops (minmax, result, arg0, arg1);
    gsi = gsi_last_bb (cond_bb);
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

    replace_phi_edge_with_variable (cond_bb, e1, phi, result);
    return true;
}
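
A minimal empty-middle-block test case (mine): the PHI below becomes
r = MIN_EXPR (a, b); for floating point this is only valid when NaNs
need not be honored, hence the HONOR_NANS check at the top.

int
imin (int a, int b)
{
  int r;
  if (a < b)
    r = a;   /* arg_true equals smaller */
  else
    r = b;   /* arg_false equals larger */
  return r;  /* r = PHI <a, b>  ->  r = MIN_EXPR (a, b) */
}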
Example #14
static bool
forward_propagate_addr_into_variable_array_index (tree offset,
						  tree def_rhs,
						  gimple_stmt_iterator *use_stmt_gsi)
{
  tree index, tunit;
  gimple offset_def, use_stmt = gsi_stmt (*use_stmt_gsi);
  tree tmp;

  tunit = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (def_rhs)));
  if (!host_integerp (tunit, 1))
    return false;

  /* Get the offset's defining statement.  */
  offset_def = SSA_NAME_DEF_STMT (offset);

  /* Try to find an expression for a proper index.  This is either a
     multiplication expression by the element size or just the SSA name
     we came along with in case the element size is one.  In that case,
     however, we do not allow multiplications because they can be
     computing an index into a higher-level dimension (PR 37861).  */
  if (integer_onep (tunit))
    {
      if (is_gimple_assign (offset_def)
	  && gimple_assign_rhs_code (offset_def) == MULT_EXPR)
	return false;

      index = offset;
    }
  else
    {
      /* The statement which defines OFFSET before type conversion
         must be a simple GIMPLE_ASSIGN.  */
      if (!is_gimple_assign (offset_def))
	return false;

      /* The RHS of the statement which defines OFFSET must be a
	 multiplication of an object by the size of the array elements.
	 This implicitly verifies that the size of the array elements
	 is constant.  */
     if (gimple_assign_rhs_code (offset_def) == MULT_EXPR
	 && TREE_CODE (gimple_assign_rhs2 (offset_def)) == INTEGER_CST
	 && tree_int_cst_equal (gimple_assign_rhs2 (offset_def), tunit))
       {
	 /* The first operand to the MULT_EXPR is the desired index.  */
	 index = gimple_assign_rhs1 (offset_def);
       }
     /* If we have idx * tunit + CST * tunit re-associate that.  */
     else if ((gimple_assign_rhs_code (offset_def) == PLUS_EXPR
	       || gimple_assign_rhs_code (offset_def) == MINUS_EXPR)
	      && TREE_CODE (gimple_assign_rhs1 (offset_def)) == SSA_NAME
	      && TREE_CODE (gimple_assign_rhs2 (offset_def)) == INTEGER_CST
	      && (tmp = div_if_zero_remainder (EXACT_DIV_EXPR,
					       gimple_assign_rhs2 (offset_def),
					       tunit)) != NULL_TREE)
       {
	 gimple offset_def2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (offset_def));
	 if (is_gimple_assign (offset_def2)
	     && gimple_assign_rhs_code (offset_def2) == MULT_EXPR
	     && TREE_CODE (gimple_assign_rhs2 (offset_def2)) == INTEGER_CST
	     && tree_int_cst_equal (gimple_assign_rhs2 (offset_def2), tunit))
	   {
	     index = fold_build2 (gimple_assign_rhs_code (offset_def),
				  TREE_TYPE (offset),
				  gimple_assign_rhs1 (offset_def2), tmp);
	   }
	 else
	   return false;
       }
     else
	return false;
    }

  /* Replace the pointer addition with array indexing.  */
  index = force_gimple_operand_gsi (use_stmt_gsi, index, true, NULL_TREE,
				    true, GSI_SAME_STMT);
  gimple_assign_set_rhs_from_tree (use_stmt_gsi, unshare_expr (def_rhs));
  use_stmt = gsi_stmt (*use_stmt_gsi);
  TREE_OPERAND (TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0), 1)
    = index;

  /* That should have created gimple, so there is no need to
     record information to undo the propagation.  */
  fold_stmt_inplace (use_stmt);
  tidy_after_forward_propagate_addr (use_stmt);
  return true;
}
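
A sketch (hypothetical names) of the re-association case this version
adds over Example #2: an offset of the form i*4 + 8 over 4-byte
elements is rebuilt as the index i + 2.

int a[32];

int *
addr (long i)
{
  /* The PLUS_EXPR arm divides the constant by tunit and builds
     index = i + 2, giving &a[i + 2].  */
  return (int *) ((char *) &a[0] + (i * sizeof (int) + 2 * sizeof (int)));
}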
Example #15
bool
gimple_simplify (gimple *stmt,
		 code_helper *rcode, tree *ops,
		 gimple_seq *seq,
		 tree (*valueize)(tree), tree (*top_valueize)(tree))
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      {
	enum tree_code code = gimple_assign_rhs_code (stmt);
	tree type = TREE_TYPE (gimple_assign_lhs (stmt));
	switch (gimple_assign_rhs_class (stmt))
	  {
	  case GIMPLE_SINGLE_RHS:
	    if (code == REALPART_EXPR
		|| code == IMAGPART_EXPR
		|| code == VIEW_CONVERT_EXPR)
	      {
		tree op0 = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
		bool valueized = false;
		op0 = do_valueize (op0, top_valueize, valueized);
		*rcode = code;
		ops[0] = op0;
		return (gimple_resimplify1 (seq, rcode, type, ops, valueize)
			|| valueized);
	      }
	    else if (code == BIT_FIELD_REF)
	      {
		tree rhs1 = gimple_assign_rhs1 (stmt);
		tree op0 = TREE_OPERAND (rhs1, 0);
		bool valueized = false;
		op0 = do_valueize (op0, top_valueize, valueized);
		*rcode = code;
		ops[0] = op0;
		ops[1] = TREE_OPERAND (rhs1, 1);
		ops[2] = TREE_OPERAND (rhs1, 2);
		return (gimple_resimplify3 (seq, rcode, type, ops, valueize)
			|| valueized);
	      }
	    else if (code == SSA_NAME
		     && top_valueize)
	      {
		tree op0 = gimple_assign_rhs1 (stmt);
		tree valueized = top_valueize (op0);
		if (!valueized || op0 == valueized)
		  return false;
		ops[0] = valueized;
		*rcode = TREE_CODE (op0);
		return true;
	      }
	    break;
	  case GIMPLE_UNARY_RHS:
	    {
	      tree rhs1 = gimple_assign_rhs1 (stmt);
	      bool valueized = false;
	      rhs1 = do_valueize (rhs1, top_valueize, valueized);
	      *rcode = code;
	      ops[0] = rhs1;
	      return (gimple_resimplify1 (seq, rcode, type, ops, valueize)
		      || valueized);
	    }
	  case GIMPLE_BINARY_RHS:
	    {
	      tree rhs1 = gimple_assign_rhs1 (stmt);
	      tree rhs2 = gimple_assign_rhs2 (stmt);
	      bool valueized = false;
	      rhs1 = do_valueize (rhs1, top_valueize, valueized);
	      rhs2 = do_valueize (rhs2, top_valueize, valueized);
	      *rcode = code;
	      ops[0] = rhs1;
	      ops[1] = rhs2;
	      return (gimple_resimplify2 (seq, rcode, type, ops, valueize)
		      || valueized);
	    }
	  case GIMPLE_TERNARY_RHS:
	    {
	      bool valueized = false;
	      tree rhs1 = gimple_assign_rhs1 (stmt);
	      /* If this is a [VEC_]COND_EXPR first try to simplify an
		 embedded GENERIC condition.  */
	      if (code == COND_EXPR
		  || code == VEC_COND_EXPR)
		{
		  if (COMPARISON_CLASS_P (rhs1))
		    {
		      tree lhs = TREE_OPERAND (rhs1, 0);
		      tree rhs = TREE_OPERAND (rhs1, 1);
		      lhs = do_valueize (lhs, top_valueize, valueized);
		      rhs = do_valueize (rhs, top_valueize, valueized);
		      code_helper rcode2 = TREE_CODE (rhs1);
		      tree ops2[3] = {};
		      ops2[0] = lhs;
		      ops2[1] = rhs;
		      if ((gimple_resimplify2 (seq, &rcode2, TREE_TYPE (rhs1),
					       ops2, valueize)
			   || valueized)
			  && rcode2.is_tree_code ())
			{
			  valueized = true;
			  if (TREE_CODE_CLASS ((enum tree_code)rcode2)
			      == tcc_comparison)
			    rhs1 = build2 (rcode2, TREE_TYPE (rhs1),
					   ops2[0], ops2[1]);
			  else if (rcode2 == SSA_NAME
				   || rcode2 == INTEGER_CST)
			    rhs1 = ops2[0];
			  else
			    valueized = false;
			}
		    }
		}
	      tree rhs2 = gimple_assign_rhs2 (stmt);
	      tree rhs3 = gimple_assign_rhs3 (stmt);
	      rhs1 = do_valueize (rhs1, top_valueize, valueized);
	      rhs2 = do_valueize (rhs2, top_valueize, valueized);
	      rhs3 = do_valueize (rhs3, top_valueize, valueized);
	      *rcode = code;
	      ops[0] = rhs1;
	      ops[1] = rhs2;
	      ops[2] = rhs3;
	      return (gimple_resimplify3 (seq, rcode, type, ops, valueize)
		      || valueized);
	    }
	  default:
	    gcc_unreachable ();
	  }
	break;
      }

    case GIMPLE_CALL:
      /* ???  This way we can't simplify calls with side-effects.  */
      if (gimple_call_lhs (stmt) != NULL_TREE
	  && gimple_call_num_args (stmt) >= 1
	  && gimple_call_num_args (stmt) <= 3)
	{
	  tree fn = gimple_call_fn (stmt);
	  /* ???  Internal function support missing.  */
	  if (!fn)
	    return false;
	  bool valueized = false;
	  fn = do_valueize (fn, top_valueize, valueized);
	  if (TREE_CODE (fn) != ADDR_EXPR
	      || TREE_CODE (TREE_OPERAND (fn, 0)) != FUNCTION_DECL)
	    return false;

	  tree decl = TREE_OPERAND (fn, 0);
	  if (DECL_BUILT_IN_CLASS (decl) != BUILT_IN_NORMAL
	      || !builtin_decl_implicit (DECL_FUNCTION_CODE (decl))
	      || !gimple_builtin_call_types_compatible_p (stmt, decl))
	    return false;

	  tree type = TREE_TYPE (gimple_call_lhs (stmt));
	  *rcode = DECL_FUNCTION_CODE (decl);
	  for (unsigned i = 0; i < gimple_call_num_args (stmt); ++i)
	    {
	      tree arg = gimple_call_arg (stmt, i);
	      ops[i] = do_valueize (arg, top_valueize, valueized);
	    }
	  switch (gimple_call_num_args (stmt))
	    {
	    case 1:
	      return (gimple_resimplify1 (seq, rcode, type, ops, valueize)
		      || valueized);
	    case 2:
	      return (gimple_resimplify2 (seq, rcode, type, ops, valueize)
		      || valueized);
	    case 3:
	      return (gimple_resimplify3 (seq, rcode, type, ops, valueize)
		      || valueized);
	    default:
	     gcc_unreachable ();
	    }
	}
      break;

    case GIMPLE_COND:
      {
	tree lhs = gimple_cond_lhs (stmt);
	tree rhs = gimple_cond_rhs (stmt);
	bool valueized = false;
	lhs = do_valueize (lhs, top_valueize, valueized);
	rhs = do_valueize (rhs, top_valueize, valueized);
	*rcode = gimple_cond_code (stmt);
	ops[0] = lhs;
	ops[1] = rhs;
        return (gimple_resimplify2 (seq, rcode,
				    boolean_type_node, ops, valueize)
		|| valueized);
      }

    default:
      break;
    }

  return false;
}
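
For orientation, a hedged example of the statement-level rewrites this
driver enables through match.pd patterns:

int
f (int x)
{
  /* The identities here, e.g. x + 0 and t * 1, collapse to a plain
     copy of x.  */
  return (x + 0) * 1;
}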
Example #16
bool
compute_builtin_object_size (tree ptr, int object_size_type,
			     unsigned HOST_WIDE_INT *psize)
{
  gcc_assert (object_size_type >= 0 && object_size_type <= 3);

  /* Set to unknown and overwrite just before returning if the size
     could be determined.  */
  *psize = unknown[object_size_type];

  if (! offset_limit)
    init_offset_limit ();

  if (TREE_CODE (ptr) == ADDR_EXPR)
    return addr_object_size (NULL, ptr, object_size_type, psize);

  if (TREE_CODE (ptr) != SSA_NAME
      || !POINTER_TYPE_P (TREE_TYPE (ptr)))
      return false;

  if (computed[object_size_type] == NULL)
    {
      if (optimize || object_size_type & 1)
	return false;

      /* When not optimizing, rather than failing, make a small effort
	 to determine the object size without the full benefit of
	 the (costly) computation below.  */
      gimple *def = SSA_NAME_DEF_STMT (ptr);
      if (gimple_code (def) == GIMPLE_ASSIGN)
	{
	  tree_code code = gimple_assign_rhs_code (def);
	  if (code == POINTER_PLUS_EXPR)
	    {
	      tree offset = gimple_assign_rhs2 (def);
	      ptr = gimple_assign_rhs1 (def);

	      if (tree_fits_shwi_p (offset)
		  && compute_builtin_object_size (ptr, object_size_type, psize))
		{
		  /* Return zero when the offset is out of bounds.  */
		  unsigned HOST_WIDE_INT off = tree_to_shwi (offset);
		  *psize = off < *psize ? *psize - off : 0;
		  return true;
		}
	    }
	}
      return false;
    }

  if (!bitmap_bit_p (computed[object_size_type], SSA_NAME_VERSION (ptr)))
    {
      struct object_size_info osi;
      bitmap_iterator bi;
      unsigned int i;

      if (num_ssa_names > object_sizes[object_size_type].length ())
	object_sizes[object_size_type].safe_grow (num_ssa_names);
      if (dump_file)
	{
	  fprintf (dump_file, "Computing %s %sobject size for ",
		   (object_size_type & 2) ? "minimum" : "maximum",
		   (object_size_type & 1) ? "sub" : "");
	  print_generic_expr (dump_file, ptr, dump_flags);
	  fprintf (dump_file, ":\n");
	}

      osi.visited = BITMAP_ALLOC (NULL);
      osi.reexamine = BITMAP_ALLOC (NULL);
      osi.object_size_type = object_size_type;
      osi.depths = NULL;
      osi.stack = NULL;
      osi.tos = NULL;

      /* First pass: walk UD chains, compute object sizes that
	 can be computed.  The osi.reexamine bitmap at the end will
	 record which variables were found in dependency cycles
	 and therefore need to be reexamined.  */
      osi.pass = 0;
      osi.changed = false;
      collect_object_sizes_for (&osi, ptr);

      /* Second pass: keep recomputing object sizes of variables
	 that need reexamination, until no object sizes are
	 increased or all object sizes are computed.  */
      if (! bitmap_empty_p (osi.reexamine))
	{
	  bitmap reexamine = BITMAP_ALLOC (NULL);

	  /* If looking for minimum instead of maximum object size,
	     detect cases where a pointer is increased in a loop.
	     Although even without this detection pass 2 would eventually
	     terminate, it could take a long time.  If a pointer is
	     increasing this way, we need to assume 0 object size.
	     E.g. p = &buf[0]; while (cond) p = p + 4;  */
	  if (object_size_type & 2)
	    {
	      osi.depths = XCNEWVEC (unsigned int, num_ssa_names);
	      osi.stack = XNEWVEC (unsigned int, num_ssa_names);
	      osi.tos = osi.stack;
	      osi.pass = 1;
	      /* collect_object_sizes_for is changing
		 osi.reexamine bitmap, so iterate over a copy.  */
	      bitmap_copy (reexamine, osi.reexamine);
	      EXECUTE_IF_SET_IN_BITMAP (reexamine, 0, i, bi)
		if (bitmap_bit_p (osi.reexamine, i))
		  check_for_plus_in_loops (&osi, ssa_name (i));

	      free (osi.depths);
	      osi.depths = NULL;
	      free (osi.stack);
	      osi.stack = NULL;
	      osi.tos = NULL;
	    }

	  do
	    {
	      osi.pass = 2;
	      osi.changed = false;
	      /* collect_object_sizes_for is changing
		 osi.reexamine bitmap, so iterate over a copy.  */
	      bitmap_copy (reexamine, osi.reexamine);
	      EXECUTE_IF_SET_IN_BITMAP (reexamine, 0, i, bi)
		if (bitmap_bit_p (osi.reexamine, i))
		  {
		    collect_object_sizes_for (&osi, ssa_name (i));
		    if (dump_file && (dump_flags & TDF_DETAILS))
		      {
			fprintf (dump_file, "Reexamining ");
			print_generic_expr (dump_file, ssa_name (i),
					    dump_flags);
			fprintf (dump_file, "\n");
		      }
		  }
	    }
	  while (osi.changed);

	  BITMAP_FREE (reexamine);
	}
Example #17
static bool
init_dont_simulate_again (void)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  gimple phi;
  bool saw_a_complex_op = false;

  FOR_EACH_BB (bb)
    {
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  phi = gsi_stmt (gsi);
	  prop_set_simulate_again (phi,
				   is_complex_reg (gimple_phi_result (phi)));
	}

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt;
	  tree op0, op1;
	  bool sim_again_p;

	  stmt = gsi_stmt (gsi);
	  op0 = op1 = NULL_TREE;

	  /* Most control-altering statements must be initially 
	     simulated, else we won't cover the entire cfg.  */
	  sim_again_p = stmt_ends_bb_p (stmt);

	  switch (gimple_code (stmt))
	    {
	    case GIMPLE_CALL:
	      if (gimple_call_lhs (stmt))
	        sim_again_p = is_complex_reg (gimple_call_lhs (stmt));
	      break;

	    case GIMPLE_ASSIGN:
	      sim_again_p = is_complex_reg (gimple_assign_lhs (stmt));
	      if (gimple_assign_rhs_code (stmt) == REALPART_EXPR
		  || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
		op0 = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
	      else
		op0 = gimple_assign_rhs1 (stmt);
	      if (gimple_num_ops (stmt) > 2)
		op1 = gimple_assign_rhs2 (stmt);
	      break;

	    case GIMPLE_COND:
	      op0 = gimple_cond_lhs (stmt);
	      op1 = gimple_cond_rhs (stmt);
	      break;

	    default:
	      break;
	    }

	  if (op0 || op1)
	    switch (gimple_expr_code (stmt))
	      {
	      case EQ_EXPR:
	      case NE_EXPR:
	      case PLUS_EXPR:
	      case MINUS_EXPR:
	      case MULT_EXPR:
	      case TRUNC_DIV_EXPR:
	      case CEIL_DIV_EXPR:
	      case FLOOR_DIV_EXPR:
	      case ROUND_DIV_EXPR:
	      case RDIV_EXPR:
		if (TREE_CODE (TREE_TYPE (op0)) == COMPLEX_TYPE
		    || TREE_CODE (TREE_TYPE (op1)) == COMPLEX_TYPE)
		  saw_a_complex_op = true;
		break;

	      case NEGATE_EXPR:
	      case CONJ_EXPR:
		if (TREE_CODE (TREE_TYPE (op0)) == COMPLEX_TYPE)
		  saw_a_complex_op = true;
		break;

	      case REALPART_EXPR:
	      case IMAGPART_EXPR:
		/* The total store transformation performed during
		  gimplification creates such uninitialized loads
		  and we need to lower the statement to be able
		  to fix things up.  */
		if (TREE_CODE (op0) == SSA_NAME
		    && ssa_undefined_value_p (op0))
		  saw_a_complex_op = true;
		break;

	      default:
		break;
	      }

	  prop_set_simulate_again (stmt, sim_again_p);
	}
    }

  return saw_a_complex_op;
}
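
A minimal trigger (my sketch): arithmetic on _Complex operands sets
saw_a_complex_op, so the lowering pass actually runs on the function.

_Complex double
cmul (_Complex double x, _Complex double y)
{
  /* MULT_EXPR with COMPLEX_TYPE operands: the switch above records
     saw_a_complex_op = true.  */
  return x * y;
}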
Example #18
bool
gimple_simplify (gimple stmt,
		 code_helper *rcode, tree *ops,
		 gimple_seq *seq, tree (*valueize)(tree))
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      {
	enum tree_code code = gimple_assign_rhs_code (stmt);
	tree type = TREE_TYPE (gimple_assign_lhs (stmt));
	switch (gimple_assign_rhs_class (stmt))
	  {
	  case GIMPLE_SINGLE_RHS:
	    if (code == REALPART_EXPR
		|| code == IMAGPART_EXPR
		|| code == VIEW_CONVERT_EXPR)
	      {
		tree op0 = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
		if (valueize && TREE_CODE (op0) == SSA_NAME)
		  {
		    tree tem = valueize (op0);
		    if (tem)
		      op0 = tem;
		  }
		*rcode = code;
		ops[0] = op0;
		return gimple_resimplify1 (seq, rcode, type, ops, valueize);
	      }
	    else if (code == BIT_FIELD_REF)
	      {
		tree rhs1 = gimple_assign_rhs1 (stmt);
		tree op0 = TREE_OPERAND (rhs1, 0);
		if (valueize && TREE_CODE (op0) == SSA_NAME)
		  {
		    tree tem = valueize (op0);
		    if (tem)
		      op0 = tem;
		  }
		*rcode = code;
		ops[0] = op0;
		ops[1] = TREE_OPERAND (rhs1, 1);
		ops[2] = TREE_OPERAND (rhs1, 2);
		return gimple_resimplify3 (seq, rcode, type, ops, valueize);
	      }
	    else if (code == SSA_NAME
		     && valueize)
	      {
		tree op0 = gimple_assign_rhs1 (stmt);
		tree valueized = valueize (op0);
		if (!valueized || op0 == valueized)
		  return false;
		ops[0] = valueized;
		*rcode = TREE_CODE (op0);
		return true;
	      }
	    break;
	  case GIMPLE_UNARY_RHS:
	    {
	      tree rhs1 = gimple_assign_rhs1 (stmt);
	      if (valueize && TREE_CODE (rhs1) == SSA_NAME)
		{
		  tree tem = valueize (rhs1);
		  if (tem)
		    rhs1 = tem;
		}
	      *rcode = code;
	      ops[0] = rhs1;
	      return gimple_resimplify1 (seq, rcode, type, ops, valueize);
	    }
	  case GIMPLE_BINARY_RHS:
	    {
	      tree rhs1 = gimple_assign_rhs1 (stmt);
	      if (valueize && TREE_CODE (rhs1) == SSA_NAME)
		{
		  tree tem = valueize (rhs1);
		  if (tem)
		    rhs1 = tem;
		}
	      tree rhs2 = gimple_assign_rhs2 (stmt);
	      if (valueize && TREE_CODE (rhs2) == SSA_NAME)
		{
		  tree tem = valueize (rhs2);
		  if (tem)
		    rhs2 = tem;
		}
	      *rcode = code;
	      ops[0] = rhs1;
	      ops[1] = rhs2;
	      return gimple_resimplify2 (seq, rcode, type, ops, valueize);
	    }
	  case GIMPLE_TERNARY_RHS:
	    {
	      tree rhs1 = gimple_assign_rhs1 (stmt);
	      if (valueize && TREE_CODE (rhs1) == SSA_NAME)
		{
		  tree tem = valueize (rhs1);
		  if (tem)
		    rhs1 = tem;
		}
	      tree rhs2 = gimple_assign_rhs2 (stmt);
	      if (valueize && TREE_CODE (rhs2) == SSA_NAME)
		{
		  tree tem = valueize (rhs2);
		  if (tem)
		    rhs2 = tem;
		}
	      tree rhs3 = gimple_assign_rhs3 (stmt);
	      if (valueize && TREE_CODE (rhs3) == SSA_NAME)
		{
		  tree tem = valueize (rhs3);
		  if (tem)
		    rhs3 = tem;
		}
	      *rcode = code;
	      ops[0] = rhs1;
	      ops[1] = rhs2;
	      ops[2] = rhs3;
	      return gimple_resimplify3 (seq, rcode, type, ops, valueize);
	    }
	  default:
	    gcc_unreachable ();
	  }
	break;
      }

    case GIMPLE_CALL:
      /* ???  This way we can't simplify calls with side-effects.  */
      if (gimple_call_lhs (stmt) != NULL_TREE)
	{
	  tree fn = gimple_call_fn (stmt);
	  /* ???  Internal function support missing.  */
	  if (!fn)
	    return false;
	  if (valueize && TREE_CODE (fn) == SSA_NAME)
	    {
	      tree tem = valueize (fn);
	      if (tem)
		fn = tem;
	    }
	  if (!fn
	      || TREE_CODE (fn) != ADDR_EXPR
	      || TREE_CODE (TREE_OPERAND (fn, 0)) != FUNCTION_DECL
	      || DECL_BUILT_IN_CLASS (TREE_OPERAND (fn, 0)) != BUILT_IN_NORMAL
	      || !builtin_decl_implicit (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0)))
	      || !gimple_builtin_call_types_compatible_p (stmt,
							  TREE_OPERAND (fn, 0)))
	    return false;

	  tree decl = TREE_OPERAND (fn, 0);
	  tree type = TREE_TYPE (gimple_call_lhs (stmt));
	  switch (gimple_call_num_args (stmt))
	    {
	    case 1:
	      {
		tree arg1 = gimple_call_arg (stmt, 0);
		if (valueize && TREE_CODE (arg1) == SSA_NAME)
		  {
		    tree tem = valueize (arg1);
		    if (tem)
		      arg1 = tem;
		  }
		*rcode = DECL_FUNCTION_CODE (decl);
		ops[0] = arg1;
		return gimple_resimplify1 (seq, rcode, type, ops, valueize);
	      }
	    case 2:
	      {
		tree arg1 = gimple_call_arg (stmt, 0);
		if (valueize && TREE_CODE (arg1) == SSA_NAME)
		  {
		    tree tem = valueize (arg1);
		    if (tem)
		      arg1 = tem;
		  }
		tree arg2 = gimple_call_arg (stmt, 1);
		if (valueize && TREE_CODE (arg2) == SSA_NAME)
		  {
		    tree tem = valueize (arg2);
		    if (tem)
		      arg2 = tem;
		  }
		*rcode = DECL_FUNCTION_CODE (decl);
		ops[0] = arg1;
		ops[1] = arg2;
		return gimple_resimplify2 (seq, rcode, type, ops, valueize);
	      }
	    case 3:
	      {
		tree arg1 = gimple_call_arg (stmt, 0);
		if (valueize && TREE_CODE (arg1) == SSA_NAME)
		  {
		    tree tem = valueize (arg1);
		    if (tem)
		      arg1 = tem;
		  }
		tree arg2 = gimple_call_arg (stmt, 1);
		if (valueize && TREE_CODE (arg2) == SSA_NAME)
		  {
		    tree tem = valueize (arg2);
		    if (tem)
		      arg2 = tem;
		  }
		tree arg3 = gimple_call_arg (stmt, 2);
		if (valueize && TREE_CODE (arg3) == SSA_NAME)
		  {
		    tree tem = valueize (arg3);
		    if (tem)
		      arg3 = tem;
		  }
		*rcode = DECL_FUNCTION_CODE (decl);
		ops[0] = arg1;
		ops[1] = arg2;
		ops[2] = arg3;
		return gimple_resimplify3 (seq, rcode, type, ops, valueize);
	      }
	    default:
	      return false;
	    }
	}
      break;

    case GIMPLE_COND:
      {
	tree lhs = gimple_cond_lhs (stmt);
	if (valueize && TREE_CODE (lhs) == SSA_NAME)
	  {
	    tree tem = valueize (lhs);
	    if (tem)
	      lhs = tem;
	  }
	tree rhs = gimple_cond_rhs (stmt);
	if (valueize && TREE_CODE (rhs) == SSA_NAME)
	  {
	    tree tem = valueize (rhs);
	    if (tem)
	      rhs = tem;
	  }
	*rcode = gimple_cond_code (stmt);
	ops[0] = lhs;
	ops[1] = rhs;
        return gimple_resimplify2 (seq, rcode, boolean_type_node, ops, valueize);
      }

    default:
      break;
    }

  return false;
}
Example #19
static gimple
vect_recog_dot_prod_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  gimple stmt;
  tree oprnd0, oprnd1;
  tree oprnd00, oprnd01;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  tree prod_type;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);
  tree var;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE1) X;
          DY = (TYPE1) Y;
          DPROD = DX * DY;
          DDPROD = (TYPE2) DPROD;
          sum_1 = DDPROD + sum_0;
     In which
     - DX is double the size of X
     - DY is double the size of Y
     - DX, DY, DPROD all have the same type
     - sum is the same size as DPROD or bigger
     - sum has been recognized as a reduction variable.

     This is equivalent to:
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD w+ sum_0;  #widen summation
     or
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD + sum_0;   #summation
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as widening-summation?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      type = gimple_expr_type (stmt);
      if (gimple_assign_rhs_code (stmt) != WIDEN_SUM_EXPR)
        return NULL;
      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      half_type = TREE_TYPE (oprnd0);
    }
  else
    {
      gimple def_stmt;

      if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
        return NULL;
      oprnd0 = gimple_assign_rhs1 (last_stmt);
      oprnd1 = gimple_assign_rhs2 (last_stmt);
      if (!types_compatible_p (TREE_TYPE (oprnd0), type)
	  || !types_compatible_p (TREE_TYPE (oprnd1), type))
        return NULL;
      stmt = last_stmt;

      if (widened_name_p (oprnd0, stmt, &half_type, &def_stmt))
        {
          stmt = def_stmt;
          oprnd0 = gimple_assign_rhs1 (stmt);
        }
      else
        half_type = type;
    }

  /* So far so good. Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a (widen_)mult_expr  */

  prod_type = half_type;
  stmt = SSA_NAME_DEF_STMT (oprnd0);

  /* It could not be the dot_prod pattern if the stmt is outside the loop.  */
  if (!gimple_bb (stmt) || !flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
    return NULL;

  /* FORNOW.  Can continue analyzing the def-use chain when this stmt is in
     a phi inside the loop (in case we are analyzing an outer-loop).  */
  if (!is_gimple_assign (stmt))
    return NULL;
  stmt_vinfo = vinfo_for_stmt (stmt);
  gcc_assert (stmt_vinfo);
  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
    return NULL;
  if (gimple_assign_rhs_code (stmt) != MULT_EXPR)
    return NULL;
  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as a widening multiplication?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      if (gimple_assign_rhs_code (stmt) != WIDEN_MULT_EXPR)
        return NULL;
      stmt_vinfo = vinfo_for_stmt (stmt);
      gcc_assert (stmt_vinfo);
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_internal_def);
      oprnd00 = gimple_assign_rhs1 (stmt);
      oprnd01 = gimple_assign_rhs2 (stmt);
    }
  else
    {
      tree half_type0, half_type1;
      gimple def_stmt;
      tree oprnd0, oprnd1;

      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      if (!types_compatible_p (TREE_TYPE (oprnd0), prod_type)
          || !types_compatible_p (TREE_TYPE (oprnd1), prod_type))
        return NULL;
      if (!widened_name_p (oprnd0, stmt, &half_type0, &def_stmt))
        return NULL;
      oprnd00 = gimple_assign_rhs1 (def_stmt);
      if (!widened_name_p (oprnd1, stmt, &half_type1, &def_stmt))
        return NULL;
      oprnd01 = gimple_assign_rhs1 (def_stmt);
      if (!types_compatible_p (half_type0, half_type1))
        return NULL;
      if (TYPE_PRECISION (prod_type) != TYPE_PRECISION (half_type0) * 2)
	return NULL;
    }

  half_type = TREE_TYPE (oprnd00);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected. Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops3 (DOT_PROD_EXPR, var,
						oprnd00, oprnd01, oprnd1);

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_recog_dot_prod_pattern: detected: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));

  return pattern_stmt;
}
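For orientation, a C loop of the shape this recognizer matches (an illustrative example, not taken from GCC sources): both short operands are widened to int before the multiply, and sum is an int reduction, so the multiply-and-accumulate can be replaced by DOT_PROD_EXPR.

/* Illustrative only: lowers to DX = (int) x[i]; DY = (int) y[i];
   DPROD = DX * DY; sum_1 = DPROD + sum_0, i.e. the pattern above.  */
int
dot_prod (const short *x, const short *y, int n)
{
  int sum = 0;
  for (int i = 0; i < n; i++)
    sum += x[i] * y[i];
  return sum;
}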
Example #20
static gimple
vect_recog_widen_mult_pattern (gimple last_stmt,
			       tree *type_in,
			       tree *type_out)
{
  gimple def_stmt0, def_stmt1;
  tree oprnd0, oprnd1;
  tree type, half_type0, half_type1;
  gimple pattern_stmt;
  tree vectype, vectype_out;
  tree dummy;
  tree var;
  enum tree_code dummy_code;
  int dummy_int;
  VEC (tree, heap) *dummy_vec;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != MULT_EXPR)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* Check argument 0 */
  if (!widened_name_p (oprnd0, last_stmt, &half_type0, &def_stmt0))
    return NULL;
  oprnd0 = gimple_assign_rhs1 (def_stmt0);

  /* Check argument 1 */
  if (!widened_name_p (oprnd1, last_stmt, &half_type1, &def_stmt1))
    return NULL;
  oprnd1 = gimple_assign_rhs1 (def_stmt1);

  if (!types_compatible_p (half_type0, half_type1))
    return NULL;

  /* Pattern detected.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vect_recog_widen_mult_pattern: detected: ");

  /* Check target support  */
  vectype = get_vectype_for_scalar_type (half_type0);
  vectype_out = get_vectype_for_scalar_type (type);
  if (!vectype
      || !vectype_out
      || !supportable_widening_operation (WIDEN_MULT_EXPR, last_stmt,
					  vectype_out, vectype,
					  &dummy, &dummy, &dummy_code,
					  &dummy_code, &dummy_int, &dummy_vec))
    return NULL;

  *type_in = vectype;
  *type_out = vectype_out;

  /* Pattern supported. Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_MULT_EXPR, var, oprnd0,
					       oprnd1);
  SSA_NAME_DEF_STMT (var) = pattern_stmt;

  if (vect_print_dump_info (REPORT_DETAILS))
    print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);

  return pattern_stmt;
}
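An illustrative source loop for this pattern (not from GCC): both operands are widened from the half type short to int before the MULT_EXPR, which is exactly what widened_name_p verifies, so the multiply can become a WIDEN_MULT_EXPR.

/* Illustrative only: a[i] and b[i] are promoted from short to int,
   so the int product matches the widening-multiplication pattern.  */
void
widen_mult (int *out, const short *a, const short *b, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = a[i] * b[i];
}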
Example #21
expr_hash_elt::expr_hash_elt (gimple *stmt, tree orig_lhs)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = this->expr ();

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
	  expr->kind = EXPR_SINGLE;
	  expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
	  expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
	  break;
        case GIMPLE_UNARY_RHS:
	  expr->kind = EXPR_UNARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  if (CONVERT_EXPR_CODE_P (subcode))
	    subcode = NOP_EXPR;
	  expr->ops.unary.op = subcode;
	  expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
	  break;
        case GIMPLE_BINARY_RHS:
	  expr->kind = EXPR_BINARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  expr->ops.binary.op = subcode;
	  expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
	  expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
	  break;
        case GIMPLE_TERNARY_RHS:
	  expr->kind = EXPR_TERNARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  expr->ops.ternary.op = subcode;
	  expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
	  expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
	  expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
	  break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
    {
      size_t nargs = gimple_call_num_args (call_stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (call_stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (call_stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = call_stmt;

      if (gimple_call_flags (call_stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (call_stmt, i);
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    {
      expr->type = TREE_TYPE (gimple_switch_index (swtch_stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (swtch_stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  m_lhs = orig_lhs;
  m_vop = gimple_vuse (stmt);
  m_hash = avail_expr_hash (this);
  m_stamp = this;
}
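The constructor canonicalizes each statement kind into a hashable_expr so that structurally identical computations compare and hash equal: a_1 = b_2 + c_3 and d_4 = b_2 + c_3 yield matching EXPR_BINARY records regardless of their left-hand sides. A toy stand-alone analogue of that structural hash (illustrative only, not GCC's avail_expr_hash) might mix the operation code with the operand identities:

#include <stddef.h>

/* Toy analogue of hashing an EXPR_BINARY record; the fields stand in
   for the tree code and the SSA versions of the two operands.  */
struct toy_binary_expr
{
  int op;
  size_t opnd0;
  size_t opnd1;
};

static size_t
toy_hash (const struct toy_binary_expr *e)
{
  size_t h = (size_t) e->op;
  h = h * 131 + e->opnd0;  /* Simple multiplicative mixing.  */
  h = h * 131 + e->opnd1;
  return h;
}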
Example #22
static gimple
vect_recog_widen_sum_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  gimple stmt;
  tree oprnd0, oprnd1;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);
  tree var;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE) X;
          sum_1 = DX + sum_0;
     In which DX is at least double the size of X, and sum_1 has been
     recognized as a reduction variable.
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* So far so good. Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a cast from type 'type' to type
     'TYPE'.  */

  if (!widened_name_p (oprnd0, last_stmt, &half_type, &stmt))
    return NULL;

  oprnd0 = gimple_assign_rhs1 (stmt);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected. Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var,
					       oprnd0, oprnd1);
  SSA_NAME_DEF_STMT (var) = pattern_stmt;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_recog_widen_sum_pattern: detected: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));

  return pattern_stmt;
}
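An illustrative source loop for the widening-summation pattern (not from GCC): each element is widened from short to int before being added to the int reduction variable, so the add can become a WIDEN_SUM_EXPR.

/* Illustrative only: lowers to DX = (int) x[i]; sum_1 = DX + sum_0,
   i.e. the pattern described in the comment above.  */
int
widen_sum (const short *x, int n)
{
  int sum = 0;
  for (int i = 0; i < n; i++)
    sum += x[i];
  return sum;
}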
Example #23
static bool
tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel, struct loop_size *size,
			 int upper_bound)
{
  basic_block *body = get_loop_body (loop);
  gimple_stmt_iterator gsi;
  unsigned int i;
  bool after_exit;
  vec<basic_block> path = get_loop_hot_path (loop);

  size->overall = 0;
  size->eliminated_by_peeling = 0;
  size->last_iteration = 0;
  size->last_iteration_eliminated_by_peeling = 0;
  size->num_pure_calls_on_hot_path = 0;
  size->num_non_pure_calls_on_hot_path = 0;
  size->non_call_stmts_on_hot_path = 0;
  size->num_branches_on_hot_path = 0;
  size->constant_iv = 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num);
  for (i = 0; i < loop->num_nodes; i++)
    {
      if (edge_to_cancel && body[i] != edge_to_cancel->src
	  && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src))
	after_exit = true;
      else
	after_exit = false;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index, after_exit);

      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  int num = estimate_num_insns (stmt, &eni_size_weights);
	  bool likely_eliminated = false;
	  bool likely_eliminated_last = false;
	  bool likely_eliminated_peeled = false;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "  size: %3i ", num);
	      print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0);
	    }

	  /* Look for reasons why we might optimize this stmt away. */

	  if (gimple_has_side_effects (stmt))
	    ;
	  /* Exit conditional.  */
	  else if (exit && body[i] == exit->src
		   && stmt == last_stmt (exit->src))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
	        fprintf (dump_file, "   Exit condition will be eliminated "
			 "in peeled copies.\n");
	      likely_eliminated_peeled = true;
	    }
	  else if (edge_to_cancel && body[i] == edge_to_cancel->src
		   && stmt == last_stmt (edge_to_cancel->src))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
	        fprintf (dump_file, "   Exit condition will be eliminated "
			 "in last copy.\n");
	      likely_eliminated_last = true;
	    }
	  /* Sets of IV variables  */
	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
	      && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
	        fprintf (dump_file, "   Induction variable computation will"
			 " be folded away.\n");
	      likely_eliminated = true;
	    }
	  /* Assignments of IV variables.  */
	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
		   && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
		   && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt, loop)
		   && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
		       || constant_after_peeling (gimple_assign_rhs2 (stmt),
		       				  stmt, loop)))
	    {
	      size->constant_iv = true;
	      if (dump_file && (dump_flags & TDF_DETAILS))
	        fprintf (dump_file, "   Constant expression will be folded away.\n");
	      likely_eliminated = true;
	    }
	  /* Conditionals.  */
	  else if ((gimple_code (stmt) == GIMPLE_COND
		    && constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
		    && constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop)
		    /* We don't simplify all constant compares so make sure
		       they are not both constant already.  See PR70288.  */
		    && (! is_gimple_min_invariant (gimple_cond_lhs (stmt))
			|| ! is_gimple_min_invariant (gimple_cond_rhs (stmt))))
		   || (gimple_code (stmt) == GIMPLE_SWITCH
		       && constant_after_peeling (gimple_switch_index (
						    as_a <gswitch *> (stmt)),
						  stmt, loop)
		       && ! is_gimple_min_invariant (gimple_switch_index (
						       as_a <gswitch *> (stmt)))))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
	        fprintf (dump_file, "   Constant conditional.\n");
	      likely_eliminated = true;
	    }

	  size->overall += num;
	  if (likely_eliminated || likely_eliminated_peeled)
	    size->eliminated_by_peeling += num;
	  if (!after_exit)
	    {
	      size->last_iteration += num;
	      if (likely_eliminated || likely_eliminated_last)
		size->last_iteration_eliminated_by_peeling += num;
	    }
	  if ((size->overall * 3 / 2 - size->eliminated_by_peeling
	      - size->last_iteration_eliminated_by_peeling) > upper_bound)
	    {
              free (body);
	      path.release ();
	      return true;
	    }
	}
    }
  while (path.length ())
    {
      basic_block bb = path.pop ();
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  if (gimple_code (stmt) == GIMPLE_CALL)
	    {
	      int flags = gimple_call_flags (stmt);
	      tree decl = gimple_call_fndecl (stmt);

	      if (decl && DECL_IS_BUILTIN (decl)
		  && is_inexpensive_builtin (decl))
		;
	      else if (flags & (ECF_PURE | ECF_CONST))
		size->num_pure_calls_on_hot_path++;
	      else
		size->num_non_pure_calls_on_hot_path++;
	      size->num_branches_on_hot_path++;
	    }
	  else if (gimple_code (stmt) != GIMPLE_CALL
		   && gimple_code (stmt) != GIMPLE_DEBUG)
	    size->non_call_stmts_on_hot_path++;
	  if (((gimple_code (stmt) == GIMPLE_COND
	        && (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
		    || !constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop)))
	       || (gimple_code (stmt) == GIMPLE_SWITCH
		   && !constant_after_peeling (gimple_switch_index (
						 as_a <gswitch *> (stmt)),
					       stmt, loop)))
	      && (!exit || bb != exit->src))
	    size->num_branches_on_hot_path++;
	}
    }
  path.release ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
    	     size->eliminated_by_peeling, size->last_iteration,
	     size->last_iteration_eliminated_by_peeling);

  free (body);
  return false;
}
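To make the bookkeeping concrete, consider an illustrative loop (not taken from GCC): with a constant trip count, the exit compare and the induction-variable update are classified as eliminated_by_peeling, so after complete unrolling only the stores still count against upper_bound.

/* Illustrative only: the `i < 4' test and the `i++' update fold away
   in peeled copies; the four stores are what remains of
   size->overall after subtracting eliminated_by_peeling.  */
void
init4 (int *a)
{
  for (int i = 0; i < 4; i++)
    a[i] = i;
}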
Example #24
/* Compute value of PTR and put it into address RES.  */
static void
chkp_collect_value (tree ptr, address_t &res)
{
  gimple *def_stmt;
  enum gimple_code code;
  enum tree_code rhs_code;
  address_t addr;
  tree rhs1;

  if (TREE_CODE (ptr) == INTEGER_CST)
    {
      chkp_add_addr_item (res, ptr, NULL);
      return;
    }
  else if (TREE_CODE (ptr) == ADDR_EXPR)
    {
      chkp_collect_addr_value (ptr, res);
      return;
    }
  else if (TREE_CODE (ptr) != SSA_NAME)
    {
      chkp_add_addr_item (res, integer_one_node, ptr);
      return;
    }

  /* Now we handle the case when polynomial is computed
     for SSA NAME.  */
  def_stmt = SSA_NAME_DEF_STMT (ptr);
  code = gimple_code (def_stmt);

  /* Currently we do not walk through statements other
     than assignment.  */
  if (code != GIMPLE_ASSIGN)
    {
      chkp_add_addr_item (res, integer_one_node, ptr);
      return;
    }

  rhs_code = gimple_assign_rhs_code (def_stmt);
  rhs1 = gimple_assign_rhs1 (def_stmt);

  switch (rhs_code)
    {
    case SSA_NAME:
    case INTEGER_CST:
    case ADDR_EXPR:
      chkp_collect_value (rhs1, res);
      break;

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      chkp_collect_value (rhs1, res);
      addr.pol.create (0);
      chkp_collect_value (gimple_assign_rhs2 (def_stmt), addr);
      chkp_add_addr_addr (res, addr);
      addr.pol.release ();
      break;

    case MINUS_EXPR:
      chkp_collect_value (rhs1, res);
      addr.pol.create (0);
      chkp_collect_value (gimple_assign_rhs2 (def_stmt), addr);
      chkp_sub_addr_addr (res, addr);
      addr.pol.release ();
      break;

    case MULT_EXPR:
      if (TREE_CODE (rhs1) == SSA_NAME
	  && TREE_CODE (gimple_assign_rhs2 (def_stmt)) == INTEGER_CST)
	{
	  chkp_collect_value (rhs1, res);
	  chkp_mult_addr (res, gimple_assign_rhs2 (def_stmt));
	}
      else if (TREE_CODE (gimple_assign_rhs2 (def_stmt)) == SSA_NAME
	       && TREE_CODE (rhs1) == INTEGER_CST)
	{
	  chkp_collect_value (gimple_assign_rhs2 (def_stmt), res);
	  chkp_mult_addr (res, rhs1);
	}
      else
	chkp_add_addr_item (res, integer_one_node, ptr);
      break;

    default:
      chkp_add_addr_item (res, integer_one_node, ptr);
      break;
    }
}
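The recursion decomposes a pointer into a polynomial of (coefficient, term) items: for p_3 = q_1 + i_2 * 4 it produces {(1, q_1), (4, i_2)}. A stand-alone sketch of the item representation and of the scaling step that MULT_EXPR by a constant performs (hypothetical names, not the chkp_* API):

/* Hypothetical illustration, not the chkp_* API.  A polynomial is a
   list of (coefficient, term) items; term == NULL marks the constant
   part.  */
struct toy_item { long coef; const char *term; };

/* MULT_EXPR by a constant scales every coefficient of the collected
   polynomial, mirroring chkp_mult_addr above.  */
static void
toy_mult_addr (struct toy_item *items, int n, long by)
{
  for (int i = 0; i < n; i++)
    items[i].coef *= by;
}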