static bool
check_range (gimple swtch)
{
  tree min_case, max_case;
  unsigned int branch_num = gimple_switch_num_labels (swtch);
  tree range_max;

  /* The gimplifier has already sorted the cases by CASE_LOW and ensured there
     is a default label which is the last in the vector.  */

  min_case = gimple_switch_label (swtch, 1);
  info.range_min = CASE_LOW (min_case);

  gcc_assert (branch_num > 1);
  gcc_assert (CASE_LOW (gimple_switch_label (swtch, 0)) == NULL_TREE);
  max_case = gimple_switch_label (swtch, branch_num - 1);
  if (CASE_HIGH (max_case) != NULL_TREE)
    range_max = CASE_HIGH (max_case);
  else
    range_max = CASE_LOW (max_case);

  gcc_assert (info.range_min);
  gcc_assert (range_max);

  info.range_size = int_const_binop (MINUS_EXPR, range_max, info.range_min, 0);

  gcc_assert (info.range_size);
  if (!host_integerp (info.range_size, 1))
    {
      info.reason = "index range way too large or otherwise unusable.\n";
      return false;
    }

  if ((unsigned HOST_WIDE_INT) tree_low_cst (info.range_size, 1)
      > ((unsigned) branch_num * SWITCH_CONVERSION_BRANCH_RATIO))
    {
      info.reason = "the maximum range-branch ratio exceeded.\n";
      return false;
    }

  return true;
}
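A hypothetical source-level case this check rejects (assuming SWITCH_CONVERSION_BRANCH_RATIO is 8, its usual default):

/* Three case labels spanning [0, 1000]: branch_num is 4 (three cases
   plus the default), so the limit is 4 * 8 = 32, and a range_size of
   1000 fails the ratio check above.  */
int
lookup (int i)
{
  switch (i)
    {
    case 0:    return 1;
    case 500:  return 2;
    case 1000: return 3;
    default:   return 0;
    }
}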
Example #2
static unsigned HOST_WIDE_INT
addr_object_size (tree ptr, int object_size_type)
{
  tree pt_var;

  gcc_assert (TREE_CODE (ptr) == ADDR_EXPR);

  pt_var = TREE_OPERAND (ptr, 0);
  if (REFERENCE_CLASS_P (pt_var))
    pt_var = get_base_address (pt_var);

  if (pt_var
      && (SSA_VAR_P (pt_var) || TREE_CODE (pt_var) == STRING_CST)
      && TYPE_SIZE_UNIT (TREE_TYPE (pt_var))
      && host_integerp (TYPE_SIZE_UNIT (TREE_TYPE (pt_var)), 1)
      && (unsigned HOST_WIDE_INT)
	 tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (pt_var)), 1) < offset_limit)
    {
      tree bytes;

      if (pt_var != TREE_OPERAND (ptr, 0))
	{
	  tree var;

	  if (object_size_type & 1)
	    {
	      var = TREE_OPERAND (ptr, 0);

	      while (var != pt_var
		      && TREE_CODE (var) != BIT_FIELD_REF
		      && TREE_CODE (var) != COMPONENT_REF
		      && TREE_CODE (var) != ARRAY_REF
		      && TREE_CODE (var) != ARRAY_RANGE_REF
		      && TREE_CODE (var) != REALPART_EXPR
		      && TREE_CODE (var) != IMAGPART_EXPR)
		var = TREE_OPERAND (var, 0);
	      if (var != pt_var && TREE_CODE (var) == ARRAY_REF)
		var = TREE_OPERAND (var, 0);
	      if (! TYPE_SIZE_UNIT (TREE_TYPE (var))
		  || ! host_integerp (TYPE_SIZE_UNIT (TREE_TYPE (var)), 1)
		  || tree_int_cst_lt (TYPE_SIZE_UNIT (TREE_TYPE (pt_var)),
				      TYPE_SIZE_UNIT (TREE_TYPE (var))))
		var = pt_var;
	    }
	  else
	    var = pt_var;

	  bytes = compute_object_offset (TREE_OPERAND (ptr, 0), var);
	  if (bytes != error_mark_node)
	    {
	      if (TREE_CODE (bytes) == INTEGER_CST
		  && tree_int_cst_lt (TYPE_SIZE_UNIT (TREE_TYPE (var)), bytes))
		bytes = size_zero_node;
	      else
		bytes = size_binop (MINUS_EXPR,
				    TYPE_SIZE_UNIT (TREE_TYPE (var)), bytes);
	    }
	}
      else
	bytes = TYPE_SIZE_UNIT (TREE_TYPE (pt_var));

      if (host_integerp (bytes, 1))
	return tree_low_cst (bytes, 1);
    }

  return unknown[object_size_type];
}
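For reference, a minimal sketch (not part of this code) of the semantics addr_object_size implements, assuming a typical ABI where struct S below has size 20 with no internal padding:

#include <stdio.h>

struct S { char buf[16]; int i; };

int
main (void)
{
  struct S s;
  /* object_size_type 0: bytes remaining in the whole object.  */
  printf ("%zu\n", __builtin_object_size (&s.buf[4], 0));  /* 16 */
  /* object_size_type 1: bytes remaining in the closest subobject.  */
  printf ("%zu\n", __builtin_object_size (&s.buf[4], 1));  /* 12 */
  return 0;
}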
Example #3
static unsigned HOST_WIDE_INT
addr_object_size (struct object_size_info *osi, const_tree ptr,
		  int object_size_type)
{
  tree pt_var, pt_var_size = NULL_TREE, var_size, bytes;

  gcc_assert (TREE_CODE (ptr) == ADDR_EXPR);

  pt_var = TREE_OPERAND (ptr, 0);
  while (handled_component_p (pt_var))
    pt_var = TREE_OPERAND (pt_var, 0);

  if (pt_var
      && TREE_CODE (pt_var) == MEM_REF)
    {
      unsigned HOST_WIDE_INT sz;

      if (!osi || (object_size_type & 1) != 0
	  || TREE_CODE (TREE_OPERAND (pt_var, 0)) != SSA_NAME)
	{
	  sz = compute_builtin_object_size (TREE_OPERAND (pt_var, 0),
					    object_size_type & ~1);
	}
      else
	{
	  tree var = TREE_OPERAND (pt_var, 0);
	  if (osi->pass == 0)
	    collect_object_sizes_for (osi, var);
	  if (bitmap_bit_p (computed[object_size_type],
			    SSA_NAME_VERSION (var)))
	    sz = object_sizes[object_size_type][SSA_NAME_VERSION (var)];
	  else
	    sz = unknown[object_size_type];
	}
      if (sz != unknown[object_size_type])
	{
	  double_int dsz = double_int::from_uhwi (sz) - mem_ref_offset (pt_var);
	  if (dsz.is_negative ())
	    sz = 0;
	  else if (dsz.fits_uhwi ())
	    sz = dsz.to_uhwi ();
	  else
	    sz = unknown[object_size_type];
	}

      if (sz != unknown[object_size_type] && sz < offset_limit)
	pt_var_size = size_int (sz);
    }
  else if (pt_var
	   && DECL_P (pt_var)
	   && host_integerp (DECL_SIZE_UNIT (pt_var), 1)
	   && (unsigned HOST_WIDE_INT)
	        tree_low_cst (DECL_SIZE_UNIT (pt_var), 1) < offset_limit)
    pt_var_size = DECL_SIZE_UNIT (pt_var);
  else if (pt_var
	   && TREE_CODE (pt_var) == STRING_CST
	   && TYPE_SIZE_UNIT (TREE_TYPE (pt_var))
	   && host_integerp (TYPE_SIZE_UNIT (TREE_TYPE (pt_var)), 1)
	   && (unsigned HOST_WIDE_INT)
	      tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (pt_var)), 1)
	      < offset_limit)
    pt_var_size = TYPE_SIZE_UNIT (TREE_TYPE (pt_var));
  else
    return unknown[object_size_type];

  if (pt_var != TREE_OPERAND (ptr, 0))
    {
      tree var;

      if (object_size_type & 1)
	{
	  var = TREE_OPERAND (ptr, 0);

	  while (var != pt_var
		 && TREE_CODE (var) != BIT_FIELD_REF
		 && TREE_CODE (var) != COMPONENT_REF
		 && TREE_CODE (var) != ARRAY_REF
		 && TREE_CODE (var) != ARRAY_RANGE_REF
		 && TREE_CODE (var) != REALPART_EXPR
		 && TREE_CODE (var) != IMAGPART_EXPR)
	    var = TREE_OPERAND (var, 0);
	  if (var != pt_var && TREE_CODE (var) == ARRAY_REF)
	    var = TREE_OPERAND (var, 0);
	  if (! TYPE_SIZE_UNIT (TREE_TYPE (var))
	      || ! host_integerp (TYPE_SIZE_UNIT (TREE_TYPE (var)), 1)
	      || (pt_var_size
		  && tree_int_cst_lt (pt_var_size,
				      TYPE_SIZE_UNIT (TREE_TYPE (var)))))
	    var = pt_var;
	  else if (var != pt_var && TREE_CODE (pt_var) == MEM_REF)
	    {
	      tree v = var;
	      /* For &X->fld, compute the object size only if fld isn't the
		 last field, as struct { int i; char c[1]; } is often used
		 instead of a flexible array member.  */
	      while (v && v != pt_var)
		switch (TREE_CODE (v))
		  {
		  case ARRAY_REF:
		    if (TYPE_SIZE_UNIT (TREE_TYPE (TREE_OPERAND (v, 0)))
			&& TREE_CODE (TREE_OPERAND (v, 1)) == INTEGER_CST)
		      {
			tree domain
			  = TYPE_DOMAIN (TREE_TYPE (TREE_OPERAND (v, 0)));
			if (domain
			    && TYPE_MAX_VALUE (domain)
			    && TREE_CODE (TYPE_MAX_VALUE (domain))
			       == INTEGER_CST
			    && tree_int_cst_lt (TREE_OPERAND (v, 1),
						TYPE_MAX_VALUE (domain)))
			  {
			    v = NULL_TREE;
			    break;
			  }
		      }
		    v = TREE_OPERAND (v, 0);
		    break;
		  case REALPART_EXPR:
		  case IMAGPART_EXPR:
		    v = NULL_TREE;
		    break;
		  case COMPONENT_REF:
		    if (TREE_CODE (TREE_TYPE (v)) != ARRAY_TYPE)
		      {
			v = NULL_TREE;
			break;
		      }
		    while (v != pt_var && TREE_CODE (v) == COMPONENT_REF)
		      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (v, 0)))
			  != UNION_TYPE
			  && TREE_CODE (TREE_TYPE (TREE_OPERAND (v, 0)))
			  != QUAL_UNION_TYPE)
			break;
		      else
			v = TREE_OPERAND (v, 0);
		    if (TREE_CODE (v) == COMPONENT_REF
			&& TREE_CODE (TREE_TYPE (TREE_OPERAND (v, 0)))
			   == RECORD_TYPE)
		      {
			tree fld_chain = DECL_CHAIN (TREE_OPERAND (v, 1));
			for (; fld_chain; fld_chain = DECL_CHAIN (fld_chain))
			  if (TREE_CODE (fld_chain) == FIELD_DECL)
			    break;

			if (fld_chain)
			  {
			    v = NULL_TREE;
			    break;
			  }
			v = TREE_OPERAND (v, 0);
		      }
		    while (v != pt_var && TREE_CODE (v) == COMPONENT_REF)
		      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (v, 0)))
			  != UNION_TYPE
			  && TREE_CODE (TREE_TYPE (TREE_OPERAND (v, 0)))
			  != QUAL_UNION_TYPE)
			break;
		      else
			v = TREE_OPERAND (v, 0);
		    if (v != pt_var)
		      v = NULL_TREE;
		    else
		      v = pt_var;
		    break;
		  default:
		    v = pt_var;
		    break;
		  }
	      if (v == pt_var)
		var = pt_var;
	    }
	}
      else
	var = pt_var;

      if (var != pt_var)
	var_size = TYPE_SIZE_UNIT (TREE_TYPE (var));
      else if (!pt_var_size)
	return unknown[object_size_type];
      else
	var_size = pt_var_size;
      bytes = compute_object_offset (TREE_OPERAND (ptr, 0), var);
      if (bytes != error_mark_node)
	{
	  if (TREE_CODE (bytes) == INTEGER_CST
	      && tree_int_cst_lt (var_size, bytes))
	    bytes = size_zero_node;
	  else
	    bytes = size_binop (MINUS_EXPR, var_size, bytes);
	}
      if (var != pt_var
	  && pt_var_size
	  && TREE_CODE (pt_var) == MEM_REF
	  && bytes != error_mark_node)
	{
	  tree bytes2 = compute_object_offset (TREE_OPERAND (ptr, 0), pt_var);
	  if (bytes2 != error_mark_node)
	    {
	      if (TREE_CODE (bytes2) == INTEGER_CST
		  && tree_int_cst_lt (pt_var_size, bytes2))
		bytes2 = size_zero_node;
	      else
		bytes2 = size_binop (MINUS_EXPR, pt_var_size, bytes2);
	      bytes = size_binop (MIN_EXPR, bytes, bytes2);
	    }
	}
    }
  else if (!pt_var_size)
    return unknown[object_size_type];
  else
    bytes = pt_var_size;

  if (host_integerp (bytes, 1))
    return tree_low_cst (bytes, 1);

  return unknown[object_size_type];
}
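A sketch of the last-field heuristic from the comment above, using a hypothetical struct; the pre-C99 idiom declares a one-element trailing array but allocates more, so the subobject size of the last field cannot be trusted:

struct msg { int len; char data[1]; };  /* 'data' stands in for a
					   flexible array member.  */
/* For a struct msg * that points into malloc (sizeof (struct msg) + 100),
   &m->data reaches the last field, so the walk above falls back to the
   size of the whole pointed-to object rather than sizeof (m->data).  */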
Example #4
static bool
forward_propagate_addr_into_variable_array_index (tree offset,
						  tree def_rhs,
						  gimple_stmt_iterator *use_stmt_gsi)
{
  tree index, tunit;
  gimple offset_def, use_stmt = gsi_stmt (*use_stmt_gsi);
  tree tmp;

  tunit = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (def_rhs)));
  if (!host_integerp (tunit, 1))
    return false;

  /* Get the offset's defining statement.  */
  offset_def = SSA_NAME_DEF_STMT (offset);

  /* Try to find an expression for a proper index.  This is either a
     multiplication expression by the element size, or just the SSA name we
     arrived at when the element size is one.  In the latter case, however,
     we do not allow multiplications, because they may be computing the index
     of a higher-level dimension (PR 37861). */
  if (integer_onep (tunit))
    {
      if (is_gimple_assign (offset_def)
	  && gimple_assign_rhs_code (offset_def) == MULT_EXPR)
	return false;

      index = offset;
    }
  else
    {
      /* The statement which defines OFFSET before type conversion
         must be a simple GIMPLE_ASSIGN.  */
      if (!is_gimple_assign (offset_def))
	return false;

      /* The RHS of the statement which defines OFFSET must be a
	 multiplication of an object by the size of the array elements.
	 This implicitly verifies that the size of the array elements
	 is constant.  */
      if (gimple_assign_rhs_code (offset_def) == MULT_EXPR
	  && TREE_CODE (gimple_assign_rhs2 (offset_def)) == INTEGER_CST
	  && tree_int_cst_equal (gimple_assign_rhs2 (offset_def), tunit))
	{
	  /* The first operand to the MULT_EXPR is the desired index.  */
	  index = gimple_assign_rhs1 (offset_def);
	}
      /* If we have idx * tunit + CST * tunit, re-associate that.  */
      else if ((gimple_assign_rhs_code (offset_def) == PLUS_EXPR
		|| gimple_assign_rhs_code (offset_def) == MINUS_EXPR)
	       && TREE_CODE (gimple_assign_rhs1 (offset_def)) == SSA_NAME
	       && TREE_CODE (gimple_assign_rhs2 (offset_def)) == INTEGER_CST
	       && (tmp = div_if_zero_remainder (EXACT_DIV_EXPR,
						gimple_assign_rhs2 (offset_def),
						tunit)) != NULL_TREE)
	{
	  gimple offset_def2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (offset_def));
	  if (is_gimple_assign (offset_def2)
	      && gimple_assign_rhs_code (offset_def2) == MULT_EXPR
	      && TREE_CODE (gimple_assign_rhs2 (offset_def2)) == INTEGER_CST
	      && tree_int_cst_equal (gimple_assign_rhs2 (offset_def2), tunit))
	    {
	      index = fold_build2 (gimple_assign_rhs_code (offset_def),
				   TREE_TYPE (offset),
				   gimple_assign_rhs1 (offset_def2), tmp);
	    }
	  else
	    return false;
	}
      else
	return false;
    }

  /* Replace the pointer addition with array indexing.  */
  index = force_gimple_operand_gsi (use_stmt_gsi, index, true, NULL_TREE,
				    true, GSI_SAME_STMT);
  gimple_assign_set_rhs_from_tree (use_stmt_gsi, unshare_expr (def_rhs));
  use_stmt = gsi_stmt (*use_stmt_gsi);
  TREE_OPERAND (TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0), 1)
    = index;

  /* That should have created valid GIMPLE, so there is no need to
     record information to undo the propagation.  */
  fold_stmt_inplace (use_stmt);
  tidy_after_forward_propagate_addr (use_stmt);
  return true;
}
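At the source level, the shape of code this transformation targets is roughly the following (hypothetical example):

/* The GIMPLE for the load below computes
     offset_1 = (long) i_2 * 4;    <-- MULT_EXPR by the element size
     ptr_3 = &a[0] + offset_1;
   which this function rewrites into the array reference a[i_2].  */
extern int a[100];

int
get (int i)
{
  int *p = &a[0];
  return *(p + i);
}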
Example #5
/* Adapt gcc4.9 practice to gcc4.8 functions.  */
bool
tree_fits_uhwi_p (const_tree t)
{
  return host_integerp (t, 1);
}
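A companion shim in the same spirit, assuming callers also want gcc4.9's value accessor (tree_to_uhwi maps onto tree_low_cst with pos == 1):

unsigned HOST_WIDE_INT
tree_to_uhwi (const_tree t)
{
  return tree_low_cst (t, 1);
}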
Example #6
static int
plain_type_1 (tree type, int level)
{
  if (type == 0)
    type = void_type_node;
  else if (type == error_mark_node)
    type = integer_type_node;
  else
    type = TYPE_MAIN_VARIANT (type);

  switch (TREE_CODE (type))
    {
    case VOID_TYPE:
    case NULLPTR_TYPE:
      return T_VOID;
    case BOOLEAN_TYPE:
    case INTEGER_TYPE:
      {
	int size = int_size_in_bytes (type) * BITS_PER_UNIT;

	/* Carefully distinguish all the standard types of C,
	   without messing up if the language is not C.
	   Note that we check only for the names that contain spaces;
	   other names might occur by coincidence in other languages.  */
	if (TYPE_NAME (type) != 0
	    && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
	    && DECL_NAME (TYPE_NAME (type)) != 0
	    && TREE_CODE (DECL_NAME (TYPE_NAME (type))) == IDENTIFIER_NODE)
	  {
	    const char *const name
	      = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)));

	    if (!strcmp (name, "char"))
	      return T_CHAR;
	    if (!strcmp (name, "unsigned char"))
	      return T_UCHAR;
	    if (!strcmp (name, "signed char"))
	      return T_CHAR;
	    if (!strcmp (name, "int"))
	      return T_INT;
	    if (!strcmp (name, "unsigned int"))
	      return T_UINT;
	    if (!strcmp (name, "short int"))
	      return T_SHORT;
	    if (!strcmp (name, "short unsigned int"))
	      return T_USHORT;
	    if (!strcmp (name, "long int"))
	      return T_LONG;
	    if (!strcmp (name, "long unsigned int"))
	      return T_ULONG;
	  }

	if (size == INT_TYPE_SIZE)
	  return (TYPE_UNSIGNED (type) ? T_UINT : T_INT);
	if (size == CHAR_TYPE_SIZE)
	  return (TYPE_UNSIGNED (type) ? T_UCHAR : T_CHAR);
	if (size == SHORT_TYPE_SIZE)
	  return (TYPE_UNSIGNED (type) ? T_USHORT : T_SHORT);
	if (size == LONG_TYPE_SIZE)
	  return (TYPE_UNSIGNED (type) ? T_ULONG : T_LONG);
	if (size == LONG_LONG_TYPE_SIZE)	/* better than nothing */
	  return (TYPE_UNSIGNED (type) ? T_ULONG : T_LONG);
	return 0;
      }

    case REAL_TYPE:
      {
	int precision = TYPE_PRECISION (type);
	if (precision == FLOAT_TYPE_SIZE)
	  return T_FLOAT;
	if (precision == DOUBLE_TYPE_SIZE)
	  return T_DOUBLE;
#ifdef EXTENDED_SDB_BASIC_TYPES
	if (precision == LONG_DOUBLE_TYPE_SIZE)
	  return T_LNGDBL;
#else
	if (precision == LONG_DOUBLE_TYPE_SIZE)
	  return T_DOUBLE;	/* better than nothing */
#endif
	return 0;
      }

    case ARRAY_TYPE:
      {
	int m;
	if (level >= 6)
	  return T_VOID;
	else
	  m = plain_type_1 (TREE_TYPE (type), level+1);
	if (sdb_n_dims < SDB_MAX_DIM)
	  sdb_dims[sdb_n_dims++]
	    = (TYPE_DOMAIN (type)
	       && TYPE_MIN_VALUE (TYPE_DOMAIN (type)) != 0
	       && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) != 0
	       && host_integerp (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0)
	       && host_integerp (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0)
	       ? (tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0)
		  - tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) + 1)
	       : 0);

	return PUSH_DERIVED_LEVEL (DT_ARY, m);
      }

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
    case ENUMERAL_TYPE:
      {
	const char *tag;
#ifdef SDB_ALLOW_FORWARD_REFERENCES
	sdbout_record_type_name (type);
#endif
#ifndef SDB_ALLOW_UNKNOWN_REFERENCES
	if ((TREE_ASM_WRITTEN (type) && KNOWN_TYPE_TAG (type) != 0)
#ifdef SDB_ALLOW_FORWARD_REFERENCES
	    || TYPE_MODE (type) != VOIDmode
#endif
	    )
#endif
	  {
	    /* Output the referenced structure tag name
	       only if the .def has already been finished.
	       At least on 386, the Unix assembler
	       cannot handle forward references to tags.  */
	    /* But the 88100 requires them, sigh...  */
	    /* And the MIPS requires unknown refs as well...  */
	    tag = KNOWN_TYPE_TAG (type);
	    PUT_SDB_TAG (tag);
	    /* These 3 lines used to follow the close brace.
	       However, a size of 0 without a tag implies a tag of 0,
	       so if we don't know a tag, we can't mention the size.  */
	    sdb_type_size = int_size_in_bytes (type);
	    if (sdb_type_size < 0)
	      sdb_type_size = 0;
	  }
	return ((TREE_CODE (type) == RECORD_TYPE) ? T_STRUCT
		: (TREE_CODE (type) == UNION_TYPE) ? T_UNION
		: (TREE_CODE (type) == QUAL_UNION_TYPE) ? T_UNION
		: T_ENUM);
      }
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      {
	int m;
	if (level >= 6)
	  return T_VOID;
	else
	  m = plain_type_1 (TREE_TYPE (type), level+1);
	return PUSH_DERIVED_LEVEL (DT_PTR, m);
      }
    case FUNCTION_TYPE:
    case METHOD_TYPE:
      {
	int m;
	if (level >= 6)
	  return T_VOID;
	else
	  m = plain_type_1 (TREE_TYPE (type), level+1);
	return PUSH_DERIVED_LEVEL (DT_FCN, m);
      }
    default:
      return 0;
    }
}
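To make the mapping concrete, a few declarations and the codes plain_type_1 would plausibly assign for them (an assumption; the size-based fallbacks depend on the target's type sizes):

char c;           /* T_CHAR, matched by type name */
unsigned int u;   /* T_UINT */
float f;          /* T_FLOAT, precision == FLOAT_TYPE_SIZE */
int v[4];         /* PUSH_DERIVED_LEVEL (DT_ARY, T_INT); sdb_dims
		     records 3 - 0 + 1 = 4 */
int *p;           /* PUSH_DERIVED_LEVEL (DT_PTR, T_INT) */
int (*fp) (void); /* DT_PTR over DT_FCN over T_INT */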
Example #7
static void
sdbout_one_type (tree type)
{
  if (current_function_decl != NULL_TREE
      && DECL_SECTION_NAME (current_function_decl) != NULL_TREE)
    ; /* Don't change section amid function.  */
  else
    switch_to_section (text_section);

  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
    case ENUMERAL_TYPE:
      type = TYPE_MAIN_VARIANT (type);
      /* Don't output a type twice.  */
      if (TREE_ASM_WRITTEN (type))
	/* James said test TREE_ASM_BEING_WRITTEN here.  */
	return;

      /* Output nothing if type is not yet defined.  */
      if (!COMPLETE_TYPE_P (type))
	return;

      TREE_ASM_WRITTEN (type) = 1;

      /* This is reputed to cause trouble with the following case,
	 but perhaps checking TYPE_SIZE above will fix it.  */

      /* Here is a testcase:

	struct foo {
	  struct badstr *bbb;
	} forwardref;

	typedef struct intermediate {
	  int aaaa;
	} intermediate_ref;

	typedef struct badstr {
	  int ccccc;
	} badtype;   */

      /* This change, which ought to make better output,
	 used to make the COFF assembler unhappy.
	 Changes involving KNOWN_TYPE_TAG may fix the problem.  */
      /* Before really doing anything, output types we want to refer to.  */
      /* Note that in version 1 the following two lines
	 are not used if forward references are in use.  */
      if (TREE_CODE (type) != ENUMERAL_TYPE)
	sdbout_field_types (type);

      /* Output a structure type.  */
      {
	int size = int_size_in_bytes (type);
	int member_scl = 0;
	tree tem;

	/* Record the type tag, but not in its permanent place just yet.  */
	sdbout_record_type_name (type);

	PUT_SDB_DEF (KNOWN_TYPE_TAG (type));

	switch (TREE_CODE (type))
	  {
	  case UNION_TYPE:
	  case QUAL_UNION_TYPE:
	    PUT_SDB_SCL (C_UNTAG);
	    PUT_SDB_TYPE (T_UNION);
	    member_scl = C_MOU;
	    break;

	  case RECORD_TYPE:
	    PUT_SDB_SCL (C_STRTAG);
	    PUT_SDB_TYPE (T_STRUCT);
	    member_scl = C_MOS;
	    break;

	  case ENUMERAL_TYPE:
	    PUT_SDB_SCL (C_ENTAG);
	    PUT_SDB_TYPE (T_ENUM);
	    member_scl = C_MOE;
	    break;

	  default:
	    break;
	  }

	PUT_SDB_SIZE (size);
	PUT_SDB_ENDEF;

	/* Print out the base class information with fields
	   named after the types they hold.  */
	/* This is only relevant to aggregate types.  TYPE_BINFO is used
	   for other purposes in an ENUMERAL_TYPE, so we must exclude that
	   case.  */
	if (TREE_CODE (type) != ENUMERAL_TYPE && TYPE_BINFO (type))
	  {
	    int i;
	    tree binfo, child;

	    for (binfo = TYPE_BINFO (type), i = 0;
		 BINFO_BASE_ITERATE (binfo, i, child); i++)
	      {
		tree child_type = BINFO_TYPE (child);
		tree child_type_name;

		if (TYPE_NAME (child_type) == 0)
		  continue;
		if (TREE_CODE (TYPE_NAME (child_type)) == IDENTIFIER_NODE)
		  child_type_name = TYPE_NAME (child_type);
		else if (TREE_CODE (TYPE_NAME (child_type)) == TYPE_DECL)
		  {
		    child_type_name = DECL_NAME (TYPE_NAME (child_type));
		    if (child_type_name && template_name_p (child_type_name))
		      child_type_name
			= DECL_ASSEMBLER_NAME (TYPE_NAME (child_type));
		  }
		else
		  continue;

		PUT_SDB_DEF (IDENTIFIER_POINTER (child_type_name));
		PUT_SDB_INT_VAL (tree_low_cst (BINFO_OFFSET (child), 0));
		PUT_SDB_SCL (member_scl);
		sdbout_type (BINFO_TYPE (child));
		PUT_SDB_ENDEF;
	      }
	  }

	/* Output the individual fields.  */

	if (TREE_CODE (type) == ENUMERAL_TYPE)
	  {
	    for (tem = TYPE_VALUES (type); tem; tem = TREE_CHAIN (tem))
	      {
	        tree value = TREE_VALUE (tem);

	        if (TREE_CODE (value) == CONST_DECL)
	          value = DECL_INITIAL (value);

	        if (host_integerp (value, 0))
		  {
		    PUT_SDB_DEF (IDENTIFIER_POINTER (TREE_PURPOSE (tem)));
		    PUT_SDB_INT_VAL (tree_low_cst (value, 0));
		    PUT_SDB_SCL (C_MOE);
		    PUT_SDB_TYPE (T_MOE);
		    PUT_SDB_ENDEF;
		  }
	      }
	  }
	else			/* record or union type */
	  for (tem = TYPE_FIELDS (type); tem; tem = TREE_CHAIN (tem))
	    /* Output the name, type, position (in bits), size (in bits)
	       of each field.  */

	    /* Omit here the nameless fields that are used to skip bits.
	       Also omit fields with variable size or position.
	       Also omit non FIELD_DECL nodes that GNU C++ may put here.  */
	    if (TREE_CODE (tem) == FIELD_DECL
		&& DECL_NAME (tem)
		&& DECL_SIZE (tem)
		&& host_integerp (DECL_SIZE (tem), 1)
		&& host_integerp (bit_position (tem), 0))
	      {
		const char *name;

		name = IDENTIFIER_POINTER (DECL_NAME (tem));
		PUT_SDB_DEF (name);
		if (DECL_BIT_FIELD_TYPE (tem))
		  {
		    PUT_SDB_INT_VAL (int_bit_position (tem));
		    PUT_SDB_SCL (C_FIELD);
		    sdbout_type (DECL_BIT_FIELD_TYPE (tem));
		    PUT_SDB_SIZE (tree_low_cst (DECL_SIZE (tem), 1));
		  }
		else
		  {
		    PUT_SDB_INT_VAL (int_bit_position (tem) / BITS_PER_UNIT);
		    PUT_SDB_SCL (member_scl);
		    sdbout_type (TREE_TYPE (tem));
		  }
		PUT_SDB_ENDEF;
	      }
	/* Output the end of a structure, union, or enum definition.  */

	PUT_SDB_PLAIN_DEF ("eos");
	PUT_SDB_INT_VAL (size);
	PUT_SDB_SCL (C_EOS);
	PUT_SDB_TAG (KNOWN_TYPE_TAG (type));
	PUT_SDB_SIZE (size);
	PUT_SDB_ENDEF;
	break;
      }

    default:
      break;
    }
}
Example #8
static gimple
vect_recog_pow_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  tree fn, base, exp = NULL;
  gimple stmt;
  tree var;

  if (!is_gimple_call (last_stmt) || gimple_call_lhs (last_stmt) == NULL)
    return NULL;

  fn = gimple_call_fndecl (last_stmt);
  if (fn == NULL_TREE || DECL_BUILT_IN_CLASS (fn) != BUILT_IN_NORMAL)
    return NULL;

  switch (DECL_FUNCTION_CODE (fn))
    {
    case BUILT_IN_POWIF:
    case BUILT_IN_POWI:
    case BUILT_IN_POWF:
    case BUILT_IN_POW:
      base = gimple_call_arg (last_stmt, 0);
      exp = gimple_call_arg (last_stmt, 1);
      if (TREE_CODE (exp) != REAL_CST
	  && TREE_CODE (exp) != INTEGER_CST)
        return NULL;
      break;

    default:
      return NULL;
    }

  /* We now have a pow or powi builtin function call with a constant
     exponent.  */

  *type_out = NULL_TREE;

  /* Catch squaring.  */
  if ((host_integerp (exp, 0)
       && tree_low_cst (exp, 0) == 2)
      || (TREE_CODE (exp) == REAL_CST
          && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconst2)))
    {
      *type_in = TREE_TYPE (base);

      var = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
      stmt = gimple_build_assign_with_ops (MULT_EXPR, var, base, base);
      SSA_NAME_DEF_STMT (var) = stmt;
      return stmt;
    }

  /* Catch square root.  */
  if (TREE_CODE (exp) == REAL_CST
      && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconsthalf))
    {
      tree newfn = mathfn_built_in (TREE_TYPE (base), BUILT_IN_SQRT);
      *type_in = get_vectype_for_scalar_type (TREE_TYPE (base));
      if (*type_in)
	{
	  gimple stmt = gimple_build_call (newfn, 1, base);
	  if (vectorizable_function (stmt, *type_in, *type_in)
	      != NULL_TREE)
	    {
	      var = vect_recog_temp_ssa_var (TREE_TYPE (base), stmt);
	      gimple_call_set_lhs (stmt, var);
	      return stmt;
	    }
	}
    }

  return NULL;
}
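For reference, the source forms this recognizer fires on (a sketch; whether the result vectorizes still depends on the target):

#include <math.h>

void
square_all (float *x, int n)
{
  for (int i = 0; i < n; i++)
    x[i] = powf (x[i], 2.0f);  /* recognized as x[i] * x[i] */
}

void
sqrt_all (float *x, int n)
{
  for (int i = 0; i < n; i++)
    x[i] = powf (x[i], 0.5f);  /* recognized as sqrtf (x[i]) */
}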
Example #9
static unsigned
self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
		     struct loop *loop)
{
  tree stride, access_fn;
  HOST_WIDE_INT *strides, astride;
  VEC (tree, heap) *access_fns;
  tree ref = DR_REF (dr);
  unsigned i, ret = ~0u;

  /* In the following example:

     for (i = 0; i < N; i++)
       for (j = 0; j < N; j++)
         use (a[j][i]);
     the same cache line is accessed every N steps (except when the change from
     i to i + 1 crosses a cache line boundary).  Thus, for self-reuse, we
     cannot rely purely on the results of the data dependence analysis.

     Instead, we compute the stride of the reference in each loop, and consider
     the innermost loop in which the stride is less than the cache size.  */

  strides = XCNEWVEC (HOST_WIDE_INT, n);
  access_fns = DR_ACCESS_FNS (dr);

  for (i = 0; VEC_iterate (tree, access_fns, i, access_fn); i++)
    {
      /* Keep track of the reference corresponding to the subscript, so that we
	 know its stride.  */
      while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
	ref = TREE_OPERAND (ref, 0);
      
      if (TREE_CODE (ref) == ARRAY_REF)
	{
	  stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
	  if (host_integerp (stride, 1))
	    astride = tree_low_cst (stride, 1);
	  else
	    astride = L1_CACHE_LINE_SIZE;

	  ref = TREE_OPERAND (ref, 0);
	}
      else
	astride = 1;

      add_subscript_strides (access_fn, astride, strides, n, loop);
    }

  for (i = n; i-- > 0; )
    {
      unsigned HOST_WIDE_INT s;

      s = strides[i] < 0 ?  -strides[i] : strides[i];

      if (s < (unsigned) L1_CACHE_LINE_SIZE
	  && (loop_sizes[i]
	      > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
	{
	  ret = loop_sizes[i];
	  break;
	}
    }

  free (strides);
  return ret;
}
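The loop nest from the comment above, written out with hypothetical sizes; the inner loop strides N * sizeof (int) bytes per iteration, so self-reuse only occurs at the level of the outer loop:

#define N 1024
extern int a[N][N];

int
column_sum (void)
{
  int s = 0;
  for (int i = 0; i < N; i++)
    for (int j = 0; j < N; j++)
      s += a[j][i];
  return s;
}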
Example #10
static bool
try_unroll_loop_completely (struct loop *loop,
			    edge exit, tree niter,
			    enum unroll_level ul,
			    HOST_WIDE_INT maxiter,
			    location_t locus)
{
  unsigned HOST_WIDE_INT n_unroll, ninsns, max_unroll, unr_insns;
  gimple cond;
  struct loop_size size;
  bool n_unroll_found = false;
  edge edge_to_cancel = NULL;

  /* See if we proved the number of iterations to be a low constant.

     EXIT is an edge that will be removed in all but the last iteration of
     the loop.

     EDGE_TO_CANCEL is an edge that will be removed from the last iteration
     of the unrolled sequence and is expected to make the final loop not
     rolling.

     If the number of executions of the loop is determined by a standard
     induction variable test, then EXIT and EDGE_TO_CANCEL are the two edges
     leaving the IV test.  */
  if (host_integerp (niter, 1))
    {
      n_unroll = tree_low_cst (niter, 1);
      n_unroll_found = true;
      edge_to_cancel = EDGE_SUCC (exit->src, 0);
      if (edge_to_cancel == exit)
	edge_to_cancel = EDGE_SUCC (exit->src, 1);
    }
  /* We do not know the number of iterations, and thus we cannot
     eliminate the EXIT edge.  */
  else
    exit = NULL;

  /* See if we can improve our estimate by using recorded loop bounds.  */
  if (maxiter >= 0
      && (!n_unroll_found || (unsigned HOST_WIDE_INT)maxiter < n_unroll))
    {
      n_unroll = maxiter;
      n_unroll_found = true;
      /* The loop terminates before the IV test, so we cannot remove
	 it in the last iteration.  */
      edge_to_cancel = NULL;
    }

  if (!n_unroll_found)
    return false;

  max_unroll = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES);
  if (n_unroll > max_unroll)
    return false;

  if (!edge_to_cancel)
    edge_to_cancel = loop_edge_to_cancel (loop);

  if (n_unroll)
    {
      sbitmap wont_exit;
      edge e;
      unsigned i;
      bool large;
      vec<edge> to_remove = vNULL;
      if (ul == UL_SINGLE_ITER)
	return false;

      large = tree_estimate_loop_size
		 (loop, exit, edge_to_cancel, &size,
		  PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS));
      ninsns = size.overall;
      if (large)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: it is too large.\n",
		     loop->num);
	  return false;
	}

      unr_insns = estimated_unrolled_size (&size, n_unroll);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "  Loop size: %d\n", (int) ninsns);
	  fprintf (dump_file, "  Estimated size after unrolling: %d\n",
		   (int) unr_insns);
	}

      /* If the code is going to shrink, we don't need to be extra cautious
	 on guessing if the unrolling is going to be profitable.  */
      if (unr_insns
	  /* If there is an IV that will become constant, we save one
	     instruction in the loop prologue that we do not otherwise
	     account for.  */
	  <= ninsns + (size.constant_iv != false))
	;
      /* We unroll only inner loops, because we do not consider it profitable
	 otherwise.  We can still cancel the loopback edge of a non-rolling
	 loop; this is always a good idea.  */
      else if (ul == UL_NO_GROWTH)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: size would grow.\n",
		     loop->num);
	  return false;
	}
      /* Outer loops tend to be less interesting candidates for complete
	 unrolling unless we can do a lot of propagation into the inner loop
	 body.  For now we disable outer loop unrolling when the code would
	 grow.  */
      else if (loop->inner)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "it is not innermost and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* If there is a call on a hot path through the loop, then there
	 is most probably not much to optimize.  */
      else if (size.num_non_pure_calls_on_hot_path)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "contains call and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* If there is a pure/const call in the function, then we can
	 still optimize the unrolled loop body if it contains some
	 other interesting code than the calls and the code storing or
	 accumulating the return value.  */
      else if (size.num_pure_calls_on_hot_path
	       /* One IV increment, one test, one ivtmp store and one
		  useful stmt.  That is about the minimal loop that
		  does a pure call.  */
	       && (size.non_call_stmts_on_hot_path
		   <= 3 + size.num_pure_calls_on_hot_path))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "contains just pure calls and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* Complete unrolling is a major win when control flow is removed and
	 one big basic block is created.  If the loop contains control flow,
	 the optimization may still be a win because the loop overhead is
	 eliminated, but it may also blow the branch predictor tables.
	 Limit the number of branches on the hot path through the peeled
	 sequence.  */
      else if (size.num_branches_on_hot_path * (int)n_unroll
	       > PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     " number of branches on hot path in the unrolled sequence"
		     " reach --param max-peel-branches limit.\n",
		     loop->num);
	  return false;
	}
      else if (unr_insns
	       > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "(--param max-completely-peeled-insns limit reached).\n",
		     loop->num);
	  return false;
	}

      initialize_original_copy_tables ();
      wont_exit = sbitmap_alloc (n_unroll + 1);
      bitmap_ones (wont_exit);
      bitmap_clear_bit (wont_exit, 0);

      if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
						 n_unroll, wont_exit,
						 exit, &to_remove,
						 DLTHE_FLAG_UPDATE_FREQ
						 | DLTHE_FLAG_COMPLETTE_PEEL))
	{
          free_original_copy_tables ();
	  free (wont_exit);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Failed to duplicate the loop\n");
	  return false;
	}

      FOR_EACH_VEC_ELT (to_remove, i, e)
	{
	  bool ok = remove_path (e);
	  gcc_assert (ok);
	}

      to_remove.release ();
      free (wont_exit);
      free_original_copy_tables ();
    }