Example #1
void
omp_adjust_for_condition (location_t loc, enum tree_code *cond_code, tree *n2)
{
  switch (*cond_code)
    {
    case LT_EXPR:
    case GT_EXPR:
    case NE_EXPR:
      break;
    case LE_EXPR:
      if (POINTER_TYPE_P (TREE_TYPE (*n2)))
	*n2 = fold_build_pointer_plus_hwi_loc (loc, *n2, 1);
      else
	*n2 = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (*n2), *n2,
			       build_int_cst (TREE_TYPE (*n2), 1));
      *cond_code = LT_EXPR;
      break;
    case GE_EXPR:
      if (POINTER_TYPE_P (TREE_TYPE (*n2)))
	*n2 = fold_build_pointer_plus_hwi_loc (loc, *n2, -1);
      else
	*n2 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (*n2), *n2,
			       build_int_cst (TREE_TYPE (*n2), 1));
      *cond_code = GT_EXPR;
      break;
    default:
      gcc_unreachable ();
    }
}
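The switch above canonicalizes inclusive loop bounds into exclusive ones (LE becomes LT, GE becomes GT) by bumping the bound by one element. A minimal source-level sketch of the effect, with an illustrative loop that is not from the GCC sources:

/* Sketch: 'i <= n' is treated as 'i < n + 1' after adjustment
   (and 'i >= n' as 'i > n - 1').  */
static long
sum_inclusive (long n)
{
  long s = 0;
  for (long i = 0; i < n + 1; i++)  /* originally: i <= n */
    s += i;
  return s;
}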
Example #2
/* Call malloc to allocate size bytes of memory, with special conditions:
      + if size == 0, return a malloced area of size 1,
      + if malloc returns NULL, issue a runtime error.  */
tree
gfc_call_malloc (stmtblock_t * block, tree type, tree size)
{
  tree tmp, msg, malloc_result, null_result, res, malloc_tree;
  stmtblock_t block2;

  size = gfc_evaluate_now (size, block);

  if (TREE_TYPE (size) != TREE_TYPE (size_type_node))
    size = fold_convert (size_type_node, size);

  /* Create a variable to hold the result.  */
  res = gfc_create_var (prvoid_type_node, NULL);

  /* Call malloc.  */
  gfc_start_block (&block2);

  size = fold_build2_loc (input_location, MAX_EXPR, size_type_node, size,
			  build_int_cst (size_type_node, 1));

  malloc_tree = builtin_decl_explicit (BUILT_IN_MALLOC);
  gfc_add_modify (&block2, res,
		  fold_convert (prvoid_type_node,
				build_call_expr_loc (input_location,
						     malloc_tree, 1, size)));

  /* Optionally check whether malloc was successful.  */
  if (gfc_option.rtcheck & GFC_RTCHECK_MEM)
    {
      null_result = fold_build2_loc (input_location, EQ_EXPR,
				     boolean_type_node, res,
				     build_int_cst (pvoid_type_node, 0));
      msg = gfc_build_addr_expr (pchar_type_node,
	      gfc_build_localized_cstring_const ("Memory allocation failed"));
      tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
			     null_result,
	      build_call_expr_loc (input_location,
				   gfor_fndecl_os_error, 1, msg),
				   build_empty_stmt (input_location));
      gfc_add_expr_to_block (&block2, tmp);
    }

  malloc_result = gfc_finish_block (&block2);

  gfc_add_expr_to_block (block, malloc_result);

  if (type != NULL)
    res = fold_convert (type, res);
  return res;
}
Example #3
static void perturb_latent_entropy(basic_block bb, tree rhs)
{
	gimple_stmt_iterator gsi;
	gimple assign;
	tree addxorrol, temp;

	// 1. create temporary copy of latent_entropy
	temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
	add_referenced_var(temp);

	// 2. read...
	temp = make_ssa_name(temp, NULL);
	assign = gimple_build_assign(temp, latent_entropy_decl);
	SSA_NAME_DEF_STMT(temp) = assign;
	add_referenced_var(latent_entropy_decl);
	gsi = gsi_after_labels(bb);
	gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
	update_stmt(assign);

	// 3. ...modify...
	addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
	temp = make_ssa_name(SSA_NAME_VAR(temp), NULL);
	assign = gimple_build_assign(temp, addxorrol);
	SSA_NAME_DEF_STMT(temp) = assign;
	gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
	update_stmt(assign);

	// 4. ...write latent_entropy
	assign = gimple_build_assign(latent_entropy_decl, temp);
	gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
	update_stmt(assign);
}
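In C terms the three statements built here form a read-modify-write of the global entropy word; a sketch assuming get_op picked XOR (the plugin may equally return PLUS_EXPR or a rotate), with illustrative names:

/* Sketch of the emitted statement sequence, as plain C.  */
static unsigned long long latent_entropy;

static void
perturb_sketch (unsigned long long rhs)
{
  unsigned long long temp = latent_entropy;  /* 2. read    */
  temp ^= rhs;                               /* 3. modify  */
  latent_entropy = temp;                     /* 4. write   */
}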
Example #4
tree
gfc_truthvalue_conversion (tree expr)
{
  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case BOOLEAN_TYPE:
      if (TREE_TYPE (expr) == boolean_type_node)
	return expr;
      else if (COMPARISON_CLASS_P (expr))
	{
	  TREE_TYPE (expr) = boolean_type_node;
	  return expr;
	}
      else if (TREE_CODE (expr) == NOP_EXPR)
        return fold_build1_loc (input_location, NOP_EXPR,
			    boolean_type_node, TREE_OPERAND (expr, 0));
      else
        return fold_build1_loc (input_location, NOP_EXPR, boolean_type_node,
				expr);

    case INTEGER_TYPE:
      if (TREE_CODE (expr) == INTEGER_CST)
	return integer_zerop (expr) ? boolean_false_node : boolean_true_node;
      else
        return fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
				expr, build_int_cst (TREE_TYPE (expr), 0));

    default:
      internal_error ("Unexpected type in truthvalue_conversion");
    }
}
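The INTEGER_TYPE branch reduces to the comparison expr != 0; in C:

/* Sketch: what the NE_EXPR built above denotes.  */
static _Bool
int_truthvalue (long expr)
{
  return expr != 0;
}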
Example #5
/* Allocate memory, using an optional status argument.
 
   This function follows the following pseudo-code:

    void *
    allocate (size_t size, integer_type stat)
    {
      void *newmem;
    
      if (stat requested)
	stat = 0;

      newmem = malloc (MAX (size, 1));
      if (newmem == NULL)
      {
        if (stat)
          *stat = LIBERROR_ALLOCATION;
        else
	  runtime_error ("Allocation would exceed memory limit");
      }
      return newmem;
    }  */
void
gfc_allocate_using_malloc (stmtblock_t * block, tree pointer,
			   tree size, tree status)
{
  tree tmp, on_error, error_cond;
  tree status_type = status ? TREE_TYPE (status) : NULL_TREE;

  /* Evaluate size only once, and make sure it has the right type.  */
  size = gfc_evaluate_now (size, block);
  if (TREE_TYPE (size) != TREE_TYPE (size_type_node))
    size = fold_convert (size_type_node, size);

  /* If successful and stat= is given, set status to 0.  */
  if (status != NULL_TREE)
      gfc_add_expr_to_block (block,
	     fold_build2_loc (input_location, MODIFY_EXPR, status_type,
			      status, build_int_cst (status_type, 0)));

  /* The allocation itself.  */
  gfc_add_modify (block, pointer,
	  fold_convert (TREE_TYPE (pointer),
		build_call_expr_loc (input_location,
			     builtin_decl_explicit (BUILT_IN_MALLOC), 1,
			     fold_build2_loc (input_location,
				      MAX_EXPR, size_type_node, size,
				      build_int_cst (size_type_node, 1)))));

  /* What to do in case of error.  */
  if (status != NULL_TREE)
    on_error = fold_build2_loc (input_location, MODIFY_EXPR, status_type,
			status, build_int_cst (status_type, LIBERROR_ALLOCATION));
  else
    on_error = build_call_expr_loc (input_location, gfor_fndecl_os_error, 1,
		    gfc_build_addr_expr (pchar_type_node,
				 gfc_build_localized_cstring_const
				 ("Allocation would exceed memory limit")));

  error_cond = fold_build2_loc (input_location, EQ_EXPR,
				boolean_type_node, pointer,
				build_int_cst (prvoid_type_node, 0));
  tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
			 gfc_unlikely (error_cond), on_error,
			 build_empty_stmt (input_location));

  gfc_add_expr_to_block (block, tmp);
}
Example #6
void
gfc_trans_runtime_check (bool error, bool once, tree cond, stmtblock_t * pblock,
			 locus * where, const char * msgid, ...)
{
  va_list ap;
  stmtblock_t block;
  tree body;
  tree tmp;
  tree tmpvar = NULL;

  if (integer_zerop (cond))
    return;

  if (once)
    {
       tmpvar = gfc_create_var (boolean_type_node, "print_warning");
       TREE_STATIC (tmpvar) = 1;
       DECL_INITIAL (tmpvar) = boolean_true_node;
       gfc_add_expr_to_block (pblock, tmpvar);
    }

  gfc_start_block (&block);

  /* The code to generate the error.  */
  va_start (ap, msgid);
  gfc_add_expr_to_block (&block,
			 trans_runtime_error_vararg (error, where,
						     msgid, ap));

  if (once)
    gfc_add_modify (&block, tmpvar, boolean_false_node);

  body = gfc_finish_block (&block);

  if (integer_onep (cond))
    {
      gfc_add_expr_to_block (pblock, body);
    }
  else
    {
      /* Tell the compiler that this isn't likely.  */
      if (once)
	cond = fold_build2_loc (where->lb->location, TRUTH_AND_EXPR,
				long_integer_type_node, tmpvar, cond);
      else
	cond = fold_convert (long_integer_type_node, cond);

      tmp = build_int_cst (long_integer_type_node, 0);
      cond = build_call_expr_loc (where->lb->location,
			      built_in_decls[BUILT_IN_EXPECT], 2, cond, tmp);
      cond = fold_convert (boolean_type_node, cond);

      tmp = fold_build3_loc (where->lb->location, COND_EXPR, void_type_node,
			     cond, body,
			     build_empty_stmt (where->lb->location));
      gfc_add_expr_to_block (pblock, tmp);
    }
}
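For a non-constant condition, the check is routed through __builtin_expect so the error path is predicted not-taken. Roughly, as C, where report_error is a hypothetical stand-in for the vararg error helper:

extern void report_error (void);

/* Sketch: the guard emitted above; print_warning is the static flag
   created when ONCE is set.  */
static _Bool print_warning = 1;

static void
checked_error (long cond)
{
  if (__builtin_expect (print_warning && cond, 0))
    {
      report_error ();   /* hypothetical; the real call formats varargs */
      print_warning = 0; /* only emitted when ONCE was requested */
    }
}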
Example #7
/* Reallocate MEM so it has SIZE bytes of data.  This behaves like the
   following pseudo-code:

void *
internal_realloc (void *mem, size_t size)
{
  res = realloc (mem, size);
  if (!res && size != 0)
    _gfortran_os_error ("Allocation would exceed memory limit");

  if (size == 0)
    return NULL;

  return res;
}  */
tree
gfc_call_realloc (stmtblock_t * block, tree mem, tree size)
{
  tree msg, res, nonzero, zero, null_result, tmp;
  tree type = TREE_TYPE (mem);

  size = gfc_evaluate_now (size, block);

  if (TREE_TYPE (size) != TREE_TYPE (size_type_node))
    size = fold_convert (size_type_node, size);

  /* Create a variable to hold the result.  */
  res = gfc_create_var (type, NULL);

  /* Call realloc and check the result.  */
  tmp = build_call_expr_loc (input_location,
			 builtin_decl_explicit (BUILT_IN_REALLOC), 2,
			 fold_convert (pvoid_type_node, mem), size);
  gfc_add_modify (block, res, fold_convert (type, tmp));
  null_result = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
				 res, build_int_cst (pvoid_type_node, 0));
  nonzero = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, size,
			     build_int_cst (size_type_node, 0));
  null_result = fold_build2_loc (input_location, TRUTH_AND_EXPR, boolean_type_node,
				 null_result, nonzero);
  msg = gfc_build_addr_expr (pchar_type_node, gfc_build_localized_cstring_const
			     ("Allocation would exceed memory limit"));
  tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
			 null_result,
			 build_call_expr_loc (input_location,
					      gfor_fndecl_os_error, 1, msg),
			 build_empty_stmt (input_location));
  gfc_add_expr_to_block (block, tmp);

  /* if (size == 0) then the result is NULL.  */
  tmp = fold_build2_loc (input_location, MODIFY_EXPR, type, res,
			 build_int_cst (type, 0));
  zero = fold_build1_loc (input_location, TRUTH_NOT_EXPR, boolean_type_node,
			  nonzero);
  tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, zero, tmp,
			 build_empty_stmt (input_location));
  gfc_add_expr_to_block (block, tmp);

  return res;
}
Example #8
tree
gfc_build_array_ref (tree base, tree offset, tree decl)
{
  tree type = TREE_TYPE (base);
  tree tmp;

  gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
  type = TREE_TYPE (type);

  if (DECL_P (base))
    TREE_ADDRESSABLE (base) = 1;

  /* Strip NON_LVALUE_EXPR nodes.  */
  STRIP_TYPE_NOPS (offset);

  /* If the array reference is to a pointer, whose target contains a
     subreference, use the span that is stored with the backend decl
     and reference the element with pointer arithmetic.  */
  if (decl && (TREE_CODE (decl) == FIELD_DECL
		 || TREE_CODE (decl) == VAR_DECL
		 || TREE_CODE (decl) == PARM_DECL)
	&& GFC_DECL_SUBREF_ARRAY_P (decl)
	&& !integer_zerop (GFC_DECL_SPAN(decl)))
    {
      offset = fold_build2_loc (input_location, MULT_EXPR,
				gfc_array_index_type,
				offset, GFC_DECL_SPAN(decl));
      tmp = gfc_build_addr_expr (pvoid_type_node, base);
      tmp = fold_build2_loc (input_location, POINTER_PLUS_EXPR,
			     pvoid_type_node, tmp,
			     fold_convert (sizetype, offset));
      tmp = fold_convert (build_pointer_type (type), tmp);
      if (!TYPE_STRING_FLAG (type))
	tmp = build_fold_indirect_ref_loc (input_location, tmp);
      return tmp;
    }
  else
    /* Otherwise use a straightforward array reference.  */
    return build4_loc (input_location, ARRAY_REF, type, base, offset,
		       NULL_TREE, NULL_TREE);
}
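In the subreference case the element address is computed with byte arithmetic on the stored span instead of an ARRAY_REF; a sketch with illustrative types:

/* Sketch: the MULT_EXPR/POINTER_PLUS_EXPR pair above, as C.
   'double' stands in for the real element type.  */
static double *
element_via_span (void *base, long offset, long span)
{
  return (double *) ((char *) base + offset * span);
}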
Example #9
static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, MINUS_EXPR,
			    TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
			    TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
			    TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    default:
      break;
    }

  return error_mark_node;
}
Example #10
/* Allocate memory, using an optional status argument.
 
   This function follows the following pseudo-code:

    void *
    allocate (size_t size, void** token, int *stat, char* errmsg, int errlen)
    {
      void *newmem;

      newmem = _caf_register (size, regtype, token, &stat, errmsg, errlen);
      return newmem;
    }  */
static void
gfc_allocate_using_lib (stmtblock_t * block, tree pointer, tree size,
			tree token, tree status, tree errmsg, tree errlen)
{
  tree tmp, pstat;

  gcc_assert (token != NULL_TREE);

  /* Evaluate size only once, and make sure it has the right type.  */
  size = gfc_evaluate_now (size, block);
  if (TREE_TYPE (size) != TREE_TYPE (size_type_node))
    size = fold_convert (size_type_node, size);

  /* The allocation itself.  */
  if (status == NULL_TREE)
    pstat  = null_pointer_node;
  else
    pstat  = gfc_build_addr_expr (NULL_TREE, status);

  if (errmsg == NULL_TREE)
    {
      gcc_assert(errlen == NULL_TREE);
      errmsg = null_pointer_node;
      errlen = build_int_cst (integer_type_node, 0);
    }

  tmp = build_call_expr_loc (input_location,
	     gfor_fndecl_caf_register, 6,
	     fold_build2_loc (input_location,
			      MAX_EXPR, size_type_node, size,
			      build_int_cst (size_type_node, 1)),
	     build_int_cst (integer_type_node,
			    GFC_CAF_COARRAY_ALLOC),
	     token, pstat, errmsg, errlen);

  tmp = fold_build2_loc (input_location, MODIFY_EXPR,
			 TREE_TYPE (pointer), pointer,
			 fold_convert (TREE_TYPE (pointer), tmp));
  gfc_add_expr_to_block (block, tmp);
}
Example #11
static inline tree
build_size_arg_loc (location_t loc, tree nb_iter, tree op,
		    gimple_seq *stmt_list)
{
  gimple_seq stmts;
  tree x = fold_build2_loc (loc, MULT_EXPR, size_type_node,
			    fold_convert_loc (loc, size_type_node, nb_iter),
			    fold_convert_loc (loc, size_type_node,
					      TYPE_SIZE_UNIT (TREE_TYPE (op))));
  x = force_gimple_operand (x, &stmts, true, NULL);
  gimple_seq_add_seq (stmt_list, stmts);

  return x;
}
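The helper just multiplies the iteration count by the element size, with both operands folded to size_type_node first; in C:

#include <stddef.h>

/* Sketch: nb_iter * sizeof (element), as computed above.  */
static size_t
memop_size (size_t nb_iter, size_t elem_size)
{
  return nb_iter * elem_size;
}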
Example #12
static tree
rhs_to_tree (tree type, gimple stmt)
{
  location_t loc = gimple_location (stmt);
  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
    return fold_build2_loc (loc, code, type, gimple_assign_rhs1 (stmt),
			gimple_assign_rhs2 (stmt));
  else if (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS)
    return build1 (code, type, gimple_assign_rhs1 (stmt));
  else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    return gimple_assign_rhs1 (stmt);
  else
    gcc_unreachable ();
}
Example #13
static void perturb_local_entropy(basic_block bb, tree local_entropy)
{
	gimple_stmt_iterator gsi;
	gimple assign;
	tree addxorrol, rhs;
	enum tree_code op;

	op = get_op(&rhs);
	addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
	assign = gimple_build_assign(local_entropy, addxorrol);
	gsi = gsi_after_labels(bb);
	gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
	update_stmt(assign);
//debug_bb(bb);
}
Example #14
static void
generate_memset_zero (gimple stmt, tree op0, tree nb_iter,
		      gimple_stmt_iterator bsi)
{
  tree addr_base, nb_bytes;
  bool res = false;
  gimple_seq stmt_list = NULL, stmts;
  gimple fn_call;
  tree mem, fn;
  struct data_reference *dr = XCNEW (struct data_reference);
  location_t loc = gimple_location (stmt);

  DR_STMT (dr) = stmt;
  DR_REF (dr) = op0;
  res = dr_analyze_innermost (dr);
  gcc_assert (res && stride_of_unit_type_p (DR_STEP (dr), TREE_TYPE (op0)));

  nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);
  addr_base = size_binop_loc (loc, PLUS_EXPR, DR_OFFSET (dr), DR_INIT (dr));
  addr_base = fold_convert_loc (loc, sizetype, addr_base);

  /* Test for a negative stride, iterating over every element.  */
  if (integer_zerop (size_binop (PLUS_EXPR,
				 TYPE_SIZE_UNIT (TREE_TYPE (op0)),
				 fold_convert (sizetype, DR_STEP (dr)))))
    {
      addr_base = size_binop_loc (loc, MINUS_EXPR, addr_base,
				  fold_convert_loc (loc, sizetype, nb_bytes));
      addr_base = size_binop_loc (loc, PLUS_EXPR, addr_base,
				  TYPE_SIZE_UNIT (TREE_TYPE (op0)));
    }

  addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
			       TREE_TYPE (DR_BASE_ADDRESS (dr)),
			       DR_BASE_ADDRESS (dr), addr_base);
  mem = force_gimple_operand (addr_base, &stmts, true, NULL);
  gimple_seq_add_seq (&stmt_list, stmts);

  fn = build_fold_addr_expr (implicit_built_in_decls [BUILT_IN_MEMSET]);
  fn_call = gimple_build_call (fn, 3, mem, integer_zero_node, nb_bytes);
  gimple_seq_add_stmt (&stmt_list, fn_call);
  gsi_insert_seq_after (&bsi, stmt_list, GSI_CONTINUE_LINKING);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "generated memset zero\n");

  free_data_ref (dr);
}
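The generated call is an ordinary memset over the analyzed region; for a negative stride the start address is first pulled back by the region size minus one element, matching the size_binop chain above. A sketch:

#include <string.h>

/* Sketch: where the zeroed region starts for each stride direction.  */
static void
memset_zero_region (char *base, size_t nb_bytes, size_t elem_size,
                    int negative_stride)
{
  char *start = negative_stride ? base - nb_bytes + elem_size : base;
  memset (start, 0, nb_bytes);
}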
Example #15
/* Free a given variable, if it's not NULL.  */
tree
gfc_call_free (tree var)
{
  stmtblock_t block;
  tree tmp, cond, call;

  if (TREE_TYPE (var) != TREE_TYPE (pvoid_type_node))
    var = fold_convert (pvoid_type_node, var);

  gfc_start_block (&block);
  var = gfc_evaluate_now (var, &block);
  cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, var,
			  build_int_cst (pvoid_type_node, 0));
  call = build_call_expr_loc (input_location,
			      built_in_decls[BUILT_IN_FREE], 1, var);
  tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond, call,
			 build_empty_stmt (input_location));
  gfc_add_expr_to_block (&block, tmp);

  return gfc_finish_block (&block);
}
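The returned block is just a NULL-guarded free; in C:

#include <stdlib.h>

/* Sketch: the guarded call built above.  free (NULL) is already legal,
   but the generated tree still tests the pointer explicitly.  */
static void
call_free_sketch (void *var)
{
  if (var != NULL)
    free (var);
}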
Example #16
void
gfc_add_modify_loc (location_t loc, stmtblock_t * pblock, tree lhs, tree rhs)
{
  tree tmp;

#ifdef ENABLE_CHECKING
  tree t1, t2;
  t1 = TREE_TYPE (rhs);
  t2 = TREE_TYPE (lhs);
  /* Make sure that the types of the rhs and the lhs are the same
     for scalar assignments.  We should probably have something
     similar for aggregates, but right now removing that check just
     breaks everything.  */
  gcc_assert (t1 == t2
	      || AGGREGATE_TYPE_P (TREE_TYPE (lhs)));
#endif

  tmp = fold_build2_loc (loc, MODIFY_EXPR, void_type_node, lhs,
			 rhs);
  gfc_add_expr_to_block (pblock, tmp);
}
Example #17
tree
convert (tree type, tree expr)
{
  tree e = expr;
  enum tree_code code = TREE_CODE (type);
  const char *invalid_conv_diag;
  tree ret;
  location_t loc = EXPR_LOCATION (expr);

  if (type == error_mark_node
      || expr == error_mark_node
      || TREE_TYPE (expr) == error_mark_node)
    return error_mark_node;

  if ((invalid_conv_diag
       = targetm.invalid_conversion (TREE_TYPE (expr), type)))
    {
      error (invalid_conv_diag);
      return error_mark_node;
    }

  if (type == TREE_TYPE (expr))
    return expr;
  ret = targetm.convert_to_type (type, expr);
  if (ret)
      return ret;

  STRIP_TYPE_NOPS (e);

  if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (expr))
      && (TREE_CODE (TREE_TYPE (expr)) != COMPLEX_TYPE
	  || TREE_CODE (e) == COMPLEX_EXPR))
    return fold_convert_loc (loc, type, expr);
  if (TREE_CODE (TREE_TYPE (expr)) == ERROR_MARK)
    return error_mark_node;
  if (TREE_CODE (TREE_TYPE (expr)) == VOID_TYPE)
    {
      error ("void value not ignored as it ought to be");
      return error_mark_node;
    }

  switch (code)
    {
    case VOID_TYPE:
      return fold_convert_loc (loc, type, e);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
      ret = convert_to_integer (type, e);
      goto maybe_fold;

    case BOOLEAN_TYPE:
      return fold_convert_loc
	(loc, type, c_objc_common_truthvalue_conversion (input_location, expr));

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      ret = convert_to_pointer (type, e);
      goto maybe_fold;

    case REAL_TYPE:
      ret = convert_to_real (type, e);
      goto maybe_fold;

    case FIXED_POINT_TYPE:
      ret = convert_to_fixed (type, e);
      goto maybe_fold;

    case COMPLEX_TYPE:
      /* If converting from COMPLEX_TYPE to a different COMPLEX_TYPE
	 and e is not COMPLEX_EXPR, convert_to_complex uses save_expr,
	 but for the C FE c_save_expr needs to be called instead.  */
      if (TREE_CODE (TREE_TYPE (e)) == COMPLEX_TYPE)
	{
	  if (TREE_CODE (e) != COMPLEX_EXPR)
	    {
	      tree subtype = TREE_TYPE (type);
	      tree elt_type = TREE_TYPE (TREE_TYPE (e));

	      if (in_late_binary_op)
		e = save_expr (e);
	      else
		e = c_save_expr (e);
	      ret
		= fold_build2_loc (loc, COMPLEX_EXPR, type,
				   convert (subtype,
					    fold_build1 (REALPART_EXPR,
							 elt_type, e)),
				   convert (subtype,
					    fold_build1 (IMAGPART_EXPR,
							 elt_type, e)));
	      goto maybe_fold;
	    }
	}
      ret = convert_to_complex (type, e);
      goto maybe_fold;

    case VECTOR_TYPE:
      ret = convert_to_vector (type, e);
      goto maybe_fold;

    case RECORD_TYPE:
    case UNION_TYPE:
      if (lang_hooks.types_compatible_p (type, TREE_TYPE (expr)))
	return e;
      break;

    default:
      break;

    maybe_fold:
      if (TREE_CODE (ret) != C_MAYBE_CONST_EXPR)
	ret = fold (ret);
      return ret;
    }

  error ("conversion to non-scalar type requested");
  return error_mark_node;
}
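The COMPLEX_TYPE special case converts the real and imaginary parts independently after saving e; the same piecewise conversion in plain C99 (a sketch, not the front-end code path):

#include <complex.h>

/* Sketch: convert each part separately, mirroring the
   REALPART_EXPR/IMAGPART_EXPR build above.  */
static double _Complex
widen_complex (float _Complex e)
{
  return (double) crealf (e) + (double) cimagf (e) * I;
}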
Example #18
static bool
ifcombine_ifandif (basic_block inner_cond_bb, bool inner_inv,
		   basic_block outer_cond_bb, bool outer_inv, bool result_inv)
{
  gimple_stmt_iterator gsi;
  gimple inner_stmt, outer_stmt;
  gcond *inner_cond, *outer_cond;
  tree name1, name2, bit1, bit2, bits1, bits2;

  inner_stmt = last_stmt (inner_cond_bb);
  if (!inner_stmt
      || gimple_code (inner_stmt) != GIMPLE_COND)
    return false;
  inner_cond = as_a <gcond *> (inner_stmt);

  outer_stmt = last_stmt (outer_cond_bb);
  if (!outer_stmt
      || gimple_code (outer_stmt) != GIMPLE_COND)
    return false;
  outer_cond = as_a <gcond *> (outer_stmt);

  /* See if we test a single bit of the same name in both tests.  In
     that case remove the outer test, merging both else edges,
     and change the inner one to test for
     name & (bit1 | bit2) == (bit1 | bit2).  */
  if (recognize_single_bit_test (inner_cond, &name1, &bit1, inner_inv)
      && recognize_single_bit_test (outer_cond, &name2, &bit2, outer_inv)
      && name1 == name2)
    {
      tree t, t2;

      /* Do it.  */
      gsi = gsi_for_stmt (inner_cond);
      t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1),
		       build_int_cst (TREE_TYPE (name1), 1), bit1);
      t2 = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1),
		        build_int_cst (TREE_TYPE (name1), 1), bit2);
      t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), t, t2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t2 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
      t2 = force_gimple_operand_gsi (&gsi, t2, true, NULL_TREE,
				     true, GSI_SAME_STMT);
      t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR,
		       boolean_type_node, t2, t);
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing double bit test to ");
	  print_generic_expr (dump_file, name1, 0);
	  fprintf (dump_file, " & T == T\nwith temporary T = (1 << ");
	  print_generic_expr (dump_file, bit1, 0);
	  fprintf (dump_file, ") | (1 << ");
	  print_generic_expr (dump_file, bit2, 0);
	  fprintf (dump_file, ")\n");
	}

      return true;
    }

  /* See if we have two bit tests of the same name in both tests.
     In that case remove the outer test and change the inner one to
     test for name & (bits1 | bits2) != 0.  */
  else if (recognize_bits_test (inner_cond, &name1, &bits1, !inner_inv)
      && recognize_bits_test (outer_cond, &name2, &bits2, !outer_inv))
    {
      gimple_stmt_iterator gsi;
      tree t;

      /* Find the common name which is bit-tested.  */
      if (name1 == name2)
	;
      else if (bits1 == bits2)
	{
	  t = name2;
	  name2 = bits2;
	  bits2 = t;
	  t = name1;
	  name1 = bits1;
	  bits1 = t;
	}
      else if (name1 == bits2)
	{
	  t = name2;
	  name2 = bits2;
	  bits2 = t;
	}
      else if (bits1 == name2)
	{
	  t = name1;
	  name1 = bits1;
	  bits1 = t;
	}
      else
	return false;

      /* As we strip non-widening conversions in finding a common
         name that is tested make sure to end up with an integral
	 type for building the bit operations.  */
      if (TYPE_PRECISION (TREE_TYPE (bits1))
	  >= TYPE_PRECISION (TREE_TYPE (bits2)))
	{
	  bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
	  name1 = fold_convert (TREE_TYPE (bits1), name1);
	  bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
	  bits2 = fold_convert (TREE_TYPE (bits1), bits2);
	}
      else
	{
	  bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
	  name1 = fold_convert (TREE_TYPE (bits2), name1);
	  bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
	  bits1 = fold_convert (TREE_TYPE (bits2), bits1);
	}

      /* Do it.  */
      gsi = gsi_for_stmt (inner_cond);
      t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), bits1, bits2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR, boolean_type_node, t,
		       build_int_cst (TREE_TYPE (t), 0));
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing bits or bits test to ");
	  print_generic_expr (dump_file, name1, 0);
	  fprintf (dump_file, " & T != 0\nwith temporary T = ");
	  print_generic_expr (dump_file, bits1, 0);
	  fprintf (dump_file, " | ");
	  print_generic_expr (dump_file, bits2, 0);
	  fprintf (dump_file, "\n");
	}

      return true;
    }

  /* See if we have two comparisons that we can merge into one.  */
  else if (TREE_CODE_CLASS (gimple_cond_code (inner_cond)) == tcc_comparison
	   && TREE_CODE_CLASS (gimple_cond_code (outer_cond)) == tcc_comparison)
    {
      tree t;
      enum tree_code inner_cond_code = gimple_cond_code (inner_cond);
      enum tree_code outer_cond_code = gimple_cond_code (outer_cond);

      /* Invert comparisons if necessary (and possible).  */
      if (inner_inv)
	inner_cond_code = invert_tree_comparison (inner_cond_code,
	  HONOR_NANS (gimple_cond_lhs (inner_cond)));
      if (inner_cond_code == ERROR_MARK)
	return false;
      if (outer_inv)
	outer_cond_code = invert_tree_comparison (outer_cond_code,
	  HONOR_NANS (gimple_cond_lhs (outer_cond)));
      if (outer_cond_code == ERROR_MARK)
	return false;
      /* Don't return false so fast, try maybe_fold_or_comparisons?  */

      if (!(t = maybe_fold_and_comparisons (inner_cond_code,
					    gimple_cond_lhs (inner_cond),
					    gimple_cond_rhs (inner_cond),
					    outer_cond_code,
					    gimple_cond_lhs (outer_cond),
					    gimple_cond_rhs (outer_cond))))
	{
	  tree t1, t2;
	  gimple_stmt_iterator gsi;
	  if (!LOGICAL_OP_NON_SHORT_CIRCUIT)
	    return false;
	  /* Only do this optimization if the inner bb contains only the conditional. */
	  if (!gsi_one_before_end_p (gsi_start_nondebug_after_labels_bb (inner_cond_bb)))
	    return false;
	  t1 = fold_build2_loc (gimple_location (inner_cond),
				inner_cond_code,
				boolean_type_node,
				gimple_cond_lhs (inner_cond),
				gimple_cond_rhs (inner_cond));
	  t2 = fold_build2_loc (gimple_location (outer_cond),
				outer_cond_code,
				boolean_type_node,
				gimple_cond_lhs (outer_cond),
				gimple_cond_rhs (outer_cond));
	  t = fold_build2_loc (gimple_location (inner_cond), 
			       TRUTH_AND_EXPR, boolean_type_node, t1, t2);
	  if (result_inv)
	    {
	      t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t);
	      result_inv = false;
	    }
	  gsi = gsi_for_stmt (inner_cond);
	  t = force_gimple_operand_gsi_1 (&gsi, t, is_gimple_condexpr, NULL, true,
					  GSI_SAME_STMT);
        }
      if (result_inv)
	t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t);
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing two comparisons to ");
	  print_generic_expr (dump_file, t, 0);
	  fprintf (dump_file, "\n");
	}

      return true;
    }

  return false;
}
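The first branch's rewrite can be sanity-checked with ordinary integer code: two single-bit tests of the same name collapse into one masked comparison. A sketch (bit positions assumed below the type width):

/* Sketch: (name & (1<<bit1)) && (name & (1<<bit2)) is equivalent to
   (name & T) == T with T = (1u << bit1) | (1u << bit2).  */
static int
merged_bit_test (unsigned name, unsigned bit1, unsigned bit2)
{
  unsigned t = (1u << bit1) | (1u << bit2);
  return (name & t) == t;
}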
Example #19
bool
gfc_add_finalizer_call (stmtblock_t *block, gfc_expr *expr2)
{
  tree tmp;
  gfc_ref *ref;
  gfc_expr *expr;
  gfc_expr *final_expr = NULL;
  gfc_expr *elem_size = NULL;
  bool has_finalizer = false;

  if (!expr2 || (expr2->ts.type != BT_DERIVED && expr2->ts.type != BT_CLASS))
    return false;

  if (expr2->ts.type == BT_DERIVED)
    {
      gfc_is_finalizable (expr2->ts.u.derived, &final_expr);
      if (!final_expr)
        return false;
    }

  /* If we have a class array, we need to go back to the class
     container.  */
  expr = gfc_copy_expr (expr2);

  if (expr->ref && expr->ref->next && !expr->ref->next->next
      && expr->ref->next->type == REF_ARRAY
      && expr->ref->type == REF_COMPONENT
      && strcmp (expr->ref->u.c.component->name, "_data") == 0)
    {
      gfc_free_ref_list (expr->ref);
      expr->ref = NULL;
    }
  else
    for (ref = expr->ref; ref; ref = ref->next)
      if (ref->next && ref->next->next && !ref->next->next->next
         && ref->next->next->type == REF_ARRAY
         && ref->next->type == REF_COMPONENT
         && strcmp (ref->next->u.c.component->name, "_data") == 0)
       {
         gfc_free_ref_list (ref->next);
         ref->next = NULL;
       }

  if (expr->ts.type == BT_CLASS)
    {
      has_finalizer = gfc_is_finalizable (expr->ts.u.derived, NULL);

      if (!expr2->rank && !expr2->ref && CLASS_DATA (expr2->symtree->n.sym)->as)
	expr->rank = CLASS_DATA (expr2->symtree->n.sym)->as->rank;

      final_expr = gfc_copy_expr (expr);
      gfc_add_vptr_component (final_expr);
      gfc_add_component_ref (final_expr, "_final");

      elem_size = gfc_copy_expr (expr);
      gfc_add_vptr_component (elem_size);
      gfc_add_component_ref (elem_size, "_size");
    }

  gcc_assert (final_expr->expr_type == EXPR_VARIABLE);

  tmp = gfc_build_final_call (expr->ts, final_expr, expr,
			      false, elem_size);

  if (expr->ts.type == BT_CLASS && !has_finalizer)
    {
      tree cond;
      gfc_se se;

      gfc_init_se (&se, NULL);
      se.want_pointer = 1;
      gfc_conv_expr (&se, final_expr);
      cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
			      se.expr, build_int_cst (TREE_TYPE (se.expr), 0));

      /* For CLASS(*) not only sym->_vtab->_final can be NULL
	 but already sym->_vtab itself.  */
      if (UNLIMITED_POLY (expr))
	{
	  tree cond2;
	  gfc_expr *vptr_expr;

	  vptr_expr = gfc_copy_expr (expr);
	  gfc_add_vptr_component (vptr_expr);

	  gfc_init_se (&se, NULL);
	  se.want_pointer = 1;
	  gfc_conv_expr (&se, vptr_expr);
	  gfc_free_expr (vptr_expr);

	  cond2 = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
				   se.expr,
				   build_int_cst (TREE_TYPE (se.expr), 0));
	  cond = fold_build2_loc (input_location, TRUTH_ANDIF_EXPR,
				  boolean_type_node, cond2, cond);
	}

      tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
			     cond, tmp, build_empty_stmt (input_location));
    }

  gfc_add_expr_to_block (block, tmp);

  return true;
}
Example #20
/* The method walks the node hierarchy to the topmost node.  This is
   exactly how it's done in mudflap and has been borrowed.  */
static tree
mf_walk_comp_ref (tree *tp, tree type, location_t location,
        tree *addr_store, tree *base_store)
{
    tree var, t, addr, base, size;

    t = *tp;

    int component_ref_only = (TREE_CODE (t) == COMPONENT_REF);
    /* If we have a bitfield component reference, we must note the
       innermost addressable object in ELT, from which we will
       construct the byte-addressable bounds of the bitfield.  */
    tree elt = NULL_TREE;
    int bitfield_ref_p = (TREE_CODE (t) == COMPONENT_REF
            && DECL_BIT_FIELD_TYPE (TREE_OPERAND (t, 1)));

    /* Iterate to the top of the ARRAY_REF/COMPONENT_REF
       containment hierarchy to find the outermost VAR_DECL.  */
    var = TREE_OPERAND (t, 0);
    while (1)
    {
        if (bitfield_ref_p && elt == NULL_TREE
                && (TREE_CODE (var) == ARRAY_REF
                    || TREE_CODE (var) == COMPONENT_REF))
            elt = var;

        if (TREE_CODE (var) == ARRAY_REF)
        {
            component_ref_only = 0;
            var = TREE_OPERAND (var, 0);
        }
        else if (TREE_CODE (var) == COMPONENT_REF)
            var = TREE_OPERAND (var, 0);
        else if (INDIRECT_REF_P (var)
                || TREE_CODE (var) == MEM_REF)
        {
            base = TREE_OPERAND (var, 0);
            break;
        }
        else if (TREE_CODE (var) == VIEW_CONVERT_EXPR)
        {
            var = TREE_OPERAND (var, 0);
            if (CONSTANT_CLASS_P (var)
                    && TREE_CODE (var) != STRING_CST)
                return NULL_TREE;
        }
        else
        {
            DEBUGLOG("TREE_CODE(temp) : %s comp_ref_only = %d eligigle = %d\n", \
                    tree_code_name[(int)TREE_CODE(var)], component_ref_only, \
                    mf_decl_eligible_p(var));
            gcc_assert (TREE_CODE (var) == VAR_DECL
                    || TREE_CODE (var) == SSA_NAME /* TODO: Check this */
                    || TREE_CODE (var) == PARM_DECL
                    || TREE_CODE (var) == RESULT_DECL
                    || TREE_CODE (var) == STRING_CST);
            /* Don't instrument this access if the underlying
               variable is not "eligible".  This test matches
               those arrays that have only known-valid indexes,
               and thus are not labeled TREE_ADDRESSABLE.  */
            if (! mf_decl_eligible_p (var)) //TODO is this needed? || component_ref_only)
                return NULL_TREE;
            else
            {
                base = build1 (ADDR_EXPR,
                        build_pointer_type (TREE_TYPE (var)), var);
                break;
            }
        }
    }

    /* Handle the case of ordinary non-indirection structure
       accesses.  These have only nested COMPONENT_REF nodes (no
       INDIRECT_REF), but pass through the above filter loop.
       Note that it's possible for such a struct variable to match
       the eligible_p test because someone else might take its
       address sometime.  */

    /* We need special processing for bitfield components, because
       their addresses cannot be taken.  */
    if (bitfield_ref_p)
    {
        tree field = TREE_OPERAND (t, 1);

        if (TREE_CODE (DECL_SIZE_UNIT (field)) == INTEGER_CST)
            size = DECL_SIZE_UNIT (field);

        if (elt)
            elt = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (elt)),
                    elt);
        addr = fold_convert_loc (location, ptr_type_node, elt ? elt : base);
        addr = fold_build2_loc (location, POINTER_PLUS_EXPR, ptr_type_node,
                addr, fold_convert_loc (location, sizetype,
                    byte_position (field)));
    }
    else
        addr = build1 (ADDR_EXPR, build_pointer_type (type), t);

    if (addr_store)
        *addr_store = addr;

    if (base_store)
        *base_store = base;

    return var;
}
Example #21
static bool
generate_memset_zero (gimple stmt, tree op0, tree nb_iter,
		      gimple_stmt_iterator bsi)
{
  tree addr_base;
  tree nb_bytes = NULL;
  bool res = false;
  gimple_seq stmts = NULL, stmt_list = NULL;
  gimple fn_call;
  tree mem, fndecl, fntype, fn;
  gimple_stmt_iterator i;
  struct data_reference *dr = XCNEW (struct data_reference);
  location_t loc = gimple_location (stmt);

  DR_STMT (dr) = stmt;
  DR_REF (dr) = op0;
  if (!dr_analyze_innermost (dr))
    goto end;

  /* Test for a positive stride, iterating over every element.  */
  if (integer_zerop (fold_build2_loc (loc,
				  MINUS_EXPR, integer_type_node, DR_STEP (dr),
				  TYPE_SIZE_UNIT (TREE_TYPE (op0)))))
    {
      tree offset = fold_convert_loc (loc, sizetype,
				      size_binop_loc (loc, PLUS_EXPR,
						      DR_OFFSET (dr),
						      DR_INIT (dr)));
      addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
			       TREE_TYPE (DR_BASE_ADDRESS (dr)),
			       DR_BASE_ADDRESS (dr), offset);
    }

  /* Test for a negative stride, iterating over every element.  */
  else if (integer_zerop (fold_build2_loc (loc, PLUS_EXPR, integer_type_node,
				       TYPE_SIZE_UNIT (TREE_TYPE (op0)),
				       DR_STEP (dr))))
    {
      nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);
      addr_base = size_binop_loc (loc, PLUS_EXPR, DR_OFFSET (dr), DR_INIT (dr));
      addr_base = fold_build2_loc (loc, MINUS_EXPR, sizetype, addr_base,
			       fold_convert_loc (loc, sizetype, nb_bytes));
      addr_base = force_gimple_operand (addr_base, &stmts, true, NULL);
      gimple_seq_add_seq (&stmt_list, stmts);

      addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
			       TREE_TYPE (DR_BASE_ADDRESS (dr)),
			       DR_BASE_ADDRESS (dr), addr_base);
    }
  else
    goto end;

  mem = force_gimple_operand (addr_base, &stmts, true, NULL);
  gimple_seq_add_seq (&stmt_list, stmts);

  fndecl = implicit_built_in_decls [BUILT_IN_MEMSET];
  fntype = TREE_TYPE (fndecl);
  fn = build1 (ADDR_EXPR, build_pointer_type (fntype), fndecl);

  if (!nb_bytes)
    nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);
  fn_call = gimple_build_call (fn, 3, mem, integer_zero_node, nb_bytes);
  gimple_seq_add_stmt (&stmt_list, fn_call);

  for (i = gsi_start (stmt_list); !gsi_end_p (i); gsi_next (&i))
    {
      gimple s = gsi_stmt (i);
      update_stmt_if_modified (s);
    }

  gsi_insert_seq_after (&bsi, stmt_list, GSI_CONTINUE_LINKING);
  res = true;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "generated memset zero\n");

 end:
  free_data_ref (dr);
  return res;
}
Example #22
static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, MINUS_EXPR,
			    TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
			    TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
			    TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    case COMPOUND_EXPR:
      {
	/* cp_build_modify_expr forces preevaluation of the RHS to make
	   sure that it is evaluated before the lvalue-rvalue conversion
	   is applied to the LHS.  Reconstruct the original expression.  */
	tree op0 = TREE_OPERAND (exp, 0);
	if (TREE_CODE (op0) == TARGET_EXPR
	    && !VOID_TYPE_P (TREE_TYPE (op0)))
	  {
	    tree op1 = TREE_OPERAND (exp, 1);
	    tree temp = TARGET_EXPR_SLOT (op0);
	    if (BINARY_CLASS_P (op1)
		&& TREE_OPERAND (op1, 1) == temp)
	      {
		op1 = copy_node (op1);
		TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
		return check_omp_for_incr_expr (loc, op1, decl);
	      }
	  }
	break;
      }
    default:
      break;
    }

  return error_mark_node;
}
Example #23
void
omp_extract_for_data (gomp_for *for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
		    == GF_OMP_FOR_KIND_DISTRIBUTE;
  bool taskloop = gimple_omp_for_kind (for_stmt)
		  == GF_OMP_FOR_KIND_TASKLOOP;
  tree iterv, countv;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  if (gimple_omp_for_collapse (for_stmt) > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->collapse = 1;
  fd->ordered = 0;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->sched_modifiers = 0;
  fd->chunk_size = NULL_TREE;
  fd->simd_schedule = false;
  if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	if (OMP_CLAUSE_ORDERED_EXPR (t))
	  fd->ordered = tree_to_shwi (OMP_CLAUSE_ORDERED_EXPR (t));
	break;
      case OMP_CLAUSE_SCHEDULE:
	gcc_assert (!distribute && !taskloop);
	fd->sched_kind
	  = (enum omp_clause_schedule_kind)
	    (OMP_CLAUSE_SCHEDULE_KIND (t) & OMP_CLAUSE_SCHEDULE_MASK);
	fd->sched_modifiers = (OMP_CLAUSE_SCHEDULE_KIND (t)
			       & ~OMP_CLAUSE_SCHEDULE_MASK);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	fd->simd_schedule = OMP_CLAUSE_SCHEDULE_SIMD (t);
	break;
      case OMP_CLAUSE_DIST_SCHEDULE:
	gcc_assert (distribute);
	fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	fd->collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (t));
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }
  if (fd->ordered && fd->collapse == 1 && loops != NULL)
    {
      fd->loops = loops;
      iterv = NULL_TREE;
      countv = NULL_TREE;
      collapse_iter = &iterv;
      collapse_count = &countv;
    }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (taskloop)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_RUNTIME;
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  int cnt = fd->ordered ? fd->ordered : fd->collapse;
  for (i = 0; i < cnt; i++)
    {
      if (i == 0 && fd->collapse == 1 && (fd->ordered == 0 || loops == NULL))
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      gcc_assert (loop->cond_code != NE_EXPR
		  || gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_CILKSIMD
		  || gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_CILKFOR);
      omp_adjust_for_condition (loc, &loop->cond_code, &loop->n2);

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      loop->step = omp_get_for_step_from_incr (loc, t);

      if (simd
	  || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	      && !fd->have_ordered))
	{
	  if (fd->collapse == 1)
	    iter_type = TREE_TYPE (loop->v);
	  else if (i == 0
		   || TYPE_PRECISION (iter_type)
		      < TYPE_PRECISION (TREE_TYPE (loop->v)))
	    iter_type
	      = build_nonstandard_integer_type
		  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
	}
      else if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				 PLUS_EXPR, TREE_TYPE (loop->v),
				 loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
				    PLUS_EXPR, TREE_TYPE (loop->v),
				    loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
				    MINUS_EXPR, TREE_TYPE (loop->v),
				    loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (i >= fd->collapse)
	continue;

      if (collapse_count && *collapse_count == NULL)
	{
	  t = fold_binary (loop->cond_code, boolean_type_node,
			   fold_convert (TREE_TYPE (loop->v), loop->n1),
			   fold_convert (TREE_TYPE (loop->v), loop->n2));
	  if (t && integer_zerop (t))
	    count = build_zero_cst (long_long_unsigned_type_node);
	  else if ((i == 0 || count != NULL_TREE)
		   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		   && TREE_CONSTANT (loop->n1)
		   && TREE_CONSTANT (loop->n2)
		   && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
			       PLUS_EXPR, itype,
			       fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
			       fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
			       fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				 fold_build1_loc (loc, NEGATE_EXPR, itype,
					      fold_convert_loc (loc, itype,
								loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				 fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
				     MULT_EXPR, long_long_unsigned_type_node,
				     count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else if (count && !integer_zerop (count))
	    count = NULL_TREE;
	}
    }

  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1 || (fd->ordered && loops))
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
  else if (loops)
    loops[0] = fd->loop;
}
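For an LT_EXPR loop with a positive step, the PLUS/MINUS/TRUNC_DIV chain in the collapse-count block computes the usual trip-count formula; in C:

/* Sketch: count = (step - 1 + n2 - n1) / step, matching the
   fold_build2_loc chain above (LT_EXPR, step > 0 assumed).  */
static long long
trip_count (long long n1, long long n2, long long step)
{
  return (step - 1 + n2 - n1) / step;
}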
Example #24
/* User-deallocate; we emit the code directly from the front-end, and the
   logic is the same as the previous library function:

    void
    deallocate (void *pointer, GFC_INTEGER_4 * stat)
    {
      if (!pointer)
	{
	  if (stat)
	    *stat = 1;
	  else
	    runtime_error ("Attempt to DEALLOCATE unallocated memory.");
	}
      else
	{
	  free (pointer);
	  if (stat)
	    *stat = 0;
	}
    }

   In this front-end version, status doesn't have to be GFC_INTEGER_4.
   Moreover, if CAN_FAIL is true, then we will not emit a runtime error,
   even when no status variable is passed to us (this is used for
   unconditional deallocation generated by the front-end at end of
   each procedure).
   
   If a runtime-message is possible, `expr' must point to the original
   expression being deallocated for its locus and variable name.

   For coarrays, "pointer" must be the array descriptor and not its
   "data" component.  */
tree
gfc_deallocate_with_status (tree pointer, tree status, tree errmsg,
			    tree errlen, tree label_finish,
			    bool can_fail, gfc_expr* expr, bool coarray)
{
  stmtblock_t null, non_null;
  tree cond, tmp, error;
  tree status_type = NULL_TREE;
  tree caf_decl = NULL_TREE;

  if (coarray)
    {
      gcc_assert (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (pointer)));
      caf_decl = pointer;
      pointer = gfc_conv_descriptor_data_get (caf_decl);
      STRIP_NOPS (pointer);
    }

  cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, pointer,
			  build_int_cst (TREE_TYPE (pointer), 0));

  /* When POINTER is NULL, we set STATUS to 1 if it's present, otherwise
     we emit a runtime error.  */
  gfc_start_block (&null);
  if (!can_fail)
    {
      tree varname;

      gcc_assert (expr && expr->expr_type == EXPR_VARIABLE && expr->symtree);

      varname = gfc_build_cstring_const (expr->symtree->name);
      varname = gfc_build_addr_expr (pchar_type_node, varname);

      error = gfc_trans_runtime_error (true, &expr->where,
				       "Attempt to DEALLOCATE unallocated '%s'",
				       varname);
    }
  else
    error = build_empty_stmt (input_location);

  if (status != NULL_TREE && !integer_zerop (status))
    {
      tree cond2;

      status_type = TREE_TYPE (TREE_TYPE (status));
      cond2 = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
			       status, build_int_cst (TREE_TYPE (status), 0));
      tmp = fold_build2_loc (input_location, MODIFY_EXPR, status_type,
			     fold_build1_loc (input_location, INDIRECT_REF,
					      status_type, status),
			     build_int_cst (status_type, 1));
      error = fold_build3_loc (input_location, COND_EXPR, void_type_node,
			       cond2, tmp, error);
    }

  gfc_add_expr_to_block (&null, error);

  /* When POINTER is not NULL, we free it.  */
  gfc_start_block (&non_null);
  if (!coarray || gfc_option.coarray != GFC_FCOARRAY_LIB)
    {
      tmp = build_call_expr_loc (input_location,
				 builtin_decl_explicit (BUILT_IN_FREE), 1,
				 fold_convert (pvoid_type_node, pointer));
      gfc_add_expr_to_block (&non_null, tmp);

      if (status != NULL_TREE && !integer_zerop (status))
	{
	  /* We set STATUS to zero if it is present.  */
	  tree status_type = TREE_TYPE (TREE_TYPE (status));
	  tree cond2;

	  cond2 = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
				   status,
				   build_int_cst (TREE_TYPE (status), 0));
	  tmp = fold_build2_loc (input_location, MODIFY_EXPR, status_type,
				 fold_build1_loc (input_location, INDIRECT_REF,
						  status_type, status),
				 build_int_cst (status_type, 0));
	  tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
				 gfc_unlikely (cond2), tmp,
				 build_empty_stmt (input_location));
	  gfc_add_expr_to_block (&non_null, tmp);
	}
    }
  else
    {
      tree caf_type, token, cond2;
      tree pstat = null_pointer_node;

      if (errmsg == NULL_TREE)
	{
	  gcc_assert (errlen == NULL_TREE);
	  errmsg = null_pointer_node;
	  errlen = build_zero_cst (integer_type_node);
	}
      else
	{
	  gcc_assert (errlen != NULL_TREE);
	  if (!POINTER_TYPE_P (TREE_TYPE (errmsg)))
	    errmsg = gfc_build_addr_expr (NULL_TREE, errmsg);
	}

      caf_type = TREE_TYPE (caf_decl);

      if (status != NULL_TREE && !integer_zerop (status))
	{
	  gcc_assert (status_type == integer_type_node);
	  pstat = status;
	}

      if (GFC_DESCRIPTOR_TYPE_P (caf_type)
	  && GFC_TYPE_ARRAY_AKIND (caf_type) == GFC_ARRAY_ALLOCATABLE)
	token = gfc_conv_descriptor_token (caf_decl);
      else if (DECL_LANG_SPECIFIC (caf_decl)
	       && GFC_DECL_TOKEN (caf_decl) != NULL_TREE)
	token = GFC_DECL_TOKEN (caf_decl);
      else
	{
	  gcc_assert (GFC_ARRAY_TYPE_P (caf_type)
		      && GFC_TYPE_ARRAY_CAF_TOKEN (caf_type) != NULL_TREE);
	  token = GFC_TYPE_ARRAY_CAF_TOKEN (caf_type);
	}

      token = gfc_build_addr_expr (NULL_TREE, token);
      tmp = build_call_expr_loc (input_location,
	     gfor_fndecl_caf_deregister, 4,
	     token, pstat, errmsg, errlen);
      gfc_add_expr_to_block (&non_null, tmp);

      if (status != NULL_TREE)
	{
	  tree stat = build_fold_indirect_ref_loc (input_location, status);

	  TREE_USED (label_finish) = 1;
	  tmp = build1_v (GOTO_EXPR, label_finish);
	  cond2 = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
				   stat, build_zero_cst (TREE_TYPE (stat)));
	  tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
				 gfc_unlikely (cond2), tmp,
				 build_empty_stmt (input_location));
	  gfc_add_expr_to_block (&non_null, tmp);
	}
    }

  return fold_build3_loc (input_location, COND_EXPR, void_type_node, cond,
			  gfc_finish_block (&null),
			  gfc_finish_block (&non_null));
}
Example #25
/* Generate code for an ALLOCATE statement when the argument is an
   allocatable variable.  If the variable is currently allocated, it is an
   error to allocate it again.
 
   This function follows the following pseudo-code:
  
    void *
    allocate_allocatable (void *mem, size_t size, integer_type stat)
    {
      if (mem == NULL)
	return allocate (size, stat);
      else
      {
	if (stat)
	  stat = LIBERROR_ALLOCATION;
	else
	  runtime_error ("Attempting to allocate already allocated variable");
      }
    }
    
    EXPR must be set to the original expression being allocated, so that its
    locus and variable name are available if a runtime error has to be
    printed.  */
void
gfc_allocate_allocatable (stmtblock_t * block, tree mem, tree size, tree token,
			  tree status, tree errmsg, tree errlen, tree label_finish,
			  gfc_expr* expr)
{
  stmtblock_t alloc_block;
  tree tmp, null_mem, alloc, error;
  tree type = TREE_TYPE (mem);

  if (TREE_TYPE (size) != TREE_TYPE (size_type_node))
    size = fold_convert (size_type_node, size);

  null_mem = gfc_unlikely (fold_build2_loc (input_location, NE_EXPR,
					    boolean_type_node, mem,
					    build_int_cst (type, 0)));
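  /* Note: despite its name, NULL_MEM is true exactly when MEM is *not*
     NULL; it selects the "already allocated" error branch of the final
     COND_EXPR below.  */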

  /* If mem is NULL, we call gfc_allocate_using_malloc or
     gfc_allocate_using_lib.  */
  gfc_start_block (&alloc_block);

  if (gfc_option.coarray == GFC_FCOARRAY_LIB
      && gfc_expr_attr (expr).codimension)
    {
      tree cond;

      gfc_allocate_using_lib (&alloc_block, mem, size, token, status,
			      errmsg, errlen);
      if (status != NULL_TREE)
	{
	  TREE_USED (label_finish) = 1;
	  tmp = build1_v (GOTO_EXPR, label_finish);
	  cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
				  status, build_zero_cst (TREE_TYPE (status)));
	  tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
				 gfc_unlikely (cond), tmp,
				 build_empty_stmt (input_location));
	  gfc_add_expr_to_block (&alloc_block, tmp);
	}
    }
  else
    gfc_allocate_using_malloc (&alloc_block, mem, size, status);

  alloc = gfc_finish_block (&alloc_block);

  /* If mem is not NULL, we issue a runtime error or set the
     status variable.  */
  if (expr)
    {
      tree varname;

      gcc_assert (expr->expr_type == EXPR_VARIABLE && expr->symtree);
      varname = gfc_build_cstring_const (expr->symtree->name);
      varname = gfc_build_addr_expr (pchar_type_node, varname);

      error = gfc_trans_runtime_error (true, &expr->where,
				       "Attempting to allocate already"
				       " allocated variable '%s'",
				       varname);
    }
  else
    error = gfc_trans_runtime_error (true, NULL,
				     "Attempting to allocate already allocated"
				     " variable");

  if (status != NULL_TREE)
    {
      tree status_type = TREE_TYPE (status);

      error = fold_build2_loc (input_location, MODIFY_EXPR, status_type,
	      status, build_int_cst (status_type, LIBERROR_ALLOCATION));
    }

  tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, null_mem,
			 error, alloc);
  gfc_add_expr_to_block (block, tmp);
}
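
A minimal sketch of a call site (hypothetical; it assumes a non-coarray
allocation with no STAT= variable, so the token, status, errmsg, errlen and
finish-label arguments are all unused and passed as NULL_TREE, and
parent_block is a name introduced here for illustration):

    stmtblock_t block;

    gfc_init_block (&block);
    gfc_allocate_allocatable (&block, mem, size, NULL_TREE,
			      NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE,
			      expr);
    gfc_add_expr_to_block (parent_block, gfc_finish_block (&block));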
Example #26
static void
mf_xform_derefs_1 (gimple_stmt_iterator *iter, tree *tp,
		location_t location, tree dirflag)
{
	tree type, base=NULL_TREE, limit, addr, size, t, elt=NULL_TREE;
	tree temp, field, offset;
	bool check_red_flag = false, instrumented = false;
	tree fncall_param_val;
	gimple is_char_red_call;
	tree temp_instr, type_node;

    // TODO fix this to use our flag
	/* Don't instrument read operations.  */
	if (dirflag == integer_zero_node && flag_mudflap_ignore_reads)
		return;

	DEBUGLOG("TREE_CODE(t) = %s, mf_decl_eligible_p : %d\n", 
			tree_code_name[(int)TREE_CODE(*tp)], mf_decl_eligible_p(*tp));

	t = *tp;
	type = TREE_TYPE (t);

	if (type == error_mark_node)
		return;

	size = TYPE_SIZE_UNIT (type);

	/* Don't instrument marked nodes.  */
	if (mf_marked_p (t) && !mf_decl_eligible_p(t)){
		DEBUGLOG("Returning Here - 1\n");
		return;
	}

    if (TREE_CODE(t) == ADDR_EXPR ||
            TREE_CODE(t) == COMPONENT_REF ||
            TREE_CODE(t) == ARRAY_REF ||
            (TREE_CODE(t) == VAR_DECL && mf_decl_eligible_p(t)))
    {
        DEBUGLOG("------ INSTRUMENTING NODES ---------\n");

        /* A VAR_DECL has no operands; only inspect operand 0 of the
           address/reference nodes.  */
        temp = (TREE_CODE(t) == VAR_DECL) ? NULL_TREE : TREE_OPERAND(t, 0);

        /* String constants and function addresses need no red-zone check.  */
        if (temp && (TREE_CODE(temp) == STRING_CST ||
                TREE_CODE(temp) == FUNCTION_DECL))
            return;

        if (temp)
            DEBUGLOG("TREE_CODE(temp) : %s\n", tree_code_name[(int)TREE_CODE(temp)]);

        if (TREE_CODE(t) == VAR_DECL)
            *tp = mf_walk_n_instrument(tp, &instrumented);
        else
            TREE_OPERAND(t,0) = mf_walk_n_instrument(&(TREE_OPERAND(t,0)), &instrumented);

        if (TREE_CODE(t) == ADDR_EXPR)
            return;
    }

    DEBUGLOG("Pass2 derefs: entering deref section\n");

    type_node = NULL_TREE;
    //TODO move this to appropriate cases
    t = *tp;
	switch (TREE_CODE (t))
	{
		case ARRAY_REF:
		case COMPONENT_REF: // TODO check if following works for comp refs
			{ 
                DEBUGLOG("------ INSIDE CASE COMPONENT_REF  ---------\n");
                HOST_WIDE_INT bitsize, bitpos;
                tree inner, offset;
                int volatilep, unsignedp;
                enum machine_mode mode1;
                check_red_flag = 1; 
                inner = get_inner_reference (t, &bitsize, &bitpos, &offset,
                        &mode1, &unsignedp, &volatilep, false);
                if (!offset)
                    offset = size_zero_node;
                offset = size_binop (PLUS_EXPR, offset,
                        size_int (bitpos / BITS_PER_UNIT));
                addr = fold_build2_loc (location, POINTER_PLUS_EXPR, ptr_type_node,
                        build1 (ADDR_EXPR, build_pointer_type(type), inner), offset);
                break; // TODO continue?
            }

		case INDIRECT_REF:
			DEBUGLOG("------ INSIDE CASE INDIRECT_REF  ---------\n");
			check_red_flag = 1;
			addr = TREE_OPERAND (t, 0);
            break; // TODO continue?

		case MEM_REF:
			DEBUGLOG("------ INSIDE CASE MEM_REF  ---------\n");
			check_red_flag = 1;
			addr = fold_build2_loc (location, POINTER_PLUS_EXPR, TREE_TYPE (TREE_OPERAND (t, 0)),
					TREE_OPERAND (t, 0), fold_convert (sizetype, TREE_OPERAND (t, 1)));
            break;

		case TARGET_MEM_REF:
			DEBUGLOG("------ INSIDE CASE TARGET_MEM_REF  ---------\n");
			check_red_flag = 1;
			addr = tree_mem_ref_addr (ptr_type_node, t);
			break; // TODO do you want to do this case? find out what it does.

		case ARRAY_RANGE_REF:
			DEBUGLOG("------ INSIDE CASE ARRAY_RANGE_REF  ---------\n");
			DEBUGLOG("------ TODO not handled yet---------\n");
			return;

		case BIT_FIELD_REF:
			DEBUGLOG("------ INSIDE CASE BIT_FIELD_REF  ---------\n");
			DEBUGLOG("------ TODO not handled yet---------\n");
			return;

		default:
			DEBUGLOG("------ INSIDE CASE DEFAULT  ---------\n");
			if(mf_decl_eligible_p(t))
			{
                DEBUGLOG("Do you want to be here?\n");
                return;
				/*if((*tp = mx_xform_instrument_pass2(t)) == NULL_TREE){
					DEBUGLOG("Failed to set tree operand\n");
					return;
				}*/
			}
	}


    // Add the call to is_char_red
    if (check_red_flag) {
        DEBUGLOG("Entering is_char_red\n");
        fncall_param_val = fold_build2_loc (location, MEM_REF, ptr_type_node,
                            addr, build_int_cst(build_pointer_type(type), 0));
        fncall_param_val = fold_convert_loc (location, unsigned_type_node,
                            fncall_param_val);
        is_char_red_call = gimple_build_call (lbc_is_char_red_fndecl, 3,
                            fncall_param_val, size,
                            fold_convert_loc(location, ptr_type_node, addr));
        gimple_set_location (is_char_red_call, location);
        //debug_gimple_stmt(is_char_red_call);
        gsi_insert_before (iter, is_char_red_call, GSI_SAME_STMT);
        DEBUGLOG("Done with is_char_red\n");
    }
    DEBUGLOG("Exiting derefs \n");
}
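
The call built above passes the loaded value, the access size, and the raw
address. A plausible prototype for the runtime helper behind
lbc_is_char_red_fndecl would be the following (an assumption for
illustration; the real declaration lives in the LBC runtime library, not in
this pass):

    /* Hypothetical LBC runtime check: VALUE is the datum read through
       ADDR, SIZE the access size in bytes; reports red-zone accesses.  */
    void is_char_red (unsigned int value, size_t size, const void *addr);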
Example #27
tree
gfc_deallocate_scalar_with_status (tree pointer, tree status, bool can_fail,
				   gfc_expr* expr, gfc_typespec ts)
{
  stmtblock_t null, non_null;
  tree cond, tmp, error;

  cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, pointer,
			  build_int_cst (TREE_TYPE (pointer), 0));

  /* When POINTER is NULL, we set STATUS to 1 if it's present, otherwise
     we emit a runtime error.  */
  gfc_start_block (&null);
  if (!can_fail)
    {
      tree varname;

      gcc_assert (expr && expr->expr_type == EXPR_VARIABLE && expr->symtree);

      varname = gfc_build_cstring_const (expr->symtree->name);
      varname = gfc_build_addr_expr (pchar_type_node, varname);

      error = gfc_trans_runtime_error (true, &expr->where,
				       "Attempt to DEALLOCATE unallocated '%s'",
				       varname);
    }
  else
    error = build_empty_stmt (input_location);

  if (status != NULL_TREE && !integer_zerop (status))
    {
      tree status_type = TREE_TYPE (TREE_TYPE (status));
      tree cond2;

      cond2 = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
			       status, build_int_cst (TREE_TYPE (status), 0));
      tmp = fold_build2_loc (input_location, MODIFY_EXPR, status_type,
			     fold_build1_loc (input_location, INDIRECT_REF,
					      status_type, status),
			     build_int_cst (status_type, 1));
      error = fold_build3_loc (input_location, COND_EXPR, void_type_node,
			       cond2, tmp, error);
    }

  gfc_add_expr_to_block (&null, error);

  /* When POINTER is not NULL, we free it.  */
  gfc_start_block (&non_null);
  
  /* Free allocatable components.  */
  if (ts.type == BT_DERIVED && ts.u.derived->attr.alloc_comp)
    {
      tmp = build_fold_indirect_ref_loc (input_location, pointer);
      tmp = gfc_deallocate_alloc_comp (ts.u.derived, tmp, 0);
      gfc_add_expr_to_block (&non_null, tmp);
    }
  else if (ts.type == BT_CLASS
	   && ts.u.derived->components->ts.u.derived->attr.alloc_comp)
    {
      tmp = build_fold_indirect_ref_loc (input_location, pointer);
      tmp = gfc_deallocate_alloc_comp (ts.u.derived->components->ts.u.derived,
				       tmp, 0);
      gfc_add_expr_to_block (&non_null, tmp);
    }
  
  tmp = build_call_expr_loc (input_location,
			     builtin_decl_explicit (BUILT_IN_FREE), 1,
			     fold_convert (pvoid_type_node, pointer));
  gfc_add_expr_to_block (&non_null, tmp);

  if (status != NULL_TREE && !integer_zerop (status))
    {
      /* We set STATUS to zero if it is present.  */
      tree status_type = TREE_TYPE (TREE_TYPE (status));
      tree cond2;

      cond2 = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
			       status, build_int_cst (TREE_TYPE (status), 0));
      tmp = fold_build2_loc (input_location, MODIFY_EXPR, status_type,
			     fold_build1_loc (input_location, INDIRECT_REF,
					      status_type, status),
			     build_int_cst (status_type, 0));
      tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond2,
			     tmp, build_empty_stmt (input_location));
      gfc_add_expr_to_block (&non_null, tmp);
    }

  return fold_build3_loc (input_location, COND_EXPR, void_type_node, cond,
			  gfc_finish_block (&null),
			  gfc_finish_block (&non_null));
}
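
The resulting tree mirrors the array variant shown earlier, with an extra
component walk in the non-NULL branch (a sketch; the error branch becomes an
empty statement when CAN_FAIL is set):

    if (pointer == NULL)
      {
        if (stat != NULL)
          *stat = 1;
        else
          runtime_error ("Attempt to DEALLOCATE unallocated '...'");
      }
    else
      {
        /* derived/class types: free allocatable components first */
        free (pointer);
        if (stat != NULL)
          *stat = 0;
      }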
Example #28
static void
mf_build_check_statement_for (tree base, tree limit,
                              gimple_stmt_iterator *instr_gsi,
                              location_t location, tree dirflag)
{
  gimple_stmt_iterator gsi;
  basic_block cond_bb, then_bb, join_bb;
  edge e;
  tree cond, t, u, v;
  tree mf_base;
  tree mf_elem;
  tree mf_limit;
  gimple g;
  gimple_seq seq, stmts;

  /* We first need to split the current basic block, and start altering
     the CFG.  This allows us to insert the statements we're about to
     construct into the right basic blocks.  */

  cond_bb = gimple_bb (gsi_stmt (*instr_gsi));
  gsi = *instr_gsi;
  gsi_prev (&gsi);
  if (! gsi_end_p (gsi))
    e = split_block (cond_bb, gsi_stmt (gsi));
  else
    e = split_block_after_labels (cond_bb);
  cond_bb = e->src;
  join_bb = e->dest;

  /* A recap at this point: join_bb is the basic block at whose head
     is the gimple statement for which this check expression is being
     built.  cond_bb is the (possibly new, synthetic) basic block the
     end of which will contain the cache-lookup code, and a
     conditional that jumps to the cache-miss code or, much more
     likely, over to join_bb.  */

  /* Create the bb that contains the cache-miss fallback block (mf_check).  */
  then_bb = create_empty_bb (cond_bb);
  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
  make_single_succ_edge (then_bb, join_bb, EDGE_FALLTHRU);

  /* Mark the pseudo-fallthrough edge from cond_bb to join_bb.  */
  e = find_edge (cond_bb, join_bb);
  e->flags = EDGE_FALSE_VALUE;
  e->count = cond_bb->count;
  e->probability = REG_BR_PROB_BASE;

  /* Update dominance info.  Note that bb_join's data was
     updated by split_block.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
      set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb);
    }

  /* Update loop info.  */
  if (current_loops)
    add_bb_to_loop (then_bb, cond_bb->loop_father);

  /* Build our local variables.  */
  mf_elem = create_tmp_reg (mf_cache_structptr_type, "__mf_elem");
  mf_base = create_tmp_reg (mf_uintptr_type, "__mf_base");
  mf_limit = create_tmp_reg (mf_uintptr_type, "__mf_limit");

  /* Build: __mf_base = (uintptr_t) <base address expression>.  */
  seq = NULL;
  t = fold_convert_loc (location, mf_uintptr_type,
			unshare_expr (base));
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_base, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build: __mf_limit = (uintptr_t) <limit address expression>.  */
  t = fold_convert_loc (location, mf_uintptr_type,
			unshare_expr (limit));
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_limit, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build: __mf_elem = &__mf_lookup_cache [(__mf_base >> __mf_shift)
                                            & __mf_mask].  */
  t = build2 (RSHIFT_EXPR, mf_uintptr_type, mf_base,
              flag_mudflap_threads ? mf_cache_shift_decl
	       : mf_cache_shift_decl_l);
  t = build2 (BIT_AND_EXPR, mf_uintptr_type, t,
              flag_mudflap_threads ? mf_cache_mask_decl
	       : mf_cache_mask_decl_l);
  t = build4 (ARRAY_REF,
              TREE_TYPE (TREE_TYPE (mf_cache_array_decl)),
              mf_cache_array_decl, t, NULL_TREE, NULL_TREE);
  t = build1 (ADDR_EXPR, mf_cache_structptr_type, t);
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_elem, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Quick validity check.

     if (__mf_elem->low > __mf_base
         || (__mf_elem->high < __mf_limit))
        {
          __mf_check ();
          ... and only if single-threaded:
          __mf_lookup_shift_l = ...;
          __mf_lookup_mask_l = ...;
        }

     It is expected that this body of code is rarely executed so we mark
     the edge to the THEN clause of the conditional jump as unlikely.  */

  /* Construct t <-- '__mf_elem->low  > __mf_base'.  */
  t = build3 (COMPONENT_REF, mf_uintptr_type,
              build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem),
              TYPE_FIELDS (mf_cache_struct_type), NULL_TREE);
  t = build2 (GT_EXPR, boolean_type_node, t, mf_base);

  /* Construct '__mf_elem->high < __mf_limit'.

     First build:
        1) u <--  '__mf_elem->high'
        2) v <--  '__mf_limit'.

     Then build 'u <-- (u < v)'.  */

  u = build3 (COMPONENT_REF, mf_uintptr_type,
              build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem),
              DECL_CHAIN (TYPE_FIELDS (mf_cache_struct_type)), NULL_TREE);

  v = mf_limit;

  u = build2 (LT_EXPR, boolean_type_node, u, v);

  /* Build the composed conditional: t <-- 't || u'.  Then store the
     result of the evaluation of 't' in a temporary variable which we
     can use as the condition for the conditional jump.  */
  t = build2 (TRUTH_OR_EXPR, boolean_type_node, t, u);
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  cond = create_tmp_reg (boolean_type_node, "__mf_unlikely_cond");
  g = gimple_build_assign (cond, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build the conditional jump.  'cond' is just a temporary so we can
     simply build a void COND_EXPR.  We do need labels in both arms though.  */
  g = gimple_build_cond (NE_EXPR, cond, boolean_false_node, NULL_TREE,
			 NULL_TREE);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* At this point, after so much hard work, we have only constructed
     the conditional jump,

     if (__mf_elem->low > __mf_base
         || (__mf_elem->high < __mf_limit))

     The lowered GIMPLE tree representing this code is in the statement
     list starting at 'head'.

     We can insert this now in the current basic block, i.e. the one that
     the statement we're instrumenting was originally in.  */
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);

  /*  Now build up the body of the cache-miss handling:

     __mf_check();
     refresh *_l vars.

     This is the body of the conditional.  */

  seq = NULL;
  /* u is a string, so it is already a gimple value.  */
  u = mf_file_function_line_tree (location);
  /* NB: we pass the overall [base..limit] range to mf_check.  */
  v = fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type,
		   fold_build2_loc (location,
				MINUS_EXPR, mf_uintptr_type, mf_limit, mf_base),
		   build_int_cst (mf_uintptr_type, 1));
  v = force_gimple_operand (v, &stmts, true, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_call (mf_check_fndecl, 4, mf_base, v, dirflag, u);
  gimple_seq_add_stmt (&seq, g);

  if (! flag_mudflap_threads)
    {
      if (stmt_ends_bb_p (g))
	{
	  gsi = gsi_start_bb (then_bb);
	  gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);
	  e = split_block (then_bb, g);
	  then_bb = e->dest;
	  seq = NULL;
	}

      g = gimple_build_assign (mf_cache_shift_decl_l, mf_cache_shift_decl);
      gimple_seq_add_stmt (&seq, g);

      g = gimple_build_assign (mf_cache_mask_decl_l, mf_cache_mask_decl);
      gimple_seq_add_stmt (&seq, g);
    }

  /* Insert the check code in the THEN block.  */
  gsi = gsi_start_bb (then_bb);
  gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);

  *instr_gsi = gsi_start_bb (join_bb);
}
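
In C terms, the sequence this function assembles is roughly the following (a
sketch; the low/high cache-entry fields and the __mf_* names come from the
comments above, and __mf_check receives the inclusive range length):

    __mf_base  = (uintptr_t) base;
    __mf_limit = (uintptr_t) limit;
    __mf_elem  = &__mf_lookup_cache[(__mf_base >> __mf_shift) & __mf_mask];
    if (__mf_elem->low > __mf_base || __mf_elem->high < __mf_limit)
      {
        __mf_check ((void *) __mf_base, __mf_limit - __mf_base + 1,
                    dirflag, "file:line (function)");
        /* single-threaded builds also refresh the _l shadow copies:  */
        __mf_lookup_shift_l = __mf_shift;
        __mf_lookup_mask_l  = __mf_mask;
      }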
Example #29
static void
mf_xform_derefs_1 (gimple_stmt_iterator *iter, tree *tp,
                   location_t location, tree dirflag)
{
  tree type, base, limit, addr, size, t;

  /* Don't instrument read operations.  */
  if (dirflag == integer_zero_node && flag_mudflap_ignore_reads)
    return;

  /* Don't instrument marked nodes.  */
  if (mf_marked_p (*tp))
    return;

  t = *tp;
  type = TREE_TYPE (t);

  if (type == error_mark_node)
    return;

  size = TYPE_SIZE_UNIT (type);

  switch (TREE_CODE (t))
    {
    case ARRAY_REF:
    case COMPONENT_REF:
      {
        /* This is trickier than it may first appear.  The reason is
           that we are looking at expressions from the "inside out" at
           this point.  We may have a complex nested aggregate/array
           expression (e.g. "a.b[i].c"), maybe with an indirection as
           the leftmost operator ("p->a.b.d"), where instrumentation
           is necessary.  Or we may have an innocent "a.b.c"
           expression that must not be instrumented.  We need to
           recurse all the way down the nesting structure to figure it
           out: looking just at the outer node is not enough.  */
        tree var;
        int component_ref_only = (TREE_CODE (t) == COMPONENT_REF);
	/* If we have a bitfield component reference, we must note the
	   innermost addressable object in ELT, from which we will
	   construct the byte-addressable bounds of the bitfield.  */
	tree elt = NULL_TREE;
	int bitfield_ref_p = (TREE_CODE (t) == COMPONENT_REF
			      && DECL_BIT_FIELD_TYPE (TREE_OPERAND (t, 1)));

        /* Iterate to the top of the ARRAY_REF/COMPONENT_REF
           containment hierarchy to find the outermost VAR_DECL.  */
        var = TREE_OPERAND (t, 0);
        while (1)
          {
	    if (bitfield_ref_p && elt == NULL_TREE
		&& (TREE_CODE (var) == ARRAY_REF
		    || TREE_CODE (var) == COMPONENT_REF))
	      elt = var;

            if (TREE_CODE (var) == ARRAY_REF)
              {
                component_ref_only = 0;
                var = TREE_OPERAND (var, 0);
              }
            else if (TREE_CODE (var) == COMPONENT_REF)
              var = TREE_OPERAND (var, 0);
            else if (INDIRECT_REF_P (var)
		     || TREE_CODE (var) == MEM_REF)
              {
		base = TREE_OPERAND (var, 0);
                break;
              }
            else if (TREE_CODE (var) == VIEW_CONVERT_EXPR)
	      {
		var = TREE_OPERAND (var, 0);
		if (CONSTANT_CLASS_P (var)
		    && TREE_CODE (var) != STRING_CST)
		  return;
	      }
            else
              {
                gcc_assert (TREE_CODE (var) == VAR_DECL
                            || TREE_CODE (var) == PARM_DECL
                            || TREE_CODE (var) == RESULT_DECL
                            || TREE_CODE (var) == STRING_CST);
                /* Don't instrument this access if the underlying
                   variable is not "eligible".  This test matches
                   those arrays that have only known-valid indexes,
                   and thus are not labeled TREE_ADDRESSABLE.  */
                if (! mf_decl_eligible_p (var) || component_ref_only)
                  return;
                else
		  {
		    base = build1 (ADDR_EXPR,
				   build_pointer_type (TREE_TYPE (var)), var);
		    break;
		  }
              }
          }

        /* Handle the case of ordinary non-indirection structure
           accesses.  These have only nested COMPONENT_REF nodes (no
           INDIRECT_REF), but pass through the above filter loop.
           Note that it's possible for such a struct variable to match
           the eligible_p test because someone else might take its
           address sometime.  */

        /* We need special processing for bitfield components, because
           their addresses cannot be taken.  */
        if (bitfield_ref_p)
          {
            tree field = TREE_OPERAND (t, 1);

            if (TREE_CODE (DECL_SIZE_UNIT (field)) == INTEGER_CST)
              size = DECL_SIZE_UNIT (field);

	    if (elt)
	      elt = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (elt)),
			    elt);
            addr = fold_convert_loc (location, ptr_type_node, elt ? elt : base);
            addr = fold_build_pointer_plus_loc (location,
						addr, byte_position (field));
          }
        else
          addr = build1 (ADDR_EXPR, build_pointer_type (type), t);

        limit = fold_build2_loc (location, MINUS_EXPR, mf_uintptr_type,
                             fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type,
					  fold_convert (mf_uintptr_type, addr),
					  size),
                             integer_one_node);
      }
      break;

    case INDIRECT_REF:
      addr = TREE_OPERAND (t, 0);
      base = addr;
      limit = fold_build_pointer_plus_hwi_loc
	(location, fold_build_pointer_plus_loc (location, base, size), -1);
      break;

    case MEM_REF:
      if (addr_expr_of_non_mem_decl_p (TREE_OPERAND (t, 0)))
	return;

      addr = fold_build_pointer_plus_loc (location, TREE_OPERAND (t, 0),
					  TREE_OPERAND (t, 1));
      base = addr;
      limit = fold_build_pointer_plus_hwi_loc (location,
			   fold_build_pointer_plus_loc (location,
							base, size), -1);
      break;

    case TARGET_MEM_REF:
      if (addr_expr_of_non_mem_decl_p (TMR_BASE (t)))
	return;

      addr = tree_mem_ref_addr (ptr_type_node, t);
      base = addr;
      limit = fold_build_pointer_plus_hwi_loc (location,
			   fold_build_pointer_plus_loc (location,
							base, size), -1);
      break;

    case ARRAY_RANGE_REF:
      warning (OPT_Wmudflap,
	       "mudflap checking not yet implemented for ARRAY_RANGE_REF");
      return;

    case BIT_FIELD_REF:
      /* ??? merge with COMPONENT_REF code above? */
      {
        tree ofs, rem, bpu;

        /* If we're not dereferencing something, then the access
           must be ok.  */
        if (TREE_CODE (TREE_OPERAND (t, 0)) != INDIRECT_REF)
          return;

        bpu = bitsize_int (BITS_PER_UNIT);
        ofs = fold_convert (bitsizetype, TREE_OPERAND (t, 2));
        rem = size_binop_loc (location, TRUNC_MOD_EXPR, ofs, bpu);
        ofs = size_binop_loc (location, TRUNC_DIV_EXPR, ofs, bpu);

        size = fold_convert (bitsizetype, TREE_OPERAND (t, 1));
        size = size_binop_loc (location, PLUS_EXPR, size, rem);
        size = size_binop_loc (location, CEIL_DIV_EXPR, size, bpu);
        size = fold_convert (sizetype, size);

        addr = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
        addr = fold_convert (ptr_type_node, addr);
        addr = fold_build_pointer_plus_loc (location, addr, ofs);

        base = addr;
        limit = fold_build_pointer_plus_hwi_loc (location,
                             fold_build_pointer_plus_loc (location,
							  base, size), -1);
      }
      break;

    default:
      return;
    }

  mf_build_check_statement_for (base, limit, iter, location, dirflag);
}
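
Note that every instrumented case above hands mf_build_check_statement_for
the same inclusive byte range; the common computation, written in plain C:

    base  = addr;
    limit = (char *) addr + size - 1;   /* address of the last byte accessed */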
Example #30
tree
gfc_build_array_ref (tree base, tree offset, tree decl)
{
  tree type = TREE_TYPE (base);
  tree tmp;
  tree span;

  if (GFC_ARRAY_TYPE_P (type) && GFC_TYPE_ARRAY_RANK (type) == 0)
    {
      gcc_assert (GFC_TYPE_ARRAY_CORANK (type) > 0);

      return fold_convert (TYPE_MAIN_VARIANT (type), base);
    }

  /* Scalar coarray, there is nothing to do.  */
  if (TREE_CODE (type) != ARRAY_TYPE)
    {
      gcc_assert (decl == NULL_TREE);
      gcc_assert (integer_zerop (offset));
      return base;
    }

  type = TREE_TYPE (type);

  if (DECL_P (base))
    TREE_ADDRESSABLE (base) = 1;

  /* Strip NON_LVALUE_EXPR nodes.  */
  STRIP_TYPE_NOPS (offset);

  /* If the array reference is to a pointer, whose target contains a
     subreference, use the span that is stored with the backend decl
     and reference the element with pointer arithmetic.  */
  if (decl && (TREE_CODE (decl) == FIELD_DECL
		 || TREE_CODE (decl) == VAR_DECL
		 || TREE_CODE (decl) == PARM_DECL)
	&& ((GFC_DECL_SUBREF_ARRAY_P (decl)
	      && !integer_zerop (GFC_DECL_SPAN (decl)))
	   || GFC_DECL_CLASS (decl)))
    {
      if (GFC_DECL_CLASS (decl))
	{
	  /* Allow for dummy arguments and other good things.  */
	  if (POINTER_TYPE_P (TREE_TYPE (decl)))
	    decl = build_fold_indirect_ref_loc (input_location, decl);

	  /* Check if '_data' is an array descriptor. If it is not,
	     the array must be one of the components of the class object,
	     so return a normal array reference.  */
	  if (!GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (gfc_class_data_get (decl))))
	    return build4_loc (input_location, ARRAY_REF, type, base,
			       offset, NULL_TREE, NULL_TREE);

	  span = gfc_vtable_size_get (decl);
	}
      else if (GFC_DECL_SUBREF_ARRAY_P (decl))
	span = GFC_DECL_SPAN (decl);
      else
	gcc_unreachable ();

      offset = fold_build2_loc (input_location, MULT_EXPR,
				gfc_array_index_type,
				offset, span);
      tmp = gfc_build_addr_expr (pvoid_type_node, base);
      tmp = fold_build_pointer_plus_loc (input_location, tmp, offset);
      tmp = fold_convert (build_pointer_type (type), tmp);
      if (!TYPE_STRING_FLAG (type))
	tmp = build_fold_indirect_ref_loc (input_location, tmp);
      return tmp;
    }
  else
    /* Otherwise use a straightforward array reference.  */
    return build4_loc (input_location, ARRAY_REF, type, base, offset,
		       NULL_TREE, NULL_TREE);
}