Example #1
0
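/* Do C++-specific gimplification of *EXPR_P.  Arguments are as for
   gimplify_expr.  */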
int
cp_gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  int saved_stmts_are_full_exprs_p = 0;
  enum tree_code code = TREE_CODE (*expr_p);
  enum gimplify_status ret;

  if (STATEMENT_CODE_P (code))
    {
      saved_stmts_are_full_exprs_p = stmts_are_full_exprs_p ();
      current_stmt_tree ()->stmts_are_full_exprs_p
	= STMT_IS_FULL_EXPR_P (*expr_p);
    }

  switch (code)
    {
    case PTRMEM_CST:
      *expr_p = cplus_expand_constant (*expr_p);
      ret = GS_OK;
      break;

    case AGGR_INIT_EXPR:
      simplify_aggr_init_expr (expr_p);
      ret = GS_OK;
      break;

    case VEC_INIT_EXPR:
      {
	location_t loc = input_location;
	tree init = VEC_INIT_EXPR_INIT (*expr_p);
	int from_array = (init && TREE_CODE (TREE_TYPE (init)) == ARRAY_TYPE);
	gcc_assert (EXPR_HAS_LOCATION (*expr_p));
	input_location = EXPR_LOCATION (*expr_p);
	*expr_p = build_vec_init (VEC_INIT_EXPR_SLOT (*expr_p), NULL_TREE,
				  init, VEC_INIT_EXPR_VALUE_INIT (*expr_p),
				  from_array,
				  tf_warning_or_error);
	ret = GS_OK;
	input_location = loc;
      }
      break;

    case THROW_EXPR:
      /* FIXME communicate throw type to back end, probably by moving
	 THROW_EXPR into ../tree.def.  */
      *expr_p = TREE_OPERAND (*expr_p, 0);
      ret = GS_OK;
      break;

    case MUST_NOT_THROW_EXPR:
      ret = gimplify_must_not_throw_expr (expr_p, pre_p);
      break;

      /* We used to do this for MODIFY_EXPR as well, but that's unsafe; the
	 LHS of an assignment might also be involved in the RHS, as in bug
	 25979.  */
    case INIT_EXPR:
      cp_gimplify_init_expr (expr_p);
      if (TREE_CODE (*expr_p) != INIT_EXPR)
	return GS_OK;
      /* Otherwise fall through.  */
    case MODIFY_EXPR:
      {
	/* If the back end isn't clever enough to know that the lhs and rhs
	   types are the same, add an explicit conversion.  */
	tree op0 = TREE_OPERAND (*expr_p, 0);
	tree op1 = TREE_OPERAND (*expr_p, 1);

	if (!error_operand_p (op0)
	    && !error_operand_p (op1)
	    && (TYPE_STRUCTURAL_EQUALITY_P (TREE_TYPE (op0))
		|| TYPE_STRUCTURAL_EQUALITY_P (TREE_TYPE (op1)))
	    && !useless_type_conversion_p (TREE_TYPE (op1), TREE_TYPE (op0)))
	  TREE_OPERAND (*expr_p, 1) = build1 (VIEW_CONVERT_EXPR,
					      TREE_TYPE (op0), op1);

	else if ((is_gimple_lvalue (op1) || INDIRECT_REF_P (op1)
		  || (TREE_CODE (op1) == CONSTRUCTOR
		      && CONSTRUCTOR_NELTS (op1) == 0
		      && !TREE_CLOBBER_P (op1))
		  || (TREE_CODE (op1) == CALL_EXPR
		      && !CALL_EXPR_RETURN_SLOT_OPT (op1)))
		 && is_really_empty_class (TREE_TYPE (op0)))
	  {
	    /* Remove any copies of empty classes.  We check that the RHS
	       has a simple form so that TARGET_EXPRs and non-empty
	       CONSTRUCTORs get reduced properly, and we leave the return
	       slot optimization alone because it isn't a copy (FIXME so it
	       shouldn't be represented as one).

	       Also drop volatile variables on the RHS to avoid infinite
	       recursion from gimplify_expr trying to load the value.  */
	    if (!TREE_SIDE_EFFECTS (op1)
		|| (DECL_P (op1) && TREE_THIS_VOLATILE (op1)))
	      *expr_p = op0;
	    else if (TREE_CODE (op1) == MEM_REF
		     && TREE_THIS_VOLATILE (op1))
	      {
		/* Similarly for volatile MEM_REFs on the RHS.  */
		if (!TREE_SIDE_EFFECTS (TREE_OPERAND (op1, 0)))
		  *expr_p = op0;
		else
		  *expr_p = build2 (COMPOUND_EXPR, TREE_TYPE (*expr_p),
				    TREE_OPERAND (op1, 0), op0);
	      }
	    else
	      *expr_p = build2 (COMPOUND_EXPR, TREE_TYPE (*expr_p),
				op0, op1);
	  }
      }
      ret = GS_OK;
      break;

    case EMPTY_CLASS_EXPR:
      /* We create an empty CONSTRUCTOR with RECORD_TYPE.  */
      *expr_p = build_constructor (TREE_TYPE (*expr_p), NULL);
      ret = GS_OK;
      break;

    case BASELINK:
      *expr_p = BASELINK_FUNCTIONS (*expr_p);
      ret = GS_OK;
      break;

    case TRY_BLOCK:
      genericize_try_block (expr_p);
      ret = GS_OK;
      break;

    case HANDLER:
      genericize_catch_block (expr_p);
      ret = GS_OK;
      break;

    case EH_SPEC_BLOCK:
      genericize_eh_spec_block (expr_p);
      ret = GS_OK;
      break;

    case USING_STMT:
      gcc_unreachable ();

    case FOR_STMT:
      gimplify_for_stmt (expr_p, pre_p);
      ret = GS_OK;
      break;

    case WHILE_STMT:
      gimplify_while_stmt (expr_p, pre_p);
      ret = GS_OK;
      break;

    case DO_STMT:
      gimplify_do_stmt (expr_p, pre_p);
      ret = GS_OK;
      break;

    case SWITCH_STMT:
      gimplify_switch_stmt (expr_p, pre_p);
      ret = GS_OK;
      break;

    case OMP_FOR:
      ret = cp_gimplify_omp_for (expr_p, pre_p);
      break;

    case CONTINUE_STMT:
      gimple_seq_add_stmt (pre_p, gimple_build_predict (PRED_CONTINUE, NOT_TAKEN));
      gimple_seq_add_stmt (pre_p, gimple_build_goto (get_bc_label (bc_continue)));
      *expr_p = NULL_TREE;
      ret = GS_ALL_DONE;
      break;

    case BREAK_STMT:
      gimple_seq_add_stmt (pre_p, gimple_build_goto (get_bc_label (bc_break)));
      *expr_p = NULL_TREE;
      ret = GS_ALL_DONE;
      break;

    case EXPR_STMT:
      gimplify_expr_stmt (expr_p);
      ret = GS_OK;
      break;

    case UNARY_PLUS_EXPR:
      {
	tree arg = TREE_OPERAND (*expr_p, 0);
	tree type = TREE_TYPE (*expr_p);
	*expr_p = (TREE_TYPE (arg) != type) ? fold_convert (type, arg)
					    : arg;
	ret = GS_OK;
      }
      break;

    default:
      ret = (enum gimplify_status) c_gimplify_expr (expr_p, pre_p, post_p);
      break;
    }

  /* Restore saved state.  */
  if (STATEMENT_CODE_P (code))
    current_stmt_tree ()->stmts_are_full_exprs_p
      = saved_stmts_are_full_exprs_p;

  return ret;
}
Example #2
0
/* Synthesize a CALL_EXPR and a TRY_FINALLY_EXPR, for this chain of
   _DECLs if appropriate.  Arrange to call the __mf_register function
   now, and the __mf_unregister function later for each.  Return the
   gimple sequence after synthesis.  */
gimple_seq
mx_register_decls (tree decl, gimple_seq seq, location_t location)
{
  gimple_seq finally_stmts = NULL;
  gimple_stmt_iterator initially_stmts = gsi_start (seq);

  while (decl != NULL_TREE)
    {
      if (mf_decl_eligible_p (decl)
          /* Not already processed.  */
          && ! mf_marked_p (decl)
          /* Automatic variable.  */
          && ! DECL_EXTERNAL (decl)
          && ! TREE_STATIC (decl))
        {
          tree size = NULL_TREE, variable_name;
          gimple unregister_fncall, register_fncall;
	  tree unregister_fncall_param, register_fncall_param;

	  /* Variable-sized objects should have had their sizes gimplified
	     by the time we get here.  */
	  size = fold_convert (size_type_node,
			       TYPE_SIZE_UNIT (TREE_TYPE (decl)));
	  gcc_assert (is_gimple_val (size));


          unregister_fncall_param =
	    mf_mark (build1 (ADDR_EXPR,
			     build_pointer_type (TREE_TYPE (decl)),
			     decl));
          /* __mf_unregister (&VARIABLE, sizeof (VARIABLE), __MF_TYPE_STACK) */
          unregister_fncall = gimple_build_call (mf_unregister_fndecl, 3,
						 unregister_fncall_param,
						 size,
						 integer_three_node);


          variable_name = mf_varname_tree (decl);
          register_fncall_param =
	    mf_mark (build1 (ADDR_EXPR,
			     build_pointer_type (TREE_TYPE (decl)),
			     decl));
          /* __mf_register (&VARIABLE, sizeof (VARIABLE), __MF_TYPE_STACK,
	                    "name") */
	  register_fncall = gimple_build_call (mf_register_fndecl, 4,
					       register_fncall_param,
					       size,
					       integer_three_node,
					       variable_name);


          /* Accumulate the two calls.  */
	  gimple_set_location (register_fncall, location);
	  gimple_set_location (unregister_fncall, location);

          /* Add the __mf_register call at the current appending point.  */
          if (gsi_end_p (initially_stmts))
	    {
	      if (!mf_artificial (decl))
		warning (OPT_Wmudflap,
			 "mudflap cannot track %qE in stub function",
			 DECL_NAME (decl));
	    }
	  else
	    {
	      gsi_insert_before (&initially_stmts, register_fncall,
				 GSI_SAME_STMT);

	      /* Accumulate the FINALLY piece.  */
	      gimple_seq_add_stmt (&finally_stmts, unregister_fncall);
	    }
          mf_mark (decl);
        }

      decl = DECL_CHAIN (decl);
    }

  /* Actually, (initially_stmts!=NULL) <=> (finally_stmts!=NULL) */
  if (finally_stmts != NULL)
    {
      gimple stmt = gimple_build_try (seq, finally_stmts, GIMPLE_TRY_FINALLY);
      gimple_seq new_seq = NULL;

      gimple_seq_add_stmt (&new_seq, stmt);
      return new_seq;
    }
   else
    return seq;
}
Example #3
0
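/* Compute the base address and limit (address of the last accessed byte)
   of the memory reference *TP, accessed as described by DIRFLAG, and emit
   a mudflap bounds check for it before the statement at ITER.  */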
static void
mf_xform_derefs_1 (gimple_stmt_iterator *iter, tree *tp,
                   location_t location, tree dirflag)
{
  tree type, base, limit, addr, size, t;

  /* Don't instrument read operations.  */
  if (dirflag == integer_zero_node && flag_mudflap_ignore_reads)
    return;

  /* Don't instrument marked nodes.  */
  if (mf_marked_p (*tp))
    return;

  t = *tp;
  type = TREE_TYPE (t);

  if (type == error_mark_node)
    return;

  size = TYPE_SIZE_UNIT (type);

  switch (TREE_CODE (t))
    {
    case ARRAY_REF:
    case COMPONENT_REF:
      {
        /* This is trickier than it may first appear.  The reason is
           that we are looking at expressions from the "inside out" at
           this point.  We may have a complex nested aggregate/array
           expression (e.g. "a.b[i].c"), maybe with an indirection as
           the leftmost operator ("p->a.b.d"), where instrumentation
           is necessary.  Or we may have an innocent "a.b.c"
           expression that must not be instrumented.  We need to
           recurse all the way down the nesting structure to figure it
           out: looking just at the outer node is not enough.  */
        tree var;
        int component_ref_only = (TREE_CODE (t) == COMPONENT_REF);
	/* If we have a bitfield component reference, we must note the
	   innermost addressable object in ELT, from which we will
	   construct the byte-addressable bounds of the bitfield.  */
	tree elt = NULL_TREE;
	int bitfield_ref_p = (TREE_CODE (t) == COMPONENT_REF
			      && DECL_BIT_FIELD_TYPE (TREE_OPERAND (t, 1)));

        /* Iterate to the top of the ARRAY_REF/COMPONENT_REF
           containment hierarchy to find the outermost VAR_DECL.  */
        var = TREE_OPERAND (t, 0);
        while (1)
          {
	    if (bitfield_ref_p && elt == NULL_TREE
		&& (TREE_CODE (var) == ARRAY_REF
		    || TREE_CODE (var) == COMPONENT_REF))
	      elt = var;

            if (TREE_CODE (var) == ARRAY_REF)
              {
                component_ref_only = 0;
                var = TREE_OPERAND (var, 0);
              }
            else if (TREE_CODE (var) == COMPONENT_REF)
              var = TREE_OPERAND (var, 0);
            else if (INDIRECT_REF_P (var)
		     || TREE_CODE (var) == MEM_REF)
              {
		base = TREE_OPERAND (var, 0);
                break;
              }
            else if (TREE_CODE (var) == VIEW_CONVERT_EXPR)
	      {
		var = TREE_OPERAND (var, 0);
		if (CONSTANT_CLASS_P (var)
		    && TREE_CODE (var) != STRING_CST)
		  return;
	      }
            else
              {
                gcc_assert (TREE_CODE (var) == VAR_DECL
                            || TREE_CODE (var) == PARM_DECL
                            || TREE_CODE (var) == RESULT_DECL
                            || TREE_CODE (var) == STRING_CST);
                /* Don't instrument this access if the underlying
                   variable is not "eligible".  This test matches
                   those arrays that have only known-valid indexes,
                   and thus are not labeled TREE_ADDRESSABLE.  */
                if (! mf_decl_eligible_p (var) || component_ref_only)
                  return;
                else
		  {
		    base = build1 (ADDR_EXPR,
				   build_pointer_type (TREE_TYPE (var)), var);
		    break;
		  }
              }
          }

        /* Handle the case of ordinary non-indirection structure
           accesses.  These have only nested COMPONENT_REF nodes (no
           INDIRECT_REF), but pass through the above filter loop.
           Note that it's possible for such a struct variable to match
           the eligible_p test because someone else might take its
           address sometime.  */

        /* We need special processing for bitfield components, because
           their addresses cannot be taken.  */
        if (bitfield_ref_p)
          {
            tree field = TREE_OPERAND (t, 1);

            if (TREE_CODE (DECL_SIZE_UNIT (field)) == INTEGER_CST)
              size = DECL_SIZE_UNIT (field);

	    if (elt)
	      elt = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (elt)),
			    elt);
            addr = fold_convert_loc (location, ptr_type_node, elt ? elt : base);
            addr = fold_build_pointer_plus_loc (location,
						addr, byte_position (field));
          }
        else
          addr = build1 (ADDR_EXPR, build_pointer_type (type), t);

        limit = fold_build2_loc (location, MINUS_EXPR, mf_uintptr_type,
                             fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type,
					  fold_convert (mf_uintptr_type, addr),
					  size),
                             integer_one_node);
      }
      break;

    case INDIRECT_REF:
      addr = TREE_OPERAND (t, 0);
      base = addr;
      limit = fold_build_pointer_plus_hwi_loc
	(location, fold_build_pointer_plus_loc (location, base, size), -1);
      break;

    case MEM_REF:
      addr = fold_build_pointer_plus_loc (location, TREE_OPERAND (t, 0),
					  TREE_OPERAND (t, 1));
      base = addr;
      limit = fold_build_pointer_plus_hwi_loc (location,
			   fold_build_pointer_plus_loc (location,
							base, size), -1);
      break;

    case TARGET_MEM_REF:
      addr = tree_mem_ref_addr (ptr_type_node, t);
      base = addr;
      limit = fold_build_pointer_plus_hwi_loc (location,
			   fold_build_pointer_plus_loc (location,
							base, size), -1);
      break;

    case ARRAY_RANGE_REF:
      warning (OPT_Wmudflap,
	       "mudflap checking not yet implemented for ARRAY_RANGE_REF");
      return;

    case BIT_FIELD_REF:
      /* ??? merge with COMPONENT_REF code above? */
      {
        tree ofs, rem, bpu;

        /* If we're not dereferencing something, then the access
           must be ok.  */
        if (TREE_CODE (TREE_OPERAND (t, 0)) != INDIRECT_REF)
          return;

        bpu = bitsize_int (BITS_PER_UNIT);
        ofs = fold_convert (bitsizetype, TREE_OPERAND (t, 2));
        rem = size_binop_loc (location, TRUNC_MOD_EXPR, ofs, bpu);
        ofs = size_binop_loc (location, TRUNC_DIV_EXPR, ofs, bpu);

        size = fold_convert (bitsizetype, TREE_OPERAND (t, 1));
        size = size_binop_loc (location, PLUS_EXPR, size, rem);
        size = size_binop_loc (location, CEIL_DIV_EXPR, size, bpu);
        size = fold_convert (sizetype, size);

        addr = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
        addr = fold_convert (ptr_type_node, addr);
        addr = fold_build_pointer_plus_loc (location, addr, ofs);

        base = addr;
        limit = fold_build_pointer_plus_hwi_loc (location,
                             fold_build_pointer_plus_loc (location,
							  base, size), -1);
      }
      break;

    default:
      return;
    }

  mf_build_check_statement_for (base, limit, iter, location, dirflag);
}
Example #4
0
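/* Resolve the assignment(s) forming the body of an !$OMP ATOMIC construct
   at CODE, verifying that they have one of the forms permitted for the
   given atomic kind (READ, WRITE, UPDATE or CAPTURE) and diagnosing any
   violations.  */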
static void
resolve_omp_atomic (gfc_code *code)
{
  gfc_code *atomic_code = code;
  gfc_symbol *var;
  gfc_expr *expr2, *expr2_tmp;

  code = code->block->next;
  gcc_assert (code->op == EXEC_ASSIGN);
  gcc_assert ((atomic_code->ext.omp_atomic != GFC_OMP_ATOMIC_CAPTURE
	       && code->next == NULL)
	      || (atomic_code->ext.omp_atomic == GFC_OMP_ATOMIC_CAPTURE
		  && code->next != NULL
		  && code->next->op == EXEC_ASSIGN
		  && code->next->next == NULL));

  if (code->expr1->expr_type != EXPR_VARIABLE
      || code->expr1->symtree == NULL
      || code->expr1->rank != 0
      || (code->expr1->ts.type != BT_INTEGER
	  && code->expr1->ts.type != BT_REAL
	  && code->expr1->ts.type != BT_COMPLEX
	  && code->expr1->ts.type != BT_LOGICAL))
    {
      gfc_error ("!$OMP ATOMIC statement must set a scalar variable of "
		 "intrinsic type at %L", &code->loc);
      return;
    }

  var = code->expr1->symtree->n.sym;
  expr2 = is_conversion (code->expr2, false);
  if (expr2 == NULL)
    {
      if (atomic_code->ext.omp_atomic == GFC_OMP_ATOMIC_READ
	  || atomic_code->ext.omp_atomic == GFC_OMP_ATOMIC_WRITE)
	expr2 = is_conversion (code->expr2, true);
      if (expr2 == NULL)
	expr2 = code->expr2;
    }

  switch (atomic_code->ext.omp_atomic)
    {
    case GFC_OMP_ATOMIC_READ:
      if (expr2->expr_type != EXPR_VARIABLE
	  || expr2->symtree == NULL
	  || expr2->rank != 0
	  || (expr2->ts.type != BT_INTEGER
	      && expr2->ts.type != BT_REAL
	      && expr2->ts.type != BT_COMPLEX
	      && expr2->ts.type != BT_LOGICAL))
	gfc_error ("!$OMP ATOMIC READ statement must read from a scalar "
		   "variable of intrinsic type at %L", &expr2->where);
      return;
    case GFC_OMP_ATOMIC_WRITE:
      if (expr2->rank != 0 || expr_references_sym (code->expr2, var, NULL))
	gfc_error ("expr in !$OMP ATOMIC WRITE assignment var = expr "
		   "must be scalar and cannot reference var at %L",
		   &expr2->where);
      return;
    case GFC_OMP_ATOMIC_CAPTURE:
      expr2_tmp = expr2;
      if (expr2 == code->expr2)
	{
	  expr2_tmp = is_conversion (code->expr2, true);
	  if (expr2_tmp == NULL)
	    expr2_tmp = expr2;
	}
      if (expr2_tmp->expr_type == EXPR_VARIABLE)
	{
	  if (expr2_tmp->symtree == NULL
	      || expr2_tmp->rank != 0
	      || (expr2_tmp->ts.type != BT_INTEGER
		  && expr2_tmp->ts.type != BT_REAL
		  && expr2_tmp->ts.type != BT_COMPLEX
		  && expr2_tmp->ts.type != BT_LOGICAL)
	      || expr2_tmp->symtree->n.sym == var)
	    {
	      gfc_error ("!$OMP ATOMIC CAPTURE capture statement must read from "
			 "a scalar variable of intrinsic type at %L",
			 &expr2_tmp->where);
	      return;
	    }
	  var = expr2_tmp->symtree->n.sym;
	  code = code->next;
	  if (code->expr1->expr_type != EXPR_VARIABLE
	      || code->expr1->symtree == NULL
	      || code->expr1->rank != 0
	      || (code->expr1->ts.type != BT_INTEGER
		  && code->expr1->ts.type != BT_REAL
		  && code->expr1->ts.type != BT_COMPLEX
		  && code->expr1->ts.type != BT_LOGICAL))
	    {
	      gfc_error ("!$OMP ATOMIC CAPTURE update statement must set "
			 "a scalar variable of intrinsic type at %L",
			 &code->expr1->where);
	      return;
	    }
	  if (code->expr1->symtree->n.sym != var)
	    {
	      gfc_error ("!$OMP ATOMIC CAPTURE capture statement reads from "
			 "different variable than update statement writes "
			 "into at %L", &code->expr1->where);
	      return;
	    }
	  expr2 = is_conversion (code->expr2, false);
	  if (expr2 == NULL)
	    expr2 = code->expr2;
	}
      break;
    default:
      break;
    }

  if (expr2->expr_type == EXPR_OP)
    {
      gfc_expr *v = NULL, *e, *c;
      gfc_intrinsic_op op = expr2->value.op.op;
      gfc_intrinsic_op alt_op = INTRINSIC_NONE;

      switch (op)
	{
	case INTRINSIC_PLUS:
	  alt_op = INTRINSIC_MINUS;
	  break;
	case INTRINSIC_TIMES:
	  alt_op = INTRINSIC_DIVIDE;
	  break;
	case INTRINSIC_MINUS:
	  alt_op = INTRINSIC_PLUS;
	  break;
	case INTRINSIC_DIVIDE:
	  alt_op = INTRINSIC_TIMES;
	  break;
	case INTRINSIC_AND:
	case INTRINSIC_OR:
	  break;
	case INTRINSIC_EQV:
	  alt_op = INTRINSIC_NEQV;
	  break;
	case INTRINSIC_NEQV:
	  alt_op = INTRINSIC_EQV;
	  break;
	default:
	  gfc_error ("!$OMP ATOMIC assignment operator must be "
		     "+, *, -, /, .AND., .OR., .EQV. or .NEQV. at %L",
		     &expr2->where);
	  return;
	}

      /* Check for var = var op expr resp. var = expr op var where
	 expr doesn't reference var and var op expr is mathematically
	 equivalent to var op (expr) resp. expr op var equivalent to
	 (expr) op var.  We rely here on the fact that the matcher
	 for x op1 y op2 z where op1 and op2 have equal precedence
	 returns (x op1 y) op2 z.  */
      e = expr2->value.op.op2;
      if (e->expr_type == EXPR_VARIABLE
	  && e->symtree != NULL
	  && e->symtree->n.sym == var)
	v = e;
      else if ((c = is_conversion (e, true)) != NULL
	       && c->expr_type == EXPR_VARIABLE
	       && c->symtree != NULL
	       && c->symtree->n.sym == var)
	v = c;
      else
	{
	  gfc_expr **p = NULL, **q;
	  for (q = &expr2->value.op.op1; (e = *q) != NULL; )
	    if (e->expr_type == EXPR_VARIABLE
		&& e->symtree != NULL
		&& e->symtree->n.sym == var)
	      {
		v = e;
		break;
	      }
	    else if ((c = is_conversion (e, true)) != NULL)
	      q = &e->value.function.actual->expr;
	    else if (e->expr_type != EXPR_OP
		     || (e->value.op.op != op
			 && e->value.op.op != alt_op)
		     || e->rank != 0)
	      break;
	    else
	      {
		p = q;
		q = &e->value.op.op1;
	      }

	  if (v == NULL)
	    {
	      gfc_error ("!$OMP ATOMIC assignment must be var = var op expr "
			 "or var = expr op var at %L", &expr2->where);
	      return;
	    }

	  if (p != NULL)
	    {
	      e = *p;
	      switch (e->value.op.op)
		{
		case INTRINSIC_MINUS:
		case INTRINSIC_DIVIDE:
		case INTRINSIC_EQV:
		case INTRINSIC_NEQV:
		  gfc_error ("!$OMP ATOMIC var = var op expr not "
			     "mathematically equivalent to var = var op "
			     "(expr) at %L", &expr2->where);
		  break;
		default:
		  break;
		}

	      /* Canonicalize into var = var op (expr).  */
	      *p = e->value.op.op2;
	      e->value.op.op2 = expr2;
	      e->ts = expr2->ts;
	      if (code->expr2 == expr2)
		code->expr2 = expr2 = e;
	      else
		code->expr2->value.function.actual->expr = expr2 = e;

	      if (!gfc_compare_types (&expr2->value.op.op1->ts, &expr2->ts))
		{
		  for (p = &expr2->value.op.op1; *p != v;
		       p = &(*p)->value.function.actual->expr)
		    ;
		  *p = NULL;
		  gfc_free_expr (expr2->value.op.op1);
		  expr2->value.op.op1 = v;
		  gfc_convert_type (v, &expr2->ts, 2);
		}
	    }
	}

      if (e->rank != 0 || expr_references_sym (code->expr2, var, v))
	{
	  gfc_error ("expr in !$OMP ATOMIC assignment var = var op expr "
		     "must be scalar and cannot reference var at %L",
		     &expr2->where);
	  return;
	}
    }
  else if (expr2->expr_type == EXPR_FUNCTION
	   && expr2->value.function.isym != NULL
	   && expr2->value.function.esym == NULL
	   && expr2->value.function.actual != NULL
	   && expr2->value.function.actual->next != NULL)
    {
      gfc_actual_arglist *arg, *var_arg;

      switch (expr2->value.function.isym->id)
	{
	case GFC_ISYM_MIN:
	case GFC_ISYM_MAX:
	  break;
	case GFC_ISYM_IAND:
	case GFC_ISYM_IOR:
	case GFC_ISYM_IEOR:
	  if (expr2->value.function.actual->next->next != NULL)
	    {
	      gfc_error ("!$OMP ATOMIC assignment intrinsic IAND, IOR "
			 "or IEOR must have two arguments at %L",
			 &expr2->where);
	      return;
	    }
	  break;
	default:
	  gfc_error ("!$OMP ATOMIC assignment intrinsic must be "
		     "MIN, MAX, IAND, IOR or IEOR at %L",
		     &expr2->where);
	  return;
	}

      var_arg = NULL;
      for (arg = expr2->value.function.actual; arg; arg = arg->next)
	{
	  if ((arg == expr2->value.function.actual
	       || (var_arg == NULL && arg->next == NULL))
	      && arg->expr->expr_type == EXPR_VARIABLE
	      && arg->expr->symtree != NULL
	      && arg->expr->symtree->n.sym == var)
	    var_arg = arg;
	  else if (expr_references_sym (arg->expr, var, NULL))
	    gfc_error ("!$OMP ATOMIC intrinsic arguments except one must not "
		       "reference '%s' at %L", var->name, &arg->expr->where);
	  if (arg->expr->rank != 0)
	    gfc_error ("!$OMP ATOMIC intrinsic arguments must be scalar "
		       "at %L", &arg->expr->where);
	}

      if (var_arg == NULL)
	{
	  gfc_error ("First or last !$OMP ATOMIC intrinsic argument must "
		     "be '%s' at %L", var->name, &expr2->where);
	  return;
	}

      if (var_arg != expr2->value.function.actual)
	{
	  /* Canonicalize, so that var comes first.  */
	  gcc_assert (var_arg->next == NULL);
	  for (arg = expr2->value.function.actual;
	       arg->next != var_arg; arg = arg->next)
	    ;
	  var_arg->next = expr2->value.function.actual;
	  expr2->value.function.actual = var_arg;
	  arg->next = NULL;
	}
    }
  else
    gfc_error ("!$OMP ATOMIC assignment must have an operator or intrinsic "
	       "on right hand side at %L", &expr2->where);

  if (atomic_code->ext.omp_atomic == GFC_OMP_ATOMIC_CAPTURE && code->next)
    {
      code = code->next;
      if (code->expr1->expr_type != EXPR_VARIABLE
	  || code->expr1->symtree == NULL
	  || code->expr1->rank != 0
	  || (code->expr1->ts.type != BT_INTEGER
	      && code->expr1->ts.type != BT_REAL
	      && code->expr1->ts.type != BT_COMPLEX
	      && code->expr1->ts.type != BT_LOGICAL))
	{
	  gfc_error ("!$OMP ATOMIC CAPTURE capture statement must set "
		     "a scalar variable of intrinsic type at %L",
		     &code->expr1->where);
	  return;
	}

      expr2 = is_conversion (code->expr2, false);
      if (expr2 == NULL)
	{
	  expr2 = is_conversion (code->expr2, true);
	  if (expr2 == NULL)
	    expr2 = code->expr2;
	}

      if (expr2->expr_type != EXPR_VARIABLE
	  || expr2->symtree == NULL
	  || expr2->rank != 0
	  || (expr2->ts.type != BT_INTEGER
	      && expr2->ts.type != BT_REAL
	      && expr2->ts.type != BT_COMPLEX
	      && expr2->ts.type != BT_LOGICAL))
	{
	  gfc_error ("!$OMP ATOMIC CAPTURE capture statement must read "
		     "from a scalar variable of intrinsic type at %L",
		     &expr2->where);
	  return;
	}
      if (expr2->symtree->n.sym != var)
	{
	  gfc_error ("!$OMP ATOMIC CAPTURE capture statement reads from "
		     "different variable than update statement writes "
		     "into at %L", &expr2->where);
	  return;
	}
    }
}
Example #5
0
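/* Resolve the clauses of the OpenMP directive at CODE: check that the
   scalar clause expressions have the required type and rank, and verify
   the symbols listed on the data-sharing clauses, diagnosing objects that
   may not appear there.  */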
static void
resolve_omp_clauses (gfc_code *code)
{
  gfc_omp_clauses *omp_clauses = code->ext.omp_clauses;
  gfc_namelist *n;
  int list;
  static const char *clause_names[]
    = { "PRIVATE", "FIRSTPRIVATE", "LASTPRIVATE", "COPYPRIVATE", "SHARED",
	"COPYIN", "REDUCTION" };

  if (omp_clauses == NULL)
    return;

  if (omp_clauses->if_expr)
    {
      gfc_expr *expr = omp_clauses->if_expr;
      if (gfc_resolve_expr (expr) == FAILURE
	  || expr->ts.type != BT_LOGICAL || expr->rank != 0)
	gfc_error ("IF clause at %L requires a scalar LOGICAL expression",
		   &expr->where);
    }
  if (omp_clauses->final_expr)
    {
      gfc_expr *expr = omp_clauses->final_expr;
      if (gfc_resolve_expr (expr) == FAILURE
	  || expr->ts.type != BT_LOGICAL || expr->rank != 0)
	gfc_error ("FINAL clause at %L requires a scalar LOGICAL expression",
		   &expr->where);
    }
  if (omp_clauses->num_threads)
    {
      gfc_expr *expr = omp_clauses->num_threads;
      if (gfc_resolve_expr (expr) == FAILURE
	  || expr->ts.type != BT_INTEGER || expr->rank != 0)
	gfc_error ("NUM_THREADS clause at %L requires a scalar "
		   "INTEGER expression", &expr->where);
    }
  if (omp_clauses->chunk_size)
    {
      gfc_expr *expr = omp_clauses->chunk_size;
      if (gfc_resolve_expr (expr) == FAILURE
	  || expr->ts.type != BT_INTEGER || expr->rank != 0)
	gfc_error ("SCHEDULE clause's chunk_size at %L requires "
		   "a scalar INTEGER expression", &expr->where);
    }

  /* Check that no symbol appears on multiple clauses, except that
     a symbol can appear on both firstprivate and lastprivate.  */
  for (list = 0; list < OMP_LIST_NUM; list++)
    for (n = omp_clauses->lists[list]; n; n = n->next)
      {
	n->sym->mark = 0;
	if (n->sym->attr.flavor == FL_VARIABLE)
	  continue;
	if (n->sym->attr.flavor == FL_PROCEDURE
	    && n->sym->result == n->sym
	    && n->sym->attr.function)
	  {
	    if (gfc_current_ns->proc_name == n->sym
		|| (gfc_current_ns->parent
		    && gfc_current_ns->parent->proc_name == n->sym))
	      continue;
	    if (gfc_current_ns->proc_name->attr.entry_master)
	      {
		gfc_entry_list *el = gfc_current_ns->entries;
		for (; el; el = el->next)
		  if (el->sym == n->sym)
		    break;
		if (el)
		  continue;
	      }
	    if (gfc_current_ns->parent
		&& gfc_current_ns->parent->proc_name->attr.entry_master)
	      {
		gfc_entry_list *el = gfc_current_ns->parent->entries;
		for (; el; el = el->next)
		  if (el->sym == n->sym)
		    break;
		if (el)
		  continue;
	      }
	    if (n->sym->attr.proc_pointer)
	      continue;
	  }
	gfc_error ("Object '%s' is not a variable at %L", n->sym->name,
		   &code->loc);
      }

  for (list = 0; list < OMP_LIST_NUM; list++)
    if (list != OMP_LIST_FIRSTPRIVATE && list != OMP_LIST_LASTPRIVATE)
      for (n = omp_clauses->lists[list]; n; n = n->next)
	{
	  if (n->sym->mark)
	    gfc_error ("Symbol '%s' present on multiple clauses at %L",
		       n->sym->name, &code->loc);
	  else
	    n->sym->mark = 1;
	}

  gcc_assert (OMP_LIST_LASTPRIVATE == OMP_LIST_FIRSTPRIVATE + 1);
  for (list = OMP_LIST_FIRSTPRIVATE; list <= OMP_LIST_LASTPRIVATE; list++)
    for (n = omp_clauses->lists[list]; n; n = n->next)
      if (n->sym->mark)
	{
	  gfc_error ("Symbol '%s' present on multiple clauses at %L",
		     n->sym->name, &code->loc);
	  n->sym->mark = 0;
	}

  for (n = omp_clauses->lists[OMP_LIST_FIRSTPRIVATE]; n; n = n->next)
    {
      if (n->sym->mark)
	gfc_error ("Symbol '%s' present on multiple clauses at %L",
		   n->sym->name, &code->loc);
      else
	n->sym->mark = 1;
    }
  for (n = omp_clauses->lists[OMP_LIST_LASTPRIVATE]; n; n = n->next)
    n->sym->mark = 0;

  for (n = omp_clauses->lists[OMP_LIST_LASTPRIVATE]; n; n = n->next)
    {
      if (n->sym->mark)
	gfc_error ("Symbol '%s' present on multiple clauses at %L",
		   n->sym->name, &code->loc);
      else
	n->sym->mark = 1;
    }
  for (list = 0; list < OMP_LIST_NUM; list++)
    if ((n = omp_clauses->lists[list]) != NULL)
      {
	const char *name;

	if (list < OMP_LIST_REDUCTION_FIRST)
	  name = clause_names[list];
	else if (list <= OMP_LIST_REDUCTION_LAST)
	  name = clause_names[OMP_LIST_REDUCTION_FIRST];
	else
	  gcc_unreachable ();

	switch (list)
	  {
	  case OMP_LIST_COPYIN:
	    for (; n != NULL; n = n->next)
	      {
		if (!n->sym->attr.threadprivate)
		  gfc_error ("Non-THREADPRIVATE object '%s' in COPYIN clause"
			     " at %L", n->sym->name, &code->loc);
		if (n->sym->ts.type == BT_DERIVED && n->sym->ts.u.derived->attr.alloc_comp)
		  gfc_error ("COPYIN clause object '%s' at %L has ALLOCATABLE components",
			     n->sym->name, &code->loc);
	      }
	    break;
	  case OMP_LIST_COPYPRIVATE:
	    for (; n != NULL; n = n->next)
	      {
		if (n->sym->as && n->sym->as->type == AS_ASSUMED_SIZE)
		  gfc_error ("Assumed size array '%s' in COPYPRIVATE clause "
			     "at %L", n->sym->name, &code->loc);
		if (n->sym->ts.type == BT_DERIVED && n->sym->ts.u.derived->attr.alloc_comp)
		  gfc_error ("COPYPRIVATE clause object '%s' at %L has ALLOCATABLE components",
			     n->sym->name, &code->loc);
	      }
	    break;
	  case OMP_LIST_SHARED:
	    for (; n != NULL; n = n->next)
	      {
		if (n->sym->attr.threadprivate)
		  gfc_error ("THREADPRIVATE object '%s' in SHARED clause at "
			     "%L", n->sym->name, &code->loc);
		if (n->sym->attr.cray_pointee)
		  gfc_error ("Cray pointee '%s' in SHARED clause at %L",
			    n->sym->name, &code->loc);
	      }
	    break;
	  default:
	    for (; n != NULL; n = n->next)
	      {
		if (n->sym->attr.threadprivate)
		  gfc_error ("THREADPRIVATE object '%s' in %s clause at %L",
			     n->sym->name, name, &code->loc);
		if (n->sym->attr.cray_pointee)
		  gfc_error ("Cray pointee '%s' in %s clause at %L",
			    n->sym->name, name, &code->loc);
		if (list != OMP_LIST_PRIVATE)
		  {
		    if (n->sym->attr.pointer
			&& list >= OMP_LIST_REDUCTION_FIRST
			&& list <= OMP_LIST_REDUCTION_LAST)
		      gfc_error ("POINTER object '%s' in %s clause at %L",
				 n->sym->name, name, &code->loc);
		    /* Variables in REDUCTION-clauses must be of intrinsic type (flagged below).  */
		    if ((list < OMP_LIST_REDUCTION_FIRST || list > OMP_LIST_REDUCTION_LAST)
			 && n->sym->ts.type == BT_DERIVED
			 && n->sym->ts.u.derived->attr.alloc_comp)
		      gfc_error ("%s clause object '%s' has ALLOCATABLE components at %L",
				 name, n->sym->name, &code->loc);
		    if (n->sym->attr.cray_pointer
			&& list >= OMP_LIST_REDUCTION_FIRST
			&& list <= OMP_LIST_REDUCTION_LAST)
		      gfc_error ("Cray pointer '%s' in %s clause at %L",
				 n->sym->name, name, &code->loc);
		  }
		if (n->sym->as && n->sym->as->type == AS_ASSUMED_SIZE)
		  gfc_error ("Assumed size array '%s' in %s clause at %L",
			     n->sym->name, name, &code->loc);
		if (n->sym->attr.in_namelist
		    && (list < OMP_LIST_REDUCTION_FIRST
			|| list > OMP_LIST_REDUCTION_LAST))
		  gfc_error ("Variable '%s' in %s clause is used in "
			     "NAMELIST statement at %L",
			     n->sym->name, name, &code->loc);
		switch (list)
		  {
		  case OMP_LIST_PLUS:
		  case OMP_LIST_MULT:
		  case OMP_LIST_SUB:
		    if (!gfc_numeric_ts (&n->sym->ts))
		      gfc_error ("%c REDUCTION variable '%s' at %L must be of numeric type, got %s",
				 list == OMP_LIST_PLUS ? '+'
				 : list == OMP_LIST_MULT ? '*' : '-',
				 n->sym->name, &code->loc,
				 gfc_typename (&n->sym->ts));
		    break;
		  case OMP_LIST_AND:
		  case OMP_LIST_OR:
		  case OMP_LIST_EQV:
		  case OMP_LIST_NEQV:
		    if (n->sym->ts.type != BT_LOGICAL)
		      gfc_error ("%s REDUCTION variable '%s' must be LOGICAL "
				 "at %L",
				 list == OMP_LIST_AND ? ".AND."
				 : list == OMP_LIST_OR ? ".OR."
				 : list == OMP_LIST_EQV ? ".EQV." : ".NEQV.",
				 n->sym->name, &code->loc);
		    break;
		  case OMP_LIST_MAX:
		  case OMP_LIST_MIN:
		    if (n->sym->ts.type != BT_INTEGER
			&& n->sym->ts.type != BT_REAL)
		      gfc_error ("%s REDUCTION variable '%s' must be "
				 "INTEGER or REAL at %L",
				 list == OMP_LIST_MAX ? "MAX" : "MIN",
				 n->sym->name, &code->loc);
		    break;
		  case OMP_LIST_IAND:
		  case OMP_LIST_IOR:
		  case OMP_LIST_IEOR:
		    if (n->sym->ts.type != BT_INTEGER)
		      gfc_error ("%s REDUCTION variable '%s' must be INTEGER "
				 "at %L",
				 list == OMP_LIST_IAND ? "IAND"
				 : list == OMP_LIST_MULT ? "IOR" : "IEOR",
				 n->sym->name, &code->loc);
		    break;
		  /* Workaround for PR middle-end/26316, nothing really needs
		     to be done here for OMP_LIST_PRIVATE.  */
		  case OMP_LIST_PRIVATE:
		    gcc_assert (code->op != EXEC_NOP);
		  default:
		    break;
		  }
	      }
	    break;
	  }
      }
}
Example #6
0
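/* Process the attributes listed in ATTRIBUTES and install them in *NODE,
   which is either a DECL (including a TYPE_DECL) or a TYPE.  FLAGS is a
   bitwise OR of flags from enum attribute_flags.  Returns a list of
   attributes that could not be applied here and should be tried again
   later.  */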
tree
decl_attributes (tree *node, tree attributes, int flags)
{
  tree a;
  tree returned_attrs = NULL_TREE;

  if (TREE_TYPE (*node) == error_mark_node)
    return NULL_TREE;

  if (!attributes_initialized)
    init_attributes ();

  /* If this is a function and the user used #pragma GCC optimize, add the
     options to the attribute((optimize(...))) list.  */
  if (TREE_CODE (*node) == FUNCTION_DECL && current_optimize_pragma)
    {
      tree cur_attr = lookup_attribute ("optimize", attributes);
      tree opts = copy_list (current_optimize_pragma);

      if (! cur_attr)
	attributes
	  = tree_cons (get_identifier ("optimize"), opts, attributes);
      else
	TREE_VALUE (cur_attr) = chainon (opts, TREE_VALUE (cur_attr));
    }

  if (TREE_CODE (*node) == FUNCTION_DECL
      && optimization_current_node != optimization_default_node
      && !DECL_FUNCTION_SPECIFIC_OPTIMIZATION (*node))
    DECL_FUNCTION_SPECIFIC_OPTIMIZATION (*node) = optimization_current_node;

  /* If this is a function and the user used #pragma GCC target, add the
     options to the attribute((target(...))) list.  */
  if (TREE_CODE (*node) == FUNCTION_DECL
      && current_target_pragma
      && targetm.target_option.valid_attribute_p (*node, NULL_TREE,
						  current_target_pragma, 0))
    {
      tree cur_attr = lookup_attribute ("target", attributes);
      tree opts = copy_list (current_target_pragma);

      if (! cur_attr)
	attributes = tree_cons (get_identifier ("target"), opts, attributes);
      else
	TREE_VALUE (cur_attr) = chainon (opts, TREE_VALUE (cur_attr));
    }

  targetm.insert_attributes (*node, &attributes);

  for (a = attributes; a; a = TREE_CHAIN (a))
    {
      tree name = TREE_PURPOSE (a);
      tree args = TREE_VALUE (a);
      tree *anode = node;
      const struct attribute_spec *spec = lookup_attribute_spec (name);
      bool no_add_attrs = false;
      tree fn_ptr_tmp = NULL_TREE;

      if (spec == NULL)
	{
	  warning (OPT_Wattributes, "%qs attribute directive ignored",
		   IDENTIFIER_POINTER (name));
	  continue;
	}
      else if (list_length (args) < spec->min_length
	       || (spec->max_length >= 0
		   && list_length (args) > spec->max_length))
	{
	  error ("wrong number of arguments specified for %qs attribute",
		 IDENTIFIER_POINTER (name));
	  continue;
	}
      gcc_assert (is_attribute_p (spec->name, name));

      /* If this is a lock attribute and the purpose field of the args is
         an error_mark_node, the attribute arguments have not been parsed yet
         (as we delay the parsing of the attribute arguments until after the
         whole class has been parsed). So don't handle this attribute now
         but simply replace the error_mark_node with the current decl node
         (which we will need when we call this routine again later).  */
      if (args
          && TREE_PURPOSE (args) == error_mark_node
          && is_lock_attribute_with_args (name))
        {
          TREE_PURPOSE (args) = *node;
          continue;
        }

      if (spec->decl_required && !DECL_P (*anode))
	{
	  if (flags & ((int) ATTR_FLAG_DECL_NEXT
		       | (int) ATTR_FLAG_FUNCTION_NEXT
		       | (int) ATTR_FLAG_ARRAY_NEXT))
	    {
	      /* Pass on this attribute to be tried again.  */
	      returned_attrs = tree_cons (name, args, returned_attrs);
	      continue;
	    }
	  else
	    {
	      warning (OPT_Wattributes, "%qs attribute does not apply to types",
		       IDENTIFIER_POINTER (name));
	      continue;
	    }
	}

      /* If we require a type, but were passed a decl, set up to make a
	 new type and update the one in the decl.  ATTR_FLAG_TYPE_IN_PLACE
	 would have applied if we'd been passed a type, but we cannot modify
	 the decl's type in place here.  */
      if (spec->type_required && DECL_P (*anode))
	{
	  anode = &TREE_TYPE (*anode);
	  /* Allow ATTR_FLAG_TYPE_IN_PLACE for the type's naming decl.  */
	  if (!(TREE_CODE (*anode) == TYPE_DECL
		&& *anode == TYPE_NAME (TYPE_MAIN_VARIANT
					(TREE_TYPE (*anode)))))
	    flags &= ~(int) ATTR_FLAG_TYPE_IN_PLACE;
	}

      if (spec->function_type_required && TREE_CODE (*anode) != FUNCTION_TYPE
	  && TREE_CODE (*anode) != METHOD_TYPE)
	{
	  if (TREE_CODE (*anode) == POINTER_TYPE
	      && (TREE_CODE (TREE_TYPE (*anode)) == FUNCTION_TYPE
		  || TREE_CODE (TREE_TYPE (*anode)) == METHOD_TYPE))
	    {
	      /* OK, this is a bit convoluted.  We can't just make a copy
		 of the pointer type and modify its TREE_TYPE, because if
		 we change the attributes of the target type the pointer
		 type needs to have a different TYPE_MAIN_VARIANT.  So we
		 pull out the target type now, frob it as appropriate, and
		 rebuild the pointer type later.

		 This would all be simpler if attributes were part of the
		 declarator, grumble grumble.  */
	      fn_ptr_tmp = TREE_TYPE (*anode);
	      anode = &fn_ptr_tmp;
	      flags &= ~(int) ATTR_FLAG_TYPE_IN_PLACE;
	    }
	  else if (flags & (int) ATTR_FLAG_FUNCTION_NEXT)
	    {
	      /* Pass on this attribute to be tried again.  */
	      returned_attrs = tree_cons (name, args, returned_attrs);
	      continue;
	    }

	  if (TREE_CODE (*anode) != FUNCTION_TYPE
	      && TREE_CODE (*anode) != METHOD_TYPE)
	    {
	      warning (OPT_Wattributes,
		       "%qs attribute only applies to function types",
		       IDENTIFIER_POINTER (name));
	      continue;
	    }
	}

      if (TYPE_P (*anode)
	  && (flags & (int) ATTR_FLAG_TYPE_IN_PLACE)
	  && TYPE_SIZE (*anode) != NULL_TREE)
	{
	  warning (OPT_Wattributes, "type attributes ignored after type is already defined");
	  continue;
	}

      if (spec->handler != NULL)
        {
          tree ret_attr = (*spec->handler) (anode, name, args,
                                            flags, &no_add_attrs);
          if (ret_attr)
            {
              /* For the lock attributes whose arguments (i.e. locks) are not
                 supported or the names are not in scope, we would demote the
                 attributes. For example, if 'foo' is not in scope in the
                 attribute "guarded_by(foo->lock), the attribute would be
                 downgraded to a "guarded" attribute. And in this case, the
                 handler would return the new, demoted attribute which is
                 appended to the current one so that it is handled in the next
                 iteration.  */
              if (is_lock_attribute_with_args (name))
                {
                  gcc_assert (no_add_attrs);
                  TREE_CHAIN (ret_attr) = TREE_CHAIN (a);
                  TREE_CHAIN (a) = ret_attr;
                  continue;
                }
              else
                returned_attrs = chainon (ret_attr, returned_attrs);
            }
        }

      /* Layout the decl in case anything changed.  */
      if (spec->type_required && DECL_P (*node)
	  && (TREE_CODE (*node) == VAR_DECL
	      || TREE_CODE (*node) == PARM_DECL
	      || TREE_CODE (*node) == RESULT_DECL))
	relayout_decl (*node);

      if (!no_add_attrs)
	{
	  tree old_attrs;
	  tree a;

	  if (DECL_P (*anode))
	    old_attrs = DECL_ATTRIBUTES (*anode);
	  else
	    old_attrs = TYPE_ATTRIBUTES (*anode);

	  for (a = lookup_attribute (spec->name, old_attrs);
	       a != NULL_TREE;
	       a = lookup_attribute (spec->name, TREE_CHAIN (a)))
	    {
	      if (simple_cst_equal (TREE_VALUE (a), args) == 1)
		break;
              /* If a lock attribute of the same kind is already on the decl,
                 don't add this one again. Instead, merge the arguments.  */
              if (is_lock_attribute_with_args (name))
                {
                  merge_lock_attr_args (a, args);
                  break;
                }
	    }

	  if (a == NULL_TREE)
	    {
	      /* This attribute isn't already in the list.  */
	      if (DECL_P (*anode))
		DECL_ATTRIBUTES (*anode) = tree_cons (name, args, old_attrs);
	      else if (flags & (int) ATTR_FLAG_TYPE_IN_PLACE)
		{
		  TYPE_ATTRIBUTES (*anode) = tree_cons (name, args, old_attrs);
		  /* If this is the main variant, also push the attributes
		     out to the other variants.  */
		  if (*anode == TYPE_MAIN_VARIANT (*anode))
		    {
		      tree variant;
		      for (variant = *anode; variant;
			   variant = TYPE_NEXT_VARIANT (variant))
			{
			  if (TYPE_ATTRIBUTES (variant) == old_attrs)
			    TYPE_ATTRIBUTES (variant)
			      = TYPE_ATTRIBUTES (*anode);
			  else if (!lookup_attribute
				   (spec->name, TYPE_ATTRIBUTES (variant)))
			    TYPE_ATTRIBUTES (variant) = tree_cons
			      (name, args, TYPE_ATTRIBUTES (variant));
			}
		    }
		}
	      else
		*anode = build_type_attribute_variant (*anode,
						       tree_cons (name, args,
								  old_attrs));
	    }
	}

      if (fn_ptr_tmp)
	{
	  /* Rebuild the function pointer type and put it in the
	     appropriate place.  */
	  fn_ptr_tmp = build_pointer_type (fn_ptr_tmp);
	  if (DECL_P (*node))
	    TREE_TYPE (*node) = fn_ptr_tmp;
	  else
	    {
	      gcc_assert (TREE_CODE (*node) == POINTER_TYPE);
	      *node = fn_ptr_tmp;
	    }
	}
    }

  return returned_attrs;
}
Example #7
0
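/* Replace the variable VNODE with PREVAILING_NODE in the symbol table,
   merging their flags, diagnosing incompatible TLS models, and finally
   removing VNODE.  */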
static void
lto_varpool_replace_node (varpool_node *vnode,
			  varpool_node *prevailing_node)
{
  gcc_assert (!vnode->definition || prevailing_node->definition);
  gcc_assert (!vnode->analyzed || prevailing_node->analyzed);

  prevailing_node->clone_referring (vnode);
  if (vnode->force_output)
    prevailing_node->force_output = true;
  if (vnode->forced_by_abi)
    prevailing_node->forced_by_abi = true;

  /* Be sure we can garbage collect the initializer.  */
  if (DECL_INITIAL (vnode->decl)
      && vnode->decl != prevailing_node->decl)
    DECL_INITIAL (vnode->decl) = error_mark_node;

  /* Check and report ODR violations on virtual tables.  */
  if (DECL_VIRTUAL_P (vnode->decl) || DECL_VIRTUAL_P (prevailing_node->decl))
    compare_virtual_tables (prevailing_node, vnode);

  if (vnode->tls_model != prevailing_node->tls_model)
    {
      bool error = false;

      /* Non-TLS and TLS never mix together.  Also emulated model is not
	 compatible with anything else.  */
      if (prevailing_node->tls_model == TLS_MODEL_NONE
	  || prevailing_node->tls_model == TLS_MODEL_EMULATED
	  || vnode->tls_model == TLS_MODEL_NONE
	  || vnode->tls_model == TLS_MODEL_EMULATED)
	error = true;
      /* Linked is silently supporting transitions
	 GD -> IE, GD -> LE, LD -> LE, IE -> LE, LD -> IE.
	 Do the same transitions and error out on others.  */
      else if ((prevailing_node->tls_model == TLS_MODEL_REAL
		|| prevailing_node->tls_model == TLS_MODEL_LOCAL_DYNAMIC)
	       && (vnode->tls_model == TLS_MODEL_INITIAL_EXEC
		   || vnode->tls_model == TLS_MODEL_LOCAL_EXEC))
	prevailing_node->tls_model = vnode->tls_model;
      else if ((vnode->tls_model == TLS_MODEL_REAL
		|| vnode->tls_model == TLS_MODEL_LOCAL_DYNAMIC)
	       && (prevailing_node->tls_model == TLS_MODEL_INITIAL_EXEC
		   || prevailing_node->tls_model == TLS_MODEL_LOCAL_EXEC))
	;
      else if (prevailing_node->tls_model == TLS_MODEL_INITIAL_EXEC
	       && vnode->tls_model == TLS_MODEL_LOCAL_EXEC)
	prevailing_node->tls_model = vnode->tls_model;
      else if (vnode->tls_model == TLS_MODEL_INITIAL_EXEC
	       && prevailing_node->tls_model == TLS_MODEL_LOCAL_EXEC)
	;
      else
	error = true;
      if (error)
	{
	  error_at (DECL_SOURCE_LOCATION (vnode->decl),
		    "%qD is defined with tls model %s", vnode->decl, tls_model_names [vnode->tls_model]);
	  inform (DECL_SOURCE_LOCATION (prevailing_node->decl),
		  "previously defined here as %s",
		  tls_model_names [prevailing_node->tls_model]);
	}
    }
  /* Finally remove the replaced node.  */
  vnode->remove ();
}
Example #8
0
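/* Emit the instruction sequence needed to move a double-word value from
   OPERANDS[1] to OPERANDS[0], one word at a time, and return that
   sequence.  */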
rtx
fr30_move_double (rtx * operands)
{
  rtx src  = operands[1];
  rtx dest = operands[0];
  enum rtx_code src_code = GET_CODE (src);
  enum rtx_code dest_code = GET_CODE (dest);
  machine_mode mode = GET_MODE (dest);
  rtx val;

  start_sequence ();

  if (dest_code == REG)
    {
      if (src_code == REG)
	{
	  int reverse = (REGNO (dest) == REGNO (src) + 1);
	  
	  /* We normally copy the low-numbered register first.  However, if
	     the first register of operand 0 is the same as the second register
	     of operand 1, we must copy in the opposite order.  */
	  emit_insn (gen_rtx_SET (operand_subword (dest, reverse, TRUE, mode),
				  operand_subword (src,  reverse, TRUE, mode)));
	  
	  emit_insn
	    (gen_rtx_SET (operand_subword (dest, !reverse, TRUE, mode),
			  operand_subword (src,  !reverse, TRUE, mode)));
	}
      else if (src_code == MEM)
	{
	  rtx addr = XEXP (src, 0);
	  rtx dest0 = operand_subword (dest, 0, TRUE, mode);
	  rtx dest1 = operand_subword (dest, 1, TRUE, mode);
	  rtx new_mem;
	  
	  gcc_assert (GET_CODE (addr) == REG);
	  
	  /* Copy the address before clobbering it.  See PR 34174.  */
	  emit_insn (gen_rtx_SET (dest1, addr));
	  emit_insn (gen_rtx_SET (dest0, adjust_address (src, SImode, 0)));
	  emit_insn (gen_rtx_SET (dest1, plus_constant (SImode, dest1,
							UNITS_PER_WORD)));

	  new_mem = gen_rtx_MEM (SImode, dest1);
	  MEM_COPY_ATTRIBUTES (new_mem, src);
	      
	  emit_insn (gen_rtx_SET (dest1, new_mem));
	}
      else if (src_code == CONST_INT || src_code == CONST_DOUBLE)
	{
	  rtx words[2];
	  split_double (src, &words[0], &words[1]);
	  emit_insn (gen_rtx_SET (operand_subword (dest, 0, TRUE, mode),
				  words[0]));
      
	  emit_insn (gen_rtx_SET (operand_subword (dest, 1, TRUE, mode),
				  words[1]));
	}
    }
  else if (src_code == REG && dest_code == MEM)
    {
      rtx addr = XEXP (dest, 0);
      rtx src0;
      rtx src1;

      gcc_assert (GET_CODE (addr) == REG);

      src0 = operand_subword (src, 0, TRUE, mode);
      src1 = operand_subword (src, 1, TRUE, mode);

      emit_move_insn (adjust_address (dest, SImode, 0), src0);

      if (REGNO (addr) == STACK_POINTER_REGNUM
	  || REGNO (addr) == FRAME_POINTER_REGNUM)
	emit_insn (gen_rtx_SET (adjust_address (dest, SImode, UNITS_PER_WORD),
				src1));
      else
	{
	  rtx new_mem;
	  rtx scratch_reg_r0 = gen_rtx_REG (SImode, 0);

	  /* We need a scratch register to hold the value of 'address + 4'.
	     We use r0 for this purpose. It is used for example for long
	     jumps and is already marked to not be used by normal register
	     allocation.  */
	  emit_insn (gen_movsi_internal (scratch_reg_r0, addr));
	  emit_insn (gen_addsi_small_int (scratch_reg_r0, scratch_reg_r0,
					  GEN_INT (UNITS_PER_WORD)));
	  new_mem = gen_rtx_MEM (SImode, scratch_reg_r0);
	  MEM_COPY_ATTRIBUTES (new_mem, dest);
	  emit_move_insn (new_mem, src1);
	  emit_insn (gen_blockage ());
	}
    }
  else
    /* This should have been prevented by the constraints on movdi_insn.  */
    gcc_unreachable ();

  val = get_insns ();
  end_sequence ();

  return val;
}
Example #9
0
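/* Split the fallthrough predecessor of the exit block so that the final
   mode switch can be inserted before the copy of the return value into
   its hard register; return the resulting pre-exit block.  */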
static basic_block
create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
{
  edge eg;
  edge_iterator ei;
  basic_block pre_exit;

  /* The only non-call predecessor at this stage is a block with a
     fallthrough edge; there can be at most one, but there could be
     none at all, e.g. when exit is called.  */
  pre_exit = 0;
  FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    if (eg->flags & EDGE_FALLTHRU)
      {
	basic_block src_bb = eg->src;
	rtx_insn *last_insn;
	rtx ret_reg;

	gcc_assert (!pre_exit);
	/* If this function returns a value at the end, we have to
	   insert the final mode switch before the return value copy
	   to its hard register.  */
	if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
	    && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
	    && GET_CODE (PATTERN (last_insn)) == USE
	    && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
	  {
	    int ret_start = REGNO (ret_reg);
	    int nregs = REG_NREGS (ret_reg);
	    int ret_end = ret_start + nregs;
	    bool short_block = false;
	    bool multi_reg_return = false;
	    bool forced_late_switch = false;
	    rtx_insn *before_return_copy;

	    do
	      {
		rtx_insn *return_copy = PREV_INSN (last_insn);
		rtx return_copy_pat, copy_reg;
		int copy_start, copy_num;
		int j;

		if (NONDEBUG_INSN_P (return_copy))
		  {
		    /* When using SJLJ exceptions, the call to the
		       unregister function is inserted between the
		       clobber of the return value and the copy.
		       We do not want to split the block before this
		       or any other call; if we have not found the
		       copy yet, the copy must have been deleted.  */
		    if (CALL_P (return_copy))
		      {
			short_block = true;
			break;
		      }
		    return_copy_pat = PATTERN (return_copy);
		    switch (GET_CODE (return_copy_pat))
		      {
		      case USE:
			/* Skip USEs of multiple return registers.
			   __builtin_apply pattern is also handled here.  */
			if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
			    && (targetm.calls.function_value_regno_p
				(REGNO (XEXP (return_copy_pat, 0)))))
			  {
			    multi_reg_return = true;
			    last_insn = return_copy;
			    continue;
			  }
			break;

		      case ASM_OPERANDS:
			/* Skip barrier insns.  */
			if (!MEM_VOLATILE_P (return_copy_pat))
			  break;

			/* Fall through.  */

		      case ASM_INPUT:
		      case UNSPEC_VOLATILE:
			last_insn = return_copy;
			continue;

		      default:
			break;
		      }

		    /* If the return register is not (in its entirety)
		       likely spilled, the return copy might be
		       partially or completely optimized away.  */
		    return_copy_pat = single_set (return_copy);
		    if (!return_copy_pat)
		      {
			return_copy_pat = PATTERN (return_copy);
			if (GET_CODE (return_copy_pat) != CLOBBER)
			  break;
			else if (!optimize)
			  {
			    /* This might be (clobber (reg [<result>]))
			       when not optimizing.  Then check if
			       the previous insn is the clobber for
			       the return register.  */
			    copy_reg = SET_DEST (return_copy_pat);
			    if (GET_CODE (copy_reg) == REG
				&& !HARD_REGISTER_NUM_P (REGNO (copy_reg)))
			      {
				if (INSN_P (PREV_INSN (return_copy)))
				  {
				    return_copy = PREV_INSN (return_copy);
				    return_copy_pat = PATTERN (return_copy);
				    if (GET_CODE (return_copy_pat) != CLOBBER)
				      break;
				  }
			      }
			  }
		      }
		    copy_reg = SET_DEST (return_copy_pat);
		    if (GET_CODE (copy_reg) == REG)
		      copy_start = REGNO (copy_reg);
		    else if (GET_CODE (copy_reg) == SUBREG
			     && GET_CODE (SUBREG_REG (copy_reg)) == REG)
		      copy_start = REGNO (SUBREG_REG (copy_reg));
		    else
		      {
			/* When control reaches end of non-void function,
			   there are no return copy insns at all.  This
			   avoids an ICE on that invalid function.  */
			if (ret_start + nregs == ret_end)
			  short_block = true;
			break;
		      }
		    if (!targetm.calls.function_value_regno_p (copy_start))
		      copy_num = 0;
		    else
		      copy_num
			= hard_regno_nregs[copy_start][GET_MODE (copy_reg)];

		    /* If the return register is not likely spilled, - as is
		       the case for floating point on SH4 - then it might
		       be set by an arithmetic operation that needs a
		       different mode than the exit block.  */
		    for (j = n_entities - 1; j >= 0; j--)
		      {
			int e = entity_map[j];
			int mode =
			  targetm.mode_switching.needed (e, return_copy);

			if (mode != num_modes[e]
			    && mode != targetm.mode_switching.exit (e))
			  break;
		      }
		    if (j >= 0)
		      {
			/* __builtin_return emits a sequence of loads to all
			   return registers.  One of them might require
			   another mode than MODE_EXIT, even if it is
			   unrelated to the return value, so we want to put
			   the final mode switch after it.  */
			if (multi_reg_return
			    && targetm.calls.function_value_regno_p
			        (copy_start))
			  forced_late_switch = true;

			/* For the SH4, floating point loads depend on fpscr,
			   thus we might need to put the final mode switch
			   after the return value copy.  That is still OK,
			   because a floating point return value does not
			   conflict with address reloads.  */
			if (copy_start >= ret_start
			    && copy_start + copy_num <= ret_end
			    && OBJECT_P (SET_SRC (return_copy_pat)))
			  forced_late_switch = true;
			break;
		      }
		    if (copy_num == 0)
		      {
			last_insn = return_copy;
			continue;
		      }

		    if (copy_start >= ret_start
			&& copy_start + copy_num <= ret_end)
		      nregs -= copy_num;
		    else if (!multi_reg_return
			     || !targetm.calls.function_value_regno_p
				 (copy_start))
		      break;
		    last_insn = return_copy;
		  }
		/* ??? Exception handling can lead to the return value
		   copy being already separated from the return value use,
		   as in  unwind-dw2.c .
		   Similarly, conditionally returning without a value,
		   and conditionally using builtin_return can lead to an
		   isolated use.  */
		if (return_copy == BB_HEAD (src_bb))
		  {
		    short_block = true;
		    break;
		  }
		last_insn = return_copy;
	      }
	    while (nregs);

	    /* If we didn't see a full return value copy, verify that there
	       is a plausible reason for this.  If some, but not all of the
	       return register is likely spilled, we can expect that there
	       is a copy for the likely spilled part.  */
	    gcc_assert (!nregs
			|| forced_late_switch
			|| short_block
			|| !(targetm.class_likely_spilled_p
			     (REGNO_REG_CLASS (ret_start)))
			|| (nregs
			    != hard_regno_nregs[ret_start][GET_MODE (ret_reg)])
			/* For multi-hard-register floating point
		   	   values, sometimes the likely-spilled part
		   	   is ordinarily copied first, then the other
		   	   part is set with an arithmetic operation.
		   	   This doesn't actually cause reload
		   	   failures, so let it pass.  */
			|| (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
			    && nregs != 1));

	    if (!NOTE_INSN_BASIC_BLOCK_P (last_insn))
	      {
		before_return_copy
		  = emit_note_before (NOTE_INSN_DELETED, last_insn);
		/* Instructions preceding LAST_INSN in the same block might
		   require a different mode than MODE_EXIT, so if we might
		   have such instructions, keep them in a separate block
		   from pre_exit.  */
		src_bb = split_block (src_bb,
				      PREV_INSN (before_return_copy))->dest;
	      }
	    else
	      before_return_copy = last_insn;
	    pre_exit = split_block (src_bb, before_return_copy)->src;
	  }
	else
	  {
	    pre_exit = split_edge (eg);
	  }
      }

  return pre_exit;
}
Example #10
0
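/* Expand the function prologue as a series of separate insns.  */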
void
fr30_expand_prologue (void)
{
  int regno;
  rtx insn;

  if (! current_frame_info.initialised)
    fr30_compute_frame_size (0, 0);

  /* This case shouldn't happen.  Catch it now.  */
  gcc_assert (current_frame_info.total_size || !current_frame_info.gmask);

  /* Allocate space for register arguments if this is a variadic function.  */
  if (current_frame_info.pretend_size)
    {
      int regs_to_save = current_frame_info.pretend_size / UNITS_PER_WORD;
      
      /* Push argument registers into the pretend arg area.  */
      for (regno = FIRST_ARG_REGNUM + FR30_NUM_ARG_REGS; regno --, regs_to_save --;)
        {
	  insn = emit_insn (gen_movsi_push (gen_rtx_REG (Pmode, regno)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }

  if (current_frame_info.gmask)
    {
      /* Save any needed call-saved regs.  */
      for (regno = STACK_POINTER_REGNUM; regno--;)
	{
	  if ((current_frame_info.gmask & (1 << regno)) != 0)
	    {
	      insn = emit_insn (gen_movsi_push (gen_rtx_REG (Pmode, regno)));
	      RTX_FRAME_RELATED_P (insn) = 1;
	    }
	}
    }

  /* Save return address if necessary.  */
  if (current_frame_info.save_rp)
    {
      insn = emit_insn (gen_movsi_push (gen_rtx_REG (Pmode, 
      						     RETURN_POINTER_REGNUM)));
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Save old frame pointer and create new one, if necessary.  */
  if (current_frame_info.save_fp)
    {
      if (current_frame_info.frame_size < ((1 << 10) - UNITS_PER_WORD))
        {
	  int enter_size = current_frame_info.frame_size + UNITS_PER_WORD;
	  rtx pattern;
	  
	  insn = emit_insn (gen_enter_func (GEN_INT (enter_size)));
          RTX_FRAME_RELATED_P (insn) = 1;
	  
	  pattern = PATTERN (insn);
	  
	  /* Also mark all 3 subexpressions as RTX_FRAME_RELATED_P. */
          if (GET_CODE (pattern) == PARALLEL)
            {
              int x;
              for (x = XVECLEN (pattern, 0); x--;)
		{
		  rtx part = XVECEXP (pattern, 0, x);
		  
		  /* One of the insns in the ENTER pattern updates the
		     frame pointer.  If we do not actually need the frame
		     pointer in this function then this is a side effect
		     rather than a desired effect, so we do not mark that
		     insn as being related to the frame set up.  Doing this
		     allows us to compile the crash66.C test file in the
		     G++ testsuite.  */
		  if (! frame_pointer_needed
		      && GET_CODE (part) == SET
		      && SET_DEST (part) == hard_frame_pointer_rtx)
		    RTX_FRAME_RELATED_P (part) = 0;
		  else
		    RTX_FRAME_RELATED_P (part) = 1;
		}
            }
	}
      else
	{
	  insn = emit_insn (gen_movsi_push (frame_pointer_rtx));
          RTX_FRAME_RELATED_P (insn) = 1;

	  if (frame_pointer_needed)
	    {
	      insn = emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
	      RTX_FRAME_RELATED_P (insn) = 1;
	    }
	}
    }

  /* Allocate the stack frame.  */
  if (current_frame_info.frame_size == 0)
    ; /* Nothing to do.  */
  else if (current_frame_info.save_fp
	   && current_frame_info.frame_size < ((1 << 10) - UNITS_PER_WORD))
    ; /* Nothing to do.  */
  else if (current_frame_info.frame_size <= 512)
    {
      insn = emit_insn (gen_add_to_stack
			 (GEN_INT (- (signed) current_frame_info.frame_size)));
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    {
      rtx tmp = gen_rtx_REG (Pmode, PROLOGUE_TMP_REGNUM);
      insn = emit_insn (gen_movsi (tmp, GEN_INT (current_frame_info.frame_size)));
      RTX_FRAME_RELATED_P (insn) = 1;
      insn = emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  if (crtl->profile)
    emit_insn (gen_blockage ());
}
Example #11
void
fr30_print_operand (FILE *file, rtx x, int code)
{
  rtx x0;
  
  switch (code)
    {
    case '#':
      /* Output a :D if this instruction is delayed.  */
      if (dbr_sequence_length () != 0)
	fputs (":D", file);
      return;
      
    case 'p':
      /* Compute the register name of the second register in a hi/lo
	 register pair.  */
      if (GET_CODE (x) != REG)
	output_operand_lossage ("fr30_print_operand: unrecognized %%p code");
      else
	fprintf (file, "r%d", REGNO (x) + 1);
      return;
      
    case 'b':
      /* Convert GCC's comparison operators into FR30 comparison codes.  */
      switch (GET_CODE (x))
	{
	case EQ:  fprintf (file, "eq"); break;
	case NE:  fprintf (file, "ne"); break;
	case LT:  fprintf (file, "lt"); break;
	case LE:  fprintf (file, "le"); break;
	case GT:  fprintf (file, "gt"); break;
	case GE:  fprintf (file, "ge"); break;
	case LTU: fprintf (file, "c"); break;
	case LEU: fprintf (file, "ls"); break;
	case GTU: fprintf (file, "hi"); break;
	case GEU: fprintf (file, "nc");  break;
	default:
	  output_operand_lossage ("fr30_print_operand: unrecognized %%b code");
	  break;
	}
      return;
      
    case 'B':
      /* Convert GCC's comparison operators into the complementary FR30
	 comparison codes.  */
      switch (GET_CODE (x))
	{
	case EQ:  fprintf (file, "ne"); break;
	case NE:  fprintf (file, "eq"); break;
	case LT:  fprintf (file, "ge"); break;
	case LE:  fprintf (file, "gt"); break;
	case GT:  fprintf (file, "le"); break;
	case GE:  fprintf (file, "lt"); break;
	case LTU: fprintf (file, "nc"); break;
	case LEU: fprintf (file, "hi"); break;
	case GTU: fprintf (file, "ls"); break;
	case GEU: fprintf (file, "c"); break;
	default:
	  output_operand_lossage ("fr30_print_operand: unrecognized %%B code");
	  break;
	}
      return;

    case 'A':
      /* Print a signed byte value as an unsigned value.  */
      if (GET_CODE (x) != CONST_INT)
	output_operand_lossage ("fr30_print_operand: invalid operand to %%A code");
      else
	{
	  HOST_WIDE_INT val;
	  
	  val = INTVAL (x);

	  val &= 0xff;

	  fprintf (file, HOST_WIDE_INT_PRINT_DEC, val);
	}
      return;
      
    case 'x':
      if (GET_CODE (x) != CONST_INT
	  || INTVAL (x) < 16
	  || INTVAL (x) > 32)
	output_operand_lossage ("fr30_print_operand: invalid %%x code");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) - 16);
      return;

    case 'F':
      if (GET_CODE (x) != CONST_DOUBLE)
	output_operand_lossage ("fr30_print_operand: invalid %%F code");
      else
	{
	  char str[30];

	  real_to_decimal (str, CONST_DOUBLE_REAL_VALUE (x),
			   sizeof (str), 0, 1);
	  fputs (str, file);
	}
      return;
      
    case 0:
      /* Handled below.  */
      break;
      
    default:
      fprintf (stderr, "unknown code = %x\n", code);
      output_operand_lossage ("fr30_print_operand: unknown code");
      return;
    }

  switch (GET_CODE (x))
    {
    case REG:
      fputs (reg_names [REGNO (x)], file);
      break;

    case MEM:
      x0 = XEXP (x,0);
      
      switch (GET_CODE (x0))
	{
	case REG:
	  gcc_assert ((unsigned) REGNO (x0) < ARRAY_SIZE (reg_names));
	  fprintf (file, "@%s", reg_names [REGNO (x0)]);
	  break;

	case PLUS:
	  if (GET_CODE (XEXP (x0, 0)) != REG
	      || REGNO (XEXP (x0, 0)) < FRAME_POINTER_REGNUM
	      || REGNO (XEXP (x0, 0)) > STACK_POINTER_REGNUM
	      || GET_CODE (XEXP (x0, 1)) != CONST_INT)
	    {
	      fprintf (stderr, "bad INDEXed address:");
	      debug_rtx (x);
	      output_operand_lossage ("fr30_print_operand: unhandled MEM");
	    }
	  else if (REGNO (XEXP (x0, 0)) == FRAME_POINTER_REGNUM)
	    {
	      HOST_WIDE_INT val = INTVAL (XEXP (x0, 1));
	      if (val < -(1 << 9) || val > ((1 << 9) - 4))
		{
		  fprintf (stderr, "frame INDEX out of range:");
		  debug_rtx (x);
		  output_operand_lossage ("fr30_print_operand: unhandled MEM");
		}
	      fprintf (file, "@(r14, #" HOST_WIDE_INT_PRINT_DEC ")", val);
	    }
	  else
	    {
	      HOST_WIDE_INT val = INTVAL (XEXP (x0, 1));
	      if (val < 0 || val > ((1 << 6) - 4))
		{
		  fprintf (stderr, "stack INDEX out of range:");
		  debug_rtx (x);
		  output_operand_lossage ("fr30_print_operand: unhandled MEM");
		}
	      fprintf (file, "@(r15, #" HOST_WIDE_INT_PRINT_DEC ")", val);
	    }
	  break;
	  
	case SYMBOL_REF:
	  output_address (VOIDmode, x0);
	  break;
	  
	default:
	  fprintf (stderr, "bad MEM code = %x\n", GET_CODE (x0));
	  debug_rtx (x);
	  output_operand_lossage ("fr30_print_operand: unhandled MEM");
	  break;
	}
      break;
      
    case CONST_DOUBLE :
      /* We handle SFmode constants here as output_addr_const doesn't.  */
      if (GET_MODE (x) == SFmode)
	{
	  long l;

	  REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
	  fprintf (file, "0x%08lx", l);
	  break;
	}

      /* Fall through.  Let output_addr_const deal with it.  */
    default:
      output_addr_const (file, x);
      break;
    }

  return;
}
Example #12
static rtx
aarch64_simd_expand_args (rtx target, int icode, int have_retval,
			  tree exp, builtin_simd_arg *args,
			  enum machine_mode builtin_mode)
{
  rtx pat;
  rtx op[SIMD_MAX_BUILTIN_ARGS + 1]; /* First element for result operand.  */
  int opc = 0;

  if (have_retval)
    {
      machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
	  || GET_MODE (target) != tmode
	  || !(*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);
      op[opc++] = target;
    }

  for (;;)
    {
      builtin_simd_arg thisarg = args[opc - have_retval];

      if (thisarg == SIMD_ARG_STOP)
	break;
      else
	{
	  tree arg = CALL_EXPR_ARG (exp, opc - have_retval);
	  enum machine_mode mode = insn_data[icode].operand[opc].mode;
	  op[opc] = expand_normal (arg);

	  switch (thisarg)
	    {
	    case SIMD_ARG_COPY_TO_REG:
	      if (POINTER_TYPE_P (TREE_TYPE (arg)))
		op[opc] = convert_memory_address (Pmode, op[opc]);
	      /*gcc_assert (GET_MODE (op[opc]) == mode); */
	      if (!(*insn_data[icode].operand[opc].predicate)
		  (op[opc], mode))
		op[opc] = copy_to_mode_reg (mode, op[opc]);
	      break;

	    case SIMD_ARG_STRUCT_LOAD_STORE_LANE_INDEX:
	      gcc_assert (opc > 1);
	      if (CONST_INT_P (op[opc]))
		{
		  aarch64_simd_lane_bounds (op[opc], 0,
					    GET_MODE_NUNITS (builtin_mode),
					    exp);
		  /* Keep to GCC-vector-extension lane indices in the RTL.  */
		  op[opc] =
		    GEN_INT (ENDIAN_LANE_N (builtin_mode, INTVAL (op[opc])));
		}
	      goto constant_arg;

	    case SIMD_ARG_LANE_INDEX:
	      /* Must be a previous operand into which this is an index.  */
	      gcc_assert (opc > 0);
	      if (CONST_INT_P (op[opc]))
		{
		  machine_mode vmode = insn_data[icode].operand[opc - 1].mode;
		  aarch64_simd_lane_bounds (op[opc],
					    0, GET_MODE_NUNITS (vmode), exp);
		  /* Keep to GCC-vector-extension lane indices in the RTL.  */
		  op[opc] = GEN_INT (ENDIAN_LANE_N (vmode, INTVAL (op[opc])));
		}
	      /* Fall through - if the lane index isn't a constant then
		 the next case will error.  */
	    case SIMD_ARG_CONSTANT:
constant_arg:
	      if (!(*insn_data[icode].operand[opc].predicate)
		  (op[opc], mode))
	      {
		error ("%Kargument %d must be a constant immediate",
		       exp, opc + 1 - have_retval);
		return const0_rtx;
	      }
	      break;

	    case SIMD_ARG_STOP:
	      gcc_unreachable ();
	    }

	  opc++;
	}
    }

  switch (opc)
    {
    case 1:
      pat = GEN_FCN (icode) (op[0]);
      break;

    case 2:
      pat = GEN_FCN (icode) (op[0], op[1]);
      break;

    case 3:
      pat = GEN_FCN (icode) (op[0], op[1], op[2]);
      break;

    case 4:
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
      break;

    case 5:
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
      break;

    case 6:
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
      break;

    default:
      gcc_unreachable ();
    }

  if (!pat)
    return NULL_RTX;

  emit_insn (pat);

  return target;
}
Example #13
static void
aarch64_init_simd_builtins (void)
{
  unsigned int i, fcode = AARCH64_SIMD_PATTERN_START;

  aarch64_init_simd_builtin_types ();

  /* Strong-typing hasn't been implemented for all AdvSIMD builtin intrinsics.
     Therefore we need to preserve the old __builtin scalar types.  It can be
     removed once all the intrinsics become strongly typed using the qualifier
     system.  */
  aarch64_init_simd_builtin_scalar_types ();
 
  tree lane_check_fpr = build_function_type_list (void_type_node,
						  size_type_node,
						  size_type_node,
						  intSI_type_node,
						  NULL);
  aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_LANE_CHECK] =
      add_builtin_function ("__builtin_aarch64_im_lane_boundsi", lane_check_fpr,
			    AARCH64_SIMD_BUILTIN_LANE_CHECK, BUILT_IN_MD,
			    NULL, NULL_TREE);

  for (i = 0; i < ARRAY_SIZE (aarch64_simd_builtin_data); i++, fcode++)
    {
      bool print_type_signature_p = false;
      char type_signature[SIMD_MAX_BUILTIN_ARGS] = { 0 };
      aarch64_simd_builtin_datum *d = &aarch64_simd_builtin_data[i];
      char namebuf[60];
      tree ftype = NULL;
      tree fndecl = NULL;

      d->fcode = fcode;

      /* We must track two variables here.  op_num is
	 the operand number as in the RTL pattern.  This is
	 required to access the mode (e.g. V4SF mode) of the
	 argument, from which the base type can be derived.
	 arg_num is an index into the qualifiers data, which
	 gives qualifiers to the type (e.g. const unsigned).
	 The reason these two variables may differ by one is the
	 void return type.  While all return types take the 0th entry
	 in the qualifiers array, there is no operand for them in the
	 RTL pattern.  */
      int op_num = insn_data[d->code].n_operands - 1;
      int arg_num = d->qualifiers[0] & qualifier_void
		      ? op_num + 1
		      : op_num;
      tree return_type = void_type_node, args = void_list_node;
      tree eltype;

      /* Build a function type directly from the insn_data for this
	 builtin.  The build_function_type () function takes care of
	 removing duplicates for us.  */
      for (; op_num >= 0; arg_num--, op_num--)
	{
	  machine_mode op_mode = insn_data[d->code].operand[op_num].mode;
	  enum aarch64_type_qualifiers qualifiers = d->qualifiers[arg_num];

	  if (qualifiers & qualifier_unsigned)
	    {
	      type_signature[arg_num] = 'u';
	      print_type_signature_p = true;
	    }
	  else if (qualifiers & qualifier_poly)
	    {
	      type_signature[arg_num] = 'p';
	      print_type_signature_p = true;
	    }
	  else
	    type_signature[arg_num] = 's';

	  /* Skip an internal operand for vget_{low, high}.  */
	  if (qualifiers & qualifier_internal)
	    continue;

	  /* Some builtins have different user-facing types
	     for certain arguments, encoded in d->mode.  */
	  if (qualifiers & qualifier_map_mode)
	      op_mode = d->mode;

	  /* For pointers, we want a pointer to the basic type
	     of the vector.  */
	  if (qualifiers & qualifier_pointer && VECTOR_MODE_P (op_mode))
	    op_mode = GET_MODE_INNER (op_mode);

	  eltype = aarch64_simd_builtin_type
		     (op_mode,
		      (qualifiers & qualifier_unsigned) != 0,
		      (qualifiers & qualifier_poly) != 0);
	  gcc_assert (eltype != NULL);

	  /* Add qualifiers.  */
	  if (qualifiers & qualifier_const)
	    eltype = build_qualified_type (eltype, TYPE_QUAL_CONST);

	  if (qualifiers & qualifier_pointer)
	      eltype = build_pointer_type (eltype);

	  /* If we have reached arg_num == 0, we are at a non-void
	     return type.  Otherwise, we are still processing
	     arguments.  */
	  if (arg_num == 0)
	    return_type = eltype;
	  else
	    args = tree_cons (NULL_TREE, eltype, args);
	}

      ftype = build_function_type (return_type, args);

      gcc_assert (ftype != NULL);

      if (print_type_signature_p)
	snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s_%s",
		  d->name, type_signature);
      else
	snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s",
		  d->name);

      fndecl = add_builtin_function (namebuf, ftype, fcode, BUILT_IN_MD,
				     NULL, NULL_TREE);
      aarch64_builtin_decls[fcode] = fndecl;
    }
}
Example #14
static tree
cp_genericize_r (tree *stmt_p, int *walk_subtrees, void *data)
{
  tree stmt = *stmt_p;
  struct cp_genericize_data *wtd = (struct cp_genericize_data *) data;
  struct pointer_set_t *p_set = wtd->p_set;

  /* If in an OpenMP context, note var uses.  */
  if (__builtin_expect (wtd->omp_ctx != NULL, 0)
      && (TREE_CODE (stmt) == VAR_DECL
	  || TREE_CODE (stmt) == PARM_DECL
	  || TREE_CODE (stmt) == RESULT_DECL)
      && omp_var_to_track (stmt))
    omp_cxx_notice_variable (wtd->omp_ctx, stmt);

  if (is_invisiref_parm (stmt)
      /* Don't dereference parms in a thunk, pass the references through. */
      && !(DECL_THUNK_P (current_function_decl)
	   && TREE_CODE (stmt) == PARM_DECL))
    {
      *stmt_p = convert_from_reference (stmt);
      *walk_subtrees = 0;
      return NULL;
    }

  /* Map block scope extern declarations to visible declarations with the
     same name and type in outer scopes if any.  */
  if (cp_function_chain->extern_decl_map
      && (TREE_CODE (stmt) == FUNCTION_DECL || TREE_CODE (stmt) == VAR_DECL)
      && DECL_EXTERNAL (stmt))
    {
      struct cxx_int_tree_map *h, in;
      in.uid = DECL_UID (stmt);
      h = (struct cxx_int_tree_map *)
	  htab_find_with_hash (cp_function_chain->extern_decl_map,
			       &in, in.uid);
      if (h)
	{
	  *stmt_p = h->to;
	  *walk_subtrees = 0;
	  return NULL;
	}
    }

  /* Other than invisiref parms, don't walk the same tree twice.  */
  if (pointer_set_contains (p_set, stmt))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  if (TREE_CODE (stmt) == ADDR_EXPR
      && is_invisiref_parm (TREE_OPERAND (stmt, 0)))
    {
      /* If in an OpenMP context, note var uses.  */
      if (__builtin_expect (wtd->omp_ctx != NULL, 0)
	  && omp_var_to_track (TREE_OPERAND (stmt, 0)))
	omp_cxx_notice_variable (wtd->omp_ctx, TREE_OPERAND (stmt, 0));
      *stmt_p = convert (TREE_TYPE (stmt), TREE_OPERAND (stmt, 0));
      *walk_subtrees = 0;
    }
  else if (TREE_CODE (stmt) == RETURN_EXPR
	   && TREE_OPERAND (stmt, 0)
	   && is_invisiref_parm (TREE_OPERAND (stmt, 0)))
    /* Don't dereference an invisiref RESULT_DECL inside a RETURN_EXPR.  */
    *walk_subtrees = 0;
  else if (TREE_CODE (stmt) == OMP_CLAUSE)
    switch (OMP_CLAUSE_CODE (stmt))
      {
      case OMP_CLAUSE_LASTPRIVATE:
	/* Don't dereference an invisiref in OpenMP clauses.  */
	if (is_invisiref_parm (OMP_CLAUSE_DECL (stmt)))
	  {
	    *walk_subtrees = 0;
	    if (OMP_CLAUSE_LASTPRIVATE_STMT (stmt))
	      cp_walk_tree (&OMP_CLAUSE_LASTPRIVATE_STMT (stmt),
			    cp_genericize_r, data, NULL);
	  }
	break;
      case OMP_CLAUSE_PRIVATE:
	/* Don't dereference an invisiref in OpenMP clauses.  */
	if (is_invisiref_parm (OMP_CLAUSE_DECL (stmt)))
	  *walk_subtrees = 0;
	else if (wtd->omp_ctx != NULL)
	  {
	    /* A private clause doesn't cause any references to the
	       var in outer contexts, so avoid calling
	       omp_cxx_notice_variable for it.  */
	    struct cp_genericize_omp_taskreg *old = wtd->omp_ctx;
	    wtd->omp_ctx = NULL;
	    cp_walk_tree (&OMP_CLAUSE_DECL (stmt), cp_genericize_r,
			  data, NULL);
	    wtd->omp_ctx = old;
	    *walk_subtrees = 0;
	  }
	break;
      case OMP_CLAUSE_SHARED:
      case OMP_CLAUSE_FIRSTPRIVATE:
      case OMP_CLAUSE_COPYIN:
      case OMP_CLAUSE_COPYPRIVATE:
	/* Don't dereference an invisiref in OpenMP clauses.  */
	if (is_invisiref_parm (OMP_CLAUSE_DECL (stmt)))
	  *walk_subtrees = 0;
	break;
      case OMP_CLAUSE_REDUCTION:
	gcc_assert (!is_invisiref_parm (OMP_CLAUSE_DECL (stmt)));
	break;
      default:
	break;
      }
  else if (IS_TYPE_OR_DECL_P (stmt))
    *walk_subtrees = 0;

  /* Due to the way voidify_wrapper_expr is written, we don't get a chance
     to lower this construct before scanning it, so we need to lower these
     before doing anything else.  */
  else if (TREE_CODE (stmt) == CLEANUP_STMT)
    *stmt_p = build2 (CLEANUP_EH_ONLY (stmt) ? TRY_CATCH_EXPR
					     : TRY_FINALLY_EXPR,
		      void_type_node,
		      CLEANUP_BODY (stmt),
		      CLEANUP_EXPR (stmt));

  else if (TREE_CODE (stmt) == IF_STMT)
    {
      genericize_if_stmt (stmt_p);
      /* *stmt_p has changed, tail recurse to handle it again.  */
      return cp_genericize_r (stmt_p, walk_subtrees, data);
    }

  /* COND_EXPR might have incompatible types in branches if one or both
     arms are bitfields.  Fix it up now.  */
  else if (TREE_CODE (stmt) == COND_EXPR)
    {
      tree type_left
	= (TREE_OPERAND (stmt, 1)
	   ? is_bitfield_expr_with_lowered_type (TREE_OPERAND (stmt, 1))
	   : NULL_TREE);
      tree type_right
	= (TREE_OPERAND (stmt, 2)
	   ? is_bitfield_expr_with_lowered_type (TREE_OPERAND (stmt, 2))
	   : NULL_TREE);
      if (type_left
	  && !useless_type_conversion_p (TREE_TYPE (stmt),
					 TREE_TYPE (TREE_OPERAND (stmt, 1))))
	{
	  TREE_OPERAND (stmt, 1)
	    = fold_convert (type_left, TREE_OPERAND (stmt, 1));
	  gcc_assert (useless_type_conversion_p (TREE_TYPE (stmt),
						 type_left));
	}
      if (type_right
	  && !useless_type_conversion_p (TREE_TYPE (stmt),
					 TREE_TYPE (TREE_OPERAND (stmt, 2))))
	{
	  TREE_OPERAND (stmt, 2)
	    = fold_convert (type_right, TREE_OPERAND (stmt, 2));
	  gcc_assert (useless_type_conversion_p (TREE_TYPE (stmt),
						 type_right));
	}
    }

  else if (TREE_CODE (stmt) == BIND_EXPR)
    {
      if (__builtin_expect (wtd->omp_ctx != NULL, 0))
	{
	  tree decl;
	  for (decl = BIND_EXPR_VARS (stmt); decl; decl = DECL_CHAIN (decl))
	    if (TREE_CODE (decl) == VAR_DECL
		&& !DECL_EXTERNAL (decl)
		&& omp_var_to_track (decl))
	      {
		splay_tree_node n
		  = splay_tree_lookup (wtd->omp_ctx->variables,
				       (splay_tree_key) decl);
		if (n == NULL)
		  splay_tree_insert (wtd->omp_ctx->variables,
				     (splay_tree_key) decl,
				     TREE_STATIC (decl)
				     ? OMP_CLAUSE_DEFAULT_SHARED
				     : OMP_CLAUSE_DEFAULT_PRIVATE);
	      }
	}
      VEC_safe_push (tree, heap, wtd->bind_expr_stack, stmt);
      cp_walk_tree (&BIND_EXPR_BODY (stmt),
		    cp_genericize_r, data, NULL);
      VEC_pop (tree, wtd->bind_expr_stack);
    }

  else if (TREE_CODE (stmt) == USING_STMT)
    {
      tree block = NULL_TREE;

      /* Get the innermost enclosing GIMPLE_BIND that has a non-NULL
	 BLOCK, and append an IMPORTED_DECL to its
	 BLOCK_VARS chain.  */
      if (wtd->bind_expr_stack)
	{
	  int i;
	  for (i = VEC_length (tree, wtd->bind_expr_stack) - 1; i >= 0; i--)
	    if ((block = BIND_EXPR_BLOCK (VEC_index (tree,
						     wtd->bind_expr_stack, i))))
	      break;
	}
      if (block)
	{
	  tree using_directive;
	  gcc_assert (TREE_OPERAND (stmt, 0));

	  using_directive = make_node (IMPORTED_DECL);
	  TREE_TYPE (using_directive) = void_type_node;

	  IMPORTED_DECL_ASSOCIATED_DECL (using_directive)
	    = TREE_OPERAND (stmt, 0);
	  DECL_CHAIN (using_directive) = BLOCK_VARS (block);
	  BLOCK_VARS (block) = using_directive;
	}
      /* The USING_STMT won't appear in GENERIC.  */
      *stmt_p = build1 (NOP_EXPR, void_type_node, integer_zero_node);
      *walk_subtrees = 0;
    }

  else if (TREE_CODE (stmt) == DECL_EXPR
	   && TREE_CODE (DECL_EXPR_DECL (stmt)) == USING_DECL)
    {
      /* Using decls inside DECL_EXPRs are just dropped on the floor.  */
      *stmt_p = build1 (NOP_EXPR, void_type_node, integer_zero_node);
      *walk_subtrees = 0;
    }
  else if (TREE_CODE (stmt) == OMP_PARALLEL || TREE_CODE (stmt) == OMP_TASK)
    {
      struct cp_genericize_omp_taskreg omp_ctx;
      tree c, decl;
      splay_tree_node n;

      *walk_subtrees = 0;
      cp_walk_tree (&OMP_CLAUSES (stmt), cp_genericize_r, data, NULL);
      omp_ctx.is_parallel = TREE_CODE (stmt) == OMP_PARALLEL;
      omp_ctx.default_shared = omp_ctx.is_parallel;
      omp_ctx.outer = wtd->omp_ctx;
      omp_ctx.variables = splay_tree_new (splay_tree_compare_decl_uid, 0, 0);
      wtd->omp_ctx = &omp_ctx;
      for (c = OMP_CLAUSES (stmt); c; c = OMP_CLAUSE_CHAIN (c))
	switch (OMP_CLAUSE_CODE (c))
	  {
	  case OMP_CLAUSE_SHARED:
	  case OMP_CLAUSE_PRIVATE:
	  case OMP_CLAUSE_FIRSTPRIVATE:
	  case OMP_CLAUSE_LASTPRIVATE:
	    decl = OMP_CLAUSE_DECL (c);
	    if (decl == error_mark_node || !omp_var_to_track (decl))
	      break;
	    n = splay_tree_lookup (omp_ctx.variables, (splay_tree_key) decl);
	    if (n != NULL)
	      break;
	    splay_tree_insert (omp_ctx.variables, (splay_tree_key) decl,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
			       ? OMP_CLAUSE_DEFAULT_SHARED
			       : OMP_CLAUSE_DEFAULT_PRIVATE);
	    if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE
		&& omp_ctx.outer)
	      omp_cxx_notice_variable (omp_ctx.outer, decl);
	    break;
	  case OMP_CLAUSE_DEFAULT:
	    if (OMP_CLAUSE_DEFAULT_KIND (c) == OMP_CLAUSE_DEFAULT_SHARED)
	      omp_ctx.default_shared = true;
	  default:
	    break;
	  }
      cp_walk_tree (&OMP_BODY (stmt), cp_genericize_r, data, NULL);
      wtd->omp_ctx = omp_ctx.outer;
      splay_tree_delete (omp_ctx.variables);
    }
  else if (TREE_CODE (stmt) == CONVERT_EXPR)
    gcc_assert (!CONVERT_EXPR_VBASE_PATH (stmt));

  pointer_set_insert (p_set, *stmt_p);

  return NULL;
}
Example #15
static void
build_constructors (gimple swtch)
{
  unsigned i, branch_num = gimple_switch_num_labels (swtch);
  tree pos = info.range_min;

  for (i = 1; i < branch_num; i++)
    {
      tree cs = gimple_switch_label (swtch, i);
      basic_block bb = label_to_block (CASE_LABEL (cs));
      edge e;
      tree high;
      gimple_stmt_iterator gsi;
      int j;

      if (bb == info.final_bb)
	e = find_edge (info.switch_bb, bb);
      else
	e = single_succ_edge (bb);
      gcc_assert (e);

      while (tree_int_cst_lt (pos, CASE_LOW (cs)))
	{
	  int k;
	  for (k = 0; k < info.phi_count; k++)
	    {
	      constructor_elt *elt;

	      elt = VEC_quick_push (constructor_elt,
				    info.constructors[k], NULL);
	      elt->index = int_const_binop (MINUS_EXPR, pos,
					    info.range_min);
	      elt->value = info.default_values[k];
	    }

	  pos = int_const_binop (PLUS_EXPR, pos, integer_one_node);
	}
      gcc_assert (tree_int_cst_equal (pos, CASE_LOW (cs)));

      j = 0;
      if (CASE_HIGH (cs))
	high = CASE_HIGH (cs);
      else
	high = CASE_LOW (cs);
      for (gsi = gsi_start_phis (info.final_bb);
	   !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple phi = gsi_stmt (gsi);
	  tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
	  tree low = CASE_LOW (cs);
	  pos = CASE_LOW (cs);

	  do
	    {
	      constructor_elt *elt;

	      elt = VEC_quick_push (constructor_elt,
				    info.constructors[j], NULL);
	      elt->index = int_const_binop (MINUS_EXPR, pos, info.range_min);
	      elt->value = val;

	      pos = int_const_binop (PLUS_EXPR, pos, integer_one_node);
	    } while (!tree_int_cst_lt (high, pos)
		     && tree_int_cst_lt (low, pos));
	  j++;
	}
    }
}
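As a point of reference, below is a minimal, hypothetical C input (not taken from GCC's testsuite) of the shape the switch-conversion pass rewrites into table lookups; build_constructors gathers one constructor per PHI of the final block, with one element per case value in the handled range.

/* Hypothetical example: with -ftree-switch-conversion enabled, a dense
   switch that only assigns constants can be replaced by reads from
   compiler-generated lookup tables.  */
int
color_code (int c)
{
  int r;
  switch (c)
    {
    case 0: r = 10; break;
    case 1: r = 13; break;
    case 2: r = 27; break;
    default: r = -1; break;
    }
  return r;
}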
Example #16
static int
optimize_mode_switching (void)
{
  int e;
  basic_block bb;
  bool need_commit = false;
  static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
#define N_ENTITIES ARRAY_SIZE (num_modes)
  int entity_map[N_ENTITIES];
  struct bb_info *bb_info[N_ENTITIES];
  int i, j;
  int n_entities = 0;
  int max_num_modes = 0;
  bool emitted ATTRIBUTE_UNUSED = false;
  basic_block post_entry = 0;
  basic_block pre_exit = 0;
  struct edge_list *edge_list = 0;

  /* These bitmaps are used for the LCM algorithm.  */
  sbitmap *kill, *del, *insert, *antic, *transp, *comp;
  sbitmap *avin, *avout;

  for (e = N_ENTITIES - 1; e >= 0; e--)
    if (OPTIMIZE_MODE_SWITCHING (e))
      {
	int entry_exit_extra = 0;

	/* Create the list of segments within each basic block.
	   If NORMAL_MODE is defined, allow for two extra
	   blocks split from the entry and exit block.  */
	if (targetm.mode_switching.entry && targetm.mode_switching.exit)
	  entry_exit_extra = 3;

	bb_info[n_entities]
	  = XCNEWVEC (struct bb_info,
		      last_basic_block_for_fn (cfun) + entry_exit_extra);
	entity_map[n_entities++] = e;
	if (num_modes[e] > max_num_modes)
	  max_num_modes = num_modes[e];
      }

  if (! n_entities)
    return 0;

  /* Make sure if MODE_ENTRY is defined MODE_EXIT is defined.  */
  gcc_assert ((targetm.mode_switching.entry && targetm.mode_switching.exit)
	      || (!targetm.mode_switching.entry
		  && !targetm.mode_switching.exit));

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    {
      /* Split the edge from the entry block, so that we can note that
	 NORMAL_MODE is supplied there.  */
      post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
    }

  df_analyze ();

  /* Create the bitmap vectors.  */
  antic = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				n_entities * max_num_modes);
  transp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				 n_entities * max_num_modes);
  comp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);
  avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);
  avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				n_entities * max_num_modes);
  kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);

  bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (antic, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));

  for (j = n_entities - 1; j >= 0; j--)
    {
      int e = entity_map[j];
      int no_mode = num_modes[e];
      struct bb_info *info = bb_info[j];
      rtx_insn *insn;

      /* Determine the mode (if any) that the first use of entity E needs.
	 This will be the mode that is anticipatable for this block.
	 Also compute the initial transparency settings.  */
      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct seginfo *ptr;
	  int last_mode = no_mode;
	  bool any_set_required = false;
	  HARD_REG_SET live_now;

	  info[bb->index].mode_out = info[bb->index].mode_in = no_mode;

	  REG_SET_TO_HARD_REG_SET (live_now, df_get_live_in (bb));

	  /* Pretend the mode is clobbered across abnormal edges.  */
	  {
	    edge_iterator ei;
	    edge eg;
	    FOR_EACH_EDGE (eg, ei, bb->preds)
	      if (eg->flags & EDGE_COMPLEX)
		break;
	    if (eg)
	      {
		rtx_insn *ins_pos = BB_HEAD (bb);
		if (LABEL_P (ins_pos))
		  ins_pos = NEXT_INSN (ins_pos);
		gcc_assert (NOTE_INSN_BASIC_BLOCK_P (ins_pos));
		if (ins_pos != BB_END (bb))
		  ins_pos = NEXT_INSN (ins_pos);
		ptr = new_seginfo (no_mode, ins_pos, bb->index, live_now);
		add_seginfo (info + bb->index, ptr);
		for (i = 0; i < no_mode; i++)
		  clear_mode_bit (transp[bb->index], j, i);
	      }
	  }

	  FOR_BB_INSNS (bb, insn)
	    {
	      if (INSN_P (insn))
		{
		  int mode = targetm.mode_switching.needed (e, insn);
		  rtx link;

		  if (mode != no_mode && mode != last_mode)
		    {
		      any_set_required = true;
		      last_mode = mode;
		      ptr = new_seginfo (mode, insn, bb->index, live_now);
		      add_seginfo (info + bb->index, ptr);
		      for (i = 0; i < no_mode; i++)
			clear_mode_bit (transp[bb->index], j, i);
		    }

		  if (targetm.mode_switching.after)
		    last_mode = targetm.mode_switching.after (e, last_mode,
							      insn);

		  /* Update LIVE_NOW.  */
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_DEAD)
		      reg_dies (XEXP (link, 0), &live_now);

		  note_stores (PATTERN (insn), reg_becomes_live, &live_now);
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_UNUSED)
		      reg_dies (XEXP (link, 0), &live_now);
		}
	    }

	  info[bb->index].computing = last_mode;
	  /* Check for blocks without ANY mode requirements.
	     N.B. because of MODE_AFTER, last_mode might still
	     be different from no_mode, in which case we need to
	     mark the block as nontransparent.  */
	  if (!any_set_required)
	    {
	      ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now);
	      add_seginfo (info + bb->index, ptr);
	      if (last_mode != no_mode)
		for (i = 0; i < no_mode; i++)
		  clear_mode_bit (transp[bb->index], j, i);
	    }
	}
      if (targetm.mode_switching.entry && targetm.mode_switching.exit)
	{
	  int mode = targetm.mode_switching.entry (e);

	  info[post_entry->index].mode_out =
	    info[post_entry->index].mode_in = no_mode;
	  if (pre_exit)
	    {
	      info[pre_exit->index].mode_out =
		info[pre_exit->index].mode_in = no_mode;
	    }

	  if (mode != no_mode)
	    {
	      bb = post_entry;

	      /* By always making this nontransparent, we save
		 an extra check in make_preds_opaque.  We also
		 need this to avoid confusing pre_edge_lcm when
		 antic is cleared but transp and comp are set.  */
	      for (i = 0; i < no_mode; i++)
		clear_mode_bit (transp[bb->index], j, i);

	      /* Insert a fake computing definition of MODE into entry
		 blocks which compute no mode. This represents the mode on
		 entry.  */
	      info[bb->index].computing = mode;

	      if (pre_exit)
		info[pre_exit->index].seginfo->mode =
		  targetm.mode_switching.exit (e);
	    }
	}

      /* Set the anticipatable and computing arrays.  */
      for (i = 0; i < no_mode; i++)
	{
	  int m = targetm.mode_switching.priority (entity_map[j], i);

	  FOR_EACH_BB_FN (bb, cfun)
	    {
	      if (info[bb->index].seginfo->mode == m)
		set_mode_bit (antic[bb->index], j, m);

	      if (info[bb->index].computing == m)
		set_mode_bit (comp[bb->index], j, m);
	    }
	}
    }

  /* Calculate the optimal locations for placing mode
     switches to modes with priority I.  */

  FOR_EACH_BB_FN (bb, cfun)
    bitmap_not (kill[bb->index], transp[bb->index]);

  edge_list = pre_edge_lcm_avs (n_entities * max_num_modes, transp, comp, antic,
				kill, avin, avout, &insert, &del);

  for (j = n_entities - 1; j >= 0; j--)
    {
      int no_mode = num_modes[entity_map[j]];

      /* Insert all mode sets that have been inserted by lcm.  */

      for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
	{
	  edge eg = INDEX_EDGE (edge_list, ed);

	  eg->aux = (void *)(intptr_t)-1;

	  for (i = 0; i < no_mode; i++)
	    {
	      int m = targetm.mode_switching.priority (entity_map[j], i);
	      if (mode_bit_p (insert[ed], j, m))
		{
		  eg->aux = (void *)(intptr_t)m;
		  break;
		}
	    }
	}

      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct bb_info *info = bb_info[j];
	  int last_mode = no_mode;

	  /* Initialize mode out availability for bb.  */
	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (avout[bb->index], j, i))
	      {
		if (last_mode == no_mode)
		  last_mode = i;
		if (last_mode != i)
		  {
		    last_mode = no_mode;
		    break;
		  }
	      }
	  info[bb->index].mode_out = last_mode;

	  /* Initialize mode in availability for bb.  */
	  last_mode = no_mode;
	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (avin[bb->index], j, i))
	      {
		if (last_mode == no_mode)
		  last_mode = i;
		if (last_mode != i)
		  {
		    last_mode = no_mode;
		    break;
		  }
	      }
	  info[bb->index].mode_in = last_mode;

	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (del[bb->index], j, i))
	      info[bb->index].seginfo->mode = no_mode;
	}

      /* Now output the remaining mode sets in all the segments.  */

      /* In case no mode was inserted, the mode information on the edge
	 might not be complete.
	 Update mode info on edges and commit pending mode sets.  */
      need_commit |= commit_mode_sets (edge_list, entity_map[j], bb_info[j]);

      /* Reset modes for next entity.  */
      clear_aux_for_edges ();

      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct seginfo *ptr, *next;
	  int cur_mode = bb_info[j][bb->index].mode_in;

	  for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next)
	    {
	      next = ptr->next;
	      if (ptr->mode != no_mode)
		{
		  rtx_insn *mode_set;

		  rtl_profile_for_bb (bb);
		  start_sequence ();

		  targetm.mode_switching.emit (entity_map[j], ptr->mode,
					       cur_mode, ptr->regs_live);
		  mode_set = get_insns ();
		  end_sequence ();

		  /* modes kill each other inside a basic block.  */
		  cur_mode = ptr->mode;

		  /* Insert MODE_SET only if it is nonempty.  */
		  if (mode_set != NULL_RTX)
		    {
		      emitted = true;
		      if (NOTE_INSN_BASIC_BLOCK_P (ptr->insn_ptr))
			/* We need to emit the insns in a FIFO-like manner,
			   i.e. the first to be emitted at our insertion
			   point ends up first in the instruction stream.
			   Because we made sure that NOTE_INSN_BASIC_BLOCK is
			   only used for initially empty basic blocks, we
			   can achieve this by appending at the end of
			   the block.  */
			emit_insn_after
			  (mode_set, BB_END (NOTE_BASIC_BLOCK (ptr->insn_ptr)));
		      else
			emit_insn_before (mode_set, ptr->insn_ptr);
		    }

		  default_rtl_profile ();
		}

	      free (ptr);
	    }
	}

      free (bb_info[j]);
    }

  free_edge_list (edge_list);

  /* Finished. Free up all the things we've allocated.  */
  sbitmap_vector_free (del);
  sbitmap_vector_free (insert);
  sbitmap_vector_free (kill);
  sbitmap_vector_free (antic);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);
  sbitmap_vector_free (avin);
  sbitmap_vector_free (avout);

  if (need_commit)
    commit_edge_insertions ();

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    cleanup_cfg (CLEANUP_NO_INSN_DEL);
  else if (!need_commit && !emitted)
    return 0;

  return 1;
}
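For orientation, this pass only runs for targets that define the mode-switching macros and hooks queried above. A hedged sketch of the minimal target-side definitions follows; the values are invented, and the real definitions live in the target headers (for example config/sh/sh.h).

/* Sketch only -- not a real target definition.  One entity (index 0)
   with two real modes; as the code above shows, the per-entity count in
   NUM_MODES_FOR_MODE_SWITCHING also serves as the "no mode needed"
   value.  */
#define OPTIMIZE_MODE_SWITCHING(ENTITY) 1
#define NUM_MODES_FOR_MODE_SWITCHING { 2 }

/* The per-insn behaviour comes from the target hooks used above
   (targetm.mode_switching.needed, .after, .entry, .exit, .priority
   and .emit), which a backend provides via TARGET_MODE_NEEDED and
   the related hook macros.  */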
Example #17
static void
init_attributes (void)
{
  size_t i;
  int k;

  attribute_tables[0] = lang_hooks.common_attribute_table;
  attribute_tables[1] = lang_hooks.attribute_table;
  attribute_tables[2] = lang_hooks.format_attribute_table;
  attribute_tables[3] = targetm.attribute_table;

  /* Translate NULL pointers to pointers to the empty table.  */
  for (i = 0; i < ARRAY_SIZE (attribute_tables); i++)
    if (attribute_tables[i] == NULL)
      attribute_tables[i] = empty_attribute_table;

#ifdef ENABLE_CHECKING
  /* Make some sanity checks on the attribute tables.  */
  for (i = 0; i < ARRAY_SIZE (attribute_tables); i++)
    {
      int j;

      for (j = 0; attribute_tables[i][j].name != NULL; j++)
	{
	  /* The name must not begin and end with __.  */
	  const char *name = attribute_tables[i][j].name;
	  int len = strlen (name);

	  gcc_assert (!(name[0] == '_' && name[1] == '_'
			&& name[len - 1] == '_' && name[len - 2] == '_'));

	  /* The minimum and maximum lengths must be consistent.  */
	  gcc_assert (attribute_tables[i][j].min_length >= 0);

	  gcc_assert (attribute_tables[i][j].max_length == -1
		      || (attribute_tables[i][j].max_length
			  >= attribute_tables[i][j].min_length));

	  /* An attribute cannot require both a DECL and a TYPE.  */
	  gcc_assert (!attribute_tables[i][j].decl_required
		      || !attribute_tables[i][j].type_required);

	  /* If an attribute requires a function type, in particular
	     it requires a type.  */
	  gcc_assert (!attribute_tables[i][j].function_type_required
		      || attribute_tables[i][j].type_required);
	}
    }

  /* Check that each name occurs just once in each table.  */
  for (i = 0; i < ARRAY_SIZE (attribute_tables); i++)
    {
      int j, k;
      for (j = 0; attribute_tables[i][j].name != NULL; j++)
	for (k = j + 1; attribute_tables[i][k].name != NULL; k++)
	  gcc_assert (strcmp (attribute_tables[i][j].name,
			      attribute_tables[i][k].name));
    }
  /* Check that no name occurs in more than one table.  */
  for (i = 0; i < ARRAY_SIZE (attribute_tables); i++)
    {
      size_t j, k, l;

      for (j = i + 1; j < ARRAY_SIZE (attribute_tables); j++)
	for (k = 0; attribute_tables[i][k].name != NULL; k++)
	  for (l = 0; attribute_tables[j][l].name != NULL; l++)
	    gcc_assert (strcmp (attribute_tables[i][k].name,
				attribute_tables[j][l].name));
    }
#endif

  attribute_hash = htab_create (200, hash_attr, eq_attr, NULL);
  for (i = 0; i < ARRAY_SIZE (attribute_tables); i++)
    for (k = 0; attribute_tables[i][k].name != NULL; k++)
      {
        register_attribute (&attribute_tables[i][k]);
      }
  invoke_plugin_callbacks (PLUGIN_ATTRIBUTES, NULL);
  attributes_initialized = true;
}
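To make the asserted invariants concrete, here is a self-contained sketch of a table that would pass them. The struct below is a simplified stand-in, not GCC's real struct attribute_spec (which has more members, notably a handler callback), and the entries are invented.

#include <stdbool.h>
#include <stddef.h>

/* Sketch only: fields mirror just the members the checks above read.  */
struct attribute_spec_sketch
{
  const char *name;              /* must not both begin and end with "__" */
  int min_length;                /* >= 0 */
  int max_length;                /* -1 (unbounded) or >= min_length */
  bool decl_required;            /* not both decl_required ...            */
  bool type_required;            /* ... and type_required                 */
  bool function_type_required;   /* implies type_required                 */
};

static const struct attribute_spec_sketch example_table[] =
{
  { "noreturn", 0, 0, true,  false, false },
  { "mode",     1, 1, false, true,  false },
  { NULL,       0, 0, false, false, false }   /* NULL name terminates */
};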
Example #18
bool
i386_pe_type_dllimport_p (tree decl)
{
   gcc_assert (TREE_CODE (decl) == VAR_DECL 
               || TREE_CODE (decl) == FUNCTION_DECL);

   if (TARGET_NOP_FUN_DLLIMPORT && TREE_CODE (decl) == FUNCTION_DECL)
     return false;

   /* We ignore the dllimport attribute for inline member functions.
      This differs from MSVC behavior which treats it like GNUC
      'extern inline' extension.  Also ignore for template
      instantiations with linkonce semantics and artificial methods.  */
    if (TREE_CODE (decl) ==  FUNCTION_DECL
        && (DECL_DECLARED_INLINE_P (decl)
	    || DECL_TEMPLATE_INSTANTIATION (decl)
	    || DECL_ARTIFICIAL (decl)))
      return false;

   /* Since we can't treat a pointer to a dllimport'd symbol as a
       constant address, we turn off the attribute on C++ virtual
       methods to allow creation of vtables using thunks.  */
    else if (TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE
	     && DECL_VIRTUAL_P (decl))
      {
	/* Even though we ignore the attribute from the start, warn if we later see
	   an out-of-class definition, as we do for other member functions in
	   tree.c:merge_dllimport_decl_attributes.  If this is the key method, the
	   definition may affect the import-export status of vtables, depending
           on how we handle MULTIPLE_SYMBOL_SPACES in cp/decl2.c.   */
	if (DECL_INITIAL (decl))
	  {
	    warning ("%q+D redeclared without dllimport attribute: "
		    "previous dllimport ignored", decl);
#ifdef PE_DLL_DEBUG
	    if (decl == CLASSTYPE_KEY_METHOD (DECL_CONTEXT (decl)))
	      warning ("key method %q+D of dllimport'd class defined",
		       decl);
#endif
	  }
	return false;
      }

      /* Don't mark defined functions as dllimport.  This code will only be
         reached if we see a non-inline function defined out-of-class.  */
    else if (TREE_CODE (decl) ==  FUNCTION_DECL
	     && (DECL_INITIAL (decl)))
      return false;

    /* Don't allow definitions of static data members in a dllimport class.
       If vtable data is marked as DECL_EXTERNAL, import it; otherwise just
       ignore the class attribute.  */
    else if (TREE_CODE (decl) == VAR_DECL
	     && TREE_STATIC (decl) && TREE_PUBLIC (decl)
	     && !DECL_EXTERNAL (decl))
      {
	if (!DECL_VIRTUAL_P (decl))
	     error ("definition of static data member %q+D of "
		    "dllimport'd class", decl);
	return false;
      }

    return true;
}
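To illustrate the cases this predicate distinguishes, here is a small hypothetical C++ declaration (the class and member names are invented); see the branches above for how each member is treated.

/* Hypothetical example for a PE target (e.g. i686-w64-mingw32).  */
struct __attribute__ ((dllimport)) S
{
  virtual void f ();      /* virtual: dllimport dropped so vtables can use thunks */
  inline void g () {}     /* inline member: dllimport ignored */
  static int counter;     /* defining S::counter in this TU is rejected */
};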
Example #19
tree
ubsan_instrument_division (location_t loc, tree op0, tree op1)
{
  tree t, tt;
  tree type = TREE_TYPE (op0);

  /* At this point both operands should have the same type,
     because they are already converted to RESULT_TYPE.
     Use TYPE_MAIN_VARIANT since typedefs can confuse us.  */
  gcc_assert (TYPE_MAIN_VARIANT (TREE_TYPE (op0))
	      == TYPE_MAIN_VARIANT (TREE_TYPE (op1)));

  if (TREE_CODE (type) == INTEGER_TYPE
      && (flag_sanitize & SANITIZE_DIVIDE))
    t = fold_build2 (EQ_EXPR, boolean_type_node,
		     op1, build_int_cst (type, 0));
  else if (TREE_CODE (type) == REAL_TYPE
	   && (flag_sanitize & SANITIZE_FLOAT_DIVIDE))
    t = fold_build2 (EQ_EXPR, boolean_type_node,
		     op1, build_real (type, dconst0));
  else
    return NULL_TREE;

  /* We check INT_MIN / -1 only for signed types.  */
  if (TREE_CODE (type) == INTEGER_TYPE
      && (flag_sanitize & SANITIZE_DIVIDE)
      && !TYPE_UNSIGNED (type))
    {
      tree x;
      tt = fold_build2 (EQ_EXPR, boolean_type_node, op1,
			build_int_cst (type, -1));
      x = fold_build2 (EQ_EXPR, boolean_type_node, op0,
		       TYPE_MIN_VALUE (type));
      x = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, x, tt);
      t = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, t, x);
    }

  /* If the condition was folded to 0, no need to instrument
     this expression.  */
  if (integer_zerop (t))
    return NULL_TREE;

  /* In case we have a SAVE_EXPR in a conditional context, we need to
     make sure it gets evaluated before the condition.  If the OP0 is
     an instrumented array reference, mark it as having side effects so
     it's not folded away.  */
  if (flag_sanitize & SANITIZE_BOUNDS)
    {
      tree xop0 = op0;
      while (CONVERT_EXPR_P (xop0))
	xop0 = TREE_OPERAND (xop0, 0);
      if (TREE_CODE (xop0) == ARRAY_REF)
	{
	  TREE_SIDE_EFFECTS (xop0) = 1;
	  TREE_SIDE_EFFECTS (op0) = 1;
	}
    }
  t = fold_build2 (COMPOUND_EXPR, TREE_TYPE (t), op0, t);
  if (flag_sanitize_undefined_trap_on_error)
    tt = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TRAP), 0);
  else
    {
      tree data = ubsan_create_data ("__ubsan_overflow_data", 1, &loc,
				     ubsan_type_descriptor (type), NULL_TREE,
				     NULL_TREE);
      data = build_fold_addr_expr_loc (loc, data);
      enum built_in_function bcode
	= (flag_sanitize_recover & SANITIZE_DIVIDE)
	  ? BUILT_IN_UBSAN_HANDLE_DIVREM_OVERFLOW
	  : BUILT_IN_UBSAN_HANDLE_DIVREM_OVERFLOW_ABORT;
      tt = builtin_decl_explicit (bcode);
      tt = build_call_expr_loc (loc, tt, 3, data, ubsan_encode_value (op0),
				ubsan_encode_value (op1));
    }
  t = fold_build3 (COND_EXPR, void_type_node, t, tt, void_node);

  return t;
}
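A minimal user-level example (not part of GCC) of the two integer cases the check built above fires on:

/* Compile with: gcc -fsanitize=integer-divide-by-zero demo.c
   Division by zero, and INT_MIN / -1 for signed types, are undefined;
   the COND_EXPR constructed above routes them to
   __ubsan_handle_divrem_overflow (or to __builtin_trap when
   -fsanitize-undefined-trap-on-error is given).  */
int
demo_div (int a, int b)
{
  return a / b;   /* instrumented for b == 0, or a == INT_MIN && b == -1 */
}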
Example #20
/* Write a constant expression in binary form to a buffer.  */
int
gfc_target_encode_expr (gfc_expr *source, unsigned char *buffer,
			size_t buffer_size)
{
  if (source == NULL)
    return 0;

  if (source->expr_type == EXPR_ARRAY)
    return encode_array (source, buffer, buffer_size);

  gcc_assert (source->expr_type == EXPR_CONSTANT
	      || source->expr_type == EXPR_STRUCTURE
	      || source->expr_type == EXPR_SUBSTRING);

  /* If we already have a target-memory representation, we use that rather 
     than recreating one.  */
  if (source->representation.string)
    {
      memcpy (buffer, source->representation.string,
	      source->representation.length);
      return source->representation.length;
    }

  switch (source->ts.type)
    {
    case BT_INTEGER:
      return encode_integer (source->ts.kind, source->value.integer, buffer,
			     buffer_size);
    case BT_REAL:
      return encode_float (source->ts.kind, source->value.real, buffer,
			   buffer_size);
    case BT_COMPLEX:
      return encode_complex (source->ts.kind, source->value.complex,
			     buffer, buffer_size);
    case BT_LOGICAL:
      return encode_logical (source->ts.kind, source->value.logical, buffer,
			     buffer_size);
    case BT_CHARACTER:
      if (source->expr_type == EXPR_CONSTANT || source->ref == NULL)
	return gfc_encode_character (source->ts.kind,
				     source->value.character.length,
				     source->value.character.string,
				     buffer, buffer_size);
      else
	{
	  int start, end;

	  gcc_assert (source->expr_type == EXPR_SUBSTRING);
	  gfc_extract_int (source->ref->u.ss.start, &start);
	  gfc_extract_int (source->ref->u.ss.end, &end);
	  return gfc_encode_character (source->ts.kind, MAX(end - start + 1, 0),
				       &source->value.character.string[start-1],
				       buffer, buffer_size);
	}

    case BT_DERIVED:
      return encode_derived (source, buffer, buffer_size);
    default:
      gfc_internal_error ("Invalid expression in gfc_target_encode_expr.");
      return 0;
    }
}
Example #21
static void
lto_cgraph_replace_node (struct cgraph_node *node,
			 struct cgraph_node *prevailing_node)
{
  struct cgraph_edge *e, *next;
  bool compatible_p;

  if (symtab->dump_file)
    {
      fprintf (symtab->dump_file, "Replacing cgraph node %s/%i by %s/%i"
 	       " for symbol %s\n",
	       node->name (), node->order,
	       prevailing_node->name (),
	       prevailing_node->order,
	       IDENTIFIER_POINTER ((*targetm.asm_out.mangle_assembler_name)
		 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl)))));
    }

  /* Merge node flags.  */
  if (node->force_output)
    prevailing_node->mark_force_output ();
  if (node->forced_by_abi)
    prevailing_node->forced_by_abi = true;
  if (node->address_taken)
    {
      gcc_assert (!prevailing_node->global.inlined_to);
      prevailing_node->mark_address_taken ();
    }
  if (node->definition && prevailing_node->definition
      && DECL_COMDAT (node->decl) && DECL_COMDAT (prevailing_node->decl))
    prevailing_node->merged_comdat = true;

  /* Redirect all incoming edges.  */
  compatible_p
    = types_compatible_p (TREE_TYPE (TREE_TYPE (prevailing_node->decl)),
			  TREE_TYPE (TREE_TYPE (node->decl)));
  for (e = node->callers; e; e = next)
    {
      next = e->next_caller;
      e->redirect_callee (prevailing_node);
      /* If there is a mismatch between the supposed callee return type and
	 the real one do not attempt to inline this function.
	 ???  We really need a way to match function signatures for ABI
	 compatibility and perform related promotions at inlining time.  */
      if (!compatible_p)
	e->call_stmt_cannot_inline_p = 1;
    }
  /* Redirect incoming references.  */
  prevailing_node->clone_referring (node);

  /* Fix instrumentation references.  */
  if (node->instrumented_version)
    {
      gcc_assert (node->instrumentation_clone
		  == prevailing_node->instrumentation_clone);
      node->instrumented_version->instrumented_version = prevailing_node;
      if (!prevailing_node->instrumented_version)
	prevailing_node->instrumented_version = node->instrumented_version;
      /* Need to reset node->instrumented_version to NULL,
	 otherwise node removal code would reset
	 node->instrumented_version->instrumented_version.  */
      node->instrumented_version = NULL;
    }

  lto_free_function_in_decl_state_for_node (node);

  if (node->decl != prevailing_node->decl)
    node->release_body ();

  /* Finally remove the replaced node.  */
  node->remove ();
}
Example #22
bool
gfc_convert_boz (gfc_expr *expr, gfc_typespec *ts)
{
  size_t buffer_size, boz_bit_size, ts_bit_size;
  int index;
  unsigned char *buffer;

  if (!expr->is_boz)
    return true;

  gcc_assert (expr->expr_type == EXPR_CONSTANT
	      && expr->ts.type == BT_INTEGER);

  /* Don't convert BOZ to logical, character, derived etc.  */
  if (ts->type == BT_REAL)
    {
      buffer_size = size_float (ts->kind);
      ts_bit_size = buffer_size * 8;
    }
  else if (ts->type == BT_COMPLEX)
    {
      buffer_size = size_complex (ts->kind);
      ts_bit_size = buffer_size * 8 / 2;
    }
  else
    return true;

  /* Convert BOZ to the smallest possible integer kind.  */
  boz_bit_size = mpz_sizeinbase (expr->value.integer, 2);

  if (boz_bit_size > ts_bit_size)
    {
      gfc_error_now ("BOZ constant at %L is too large (%ld vs %ld bits)",
		     &expr->where, (long) boz_bit_size, (long) ts_bit_size);
      return false;
    }

  for (index = 0; gfc_integer_kinds[index].kind != 0; ++index)
    if ((unsigned) gfc_integer_kinds[index].bit_size >= ts_bit_size)
      break;

  expr->ts.kind = gfc_integer_kinds[index].kind;
  buffer_size = MAX (buffer_size, size_integer (expr->ts.kind));

  buffer = (unsigned char*)alloca (buffer_size);
  encode_integer (expr->ts.kind, expr->value.integer, buffer, buffer_size);
  mpz_clear (expr->value.integer);

  if (ts->type == BT_REAL)
    {
      mpfr_init (expr->value.real);
      gfc_interpret_float (ts->kind, buffer, buffer_size, expr->value.real);
    }
  else
    {
      mpc_init2 (expr->value.complex, mpfr_get_default_prec());
      gfc_interpret_complex (ts->kind, buffer, buffer_size,
			     expr->value.complex);
    }
  expr->is_boz = 0;  
  expr->ts.type = ts->type;
  expr->ts.kind = ts->kind;

  return true;
}
Example #23
static void
resolve_omp_do (gfc_code *code)
{
  gfc_code *do_code, *c;
  int list, i, collapse;
  gfc_namelist *n;
  gfc_symbol *dovar;

  if (code->ext.omp_clauses)
    resolve_omp_clauses (code);

  do_code = code->block->next;
  collapse = code->ext.omp_clauses->collapse;
  if (collapse <= 0)
    collapse = 1;
  for (i = 1; i <= collapse; i++)
    {
      if (do_code->op == EXEC_DO_WHILE)
	{
	  gfc_error ("!$OMP DO cannot be a DO WHILE or DO without loop control "
		     "at %L", &do_code->loc);
	  break;
	}
      gcc_assert (do_code->op == EXEC_DO);
      if (do_code->ext.iterator->var->ts.type != BT_INTEGER)
	gfc_error ("!$OMP DO iteration variable must be of type integer at %L",
		   &do_code->loc);
      dovar = do_code->ext.iterator->var->symtree->n.sym;
      if (dovar->attr.threadprivate)
	gfc_error ("!$OMP DO iteration variable must not be THREADPRIVATE "
		   "at %L", &do_code->loc);
      if (code->ext.omp_clauses)
	for (list = 0; list < OMP_LIST_NUM; list++)
	  if (list != OMP_LIST_PRIVATE && list != OMP_LIST_LASTPRIVATE)
	    for (n = code->ext.omp_clauses->lists[list]; n; n = n->next)
	      if (dovar == n->sym)
		{
		  gfc_error ("!$OMP DO iteration variable present on clause "
			     "other than PRIVATE or LASTPRIVATE at %L",
			     &do_code->loc);
		  break;
		}
      if (i > 1)
	{
	  gfc_code *do_code2 = code->block->next;
	  int j;

	  for (j = 1; j < i; j++)
	    {
	      gfc_symbol *ivar = do_code2->ext.iterator->var->symtree->n.sym;
	      if (dovar == ivar
		  || gfc_find_sym_in_expr (ivar, do_code->ext.iterator->start)
		  || gfc_find_sym_in_expr (ivar, do_code->ext.iterator->end)
		  || gfc_find_sym_in_expr (ivar, do_code->ext.iterator->step))
		{
		  gfc_error ("!$OMP DO collapsed loops don't form rectangular iteration space at %L",
			     &do_code->loc);
		  break;
		}
	      if (j < i)
		break;
	      do_code2 = do_code2->block->next;
	    }
	}
      if (i == collapse)
	break;
      for (c = do_code->next; c; c = c->next)
	if (c->op != EXEC_NOP && c->op != EXEC_CONTINUE)
	  {
	    gfc_error ("collapsed !$OMP DO loops not perfectly nested at %L",
		       &c->loc);
	    break;
	  }
      if (c)
	break;
      do_code = do_code->block;
      if (do_code->op != EXEC_DO && do_code->op != EXEC_DO_WHILE)
	{
	  gfc_error ("not enough DO loops for collapsed !$OMP DO at %L",
		     &code->loc);
	  break;
	}
      do_code = do_code->next;
      if (do_code == NULL
	  || (do_code->op != EXEC_DO && do_code->op != EXEC_DO_WHILE))
	{
	  gfc_error ("not enough DO loops for collapsed !$OMP DO at %L",
		     &code->loc);
	  break;
	}
    }
}
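The "rectangular iteration space" requirement is easiest to see with a tiny example. The sketch below is C rather than Fortran, and the names are made up, but the collapsed-loop shape being diagnosed is the same as for !$OMP DO ... COLLAPSE.

void
demo (int n, int *a)
{
  /* Fine: both bounds are loop-invariant, so the collapse(2) space is
     rectangular.  */
  #pragma omp parallel for collapse(2)
  for (int i = 0; i < n; i++)
    for (int j = 0; j < n; j++)
      a[i * n + j] = 0;

  /* Not rectangular: the inner bounds depend on the outer iteration
     variable -- the analogue of what resolve_omp_do rejects above.
  #pragma omp parallel for collapse(2)
  for (int i = 0; i < n; i++)
    for (int j = i; j < n; j++)
      a[i * n + j] = 0;
  */
}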
Example #24
/* These are for splay_tree_new_ggc.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (!nl);
  return ggc_alloc (sz);
}
Example #25
static void
create_common (gfc_common_head *com, segment_info *head, bool saw_equiv)
{
    segment_info *s, *next_s;
    tree union_type;
    tree *field_link;
    tree field;
    tree field_init = NULL_TREE;
    record_layout_info rli;
    tree decl;
    bool is_init = false;
    bool is_saved = false;

    /* Declare the variables inside the common block.
       If the current common block contains any equivalence object, then
       make a UNION_TYPE node, otherwise RECORD_TYPE. This will let the
       alias analyzer work well when there is no address overlapping for
       common variables in the current common block.  */
    if (saw_equiv)
        union_type = make_node (UNION_TYPE);
    else
        union_type = make_node (RECORD_TYPE);

    rli = start_record_layout (union_type);
    field_link = &TYPE_FIELDS (union_type);

    /* Check for overlapping initializers and replace them with a single,
       artificial field that contains all the data.  */
    if (saw_equiv)
        field = get_init_field (head, union_type, &field_init, rli);
    else
        field = NULL_TREE;

    if (field != NULL_TREE)
    {
        is_init = true;
        *field_link = field;
        field_link = &DECL_CHAIN (field);
    }

    for (s = head; s; s = s->next)
    {
        build_field (s, union_type, rli);

        /* Link the field into the type.  */
        *field_link = s->field;
        field_link = &DECL_CHAIN (s->field);

        /* Has initial value.  */
        if (s->sym->value)
            is_init = true;

        /* Has SAVE attribute.  */
        if (s->sym->attr.save)
            is_saved = true;
    }

    finish_record_layout (rli, true);

    if (com)
        decl = build_common_decl (com, union_type, is_init);
    else
        decl = build_equiv_decl (union_type, is_init, is_saved);

    if (is_init)
    {
        tree ctor, tmp;
        vec<constructor_elt, va_gc> *v = NULL;

        if (field != NULL_TREE && field_init != NULL_TREE)
            CONSTRUCTOR_APPEND_ELT (v, field, field_init);
        else
            for (s = head; s; s = s->next)
            {
                if (s->sym->value)
                {
                    /* Add the initializer for this field.  */
                    tmp = gfc_conv_initializer (s->sym->value, &s->sym->ts,
                                                TREE_TYPE (s->field),
                                                s->sym->attr.dimension,
                                                s->sym->attr.pointer
                                                || s->sym->attr.allocatable, false);

                    CONSTRUCTOR_APPEND_ELT (v, s->field, tmp);
                }
            }

        gcc_assert (!v->is_empty ());
        ctor = build_constructor (union_type, v);
        TREE_CONSTANT (ctor) = 1;
        TREE_STATIC (ctor) = 1;
        DECL_INITIAL (decl) = ctor;

#ifdef ENABLE_CHECKING
        {
            tree field, value;
            unsigned HOST_WIDE_INT idx;
            FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), idx, field, value)
            gcc_assert (TREE_CODE (field) == FIELD_DECL);
        }
#endif
    }

    /* Build component reference for each variable.  */
    for (s = head; s; s = next_s)
    {
        tree var_decl;

        var_decl = build_decl (s->sym->declared_at.lb->location,
                               VAR_DECL, DECL_NAME (s->field),
                               TREE_TYPE (s->field));
        TREE_STATIC (var_decl) = TREE_STATIC (decl);
        /* Mark the variable as used in order to avoid warnings about
        unused variables.  */
        TREE_USED (var_decl) = 1;
        if (s->sym->attr.use_assoc)
            DECL_IGNORED_P (var_decl) = 1;
        if (s->sym->attr.target)
            TREE_ADDRESSABLE (var_decl) = 1;
        /* Fake variables are not visible from other translation units. */
        TREE_PUBLIC (var_decl) = 0;

        /* To preserve identifier names in COMMON, chain to procedure
           scope unless at top level in a module definition.  */
        if (com
                && s->sym->ns->proc_name
                && s->sym->ns->proc_name->attr.flavor == FL_MODULE)
            var_decl = pushdecl_top_level (var_decl);
        else
            gfc_add_decl_to_function (var_decl);

        SET_DECL_VALUE_EXPR (var_decl,
                             fold_build3_loc (input_location, COMPONENT_REF,
                                              TREE_TYPE (s->field),
                                              decl, s->field, NULL_TREE));
        DECL_HAS_VALUE_EXPR_P (var_decl) = 1;
        GFC_DECL_COMMON_OR_EQUIV (var_decl) = 1;

        if (s->sym->attr.assign)
        {
            gfc_allocate_lang_decl (var_decl);
            GFC_DECL_ASSIGN (var_decl) = 1;
            GFC_DECL_STRING_LEN (var_decl) = GFC_DECL_STRING_LEN (s->field);
            GFC_DECL_ASSIGN_ADDR (var_decl) = GFC_DECL_ASSIGN_ADDR (s->field);
        }

        s->sym->backend_decl = var_decl;

        next_s = s->next;
        free (s);
    }
}
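The SET_DECL_VALUE_EXPR loop above is what makes each COMMON or EQUIVALENCE member behave as an alias for a field of the single block decl. A rough, hypothetical C analogue of the result for something like "COMMON /blk/ n, r" (not generated code, just an illustration of the mapping):
/* One static record stands in for the common block; every use of a member
   name is rewritten into a component reference of that record, which is
   roughly what the DECL_VALUE_EXPR built above expresses in GENERIC.  */
#include <stdio.h>

static struct blk { int n; double r; } blk_;   /* the common block decl */

#define n (blk_.n)   /* stand-ins for the DECL_VALUE_EXPR rewriting */
#define r (blk_.r)

int
main (void)
{
  n = 42;
  r = 1.5;
  printf ("%d %g\n", n, r);
  return 0;
}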
Example #26
void
fma_node::set_head (du_head *head)
{
  gcc_assert (!this->m_head);
  this->m_head = head;
}
Example #27
static tree
mf_varname_tree (tree decl)
{
  static pretty_printer buf_rec;
  static int initialized = 0;
  pretty_printer *buf = & buf_rec;
  const char *buf_contents;
  tree result;

  gcc_assert (decl);

  if (!initialized)
    {
      pp_construct (buf, /* prefix */ NULL, /* line-width */ 0);
      initialized = 1;
    }
  pp_clear_output_area (buf);

  /* Add FILENAME[:LINENUMBER[:COLUMNNUMBER]].  */
  {
    expanded_location xloc = expand_location (DECL_SOURCE_LOCATION (decl));
    const char *sourcefile;
    unsigned sourceline = xloc.line;
    unsigned sourcecolumn = 0;
    sourcecolumn = xloc.column;
    sourcefile = xloc.file;
    if (sourcefile == NULL && current_function_decl != NULL_TREE)
      sourcefile = DECL_SOURCE_FILE (current_function_decl);
    if (sourcefile == NULL)
      sourcefile = "<unknown file>";

    pp_string (buf, sourcefile);

    if (sourceline != 0)
      {
        pp_string (buf, ":");
        pp_decimal_int (buf, sourceline);

        if (sourcecolumn != 0)
          {
            pp_string (buf, ":");
            pp_decimal_int (buf, sourcecolumn);
          }
      }
  }

  if (current_function_decl != NULL_TREE)
    {
      /* Add (FUNCTION) */
      pp_string (buf, " (");
      {
        const char *funcname = NULL;
        if (DECL_NAME (current_function_decl))
          funcname = lang_hooks.decl_printable_name (current_function_decl, 1);
        if (funcname == NULL)
          funcname = "anonymous fn";

        pp_string (buf, funcname);
      }
      pp_string (buf, ") ");
    }
  else
    pp_string (buf, " ");

  /* Add <variable-declaration>, possibly demangled.  */
  {
    const char *declname = NULL;

    if (DECL_NAME (decl) != NULL)
      {
	if (strcmp ("GNU C++", lang_hooks.name) == 0)
	  {
	    /* The gcc/cp decl_printable_name hook doesn't do as good a job as
	       the libiberty demangler.  */
	    declname = cplus_demangle (IDENTIFIER_POINTER (DECL_NAME (decl)),
				       DMGL_AUTO | DMGL_VERBOSE);
	  }
	if (declname == NULL)
	  declname = lang_hooks.decl_printable_name (decl, 3);
      }
    if (declname == NULL)
      declname = "<unnamed variable>";

    pp_string (buf, declname);
  }

  /* Return the lot as a new STRING_CST.  */
  buf_contents = pp_base_formatted_text (buf);
  result = mf_build_string (buf_contents);
  pp_clear_output_area (buf);

  return result;
}
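mf_varname_tree only assembles a human-readable location string for mudflap diagnostics; the overall shape is "FILE:LINE:COLUMN (FUNCTION) VARIABLE", with the line/column and function parts dropped when unknown. A small standalone sketch (not mudflap code) of that formatting, with all names hypothetical:
#include <stdio.h>

int
main (void)
{
  char out[128];
  const char *file = "foo.c", *func = "main", *var = "buf";
  int line = 12, column = 5;

  if (line != 0 && column != 0)
    snprintf (out, sizeof out, "%s:%d:%d (%s) %s", file, line, column, func, var);
  else
    snprintf (out, sizeof out, "%s (%s) %s", file, func, var);

  puts (out);   /* prints: foo.c:12:5 (main) buf */
  return 0;
}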
static bool
check_process_case (tree cs)
{
  tree ldecl;
  basic_block label_bb, following_bb;
  edge e;

  ldecl = CASE_LABEL (cs);
  label_bb = label_to_block (ldecl);

  e = find_edge (info.switch_bb, label_bb);
  gcc_assert (e);

  if (CASE_LOW (cs) == NULL_TREE)
    {
      /* Default branch.  */
      info.default_prob = e->probability;
      info.default_count = e->count;
    }
  else
    {
      int i;
      info.other_count += e->count;
      for (i = 0; i < 2; i++)
	if (info.bit_test_bb[i] == label_bb)
	  break;
	else if (info.bit_test_bb[i] == NULL)
	  {
	    info.bit_test_bb[i] = label_bb;
	    info.bit_test_uniq++;
	    break;
	  }
      if (i == 2)
	info.bit_test_uniq = 3;
      if (CASE_HIGH (cs) != NULL_TREE
	  && ! tree_int_cst_equal (CASE_LOW (cs), CASE_HIGH (cs)))
	info.bit_test_count += 2;
      else
	info.bit_test_count++;
    }

  if (!label_bb)
    {
      info.reason = "  Bad case - cs BB  label is NULL\n";
      return false;
    }

  if (!single_pred_p (label_bb))
    {
      if (info.final_bb && info.final_bb != label_bb)
	{
	  info.reason = "  Bad case - a non-final BB has two predecessors\n";
	  return false; /* something complex is going on in this branch  */
	}

      following_bb = label_bb;
    }
  else
    {
      if (!empty_block_p (label_bb))
	{
	  info.reason = "  Bad case - a non-final BB not empty\n";
	  return false;
	}

      e = single_succ_edge (label_bb);
      following_bb = single_succ (label_bb);
    }

  if (!info.final_bb)
    info.final_bb = following_bb;
  else if (info.final_bb != following_bb)
    {
      info.reason = "  Bad case - different final BB\n";
      return false; /* the only successor is not common for all the branches */
    }

  return true;
}
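check_process_case accepts a case only when its target block is either the common final block or an empty forwarder that falls through to it, while counting how many distinct destinations the non-default cases reach (bit_test_uniq) and how many values they cover (bit_test_count). A hedged sketch of the kind of source switch this corresponds to; after gimplification each case body becomes an empty block feeding one PHI at the common join:
/* Many case values, only one real destination besides the default: a good
   candidate for the bit-test/lookup strategies this information feeds.  */
int
classify (int c)
{
  int kind;

  switch (c)
    {
    case 'a': case 'e': case 'i': case 'o': case 'u':
      kind = 1;
      break;
    default:
      kind = 0;
      break;
    }

  return kind;   /* the common final basic block */
}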
/* Define the float.h constants for TYPE using NAME_PREFIX, FP_SUFFIX,
   and FP_CAST. */
static void
builtin_define_float_constants (const char *name_prefix, 
		                const char *fp_suffix, 
				const char *fp_cast, 
				tree type)
{
  /* Used to convert radix-based values to base 10 values in several cases.

     In the max_exp -> max_10_exp conversion for 128-bit IEEE, we need at
     least 6 significant digits for correct results.  Using the fraction
     formed by (log(2)*1e6)/(log(10)*1e6) overflows a 32-bit integer as an
     intermediate; perhaps someone can find a better approximation.  In the
     meantime, I suspect using doubles won't harm the bootstrap here.  */

  const double log10_2 = .30102999566398119521;
  double log10_b;
  const struct real_format *fmt;

  char name[64], buf[128];
  int dig, min_10_exp, max_10_exp;
  int decimal_dig;

  fmt = REAL_MODE_FORMAT (TYPE_MODE (type));
  gcc_assert (fmt->b != 10);

  /* The radix of the exponent representation.  */
  if (type == float_type_node)
    builtin_define_with_int_value ("__FLT_RADIX__", fmt->b);
  log10_b = log10_2;

  /* The number of radix digits, p, in the floating-point significand.  */
  sprintf (name, "__%s_MANT_DIG__", name_prefix);
  builtin_define_with_int_value (name, fmt->p);

  /* The number of decimal digits, q, such that any floating-point number
     with q decimal digits can be rounded into a floating-point number with
     p radix b digits and back again without change to the q decimal digits,

	p log10 b			if b is a power of 10
	floor((p - 1) log10 b)		otherwise
  */
  dig = (fmt->p - 1) * log10_b;
  sprintf (name, "__%s_DIG__", name_prefix);
  builtin_define_with_int_value (name, dig);

  /* The minimum negative int x such that b**(x-1) is a normalized float.  */
  sprintf (name, "__%s_MIN_EXP__", name_prefix);
  sprintf (buf, "(%d)", fmt->emin);
  builtin_define_with_value (name, buf, 0);

  /* The minimum negative int x such that 10**x is a normalized float,

	  ceil (log10 (b ** (emin - 1)))
	= ceil (log10 (b) * (emin - 1))

     Recall that emin is negative, so the integer truncation calculates
     the ceiling, not the floor, in this case.  */
  min_10_exp = (fmt->emin - 1) * log10_b;
  sprintf (name, "__%s_MIN_10_EXP__", name_prefix);
  sprintf (buf, "(%d)", min_10_exp);
  builtin_define_with_value (name, buf, 0);

  /* The maximum int x such that b**(x-1) is a representable float.  */
  sprintf (name, "__%s_MAX_EXP__", name_prefix);
  builtin_define_with_int_value (name, fmt->emax);

  /* The maximum int x such that 10**x is in the range of representable
     finite floating-point numbers,

	  floor (log10((1 - b**-p) * b**emax))
	= floor (log10(1 - b**-p) + log10(b**emax))
	= floor (log10(1 - b**-p) + log10(b)*emax)

     The safest thing to do here is to just compute this number.  But since
     we don't link cc1 with libm, we cannot.  We could implement log10 here
     as a series expansion, but that seems too much effort because:

     Note that the first term, for all extant p, is a number exceedingly close
     to zero, but slightly negative.  Note that the second term is an integer
     scaling an irrational number, and that because of the floor we are only
     interested in its integral portion.

     In order for the first term to have any effect on the integral portion
     of the second term, the second term has to be exceedingly close to an
     integer itself (e.g. 123.000000000001 or something).  Getting a result
     that close to an integer requires that the irrational multiplicand have
     a long series of zeros in its expansion, which doesn't occur in the
     first 20 digits or so of log10(b).

     Hand-waving aside, crunching all of the sets of constants above by hand
     does not yield a case for which the first term is significant, which
     in the end is all that matters.  */
  max_10_exp = fmt->emax * log10_b;
  sprintf (name, "__%s_MAX_10_EXP__", name_prefix);
  builtin_define_with_int_value (name, max_10_exp);

  /* The number of decimal digits, n, such that any floating-point number
     can be rounded to n decimal digits and back again without change to
     the value.

	p * log10(b)			if b is a power of 10
	ceil(1 + p * log10(b))		otherwise

     The only macro we care about is this number for the widest supported
     floating type, but we want this value for rendering constants below.  */
  {
    double d_decimal_dig = 1 + fmt->p * log10_b;
    decimal_dig = d_decimal_dig;
    if (decimal_dig < d_decimal_dig)
      decimal_dig++;
  }
  if (type == long_double_type_node)
    builtin_define_with_int_value ("__DECIMAL_DIG__", decimal_dig);

  /* Since, for the supported formats, B is always a power of 2, we
     construct the following numbers directly as a hexadecimal
     constants.  */
  get_max_float (fmt, buf, sizeof (buf));
  
  sprintf (name, "__%s_MAX__", name_prefix);
  builtin_define_with_hex_fp_value (name, type, decimal_dig, buf, fp_suffix, fp_cast);

  /* The minimum normalized positive floating-point number,
     b**(emin-1).  */
  sprintf (name, "__%s_MIN__", name_prefix);
  sprintf (buf, "0x1p%d", fmt->emin - 1);
  builtin_define_with_hex_fp_value (name, type, decimal_dig, buf, fp_suffix, fp_cast);

  /* The difference between 1 and the least value greater than 1 that is
     representable in the given floating point type, b**(1-p).  */
  sprintf (name, "__%s_EPSILON__", name_prefix);
  if (fmt->pnan < fmt->p)
    /* This is an IBM extended double format, so 1.0 + any double is
       representable precisely.  */
    sprintf (buf, "0x1p%d", fmt->emin - fmt->p);
  else
    sprintf (buf, "0x1p%d", 1 - fmt->p);
  builtin_define_with_hex_fp_value (name, type, decimal_dig, buf, fp_suffix, fp_cast);

  /* For C++ std::numeric_limits<T>::denorm_min.  The minimum denormalized
     positive floating-point number, b**(emin-p).  Zero for formats that
     don't support denormals.  */
  sprintf (name, "__%s_DENORM_MIN__", name_prefix);
  if (fmt->has_denorm)
    {
      sprintf (buf, "0x1p%d", fmt->emin - fmt->p);
      builtin_define_with_hex_fp_value (name, type, decimal_dig,
					buf, fp_suffix, fp_cast);
    }
  else
    {
      sprintf (buf, "0.0%s", fp_suffix);
      builtin_define_with_value (name, buf, 0);
    }

  sprintf (name, "__%s_HAS_DENORM__", name_prefix);
  builtin_define_with_value (name, fmt->has_denorm ? "1" : "0", 0);

  /* For C++ std::numeric_limits<T>::has_infinity.  */
  sprintf (name, "__%s_HAS_INFINITY__", name_prefix);
  builtin_define_with_int_value (name,
				 MODE_HAS_INFINITIES (TYPE_MODE (type)));
  /* For C++ std::numeric_limits<T>::has_quiet_NaN.  We do not have a
     predicate to distinguish a target that has both quiet and
     signalling NaNs from a target that has only quiet NaNs or only
     signalling NaNs, so we assume that a target that has any kind of
     NaN has quiet NaNs.  */
  sprintf (name, "__%s_HAS_QUIET_NAN__", name_prefix);
  builtin_define_with_int_value (name, MODE_HAS_NANS (TYPE_MODE (type)));
}
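Because the formulas above only ever use log10(2) and integer truncation, they are easy to sanity-check by hand. A standalone sketch (not GCC code) that replays them for IEEE binary64 (p = 53, emin = -1021, emax = 1024) and prints 15, -307, 308 and 17: the DIG, MIN_10_EXP and MAX_10_EXP values for double, plus the decimal digit count needed to round-trip a binary64 value:
#include <stdio.h>

int
main (void)
{
  const double log10_2 = .30102999566398119521;
  const int p = 53, emin = -1021, emax = 1024;

  int dig = (int) ((p - 1) * log10_2);            /* 15 */
  /* Truncation of a negative value acts as the ceiling here.  */
  int min_10_exp = (int) ((emin - 1) * log10_2);  /* -307 */
  int max_10_exp = (int) (emax * log10_2);        /* 308 */

  double d = 1 + p * log10_2;
  int decimal_dig = (int) d;
  if (decimal_dig < d)
    decimal_dig++;                                /* 17 */

  printf ("%d %d %d %d\n", dig, min_10_exp, max_10_exp, decimal_dig);
  return 0;
}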
Example #30
void
cp_genericize (tree fndecl)
{
  tree t;
  struct cp_genericize_data wtd;

  /* Fix up the types of parms passed by invisible reference.  */
  for (t = DECL_ARGUMENTS (fndecl); t; t = DECL_CHAIN (t))
    if (TREE_ADDRESSABLE (TREE_TYPE (t)))
      {
	/* If a function's arguments are copied to create a thunk,
	   then DECL_BY_REFERENCE will be set -- but the type of the
	   argument will be a pointer type, so we will never get
	   here.  */
	gcc_assert (!DECL_BY_REFERENCE (t));
	gcc_assert (DECL_ARG_TYPE (t) != TREE_TYPE (t));
	TREE_TYPE (t) = DECL_ARG_TYPE (t);
	DECL_BY_REFERENCE (t) = 1;
	TREE_ADDRESSABLE (t) = 0;
	relayout_decl (t);
      }

  /* Do the same for the return value.  */
  if (TREE_ADDRESSABLE (TREE_TYPE (DECL_RESULT (fndecl))))
    {
      t = DECL_RESULT (fndecl);
      TREE_TYPE (t) = build_reference_type (TREE_TYPE (t));
      DECL_BY_REFERENCE (t) = 1;
      TREE_ADDRESSABLE (t) = 0;
      relayout_decl (t);
      if (DECL_NAME (t))
	{
	  /* Adjust DECL_VALUE_EXPR of the original var.  */
	  tree outer = outer_curly_brace_block (current_function_decl);
	  tree var;

	  if (outer)
	    for (var = BLOCK_VARS (outer); var; var = DECL_CHAIN (var))
	      if (DECL_NAME (t) == DECL_NAME (var)
		  && DECL_HAS_VALUE_EXPR_P (var)
		  && DECL_VALUE_EXPR (var) == t)
		{
		  tree val = convert_from_reference (t);
		  SET_DECL_VALUE_EXPR (var, val);
		  break;
		}
	}
    }

  /* If we're a clone, the body is already GIMPLE.  */
  if (DECL_CLONED_FUNCTION_P (fndecl))
    return;

  /* We do want to see every occurrence of the parms, so we can't just use
     walk_tree's hash functionality.  */
  wtd.p_set = pointer_set_create ();
  wtd.bind_expr_stack = NULL;
  wtd.omp_ctx = NULL;
  cp_walk_tree (&DECL_SAVED_TREE (fndecl), cp_genericize_r, &wtd, NULL);
  pointer_set_destroy (wtd.p_set);
  VEC_free (tree, heap, wtd.bind_expr_stack);

  /* Do everything else.  */
  c_genericize (fndecl);

  gcc_assert (bc_label[bc_break] == NULL);
  gcc_assert (bc_label[bc_continue] == NULL);
}