Example #1
bool
gimple_simplify (gimple stmt,
		 code_helper *rcode, tree *ops,
		 gimple_seq *seq,
		 tree (*valueize)(tree), tree (*top_valueize)(tree))
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      {
	enum tree_code code = gimple_assign_rhs_code (stmt);
	tree type = TREE_TYPE (gimple_assign_lhs (stmt));
	switch (gimple_assign_rhs_class (stmt))
	  {
	  case GIMPLE_SINGLE_RHS:
	    if (code == REALPART_EXPR
		|| code == IMAGPART_EXPR
		|| code == VIEW_CONVERT_EXPR)
	      {
		tree op0 = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
		if (top_valueize && TREE_CODE (op0) == SSA_NAME)
		  {
		    tree tem = top_valueize (op0);
		    if (tem)
		      op0 = tem;
		  }
		*rcode = code;
		ops[0] = op0;
		return gimple_resimplify1 (seq, rcode, type, ops, valueize);
	      }
	    else if (code == BIT_FIELD_REF)
	      {
		tree rhs1 = gimple_assign_rhs1 (stmt);
		tree op0 = TREE_OPERAND (rhs1, 0);
		if (top_valueize && TREE_CODE (op0) == SSA_NAME)
		  {
		    tree tem = top_valueize (op0);
		    if (tem)
		      op0 = tem;
		  }
		*rcode = code;
		ops[0] = op0;
		ops[1] = TREE_OPERAND (rhs1, 1);
		ops[2] = TREE_OPERAND (rhs1, 2);
		return gimple_resimplify3 (seq, rcode, type, ops, valueize);
	      }
	    else if (code == SSA_NAME
		     && top_valueize)
	      {
		tree op0 = gimple_assign_rhs1 (stmt);
		tree valueized = top_valueize (op0);
		if (!valueized || op0 == valueized)
		  return false;
		ops[0] = valueized;
		*rcode = TREE_CODE (op0);
		return true;
	      }
	    break;
	  case GIMPLE_UNARY_RHS:
	    {
	      tree rhs1 = gimple_assign_rhs1 (stmt);
	      if (top_valueize && TREE_CODE (rhs1) == SSA_NAME)
		{
		  tree tem = top_valueize (rhs1);
		  if (tem)
		    rhs1 = tem;
		}
	      *rcode = code;
	      ops[0] = rhs1;
	      return gimple_resimplify1 (seq, rcode, type, ops, valueize);
	    }
	  case GIMPLE_BINARY_RHS:
	    {
	      tree rhs1 = gimple_assign_rhs1 (stmt);
	      if (top_valueize && TREE_CODE (rhs1) == SSA_NAME)
		{
		  tree tem = top_valueize (rhs1);
		  if (tem)
		    rhs1 = tem;
		}
	      tree rhs2 = gimple_assign_rhs2 (stmt);
	      if (top_valueize && TREE_CODE (rhs2) == SSA_NAME)
		{
		  tree tem = top_valueize (rhs2);
		  if (tem)
		    rhs2 = tem;
		}
	      *rcode = code;
	      ops[0] = rhs1;
	      ops[1] = rhs2;
	      return gimple_resimplify2 (seq, rcode, type, ops, valueize);
	    }
	  case GIMPLE_TERNARY_RHS:
	    {
	      tree rhs1 = gimple_assign_rhs1 (stmt);
	      if (top_valueize && TREE_CODE (rhs1) == SSA_NAME)
		{
		  tree tem = top_valueize (rhs1);
		  if (tem)
		    rhs1 = tem;
		}
	      tree rhs2 = gimple_assign_rhs2 (stmt);
	      if (top_valueize && TREE_CODE (rhs2) == SSA_NAME)
		{
		  tree tem = top_valueize (rhs2);
		  if (tem)
		    rhs2 = tem;
		}
	      tree rhs3 = gimple_assign_rhs3 (stmt);
	      if (top_valueize && TREE_CODE (rhs3) == SSA_NAME)
		{
		  tree tem = top_valueize (rhs3);
		  if (tem)
		    rhs3 = tem;
		}
	      *rcode = code;
	      ops[0] = rhs1;
	      ops[1] = rhs2;
	      ops[2] = rhs3;
	      return gimple_resimplify3 (seq, rcode, type, ops, valueize);
	    }
	  default:
	    gcc_unreachable ();
	  }
	break;
      }

    case GIMPLE_CALL:
      /* ???  This way we can't simplify calls with side-effects.  */
      if (gimple_call_lhs (stmt) != NULL_TREE)
	{
	  tree fn = gimple_call_fn (stmt);
	  /* ???  Internal function support missing.  */
	  if (!fn)
	    return false;
	  if (top_valueize && TREE_CODE (fn) == SSA_NAME)
	    {
	      tree tem = top_valueize (fn);
	      if (tem)
		fn = tem;
	    }
	  if (!fn
	      || TREE_CODE (fn) != ADDR_EXPR
	      || TREE_CODE (TREE_OPERAND (fn, 0)) != FUNCTION_DECL
	      || DECL_BUILT_IN_CLASS (TREE_OPERAND (fn, 0)) != BUILT_IN_NORMAL
	      || !builtin_decl_implicit (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0)))
	      || !gimple_builtin_call_types_compatible_p (stmt,
							  TREE_OPERAND (fn, 0)))
	    return false;

	  tree decl = TREE_OPERAND (fn, 0);
	  tree type = TREE_TYPE (gimple_call_lhs (stmt));
	  switch (gimple_call_num_args (stmt))
	    {
	    case 1:
	      {
		tree arg1 = gimple_call_arg (stmt, 0);
		if (top_valueize && TREE_CODE (arg1) == SSA_NAME)
		  {
		    tree tem = top_valueize (arg1);
		    if (tem)
		      arg1 = tem;
		  }
		*rcode = DECL_FUNCTION_CODE (decl);
		ops[0] = arg1;
		return gimple_resimplify1 (seq, rcode, type, ops, valueize);
	      }
	    case 2:
	      {
		tree arg1 = gimple_call_arg (stmt, 0);
		if (top_valueize && TREE_CODE (arg1) == SSA_NAME)
		  {
		    tree tem = top_valueize (arg1);
		    if (tem)
		      arg1 = tem;
		  }
		tree arg2 = gimple_call_arg (stmt, 1);
		if (top_valueize && TREE_CODE (arg2) == SSA_NAME)
		  {
		    tree tem = top_valueize (arg2);
		    if (tem)
		      arg2 = tem;
		  }
		*rcode = DECL_FUNCTION_CODE (decl);
		ops[0] = arg1;
		ops[1] = arg2;
		return gimple_resimplify2 (seq, rcode, type, ops, valueize);
	      }
	    case 3:
	      {
		tree arg1 = gimple_call_arg (stmt, 0);
		if (top_valueize && TREE_CODE (arg1) == SSA_NAME)
		  {
		    tree tem = top_valueize (arg1);
		    if (tem)
		      arg1 = tem;
		  }
		tree arg2 = gimple_call_arg (stmt, 1);
		if (top_valueize && TREE_CODE (arg2) == SSA_NAME)
		  {
		    tree tem = top_valueize (arg2);
		    if (tem)
		      arg2 = tem;
		  }
		tree arg3 = gimple_call_arg (stmt, 2);
		if (top_valueize && TREE_CODE (arg3) == SSA_NAME)
		  {
		    tree tem = top_valueize (arg3);
		    if (tem)
		      arg3 = tem;
		  }
		*rcode = DECL_FUNCTION_CODE (decl);
		ops[0] = arg1;
		ops[1] = arg2;
		ops[2] = arg3;
		return gimple_resimplify3 (seq, rcode, type, ops, valueize);
	      }
	    default:
	      return false;
	    }
	}
      break;

    case GIMPLE_COND:
      {
	tree lhs = gimple_cond_lhs (stmt);
	if (top_valueize && TREE_CODE (lhs) == SSA_NAME)
	  {
	    tree tem = top_valueize (lhs);
	    if (tem)
	      lhs = tem;
	  }
	tree rhs = gimple_cond_rhs (stmt);
	if (top_valueize && TREE_CODE (rhs) == SSA_NAME)
	  {
	    tree tem = top_valueize (rhs);
	    if (tem)
	      rhs = tem;
	  }
	*rcode = gimple_cond_code (stmt);
	ops[0] = lhs;
	ops[1] = rhs;
        return gimple_resimplify2 (seq, rcode, boolean_type_node, ops, valueize);
      }

    default:
      break;
    }

  return false;
}
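
The operand-valueization idiom above is repeated verbatim for every operand. It can be factored into a small helper; a minimal sketch under the same typedefs (maybe_top_valueize is a hypothetical name, not part of the original source):

static inline tree
maybe_top_valueize (tree op, tree (*top_valueize)(tree))
{
  /* Replace an SSA name by its current value when a valueization
     callback is supplied and yields one; otherwise return the
     operand unchanged.  */
  if (top_valueize && TREE_CODE (op) == SSA_NAME)
    {
      tree tem = top_valueize (op);
      if (tem)
	return tem;
    }
  return op;
}
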
Example #2
int
rtx_equal_p (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);
  /* Rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Some RTL can be compared nonrecursively.  */
  switch (code)
    {
    case REG:
      return (REGNO (x) == REGNO (y));

    case LABEL_REF:
      return XEXP (x, 0) == XEXP (y, 0);

    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);

    case SCRATCH:
    case CONST_DOUBLE:
    case CONST_INT:
      return 0;

    default:
      break;
    }

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case 'n':
	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'V':
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_p (XEXP (x, i), XEXP (y, i)) == 0)
	    return 0;
	  break;

	case 'S':
	case 's':
	  if ((XSTR (x, i) || XSTR (y, i))
	      && (! XSTR (x, i) || ! XSTR (y, i)
		  || strcmp (XSTR (x, i), XSTR (y, i))))
	    return 0;
	  break;

	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;

	case '0':
	case 't':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  gcc_unreachable ();
	}
    }
  return 1;
}
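
A quick usage sketch (illustrative assertions; gen_rtx_REG is the standard constructor): REG nodes compare equal only when the code, the mode, and the register number all match.

rtx a = gen_rtx_REG (SImode, 1);
rtx b = gen_rtx_REG (SImode, 1);
rtx c = gen_rtx_REG (HImode, 1);

gcc_assert (rtx_equal_p (a, b));   /* same code, mode and REGNO  */
gcc_assert (!rtx_equal_p (a, c));  /* (REG:SI 1) != (REG:HI 1): modes differ  */
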
Example #3
static void
lower_stmt (gimple_stmt_iterator *gsi, struct lower_data *data)
{
  gimple stmt = gsi_stmt (*gsi);

  gimple_set_block (stmt, data->block);

  switch (gimple_code (stmt))
    {
    case GIMPLE_BIND:
      lower_gimple_bind (gsi, data);
      /* Propagate fallthruness.  */
      return;

    case GIMPLE_COND:
    case GIMPLE_GOTO:
    case GIMPLE_SWITCH:
      data->cannot_fallthru = true;
      gsi_next (gsi);
      return;

    case GIMPLE_RETURN:
      if (data->cannot_fallthru)
	{
	  gsi_remove (gsi, false);
	  /* Propagate fallthruness.  */
	}
      else
	{
	  lower_gimple_return (gsi, data);
	  data->cannot_fallthru = true;
	}
      return;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
	lower_try_catch (gsi, data);
      else
	{
	  /* It must be a GIMPLE_TRY_FINALLY.  */
	  bool cannot_fallthru;
	  lower_sequence (gimple_try_eval_ptr (stmt), data);
	  cannot_fallthru = data->cannot_fallthru;

	  /* The finally clause is always executed after the try clause,
	     so if it does not fall through, then the try-finally will not
	     fall through.  Otherwise, if the try clause does not fall
	     through, then when the finally clause falls through it will
	     resume execution wherever the try clause was going.  So the
	     whole try-finally will only fall through if both the try
	     clause and the finally clause fall through.  */
	  data->cannot_fallthru = false;
	  lower_sequence (gimple_try_cleanup_ptr (stmt), data);
	  data->cannot_fallthru |= cannot_fallthru;
	  gsi_next (gsi);
	}
      return;

    case GIMPLE_EH_ELSE:
      lower_sequence (gimple_eh_else_n_body_ptr (stmt), data);
      lower_sequence (gimple_eh_else_e_body_ptr (stmt), data);
      break;

    case GIMPLE_NOP:
    case GIMPLE_ASM:
    case GIMPLE_ASSIGN:
    case GIMPLE_PREDICT:
    case GIMPLE_LABEL:
    case GIMPLE_EH_MUST_NOT_THROW:
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SECTIONS_SWITCH:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_RETURN:
    case GIMPLE_OMP_ATOMIC_LOAD:
    case GIMPLE_OMP_ATOMIC_STORE:
    case GIMPLE_OMP_CONTINUE:
      break;

    case GIMPLE_CALL:
      {
	tree decl = gimple_call_fndecl (stmt);
	unsigned i;

	for (i = 0; i < gimple_call_num_args (stmt); i++)
	  {
	    tree arg = gimple_call_arg (stmt, i);
	    if (EXPR_P (arg))
	      TREE_SET_BLOCK (arg, data->block);
	  }

	if (decl
	    && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL
	    && DECL_FUNCTION_CODE (decl) == BUILT_IN_SETJMP)
	  {
	    lower_builtin_setjmp (gsi);
	    data->cannot_fallthru = false;
	    data->calls_builtin_setjmp = true;
	    return;
	  }

	if (decl && (flags_from_decl_or_type (decl) & ECF_NORETURN))
	  {
	    data->cannot_fallthru = true;
	    gsi_next (gsi);
	    return;
	  }
      }
      break;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      data->cannot_fallthru = false;
      lower_omp_directive (gsi, data);
      data->cannot_fallthru = false;
      return;

    case GIMPLE_TRANSACTION:
      lower_sequence (gimple_transaction_body_ptr (stmt), data);
      break;

    default:
      gcc_unreachable ();
    }

  data->cannot_fallthru = false;
  gsi_next (gsi);
}
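
The GIMPLE_TRY_FINALLY handling reduces to a simple disjunction: the construct falls through only if both the try clause and the finally clause fall through, i.e.

  data->cannot_fallthru = try_cannot_fallthru || finally_cannot_fallthru;

which is exactly what the |= above computes after lowering the cleanup sequence.
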
Example #4
static tree
expand_sec_reduce_builtin (tree an_builtin_fn, tree *new_var)
{
  tree new_var_type = NULL_TREE, func_parm, new_yes_expr, new_no_expr;
  tree array_ind_value = NULL_TREE, new_no_ind, new_yes_ind, new_no_list;
  tree new_yes_list, new_cond_expr, new_expr = NULL_TREE; 
  vec<tree, va_gc> *array_list = NULL, *array_operand = NULL;
  size_t list_size = 0, rank = 0, ii = 0;
  tree  body, an_init, loop_with_init = alloc_stmt_list ();
  tree array_op0, comp_node = NULL_TREE;
  tree call_fn = NULL_TREE, identity_value = NULL_TREE;
  tree init = NULL_TREE, cond_init = NULL_TREE;
  enum tree_code code = NOP_EXPR;
  location_t location = UNKNOWN_LOCATION;
  vec<vec<an_parts> > an_info = vNULL;
  auto_vec<an_loop_parts> an_loop_info;
  enum built_in_function an_type =
    is_cilkplus_reduce_builtin (CALL_EXPR_FN (an_builtin_fn));
  vec <tree, va_gc> *func_args;
  
  if (an_type == BUILT_IN_NONE)
    return NULL_TREE;

  if (an_type != BUILT_IN_CILKPLUS_SEC_REDUCE
      && an_type != BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING)
    func_parm = CALL_EXPR_ARG (an_builtin_fn, 0);
  else
    {
      call_fn = CALL_EXPR_ARG (an_builtin_fn, 2);

      /* We need to do this because we are "faking" the builtin function types,
	 so the compiler does a bunch of typecasts and this will get rid of
	 all that!  */
      STRIP_NOPS (call_fn);
      if (TREE_CODE (call_fn) != OVERLOAD
	  && TREE_CODE (call_fn) != FUNCTION_DECL)
	call_fn = TREE_OPERAND (call_fn, 0);
      identity_value = CALL_EXPR_ARG (an_builtin_fn, 0);
      func_parm = CALL_EXPR_ARG (an_builtin_fn, 1);
      STRIP_NOPS (identity_value);
    }
  STRIP_NOPS (func_parm);
  
  location = EXPR_LOCATION (an_builtin_fn);
  
  /* Note about using find_rank (): if find_rank returns false, then it must
     have already reported an error, so we just return error_mark_node
     without doing any further error emission.  */
  if (!find_rank (location, an_builtin_fn, an_builtin_fn, true, &rank))
      return error_mark_node;
  if (rank == 0)
    {
      error_at (location, "Invalid builtin arguments");
      return error_mark_node;
    }
  else if (rank > 1 
	   && (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND
	       || an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND))
    { 
      error_at (location, "__sec_reduce_min_ind or __sec_reduce_max_ind cannot "
		"have arrays with dimension greater than 1");
      return error_mark_node;
    }
  
  extract_array_notation_exprs (func_parm, true, &array_list);
  list_size = vec_safe_length (array_list);
  switch (an_type)
    {
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ADD:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUL:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN:
      new_var_type = TREE_TYPE ((*array_list)[0]);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_ZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_ZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_NONZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_NONZERO:
      new_var_type = boolean_type_node;
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND:
      new_var_type = size_type_node;
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE:
      if (call_fn && identity_value)
	new_var_type = TREE_TYPE ((*array_list)[0]);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING:
      new_var_type = NULL_TREE;
      break;
    default:
      gcc_unreachable ();
    }
    
  if (new_var_type && TREE_CODE (new_var_type) == ARRAY_TYPE)
    new_var_type = TREE_TYPE (new_var_type);
  an_loop_info.safe_grow_cleared (rank);

  an_init = push_stmt_list ();

  /* Assign the array notation components to variables so that they satisfy
     the exec-once rule.  */
  for (ii = 0; ii < list_size; ii++)
    if (TREE_CODE ((*array_list)[ii]) == ARRAY_NOTATION_REF)
      {
	tree anode = (*array_list)[ii];
	make_triplet_val_inv (&ARRAY_NOTATION_START (anode));
	make_triplet_val_inv (&ARRAY_NOTATION_LENGTH (anode));
	make_triplet_val_inv (&ARRAY_NOTATION_STRIDE (anode));
      }
  cilkplus_extract_an_triplets (array_list, list_size, rank, &an_info);
  for (ii = 0; ii < rank; ii++)
    {
      tree typ = ptrdiff_type_node;

      /* Here we use get_temp_regvar instead of create_temporary_var when
	 an_type is SEC_REDUCE_MAX/MIN_IND, because array_ind_value depends
	 on this value being initialized to 0.  */
      if (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND
	  || an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND) 
	an_loop_info[ii].var = get_temp_regvar (typ, build_zero_cst (typ));
      else
	{
	  an_loop_info[ii].var = create_temporary_var (typ);
	  add_decl_expr (an_loop_info[ii].var);
	}
      an_loop_info[ii].ind_init = 
	build_x_modify_expr (location, an_loop_info[ii].var, INIT_EXPR,
			     build_zero_cst (typ), tf_warning_or_error);
    }
  array_operand = create_array_refs (location, an_info, an_loop_info,
				      list_size, rank);
  replace_array_notations (&func_parm, true, array_list, array_operand);
  
  if (!TREE_TYPE (func_parm))      
    TREE_TYPE (func_parm) = TREE_TYPE ((*array_list)[0]);
  
  create_cmp_incr (location, &an_loop_info, rank, an_info, tf_warning_or_error);
  if (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND
      || an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND) 
    array_ind_value = get_temp_regvar (TREE_TYPE (func_parm), func_parm);

  array_op0 = (*array_operand)[0];
  if (INDIRECT_REF_P (array_op0))
    array_op0 = TREE_OPERAND (array_op0, 0);
  switch (an_type)
    {
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ADD:
      code = PLUS_EXPR;
      init = build_zero_cst (new_var_type);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUL:
      code = MULT_EXPR;
      init = build_one_cst (new_var_type);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_ZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_NONZERO:
      code = ((an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_ZERO) ? EQ_EXPR
	: NE_EXPR);
      init = build_zero_cst (new_var_type);
      cond_init = build_one_cst (new_var_type);
      comp_node = build_zero_cst (TREE_TYPE (func_parm));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_ZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_NONZERO:
      code = ((an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_ZERO) ? NE_EXPR
	: EQ_EXPR);
      init = build_one_cst (new_var_type);
      cond_init = build_zero_cst (new_var_type);
      comp_node = build_zero_cst (TREE_TYPE (func_parm));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX:
      code = MAX_EXPR;
      init = (TYPE_MIN_VALUE (new_var_type) ? TYPE_MIN_VALUE (new_var_type)
	: func_parm);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN:
      code = MIN_EXPR;
      init = (TYPE_MAX_VALUE (new_var_type) ? TYPE_MAX_VALUE (new_var_type)
	: func_parm);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND:
      code = (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND ? LE_EXPR
	: GE_EXPR);
      init = an_loop_info[0].var;
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE:
      init = identity_value;
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING:
      init = NULL_TREE;
      break;
    default:
      gcc_unreachable ();
    }

  if (an_type != BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING)
    *new_var = get_temp_regvar (new_var_type, init);
  else
    *new_var = NULL_TREE;

  switch (an_type)
    {
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ADD:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUL:      
      new_expr = build_x_modify_expr (location, *new_var, code, func_parm,
				      tf_warning_or_error);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_ZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_NONZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_ZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_NONZERO:
      /* In all these cases, start by assuming the false case; as soon as
	 we find a true case, set the true flag and latch it in.  */
      new_yes_expr = build_x_modify_expr (location, *new_var, NOP_EXPR,
					  cond_init, tf_warning_or_error);
      new_no_expr = build_x_modify_expr (location, *new_var, NOP_EXPR,
					 *new_var, tf_warning_or_error);
      new_cond_expr = build_x_binary_op
	(location, code, func_parm, TREE_CODE (func_parm), comp_node,
	 TREE_CODE (comp_node), NULL, tf_warning_or_error);
      new_expr = build_x_conditional_expr (location, new_cond_expr,
					   new_yes_expr, new_no_expr,
					   tf_warning_or_error);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN:
      new_cond_expr = build_x_binary_op
	(location, code, *new_var, TREE_CODE (*new_var), func_parm,
	 TREE_CODE (func_parm), NULL, tf_warning_or_error);
      new_expr = build_x_modify_expr (location, *new_var, NOP_EXPR, func_parm,
				      tf_warning_or_error);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND:
      new_yes_expr = build_x_modify_expr (location, array_ind_value, NOP_EXPR,
					  func_parm, tf_warning_or_error);
      new_no_expr = build_x_modify_expr (location, array_ind_value, NOP_EXPR,
					 array_ind_value, tf_warning_or_error);
      if (list_size > 1)
	new_yes_ind = build_x_modify_expr (location, *new_var, NOP_EXPR,
					   an_loop_info[0].var,
					   tf_warning_or_error);
      else
	new_yes_ind = build_x_modify_expr (location, *new_var, NOP_EXPR,
					   TREE_OPERAND (array_op0, 1),
					   tf_warning_or_error);
      new_no_ind = build_x_modify_expr (location, *new_var, NOP_EXPR, *new_var,
					tf_warning_or_error);
      new_yes_list = alloc_stmt_list ();
      append_to_statement_list (new_yes_ind, &new_yes_list);
      append_to_statement_list (new_yes_expr, &new_yes_list);

      new_no_list = alloc_stmt_list ();
      append_to_statement_list (new_no_ind, &new_no_list);
      append_to_statement_list (new_no_expr, &new_no_list);

      new_cond_expr = build_x_binary_op (location, code, array_ind_value,
					 TREE_CODE (array_ind_value), func_parm,
					 TREE_CODE (func_parm), NULL,
					 tf_warning_or_error);
      new_expr = build_x_conditional_expr (location, new_cond_expr,
					   new_yes_list, new_no_list,
					   tf_warning_or_error);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING:
      func_args = make_tree_vector ();
      if (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE)
	vec_safe_push (func_args, *new_var);
      else
	vec_safe_push (func_args, identity_value);
      vec_safe_push (func_args, func_parm);

      new_expr = finish_call_expr (call_fn, &func_args, false, true,
				   tf_warning_or_error);
      if (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE)
	new_expr = build_x_modify_expr (location, *new_var, NOP_EXPR, new_expr,
					tf_warning_or_error);
      release_tree_vector (func_args);
      break;
    default:
      gcc_unreachable ();
    }
  an_init = pop_stmt_list (an_init);
  append_to_statement_list (an_init, &loop_with_init);
  body = new_expr;

  for (ii = 0; ii < rank; ii++)
    {
      tree new_loop = push_stmt_list ();
      create_an_loop (an_loop_info[ii].ind_init, an_loop_info[ii].cmp,
		      an_loop_info[ii].incr, body);
      body = pop_stmt_list (new_loop);
    }
  append_to_statement_list (body, &loop_with_init);

  release_vec_vec (an_info);

  return loop_with_init;
}
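
For orientation, a one-dimensional reduction such as __sec_reduce_add (a[0:n]) is expanded along these lines (hand-written pseudo-source, not actual compiler output):

/* T new_var = 0;                    -- init chosen for PLUS_EXPR
   for (ii = 0; ii < n; ii++)
     new_var = new_var + a[ii];      -- new_expr from build_x_modify_expr
   -- the builtin call is then replaced by new_var  */
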
Example #5
static const char *
gen_type (const char *ret_val, tree t, formals_style style)
{
  tree chain_p;

  /* If there is a typedef name for this type, use it.  */
  if (TYPE_NAME (t) && TREE_CODE (TYPE_NAME (t)) == TYPE_DECL)
    data_type = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t)));
  else
    {
      switch (TREE_CODE (t))
	{
	case POINTER_TYPE:
	  if (TYPE_READONLY (t))
	    ret_val = concat ("const ", ret_val, NULL);
	  if (TYPE_VOLATILE (t))
	    ret_val = concat ("volatile ", ret_val, NULL);

	  ret_val = concat ("*", ret_val, NULL);

	  if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE || TREE_CODE (TREE_TYPE (t)) == FUNCTION_TYPE)
	    ret_val = concat ("(", ret_val, ")", NULL);

	  ret_val = gen_type (ret_val, TREE_TYPE (t), style);

	  return ret_val;

	case ARRAY_TYPE:
	  if (!COMPLETE_TYPE_P (t) || TREE_CODE (TYPE_SIZE (t)) != INTEGER_CST)
	    ret_val = gen_type (concat (ret_val, "[]", NULL),
				TREE_TYPE (t), style);
	  else if (int_size_in_bytes (t) == 0)
	    ret_val = gen_type (concat (ret_val, "[0]", NULL),
				TREE_TYPE (t), style);
	  else
	    {
	      int size = (int_size_in_bytes (t) / int_size_in_bytes (TREE_TYPE (t)));
	      char buff[10];
	      sprintf (buff, "[%d]", size);
	      ret_val = gen_type (concat (ret_val, buff, NULL),
				  TREE_TYPE (t), style);
	    }
	  break;

	case FUNCTION_TYPE:
	  ret_val = gen_type (concat (ret_val,
				      gen_formal_list_for_type (t, style),
				      NULL),
			      TREE_TYPE (t), style);
	  break;

	case IDENTIFIER_NODE:
	  data_type = IDENTIFIER_POINTER (t);
	  break;

	/* The following three cases are complicated by the fact that a
	   user may do something really stupid, like creating a brand new
	   "anonymous" type specification in a formal argument list (or as
	   part of a function return type specification).  For example:

		int f (enum { red, green, blue } color);

	   In such cases, we have no name that we can put into the prototype
	   to represent the (anonymous) type.  Thus, we have to generate the
	   whole darn type specification.  Yuck!  */

	case RECORD_TYPE:
	  if (TYPE_NAME (t))
	    data_type = IDENTIFIER_POINTER (TYPE_NAME (t));
	  else
	    {
	      data_type = "";
	      chain_p = TYPE_FIELDS (t);
	      while (chain_p)
		{
		  data_type = concat (data_type, gen_decl (chain_p, 0, ansi),
				      NULL);
		  chain_p = TREE_CHAIN (chain_p);
		  data_type = concat (data_type, "; ", NULL);
		}
	      data_type = concat ("{ ", data_type, "}", NULL);
	    }
	  data_type = concat ("struct ", data_type, NULL);
	  break;

	case UNION_TYPE:
	  if (TYPE_NAME (t))
	    data_type = IDENTIFIER_POINTER (TYPE_NAME (t));
	  else
	    {
	      data_type = "";
	      chain_p = TYPE_FIELDS (t);
	      while (chain_p)
		{
		  data_type = concat (data_type, gen_decl (chain_p, 0, ansi),
				      NULL);
		  chain_p = TREE_CHAIN (chain_p);
		  data_type = concat (data_type, "; ", NULL);
		}
	      data_type = concat ("{ ", data_type, "}", NULL);
	    }
	  data_type = concat ("union ", data_type, NULL);
	  break;

	case ENUMERAL_TYPE:
	  if (TYPE_NAME (t))
	    data_type = IDENTIFIER_POINTER (TYPE_NAME (t));
	  else
	    {
	      data_type = "";
	      chain_p = TYPE_VALUES (t);
	      while (chain_p)
		{
		  data_type = concat (data_type,
			IDENTIFIER_POINTER (TREE_PURPOSE (chain_p)), NULL);
		  chain_p = TREE_CHAIN (chain_p);
		  if (chain_p)
		    data_type = concat (data_type, ", ", NULL);
		}
	      data_type = concat ("{ ", data_type, " }", NULL);
	    }
	  data_type = concat ("enum ", data_type, NULL);
	  break;

	case TYPE_DECL:
	  data_type = IDENTIFIER_POINTER (DECL_NAME (t));
	  break;

	case INTEGER_TYPE:
	  data_type = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t)));
	  /* Normally, `unsigned' is part of the deal.  Not so if it comes
	     with a type qualifier.  */
	  if (TYPE_UNSIGNED (t) && TYPE_QUALS (t))
	    data_type = concat ("unsigned ", data_type, NULL);
	  break;

	case REAL_TYPE:
	  data_type = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t)));
	  break;

	case VOID_TYPE:
	  data_type = "void";
	  break;

	case ERROR_MARK:
	  data_type = "[ERROR]";
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (TYPE_READONLY (t))
    ret_val = concat ("const ", ret_val, NULL);
  if (TYPE_VOLATILE (t))
    ret_val = concat ("volatile ", ret_val, NULL);
  if (TYPE_RESTRICT (t))
    ret_val = concat ("restrict ", ret_val, NULL);
  return ret_val;
}
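
A worked example of the inside-out recursion: for a parameter p of type "pointer to array of 10 int", gen_type starting from ret_val == "p" builds (illustration):

  start:          "p"
  POINTER_TYPE:   "*p", parenthesized to "(*p)" because it points to an array
  ARRAY_TYPE:     "(*p)[10]"
  INTEGER_TYPE:   data_type = "int"

yielding the familiar declaration int (*p)[10].
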
Example #6
void
do_jump (tree exp, rtx if_false_label, rtx if_true_label, int prob)
{
  enum tree_code code = TREE_CODE (exp);
  rtx temp;
  int i;
  tree type;
  enum machine_mode mode;
  rtx drop_through_label = 0;

  switch (code)
    {
    case ERROR_MARK:
      break;

    case INTEGER_CST:
      temp = integer_zerop (exp) ? if_false_label : if_true_label;
      if (temp)
        emit_jump (temp);
      break;

#if 0
      /* This is not true with #pragma weak  */
    case ADDR_EXPR:
      /* The address of something can never be zero.  */
      if (if_true_label)
        emit_jump (if_true_label);
      break;
#endif

    case NOP_EXPR:
      if (TREE_CODE (TREE_OPERAND (exp, 0)) == COMPONENT_REF
          || TREE_CODE (TREE_OPERAND (exp, 0)) == BIT_FIELD_REF
          || TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_REF
          || TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_RANGE_REF)
        goto normal;
    case CONVERT_EXPR:
      /* If we are narrowing the operand, we have to do the compare in the
         narrower mode.  */
      if ((TYPE_PRECISION (TREE_TYPE (exp))
           < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0)))))
        goto normal;
    case NON_LVALUE_EXPR:
    case ABS_EXPR:
    case NEGATE_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      /* These cannot change zero->nonzero or vice versa.  */
      do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label, prob);
      break;

    case TRUTH_NOT_EXPR:
      do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label,
	       inv (prob));
      break;

    case COND_EXPR:
      {
	rtx label1 = gen_label_rtx ();
	if (!if_true_label || !if_false_label)
	  {
	    drop_through_label = gen_label_rtx ();
	    if (!if_true_label)
	      if_true_label = drop_through_label;
	    if (!if_false_label)
	      if_false_label = drop_through_label;
	  }

        do_pending_stack_adjust ();
	do_jump (TREE_OPERAND (exp, 0), label1, NULL_RTX, -1);
	do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label, prob);
        emit_label (label1);
	do_jump (TREE_OPERAND (exp, 2), if_false_label, if_true_label, prob);
	break;
      }

    case COMPOUND_EXPR:
      /* Lowered by gimplify.c.  */
      gcc_unreachable ();

    case MINUS_EXPR:
      /* Nonzero iff operands of minus differ.  */
      code = NE_EXPR;

      /* FALLTHRU */
    case EQ_EXPR:
    case NE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case ORDERED_EXPR:
    case UNORDERED_EXPR:
    case UNLT_EXPR:
    case UNLE_EXPR:
    case UNGT_EXPR:
    case UNGE_EXPR:
    case UNEQ_EXPR:
    case LTGT_EXPR:
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
    other_code:
      do_jump_1 (code, TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1),
		 if_false_label, if_true_label, prob);
      break;

    case BIT_AND_EXPR:
      /* fold_single_bit_test() converts (X & (1 << C)) into (X >> C) & 1.
	 See if the former is preferred for jump tests and restore it
	 if so.  */
      if (integer_onep (TREE_OPERAND (exp, 1)))
	{
	  tree exp0 = TREE_OPERAND (exp, 0);
	  rtx set_label, clr_label;
	  int setclr_prob = prob;

	  /* Strip narrowing integral type conversions.  */
	  while (CONVERT_EXPR_P (exp0)
		 && TREE_OPERAND (exp0, 0) != error_mark_node
		 && TYPE_PRECISION (TREE_TYPE (exp0))
		    <= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp0, 0))))
	    exp0 = TREE_OPERAND (exp0, 0);

	  /* "exp0 ^ 1" inverts the sense of the single bit test.  */
	  if (TREE_CODE (exp0) == BIT_XOR_EXPR
	      && integer_onep (TREE_OPERAND (exp0, 1)))
	    {
	      exp0 = TREE_OPERAND (exp0, 0);
	      clr_label = if_true_label;
	      set_label = if_false_label;
	      setclr_prob = inv (prob);
	    }
	  else
	    {
	      clr_label = if_false_label;
	      set_label = if_true_label;
	    }

	  if (TREE_CODE (exp0) == RSHIFT_EXPR)
	    {
	      tree arg = TREE_OPERAND (exp0, 0);
	      tree shift = TREE_OPERAND (exp0, 1);
	      tree argtype = TREE_TYPE (arg);
	      if (TREE_CODE (shift) == INTEGER_CST
		  && compare_tree_int (shift, 0) >= 0
		  && compare_tree_int (shift, HOST_BITS_PER_WIDE_INT) < 0
		  && prefer_and_bit_test (TYPE_MODE (argtype),
					  TREE_INT_CST_LOW (shift)))
		{
		  unsigned HOST_WIDE_INT mask
		    = (unsigned HOST_WIDE_INT) 1 << TREE_INT_CST_LOW (shift);
		  do_jump (build2 (BIT_AND_EXPR, argtype, arg,
				   build_int_cstu (argtype, mask)),
			   clr_label, set_label, setclr_prob);
		  break;
		}
	    }
	}
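
      /* Illustration: for "if ((x >> 3) & 1)" the code above sees
	 shift == 3; if prefer_and_bit_test approves, it builds
	 mask = (unsigned HOST_WIDE_INT) 1 << 3 == 8 and re-expands
	 the test as "if (x & 8)".  */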

      /* If we are AND'ing with a small constant, do this comparison in the
         smallest type that fits.  If the machine doesn't have comparisons
         that small, it will be converted back to the wider comparison.
         This helps if we are testing the sign bit of a narrower object.
         combine can't do this for us because it can't know whether a
         ZERO_EXTRACT or a compare in a smaller mode exists, but we do.  */

      if (! SLOW_BYTE_ACCESS
          && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
          && TYPE_PRECISION (TREE_TYPE (exp)) <= HOST_BITS_PER_WIDE_INT
          && (i = tree_floor_log2 (TREE_OPERAND (exp, 1))) >= 0
          && (mode = mode_for_size (i + 1, MODE_INT, 0)) != BLKmode
          && (type = lang_hooks.types.type_for_mode (mode, 1)) != 0
          && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
          && have_insn_for (COMPARE, TYPE_MODE (type)))
        {
	  do_jump (fold_convert (type, exp), if_false_label, if_true_label,
		   prob);
          break;
        }

      if (TYPE_PRECISION (TREE_TYPE (exp)) > 1
	  || TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST)
	goto normal;

      /* Boolean comparisons can be compiled as TRUTH_AND_EXPR.  */

    case TRUTH_AND_EXPR:
      /* High branch cost, expand as the bitwise AND of the conditions.
	 Do the same if the RHS has side effects, because we're effectively
	 turning a TRUTH_AND_EXPR into a TRUTH_ANDIF_EXPR.  */
      if (BRANCH_COST (optimize_insn_for_speed_p (),
		       false) >= 4
	  || TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 1)))
	goto normal;
      code = TRUTH_ANDIF_EXPR;
      goto other_code;

    case BIT_IOR_EXPR:
    case TRUTH_OR_EXPR:
      /* High branch cost, expand as the bitwise OR of the conditions.
	 Do the same if the RHS has side effects, because we're effectively
	 turning a TRUTH_OR_EXPR into a TRUTH_ORIF_EXPR.  */
      if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 4
	  || TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 1)))
	goto normal;
      code = TRUTH_ORIF_EXPR;
      goto other_code;

      /* Fall through and generate the normal code.  */
    default:
    normal:
      temp = expand_normal (exp);
      do_pending_stack_adjust ();
      /* The RTL optimizers prefer comparisons against pseudos.  */
      if (GET_CODE (temp) == SUBREG)
	{
	  /* Compare promoted variables in their promoted mode.  */
	  if (SUBREG_PROMOTED_VAR_P (temp)
	      && REG_P (XEXP (temp, 0)))
	    temp = XEXP (temp, 0);
	  else
	    temp = copy_to_reg (temp);
	}
      do_compare_rtx_and_jump (temp, CONST0_RTX (GET_MODE (temp)),
			       NE, TYPE_UNSIGNED (TREE_TYPE (exp)),
			       GET_MODE (temp), NULL_RTX,
			       if_false_label, if_true_label, prob);
    }

  if (drop_through_label)
    {
      do_pending_stack_adjust ();
      emit_label (drop_through_label);
    }
}
Example #7
void
do_compare_rtx_and_jump (rtx op0, rtx op1, enum rtx_code code, int unsignedp,
			 enum machine_mode mode, rtx size, rtx if_false_label,
			 rtx if_true_label, int prob)
{
  rtx tem;
  rtx dummy_label = NULL_RTX;

  /* Reverse the comparison if that is safe and we want to jump if it is
     false.  Also convert to the reverse comparison if the target can
     implement it.  */
  if ((! if_true_label
       || ! can_compare_p (code, mode, ccp_jump))
      && (! FLOAT_MODE_P (mode)
	  || code == ORDERED || code == UNORDERED
	  || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
	  || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
    {
      enum rtx_code rcode;
      if (FLOAT_MODE_P (mode))
        rcode = reverse_condition_maybe_unordered (code);
      else
        rcode = reverse_condition (code);

      /* Canonicalize to UNORDERED for the libcall.  */
      if (can_compare_p (rcode, mode, ccp_jump)
	  || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
	{
          tem = if_true_label;
          if_true_label = if_false_label;
          if_false_label = tem;
	  code = rcode;
	  prob = inv (prob);
	}
    }

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  do_pending_stack_adjust ();

  code = unsignedp ? unsigned_condition (code) : code;
  if (0 != (tem = simplify_relational_operation (code, mode, VOIDmode,
						 op0, op1)))
    {
      if (CONSTANT_P (tem))
	{
	  rtx label = (tem == const0_rtx || tem == CONST0_RTX (mode))
		      ? if_false_label : if_true_label;
	  if (label)
	    emit_jump (label);
	  return;
	}

      code = GET_CODE (tem);
      mode = GET_MODE (tem);
      op0 = XEXP (tem, 0);
      op1 = XEXP (tem, 1);
      unsignedp = (code == GTU || code == LTU || code == GEU || code == LEU);
    }

  if (! if_true_label)
    dummy_label = if_true_label = gen_label_rtx ();

  if (GET_MODE_CLASS (mode) == MODE_INT
      && ! can_compare_p (code, mode, ccp_jump))
    {
      switch (code)
	{
	case LTU:
	  do_jump_by_parts_greater_rtx (mode, 1, op1, op0,
					if_false_label, if_true_label, prob);
	  break;

	case LEU:
	  do_jump_by_parts_greater_rtx (mode, 1, op0, op1,
					if_true_label, if_false_label,
					inv (prob));
	  break;

	case GTU:
	  do_jump_by_parts_greater_rtx (mode, 1, op0, op1,
					if_false_label, if_true_label, prob);
	  break;

	case GEU:
	  do_jump_by_parts_greater_rtx (mode, 1, op1, op0,
					if_true_label, if_false_label,
					inv (prob));
	  break;

	case LT:
	  do_jump_by_parts_greater_rtx (mode, 0, op1, op0,
					if_false_label, if_true_label, prob);
	  break;

	case LE:
	  do_jump_by_parts_greater_rtx (mode, 0, op0, op1,
					if_true_label, if_false_label,
					inv (prob));
	  break;

	case GT:
	  do_jump_by_parts_greater_rtx (mode, 0, op0, op1,
					if_false_label, if_true_label, prob);
	  break;

	case GE:
	  do_jump_by_parts_greater_rtx (mode, 0, op1, op0,
					if_true_label, if_false_label,
					inv (prob));
	  break;

	case EQ:
	  do_jump_by_parts_equality_rtx (mode, op0, op1, if_false_label,
					 if_true_label, prob);
	  break;

	case NE:
	  do_jump_by_parts_equality_rtx (mode, op0, op1, if_true_label,
					 if_false_label, inv (prob));
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      if (SCALAR_FLOAT_MODE_P (mode)
	  && ! can_compare_p (code, mode, ccp_jump)
	  && can_compare_p (swap_condition (code), mode, ccp_jump))
	{
	  rtx tmp;
	  code = swap_condition (code);
	  tmp = op0;
	  op0 = op1;
	  op1 = tmp;
	}
      else if (SCALAR_FLOAT_MODE_P (mode)
	       && ! can_compare_p (code, mode, ccp_jump)
	       /* Never split ORDERED and UNORDERED.
		  These must be implemented.  */
	       && (code != ORDERED && code != UNORDERED)
               /* Split a floating-point comparison if
		  we can jump on other conditions...  */
	       && (have_insn_for (COMPARE, mode)
	           /* ... or if there is no libcall for it.  */
	           || code_to_optab (code) == unknown_optab))
        {
	  enum rtx_code first_code;
	  bool and_them = split_comparison (code, mode, &first_code, &code);

	  /* If there are no NaNs, the first comparison should always fall
	     through.  */
	  if (!HONOR_NANS (mode))
	    gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));

	  else
	    {
	      int first_prob = prob;
	      if (first_code == UNORDERED)
		first_prob = REG_BR_PROB_BASE / 100;
	      else if (first_code == ORDERED)
		first_prob = REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100;
	      if (and_them)
		{
		  rtx dest_label;
		  /* If we only jump if true, just bypass the second jump.  */
		  if (! if_false_label)
		    {
		      if (! dummy_label)
		        dummy_label = gen_label_rtx ();
		      dest_label = dummy_label;
		    }
		  else
		    dest_label = if_false_label;
                  do_compare_rtx_and_jump (op0, op1, first_code, unsignedp, mode,
					   size, dest_label, NULL_RTX,
					   first_prob);
		}
              else
                do_compare_rtx_and_jump (op0, op1, first_code, unsignedp, mode,
					 size, NULL_RTX, if_true_label,
					 first_prob);
	    }
	}

      emit_cmp_and_jump_insns (op0, op1, code, size, mode, unsignedp,
			       if_true_label, prob);
    }

  if (if_false_label)
    emit_jump (if_false_label);
  if (dummy_label)
    emit_label (dummy_label);
}
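
Both do_jump and do_compare_rtx_and_jump invert branch probabilities with inv. A plausible definition of that helper, assuming probabilities on the REG_BR_PROB_BASE scale with -1 meaning "unknown" (a sketch, not necessarily the exact dojump.c definition):

static inline int
inv (int prob)
{
  /* Invert a branch probability; an unknown probability (-1)
     stays unknown.  */
  return prob == -1 ? -1 : REG_BR_PROB_BASE - prob;
}
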
Example #8
static tree
record_reference (tree *tp, int *walk_subtrees, void *data)
{
  tree t = *tp;
  tree decl;
  struct record_reference_ctx *ctx = (struct record_reference_ctx *)data;

  t = canonicalize_constructor_val (t);
  if (!t)
    t = *tp;
  else if (t != *tp)
    *tp = t;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case FUNCTION_DECL:
      gcc_unreachable ();
      break;

    case FDESC_EXPR:
    case ADDR_EXPR:
      /* Record dereferences to the functions.  This makes the
	 functions reachable unconditionally.  */
      decl = get_base_var (*tp);
      if (TREE_CODE (decl) == FUNCTION_DECL)
	{
	  struct cgraph_node *node = cgraph_get_create_node (decl);
	  if (!ctx->only_vars)
	    cgraph_mark_address_taken_node (node);
	  ipa_record_reference (NULL, ctx->varpool_node, node, NULL,
			        IPA_REF_ADDR, NULL);
	}

      if (TREE_CODE (decl) == VAR_DECL)
	{
	  struct varpool_node *vnode = varpool_node (decl);
	  if (lang_hooks.callgraph.analyze_expr)
	    lang_hooks.callgraph.analyze_expr (&decl, walk_subtrees);
	  varpool_mark_needed_node (vnode);
	  ipa_record_reference (NULL, ctx->varpool_node,
				NULL, vnode,
				IPA_REF_ADDR, NULL);
	}
      *walk_subtrees = 0;
      break;

    default:
      /* Save some cycles by not walking types and declaration as we
	 won't find anything useful there anyway.  */
      if (IS_TYPE_OR_DECL_P (*tp))
	{
	  *walk_subtrees = 0;
	  break;
	}

      if ((unsigned int) TREE_CODE (t) >= LAST_AND_UNUSED_TREE_CODE)
	return lang_hooks.callgraph.analyze_expr (tp, walk_subtrees);
      break;
    }

  return NULL_TREE;
}
Example #9
/*
 * Process Java-specific compiler command-line options.
 * Return 0, but do not complain if the option is not recognized.
 */
static int
java_handle_option (size_t scode, const char *arg, int value)
{
  enum opt_code code = (enum opt_code) scode;

  switch (code)
    {
    case OPT_I:
      jcf_path_include_arg (arg);
      break;

    case OPT_M:
      jcf_dependency_init (1);
      dependency_tracking |= DEPEND_ENABLE;
      break;

    case OPT_MD_:
      jcf_dependency_init (1);
      dependency_tracking |= DEPEND_SET_FILE | DEPEND_ENABLE;
      break;

    case OPT_MF:
      jcf_dependency_set_dep_file (arg);
      dependency_tracking |= DEPEND_FILE_ALREADY_SET;
      break;

    case OPT_MM:
      jcf_dependency_init (0);
      dependency_tracking |= DEPEND_ENABLE;
      break;

    case OPT_MMD_:
      jcf_dependency_init (0);
      dependency_tracking |= DEPEND_SET_FILE | DEPEND_ENABLE;
      break;

    case OPT_MP:
      jcf_dependency_print_dummies ();
      break;

    case OPT_MT:
      jcf_dependency_set_target (arg);
      dependency_tracking |= DEPEND_TARGET_SET;
      break;

    case OPT_Wall:
      flag_wall = value;
      /* When -Wall is given, enable -Wunused.  We do this because the C
	 compiler does it, and people expect it.  */
      warn_unused = value;
      break;

    case OPT_fenable_assertions_:
      add_enable_assert (arg, value);
      break;

    case OPT_fenable_assertions:
      add_enable_assert ("", value);
      break;

    case OPT_fdisable_assertions_:
      add_enable_assert (arg, !value);
      break;

    case OPT_fdisable_assertions:
      add_enable_assert ("", !value);
      break;

    case OPT_fassume_compiled_:
      add_assume_compiled (arg, !value);
      break;

    case OPT_fassume_compiled:
      add_assume_compiled ("", !value);
      break;

    case OPT_fbootclasspath_:
      jcf_path_bootclasspath_arg (arg);
      break;

    case OPT_faux_classpath:
    case OPT_fclasspath_:
    case OPT_fCLASSPATH_:
      jcf_path_classpath_arg (arg);
      break;

    case OPT_fcompile_resource_:
      resource_name = arg;
      break;

    case OPT_fdump_:
      if (!dump_switch_p (arg))
	return 0;
      break;

    case OPT_fencoding_:
      /* Nothing.  */
      break;

    case OPT_fextdirs_:
      jcf_path_extdirs_arg (arg);
      break;

    case OPT_foutput_class_dir_:
      /* FIXME: remove; this is handled by ecj1 now.  */
      break;

    case OPT_version:
      v_flag = 1;
      break;
      
    case OPT_fsource_filename_:
      java_read_sourcefilenames (arg);
      break;
      
    default:
      if (cl_options[code].flags & CL_Java)
	break;
      gcc_unreachable ();
    }

  return 1;
}
Example #10
/* Type promotion for variable arguments.  */
tree
lhd_type_promotes_to (tree ARG_UNUSED (type))
{
  gcc_unreachable ();
}
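
This default hook simply aborts; a front end that supports variadic calls overrides it. A C-like override might look as follows (a hypothetical sketch of the usual default argument promotions; the real C front end's hook also handles enums, bit-fields, etc.):

tree
c_like_type_promotes_to (tree type)
{
  /* float promotes to double; integer types narrower than int
     promote to int.  */
  if (TYPE_MAIN_VARIANT (type) == float_type_node)
    return double_type_node;
  if (INTEGRAL_TYPE_P (type)
      && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
    return integer_type_node;
  return type;
}
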
Example #11
recording::type *
builtins_manager::make_type (enum jit_builtin_type type_id)
{
  /* Use builtin-types.def to construct a switch statement, with each
     case deferring to one of the methods below:
       - DEF_PRIMITIVE_TYPE is handled as a call to make_primitive_type.
       - the various DEF_FUNCTION_TYPE_n are handled by variadic calls
	 to make_fn_type.
       - similarly for DEF_FUNCTION_TYPE_VAR_n, but setting the
	"is_variadic" argument.
       - DEF_POINTER_TYPE is handled by make_ptr_type.
     That should handle everything, but just in case we also supply a
     gcc_unreachable default clause.  */
  switch (type_id)
    {
#define DEF_PRIMITIVE_TYPE(ENUM, VALUE) \
      case ENUM: return make_primitive_type (ENUM);
#define DEF_FUNCTION_TYPE_0(ENUM, RETURN) \
      case ENUM: return make_fn_type (ENUM, RETURN, 0, 0);
#define DEF_FUNCTION_TYPE_1(ENUM, RETURN, ARG1) \
      case ENUM: return make_fn_type (ENUM, RETURN, 0, 1, ARG1);
#define DEF_FUNCTION_TYPE_2(ENUM, RETURN, ARG1, ARG2) \
      case ENUM: return make_fn_type (ENUM, RETURN, 0, 2, ARG1, ARG2);
#define DEF_FUNCTION_TYPE_3(ENUM, RETURN, ARG1, ARG2, ARG3) \
      case ENUM: return make_fn_type (ENUM, RETURN, 0, 3, ARG1, ARG2, ARG3);
#define DEF_FUNCTION_TYPE_4(ENUM, RETURN, ARG1, ARG2, ARG3, ARG4) \
      case ENUM: return make_fn_type (ENUM, RETURN, 0, 4, ARG1, ARG2, ARG3, ARG4);
#define DEF_FUNCTION_TYPE_5(ENUM, RETURN, ARG1, ARG2, ARG3, ARG4, ARG5)	\
      case ENUM: return make_fn_type (ENUM, RETURN, 0, 5, ARG1, ARG2, ARG3, ARG4, ARG5);
#define DEF_FUNCTION_TYPE_6(ENUM, RETURN, ARG1, ARG2, ARG3, ARG4, ARG5, \
			    ARG6)					\
      case ENUM: return make_fn_type (ENUM, RETURN, 0, 6, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
#define DEF_FUNCTION_TYPE_7(ENUM, RETURN, ARG1, ARG2, ARG3, ARG4, ARG5, \
			    ARG6, ARG7)					\
      case ENUM: return make_fn_type (ENUM, RETURN, 0, 7, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7);
#define DEF_FUNCTION_TYPE_8(ENUM, RETURN, ARG1, ARG2, ARG3, ARG4, ARG5, \
			    ARG6, ARG7, ARG8)				\
      case ENUM: return make_fn_type (ENUM, RETURN, 0, 8, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, \
				      ARG7, ARG8);
#define DEF_FUNCTION_TYPE_VAR_0(ENUM, RETURN) \
      case ENUM: return make_fn_type (ENUM, RETURN, 1, 0);
#define DEF_FUNCTION_TYPE_VAR_1(ENUM, RETURN, ARG1) \
      case ENUM: return make_fn_type (ENUM, RETURN, 1, 1, ARG1);
#define DEF_FUNCTION_TYPE_VAR_2(ENUM, RETURN, ARG1, ARG2) \
      case ENUM: return make_fn_type (ENUM, RETURN, 1, 2, ARG1, ARG2);
#define DEF_FUNCTION_TYPE_VAR_3(ENUM, RETURN, ARG1, ARG2, ARG3) \
      case ENUM: return make_fn_type (ENUM, RETURN, 1, 3, ARG1, ARG2, ARG3);
#define DEF_FUNCTION_TYPE_VAR_4(ENUM, RETURN, ARG1, ARG2, ARG3, ARG4) \
      case ENUM: return make_fn_type (ENUM, RETURN, 1, 4, ARG1, ARG2, ARG3, ARG4);
#define DEF_FUNCTION_TYPE_VAR_5(ENUM, RETURN, ARG1, ARG2, ARG3, ARG4, ARG5) \
      case ENUM: return make_fn_type (ENUM, RETURN, 1, 5, ARG1, ARG2, ARG3, ARG4, ARG5);
#define DEF_POINTER_TYPE(ENUM, TYPE) \
      case ENUM: return make_ptr_type (ENUM, TYPE);

#include "builtin-types.def"

#undef DEF_PRIMITIVE_TYPE
#undef DEF_FUNCTION_TYPE_0
#undef DEF_FUNCTION_TYPE_1
#undef DEF_FUNCTION_TYPE_2
#undef DEF_FUNCTION_TYPE_3
#undef DEF_FUNCTION_TYPE_4
#undef DEF_FUNCTION_TYPE_5
#undef DEF_FUNCTION_TYPE_6
#undef DEF_FUNCTION_TYPE_7
#undef DEF_FUNCTION_TYPE_8
#undef DEF_FUNCTION_TYPE_VAR_0
#undef DEF_FUNCTION_TYPE_VAR_1
#undef DEF_FUNCTION_TYPE_VAR_2
#undef DEF_FUNCTION_TYPE_VAR_3
#undef DEF_FUNCTION_TYPE_VAR_4
#undef DEF_FUNCTION_TYPE_VAR_5
#undef DEF_POINTER_TYPE

    default:
      gcc_unreachable ();
    }
}
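
The construction relies on the classic X-macro idiom: each DEF_* macro is redefined to emit a case label, the .def file is included, and the macros are undefined again. The same idiom in miniature (colors.def and all names below are hypothetical):

/* colors.def */
DEF_COLOR (COLOR_RED, "red")
DEF_COLOR (COLOR_GREEN, "green")

/* client code */
enum color {
#define DEF_COLOR(ENUM, NAME) ENUM,
#include "colors.def"
#undef DEF_COLOR
};

static const char *
color_name (enum color c)
{
  switch (c)
    {
#define DEF_COLOR(ENUM, NAME) case ENUM: return NAME;
#include "colors.def"
#undef DEF_COLOR
    default:
      return "?";
    }
}
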
Example #12
/* Verify cgraph nodes of given cgraph node.  */
void
verify_cgraph_node (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  struct cgraph_node *main_clone;
  struct function *this_cfun = DECL_STRUCT_FUNCTION (node->decl);
  struct function *saved_cfun = cfun;
  basic_block this_block;
  gimple_stmt_iterator gsi;
  bool error_found = false;

  if (errorcount || sorrycount)
    return;

  timevar_push (TV_CGRAPH_VERIFY);
  /* debug_generic_stmt needs a correct cfun.  */
  set_cfun (this_cfun);
  for (e = node->callees; e; e = e->next_callee)
    if (e->aux)
      {
	error ("aux field set for edge %s->%s",
	       cgraph_node_name (e->caller), cgraph_node_name (e->callee));
	error_found = true;
      }
  if (node->count < 0)
    {
      error ("Execution count is negative");
      error_found = true;
    }
  for (e = node->callers; e; e = e->next_caller)
    {
      if (e->count < 0)
	{
	  error ("caller edge count is negative");
	  error_found = true;
	}
      if (e->frequency < 0)
	{
	  error ("caller edge frequency is negative");
	  error_found = true;
	}
      if (e->frequency > CGRAPH_FREQ_MAX)
	{
	  error ("caller edge frequency is too large");
	  error_found = true;
	}
      if (!e->inline_failed)
	{
	  if (node->global.inlined_to
	      != (e->caller->global.inlined_to
		  ? e->caller->global.inlined_to : e->caller))
	    {
	      error ("inlined_to pointer is wrong");
	      error_found = true;
	    }
	  if (node->callers->next_caller)
	    {
	      error ("multiple inline callers");
	      error_found = true;
	    }
	}
      else
	if (node->global.inlined_to)
	  {
	    error ("inlined_to pointer set for noninline callers");
	    error_found = true;
	  }
    }
  if (!node->callers && node->global.inlined_to)
    {
      error ("inlined_to pointer is set but no predecessors found");
      error_found = true;
    }
  if (node->global.inlined_to == node)
    {
      error ("inlined_to pointer refers to itself");
      error_found = true;
    }

  for (main_clone = cgraph_node (node->decl); main_clone;
       main_clone = main_clone->next_clone)
    if (main_clone == node)
      break;
  if (!cgraph_node (node->decl))
    {
      error ("node not found in cgraph_hash");
      error_found = true;
    }

  if (node->analyzed
      && !TREE_ASM_WRITTEN (node->decl)
      && (!DECL_EXTERNAL (node->decl) || node->global.inlined_to))
    {
      if (this_cfun->cfg)
	{
	  /* The nodes we're interested in are never shared, so walk
	     the tree ignoring duplicates.  */
	  struct pointer_set_t *visited_nodes = pointer_set_create ();
	  /* Reach the trees by walking over the CFG, and note the
	     enclosing basic-blocks in the call edges.  */
	  FOR_EACH_BB_FN (this_block, this_cfun)
	    for (gsi = gsi_start_bb (this_block);
                 !gsi_end_p (gsi);
                 gsi_next (&gsi))
	      {
		gimple stmt = gsi_stmt (gsi);
		tree decl;
		if (is_gimple_call (stmt) && (decl = gimple_call_fndecl (stmt)))
		  {
		    struct cgraph_edge *e = cgraph_edge (node, stmt);
		    if (e)
		      {
			if (e->aux)
			  {
			    error ("shared call_stmt:");
			    debug_gimple_stmt (stmt);
			    error_found = true;
			  }
			if (e->callee->decl != cgraph_node (decl)->decl
			    && e->inline_failed)
			  {
			    error ("edge points to wrong declaration:");
			    debug_tree (e->callee->decl);
			    fprintf (stderr," Instead of:");
			    debug_tree (decl);
			  }
			e->aux = (void *)1;
		      }
		    else
		      {
			error ("missing callgraph edge for call stmt:");
			debug_gimple_stmt (stmt);
			error_found = true;
		      }
		  }
	      }
	  pointer_set_destroy (visited_nodes);
	}
      else
	/* No CFG available?!  */
	gcc_unreachable ();

      for (e = node->callees; e; e = e->next_callee)
	{
	  if (!e->aux && !e->indirect_call)
	    {
	      error ("edge %s->%s has no corresponding call_stmt",
		     cgraph_node_name (e->caller),
		     cgraph_node_name (e->callee));
	      debug_gimple_stmt (e->call_stmt);
	      error_found = true;
	    }
	  e->aux = 0;
	}
    }

  if (error_found)
    {
      dump_cgraph_node (stderr, node);
      internal_error ("verify_cgraph_node failed");
    }
  set_cfun (saved_cfun);
  timevar_pop (TV_CGRAPH_VERIFY);
}
Example #13
bool
cgraph_process_new_functions (void)
{
  bool output = false;
  tree fndecl;
  struct cgraph_node *node;

  /* Note that this queue may grow as it is being processed, as the new
     functions may generate new ones.  */
  while (cgraph_new_nodes)
    {
      node = cgraph_new_nodes;
      fndecl = node->decl;
      cgraph_new_nodes = cgraph_new_nodes->next_needed;
      switch (cgraph_state)
	{
	case CGRAPH_STATE_CONSTRUCTION:
	  /* At construction time we just need to finalize function and move
	     it into reachable functions list.  */

	  node->next_needed = NULL;
	  cgraph_finalize_function (fndecl, false);
	  cgraph_mark_reachable_node (node);
	  output = true;
	  break;

	case CGRAPH_STATE_IPA:
	case CGRAPH_STATE_IPA_SSA:
	  /* When IPA optimization has already started, do all essential
	     transformations that have already been performed on the whole
	     cgraph but not on this function.  */

	  gimple_register_cfg_hooks ();
	  if (!node->analyzed)
	    cgraph_analyze_function (node);
	  push_cfun (DECL_STRUCT_FUNCTION (fndecl));
	  current_function_decl = fndecl;
	  compute_inline_parameters (node);
	  if ((cgraph_state == CGRAPH_STATE_IPA_SSA
	      && !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (fndecl)))
	      /* When not optimizing, be sure we run early local passes anyway
		 to expand OMP.  */
	      || !optimize)
	    execute_pass_list (pass_early_local_passes.pass.sub);
	  free_dominance_info (CDI_POST_DOMINATORS);
	  free_dominance_info (CDI_DOMINATORS);
	  pop_cfun ();
	  current_function_decl = NULL;
	  break;

	case CGRAPH_STATE_EXPANSION:
	  /* Functions created during expansion shall be compiled
	     directly.  */
	  node->output = 0;
	  cgraph_expand_function (node);
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
      cgraph_call_function_insertion_hooks (node);
    }
  return output;
}
Example #14
static void
gen_exp (rtx x, enum rtx_code subroutine_type, char *used)
{
  RTX_CODE code;
  int i;
  int len;
  const char *fmt;

  if (x == 0)
    {
      printf ("NULL_RTX");
      return;
    }

  code = GET_CODE (x);

  switch (code)
    {
    case MATCH_OPERAND:
    case MATCH_DUP:
      if (used)
	{
	  if (used[XINT (x, 0)])
	    {
	      printf ("copy_rtx (operand%d)", XINT (x, 0));
	      return;
	    }
	  used[XINT (x, 0)] = 1;
	}
      printf ("operand%d", XINT (x, 0));
      return;

    case MATCH_OP_DUP:
      printf ("gen_rtx_fmt_");
      for (i = 0; i < XVECLEN (x, 1); i++)
	printf ("e");
      printf (" (GET_CODE (operand%d), ", XINT (x, 0));
      if (GET_MODE (x) == VOIDmode)
	printf ("GET_MODE (operand%d)", XINT (x, 0));
      else
	printf ("%smode", GET_MODE_NAME (GET_MODE (x)));
      for (i = 0; i < XVECLEN (x, 1); i++)
	{
	  printf (",\n\t\t");
	  gen_exp (XVECEXP (x, 1, i), subroutine_type, used);
	}
      printf (")");
      return;

    case MATCH_OPERATOR:
      printf ("gen_rtx_fmt_");
      for (i = 0; i < XVECLEN (x, 2); i++)
	printf ("e");
      printf (" (GET_CODE (operand%d)", XINT (x, 0));
      printf (", %smode", GET_MODE_NAME (GET_MODE (x)));
      for (i = 0; i < XVECLEN (x, 2); i++)
	{
	  printf (",\n\t\t");
	  gen_exp (XVECEXP (x, 2, i), subroutine_type, used);
	}
      printf (")");
      return;

    case MATCH_PARALLEL:
    case MATCH_PAR_DUP:
      printf ("operand%d", XINT (x, 0));
      return;

    case MATCH_SCRATCH:
      gen_rtx_scratch (x, subroutine_type);
      return;

    case ADDRESS:
      fatal ("ADDRESS expression code used in named instruction pattern");

    case PC:
      printf ("pc_rtx");
      return;
    case RETURN:
      printf ("ret_rtx");
      return;
    case SIMPLE_RETURN:
      printf ("simple_return_rtx");
      return;
    case CLOBBER:
      if (REG_P (XEXP (x, 0)))
	{
	  printf ("gen_hard_reg_clobber (%smode, %i)", GET_MODE_NAME (GET_MODE (XEXP (x, 0))),
			  			     REGNO (XEXP (x, 0)));
	  return;
	}
      break;

    case CC0:
      printf ("cc0_rtx");
      return;

    case CONST_INT:
      if (INTVAL (x) == 0)
	printf ("const0_rtx");
      else if (INTVAL (x) == 1)
	printf ("const1_rtx");
      else if (INTVAL (x) == -1)
	printf ("constm1_rtx");
      else if (-MAX_SAVED_CONST_INT <= INTVAL (x)
	  && INTVAL (x) <= MAX_SAVED_CONST_INT)
	printf ("const_int_rtx[MAX_SAVED_CONST_INT + (%d)]",
		(int) INTVAL (x));
      else if (INTVAL (x) == STORE_FLAG_VALUE)
	printf ("const_true_rtx");
      else
	{
	  printf ("GEN_INT (");
	  printf (HOST_WIDE_INT_PRINT_DEC_C, INTVAL (x));
	  printf (")");
	}
      return;

    case CONST_DOUBLE:
    case CONST_FIXED:
      /* These shouldn't be written in MD files.  Instead, the appropriate
	 routines in varasm.c should be called.  */
      gcc_unreachable ();

    default:
      break;
    }

  printf ("gen_rtx_");
  print_code (code);
  printf (" (%smode", GET_MODE_NAME (GET_MODE (x)));

  fmt = GET_RTX_FORMAT (code);
  len = GET_RTX_LENGTH (code);
  for (i = 0; i < len; i++)
    {
      if (fmt[i] == '0')
	break;
      printf (",\n\t");
      switch (fmt[i])
	{
	case 'e': case 'u':
	  gen_exp (XEXP (x, i), subroutine_type, used);
	  break;

	case 'i':
	  printf ("%u", XINT (x, i));
	  break;

	case 's':
	  printf ("\"%s\"", XSTR (x, i));
	  break;

	case 'E':
	  {
	    int j;
	    printf ("gen_rtvec (%d", XVECLEN (x, i));
	    for (j = 0; j < XVECLEN (x, i); j++)
	      {
		printf (",\n\t\t");
		gen_exp (XVECEXP (x, i, j), subroutine_type, used);
	      }
	    printf (")");
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
    }
  printf (")");
}
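
Note: the output gen_exp prints is itself C source.  Reconstructing from
the printf calls above (not from an actual genemit run), a template such
as (set (match_operand 0 ...) (const_int 1234)) would come out roughly
as follows, since 1234 lies outside the saved-constant range:

  gen_rtx_SET (VOIDmode,
	operand0,
	GEN_INT (1234))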
Example #15
static void
gen_int_relational (enum rtx_code code,	 /* relational test (EQ, etc.)  */
		    rtx result,		 /* where to store the comparison  */
		    rtx cmp0,		 /* first operand to compare  */
		    rtx cmp1,		 /* second operand to compare  */
		    rtx destination)	 /* branch destination, or 0 for a compare  */
{
  enum machine_mode mode;
  int branch_p;

  mode = GET_MODE (cmp0);
  if (mode == VOIDmode)
    mode = GET_MODE (cmp1);

  /* Is this a branch or a compare?  */
  branch_p = (destination != 0);

  /* Instruction set doesn't support LE or LT, so swap operands and use 
     GE, GT.  */
  switch (code)
    {
    case LE:
    case LT:
    case LEU:
    case LTU:
      {
	rtx temp;

	code = swap_condition (code);
	temp = cmp0;
	cmp0 = cmp1;
	cmp1 = temp;
	break;
      }
    default:
      break;
    }

  if (branch_p)
    {
      rtx insn, cond, label;

      /* Operands must be in registers.  */
      if (!register_operand (cmp0, mode))
	cmp0 = force_reg (mode, cmp0);
      if (!register_operand (cmp1, mode))
	cmp1 = force_reg (mode, cmp1);

      /* Generate conditional branch instruction.  */
      cond = gen_rtx_fmt_ee (code, mode, cmp0, cmp1);
      label = gen_rtx_LABEL_REF (VOIDmode, destination);
      insn = gen_rtx_SET (VOIDmode, pc_rtx,
			  gen_rtx_IF_THEN_ELSE (VOIDmode,
						cond, label, pc_rtx));
      emit_jump_insn (insn);
    }
  else
    {
      /* We can't have const_ints in cmp0, other than 0.  */
      if ((GET_CODE (cmp0) == CONST_INT) && (INTVAL (cmp0) != 0))
	cmp0 = force_reg (mode, cmp0);

      /* If the comparison is against an int not in legal range
         move it into a register.  */
      if (GET_CODE (cmp1) == CONST_INT)
	{
	  switch (code)
	    {
	    case EQ:
	    case NE:
	    case LE:
	    case LT:
	    case GE:
	    case GT:
	      if (!satisfies_constraint_K (cmp1))
		cmp1 = force_reg (mode, cmp1);
	      break;
	    case LEU:
	    case LTU:
	    case GEU:
	    case GTU:
	      if (!satisfies_constraint_L (cmp1))
		cmp1 = force_reg (mode, cmp1);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}

      /* Generate compare instruction.  */
      emit_move_insn (result, gen_rtx_fmt_ee (code, mode, cmp0, cmp1));
    }
}
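
Note: a sketch of a call site (hypothetical helper; the real md-generated
expanders vary by port).  A non-zero destination selects the branch path,
a zero destination the compare path:

  /* Branch to LABEL if OP0 <code> OP1 (illustrative, not from any
     particular port).  */
  static void
  emit_cbranch (enum rtx_code code, rtx op0, rtx op1, rtx label)
  {
    gen_int_relational (code, NULL_RTX /* no result */, op0, op1, label);
  }

  /* Compare form: store the comparison in RESULT instead.
     gen_int_relational (code, result, op0, op1, NULL_RTX);  */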
Example #16
/* Internal function to either define or undef the appropriate system
   macros.  */
static void
ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
			     enum processor_type arch,
			     enum processor_type tune,
			     enum fpmath_unit fpmath,
			     void (*def_or_undef) (cpp_reader *,
						   const char *))
{
  /* For some of the k6/pentium variants there weren't separate ISA bits to
     identify which tune/arch flag was passed, so figure it out here.  */
  size_t arch_len = strlen (ix86_arch_string);
  size_t tune_len = strlen (ix86_tune_string);
  int last_arch_char = ix86_arch_string[arch_len - 1];
  int last_tune_char = ix86_tune_string[tune_len - 1];

  /* Built-ins based on -march=.  */
  switch (arch)
    {
    case PROCESSOR_I386:
      break;
    case PROCESSOR_I486:
      def_or_undef (parse_in, "__i486");
      def_or_undef (parse_in, "__i486__");
      break;
    case PROCESSOR_PENTIUM:
      def_or_undef (parse_in, "__i586");
      def_or_undef (parse_in, "__i586__");
      def_or_undef (parse_in, "__pentium");
      def_or_undef (parse_in, "__pentium__");
      if (isa_flag & OPTION_MASK_ISA_MMX)
	def_or_undef (parse_in, "__pentium_mmx__");
      break;
    case PROCESSOR_PENTIUMPRO:
      def_or_undef (parse_in, "__i686");
      def_or_undef (parse_in, "__i686__");
      def_or_undef (parse_in, "__pentiumpro");
      def_or_undef (parse_in, "__pentiumpro__");
      break;
    case PROCESSOR_GEODE:
      def_or_undef (parse_in, "__geode");
      def_or_undef (parse_in, "__geode__");
      break;
    case PROCESSOR_K6:
      def_or_undef (parse_in, "__k6");
      def_or_undef (parse_in, "__k6__");
      if (last_arch_char == '2')
	def_or_undef (parse_in, "__k6_2__");
      else if (last_arch_char == '3')
	def_or_undef (parse_in, "__k6_3__");
      else if (isa_flag & OPTION_MASK_ISA_3DNOW)
	def_or_undef (parse_in, "__k6_3__");
      break;
    case PROCESSOR_ATHLON:
      def_or_undef (parse_in, "__athlon");
      def_or_undef (parse_in, "__athlon__");
      if (isa_flag & OPTION_MASK_ISA_SSE)
	def_or_undef (parse_in, "__athlon_sse__");
      break;
    case PROCESSOR_K8:
      def_or_undef (parse_in, "__k8");
      def_or_undef (parse_in, "__k8__");
      break;
    case PROCESSOR_AMDFAM10:
      def_or_undef (parse_in, "__amdfam10");
      def_or_undef (parse_in, "__amdfam10__");
      break;
    case PROCESSOR_BDVER1:
      def_or_undef (parse_in, "__bdver1");
      def_or_undef (parse_in, "__bdver1__");
      break;
    case PROCESSOR_BDVER2:
      def_or_undef (parse_in, "__bdver2");
      def_or_undef (parse_in, "__bdver2__");
      break;
    case PROCESSOR_BDVER3:
      def_or_undef (parse_in, "__bdver3");
      def_or_undef (parse_in, "__bdver3__");
      break;
    case PROCESSOR_BDVER4:
      def_or_undef (parse_in, "__bdver4");
      def_or_undef (parse_in, "__bdver4__");
      break;
    case PROCESSOR_BTVER1:
      def_or_undef (parse_in, "__btver1");
      def_or_undef (parse_in, "__btver1__");
      break;
    case PROCESSOR_BTVER2:
      def_or_undef (parse_in, "__btver2");
      def_or_undef (parse_in, "__btver2__");
      break;
    case PROCESSOR_PENTIUM4:
      def_or_undef (parse_in, "__pentium4");
      def_or_undef (parse_in, "__pentium4__");
      break;
    case PROCESSOR_NOCONA:
      def_or_undef (parse_in, "__nocona");
      def_or_undef (parse_in, "__nocona__");
      break;
    case PROCESSOR_CORE2:
      def_or_undef (parse_in, "__core2");
      def_or_undef (parse_in, "__core2__");
      break;
    case PROCESSOR_NEHALEM:
      def_or_undef (parse_in, "__corei7");
      def_or_undef (parse_in, "__corei7__");
      def_or_undef (parse_in, "__nehalem");
      def_or_undef (parse_in, "__nehalem__");
      break;
    case PROCESSOR_SANDYBRIDGE:
      def_or_undef (parse_in, "__corei7_avx");
      def_or_undef (parse_in, "__corei7_avx__");
      def_or_undef (parse_in, "__sandybridge");
      def_or_undef (parse_in, "__sandybridge__");
      break;
    case PROCESSOR_HASWELL:
      def_or_undef (parse_in, "__core_avx2");
      def_or_undef (parse_in, "__core_avx2__");
      def_or_undef (parse_in, "__haswell");
      def_or_undef (parse_in, "__haswell__");
      break;
    case PROCESSOR_BONNELL:
      def_or_undef (parse_in, "__atom");
      def_or_undef (parse_in, "__atom__");
      def_or_undef (parse_in, "__bonnell");
      def_or_undef (parse_in, "__bonnell__");
      break;
    case PROCESSOR_SILVERMONT:
      def_or_undef (parse_in, "__slm");
      def_or_undef (parse_in, "__slm__");
      def_or_undef (parse_in, "__silvermont");
      def_or_undef (parse_in, "__silvermont__");
      break;
    /* Use PROCESSOR_max to avoid setting/unsetting the arch macro.  */
    case PROCESSOR_max:
      break;
    case PROCESSOR_GENERIC:
      gcc_unreachable ();
    }

  /* Built-ins based on -mtune=.  */
  switch (tune)
    {
    case PROCESSOR_I386:
      def_or_undef (parse_in, "__tune_i386__");
      break;
    case PROCESSOR_I486:
      def_or_undef (parse_in, "__tune_i486__");
      break;
    case PROCESSOR_PENTIUM:
      def_or_undef (parse_in, "__tune_i586__");
      def_or_undef (parse_in, "__tune_pentium__");
      if (last_tune_char == 'x')
	def_or_undef (parse_in, "__tune_pentium_mmx__");
      break;
    case PROCESSOR_PENTIUMPRO:
      def_or_undef (parse_in, "__tune_i686__");
      def_or_undef (parse_in, "__tune_pentiumpro__");
      switch (last_tune_char)
	{
	case '3':
	  def_or_undef (parse_in, "__tune_pentium3__");
	  /* FALLTHRU */
	case '2':
	  def_or_undef (parse_in, "__tune_pentium2__");
	  break;
	}
      break;
    case PROCESSOR_GEODE:
      def_or_undef (parse_in, "__tune_geode__");
      break;
    case PROCESSOR_K6:
      def_or_undef (parse_in, "__tune_k6__");
      if (last_tune_char == '2')
	def_or_undef (parse_in, "__tune_k6_2__");
      else if (last_tune_char == '3')
	def_or_undef (parse_in, "__tune_k6_3__");
      else if (isa_flag & OPTION_MASK_ISA_3DNOW)
	def_or_undef (parse_in, "__tune_k6_3__");
      break;
    case PROCESSOR_ATHLON:
      def_or_undef (parse_in, "__tune_athlon__");
      if (isa_flag & OPTION_MASK_ISA_SSE)
	def_or_undef (parse_in, "__tune_athlon_sse__");
      break;
    case PROCESSOR_K8:
      def_or_undef (parse_in, "__tune_k8__");
      break;
    case PROCESSOR_AMDFAM10:
      def_or_undef (parse_in, "__tune_amdfam10__");
      break;
    case PROCESSOR_BDVER1:
      def_or_undef (parse_in, "__tune_bdver1__");
      break;
    case PROCESSOR_BDVER2:
      def_or_undef (parse_in, "__tune_bdver2__");
      break;
    case PROCESSOR_BDVER3:
      def_or_undef (parse_in, "__tune_bdver3__");
      break;
    case PROCESSOR_BDVER4:
      def_or_undef (parse_in, "__tune_bdver4__");
      break;
    case PROCESSOR_BTVER1:
      def_or_undef (parse_in, "__tune_btver1__");
      break;
    case PROCESSOR_BTVER2:
      def_or_undef (parse_in, "__tune_btver2__");
      break;
    case PROCESSOR_PENTIUM4:
      def_or_undef (parse_in, "__tune_pentium4__");
      break;
    case PROCESSOR_NOCONA:
      def_or_undef (parse_in, "__tune_nocona__");
      break;
    case PROCESSOR_CORE2:
      def_or_undef (parse_in, "__tune_core2__");
      break;
    case PROCESSOR_NEHALEM:
      def_or_undef (parse_in, "__tune_corei7__");
      def_or_undef (parse_in, "__tune_nehalem__");
      break;
    case PROCESSOR_SANDYBRIDGE:
      def_or_undef (parse_in, "__tune_corei7_avx__");
      def_or_undef (parse_in, "__tune_sandybridge__");
      break;
    case PROCESSOR_HASWELL:
      def_or_undef (parse_in, "__tune_core_avx2__");
      def_or_undef (parse_in, "__tune_haswell__");
      break;
    case PROCESSOR_BONNELL:
      def_or_undef (parse_in, "__tune_atom__");
      def_or_undef (parse_in, "__tune_bonnell__");
      break;
    case PROCESSOR_SILVERMONT:
      def_or_undef (parse_in, "__tune_slm__");
      def_or_undef (parse_in, "__tune_silvermont__");
      break;
    case PROCESSOR_GENERIC:
      break;
    /* Use PROCESSOR_max to avoid setting/unsetting the tune macro.  */
    case PROCESSOR_max:
      break;
    }

  switch (ix86_cmodel)
    {
    case CM_SMALL:
    case CM_SMALL_PIC:
      def_or_undef (parse_in, "__code_model_small__");
      break;
    case CM_MEDIUM:
    case CM_MEDIUM_PIC:
      def_or_undef (parse_in, "__code_model_medium__");
      break;
    case CM_LARGE:
    case CM_LARGE_PIC:
      def_or_undef (parse_in, "__code_model_large__");
      break;
    case CM_32:
      def_or_undef (parse_in, "__code_model_32__");
      break;
    case CM_KERNEL:
      def_or_undef (parse_in, "__code_model_kernel__");
      break;
    default:
      ;
    }

  if (isa_flag & OPTION_MASK_ISA_MMX)
    def_or_undef (parse_in, "__MMX__");
  if (isa_flag & OPTION_MASK_ISA_3DNOW)
    def_or_undef (parse_in, "__3dNOW__");
  if (isa_flag & OPTION_MASK_ISA_3DNOW_A)
    def_or_undef (parse_in, "__3dNOW_A__");
  if (isa_flag & OPTION_MASK_ISA_SSE)
    def_or_undef (parse_in, "__SSE__");
  if (isa_flag & OPTION_MASK_ISA_SSE2)
    def_or_undef (parse_in, "__SSE2__");
  if (isa_flag & OPTION_MASK_ISA_SSE3)
    def_or_undef (parse_in, "__SSE3__");
  if (isa_flag & OPTION_MASK_ISA_SSSE3)
    def_or_undef (parse_in, "__SSSE3__");
  if (isa_flag & OPTION_MASK_ISA_SSE4_1)
    def_or_undef (parse_in, "__SSE4_1__");
  if (isa_flag & OPTION_MASK_ISA_SSE4_2)
    def_or_undef (parse_in, "__SSE4_2__");
  if (isa_flag & OPTION_MASK_ISA_AES)
    def_or_undef (parse_in, "__AES__");
  if (isa_flag & OPTION_MASK_ISA_SHA)
    def_or_undef (parse_in, "__SHA__");
  if (isa_flag & OPTION_MASK_ISA_PCLMUL)
    def_or_undef (parse_in, "__PCLMUL__");
  if (isa_flag & OPTION_MASK_ISA_AVX)
    def_or_undef (parse_in, "__AVX__");
  if (isa_flag & OPTION_MASK_ISA_AVX2)
    def_or_undef (parse_in, "__AVX2__");
  if (isa_flag & OPTION_MASK_ISA_AVX512F)
    def_or_undef (parse_in, "__AVX512F__");
  if (isa_flag & OPTION_MASK_ISA_AVX512ER)
    def_or_undef (parse_in, "__AVX512ER__");
  if (isa_flag & OPTION_MASK_ISA_AVX512CD)
    def_or_undef (parse_in, "__AVX512CD__");
  if (isa_flag & OPTION_MASK_ISA_AVX512PF)
    def_or_undef (parse_in, "__AVX512PF__");
  if (isa_flag & OPTION_MASK_ISA_FMA)
    def_or_undef (parse_in, "__FMA__");
  if (isa_flag & OPTION_MASK_ISA_RTM)
    def_or_undef (parse_in, "__RTM__");
  if (isa_flag & OPTION_MASK_ISA_SSE4A)
    def_or_undef (parse_in, "__SSE4A__");
  if (isa_flag & OPTION_MASK_ISA_FMA4)
    def_or_undef (parse_in, "__FMA4__");
  if (isa_flag & OPTION_MASK_ISA_XOP)
    def_or_undef (parse_in, "__XOP__");
  if (isa_flag & OPTION_MASK_ISA_LWP)
    def_or_undef (parse_in, "__LWP__");
  if (isa_flag & OPTION_MASK_ISA_ABM)
    def_or_undef (parse_in, "__ABM__");
  if (isa_flag & OPTION_MASK_ISA_BMI)
    def_or_undef (parse_in, "__BMI__");
  if (isa_flag & OPTION_MASK_ISA_BMI2)
    def_or_undef (parse_in, "__BMI2__");
  if (isa_flag & OPTION_MASK_ISA_LZCNT)
    def_or_undef (parse_in, "__LZCNT__");
  if (isa_flag & OPTION_MASK_ISA_TBM)
    def_or_undef (parse_in, "__TBM__");
  if (isa_flag & OPTION_MASK_ISA_POPCNT)
    def_or_undef (parse_in, "__POPCNT__");
  if (isa_flag & OPTION_MASK_ISA_FSGSBASE)
    def_or_undef (parse_in, "__FSGSBASE__");
  if (isa_flag & OPTION_MASK_ISA_RDRND)
    def_or_undef (parse_in, "__RDRND__");
  if (isa_flag & OPTION_MASK_ISA_F16C)
    def_or_undef (parse_in, "__F16C__");
  if (isa_flag & OPTION_MASK_ISA_RDSEED)
    def_or_undef (parse_in, "__RDSEED__");
  if (isa_flag & OPTION_MASK_ISA_PRFCHW)
    def_or_undef (parse_in, "__PRFCHW__");
  if (isa_flag & OPTION_MASK_ISA_ADX)
    def_or_undef (parse_in, "__ADX__");
  if (isa_flag & OPTION_MASK_ISA_FXSR)
    def_or_undef (parse_in, "__FXSR__");
  if (isa_flag & OPTION_MASK_ISA_XSAVE)
    def_or_undef (parse_in, "__XSAVE__");
  if (isa_flag & OPTION_MASK_ISA_XSAVEOPT)
    def_or_undef (parse_in, "__XSAVEOPT__");
  if ((fpmath & FPMATH_SSE) && (isa_flag & OPTION_MASK_ISA_SSE))
    def_or_undef (parse_in, "__SSE_MATH__");
  if ((fpmath & FPMATH_SSE) && (isa_flag & OPTION_MASK_ISA_SSE2))
    def_or_undef (parse_in, "__SSE2_MATH__");
}
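
Note: seen from user code, the macros defined above gate target-specific
paths in the usual way, e.g.:

  #if defined (__AVX2__)
    /* code compiled with -mavx2, or an -march that implies it */
  #elif defined (__SSE2__)
    /* SSE2 fallback */
  #endif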
Example #17
void
do_jump_1 (enum tree_code code, tree op0, tree op1,
	   rtx if_false_label, rtx if_true_label, int prob)
{
  enum machine_mode mode;
  rtx drop_through_label = 0;

  switch (code)
    {
    case EQ_EXPR:
      {
        tree inner_type = TREE_TYPE (op0);

        gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
		    != MODE_COMPLEX_FLOAT);
	gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
		    != MODE_COMPLEX_INT);

        if (integer_zerop (op1))
	  do_jump (op0, if_true_label, if_false_label, inv (prob));
        else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
                 && !can_compare_p (EQ, TYPE_MODE (inner_type), ccp_jump))
	  do_jump_by_parts_equality (op0, op1, if_false_label, if_true_label,
				     prob);
        else
	  do_compare_and_jump (op0, op1, EQ, EQ, if_false_label, if_true_label,
			       prob);
        break;
      }

    case NE_EXPR:
      {
        tree inner_type = TREE_TYPE (op0);

        gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
		    != MODE_COMPLEX_FLOAT);
	gcc_assert (GET_MODE_CLASS (TYPE_MODE (inner_type))
		    != MODE_COMPLEX_INT);

        if (integer_zerop (op1))
	  do_jump (op0, if_false_label, if_true_label, prob);
        else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
                 && !can_compare_p (NE, TYPE_MODE (inner_type), ccp_jump))
	  do_jump_by_parts_equality (op0, op1, if_true_label, if_false_label,
				     inv (prob));
        else
	  do_compare_and_jump (op0, op1, NE, NE, if_false_label, if_true_label,
			       prob);
        break;
      }

    case LT_EXPR:
      mode = TYPE_MODE (TREE_TYPE (op0));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (LT, mode, ccp_jump))
	do_jump_by_parts_greater (op0, op1, 1, if_false_label, if_true_label,
				  prob);
      else
	do_compare_and_jump (op0, op1, LT, LTU, if_false_label, if_true_label,
			     prob);
      break;

    case LE_EXPR:
      mode = TYPE_MODE (TREE_TYPE (op0));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (LE, mode, ccp_jump))
	do_jump_by_parts_greater (op0, op1, 0, if_true_label, if_false_label,
				  inv (prob));
      else
	do_compare_and_jump (op0, op1, LE, LEU, if_false_label, if_true_label,
			     prob);
      break;

    case GT_EXPR:
      mode = TYPE_MODE (TREE_TYPE (op0));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (GT, mode, ccp_jump))
	do_jump_by_parts_greater (op0, op1, 0, if_false_label, if_true_label,
				  prob);
      else
	do_compare_and_jump (op0, op1, GT, GTU, if_false_label, if_true_label,
			     prob);
      break;

    case GE_EXPR:
      mode = TYPE_MODE (TREE_TYPE (op0));
      if (GET_MODE_CLASS (mode) == MODE_INT
          && ! can_compare_p (GE, mode, ccp_jump))
	do_jump_by_parts_greater (op0, op1, 1, if_true_label, if_false_label,
				  inv (prob));
      else
	do_compare_and_jump (op0, op1, GE, GEU, if_false_label, if_true_label,
			     prob);
      break;

    case ORDERED_EXPR:
      do_compare_and_jump (op0, op1, ORDERED, ORDERED,
			   if_false_label, if_true_label, prob);
      break;

    case UNORDERED_EXPR:
      do_compare_and_jump (op0, op1, UNORDERED, UNORDERED,
			   if_false_label, if_true_label, prob);
      break;

    case UNLT_EXPR:
      do_compare_and_jump (op0, op1, UNLT, UNLT, if_false_label, if_true_label,
			   prob);
      break;

    case UNLE_EXPR:
      do_compare_and_jump (op0, op1, UNLE, UNLE, if_false_label, if_true_label,
			   prob);
      break;

    case UNGT_EXPR:
      do_compare_and_jump (op0, op1, UNGT, UNGT, if_false_label, if_true_label,
			   prob);
      break;

    case UNGE_EXPR:
      do_compare_and_jump (op0, op1, UNGE, UNGE, if_false_label, if_true_label,
			   prob);
      break;

    case UNEQ_EXPR:
      do_compare_and_jump (op0, op1, UNEQ, UNEQ, if_false_label, if_true_label,
			   prob);
      break;

    case LTGT_EXPR:
      do_compare_and_jump (op0, op1, LTGT, LTGT, if_false_label, if_true_label,
			   prob);
      break;

    case TRUTH_ANDIF_EXPR:
      {
        /* Spread the probability that the expression is false evenly between
           the two conditions. So the first condition is false half the total
           probability of being false. The second condition is false the other
           half of the total probability of being false, so its jump has a false
           probability of half the total, relative to the probability we
           reached it (i.e. the first condition was true).  */
        int op0_prob = -1;
        int op1_prob = -1;
        if (prob != -1)
          {
            int false_prob = inv (prob);
            int op0_false_prob = false_prob / 2;
            int op1_false_prob = GCOV_COMPUTE_SCALE ((false_prob / 2),
                                                     inv (op0_false_prob));
            /* Get the probability that each jump below is true.  */
            op0_prob = inv (op0_false_prob);
            op1_prob = inv (op1_false_prob);
          }
        if (if_false_label == NULL_RTX)
          {
            drop_through_label = gen_label_rtx ();
            do_jump (op0, drop_through_label, NULL_RTX, op0_prob);
            do_jump (op1, NULL_RTX, if_true_label, op1_prob);
          }
        else
          {
            do_jump (op0, if_false_label, NULL_RTX, op0_prob);
            do_jump (op1, if_false_label, if_true_label, op1_prob);
          }
        break;
      }

    case TRUTH_ORIF_EXPR:
      {
        /* Spread the probability evenly between the two conditions. So
           the first condition has half the total probability of being true.
           The second condition has the other half of the total probability,
           so its jump has a probability of half the total, relative to
           the probability we reached it (i.e. the first condition was false).  */
        int op0_prob = -1;
        int op1_prob = -1;
        if (prob != -1)
          {
            op0_prob = prob / 2;
            op1_prob = GCOV_COMPUTE_SCALE ((prob / 2), inv (op0_prob));
          }
        if (if_true_label == NULL_RTX)
          {
            drop_through_label = gen_label_rtx ();
            do_jump (op0, NULL_RTX, drop_through_label, op0_prob);
            do_jump (op1, if_false_label, NULL_RTX, op1_prob);
          }
        else
          {
            do_jump (op0, NULL_RTX, if_true_label, op0_prob);
            do_jump (op1, if_false_label, if_true_label, op1_prob);
          }
        break;
      }

    default:
      gcc_unreachable ();
    }

  if (drop_through_label)
    {
      do_pending_stack_adjust ();
      emit_label (drop_through_label);
    }
}
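
Note: a quick check of the TRUTH_ANDIF_EXPR probability math, assuming
REG_BR_PROB_BASE == 10000, inv (p) == REG_BR_PROB_BASE - p, and
GCOV_COMPUTE_SCALE (n, d) == n * REG_BR_PROB_BASE / d.  With prob = 8000:
false_prob = 2000 and op0_false_prob = 1000, so op0_prob = 9000;
op1_false_prob = 1000 * 10000 / 9000 ≈ 1111, so op1_prob ≈ 8889.  The
total probability of falsehood is then 1000/10000 + (9000/10000) *
(1111/10000) ≈ 2000/10000, which matches inv (prob) as intended.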
Example #18
File: rtl.c Project: gvz/avr-gcc
rtx
copy_rtx (rtx orig)
{
  rtx copy;
  int i, j;
  RTX_CODE code;
  const char *format_ptr;

  code = GET_CODE (orig);

  switch (code)
    {
    case REG:
    case DEBUG_EXPR:
    case VALUE:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
    case RETURN:
    case SIMPLE_RETURN:
    case SCRATCH:
      /* SCRATCHes must be shared because they represent distinct values.  */
      return orig;
    case CLOBBER:
      /* Share clobbers of hard registers (like cc0), but do not share pseudo reg
         clobbers or clobbers of hard registers that originated as pseudos.
         This is needed to allow safe register renaming.  */
      if (REG_P (XEXP (orig, 0)) && REGNO (XEXP (orig, 0)) < FIRST_PSEUDO_REGISTER
	  && ORIGINAL_REGNO (XEXP (orig, 0)) == REGNO (XEXP (orig, 0)))
	return orig;
      break;

    case CONST:
      if (shared_const_p (orig))
	return orig;
      break;

      /* A MEM with a constant address is not sharable.  The problem is that
	 the constant address may need to be reloaded.  If the mem is shared,
	 then reloading one copy of this mem will cause all copies to appear
	 to have been reloaded.  */

    default:
      break;
    }

  /* Copy the various flags, fields, and other information.  We assume
     that all fields need copying, and then clear the fields that should
     not be copied.  That is the sensible default behavior, and forces
     us to explicitly document why we are *not* copying a flag.  */
  copy = shallow_copy_rtx (orig);

  /* We do not copy the USED flag, which is used as a mark bit during
     walks over the RTL.  */
  RTX_FLAG (copy, used) = 0;

  format_ptr = GET_RTX_FORMAT (GET_CODE (copy));

  for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++)
    switch (*format_ptr++)
      {
      case 'e':
	if (XEXP (orig, i) != NULL)
	  XEXP (copy, i) = copy_rtx (XEXP (orig, i));
	break;

      case 'E':
      case 'V':
	if (XVEC (orig, i) != NULL)
	  {
	    XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i));
	    for (j = 0; j < XVECLEN (copy, i); j++)
	      XVECEXP (copy, i, j) = copy_rtx (XVECEXP (orig, i, j));
	  }
	break;

      case 't':
      case 'w':
      case 'i':
      case 's':
      case 'S':
      case 'T':
      case 'u':
      case 'B':
      case '0':
	/* These are left unchanged.  */
	break;

      default:
	gcc_unreachable ();
      }
  return copy;
}
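
Note: an illustration of the sharing rules (hypothetical snippet; reg is
any REG rtx):

  rtx sum  = gen_rtx_PLUS (SImode, reg, const0_rtx);
  rtx copy = copy_rtx (sum);
  /* copy != sum: the PLUS node is freshly allocated, but both operands
     come back shared -- XEXP (copy, 0) == reg and
     XEXP (copy, 1) == const0_rtx, per the REG and CASE_CONST_ANY cases
     above.  */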
Example #19
bool
split_comparison (enum rtx_code code, enum machine_mode mode,
		  enum rtx_code *code1, enum rtx_code *code2)
{
  switch (code)
    {
    case LT:
      *code1 = ORDERED;
      *code2 = UNLT;
      return true;
    case LE:
      *code1 = ORDERED;
      *code2 = UNLE;
      return true;
    case GT:
      *code1 = ORDERED;
      *code2 = UNGT;
      return true;
    case GE:
      *code1 = ORDERED;
      *code2 = UNGE;
      return true;
    case EQ:
      *code1 = ORDERED;
      *code2 = UNEQ;
      return true;
    case NE:
      *code1 = UNORDERED;
      *code2 = LTGT;
      return false;
    case UNLT:
      *code1 = UNORDERED;
      *code2 = LT;
      return false;
    case UNLE:
      *code1 = UNORDERED;
      *code2 = LE;
      return false;
    case UNGT:
      *code1 = UNORDERED;
      *code2 = GT;
      return false;
    case UNGE:
      *code1 = UNORDERED;
      *code2 = GE;
      return false;
    case UNEQ:
      *code1 = UNORDERED;
      *code2 = EQ;
      return false;
    case LTGT:
      /* Do not turn a trapping comparison into a non-trapping one.  */
      if (HONOR_SNANS (mode))
	{
          *code1 = LT;
          *code2 = GT;
          return false;
	}
      else
	{
	  *code1 = ORDERED;
	  *code2 = NE;
	  return true;
	}
    default:
      gcc_unreachable ();
    }
}
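
Note: a usage sketch (hypothetical caller).  The return value says
whether the two halves combine with AND (true) or OR (false):

  enum rtx_code first, second;
  bool and_them = split_comparison (UNLE, SFmode, &first, &second);
  /* first == UNORDERED, second == LE, and_them == false:
     (unle x y) <==> (unordered x y) || (le x y).  */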
Example #20
File: rtl.c Project: gvz/avr-gcc
int
rtx_equal_p (const_rtx x, const_rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);
  /* Rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* MEMs referring to different address space are not equivalent.  */
  if (code == MEM && MEM_ADDR_SPACE (x) != MEM_ADDR_SPACE (y))
    return 0;

  /* Some RTL can be compared nonrecursively.  */
  switch (code)
    {
    case REG:
      return (REGNO (x) == REGNO (y));

    case LABEL_REF:
      return XEXP (x, 0) == XEXP (y, 0);

    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);

    case DEBUG_EXPR:
    case VALUE:
    case SCRATCH:
    CASE_CONST_UNIQUE:
      return 0;

    case DEBUG_IMPLICIT_PTR:
      return DEBUG_IMPLICIT_PTR_DECL (x)
	     == DEBUG_IMPLICIT_PTR_DECL (y);

    case DEBUG_PARAMETER_REF:
      return DEBUG_PARAMETER_REF_DECL (x)
	     == DEBUG_PARAMETER_REF_DECL (y);

    case ENTRY_VALUE:
      return rtx_equal_p (ENTRY_VALUE_EXP (x), ENTRY_VALUE_EXP (y));

    default:
      break;
    }

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case 'n':
	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    {
#ifndef GENERATOR_FILE
	      if (((code == ASM_OPERANDS && i == 6)
		   || (code == ASM_INPUT && i == 1))
		  && XINT (x, i) == XINT (y, i))
		break;
#endif
	      return 0;
	    }
	  break;

	case 'V':
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_p (XVECEXP (x, i, j),  XVECEXP (y, i, j)) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_p (XEXP (x, i), XEXP (y, i)) == 0)
	    return 0;
	  break;

	case 'S':
	case 's':
	  if ((XSTR (x, i) || XSTR (y, i))
	      && (! XSTR (x, i) || ! XSTR (y, i)
		  || strcmp (XSTR (x, i), XSTR (y, i))))
	    return 0;
	  break;

	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;

	case '0':
	case 't':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  gcc_unreachable ();
	}
    }
  return 1;
}
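
Note: equality here is structural and mode-sensitive, per the early
GET_MODE check.  A hypothetical illustration:

  rtx a = gen_rtx_REG (SImode, 5);
  rtx b = gen_rtx_REG (SImode, 5);
  rtx c = gen_rtx_REG (HImode, 5);
  /* rtx_equal_p (a, b) == 1 -- same code, mode and REGNO.
     rtx_equal_p (a, c) == 0 -- modes differ, rejected up front.  */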
Example #21
void
sdbout_symbol (tree decl, int local)
{
  tree type = TREE_TYPE (decl);
  tree context = NULL_TREE;
  rtx value;
  int regno = -1;
  const char *name;

  /* If we are called before sdbout_init is run, just save the symbol
     for later.  */
  if (!sdbout_initialized)
    {
      preinit_symbols = tree_cons (0, decl, preinit_symbols);
      return;
    }

  sdbout_one_type (type);

  switch (TREE_CODE (decl))
    {
    case CONST_DECL:
      /* Enum values are defined by defining the enum type.  */
      return;

    case FUNCTION_DECL:
      /* Don't mention a nested function under its parent.  */
      context = decl_function_context (decl);
      if (context == current_function_decl)
	return;
      /* Check DECL_INITIAL to distinguish declarations from definitions.
	 Don't output debug info here for declarations; they will have
	 a DECL_INITIAL value of 0.  */
      if (! DECL_INITIAL (decl))
	return;
      if (!MEM_P (DECL_RTL (decl))
	  || GET_CODE (XEXP (DECL_RTL (decl), 0)) != SYMBOL_REF)
	return;
      PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
      PUT_SDB_VAL (XEXP (DECL_RTL (decl), 0));
      PUT_SDB_SCL (TREE_PUBLIC (decl) ? C_EXT : C_STAT);
      break;

    case TYPE_DECL:
      /* Done with tagged types.  */
      if (DECL_NAME (decl) == 0)
	return;
      if (DECL_IGNORED_P (decl))
	return;
      /* Don't output intrinsic types.  GAS chokes on SDB .def
	 statements that contain identifiers with embedded spaces
	 (eg "unsigned long").  */
      if (DECL_IS_BUILTIN (decl))
	return;

      /* Output typedef name.  */
      if (template_name_p (DECL_NAME (decl)))
	PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
      else
	PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_NAME (decl)));
      PUT_SDB_SCL (C_TPDEF);
      break;

    case PARM_DECL:
      /* Parm decls go in their own separate chains
	 and are output by sdbout_reg_parms and sdbout_parms.  */
      gcc_unreachable ();

    case VAR_DECL:
      /* Don't mention a variable that is external.
	 Let the file that defines it describe it.  */
      if (DECL_EXTERNAL (decl))
	return;

      /* Ignore __FUNCTION__, etc.  */
      if (DECL_IGNORED_P (decl))
	return;

      /* If there was an error in the declaration, don't dump core
	 if there is no RTL associated with the variable.  */
      if (!DECL_RTL_SET_P (decl))
	return;

      SET_DECL_RTL (decl,
		    eliminate_regs (DECL_RTL (decl), VOIDmode, NULL_RTX));
#ifdef LEAF_REG_REMAP
      if (crtl->uses_only_leaf_regs)
	leaf_renumber_regs_insn (DECL_RTL (decl));
#endif
      value = DECL_RTL (decl);

      /* Don't mention a variable at all
	 if it was completely optimized into nothingness.

	 If DECL was from an inline function, then its rtl
	 is not identically the rtl that was used in this
	 particular compilation.  */
      if (REG_P (value))
	{
	  regno = REGNO (value);
	  if (regno >= FIRST_PSEUDO_REGISTER)
	    return;
	}
      else if (GET_CODE (value) == SUBREG)
	{
	  while (GET_CODE (value) == SUBREG)
	    value = SUBREG_REG (value);
	  if (REG_P (value))
	    {
	      if (REGNO (value) >= FIRST_PSEUDO_REGISTER)
		return;
	    }
	  regno = REGNO (alter_subreg (&value));
	  SET_DECL_RTL (decl, value);
	}
      /* Don't output anything if an auto variable
	 gets RTL that is static.
	 GAS version 2.2 can't handle such output.  */
      else if (MEM_P (value) && CONSTANT_P (XEXP (value, 0))
	       && ! TREE_STATIC (decl))
	return;

      /* Emit any structure, union, or enum type that has not been output.
	 This occurs for tag-less structs (et al) used to declare variables
	 within functions.  */
      if (TREE_CODE (type) == ENUMERAL_TYPE
	  || TREE_CODE (type) == RECORD_TYPE
	  || TREE_CODE (type) == UNION_TYPE
	  || TREE_CODE (type) == QUAL_UNION_TYPE)
	{
	  if (COMPLETE_TYPE_P (type)		/* not a forward reference */
	      && KNOWN_TYPE_TAG (type) == 0)	/* not yet declared */
	    sdbout_one_type (type);
	}

      /* Defer SDB information for top-level initialized variables! */
      if (! local
	  && MEM_P (value)
	  && DECL_INITIAL (decl))
	return;

      /* C++ in 2.3 makes nameless symbols.  That will be fixed later.
	 For now, avoid crashing.  */
      if (DECL_NAME (decl) == NULL_TREE)
	return;

      /* Record the name, starting a symtab entry.  */
      if (local)
	name = IDENTIFIER_POINTER (DECL_NAME (decl));
      else
	name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));

      if (MEM_P (value)
	  && GET_CODE (XEXP (value, 0)) == SYMBOL_REF)
	{
	  PUT_SDB_DEF (name);
	  if (TREE_PUBLIC (decl))
	    {
	      PUT_SDB_VAL (XEXP (value, 0));
	      PUT_SDB_SCL (C_EXT);
	    }
	  else
	    {
	      PUT_SDB_VAL (XEXP (value, 0));
	      PUT_SDB_SCL (C_STAT);
	    }
	}
      else if (regno >= 0)
	{
	  PUT_SDB_DEF (name);
	  PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (regno));
	  PUT_SDB_SCL (C_REG);
	}
      else if (MEM_P (value)
	       && (MEM_P (XEXP (value, 0))
		   || (REG_P (XEXP (value, 0))
		       && REGNO (XEXP (value, 0)) != HARD_FRAME_POINTER_REGNUM
		       && REGNO (XEXP (value, 0)) != STACK_POINTER_REGNUM)))
	/* If the value is indirect by memory or by a register
	   that isn't the frame pointer, the object is variable-sized
	   and addressed through that register or stack slot.  COFF has
	   no way to represent this, so all we can do is output the
	   variable as a pointer.  */
	{
	  PUT_SDB_DEF (name);
	  if (REG_P (XEXP (value, 0)))
	    {
	      PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (REGNO (XEXP (value, 0))));
	      PUT_SDB_SCL (C_REG);
	    }
	  else
	    {
	      /* DECL_RTL looks like (MEM (MEM (PLUS (REG...)
		 (CONST_INT...)))).
		 We want the value of that CONST_INT.  */
	      /* Encore compiler hates a newline in a macro arg, it seems.  */
	      PUT_SDB_INT_VAL (DEBUGGER_AUTO_OFFSET
			       (XEXP (XEXP (value, 0), 0)));
	      PUT_SDB_SCL (C_AUTO);
	    }

	  /* Effectively do build_pointer_type, but don't cache this type,
	     since it might be temporary whereas the type it points to
	     might have been saved for inlining.  */
	  /* Don't use REFERENCE_TYPE because dbx can't handle that.  */
	  type = make_node (POINTER_TYPE);
	  TREE_TYPE (type) = TREE_TYPE (decl);
	}
      else if (MEM_P (value)
	       && ((GET_CODE (XEXP (value, 0)) == PLUS
		    && REG_P (XEXP (XEXP (value, 0), 0))
		    && CONST_INT_P (XEXP (XEXP (value, 0), 1)))
		   /* This is for variables which are at offset zero from
		      the frame pointer.  This happens on the Alpha.
		      Non-frame pointer registers are excluded above.  */
		   || (REG_P (XEXP (value, 0)))))
	{
	  /* DECL_RTL looks like (MEM (PLUS (REG...) (CONST_INT...)))
	     or (MEM (REG...)).  We want the value of that CONST_INT
	     or zero.  */
	  PUT_SDB_DEF (name);
	  PUT_SDB_INT_VAL (DEBUGGER_AUTO_OFFSET (XEXP (value, 0)));
	  PUT_SDB_SCL (C_AUTO);
	}
      else
	{
	  /* It is something we don't know how to represent for SDB.  */
	  return;
	}
      break;

    default:
      break;
    }
  PUT_SDB_TYPE (plain_type (type));
  PUT_SDB_ENDEF;
}
Example #22
bool
for_each_index (tree *addr_p, bool (*cbck) (tree, tree *, void *), void *data)
{
  tree *nxt, *idx;

  for (; ; addr_p = nxt)
    {
      switch (TREE_CODE (*addr_p))
	{
	case SSA_NAME:
	  return cbck (*addr_p, addr_p, data);

	case MISALIGNED_INDIRECT_REF:
	case ALIGN_INDIRECT_REF:
	case INDIRECT_REF:
	  nxt = &TREE_OPERAND (*addr_p, 0);
	  return cbck (*addr_p, nxt, data);

	case BIT_FIELD_REF:
	case VIEW_CONVERT_EXPR:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	  nxt = &TREE_OPERAND (*addr_p, 0);
	  break;

	case COMPONENT_REF:
	  /* If the component has a varying offset, it behaves like an
	     index as well.  */
	  idx = &TREE_OPERAND (*addr_p, 2);
	  if (*idx
	      && !cbck (*addr_p, idx, data))
	    return false;

	  nxt = &TREE_OPERAND (*addr_p, 0);
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	  nxt = &TREE_OPERAND (*addr_p, 0);
	  if (!cbck (*addr_p, &TREE_OPERAND (*addr_p, 1), data))
	    return false;
	  break;

	case VAR_DECL:
	case PARM_DECL:
	case STRING_CST:
	case RESULT_DECL:
	case VECTOR_CST:
	case COMPLEX_CST:
	case INTEGER_CST:
	case REAL_CST:
	case FIXED_CST:
	case CONSTRUCTOR:
	  return true;

	case TARGET_MEM_REF:
	  idx = &TMR_BASE (*addr_p);
	  if (*idx
	      && !cbck (*addr_p, idx, data))
	    return false;
	  idx = &TMR_INDEX (*addr_p);
	  if (*idx
	      && !cbck (*addr_p, idx, data))
	    return false;
	  return true;

	default:
	  gcc_unreachable ();
	}
    }
}
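
Note: a sketch of a callback (hypothetical).  The walker passes each
index slot to cbck, which may rewrite *idx in place or return false to
abort the walk:

  /* Count every index position inside *ADDR_P.  */
  static bool
  count_index_cb (tree base ATTRIBUTE_UNUSED, tree *idx ATTRIBUTE_UNUSED,
		  void *data)
  {
    ++*(int *) data;
    return true;	/* keep walking */
  }

  /* int n = 0; for_each_index (&addr, count_index_cb, &n);  */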
Example #23
static tree
expand_an_in_modify_expr (location_t location, tree lhs,
			  enum tree_code modifycode, tree rhs,
			  tsubst_flags_t complain)
{
  tree array_expr_lhs = NULL_TREE, array_expr_rhs = NULL_TREE;
  tree array_expr = NULL_TREE;
  tree body = NULL_TREE;
  auto_vec<tree> cond_expr;
  vec<tree, va_gc> *lhs_array_operand = NULL, *rhs_array_operand = NULL;
  size_t lhs_rank = 0, rhs_rank = 0, ii = 0;
  vec<tree, va_gc> *rhs_list = NULL, *lhs_list = NULL;
  size_t rhs_list_size = 0, lhs_list_size = 0;
  tree new_modify_expr, new_var = NULL_TREE, builtin_loop, scalar_mods;
  bool found_builtin_fn = false;
  tree an_init, loop_with_init = alloc_stmt_list ();
  vec<vec<an_parts> > lhs_an_info = vNULL, rhs_an_info = vNULL;
  auto_vec<an_loop_parts> lhs_an_loop_info, rhs_an_loop_info;
  tree lhs_len, rhs_len;

  if (!find_rank (location, rhs, rhs, false, &rhs_rank))
    return error_mark_node;
  extract_array_notation_exprs (rhs, false, &rhs_list);
  rhs_list_size = vec_safe_length (rhs_list);
  an_init = push_stmt_list ();
  if (rhs_rank)
    {
      scalar_mods = replace_invariant_exprs (&rhs);
      if (scalar_mods)
	finish_expr_stmt (scalar_mods);
    }
  for (ii = 0; ii < rhs_list_size; ii++)
    {
      tree rhs_node = (*rhs_list)[ii];
      if (TREE_CODE (rhs_node) == CALL_EXPR)
	{
	  builtin_loop = expand_sec_reduce_builtin (rhs_node, &new_var);
	  if (builtin_loop == error_mark_node)
	    return error_mark_node;
	  else if (builtin_loop)
	    {
	      finish_expr_stmt (builtin_loop);
	      found_builtin_fn = true;
	      if (new_var)
		{
		  vec <tree, va_gc> *rhs_sub_list = NULL, *new_var_list = NULL;
		  vec_safe_push (rhs_sub_list, rhs_node);
		  vec_safe_push (new_var_list, new_var);
		  replace_array_notations (&rhs, false, rhs_sub_list,
					   new_var_list);
		}
	    }
	}
    }
  lhs_rank = 0;
  rhs_rank = 0;
  if (!find_rank (location, lhs, lhs, true, &lhs_rank)
      || !find_rank (location, rhs, rhs, true, &rhs_rank))
    {
      pop_stmt_list (an_init);
      return error_mark_node;
    }

  /* If both are scalar, then the only reason we got this far is that
     there were array notations inside it using the builtin array
     notation functions.  If so, we have already broken those up, and
     a simple build_x_modify_expr will do.  */
  if (lhs_rank == 0 && rhs_rank == 0)
    {
      if (found_builtin_fn)
	{
	  new_modify_expr = build_x_modify_expr (location, lhs,
						 modifycode, rhs, complain);
	  finish_expr_stmt (new_modify_expr);
	  pop_stmt_list (an_init);
	  return an_init;
	}
      else
	gcc_unreachable ();
    }

  /* If for some reason location is not set, check whether LHS or RHS has
     location info.  If so, use that so we at least have an idea.  */
  if (location == UNKNOWN_LOCATION)
    {
      if (EXPR_LOCATION (lhs) != UNKNOWN_LOCATION)
	location = EXPR_LOCATION (lhs);
      else if (EXPR_LOCATION (rhs) != UNKNOWN_LOCATION)
	location = EXPR_LOCATION (rhs);
    }
      
  /* We need this when we have a scatter issue.  */
  extract_array_notation_exprs (lhs, true, &lhs_list);
  rhs_list = NULL;
  extract_array_notation_exprs (rhs, true, &rhs_list);
  rhs_list_size = vec_safe_length (rhs_list);
  lhs_list_size = vec_safe_length (lhs_list);
    
  if (lhs_rank == 0 && rhs_rank != 0)
    {
      error_at (location, "%qE cannot be scalar when %qE is not", lhs, rhs);
      return error_mark_node;
    }
  if (lhs_rank != 0 && rhs_rank != 0 && lhs_rank != rhs_rank)
    {
      error_at (location, "rank mismatch between %qE and %qE", lhs, rhs);
      return error_mark_node;
    }
  
  /* Assign the array notation components to variable so that they can satisfy
     the execute-once rule.  */
  for (ii = 0; ii < lhs_list_size; ii++)
    {
      tree anode = (*lhs_list)[ii];
      make_triplet_val_inv (&ARRAY_NOTATION_START (anode));
      make_triplet_val_inv (&ARRAY_NOTATION_LENGTH (anode));
      make_triplet_val_inv (&ARRAY_NOTATION_STRIDE (anode));
    }
  for (ii = 0; ii < rhs_list_size; ii++)
    if ((*rhs_list)[ii] && TREE_CODE ((*rhs_list)[ii]) == ARRAY_NOTATION_REF)
      {
	tree aa = (*rhs_list)[ii];
	make_triplet_val_inv (&ARRAY_NOTATION_START (aa));
	make_triplet_val_inv (&ARRAY_NOTATION_LENGTH (aa));
	make_triplet_val_inv (&ARRAY_NOTATION_STRIDE (aa));
      }
  lhs_an_loop_info.safe_grow_cleared (lhs_rank);
  
  if (rhs_rank)
    rhs_an_loop_info.safe_grow_cleared (rhs_rank);

  cond_expr.safe_grow_cleared (MAX (lhs_rank, rhs_rank));
  cilkplus_extract_an_triplets (lhs_list, lhs_list_size, lhs_rank,
				&lhs_an_info);
  if (rhs_list)
    cilkplus_extract_an_triplets (rhs_list, rhs_list_size, rhs_rank,
				  &rhs_an_info);
  if (length_mismatch_in_expr_p (EXPR_LOCATION (lhs), lhs_an_info)
      || (rhs_list && length_mismatch_in_expr_p (EXPR_LOCATION (rhs),
						 rhs_an_info)))
    {
      pop_stmt_list (an_init);
      goto error;
    }
  rhs_len = ((rhs_list_size > 0 && rhs_rank > 0) ?
    rhs_an_info[0][0].length : NULL_TREE);
  lhs_len = ((lhs_list_size > 0 && lhs_rank > 0) ?
    lhs_an_info[0][0].length : NULL_TREE);
  if (lhs_list_size > 0 && rhs_list_size > 0 && lhs_rank > 0 && rhs_rank > 0
      && TREE_CODE (lhs_len) == INTEGER_CST && rhs_len
      && TREE_CODE (rhs_len) == INTEGER_CST 
      && !tree_int_cst_equal (rhs_len, lhs_len))
    { 
      error_at (location, "length mismatch between LHS and RHS"); 
      pop_stmt_list (an_init); 
      goto error;
    }
  for (ii = 0; ii < lhs_rank; ii++)
    {
      tree typ = ptrdiff_type_node;
      lhs_an_loop_info[ii].var = create_temporary_var (typ);
      add_decl_expr (lhs_an_loop_info[ii].var);
      lhs_an_loop_info[ii].ind_init = build_x_modify_expr
	(location, lhs_an_loop_info[ii].var, INIT_EXPR, build_zero_cst (typ),
	 complain);
    }

  if (rhs_list_size > 0)
    {
      rhs_array_operand = fix_sec_implicit_args (location, rhs_list,
						 lhs_an_loop_info, lhs_rank,
						 lhs);
      if (!rhs_array_operand)
	goto error;
    }
  replace_array_notations (&rhs, true, rhs_list, rhs_array_operand);
  rhs_list_size = 0;
  rhs_list = NULL;
  extract_array_notation_exprs (rhs, true, &rhs_list);
  rhs_list_size = vec_safe_length (rhs_list);    

  for (ii = 0; ii < rhs_rank; ii++)
    {
      tree typ = ptrdiff_type_node;
      rhs_an_loop_info[ii].var = create_temporary_var (typ);
      add_decl_expr (rhs_an_loop_info[ii].var);
      rhs_an_loop_info[ii].ind_init = build_x_modify_expr
	(location, rhs_an_loop_info[ii].var, INIT_EXPR, build_zero_cst (typ), 
	 complain);
    }

  if (lhs_rank)
    {
      lhs_array_operand =
	create_array_refs (location, lhs_an_info, lhs_an_loop_info,
			    lhs_list_size, lhs_rank);
      replace_array_notations (&lhs, true, lhs_list, lhs_array_operand);
    }
  
  if (rhs_array_operand)
    vec_safe_truncate (rhs_array_operand, 0);
  if (rhs_rank)
    {
      rhs_array_operand = create_array_refs (location, rhs_an_info,
					      rhs_an_loop_info, rhs_list_size,
					      rhs_rank);
      /* Replace all the array refs created by the above function because this
	 variable is blown away by the fix_sec_implicit_args function below.  */
      replace_array_notations (&rhs, true, rhs_list, rhs_array_operand);
      vec_safe_truncate (rhs_array_operand , 0);
      rhs_array_operand = fix_sec_implicit_args (location, rhs_list,
						 rhs_an_loop_info, rhs_rank,
						 rhs);
      if (!rhs_array_operand)
	goto error;
      replace_array_notations (&rhs, true, rhs_list, rhs_array_operand);
    }

  array_expr_rhs = rhs;
  array_expr_lhs = lhs;
  
  array_expr = build_x_modify_expr (location, array_expr_lhs, modifycode,
				    array_expr_rhs, complain);
  create_cmp_incr (location, &lhs_an_loop_info, lhs_rank, lhs_an_info,
		   complain);
  if (rhs_rank) 
    create_cmp_incr (location, &rhs_an_loop_info, rhs_rank, rhs_an_info, 
		     complain);
  for (ii = 0; ii < MAX (rhs_rank, lhs_rank); ii++)
    if (ii < lhs_rank && ii < rhs_rank)
      cond_expr[ii] = build_x_binary_op
	(location, TRUTH_ANDIF_EXPR, lhs_an_loop_info[ii].cmp,
	 TREE_CODE (lhs_an_loop_info[ii].cmp), rhs_an_loop_info[ii].cmp,
	 TREE_CODE (rhs_an_loop_info[ii].cmp), NULL, complain);
    else if (ii < lhs_rank && ii >= rhs_rank)
      cond_expr[ii] = lhs_an_loop_info[ii].cmp;
    else
      /* No need to compare ii < rhs_rank && ii >= lhs_rank because in a valid 
	 Array notation expression, rank of RHS cannot be greater than LHS.  */
      gcc_unreachable ();
  
  an_init = pop_stmt_list (an_init);
  append_to_statement_list (an_init, &loop_with_init);
  body = array_expr;
  for (ii = 0; ii < MAX (lhs_rank, rhs_rank); ii++)
    {
      tree incr_list = alloc_stmt_list ();
      tree init_list = alloc_stmt_list ();
      tree new_loop = push_stmt_list ();

      if (lhs_rank)
	{
	  append_to_statement_list (lhs_an_loop_info[ii].ind_init, &init_list);
	  append_to_statement_list (lhs_an_loop_info[ii].incr, &incr_list);
	}
      if (rhs_rank)
	{
	  append_to_statement_list (rhs_an_loop_info[ii].ind_init, &init_list);
	  append_to_statement_list (rhs_an_loop_info[ii].incr, &incr_list);
	}
      create_an_loop (init_list, cond_expr[ii], incr_list, body);
      body = pop_stmt_list (new_loop);
    }
  append_to_statement_list (body, &loop_with_init);

  release_vec_vec (lhs_an_info);
  release_vec_vec (rhs_an_info);

  return loop_with_init;

error:
  release_vec_vec (lhs_an_info);
  release_vec_vec (rhs_an_info);

  return error_mark_node;
}
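
Note: for context, the Cilk Plus construct this routine expands is an
array-notation assignment of the form array[start:length:stride].
Conceptually (rank 1, illustrative):

  a[0:n:1] = b[0:n:1] + 5;

  /* expands to roughly: */
  for (ptrdiff_t i = 0; i < n; i++)
    a[0 + i * 1] = b[0 + i * 1] + 5;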
Example #24
static tree
fix_builtin_array_notation_fn (tree an_builtin_fn, tree *new_var)
{
  tree new_var_type = NULL_TREE, func_parm, new_expr, new_yes_expr, new_no_expr;
  tree array_ind_value = NULL_TREE, new_no_ind, new_yes_ind, new_no_list;
  tree new_yes_list, new_cond_expr, new_var_init = NULL_TREE;
  tree new_exp_init = NULL_TREE;
  vec<tree, va_gc> *array_list = NULL, *array_operand = NULL;
  size_t list_size = 0, rank = 0, ii = 0;
  tree loop_init, array_op0;
  tree identity_value = NULL_TREE, call_fn = NULL_TREE, new_call_expr, body;
  location_t location = UNKNOWN_LOCATION;
  tree loop_with_init = alloc_stmt_list ();
  vec<vec<an_parts> > an_info = vNULL;
  vec<an_loop_parts> an_loop_info = vNULL;
  enum built_in_function an_type =
    is_cilkplus_reduce_builtin (CALL_EXPR_FN (an_builtin_fn));
  if (an_type == BUILT_IN_NONE)
    return NULL_TREE;

  /* Builtin call should contain at least one argument.  */
  if (call_expr_nargs (an_builtin_fn) == 0)
    {
      error_at (EXPR_LOCATION (an_builtin_fn), "invalid builtin arguments");
      return error_mark_node;
    }

  if (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE
      || an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING)
    {
      call_fn = CALL_EXPR_ARG (an_builtin_fn, 2);
      if (TREE_CODE (call_fn) == ADDR_EXPR)
	call_fn = TREE_OPERAND (call_fn, 0);
      identity_value = CALL_EXPR_ARG (an_builtin_fn, 0);
      func_parm = CALL_EXPR_ARG (an_builtin_fn, 1);
    }
  else
    func_parm = CALL_EXPR_ARG (an_builtin_fn, 0);
  
  /* Fully fold any EXCESS_PRECISION_EXPR that can occur in the function
     parameter.  */
  func_parm = c_fully_fold (func_parm, false, NULL);
  if (func_parm == error_mark_node)
    return error_mark_node;
  
  location = EXPR_LOCATION (an_builtin_fn);
  
  if (!find_rank (location, an_builtin_fn, an_builtin_fn, true, &rank))
    return error_mark_node;
 
  if (rank == 0)
    {
      error_at (location, "Invalid builtin arguments");
      return error_mark_node;
    }
  else if (rank > 1 
	   && (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND
	       || an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND))
    {
      error_at (location, "__sec_reduce_min_ind or __sec_reduce_max_ind cannot"
		" have arrays with dimension greater than 1");
      return error_mark_node;
    }
  
  extract_array_notation_exprs (func_parm, true, &array_list);
  list_size = vec_safe_length (array_list);
  switch (an_type)
    {
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ADD:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUL:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN:
      new_var_type = TREE_TYPE ((*array_list)[0]);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_ZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_NONZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_ZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_NONZERO:
      new_var_type = integer_type_node;
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND:
      new_var_type = integer_type_node;
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE:
      if (call_fn && identity_value) 
	new_var_type = TREE_TYPE ((*array_list)[0]);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING:
      new_var_type = NULL_TREE;
      break;
    default:
      gcc_unreachable (); 
    }

  an_loop_info.safe_grow_cleared (rank);
  cilkplus_extract_an_triplets (array_list, list_size, rank, &an_info);
  loop_init = alloc_stmt_list ();

  for (ii = 0; ii < rank; ii++)
    {
      an_loop_info[ii].var = create_tmp_var (integer_type_node);
      an_loop_info[ii].ind_init =
	build_modify_expr (location, an_loop_info[ii].var,
			   TREE_TYPE (an_loop_info[ii].var), NOP_EXPR,
			   location,
			   build_int_cst (TREE_TYPE (an_loop_info[ii].var), 0),
			   TREE_TYPE (an_loop_info[ii].var));	
    }
  array_operand = create_array_refs (location, an_info, an_loop_info,
				     list_size, rank);
  replace_array_notations (&func_parm, true, array_list, array_operand);

  create_cmp_incr (location, &an_loop_info, rank, an_info);
  if (an_type != BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING)
    {
      *new_var = build_decl (location, VAR_DECL, NULL_TREE, new_var_type);
      gcc_assert (*new_var && *new_var != error_mark_node);
    }
  else
    *new_var = NULL_TREE;
  
  if (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND
      || an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND)
    array_ind_value = build_decl (location, VAR_DECL, NULL_TREE, 
				  TREE_TYPE (func_parm));
  array_op0 = (*array_operand)[0];
  if (TREE_CODE (array_op0) == INDIRECT_REF)
    array_op0 = TREE_OPERAND (array_op0, 0);
  switch (an_type)
    {
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ADD:
      new_var_init = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, build_zero_cst (new_var_type), new_var_type);
      new_expr = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), PLUS_EXPR,
	 location, func_parm, TREE_TYPE (func_parm));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUL:
      new_var_init = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, build_one_cst (new_var_type), new_var_type);
      new_expr = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), MULT_EXPR,
	 location, func_parm, TREE_TYPE (func_parm));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_ZERO:
      new_var_init = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, build_one_cst (new_var_type), new_var_type);
      /* Initially we assume everything is zero; if we find a case where
	 that is NOT true, we set the result to false.  Otherwise we
	 just keep the previous value.  */
      new_yes_expr = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, build_zero_cst (TREE_TYPE (*new_var)),
	 TREE_TYPE (*new_var));
      new_no_expr = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, *new_var, TREE_TYPE (*new_var));
      new_cond_expr = build2 (NE_EXPR, TREE_TYPE (func_parm), func_parm,
			      build_zero_cst (TREE_TYPE (func_parm)));
      new_expr = build_conditional_expr
	(location, new_cond_expr, false, new_yes_expr,
	 TREE_TYPE (new_yes_expr), new_no_expr, TREE_TYPE (new_no_expr));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_NONZERO:
      new_var_init = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, build_one_cst (new_var_type), new_var_type);
      /* Initially we assume everything is non-zero; if we find a case
	 where that is NOT true, we set the result to false.  Otherwise
	 we just keep the previous value.  */
      new_yes_expr = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, build_zero_cst (TREE_TYPE (*new_var)),
	 TREE_TYPE (*new_var));
      new_no_expr = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, *new_var, TREE_TYPE (*new_var));
      new_cond_expr = build2 (EQ_EXPR, TREE_TYPE (func_parm), func_parm,
			      build_zero_cst (TREE_TYPE (func_parm)));
      new_expr = build_conditional_expr
	(location, new_cond_expr, false, new_yes_expr,
	 TREE_TYPE (new_yes_expr), new_no_expr, TREE_TYPE (new_no_expr));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_ZERO:
      new_var_init = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, build_zero_cst (new_var_type), new_var_type);
      /* Initially we assume there are NO zeros in the list. When we find 
	 a non-zero, we keep the previous value.  If we find a zero, we 
	 set the value to true.  */
      new_yes_expr = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, build_one_cst (new_var_type), new_var_type);
      new_no_expr = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, *new_var, TREE_TYPE (*new_var));
      new_cond_expr = build2 (EQ_EXPR, TREE_TYPE (func_parm), func_parm,
			      build_zero_cst (TREE_TYPE (func_parm)));
      new_expr = build_conditional_expr
	(location, new_cond_expr, false, new_yes_expr,
	 TREE_TYPE (new_yes_expr), new_no_expr, TREE_TYPE (new_no_expr));   
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_NONZERO:
      new_var_init = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, build_zero_cst (new_var_type), new_var_type);
      /* Initially we assume there are NO non-zeros in the list. When we find 
	 a zero, we keep the previous value.  If we find a non-zero, we set 
	 the value to true.  */
      new_yes_expr = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, build_one_cst (new_var_type), new_var_type);
      new_no_expr = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, *new_var, TREE_TYPE (*new_var));
      new_cond_expr = build2 (NE_EXPR, TREE_TYPE (func_parm), func_parm,
			      build_zero_cst (TREE_TYPE (func_parm)));
      new_expr = build_conditional_expr
	(location, new_cond_expr, false, new_yes_expr,
	 TREE_TYPE (new_yes_expr), new_no_expr, TREE_TYPE (new_no_expr));   
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX:
      if (TYPE_MIN_VALUE (new_var_type))
	new_var_init = build_modify_expr
	  (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	   location, TYPE_MIN_VALUE (new_var_type), new_var_type);
      else
	new_var_init = build_modify_expr
	  (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	   location, func_parm, new_var_type);
      new_no_expr = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, *new_var, TREE_TYPE (*new_var));
      new_yes_expr = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, func_parm, TREE_TYPE (*new_var));
      new_expr = build_conditional_expr
	(location,
	 build2 (LT_EXPR, TREE_TYPE (*new_var), *new_var, func_parm), false,
	 new_yes_expr, TREE_TYPE (*new_var), new_no_expr, TREE_TYPE (*new_var));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN:
      if (TYPE_MAX_VALUE (new_var_type))
	new_var_init = build_modify_expr
	  (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	   location, TYPE_MAX_VALUE (new_var_type), new_var_type);
      else
	new_var_init = build_modify_expr
	  (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	   location, func_parm, new_var_type);
      new_no_expr = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, *new_var, TREE_TYPE (*new_var));
      new_yes_expr = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, func_parm, TREE_TYPE (*new_var));
      new_expr = build_conditional_expr
	(location,
	 build2 (GT_EXPR, TREE_TYPE (*new_var), *new_var, func_parm), false,
	 new_yes_expr, TREE_TYPE (*new_var), new_no_expr, TREE_TYPE (*new_var));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND:
      new_var_init = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, build_zero_cst (new_var_type), new_var_type);
      new_exp_init = build_modify_expr
	(location, array_ind_value, TREE_TYPE (array_ind_value),
	 NOP_EXPR, location, func_parm, TREE_TYPE (func_parm));
      new_no_ind = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, *new_var, TREE_TYPE (*new_var));
      new_no_expr = build_modify_expr
	(location, array_ind_value, TREE_TYPE (array_ind_value),
	 NOP_EXPR,
	 location, array_ind_value, TREE_TYPE (array_ind_value));
      if (list_size > 1)
	{
	  new_yes_ind = build_modify_expr
	    (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	     location, an_loop_info[0].var, TREE_TYPE (an_loop_info[0].var));
	  new_yes_expr = build_modify_expr
	    (location, array_ind_value, TREE_TYPE (array_ind_value),
	     NOP_EXPR,
	     location, func_parm, TREE_TYPE ((*array_operand)[0]));
	}
      else
	{
	  new_yes_ind = build_modify_expr
	    (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	     location, TREE_OPERAND (array_op0, 1),
	     TREE_TYPE (TREE_OPERAND (array_op0, 1)));
	  new_yes_expr = build_modify_expr
	    (location, array_ind_value, TREE_TYPE (array_ind_value),
	     NOP_EXPR,
	     location, func_parm, TREE_OPERAND (array_op0, 1));
	}
      new_yes_list = alloc_stmt_list ();
      append_to_statement_list (new_yes_ind, &new_yes_list);
      append_to_statement_list (new_yes_expr, &new_yes_list);

      new_no_list = alloc_stmt_list ();
      append_to_statement_list (new_no_ind, &new_no_list);
      append_to_statement_list (new_no_expr, &new_no_list);
 
      new_expr = build_conditional_expr
	(location,
	 build2 (LE_EXPR, TREE_TYPE (array_ind_value), array_ind_value,
		 func_parm),
	 false,
	 new_yes_list, TREE_TYPE (*new_var), new_no_list, TREE_TYPE (*new_var));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND:
      new_var_init = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, build_zero_cst (new_var_type), new_var_type);
      new_exp_init = build_modify_expr
	(location, array_ind_value, TREE_TYPE (array_ind_value),
	 NOP_EXPR, location, func_parm, TREE_TYPE (func_parm));
      new_no_ind = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, *new_var, TREE_TYPE (*new_var));
      new_no_expr = build_modify_expr
	(location, array_ind_value, TREE_TYPE (array_ind_value),
	 NOP_EXPR,
	 location, array_ind_value, TREE_TYPE (array_ind_value));
      if (list_size > 1)
	{
	  new_yes_ind = build_modify_expr
	    (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	     location, an_loop_info[0].var, TREE_TYPE (an_loop_info[0].var));
	  new_yes_expr = build_modify_expr
	    (location, array_ind_value, TREE_TYPE (array_ind_value),
	     NOP_EXPR,
	     location, func_parm, TREE_TYPE (array_op0));
	}
      else
	{
	  new_yes_ind = build_modify_expr
	    (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	     location, TREE_OPERAND (array_op0, 1),
	     TREE_TYPE (TREE_OPERAND (array_op0, 1)));
	  new_yes_expr = build_modify_expr
	    (location, array_ind_value, TREE_TYPE (array_ind_value),
	     NOP_EXPR,
	     location, func_parm, TREE_OPERAND (array_op0, 1));
	}
      new_yes_list = alloc_stmt_list ();
      append_to_statement_list (new_yes_ind, &new_yes_list);
      append_to_statement_list (new_yes_expr, &new_yes_list);

      new_no_list = alloc_stmt_list ();
      append_to_statement_list (new_no_ind, &new_no_list);
      append_to_statement_list (new_no_expr, &new_no_list);
 
      new_expr = build_conditional_expr
	(location,
	 build2 (GE_EXPR, TREE_TYPE (array_ind_value), array_ind_value,
		 func_parm),
	 false,
	 new_yes_list, TREE_TYPE (*new_var), new_no_list, TREE_TYPE (*new_var));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE:
      new_var_init = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, identity_value, new_var_type);
      new_call_expr = build_call_expr (call_fn, 2, *new_var, func_parm);
      new_expr = build_modify_expr
	(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
	 location, new_call_expr, TREE_TYPE (*new_var));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING:
      new_expr = build_call_expr (call_fn, 2, identity_value, func_parm);
      break;
    default:
      gcc_unreachable ();
      break;
    }

  for (ii = 0; ii < rank; ii++)
    append_to_statement_list (an_loop_info[ii].ind_init, &loop_init);

  if (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND
      || an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND)
    append_to_statement_list (new_exp_init, &loop_init);
  if (an_type != BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING)
    append_to_statement_list (new_var_init, &loop_init);

  append_to_statement_list_force (loop_init, &loop_with_init);
  body = new_expr;
  for (ii = 0; ii < rank; ii++)
    {
      tree new_loop = push_stmt_list ();
      c_finish_loop (location, an_loop_info[ii].cmp, an_loop_info[ii].incr,
		     body, NULL_TREE, NULL_TREE, true);
      body = pop_stmt_list (new_loop);
    }
  append_to_statement_list_force (body, &loop_with_init);

  an_info.release ();
  an_loop_info.release ();
  
  return loop_with_init;
}
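
For orientation, here is a hedged plain-C sketch — not compiler output — of the loop the BUILT_IN_CILKPLUS_SEC_REDUCE_MAX case above effectively assembles for a one-dimensional section such as __sec_reduce_max (a[0:n]). The name sec_reduce_max_sketch and the int element type are illustrative assumptions.

/* Minimal sketch, assuming an int element type.  It mirrors the trees
   built above: new_var_init seeds the accumulator with TYPE_MIN_VALUE,
   and the LT_EXPR condition selects new_yes_expr (take func_parm) or
   new_no_expr (keep the accumulator).  */
#include <limits.h>
#include <stddef.h>

int
sec_reduce_max_sketch (const int *a, size_t n)
{
  int acc = INT_MIN;                 /* new_var_init */
  for (size_t ii = 0; ii < n; ii++)  /* an_loop_info[0]: ind_init/cmp/incr */
    {
      if (acc < a[ii])               /* LT_EXPR condition */
        acc = a[ii];                 /* new_yes_expr */
      /* new_no_expr: keep the previous value.  */
    }
  return acc;
}
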
Example #25
rtx
copy_rtx (rtx orig)
{
  rtx copy;
  int i, j;
  RTX_CODE code;
  const char *format_ptr;

  code = GET_CODE (orig);

  switch (code)
    {
    case REG:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
    case SCRATCH:
      /* SCRATCH must be shared because each one represents a distinct
	 value.  */
      return orig;
    case CLOBBER:
      if (REG_P (XEXP (orig, 0)) && REGNO (XEXP (orig, 0)) < FIRST_PSEUDO_REGISTER)
	return orig;
      break;

    case CONST:
      /* CONST can be shared if it contains a SYMBOL_REF.  If it contains
	 a LABEL_REF, it isn't sharable.  */
      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (orig, 0), 0)) == SYMBOL_REF
	  && GET_CODE (XEXP (XEXP (orig, 0), 1)) == CONST_INT)
	return orig;
      break;

      /* A MEM with a constant address is not sharable.  The problem is that
	 the constant address may need to be reloaded.  If the mem is shared,
	 then reloading one copy of this mem will cause all copies to appear
	 to have been reloaded.  */

    default:
      break;
    }

  /* Copy the various flags, fields, and other information.  We assume
     that all fields need copying, and then clear the fields that should
     not be copied.  That is the sensible default behavior, and forces
     us to explicitly document why we are *not* copying a flag.  */
  copy = shallow_copy_rtx (orig);

  /* We do not copy the USED flag, which is used as a mark bit during
     walks over the RTL.  */
  RTX_FLAG (copy, used) = 0;

  /* We do not copy FRAME_RELATED for INSNs.  */
  if (INSN_P (orig))
    RTX_FLAG (copy, frame_related) = 0;
  RTX_FLAG (copy, jump) = RTX_FLAG (orig, jump);
  RTX_FLAG (copy, call) = RTX_FLAG (orig, call);

  format_ptr = GET_RTX_FORMAT (GET_CODE (copy));

  for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++)
    switch (*format_ptr++)
      {
      case 'e':
	if (XEXP (orig, i) != NULL)
	  XEXP (copy, i) = copy_rtx (XEXP (orig, i));
	break;

      case 'E':
      case 'V':
	if (XVEC (orig, i) != NULL)
	  {
	    XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i));
	    for (j = 0; j < XVECLEN (copy, i); j++)
	      XVECEXP (copy, i, j) = copy_rtx (XVECEXP (orig, i, j));
	  }
	break;

      case 't':
      case 'w':
      case 'i':
      case 's':
      case 'S':
      case 'T':
      case 'u':
      case 'B':
      case '0':
	/* These are left unchanged.  */
	break;

      default:
	gcc_unreachable ();
      }
  return copy;
}
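
The sharing discipline in copy_rtx — leaves that stand for unique values are returned as-is, everything else gets a shallow copy whose operands are copied recursively — can be shown standalone. A minimal sketch, assuming a toy tagged-union tree; struct node and copy_node are illustrative, not GCC's API.

#include <stdlib.h>

enum node_code { N_CONST, N_PLUS };

struct node
{
  enum node_code code;
  long value;            /* used by N_CONST */
  struct node *op[2];    /* used by N_PLUS  */
};

struct node *
copy_node (struct node *orig)
{
  /* Shared leaf, analogous to REG/CONST_INT/SYMBOL_REF/... above.  */
  if (orig->code == N_CONST)
    return orig;

  /* Analogue of shallow_copy_rtx followed by recursive operand copies
     (allocation-failure handling omitted in this sketch).  */
  struct node *copy = malloc (sizeof *copy);
  *copy = *orig;
  for (int i = 0; i < 2; i++)
    if (orig->op[i])
      copy->op[i] = copy_node (orig->op[i]);
  return copy;
}
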
Example #26
tree
build_array_notation_expr (location_t location, tree lhs, tree lhs_origtype,
			   enum tree_code modifycode, location_t rhs_loc,
			   tree rhs, tree rhs_origtype)
{
  bool found_builtin_fn = false;
  tree array_expr_lhs = NULL_TREE, array_expr_rhs = NULL_TREE;
  tree array_expr = NULL_TREE;
  tree an_init = NULL_TREE;
  vec<tree> cond_expr = vNULL;
  tree body, loop_with_init = alloc_stmt_list();
  tree scalar_mods = NULL_TREE;
  vec<tree, va_gc> *rhs_array_operand = NULL, *lhs_array_operand = NULL;
  size_t lhs_rank = 0, rhs_rank = 0;
  size_t ii = 0;
  vec<tree, va_gc> *lhs_list = NULL, *rhs_list = NULL;
  tree new_modify_expr, new_var = NULL_TREE, builtin_loop = NULL_TREE;
  size_t rhs_list_size = 0, lhs_list_size = 0; 
  vec<vec<an_parts> > lhs_an_info = vNULL, rhs_an_info = vNULL;
  vec<an_loop_parts> lhs_an_loop_info = vNULL, rhs_an_loop_info = vNULL;
  
  /* If either of these is an error_mark_node, an error message has been
     emitted already; there is no need to emit multiple error messages.  */
  if (lhs == error_mark_node || rhs == error_mark_node)
    return error_mark_node;
  
  if (!find_rank (location, rhs, rhs, false, &rhs_rank))
    return error_mark_node;
  
  extract_array_notation_exprs (rhs, false, &rhs_list);
  rhs_list_size = vec_safe_length (rhs_list);
  an_init = push_stmt_list ();
  if (rhs_rank)
    {
      scalar_mods = replace_invariant_exprs (&rhs);
      if (scalar_mods)
	add_stmt (scalar_mods);
    }
  for (ii = 0; ii < rhs_list_size; ii++)
    {
      tree rhs_node = (*rhs_list)[ii];
      if (TREE_CODE (rhs_node) == CALL_EXPR)
	{
	  builtin_loop = fix_builtin_array_notation_fn (rhs_node, &new_var);
	  if (builtin_loop == error_mark_node)
	    {
	      pop_stmt_list (an_init); 
	      return error_mark_node;
	    }
	  else if (builtin_loop)
	    {
	      add_stmt (builtin_loop);
	      found_builtin_fn = true;
	      if (new_var)
		{
		  vec<tree, va_gc> *rhs_sub_list = NULL, *new_var_list = NULL;
		  vec_safe_push (rhs_sub_list, rhs_node);
		  vec_safe_push (new_var_list, new_var);
		  replace_array_notations (&rhs, false, rhs_sub_list,
					   new_var_list);
		}
	    }
	}
    }

  lhs_rank = 0;
  rhs_rank = 0;
  if (!find_rank (location, lhs, lhs, true, &lhs_rank))
    {
      pop_stmt_list (an_init);
      return error_mark_node;
    }
  
  if (!find_rank (location, rhs, rhs, true, &rhs_rank))
    {
      pop_stmt_list (an_init);
      return error_mark_node;
    }

  if (lhs_rank == 0 && rhs_rank == 0)
    {
      if (found_builtin_fn)
	{
	  new_modify_expr = build_modify_expr (location, lhs, lhs_origtype,
					       modifycode, rhs_loc, rhs,
					       rhs_origtype);
	  add_stmt (new_modify_expr);
	  pop_stmt_list (an_init);	  
	  return an_init;
	}
      else
	{
	  pop_stmt_list (an_init);
	  return NULL_TREE;
	}
    }
  rhs_list_size = 0;
  rhs_list = NULL;
  extract_array_notation_exprs (rhs, true, &rhs_list);
  extract_array_notation_exprs (lhs, true, &lhs_list);
  rhs_list_size = vec_safe_length (rhs_list);
  lhs_list_size = vec_safe_length (lhs_list);
  
  if (lhs_rank == 0 && rhs_rank != 0)
    {
      tree rhs_base = rhs;
      /* Walk down to the underlying array so the diagnostic names it.  */
      if (TREE_CODE (rhs_base) == ARRAY_NOTATION_REF)
	for (ii = 0; ii < rhs_rank; ii++)
	  rhs_base = ARRAY_NOTATION_ARRAY (rhs_base);
      error_at (location, "%qE cannot be scalar when %qE is not", lhs,
		rhs_base);
      pop_stmt_list (an_init);
      return error_mark_node;
    }
  if (lhs_rank != 0 && rhs_rank != 0 && lhs_rank != rhs_rank)
    {
      error_at (location, "rank mismatch between %qE and %qE", lhs, rhs);
      pop_stmt_list (an_init);
      return error_mark_node;
    }
  
  /* Here we assign the array notation components to variables so that we
     can satisfy the exec-once rule.  */
  for (ii = 0; ii < lhs_list_size; ii++)
    { 
      tree array_node = (*lhs_list)[ii];
      make_triplet_val_inv (location, &ARRAY_NOTATION_START (array_node));
      make_triplet_val_inv (location, &ARRAY_NOTATION_LENGTH (array_node));
      make_triplet_val_inv (location, &ARRAY_NOTATION_STRIDE (array_node));
    }
  for (ii = 0; ii < rhs_list_size; ii++)
    if ((*rhs_list)[ii] && TREE_CODE ((*rhs_list)[ii]) == ARRAY_NOTATION_REF)
      {  
	tree array_node = (*rhs_list)[ii];
	make_triplet_val_inv (location, &ARRAY_NOTATION_START (array_node));
	make_triplet_val_inv (location, &ARRAY_NOTATION_LENGTH (array_node));
	make_triplet_val_inv (location, &ARRAY_NOTATION_STRIDE (array_node));
      }
  
  cond_expr.safe_grow_cleared (MAX (lhs_rank, rhs_rank));

  lhs_an_loop_info.safe_grow_cleared (lhs_rank);
  if (rhs_rank)
    rhs_an_loop_info.safe_grow_cleared (rhs_rank);

  cilkplus_extract_an_triplets (lhs_list, lhs_list_size, lhs_rank,
				&lhs_an_info);
  if (rhs_rank)
    cilkplus_extract_an_triplets (rhs_list, rhs_list_size, rhs_rank,
				  &rhs_an_info);
  if (length_mismatch_in_expr_p (EXPR_LOCATION (lhs), lhs_an_info)
      || (rhs_rank
	  && length_mismatch_in_expr_p (EXPR_LOCATION (rhs), rhs_an_info)))
    {
      pop_stmt_list (an_init);
      return error_mark_node;
    }
  if (lhs_list_size > 0 && rhs_list_size > 0 && lhs_rank > 0 && rhs_rank > 0
      && TREE_CODE (lhs_an_info[0][0].length) == INTEGER_CST
      && rhs_an_info[0][0].length
      && TREE_CODE (rhs_an_info[0][0].length) == INTEGER_CST)
    {
      HOST_WIDE_INT l_length = int_cst_value (lhs_an_info[0][0].length);
      HOST_WIDE_INT r_length = int_cst_value (rhs_an_info[0][0].length);
      /* Lengths can be negative or positive.  As long as the magnitudes
	 match, the array notation is valid.  */
      if (absu_hwi (l_length) != absu_hwi (r_length))
	{
	  error_at (location, "length mismatch between LHS and RHS");
	  pop_stmt_list (an_init);
	  return error_mark_node;
	}
    }
  for (ii = 0; ii < lhs_rank; ii++)
    if (lhs_an_info[0][ii].is_vector)
      {
	lhs_an_loop_info[ii].var = create_tmp_var (integer_type_node);
	lhs_an_loop_info[ii].ind_init = build_modify_expr
	  (location, lhs_an_loop_info[ii].var,
	   TREE_TYPE (lhs_an_loop_info[ii].var), NOP_EXPR,
	   location, build_zero_cst (TREE_TYPE (lhs_an_loop_info[ii].var)),
	   TREE_TYPE (lhs_an_loop_info[ii].var));
      }
  for (ii = 0; ii < rhs_rank; ii++)
    {
      /* When we have a polynomial, we assume that the indices are of type 
	 integer.  */
      rhs_an_loop_info[ii].var = create_tmp_var (integer_type_node);
      rhs_an_loop_info[ii].ind_init = build_modify_expr
	(location, rhs_an_loop_info[ii].var,
	 TREE_TYPE (rhs_an_loop_info[ii].var), NOP_EXPR,
	 location, build_int_cst (TREE_TYPE (rhs_an_loop_info[ii].var), 0),
	 TREE_TYPE (rhs_an_loop_info[ii].var));
    }
  if (lhs_rank)
    {
      lhs_array_operand = create_array_refs
	(location, lhs_an_info, lhs_an_loop_info, lhs_list_size, lhs_rank);
      replace_array_notations (&lhs, true, lhs_list, lhs_array_operand);
      array_expr_lhs = lhs;
    }
  if (rhs_array_operand)
    vec_safe_truncate (rhs_array_operand, 0);
  if (rhs_rank)
    {
      rhs_array_operand = create_array_refs
	(location, rhs_an_info, rhs_an_loop_info, rhs_list_size, rhs_rank);
      replace_array_notations (&rhs, true, rhs_list, rhs_array_operand);
      vec_safe_truncate (rhs_array_operand, 0);
      rhs_array_operand = fix_sec_implicit_args (location, rhs_list,
						 rhs_an_loop_info, rhs_rank,
						 rhs);
      if (!rhs_array_operand)
	return error_mark_node;
      replace_array_notations (&rhs, true, rhs_list, rhs_array_operand);
    }
  else if (rhs_list_size > 0)
    {
      rhs_array_operand = fix_sec_implicit_args (location, rhs_list,
						 lhs_an_loop_info, lhs_rank,
						 lhs);
      if (!rhs_array_operand)
	return error_mark_node;
      replace_array_notations (&rhs, true, rhs_list, rhs_array_operand);
    }
  array_expr_lhs = lhs;
  array_expr_rhs = rhs;
  array_expr = build_modify_expr (location, array_expr_lhs, lhs_origtype, 
				  modifycode, rhs_loc, array_expr_rhs, 
				  rhs_origtype);
  create_cmp_incr (location, &lhs_an_loop_info, lhs_rank, lhs_an_info);
  if (rhs_rank)
    create_cmp_incr (location, &rhs_an_loop_info, rhs_rank, rhs_an_info);
  
  for (ii = 0; ii < MAX (lhs_rank, rhs_rank); ii++)
    if (ii < lhs_rank && ii < rhs_rank)
      cond_expr[ii] = build2 (TRUTH_ANDIF_EXPR, boolean_type_node,
			      lhs_an_loop_info[ii].cmp,
			      rhs_an_loop_info[ii].cmp);
    else if (ii < lhs_rank && ii >= rhs_rank)
      cond_expr[ii] = lhs_an_loop_info[ii].cmp;
    else
      gcc_unreachable ();

  an_init = pop_stmt_list (an_init);
  append_to_statement_list_force (an_init, &loop_with_init);
  body = array_expr;
  for (ii = 0; ii < MAX (lhs_rank, rhs_rank); ii++)
    {
      tree incr_list = alloc_stmt_list ();
      tree new_loop = push_stmt_list ();
      if (lhs_rank)
	add_stmt (lhs_an_loop_info[ii].ind_init);
      if (rhs_rank)
	add_stmt (rhs_an_loop_info[ii].ind_init);
      if (lhs_rank)
	append_to_statement_list_force (lhs_an_loop_info[ii].incr, &incr_list);
      if (rhs_rank && rhs_an_loop_info[ii].incr)
	append_to_statement_list_force (rhs_an_loop_info[ii].incr, &incr_list);
      c_finish_loop (location, cond_expr[ii], incr_list, body, NULL_TREE,
		     NULL_TREE, true);
      body = pop_stmt_list (new_loop);
    }
  append_to_statement_list_force (body, &loop_with_init);

  lhs_an_info.release ();
  lhs_an_loop_info.release ();
  if (rhs_rank)
    {
      rhs_an_info.release ();
      rhs_an_loop_info.release ();
    }
  cond_expr.release ();
  return loop_with_init;
}
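
As a rough model of the result, assuming rank 1 and matching triplets with stride 1, the loop nest built above for a statement like a[0:n] = b[0:n] + c[0:n] behaves like the hand-written C below; an_assign_sketch and the int types are assumptions for illustration, not the emitted trees themselves.

/* Minimal sketch: ind_init seeds the index variables, the while
   condition mirrors the TRUTH_ANDIF_EXPR over both cmp expressions,
   and the two increments mirror incr_list.  */
void
an_assign_sketch (int *a, const int *b, const int *c, int n)
{
  int lhs_ii = 0, rhs_ii = 0;             /* ind_init */
  while (lhs_ii < n && rhs_ii < n)        /* cond_expr[0] */
    {
      a[lhs_ii] = b[rhs_ii] + c[rhs_ii];  /* array_expr */
      lhs_ii++;                           /* lhs incr   */
      rhs_ii++;                           /* rhs incr   */
    }
}
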
Example #27
void
print_pattern (char *buf, const_rtx x, int verbose)
{
  char t1[BUF_LEN], t2[BUF_LEN], t3[BUF_LEN];

  switch (GET_CODE (x))
    {
    case SET:
      print_value (t1, SET_DEST (x), verbose);
      print_value (t2, SET_SRC (x), verbose);
      sprintf (buf, "%s=%s", t1, t2);
      break;
    case RETURN:
      sprintf (buf, "return");
      break;
    case SIMPLE_RETURN:
      sprintf (buf, "simple_return");
      break;
    case CALL:
      print_exp (buf, x, verbose);
      break;
    case CLOBBER:
      print_value (t1, XEXP (x, 0), verbose);
      sprintf (buf, "clobber %s", t1);
      break;
    case USE:
      print_value (t1, XEXP (x, 0), verbose);
      sprintf (buf, "use %s", t1);
      break;
    case VAR_LOCATION:
      print_value (t1, PAT_VAR_LOCATION_LOC (x), verbose);
      sprintf (buf, "loc %s", t1);
      break;
    case COND_EXEC:
      if (GET_CODE (COND_EXEC_TEST (x)) == NE
	  && XEXP (COND_EXEC_TEST (x), 1) == const0_rtx)
	print_value (t1, XEXP (COND_EXEC_TEST (x), 0), verbose);
      else if (GET_CODE (COND_EXEC_TEST (x)) == EQ
	       && XEXP (COND_EXEC_TEST (x), 1) == const0_rtx)
	{
	  t1[0] = '!';
	  print_value (t1 + 1, XEXP (COND_EXEC_TEST (x), 0), verbose);
	}
      else
	print_value (t1, COND_EXEC_TEST (x), verbose);
      print_pattern (t2, COND_EXEC_CODE (x), verbose);
      sprintf (buf, "(%s) %s", t1, t2);
      break;
    case PARALLEL:
      {
	int i;

	sprintf (t1, "{");
	for (i = 0; i < XVECLEN (x, 0); i++)
	  {
	    print_pattern (t2, XVECEXP (x, 0, i), verbose);
	    sprintf (t3, "%s%s;", t1, t2);
	    strcpy (t1, t3);
	  }
	sprintf (buf, "%s}", t1);
      }
      break;
    case SEQUENCE:
      /* Should never see SEQUENCE codes until after reorg.  */
      gcc_unreachable ();
    case ASM_INPUT:
      sprintf (buf, "asm {%s}", XSTR (x, 0));
      break;
    case ADDR_VEC:
      /* Fall through.  */
    case ADDR_DIFF_VEC:
      print_value (buf, XEXP (x, 0), verbose);
      break;
    case TRAP_IF:
      print_value (t1, TRAP_CONDITION (x), verbose);
      sprintf (buf, "trap_if %s", t1);
      break;
    case UNSPEC:
      {
	int i;

	sprintf (t1, "unspec{");
	for (i = 0; i < XVECLEN (x, 0); i++)
	  {
	    print_pattern (t2, XVECEXP (x, 0, i), verbose);
	    sprintf (t3, "%s%s;", t1, t2);
	    strcpy (t1, t3);
	  }
	sprintf (buf, "%s}", t1);
      }
      break;
    case UNSPEC_VOLATILE:
      {
	int i;

	sprintf (t1, "unspec/v{");
	for (i = 0; i < XVECLEN (x, 0); i++)
	  {
	    print_pattern (t2, XVECEXP (x, 0, i), verbose);
	    sprintf (t3, "%s%s;", t1, t2);
	    strcpy (t1, t3);
	  }
	sprintf (buf, "%s}", t1);
      }
      break;
    default:
      print_value (buf, x, verbose);
    }
}				/* print_pattern */
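
A side note on the PARALLEL/UNSPEC branches above: each iteration re-prints the whole accumulated prefix with sprintf and copies it back with strcpy, which is quadratic in the output length. A minimal sketch of the same append using a write cursor instead (append_vector and elems are illustrative stand-ins, not the GCC routine):

#include <stdio.h>

static void
append_vector (char *buf, const char *const *elems, int n)
{
  /* Track the end of the string instead of re-printing the prefix.  */
  size_t pos = (size_t) sprintf (buf, "{");
  for (int i = 0; i < n; i++)
    pos += (size_t) sprintf (buf + pos, "%s;", elems[i]);
  sprintf (buf + pos, "}");
}
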
Example #28
static void
lto_symtab_merge_decls_1 (symtab_node first)
{
  symtab_node e, prevailing;
  bool diagnosed_p = false;

  if (cgraph_dump_file)
    {
      fprintf (cgraph_dump_file, "Merging nodes for %s. Candidates:\n",
	       symtab_node_asm_name (first));
      for (e = first; e; e = e->symbol.next_sharing_asm_name)
	if (TREE_PUBLIC (e->symbol.decl))
	  dump_symtab_node (cgraph_dump_file, e);
    }

  /* Compute the symbol resolutions.  This is a no-op when using the
     linker plugin and resolution was decided by the linker.  */
  prevailing = lto_symtab_resolve_symbols (first);

  /* If there's no prevailing symbol yet, it's an external reference;
     this happens a lot during ltrans.  Choose the first symbol with a
     cgraph or a varpool node.  */
  if (!prevailing)
    {
      prevailing = first;
      /* For variables, choose with priority the variant with a vnode
	 attached (i.e. from the unit where the external declaration of
	 the variable is actually used).
	 When there are multiple variants, choose one with a size.
	 This is needed for C++ typeinfos; for example in
	 lto/20081204-1 there are typeinfos in both units, but only
	 one of them has a size.  */
      if (TREE_CODE (prevailing->symbol.decl) == VAR_DECL)
	{
	  for (e = prevailing->symbol.next_sharing_asm_name;
	       e; e = e->symbol.next_sharing_asm_name)
	    if (!COMPLETE_TYPE_P (TREE_TYPE (prevailing->symbol.decl))
		&& COMPLETE_TYPE_P (TREE_TYPE (e->symbol.decl))
		&& lto_symtab_symbol_p (e))
	      prevailing = e;
	}
      /* For functions prefer the non-builtin if one is available.  */
      else if (TREE_CODE (prevailing->symbol.decl) == FUNCTION_DECL)
	{
	  for (e = first; e; e = e->symbol.next_sharing_asm_name)
	    if (TREE_CODE (e->symbol.decl) == FUNCTION_DECL
		&& !DECL_BUILT_IN (e->symbol.decl)
		&& lto_symtab_symbol_p (e))
	      {
		prevailing = e;
		break;
	      }
	}
    }

  symtab_prevail_in_asm_name_hash (prevailing);

  /* Diagnose mismatched objects.  */
  for (e = prevailing->symbol.next_sharing_asm_name;
       e; e = e->symbol.next_sharing_asm_name)
    {
      if (TREE_CODE (prevailing->symbol.decl)
	  == TREE_CODE (e->symbol.decl))
	continue;
      if (!lto_symtab_symbol_p (e))
	continue;

      switch (TREE_CODE (prevailing->symbol.decl))
	{
	case VAR_DECL:
	  gcc_assert (TREE_CODE (e->symbol.decl) == FUNCTION_DECL);
	  error_at (DECL_SOURCE_LOCATION (e->symbol.decl),
		    "variable %qD redeclared as function",
		    prevailing->symbol.decl);
	  break;

	case FUNCTION_DECL:
	  gcc_assert (TREE_CODE (e->symbol.decl) == VAR_DECL);
	  error_at (DECL_SOURCE_LOCATION (e->symbol.decl),
		    "function %qD redeclared as variable",
		    prevailing->symbol.decl);
	  break;

	default:
	  gcc_unreachable ();
	}

      diagnosed_p = true;
    }
  if (diagnosed_p)
      inform (DECL_SOURCE_LOCATION (prevailing->symbol.decl),
	      "previously declared here");

  /* Merge the chain to the single prevailing decl and diagnose
     mismatches.  */
  lto_symtab_merge_decls_2 (prevailing, diagnosed_p);

  if (cgraph_dump_file)
    {
      fprintf (cgraph_dump_file, "After resolution:\n");
      for (e = prevailing; e; e = e->symbol.next_sharing_asm_name)
	dump_symtab_node (cgraph_dump_file, e);
    }
}
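
The fallback choice above — among symbols sharing one assembler name, prefer a variable whose type is complete — is just a guarded list walk. A minimal standalone sketch, with struct sym and complete_type_p as illustrative stand-ins for the symtab node and COMPLETE_TYPE_P:

struct sym
{
  struct sym *next_sharing_asm_name;
  int complete_type_p;   /* stand-in for COMPLETE_TYPE_P (TREE_TYPE (decl)) */
};

struct sym *
resolve_fallback_sketch (struct sym *first)
{
  struct sym *prevailing = first;
  for (struct sym *e = first->next_sharing_asm_name; e;
       e = e->next_sharing_asm_name)
    /* Only upgrade while the current choice is still incomplete.  */
    if (!prevailing->complete_type_p && e->complete_type_p)
      prevailing = e;
  return prevailing;
}
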
Example #29
PyObject *
get_operand_as_object(const_rtx in_rtx, int idx, char fmt)
{
    const char *str;

    /* The operand types are described in gcc/rtl.c */
    switch (fmt) {

    case 'T': /* pointer to a string, with special meaning */
        str = XTMPL (in_rtx, idx);
        goto string;

    case 'S': /* optional pointer to a string */
    case 's': /* a pointer to a string */
        str = XSTR (in_rtx, idx);
    string:
        return PyGccStringOrNone(str);

    case '0': /* unused, or used in a phase-dependent manner */
        Py_RETURN_NONE; /* for now */

    case 'e': /* pointer to an rtl expression */
        /* Nested expression: */
        return PyGccRtl_New(
                   gcc_private_make_rtl_insn(XEXP (in_rtx, idx)));

    case 'E':
    case 'V':
        /* Nested list of expressions */
        {
            PyObject *list = PyList_New(XVECLEN (in_rtx, idx));
            int j;
            if (!list) {
                return NULL;
            }
            for (j = 0; j < XVECLEN (in_rtx, idx); j++) {
                PyObject *item = PyGccRtl_New(
                                     gcc_private_make_rtl_insn(XVECEXP (in_rtx, idx, j)));
                if (!item) {
                    Py_DECREF(list);
                    return NULL;
                }
                if (-1 == PyList_Append(list, item)) {
                    Py_DECREF(item);
                    Py_DECREF(list);
                    return NULL;
                }
                Py_DECREF(item);
            }
            return list;
        }

    case 'w':
        return PyGccInt_FromLong(XWINT (in_rtx, idx));

    case 'i':
        return PyGccInt_FromLong(XINT (in_rtx, idx));

    case 'n':
        /* Return NOTE_INSN names rather than integer codes.  */
        return PyGccStringOrNone(GET_NOTE_INSN_NAME (XINT (in_rtx, idx)));

    case 'u': /* a pointer to another insn */
        Py_RETURN_NONE; /* for now */

    case 't':
        return PyGccTree_New(gcc_private_make_tree(XTREE (in_rtx, idx)));

    case '*':
        Py_RETURN_NONE; /* for now */

    case 'B':
        return PyGccBasicBlock_New(
            gcc_private_make_cfg_block(XBBDEF (in_rtx, idx)));

    default:
        gcc_unreachable ();
    }
}
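
The 'E'/'V' branch above leans on a CPython ownership rule: PyList_Append stores its own strong reference, so the caller must still Py_DECREF the item it created, on success as well as on failure. A minimal sketch of that idiom (build_list_sketch and the PyLong items are illustrative stand-ins for the PyGccRtl_New calls):

#include <Python.h>

static PyObject *
build_list_sketch (int n)
{
  PyObject *list = PyList_New (0);
  if (!list)
    return NULL;
  for (int i = 0; i < n; i++)
    {
      PyObject *item = PyLong_FromLong (i);
      if (!item || PyList_Append (list, item) < 0)
        {
          Py_XDECREF (item);   /* safe if item is NULL */
          Py_DECREF (list);
          return NULL;
        }
      Py_DECREF (item);        /* the list now holds its own reference */
    }
  return list;
}
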
Example #30
bool
AbstractTaskFactory::ValidateFAIOZs()
{
  ClearValidationErrors();
  bool valid = true;

  for (unsigned i = 0; i < task.TaskSize() && valid; i++) {
    const auto &tp = task.GetPoint(i);
    const auto ozsize = GetOZSize(tp.GetObservationZone());

    switch (GetType(tp)) {
    case TaskPointFactoryType::START_BGA:
    case TaskPointFactoryType::START_CYLINDER:
      valid = false;
      break;

    case TaskPointFactoryType::START_SECTOR:
      if (ozsize > fixed(1000.01))
        valid = false;

      break;
    case TaskPointFactoryType::START_LINE:
      if (ozsize > fixed(2000.01))
        valid = false;

      break;

    case TaskPointFactoryType::FAI_SECTOR:
      break;

    case TaskPointFactoryType::AST_CYLINDER:
      if (ozsize > fixed(500.01))
        valid = false;

      break;

    case TaskPointFactoryType::KEYHOLE_SECTOR:
    case TaskPointFactoryType::BGAFIXEDCOURSE_SECTOR:
    case TaskPointFactoryType::BGAENHANCEDOPTION_SECTOR:
    case TaskPointFactoryType::MAT_CYLINDER:
    case TaskPointFactoryType::AAT_CYLINDER:
    case TaskPointFactoryType::AAT_SEGMENT:
    case TaskPointFactoryType::AAT_ANNULAR_SECTOR:
    case TaskPointFactoryType::AAT_KEYHOLE:
    case TaskPointFactoryType::SYMMETRIC_QUADRANT:
      valid = false;
      break;

    case TaskPointFactoryType::FINISH_SECTOR:
      break;
    case TaskPointFactoryType::FINISH_LINE:
      if (ozsize > fixed(2000.01))
        valid = false;

      break;

    case TaskPointFactoryType::FINISH_CYLINDER:
      valid = false;
      break;

    case TaskPointFactoryType::COUNT:
      gcc_unreachable();
    }
  }

  if (!valid)
    AddValidationError(TaskValidationErrorType::NON_FAI_OZS);

  return valid;
}