Example #1
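/* Return true if T may be used directly as a GIMPLE operand (rvalue):
   a variable that needs no explicit load, an invariant, or one of the
   exception-handling pseudo-values.  */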
bool
is_gimple_val (tree t)
{
  /* Make loads from volatiles and memory vars explicit.  */
  if (is_gimple_variable (t)
      && is_gimple_reg_type (TREE_TYPE (t))
      && !is_gimple_reg (t))
    return false;

  /* FIXME make these decls.  That can happen only when we expose the
     entire landing-pad construct at the tree level.  */
  if (TREE_CODE (t) == EXC_PTR_EXPR || TREE_CODE (t) == FILTER_EXPR)
    return true;

  return (is_gimple_variable (t) || is_gimple_min_invariant (t));
}
Example #2
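/* Gimplify EXPR so that it is suitable as an operand, emitting any
   statements generated in the process into *STMTS.  GIMPLE_TEST_F is
   the predicate the result must satisfy.  If VAR is non-NULL, the
   result is assigned to it.  Return the resulting expression, or
   NULL_TREE when everything was emitted into *STMTS.  */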
tree
force_gimple_operand_1 (tree expr, gimple_seq *stmts,
                        gimple_predicate gimple_test_f, tree var)
{
    enum gimplify_status ret;
    location_t saved_location;

    *stmts = NULL;

    /* gimple_test_f might be more strict than is_gimple_val, make
       sure we pass both.  Just checking gimple_test_f doesn't work
       because most gimple predicates do not work recursively.  */
    if (is_gimple_val (expr)
            && (*gimple_test_f) (expr))
        return expr;

    push_gimplify_context (gimple_in_ssa_p (cfun), true);
    saved_location = input_location;
    input_location = UNKNOWN_LOCATION;

    if (var)
    {
        if (gimple_in_ssa_p (cfun) && is_gimple_reg (var))
            var = make_ssa_name (var, NULL);
        expr = build2 (MODIFY_EXPR, TREE_TYPE (var), var, expr);
    }

    if (TREE_CODE (expr) != MODIFY_EXPR
            && TREE_TYPE (expr) == void_type_node)
    {
        gimplify_and_add (expr, stmts);
        expr = NULL_TREE;
    }
    else
    {
        ret = gimplify_expr (&expr, stmts, NULL, gimple_test_f, fb_rvalue);
        gcc_assert (ret != GS_ERROR);
    }

    input_location = saved_location;
    pop_gimplify_context (NULL);

    return expr;
}
Example #3
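/* Look for floating-point divisions by DEF among its immediate uses,
   recording the basic block of each one; once enough divisions are
   found, it pays to compute the reciprocal of DEF once and turn the
   divisions into multiplications.  DEF_GSI points to the statement
   that defines DEF.  */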
static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p;
  imm_use_iterator use_iter;
  struct occurrence *occ;
  int count = 0, threshold;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt));
	  count++;
	}
    }

  /* Do the expensive part only if we can hope to optimize something.  */
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
  if (count >= threshold)
    {
      gimple use_stmt;

      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	if (is_division_by (use_stmt, def))
	  FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
	    replace_reciprocal (use_p);
    }

  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}
Example #4
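/* Visit the PHI node PHI.  If all executable arguments agree on a
   common copy-of value, make the result of PHI a copy of that value;
   otherwise the result is varying.  Return the propagator status
   (interesting, varying, or not interesting).  */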
static enum ssa_prop_result
copy_prop_visit_phi_node (gimple phi)
{
  enum ssa_prop_result retval;
  unsigned i;
  prop_value_t phi_val = { 0, NULL_TREE };

  tree lhs = gimple_phi_result (phi);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
      fprintf (dump_file, "\n\n");
    }

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      prop_value_t *arg_val;
      tree arg = gimple_phi_arg_def (phi, i);
      edge e = gimple_phi_arg_edge (phi, i);

      /* We don't care about values flowing through non-executable
	 edges.  */
      if (!(e->flags & EDGE_EXECUTABLE))
	continue;

      /* Constants in the argument list never generate a useful copy.
	 Similarly, names that flow through abnormal edges cannot be
	 used to derive copies.  */
      if (TREE_CODE (arg) != SSA_NAME || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (arg))
	{
	  phi_val.value = lhs;
	  break;
	}

      /* Avoid copy propagation from an inner into an outer loop.
	 Otherwise, this may move loop variant variables outside of
	 their loops and prevent coalescing opportunities.  If the
	 value was loop invariant, it will be hoisted by LICM and
	 exposed for copy propagation.  Not a problem for virtual
	 operands though.  */
      if (is_gimple_reg (lhs)
	  && loop_depth_of_name (arg) > loop_depth_of_name (lhs))
	{
	  phi_val.value = lhs;
	  break;
	}

      /* If the LHS appears in the argument list, ignore it.  It is
	 irrelevant as a copy.  */
      if (arg == lhs || get_last_copy_of (arg) == lhs)
	continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "\tArgument #%d: ", i);
	  dump_copy_of (dump_file, arg);
	  fprintf (dump_file, "\n");
	}

      arg_val = get_copy_of_val (arg);

      /* If the LHS didn't have a value yet, make it a copy of the
	 first argument we find.  Notice that while we make the LHS be
	 a copy of the argument itself, we take the memory reference
	 from the argument's value so that we can compare it to the
	 memory reference of all the other arguments.  */
      if (phi_val.value == NULL_TREE)
	{
	  phi_val.value = arg_val->value ? arg_val->value : arg;
	  continue;
	}

      /* If PHI_VAL and ARG don't have a common copy-of chain, then
	 this PHI node cannot be a copy operation.  Also, if we are
	 copy propagating stores and these two arguments came from
	 different memory references, they cannot be considered
	 copies.  */
      if (get_last_copy_of (phi_val.value) != get_last_copy_of (arg))
	{
	  phi_val.value = lhs;
	  break;
	}
    }

  if (phi_val.value && may_propagate_copy (lhs, phi_val.value)
      && set_copy_of_val (lhs, phi_val.value))
    retval = (phi_val.value != lhs) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
  else
    retval = SSA_PROP_NOT_INTERESTING;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nPHI node ");
      dump_copy_of (dump_file, lhs);
      fprintf (dump_file, "\nTelling the propagator to ");
      if (retval == SSA_PROP_INTERESTING)
	fprintf (dump_file, "add SSA edges out of this PHI and continue.");
      else if (retval == SSA_PROP_VARYING)
	fprintf (dump_file, "add SSA edges out of this PHI and never visit again.");
      else
	fprintf (dump_file, "do nothing with SSA edges and keep iterating.");
      fprintf (dump_file, "\n\n");
    }

  return retval;
}
Example #5
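/* Find tail calls ending basic block BB and prepend the candidates to
   the list *RET, checking that each call can be optimized as a tail
   call or as tail recursion, possibly with an accumulated addend A and
   multiplicand M.  */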
static void
find_tail_calls (basic_block bb, struct tailcall **ret)
{
  tree ass_var = NULL_TREE, ret_var, func, param;
  gimple stmt, call = NULL;
  gimple_stmt_iterator gsi, agsi;
  bool tail_recursion;
  struct tailcall *nw;
  edge e;
  tree m, a;
  basic_block abb;
  size_t idx;
  tree var;

  if (!single_succ_p (bb))
    return;

  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      stmt = gsi_stmt (gsi);

      /* Ignore labels, returns, clobbers and debug stmts.  */
      if (gimple_code (stmt) == GIMPLE_LABEL
	  || gimple_code (stmt) == GIMPLE_RETURN
	  || gimple_clobber_p (stmt)
	  || is_gimple_debug (stmt))
	continue;

      /* Check for a call.  */
      if (is_gimple_call (stmt))
	{
	  call = stmt;
	  ass_var = gimple_call_lhs (stmt);
	  break;
	}

      /* If the statement references memory or volatile operands, fail.  */
      if (gimple_references_memory_p (stmt)
	  || gimple_has_volatile_ops (stmt))
	return;
    }

  if (gsi_end_p (gsi))
    {
      edge_iterator ei;
      /* Recurse to the predecessors.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
	find_tail_calls (e->src, ret);

      return;
    }

  /* If the LHS of our call is not just a simple register, we can't
     transform this into a tail or sibling call.  This situation happens
     in, e.g., "*p = foo()" where foo returns a struct.  In this case
     we won't have a temporary here, but we need to carry out the side
     effect anyway, so tailcall is impossible.

     ??? In some situations (when the struct is returned in memory via
     invisible argument) we could deal with this, e.g. by passing 'p'
     itself as that argument to foo, but it's too early to do this here,
     and expand_call() will not handle it anyway.  If it ever can, then
     we need to revisit this here, to allow that situation.  */
  if (ass_var && !is_gimple_reg (ass_var))
    return;

  /* We found the call, check whether it is suitable.  */
  tail_recursion = false;
  func = gimple_call_fndecl (call);
  if (func
      && !DECL_BUILT_IN (func)
      && recursive_call_p (current_function_decl, func))
    {
      tree arg;

      for (param = DECL_ARGUMENTS (func), idx = 0;
	   param && idx < gimple_call_num_args (call);
	   param = DECL_CHAIN (param), idx++)
	{
	  arg = gimple_call_arg (call, idx);
	  if (param != arg)
	    {
	      /* Make sure there are no problems with copying.  The parameter
	         must have a copyable type and the two arguments must have
	         reasonably equivalent types.  The latter requirement could be
	         relaxed if we emitted a suitable type conversion statement.  */
	      if (!is_gimple_reg_type (TREE_TYPE (param))
		  || !useless_type_conversion_p (TREE_TYPE (param),
					         TREE_TYPE (arg)))
		break;

	      /* The parameter should be a real operand, so that the phi node
		 created for it at the start of the function has the meaning
		 of copying the value.  This test implies is_gimple_reg_type
		 from the previous condition, however this one could be
		 relaxed by being more careful with copying the new value
		 of the parameter (emitting appropriate GIMPLE_ASSIGN and
		 updating the virtual operands).  */
	      if (!is_gimple_reg (param))
		break;
	    }
	}
      if (idx == gimple_call_num_args (call) && !param)
	tail_recursion = true;
    }

  /* Make sure the tail invocation of this function does not refer
     to local variables.  */
  FOR_EACH_LOCAL_DECL (cfun, idx, var)
    {
      if (TREE_CODE (var) != PARM_DECL
	  && auto_var_in_fn_p (var, cfun->decl)
	  && (ref_maybe_used_by_stmt_p (call, var)
	      || call_may_clobber_ref_p (call, var)))
	return;
    }

  /* Now check the statements after the call.  None of them has virtual
     operands, so they may only depend on the call through its return
     value.  The return value should also be dependent on each of them,
     since we are running after dce.  */
  m = NULL_TREE;
  a = NULL_TREE;

  abb = bb;
  agsi = gsi;
  while (1)
    {
      tree tmp_a = NULL_TREE;
      tree tmp_m = NULL_TREE;
      gsi_next (&agsi);

      while (gsi_end_p (agsi))
	{
	  ass_var = propagate_through_phis (ass_var, single_succ_edge (abb));
	  abb = single_succ (abb);
	  agsi = gsi_start_bb (abb);
	}

      stmt = gsi_stmt (agsi);

      if (gimple_code (stmt) == GIMPLE_LABEL)
	continue;

      if (gimple_code (stmt) == GIMPLE_RETURN)
	break;

      if (gimple_clobber_p (stmt))
	continue;

      if (is_gimple_debug (stmt))
	continue;

      if (gimple_code (stmt) != GIMPLE_ASSIGN)
	return;

      /* This is a GIMPLE assignment.  */
      if (! process_assignment (stmt, gsi, &tmp_m, &tmp_a, &ass_var))
	return;

      if (tmp_a)
	{
	  tree type = TREE_TYPE (tmp_a);
	  if (a)
	    a = fold_build2 (PLUS_EXPR, type, fold_convert (type, a), tmp_a);
	  else
	    a = tmp_a;
	}
      if (tmp_m)
	{
	  tree type = TREE_TYPE (tmp_m);
	  if (m)
	    m = fold_build2 (MULT_EXPR, type, fold_convert (type, m), tmp_m);
	  else
	    m = tmp_m;

	  if (a)
	    a = fold_build2 (MULT_EXPR, type, fold_convert (type, a), tmp_m);
	}
    }

  /* See if this is a tail call we can handle.  */
  ret_var = gimple_return_retval (stmt);

  /* We may proceed if there either is no return value, or the return value
     is identical to the call's return.  */
  if (ret_var
      && (ret_var != ass_var))
    return;

  /* If this is not a tail recursive call, we cannot handle addends or
     multiplicands.  */
  if (!tail_recursion && (m || a))
    return;

  /* For pointers only allow additions.  */
  if (m && POINTER_TYPE_P (TREE_TYPE (DECL_RESULT (current_function_decl))))
    return;

  nw = XNEW (struct tailcall);

  nw->call_gsi = gsi;

  nw->tail_recursion = tail_recursion;

  nw->mult = m;
  nw->add = a;

  nw->next = *ret;
  *ret = nw;
}
Example #6
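/* Return true if ORIG, the source of a copy, may safely be propagated
   into all the uses of DEST.  */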
bool
may_propagate_copy (tree dest, tree orig)
{
  tree type_d = TREE_TYPE (dest);
  tree type_o = TREE_TYPE (orig);

  /* Do not copy between types for which we *do* need a conversion.  */
  if (!tree_ssa_useless_type_conversion_1 (type_d, type_o))
    return false;

  /* FIXME.  GIMPLE is allowing pointer assignments and comparisons of
     pointers that have different alias sets.  This means that these
     pointers will have different memory tags associated to them.

     If we allow copy propagation in these cases, statements de-referencing
     the new pointer will now have a reference to a different memory tag
     with potentially incorrect SSA information.

     This was showing up in libjava/java/util/zip/ZipFile.java with code
     like:

     	struct java.io.BufferedInputStream *T.660;
	struct java.io.BufferedInputStream *T.647;
	struct java.io.InputStream *is;
	struct java.io.InputStream *is.662;
	[ ... ]
	T.660 = T.647;
	is = T.660;	<-- This ought to be type-casted
	is.662 = is;

     Also, f/name.c exposed a similar problem with a COND_EXPR predicate
     that was causing DOM to generate an equivalence with two pointers of
     alias-incompatible types:

     	struct _ffename_space *n;
	struct _ffename *ns;
	[ ... ]
	if (n == ns)
	  goto lab;
	...
	lab:
	return n;

     I think that GIMPLE should emit the appropriate type-casts.  For the
     time being, blocking copy-propagation in these cases is the safe thing
     to do.  */
  if (TREE_CODE (dest) == SSA_NAME
      && TREE_CODE (orig) == SSA_NAME
      && POINTER_TYPE_P (type_d)
      && POINTER_TYPE_P (type_o))
    {
      tree mt_dest = var_ann (SSA_NAME_VAR (dest))->type_mem_tag;
      tree mt_orig = var_ann (SSA_NAME_VAR (orig))->type_mem_tag;
      if (mt_dest && mt_orig && mt_dest != mt_orig)
	return false;
      else if (!lang_hooks.types_compatible_p (type_d, type_o))
	return false;
      else if (get_alias_set (TREE_TYPE (type_d)) != 
	       get_alias_set (TREE_TYPE (type_o)))
	return false;
    }

  /* If the destination is an SSA_NAME for a virtual operand, then we have
     some special cases to handle.  */
  if (TREE_CODE (dest) == SSA_NAME && !is_gimple_reg (dest))
    {
      /* If both operands are SSA_NAMEs referring to virtual operands, then
	 we can always propagate.  */
      if (TREE_CODE (orig) == SSA_NAME
	  && !is_gimple_reg (orig))
	return true;

      /* We have a "copy" from something like a constant into a virtual
	 operand.  Reject these.  */
      return false;
    }

  /* If ORIG flows in from an abnormal edge, it cannot be propagated.  */
  if (TREE_CODE (orig) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig))
    return false;

  /* If DEST is an SSA_NAME that flows from an abnormal edge, then it
     cannot be replaced.  */
  if (TREE_CODE (dest) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (dest))
    return false;

  /* Anything else is OK.  */
  return true;
}
Example #7
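/* Re-gimplify the operands of statement STMT, inserting any statements
   generated in the process before the iterator *GSI_P.  If the
   regimplified RHS is no longer valid for the LHS, a temporary is
   created and a copy into the original LHS is emitted after STMT.  */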
void
gimple_regimplify_operands (gimple stmt, gimple_stmt_iterator *gsi_p)
{
    size_t i, num_ops;
    tree lhs;
    gimple_seq pre = NULL;
    gimple post_stmt = NULL;

    push_gimplify_context (gimple_in_ssa_p (cfun));

    switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
        gimplify_expr (gimple_cond_lhs_ptr (stmt), &pre, NULL,
                       is_gimple_val, fb_rvalue);
        gimplify_expr (gimple_cond_rhs_ptr (stmt), &pre, NULL,
                       is_gimple_val, fb_rvalue);
        break;
    case GIMPLE_SWITCH:
        gimplify_expr (gimple_switch_index_ptr (stmt), &pre, NULL,
                       is_gimple_val, fb_rvalue);
        break;
    case GIMPLE_OMP_ATOMIC_LOAD:
        gimplify_expr (gimple_omp_atomic_load_rhs_ptr (stmt), &pre, NULL,
                       is_gimple_val, fb_rvalue);
        break;
    case GIMPLE_ASM:
    {
        size_t i, noutputs = gimple_asm_noutputs (stmt);
        const char *constraint, **oconstraints;
        bool allows_mem, allows_reg, is_inout;

        oconstraints
            = (const char **) alloca (noutputs * sizeof (const char *));
        for (i = 0; i < noutputs; i++)
        {
            tree op = gimple_asm_output_op (stmt, i);
            constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (op)));
            oconstraints[i] = constraint;
            parse_output_constraint (&constraint, i, 0, 0, &allows_mem,
                                     &allows_reg, &is_inout);
            gimplify_expr (&TREE_VALUE (op), &pre, NULL,
                           is_inout ? is_gimple_min_lval : is_gimple_lvalue,
                           fb_lvalue | fb_mayfail);
        }
        for (i = 0; i < gimple_asm_ninputs (stmt); i++)
        {
            tree op = gimple_asm_input_op (stmt, i);
            constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (op)));
            parse_input_constraint (&constraint, 0, 0, noutputs, 0,
                                    oconstraints, &allows_mem, &allows_reg);
            if (TREE_ADDRESSABLE (TREE_TYPE (TREE_VALUE (op))) && allows_mem)
                allows_reg = 0;
            if (!allows_reg && allows_mem)
                gimplify_expr (&TREE_VALUE (op), &pre, NULL,
                               is_gimple_lvalue, fb_lvalue | fb_mayfail);
            else
                gimplify_expr (&TREE_VALUE (op), &pre, NULL,
                               is_gimple_asm_val, fb_rvalue);
        }
    }
    break;
    default:
        /* NOTE: We start gimplifying operands from last to first to
           make sure that side-effects on the RHS of calls, assignments
           and ASMs are executed before the LHS.  The ordering is not
           important for other statements.  */
        num_ops = gimple_num_ops (stmt);
        for (i = num_ops; i > 0; i--)
        {
            tree op = gimple_op (stmt, i - 1);
            if (op == NULL_TREE)
                continue;
            if (i == 1 && (is_gimple_call (stmt) || is_gimple_assign (stmt)))
                gimplify_expr (&op, &pre, NULL, is_gimple_lvalue, fb_lvalue);
            else if (i == 2
                     && is_gimple_assign (stmt)
                     && num_ops == 2
                     && get_gimple_rhs_class (gimple_expr_code (stmt))
                     == GIMPLE_SINGLE_RHS)
                gimplify_expr (&op, &pre, NULL,
                               rhs_predicate_for (gimple_assign_lhs (stmt)),
                               fb_rvalue);
            else if (i == 2 && is_gimple_call (stmt))
            {
                if (TREE_CODE (op) == FUNCTION_DECL)
                    continue;
                gimplify_expr (&op, &pre, NULL, is_gimple_call_addr, fb_rvalue);
            }
            else
                gimplify_expr (&op, &pre, NULL, is_gimple_val, fb_rvalue);
            gimple_set_op (stmt, i - 1, op);
        }

        lhs = gimple_get_lhs (stmt);
        /* If the LHS changed in a way that requires a simple RHS,
           create a temporary.  */
        if (lhs && !is_gimple_reg (lhs))
        {
            bool need_temp = false;

            if (is_gimple_assign (stmt)
                    && num_ops == 2
                    && get_gimple_rhs_class (gimple_expr_code (stmt))
                    == GIMPLE_SINGLE_RHS)
                gimplify_expr (gimple_assign_rhs1_ptr (stmt), &pre, NULL,
                               rhs_predicate_for (gimple_assign_lhs (stmt)),
                               fb_rvalue);
            else if (is_gimple_reg (lhs))
            {
                if (is_gimple_reg_type (TREE_TYPE (lhs)))
                {
                    if (is_gimple_call (stmt))
                    {
                        i = gimple_call_flags (stmt);
                        if ((i & ECF_LOOPING_CONST_OR_PURE)
                                || !(i & (ECF_CONST | ECF_PURE)))
                            need_temp = true;
                    }
                    if (stmt_can_throw_internal (stmt))
                        need_temp = true;
                }
            }
            else
            {
                if (is_gimple_reg_type (TREE_TYPE (lhs)))
                    need_temp = true;
                else if (TYPE_MODE (TREE_TYPE (lhs)) != BLKmode)
                {
                    if (is_gimple_call (stmt))
                    {
                        tree fndecl = gimple_call_fndecl (stmt);

                        if (!aggregate_value_p (TREE_TYPE (lhs), fndecl)
                                && !(fndecl && DECL_RESULT (fndecl)
                                     && DECL_BY_REFERENCE (DECL_RESULT (fndecl))))
                            need_temp = true;
                    }
                    else
                        need_temp = true;
                }
            }
            if (need_temp)
            {
                tree temp = create_tmp_reg (TREE_TYPE (lhs), NULL);
                if (gimple_in_ssa_p (cfun))
                    temp = make_ssa_name (temp, NULL);
                gimple_set_lhs (stmt, temp);
                post_stmt = gimple_build_assign (lhs, temp);
            }
        }
        break;
    }

    if (!gimple_seq_empty_p (pre))
        gsi_insert_seq_before (gsi_p, pre, GSI_SAME_STMT);
    if (post_stmt)
        gsi_insert_after (gsi_p, post_stmt, GSI_NEW_STMT);

    pop_gimplify_context (NULL);
}
Example #8
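/* Walk every operand of statement STMT, calling CALLBACK_OP on each one
   through walk_tree.  WI carries the walker state.  Stop and return the
   first non-NULL value returned by CALLBACK_OP, or NULL_TREE when the
   walk completes.  */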
tree
walk_gimple_op (gimple *stmt, walk_tree_fn callback_op,
		struct walk_stmt_info *wi)
{
  hash_set<tree> *pset = (wi) ? wi->pset : NULL;
  unsigned i;
  tree ret = NULL_TREE;

  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      /* Walk the RHS operands.  If the LHS is of a non-renamable type or
         is a register variable, we may use a COMPONENT_REF on the RHS.  */
      if (wi)
	{
	  tree lhs = gimple_assign_lhs (stmt);
	  wi->val_only
	    = (is_gimple_reg_type (TREE_TYPE (lhs)) && !is_gimple_reg (lhs))
	      || gimple_assign_rhs_class (stmt) != GIMPLE_SINGLE_RHS;
	}

      for (i = 1; i < gimple_num_ops (stmt); i++)
	{
	  ret = walk_tree (gimple_op_ptr (stmt, i), callback_op, wi,
			   pset);
	  if (ret)
	    return ret;
	}

      /* Walk the LHS.  If the RHS is appropriate for a memory, we
	 may use a COMPONENT_REF on the LHS.  */
      if (wi)
	{
          /* If the RHS is of a non-renamable type or is a register variable,
	     we may use a COMPONENT_REF on the LHS.  */
	  tree rhs1 = gimple_assign_rhs1 (stmt);
	  wi->val_only
	    = (is_gimple_reg_type (TREE_TYPE (rhs1)) && !is_gimple_reg (rhs1))
	      || gimple_assign_rhs_class (stmt) != GIMPLE_SINGLE_RHS;
	  wi->is_lhs = true;
	}

      ret = walk_tree (gimple_op_ptr (stmt, 0), callback_op, wi, pset);
      if (ret)
	return ret;

      if (wi)
	{
	  wi->val_only = true;
	  wi->is_lhs = false;
	}
      break;

    case GIMPLE_CALL:
      if (wi)
	{
	  wi->is_lhs = false;
	  wi->val_only = true;
	}

      ret = walk_tree (gimple_call_chain_ptr (as_a <gcall *> (stmt)),
		       callback_op, wi, pset);
      if (ret)
        return ret;

      ret = walk_tree (gimple_call_fn_ptr (stmt), callback_op, wi, pset);
      if (ret)
        return ret;

      for (i = 0; i < gimple_call_num_args (stmt); i++)
	{
	  if (wi)
	    wi->val_only
	      = is_gimple_reg_type (TREE_TYPE (gimple_call_arg (stmt, i)));
	  ret = walk_tree (gimple_call_arg_ptr (stmt, i), callback_op, wi,
			   pset);
	  if (ret)
	    return ret;
	}

      if (gimple_call_lhs (stmt))
	{
	  if (wi)
	    {
	      wi->is_lhs = true;
	      wi->val_only
		= is_gimple_reg_type (TREE_TYPE (gimple_call_lhs (stmt)));
	    }

	  ret = walk_tree (gimple_call_lhs_ptr (stmt), callback_op, wi, pset);
	  if (ret)
	    return ret;
	}

      if (wi)
	{
	  wi->is_lhs = false;
	  wi->val_only = true;
	}
      break;

    case GIMPLE_CATCH:
      ret = walk_tree (gimple_catch_types_ptr (as_a <gcatch *> (stmt)),
		       callback_op, wi, pset);
      if (ret)
	return ret;
      break;

    case GIMPLE_EH_FILTER:
      ret = walk_tree (gimple_eh_filter_types_ptr (stmt), callback_op, wi,
		       pset);
      if (ret)
	return ret;
      break;

    case GIMPLE_ASM:
      ret = walk_gimple_asm (as_a <gasm *> (stmt), callback_op, wi);
      if (ret)
	return ret;
      break;

    case GIMPLE_OMP_CONTINUE:
      {
	gomp_continue *cont_stmt = as_a <gomp_continue *> (stmt);
	ret = walk_tree (gimple_omp_continue_control_def_ptr (cont_stmt),
			 callback_op, wi, pset);
	if (ret)
	  return ret;

	ret = walk_tree (gimple_omp_continue_control_use_ptr (cont_stmt),
			 callback_op, wi, pset);
	if (ret)
	  return ret;
      }
      break;

    case GIMPLE_OMP_CRITICAL:
      {
	gomp_critical *omp_stmt = as_a <gomp_critical *> (stmt);
	ret = walk_tree (gimple_omp_critical_name_ptr (omp_stmt),
			 callback_op, wi, pset);
	if (ret)
	  return ret;
	ret = walk_tree (gimple_omp_critical_clauses_ptr (omp_stmt),
			 callback_op, wi, pset);
	if (ret)
	  return ret;
      }
      break;

    case GIMPLE_OMP_ORDERED:
      {
	gomp_ordered *omp_stmt = as_a <gomp_ordered *> (stmt);
	ret = walk_tree (gimple_omp_ordered_clauses_ptr (omp_stmt),
			 callback_op, wi, pset);
	if (ret)
	  return ret;
      }
      break;

    case GIMPLE_OMP_FOR:
      ret = walk_tree (gimple_omp_for_clauses_ptr (stmt), callback_op, wi,
		       pset);
      if (ret)
	return ret;
      for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
	{
	  ret = walk_tree (gimple_omp_for_index_ptr (stmt, i), callback_op,
			   wi, pset);
	  if (ret)
	    return ret;
	  ret = walk_tree (gimple_omp_for_initial_ptr (stmt, i), callback_op,
			   wi, pset);
	  if (ret)
	    return ret;
	  ret = walk_tree (gimple_omp_for_final_ptr (stmt, i), callback_op,
			   wi, pset);
	  if (ret)
	    return ret;
	  ret = walk_tree (gimple_omp_for_incr_ptr (stmt, i), callback_op,
			   wi, pset);
	  if (ret)
	    return ret;
	}
      break;

    case GIMPLE_OMP_PARALLEL:
      {
	gomp_parallel *omp_par_stmt = as_a <gomp_parallel *> (stmt);
	ret = walk_tree (gimple_omp_parallel_clauses_ptr (omp_par_stmt),
			 callback_op, wi, pset);
	if (ret)
	  return ret;
	ret = walk_tree (gimple_omp_parallel_child_fn_ptr (omp_par_stmt),
			 callback_op, wi, pset);
	if (ret)
	  return ret;
	ret = walk_tree (gimple_omp_parallel_data_arg_ptr (omp_par_stmt),
			 callback_op, wi, pset);
	if (ret)
	  return ret;
      }
      break;

    case GIMPLE_OMP_TASK:
      ret = walk_tree (gimple_omp_task_clauses_ptr (stmt), callback_op,
		       wi, pset);
      if (ret)
	return ret;
      ret = walk_tree (gimple_omp_task_child_fn_ptr (stmt), callback_op,
		       wi, pset);
      if (ret)
	return ret;
      ret = walk_tree (gimple_omp_task_data_arg_ptr (stmt), callback_op,
		       wi, pset);
      if (ret)
	return ret;
      ret = walk_tree (gimple_omp_task_copy_fn_ptr (stmt), callback_op,
		       wi, pset);
      if (ret)
	return ret;
      ret = walk_tree (gimple_omp_task_arg_size_ptr (stmt), callback_op,
		       wi, pset);
      if (ret)
	return ret;
      ret = walk_tree (gimple_omp_task_arg_align_ptr (stmt), callback_op,
		       wi, pset);
      if (ret)
	return ret;
      break;

    case GIMPLE_OMP_SECTIONS:
      ret = walk_tree (gimple_omp_sections_clauses_ptr (stmt), callback_op,
		       wi, pset);
      if (ret)
	return ret;
      ret = walk_tree (gimple_omp_sections_control_ptr (stmt), callback_op,
		       wi, pset);
      if (ret)
	return ret;

      break;

    case GIMPLE_OMP_SINGLE:
      ret = walk_tree (gimple_omp_single_clauses_ptr (stmt), callback_op, wi,
		       pset);
      if (ret)
	return ret;
      break;

    case GIMPLE_OMP_TARGET:
      {
	gomp_target *omp_stmt = as_a <gomp_target *> (stmt);
	ret = walk_tree (gimple_omp_target_clauses_ptr (omp_stmt),
			 callback_op, wi, pset);
	if (ret)
	  return ret;
	ret = walk_tree (gimple_omp_target_child_fn_ptr (omp_stmt),
			 callback_op, wi, pset);
	if (ret)
	  return ret;
	ret = walk_tree (gimple_omp_target_data_arg_ptr (omp_stmt),
			 callback_op, wi, pset);
	if (ret)
	  return ret;
      }
      break;

    case GIMPLE_OMP_TEAMS:
      ret = walk_tree (gimple_omp_teams_clauses_ptr (stmt), callback_op, wi,
		       pset);
      if (ret)
	return ret;
      break;

    case GIMPLE_OMP_ATOMIC_LOAD:
      {
	gomp_atomic_load *omp_stmt = as_a <gomp_atomic_load *> (stmt);
	ret = walk_tree (gimple_omp_atomic_load_lhs_ptr (omp_stmt),
			 callback_op, wi, pset);
	if (ret)
	  return ret;
	ret = walk_tree (gimple_omp_atomic_load_rhs_ptr (omp_stmt),
			 callback_op, wi, pset);
	if (ret)
	  return ret;
      }
      break;

    case GIMPLE_OMP_ATOMIC_STORE:
      {
	gomp_atomic_store *omp_stmt = as_a <gomp_atomic_store *> (stmt);
	ret = walk_tree (gimple_omp_atomic_store_val_ptr (omp_stmt),
			 callback_op, wi, pset);
	if (ret)
	  return ret;
      }
      break;

    case GIMPLE_TRANSACTION:
      ret = walk_tree (gimple_transaction_label_ptr (
			 as_a <gtransaction *> (stmt)),
		       callback_op, wi, pset);
      if (ret)
	return ret;
      break;

    case GIMPLE_OMP_RETURN:
      ret = walk_tree (gimple_omp_return_lhs_ptr (stmt), callback_op, wi,
		       pset);
      if (ret)
	return ret;
      break;

      /* Tuples that do not have operands.  */
    case GIMPLE_NOP:
    case GIMPLE_RESX:
    case GIMPLE_PREDICT:
      break;

    default:
      {
	enum gimple_statement_structure_enum gss;
	gss = gimple_statement_structure (stmt);
	if (gss == GSS_WITH_OPS || gss == GSS_WITH_MEM_OPS)
	  for (i = 0; i < gimple_num_ops (stmt); i++)
	    {
	      ret = walk_tree (gimple_op_ptr (stmt, i), callback_op, wi, pset);
	      if (ret)
		return ret;
	    }
      }
      break;
    }

  return NULL_TREE;
}
Example #9
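/* Remove from LOOP all statements whose index is not set in the
   PARTITION bitmap, visiting blocks in the same order as
   stmts_from_loop.  When COPY_P is true, operate on a copy of the loop
   inserted before it.  Return false if the loop copy failed.  */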
static bool
generate_loops_for_partition (struct loop *loop, bitmap partition, bool copy_p)
{
  unsigned i, x;
  gimple_stmt_iterator bsi;
  basic_block *bbs;

  if (copy_p)
    {
      loop = copy_loop_before (loop);
      create_preheader (loop, CP_SIMPLE_PREHEADERS);
      create_bb_after_loop (loop);
    }

  if (loop == NULL)
    return false;

  /* Remove stmts not in the PARTITION bitmap.  The order in which we
     visit the phi nodes and the statements is exactly as in
     stmts_from_loop.  */
  bbs = get_loop_body_in_dom_order (loop);

  if (MAY_HAVE_DEBUG_STMTS)
    for (x = 0, i = 0; i < loop->num_nodes; i++)
      {
	basic_block bb = bbs[i];

	for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	  if (!bitmap_bit_p (partition, x++))
	    reset_debug_uses (gsi_stmt (bsi));

	for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	  {
	    gimple stmt = gsi_stmt (bsi);
	    if (gimple_code (stmt) != GIMPLE_LABEL
		&& !is_gimple_debug (stmt)
		&& !bitmap_bit_p (partition, x++))
	      reset_debug_uses (stmt);
	  }
      }

  for (x = 0, i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi);)
	if (!bitmap_bit_p (partition, x++))
	  {
	    gimple phi = gsi_stmt (bsi);
	    if (!is_gimple_reg (gimple_phi_result (phi)))
	      mark_virtual_phi_result_for_renaming (phi);
	    remove_phi_node (&bsi, true);
	  }
	else
	  gsi_next (&bsi);

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi);)
	{
	  gimple stmt = gsi_stmt (bsi);
	  if (gimple_code (stmt) != GIMPLE_LABEL
	      && !is_gimple_debug (stmt)
	      && !bitmap_bit_p (partition, x++))
	    {
	      unlink_stmt_vdef (stmt);
	      gsi_remove (&bsi, true);
	      release_defs (stmt);
	    }
	  else
	    gsi_next (&bsi);
	}
    }

  free (bbs);
  return true;
}
Example #10
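/* If PARTITION contains exactly one memory write of an integer or
   floating-point zero, replace it with a memset executed before LOOP.
   When this is the last partition to be code-generated (COPY_P is
   false), destroy the loop.  Return true on success.  */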
static bool
generate_builtin (struct loop *loop, bitmap partition, bool copy_p)
{
  bool res = false;
  unsigned i, x = 0;
  basic_block *bbs;
  gimple write = NULL;
  tree op0, op1;
  gimple_stmt_iterator bsi;
  tree nb_iter = number_of_exit_cond_executions (loop);

  if (!nb_iter || nb_iter == chrec_dont_know)
    return false;

  bbs = get_loop_body_in_dom_order (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	x++;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  gimple stmt = gsi_stmt (bsi);

	  if (bitmap_bit_p (partition, x++)
	      && is_gimple_assign (stmt)
	      && !is_gimple_reg (gimple_assign_lhs (stmt)))
	    {
	      /* Don't generate the builtins when there are more than
		 one memory write.  */
	      if (write != NULL)
		goto end;

	      write = stmt;
	    }
	}
    }

  if (!write)
    goto end;

  op0 = gimple_assign_lhs (write);
  op1 = gimple_assign_rhs1 (write);

  if (!(TREE_CODE (op0) == ARRAY_REF
	|| TREE_CODE (op0) == INDIRECT_REF))
    goto end;

  /* The new statements will be placed before LOOP.  */
  bsi = gsi_last_bb (loop_preheader_edge (loop)->src);

  if (gimple_assign_rhs_code (write) == INTEGER_CST
      && (integer_zerop (op1) || real_zerop (op1)))
    res = generate_memset_zero (write, op0, nb_iter, bsi);

  /* If this is the last partition for which we generate code, we have
     to destroy the loop.  */
  if (res && !copy_p)
    {
      unsigned nbbs = loop->num_nodes;
      basic_block src = loop_preheader_edge (loop)->src;
      basic_block dest = single_exit (loop)->dest;
      prop_phis (dest);
      make_edge (src, dest, EDGE_FALLTHRU);
      cancel_loop_tree (loop);

      for (i = 0; i < nbbs; i++)
	delete_basic_block (bbs[i]);

      set_immediate_dominator (CDI_DOMINATORS, dest,
			       recompute_dominator (CDI_DOMINATORS, dest));
    }

 end:
  free (bbs);
  return res;
}
Example #11
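/* Coalesce SSA name copies: partition names related by copy statements
   and PHI nodes, then rename each member of a partition to the
   partition's root variable.  Return TODO_remove_unused_locals when
   anything was coalesced, 0 otherwise.  */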
static unsigned int
rename_ssa_copies (void)
{
  var_map map;
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree var, part_var;
  gimple stmt, phi;
  unsigned x;
  FILE *debug;
  bool updated = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    debug = dump_file;
  else
    debug = NULL;

  map = init_var_map (num_ssa_names);

  FOR_EACH_BB (bb)
    {
      /* Scan for real copies.  */
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  stmt = gsi_stmt (gsi);
	  if (gimple_assign_ssa_name_copy_p (stmt))
	    {
	      tree lhs = gimple_assign_lhs (stmt);
	      tree rhs = gimple_assign_rhs1 (stmt);

	      updated |= copy_rename_partition_coalesce (map, lhs, rhs, debug);
	    }
	}
    }

  FOR_EACH_BB (bb)
    {
      /* Treat PHI nodes as copies between the result and each argument.  */
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          size_t i;
	  tree res;

	  phi = gsi_stmt (gsi);
	  res = gimple_phi_result (phi);

	  /* Do not process virtual SSA_NAMES.  */
	  if (!is_gimple_reg (SSA_NAME_VAR (res)))
	    continue;

          for (i = 0; i < gimple_phi_num_args (phi); i++)
            {
              tree arg = gimple_phi_arg (phi, i)->def;
              if (TREE_CODE (arg) == SSA_NAME)
		updated |= copy_rename_partition_coalesce (map, res, arg, debug);
            }
        }
    }

  if (debug)
    dump_var_map (debug, map);

  /* Now one more pass to make all elements of a partition share the same
     root variable.  */

  for (x = 1; x < num_ssa_names; x++)
    {
      part_var = partition_to_var (map, x);
      if (!part_var)
        continue;
      var = ssa_name (x);
      if (debug)
        {
	  if (SSA_NAME_VAR (var) != SSA_NAME_VAR (part_var))
	    {
	      fprintf (debug, "Coalesced ");
	      print_generic_expr (debug, var, TDF_SLIM);
	      fprintf (debug, " to ");
	      print_generic_expr (debug, part_var, TDF_SLIM);
	      fprintf (debug, "\n");
	    }
	}
      replace_ssa_name_symbol (var, SSA_NAME_VAR (part_var));
    }

  delete_var_map (map);
  return updated ? TODO_remove_unused_locals : 0;
}
Example #12
/* Keep the alias information of ORIG_NAME and NEW_NAME consistent when
   one name is propagated for the other: synchronize their symbol memory
   tags and copy flow-sensitive points-to information where it is safe
   to do so.  */
void
merge_alias_info (tree orig_name, tree new_name)
{
    tree new_sym = SSA_NAME_VAR (new_name);
    tree orig_sym = SSA_NAME_VAR (orig_name);
    var_ann_t new_ann = var_ann (new_sym);
    var_ann_t orig_ann = var_ann (orig_sym);

    /* No merging necessary when memory partitions are involved.  */
    if (factoring_name_p (new_name))
    {
        gcc_assert (!is_gimple_reg (orig_sym));
        return;
    }
    else if (factoring_name_p (orig_name))
    {
        gcc_assert (!is_gimple_reg (new_sym));
        return;
    }

    gcc_assert (POINTER_TYPE_P (TREE_TYPE (orig_name))
                && POINTER_TYPE_P (TREE_TYPE (new_name)));

#if defined ENABLE_CHECKING
    gcc_assert (useless_type_conversion_p (TREE_TYPE (orig_name),
                                           TREE_TYPE (new_name)));

    /* Check that flow-sensitive information is compatible.  Notice that
       we may not merge flow-sensitive information here.  This function
       is called when propagating equivalences dictated by the IL, like
       a copy operation P_i = Q_j, and from equivalences dictated by
       control-flow, like if (P_i == Q_j).

       In the former case, P_i and Q_j are equivalent in every block
       dominated by the assignment, so their flow-sensitive information
       is always the same.  However, in the latter case, the pointers
       P_i and Q_j are only equivalent in one of the sub-graphs out of
       the predicate, so their flow-sensitive information is not the
       same in every block dominated by the predicate.

       Since we cannot distinguish one case from another in this
       function, we can only make sure that if P_i and Q_j have
       flow-sensitive information, they should be compatible.

       As callers of merge_alias_info are supposed to call may_propagate_copy
       first, the following check is redundant.  Thus, only do it if checking
       is enabled.  */
    if (SSA_NAME_PTR_INFO (orig_name) && SSA_NAME_PTR_INFO (new_name))
    {
        struct ptr_info_def *orig_ptr_info = SSA_NAME_PTR_INFO (orig_name);
        struct ptr_info_def *new_ptr_info = SSA_NAME_PTR_INFO (new_name);

        /* Note that pointers NEW and ORIG may actually have different
           pointed-to variables (e.g., PR 18291 represented in
           testsuite/gcc.c-torture/compile/pr18291.c).  However, since
           NEW is being copy-propagated into ORIG, it must always be
           true that the pointed-to set for pointer NEW is the same, or
           a subset, of the pointed-to set for pointer ORIG.  If this
           isn't the case, we shouldn't have been able to do the
           propagation of NEW into ORIG.  */
        if (orig_ptr_info->name_mem_tag
                && new_ptr_info->name_mem_tag
                && orig_ptr_info->pt_vars
                && new_ptr_info->pt_vars)
            gcc_assert (bitmap_intersect_p (new_ptr_info->pt_vars,
                                            orig_ptr_info->pt_vars));
    }
#endif

    /* Synchronize the symbol tags.  If both pointers had a tag and they
       are different, then something has gone wrong.  Symbol tags can
       always be merged because they are flow insensitive, all the SSA
       names of the same base DECL share the same symbol tag.  */
    if (new_ann->symbol_mem_tag == NULL_TREE)
        new_ann->symbol_mem_tag = orig_ann->symbol_mem_tag;
    else if (orig_ann->symbol_mem_tag == NULL_TREE)
        orig_ann->symbol_mem_tag = new_ann->symbol_mem_tag;
    else
        gcc_assert (new_ann->symbol_mem_tag == orig_ann->symbol_mem_tag);

    /* Copy flow-sensitive alias information in case that NEW_NAME
       didn't get a NMT but was set to pt_anything for optimization
       purposes.  In case ORIG_NAME has a NMT we can safely use its
       flow-sensitive alias information as a conservative estimate.  */
    if (SSA_NAME_PTR_INFO (orig_name)
            && SSA_NAME_PTR_INFO (orig_name)->name_mem_tag
            && (!SSA_NAME_PTR_INFO (new_name)
                || !SSA_NAME_PTR_INFO (new_name)->name_mem_tag))
    {
        struct ptr_info_def *orig_ptr_info = SSA_NAME_PTR_INFO (orig_name);
        struct ptr_info_def *new_ptr_info = get_ptr_info (new_name);
        memcpy (new_ptr_info, orig_ptr_info, sizeof (struct ptr_info_def));
    }
}
Example #13
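/* Expand all variables used in the function: lay out variables not
   associated with any block scope, then those in the block tree,
   partition the collected stack variables to save stack space, and
   finally align the frame offset if the target requires it.  */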
static void
expand_used_vars (void)
{
  tree t, outer_block = DECL_INITIAL (current_function_decl);

  /* Compute the phase of the stack frame for this function.  */
  {
    int align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
    int off = STARTING_FRAME_OFFSET % align;
    frame_phase = off ? align - off : 0;
  }

  /* Set TREE_USED on all variables in the unexpanded_var_list.  */
  for (t = cfun->unexpanded_var_list; t; t = TREE_CHAIN (t))
    TREE_USED (TREE_VALUE (t)) = 1;

  /* Clear TREE_USED on all variables associated with a block scope.  */
  clear_tree_used (outer_block);

  /* At this point all variables on the unexpanded_var_list with TREE_USED
     set are not associated with any block scope.  Lay them out.  */
  for (t = cfun->unexpanded_var_list; t; t = TREE_CHAIN (t))
    {
      tree var = TREE_VALUE (t);
      bool expand_now = false;

      /* We didn't set a block for static or extern because it's hard
	 to tell the difference between a global variable (re)declared
	 in a local scope, and one that's really declared there to
	 begin with.  And it doesn't really matter much, since we're
	 not giving them stack space.  Expand them now.  */
      if (TREE_STATIC (var) || DECL_EXTERNAL (var))
	expand_now = true;

      /* Any variable that could have been hoisted into an SSA_NAME
	 will have been propagated anywhere the optimizers chose,
	 i.e. not confined to their original block.  Allocate them
	 as if they were defined in the outermost scope.  */
      else if (is_gimple_reg (var))
	expand_now = true;

      /* If the variable is not associated with any block, then it
	 was created by the optimizers, and could be live anywhere
	 in the function.  */
      else if (TREE_USED (var))
	expand_now = true;

      /* Finally, mark all variables on the list as used.  We'll use
	 this in a moment when we expand those associated with scopes.  */
      TREE_USED (var) = 1;

      if (expand_now)
	expand_one_var (var, true);
    }
  cfun->unexpanded_var_list = NULL_TREE;

  /* At this point, all variables within the block tree with TREE_USED
     set are actually used by the optimized function.  Lay them out.  */
  expand_used_vars_for_block (outer_block, true);

  if (stack_vars_num > 0)
    {
      /* Due to the way alias sets work, no variables with non-conflicting
	 alias sets may be assigned the same address.  Add conflicts to 
	 reflect this.  */
      add_alias_set_conflicts ();

      /* Now that we have collected all stack variables, and have computed a 
	 minimal interference graph, attempt to save some stack space.  */
      partition_stack_vars ();
      if (dump_file)
	dump_stack_var_partition ();

      /* Assign rtl to each variable based on these partitions.  */
      expand_stack_vars ();

      /* Free up stack variable graph data.  */
      XDELETEVEC (stack_vars);
      XDELETEVEC (stack_vars_sorted);
      XDELETEVEC (stack_vars_conflict);
      stack_vars = NULL;
      stack_vars_alloc = stack_vars_num = 0;
      stack_vars_conflict = NULL;
      stack_vars_conflict_alloc = 0;
    }

  /* If the target requires that FRAME_OFFSET be aligned, do it.  */
  if (STACK_ALIGNMENT_NEEDED)
    {
      HOST_WIDE_INT align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
      if (!FRAME_GROWS_DOWNWARD)
	frame_offset += align - 1;
      frame_offset &= -align;
    }
}
Example #14
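/* Initialize the copy propagator: mark which statements and PHI nodes
   must be simulated during propagation, and make every definition that
   will not be simulated a copy of itself.  */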
static void
init_copy_prop (void)
{
  basic_block bb;

  copy_of = XCNEWVEC (prop_value_t, num_ssa_names);

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator si;
      int depth = bb->loop_depth;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple stmt = gsi_stmt (si);
	  ssa_op_iter iter;
          tree def;

	  /* The only statements that we care about are those that may
	     generate useful copies.  We also need to mark conditional
	     jumps so that their outgoing edges are added to the work
	     lists of the propagator.

	     Avoid copy propagation from an inner into an outer loop.
	     Otherwise, this may move loop variant variables outside of
	     their loops and prevent coalescing opportunities.  If the
	     value was loop invariant, it will be hoisted by LICM and
	     exposed for copy propagation.
	     ???  This doesn't make sense.  */
	  if (stmt_ends_bb_p (stmt))
            prop_set_simulate_again (stmt, true);
	  else if (stmt_may_generate_copy (stmt)
                   /* Since we are iterating over the statements in
                      BB, not the phi nodes, STMT will always be an
                      assignment.  */
                   && loop_depth_of_name (gimple_assign_rhs1 (stmt)) <= depth)
            prop_set_simulate_again (stmt, true);
	  else
            prop_set_simulate_again (stmt, false);

	  /* Mark all the outputs of this statement as not being
	     the copy of anything.  */
	  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
            if (!prop_simulate_again_p (stmt))
	      set_copy_of_val (def, def);
	}

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
          gimple phi = gsi_stmt (si);
          tree def;

	  def = gimple_phi_result (phi);
	  if (!is_gimple_reg (def))
            prop_set_simulate_again (phi, false);
	  else
            prop_set_simulate_again (phi, true);

	  if (!prop_simulate_again_p (phi))
	    set_copy_of_val (def, def);
	}
    }
}
Example #15
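/* If PARTITION consists of a single zero store whose data reference
   covers adjacent memory, replace it with a memset executed before
   LOOP.  When this is the last partition to be code-generated (COPY_P
   is false), destroy the loop.  Return true on success.  */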
static bool
generate_builtin (struct loop *loop, bitmap partition, bool copy_p)
{
  bool res = false;
  unsigned i, x = 0;
  basic_block *bbs;
  gimple write = NULL;
  gimple_stmt_iterator bsi;
  tree nb_iter = number_of_exit_cond_executions (loop);

  if (!nb_iter || nb_iter == chrec_dont_know)
    return false;

  bbs = get_loop_body_in_dom_order (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	x++;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  gimple stmt = gsi_stmt (bsi);

	  if (gimple_code (stmt) == GIMPLE_LABEL
	      || is_gimple_debug (stmt))
	    continue;

	  if (!bitmap_bit_p (partition, x++))
	    continue;

	  /* If the stmt has uses outside of the loop fail.  */
	  if (stmt_has_scalar_dependences_outside_loop (stmt))
	    goto end;

	  if (is_gimple_assign (stmt)
	      && !is_gimple_reg (gimple_assign_lhs (stmt)))
	    {
	      /* Don't generate the builtins when there are more than
		 one memory write.  */
	      if (write != NULL)
		goto end;

	      write = stmt;
	      if (bb == loop->latch)
		nb_iter = number_of_latch_executions (loop);
	    }
	}
    }

  if (!write || !stmt_with_adjacent_zero_store_dr_p (write))
    goto end;

  /* The new statements will be placed before LOOP.  */
  bsi = gsi_last_bb (loop_preheader_edge (loop)->src);
  generate_memset_zero (write, gimple_assign_lhs (write), nb_iter, bsi);
  res = true;

  /* If this is the last partition for which we generate code, we have
     to destroy the loop.  */
  if (!copy_p)
    {
      unsigned nbbs = loop->num_nodes;
      edge exit = single_exit (loop);
      basic_block src = loop_preheader_edge (loop)->src, dest = exit->dest;
      redirect_edge_pred (exit, src);
      exit->flags &= ~(EDGE_TRUE_VALUE|EDGE_FALSE_VALUE);
      exit->flags |= EDGE_FALLTHRU;
      cancel_loop_tree (loop);
      rescan_loop_exit (exit, false, true);

      for (i = 0; i < nbbs; i++)
	delete_basic_block (bbs[i]);

      set_immediate_dominator (CDI_DOMINATORS, dest,
			       recompute_dominator (CDI_DOMINATORS, dest));
    }

 end:
  free (bbs);
  return res;
}
Example #16
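/* Initialize the copy propagator: mark which statements and PHI nodes
   are to be simulated, make non-simulated definitions copies of
   themselves, and cache every other definition as its own last copy.
   Single-argument PHI nodes are not simulated in loop-closed SSA
   form.  */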
static void
init_copy_prop (void)
{
  basic_block bb;

  copy_of = XCNEWVEC (prop_value_t, num_ssa_names);

  cached_last_copy_of = XCNEWVEC (tree, num_ssa_names);

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator si;
      int depth = bb->loop_depth;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple stmt = gsi_stmt (si);
	  ssa_op_iter iter;
          tree def;

	  /* The only statements that we care about are those that may
	     generate useful copies.  We also need to mark conditional
	     jumps so that their outgoing edges are added to the work
	     lists of the propagator.

	     Avoid copy propagation from an inner into an outer loop.
	     Otherwise, this may move loop variant variables outside of
	     their loops and prevent coalescing opportunities.  If the
	     value was loop invariant, it will be hoisted by LICM and
	     exposed for copy propagation.  */
	  if (stmt_ends_bb_p (stmt))
            prop_set_simulate_again (stmt, true);
	  else if (stmt_may_generate_copy (stmt)
                   /* Since we are iterating over the statements in
                      BB, not the phi nodes, STMT will always be an
                      assignment.  */
                   && loop_depth_of_name (gimple_assign_rhs1 (stmt)) <= depth)
            prop_set_simulate_again (stmt, true);
	  else
            prop_set_simulate_again (stmt, false);

	  /* Mark all the outputs of this statement as not being
	     the copy of anything.  */
	  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
            if (!prop_simulate_again_p (stmt))
	      set_copy_of_val (def, def);
	    else
	      cached_last_copy_of[SSA_NAME_VERSION (def)] = def;
	}

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
          gimple phi = gsi_stmt (si);
          tree def;

	  def = gimple_phi_result (phi);
	  if (!is_gimple_reg (def)
	      /* In loop-closed SSA form do not copy-propagate through
	         PHI nodes.  Technically this is only needed for loop
		 exit PHIs, but this is difficult to query.  */
	      || (current_loops
		  && gimple_phi_num_args (phi) == 1
		  && loops_state_satisfies_p (LOOP_CLOSED_SSA)))
            prop_set_simulate_again (phi, false);
	  else
            prop_set_simulate_again (phi, true);

	  if (!prop_simulate_again_p (phi))
	    set_copy_of_val (def, def);
	  else
	    cached_last_copy_of[SSA_NAME_VERSION (def)] = def;
	}
    }
}
Example #17
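/* Return true if LHS is a GIMPLE register of complex type.  */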
static bool
is_complex_reg (tree lhs)
{
  return TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE && is_gimple_reg (lhs);
}
Example #18
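/* Verify the arguments of PHI node PHI against the predecessor edges of
   basic block BB.  DEFINITION_BLOCK maps each SSA version to its
   defining block.  Return true if an error was found.  */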
static bool
verify_phi_args (tree phi, basic_block bb, basic_block *definition_block)
{
  edge e;
  bool err = false;
  unsigned i, phi_num_args = PHI_NUM_ARGS (phi);

  if (EDGE_COUNT (bb->preds) != phi_num_args)
    {
      error ("incoming edge count does not match number of PHI arguments");
      err = true;
      goto error;
    }

  for (i = 0; i < phi_num_args; i++)
    {
      use_operand_p op_p = PHI_ARG_DEF_PTR (phi, i);
      tree op = USE_FROM_PTR (op_p);


      e = EDGE_PRED (bb, i);

      if (op == NULL_TREE)
	{
	  error ("PHI argument is missing for edge %d->%d",
	         e->src->index,
		 e->dest->index);
	  err = true;
	  goto error;
	}

      if (TREE_CODE (op) != SSA_NAME && !is_gimple_min_invariant (op))
	{
	  error ("PHI argument is not SSA_NAME, or invariant");
	  err = true;
	}

      if (TREE_CODE (op) == SSA_NAME)
	err = verify_use (e->src, definition_block[SSA_NAME_VERSION (op)], op_p,
			  phi, e->flags & EDGE_ABNORMAL,
			  !is_gimple_reg (PHI_RESULT (phi)),
			  NULL);

      if (e->dest != bb)
	{
	  error ("wrong edge %d->%d for PHI argument",
	         e->src->index, e->dest->index);
	  err = true;
	}

      if (err)
	{
	  fprintf (stderr, "PHI argument\n");
	  print_generic_stmt (stderr, op, TDF_VOPS);
	  goto error;
	}
    }

error:
  if (err)
    {
      fprintf (stderr, "for PHI node\n");
      print_generic_stmt (stderr, phi, TDF_VOPS);
    }


  return err;
}