Code example #1
void
backprop::process_block (basic_block bb)
{
  for (gimple_stmt_iterator gsi = gsi_last_bb (bb); !gsi_end_p (gsi);
       gsi_prev (&gsi))
    {
      tree lhs = gimple_get_lhs (gsi_stmt (gsi));
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
	process_var (lhs);
    }
  for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi);
       gsi_next (&gpi))
    process_var (gimple_phi_result (gpi.phi ()));
}
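
A note on the idiom: a reverse walk (start at gsi_last_bb, step with
gsi_prev) visits every use of an SSA name before the statement that
defines it, which is what a backward-propagation pass needs.  A minimal
standalone sketch, using only the iterator calls that appear in these
examples; the helper name walk_block_backwards is hypothetical:

/* Hypothetical helper: visit every statement of BB from last to first,
   as backprop::process_block does above.  */
static void
walk_block_backwards (basic_block bb)
{
  for (gimple_stmt_iterator gsi = gsi_last_bb (bb); !gsi_end_p (gsi);
       gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      /* Process STMT here; all later statements of BB have already
	 been visited.  */
      (void) stmt;
    }
}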
Code example #2
File: tree-outof-ssa.c  Project: Quantumboost/gcc
static void
set_location_for_edge (edge e)
{
  if (e->goto_locus)
    {
      set_curr_insn_source_location (e->goto_locus);
      set_curr_insn_block (e->goto_block);
    }
  else
    {
      basic_block bb = e->src;
      gimple_stmt_iterator gsi;

      do
	{
	  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
	    {
	      gimple stmt = gsi_stmt (gsi);
	      if (is_gimple_debug (stmt))
		continue;
	      if (gimple_has_location (stmt) || gimple_block (stmt))
		{
		  set_curr_insn_source_location (gimple_location (stmt));
		  set_curr_insn_block (gimple_block (stmt));
		  return;
		}
	    }
	  /* Nothing found in this basic block.  Make a best-effort
	     attempt to continue with another block.  */
	  if (single_pred_p (bb))
	    bb = single_pred (bb);
	  else
	    bb = e->src;
	}
      while (bb != e->src);
    }
}
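
The forward counterpart of this walk starts at gsi_start_bb and advances
with gsi_next, skipping debug statements just as the loop above does.  A
minimal sketch under the same API assumptions (this era of GCC spells the
statement type as plain gimple; the helper name is hypothetical):

/* Hypothetical helper: visit every non-debug statement of BB in order.  */
static void
walk_block_forwards (basic_block bb)
{
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	continue;	/* Debug stmts must not influence code generation.  */
      /* ... process STMT ...  */
    }
}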
Code example #3
File: tree-mudflap.c  Project: ChaosJohn/gcc
static void
mf_build_check_statement_for (tree base, tree limit,
                              gimple_stmt_iterator *instr_gsi,
                              location_t location, tree dirflag)
{
  gimple_stmt_iterator gsi;
  basic_block cond_bb, then_bb, join_bb;
  edge e;
  tree cond, t, u, v;
  tree mf_base;
  tree mf_elem;
  tree mf_limit;
  gimple g;
  gimple_seq seq, stmts;

  /* We first need to split the current basic block, and start altering
     the CFG.  This allows us to insert the statements we're about to
     construct into the right basic blocks.  */

  cond_bb = gimple_bb (gsi_stmt (*instr_gsi));
  gsi = *instr_gsi;
  gsi_prev (&gsi);
  if (! gsi_end_p (gsi))
    e = split_block (cond_bb, gsi_stmt (gsi));
  else
    e = split_block_after_labels (cond_bb);
  cond_bb = e->src;
  join_bb = e->dest;

  /* A recap at this point: join_bb is the basic block at whose head
     is the gimple statement for which this check expression is being
     built.  cond_bb is the (possibly new, synthetic) basic block the
     end of which will contain the cache-lookup code, and a
     conditional that jumps to the cache-miss code or, much more
     likely, over to join_bb.  */

  /* Create the bb that contains the cache-miss fallback block (mf_check).  */
  then_bb = create_empty_bb (cond_bb);
  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
  make_single_succ_edge (then_bb, join_bb, EDGE_FALLTHRU);

  /* Mark the pseudo-fallthrough edge from cond_bb to join_bb.  */
  e = find_edge (cond_bb, join_bb);
  e->flags = EDGE_FALSE_VALUE;
  e->count = cond_bb->count;
  e->probability = REG_BR_PROB_BASE;
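
  /* At this point the CFG has the shape:

	 cond_bb ---EDGE_TRUE_VALUE---> then_bb
	    |                              |
	 EDGE_FALSE_VALUE             EDGE_FALLTHRU
	    |                              |
	    +---------> join_bb <----------+

     where the TRUE edge into then_bb is the unlikely cache-miss path.  */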

  /* Update dominance info.  Note that join_bb's data was
     updated by split_block.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
      set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb);
    }

  /* Update loop info.  */
  if (current_loops)
    add_bb_to_loop (then_bb, cond_bb->loop_father);

  /* Build our local variables.  */
  mf_elem = create_tmp_reg (mf_cache_structptr_type, "__mf_elem");
  mf_base = create_tmp_reg (mf_uintptr_type, "__mf_base");
  mf_limit = create_tmp_reg (mf_uintptr_type, "__mf_limit");

  /* Build: __mf_base = (uintptr_t) <base address expression>.  */
  seq = NULL;
  t = fold_convert_loc (location, mf_uintptr_type,
			unshare_expr (base));
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_base, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build: __mf_limit = (uintptr_t) <limit address expression>.  */
  t = fold_convert_loc (location, mf_uintptr_type,
			unshare_expr (limit));
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_limit, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build: __mf_elem = &__mf_lookup_cache [(__mf_base >> __mf_shift)
                                            & __mf_mask].  */
  t = build2 (RSHIFT_EXPR, mf_uintptr_type, mf_base,
              flag_mudflap_threads ? mf_cache_shift_decl
	       : mf_cache_shift_decl_l);
  t = build2 (BIT_AND_EXPR, mf_uintptr_type, t,
              flag_mudflap_threads ? mf_cache_mask_decl
	       : mf_cache_mask_decl_l);
  t = build4 (ARRAY_REF,
              TREE_TYPE (TREE_TYPE (mf_cache_array_decl)),
              mf_cache_array_decl, t, NULL_TREE, NULL_TREE);
  t = build1 (ADDR_EXPR, mf_cache_structptr_type, t);
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_elem, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Quick validity check.

     if (__mf_elem->low > __mf_base
         || (__mf_elem->high < __mf_limit))
        {
          __mf_check ();
          ... and only if single-threaded:
          __mf_lookup_shift_l = ...;
          __mf_lookup_mask_l = ...;
        }

     It is expected that this body of code is rarely executed so we mark
     the edge to the THEN clause of the conditional jump as unlikely.  */

  /* Construct t <-- '__mf_elem->low  > __mf_base'.  */
  t = build3 (COMPONENT_REF, mf_uintptr_type,
              build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem),
              TYPE_FIELDS (mf_cache_struct_type), NULL_TREE);
  t = build2 (GT_EXPR, boolean_type_node, t, mf_base);

  /* Construct '__mf_elem->high < __mf_limit'.

     First build:
        1) u <--  '__mf_elem->high'
        2) v <--  '__mf_limit'.

     Then build 'u <-- (u < v)'.  */

  u = build3 (COMPONENT_REF, mf_uintptr_type,
              build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem),
              DECL_CHAIN (TYPE_FIELDS (mf_cache_struct_type)), NULL_TREE);

  v = mf_limit;

  u = build2 (LT_EXPR, boolean_type_node, u, v);

  /* Build the composed conditional: t <-- 't || u'.  Then store the
     result of the evaluation of 't' in a temporary variable which we
     can use as the condition for the conditional jump.  */
  t = build2 (TRUTH_OR_EXPR, boolean_type_node, t, u);
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  cond = create_tmp_reg (boolean_type_node, "__mf_unlikely_cond");
  g = gimple_build_assign (cond, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build the conditional jump.  'cond' is just a temporary so we can
     simply build a void COND_EXPR.  We do need labels in both arms though.  */
  g = gimple_build_cond (NE_EXPR, cond, boolean_false_node, NULL_TREE,
			 NULL_TREE);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* At this point, after so much hard work, we have only constructed
     the conditional jump,

     if (__mf_elem->low > __mf_base
         || (__mf_elem->high < __mf_limit))

     The lowered GIMPLE tree representing this code is in the statement
     sequence 'seq'.

     We can insert this now in the current basic block, i.e. the one that
     the statement we're instrumenting was originally in.  */
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);

  /*  Now build up the body of the cache-miss handling:

     __mf_check();
     refresh *_l vars.

     This is the body of the conditional.  */

  seq = NULL;
  /* u is a string, so it is already a gimple value.  */
  u = mf_file_function_line_tree (location);
  /* NB: we pass the overall [base..limit] range to mf_check.  */
  v = fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type,
		   fold_build2_loc (location,
				MINUS_EXPR, mf_uintptr_type, mf_limit, mf_base),
		   build_int_cst (mf_uintptr_type, 1));
  v = force_gimple_operand (v, &stmts, true, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_call (mf_check_fndecl, 4, mf_base, v, dirflag, u);
  gimple_seq_add_stmt (&seq, g);

  if (! flag_mudflap_threads)
    {
      if (stmt_ends_bb_p (g))
	{
	  gsi = gsi_start_bb (then_bb);
	  gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);
	  e = split_block (then_bb, g);
	  then_bb = e->dest;
	  seq = NULL;
	}

      g = gimple_build_assign (mf_cache_shift_decl_l, mf_cache_shift_decl);
      gimple_seq_add_stmt (&seq, g);

      g = gimple_build_assign (mf_cache_mask_decl_l, mf_cache_mask_decl);
      gimple_seq_add_stmt (&seq, g);
    }

  /* Insert the check code in the THEN block.  */
  gsi = gsi_start_bb (then_bb);
  gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);

  *instr_gsi = gsi_start_bb (join_bb);
}
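
The pattern that dominates this function: accumulate statements into a
gimple_seq, then splice the whole sequence into the CFG with a single
gsi_insert_seq_after call.  A minimal sketch using the same calls (the
helper and its name are hypothetical, not part of tree-mudflap.c):

/* Hypothetical helper: build LHS = RHS at LOCATION and append it
   after *GSI, the way mf_build_check_statement_for emits its checks.  */
static void
emit_assign_after (gimple_stmt_iterator *gsi, tree lhs, tree rhs,
		   location_t location)
{
  gimple_seq seq = NULL;
  gimple g = gimple_build_assign (lhs, rhs);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);
  gsi_insert_seq_after (gsi, seq, GSI_CONTINUE_LINKING);
}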
Code example #4
File: tree-tailcall.c  Project: CookieChen/gcc
static void
find_tail_calls (basic_block bb, struct tailcall **ret)
{
  tree ass_var = NULL_TREE, ret_var, func, param;
  gimple stmt, call = NULL;
  gimple_stmt_iterator gsi, agsi;
  bool tail_recursion;
  struct tailcall *nw;
  edge e;
  tree m, a;
  basic_block abb;
  size_t idx;
  tree var;

  if (!single_succ_p (bb))
    return;

  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      stmt = gsi_stmt (gsi);

      /* Ignore labels, returns, clobbers and debug stmts.  */
      if (gimple_code (stmt) == GIMPLE_LABEL
	  || gimple_code (stmt) == GIMPLE_RETURN
	  || gimple_clobber_p (stmt)
	  || is_gimple_debug (stmt))
	continue;

      /* Check for a call.  */
      if (is_gimple_call (stmt))
	{
	  call = stmt;
	  ass_var = gimple_call_lhs (stmt);
	  break;
	}

      /* If the statement references memory or volatile operands, fail.  */
      if (gimple_references_memory_p (stmt)
	  || gimple_has_volatile_ops (stmt))
	return;
    }

  if (gsi_end_p (gsi))
    {
      edge_iterator ei;
      /* Recurse to the predecessors.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
	find_tail_calls (e->src, ret);

      return;
    }

  /* If the LHS of our call is not just a simple register, we can't
     transform this into a tail or sibling call.  This situation happens
     in, e.g., "*p = foo()" where foo returns a struct.  In this case
     we won't have a temporary here, but we need to carry out the side
     effect anyway, so tailcall is impossible.

     ??? In some situations (when the struct is returned in memory via
     invisible argument) we could deal with this, e.g. by passing 'p'
     itself as that argument to foo, but it's too early to do this here,
     and expand_call() will not handle it anyway.  If it ever can, then
     we need to revisit this here, to allow that situation.  */
  if (ass_var && !is_gimple_reg (ass_var))
    return;

  /* We found the call, check whether it is suitable.  */
  tail_recursion = false;
  func = gimple_call_fndecl (call);
  if (func
      && !DECL_BUILT_IN (func)
      && recursive_call_p (current_function_decl, func))
    {
      tree arg;

      for (param = DECL_ARGUMENTS (func), idx = 0;
	   param && idx < gimple_call_num_args (call);
	   param = DECL_CHAIN (param), idx++)
	{
	  arg = gimple_call_arg (call, idx);
	  if (param != arg)
	    {
	      /* Make sure there are no problems with copying.  The parameter
	         must have a copyable type and the two arguments must have
	         reasonably equivalent types.  The latter requirement could
	         be relaxed if we emitted a suitable type conversion
	         statement.  */
	      if (!is_gimple_reg_type (TREE_TYPE (param))
		  || !useless_type_conversion_p (TREE_TYPE (param),
					         TREE_TYPE (arg)))
		break;

	      /* The parameter should be a real operand, so that the phi node
		 created for it at the start of the function has the meaning
		 of copying the value.  This test implies is_gimple_reg_type
		 from the previous condition, however this one could be
		 relaxed by being more careful with copying the new value
		 of the parameter (emitting appropriate GIMPLE_ASSIGN and
		 updating the virtual operands).  */
	      if (!is_gimple_reg (param))
		break;
	    }
	}
      if (idx == gimple_call_num_args (call) && !param)
	tail_recursion = true;
    }

  /* Make sure the tail invocation of this function does not refer
     to local variables.  */
  FOR_EACH_LOCAL_DECL (cfun, idx, var)
    {
      if (TREE_CODE (var) != PARM_DECL
	  && auto_var_in_fn_p (var, cfun->decl)
	  && (ref_maybe_used_by_stmt_p (call, var)
	      || call_may_clobber_ref_p (call, var)))
	return;
    }

  /* Now check the statements after the call.  None of them has virtual
     operands, so they may only depend on the call through its return
     value.  The return value should also be dependent on each of them,
     since we are running after dce.  */
  m = NULL_TREE;
  a = NULL_TREE;

  abb = bb;
  agsi = gsi;
  while (1)
    {
      tree tmp_a = NULL_TREE;
      tree tmp_m = NULL_TREE;
      gsi_next (&agsi);

      while (gsi_end_p (agsi))
	{
	  ass_var = propagate_through_phis (ass_var, single_succ_edge (abb));
	  abb = single_succ (abb);
	  agsi = gsi_start_bb (abb);
	}

      stmt = gsi_stmt (agsi);

      if (gimple_code (stmt) == GIMPLE_LABEL)
	continue;

      if (gimple_code (stmt) == GIMPLE_RETURN)
	break;

      if (gimple_clobber_p (stmt))
	continue;

      if (is_gimple_debug (stmt))
	continue;

      if (gimple_code (stmt) != GIMPLE_ASSIGN)
	return;

      /* This is a gimple assign. */
      if (! process_assignment (stmt, gsi, &tmp_m, &tmp_a, &ass_var))
	return;

      if (tmp_a)
	{
	  tree type = TREE_TYPE (tmp_a);
	  if (a)
	    a = fold_build2 (PLUS_EXPR, type, fold_convert (type, a), tmp_a);
	  else
	    a = tmp_a;
	}
      if (tmp_m)
	{
	  tree type = TREE_TYPE (tmp_m);
	  if (m)
	    m = fold_build2 (MULT_EXPR, type, fold_convert (type, m), tmp_m);
	  else
	    m = tmp_m;

	  if (a)
	    a = fold_build2 (MULT_EXPR, type, fold_convert (type, a), tmp_m);
	}
    }
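
  /* Worked example: for a tail such as
	t1 = x_1 + 2;  t2 = t1 * 3;  return t2;
     (x_1 being the call's result), the first iteration sets a = 2, the
     second sets m = 3 and rescales a to 2 * 3 = 6, so the value finally
     returned is m * x_1 + a = 3 * x_1 + 6.  */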

  /* See if this is a tail call we can handle.  */
  ret_var = gimple_return_retval (stmt);

  /* We may proceed if either there is no return value, or the return
     value is identical to the call's return value.  */
  if (ret_var
      && (ret_var != ass_var))
    return;

  /* If this is not a tail recursive call, we cannot handle addends or
     multiplicands.  */
  if (!tail_recursion && (m || a))
    return;

  /* For pointers only allow additions.  */
  if (m && POINTER_TYPE_P (TREE_TYPE (DECL_RESULT (current_function_decl))))
    return;

  nw = XNEW (struct tailcall);

  nw->call_gsi = gsi;

  nw->tail_recursion = tail_recursion;

  nw->mult = m;
  nw->add = a;

  nw->next = *ret;
  *ret = nw;
}
Code example #5
File: tree-chkp-opt.c  Project: paranoiacblack/gcc
/* Find memcpy, mempcpy, memmove and memset calls, perform
   checks before call and then call no_chk version of
   functions.  We do it on O2 to enable inlining of these
   functions during expand.

   Also try to find memcpy, mempcpy, memmove and memset calls
   which are known to not write pointers to memory and use
   faster function versions for them.  */
static void
chkp_optimize_string_function_calls (void)
{
    basic_block bb;

    if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Searching for replaceable string function calls...\n");

    FOR_EACH_BB_FN (bb, cfun)
    {
        gimple_stmt_iterator i;

        for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
        {
            gimple *stmt = gsi_stmt (i);
            tree fndecl;

            if (gimple_code (stmt) != GIMPLE_CALL
                    || !gimple_call_with_bounds_p (stmt))
                continue;

            fndecl = gimple_call_fndecl (stmt);

            if (!fndecl || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
                continue;

            if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMCPY_CHKP
                    || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMPCPY_CHKP
                    || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMMOVE_CHKP
                    || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMSET_CHKP)
            {
                tree dst = gimple_call_arg (stmt, 0);
                tree dst_bnd = gimple_call_arg (stmt, 1);
                bool is_memset = DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMSET_CHKP;
                tree size = gimple_call_arg (stmt, is_memset ? 3 : 4);
                tree fndecl_nochk;
                gimple_stmt_iterator j;
                basic_block check_bb;
                address_t size_val;
                int sign;
                bool known;

                /* We may replace the call with the corresponding
                   __chkp_*_nobnd version when the destination pointer's
                   base type is neither void nor a pointer.  */
                if (POINTER_TYPE_P (TREE_TYPE (dst))
                        && !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (dst)))
                        && !chkp_type_has_pointer (TREE_TYPE (TREE_TYPE (dst))))
                {
                    tree fndecl_nobnd
                        = chkp_get_nobnd_fndecl (DECL_FUNCTION_CODE (fndecl));

                    if (fndecl_nobnd)
                        fndecl = fndecl_nobnd;
                }

                fndecl_nochk = chkp_get_nochk_fndecl (DECL_FUNCTION_CODE (fndecl));

                if (fndecl_nochk)
                    fndecl = fndecl_nochk;

                if (fndecl != gimple_call_fndecl (stmt))
                {
                    if (dump_file && (dump_flags & TDF_DETAILS))
                    {
                        fprintf (dump_file, "Replacing call: ");
                        print_gimple_stmt (dump_file, stmt, 0,
                                           TDF_VOPS|TDF_MEMSYMS);
                    }

                    gimple_call_set_fndecl (stmt, fndecl);

                    if (dump_file && (dump_flags & TDF_DETAILS))
                    {
                        fprintf (dump_file, "With a new call: ");
                        print_gimple_stmt (dump_file, stmt, 0,
                                           TDF_VOPS|TDF_MEMSYMS);
                    }
                }

                /* If there is no nochk version of the function then
                   do nothing.  Otherwise insert checks before
                   the call.  */
                if (!fndecl_nochk)
                    continue;

                /* If the size passed to the call is known and > 0
                   then we may insert checks unconditionally.  */
                size_val.pol.create (0);
                chkp_collect_value (size, size_val);
                known = chkp_is_constant_addr (size_val, &sign);
                size_val.pol.release ();

                /* If we are not sure the size is nonzero then we have
                   to emit a runtime test and perform the checks only
                   when the size is nonzero.  */
                if (!known)
                {
                    gimple *check = gimple_build_cond (NE_EXPR,
                                                       size,
                                                       size_zero_node,
                                                       NULL_TREE,
                                                       NULL_TREE);

                    /* Split block before string function call.  */
                    gsi_prev (&i);
                    check_bb = insert_cond_bb (bb, gsi_stmt (i), check);

                    /* Set position for checks.  */
                    j = gsi_last_bb (check_bb);

                    /* The block was split and therefore we
                       need to reset the iterator to its end.  */
                    i = gsi_last_bb (bb);
                }
                /* If the size is known to be zero then no checks
                   should be performed.  */
                else if (!sign)
                    continue;
                else
                    j = i;

                size = size_binop (MINUS_EXPR, size, size_one_node);
                if (!is_memset)
                {
                    tree src = gimple_call_arg (stmt, 2);
                    tree src_bnd = gimple_call_arg (stmt, 3);

                    chkp_check_mem_access (src, fold_build_pointer_plus (src, size),
                                           src_bnd, j, gimple_location (stmt),
                                           integer_zero_node);
                }

                chkp_check_mem_access (dst, fold_build_pointer_plus (dst, size),
                                       dst_bnd, j, gimple_location (stmt),
                                       integer_one_node);

            }
        }
    }
}
Code example #6
File: tree-tailcall.c  Project: chinabin/gcc-tiny
static void
eliminate_tail_call (struct tailcall *t)
{
  tree param, rslt;
  gimple *stmt, *call;
  tree arg;
  size_t idx;
  basic_block bb, first;
  edge e;
  gphi *phi;
  gphi_iterator gpi;
  gimple_stmt_iterator gsi;
  gimple *orig_stmt;

  stmt = orig_stmt = gsi_stmt (t->call_gsi);
  bb = gsi_bb (t->call_gsi);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Eliminated tail recursion in bb %d : ",
	       bb->index);
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
      fprintf (dump_file, "\n");
    }

  gcc_assert (is_gimple_call (stmt));

  first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));

  /* Remove the code after call_gsi that will become unreachable.  The
     possibly unreachable code in other blocks is removed later in
     cfg cleanup.  */
  gsi = t->call_gsi;
  gimple_stmt_iterator gsi2 = gsi_last_bb (gimple_bb (gsi_stmt (gsi)));
  while (gsi_stmt (gsi2) != gsi_stmt (gsi))
    {
      gimple *t = gsi_stmt (gsi2);
      /* Do not remove the return statement, so that redirect_edge_and_branch
	 sees how the block ends.  */
      if (gimple_code (t) != GIMPLE_RETURN)
	{
	  gimple_stmt_iterator gsi3 = gsi2;
	  gsi_prev (&gsi2);
	  gsi_remove (&gsi3, true);
	  release_defs (t);
	}
      else
	gsi_prev (&gsi2);
    }

  /* The number of executions of the function is reduced by the
     tail call.  */
  e = single_succ_edge (gsi_bb (t->call_gsi));
  decrease_profile (EXIT_BLOCK_PTR_FOR_FN (cfun), e->count, EDGE_FREQUENCY (e));
  decrease_profile (ENTRY_BLOCK_PTR_FOR_FN (cfun), e->count,
		    EDGE_FREQUENCY (e));
  if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
    decrease_profile (e->dest, e->count, EDGE_FREQUENCY (e));

  /* Replace the call by a jump to the start of function.  */
  e = redirect_edge_and_branch (single_succ_edge (gsi_bb (t->call_gsi)),
				first);
  gcc_assert (e);
  PENDING_STMT (e) = NULL;

  /* Add phi node entries for arguments.  The ordering of the phi nodes should
     be the same as the ordering of the arguments.  */
  for (param = DECL_ARGUMENTS (current_function_decl),
	 idx = 0, gpi = gsi_start_phis (first);
       param;
       param = DECL_CHAIN (param), idx++)
    {
      if (!arg_needs_copy_p (param))
	continue;

      arg = gimple_call_arg (stmt, idx);
      phi = gpi.phi ();
      gcc_assert (param == SSA_NAME_VAR (PHI_RESULT (phi)));

      add_phi_arg (phi, arg, e, gimple_location (stmt));
      gsi_next (&gpi);
    }

  /* Update the values of accumulators.  */
  adjust_accumulator_values (t->call_gsi, t->mult, t->add, e);

  call = gsi_stmt (t->call_gsi);
  rslt = gimple_call_lhs (call);
  if (rslt != NULL_TREE)
    {
      /* Result of the call will no longer be defined.  So adjust the
	 SSA_NAME_DEF_STMT accordingly.  */
      SSA_NAME_DEF_STMT (rslt) = gimple_build_nop ();
    }

  gsi_remove (&t->call_gsi, true);
  release_defs (call);
}
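
The removal loop near the top of this function shows the safe way to
delete a statement while iterating: keep a copy of the iterator on the
doomed statement, step the live iterator off it first, then remove
through the copy and release the SSA definitions.  A minimal sketch of
that idiom (the helper name is hypothetical):

/* Hypothetical helper: remove the statement at *GSI2 during a backward
   walk, keeping *GSI2 valid, as eliminate_tail_call does above.  */
static void
remove_stmt_backwards (gimple_stmt_iterator *gsi2)
{
  gimple *t = gsi_stmt (*gsi2);
  gimple_stmt_iterator gsi3 = *gsi2;	/* Copy still points at T.  */
  gsi_prev (gsi2);			/* Step the live iterator back.  */
  gsi_remove (&gsi3, true);		/* Unlink T from the block.  */
  release_defs (t);			/* Release T's SSA definitions.  */
}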