Example no. 1
/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx insn, rtx last)
{
  sched_rgn_n_insns++;

  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case, where we have a partially dead
	 instruction scheduled after the last control flow instruction.
	 In this case we can create a new basic block.  Exactly one such
	 block is created, and it is always the last in the sequence.  */

      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
			   && !IS_SPECULATION_CHECK_P (insn)
			   && BB_HEAD (last_bb) != insn
			   && BB_END (last_bb) == insn);

      {
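	/* The instruction that follows INSN must be a note or a label when
	   a fallthru edge exists, and a barrier otherwise.  */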
	rtx x;

	x = NEXT_INSN (insn);
	if (e)
	  gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
	else
	  gcc_checking_assert (BARRIER_P (x));
      }

      if (e)
	{
	  bb = split_edge (e);
	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
	}
      else
	/* Create an empty unreachable block after the INSN.  */
	bb = create_basic_block (NEXT_INSN (insn), NULL_RTX, last_bb);

      /* split_edge () creates BB before E->DEST.  Keep in mind that this
	 operation extends the scheduling region to the end of BB.  Hence,
	 we need to shift NEXT_TAIL so that haifa-sched.c won't go out of
	 the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}
Example no. 2
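/* Attempt to shrink-wrap the built-in call BI_CALL behind guard conditions
   so that it is only executed on the unlikely error paths computed by the
   generated conditions.  Return true if the transformation was performed,
   false otherwise.  */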
static bool
shrink_wrap_one_built_in_call (gimple bi_call)
{
  gimple_stmt_iterator bi_call_bsi;
  basic_block bi_call_bb, join_tgt_bb, guard_bb, guard_bb0;
  edge join_tgt_in_edge_from_call, join_tgt_in_edge_fall_thru;
  edge bi_call_in_edge0, guard_bb_in_edge;
  unsigned tn_cond_stmts, nconds;
  unsigned ci;
  gimple cond_expr = NULL;
  gimple cond_expr_start;
  tree bi_call_label_decl;
  gimple bi_call_label;

  auto_vec<gimple, 12> conds;
  gen_shrink_wrap_conditions (bi_call, conds, &nconds);

  /* This can happen if the condition generator decides
     it is not beneficial to do the transformation.  Just
     return false and do not do any transformation for
     the call.  */
  if (nconds == 0)
    return false;

  bi_call_bb = gimple_bb (bi_call);

  /* Now find the join target bb -- split bi_call_bb if needed.  */
  if (stmt_ends_bb_p (bi_call))
    {
      /* If the call must be the last in the bb, don't split the block;
	 it could e.g. have EH edges.  */
      join_tgt_in_edge_from_call = find_fallthru_edge (bi_call_bb->succs);
      if (join_tgt_in_edge_from_call == NULL)
        return false;
    }
  else
    join_tgt_in_edge_from_call = split_block (bi_call_bb, bi_call);

  bi_call_bsi = gsi_for_stmt (bi_call);

  join_tgt_bb = join_tgt_in_edge_from_call->dest;

  /* Now it is time to insert the first conditional expression
     into bi_call_bb and split this bb so that bi_call is
     shrink-wrapped.  */
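  /* The statements for each guard condition are stored consecutively in
     CONDS, with a NULL entry separating consecutive groups; the last
     statement of each group is the GIMPLE_COND itself.  */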
  tn_cond_stmts = conds.length ();
  cond_expr = NULL;
  cond_expr_start = conds[0];
  for (ci = 0; ci < tn_cond_stmts; ci++)
    {
      gimple c = conds[ci];
      gcc_assert (c || ci != 0);
      if (!c)
        break;
      gsi_insert_before (&bi_call_bsi, c, GSI_SAME_STMT);
      cond_expr = c;
    }
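  /* One condition has been emitted; step past the NULL entry that
     separates it from the next group in CONDS.  */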
  nconds--;
  ci++;
  gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND);

  /* Now the label.  */
  bi_call_label_decl = create_artificial_label (gimple_location (bi_call));
  bi_call_label = gimple_build_label (bi_call_label_decl);
  gsi_insert_before (&bi_call_bsi, bi_call_label, GSI_SAME_STMT);

  bi_call_in_edge0 = split_block (bi_call_bb, cond_expr);
  bi_call_in_edge0->flags &= ~EDGE_FALLTHRU;
  bi_call_in_edge0->flags |= EDGE_TRUE_VALUE;
  guard_bb0 = bi_call_bb;
  bi_call_bb = bi_call_in_edge0->dest;
  join_tgt_in_edge_fall_thru = make_edge (guard_bb0, join_tgt_bb,
                                          EDGE_FALSE_VALUE);

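  /* The call path is expected to be taken only rarely: give its incoming
     edge the small fixed probability ERR_PROB and split GUARD_BB0's
     execution count between the call edge and the fall-through edge
     accordingly.  */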
  bi_call_in_edge0->probability = REG_BR_PROB_BASE * ERR_PROB;
  bi_call_in_edge0->count =
      apply_probability (guard_bb0->count,
			 bi_call_in_edge0->probability);
  join_tgt_in_edge_fall_thru->probability =
      inverse_probability (bi_call_in_edge0->probability);
  join_tgt_in_edge_fall_thru->count =
      guard_bb0->count - bi_call_in_edge0->count;

  /* Code generation for the rest of the conditions.  */
  guard_bb = guard_bb0;
  while (nconds > 0)
    {
      unsigned ci0;
      edge bi_call_in_edge;
      gimple_stmt_iterator guard_bsi = gsi_for_stmt (cond_expr_start);
      ci0 = ci;
      cond_expr_start = conds[ci0];
      for (; ci < tn_cond_stmts; ci++)
        {
          gimple c = conds[ci];
          gcc_assert (c || ci != ci0);
          if (!c)
            break;
          gsi_insert_before (&guard_bsi, c, GSI_SAME_STMT);
          cond_expr = c;
        }
      nconds--;
      ci++;
      gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND);
      guard_bb_in_edge = split_block (guard_bb, cond_expr);
      guard_bb_in_edge->flags &= ~EDGE_FALLTHRU;
      guard_bb_in_edge->flags |= EDGE_FALSE_VALUE;

      bi_call_in_edge = make_edge (guard_bb, bi_call_bb, EDGE_TRUE_VALUE);

      bi_call_in_edge->probability = REG_BR_PROB_BASE * ERR_PROB;
      bi_call_in_edge->count =
	  apply_probability (guard_bb->count,
			     bi_call_in_edge->probability);
      guard_bb_in_edge->probability =
          inverse_probability (bi_call_in_edge->probability);
      guard_bb_in_edge->count = guard_bb->count - bi_call_in_edge->count;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      location_t loc;
      loc = gimple_location (bi_call);
      fprintf (dump_file,
               "%s:%d: note: function call is shrink-wrapped"
               " into error conditions.\n",
               LOCATION_FILE (loc), LOCATION_LINE (loc));
    }

  return true;
}
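/* Shrink-wrap BI_CALL so that it is only executed on the unlikely error
   paths selected by the NCONDS guard conditions in CONDS, updating the
   CFG, profile counts and, where available, dominator information.  */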
static void
shrink_wrap_one_built_in_call_with_conds (gcall *bi_call, vec <gimple *> conds,
					  unsigned int nconds)
{
  gimple_stmt_iterator bi_call_bsi;
  basic_block bi_call_bb, join_tgt_bb, guard_bb;
  edge join_tgt_in_edge_from_call, join_tgt_in_edge_fall_thru;
  edge bi_call_in_edge0, guard_bb_in_edge;
  unsigned tn_cond_stmts;
  unsigned ci;
  gimple *cond_expr = NULL;
  gimple *cond_expr_start;

  /* The cfg we want to create looks like this:

	   [guard n-1]         <- guard_bb (old block)
	     |    \
	     | [guard n-2]                   }
	     |    / \                        }
	     |   /  ...                      } new blocks
	     |  /  [guard 0]                 }
	     | /    /   |                    }
	    [ call ]    |     <- bi_call_bb  }
	     | \        |
	     |  \       |
	     |   [ join ]     <- join_tgt_bb (old iff call must end bb)
	     |
	 possible EH edges (only if [join] is old)

     When [join] is new, the immediate dominators for these blocks are:

     1. [guard n-1]: unchanged
     2. [call]: [guard n-1]
     3. [guard m]: [guard m+1] for 0 <= m <= n-2
     4. [join]: [guard n-1]

     We punt for the more complex case of [join] being old and
     simply free the dominance info.  We also punt on postdominators,
     which aren't expected to be available at this point anyway.  */
  bi_call_bb = gimple_bb (bi_call);

  /* Now find the join target bb -- split bi_call_bb if needed.  */
  if (stmt_ends_bb_p (bi_call))
    {
      /* We checked that there was a fallthrough edge in
	 can_guard_call_p.  */
      join_tgt_in_edge_from_call = find_fallthru_edge (bi_call_bb->succs);
      gcc_assert (join_tgt_in_edge_from_call);
      /* We don't want to handle PHIs.  */
      if (EDGE_COUNT (join_tgt_in_edge_from_call->dest->preds) > 1)
	join_tgt_bb = split_edge (join_tgt_in_edge_from_call);
      else
	{
	  join_tgt_bb = join_tgt_in_edge_from_call->dest;
	  /* We may have degenerate PHIs in the destination.  Propagate
	     those out.  */
	  for (gphi_iterator i = gsi_start_phis (join_tgt_bb); !gsi_end_p (i);)
	    {
	      gphi *phi = i.phi ();
	      replace_uses_by (gimple_phi_result (phi),
			       gimple_phi_arg_def (phi, 0));
	      remove_phi_node (&i, true);
	    }
	}
    }
  else
    {
      join_tgt_in_edge_from_call = split_block (bi_call_bb, bi_call);
      join_tgt_bb = join_tgt_in_edge_from_call->dest;
    }

  bi_call_bsi = gsi_for_stmt (bi_call);

  /* Now it is time to insert the first conditional expression
     into bi_call_bb and split this bb so that bi_call is
     shrink-wrapped.  */
  tn_cond_stmts = conds.length ();
  cond_expr = NULL;
  cond_expr_start = conds[0];
  for (ci = 0; ci < tn_cond_stmts; ci++)
    {
      gimple *c = conds[ci];
      gcc_assert (c || ci != 0);
      if (!c)
        break;
      gsi_insert_before (&bi_call_bsi, c, GSI_SAME_STMT);
      cond_expr = c;
    }
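  /* Step past the NULL entry that terminates the first condition group.  */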
  ci++;
  gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND);

  typedef std::pair<edge, edge> edge_pair;
  auto_vec<edge_pair, 8> edges;
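  /* Each entry pairs the edge into the call block with the edge that
     bypasses the call, so that probabilities and counts can be assigned
     once all the guards have been emitted.  */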

  bi_call_in_edge0 = split_block (bi_call_bb, cond_expr);
  bi_call_in_edge0->flags &= ~EDGE_FALLTHRU;
  bi_call_in_edge0->flags |= EDGE_FALSE_VALUE;
  guard_bb = bi_call_bb;
  bi_call_bb = bi_call_in_edge0->dest;
  join_tgt_in_edge_fall_thru = make_edge (guard_bb, join_tgt_bb,
                                          EDGE_TRUE_VALUE);

  edges.reserve (nconds);
  edges.quick_push (edge_pair (bi_call_in_edge0, join_tgt_in_edge_fall_thru));

  /* Code generation for the rest of the conditions.  */
  for (unsigned int i = 1; i < nconds; ++i)
    {
      unsigned ci0;
      edge bi_call_in_edge;
      gimple_stmt_iterator guard_bsi = gsi_for_stmt (cond_expr_start);
      ci0 = ci;
      cond_expr_start = conds[ci0];
      for (; ci < tn_cond_stmts; ci++)
        {
	  gimple *c = conds[ci];
          gcc_assert (c || ci != ci0);
          if (!c)
            break;
          gsi_insert_before (&guard_bsi, c, GSI_SAME_STMT);
          cond_expr = c;
        }
      ci++;
      gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND);
      guard_bb_in_edge = split_block (guard_bb, cond_expr);
      guard_bb_in_edge->flags &= ~EDGE_FALLTHRU;
      guard_bb_in_edge->flags |= EDGE_TRUE_VALUE;

      bi_call_in_edge = make_edge (guard_bb, bi_call_bb, EDGE_FALSE_VALUE);
      edges.quick_push (edge_pair (bi_call_in_edge, guard_bb_in_edge));
    }

  /* Now update the probability and profile information, processing the
     guards in order of execution.

     There are two approaches we could take here.  On the one hand we
     could assign a probability of X to the call block and distribute
     that probability among its incoming edges.  On the other hand we
     could assign a probability of X to each individual call edge.

     The choice only affects calls that have more than one condition.
     In those cases, the second approach would give the call block
     a greater probability than the first.  However, the difference
     is only small, and our chosen X is a pure guess anyway.

     Here we take the second approach because it's slightly simpler
     and because it's easy to see that it doesn't lose profile counts.  */
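  /* For instance, with two conditions and per-edge probability X, the call
     block receives X + (1 - X) * X = 2X - X*X of the original count under
     the second approach, versus exactly X under the first; for the very
     small X used here the difference is negligible.  */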
  bi_call_bb->count = profile_count::zero ();
  while (!edges.is_empty ())
    {
      edge_pair e = edges.pop ();
      edge call_edge = e.first;
      edge nocall_edge = e.second;
      basic_block src_bb = call_edge->src;
      gcc_assert (src_bb == nocall_edge->src);

      call_edge->probability = profile_probability::very_unlikely ();
      nocall_edge->probability = profile_probability::always ()
				 - call_edge->probability;

      bi_call_bb->count += call_edge->count ();

      if (nocall_edge->dest != join_tgt_bb)
	nocall_edge->dest->count = src_bb->count - bi_call_bb->count;
    }

  if (dom_info_available_p (CDI_DOMINATORS))
    {
      /* The split_blocks leave [guard 0] as the immediate dominator
	 of [call] and [call] as the immediate dominator of [join].
	 Fix them up.  */
      set_immediate_dominator (CDI_DOMINATORS, bi_call_bb, guard_bb);
      set_immediate_dominator (CDI_DOMINATORS, join_tgt_bb, guard_bb);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      location_t loc;
      loc = gimple_location (bi_call);
      fprintf (dump_file,
               "%s:%d: note: function call is shrink-wrapped"
               " into error conditions.\n",
               LOCATION_FILE (loc), LOCATION_LINE (loc));
    }
}
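/* Return true if the call statement CALL can be guarded: either it does
   not end its basic block, or the block has a fallthrough edge that can
   be used to reach the join block.  */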
static bool
can_guard_call_p (gimple *call)
{
  return (!stmt_ends_bb_p (call)
	  || find_fallthru_edge (gimple_bb (call)->succs));
}