Example #1
static void
remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
{
  gimple_stmt_iterator gsi;
  edge e;
  edge_iterator ei;

  gsi = gsi_last_bb (bb);

  /* If the duplicate ends with a control statement, then remove it.

     Note that if we are duplicating the template block rather than the
     original basic block, then the duplicate might not have any real
     statements in it.  */
  if (!gsi_end_p (gsi)
      && gsi_stmt (gsi)
      && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
    gsi_remove (&gsi, true);

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (e->dest != dest_bb)
	remove_edge (e);
      else
	ei_next (&ei);
    }
}
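
A note on the edge-walk idiom above: remove_edge compacts the successor vector in place, so the iterator must only be advanced when an edge is kept. A minimal sketch of the same pattern, with should_remove_p standing in for whatever test applies (hypothetical name):

static void
prune_succs (basic_block bb)
{
  edge e;
  edge_iterator ei;

  /* ei_safe_edge returns NULL once the vector is exhausted.  */
  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (should_remove_p (e))	/* hypothetical predicate */
	remove_edge (e);	/* shifts bb->succs; do not advance */
      else
	ei_next (&ei);
    }
}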
Example #2
bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Special case.  We can get blocks that are forwarders, but are
     not optimized away because they forward from outside a loop
     to the loop header.   We want to thread through them as we can
     sometimes thread to the loop exit, which is obviously profitable.
     The interesting case here is when the block has PHIs.  */
  if (gsi_end_p (gsi_start_nondebug_bb (bb))
      && !gsi_end_p (gsi_start_phis (bb)))
    return true;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
Example #3
void
gsi_move_to_bb_end (gimple_stmt_iterator *from, basic_block bb)
{
  gimple_stmt_iterator last = gsi_last_bb (bb);
  gcc_checking_assert (gsi_bb (last) == bb);

  /* Have to check gsi_end_p because it could be an empty block.  */
  if (!gsi_end_p (last) && is_ctrl_stmt (gsi_stmt (last)))
    gsi_move_before (from, &last);
  else
    gsi_move_after (from, &last);
}
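
A small usage sketch, assuming stmt_gsi points at a movable (non-control) statement:

  gsi_move_to_bb_end (&stmt_gsi, target_bb);
  /* If target_bb ends in a GIMPLE_COND or other control statement, the
     statement lands just before it, since GIMPLE requires control
     statements to stay last in their block; otherwise it is simply
     appended at the end.  */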
Example #4
void
backprop::process_block (basic_block bb)
{
  for (gimple_stmt_iterator gsi = gsi_last_bb (bb); !gsi_end_p (gsi);
       gsi_prev (&gsi))
    {
      tree lhs = gimple_get_lhs (gsi_stmt (gsi));
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
	process_var (lhs);
    }
  for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi);
       gsi_next (&gpi))
    process_var (gimple_phi_result (gpi.phi ()));
}
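
The example above shows the standard reverse statement walk: gsi_last_bb yields an end iterator for an empty block, so the loop is safe there, and gsi_prev steps toward the head. PHIs live in a separate sequence, which is why they get their own loop. A stripped-down sketch, with process_stmt a hypothetical per-statement callback:

static void
walk_bb_backwards (basic_block bb)
{
  for (gimple_stmt_iterator gsi = gsi_last_bb (bb); !gsi_end_p (gsi);
       gsi_prev (&gsi))
    process_stmt (gsi_stmt (gsi));	/* hypothetical callback */
}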
Example #5
static void
replace_phi_edge_with_variable (basic_block cond_block,
                                edge e, gimple phi, tree new_tree)
{
    basic_block bb = gimple_bb (phi);
    basic_block block_to_remove;
    gimple_stmt_iterator gsi;

    /* Change the PHI argument to new.  */
    SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

    /* Remove the empty basic block.  */
    if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
        EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
        EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
        EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
        EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;

        block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
    else
    {
        EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
        EDGE_SUCC (cond_block, 1)->flags
        &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
        EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
        EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;

        block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
    delete_basic_block (block_to_remove);

    /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
    gsi = gsi_last_bb (cond_block);
    gsi_remove (&gsi, true);

    if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
                 cond_block->index,
                 bb->index);
}
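
The CFG shape being collapsed, roughly sketched: COND_BLOCK reaches BB both directly and through an empty arm, and the PHI in BB selects between the two; once the PHI argument for E is replaced by NEW_TREE, the empty arm is deleted and the surviving edge becomes a fallthru:

        cond_block                     cond_block
         /       \                         |
    empty_bb      |          ==>           |  (EDGE_FALLTHRU)
         \       /                         |
     bb: x = PHI <...>                 bb: uses of new_tree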
Example #6
static void
create_canonical_iv (struct loop *loop, edge exit, tree niter)
{
  edge in;
  tree type, var;
  gcond *cond;
  gimple_stmt_iterator incr_at;
  enum tree_code cmp;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Added canonical iv to loop %d, ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " iterations.\n");
    }

  cond = as_a <gcond *> (last_stmt (exit->src));
  in = EDGE_SUCC (exit->src, 0);
  if (in == exit)
    in = EDGE_SUCC (exit->src, 1);

  /* Note that we do not need to worry about overflows, since
     the type of niter is always unsigned and all comparisons are
     just for equality/nonequality -- i.e. everything works
     with modulo arithmetic.  */

  type = TREE_TYPE (niter);
  niter = fold_build2 (PLUS_EXPR, type,
		       niter,
		       build_int_cst (type, 1));
  incr_at = gsi_last_bb (in->src);
  create_iv (niter,
	     build_int_cst (type, -1),
	     NULL_TREE, loop,
	     &incr_at, false, NULL, &var);

  cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR;
  gimple_cond_set_code (cond, cmp);
  gimple_cond_set_lhs (cond, var);
  gimple_cond_set_rhs (cond, build_int_cst (type, 0));
  update_stmt (cond);
}
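
The effect of the rewrite, sketched: given that the loop exits after NITER further iterations, a fresh IV counting down from NITER + 1 is created, decremented just before the exit test, and the exit condition is replaced by a zero test (EQ_EXPR when the exit edge is the condition's true edge, NE_EXPR otherwise):

  /* before:  if (<original exit condition>) goto exit;
     after:   ivtmp starts at NITER + 1 and drops by 1 per iteration;
              if (ivtmp == 0) goto exit;
     Unsigned NITER makes the +1 wraparound harmless, as the comment
     above notes -- everything is modulo arithmetic.  */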
Example #7
static void
adjust_return_value (basic_block bb, tree m, tree a)
{
  tree retval;
  gimple ret_stmt = gimple_seq_last_stmt (bb_seq (bb));
  gimple_stmt_iterator gsi = gsi_last_bb (bb);

  gcc_assert (gimple_code (ret_stmt) == GIMPLE_RETURN);

  retval = gimple_return_retval (ret_stmt);
  if (!retval || retval == error_mark_node)
    return;

  if (m)
    retval = adjust_return_value_with_ops (MULT_EXPR, "mul_tmp", m_acc, retval,
					   gsi);
  if (a)
    retval = adjust_return_value_with_ops (PLUS_EXPR, "acc_tmp", a_acc, retval,
					   gsi);
  gimple_return_set_retval (ret_stmt, retval);
  update_stmt (ret_stmt);
}
Example #8
bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
Example #9
static void
set_location_for_edge (edge e)
{
  if (e->goto_locus)
    {
      set_curr_insn_source_location (e->goto_locus);
      set_curr_insn_block (e->goto_block);
    }
  else
    {
      basic_block bb = e->src;
      gimple_stmt_iterator gsi;

      do
	{
	  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
	    {
	      gimple stmt = gsi_stmt (gsi);
	      if (is_gimple_debug (stmt))
		continue;
	      if (gimple_has_location (stmt) || gimple_block (stmt))
		{
		  set_curr_insn_source_location (gimple_location (stmt));
		  set_curr_insn_block (gimple_block (stmt));
		  return;
		}
	    }
	  /* Nothing found in this basic block.  Make a half-assed attempt
	     to continue with another block.  */
	  if (single_pred_p (bb))
	    bb = single_pred (bb);
	  else
	    bb = e->src;
	}
      while (bb != e->src);
    }
}
Example #10
static void
find_tail_calls (basic_block bb, struct tailcall **ret)
{
  tree ass_var = NULL_TREE, ret_var, func, param;
  gimple stmt, call = NULL;
  gimple_stmt_iterator gsi, agsi;
  bool tail_recursion;
  struct tailcall *nw;
  edge e;
  tree m, a;
  basic_block abb;
  size_t idx;
  tree var;

  if (!single_succ_p (bb))
    return;

  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      stmt = gsi_stmt (gsi);

      /* Ignore labels, returns, clobbers and debug stmts.  */
      if (gimple_code (stmt) == GIMPLE_LABEL
	  || gimple_code (stmt) == GIMPLE_RETURN
	  || gimple_clobber_p (stmt)
	  || is_gimple_debug (stmt))
	continue;

      /* Check for a call.  */
      if (is_gimple_call (stmt))
	{
	  call = stmt;
	  ass_var = gimple_call_lhs (stmt);
	  break;
	}

      /* If the statement references memory or volatile operands, fail.  */
      if (gimple_references_memory_p (stmt)
	  || gimple_has_volatile_ops (stmt))
	return;
    }

  if (gsi_end_p (gsi))
    {
      edge_iterator ei;
      /* Recurse to the predecessors.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
	find_tail_calls (e->src, ret);

      return;
    }

  /* If the LHS of our call is not just a simple register, we can't
     transform this into a tail or sibling call.  This situation happens,
     in (e.g.) "*p = foo()" where foo returns a struct.  In this case
     we won't have a temporary here, but we need to carry out the side
     effect anyway, so tailcall is impossible.

     ??? In some situations (when the struct is returned in memory via
     invisible argument) we could deal with this, e.g. by passing 'p'
     itself as that argument to foo, but it's too early to do this here,
     and expand_call() will not handle it anyway.  If it ever can, then
     we need to revisit this here, to allow that situation.  */
  if (ass_var && !is_gimple_reg (ass_var))
    return;

  /* We found the call, check whether it is suitable.  */
  tail_recursion = false;
  func = gimple_call_fndecl (call);
  if (func
      && !DECL_BUILT_IN (func)
      && recursive_call_p (current_function_decl, func))
    {
      tree arg;

      for (param = DECL_ARGUMENTS (func), idx = 0;
	   param && idx < gimple_call_num_args (call);
	   param = DECL_CHAIN (param), idx ++)
	{
	  arg = gimple_call_arg (call, idx);
	  if (param != arg)
	    {
	      /* Make sure there are no problems with copying.  The parameter
	         must have a copyable type and the two arguments must have
	         reasonably equivalent types.  The latter requirement could be
	         relaxed if we emitted a suitable type conversion statement.  */
	      if (!is_gimple_reg_type (TREE_TYPE (param))
		  || !useless_type_conversion_p (TREE_TYPE (param),
					         TREE_TYPE (arg)))
		break;

	      /* The parameter should be a real operand, so that phi node
		 created for it at the start of the function has the meaning
		 of copying the value.  This test implies is_gimple_reg_type
		 from the previous condition, however this one could be
		 relaxed by being more careful with copying the new value
		 of the parameter (emitting appropriate GIMPLE_ASSIGN and
		 updating the virtual operands).  */
	      if (!is_gimple_reg (param))
		break;
	    }
	}
      if (idx == gimple_call_num_args (call) && !param)
	tail_recursion = true;
    }

  /* Make sure the tail invocation of this function does not refer
     to local variables.  */
  FOR_EACH_LOCAL_DECL (cfun, idx, var)
    {
      if (TREE_CODE (var) != PARM_DECL
	  && auto_var_in_fn_p (var, cfun->decl)
	  && (ref_maybe_used_by_stmt_p (call, var)
	      || call_may_clobber_ref_p (call, var)))
	return;
    }

  /* Now check the statements after the call.  None of them has virtual
     operands, so they may only depend on the call through its return
     value.  The return value should also be dependent on each of them,
     since we are running after dce.  */
  m = NULL_TREE;
  a = NULL_TREE;

  abb = bb;
  agsi = gsi;
  while (1)
    {
      tree tmp_a = NULL_TREE;
      tree tmp_m = NULL_TREE;
      gsi_next (&agsi);

      while (gsi_end_p (agsi))
	{
	  ass_var = propagate_through_phis (ass_var, single_succ_edge (abb));
	  abb = single_succ (abb);
	  agsi = gsi_start_bb (abb);
	}

      stmt = gsi_stmt (agsi);

      if (gimple_code (stmt) == GIMPLE_LABEL)
	continue;

      if (gimple_code (stmt) == GIMPLE_RETURN)
	break;

      if (gimple_clobber_p (stmt))
	continue;

      if (is_gimple_debug (stmt))
	continue;

      if (gimple_code (stmt) != GIMPLE_ASSIGN)
	return;

      /* This is a gimple assign. */
      if (! process_assignment (stmt, gsi, &tmp_m, &tmp_a, &ass_var))
	return;

      if (tmp_a)
	{
	  tree type = TREE_TYPE (tmp_a);
	  if (a)
	    a = fold_build2 (PLUS_EXPR, type, fold_convert (type, a), tmp_a);
	  else
	    a = tmp_a;
	}
      if (tmp_m)
	{
	  tree type = TREE_TYPE (tmp_m);
	  if (m)
	    m = fold_build2 (MULT_EXPR, type, fold_convert (type, m), tmp_m);
	  else
	    m = tmp_m;

	  if (a)
	    a = fold_build2 (MULT_EXPR, type, fold_convert (type, a), tmp_m);
	}
    }

  /* See if this is a tail call we can handle.  */
  ret_var = gimple_return_retval (stmt);

  /* We may proceed if there either is no return value, or the return value
     is identical to the call's return.  */
  if (ret_var
      && (ret_var != ass_var))
    return;

  /* If this is not a tail recursive call, we cannot handle addends or
     multiplicands.  */
  if (!tail_recursion && (m || a))
    return;

  /* For pointers only allow additions.  */
  if (m && POINTER_TYPE_P (TREE_TYPE (DECL_RESULT (current_function_decl))))
    return;

  nw = XNEW (struct tailcall);

  nw->call_gsi = gsi;

  nw->tail_recursion = tail_recursion;

  nw->mult = m;
  nw->add = a;

  nw->next = *ret;
  *ret = nw;
}
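
The M and A accumulators record how the eventual return value depends on the call: every statement between the call and the GIMPLE_RETURN must reduce the return value to the form M * <call result> + A. A sketch of the simplest shape the walk accepts, written at the source level for a self-recursive function f (illustrative, not from the source):

int
f (int n)
{
  if (n <= 1)
    return 1;
  int t = f (n - 1);	/* the call located by the backward walk */
  int r = n * t;	/* process_assignment folds n into M */
  return r;		/* ret_var matches ass_var, so it qualifies */
}

Since M is set, the call is only eliminable as tail recursion proper (see the !tail_recursion && (m || a) check above), with M carried as a multiplicative accumulator.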
Example #11
static bool
gimple_find_edge_insert_loc (edge e, gimple_stmt_iterator *gsi,
			     basic_block *new_bb)
{
  basic_block dest, src;
  gimple *tmp;

  dest = e->dest;

  /* If the destination has one predecessor which has no PHI nodes,
     insert there.  Except for the exit block.

     The requirement for no PHI nodes could be relaxed.  Basically we
     would have to examine the PHIs to prove that none of them used
     the value set by the statement we want to insert on E.  That
     hardly seems worth the effort.  */
 restart:
  if (single_pred_p (dest)
      && gimple_seq_empty_p (phi_nodes (dest))
      && dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      *gsi = gsi_start_bb (dest);
      if (gsi_end_p (*gsi))
	return true;

      /* Make sure we insert after any leading labels.  */
      tmp = gsi_stmt (*gsi);
      while (gimple_code (tmp) == GIMPLE_LABEL)
	{
	  gsi_next (gsi);
	  if (gsi_end_p (*gsi))
	    break;
	  tmp = gsi_stmt (*gsi);
	}

      if (gsi_end_p (*gsi))
	{
	  *gsi = gsi_last_bb (dest);
	  return true;
	}
      else
	return false;
    }

  /* If the source has one successor, the edge is not abnormal and
     the last statement does not end a basic block, insert there.
     Except for the entry block.  */
  src = e->src;
  if ((e->flags & EDGE_ABNORMAL) == 0
      && single_succ_p (src)
      && src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
    {
      *gsi = gsi_last_bb (src);
      if (gsi_end_p (*gsi))
	return true;

      tmp = gsi_stmt (*gsi);
      if (!stmt_ends_bb_p (tmp))
	return true;

      switch (gimple_code (tmp))
	{
	case GIMPLE_RETURN:
	case GIMPLE_RESX:
	  return false;
	default:
	  break;
        }
    }

  /* Otherwise, create a new basic block, and split this edge.  */
  dest = split_edge (e);
  if (new_bb)
    *new_bb = dest;
  e = single_pred_edge (dest);
  goto restart;
}
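
The boolean result tells the caller on which side of *GSI to insert: true means insert after, false means insert before. A typical calling pattern, assuming STMT is the statement destined for edge E (sketch):

  gimple_stmt_iterator gsi;
  basic_block new_bb = NULL;

  if (gimple_find_edge_insert_loc (e, &gsi, &new_bb))
    gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
  else
    gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);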
Example #12
static void
eliminate_tail_call (struct tailcall *t)
{
  tree param, rslt;
  gimple *stmt, *call;
  tree arg;
  size_t idx;
  basic_block bb, first;
  edge e;
  gphi *phi;
  gphi_iterator gpi;
  gimple_stmt_iterator gsi;
  gimple *orig_stmt;

  stmt = orig_stmt = gsi_stmt (t->call_gsi);
  bb = gsi_bb (t->call_gsi);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Eliminated tail recursion in bb %d : ",
	       bb->index);
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
      fprintf (dump_file, "\n");
    }

  gcc_assert (is_gimple_call (stmt));

  first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));

  /* Remove the code after call_gsi that will become unreachable.  The
     possibly unreachable code in other blocks is removed later in
     cfg cleanup.  */
  gsi = t->call_gsi;
  gimple_stmt_iterator gsi2 = gsi_last_bb (gimple_bb (gsi_stmt (gsi)));
  while (gsi_stmt (gsi2) != gsi_stmt (gsi))
    {
      gimple *t = gsi_stmt (gsi2);
      /* Do not remove the return statement, so that redirect_edge_and_branch
	 sees how the block ends.  */
      if (gimple_code (t) != GIMPLE_RETURN)
	{
	  gimple_stmt_iterator gsi3 = gsi2;
	  gsi_prev (&gsi2);
	  gsi_remove (&gsi3, true);
	  release_defs (t);
	}
      else
	gsi_prev (&gsi2);
    }

  /* Number of executions of function has reduced by the tailcall.  */
  e = single_succ_edge (gsi_bb (t->call_gsi));
  decrease_profile (EXIT_BLOCK_PTR_FOR_FN (cfun), e->count, EDGE_FREQUENCY (e));
  decrease_profile (ENTRY_BLOCK_PTR_FOR_FN (cfun), e->count,
		    EDGE_FREQUENCY (e));
  if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
    decrease_profile (e->dest, e->count, EDGE_FREQUENCY (e));

  /* Replace the call by a jump to the start of function.  */
  e = redirect_edge_and_branch (single_succ_edge (gsi_bb (t->call_gsi)),
				first);
  gcc_assert (e);
  PENDING_STMT (e) = NULL;

  /* Add phi node entries for arguments.  The ordering of the phi nodes should
     be the same as the ordering of the arguments.  */
  for (param = DECL_ARGUMENTS (current_function_decl),
	 idx = 0, gpi = gsi_start_phis (first);
       param;
       param = DECL_CHAIN (param), idx++)
    {
      if (!arg_needs_copy_p (param))
	continue;

      arg = gimple_call_arg (stmt, idx);
      phi = gpi.phi ();
      gcc_assert (param == SSA_NAME_VAR (PHI_RESULT (phi)));

      add_phi_arg (phi, arg, e, gimple_location (stmt));
      gsi_next (&gpi);
    }

  /* Update the values of accumulators.  */
  adjust_accumulator_values (t->call_gsi, t->mult, t->add, e);

  call = gsi_stmt (t->call_gsi);
  rslt = gimple_call_lhs (call);
  if (rslt != NULL_TREE)
    {
      /* Result of the call will no longer be defined.  So adjust the
	 SSA_NAME_DEF_STMT accordingly.  */
      SSA_NAME_DEF_STMT (rslt) = gimple_build_nop ();
    }

  gsi_remove (&t->call_gsi, true);
  release_defs (call);
}
Example #13
static void
emit_case_bit_tests (gswitch *swtch, tree index_expr,
		     tree minval, tree range, tree maxval)
{
  struct case_bit_test test[MAX_CASE_BIT_TESTS];
  unsigned int i, j, k;
  unsigned int count;

  basic_block switch_bb = gimple_bb (swtch);
  basic_block default_bb, new_default_bb, new_bb;
  edge default_edge;
  bool update_dom = dom_info_available_p (CDI_DOMINATORS);

  vec<basic_block> bbs_to_fix_dom = vNULL;

  tree index_type = TREE_TYPE (index_expr);
  tree unsigned_index_type = unsigned_type_for (index_type);
  unsigned int branch_num = gimple_switch_num_labels (swtch);

  gimple_stmt_iterator gsi;
  gassign *shift_stmt;

  tree idx, tmp, csui;
  tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
  tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
  tree word_mode_one = fold_convert (word_type_node, integer_one_node);
  int prec = TYPE_PRECISION (word_type_node);
  wide_int wone = wi::one (prec);

  memset (&test, 0, sizeof (test));

  /* Get the edge for the default case.  */
  tmp = gimple_switch_default_label (swtch);
  default_bb = label_to_block (CASE_LABEL (tmp));
  default_edge = find_edge (switch_bb, default_bb);

  /* Go through all case labels, and collect the case labels, profile
     counts, and other information we need to build the branch tests.  */
  count = 0;
  for (i = 1; i < branch_num; i++)
    {
      unsigned int lo, hi;
      tree cs = gimple_switch_label (swtch, i);
      tree label = CASE_LABEL (cs);
      edge e = find_edge (switch_bb, label_to_block (label));
      for (k = 0; k < count; k++)
	if (e == test[k].target_edge)
	  break;

      if (k == count)
	{
	  gcc_checking_assert (count < MAX_CASE_BIT_TESTS);
	  test[k].mask = wi::zero (prec);
	  test[k].target_edge = e;
	  test[k].label = label;
	  test[k].bits = 1;
	  count++;
	}
      else
        test[k].bits++;

      lo = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					  CASE_LOW (cs), minval));
      if (CASE_HIGH (cs) == NULL_TREE)
	hi = lo;
      else
	hi = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					    CASE_HIGH (cs), minval));

      for (j = lo; j <= hi; j++)
	test[k].mask |= wi::lshift (wone, j);
    }

  qsort (test, count, sizeof (*test), case_bit_test_cmp);

  /* If all values are in the 0 .. BITS_PER_WORD-1 range, we can get rid of
     the minval subtractions, but it might make the mask constants more
     expensive.  So, compare the costs.  */
  if (compare_tree_int (minval, 0) > 0
      && compare_tree_int (maxval, GET_MODE_BITSIZE (word_mode)) < 0)
    {
      int cost_diff;
      HOST_WIDE_INT m = tree_to_uhwi (minval);
      rtx reg = gen_raw_REG (word_mode, 10000);
      bool speed_p = optimize_bb_for_speed_p (gimple_bb (swtch));
      cost_diff = set_rtx_cost (gen_rtx_PLUS (word_mode, reg,
					      GEN_INT (-m)), speed_p);
      for (i = 0; i < count; i++)
	{
	  rtx r = immed_wide_int_const (test[i].mask, word_mode);
	  cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     word_mode, speed_p);
	  r = immed_wide_int_const (wi::lshift (test[i].mask, m), word_mode);
	  cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     word_mode, speed_p);
	}
      if (cost_diff > 0)
	{
	  for (i = 0; i < count; i++)
	    test[i].mask = wi::lshift (test[i].mask, m);
	  minval = build_zero_cst (TREE_TYPE (minval));
	  range = maxval;
	}
    }

  /* We generate two jumps to the default case label.
     Split the default edge, so that we don't have to do any PHI node
     updating.  */
  new_default_bb = split_edge (default_edge);

  if (update_dom)
    {
      bbs_to_fix_dom.create (10);
      bbs_to_fix_dom.quick_push (switch_bb);
      bbs_to_fix_dom.quick_push (default_bb);
      bbs_to_fix_dom.quick_push (new_default_bb);
    }

  /* Now build the test-and-branch code.  */

  gsi = gsi_last_bb (switch_bb);

  /* idx = (unsigned)x - minval.  */
  idx = fold_convert (unsigned_index_type, index_expr);
  idx = fold_build2 (MINUS_EXPR, unsigned_index_type, idx,
		     fold_convert (unsigned_index_type, minval));
  idx = force_gimple_operand_gsi (&gsi, idx,
				  /*simple=*/true, NULL_TREE,
				  /*before=*/true, GSI_SAME_STMT);

  /* if (idx > range) goto default */
  range = force_gimple_operand_gsi (&gsi,
				    fold_convert (unsigned_index_type, range),
				    /*simple=*/true, NULL_TREE,
				    /*before=*/true, GSI_SAME_STMT);
  tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
  new_bb = hoist_edge_and_branch_if_true (&gsi, tmp, default_edge, update_dom);
  if (update_dom)
    bbs_to_fix_dom.quick_push (new_bb);
  gcc_assert (gimple_bb (swtch) == new_bb);
  gsi = gsi_last_bb (new_bb);

  /* Any blocks dominated by the GIMPLE_SWITCH, but that are not successors
     of NEW_BB, are still immediately dominated by SWITCH_BB.  Make it so.  */
  if (update_dom)
    {
      vec<basic_block> dom_bbs;
      basic_block dom_son;

      dom_bbs = get_dominated_by (CDI_DOMINATORS, new_bb);
      FOR_EACH_VEC_ELT (dom_bbs, i, dom_son)
	{
	  edge e = find_edge (new_bb, dom_son);
	  if (e && single_pred_p (e->dest))
	    continue;
	  set_immediate_dominator (CDI_DOMINATORS, dom_son, switch_bb);
	  bbs_to_fix_dom.safe_push (dom_son);
	}
      dom_bbs.release ();
    }
Example #14
static bool
abs_replacement (basic_block cond_bb, basic_block middle_bb,
                 edge e0 ATTRIBUTE_UNUSED, edge e1,
                 gimple phi, tree arg0, tree arg1)
{
    tree result;
    gimple new_stmt, cond;
    gimple_stmt_iterator gsi;
    edge true_edge, false_edge;
    gimple assign;
    edge e;
    tree rhs, lhs;
    bool negate;
    enum tree_code cond_code;

    /* If the type says honor signed zeros we cannot do this
       optimization.  */
    if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
        return false;

    /* OTHER_BLOCK must have only one executable statement which must have the
       form arg0 = -arg1 or arg1 = -arg0.  */

    assign = last_and_only_stmt (middle_bb);
    /* If we did not find the proper negation assignment, then we can not
       optimize.  */
    if (assign == NULL)
        return false;

    /* If we got here, then we have found the only executable statement
       in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
       arg1 = -arg0, then we can not optimize.  */
    if (gimple_code (assign) != GIMPLE_ASSIGN)
        return false;

    lhs = gimple_assign_lhs (assign);

    if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
        return false;

    rhs = gimple_assign_rhs1 (assign);

    /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
    if (!(lhs == arg0 && rhs == arg1)
            && !(lhs == arg1 && rhs == arg0))
        return false;

    cond = last_stmt (cond_bb);
    result = PHI_RESULT (phi);

    /* Only relationals comparing arg[01] against zero are interesting.  */
    cond_code = gimple_cond_code (cond);
    if (cond_code != GT_EXPR && cond_code != GE_EXPR
            && cond_code != LT_EXPR && cond_code != LE_EXPR)
        return false;

    /* Make sure the conditional is arg[01] OP y.  */
    if (gimple_cond_lhs (cond) != rhs)
        return false;

    if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
            ? real_zerop (gimple_cond_rhs (cond))
            : integer_zerop (gimple_cond_rhs (cond)))
        ;
    else
        return false;

    /* We need to know which is the true edge and which is the false
       edge so that we know if we have abs or negative abs.  */
    extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

    /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
       will need to negate the result.  Similarly for LT_EXPR/LE_EXPR if
       the false edge goes to OTHER_BLOCK.  */
    if (cond_code == GT_EXPR || cond_code == GE_EXPR)
        e = true_edge;
    else
        e = false_edge;

    if (e->dest == middle_bb)
        negate = true;
    else
        negate = false;

    result = duplicate_ssa_name (result, NULL);

    if (negate)
    {
        tree tmp = create_tmp_var (TREE_TYPE (result), NULL);
        add_referenced_var (tmp);
        lhs = make_ssa_name (tmp, NULL);
    }
    else
        lhs = result;

    /* Build the modify expression with abs expression.  */
    new_stmt = gimple_build_assign_with_ops (ABS_EXPR, lhs, rhs, NULL);

    gsi = gsi_last_bb (cond_bb);
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

    if (negate)
    {
        /* Get the right GSI.  We want to insert after the recently
           added ABS_EXPR statement (which we know is the first statement
           in the block).  */
        new_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, result, lhs, NULL);

        gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }

    replace_phi_edge_with_variable (cond_bb, e1, phi, result);

    /* Note that we optimized this PHI.  */
    return true;
}
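
The source-level shape this recognizes, sketched:

  /* before:                            after:
       if (x < 0)                         result = ABS_EXPR <x>;
         tmp = -x;
       result = PHI <tmp, x>

     When the comparison sense instead sends the true edge of a GT/GE
     test through the negation arm, the extra NEGATE_EXPR built above
     produces -ABS_EXPR <x>.  */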
Example #15
static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
                    edge e0, edge e1, gimple phi,
                    tree arg0, tree arg1)
{
    tree result, type;
    gimple cond, new_stmt;
    edge true_edge, false_edge;
    enum tree_code cmp, minmax, ass_code;
    tree smaller, larger, arg_true, arg_false;
    gimple_stmt_iterator gsi, gsi_from;

    type = TREE_TYPE (PHI_RESULT (phi));

    /* The optimization may be unsafe due to NaNs.  */
    if (HONOR_NANS (TYPE_MODE (type)))
        return false;

    cond = last_stmt (cond_bb);
    cmp = gimple_cond_code (cond);
    result = PHI_RESULT (phi);

    /* This transformation is only valid for order comparisons.  Record which
       operand is smaller/larger if the result of the comparison is true.  */
    if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
        smaller = gimple_cond_lhs (cond);
        larger = gimple_cond_rhs (cond);
    }
    else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
        smaller = gimple_cond_rhs (cond);
        larger = gimple_cond_lhs (cond);
    }
    else
        return false;

    /* We need to know which is the true edge and which is the false
       edge so that we know if we have min or max.  */
    extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

    /* Forward the edges over the middle basic block.  */
    if (true_edge->dest == middle_bb)
        true_edge = EDGE_SUCC (true_edge->dest, 0);
    if (false_edge->dest == middle_bb)
        false_edge = EDGE_SUCC (false_edge->dest, 0);

    if (true_edge == e0)
    {
        gcc_assert (false_edge == e1);
        arg_true = arg0;
        arg_false = arg1;
    }
    else
    {
        gcc_assert (false_edge == e0);
        gcc_assert (true_edge == e1);
        arg_true = arg1;
        arg_false = arg0;
    }

    if (empty_block_p (middle_bb))
    {
        if (operand_equal_for_phi_arg_p (arg_true, smaller)
                && operand_equal_for_phi_arg_p (arg_false, larger))
        {
            /* Case

                 if (smaller < larger)
                   rslt = smaller;
                 else
                   rslt = larger;  */
            minmax = MIN_EXPR;
        }
        else if (operand_equal_for_phi_arg_p (arg_false, smaller)
                 && operand_equal_for_phi_arg_p (arg_true, larger))
            minmax = MAX_EXPR;
        else
            return false;
    }
    else
    {
        /* Recognize the following case, assuming d <= u:

             if (a <= u)
               b = MAX (a, d);
             x = PHI <b, u>

           This is equivalent to

             b = MAX (a, d);
             x = MIN (b, u);  */

        gimple assign = last_and_only_stmt (middle_bb);
        tree lhs, op0, op1, bound;

        if (!assign
                || gimple_code (assign) != GIMPLE_ASSIGN)
            return false;

        lhs = gimple_assign_lhs (assign);
        ass_code = gimple_assign_rhs_code (assign);
        if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
            return false;
        op0 = gimple_assign_rhs1 (assign);
        op1 = gimple_assign_rhs2 (assign);

        if (true_edge->src == middle_bb)
        {
            /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
            if (!operand_equal_for_phi_arg_p (lhs, arg_true))
                return false;

            if (operand_equal_for_phi_arg_p (arg_false, larger))
            {
                /* Case

                     if (smaller < larger)
                       {
                         r' = MAX_EXPR (smaller, bound)
                       }
                     r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
                if (ass_code != MAX_EXPR)
                    return false;

                minmax = MIN_EXPR;
                if (operand_equal_for_phi_arg_p (op0, smaller))
                    bound = op1;
                else if (operand_equal_for_phi_arg_p (op1, smaller))
                    bound = op0;
                else
                    return false;

                /* We need BOUND <= LARGER.  */
                if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                    bound, larger)))
                    return false;
            }
            else if (operand_equal_for_phi_arg_p (arg_false, smaller))
            {
                /* Case

                     if (smaller < larger)
                       {
                         r' = MIN_EXPR (larger, bound)
                       }
                     r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
                if (ass_code != MIN_EXPR)
                    return false;

                minmax = MAX_EXPR;
                if (operand_equal_for_phi_arg_p (op0, larger))
                    bound = op1;
                else if (operand_equal_for_phi_arg_p (op1, larger))
                    bound = op0;
                else
                    return false;

                /* We need BOUND >= SMALLER.  */
                if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                    bound, smaller)))
                    return false;
            }
            else
                return false;
        }
        else
        {
            /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
            if (!operand_equal_for_phi_arg_p (lhs, arg_false))
                return false;

            if (operand_equal_for_phi_arg_p (arg_true, larger))
            {
                /* Case

                     if (smaller > larger)
                       {
                         r' = MIN_EXPR (smaller, bound)
                       }
                     r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
                if (ass_code != MIN_EXPR)
                    return false;

                minmax = MAX_EXPR;
                if (operand_equal_for_phi_arg_p (op0, smaller))
                    bound = op1;
                else if (operand_equal_for_phi_arg_p (op1, smaller))
                    bound = op0;
                else
                    return false;

                /* We need BOUND >= LARGER.  */
                if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                    bound, larger)))
                    return false;
            }
            else if (operand_equal_for_phi_arg_p (arg_true, smaller))
            {
                /* Case

                     if (smaller > larger)
                       {
                         r' = MAX_EXPR (larger, bound)
                       }
                     r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
                if (ass_code != MAX_EXPR)
                    return false;

                minmax = MIN_EXPR;
                if (operand_equal_for_phi_arg_p (op0, larger))
                    bound = op1;
                else if (operand_equal_for_phi_arg_p (op1, larger))
                    bound = op0;
                else
                    return false;

                /* We need BOUND <= SMALLER.  */
                if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                    bound, smaller)))
                    return false;
            }
            else
                return false;
        }

        /* Move the statement from the middle block.  */
        gsi = gsi_last_bb (cond_bb);
        gsi_from = gsi_last_bb (middle_bb);
        gsi_move_before (&gsi_from, &gsi);
    }

    /* Emit the statement to compute min/max.  */
    result = duplicate_ssa_name (PHI_RESULT (phi), NULL);
    new_stmt = gimple_build_assign_with_ops (minmax, result, arg0, arg1);
    gsi = gsi_last_bb (cond_bb);
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

    replace_phi_edge_with_variable (cond_bb, e1, phi, result);
    return true;
}
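
The empty-middle-block case reduces to the classic source-level pattern:

  /* if (a < b)  x = a;  else  x = b;   ==>   x = MIN_EXPR <a, b>;  */

The non-empty cases additionally fold the middle block's MIN_EXPR/MAX_EXPR into the opposite bound, as the case comments above spell out.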
Example #16
static void
emit_case_bit_tests (gimple swtch, tree index_expr,
		     tree minval, tree range)
{
  struct case_bit_test test[MAX_CASE_BIT_TESTS];
  unsigned int i, j, k;
  unsigned int count;

  basic_block switch_bb = gimple_bb (swtch);
  basic_block default_bb, new_default_bb, new_bb;
  edge default_edge;
  bool update_dom = dom_info_available_p (CDI_DOMINATORS);

  vec<basic_block> bbs_to_fix_dom = vNULL;

  tree index_type = TREE_TYPE (index_expr);
  tree unsigned_index_type = unsigned_type_for (index_type);
  unsigned int branch_num = gimple_switch_num_labels (swtch);

  gimple_stmt_iterator gsi;
  gimple shift_stmt;

  tree idx, tmp, csui;
  tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
  tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
  tree word_mode_one = fold_convert (word_type_node, integer_one_node);

  memset (&test, 0, sizeof (test));

  /* Get the edge for the default case.  */
  tmp = gimple_switch_default_label (swtch);
  default_bb = label_to_block (CASE_LABEL (tmp));
  default_edge = find_edge (switch_bb, default_bb);

  /* Go through all case labels, and collect the case labels, profile
     counts, and other information we need to build the branch tests.  */
  count = 0;
  for (i = 1; i < branch_num; i++)
    {
      unsigned int lo, hi;
      tree cs = gimple_switch_label (swtch, i);
      tree label = CASE_LABEL (cs);
      edge e = find_edge (switch_bb, label_to_block (label));
      for (k = 0; k < count; k++)
	if (e == test[k].target_edge)
	  break;

      if (k == count)
	{
	  gcc_checking_assert (count < MAX_CASE_BIT_TESTS);
	  test[k].hi = 0;
	  test[k].lo = 0;
	  test[k].target_edge = e;
	  test[k].label = label;
	  test[k].bits = 1;
	  count++;
	}
      else
        test[k].bits++;

      lo = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					  CASE_LOW (cs), minval));
      if (CASE_HIGH (cs) == NULL_TREE)
	hi = lo;
      else
	hi = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					    CASE_HIGH (cs), minval));

      for (j = lo; j <= hi; j++)
        if (j >= HOST_BITS_PER_WIDE_INT)
	  test[k].hi |= (HOST_WIDE_INT) 1 << (j - HOST_BITS_PER_WIDE_INT);
	else
	  test[k].lo |= (HOST_WIDE_INT) 1 << j;
    }

  qsort (test, count, sizeof (*test), case_bit_test_cmp);

  /* We generate two jumps to the default case label.
     Split the default edge, so that we don't have to do any PHI node
     updating.  */
  new_default_bb = split_edge (default_edge);

  if (update_dom)
    {
      bbs_to_fix_dom.create (10);
      bbs_to_fix_dom.quick_push (switch_bb);
      bbs_to_fix_dom.quick_push (default_bb);
      bbs_to_fix_dom.quick_push (new_default_bb);
    }

  /* Now build the test-and-branch code.  */

  gsi = gsi_last_bb (switch_bb);

  /* idx = (unsigned)x - minval.  */
  idx = fold_convert (unsigned_index_type, index_expr);
  idx = fold_build2 (MINUS_EXPR, unsigned_index_type, idx,
		     fold_convert (unsigned_index_type, minval));
  idx = force_gimple_operand_gsi (&gsi, idx,
				  /*simple=*/true, NULL_TREE,
				  /*before=*/true, GSI_SAME_STMT);

  /* if (idx > range) goto default */
  range = force_gimple_operand_gsi (&gsi,
				    fold_convert (unsigned_index_type, range),
				    /*simple=*/true, NULL_TREE,
				    /*before=*/true, GSI_SAME_STMT);
  tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
  new_bb = hoist_edge_and_branch_if_true (&gsi, tmp, default_edge, update_dom);
  if (update_dom)
    bbs_to_fix_dom.quick_push (new_bb);
  gcc_assert (gimple_bb (swtch) == new_bb);
  gsi = gsi_last_bb (new_bb);

  /* Any blocks dominated by the GIMPLE_SWITCH, but that are not successors
     of NEW_BB, are still immediately dominated by SWITCH_BB.  Make it so.  */
  if (update_dom)
    {
      vec<basic_block> dom_bbs;
      basic_block dom_son;

      dom_bbs = get_dominated_by (CDI_DOMINATORS, new_bb);
      FOR_EACH_VEC_ELT (dom_bbs, i, dom_son)
	{
	  edge e = find_edge (new_bb, dom_son);
	  if (e && single_pred_p (e->dest))
	    continue;
	  set_immediate_dominator (CDI_DOMINATORS, dom_son, switch_bb);
	  bbs_to_fix_dom.safe_push (dom_son);
	}
      dom_bbs.release ();
    }
Example #17
static bool
generate_builtin (struct loop *loop, bitmap partition, bool copy_p)
{
  bool res = false;
  unsigned i, x = 0;
  basic_block *bbs;
  gimple write = NULL;
  tree op0, op1;
  gimple_stmt_iterator bsi;
  tree nb_iter = number_of_exit_cond_executions (loop);

  if (!nb_iter || nb_iter == chrec_dont_know)
    return false;

  bbs = get_loop_body_in_dom_order (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	x++;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  gimple stmt = gsi_stmt (bsi);

	  if (bitmap_bit_p (partition, x++)
	      && is_gimple_assign (stmt)
	      && !is_gimple_reg (gimple_assign_lhs (stmt)))
	    {
	      /* Don't generate the builtins when there is more than
		 one memory write.  */
	      if (write != NULL)
		goto end;

	      write = stmt;
	    }
	}
    }

  if (!write)
    goto end;

  op0 = gimple_assign_lhs (write);
  op1 = gimple_assign_rhs1 (write);

  if (!(TREE_CODE (op0) == ARRAY_REF
	|| TREE_CODE (op0) == INDIRECT_REF))
    goto end;

  /* The new statements will be placed before LOOP.  */
  bsi = gsi_last_bb (loop_preheader_edge (loop)->src);

  if (gimple_assign_rhs_code (write) == INTEGER_CST
      && (integer_zerop (op1) || real_zerop (op1)))
    res = generate_memset_zero (write, op0, nb_iter, bsi);

  /* If this is the last partition for which we generate code, we have
     to destroy the loop.  */
  if (res && !copy_p)
    {
      unsigned nbbs = loop->num_nodes;
      basic_block src = loop_preheader_edge (loop)->src;
      basic_block dest = single_exit (loop)->dest;
      prop_phis (dest);
      make_edge (src, dest, EDGE_FALLTHRU);
      cancel_loop_tree (loop);

      for (i = 0; i < nbbs; i++)
	delete_basic_block (bbs[i]);

      set_immediate_dominator (CDI_DOMINATORS, dest,
			       recompute_dominator (CDI_DOMINATORS, dest));
    }

 end:
  free (bbs);
  return res;
}
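
For the builtin replacement to fire here, the partition must contain exactly one memory write whose right-hand side is a zero constant; at the source level the canonical shape is roughly:

  for (i = 0; i < n; i++)
    a[i] = 0;	/* single store of an integer/real zero */

which generate_memset_zero then turns into the equivalent of a memset over the accessed region, emitted before the loop.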
Example #18
unsigned int
execute_fixup_cfg (void)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  int todo = gimple_in_ssa_p (cfun) ? TODO_verify_ssa : 0;
  gcov_type count_scale;
  edge e;
  edge_iterator ei;

  count_scale
      = GCOV_COMPUTE_SCALE (cgraph_get_node (current_function_decl)->count,
                            ENTRY_BLOCK_PTR->count);

  ENTRY_BLOCK_PTR->count = cgraph_get_node (current_function_decl)->count;
  EXIT_BLOCK_PTR->count = apply_scale (EXIT_BLOCK_PTR->count,
                                       count_scale);

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
    e->count = apply_scale (e->count, count_scale);

  FOR_EACH_BB (bb)
    {
      bb->count = apply_scale (bb->count, count_scale);
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree decl = is_gimple_call (stmt)
		      ? gimple_call_fndecl (stmt)
		      : NULL;
	  if (decl)
	    {
	      int flags = gimple_call_flags (stmt);
	      if (flags & (ECF_CONST | ECF_PURE | ECF_LOOPING_CONST_OR_PURE))
		{
		  if (gimple_purge_dead_abnormal_call_edges (bb))
		    todo |= TODO_cleanup_cfg;

		  if (gimple_in_ssa_p (cfun))
		    {
		      todo |= TODO_update_ssa | TODO_cleanup_cfg;
		      update_stmt (stmt);
		    }
		}

	      if (flags & ECF_NORETURN
		  && fixup_noreturn_call (stmt))
		todo |= TODO_cleanup_cfg;
	     }

	  if (maybe_clean_eh_stmt (stmt)
	      && gimple_purge_dead_eh_edges (bb))
	    todo |= TODO_cleanup_cfg;
	}

      FOR_EACH_EDGE (e, ei, bb->succs)
        e->count = apply_scale (e->count, count_scale);

      /* If we have a basic block with no successors that does not
	 end with a control statement or a noreturn call end it with
	 a call to __builtin_unreachable.  This situation can occur
	 when inlining a noreturn call that does in fact return.  */
      if (EDGE_COUNT (bb->succs) == 0)
	{
	  gimple stmt = last_stmt (bb);
	  if (!stmt
	      || (!is_ctrl_stmt (stmt)
		  && (!is_gimple_call (stmt)
		      || (gimple_call_flags (stmt) & ECF_NORETURN) == 0)))
	    {
	      stmt = gimple_build_call
		  (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
	      gimple_stmt_iterator gsi = gsi_last_bb (bb);
	      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
	    }
	}
    }
  if (count_scale != REG_BR_PROB_BASE)
    compute_function_frequency ();

  /* We just processed all calls.  */
  if (cfun->gimple_df)
    vec_free (MODIFIED_NORETURN_CALLS (cfun));

  /* Dump a textual representation of the flowgraph.  */
  if (dump_file)
    gimple_dump_cfg (dump_file, dump_flags);

  if (current_loops
      && (todo & TODO_cleanup_cfg))
    loops_state_set (LOOPS_NEED_FIXUP);

  return todo;
}
Example #19
/* Find memcpy, mempcpy, memmove and memset calls, perform
   checks before call and then call no_chk version of
   functions.  We do it on O2 to enable inlining of these
   functions during expand.

   Also try to find memcpy, mempcpy, memmove and memset calls
   which are known to not write pointers to memory and use
   faster function versions for them.  */
static void
chkp_optimize_string_function_calls (void)
{
    basic_block bb;

    if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Searching for replaceable string function calls...\n");

    FOR_EACH_BB_FN (bb, cfun)
    {
        gimple_stmt_iterator i;

        for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
        {
            gimple *stmt = gsi_stmt (i);
            tree fndecl;

            if (gimple_code (stmt) != GIMPLE_CALL
                    || !gimple_call_with_bounds_p (stmt))
                continue;

            fndecl = gimple_call_fndecl (stmt);

            if (!fndecl || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
                continue;

            if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMCPY_CHKP
                    || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMPCPY_CHKP
                    || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMMOVE_CHKP
                    || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMSET_CHKP)
            {
                tree dst = gimple_call_arg (stmt, 0);
                tree dst_bnd = gimple_call_arg (stmt, 1);
                bool is_memset = DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMSET_CHKP;
                tree size = gimple_call_arg (stmt, is_memset ? 3 : 4);
                tree fndecl_nochk;
                gimple_stmt_iterator j;
                basic_block check_bb;
                address_t size_val;
                int sign;
                bool known;

                /* We may replace call with corresponding __chkp_*_nobnd
                   call in case destination pointer base type is not
                   void or pointer.  */
                if (POINTER_TYPE_P (TREE_TYPE (dst))
                        && !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (dst)))
                        && !chkp_type_has_pointer (TREE_TYPE (TREE_TYPE (dst))))
                {
                    tree fndecl_nobnd
                        = chkp_get_nobnd_fndecl (DECL_FUNCTION_CODE (fndecl));

                    if (fndecl_nobnd)
                        fndecl = fndecl_nobnd;
                }

                fndecl_nochk = chkp_get_nochk_fndecl (DECL_FUNCTION_CODE (fndecl));

                if (fndecl_nochk)
                    fndecl = fndecl_nochk;

                if (fndecl != gimple_call_fndecl (stmt))
                {
                    if (dump_file && (dump_flags & TDF_DETAILS))
                    {
                        fprintf (dump_file, "Replacing call: ");
                        print_gimple_stmt (dump_file, stmt, 0,
                                           TDF_VOPS|TDF_MEMSYMS);
                    }

                    gimple_call_set_fndecl (stmt, fndecl);

                    if (dump_file && (dump_flags & TDF_DETAILS))
                    {
                        fprintf (dump_file, "With a new call: ");
                        print_gimple_stmt (dump_file, stmt, 0,
                                           TDF_VOPS|TDF_MEMSYMS);
                    }
                }

                /* If there is no nochk version of function then
                   do nothing.  Otherwise insert checks before
                   the call.  */
                if (!fndecl_nochk)
                    continue;

                /* If size passed to call is known and > 0
                   then we may insert checks unconditionally.  */
                size_val.pol.create (0);
                chkp_collect_value (size, size_val);
                known = chkp_is_constant_addr (size_val, &sign);
                size_val.pol.release ();

                /* If we are not sure size is not zero then we have
                   to perform runtime check for size and perform
                   checks only when size is not zero.  */
                if (!known)
                {
                    gimple *check = gimple_build_cond (NE_EXPR,
                                                       size,
                                                       size_zero_node,
                                                       NULL_TREE,
                                                       NULL_TREE);

                    /* Split block before string function call.  */
                    gsi_prev (&i);
                    check_bb = insert_cond_bb (bb, gsi_stmt (i), check);

                    /* Set position for checks.  */
                    j = gsi_last_bb (check_bb);

                    /* The block was split and therefore we
                       need to set iterator to its end.  */
                    i = gsi_last_bb (bb);
                }
                /* If size is known to be zero then no checks
                   should be performed.  */
                else if (!sign)
                    continue;
                else
                    j = i;

                size = size_binop (MINUS_EXPR, size, size_one_node);
                if (!is_memset)
                {
                    tree src = gimple_call_arg (stmt, 2);
                    tree src_bnd = gimple_call_arg (stmt, 3);

                    chkp_check_mem_access (src, fold_build_pointer_plus (src, size),
                                           src_bnd, j, gimple_location (stmt),
                                           integer_zero_node);
                }

                chkp_check_mem_access (dst, fold_build_pointer_plus (dst, size),
                                       dst_bnd, j, gimple_location (stmt),
                                       integer_one_node);

            }
        }
    }
}
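
The net effect on a checked string call, sketched with illustrative names (the real callees come from chkp_get_nochk_fndecl, and the checks from chkp_check_mem_access):

  /* before:  memcpy.chkp (dst, dst_bnd, src, src_bnd, size);
     after:   if (size != 0)
                {
                  <check src .. src + (size - 1) against src_bnd>
                  <check dst .. dst + (size - 1) against dst_bnd>
                }
              memcpy.nochk (dst, dst_bnd, src, src_bnd, size);
     If size is a known nonzero constant, the checks are emitted
     unconditionally; if it is known to be zero, nothing is checked.  */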
Example #20
static void
mf_build_check_statement_for (tree base, tree limit,
                              gimple_stmt_iterator *instr_gsi,
                              location_t location, tree dirflag)
{
  gimple_stmt_iterator gsi;
  basic_block cond_bb, then_bb, join_bb;
  edge e;
  tree cond, t, u, v;
  tree mf_base;
  tree mf_elem;
  tree mf_limit;
  gimple g;
  gimple_seq seq, stmts;

  /* We first need to split the current basic block, and start altering
     the CFG.  This allows us to insert the statements we're about to
     construct into the right basic blocks.  */

  cond_bb = gimple_bb (gsi_stmt (*instr_gsi));
  gsi = *instr_gsi;
  gsi_prev (&gsi);
  if (! gsi_end_p (gsi))
    e = split_block (cond_bb, gsi_stmt (gsi));
  else
    e = split_block_after_labels (cond_bb);
  cond_bb = e->src;
  join_bb = e->dest;

  /* A recap at this point: join_bb is the basic block at whose head
     is the gimple statement for which this check expression is being
     built.  cond_bb is the (possibly new, synthetic) basic block the
     end of which will contain the cache-lookup code, and a
     conditional that jumps to the cache-miss code or, much more
     likely, over to join_bb.  */

  /* Create the bb that contains the cache-miss fallback block (mf_check).  */
  then_bb = create_empty_bb (cond_bb);
  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
  make_single_succ_edge (then_bb, join_bb, EDGE_FALLTHRU);

  /* Mark the pseudo-fallthrough edge from cond_bb to join_bb.  */
  e = find_edge (cond_bb, join_bb);
  e->flags = EDGE_FALSE_VALUE;
  e->count = cond_bb->count;
  e->probability = REG_BR_PROB_BASE;

  /* Update dominance info.  Note that bb_join's data was
     updated by split_block.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
      set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb);
    }

  /* Update loop info.  */
  if (current_loops)
    add_bb_to_loop (then_bb, cond_bb->loop_father);

  /* Build our local variables.  */
  mf_elem = create_tmp_reg (mf_cache_structptr_type, "__mf_elem");
  mf_base = create_tmp_reg (mf_uintptr_type, "__mf_base");
  mf_limit = create_tmp_reg (mf_uintptr_type, "__mf_limit");

  /* Build: __mf_base = (uintptr_t) <base address expression>.  */
  seq = NULL;
  t = fold_convert_loc (location, mf_uintptr_type,
			unshare_expr (base));
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_base, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build: __mf_limit = (uintptr_t) <limit address expression>.  */
  t = fold_convert_loc (location, mf_uintptr_type,
			unshare_expr (limit));
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_limit, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build: __mf_elem = &__mf_lookup_cache [(__mf_base >> __mf_shift)
                                            & __mf_mask].  */
  t = build2 (RSHIFT_EXPR, mf_uintptr_type, mf_base,
              flag_mudflap_threads ? mf_cache_shift_decl
	       : mf_cache_shift_decl_l);
  t = build2 (BIT_AND_EXPR, mf_uintptr_type, t,
              flag_mudflap_threads ? mf_cache_mask_decl
	       : mf_cache_mask_decl_l);
  t = build4 (ARRAY_REF,
              TREE_TYPE (TREE_TYPE (mf_cache_array_decl)),
              mf_cache_array_decl, t, NULL_TREE, NULL_TREE);
  t = build1 (ADDR_EXPR, mf_cache_structptr_type, t);
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_elem, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Quick validity check.

     if (__mf_elem->low > __mf_base
         || (__mf_elem->high < __mf_limit))
        {
          __mf_check ();
          ... and only if single-threaded:
          __mf_lookup_shift_l = f...;
          __mf_lookup_mask_l = ...;
        }

     It is expected that this body of code is rarely executed so we mark
     the edge to the THEN clause of the conditional jump as unlikely.  */

  /* Construct t <-- '__mf_elem->low  > __mf_base'.  */
  t = build3 (COMPONENT_REF, mf_uintptr_type,
              build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem),
              TYPE_FIELDS (mf_cache_struct_type), NULL_TREE);
  t = build2 (GT_EXPR, boolean_type_node, t, mf_base);

  /* Construct '__mf_elem->high < __mf_limit'.

     First build:
        1) u <--  '__mf_elem->high'
        2) v <--  '__mf_limit'.

     Then build 'u <-- (u < v)'.  */

  u = build3 (COMPONENT_REF, mf_uintptr_type,
              build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem),
              DECL_CHAIN (TYPE_FIELDS (mf_cache_struct_type)), NULL_TREE);

  v = mf_limit;

  u = build2 (LT_EXPR, boolean_type_node, u, v);

  /* Build the composed conditional: t <-- 't || u'.  Then store the
     result of the evaluation of 't' in a temporary variable which we
     can use as the condition for the conditional jump.  */
  t = build2 (TRUTH_OR_EXPR, boolean_type_node, t, u);
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  cond = create_tmp_reg (boolean_type_node, "__mf_unlikely_cond");
  g = gimple_build_assign  (cond, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build the conditional jump.  'cond' is just a temporary so we can
     simply build a void COND_EXPR.  We do need labels in both arms though.  */
  g = gimple_build_cond (NE_EXPR, cond, boolean_false_node, NULL_TREE,
			 NULL_TREE);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* At this point, after so much hard work, we have only constructed
     the conditional jump,

     if (__mf_elem->low > __mf_base
         || (__mf_elem->high < __mf_limit))

     The lowered GIMPLE tree representing this code is in the statement
     list starting at 'head'.

     We can insert this now in the current basic block, i.e. the one that
     the statement we're instrumenting was originally in.  */
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);

  /*  Now build up the body of the cache-miss handling:

     __mf_check();
     refresh *_l vars.

     This is the body of the conditional.  */

  seq = NULL;
  /* u is a string, so it is already a gimple value.  */
  u = mf_file_function_line_tree (location);
  /* NB: we pass the overall [base..limit] range to mf_check.  */
  v = fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type,
		   fold_build2_loc (location,
				MINUS_EXPR, mf_uintptr_type, mf_limit, mf_base),
		   build_int_cst (mf_uintptr_type, 1));
  v = force_gimple_operand (v, &stmts, true, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_call (mf_check_fndecl, 4, mf_base, v, dirflag, u);
  gimple_seq_add_stmt (&seq, g);

  if (! flag_mudflap_threads)
    {
      if (stmt_ends_bb_p (g))
	{
	  gsi = gsi_start_bb (then_bb);
	  gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);
	  e = split_block (then_bb, g);
	  then_bb = e->dest;
	  seq = NULL;
	}

      g = gimple_build_assign (mf_cache_shift_decl_l, mf_cache_shift_decl);
      gimple_seq_add_stmt (&seq, g);

      g = gimple_build_assign (mf_cache_mask_decl_l, mf_cache_mask_decl);
      gimple_seq_add_stmt (&seq, g);
    }

  /* Insert the check code in the THEN block.  */
  gsi = gsi_start_bb (then_bb);
  gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);

  *instr_gsi = gsi_start_bb (join_bb);
}
Example #21
static bool
generate_builtin (struct loop *loop, bitmap partition, bool copy_p)
{
  bool res = false;
  unsigned i, x = 0;
  basic_block *bbs;
  gimple write = NULL;
  gimple_stmt_iterator bsi;
  tree nb_iter = number_of_exit_cond_executions (loop);

  if (!nb_iter || nb_iter == chrec_dont_know)
    return false;

  bbs = get_loop_body_in_dom_order (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	x++;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  gimple stmt = gsi_stmt (bsi);

	  if (gimple_code (stmt) == GIMPLE_LABEL
	      || is_gimple_debug (stmt))
	    continue;

	  if (!bitmap_bit_p (partition, x++))
	    continue;

	  /* If the stmt has uses outside of the loop fail.  */
	  if (stmt_has_scalar_dependences_outside_loop (stmt))
	    goto end;

	  if (is_gimple_assign (stmt)
	      && !is_gimple_reg (gimple_assign_lhs (stmt)))
	    {
	      /* Don't generate the builtins when there is more than
		 one memory write.  */
	      if (write != NULL)
		goto end;

	      write = stmt;
	      if (bb == loop->latch)
		nb_iter = number_of_latch_executions (loop);
	    }
	}
    }

  if (!stmt_with_adjacent_zero_store_dr_p (write))
    goto end;

  /* The new statements will be placed before LOOP.  */
  bsi = gsi_last_bb (loop_preheader_edge (loop)->src);
  generate_memset_zero (write, gimple_assign_lhs (write), nb_iter, bsi);
  res = true;

  /* If this is the last partition for which we generate code, we have
     to destroy the loop.  */
  if (!copy_p)
    {
      unsigned nbbs = loop->num_nodes;
      edge exit = single_exit (loop);
      basic_block src = loop_preheader_edge (loop)->src, dest = exit->dest;
      redirect_edge_pred (exit, src);
      exit->flags &= ~(EDGE_TRUE_VALUE|EDGE_FALSE_VALUE);
      exit->flags |= EDGE_FALLTHRU;
      cancel_loop_tree (loop);
      rescan_loop_exit (exit, false, true);

      for (i = 0; i < nbbs; i++)
	delete_basic_block (bbs[i]);

      set_immediate_dominator (CDI_DOMINATORS, dest,
			       recompute_dominator (CDI_DOMINATORS, dest));
    }

 end:
  free (bbs);
  return res;
}