static bool
forward_propagate_addr_expr (tree name, tree rhs)
{
  int stmt_loop_depth = gimple_bb (SSA_NAME_DEF_STMT (name))->loop_depth;
  imm_use_iterator iter;
  gimple use_stmt;
  bool all = true;
  bool single_use_p = has_single_use (name);

  FOR_EACH_IMM_USE_STMT (use_stmt, iter, name)
    {
      bool result;
      tree use_rhs;

      /* If the use is not in a simple assignment statement, then
	 there is nothing we can do.  */
      if (gimple_code (use_stmt) != GIMPLE_ASSIGN)
	{
	  if (!is_gimple_debug (use_stmt))
	    all = false;
	  continue;
	}

      /* If the use is in a deeper loop nest, then we do not want
	 to propagate the ADDR_EXPR into the loop as that is likely
	 adding expression evaluations into the loop.  */
      if (gimple_bb (use_stmt)->loop_depth > stmt_loop_depth)
	{
	  all = false;
	  continue;
	}

      {
	gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
	result = forward_propagate_addr_expr_1 (name, rhs, &gsi,
						single_use_p);
	/* If the use has moved to a different statement adjust
	   the update machinery for the old statement too.  */
	if (use_stmt != gsi_stmt (gsi))
	  {
	    update_stmt (use_stmt);
	    use_stmt = gsi_stmt (gsi);
	  }

	update_stmt (use_stmt);
      }
      all &= result;

      /* Remove intermediate now unused copy and conversion chains.  */
      use_rhs = gimple_assign_rhs1 (use_stmt);
      if (result
	  && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
	  && TREE_CODE (use_rhs) == SSA_NAME
	  && has_zero_uses (gimple_assign_lhs (use_stmt)))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
	  release_defs (use_stmt);
	  gsi_remove (&gsi, true);
	}
    }

  return all;
}
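For orientation, a minimal C-level sketch of the forwarding this driver performs (the functions and names below are hypothetical, not from the source above): the ADDR_EXPR defining the pointer is folded into its dereference, after which the now-dead copy chain is removed, as in the loop above.

struct s { int b; };

/* Hypothetical input: the address computation has a single use.  */
int
before (struct s *a)
{
  int *p = &a->b;	/* p_1 = &a_2->b  (the ADDR_EXPR rhs)  */
  return *p;		/* x_3 = *p_1     (the use statement)   */
}

/* Hypothetical result after propagation: p_1 has zero uses and its
   definition is deleted.  */
int
after (struct s *a)
{
  return a->b;		/* x_3 = a_2->b */
}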
Example #2
static bool
remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* If statement is known to be undefined after peeling, turn it
	 into unreachable (or trap when debugging experience is supposed
	 to be good).  */
      if (!elt->is_exit
	  && wi::ltu_p (elt->bound, npeeled))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
	  gcall *stmt = gimple_build_call
	      (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
	  gimple_set_location (stmt, gimple_location (elt->stmt));
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	  split_block (gimple_bb (stmt), stmt);
	  changed = true;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Forced statement unreachable: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	}
      /* If we know the exit will be taken after peeling, update.  */
      else if (elt->is_exit
	       && wi::leu_p (elt->bound, npeeled))
	{
	  basic_block bb = gimple_bb (elt->stmt);
	  edge exit_edge = EDGE_SUCC (bb, 0);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Forced exit to be taken: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	  if (!loop_exit_edge_p (loop, exit_edge))
	    exit_edge = EDGE_SUCC (bb, 1);
	  gcc_checking_assert (loop_exit_edge_p (loop, exit_edge));
	  gcond *cond_stmt = as_a <gcond *> (elt->stmt);
	  if (exit_edge->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_true (cond_stmt);
	  else
	    gimple_cond_make_false (cond_stmt);
	  update_stmt (cond_stmt);
	  changed = true;
	}
    }
  return changed;
}
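As a hedged source-level sketch of the two cases handled above (the loop and names are hypothetical): once npeeled copies of the body have been peeled, a statement bounded to fewer executions is forced unreachable, and an exit bounded to trigger within the peeled copies is rewritten into an unconditional condition.

/* Hypothetical: with npeeled == 4, every execution of the body happens
   in a peeled copy.  In the remaining loop, "a[i] = 0" (bound 4) is
   replaced by __builtin_unreachable (), and the "i == 3" exit test
   is forced to its taken value.  */
void
g (int a[4])
{
  for (int i = 0;; i++)
    {
      a[i] = 0;		/* undefined past 4 executions */
      if (i == 3)	/* exit taken by the 4th iteration */
	break;
    }
}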
Example #3
/* Check whether G is a potential conditional compare candidate.  */
static bool
ccmp_candidate_p (gimple *g)
{
  tree rhs = gimple_assign_rhs_to_tree (g);
  tree lhs, op0, op1;
  gimple *gs0, *gs1;
  enum tree_code tcode, tcode0, tcode1;
  tcode = TREE_CODE (rhs);

  if (tcode != BIT_AND_EXPR && tcode != BIT_IOR_EXPR)
    return false;

  lhs = gimple_assign_lhs (g);
  op0 = TREE_OPERAND (rhs, 0);
  op1 = TREE_OPERAND (rhs, 1);

  if ((TREE_CODE (op0) != SSA_NAME) || (TREE_CODE (op1) != SSA_NAME)
      || !has_single_use (lhs))
    return false;

  gs0 = get_gimple_for_ssa_name (op0);
  gs1 = get_gimple_for_ssa_name (op1);
  if (!gs0 || !gs1 || !is_gimple_assign (gs0) || !is_gimple_assign (gs1)
      /* g, gs0 and gs1 must be in the same basic block, since the current
	 stage is out-of-ssa.  We cannot guarantee correctness when
	 forwarding gs0 and gs1 into g without dataflow analysis.  */
      || gimple_bb (gs0) != gimple_bb (gs1)
      || gimple_bb (gs0) != gimple_bb (g))
    return false;

  if (!(INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (gs0)))
       || POINTER_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (gs0))))
      || !(INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (gs1)))
	   || POINTER_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (gs1)))))
    return false;

  tcode0 = gimple_assign_rhs_code (gs0);
  tcode1 = gimple_assign_rhs_code (gs1);
  if (TREE_CODE_CLASS (tcode0) == tcc_comparison
      && TREE_CODE_CLASS (tcode1) == tcc_comparison)
    return true;
  if (TREE_CODE_CLASS (tcode0) == tcc_comparison
      && ccmp_candidate_p (gs1))
    return true;
  else if (TREE_CODE_CLASS (tcode1) == tcc_comparison
	   && ccmp_candidate_p (gs0))
    return true;
  /* We skip ccmp_candidate_p (gs1) && ccmp_candidate_p (gs0) since
     there is no way to set the CC flag.  */
  return false;
}
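To make the accepted shape concrete, a short hedged example (hypothetical source): using the non-short-circuit `&` keeps both comparisons in one basic block, so the gimplified form is a BIT_AND_EXPR of two tcc_comparison definitions, which is exactly what the predicate looks for.

/* Hypothetical candidate: gimplifies to
     x_1 = a < b;  y_2 = c > d;  z_3 = x_1 & y_2;
   all in the same basic block.  "&&" could instead introduce control
   flow and disqualify the pattern.  */
int
candidate (int a, int b, int c, int d)
{
  return (a < b) & (c > d);
}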
void
add_phi_arg (gimple phi, tree def, edge e)
{
  basic_block bb = e->dest;

  gcc_assert (bb == gimple_bb (phi));

  /* We resize PHI nodes upon edge creation.  We should always have
     enough room at this point.  */
  gcc_assert (gimple_phi_num_args (phi) <= gimple_phi_capacity (phi));

  /* We resize PHI nodes upon edge creation.  We should always have
     enough room at this point.  */
  gcc_assert (e->dest_idx < gimple_phi_num_args (phi));

  /* Copy propagation needs to know what objects occur in abnormal
     PHI nodes.  This is a convenient place to record such information.  */
  if (e->flags & EDGE_ABNORMAL)
    {
      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def) = 1;
      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (PHI_RESULT (phi)) = 1;
    }

  SET_PHI_ARG_DEF (phi, e->dest_idx, def);
}
Example #5
static bool
sese_bad_liveouts_use (sese region, bitmap liveouts, basic_block bb,
		       tree use)
{
  unsigned ver;
  basic_block def_bb;

  if (TREE_CODE (use) != SSA_NAME)
    return false;

  ver = SSA_NAME_VERSION (use);

  /* If it's in liveouts, the variable will get a new PHI node, and
     the debug use will be properly adjusted.  */
  if (bitmap_bit_p (liveouts, ver))
    return false;

  def_bb = gimple_bb (SSA_NAME_DEF_STMT (use));

  if (!def_bb
      || !bb_in_sese_p (def_bb, region)
      || bb_in_sese_p (bb, region))
    return false;

  return true;
}
static void
remove_duplicate_close_phi (gphi *phi, gphi_iterator *gsi)
{
  gimple *use_stmt;
  use_operand_p use_p;
  imm_use_iterator imm_iter;
  tree res = gimple_phi_result (phi);
  tree def = gimple_phi_result (gsi->phi ());

  gcc_assert (same_close_phi_node (phi, gsi->phi ()));

  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
    {
      FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
	SET_USE (use_p, res);

      update_stmt (use_stmt);

      /* It is possible that we just created a duplicate close-phi
	 for an already-processed containing loop.  Check for this
	 case and clean it up.  */
      if (gimple_code (use_stmt) == GIMPLE_PHI
	  && gimple_phi_num_args (use_stmt) == 1)
	make_close_phi_nodes_unique (gimple_bb (use_stmt));
    }

  remove_phi_node (gsi, true);
}
void
backprop::optimize_phi (gphi *phi, tree var, const usage_info *info)
{
  /* If the sign of the result doesn't matter, try to strip sign operations
     from arguments.  */
  if (info->flags.ignore_sign)
    {
      basic_block bb = gimple_bb (phi);
      use_operand_p use;
      ssa_op_iter oi;
      bool replaced = false;
      FOR_EACH_PHI_ARG (use, phi, oi, SSA_OP_USE)
	{
	  /* Propagating along abnormal edges is delicate, punt for now.  */
	  const int index = PHI_ARG_INDEX_FROM_USE (use);
	  if (EDGE_PRED (bb, index)->flags & EDGE_ABNORMAL)
	    continue;

	  tree new_arg = strip_sign_op (USE_FROM_PTR (use));
	  if (new_arg)
	    {
	      if (!replaced)
		prepare_change (var);
	      if (dump_file && (dump_flags & TDF_DETAILS))
		note_replacement (phi, USE_FROM_PTR (use), new_arg);
	      replace_exp (use, new_arg);
	      replaced = true;
	    }
	}
    }
}
static tree
tree_may_unswitch_on (basic_block bb, struct loop *loop)
{
  gimple stmt, def;
  tree cond, use;
  basic_block def_bb;
  ssa_op_iter iter;

  /* BB must end in a simple conditional jump.  */
  stmt = last_stmt (bb);
  if (!stmt || gimple_code (stmt) != GIMPLE_COND)
    return NULL_TREE;

  /* To keep things simple, we do not directly remove the conditions,
     but just replace tests with 0 != 0 resp. 1 != 0.  Prevent the infinite
     loop where we would unswitch again on such a condition.  */
  if (gimple_cond_true_p (stmt) || gimple_cond_false_p (stmt))
    return NULL_TREE;

  /* Condition must be invariant.  */
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      def = SSA_NAME_DEF_STMT (use);
      def_bb = gimple_bb (def);
      if (def_bb
	  && flow_bb_inside_loop_p (loop, def_bb))
	return NULL_TREE;
    }

  cond = build2 (gimple_cond_code (stmt), boolean_type_node,
		 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  return cond;
}
Example #9
static enum ssa_prop_result
copy_prop_visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
{
  enum ssa_prop_result retval = SSA_PROP_VARYING;
  location_t loc = gimple_location (stmt);

  tree op0 = valueize_val (gimple_cond_lhs (stmt));
  tree op1 = valueize_val (gimple_cond_rhs (stmt));

  /* See if we can determine the predicate's value.  */
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Trying to determine truth value of ");
      fprintf (dump_file, "predicate ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  /* Fold COND and see whether we get a useful result.  */
  tree folded_cond = fold_binary_loc (loc, gimple_cond_code (stmt),
				      boolean_type_node, op0, op1);
  if (folded_cond)
    {
      basic_block bb = gimple_bb (stmt);
      *taken_edge_p = find_taken_edge (bb, folded_cond);
      if (*taken_edge_p)
	retval = SSA_PROP_INTERESTING;
    }

  if (dump_file && (dump_flags & TDF_DETAILS) && *taken_edge_p)
    fprintf (dump_file, "\nConditional will always take edge %d->%d\n",
	     (*taken_edge_p)->src->index, (*taken_edge_p)->dest->index);

  return retval;
}
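A hedged sketch of a case this resolves (hypothetical source): after copy propagation valueizes both operands to the same name, the comparison folds to a constant and one outgoing edge becomes the known taken edge.

/* Hypothetical: b_2 is a copy of a_1, so "a != b" valueizes to
   "a_1 != a_1", folds to false, and the else edge is returned in
   *taken_edge_p with SSA_PROP_INTERESTING.  */
int
f (int a)
{
  int b = a;
  if (a != b)
    return 1;	/* provably never reached */
  return 0;
}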
Example #10
void
insert_debug_temp_for_var_def (gimple stmt)
{
  gimple def_stmt = ((void *) 0);
  int usecount = 0;
  tree value = ((void *) 0);
  for (; arf ();)
    {
      if (!gimple_debug_bind_p (stmt))
        continue;
      if (usecount++)
        break;
      unsigned char no_value = 0;
      if (!gimple_bb (def_stmt))
        no_value = 1;
      if (!no_value)
        value = gimple_assign_rhs_to_tree ();
    }
  if (value)
    {
      if ((tree_code_type[(int) (((value)->code))] == 42)
          || (usecount == 1 && (is_gimple_min_invariant (value))))
        value = unshare_expr (value);
    }
}
static void
tidy_after_forward_propagate_addr (gimple stmt)
{
  /* We may have turned a trapping insn into a non-trapping insn.  */
  if (maybe_clean_or_replace_eh_stmt (stmt, stmt)
      && gimple_purge_dead_eh_edges (gimple_bb (stmt)))
    cfg_changed = true;

  if (TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR)
     recompute_tree_invariant_for_addr_expr (gimple_assign_rhs1 (stmt));
}
Example #12
gphi_iterator
gsi_for_phi (gphi *phi)
{
  gphi_iterator i;
  basic_block bb = gimple_bb (phi);

  i = gsi_start_phis (bb);
  i.ptr = phi;

  return i;
}
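A brief hedged usage sketch: because the iterator is built by pointing straight at the PHI, construction is constant-time (contrast the scanning gsi_for_stmt variant in a later example). The removal call below is purely illustrative.

/* Hypothetical: position on a known PHI and remove it in O(1).  */
gphi_iterator psi = gsi_for_phi (phi);
remove_phi_node (&psi, /*release_lhs_p=*/true);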
static void
insert_trap_and_remove_trailing_statements (gimple_stmt_iterator *si_p, tree op)
{
  /* We want the NULL pointer dereference to actually occur so that
     code that wishes to catch the signal can do so.

     If the dereference is a load, then there's nothing to do as the
     LHS will be a throw-away SSA_NAME and the RHS is the NULL dereference.

     If the dereference is a store and we can easily transform the RHS,
     then simplify the RHS to enable more DCE.   Note that we require the
     statement to be a GIMPLE_ASSIGN which filters out calls on the RHS.  */
  gimple stmt = gsi_stmt (*si_p);
  if (walk_stmt_load_store_ops (stmt, (void *)op, NULL, check_loadstore)
      && is_gimple_assign (stmt)
      && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt))))
    {
      /* We just need to turn the RHS into zero converted to the proper
         type.  */
      tree type = TREE_TYPE (gimple_assign_lhs (stmt));
      gimple_assign_set_rhs_code (stmt, INTEGER_CST);
      gimple_assign_set_rhs1 (stmt, fold_convert (type, integer_zero_node));
      update_stmt (stmt);
    }

  gimple new_stmt
    = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
  gimple_seq seq = NULL;
  gimple_seq_add_stmt (&seq, new_stmt);

  /* If we had a NULL pointer dereference, then we want to insert the
     __builtin_trap after the statement, for the other cases we want
     to insert before the statement.  */
  if (walk_stmt_load_store_ops (stmt, (void *)op,
			        check_loadstore,
				check_loadstore))
    gsi_insert_after (si_p, seq, GSI_NEW_STMT);
  else
    gsi_insert_before (si_p, seq, GSI_NEW_STMT);

  /* We must remove statements from the end of the block so that we
     never reference a released SSA_NAME.  */
  basic_block bb = gimple_bb (gsi_stmt (*si_p));
  for (gimple_stmt_iterator si = gsi_last_bb (bb);
       gsi_stmt (si) != gsi_stmt (*si_p);
       si = gsi_last_bb (bb))
    {
      stmt = gsi_stmt (si);
      unlink_stmt_vdef (stmt);
      gsi_remove (&si, true);
      release_defs (stmt);
    }
}
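A hedged C-level illustration of the effect (the input is hypothetical): for a store through a NULL pointer, the RHS is simplified to zero, a trap is inserted after the faulting statement, and the rest of the block is removed.

/* Hypothetical before/after of the isolation transform.  */
void
before (int *p, int x)	/* assume p is known NULL here */
{
  *p = x + 1;		/* faulting store kept so the signal fires */
  x = x * 2;		/* trailing statements ... */
}

void
after (int *p, int x)
{
  *p = 0;		/* RHS turned into zero to enable more DCE */
  __builtin_trap ();	/* inserted after the NULL-dereference store */
  /* trailing statements removed from the block */
}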
Example #14
gphi_iterator
gsi_start_phis (basic_block bb)
{
  gimple_seq *pseq = phi_nodes_ptr (bb);

  /* Adapted from gsi_start_1. */
  gphi_iterator i;

  i.ptr = gimple_seq_first (*pseq);
  i.seq = pseq;
  i.bb = i.ptr ? gimple_bb (i.ptr) : NULL;

  return i;
}
Example #15
static bool
remove_redundant_iv_tests (struct loop *loop)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  if (!loop->any_upper_bound)
    return false;
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* Exit is pointless if it won't be taken before loop reaches
	 upper bound.  */
      if (elt->is_exit && loop->any_upper_bound
          && wi::ltu_p (loop->nb_iterations_upper_bound, elt->bound))
	{
	  basic_block bb = gimple_bb (elt->stmt);
	  edge exit_edge = EDGE_SUCC (bb, 0);
	  struct tree_niter_desc niter;

	  if (!loop_exit_edge_p (loop, exit_edge))
	    exit_edge = EDGE_SUCC (bb, 1);

	  /* Only when we know the actual number of iterations, not
	     just a bound, can we remove the exit.  */
	  if (!number_of_iterations_exit (loop, exit_edge,
					  &niter, false, false)
	      || !integer_onep (niter.assumptions)
	      || !integer_zerop (niter.may_be_zero)
	      || !niter.niter
	      || TREE_CODE (niter.niter) != INTEGER_CST
	      || !wi::ltu_p (loop->nb_iterations_upper_bound,
			     wi::to_widest (niter.niter)))
	    continue;
	  
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Removed pointless exit: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	  gcond *cond_stmt = as_a <gcond *> (elt->stmt);
	  if (exit_edge->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_false (cond_stmt);
	  else
	    gimple_cond_make_true (cond_stmt);
	  update_stmt (cond_stmt);
	  changed = true;
	}
    }
  return changed;
}
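A hedged sketch of a redundant test (hypothetical loop): the primary exit gives an exact iteration count of 8, so an exit that could only fire at iteration 100 is never taken first, and its condition is folded to a constant.

/* Hypothetical: nb_iterations_upper_bound is 8, so the "i == 100"
   exit is pointless and its condition is forced false.  */
void
h (int a[8])
{
  for (int i = 0; i < 8; i++)
    {
      if (i == 100)	/* redundant IV test */
	return;
      a[i] = i;
    }
}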
Example #16
gimple_stmt_iterator
gsi_for_stmt (gimple *stmt)
{
  gimple_stmt_iterator i;
  basic_block bb = gimple_bb (stmt);

  if (gimple_code (stmt) == GIMPLE_PHI)
    i = gsi_start_phis (bb);
  else
    i = gsi_start_bb (bb);

  i.ptr = stmt;
  return i;
}
Example #17
static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (use_stmt);
      update_stmt (use_stmt);
    }
}
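For context, a hedged sketch of the reciprocal CSE this helper completes (hypothetical source, and valid only under unsafe floating-point optimizations such as -ffast-math): a shared 1/d replaces repeated divisions, and each division site is rewritten into a multiplication exactly as the SET_USE/MULT_EXPR rewrite above does.

double
before (double x, double y, double d)
{
  return x / d + y / d;		/* two RDIV_EXPRs by the same d */
}

double
after (double x, double y, double d)
{
  double recip = 1.0 / d;	/* the occ->recip_def definition */
  return x * recip + y * recip;	/* divisions rewritten to MULT_EXPR */
}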
Example #18
static enum ssa_prop_result
copy_prop_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
{
  enum ssa_prop_result retval = SSA_PROP_VARYING;
  location_t loc = gimple_location (stmt);

  tree op0 = gimple_cond_lhs (stmt);
  tree op1 = gimple_cond_rhs (stmt);

  /* The only conditionals that we may be able to compute statically
     are predicates involving two SSA_NAMEs.  */
  if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
    {
      op0 = valueize_val (op0);
      op1 = valueize_val (op1);

      /* See if we can determine the predicate's value.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Trying to determine truth value of ");
	  fprintf (dump_file, "predicate ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	}

      /* We can fold COND and get a useful result only when we have
	 the same SSA_NAME on both sides of a comparison operator.  */
      if (op0 == op1)
	{
	  tree folded_cond = fold_binary_loc (loc, gimple_cond_code (stmt),
                                          boolean_type_node, op0, op1);
	  if (folded_cond)
	    {
	      basic_block bb = gimple_bb (stmt);
	      *taken_edge_p = find_taken_edge (bb, folded_cond);
	      if (*taken_edge_p)
		retval = SSA_PROP_INTERESTING;
	    }
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS) && *taken_edge_p)
    fprintf (dump_file, "\nConditional will always take edge %d->%d\n",
	     (*taken_edge_p)->src->index, (*taken_edge_p)->dest->index);

  return retval;
}
Example #19
gimple_stmt_iterator
gsi_for_stmt (gimple stmt)
{
  gimple_stmt_iterator i;
  basic_block bb = gimple_bb (stmt);

  if (gimple_code (stmt) == GIMPLE_PHI)
    i = gsi_start_phis (bb);
  else
    i = gsi_start_bb (bb);

  for (; !gsi_end_p (i); gsi_next (&i))
    if (gsi_stmt (i) == stmt)
      return i;

  gcc_unreachable ();
}
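A hedged note on the two gsi_for_stmt variants in this collection: the version in Example #16 points the iterator at the statement directly in constant time, while this older one scans the sequence and asserts the statement is found. Either way the result is used the same way, for instance:

/* Hypothetical: reposition on a known statement, then delete it.  */
gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
gsi_remove (&gsi, true);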
static void
replace_phi_edge_with_variable (basic_block cond_block,
				edge e, gimple phi, tree new_tree)
{
  basic_block bb = gimple_bb (phi);
  basic_block block_to_remove;
  gimple_stmt_iterator gsi;

  /* Change the PHI argument to new.  */
  SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);

  /* Remove the empty basic block.  */
  if (EDGE_SUCC (cond_block, 0)->dest == bb)
    {
      EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 0)->flags
	&= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;

      block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
    }
  else
    {
      EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
      EDGE_SUCC (cond_block, 1)->flags
	&= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
      EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
      EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;

      block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
    }
  delete_basic_block (block_to_remove);

  /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
  gsi = gsi_last_bb (cond_block);
  gsi_remove (&gsi, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
	     cond_block->index,
	     bb->index);
}
Example #21
static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p;
  imm_use_iterator use_iter;
  struct occurrence *occ;
  int count = 0, threshold;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt));
	  count++;
	}
    }
static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
	  && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
	{
	  return gimple_assign_lhs (use_stmt);
	}
    }

  return op;
}
static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
{
	gimple stmt;
	gcall *track_stack;
	cgraph_node_ptr node;
	int frequency;
	basic_block bb;

	// insert call to void pax_track_stack(void)
	stmt = gimple_build_call(track_function_decl, 0);
	track_stack = as_a_gcall(stmt);
	gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);

	// update the cgraph
	bb = gimple_bb(track_stack);
	node = cgraph_get_create_node(track_function_decl);
	gcc_assert(node);
	frequency = compute_call_stmt_bb_frequency(current_function_decl, bb);
	cgraph_create_edge(cgraph_get_node(current_function_decl), node, track_stack, bb->count, frequency, bb->loop_depth);
}
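As a hedged illustration of the plugin's effect (only pax_track_stack comes from the snippet; the function below is hypothetical): every instrumented function gains a call whose cgraph edge is created exactly as above.

/* Conceptual view of a function after instrumentation.  */
extern void pax_track_stack(void);

void instrumented_example(void)
{
	pax_track_stack();	/* inserted call; cgraph edge added */
	/* ... original function body ... */
}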
Example #24
static void
sese_build_liveouts_use (sese region, bitmap liveouts, basic_block bb,
			 tree use)
{
  unsigned ver;
  basic_block def_bb;

  if (TREE_CODE (use) != SSA_NAME)
    return;

  ver = SSA_NAME_VERSION (use);
  def_bb = gimple_bb (SSA_NAME_DEF_STMT (use));

  if (!def_bb
      || !bb_in_sese_p (def_bb, region)
      || bb_in_sese_p (bb, region))
    return;

  bitmap_set_bit (liveouts, ver);
}
Example #25
static basic_block
nearest_common_dominator_of_uses (def_operand_p def_p, bool *debug_stmts)
{
  tree var = DEF_FROM_PTR (def_p);
  bitmap blocks = BITMAP_ALLOC (NULL);
  basic_block commondom;
  unsigned int j;
  bitmap_iterator bi;
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
    {
      gimple *usestmt = USE_STMT (use_p);
      basic_block useblock;

      if (gphi *phi = dyn_cast <gphi *> (usestmt))
	{
	  int idx = PHI_ARG_INDEX_FROM_USE (use_p);

	  useblock = gimple_phi_arg_edge (phi, idx)->src;
	}
      else if (is_gimple_debug (usestmt))
	{
	  *debug_stmts = true;
	  continue;
	}
      else
	{
	  useblock = gimple_bb (usestmt);
	}

      /* Short circuit. Nothing dominates the entry block.  */
      if (useblock == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	{
	  BITMAP_FREE (blocks);
	  return NULL;
	}
      bitmap_set_bit (blocks, useblock->index);
    }

  commondom = BASIC_BLOCK_FOR_FN (cfun, bitmap_first_set_bit (blocks));
  EXECUTE_IF_SET_IN_BITMAP (blocks, 0, j, bi)
    commondom = nearest_common_dominator (CDI_DOMINATORS, commondom,
					  BASIC_BLOCK_FOR_FN (cfun, j));
  BITMAP_FREE (blocks);
  return commondom;
}
static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
{
	gimple stmt;
	gcall *check_alloca;
	tree alloca_size;
	cgraph_node_ptr node;
	int frequency;
	basic_block bb;

	// insert call to void pax_check_alloca(unsigned long size)
	alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
	stmt = gimple_build_call(check_function_decl, 1, alloca_size);
	check_alloca = as_a_gcall(stmt);
	gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);

	// update the cgraph
	bb = gimple_bb(check_alloca);
	node = cgraph_get_create_node(check_function_decl);
	gcc_assert(node);
	frequency = compute_call_stmt_bb_frequency(current_function_decl, bb);
	cgraph_create_edge(cgraph_get_node(current_function_decl), node, check_alloca, bb->count, frequency, bb->loop_depth);
}
Example #27
static void
mf_build_check_statement_for (tree base, tree limit,
                              gimple_stmt_iterator *instr_gsi,
                              location_t location, tree dirflag)
{
  gimple_stmt_iterator gsi;
  basic_block cond_bb, then_bb, join_bb;
  edge e;
  tree cond, t, u, v;
  tree mf_base;
  tree mf_elem;
  tree mf_limit;
  gimple g;
  gimple_seq seq, stmts;

  /* We first need to split the current basic block, and start altering
     the CFG.  This allows us to insert the statements we're about to
     construct into the right basic blocks.  */

  cond_bb = gimple_bb (gsi_stmt (*instr_gsi));
  gsi = *instr_gsi;
  gsi_prev (&gsi);
  if (! gsi_end_p (gsi))
    e = split_block (cond_bb, gsi_stmt (gsi));
  else
    e = split_block_after_labels (cond_bb);
  cond_bb = e->src;
  join_bb = e->dest;

  /* A recap at this point: join_bb is the basic block at whose head
     is the gimple statement for which this check expression is being
     built.  cond_bb is the (possibly new, synthetic) basic block the
     end of which will contain the cache-lookup code, and a
     conditional that jumps to the cache-miss code or, much more
     likely, over to join_bb.  */

  /* Create the bb that contains the cache-miss fallback block (mf_check).  */
  then_bb = create_empty_bb (cond_bb);
  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
  make_single_succ_edge (then_bb, join_bb, EDGE_FALLTHRU);

  /* Mark the pseudo-fallthrough edge from cond_bb to join_bb.  */
  e = find_edge (cond_bb, join_bb);
  e->flags = EDGE_FALSE_VALUE;
  e->count = cond_bb->count;
  e->probability = REG_BR_PROB_BASE;

  /* Update dominance info.  Note that bb_join's data was
     updated by split_block.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
      set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb);
    }

  /* Update loop info.  */
  if (current_loops)
    add_bb_to_loop (then_bb, cond_bb->loop_father);

  /* Build our local variables.  */
  mf_elem = create_tmp_reg (mf_cache_structptr_type, "__mf_elem");
  mf_base = create_tmp_reg (mf_uintptr_type, "__mf_base");
  mf_limit = create_tmp_reg (mf_uintptr_type, "__mf_limit");

  /* Build: __mf_base = (uintptr_t) <base address expression>.  */
  seq = NULL;
  t = fold_convert_loc (location, mf_uintptr_type,
			unshare_expr (base));
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_base, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build: __mf_limit = (uintptr_t) <limit address expression>.  */
  t = fold_convert_loc (location, mf_uintptr_type,
			unshare_expr (limit));
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_limit, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build: __mf_elem = &__mf_lookup_cache [(__mf_base >> __mf_shift)
                                            & __mf_mask].  */
  t = build2 (RSHIFT_EXPR, mf_uintptr_type, mf_base,
              flag_mudflap_threads ? mf_cache_shift_decl
	       : mf_cache_shift_decl_l);
  t = build2 (BIT_AND_EXPR, mf_uintptr_type, t,
              flag_mudflap_threads ? mf_cache_mask_decl
	       : mf_cache_mask_decl_l);
  t = build4 (ARRAY_REF,
              TREE_TYPE (TREE_TYPE (mf_cache_array_decl)),
              mf_cache_array_decl, t, NULL_TREE, NULL_TREE);
  t = build1 (ADDR_EXPR, mf_cache_structptr_type, t);
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_elem, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Quick validity check.

     if (__mf_elem->low > __mf_base
         || (__mf_elem->high < __mf_limit))
        {
          __mf_check ();
          ... and only if single-threaded:
          __mf_lookup_shift_1 = f...;
          __mf_lookup_mask_l = ...;
        }

     It is expected that this body of code is rarely executed so we mark
     the edge to the THEN clause of the conditional jump as unlikely.  */

  /* Construct t <-- '__mf_elem->low  > __mf_base'.  */
  t = build3 (COMPONENT_REF, mf_uintptr_type,
              build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem),
              TYPE_FIELDS (mf_cache_struct_type), NULL_TREE);
  t = build2 (GT_EXPR, boolean_type_node, t, mf_base);

  /* Construct '__mf_elem->high < __mf_limit'.

     First build:
        1) u <--  '__mf_elem->high'
        2) v <--  '__mf_limit'.

     Then build 'u <-- (u < v)'.  */

  u = build3 (COMPONENT_REF, mf_uintptr_type,
              build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem),
              DECL_CHAIN (TYPE_FIELDS (mf_cache_struct_type)), NULL_TREE);

  v = mf_limit;

  u = build2 (LT_EXPR, boolean_type_node, u, v);

  /* Build the composed conditional: t <-- 't || u'.  Then store the
     result of the evaluation of 't' in a temporary variable which we
     can use as the condition for the conditional jump.  */
  t = build2 (TRUTH_OR_EXPR, boolean_type_node, t, u);
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  cond = create_tmp_reg (boolean_type_node, "__mf_unlikely_cond");
  g = gimple_build_assign  (cond, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build the conditional jump.  'cond' is just a temporary so we can
     simply build a void COND_EXPR.  We do need labels in both arms though.  */
  g = gimple_build_cond (NE_EXPR, cond, boolean_false_node, NULL_TREE,
			 NULL_TREE);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* At this point, after so much hard work, we have only constructed
     the conditional jump,

     if (__mf_elem->low > __mf_base
         || (__mf_elem->high < __mf_limit))

     The lowered GIMPLE tree representing this code is in the statement
     list starting at 'head'.

     We can insert this now in the current basic block, i.e. the one that
     the statement we're instrumenting was originally in.  */
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);

  /*  Now build up the body of the cache-miss handling:

     __mf_check();
     refresh *_l vars.

     This is the body of the conditional.  */

  seq = NULL;
  /* u is a string, so it is already a gimple value.  */
  u = mf_file_function_line_tree (location);
  /* NB: we pass the overall [base..limit] range to mf_check.  */
  v = fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type,
		   fold_build2_loc (location,
				MINUS_EXPR, mf_uintptr_type, mf_limit, mf_base),
		   build_int_cst (mf_uintptr_type, 1));
  v = force_gimple_operand (v, &stmts, true, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_call (mf_check_fndecl, 4, mf_base, v, dirflag, u);
  gimple_seq_add_stmt (&seq, g);

  if (! flag_mudflap_threads)
    {
      if (stmt_ends_bb_p (g))
	{
	  gsi = gsi_start_bb (then_bb);
	  gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);
	  e = split_block (then_bb, g);
	  then_bb = e->dest;
	  seq = NULL;
	}

      g = gimple_build_assign (mf_cache_shift_decl_l, mf_cache_shift_decl);
      gimple_seq_add_stmt (&seq, g);

      g = gimple_build_assign (mf_cache_mask_decl_l, mf_cache_mask_decl);
      gimple_seq_add_stmt (&seq, g);
    }

  /* Insert the check code in the THEN block.  */
  gsi = gsi_start_bb (then_bb);
  gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);

  *instr_gsi = gsi_start_bb (join_bb);
}
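Pulling the comments above together, a hedged C sketch of the check sequence this function emits (all declarations below are hypothetical stand-ins for the mudflap runtime):

#include <stdint.h>

struct mf_cache { uintptr_t low, high; };
extern struct mf_cache __mf_lookup_cache[];
extern uintptr_t __mf_shift, __mf_mask;
extern void __mf_check (uintptr_t base, uintptr_t size, int dirflag,
			const char *where);

static void
emitted_check (uintptr_t mf_base, uintptr_t mf_limit, int dirflag)
{
  struct mf_cache *mf_elem
    = &__mf_lookup_cache[(mf_base >> __mf_shift) & __mf_mask];
  /* The unlikely branch: a cache miss falls back to __mf_check with the
     overall [base..limit] range, as in the gimple_build_call above.  */
  if (mf_elem->low > mf_base || mf_elem->high < mf_limit)
    __mf_check (mf_base, mf_limit - mf_base + 1, dirflag, "file:line");
}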
static bool
dse_possible_dead_store_p (gimple stmt, gimple *use_stmt)
{
  gimple temp;
  unsigned cnt = 0;

  *use_stmt = NULL;

  /* Find the first dominated statement that clobbers (part of) the
     memory stmt stores to with no intermediate statement that may use
     part of the memory stmt stores.  That is, find a store that may
     prove stmt to be a dead store.  */
  temp = stmt;
  do
    {
      gimple use_stmt;
      imm_use_iterator ui;
      bool fail = false;
      tree defvar;

      /* Limit stmt walking to be linear in the number of possibly
         dead stores.  */
      if (++cnt > 256)
	return false;

      if (gimple_code (temp) == GIMPLE_PHI)
	defvar = PHI_RESULT (temp);
      else
	defvar = gimple_vdef (temp);
      temp = NULL;
      FOR_EACH_IMM_USE_STMT (use_stmt, ui, defvar)
	{
	  cnt++;

	  /* If we ever reach our DSE candidate stmt again fail.  We
	     cannot handle dead stores in loops.  */
	  if (use_stmt == stmt)
	    {
	      fail = true;
	      BREAK_FROM_IMM_USE_STMT (ui);
	    }
	  /* In simple cases we can look through PHI nodes, but we
	     have to be careful with loops and with memory references
	     containing operands that are also operands of PHI nodes.
	     See gcc.c-torture/execute/20051110-*.c.  */
	  else if (gimple_code (use_stmt) == GIMPLE_PHI)
	    {
	      if (temp
		  /* Make sure we are not in a loop latch block.  */
		  || gimple_bb (stmt) == gimple_bb (use_stmt)
		  || dominated_by_p (CDI_DOMINATORS,
				     gimple_bb (stmt), gimple_bb (use_stmt))
		  /* We can look through PHIs to regions post-dominating
		     the DSE candidate stmt.  */
		  || !dominated_by_p (CDI_POST_DOMINATORS,
				      gimple_bb (stmt), gimple_bb (use_stmt)))
		{
		  fail = true;
		  BREAK_FROM_IMM_USE_STMT (ui);
		}
	      temp = use_stmt;
	    }
	  /* If the statement is a use the store is not dead.  */
	  else if (ref_maybe_used_by_stmt_p (use_stmt,
					     gimple_assign_lhs (stmt)))
	    {
	      fail = true;
	      BREAK_FROM_IMM_USE_STMT (ui);
	    }
	  /* If this is a store, remember it, or bail out if we have
	     multiple ones (they will be in different CFG parts then).  */
	  else if (gimple_vdef (use_stmt))
	    {
	      if (temp)
		{
		  fail = true;
		  BREAK_FROM_IMM_USE_STMT (ui);
		}
	      temp = use_stmt;
	    }
	}

      if (fail)
	return false;

      /* If we didn't find any definition this means the store is dead
         if it isn't a store to globally reachable memory.  In this case
	 just pretend the stmt makes itself dead.  Otherwise fail.  */
      if (!temp)
	{
	  if (is_hidden_global_store (stmt))
	    return false;

	  temp = stmt;
	  break;
	}
    }
  /* Continue walking until we reach a kill.  */
  while (!stmt_kills_ref_p (temp, gimple_assign_lhs (stmt)));

  *use_stmt = temp;

  return true;
}
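A minimal hedged example of the store this walk proves dead (hypothetical code): the second store clobbers the first with no intervening read, so the walk returns it as *use_stmt.

/* Hypothetical: the first store to *p is dead; walking the virtual
   def-use chain finds the clobbering second store and no use.  */
void
k (int *p, int x)
{
  *p = x;	/* dead store candidate */
  *p = x + 1;	/* found as the killing *use_stmt */
}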
Example #29
static void
emit_case_bit_tests (gswitch *swtch, tree index_expr,
		     tree minval, tree range, tree maxval)
{
  struct case_bit_test test[MAX_CASE_BIT_TESTS];
  unsigned int i, j, k;
  unsigned int count;

  basic_block switch_bb = gimple_bb (swtch);
  basic_block default_bb, new_default_bb, new_bb;
  edge default_edge;
  bool update_dom = dom_info_available_p (CDI_DOMINATORS);

  vec<basic_block> bbs_to_fix_dom = vNULL;

  tree index_type = TREE_TYPE (index_expr);
  tree unsigned_index_type = unsigned_type_for (index_type);
  unsigned int branch_num = gimple_switch_num_labels (swtch);

  gimple_stmt_iterator gsi;
  gassign *shift_stmt;

  tree idx, tmp, csui;
  tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
  tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
  tree word_mode_one = fold_convert (word_type_node, integer_one_node);
  int prec = TYPE_PRECISION (word_type_node);
  wide_int wone = wi::one (prec);

  memset (&test, 0, sizeof (test));

  /* Get the edge for the default case.  */
  tmp = gimple_switch_default_label (swtch);
  default_bb = label_to_block (CASE_LABEL (tmp));
  default_edge = find_edge (switch_bb, default_bb);

  /* Go through all case labels, and collect the case labels, profile
     counts, and other information we need to build the branch tests.  */
  count = 0;
  for (i = 1; i < branch_num; i++)
    {
      unsigned int lo, hi;
      tree cs = gimple_switch_label (swtch, i);
      tree label = CASE_LABEL (cs);
      edge e = find_edge (switch_bb, label_to_block (label));
      for (k = 0; k < count; k++)
	if (e == test[k].target_edge)
	  break;

      if (k == count)
	{
	  gcc_checking_assert (count < MAX_CASE_BIT_TESTS);
	  test[k].mask = wi::zero (prec);
	  test[k].target_edge = e;
	  test[k].label = label;
	  test[k].bits = 1;
	  count++;
	}
      else
        test[k].bits++;

      lo = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					  CASE_LOW (cs), minval));
      if (CASE_HIGH (cs) == NULL_TREE)
	hi = lo;
      else
	hi = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					    CASE_HIGH (cs), minval));

      for (j = lo; j <= hi; j++)
	test[k].mask |= wi::lshift (wone, j);
    }

  qsort (test, count, sizeof (*test), case_bit_test_cmp);

  /* If all values are in the 0 .. BITS_PER_WORD-1 range, we can get rid of
     the minval subtractions, but it might make the mask constants more
     expensive.  So, compare the costs.  */
  if (compare_tree_int (minval, 0) > 0
      && compare_tree_int (maxval, GET_MODE_BITSIZE (word_mode)) < 0)
    {
      int cost_diff;
      HOST_WIDE_INT m = tree_to_uhwi (minval);
      rtx reg = gen_raw_REG (word_mode, 10000);
      bool speed_p = optimize_bb_for_speed_p (gimple_bb (swtch));
      cost_diff = set_rtx_cost (gen_rtx_PLUS (word_mode, reg,
					      GEN_INT (-m)), speed_p);
      for (i = 0; i < count; i++)
	{
	  rtx r = immed_wide_int_const (test[i].mask, word_mode);
	  cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     word_mode, speed_p);
	  r = immed_wide_int_const (wi::lshift (test[i].mask, m), word_mode);
	  cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     word_mode, speed_p);
	}
      if (cost_diff > 0)
	{
	  for (i = 0; i < count; i++)
	    test[i].mask = wi::lshift (test[i].mask, m);
	  minval = build_zero_cst (TREE_TYPE (minval));
	  range = maxval;
	}
    }

  /* We generate two jumps to the default case label.
     Split the default edge, so that we don't have to do any PHI node
     updating.  */
  new_default_bb = split_edge (default_edge);

  if (update_dom)
    {
      bbs_to_fix_dom.create (10);
      bbs_to_fix_dom.quick_push (switch_bb);
      bbs_to_fix_dom.quick_push (default_bb);
      bbs_to_fix_dom.quick_push (new_default_bb);
    }

  /* Now build the test-and-branch code.  */

  gsi = gsi_last_bb (switch_bb);

  /* idx = (unsigned)x - minval.  */
  idx = fold_convert (unsigned_index_type, index_expr);
  idx = fold_build2 (MINUS_EXPR, unsigned_index_type, idx,
		     fold_convert (unsigned_index_type, minval));
  idx = force_gimple_operand_gsi (&gsi, idx,
				  /*simple=*/true, NULL_TREE,
				  /*before=*/true, GSI_SAME_STMT);

  /* if (idx > range) goto default */
  range = force_gimple_operand_gsi (&gsi,
				    fold_convert (unsigned_index_type, range),
				    /*simple=*/true, NULL_TREE,
				    /*before=*/true, GSI_SAME_STMT);
  tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
  new_bb = hoist_edge_and_branch_if_true (&gsi, tmp, default_edge, update_dom);
  if (update_dom)
    bbs_to_fix_dom.quick_push (new_bb);
  gcc_assert (gimple_bb (swtch) == new_bb);
  gsi = gsi_last_bb (new_bb);

  /* Any blocks dominated by the GIMPLE_SWITCH, but that are not successors
     of NEW_BB, are still immediately dominated by SWITCH_BB.  Make it so.  */
  if (update_dom)
    {
      vec<basic_block> dom_bbs;
      basic_block dom_son;

      dom_bbs = get_dominated_by (CDI_DOMINATORS, new_bb);
      FOR_EACH_VEC_ELT (dom_bbs, i, dom_son)
	{
	  edge e = find_edge (new_bb, dom_son);
	  if (e && single_pred_p (e->dest))
	    continue;
	  set_immediate_dominator (CDI_DOMINATORS, dom_son, switch_bb);
	  bbs_to_fix_dom.safe_push (dom_son);
	}
      dom_bbs.release ();
    }
static void
emit_case_bit_tests (gimple swtch, tree index_expr,
		     tree minval, tree range)
{
  struct case_bit_test test[MAX_CASE_BIT_TESTS];
  unsigned int i, j, k;
  unsigned int count;

  basic_block switch_bb = gimple_bb (swtch);
  basic_block default_bb, new_default_bb, new_bb;
  edge default_edge;
  bool update_dom = dom_info_available_p (CDI_DOMINATORS);

  vec<basic_block> bbs_to_fix_dom = vNULL;

  tree index_type = TREE_TYPE (index_expr);
  tree unsigned_index_type = unsigned_type_for (index_type);
  unsigned int branch_num = gimple_switch_num_labels (swtch);

  gimple_stmt_iterator gsi;
  gimple shift_stmt;

  tree idx, tmp, csui;
  tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
  tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
  tree word_mode_one = fold_convert (word_type_node, integer_one_node);

  memset (&test, 0, sizeof (test));

  /* Get the edge for the default case.  */
  tmp = gimple_switch_default_label (swtch);
  default_bb = label_to_block (CASE_LABEL (tmp));
  default_edge = find_edge (switch_bb, default_bb);

  /* Go through all case labels, and collect the case labels, profile
     counts, and other information we need to build the branch tests.  */
  count = 0;
  for (i = 1; i < branch_num; i++)
    {
      unsigned int lo, hi;
      tree cs = gimple_switch_label (swtch, i);
      tree label = CASE_LABEL (cs);
      edge e = find_edge (switch_bb, label_to_block (label));
      for (k = 0; k < count; k++)
	if (e == test[k].target_edge)
	  break;

      if (k == count)
	{
	  gcc_checking_assert (count < MAX_CASE_BIT_TESTS);
	  test[k].hi = 0;
	  test[k].lo = 0;
	  test[k].target_edge = e;
	  test[k].label = label;
	  test[k].bits = 1;
	  count++;
	}
      else
        test[k].bits++;

      lo = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					  CASE_LOW (cs), minval));
      if (CASE_HIGH (cs) == NULL_TREE)
	hi = lo;
      else
	hi = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					    CASE_HIGH (cs), minval));

      for (j = lo; j <= hi; j++)
        if (j >= HOST_BITS_PER_WIDE_INT)
	  test[k].hi |= (HOST_WIDE_INT) 1 << (j - HOST_BITS_PER_WIDE_INT);
	else
	  test[k].lo |= (HOST_WIDE_INT) 1 << j;
    }

  qsort (test, count, sizeof (*test), case_bit_test_cmp);

  /* We generate two jumps to the default case label.
     Split the default edge, so that we don't have to do any PHI node
     updating.  */
  new_default_bb = split_edge (default_edge);

  if (update_dom)
    {
      bbs_to_fix_dom.create (10);
      bbs_to_fix_dom.quick_push (switch_bb);
      bbs_to_fix_dom.quick_push (default_bb);
      bbs_to_fix_dom.quick_push (new_default_bb);
    }

  /* Now build the test-and-branch code.  */

  gsi = gsi_last_bb (switch_bb);

  /* idx = (unsigned)x - minval.  */
  idx = fold_convert (unsigned_index_type, index_expr);
  idx = fold_build2 (MINUS_EXPR, unsigned_index_type, idx,
		     fold_convert (unsigned_index_type, minval));
  idx = force_gimple_operand_gsi (&gsi, idx,
				  /*simple=*/true, NULL_TREE,
				  /*before=*/true, GSI_SAME_STMT);

  /* if (idx > range) goto default */
  range = force_gimple_operand_gsi (&gsi,
				    fold_convert (unsigned_index_type, range),
				    /*simple=*/true, NULL_TREE,
				    /*before=*/true, GSI_SAME_STMT);
  tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
  new_bb = hoist_edge_and_branch_if_true (&gsi, tmp, default_edge, update_dom);
  if (update_dom)
    bbs_to_fix_dom.quick_push (new_bb);
  gcc_assert (gimple_bb (swtch) == new_bb);
  gsi = gsi_last_bb (new_bb);

  /* Any blocks dominated by the GIMPLE_SWITCH, but that are not successors
     of NEW_BB, are still immediately dominated by SWITCH_BB.  Make it so.  */
  if (update_dom)
    {
      vec<basic_block> dom_bbs;
      basic_block dom_son;

      dom_bbs = get_dominated_by (CDI_DOMINATORS, new_bb);
      FOR_EACH_VEC_ELT (dom_bbs, i, dom_son)
	{
	  edge e = find_edge (new_bb, dom_son);
	  if (e && single_pred_p (e->dest))
	    continue;
	  set_immediate_dominator (CDI_DOMINATORS, dom_son, switch_bb);
	  bbs_to_fix_dom.safe_push (dom_son);
	}
      dom_bbs.release ();
    }
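To ground the two variants above, a hedged sketch of the source-level lowering they implement (case values hypothetical): the switch becomes one range check plus a word-sized bit-mask test, mirroring the idx/range/mask code built above.

int
before (unsigned c)
{
  switch (c)
    {
    case 'a': case 'e': case 'i': case 'o': case 'u':
      return 1;
    default:
      return 0;
    }
}

int
after (unsigned c)
{
  unsigned idx = c - 'a';		/* idx = (unsigned) x - minval */
  if (idx > 'u' - 'a')			/* if (idx > range) goto default */
    return 0;
  unsigned long mask = (1UL << ('a' - 'a')) | (1UL << ('e' - 'a'))
		       | (1UL << ('i' - 'a')) | (1UL << ('o' - 'a'))
		       | (1UL << ('u' - 'a'));
  return (mask >> idx) & 1;		/* the CSUI bit test */
}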