Example #1
static temp_expr_table_p
new_temp_expr_table (var_map map)
{
  temp_expr_table_p t;
  unsigned x;

  t = XNEW (struct temp_expr_table_d);
  t->map = map;

  t->partition_dependencies = XCNEWVEC (bitmap, num_ssa_names + 1);
  t->expr_decl_uids = XCNEWVEC (bitmap, num_ssa_names + 1);
  t->kill_list = XCNEWVEC (bitmap, num_var_partitions (map) + 1);

  t->partition_in_use = BITMAP_ALLOC (NULL);

  t->virtual_partition = num_var_partitions (map);
  t->new_replaceable_dependencies = BITMAP_ALLOC (NULL);

  t->replaceable_expressions = NULL;
  t->num_in_part = XCNEWVEC (int, num_var_partitions (map));
  for (x = 1; x < num_ssa_names; x++)
    {
      int p;
      tree name = ssa_name (x);
      if (!name)
        continue;
      p = var_to_partition (map, name);
      if (p != NO_PARTITION)
        t->num_in_part[p]++;
    }

  return t;
}
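
All of these examples revolve around the libiberty allocation macros (XNEW, XCNEW, XNEWVEC, XCNEWVEC). A minimal sketch of what they are assumed to expand to, using plain malloc/calloc stand-ins (the SKETCH_* names are hypothetical; the real macros call xmalloc/xcalloc, which abort instead of returning NULL):

#include <stdlib.h>

/* Hedged sketch: plain-libc stand-ins for the libiberty allocation macros.
   The real versions never return NULL (xmalloc/xcalloc abort on failure).  */
#define SKETCH_XNEW(T)        ((T *) malloc (sizeof (T)))
#define SKETCH_XCNEW(T)       ((T *) calloc (1, sizeof (T)))
#define SKETCH_XNEWVEC(T, N)  ((T *) malloc (sizeof (T) * (N)))
#define SKETCH_XCNEWVEC(T, N) ((T *) calloc ((N), sizeof (T)))

int
main (void)
{
  /* XCNEWVEC-style allocation: the counters start at zero, so a loop like
     the one in new_temp_expr_table can increment them without any explicit
     initialization pass.  */
  unsigned nparts = 16;
  int *num_in_part = SKETCH_XCNEWVEC (int, nparts);
  num_in_part[3]++;
  free (num_in_part);
  return 0;
}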
Example #2
expr_hash_elt::expr_hash_elt (class expr_hash_elt &old_elt)
{
  m_expr = old_elt.m_expr;
  m_lhs = old_elt.m_lhs;
  m_vop = old_elt.m_vop;
  m_hash = old_elt.m_hash;
  m_stamp = this;

  /* Now deep copy the malloc'd space for CALL and PHI args.  */
  if (old_elt.m_expr.kind == EXPR_CALL)
    {
      size_t nargs = old_elt.m_expr.ops.call.nargs;
      size_t i;

      m_expr.ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        m_expr.ops.call.args[i] = old_elt.m_expr.ops.call.args[i];
    }
  else if (old_elt.m_expr.kind == EXPR_PHI)
    {
      size_t nargs = old_elt.m_expr.ops.phi.nargs;
      size_t i;

      m_expr.ops.phi.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        m_expr.ops.phi.args[i] = old_elt.m_expr.ops.phi.args[i];
    }
}
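
The copy constructor above copies every member shallowly and then deep-copies only the heap-allocated CALL/PHI argument vector. A standalone sketch of that same pattern, using a hypothetical expr_args struct in place of the hashable_expr payload (error handling omitted):

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the call/phi payload: a struct that owns a
   heap-allocated argument array.  */
struct expr_args
{
  size_t nargs;
  void **args;   /* owned; must be duplicated on copy */
};

static void
expr_args_copy (struct expr_args *dst, const struct expr_args *src)
{
  /* Member-wise copy first (mirrors copying m_expr, m_lhs, m_vop, ...).  */
  *dst = *src;

  /* Then replace the aliased pointer with a private copy, as the
     expr_hash_elt copy constructor does for CALL and PHI args.  */
  dst->args = calloc (src->nargs, sizeof (void *));
  memcpy (dst->args, src->args, src->nargs * sizeof (void *));
}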
Example #3
static void
init_copy_prop (void)
{
  basic_block bb;

  n_copy_of = num_ssa_names;
  copy_of = XCNEWVEC (prop_value_t, n_copy_of);

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  ssa_op_iter iter;
          tree def;

	  /* The only statements that we care about are those that may
	     generate useful copies.  We also need to mark conditional
	     jumps so that their outgoing edges are added to the work
	     lists of the propagator.  */
	  if (stmt_ends_bb_p (stmt))
            prop_set_simulate_again (stmt, true);
	  else if (stmt_may_generate_copy (stmt))
            prop_set_simulate_again (stmt, true);
	  else
            prop_set_simulate_again (stmt, false);

	  /* Mark all the outputs of this statement as not being
	     the copy of anything.  */
	  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
            if (!prop_simulate_again_p (stmt))
	      set_copy_of_val (def, def);
	}

      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
          gphi *phi = si.phi ();
          tree def;

	  def = gimple_phi_result (phi);
	  if (virtual_operand_p (def))
            prop_set_simulate_again (phi, false);
	  else
            prop_set_simulate_again (phi, true);

	  if (!prop_simulate_again_p (phi))
	    set_copy_of_val (def, def);
	}
    }
}
Example #4
/* * * * * * * * * * * * *
 *
 *  run_compiles   run all the regexp compiles for all the fixes once.
 */
void
run_compiles (void)
{
  tFixDesc *p_fixd = fixDescList;
  int fix_ct = FIX_COUNT;
  regex_t *p_re = XCNEWVEC (regex_t, REGEX_COUNT);

  /*  Make sure compile_re does not stumble across invalid data */

  memset (&incl_quote_re, '\0', sizeof (regex_t));

  compile_re (incl_quote_pat, &incl_quote_re, 1,
              "quoted include", "run_compiles");

  /*  Allow machine name tests to be ignored (testing, mainly) */

  if (pz_machine && ((*pz_machine == '\0') || (*pz_machine == '*')))
    pz_machine = (char*)NULL;

  /* FOR every fixup, ...  */
  do
    {
      tTestDesc *p_test = p_fixd->p_test_desc;
      int test_ct = p_fixd->test_ct;

      /*  IF the machine type pointer is not NULL (we are not in test mode)
             AND this test is for or not done on particular machines
          THEN ...   */

      if (  (pz_machine != NULL)
         && (p_fixd->papz_machs != (const char**) NULL)
         && ! machine_matches (p_fixd) )
        continue;

      /* FOR every test for the fixup, ...  */

      while (--test_ct >= 0)
        {
          switch (p_test->type)
            {
            case TT_EGREP:
            case TT_NEGREP:
              p_test->p_test_regex = p_re++;
              compile_re (p_test->pz_test_text, p_test->p_test_regex, 0,
                          "select test", p_fixd->fix_name);
            default: break;
            }
          p_test++;
        }
    }
  while (p_fixd++, --fix_ct > 0);
}
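
The test patterns compiled here are POSIX regexes; compile_re is assumed to be a thin wrapper over regcomp. A minimal standalone sketch of compiling and running one pattern directly with <regex.h>, with error handling trimmed to a single message:

#include <regex.h>
#include <stdio.h>

static int
matches (const char *pattern, const char *text)
{
  regex_t re;
  int rc;

  /* Compile once up front, as run_compiles does for every fix's tests.  */
  rc = regcomp (&re, pattern, REG_EXTENDED | REG_NEWLINE);
  if (rc != 0)
    {
      char msg[128];
      regerror (rc, &re, msg, sizeof msg);
      fprintf (stderr, "regcomp: %s\n", msg);
      return 0;
    }

  rc = regexec (&re, text, 0, NULL, 0);
  regfree (&re);
  return rc == 0;   /* 0 means the pattern matched */
}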
Example #5
static void
create_temp_arrays (struct switch_conv_info *info)
{
  int i;

  info->default_values = XCNEWVEC (tree, info->phi_count * 3);
  info->constructors = XCNEWVEC (VEC (constructor_elt, gc) *, info->phi_count);
  info->target_inbound_names = info->default_values + info->phi_count;
  info->target_outbound_names = info->target_inbound_names + info->phi_count;
  for (i = 0; i < info->phi_count; i++)
    info->constructors[i]
      = VEC_alloc (constructor_elt, gc, tree_low_cst (info->range_size, 1) + 1);
}
Example #6
void store_VRPs()
{
  size_t i;
  unsigned num = num_ssa_names;
  if (VRP_min_array)
    free (VRP_min_array);
  if (VRP_max_array)
    free (VRP_max_array);
  if (p_VRP_min)
    free (p_VRP_min);
  if (p_VRP_max)
    free (p_VRP_max);
  VRP_min_array = XCNEWVEC (tree, num);
  VRP_max_array = XCNEWVEC (tree, num);
  p_VRP_min = XCNEWVEC (bool, num);
  p_VRP_max = XCNEWVEC (bool, num);

  //restrict_range_to_consts();

  for (i = 0; i < num; i++)
    if (vr_value[i] && vr_value[i]->type == VR_RANGE
        && TREE_CODE (vr_value[i]->min) == INTEGER_CST
        && TREE_CODE (vr_value[i]->max) == INTEGER_CST
        && !TREE_OVERFLOW (vr_value[i]->min)
        && !TREE_OVERFLOW (vr_value[i]->max))
      {
        p_VRP_min[i] = true;
        p_VRP_max[i] = true;
        VRP_min_array[i] = vr_value[i]->min;
        VRP_max_array[i] = vr_value[i]->max;
      }
    else
      {
        p_VRP_min[i] = false;
        p_VRP_max[i] = false;
      }
  set_num_vr_values(num);
}
Example #7
cpp_hash_table *
ht_create (unsigned int order)
{
    unsigned int nslots = 1 << order;
    cpp_hash_table *table;

    table = XCNEW (cpp_hash_table);

    /* Strings need no alignment.  */
    obstack_specify_allocation (&table->stack, 0, 0, xmalloc, free);

    obstack_alignment_mask (&table->stack) = 0;

    table->entries = XCNEWVEC (hashnode, nslots);
    table->entries_owned = true;
    table->nslots = nslots;
    return table;
}
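
ht_create sizes the table as 1 << order, so the slot count is always a power of two and nslots - 1 can later serve as a bit mask in place of a modulo. A short sketch of that sizing convention, with a hypothetical helper name:

#include <assert.h>

/* Hedged sketch: derive the slot count and the mask that ht_expand-style
   probing uses instead of '% nslots'.  */
static void
slots_from_order (unsigned int order, unsigned int *nslots,
                  unsigned int *sizemask)
{
  *nslots = 1u << order;      /* e.g. order 10 -> 1024 slots */
  *sizemask = *nslots - 1;    /* 0x3FF: hash & sizemask == hash % nslots */
  assert ((*nslots & (*nslots - 1)) == 0);  /* power of two by construction */
}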
Example #8
static void
fbsd_add_threads (pid_t pid)
{
  struct cleanup *cleanup;
  lwpid_t *lwps;
  int i, nlwps;

  gdb_assert (!in_thread_list (pid_to_ptid (pid)));
  nlwps = ptrace (PT_GETNUMLWPS, pid, NULL, 0);
  if (nlwps == -1)
    perror_with_name (("ptrace"));

  lwps = XCNEWVEC (lwpid_t, nlwps);
  cleanup = make_cleanup (xfree, lwps);

  nlwps = ptrace (PT_GETLWPLIST, pid, (caddr_t) lwps, nlwps);
  if (nlwps == -1)
    perror_with_name (("ptrace"));

  for (i = 0; i < nlwps; i++)
    {
      ptid_t ptid = ptid_build (pid, lwps[i], 0);

      if (!in_thread_list (ptid))
	{
#ifdef PT_LWP_EVENTS
	  struct ptrace_lwpinfo pl;

	  /* Don't add exited threads.  Note that this is only called
	     when attaching to a multi-threaded process.  */
	  if (ptrace (PT_LWPINFO, lwps[i], (caddr_t) &pl, sizeof pl) == -1)
	    perror_with_name (("ptrace"));
	  if (pl.pl_flags & PL_FLAG_EXITED)
	    continue;
#endif
	  if (debug_fbsd_lwp)
	    fprintf_unfiltered (gdb_stdlog,
				"FLWP: adding thread for LWP %u\n",
				lwps[i]);
	  add_thread (ptid);
	}
    }
  do_cleanups (cleanup);
}
Example #9
File: symtab.c Project: 0mp/freebsd
hash_table *
ht_create (unsigned int order)
{
  unsigned int nslots = 1 << order;
  hash_table *table;

  table = XCNEW (hash_table);

  /* Strings need no alignment.  */
  _obstack_begin (&table->stack, 0, 0,
		  (void *(*) (long)) xmalloc,
		  (void (*) (void *)) free);

  obstack_alignment_mask (&table->stack) = 0;

  table->entries = XCNEWVEC (hashnode, nslots);
  table->entries_owned = true;
  table->nslots = nslots;
  return table;
}
Example #10
static bool
tail_duplicate (void)
{
  fibnode_t *blocks = XCNEWVEC (fibnode_t, last_basic_block_for_fn (cfun));
  basic_block *trace = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
  int *counts = XNEWVEC (int, last_basic_block_for_fn (cfun));
  int ninsns = 0, nduplicated = 0;
  gcov_type weighted_insns = 0, traced_insns = 0;
  fibheap_t heap = fibheap_new ();
  gcov_type cover_insns;
  int max_dup_insns;
  basic_block bb;
  bool changed = false;

  /* Create an oversized sbitmap to reduce the chance that we need to
     resize it.  */
  bb_seen = sbitmap_alloc (last_basic_block_for_fn (cfun) * 2);
  bitmap_clear (bb_seen);
  initialize_original_copy_tables ();

  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  branch_ratio_cutoff =
    (REG_BR_PROB_BASE / 100 * PARAM_VALUE (TRACER_MIN_BRANCH_RATIO));

  FOR_EACH_BB_FN (bb, cfun)
    {
      int n = count_insns (bb);
      if (!ignore_bb_p (bb))
	blocks[bb->index] = fibheap_insert (heap, -bb->frequency,
					    bb);

      counts [bb->index] = n;
      ninsns += n;
      weighted_insns += n * bb->frequency;
    }
Example #11
File: symtab.c Project: 0mp/freebsd
static void
ht_expand (hash_table *table)
{
  hashnode *nentries, *p, *limit;
  unsigned int size, sizemask;

  size = table->nslots * 2;
  nentries = XCNEWVEC (hashnode, size);
  sizemask = size - 1;

  p = table->entries;
  limit = p + table->nslots;
  do
    if (*p)
      {
	unsigned int index, hash, hash2;

	hash = (*p)->hash_value;
	index = hash & sizemask;

	if (nentries[index])
	  {
	    hash2 = ((hash * 17) & sizemask) | 1;
	    do
	      {
		index = (index + hash2) & sizemask;
	      }
	    while (nentries[index]);
	  }
	nentries[index] = *p;
      }
  while (++p < limit);

  if (table->entries_owned)
    free (table->entries);
  table->entries_owned = true;
  table->entries = nentries;
  table->nslots = size;
}
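
ht_expand rehashes into a table whose size is a power of two and probes with a secondary step of ((hash * 17) & sizemask) | 1. Forcing the step to be odd is what makes the probe sequence work: an odd step is coprime with a power-of-two table size, so repeated addition modulo the size visits every slot once before repeating. A minimal sketch of that probe sequence, assuming the same power-of-two convention:

#include <stdio.h>

int
main (void)
{
  const unsigned int size = 8;          /* power of two, as in ht_expand */
  const unsigned int sizemask = size - 1;
  unsigned int hash = 12345u;

  unsigned int index = hash & sizemask;
  unsigned int hash2 = ((hash * 17) & sizemask) | 1;   /* always odd */

  /* Because hash2 is odd and size is a power of two, the sequence below
     touches all 'size' slots exactly once before it cycles.  */
  for (unsigned int i = 0; i < size; i++)
    {
      printf ("probe %u: slot %u\n", i, index);
      index = (index + hash2) & sizemask;
    }
  return 0;
}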
Example #12

static void
fini_copy_prop (void)
{
    size_t i;
    prop_value_t *tmp;

    /* Set the final copy-of value for each variable by traversing the
       copy-of chains.  */
    tmp = XCNEWVEC (prop_value_t, num_ssa_names);
    for (i = 1; i < num_ssa_names; i++)
    {
        tree var = ssa_name (i);
        if (var && copy_of[i].value && copy_of[i].value != var)
            tmp[i].value = get_last_copy_of (var);
    }

    substitute_and_fold (tmp, false);

    free (cached_last_copy_of);
    free (copy_of);
    free (tmp);
}
Example #13
static void
fini_copy_prop (void)
{
  size_t i;
  prop_value_t *tmp;
  
  /* Set the final copy-of value for each variable by traversing the
     copy-of chains.  */
  tmp = XCNEWVEC (prop_value_t, num_ssa_names);
  for (i = 1; i < num_ssa_names; i++)
    {
      tree var = ssa_name (i);
      if (!var
	  || !copy_of[i].value
	  || copy_of[i].value == var)
	continue;

      tmp[i].value = get_last_copy_of (var);

      /* In theory the points-to solution of all members of the
         copy chain is their intersection.  For now we do not bother
	 to compute this but only make sure we do not lose points-to
	 information completely by setting the points-to solution
	 of the representative to the first solution we find if
	 it doesn't have one already.  */
      if (tmp[i].value != var
	  && POINTER_TYPE_P (TREE_TYPE (var))
	  && SSA_NAME_PTR_INFO (var)
	  && !SSA_NAME_PTR_INFO (tmp[i].value))
	duplicate_ssa_name_ptr_info (tmp[i].value, SSA_NAME_PTR_INFO (var));
    }

  substitute_and_fold (tmp, false);

  free (cached_last_copy_of);
  free (copy_of);
  free (tmp);
}
Example #14
static void
init_copy_prop (void)
{
  basic_block bb;

  copy_of = XCNEWVEC (prop_value_t, num_ssa_names);

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator si;
      int depth = bb_loop_depth (bb);

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple stmt = gsi_stmt (si);
	  ssa_op_iter iter;
          tree def;

	  /* The only statements that we care about are those that may
	     generate useful copies.  We also need to mark conditional
	     jumps so that their outgoing edges are added to the work
	     lists of the propagator.

	     Avoid copy propagation from an inner into an outer loop.
	     Otherwise, this may move loop variant variables outside of
	     their loops and prevent coalescing opportunities.  If the
	     value was loop invariant, it will be hoisted by LICM and
	     exposed for copy propagation.
	     ???  This doesn't make sense.  */
	  if (stmt_ends_bb_p (stmt))
            prop_set_simulate_again (stmt, true);
	  else if (stmt_may_generate_copy (stmt)
                   /* Since we are iterating over the statements in
                      BB, not the phi nodes, STMT will always be an
                      assignment.  */
                   && loop_depth_of_name (gimple_assign_rhs1 (stmt)) <= depth)
            prop_set_simulate_again (stmt, true);
	  else
            prop_set_simulate_again (stmt, false);

	  /* Mark all the outputs of this statement as not being
	     the copy of anything.  */
	  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
            if (!prop_simulate_again_p (stmt))
	      set_copy_of_val (def, def);
	}

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
          gimple phi = gsi_stmt (si);
          tree def;

	  def = gimple_phi_result (phi);
	  if (virtual_operand_p (def))
            prop_set_simulate_again (phi, false);
	  else
            prop_set_simulate_again (phi, true);

	  if (!prop_simulate_again_p (phi))
	    set_copy_of_val (def, def);
	}
    }
}
Example #15
static void
init_copy_prop (void)
{
  basic_block bb;

  copy_of = XCNEWVEC (prop_value_t, num_ssa_names);

  cached_last_copy_of = XCNEWVEC (tree, num_ssa_names);

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator si;
      int depth = bb->loop_depth;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple stmt = gsi_stmt (si);
	  ssa_op_iter iter;
          tree def;

	  /* The only statements that we care about are those that may
	     generate useful copies.  We also need to mark conditional
	     jumps so that their outgoing edges are added to the work
	     lists of the propagator.

	     Avoid copy propagation from an inner into an outer loop.
	     Otherwise, this may move loop variant variables outside of
	     their loops and prevent coalescing opportunities.  If the
	     value was loop invariant, it will be hoisted by LICM and
	     exposed for copy propagation.  */
	  if (stmt_ends_bb_p (stmt))
            prop_set_simulate_again (stmt, true);
	  else if (stmt_may_generate_copy (stmt)
                   /* Since we are iterating over the statements in
                      BB, not the phi nodes, STMT will always be an
                      assignment.  */
                   && loop_depth_of_name (gimple_assign_rhs1 (stmt)) <= depth)
            prop_set_simulate_again (stmt, true);
	  else
            prop_set_simulate_again (stmt, false);

	  /* Mark all the outputs of this statement as not being
	     the copy of anything.  */
	  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
            if (!prop_simulate_again_p (stmt))
	      set_copy_of_val (def, def);
	    else
	      cached_last_copy_of[SSA_NAME_VERSION (def)] = def;
	}

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
          gimple phi = gsi_stmt (si);
          tree def;

	  def = gimple_phi_result (phi);
	  if (!is_gimple_reg (def)
	      /* In loop-closed SSA form do not copy-propagate through
	         PHI nodes.  Technically this is only needed for loop
		 exit PHIs, but this is difficult to query.  */
	      || (current_loops
		  && gimple_phi_num_args (phi) == 1
		  && loops_state_satisfies_p (LOOP_CLOSED_SSA)))
            prop_set_simulate_again (phi, false);
	  else
            prop_set_simulate_again (phi, true);

	  if (!prop_simulate_again_p (phi))
	    set_copy_of_val (def, def);
	  else
	    cached_last_copy_of[SSA_NAME_VERSION (def)] = def;
	}
    }
}
Example #16
static bool
tail_duplicate (void)
{
  fibnode_t *blocks = XCNEWVEC (fibnode_t, last_basic_block);
  basic_block *trace = XNEWVEC (basic_block, n_basic_blocks);
  int *counts = XNEWVEC (int, last_basic_block);
  int ninsns = 0, nduplicated = 0;
  gcov_type weighted_insns = 0, traced_insns = 0;
  fibheap_t heap = fibheap_new ();
  gcov_type cover_insns;
  int max_dup_insns;
  basic_block bb;
  bool changed = false;

  /* Create an oversized sbitmap to reduce the chance that we need to
     resize it.  */
  bb_seen = sbitmap_alloc (last_basic_block * 2);
  bitmap_clear (bb_seen);
  initialize_original_copy_tables ();

  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  branch_ratio_cutoff =
    (REG_BR_PROB_BASE / 100 * PARAM_VALUE (TRACER_MIN_BRANCH_RATIO));

  FOR_EACH_BB (bb)
    {
      int n = count_insns (bb);
      if (!ignore_bb_p (bb))
	blocks[bb->index] = fibheap_insert (heap, -bb->frequency,
					    bb);

      counts [bb->index] = n;
      ninsns += n;
      weighted_insns += n * bb->frequency;
    }

  if (profile_info && flag_branch_probabilities)
    cover_insns = PARAM_VALUE (TRACER_DYNAMIC_COVERAGE_FEEDBACK);
  else
    cover_insns = PARAM_VALUE (TRACER_DYNAMIC_COVERAGE);
  cover_insns = (weighted_insns * cover_insns + 50) / 100;
  max_dup_insns = (ninsns * PARAM_VALUE (TRACER_MAX_CODE_GROWTH) + 50) / 100;

  while (traced_insns < cover_insns && nduplicated < max_dup_insns
         && !fibheap_empty (heap))
    {
      basic_block bb = (basic_block) fibheap_extract_min (heap);
      int n, pos;

      if (!bb)
	break;

      blocks[bb->index] = NULL;

      if (ignore_bb_p (bb))
	continue;
      gcc_assert (!bb_seen_p (bb));

      n = find_trace (bb, trace);

      bb = trace[0];
      traced_insns += bb->frequency * counts [bb->index];
      if (blocks[bb->index])
	{
	  fibheap_delete_node (heap, blocks[bb->index]);
	  blocks[bb->index] = NULL;
	}

      for (pos = 1; pos < n; pos++)
	{
	  basic_block bb2 = trace[pos];

	  if (blocks[bb2->index])
	    {
	      fibheap_delete_node (heap, blocks[bb2->index]);
	      blocks[bb2->index] = NULL;
	    }
	  traced_insns += bb2->frequency * counts [bb2->index];
	  if (EDGE_COUNT (bb2->preds) > 1
	      && can_duplicate_block_p (bb2)
	      /* We have the tendency to duplicate the loop header
	         of all do { } while loops.  Do not do that - it is
		 not profitable and it might create a loop with multiple
		 entries or at least rotate the loop.  */
	      && (!current_loops
		  || bb2->loop_father->header != bb2))
	    {
	      edge e;
	      basic_block copy;

	      nduplicated += counts [bb2->index];

	      e = find_edge (bb, bb2);

	      copy = duplicate_block (bb2, e, bb);
	      flush_pending_stmts (e);

	      add_phi_args_after_copy (&copy, 1, NULL);

	      /* Reconsider the original copy of block we've duplicated.
	         Removing the most common predecessor may make it to be
	         head.  */
	      blocks[bb2->index] =
		fibheap_insert (heap, -bb2->frequency, bb2);

	      if (dump_file)
		fprintf (dump_file, "Duplicated %i as %i [%i]\n",
			 bb2->index, copy->index, copy->frequency);

	      bb2 = copy;
	      changed = true;
	    }
	  mark_bb_seen (bb2);
	  bb = bb2;
	  /* In case the trace became infrequent, stop duplicating.  */
	  if (ignore_bb_p (bb))
	    break;
	}
      if (dump_file)
	fprintf (dump_file, " covered now %.1f\n\n",
		 traced_insns * 100.0 / weighted_insns);
    }
  if (dump_file)
    fprintf (dump_file, "Duplicated %i insns (%i%%)\n", nduplicated,
	     nduplicated * 100 / ninsns);

  free_original_copy_tables ();
  sbitmap_free (bb_seen);
  free (blocks);
  free (trace);
  free (counts);
  fibheap_delete (heap);

  return changed;
}
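
tail_duplicate drives trace selection with a libiberty Fibonacci heap, inserting each block with key -bb->frequency so that fibheap_extract_min returns the most frequent block first. A small sketch of that "negate the key to turn a min-heap into a max-heap" trick, assuming the fibheap.h interface used above (fibheapkey_t is assumed to be the heap's signed key type):

#include "fibheap.h"   /* libiberty; assumed to provide the API used above */
#include <stdio.h>

int
main (void)
{
  fibheap_t heap = fibheap_new ();
  static int freq[] = { 30, 5, 120, 60 };

  /* Insert with negated frequency: the minimum key then corresponds to the
     maximum frequency, just as tail_duplicate does for basic blocks.  */
  for (int i = 0; i < 4; i++)
    fibheap_insert (heap, -(fibheapkey_t) freq[i], &freq[i]);

  while (!fibheap_empty (heap))
    {
      int *f = (int *) fibheap_extract_min (heap);
      printf ("next hottest: %d\n", *f);   /* 120, 60, 30, 5 */
    }

  fibheap_delete (heap);
  return 0;
}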
Example #17
static void
tail_duplicate (void)
{
  fibnode_t *blocks = XCNEWVEC (fibnode_t, last_basic_block);
  basic_block *trace = XNEWVEC (basic_block, n_basic_blocks);
  int *counts = XNEWVEC (int, last_basic_block);
  int ninsns = 0, nduplicated = 0;
  gcov_type weighted_insns = 0, traced_insns = 0;
  fibheap_t heap = fibheap_new ();
  gcov_type cover_insns;
  int max_dup_insns;
  basic_block bb;

  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  branch_ratio_cutoff =
    (REG_BR_PROB_BASE / 100 * PARAM_VALUE (TRACER_MIN_BRANCH_RATIO));

  FOR_EACH_BB (bb)
    {
      int n = count_insns (bb);
      if (!ignore_bb_p (bb))
	blocks[bb->index] = fibheap_insert (heap, -bb->frequency,
					    bb);

      counts [bb->index] = n;
      ninsns += n;
      weighted_insns += n * bb->frequency;
    }

  if (profile_info && flag_branch_probabilities)
    cover_insns = PARAM_VALUE (TRACER_DYNAMIC_COVERAGE_FEEDBACK);
  else
    cover_insns = PARAM_VALUE (TRACER_DYNAMIC_COVERAGE);
  cover_insns = (weighted_insns * cover_insns + 50) / 100;
  max_dup_insns = (ninsns * PARAM_VALUE (TRACER_MAX_CODE_GROWTH) + 50) / 100;

  while (traced_insns < cover_insns && nduplicated < max_dup_insns
         && !fibheap_empty (heap))
    {
      basic_block bb = fibheap_extract_min (heap);
      int n, pos;

      if (!bb)
	break;

      blocks[bb->index] = NULL;

      if (ignore_bb_p (bb))
	continue;
      gcc_assert (!seen (bb));

      n = find_trace (bb, trace);

      bb = trace[0];
      traced_insns += bb->frequency * counts [bb->index];
      if (blocks[bb->index])
	{
	  fibheap_delete_node (heap, blocks[bb->index]);
	  blocks[bb->index] = NULL;
	}

      for (pos = 1; pos < n; pos++)
	{
	  basic_block bb2 = trace[pos];

	  if (blocks[bb2->index])
	    {
	      fibheap_delete_node (heap, blocks[bb2->index]);
	      blocks[bb2->index] = NULL;
	    }
	  traced_insns += bb2->frequency * counts [bb2->index];
	  if (EDGE_COUNT (bb2->preds) > 1
	      && can_duplicate_block_p (bb2))
	    {
	      edge e;
	      basic_block old = bb2;

	      e = find_edge (bb, bb2);

	      nduplicated += counts [bb2->index];
	      bb2 = duplicate_block (bb2, e, bb);

	      /* Reconsider the original copy of block we've duplicated.
	         Removing the most common predecessor may make it to be
	         head.  */
	      blocks[old->index] =
		fibheap_insert (heap, -old->frequency, old);

	      if (dump_file)
		fprintf (dump_file, "Duplicated %i as %i [%i]\n",
			 old->index, bb2->index, bb2->frequency);
	    }
	  bb->aux = bb2;
	  bb2->il.rtl->visited = 1;
	  bb = bb2;
	  /* In case the trace became infrequent, stop duplicating.  */
	  if (ignore_bb_p (bb))
	    break;
	}
      if (dump_file)
	fprintf (dump_file, " covered now %.1f\n\n",
		 traced_insns * 100.0 / weighted_insns);
    }
  if (dump_file)
    fprintf (dump_file, "Duplicated %i insns (%i%%)\n", nduplicated,
	     nduplicated * 100 / ninsns);

  free (blocks);
  free (trace);
  free (counts);
  fibheap_delete (heap);
}
Example #18
/* A place for the many, many bitmaps we create.  */
static bitmap_obstack ter_bitmap_obstack;

extern void debug_ter (FILE *, temp_expr_table *);


/* Create a new TER table for MAP.  */

static temp_expr_table *
new_temp_expr_table (var_map map)
{
  temp_expr_table *t = XNEW (struct temp_expr_table);
  t->map = map;

  t->partition_dependencies = XCNEWVEC (bitmap, num_ssa_names + 1);
  t->expr_decl_uids = XCNEWVEC (bitmap, num_ssa_names + 1);
  t->kill_list = XCNEWVEC (bitmap, num_var_partitions (map) + 1);

  t->partition_in_use = BITMAP_ALLOC (&ter_bitmap_obstack);

  t->virtual_partition = num_var_partitions (map);
  t->new_replaceable_dependencies = BITMAP_ALLOC (&ter_bitmap_obstack);

  t->replaceable_expressions = NULL;
  t->num_in_part = XCNEWVEC (int, num_var_partitions (map));

  unsigned x;
  tree name;

  FOR_EACH_SSA_NAME (x, name, cfun)
Example #19
conflict_graph
conflict_graph_new (int num_regs)
{
  conflict_graph graph = XNEW (struct conflict_graph_def);
  graph->num_regs = num_regs;

  /* Set up the hash table.  No delete action is specified; memory
     management of arcs is through the obstack.  */
  graph->arc_hash_table
    = htab_create (INITIAL_ARC_CAPACITY, &arc_hash, &arc_eq, NULL);

  /* Create an obstack for allocating arcs.  */
  obstack_init (&graph->arc_obstack);
             
  /* Create and zero the lookup table by register number.  */
  graph->neighbor_heads = XCNEWVEC (conflict_graph_arc, num_regs);

  return graph;
}

/* Deletes a conflict graph.  */

void
conflict_graph_delete (conflict_graph graph)
{
  obstack_free (&graph->arc_obstack, NULL);
  htab_delete (graph->arc_hash_table);
  free (graph->neighbor_heads);
  free (graph);
}
Example #20
expr_hash_elt::expr_hash_elt (gimple *stmt, tree orig_lhs)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = this->expr ();

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
	  expr->kind = EXPR_SINGLE;
	  expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
	  expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
	  break;
        case GIMPLE_UNARY_RHS:
	  expr->kind = EXPR_UNARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  if (CONVERT_EXPR_CODE_P (subcode))
	    subcode = NOP_EXPR;
	  expr->ops.unary.op = subcode;
	  expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
	  break;
        case GIMPLE_BINARY_RHS:
	  expr->kind = EXPR_BINARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  expr->ops.binary.op = subcode;
	  expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
	  expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
	  break;
        case GIMPLE_TERNARY_RHS:
	  expr->kind = EXPR_TERNARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  expr->ops.ternary.op = subcode;
	  expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
	  expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
	  expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
	  break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
    {
      size_t nargs = gimple_call_num_args (call_stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (call_stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (call_stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = call_stmt;

      if (gimple_call_flags (call_stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (call_stmt, i);
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    {
      expr->type = TREE_TYPE (gimple_switch_index (swtch_stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (swtch_stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  m_lhs = orig_lhs;
  m_vop = gimple_vuse (stmt);
  m_hash = avail_expr_hash (this);
  m_stamp = this;
}
Example #21
static unsigned
self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
		     struct loop *loop)
{
  tree stride, access_fn;
  HOST_WIDE_INT *strides, astride;
  VEC (tree, heap) *access_fns;
  tree ref = DR_REF (dr);
  unsigned i, ret = ~0u;

  /* In the following example:

     for (i = 0; i < N; i++)
       for (j = 0; j < N; j++)
         use (a[j][i]);
     the same cache line is accessed each N steps (except if the change from
     i to i + 1 crosses the boundary of the cache line).  Thus, for self-reuse,
     we cannot rely purely on the results of the data dependence analysis.

     Instead, we compute the stride of the reference in each loop, and consider
     the innermost loop in that the stride is less than cache size.  */

  strides = XCNEWVEC (HOST_WIDE_INT, n);
  access_fns = DR_ACCESS_FNS (dr);

  for (i = 0; VEC_iterate (tree, access_fns, i, access_fn); i++)
    {
      /* Keep track of the reference corresponding to the subscript, so that we
	 know its stride.  */
      while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
	ref = TREE_OPERAND (ref, 0);
      
      if (TREE_CODE (ref) == ARRAY_REF)
	{
	  stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
	  if (host_integerp (stride, 1))
	    astride = tree_low_cst (stride, 1);
	  else
	    astride = L1_CACHE_LINE_SIZE;

	  ref = TREE_OPERAND (ref, 0);
	}
      else
	astride = 1;

      add_subscript_strides (access_fn, astride, strides, n, loop);
    }

  for (i = n; i-- > 0; )
    {
      unsigned HOST_WIDE_INT s;

      s = strides[i] < 0 ?  -strides[i] : strides[i];

      if (s < (unsigned) L1_CACHE_LINE_SIZE
	  && (loop_sizes[i]
	      > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
	{
	  ret = loop_sizes[i];
	  break;
	}
    }

  free (strides);
  return ret;
}
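
The comment in self_reuse_distance carries the whole idea: for use (a[j][i]) the stride in the inner j loop is a full row (N elements), while the stride in the outer i loop is a single element, so only the i loop reuses data within a cache line. A tiny sketch that just prints those per-loop strides for an assumed element size and cache-line size:

#include <stdio.h>

int
main (void)
{
  const unsigned n = 1000;               /* N, the array dimension */
  const unsigned elt = sizeof (double);  /* assumed element size */
  const unsigned line = 64;              /* assumed L1 cache line in bytes */

  /* Access a[j][i]: advancing j moves by one row, advancing i by one
     element.  self_reuse_distance looks for the innermost loop whose
     stride still fits within a cache line.  */
  unsigned stride_j = n * elt;   /* 8000 bytes: every j step hits a new line */
  unsigned stride_i = elt;       /*    8 bytes: several i steps share a line */

  printf ("j-loop stride %u bytes -> reuse within a line: %s\n",
          stride_j, stride_j < line ? "yes" : "no");
  printf ("i-loop stride %u bytes -> reuse within a line: %s\n",
          stride_i, stride_i < line ? "yes" : "no");
  return 0;
}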