Code example #1
File: journal.c  Project: snua12/zlomekfs
void initialize_journal_c(void)
{
	zfsd_mutex_init(&journal_mutex);
	/* Pool of journal entries, allocated in blocks of 1020 and
	   guarded by journal_mutex (zlomekfs's create_alloc_pool takes
	   the protecting mutex as an extra argument).  */
	journal_pool = create_alloc_pool("journal_pool",
									 sizeof(struct journal_entry_def),
									 1020, &journal_mutex);
}
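The pool created above is consumed with the usual alloc-pool calls. A minimal sketch of an allocation site, assuming zlomekfs keeps GCC-style pool_alloc alongside its mutex-aware create_alloc_pool, and that callers (rather than the pool itself) take the mutex; the helper name is illustrative, not from the project:

/* Illustrative helper (not from the project): allocate one journal
   entry from journal_pool.  Assumes journal_mutex must be held around
   pool operations, matching the mutex passed to create_alloc_pool.  */
static struct journal_entry_def *journal_entry_new(void)
{
	struct journal_entry_def *entry;

	zfsd_mutex_lock(&journal_mutex);
	entry = (struct journal_entry_def *)pool_alloc(journal_pool);
	zfsd_mutex_unlock(&journal_mutex);
	return entry;
}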
Code example #2
File: regcprop.c  Project: AlissonLinhares/NativeKit
static unsigned int
copyprop_hardreg_forward (void)
{
  struct value_data *all_vd;
  basic_block bb;
  sbitmap visited;
  bool analyze_called = false;

  all_vd = XNEWVEC (struct value_data, last_basic_block_for_fn (cfun));

  visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
  bitmap_clear (visited);

  if (MAY_HAVE_DEBUG_INSNS)
    debug_insn_changes_pool
      = create_alloc_pool ("debug insn changes pool",
			   sizeof (struct queued_debug_insn_change), 256);

  FOR_EACH_BB_FN (bb, cfun)
    {
      bitmap_set_bit (visited, bb->index);

      /* If a block has a single predecessor that we've already
	 processed, begin with the value data that was live at
	 the end of the predecessor block.  */
      /* ??? Ought to use more intelligent queuing of blocks.  */
      if (single_pred_p (bb)
	  && bitmap_bit_p (visited, single_pred (bb)->index)
	  && ! (single_pred_edge (bb)->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)))
	{
	  all_vd[bb->index] = all_vd[single_pred (bb)->index];
	  if (all_vd[bb->index].n_debug_insn_changes)
	    {
	      unsigned int regno;

	      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
		{
		  if (all_vd[bb->index].e[regno].debug_insn_changes)
		    {
		      all_vd[bb->index].e[regno].debug_insn_changes = NULL;
		      if (--all_vd[bb->index].n_debug_insn_changes == 0)
			break;
		    }
		}
	    }
	}
      else
	init_value_data (all_vd + bb->index);

      copyprop_hardreg_forward_1 (bb, all_vd + bb->index);
    }

  if (MAY_HAVE_DEBUG_INSNS)
    {
      FOR_EACH_BB_FN (bb, cfun)
	if (bitmap_bit_p (visited, bb->index)
	    && all_vd[bb->index].n_debug_insn_changes)
	  {
	    unsigned int regno;
	    bitmap live;

	    if (!analyze_called)
	      {
		df_analyze ();
		analyze_called = true;
	      }
	    live = df_get_live_out (bb);
	    for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
	      if (all_vd[bb->index].e[regno].debug_insn_changes)
		{
		  if (REGNO_REG_SET_P (live, regno))
		    apply_debug_insn_changes (all_vd + bb->index, regno);
		  if (all_vd[bb->index].n_debug_insn_changes == 0)
		    break;
		}
	  }

      free_alloc_pool (debug_insn_changes_pool);
    }

  sbitmap_free (visited);
  free (all_vd);
  return 0;
}
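The pool created at the top is drained elsewhere in regcprop.c: each pending change to a debug insn is drawn from debug_insn_changes_pool with pool_alloc and threaded onto a per-register list, and the single free_alloc_pool call at the end releases every record at once instead of walking the lists. A minimal sketch of that allocation pattern; the field names follow the structures used above, but the helper itself is illustrative, not the exact GCC function:

/* Sketch: queue one pending change to a debug insn, drawing the record
   from debug_insn_changes_pool.  ENTRY is the per-register slot whose
   debug_insn_changes list the walk above consults.  */
static void
queue_debug_insn_change (struct value_data_entry *entry, rtx insn,
			 rtx *loc, rtx new_rtx)
{
  struct queued_debug_insn_change *change
    = (struct queued_debug_insn_change *) pool_alloc (debug_insn_changes_pool);

  change->next = entry->debug_insn_changes;
  change->insn = insn;
  change->loc = loc;
  change->new_rtx = new_rtx;
  entry->debug_insn_changes = change;
}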
Code example #3
void
cfg_layout_initialize (void)
{
  basic_block bb;

  /* Our algorithm depends on the fact that there are no dead
     jumptables around the code.  */
  /* One reorder_block_def per basic block; the "+ 2" leaves room for
     the entry and exit blocks, which n_basic_blocks does not count.  */
  cfg_layout_pool =
    create_alloc_pool ("cfg layout pool", sizeof (struct reorder_block_def),
		       n_basic_blocks + 2);
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
    cfg_layout_initialize_rbi (bb);

  cfg_layout_rtl_register_cfg_hooks ();

  record_effective_endpoints ();

  cleanup_cfg (CLEANUP_CFGLAYOUT);
}
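The pool is drawn from inside cfg_layout_initialize_rbi, called once per block in the loop above. In GCC of this vintage that amounted to roughly the following; a sketch reconstructed around the pool's element type, not a verbatim quote:

/* Sketch: attach a zeroed reorder_block_def, taken from the pool, to BB.  */
void
cfg_layout_initialize_rbi (basic_block bb)
{
  if (bb->rbi)
    abort ();
  bb->rbi = (struct reorder_block_def *) pool_alloc (cfg_layout_pool);
  memset (bb->rbi, 0, sizeof (struct reorder_block_def));
}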
Code example #4
struct et_node *
et_new_tree (void *data)
{
  struct et_node *nw;
  
  /* The node pool is created lazily, on the first allocation.  */
  if (!et_nodes)
    et_nodes = create_alloc_pool ("et_node pool", sizeof (struct et_node), 300);
  nw = pool_alloc (et_nodes);

  nw->data = data;
  nw->father = NULL;
  nw->left = NULL;
  nw->right = NULL;
  nw->son = NULL;

  nw->rightmost_occ = et_new_occ (nw);
  nw->parent_occ = NULL;

  return nw;
}
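The matching teardown returns both records to their pools with pool_free rather than free, which is what keeps pool allocation cheap. A sketch along the lines of et-forest.c's et_free_tree; the et_split calls detach the node from the forest first, and the exact body should be treated as an approximation:

/* Sketch: detach T from the forest and return its node and rightmost
   occurrence to the lazily-created pools.  */
void
et_free_tree (struct et_node *t)
{
  while (t->son)
    et_split (t->son);

  if (t->father)
    et_split (t);

  pool_free (et_occurrences, t->rightmost_occ);
  pool_free (et_nodes, t);
}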
Code example #5
static struct et_occ *
et_new_occ (struct et_node *node)
{
  struct et_occ *nw;
  
  if (!et_occurrences)
    et_occurrences = create_alloc_pool ("et_occ pool", sizeof (struct et_occ), 300);
  nw = pool_alloc (et_occurrences);

  nw->of = node;
  nw->parent = NULL;
  nw->prev = NULL;
  nw->next = NULL;

  nw->depth = 0;
  nw->min_occ = nw;
  nw->min = 0;

  return nw;
}
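et_new_occ mirrors et_new_tree: both create their pool lazily on the first allocation, so clients never initialize the pools explicitly. If a client wanted to release both pools wholesale once no nodes or occurrences are live, a cleanup along these lines would do it; a hypothetical helper, sketched here rather than taken from the interface shown above:

/* Hypothetical: release both lazily-created pools in one shot.
   free_alloc_pool frees every object still in a pool, so this is only
   safe once no et_node or et_occ is in use.  */
static void
et_free_pools (void)
{
  if (et_occurrences)
    {
      free_alloc_pool (et_occurrences);
      et_occurrences = NULL;
    }
  if (et_nodes)
    {
      free_alloc_pool (et_nodes);
      et_nodes = NULL;
    }
}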
Code example #6
File: regcprop.c  Project: alexandermerritt/dragonfly
static unsigned int
copyprop_hardreg_forward (void)
{
  struct value_data *all_vd;
  basic_block bb;
  sbitmap visited;
  bool analyze_called = false;

  all_vd = XNEWVEC (struct value_data, last_basic_block);

  visited = sbitmap_alloc (last_basic_block);
  sbitmap_zero (visited);

  if (MAY_HAVE_DEBUG_INSNS)
    debug_insn_changes_pool
      = create_alloc_pool ("debug insn changes pool",
			   sizeof (struct queued_debug_insn_change), 256);

  FOR_EACH_BB (bb)
    {
      SET_BIT (visited, bb->index);

      /* If a block has a single predecessor that we've already
	 processed, begin with the value data that was live at
	 the end of the predecessor block.  */
      /* ??? Ought to use more intelligent queuing of blocks.  */
      if (single_pred_p (bb)
	  && TEST_BIT (visited, single_pred (bb)->index)
	  && ! (single_pred_edge (bb)->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)))
	{
	  all_vd[bb->index] = all_vd[single_pred (bb)->index];
	  if (all_vd[bb->index].n_debug_insn_changes)
	    {
	      unsigned int regno;

	      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
		{
		  if (all_vd[bb->index].e[regno].debug_insn_changes)
		    {
		      all_vd[bb->index].e[regno].debug_insn_changes = NULL;
		      if (--all_vd[bb->index].n_debug_insn_changes == 0)
			break;
		    }
		}
	    }
	}
      else
	init_value_data (all_vd + bb->index);

      copyprop_hardreg_forward_1 (bb, all_vd + bb->index);
    }

  if (MAY_HAVE_DEBUG_INSNS)
    {
      FOR_EACH_BB (bb)
	if (TEST_BIT (visited, bb->index)
	    && all_vd[bb->index].n_debug_insn_changes)
	  {
	    unsigned int regno;
	    bitmap live;

	    if (!analyze_called)
	      {
		df_analyze ();
		analyze_called = true;
	      }
	    live = df_get_live_out (bb);
	    for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
	      if (all_vd[bb->index].e[regno].debug_insn_changes)
		{
		  if (REGNO_REG_SET_P (live, regno))
		    apply_debug_insn_changes (all_vd + bb->index, regno);
		  if (all_vd[bb->index].n_debug_insn_changes == 0)
		    break;
		}
	  }

      free_alloc_pool (debug_insn_changes_pool);
    }

  sbitmap_free (visited);
  free (all_vd);
  return 0;
}
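Code examples #2 and #6 are the same function from two vintages of GCC; only the CFG and bitmap APIs differ. FOR_EACH_BB (bb) and last_basic_block became FOR_EACH_BB_FN (bb, cfun) and last_basic_block_for_fn (cfun), and the sbitmap operations sbitmap_zero, SET_BIT, and TEST_BIT were renamed bitmap_clear, bitmap_set_bit, and bitmap_bit_p. The create_alloc_pool call itself is identical in both.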