Example #1
static void propagate_split(SmoothEdge *edge, SmoothVert *vert,
				SmoothMesh *mesh)
{
	SmoothEdge *edge2;
	LinkNode *visited_faces = NULL;
#ifdef EDGESPLIT_DEBUG_1
	printf("=== START === propagate_split(edge = %4d, vert = %4d)\n",
		edge->newIndex, vert->newIndex);
#endif

	edge2 = find_other_sharp_edge(vert, edge, &visited_faces);

	if(!edge2) {
		/* didn't find a sharp or loose edge, so we've hit a dead end */
	} else if(!edge_is_loose(edge2)) {
		/* edge2 is not loose, so it must be sharp */
		if(edge_is_loose(edge)) {
			/* edge is loose, so we can split edge2 at this vert */
			split_edge(edge2, vert, mesh);
		} else if(edge_is_sharp(edge)) {
			/* both edges are sharp, so we can split the pair at vert */
			split_edge(edge, vert, mesh);
		} else {
			/* edge is not sharp, so try to split edge2 at its other vert */
			split_edge(edge2, other_vert(edge2, vert), mesh);
		}
	} else { /* edge2 is loose */
		if(edge_is_loose(edge)) {
			SmoothVert *vert2;
			ReplaceData repdata;

			/* can't split edge, what should we do with vert? */
			if(linklist_subset(vert->faces, visited_faces)) {
				/* vert has only one fan of faces attached; don't split it */
			} else {
				/* vert has more than one fan of faces attached; split it */
				vert2 = smoothvert_copy(vert, mesh);

				/* fails in rare cases, see [#26993] */
				if(vert2) {
					/* replace vert with its copy in visited_faces */
					repdata.find = vert;
					repdata.replace = vert2;
					BLI_linklist_apply(visited_faces, face_replace_vert, &repdata);
				}
			}
		} else {
			/* edge is not loose, so it must be sharp; split it */
			split_edge(edge, vert, mesh);
		}
	}

	BLI_linklist_free(visited_faces, NULL);
#ifdef EDGESPLIT_DEBUG_1
	printf("=== END === propagate_split(edge = %4d, vert = %4d)\n",
		edge->newIndex, vert->newIndex);
#endif
}
Example #2
struct loops *
loop_optimizer_init (FILE *dumpfile)
{
    struct loops *loops = xcalloc (1, sizeof (struct loops));
    edge e;

    /* Initialize structures for layout changes.  */
    cfg_layout_initialize (0);

    /* Avoid annoying special cases of edges going to exit
       block.  */
    for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
        if ((e->flags & EDGE_FALLTHRU) && e->src->succ->succ_next)
            split_edge (e);

    /* Find the loops.  */

    if (flow_loops_find (loops, LOOP_TREE) <= 1)
    {
        basic_block bb;

        /* No loops.  */
        flow_loops_free (loops);
        free_dominance_info (CDI_DOMINATORS);
        free (loops);

        /* Make chain.  */
        FOR_EACH_BB (bb)
        if (bb->next_bb != EXIT_BLOCK_PTR)
            bb->rbi->next = bb->next_bb;
        cfg_layout_finalize ();
        return NULL;
    }
Example #3
//------------------------------------------------------------------------- 
// Purpose       : Local modification functions. 
// 
// Special Notes :  
// 
// Creator       : Jason Kraftcheck 
// 
// Creation Date : 03/25/00 
//------------------------------------------------------------------------- 
CubitPoint* CubitFacetData::split_edge( int edge_index,
                                        const CubitVector& position )
{
  CubitPoint* pt1 = point((edge_index+1)%3);
  CubitPoint* pt2 = point((edge_index+2)%3);
  return split_edge( pt1, pt2, position );
}
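The index arithmetic above uses the usual triangular-facet convention: edge i is the edge opposite point i, so its end points are points (i+1)%3 and (i+2)%3. A minimal self-contained check of that convention (illustration only; edge_end_point is a hypothetical helper, not part of CGM):

#include <cassert>

// Hypothetical helper, not from CGM: end point 'which' (0 or 1) of the
// edge opposite point 'edge_index' on a triangular facet.
static int edge_end_point(int edge_index, int which)
{
  return (edge_index + 1 + which) % 3;
}

int main()
{
  // Edge 0 (opposite point 0) runs between points 1 and 2, and so on.
  assert(edge_end_point(0, 0) == 1 && edge_end_point(0, 1) == 2);
  assert(edge_end_point(1, 0) == 2 && edge_end_point(1, 1) == 0);
  assert(edge_end_point(2, 0) == 0 && edge_end_point(2, 1) == 1);
  return 0;
}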
Example #4
static void
naive_process_phi (hsa_insn_phi *phi, const vec<edge> &predecessors)
{
  unsigned count = phi->operand_count ();
  for (unsigned i = 0; i < count; i++)
    {
      gcc_checking_assert (phi->get_op (i));
      hsa_op_base *op = phi->get_op (i);
      hsa_bb *hbb;
      edge e;

      if (!op)
	break;

      e = predecessors[i];
      if (single_succ_p (e->src))
	hbb = hsa_bb_for_bb (e->src);
      else
	{
	  basic_block old_dest = e->dest;
	  hbb = hsa_init_new_bb (split_edge (e));

	  /* If switch insn used this edge, fix jump table.  */
	  hsa_bb *source = hsa_bb_for_bb (e->src);
	  hsa_insn_sbr *sbr;
	  if (source->m_last_insn
	      && (sbr = dyn_cast <hsa_insn_sbr *> (source->m_last_insn)))
	    sbr->replace_all_labels (old_dest, hbb->m_bb);
	}

      hsa_build_append_simple_mov (phi->m_dest, op, hbb);
    }
}
Example #5
static void split_sharp_edges(SmoothMesh *mesh, float split_angle, int flags)
{
	SmoothVert *vert;
	int i;
	/* if normal1 dot normal2 < threshold, angle is greater, so split */
	/* FIXME not sure if this always works */
	/* 0.00001 added for floating-point rounding */
	mesh->threshold = cosf((split_angle + 0.00001f) * (float)M_PI / 180.0f);
	mesh->flags = flags;

	/* loop through edges, splitting sharp ones */
	/* can't use an iterator here, because we'll be adding edges */
	for(i = 0; i < mesh->num_edges; i++) {
		SmoothEdge *edge = &mesh->edges[i];

		if(edge_is_sharp(edge)) {
			split_edge(edge, edge->verts[0], mesh);

			do {
				pop_propagate_stack(&edge, &vert, mesh);
				if(edge && smoothedge_has_vert(edge, vert))
					propagate_split(edge, vert, mesh);
			} while(edge);
		}
	}
}
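The threshold computed in split_sharp_edges() turns the angle test into a dot-product comparison: two unit face normals enclose an angle larger than split_angle exactly when their dot product drops below cos(split_angle). A minimal self-contained sketch of that test (illustrative name and signature, not Blender's API):

#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Illustration only, not a Blender function: returns nonzero when the
 * angle between unit normals n1 and n2 exceeds split_angle (degrees),
 * using the same cosine threshold as split_sharp_edges() above. */
static int normals_exceed_angle(const float n1[3], const float n2[3],
                                float split_angle)
{
	float threshold = cosf((split_angle + 0.00001f) * (float)M_PI / 180.0f);
	float d = n1[0] * n2[0] + n1[1] * n2[1] + n1[2] * n2[2];

	/* cosf() decreases as the angle grows, so a smaller dot product
	 * means a larger angle between the normals */
	return d < threshold;
}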
Example #6
struct loops *
loop_optimizer_init (FILE *dumpfile)
{
  struct loops *loops = xcalloc (1, sizeof (struct loops));
  edge e;
  edge_iterator ei;
  static bool first_time = true;

  if (first_time)
    {
      first_time = false;
      init_set_costs ();
    }

  /* Avoid annoying special cases of edges going to exit
     block.  */

  for (ei = ei_start (EXIT_BLOCK_PTR->preds); (e = ei_safe_edge (ei)); )
    if ((e->flags & EDGE_FALLTHRU) && !single_succ_p (e->src))
      split_edge (e);
    else
      ei_next (&ei);

  /* Find the loops.  */

  if (flow_loops_find (loops) <= 1)
    {
      /* No loops.  */
      flow_loops_free (loops);
      free (loops);

      return NULL;
    }

  /* Not going to update these.  */
  free (loops->cfg.rc_order);
  loops->cfg.rc_order = NULL;
  free (loops->cfg.dfs_order);
  loops->cfg.dfs_order = NULL;

  /* Create pre-headers.  */
  create_preheaders (loops, CP_SIMPLE_PREHEADERS);

  /* Force all latches to have only single successor.  */
  force_single_succ_latches (loops);

  /* Mark irreducible loops.  */
  mark_irreducible_loops (loops);

  /* Dump loops.  */
  flow_loops_dump (loops, dumpfile, NULL, 1);

#ifdef ENABLE_CHECKING
  verify_dominators (CDI_DOMINATORS);
  verify_loop_structure (loops);
#endif

  return loops;
}
Example #7
/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx insn, rtx last)
{
  sched_rgn_n_insns++;

  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case, where we do have partially dead
	 instruction scheduled after last control flow instruction.
	 In this case we can create new basic block.  It is
	 always exactly one basic block last in the sequence.  */

      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
			   && !IS_SPECULATION_CHECK_P (insn)
			   && BB_HEAD (last_bb) != insn
			   && BB_END (last_bb) == insn);

      {
	rtx x;

	x = NEXT_INSN (insn);
	if (e)
	  gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
	else
	  gcc_checking_assert (BARRIER_P (x));
      }

      if (e)
	{
	  bb = split_edge (e);
	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
	}
      else
	/* Create an empty unreachable block after the INSN.  */
	bb = create_basic_block (NEXT_INSN (insn), NULL_RTX, last_bb);

      /* split_edge () creates BB before E->DEST.  Keep in mind, that
	 this operation extends scheduling region till the end of BB.
	 Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out
	 of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}
Example #8
void
gimple_gen_ic_func_profiler (void)
{
  struct cgraph_node * c_node = cgraph_node::get (current_function_decl);
  gimple_stmt_iterator gsi;
  gcall *stmt1;
  gassign *stmt2;
  tree tree_uid, cur_func, void0;

  if (c_node->only_called_directly_p ())
    return;

  gimple_init_edge_profiler ();

  /* Insert code:

    stmt1: __gcov_indirect_call_profiler_v2 (profile_id,
					     &current_function_decl)
   */
  gsi = gsi_after_labels (split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))));

  cur_func = force_gimple_operand_gsi (&gsi,
				       build_addr (current_function_decl,
						   current_function_decl),
				       true, NULL_TREE,
				       true, GSI_SAME_STMT);
  tree_uid = build_int_cst
	      (gcov_type_node, cgraph_node::get (current_function_decl)->profile_id);
  /* Workaround for binutils bug 14342.  Once it is fixed, remove lto path.  */
  if (flag_lto)
    {
      tree counter_ptr, ptr_var;
      counter_ptr = force_gimple_operand_gsi (&gsi, ic_gcov_type_ptr_var,
					      true, NULL_TREE, true,
					      GSI_SAME_STMT);
      ptr_var = force_gimple_operand_gsi (&gsi, ic_void_ptr_var,
					  true, NULL_TREE, true,
					  GSI_SAME_STMT);

      stmt1 = gimple_build_call (tree_indirect_call_profiler_fn, 4,
				 counter_ptr, tree_uid, cur_func, ptr_var);
    }
  else
    {
      stmt1 = gimple_build_call (tree_indirect_call_profiler_fn, 2,
				 tree_uid, cur_func);
    }
  gsi_insert_before (&gsi, stmt1, GSI_SAME_STMT);

  /* Set __gcov_indirect_call_callee to 0,
     so that calls from other modules won't get misattributed
     to the last caller of the current callee. */
  void0 = build_int_cst (build_pointer_type (void_type_node), 0);
  stmt2 = gimple_build_assign (ic_void_ptr_var, void0);
  gsi_insert_before (&gsi, stmt2, GSI_SAME_STMT);
}
Example #9
static void
create_bb_after_loop (struct loop *loop)
{
  edge exit = single_exit (loop);

  if (!exit)
    return;

  split_edge (exit);
}
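create_bb_after_loop relies on the usual CFG contract of split_edge(): a fresh, empty basic block is placed on the edge, becoming its new destination and falling through to the old one, which gives later passes a landing block just past the loop exit. A toy, self-contained sketch of that operation (illustration only, unrelated to GCC's internal edge and basic_block types):

#include <stdio.h>

/* Toy single-successor block; illustration only, not GCC's basic_block.  */
struct toy_block { int id; struct toy_block *succ; };

/* Mirror what split_edge () does for one CFG edge: redirect the edge
   U -> U->succ to FRESH, and let FRESH fall through to the old
   destination.  Returns the newly inserted block.  */
static struct toy_block *
toy_split_edge (struct toy_block *u, struct toy_block *fresh)
{
  fresh->succ = u->succ;
  u->succ = fresh;
  return fresh;
}

int main (void)
{
  struct toy_block after_loop = { 2, NULL };
  struct toy_block loop_exit = { 1, &after_loop };
  struct toy_block landing = { 3, NULL };

  toy_split_edge (&loop_exit, &landing);
  /* Prints "1 -> 3 -> 2": the landing block now sits on the exit edge.  */
  printf ("%d -> %d -> %d\n", loop_exit.id, loop_exit.succ->id,
	  loop_exit.succ->succ->id);
  return 0;
}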
Example #10
static void refine_op(mesh* m, ment e)
{
  ment v[2];
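  /* find the two endpoints v of the best edge of e to split, then split that edge */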
  find_best_edge_split(m, the_split, e, v);
  split_edge(the_split, v);
}
Example #11
static void
emit_case_bit_tests (gswitch *swtch, tree index_expr,
		     tree minval, tree range, tree maxval)
{
  struct case_bit_test test[MAX_CASE_BIT_TESTS];
  unsigned int i, j, k;
  unsigned int count;

  basic_block switch_bb = gimple_bb (swtch);
  basic_block default_bb, new_default_bb, new_bb;
  edge default_edge;
  bool update_dom = dom_info_available_p (CDI_DOMINATORS);

  vec<basic_block> bbs_to_fix_dom = vNULL;

  tree index_type = TREE_TYPE (index_expr);
  tree unsigned_index_type = unsigned_type_for (index_type);
  unsigned int branch_num = gimple_switch_num_labels (swtch);

  gimple_stmt_iterator gsi;
  gassign *shift_stmt;

  tree idx, tmp, csui;
  tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
  tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
  tree word_mode_one = fold_convert (word_type_node, integer_one_node);
  int prec = TYPE_PRECISION (word_type_node);
  wide_int wone = wi::one (prec);

  memset (&test, 0, sizeof (test));

  /* Get the edge for the default case.  */
  tmp = gimple_switch_default_label (swtch);
  default_bb = label_to_block (CASE_LABEL (tmp));
  default_edge = find_edge (switch_bb, default_bb);

  /* Go through all case labels, and collect the case labels, profile
     counts, and other information we need to build the branch tests.  */
  count = 0;
  for (i = 1; i < branch_num; i++)
    {
      unsigned int lo, hi;
      tree cs = gimple_switch_label (swtch, i);
      tree label = CASE_LABEL (cs);
      edge e = find_edge (switch_bb, label_to_block (label));
      for (k = 0; k < count; k++)
	if (e == test[k].target_edge)
	  break;

      if (k == count)
	{
	  gcc_checking_assert (count < MAX_CASE_BIT_TESTS);
	  test[k].mask = wi::zero (prec);
	  test[k].target_edge = e;
	  test[k].label = label;
	  test[k].bits = 1;
	  count++;
	}
      else
        test[k].bits++;

      lo = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					  CASE_LOW (cs), minval));
      if (CASE_HIGH (cs) == NULL_TREE)
	hi = lo;
      else
	hi = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					    CASE_HIGH (cs), minval));

      for (j = lo; j <= hi; j++)
	test[k].mask |= wi::lshift (wone, j);
    }

  qsort (test, count, sizeof (*test), case_bit_test_cmp);

  /* If all values are in the 0 .. BITS_PER_WORD-1 range, we can get rid of
     the minval subtractions, but it might make the mask constants more
     expensive.  So, compare the costs.  */
  if (compare_tree_int (minval, 0) > 0
      && compare_tree_int (maxval, GET_MODE_BITSIZE (word_mode)) < 0)
    {
      int cost_diff;
      HOST_WIDE_INT m = tree_to_uhwi (minval);
      rtx reg = gen_raw_REG (word_mode, 10000);
      bool speed_p = optimize_bb_for_speed_p (gimple_bb (swtch));
      cost_diff = set_rtx_cost (gen_rtx_PLUS (word_mode, reg,
					      GEN_INT (-m)), speed_p);
      for (i = 0; i < count; i++)
	{
	  rtx r = immed_wide_int_const (test[i].mask, word_mode);
	  cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     word_mode, speed_p);
	  r = immed_wide_int_const (wi::lshift (test[i].mask, m), word_mode);
	  cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     word_mode, speed_p);
	}
      if (cost_diff > 0)
	{
	  for (i = 0; i < count; i++)
	    test[i].mask = wi::lshift (test[i].mask, m);
	  minval = build_zero_cst (TREE_TYPE (minval));
	  range = maxval;
	}
    }

  /* We generate two jumps to the default case label.
     Split the default edge, so that we don't have to do any PHI node
     updating.  */
  new_default_bb = split_edge (default_edge);

  if (update_dom)
    {
      bbs_to_fix_dom.create (10);
      bbs_to_fix_dom.quick_push (switch_bb);
      bbs_to_fix_dom.quick_push (default_bb);
      bbs_to_fix_dom.quick_push (new_default_bb);
    }

  /* Now build the test-and-branch code.  */

  gsi = gsi_last_bb (switch_bb);

  /* idx = (unsigned)x - minval.  */
  idx = fold_convert (unsigned_index_type, index_expr);
  idx = fold_build2 (MINUS_EXPR, unsigned_index_type, idx,
		     fold_convert (unsigned_index_type, minval));
  idx = force_gimple_operand_gsi (&gsi, idx,
				  /*simple=*/true, NULL_TREE,
				  /*before=*/true, GSI_SAME_STMT);

  /* if (idx > range) goto default */
  range = force_gimple_operand_gsi (&gsi,
				    fold_convert (unsigned_index_type, range),
				    /*simple=*/true, NULL_TREE,
				    /*before=*/true, GSI_SAME_STMT);
  tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
  new_bb = hoist_edge_and_branch_if_true (&gsi, tmp, default_edge, update_dom);
  if (update_dom)
    bbs_to_fix_dom.quick_push (new_bb);
  gcc_assert (gimple_bb (swtch) == new_bb);
  gsi = gsi_last_bb (new_bb);

  /* Any blocks dominated by the GIMPLE_SWITCH, but that are not successors
     of NEW_BB, are still immediately dominated by SWITCH_BB.  Make it so.  */
  if (update_dom)
    {
      vec<basic_block> dom_bbs;
      basic_block dom_son;

      dom_bbs = get_dominated_by (CDI_DOMINATORS, new_bb);
      FOR_EACH_VEC_ELT (dom_bbs, i, dom_son)
	{
	  edge e = find_edge (new_bb, dom_son);
	  if (e && single_pred_p (e->dest))
	    continue;
	  set_immediate_dominator (CDI_DOMINATORS, dom_son, switch_bb);
	  bbs_to_fix_dom.safe_push (dom_son);
	}
      dom_bbs.release ();
    }
Example #12
static void
emit_case_bit_tests (gimple swtch, tree index_expr,
		     tree minval, tree range)
{
  struct case_bit_test test[MAX_CASE_BIT_TESTS];
  unsigned int i, j, k;
  unsigned int count;

  basic_block switch_bb = gimple_bb (swtch);
  basic_block default_bb, new_default_bb, new_bb;
  edge default_edge;
  bool update_dom = dom_info_available_p (CDI_DOMINATORS);

  vec<basic_block> bbs_to_fix_dom = vNULL;

  tree index_type = TREE_TYPE (index_expr);
  tree unsigned_index_type = unsigned_type_for (index_type);
  unsigned int branch_num = gimple_switch_num_labels (swtch);

  gimple_stmt_iterator gsi;
  gimple shift_stmt;

  tree idx, tmp, csui;
  tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
  tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
  tree word_mode_one = fold_convert (word_type_node, integer_one_node);

  memset (&test, 0, sizeof (test));

  /* Get the edge for the default case.  */
  tmp = gimple_switch_default_label (swtch);
  default_bb = label_to_block (CASE_LABEL (tmp));
  default_edge = find_edge (switch_bb, default_bb);

  /* Go through all case labels, and collect the case labels, profile
     counts, and other information we need to build the branch tests.  */
  count = 0;
  for (i = 1; i < branch_num; i++)
    {
      unsigned int lo, hi;
      tree cs = gimple_switch_label (swtch, i);
      tree label = CASE_LABEL (cs);
      edge e = find_edge (switch_bb, label_to_block (label));
      for (k = 0; k < count; k++)
	if (e == test[k].target_edge)
	  break;

      if (k == count)
	{
	  gcc_checking_assert (count < MAX_CASE_BIT_TESTS);
	  test[k].hi = 0;
	  test[k].lo = 0;
	  test[k].target_edge = e;
	  test[k].label = label;
	  test[k].bits = 1;
	  count++;
	}
      else
        test[k].bits++;

      lo = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					  CASE_LOW (cs), minval));
      if (CASE_HIGH (cs) == NULL_TREE)
	hi = lo;
      else
	hi = tree_to_uhwi (int_const_binop (MINUS_EXPR,
					    CASE_HIGH (cs), minval));

      for (j = lo; j <= hi; j++)
        if (j >= HOST_BITS_PER_WIDE_INT)
	  test[k].hi |= (HOST_WIDE_INT) 1 << (j - HOST_BITS_PER_WIDE_INT);
	else
	  test[k].lo |= (HOST_WIDE_INT) 1 << j;
    }

  qsort (test, count, sizeof (*test), case_bit_test_cmp);

  /* We generate two jumps to the default case label.
     Split the default edge, so that we don't have to do any PHI node
     updating.  */
  new_default_bb = split_edge (default_edge);

  if (update_dom)
    {
      bbs_to_fix_dom.create (10);
      bbs_to_fix_dom.quick_push (switch_bb);
      bbs_to_fix_dom.quick_push (default_bb);
      bbs_to_fix_dom.quick_push (new_default_bb);
    }

  /* Now build the test-and-branch code.  */

  gsi = gsi_last_bb (switch_bb);

  /* idx = (unsigned)x - minval.  */
  idx = fold_convert (unsigned_index_type, index_expr);
  idx = fold_build2 (MINUS_EXPR, unsigned_index_type, idx,
		     fold_convert (unsigned_index_type, minval));
  idx = force_gimple_operand_gsi (&gsi, idx,
				  /*simple=*/true, NULL_TREE,
				  /*before=*/true, GSI_SAME_STMT);

  /* if (idx > range) goto default */
  range = force_gimple_operand_gsi (&gsi,
				    fold_convert (unsigned_index_type, range),
				    /*simple=*/true, NULL_TREE,
				    /*before=*/true, GSI_SAME_STMT);
  tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
  new_bb = hoist_edge_and_branch_if_true (&gsi, tmp, default_edge, update_dom);
  if (update_dom)
    bbs_to_fix_dom.quick_push (new_bb);
  gcc_assert (gimple_bb (swtch) == new_bb);
  gsi = gsi_last_bb (new_bb);

  /* Any blocks dominated by the GIMPLE_SWITCH, but that are not successors
     of NEW_BB, are still immediately dominated by SWITCH_BB.  Make it so.  */
  if (update_dom)
    {
      vec<basic_block> dom_bbs;
      basic_block dom_son;

      dom_bbs = get_dominated_by (CDI_DOMINATORS, new_bb);
      FOR_EACH_VEC_ELT (dom_bbs, i, dom_son)
	{
	  edge e = find_edge (new_bb, dom_son);
	  if (e && single_pred_p (e->dest))
	    continue;
	  set_immediate_dominator (CDI_DOMINATORS, dom_son, switch_bb);
	  bbs_to_fix_dom.safe_push (dom_son);
	}
      dom_bbs.release ();
    }
Example #13
static bool
gimple_find_edge_insert_loc (edge e, gimple_stmt_iterator *gsi,
			     basic_block *new_bb)
{
  basic_block dest, src;
  gimple *tmp;

  dest = e->dest;

  /* If the destination has one predecessor which has no PHI nodes,
     insert there.  Except for the exit block.

     The requirement for no PHI nodes could be relaxed.  Basically we
     would have to examine the PHIs to prove that none of them used
     the value set by the statement we want to insert on E.  That
     hardly seems worth the effort.  */
 restart:
  if (single_pred_p (dest)
      && gimple_seq_empty_p (phi_nodes (dest))
      && dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      *gsi = gsi_start_bb (dest);
      if (gsi_end_p (*gsi))
	return true;

      /* Make sure we insert after any leading labels.  */
      tmp = gsi_stmt (*gsi);
      while (gimple_code (tmp) == GIMPLE_LABEL)
	{
	  gsi_next (gsi);
	  if (gsi_end_p (*gsi))
	    break;
	  tmp = gsi_stmt (*gsi);
	}

      if (gsi_end_p (*gsi))
	{
	  *gsi = gsi_last_bb (dest);
	  return true;
	}
      else
	return false;
    }

  /* If the source has one successor, the edge is not abnormal and
     the last statement does not end a basic block, insert there.
     Except for the entry block.  */
  src = e->src;
  if ((e->flags & EDGE_ABNORMAL) == 0
      && single_succ_p (src)
      && src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
    {
      *gsi = gsi_last_bb (src);
      if (gsi_end_p (*gsi))
	return true;

      tmp = gsi_stmt (*gsi);
      if (!stmt_ends_bb_p (tmp))
	return true;

      switch (gimple_code (tmp))
	{
	case GIMPLE_RETURN:
	case GIMPLE_RESX:
	  return false;
	default:
	  break;
        }
    }

  /* Otherwise, create a new basic block, and split this edge.  */
  dest = split_edge (e);
  if (new_bb)
    *new_bb = dest;
  e = single_pred_edge (dest);
  goto restart;
}
Example #14
static int implement_lospre_assignment(const assignment_lospre a, T_t &T, G_t &G, const iCode *ic) // Assignment has to be passed as a copy (not reference), since the transformations on the tree-decomposition will invalidate it otherwise.
{
  operand *tmpop;
  unsigned substituted = 0, split = 0;

  typedef typename boost::graph_traits<G_t>::edge_iterator edge_iter_t;
  typedef typename boost::graph_traits<G_t>::edge_descriptor edge_desc_t;
  std::set<edge_desc_t> calculation_edges; // Use descriptor, not iterator due to possible invalidation of iterators when inserting vertices or edges.
  edge_iter_t e, e_end;
  for(boost::tie(e, e_end) = boost::edges(G); e != e_end; ++e)
    if(!((a.global[boost::source(*e, G)] & true) && !G[boost::source(*e, G)].invalidates) && (a.global[boost::target(*e, G)] & true))
      calculation_edges.insert(*e);

  if(!calculation_edges.size())
    return(0);

#ifdef DEBUG_LOSPRE
  std::cout << "Optimizing at " << ic->key << "\n"; std::cout.flush();
#endif

  tmpop = newiTempOperand (operandType (IC_RESULT (ic)), TRUE);
  tmpop->isvolatile = false;
#ifdef DEBUG_LOSPRE
  std::cout << "New tmpop: " << OP_SYMBOL_CONST(tmpop)->name << " "; printTypeChain(operandType (IC_RESULT(ic)), stdout); std::cout << "\n";
#endif

  for(typename std::set<edge_desc_t>::iterator i = calculation_edges.begin(); i != calculation_edges.end(); ++i)
  {
    split_edge(T, G, *i, ic, tmpop);
    split++;
  }

  typedef typename boost::graph_traits<G_t>::vertex_iterator vertex_iter_t;
  vertex_iter_t v, v_end;

  for(boost::tie(v, v_end) = boost::vertices(G); v != v_end; ++v)
    {
      if(!G[*v].uses)
        continue;
      typename boost::graph_traits<G_t>::in_edge_iterator e = in_edges(*v, G).first;
      if (a.global.size() <= *v)
        continue;
      if(!((a.global[*v] & true) && !G[*v].invalidates || boost::source(*e, G) < a.global.size() && (a.global[boost::source(*e, G)] & true)))
        continue;
#ifdef DEBUG_LOSPRE
      std::cout << "Substituting ic " << G[*v].ic->key << "\n";
#endif
      substituted++;

      iCode *ic = G[*v].ic;
      //if (IC_LEFT (ic) && IS_ITEMP (IC_LEFT (ic)))
      //  bitVectUnSetBit (OP_SYMBOL (IC_LEFT (ic))->uses, ic->key);
      //if (IC_RIGHT (ic) && IS_ITEMP (IC_RIGHT (ic)))
       // bitVectUnSetBit (OP_SYMBOL (IC_RIGHT (ic))->uses, ic->key);
      IC_RIGHT(ic) = tmpop;
      //bitVectSetBit (OP_SYMBOL (IC_RIGHT(ic))->uses, ic->key);
      if (!POINTER_SET (ic))
        {
          IC_LEFT(ic) = 0;
          ic->op = '=';
          IC_RESULT(ic) = operandFromOperand (IC_RESULT (ic));
          IC_RESULT(ic)->isaddr = 0;
        }
      if(IS_OP_VOLATILE(IC_RESULT (ic)))
        continue;

      {
        typedef typename boost::graph_traits<G_t>::adjacency_iterator adjacency_iter_t;
        adjacency_iter_t c, c_end;
        boost::tie(c, c_end) = adjacent_vertices(*v, G);
        if (c != c_end)
          forward_lospre_assignment(G, *c, ic, a);
      }
    }

  if(substituted <= 0)
    {
      std::cerr << "Introduced " << OP_SYMBOL_CONST(tmpop)->name << ", but did not substitute any calculations.\n";
      return (-1);
    }

  if(substituted < split) // Todo: Remove this warning when optimization for speed instead of code size is implemented!
    {
      std::cout << "Introduced " << OP_SYMBOL_CONST(tmpop)->name << ", but did substitute only " << substituted << " calculations, while introducing "<< split << ".\n";
      std::cout.flush();
    }

  return(1);
}
Example #15
/* Takes care of merging natural loops with shared headers.  */
static void
canonicalize_loop_headers (void)
{
  basic_block header;
  edge e;

  /* Compute the dominators.  */
  calculate_dominance_info (CDI_DOMINATORS);

  alloc_aux_for_blocks (sizeof (int));
  alloc_aux_for_edges (sizeof (int));

  /* Split blocks so that each loop has only single latch.  */
  FOR_EACH_BB (header)
    {
      int num_latches = 0;
      int have_abnormal_edge = 0;

      for (e = header->pred; e; e = e->pred_next)
	{
	  basic_block latch = e->src;

	  if (e->flags & EDGE_ABNORMAL)
	    have_abnormal_edge = 1;

	  if (latch != ENTRY_BLOCK_PTR
	      && dominated_by_p (CDI_DOMINATORS, latch, header))
	    {
	      num_latches++;
	      LATCH_EDGE (e) = 1;
	    }
	}
      if (have_abnormal_edge)
	HEADER_BLOCK (header) = 0;
      else
	HEADER_BLOCK (header) = num_latches;
    }

  free_dominance_info (CDI_DOMINATORS);

  if (HEADER_BLOCK (ENTRY_BLOCK_PTR->succ->dest))
    {
      basic_block bb;

      /* We could not redirect edges freely here. On the other hand,
	 we can simply split the edge from entry block.  */
      bb = split_edge (ENTRY_BLOCK_PTR->succ);

      alloc_aux_for_edge (bb->succ, sizeof (int));
      LATCH_EDGE (bb->succ) = 0;
      alloc_aux_for_block (bb, sizeof (int));
      HEADER_BLOCK (bb) = 0;
    }

  FOR_EACH_BB (header)
    {
      int num_latch;
      int want_join_latch;
      int max_freq, is_heavy;
      edge heavy;

      if (!HEADER_BLOCK (header))
	continue;

      num_latch = HEADER_BLOCK (header);

      want_join_latch = (num_latch > 1);

      if (!want_join_latch)
	continue;

      /* Find a heavy edge.  */
      is_heavy = 1;
      heavy = NULL;
      max_freq = 0;
      for (e = header->pred; e; e = e->pred_next)
	if (LATCH_EDGE (e) &&
	    EDGE_FREQUENCY (e) > max_freq)
	  max_freq = EDGE_FREQUENCY (e);
      for (e = header->pred; e; e = e->pred_next)
	if (LATCH_EDGE (e) &&
	    EDGE_FREQUENCY (e) >= max_freq / HEAVY_EDGE_RATIO)
	  {
	    if (heavy)
	      {
		is_heavy = 0;
		break;
	      }
	    else
	      heavy = e;
	  }

      if (is_heavy)
	{
	  basic_block new_header =
	    make_forwarder_block (header, true, true, heavy, 0);
	  if (num_latch > 2)
	    make_forwarder_block (new_header, true, false, NULL, 1);
	}
      else
	make_forwarder_block (header, true, false, NULL, 1);
    }

  free_aux_for_blocks ();
  free_aux_for_edges ();
}
Example #16
static struct loop *
unswitch_loop (struct loop *loop, basic_block unswitch_on, rtx cond, rtx cinsn)
{
  edge entry, latch_edge, true_edge, false_edge, e;
  basic_block switch_bb, unswitch_on_alt;
  struct loop *nloop;
  int irred_flag, prob;
  rtx seq;

  /* Some sanity checking.  */
  gcc_assert (flow_bb_inside_loop_p (loop, unswitch_on));
  gcc_assert (EDGE_COUNT (unswitch_on->succs) == 2);
  gcc_assert (just_once_each_iteration_p (loop, unswitch_on));
  gcc_assert (!loop->inner);
  gcc_assert (flow_bb_inside_loop_p (loop, EDGE_SUCC (unswitch_on, 0)->dest));
  gcc_assert (flow_bb_inside_loop_p (loop, EDGE_SUCC (unswitch_on, 1)->dest));

  entry = loop_preheader_edge (loop);

  /* Make a copy.  */
  irred_flag = entry->flags & EDGE_IRREDUCIBLE_LOOP;
  entry->flags &= ~EDGE_IRREDUCIBLE_LOOP;
  if (!duplicate_loop_to_header_edge (loop, entry, 1,
			      	      NULL, NULL, NULL, 0))
    return NULL;
  entry->flags |= irred_flag;

  /* Record the block with condition we unswitch on.  */
  unswitch_on_alt = get_bb_copy (unswitch_on);
  true_edge = BRANCH_EDGE (unswitch_on_alt);
  false_edge = FALLTHRU_EDGE (unswitch_on);
  latch_edge = single_succ_edge (get_bb_copy (loop->latch));

  /* Create a block with the condition.  */
  prob = true_edge->probability;
  switch_bb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
  seq = compare_and_jump_seq (XEXP (cond, 0), XEXP (cond, 1), GET_CODE (cond),
			      block_label (true_edge->dest),
			      prob, cinsn);
  emit_insn_after (seq, BB_END (switch_bb));
  e = make_edge (switch_bb, true_edge->dest, 0);
  e->probability = prob;
  e->count = latch_edge->count * prob / REG_BR_PROB_BASE;
  e = make_edge (switch_bb, FALLTHRU_EDGE (unswitch_on)->dest, EDGE_FALLTHRU);
  e->probability = false_edge->probability;
  e->count = latch_edge->count * (false_edge->probability) / REG_BR_PROB_BASE;

  if (irred_flag)
    {
      switch_bb->flags |= BB_IRREDUCIBLE_LOOP;
      EDGE_SUCC (switch_bb, 0)->flags |= EDGE_IRREDUCIBLE_LOOP;
      EDGE_SUCC (switch_bb, 1)->flags |= EDGE_IRREDUCIBLE_LOOP;
    }
  else
    {
      switch_bb->flags &= ~BB_IRREDUCIBLE_LOOP;
      EDGE_SUCC (switch_bb, 0)->flags &= ~EDGE_IRREDUCIBLE_LOOP;
      EDGE_SUCC (switch_bb, 1)->flags &= ~EDGE_IRREDUCIBLE_LOOP;
    }

  /* Loopify from the copy of LOOP body, constructing the new loop.  */
  nloop = loopify (latch_edge,
		   single_pred_edge (get_bb_copy (loop->header)), switch_bb,
		   BRANCH_EDGE (switch_bb), FALLTHRU_EDGE (switch_bb), true,
		   prob, REG_BR_PROB_BASE - prob);

  copy_loop_info (loop, nloop);
  /* Remove branches that are now unreachable in new loops.  */
  remove_path (true_edge);
  remove_path (false_edge);

  /* Preserve the simple loop preheaders.  */
  split_edge (loop_preheader_edge (loop));
  split_edge (loop_preheader_edge (nloop));

  return nloop;
}
Example #17
void SuffixTree::add_to_tree(int pos) {
    
    ++curr_end; // automatically update all leaf nodes to new end
    
    ++remainder; // increment remainder as we are attempting to insert another character
    
    Node* last_new_node = nullptr;
    
    while (remainder > 0)
    {
        // if active length is 0, then
        // only the character at the current index
        // remains to be inserted
        if (active_point.active_length == 0)
        {
            active_point.active_edge = pos;
        }
        
        // check if the character is present at this node
        auto it = active_point.active_node->edges.find(input_string[active_point.active_edge]);
        
        if (it == active_point.active_node->edges.end())
        {
            // create a new leaf node
            std::unique_ptr<Node> node(new Node(pos, &curr_end, root.get()));
            
            // add it to active node's edges
            active_point.active_node->edges.insert(std::make_pair(input_string[active_point.active_edge], std::move(node)));
            active_point.active_node->is_leaf = false;
            
            if (last_new_node != nullptr)
            {
                last_new_node->suffix_link = active_point.active_node;
                last_new_node = nullptr;
            }
        }
        else
        {
            Node* ptr = it->second.get();
            
            // check if we are falling off
            if (ptr->start + active_point.active_length > *(ptr->end))
            {
                active_point.active_node = ptr;
                active_point.active_edge += (*(ptr->end) - ptr->start + 1);
                active_point.active_length -= (*(ptr->end) - ptr->start + 1);
               continue;
            }
            
            // check if the next character matches the current one
            if (input_string[ptr->start + active_point.active_length] == input_string[pos])
            {
                ++active_point.active_length;
                if (last_new_node != nullptr && active_point.active_node != root.get())
                {
                    last_new_node->suffix_link = active_point.active_node;
                    last_new_node = nullptr;
                }
                return;
            }
            
            // if we are here, the character does not match
            // need to create a new internal node
            Node* node = split_edge(ptr, pos);
            
            if (node && (last_new_node == ptr))
            {
                Node* temp = last_new_node;
                last_new_node = node;
                ptr = temp;
            }
            
            if (last_new_node != nullptr)
            {
                last_new_node->suffix_link = ptr;
            }
            last_new_node = ptr;
        }
        --remainder;
        if (active_point.active_node == root.get() && active_point.active_length > 0)
        {
            --active_point.active_length;
            active_point.active_edge = pos - remainder + 1;
        }
        else if(active_point.active_node != root.get())
        {
            active_point.active_node = active_point.active_node->suffix_link;
        }
    }
}
Example #18
void UkkTree::add_substring_to_node_iterative(UkkNode *_node,int _from, int _to){
        UkkNode *node=_node;
        int from=_from;
        int to=_to;
        while (TRUE) {
            Symbol x=text[from];
            int substring_lenght=to-from+1;
            auto edge_it=node->edges.find(x);
            if (edge_it==node->edges.end()){// #CASE1 (final)
                //RULE2: new leaf case
                //std::cout<<"Rule2New"<<std::endl;
                UkkLeaf *new_leaf= new UkkLeaf(from);
                node->edges[x]=new_leaf;
                last_leaf=new_leaf;
                n_leafs++;
                if (bool(last_leaf_node)
                && (! last_leaf_node->isRoot())
                && (! bool(last_leaf_node->suffixLink)))
                    {last_leaf_node->suffixLink=node;};
                last_leaf_node=node;
                return;
                }
            else if (edgeLenght(edge_it->second)>(substring_lenght))// #CASE2 (final)
                {//the suffix to add is shorter than the label of current edge:
                UkkEdgeBase *edge=edge_it->second;
                int csl=common_substring_lenght(from,edge->first);
                if (csl>substring_lenght) {// #CASE2.1 (final)
                    //this extension will be implicit , use rule 3
                    //std::cout<<"Rule3"<<std::endl;
                    rule_3_used=TRUE;
                    //following extensions will also be implicit in this phase
                    if (  bool(last_leaf_node)
                    && (! last_leaf_node->isRoot())
                    && (! bool(last_leaf_node->suffixLink)) )
                        last_leaf_node->suffixLink=node;
                    return;}
                else {// #CASE2.2
                    //in this case common_substring_lenght<=substring_lenght
                      //this extension will split the edge using RULE2
                    /*
                    if edge is a leaf
                        assert last_common_index<=text_index
                    else
                        assert last_common_index< edge.last
                    */
                    UkkEdgeBase *edge=edge_it->second;
                    //std::cout<<"Rule2Split"<<std::endl;
                    split_edge(
                        node,
                        edge,
                        csl,
                        to);
                    return;};
                }
            //#CASE3 (recursive)
            //in this case :
            //  edgeLenght(edge_it.second)<=substring_lenght
            //  recursively call the same function on the next node in the chain
            node=edge_it->second->node();
            from=from+edgeLenght(edge_it->second);};
        return;};
Example #19
static basic_block
create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
{
  edge eg;
  edge_iterator ei;
  basic_block pre_exit;

  /* The only non-call predecessor at this stage is a block with a
     fallthrough edge; there can be at most one, but there could be
     none at all, e.g. when exit is called.  */
  pre_exit = 0;
  FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    if (eg->flags & EDGE_FALLTHRU)
      {
	basic_block src_bb = eg->src;
	rtx_insn *last_insn;
	rtx ret_reg;

	gcc_assert (!pre_exit);
	/* If this function returns a value at the end, we have to
	   insert the final mode switch before the return value copy
	   to its hard register.  */
	if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
	    && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
	    && GET_CODE (PATTERN (last_insn)) == USE
	    && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
	  {
	    int ret_start = REGNO (ret_reg);
	    int nregs = hard_regno_nregs[ret_start][GET_MODE (ret_reg)];
	    int ret_end = ret_start + nregs;
	    bool short_block = false;
	    bool multi_reg_return = false;
	    bool forced_late_switch = false;
	    rtx_insn *before_return_copy;

	    do
	      {
		rtx_insn *return_copy = PREV_INSN (last_insn);
		rtx return_copy_pat, copy_reg;
		int copy_start, copy_num;
		int j;

		if (NONDEBUG_INSN_P (return_copy))
		  {
		    /* When using SJLJ exceptions, the call to the
		       unregister function is inserted between the
		       clobber of the return value and the copy.
		       We do not want to split the block before this
		       or any other call; if we have not found the
		       copy yet, the copy must have been deleted.  */
		    if (CALL_P (return_copy))
		      {
			short_block = true;
			break;
		      }
		    return_copy_pat = PATTERN (return_copy);
		    switch (GET_CODE (return_copy_pat))
		      {
		      case USE:
			/* Skip USEs of multiple return registers.
			   __builtin_apply pattern is also handled here.  */
			if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
			    && (targetm.calls.function_value_regno_p
				(REGNO (XEXP (return_copy_pat, 0)))))
			  {
			    multi_reg_return = true;
			    last_insn = return_copy;
			    continue;
			  }
			break;

		      case ASM_OPERANDS:
			/* Skip barrier insns.  */
			if (!MEM_VOLATILE_P (return_copy_pat))
			  break;

			/* Fall through.  */

		      case ASM_INPUT:
		      case UNSPEC_VOLATILE:
			last_insn = return_copy;
			continue;

		      default:
			break;
		      }

		    /* If the return register is not (in its entirety)
		       likely spilled, the return copy might be
		       partially or completely optimized away.  */
		    return_copy_pat = single_set (return_copy);
		    if (!return_copy_pat)
		      {
			return_copy_pat = PATTERN (return_copy);
			if (GET_CODE (return_copy_pat) != CLOBBER)
			  break;
			else if (!optimize)
			  {
			    /* This might be (clobber (reg [<result>]))
			       when not optimizing.  Then check if
			       the previous insn is the clobber for
			       the return register.  */
			    copy_reg = SET_DEST (return_copy_pat);
			    if (GET_CODE (copy_reg) == REG
				&& !HARD_REGISTER_NUM_P (REGNO (copy_reg)))
			      {
				if (INSN_P (PREV_INSN (return_copy)))
				  {
				    return_copy = PREV_INSN (return_copy);
				    return_copy_pat = PATTERN (return_copy);
				    if (GET_CODE (return_copy_pat) != CLOBBER)
				      break;
				  }
			      }
			  }
		      }
		    copy_reg = SET_DEST (return_copy_pat);
		    if (GET_CODE (copy_reg) == REG)
		      copy_start = REGNO (copy_reg);
		    else if (GET_CODE (copy_reg) == SUBREG
			     && GET_CODE (SUBREG_REG (copy_reg)) == REG)
		      copy_start = REGNO (SUBREG_REG (copy_reg));
		    else
		      {
			/* When control reaches end of non-void function,
			   there are no return copy insns at all.  This
			   avoids an ice on that invalid function.  */
			if (ret_start + nregs == ret_end)
			  short_block = true;
			break;
		      }
		    if (!targetm.calls.function_value_regno_p (copy_start))
		      copy_num = 0;
		    else
		      copy_num
			= hard_regno_nregs[copy_start][GET_MODE (copy_reg)];

		    /* If the return register is not likely spilled, - as is
		       the case for floating point on SH4 - then it might
		       be set by an arithmetic operation that needs a
		       different mode than the exit block.  */
		    for (j = n_entities - 1; j >= 0; j--)
		      {
			int e = entity_map[j];
			int mode =
			  targetm.mode_switching.needed (e, return_copy);

			if (mode != num_modes[e]
			    && mode != targetm.mode_switching.exit (e))
			  break;
		      }
		    if (j >= 0)
		      {
			/* __builtin_return emits a sequence of loads to all
			   return registers.  One of them might require
			   another mode than MODE_EXIT, even if it is
			   unrelated to the return value, so we want to put
			   the final mode switch after it.  */
			if (multi_reg_return
			    && targetm.calls.function_value_regno_p
			        (copy_start))
			  forced_late_switch = true;

			/* For the SH4, floating point loads depend on fpscr,
			   thus we might need to put the final mode switch
			   after the return value copy.  That is still OK,
			   because a floating point return value does not
			   conflict with address reloads.  */
			if (copy_start >= ret_start
			    && copy_start + copy_num <= ret_end
			    && OBJECT_P (SET_SRC (return_copy_pat)))
			  forced_late_switch = true;
			break;
		      }
		    if (copy_num == 0)
		      {
			last_insn = return_copy;
			continue;
		      }

		    if (copy_start >= ret_start
			&& copy_start + copy_num <= ret_end)
		      nregs -= copy_num;
		    else if (!multi_reg_return
			     || !targetm.calls.function_value_regno_p
				 (copy_start))
		      break;
		    last_insn = return_copy;
		  }
		/* ??? Exception handling can lead to the return value
		   copy being already separated from the return value use,
		   as in  unwind-dw2.c .
		   Similarly, conditionally returning without a value,
		   and conditionally using builtin_return can lead to an
		   isolated use.  */
		if (return_copy == BB_HEAD (src_bb))
		  {
		    short_block = true;
		    break;
		  }
		last_insn = return_copy;
	      }
	    while (nregs);

	    /* If we didn't see a full return value copy, verify that there
	       is a plausible reason for this.  If some, but not all of the
	       return register is likely spilled, we can expect that there
	       is a copy for the likely spilled part.  */
	    gcc_assert (!nregs
			|| forced_late_switch
			|| short_block
			|| !(targetm.class_likely_spilled_p
			     (REGNO_REG_CLASS (ret_start)))
			|| (nregs
			    != hard_regno_nregs[ret_start][GET_MODE (ret_reg)])
			/* For multi-hard-register floating point
		   	   values, sometimes the likely-spilled part
		   	   is ordinarily copied first, then the other
		   	   part is set with an arithmetic operation.
		   	   This doesn't actually cause reload
		   	   failures, so let it pass.  */
			|| (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
			    && nregs != 1));

	    if (!NOTE_INSN_BASIC_BLOCK_P (last_insn))
	      {
		before_return_copy
		  = emit_note_before (NOTE_INSN_DELETED, last_insn);
		/* Instructions preceding LAST_INSN in the same block might
		   require a different mode than MODE_EXIT, so if we might
		   have such instructions, keep them in a separate block
		   from pre_exit.  */
		src_bb = split_block (src_bb,
				      PREV_INSN (before_return_copy))->dest;
	      }
	    else
	      before_return_copy = last_insn;
	    pre_exit = split_block (src_bb, before_return_copy)->src;
	  }
	else
	  {
	    pre_exit = split_edge (eg);
	  }
      }

  return pre_exit;
}
Example #20
void UkkTree::add_substring_to_node(UkkNode *node,int from, int to){
        Symbol x=text[from];
        /*
        std::cout<<"[UkkTree::add_substring_to_node] node #"<<node->name<<" from="<<from<<" to="<<to;
        std::cout<<"["<<x<<"]:"<<substring(from,to+1)<<std::endl;
        DEBUG CODE*/
        //std::cout<<"{START "<<node<<" "<<from<<" " <<to<<" "<<" }"<<std::endl;
        int substring_lenght=to-from+1;
        /*
        std::cout<<"[UkkTree::add_substring_to_node] substring lenght :"<<substring_lenght<<std::endl;
        DEBUG CODE*/
        auto edge_it=node->edges.find(x);
        if (edge_it==node->edges.end()){// #CASE1 (final)
            //RULE2: new leaf case
            //std::cout<<"Rule2New"<<std::endl;
            /*
            std::cout<<"[UkkTree::add_substring_to_node] no edges[symbol]=>new leaf"<<std::endl;
            std::cout<<"[UkkTree::add_substring_to_node]<"<<from<<","<<to<<">RULE2 "<<std::endl;
            DEBUG CODE*/
            UkkLeaf *new_leaf= new UkkLeaf(from);
            node->edges[x]=new_leaf;
            last_leaf=new_leaf;
            n_leafs++;
            if (bool(last_leaf_node)
            && (! last_leaf_node->isRoot())
            && (! bool(last_leaf_node->suffixLink)))
                {last_leaf_node->suffixLink=node;
                //std::cout<<"+s link"<<std::endl;
                };
            last_leaf_node=node;
            }
        else if (edgeLenght(edge_it->second)>=(substring_lenght))// #CASE2 (final) //NOTE: the comparison was > before, changed to >= at 7 pm on Wednesday
            {//the suffix to add is shorter than the label of current edge:
            UkkEdgeBase *edge=edge_it->second;
            /*
            std::cout << "edgeLenght(edge)="<<edgeLenght(edge_it->second) << std::endl;
            std::cout<<"[UkkTree::add_substring_to_node] found "<<node->name<<".edges[symbol]:"<<edgeString(edge)<<std::endl;
            DEBUG CODE*/
            int csl=common_substring_lenght(from,edge->first);
            /*
            std::cout<<"the suffix to add is shorter than the label of current edge"<<std::endl;
            std::cout<<" common:"<<csl<<std::endl;
            DEBUG CODE*/
            if (csl>=substring_lenght) {// #CASE2.1 (final)
                //this extension will be implicit , use rule 3
                rule_3_used=TRUE;
                //std::cout<<"Rule3"<<std::endl;
                /*std::cout<<"[UkkTree::add_substring_to_node] using RULE3"<<std::endl; DEBUG CODE*/
                //following extensions will also be implicit in this phase
                if (  bool(last_leaf_node)
                && (! last_leaf_node->isRoot())
                && (! bool(last_leaf_node->suffixLink)) )
                    { /*std::cout << "[UkkTree::add_substring_to_node] suffixlink ";
                      std::cout << last_leaf_node->name;
                      std::cout << "-->"<<node->name<<std::endl; DEBUG CODE*/
                      //std::cout<<"+s link"<<std::endl;
                      last_leaf_node->suffixLink=node;}
                else{
                      /*
                      std::cout << "[UkkTree::add_substring_to_node] NO suffixlink "<<std::endl;
                      if (!bool(last_leaf_node))
                        std::cout << "\tno last_leaf_node"<<std::endl;
                      if (bool(last_leaf_node)&&(last_leaf_node->isRoot()))
                        std::cout << "\tlast_leaf_node is root"<<std::endl;
                      if (bool(last_leaf_node)&&(last_leaf_node->suffixLink))
                        std::cout << "\tlast_leaf_node has suffixlink"<<std::endl;*/;
                };
                return;
                }
            else {// #CASE2.2
                //in this case common_substring_lenght<=substring_lenght
                  //this extension will split the edge using RULE2
                  //std::cout<<"Rule2Split"<<std::endl;
                /*
                if edge is a leaf
                    assert last_common_index<=text_index
                else
                    assert last_common_index< edge.last
                */

                /*
                std::cout<<"<"<<from<<","<<to<<">RULE2.2 => split"<<std::endl;
                std::cout<<"[UkkTree::add_substring_to_node] call split_edge"<<std::endl;
                std::cout<<"[UkkTree::add_substring_to_node] split_edge node:"<<node->name<<std::endl;
                std::cout<<"[UkkTree::add_substring_to_node] split_edge edges["<<text[edge->first]<<"]"<<std::endl;
                std::cout<<"[UkkTree::add_substring_to_node] split_edge csl:"<<csl<<std::endl;
                std::cout<<"[UkkTree::add_substring_to_node] split_edge to:"<<to<<std::endl;
                DEBUG CODE*/
                UkkEdgeBase *edge=edge_it->second;
                split_edge(
                    node,
                    edge,
                    csl,
                    to);
                return;
                };
            }
        else {//#CASE3 (recursive: walk down the tree, look for another node)
            //in this case :
            //  edgeLenght(edge_it.second)<=substring_lenght
            //  recursively call the same function on the next node in the chain
            /*
            std::cout << "edgeLenght(edge)="<<edgeLenght(edge_it->second) << " is short , going down one node"<< std::endl;
            DEBUG CODE*/
            //std::cout<<"MOVE"<<std::endl;
            UkkNode *next_node=edge_it->second->node();
            int next_from=from+edgeLenght(edge_it->second);
            int next_to=to;
            add_substring_to_node(next_node,next_from, next_to);};
        //std::cout<<"{STOP "<<node<<" "<<from<<" " <<to<<" "<<" }"<<std::endl;
        return;};
Example #21
static void
shrink_wrap_one_built_in_call_with_conds (gcall *bi_call, vec <gimple *> conds,
					  unsigned int nconds)
{
  gimple_stmt_iterator bi_call_bsi;
  basic_block bi_call_bb, join_tgt_bb, guard_bb;
  edge join_tgt_in_edge_from_call, join_tgt_in_edge_fall_thru;
  edge bi_call_in_edge0, guard_bb_in_edge;
  unsigned tn_cond_stmts;
  unsigned ci;
  gimple *cond_expr = NULL;
  gimple *cond_expr_start;

  /* The cfg we want to create looks like this:

	   [guard n-1]         <- guard_bb (old block)
	     |    \
	     | [guard n-2]                   }
	     |    / \                        }
	     |   /  ...                      } new blocks
	     |  /  [guard 0]                 }
	     | /    /   |                    }
	    [ call ]    |     <- bi_call_bb  }
	     | \        |
	     |  \       |
	     |   [ join ]     <- join_tgt_bb (old iff call must end bb)
	     |
	 possible EH edges (only if [join] is old)

     When [join] is new, the immediate dominators for these blocks are:

     1. [guard n-1]: unchanged
     2. [call]: [guard n-1]
     3. [guard m]: [guard m+1] for 0 <= m <= n-2
     4. [join]: [guard n-1]

     We punt for the more complex case case of [join] being old and
     simply free the dominance info.  We also punt on postdominators,
     which aren't expected to be available at this point anyway.  */
  bi_call_bb = gimple_bb (bi_call);

  /* Now find the join target bb -- split bi_call_bb if needed.  */
  if (stmt_ends_bb_p (bi_call))
    {
      /* We checked that there was a fallthrough edge in
	 can_guard_call_p.  */
      join_tgt_in_edge_from_call = find_fallthru_edge (bi_call_bb->succs);
      gcc_assert (join_tgt_in_edge_from_call);
      /* We don't want to handle PHIs.  */
      if (EDGE_COUNT (join_tgt_in_edge_from_call->dest->preds) > 1)
	join_tgt_bb = split_edge (join_tgt_in_edge_from_call);
      else
	{
	  join_tgt_bb = join_tgt_in_edge_from_call->dest;
	  /* We may have degenerate PHIs in the destination.  Propagate
	     those out.  */
	  for (gphi_iterator i = gsi_start_phis (join_tgt_bb); !gsi_end_p (i);)
	    {
	      gphi *phi = i.phi ();
	      replace_uses_by (gimple_phi_result (phi),
			       gimple_phi_arg_def (phi, 0));
	      remove_phi_node (&i, true);
	    }
	}
    }
  else
    {
      join_tgt_in_edge_from_call = split_block (bi_call_bb, bi_call);
      join_tgt_bb = join_tgt_in_edge_from_call->dest;
    }

  bi_call_bsi = gsi_for_stmt (bi_call);

  /* Now it is time to insert the first conditional expression
     into bi_call_bb and split this bb so that bi_call is
     shrink-wrapped.  */
  tn_cond_stmts = conds.length ();
  cond_expr = NULL;
  cond_expr_start = conds[0];
  for (ci = 0; ci < tn_cond_stmts; ci++)
    {
      gimple *c = conds[ci];
      gcc_assert (c || ci != 0);
      if (!c)
        break;
      gsi_insert_before (&bi_call_bsi, c, GSI_SAME_STMT);
      cond_expr = c;
    }
  ci++;
  gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND);

  typedef std::pair<edge, edge> edge_pair;
  auto_vec<edge_pair, 8> edges;

  bi_call_in_edge0 = split_block (bi_call_bb, cond_expr);
  bi_call_in_edge0->flags &= ~EDGE_FALLTHRU;
  bi_call_in_edge0->flags |= EDGE_FALSE_VALUE;
  guard_bb = bi_call_bb;
  bi_call_bb = bi_call_in_edge0->dest;
  join_tgt_in_edge_fall_thru = make_edge (guard_bb, join_tgt_bb,
                                          EDGE_TRUE_VALUE);

  edges.reserve (nconds);
  edges.quick_push (edge_pair (bi_call_in_edge0, join_tgt_in_edge_fall_thru));

  /* Code generation for the rest of the conditions  */
  for (unsigned int i = 1; i < nconds; ++i)
    {
      unsigned ci0;
      edge bi_call_in_edge;
      gimple_stmt_iterator guard_bsi = gsi_for_stmt (cond_expr_start);
      ci0 = ci;
      cond_expr_start = conds[ci0];
      for (; ci < tn_cond_stmts; ci++)
        {
	  gimple *c = conds[ci];
          gcc_assert (c || ci != ci0);
          if (!c)
            break;
          gsi_insert_before (&guard_bsi, c, GSI_SAME_STMT);
          cond_expr = c;
        }
      ci++;
      gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND);
      guard_bb_in_edge = split_block (guard_bb, cond_expr);
      guard_bb_in_edge->flags &= ~EDGE_FALLTHRU;
      guard_bb_in_edge->flags |= EDGE_TRUE_VALUE;

      bi_call_in_edge = make_edge (guard_bb, bi_call_bb, EDGE_FALSE_VALUE);
      edges.quick_push (edge_pair (bi_call_in_edge, guard_bb_in_edge));
    }

  /* Now update the probability and profile information, processing the
     guards in order of execution.

     There are two approaches we could take here.  On the one hand we
     could assign a probability of X to the call block and distribute
     that probability among its incoming edges.  On the other hand we
     could assign a probability of X to each individual call edge.

     The choice only affects calls that have more than one condition.
     In those cases, the second approach would give the call block
     a greater probability than the first.  However, the difference
     is only small, and our chosen X is a pure guess anyway.

     Here we take the second approach because it's slightly simpler
     and because it's easy to see that it doesn't lose profile counts.  */
  bi_call_bb->count = profile_count::zero ();
  while (!edges.is_empty ())
    {
      edge_pair e = edges.pop ();
      edge call_edge = e.first;
      edge nocall_edge = e.second;
      basic_block src_bb = call_edge->src;
      gcc_assert (src_bb == nocall_edge->src);

      call_edge->probability = profile_probability::very_unlikely ();
      nocall_edge->probability = profile_probability::always ()
				 - call_edge->probability;

      bi_call_bb->count += call_edge->count ();

      if (nocall_edge->dest != join_tgt_bb)
	nocall_edge->dest->count = src_bb->count - bi_call_bb->count;
    }

  if (dom_info_available_p (CDI_DOMINATORS))
    {
      /* The split_blocks leave [guard 0] as the immediate dominator
	 of [call] and [call] as the immediate dominator of [join].
	 Fix them up.  */
      set_immediate_dominator (CDI_DOMINATORS, bi_call_bb, guard_bb);
      set_immediate_dominator (CDI_DOMINATORS, join_tgt_bb, guard_bb);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      location_t loc;
      loc = gimple_location (bi_call);
      fprintf (dump_file,
               "%s:%d: note: function call is shrink-wrapped"
               " into error conditions.\n",
               LOCATION_FILE (loc), LOCATION_LINE (loc));
    }
}
Example #22
static int
optimize_mode_switching (void)
{
  int e;
  basic_block bb;
  bool need_commit = false;
  static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
#define N_ENTITIES ARRAY_SIZE (num_modes)
  int entity_map[N_ENTITIES];
  struct bb_info *bb_info[N_ENTITIES];
  int i, j;
  int n_entities = 0;
  int max_num_modes = 0;
  bool emitted ATTRIBUTE_UNUSED = false;
  basic_block post_entry = 0;
  basic_block pre_exit = 0;
  struct edge_list *edge_list = 0;

  /* These bitmaps are used for the LCM algorithm.  */
  sbitmap *kill, *del, *insert, *antic, *transp, *comp;
  sbitmap *avin, *avout;

  for (e = N_ENTITIES - 1; e >= 0; e--)
    if (OPTIMIZE_MODE_SWITCHING (e))
      {
	int entry_exit_extra = 0;

	/* Create the list of segments within each basic block.
	   If NORMAL_MODE is defined, allow for two extra
	   blocks split from the entry and exit block.  */
	if (targetm.mode_switching.entry && targetm.mode_switching.exit)
	  entry_exit_extra = 3;

	bb_info[n_entities]
	  = XCNEWVEC (struct bb_info,
		      last_basic_block_for_fn (cfun) + entry_exit_extra);
	entity_map[n_entities++] = e;
	if (num_modes[e] > max_num_modes)
	  max_num_modes = num_modes[e];
      }

  if (! n_entities)
    return 0;

  /* Make sure that if MODE_ENTRY is defined, MODE_EXIT is defined too.  */
  gcc_assert ((targetm.mode_switching.entry && targetm.mode_switching.exit)
	      || (!targetm.mode_switching.entry
		  && !targetm.mode_switching.exit));

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    {
      /* Split the edge from the entry block, so that we can note that
	 NORMAL_MODE is supplied there.  */
      post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
    }

  df_analyze ();

  /* Create the bitmap vectors.  */
  antic = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				n_entities * max_num_modes);
  transp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				 n_entities * max_num_modes);
  comp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);
  avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);
  avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				n_entities * max_num_modes);
  kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);

  bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (antic, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));

  for (j = n_entities - 1; j >= 0; j--)
    {
      int e = entity_map[j];
      int no_mode = num_modes[e];
      struct bb_info *info = bb_info[j];
      rtx_insn *insn;

      /* Determine what mode (if any) the first use of entity E needs.
	 This will be the mode that is anticipatable for this block.
	 Also compute the initial transparency settings.  */
      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct seginfo *ptr;
	  int last_mode = no_mode;
	  bool any_set_required = false;
	  HARD_REG_SET live_now;

	  info[bb->index].mode_out = info[bb->index].mode_in = no_mode;

	  REG_SET_TO_HARD_REG_SET (live_now, df_get_live_in (bb));

	  /* Pretend the mode is clobbered across abnormal edges.  */
	  {
	    edge_iterator ei;
	    edge eg;
	    FOR_EACH_EDGE (eg, ei, bb->preds)
	      if (eg->flags & EDGE_COMPLEX)
		break;
	    if (eg)
	      {
		rtx_insn *ins_pos = BB_HEAD (bb);
		if (LABEL_P (ins_pos))
		  ins_pos = NEXT_INSN (ins_pos);
		gcc_assert (NOTE_INSN_BASIC_BLOCK_P (ins_pos));
		if (ins_pos != BB_END (bb))
		  ins_pos = NEXT_INSN (ins_pos);
		ptr = new_seginfo (no_mode, ins_pos, bb->index, live_now);
		add_seginfo (info + bb->index, ptr);
		for (i = 0; i < no_mode; i++)
		  clear_mode_bit (transp[bb->index], j, i);
	      }
	  }

	  FOR_BB_INSNS (bb, insn)
	    {
	      if (INSN_P (insn))
		{
		  int mode = targetm.mode_switching.needed (e, insn);
		  rtx link;

		  if (mode != no_mode && mode != last_mode)
		    {
		      any_set_required = true;
		      last_mode = mode;
		      ptr = new_seginfo (mode, insn, bb->index, live_now);
		      add_seginfo (info + bb->index, ptr);
		      for (i = 0; i < no_mode; i++)
			clear_mode_bit (transp[bb->index], j, i);
		    }

		  if (targetm.mode_switching.after)
		    last_mode = targetm.mode_switching.after (e, last_mode,
							      insn);

		  /* Update LIVE_NOW.  */
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_DEAD)
		      reg_dies (XEXP (link, 0), &live_now);

		  note_stores (PATTERN (insn), reg_becomes_live, &live_now);
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_UNUSED)
		      reg_dies (XEXP (link, 0), &live_now);
		}
	    }

	  info[bb->index].computing = last_mode;
	  /* Check for blocks without ANY mode requirements.
	     N.B. because of MODE_AFTER, last_mode might still
	     be different from no_mode, in which case we need to
	     mark the block as nontransparent.  */
	  if (!any_set_required)
	    {
	      ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now);
	      add_seginfo (info + bb->index, ptr);
	      if (last_mode != no_mode)
		for (i = 0; i < no_mode; i++)
		  clear_mode_bit (transp[bb->index], j, i);
	    }
	}
      if (targetm.mode_switching.entry && targetm.mode_switching.exit)
	{
	  int mode = targetm.mode_switching.entry (e);

	  info[post_entry->index].mode_out =
	    info[post_entry->index].mode_in = no_mode;
	  if (pre_exit)
	    {
	      info[pre_exit->index].mode_out =
		info[pre_exit->index].mode_in = no_mode;
	    }

	  if (mode != no_mode)
	    {
	      bb = post_entry;

	      /* By always making this nontransparent, we save
		 an extra check in make_preds_opaque.  We also
		 need this to avoid confusing pre_edge_lcm when
		 antic is cleared but transp and comp are set.  */
	      for (i = 0; i < no_mode; i++)
		clear_mode_bit (transp[bb->index], j, i);

	      /* Insert a fake computing definition of MODE into entry
		 blocks which compute no mode. This represents the mode on
		 entry.  */
	      info[bb->index].computing = mode;

	      if (pre_exit)
		info[pre_exit->index].seginfo->mode =
		  targetm.mode_switching.exit (e);
	    }
	}

      /* Set the anticipatable and computing arrays.  */
      for (i = 0; i < no_mode; i++)
	{
	  int m = targetm.mode_switching.priority (entity_map[j], i);

	  FOR_EACH_BB_FN (bb, cfun)
	    {
	      if (info[bb->index].seginfo->mode == m)
		set_mode_bit (antic[bb->index], j, m);

	      if (info[bb->index].computing == m)
		set_mode_bit (comp[bb->index], j, m);
	    }
	}
    }

  /* Calculate the optimal locations for placing
     mode switches to modes with priority I.  */

  FOR_EACH_BB_FN (bb, cfun)
    bitmap_not (kill[bb->index], transp[bb->index]);

  edge_list = pre_edge_lcm_avs (n_entities * max_num_modes, transp, comp, antic,
				kill, avin, avout, &insert, &del);

  for (j = n_entities - 1; j >= 0; j--)
    {
      int no_mode = num_modes[entity_map[j]];

      /* Insert all mode sets that have been inserted by lcm.  */

      for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
	{
	  edge eg = INDEX_EDGE (edge_list, ed);

	  eg->aux = (void *)(intptr_t)-1;

	  for (i = 0; i < no_mode; i++)
	    {
	      int m = targetm.mode_switching.priority (entity_map[j], i);
	      if (mode_bit_p (insert[ed], j, m))
		{
		  eg->aux = (void *)(intptr_t)m;
		  break;
		}
	    }
	}

      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct bb_info *info = bb_info[j];
	  int last_mode = no_mode;

	  /* Initialize the mode-out availability for bb from AVOUT.  */
	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (avout[bb->index], j, i))
	      {
		if (last_mode == no_mode)
		  last_mode = i;
		if (last_mode != i)
		  {
		    last_mode = no_mode;
		    break;
		  }
	      }
	  info[bb->index].mode_out = last_mode;

	  /* Initialize the mode-in availability for bb from AVIN.  */
	  last_mode = no_mode;
	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (avin[bb->index], j, i))
	      {
		if (last_mode == no_mode)
		  last_mode = i;
		if (last_mode != i)
		  {
		    last_mode = no_mode;
		    break;
		  }
	      }
	  info[bb->index].mode_in = last_mode;

	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (del[bb->index], j, i))
	      info[bb->index].seginfo->mode = no_mode;
	}

      /* Now output the remaining mode sets in all the segments.  */

      /* In case no mode was inserted, the mode information on the edges
	 might not be complete.
	 Update mode info on edges and commit pending mode sets.  */
      need_commit |= commit_mode_sets (edge_list, entity_map[j], bb_info[j]);

      /* Reset modes for next entity.  */
      clear_aux_for_edges ();

      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct seginfo *ptr, *next;
	  int cur_mode = bb_info[j][bb->index].mode_in;

	  for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next)
	    {
	      next = ptr->next;
	      if (ptr->mode != no_mode)
		{
		  rtx_insn *mode_set;

		  rtl_profile_for_bb (bb);
		  start_sequence ();

		  targetm.mode_switching.emit (entity_map[j], ptr->mode,
					       cur_mode, ptr->regs_live);
		  mode_set = get_insns ();
		  end_sequence ();

		  /* modes kill each other inside a basic block.  */
		  cur_mode = ptr->mode;

		  /* Insert MODE_SET only if it is nonempty.  */
		  if (mode_set != NULL_RTX)
		    {
		      emitted = true;
		      if (NOTE_INSN_BASIC_BLOCK_P (ptr->insn_ptr))
			/* We need to emit the insns in a FIFO-like manner,
			   i.e. the first to be emitted at our insertion
			   point ends up first in the instruction stream.
			   Because we made sure that NOTE_INSN_BASIC_BLOCK is
			   only used for initially empty basic blocks, we
			   can achieve this by appending at the end of
			   the block.  */
			emit_insn_after
			  (mode_set, BB_END (NOTE_BASIC_BLOCK (ptr->insn_ptr)));
		      else
			emit_insn_before (mode_set, ptr->insn_ptr);
		    }

		  default_rtl_profile ();
		}

	      free (ptr);
	    }
	}

      free (bb_info[j]);
    }

  free_edge_list (edge_list);

  /* Finished. Free up all the things we've allocated.  */
  sbitmap_vector_free (del);
  sbitmap_vector_free (insert);
  sbitmap_vector_free (kill);
  sbitmap_vector_free (antic);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);
  sbitmap_vector_free (avin);
  sbitmap_vector_free (avout);

  if (need_commit)
    commit_edge_insertions ();

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    cleanup_cfg (CLEANUP_NO_INSN_DEL);
  else if (!need_commit && !emitted)
    return 0;

  return 1;
}
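optimize_mode_switching packs every (entity, mode) pair into one row of each LCM bitmap, which is why the vectors above are allocated with n_entities * max_num_modes bits and addressed through set_mode_bit / clear_mode_bit / mode_bit_p. The following self-contained sketch shows one plausible flattening (entity-major layout); the ModeBits type and its members are assumptions for illustration, not the actual GCC macros.

#include <vector>
#include <cassert>

struct ModeBits
{
  int max_num_modes;
  std::vector<bool> bits;                       // stand-in for one sbitmap row

  ModeBits (int n_entities, int max_modes)
    : max_num_modes (max_modes), bits (n_entities * max_modes, false) {}

  // Assumed layout: entity-major, max_num_modes bits reserved per entity.
  void set   (int entity, int mode)       { bits[entity * max_num_modes + mode] = true;  }
  void clear (int entity, int mode)       { bits[entity * max_num_modes + mode] = false; }
  bool test  (int entity, int mode) const { return bits[entity * max_num_modes + mode];  }
};

int main ()
{
  ModeBits transp_row (2 /* entities */, 4 /* max modes per entity */);
  transp_row.set (1, 2);
  // Bit (1, 2) is set; the same mode for entity 0 stays clear.
  assert (transp_row.test (1, 2) && !transp_row.test (0, 2));
  return 0;
}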
Exemplo n.º 23
0
/**
 * @brief EpsTrimesh::split_on_sphere_tangent
 * @param tid
 */
DANI_INLINE
void EpsTrimesh::split_on_sphere_tangent(const uint tid)
{
    Sphere *sphere = &t_epsilons.at(tid);

    if (sphere->tangent_pos == TANGENT_UNKNOWN)
        return;

    if (sphere->radius == 0.0)
        return;

    if (sphere->center_pos != CENTERED_ON_VERTEX)
        assert(false);

    uint vid = UINT_MAX;

    // center
    if (sphere->tangent_pos == TANGENT_ON_EDGE)
    {
        std::cout << ">>>>Sphere associated to triangle " << tid << " is tangent on edge .. splitting .. " << std::endl;

        vid = split_edge(sphere->tangent_id, sphere->tangent);

        for (uint tt : adj_vtx2tri(vid))
            t_epsilons.at(tt).center_tri.first = tt;
    }
    else
    if (sphere->tangent_pos == TANGENT_ON_TRIANGLE)
    {
        std::cout << ">>>>Sphere associated to triangle " << tid << " is tangent on triangle .. splitting .. " << std::endl;

        vid = split_triangle(sphere->tangent_tri.first, sphere->tangent);

        for (uint tt : adj_vtx2tri(vid))
            t_epsilons.at(tt).center_tri.first = tt;
    }
    else
    if (sphere->tangent_pos == TANGENT_ON_VERTEX)
    {
        std::cout << ">>>>Sphere associated to triangle " << tid << " is tangent on vertex. " << std::endl;
    }

    // Recursive

    if (vid != UINT_MAX)
    {
        for (uint tt=0; tt < num_triangles(); tt++)
        {
            if (t_epsilons.at(tt).tangent_pos == TANGENT_ON_EDGE &&
                    std::find(adj_vtx2edg(vid).begin(), adj_vtx2edg(vid).end(), t_epsilons.at(tt).tangent_id) != adj_vtx2edg(vid).end())
                split_on_sphere_tangent(tt);
            else
            if (t_epsilons.at(tt).tangent_pos == TANGENT_ON_TRIANGLE &&
                    std::find(adj_vtx2tri(vid).begin(), adj_vtx2tri(vid).end(), t_epsilons.at(tt).tangent_tri.first) != adj_vtx2tri(vid).end())
                split_on_sphere_tangent(tt);
        }
    }

}
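After a split creates a new vertex, the loop at the end of split_on_sphere_tangent re-examines every triangle and recurses whenever that triangle's tangent edge or tangent triangle is adjacent to the new vertex, using a std::find membership test on the adjacency lists. A small standalone sketch of that test, factored into a helper; the names here are hypothetical and not part of EpsTrimesh.

#include <algorithm>
#include <vector>

// Returns true if ID occurs in the adjacency list IDS (same pattern as the
// std::find calls in the recursive pass above).
static bool contains (const std::vector<unsigned int> &ids, unsigned int id)
{
    return std::find (ids.begin (), ids.end (), id) != ids.end ();
}

int main ()
{
    // Pretend these are the edges incident to the vertex created by a split.
    std::vector<unsigned int> edges_around_new_vertex = { 3, 7, 12 };

    // A triangle whose sphere is tangent on edge 7 would be queued for another split.
    return contains (edges_around_new_vertex, 7) ? 0 : 1;
}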