Example #1
static unsigned int
tracer (void)
{
  bool changed;

  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
    return 0;

  mark_dfs_back_edges ();
  if (dump_file)
    brief_dump_cfg (dump_file, dump_flags);

  /* Trace formation is done on the fly inside tail_duplicate.  */
  changed = tail_duplicate ();
  if (changed)
    {
      free_dominance_info (CDI_DOMINATORS);
      /* If we changed the CFG, schedule loops for fixup by cleanup_cfg.  */
      if (current_loops)
	loops_state_set (LOOPS_NEED_FIXUP);
    }

  if (dump_file)
    brief_dump_cfg (dump_file, dump_flags);

  return changed ? TODO_cleanup_cfg : 0;
}
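
Note the shape of tracer: bail out early when the CFG is trivially small (the
entry and exit blocks are always present, hence the NUM_FIXED_BLOCKS test),
transform, then report the damage through TODO flags and LOOPS_NEED_FIXUP so
the pass manager and cfg-cleanup repair things afterwards.  A minimal sketch
of that same skeleton follows; my_pass_execute and my_transform are
hypothetical names, not part of the source above.

static unsigned int
my_pass_execute (void)
{
  /* With at most one block beyond the fixed entry/exit blocks there is
     nothing worth transforming.  */
  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
    return 0;

  bool changed = my_transform ();  /* Hypothetical transformation.  */

  if (changed && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return changed ? TODO_cleanup_cfg : 0;
}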
Example #2
void
loop_optimizer_init (unsigned flags)
{
  timevar_push (TV_LOOP_INIT);

  if (!current_loops)
    {
      gcc_assert (!(cfun->curr_properties & PROP_loops));

      /* Find the loops.  */
      current_loops = flow_loops_find (NULL);
    }
  else
    {
      bool recorded_exits = loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS);
      bool needs_fixup = loops_state_satisfies_p (LOOPS_NEED_FIXUP);

      gcc_assert (cfun->curr_properties & PROP_loops);

      /* Ensure that the dominators are computed, like flow_loops_find does.  */
      calculate_dominance_info (CDI_DOMINATORS);

#ifdef ENABLE_CHECKING
      if (!needs_fixup)
	verify_loop_structure ();
#endif

      /* Clear all flags.  */
      if (recorded_exits)
	release_recorded_exits ();
      loops_state_clear (~0U);

      if (needs_fixup)
	{
	  /* Apply LOOPS_MAY_HAVE_MULTIPLE_LATCHES early as fix_loop_structure
	     re-applies flags.  */
	  loops_state_set (flags & LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
	  fix_loop_structure (NULL);
	}
    }

  /* Apply flags to loops.  */
  apply_loop_flags (flags);

  /* Dump loops.  */
  flow_loops_dump (dump_file, NULL, 1);

#ifdef ENABLE_CHECKING
  verify_loop_structure ();
#endif

  timevar_pop (TV_LOOP_INIT);
}
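
Callers are expected to bracket loop-dependent work with loop_optimizer_init
and loop_optimizer_finalize, as split_paths does in Example #7 below.  A
condensed sketch of that pairing, with the transformation itself elided
(my_loop_pass is a hypothetical name):

static void
my_loop_pass (void)
{
  /* Request canonical preheaders, simple latches and marked irreducible
     regions, plus recorded exits.  */
  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);

  /* ... iterate over the loops and transform them here ...  */

  loop_optimizer_finalize ();
}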
Example #3
void
loop_optimizer_finalize (void)
{
  struct loop *loop;
  basic_block bb;

  timevar_push (TV_LOOP_FINI);

  if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
    release_recorded_exits ();

  free_numbers_of_iterations_estimates ();

  /* If we should preserve the loop structure, do not free it, but clear
     the flags saying that advanced properties are present, as we do not
     preserve those in full.  */
  if (cfun->curr_properties & PROP_loops)
    {
      loops_state_clear (LOOP_CLOSED_SSA
			 | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS
			 | LOOPS_HAVE_PREHEADERS
			 | LOOPS_HAVE_SIMPLE_LATCHES
			 | LOOPS_HAVE_FALLTHRU_PREHEADERS);
      loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
      goto loop_fini_done;
    }

  gcc_assert (current_loops != NULL);

  FOR_EACH_LOOP (loop, 0)
    free_simple_loop_desc (loop);

  /* Clean up.  */
  flow_loops_free (current_loops);
  ggc_free (current_loops);
  current_loops = NULL;

  FOR_ALL_BB_FN (bb, cfun)
    {
      bb->loop_father = NULL;
    }

loop_fini_done:
  timevar_pop (TV_LOOP_FINI);
}
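
Note the early-return path above: when PROP_loops is set, the loop tree
itself survives the pass, and only the state flags record which invariants
were downgraded.  Later consumers should therefore test the flags rather
than assume a property still holds.  A sketch of such a check, using
functions from the examples above (ensure_simple_latches is hypothetical):

static void
ensure_simple_latches (void)
{
  /* After the downgrade above, canonical latches are no longer
     guaranteed; re-establish them if a pass needs them.  */
  if (!loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
    force_single_succ_latches ();
}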
Example #4
static void
apply_loop_flags (unsigned flags)
{
  if (flags & LOOPS_MAY_HAVE_MULTIPLE_LATCHES)
    {
      /* If the loops may have multiple latches, we cannot canonicalize
	 them further (and most of the loop manipulation functions will
	 not work).  However, we avoid modifying the CFG, which some
	 passes may want.  */
      gcc_assert ((flags & ~(LOOPS_MAY_HAVE_MULTIPLE_LATCHES
			     | LOOPS_HAVE_RECORDED_EXITS)) == 0);
      loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
    }
  else
    disambiguate_loops_with_multiple_latches ();

  /* Create pre-headers.  */
  if (flags & LOOPS_HAVE_PREHEADERS)
    {
      int cp_flags = CP_SIMPLE_PREHEADERS;

      if (flags & LOOPS_HAVE_FALLTHRU_PREHEADERS)
        cp_flags |= CP_FALLTHRU_PREHEADERS;

      create_preheaders (cp_flags);
    }

  /* Force all latches to have only single successor.  */
  if (flags & LOOPS_HAVE_SIMPLE_LATCHES)
    force_single_succ_latches ();

  /* Mark irreducible loops.  */
  if (flags & LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
    mark_irreducible_loops ();

  if (flags & LOOPS_HAVE_RECORDED_EXITS)
    record_loop_exits ();
}
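
The flag names above are bits from cfgloop.h; the common bundle
LOOPS_NORMAL, used by split_paths in Example #7, combines the first three
requests (quoting the same-era header from memory):

#define LOOPS_NORMAL (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES \
		      | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)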
Example #5
unsigned int
execute_fixup_cfg (void)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  int todo = gimple_in_ssa_p (cfun) ? TODO_verify_ssa : 0;
  gcov_type count_scale;
  edge e;
  edge_iterator ei;

  count_scale
      = GCOV_COMPUTE_SCALE (cgraph_get_node (current_function_decl)->count,
                            ENTRY_BLOCK_PTR->count);

  ENTRY_BLOCK_PTR->count = cgraph_get_node (current_function_decl)->count;
  EXIT_BLOCK_PTR->count = apply_scale (EXIT_BLOCK_PTR->count,
                                       count_scale);

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
    e->count = apply_scale (e->count, count_scale);

  FOR_EACH_BB (bb)
    {
      bb->count = apply_scale (bb->count, count_scale);
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree decl = is_gimple_call (stmt)
		      ? gimple_call_fndecl (stmt)
		      : NULL;
	  if (decl)
	    {
	      int flags = gimple_call_flags (stmt);
	      if (flags & (ECF_CONST | ECF_PURE | ECF_LOOPING_CONST_OR_PURE))
		{
		  if (gimple_purge_dead_abnormal_call_edges (bb))
		    todo |= TODO_cleanup_cfg;

		  if (gimple_in_ssa_p (cfun))
		    {
		      todo |= TODO_update_ssa | TODO_cleanup_cfg;
		      update_stmt (stmt);
		    }
		}

	      if ((flags & ECF_NORETURN)
		  && fixup_noreturn_call (stmt))
		todo |= TODO_cleanup_cfg;
	    }

	  if (maybe_clean_eh_stmt (stmt)
	      && gimple_purge_dead_eh_edges (bb))
	    todo |= TODO_cleanup_cfg;
	}

      FOR_EACH_EDGE (e, ei, bb->succs)
        e->count = apply_scale (e->count, count_scale);

      /* If we have a basic block with no successors that does not
	 end with a control statement or a noreturn call end it with
	 a call to __builtin_unreachable.  This situation can occur
	 when inlining a noreturn call that does in fact return.  */
      if (EDGE_COUNT (bb->succs) == 0)
	{
	  gimple stmt = last_stmt (bb);
	  if (!stmt
	      || (!is_ctrl_stmt (stmt)
		  && (!is_gimple_call (stmt)
		      || (gimple_call_flags (stmt) & ECF_NORETURN) == 0)))
	    {
	      stmt = gimple_build_call
		  (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
	      gimple_stmt_iterator gsi = gsi_last_bb (bb);
	      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
	    }
	}
    }
  if (count_scale != REG_BR_PROB_BASE)
    compute_function_frequency ();

  /* We just processed all calls.  */
  if (cfun->gimple_df)
    vec_free (MODIFIED_NORETURN_CALLS (cfun));

  /* Dump a textual representation of the flowgraph.  */
  if (dump_file)
    gimple_dump_cfg (dump_file, dump_flags);

  if (current_loops
      && (todo & TODO_cleanup_cfg))
    loops_state_set (LOOPS_NEED_FIXUP);

  return todo;
}
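
The profile-count scaling above is plain fixed-point arithmetic with
REG_BR_PROB_BASE as the denominator: GCOV_COMPUTE_SCALE derives a scale
factor from the cgraph node's count versus the entry block's count, and
apply_scale multiplies counts by it.  A self-contained sketch of the
arithmetic; the helpers are re-implemented here for illustration only, with
rounding intended to mirror GCC's RDIV macro:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define REG_BR_PROB_BASE 10000

typedef int64_t gcov_type;

/* Rounded integer division, like GCC's RDIV.  */
static gcov_type
rdiv (gcov_type x, gcov_type y)
{
  return (x + y / 2) / y;
}

/* Scale factor mapping DEN onto NUM, like GCOV_COMPUTE_SCALE.  */
static gcov_type
compute_scale (gcov_type num, gcov_type den)
{
  return den ? rdiv (num * REG_BR_PROB_BASE, den) : REG_BR_PROB_BASE;
}

/* Apply the factor to a count, like apply_scale.  */
static gcov_type
scale_count (gcov_type count, gcov_type scale)
{
  return rdiv (count * scale, REG_BR_PROB_BASE);
}

int
main (void)
{
  /* The profile saw the entry block 800 times but the cgraph node says
     1000, so the scale is 12500 and a block count of 400 becomes 500.  */
  gcov_type scale = compute_scale (1000, 800);
  printf ("scale=%" PRId64 ", scaled count=%" PRId64 "\n",
	  scale, scale_count (400, scale));
  return 0;
}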
Example #6
void
ubsan_expand_null_ifn (gimple_stmt_iterator gsi)
{
  gimple stmt = gsi_stmt (gsi);
  location_t loc = gimple_location (stmt);
  gcc_assert (gimple_call_num_args (stmt) == 2);
  tree ptr = gimple_call_arg (stmt, 0);
  tree ckind = gimple_call_arg (stmt, 1);

  basic_block cur_bb = gsi_bb (gsi);

  /* Split the original block holding the pointer dereference.  */
  edge e = split_block (cur_bb, stmt);

  /* Get a hold on the 'condition block', the 'then block' and the
     'else block'.  */
  basic_block cond_bb = e->src;
  basic_block fallthru_bb = e->dest;
  basic_block then_bb = create_empty_bb (cond_bb);
  add_bb_to_loop (then_bb, cond_bb->loop_father);
  loops_state_set (LOOPS_NEED_FIXUP);

  /* Make an edge coming from the 'cond block' into the 'then block';
     this edge is unlikely taken, so set up the probability accordingly.  */
  e = make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
  e->probability = PROB_VERY_UNLIKELY;

  /* Connect 'then block' with the 'else block'.  This is needed
     as the ubsan routines we call in the 'then block' are not noreturn.
     The 'then block' only has one outgoing edge.  */
  make_single_succ_edge (then_bb, fallthru_bb, EDGE_FALLTHRU);

  /* Set up the fallthrough basic block.  */
  e = find_edge (cond_bb, fallthru_bb);
  e->flags = EDGE_FALSE_VALUE;
  e->count = cond_bb->count;
  e->probability = REG_BR_PROB_BASE - PROB_VERY_UNLIKELY;

  /* Update dominance info for the newly created then_bb; note that
     fallthru_bb's dominance info has already been updated by
     split_block.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);

  /* Put the ubsan builtin call into the newly created BB.  */
  gimple g;
  if (flag_sanitize_undefined_trap_on_error)
    g = gimple_build_call (builtin_decl_implicit (BUILT_IN_TRAP), 0);
  else
    {
      enum built_in_function bcode
	= flag_sanitize_recover
	  ? BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH
	  : BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_ABORT;
      tree fn = builtin_decl_implicit (bcode);
      const struct ubsan_mismatch_data m
	= { build_zero_cst (pointer_sized_int_node), ckind };
      tree data
	= ubsan_create_data ("__ubsan_null_data", &loc, &m,
			     ubsan_type_descriptor (TREE_TYPE (ptr),
						    UBSAN_PRINT_POINTER),
			     NULL_TREE);
      data = build_fold_addr_expr_loc (loc, data);
      g = gimple_build_call (fn, 2, data,
			     build_zero_cst (pointer_sized_int_node));
    }
  gimple_set_location (g, loc);
  gimple_stmt_iterator gsi2 = gsi_start_bb (then_bb);
  gsi_insert_after (&gsi2, g, GSI_NEW_STMT);

  /* Unlink the UBSAN_NULL's vops before replacing it.  */
  unlink_stmt_vdef (stmt);

  g = gimple_build_cond (EQ_EXPR, ptr, build_int_cst (TREE_TYPE (ptr), 0),
			 NULL_TREE, NULL_TREE);
  gimple_set_location (g, loc);

  /* Replace the UBSAN_NULL with a GIMPLE_COND stmt.  */
  gsi_replace (&gsi, g, false);
}
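
The net effect of the expansion is easiest to see as a before/after sketch
(GIMPLE shown schematically, not verbatim dump output):

  /* Before:                         After:

       ...                             ...
       UBSAN_NULL (ptr, ckind);        if (ptr == 0)          <- cond_bb
       use of *ptr follows;              __ubsan_handle_...;  <- then_bb
       ...                             use of *ptr follows;   <- fallthru_bb
                                       ...                                  */

The true edge into then_bb carries PROB_VERY_UNLIKELY, so the handler call
stays off the hot path.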
Example #7
static bool
split_paths ()
{
  bool changed = false;
  loop_p loop;

  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  initialize_original_copy_tables ();
  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      /* Only split paths if we are optimizing this loop for speed.  */
      if (!optimize_loop_for_speed_p (loop))
	continue;

      /* See if there is a block that we can duplicate to split the
	 path to the loop latch.  */
      basic_block bb
	= find_block_to_duplicate_for_splitting_paths (loop->latch);

      /* BB is the merge point for an IF-THEN-ELSE we want to transform.

	 Essentially we want to create a duplicate of BB and redirect the
	 first predecessor of BB to the duplicate (leaving the second
	 predecessor as is).  This will split the path leading to the latch
	 re-using BB to avoid useless copying.  */
      if (bb && is_feasible_trace (bb))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "Duplicating join block %d into predecessor paths\n",
		     bb->index);
	  basic_block pred0 = EDGE_PRED (bb, 0)->src;
	  transform_duplicate (pred0, bb);
	  changed = true;

	  /* If BB has an outgoing edge marked as IRREDUCIBLE, then
	     duplicating BB may result in an irreducible region turning
	     into a natural loop.

	     Long term we might want to hook this into the block
	     duplication code, but as we've seen with similar changes
	     for edge removal, that can be somewhat risky.  */
	  if (EDGE_SUCC (bb, 0)->flags & EDGE_IRREDUCIBLE_LOOP
	      || EDGE_SUCC (bb, 1)->flags & EDGE_IRREDUCIBLE_LOOP)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 "Join block %d has EDGE_IRREDUCIBLE_LOOP set.  "
			 "Scheduling loop fixups.\n",
			 bb->index);
	      loops_state_set (LOOPS_NEED_FIXUP);
	    }
	}
    }

  loop_optimizer_finalize ();
  free_original_copy_tables ();
  return changed;
}
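
Roughly, the transformation duplicates the join block of an if-then-else
into one of its predecessors so that the path leading to the loop latch is
split.  Sketched as a CFG before/after, where BB' is the duplicate created
via transform_duplicate:

  /* Before:               After:

        if (cond)               if (cond)
        /       \               /       \
      then      else          then      else
        \       /               |         |
           BB                  BB'        BB
           |                     \       /
         latch                     latch       */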