Example #1
0
static unsigned int
copyprop_hardreg_forward (void)
{
  struct value_data *all_vd;
  basic_block bb;
  sbitmap visited;
  bool analyze_called = false;

  all_vd = XNEWVEC (struct value_data, last_basic_block_for_fn (cfun));

  visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
  bitmap_clear (visited);

  if (MAY_HAVE_DEBUG_INSNS)
    debug_insn_changes_pool
      = create_alloc_pool ("debug insn changes pool",
			   sizeof (struct queued_debug_insn_change), 256);

  FOR_EACH_BB_FN (bb, cfun)
    {
      bitmap_set_bit (visited, bb->index);

      /* If a block has a single predecessor that we've already
	 processed, begin with the value data that was live at
	 the end of the predecessor block.  */
      /* ??? Ought to use more intelligent queuing of blocks.  */
      if (single_pred_p (bb)
	  && bitmap_bit_p (visited, single_pred (bb)->index)
	  && ! (single_pred_edge (bb)->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)))
	{
	  all_vd[bb->index] = all_vd[single_pred (bb)->index];
	  if (all_vd[bb->index].n_debug_insn_changes)
	    {
	      unsigned int regno;

	      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
		{
		  if (all_vd[bb->index].e[regno].debug_insn_changes)
		    {
		      all_vd[bb->index].e[regno].debug_insn_changes = NULL;
		      if (--all_vd[bb->index].n_debug_insn_changes == 0)
			break;
		    }
		}
	    }
	}
      else
	init_value_data (all_vd + bb->index);

      copyprop_hardreg_forward_1 (bb, all_vd + bb->index);
    }
basic_block *
blocks_in_phiopt_order (void)
{
    basic_block x, y;
    basic_block *order = XNEWVEC (basic_block, n_basic_blocks);
    unsigned n = n_basic_blocks - NUM_FIXED_BLOCKS;
    unsigned np, i;
    sbitmap visited = sbitmap_alloc (last_basic_block);

#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index))
#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index))

    sbitmap_zero (visited);

    MARK_VISITED (ENTRY_BLOCK_PTR);
    FOR_EACH_BB (x)
    {
        if (VISITED_P (x))
            continue;

        /* Walk the predecessors of x as long as they have precisely one
           predecessor and add them to the list, so that they get stored
           after x.  */
        for (y = x, np = 1;
                single_pred_p (y) && !VISITED_P (single_pred (y));
                y = single_pred (y))
            np++;
        for (y = x, i = n - np;
                single_pred_p (y) && !VISITED_P (single_pred (y));
                y = single_pred (y), i++)
        {
            order[i] = y;
            MARK_VISITED (y);
        }
        order[i] = y;
        MARK_VISITED (y);

        gcc_assert (i == n - 1);
        n -= np;
    }

    sbitmap_free (visited);
    gcc_assert (n == 0);
    return order;

#undef MARK_VISITED
#undef VISITED_P
}
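
The ordering trick above is easy to misread out of context: the chain of single-predecessor blocks ending at x is first counted (np), then written into order[] starting at n - np, so that x comes first and its single-predecessor ancestors follow it.  The stand-alone toy below reproduces just that two-pass fill on a simplified block structure; every name in it (toy_block, store_chain) is invented for illustration and is not GCC API.

/* Toy sketch only: a simplified block type standing in for GCC's
   basic_block, used to illustrate the two-pass chain fill in
   blocks_in_phiopt_order.  Nothing below is GCC API.  */
#include <stdio.h>

struct toy_block
{
  int index;
  struct toy_block *single_pred;   /* NULL unless exactly one predecessor.  */
  int visited;
};

/* Store the unvisited single-predecessor chain ending at X into ORDER,
   starting at N minus the chain length, so that X comes first and the
   blocks it was reached from follow it.  Returns the number stored.  */
static unsigned
store_chain (struct toy_block *x, struct toy_block **order, unsigned n)
{
  struct toy_block *y;
  unsigned np, i;

  for (y = x, np = 1;
       y->single_pred && !y->single_pred->visited;
       y = y->single_pred)
    np++;
  for (y = x, i = n - np;
       y->single_pred && !y->single_pred->visited;
       y = y->single_pred, i++)
    {
      order[i] = y;
      y->visited = 1;
    }
  order[i] = y;
  y->visited = 1;
  return np;
}

int
main (void)
{
  /* A chain b0 <- b1 <- b2: each block's only predecessor is the
     previous one.  */
  struct toy_block b[3] = { { 0, NULL, 0 }, { 1, &b[0], 0 }, { 2, &b[1], 0 } };
  struct toy_block *order[3];
  unsigned n = 3, i;

  n -= store_chain (&b[2], order, n);
  for (i = 0; i < 3; i++)
    printf ("%d ", order[i]->index);   /* Prints "2 1 0".  */
  printf ("\n");
  return n == 0 ? 0 : 1;
}
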
Example #3
0
/* Check if the given DEF is available in INSN.  This would require full
   computation of available expressions; we check only restricted conditions:
   - if DEF is the sole definition of its register, go ahead;
   - in the same basic block, we check for no definitions killing the
     definition of DEF_INSN;
   - if USE's basic block has DEF's basic block as the sole predecessor,
     we check if the definition is killed after DEF_INSN or before
     TARGET_INSN insn, in their respective basic blocks.  */
static bool
use_killed_between (df_ref use, rtx_insn *def_insn, rtx_insn *target_insn)
{
  basic_block def_bb = BLOCK_FOR_INSN (def_insn);
  basic_block target_bb = BLOCK_FOR_INSN (target_insn);
  int regno;
  df_ref def;

  /* We used to have a def reaching a use that is _before_ the def,
     with the def not dominating the use even though the use and def
     are in the same basic block, when a register may be used
     uninitialized in a loop.  This should not happen anymore since
     we do not use reaching definitions, but still we test for such
     cases and assume that DEF is not available.  */
  if (def_bb == target_bb
      ? DF_INSN_LUID (def_insn) >= DF_INSN_LUID (target_insn)
      : !dominated_by_p (CDI_DOMINATORS, target_bb, def_bb))
    return true;

  /* Check if the reg in USE has only one definition.  We already
     know that this definition reaches use, or we wouldn't be here.
     However, this is invalid for hard registers because if they are
     live at the beginning of the function it does not mean that we
     have an uninitialized access.  */
  regno = DF_REF_REGNO (use);
  def = DF_REG_DEF_CHAIN (regno);
  if (def
      && DF_REF_NEXT_REG (def) == NULL
      && regno >= FIRST_PSEUDO_REGISTER)
    return false;

  /* Check locally if we are in the same basic block.  */
  if (def_bb == target_bb)
    return local_ref_killed_between_p (use, def_insn, target_insn);

  /* Finally, if DEF_BB is the sole predecessor of TARGET_BB.  */
  if (single_pred_p (target_bb)
      && single_pred (target_bb) == def_bb)
    {
      df_ref x;

      /* See if USE is killed between DEF_INSN and the last insn in the
	 basic block containing DEF_INSN.  */
      x = df_bb_regno_last_def_find (def_bb, regno);
      if (x && DF_INSN_LUID (DF_REF_INSN (x)) >= DF_INSN_LUID (def_insn))
	return true;

      /* See if USE is killed between TARGET_INSN and the first insn in the
	 basic block containing TARGET_INSN.  */
      x = df_bb_regno_first_def_find (target_bb, regno);
      if (x && DF_INSN_LUID (DF_REF_INSN (x)) < DF_INSN_LUID (target_insn))
	return true;

      return false;
    }

  /* Otherwise assume the worst case.  */
  return true;
}
static bool
tree_ssa_ifcombine_bb (basic_block inner_cond_bb)
{
  basic_block then_bb = NULL, else_bb = NULL;

  if (!recognize_if_then_else (inner_cond_bb, &then_bb, &else_bb))
    return false;

  /* Recognize && and || of two conditions with a common
     then/else block whose entry edges we can merge.  That is:
       if (a || b)
	 ;
     and
       if (a && b)
	 ;
     This requires a single predecessor of the inner cond_bb.  */
  if (single_pred_p (inner_cond_bb)
      && bb_no_side_effects_p (inner_cond_bb))
    {
      basic_block outer_cond_bb = single_pred (inner_cond_bb);

      if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb,
				   then_bb, else_bb, inner_cond_bb))
	return true;

      if (forwarder_block_to (else_bb, then_bb))
	{
	  /* Other possibilities for the && form, if else_bb is an
	     empty forwarder block to then_bb.  Compared to the above simpler
	     forms this can be treated as if then_bb and else_bb were swapped,
	     and the corresponding inner_cond_bb not inverted because of that.
	     For same_phi_args_p we look at equality of arguments between
	     edge from outer_cond_bb and the forwarder block.  */
	  if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb, else_bb,
				       then_bb, else_bb))
	    return true;
	}
      else if (forwarder_block_to (then_bb, else_bb))
	{
	  /* Other possibilities for the || form, if then_bb is an
	     empty forwarder block to else_bb.  Compared to the above simpler
	     forms this can be treated as if then_bb and else_bb were swapped,
	     and the corresponding inner_cond_bb not inverted because of that.
	     For same_phi_args_p we look at equality of arguments between
	     edge from outer_cond_bb and the forwarder block.  */
	  if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb, else_bb,
				       then_bb, then_bb))
	    return true;
	}
    }

  return false;
}
static bool
check_final_bb (void)
{
  gimple_stmt_iterator gsi;

  info.phi_count = 0;
  for (gsi = gsi_start_phis (info.final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      unsigned int i;

      info.phi_count++;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
	{
	  basic_block bb = gimple_phi_arg_edge (phi, i)->src;

	  if (bb == info.switch_bb
	      || (single_pred_p (bb) && single_pred (bb) == info.switch_bb))
	    {
	      tree reloc, val;

	      val = gimple_phi_arg_def (phi, i);
	      if (!is_gimple_ip_invariant (val))
		{
		  info.reason = "   Non-invariant value from a case\n";
		  return false; /* Non-invariant argument.  */
		}
	      reloc = initializer_constant_valid_p (val, TREE_TYPE (val));
	      if ((flag_pic && reloc != null_pointer_node)
		  || (!flag_pic && reloc == NULL_TREE))
		{
		  if (reloc)
		    info.reason
		      = "   Value from a case would need runtime relocations\n";
		  else
		    info.reason
		      = "   Value from a case is not a valid initializer\n";
		  return false;
		}
	    }
	}
    }

  return true;
}
Example #6
0
static void
set_location_for_edge (edge e)
{
  if (e->goto_locus)
    {
      set_curr_insn_source_location (e->goto_locus);
      set_curr_insn_block (e->goto_block);
    }
  else
    {
      basic_block bb = e->src;
      gimple_stmt_iterator gsi;

      do
	{
	  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
	    {
	      gimple stmt = gsi_stmt (gsi);
	      if (is_gimple_debug (stmt))
		continue;
	      if (gimple_has_location (stmt) || gimple_block (stmt))
		{
		  set_curr_insn_source_location (gimple_location (stmt));
		  set_curr_insn_block (gimple_block (stmt));
		  return;
		}
	    }
	  /* Nothing found in this basic block.  Make a half-assed attempt
	     to continue with another block.  */
	  if (single_pred_p (bb))
	    bb = single_pred (bb);
	  else
	    bb = e->src;
	}
      while (bb != e->src);
    }
}
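
The fallback in the else branch above walks back through unique predecessors looking for a statement with a location, and gives up by resetting bb to e->src, which is what terminates the do/while loop once a block has more than one predecessor.  Here is a stand-alone toy of that loop shape; the names (loc_block, find_fallback_location) are invented and stand in for GCC's gimple scan, not for any real API.

/* Toy sketch only: invented types illustrating the walk-back loop in
   set_location_for_edge; not GCC API.  */
#include <stdio.h>

struct loc_block
{
  struct loc_block *single_pred;   /* NULL unless exactly one predecessor.  */
  int location;                    /* 0 means no location recorded.  */
};

/* Starting from SRC, look for a block with a recorded location,
   following unique predecessors only, and stop once the chain ends or
   would revisit SRC.  Returns 0 if nothing was found.  */
static int
find_fallback_location (struct loc_block *src)
{
  struct loc_block *bb = src;

  do
    {
      if (bb->location)
	return bb->location;
      /* Nothing in this block: move to its only predecessor, or give up
	 by making the loop condition fail.  */
      bb = bb->single_pred ? bb->single_pred : src;
    }
  while (bb != src);

  return 0;
}

int
main (void)
{
  struct loc_block a = { NULL, 42 };   /* Has a location.  */
  struct loc_block b = { &a, 0 };      /* Single predecessor a, no location.  */
  struct loc_block c = { &b, 0 };      /* Single predecessor b, no location.  */

  printf ("%d\n", find_fallback_location (&c));   /* Prints 42.  */
  return 0;
}
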
Example #7
0
void
find_comparison_dom_walker::before_dom_children (basic_block bb)
{
  struct comparison *last_cmp;
  rtx_insn *insn, *next, *last_clobber;
  bool last_cmp_valid;
  bool need_purge = false;
  bitmap killed;

  killed = BITMAP_ALLOC (NULL);

  /* The last comparison that was made.  Will be reset to NULL
     once the flags are clobbered.  */
  last_cmp = NULL;

  /* True iff the last comparison has not been clobbered, nor
     have its inputs.  Used to eliminate duplicate compares.  */
  last_cmp_valid = false;

  /* The last insn that clobbered the flags, if that insn is of
     a form that may be valid for eliminating a following compare.
     To be reset to NULL once the flags are set otherwise.  */
  last_clobber = NULL;

  /* Propagate the last live comparison throughout the extended basic block. */
  if (single_pred_p (bb))
    {
      last_cmp = (struct comparison *) single_pred (bb)->aux;
      if (last_cmp)
	last_cmp_valid = last_cmp->inputs_valid;
    }

  for (insn = BB_HEAD (bb); insn; insn = next)
    {
      rtx src;

      next = (insn == BB_END (bb) ? NULL : NEXT_INSN (insn));
      if (!NONDEBUG_INSN_P (insn))
	continue;

      /* Compute the set of registers modified by this instruction.  */
      bitmap_clear (killed);
      df_simulate_find_defs (insn, killed);

      src = conforming_compare (insn);
      if (src)
	{
	  rtx eh_note = NULL;

	  if (cfun->can_throw_non_call_exceptions)
	    eh_note = find_reg_note (insn, REG_EH_REGION, NULL);

	  if (last_cmp_valid && can_eliminate_compare (src, eh_note, last_cmp))
	    {
	      if (eh_note)
		need_purge = true;
	      delete_insn (insn);
	      continue;
	    }

	  last_cmp = XCNEW (struct comparison);
	  last_cmp->insn = insn;
	  last_cmp->prev_clobber = last_clobber;
	  last_cmp->in_a = XEXP (src, 0);
	  last_cmp->in_b = XEXP (src, 1);
	  last_cmp->eh_note = eh_note;
	  last_cmp->orig_mode = GET_MODE (src);
	  all_compares.safe_push (last_cmp);

	  /* It's unusual, but be prepared for comparison patterns that
	     also clobber an input, or perhaps a scratch.  */
	  last_clobber = NULL;
	  last_cmp_valid = true;
	}

      /* Notice if this instruction kills the flags register.  */
      else if (bitmap_bit_p (killed, targetm.flags_regnum))
	{
	  /* See if this insn could be the "clobber" that eliminates
	     a future comparison.   */
	  last_clobber = (arithmetic_flags_clobber_p (insn) ? insn : NULL);

	  /* In either case, the previous compare is no longer valid.  */
	  last_cmp = NULL;
	  last_cmp_valid = false;
	}

      /* Notice if this instruction uses the flags register.  */
      else if (last_cmp)
	find_flags_uses_in_insn (last_cmp, insn);

      /* Notice if any of the inputs to the comparison have changed.  */
      if (last_cmp_valid
	  && (bitmap_bit_p (killed, REGNO (last_cmp->in_a))
	      || (REG_P (last_cmp->in_b)
		  && bitmap_bit_p (killed, REGNO (last_cmp->in_b)))))
	last_cmp_valid = false;
    }
static bool
tree_ssa_ifcombine_bb (basic_block inner_cond_bb)
{
  basic_block then_bb = NULL, else_bb = NULL;

  if (!recognize_if_then_else (inner_cond_bb, &then_bb, &else_bb))
    return false;

  /* Recognize && and || of two conditions with a common
     then/else block whose entry edges we can merge.  That is:
       if (a || b)
	 ;
     and
       if (a && b)
	 ;
     This requires a single predecessor of the inner cond_bb.  */
  if (single_pred_p (inner_cond_bb))
    {
      basic_block outer_cond_bb = single_pred (inner_cond_bb);

      /* The && form is characterized by a common else_bb with
	 the two edges leading to it mergeable.  The latter is
	 guaranteed by matching PHI arguments in the else_bb and
	 the inner cond_bb having no side-effects.  */
      if (recognize_if_then_else (outer_cond_bb, &inner_cond_bb, &else_bb)
	  && same_phi_args_p (outer_cond_bb, inner_cond_bb, else_bb)
	  && bb_no_side_effects_p (inner_cond_bb))
	{
	  /* We have
	       <outer_cond_bb>
		 if (q) goto inner_cond_bb; else goto else_bb;
	       <inner_cond_bb>
		 if (p) goto ...; else goto else_bb;
		 ...
	       <else_bb>
		 ...
	   */
	  return ifcombine_ifandif (inner_cond_bb, outer_cond_bb);
	}

      /* The || form is characterized by a common then_bb with the
	 two edges leading to it mergeable.  The latter is guaranteed
	 by matching PHI arguments in the then_bb and the inner cond_bb
	 having no side-effects.  */
      if (recognize_if_then_else (outer_cond_bb, &then_bb, &inner_cond_bb)
	  && same_phi_args_p (outer_cond_bb, inner_cond_bb, then_bb)
	  && bb_no_side_effects_p (inner_cond_bb))
	{
	  /* We have
	       <outer_cond_bb>
		 if (q) goto then_bb; else goto inner_cond_bb;
	       <inner_cond_bb>
		 if (p) goto then_bb; else goto ...;
	       <then_bb>
		 ...
	   */
	  return ifcombine_iforif (inner_cond_bb, outer_cond_bb);
	}
    }

  return false;
}
Example #9
0
void
find_comparison_dom_walker::before_dom_children (basic_block bb)
{
  struct comparison *last_cmp;
  rtx insn, next, last_clobber;
  bool last_cmp_valid;
  bool need_purge = false;
  bitmap killed;

  killed = BITMAP_ALLOC (NULL);

  /* The last comparison that was made.  Will be reset to NULL
     once the flags are clobbered.  */
  last_cmp = NULL;

  /* True iff the last comparison has not been clobbered, nor
     have its inputs.  Used to eliminate duplicate compares.  */
  last_cmp_valid = false;

  /* The last insn that clobbered the flags, if that insn is of
     a form that may be valid for eliminating a following compare.
     To be reset to NULL once the flags are set otherwise.  */
  last_clobber = NULL;

  /* Propagate the last live comparison throughout the extended basic block. */
  if (single_pred_p (bb))
    {
      last_cmp = (struct comparison *) single_pred (bb)->aux;
      if (last_cmp)
	last_cmp_valid = last_cmp->inputs_valid;
    }

  for (insn = BB_HEAD (bb); insn; insn = next)
    {
      rtx src;

      next = (insn == BB_END (bb) ? NULL_RTX : NEXT_INSN (insn));
      if (!NONDEBUG_INSN_P (insn))
	continue;

      /* Compute the set of registers modified by this instruction.  */
      bitmap_clear (killed);
      df_simulate_find_defs (insn, killed);

      src = conforming_compare (insn);
      if (src)
	{
	  enum machine_mode src_mode = GET_MODE (src);
	  rtx eh_note = NULL;

	  if (flag_non_call_exceptions)
	    eh_note = find_reg_note (insn, REG_EH_REGION, NULL);

	  if (!last_cmp_valid)
	    goto dont_delete;

	  /* Take care that it's in the same EH region.  */
	  if (flag_non_call_exceptions
	      && !rtx_equal_p (eh_note, last_cmp->eh_note))
	    goto dont_delete;

	  /* Make sure the compare is redundant with the previous.  */
	  if (!rtx_equal_p (last_cmp->in_a, XEXP (src, 0))
	      || !rtx_equal_p (last_cmp->in_b, XEXP (src, 1)))
	    goto dont_delete;

	  /* New mode must be compatible with the previous compare mode.  */
	  {
	    enum machine_mode new_mode
	      = targetm.cc_modes_compatible (last_cmp->orig_mode, src_mode);
	    if (new_mode == VOIDmode)
	      goto dont_delete;

	    if (new_mode != last_cmp->orig_mode)
	      {
		rtx x, flags = gen_rtx_REG (src_mode, targetm.flags_regnum);

		/* Generate new comparison for substitution.  */
		x = gen_rtx_COMPARE (new_mode, XEXP (src, 0), XEXP (src, 1));
		x = gen_rtx_SET (VOIDmode, flags, x);

		if (!validate_change (last_cmp->insn,
				      &PATTERN (last_cmp->insn), x, false))
		  goto dont_delete;

		last_cmp->orig_mode = new_mode;
	      }
	  }

	  /* All tests and substitutions succeeded!  */
	  if (eh_note)
	    need_purge = true;
	  delete_insn (insn);
	  continue;

	dont_delete:
	  last_cmp = XCNEW (struct comparison);
	  last_cmp->insn = insn;
	  last_cmp->prev_clobber = last_clobber;
	  last_cmp->in_a = XEXP (src, 0);
	  last_cmp->in_b = XEXP (src, 1);
	  last_cmp->eh_note = eh_note;
	  last_cmp->orig_mode = src_mode;
	  all_compares.safe_push (last_cmp);

	  /* It's unusual, but be prepared for comparison patterns that
	     also clobber an input, or perhaps a scratch.  */
	  last_clobber = NULL;
	  last_cmp_valid = true;
	}

      /* Notice if this instruction kills the flags register.  */
      else if (bitmap_bit_p (killed, targetm.flags_regnum))
	{
	  /* See if this insn could be the "clobber" that eliminates
	     a future comparison.   */
	  last_clobber = (arithmetic_flags_clobber_p (insn) ? insn : NULL);

	  /* In either case, the previous compare is no longer valid.  */
	  last_cmp = NULL;
	  last_cmp_valid = false;
	  continue;
	}

      /* Notice if this instruction uses the flags register.  */
      else if (last_cmp)
	find_flags_uses_in_insn (last_cmp, insn);

      /* Notice if any of the inputs to the comparison have changed.  */
      if (last_cmp_valid
	  && (bitmap_bit_p (killed, REGNO (last_cmp->in_a))
	      || (REG_P (last_cmp->in_b)
		  && bitmap_bit_p (killed, REGNO (last_cmp->in_b)))))
	last_cmp_valid = false;
    }
/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.  */
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim)
{
    basic_block bb;
    basic_block *bb_order;
    unsigned n, i;
    bool cfgchanged = false;
    struct pointer_set_t *nontrap = 0;

    if (do_store_elim)
    {
        condstoretemp = NULL_TREE;
        /* Calculate the set of non-trapping memory accesses.  */
        nontrap = get_non_trapping ();
    }

    /* Search every basic block for a COND_EXPR we may be able to optimize.

       We walk the blocks in an order that guarantees that a block with
       a single predecessor is processed before the predecessor.
       This ensures that we collapse inner ifs before visiting the
       outer ones, and also that we do not try to visit a removed
       block.  */
    bb_order = blocks_in_phiopt_order ();
    n = n_basic_blocks - NUM_FIXED_BLOCKS;

    for (i = 0; i < n; i++)
    {
        gimple cond_stmt, phi;
        basic_block bb1, bb2;
        edge e1, e2;
        tree arg0, arg1;

        bb = bb_order[i];

        cond_stmt = last_stmt (bb);
        /* Check to see if the last statement is a GIMPLE_COND.  */
        if (!cond_stmt
                || gimple_code (cond_stmt) != GIMPLE_COND)
            continue;

        e1 = EDGE_SUCC (bb, 0);
        bb1 = e1->dest;
        e2 = EDGE_SUCC (bb, 1);
        bb2 = e2->dest;

        /* We cannot do the optimization on abnormal edges.  */
        if ((e1->flags & EDGE_ABNORMAL) != 0
                || (e2->flags & EDGE_ABNORMAL) != 0)
            continue;

        /* Skip if bb1 or bb2 has no successors, or bb2 does not exist.  */
        if (EDGE_COUNT (bb1->succs) == 0
                || bb2 == NULL
                || EDGE_COUNT (bb2->succs) == 0)
            continue;

        /* Find the bb which is the fall through to the other.  */
        if (EDGE_SUCC (bb1, 0)->dest == bb2)
            ;
        else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
            basic_block bb_tmp = bb1;
            edge e_tmp = e1;
            bb1 = bb2;
            bb2 = bb_tmp;
            e1 = e2;
            e2 = e_tmp;
        }
        else
            continue;

        e1 = EDGE_SUCC (bb1, 0);

        /* Make sure that bb1 is just a fall through.  */
        if (!single_succ_p (bb1)
                || (e1->flags & EDGE_FALLTHRU) == 0)
            continue;

        /* Also make sure that bb1 has only one predecessor and that it
        is bb.  */
        if (!single_pred_p (bb1)
                || single_pred (bb1) != bb)
            continue;

        if (do_store_elim)
        {
            /* bb1 is the middle block, bb2 the join block, bb the split block,
               e1 the fallthrough edge from bb1 to bb2.  We can't do the
               optimization if the join block has more than two predecessors.  */
            if (EDGE_COUNT (bb2->preds) > 2)
                continue;
            if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
                cfgchanged = true;
        }
        else
        {
            gimple_seq phis = phi_nodes (bb2);

            /* Check to make sure that there is only one PHI node.
               TODO: we could do it with more than one iff the other PHI nodes
               have the same elements for these two edges.  */
            if (! gimple_seq_singleton_p (phis))
                continue;

            phi = gsi_stmt (gsi_start (phis));
            arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
            arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

            /* Something is wrong if we cannot find the arguments in the PHI
               node.  */
            gcc_assert (arg0 != NULL && arg1 != NULL);

            /* Do the replacement of conditional if it can be done.  */
            if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
                cfgchanged = true;
            else if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
                cfgchanged = true;
            else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
                cfgchanged = true;
            else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
                cfgchanged = true;
        }
    }

    free (bb_order);

    if (do_store_elim)
        pointer_set_destroy (nontrap);
    /* If the CFG has changed, we should cleanup the CFG.  */
    if (cfgchanged && do_store_elim)
    {
        /* In cond-store replacement we have added some loads on edges
           and new VOPS (as we moved the store, and created a load).  */
        gsi_commit_edge_inserts ();
        return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
    else if (cfgchanged)
        return TODO_cleanup_cfg;
    return 0;
}
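
Most of the early checks in this worker (and in the later variants of it shown below) establish one shape: of the two successors of the conditional block, one (bb1) must fall through to the other (bb2), and bb1 must have the conditional block as its only predecessor.  The following stand-alone toy isolates just that role assignment; the minimal types and helper (toy_bb, recognize_middle_and_join) are invented simplifications, not GCC's edge/basic_block structures.

/* Toy sketch only: invented minimal CFG types illustrating the
   middle/join recognition in tree_ssa_phiopt_worker; not GCC API.  */
#include <stdio.h>

struct toy_bb
{
  const char *name;
  struct toy_bb *succ[2];          /* NULL when absent.  */
  int n_succ;
  struct toy_bb *single_pred;      /* NULL unless exactly one predecessor.  */
};

/* BB ends in a conditional with successors *BB1 and *BB2; swap them if
   needed so that *BB1 is the "middle" block falling through to the join
   *BB2, and require BB to be *BB1's only predecessor.  Returns nonzero
   if the shape was recognized.  */
static int
recognize_middle_and_join (struct toy_bb *bb,
			   struct toy_bb **bb1, struct toy_bb **bb2)
{
  if ((*bb1)->n_succ == 1 && (*bb1)->succ[0] == *bb2)
    ;					/* Roles already correct.  */
  else if ((*bb2)->n_succ == 1 && (*bb2)->succ[0] == *bb1)
    {
      struct toy_bb *tmp = *bb1;	/* Swap roles.  */
      *bb1 = *bb2;
      *bb2 = tmp;
    }
  else
    return 0;

  /* The middle block must have BB as its sole predecessor.  */
  return (*bb1)->single_pred == bb;
}

int
main (void)
{
  struct toy_bb join = { "join", { NULL, NULL }, 0, NULL };
  struct toy_bb mid  = { "mid",  { &join, NULL }, 1, NULL };
  struct toy_bb cond = { "cond", { &mid, &join }, 2, NULL };
  struct toy_bb *bb1 = cond.succ[0], *bb2 = cond.succ[1];

  mid.single_pred = &cond;
  if (recognize_middle_and_join (&cond, &bb1, &bb2))
    printf ("middle=%s join=%s\n", bb1->name, bb2->name);  /* mid, join */
  return 0;
}
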
Example #11
0
static void
fixup_reorder_chain (void)
{
  basic_block bb, prev_bb;
  int index;
  rtx insn = NULL;

  if (cfg_layout_function_header)
    {
      set_first_insn (cfg_layout_function_header);
      insn = cfg_layout_function_header;
      while (NEXT_INSN (insn))
	insn = NEXT_INSN (insn);
    }

  /* First do the bulk reordering -- rechain the blocks without regard to
     the needed changes to jumps and labels.  */

  for (bb = ENTRY_BLOCK_PTR->next_bb, index = NUM_FIXED_BLOCKS;
       bb != 0;
       bb = bb->aux, index++)
    {
      if (bb->il.rtl->header)
	{
	  if (insn)
	    NEXT_INSN (insn) = bb->il.rtl->header;
	  else
	    set_first_insn (bb->il.rtl->header);
	  PREV_INSN (bb->il.rtl->header) = insn;
	  insn = bb->il.rtl->header;
	  while (NEXT_INSN (insn))
	    insn = NEXT_INSN (insn);
	}
      if (insn)
	NEXT_INSN (insn) = BB_HEAD (bb);
      else
	set_first_insn (BB_HEAD (bb));
      PREV_INSN (BB_HEAD (bb)) = insn;
      insn = BB_END (bb);
      if (bb->il.rtl->footer)
	{
	  NEXT_INSN (insn) = bb->il.rtl->footer;
	  PREV_INSN (bb->il.rtl->footer) = insn;
	  while (NEXT_INSN (insn))
	    insn = NEXT_INSN (insn);
	}
    }

  gcc_assert (index == n_basic_blocks);

  NEXT_INSN (insn) = cfg_layout_function_footer;
  if (cfg_layout_function_footer)
    PREV_INSN (cfg_layout_function_footer) = insn;

  while (NEXT_INSN (insn))
    insn = NEXT_INSN (insn);

  set_last_insn (insn);
#ifdef ENABLE_CHECKING
  verify_insn_chain ();
#endif
  delete_dead_jumptables ();

  /* Now add jumps and labels as needed to match the blocks new
     outgoing edges.  */

  for (bb = ENTRY_BLOCK_PTR->next_bb; bb ; bb = bb->aux)
    {
      edge e_fall, e_taken, e;
      rtx bb_end_insn;
      basic_block nb;
      edge_iterator ei;

      if (EDGE_COUNT (bb->succs) == 0)
	continue;

      /* Find the old fallthru edge, and another non-EH edge for
	 a taken jump.  */
      e_taken = e_fall = NULL;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->flags & EDGE_FALLTHRU)
	  e_fall = e;
	else if (! (e->flags & EDGE_EH))
	  e_taken = e;

      bb_end_insn = BB_END (bb);
      if (JUMP_P (bb_end_insn))
	{
	  if (any_condjump_p (bb_end_insn))
	    {
	      /* If the old fallthru is still next, nothing to do.  */
	      if (bb->aux == e_fall->dest
		  || e_fall->dest == EXIT_BLOCK_PTR)
		continue;

	      /* The degenerate case of a conditional jump jumping to the next
		 instruction can happen for jumps with side effects.  We need
		 to construct a forwarder block and this will be done just
		 fine by force_nonfallthru below.  */
	      if (!e_taken)
		;

	      /* There is another special case: if *neither* block is next,
		 such as happens at the very end of a function, then we'll
		 need to add a new unconditional jump.  Choose the taken
		 edge based on known or assumed probability.  */
	      else if (bb->aux != e_taken->dest)
		{
		  rtx note = find_reg_note (bb_end_insn, REG_BR_PROB, 0);

		  if (note
		      && INTVAL (XEXP (note, 0)) < REG_BR_PROB_BASE / 2
		      && invert_jump (bb_end_insn,
				      (e_fall->dest == EXIT_BLOCK_PTR
				       ? NULL_RTX
				       : label_for_bb (e_fall->dest)), 0))
		    {
		      e_fall->flags &= ~EDGE_FALLTHRU;
#ifdef ENABLE_CHECKING
		      gcc_assert (could_fall_through
				  (e_taken->src, e_taken->dest));
#endif
		      e_taken->flags |= EDGE_FALLTHRU;
		      update_br_prob_note (bb);
		      e = e_fall, e_fall = e_taken, e_taken = e;
		    }
		}

	      /* If the "jumping" edge is a crossing edge, and the fall
		 through edge is non-crossing, leave things as they are.  */
	      else if ((e_taken->flags & EDGE_CROSSING)
		       && !(e_fall->flags & EDGE_CROSSING))
		continue;

	      /* Otherwise we can try to invert the jump.  This will
		 basically never fail, however, keep up the pretense.  */
	      else if (invert_jump (bb_end_insn,
				    (e_fall->dest == EXIT_BLOCK_PTR
				     ? NULL_RTX
				     : label_for_bb (e_fall->dest)), 0))
		{
		  e_fall->flags &= ~EDGE_FALLTHRU;
#ifdef ENABLE_CHECKING
		  gcc_assert (could_fall_through
			      (e_taken->src, e_taken->dest));
#endif
		  e_taken->flags |= EDGE_FALLTHRU;
		  update_br_prob_note (bb);
		  continue;
		}
	    }
	  else
	    {
	      /* Otherwise we have some return, switch or computed
		 jump.  In the 99% case, there should not have been a
		 fallthru edge.  */
	      gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
	      continue;
	    }
	}
      else
	{
	  /* No fallthru implies a noreturn function with EH edges, or
	     something similarly bizarre.  In any case, we don't need to
	     do anything.  */
	  if (! e_fall)
	    continue;

	  /* If the fallthru block is still next, nothing to do.  */
	  if (bb->aux == e_fall->dest)
	    continue;

	  /* A fallthru to exit block.  */
	  if (e_fall->dest == EXIT_BLOCK_PTR)
	    continue;
	}

      /* We got here if we need to add a new jump insn.  */
      nb = force_nonfallthru (e_fall);
      if (nb)
	{
	  nb->il.rtl->visited = 1;
	  nb->aux = bb->aux;
	  bb->aux = nb;
	  /* Don't process this new block.  */
	  bb = nb;

	  /* Make sure new bb is tagged for correct section (same as
	     fall-thru source, since you cannot fall through across
	     section boundaries).  */
	  BB_COPY_PARTITION (e_fall->src, single_pred (bb));
	  if (flag_reorder_blocks_and_partition
	      && targetm.have_named_sections
	      && JUMP_P (BB_END (bb))
	      && !any_condjump_p (BB_END (bb))
	      && (EDGE_SUCC (bb, 0)->flags & EDGE_CROSSING))
	    REG_NOTES (BB_END (bb)) = gen_rtx_EXPR_LIST
	      (REG_CROSSING_JUMP, NULL_RTX, REG_NOTES (BB_END (bb)));
	}
    }

  /* Put basic_block_info in the new order.  */

  if (dump_file)
    {
      fprintf (dump_file, "Reordered sequence:\n");
      for (bb = ENTRY_BLOCK_PTR->next_bb, index = NUM_FIXED_BLOCKS;
	   bb;
	   bb = bb->aux, index++)
	{
	  fprintf (dump_file, " %i ", index);
	  if (get_bb_original (bb))
	    fprintf (dump_file, "duplicate of %i ",
		     get_bb_original (bb)->index);
	  else if (forwarder_block_p (bb)
		   && !LABEL_P (BB_HEAD (bb)))
	    fprintf (dump_file, "compensation ");
	  else
	    fprintf (dump_file, "bb %i ", bb->index);
	  fprintf (dump_file, " [%i]\n", bb->frequency);
	}
    }

  prev_bb = ENTRY_BLOCK_PTR;
  bb = ENTRY_BLOCK_PTR->next_bb;
  index = NUM_FIXED_BLOCKS;

  for (; bb; prev_bb = bb, bb = bb->aux, index ++)
    {
      bb->index = index;
      SET_BASIC_BLOCK (index, bb);

      bb->prev_bb = prev_bb;
      prev_bb->next_bb = bb;
    }
  prev_bb->next_bb = EXIT_BLOCK_PTR;
  EXIT_BLOCK_PTR->prev_bb = prev_bb;

  /* Annoying special case - jump around dead jumptables left in the code.  */
  FOR_EACH_BB (bb)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->flags & EDGE_FALLTHRU)
	  break;

      if (e && !can_fallthru (e->src, e->dest))
	force_nonfallthru (e);
    }
}
Example #12
0
/* The core routine of conditional store replacement and normal
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.
   DO_HOIST_LOADS is true when we want to hoist adjacent loads out
   of diamond control flow patterns, false otherwise.  */
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  hash_set<tree> *nontrap = 0;

  if (do_store_elim)
    /* Calculate the set of non-trapping memory accesses.  */
    nontrap = get_non_trapping ();

  /* Search every basic block for a COND_EXPR we may be able to optimize.

     We walk the blocks in an order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = single_pred_before_succ_order ();
  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++)
    {
      gimple cond_stmt;
      gphi *phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
          || gimple_code (cond_stmt) != GIMPLE_COND)
        continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)
       continue;

      /* Skip if bb1 or bb2 has no successors, or bb2 does not exist.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || bb2 == NULL
	  || EDGE_COUNT (bb2->succs) == 0)
        continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
	  std::swap (bb1, bb2);
	  std::swap (e1, e2);
	}
      else if (do_store_elim
	       && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!single_succ_p (bb1)
	      || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
	      || !single_succ_p (bb2)
	      || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
	      || EDGE_COUNT (bb3->preds) != 2)
	    continue;
	  if (cond_if_else_store_replacement (bb1, bb2, bb3))
	    cfgchanged = true;
	  continue;
	}
      else if (do_hoist_loads
		 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
	{
	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;

	  if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
	      && single_succ_p (bb1)
	      && single_succ_p (bb2)
	      && single_pred_p (bb1)
	      && single_pred_p (bb2)
	      && EDGE_COUNT (bb->succs) == 2
	      && EDGE_COUNT (bb3->preds) == 2
	      /* If one edge or the other is dominant, a conditional move
		 is likely to perform worse than the well-predicted branch.  */
	      && !predictable_edge_p (EDGE_SUCC (bb, 0))
	      && !predictable_edge_p (EDGE_SUCC (bb, 1)))
	    hoist_adjacent_loads (bb, bb1, bb2, bb3);
	  continue;
	}
      else
	continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
	  || (e1->flags & EDGE_FALLTHRU) == 0)
        continue;

      /* Also make sure that bb1 has only one predecessor and that it
	 is bb.  */
      if (!single_pred_p (bb1)
          || single_pred (bb1) != bb)
	continue;

      if (do_store_elim)
	{
	  /* bb1 is the middle block, bb2 the join block, bb the split block,
	     e1 the fallthrough edge from bb1 to bb2.  We can't do the
	     optimization if the join block has more than two predecessors.  */
	  if (EDGE_COUNT (bb2->preds) > 2)
	    continue;
	  if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
	    cfgchanged = true;
	}
      else
	{
	  gimple_seq phis = phi_nodes (bb2);
	  gimple_stmt_iterator gsi;
	  bool candorest = true;

	  /* Value replacement can work with more than one PHI
	     so try that first. */
	  for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      phi = as_a <gphi *> (gsi_stmt (gsi));
	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
	      if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
		{
		  candorest = false;
	          cfgchanged = true;
		  break;
		}
	    }

	  if (!candorest)
	    continue;

	  phi = single_non_singleton_phi_for_edges (phis, e1, e2);
	  if (!phi)
	    continue;

	  arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	  arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

	  /* Something is wrong if we cannot find the arguments in the PHI
	     node.  */
	  gcc_assert (arg0 != NULL && arg1 != NULL);

	  if (factor_out_conditional_conversion (e1, e2, phi, arg0, arg1))
	    {
	      /* factor_out_conditional_conversion may create a new PHI in
		 BB2 and eliminate an existing PHI in BB2.  Recompute values
		 that may be affected by that change.  */
	      phis = phi_nodes (bb2);
	      phi = single_non_singleton_phi_for_edges (phis, e1, e2);
	      gcc_assert (phi);
	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
	      gcc_assert (arg0 != NULL && arg1 != NULL);
	    }

	  /* Do the replacement of conditional if it can be done.  */
	  if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	}
    }

  free (bb_order);

  if (do_store_elim)
    delete nontrap;
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
         and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}
Example #13
0
static unsigned int
copyprop_hardreg_forward (void)
{
  struct value_data *all_vd;
  basic_block bb;
  sbitmap visited;
  bool analyze_called = false;

  all_vd = XNEWVEC (struct value_data, last_basic_block);

  visited = sbitmap_alloc (last_basic_block);
  sbitmap_zero (visited);

  if (MAY_HAVE_DEBUG_INSNS)
    debug_insn_changes_pool
      = create_alloc_pool ("debug insn changes pool",
			   sizeof (struct queued_debug_insn_change), 256);

  FOR_EACH_BB (bb)
    {
      SET_BIT (visited, bb->index);

      /* If a block has a single predecessor that we've already
	 processed, begin with the value data that was live at
	 the end of the predecessor block.  */
      /* ??? Ought to use more intelligent queuing of blocks.  */
      if (single_pred_p (bb)
	  && TEST_BIT (visited, single_pred (bb)->index)
	  && ! (single_pred_edge (bb)->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)))
	{
	  all_vd[bb->index] = all_vd[single_pred (bb)->index];
	  if (all_vd[bb->index].n_debug_insn_changes)
	    {
	      unsigned int regno;

	      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
		{
		  if (all_vd[bb->index].e[regno].debug_insn_changes)
		    {
		      all_vd[bb->index].e[regno].debug_insn_changes = NULL;
		      if (--all_vd[bb->index].n_debug_insn_changes == 0)
			break;
		    }
		}
	    }
	}
      else
	init_value_data (all_vd + bb->index);

      copyprop_hardreg_forward_1 (bb, all_vd + bb->index);
    }

  if (MAY_HAVE_DEBUG_INSNS)
    {
      FOR_EACH_BB (bb)
	if (TEST_BIT (visited, bb->index)
	    && all_vd[bb->index].n_debug_insn_changes)
	  {
	    unsigned int regno;
	    bitmap live;

	    if (!analyze_called)
	      {
		df_analyze ();
		analyze_called = true;
	      }
	    live = df_get_live_out (bb);
	    for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
	      if (all_vd[bb->index].e[regno].debug_insn_changes)
		{
		  if (REGNO_REG_SET_P (live, regno))
		    apply_debug_insn_changes (all_vd + bb->index, regno);
		  if (all_vd[bb->index].n_debug_insn_changes == 0)
		    break;
		}
	  }

      free_alloc_pool (debug_insn_changes_pool);
    }

  sbitmap_free (visited);
  free (all_vd);
  return 0;
}
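
The step worth noting here is the seeding of a block's value table from its unique, already-visited predecessor, which lets copy propagation continue across the block boundary instead of restarting from scratch.  A stand-alone toy of that decision follows; the much-reduced value table (toy_value_data, seed_value_data) is invented for illustration and is not GCC's struct value_data.

/* Toy sketch only: invented reduced value table illustrating the
   predecessor-seeding step in copyprop_hardreg_forward; not GCC API.  */
#include <stdio.h>
#include <string.h>

#define N_BLOCKS 3
#define N_REGS   4

struct toy_value_data
{
  int reg_value[N_REGS];   /* Known value per register, 0 = unknown.  */
};

/* If block BB has exactly one predecessor PRED (PRED >= 0) and PRED was
   already visited, start from a copy of PRED's value data; otherwise
   start from an empty table, like init_value_data.  */
static void
seed_value_data (struct toy_value_data *all_vd, int bb, int pred,
		 const char *visited)
{
  if (pred >= 0 && visited[pred])
    all_vd[bb] = all_vd[pred];                    /* Copy predecessor data.  */
  else
    memset (&all_vd[bb], 0, sizeof all_vd[bb]);   /* Start empty.  */
}

int
main (void)
{
  struct toy_value_data all_vd[N_BLOCKS];
  char visited[N_BLOCKS] = { 0, 0, 0 };

  memset (all_vd, 0, sizeof all_vd);
  all_vd[0].reg_value[2] = 7;      /* Pretend block 0 learned r2 == 7.  */
  visited[0] = 1;

  seed_value_data (all_vd, 1, 0, visited);    /* Block 1: single pred 0.  */
  seed_value_data (all_vd, 2, -1, visited);   /* Block 2: several preds.  */
  printf ("%d %d\n", all_vd[1].reg_value[2], all_vd[2].reg_value[2]);  /* 7 0 */
  return 0;
}
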
Example #14
0
static unsigned int
tree_ssa_phiopt (void)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;

  /* Search every basic block for a COND_EXPR we may be able to optimize.

     We walk the blocks in an order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = blocks_in_phiopt_order ();
  n = n_basic_blocks - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++) 
    {
      tree cond_expr;
      tree phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_expr = last_stmt (bb);
      /* Check to see if the last statement is a COND_EXPR.  */
      if (!cond_expr
          || TREE_CODE (cond_expr) != COND_EXPR)
        continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)
       continue;

      /* Skip if bb1 or bb2 has no successors, or bb2 does not exist.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || bb2 == NULL
          || EDGE_COUNT (bb2->succs) == 0)
        continue;

      /* Find the bb which is the fall through to the other.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
          basic_block bb_tmp = bb1;
          edge e_tmp = e1;
          bb1 = bb2;
          bb2 = bb_tmp;
          e1 = e2;
          e2 = e_tmp;
        }
      else
        continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
          || (e1->flags & EDGE_FALLTHRU) == 0)
        continue;

      /* Also make sure that bb1 has only one predecessor and that it
         is bb.  */
      if (!single_pred_p (bb1)
          || single_pred (bb1) != bb)
        continue;

      phi = phi_nodes (bb2);

      /* Check to make sure that there is only one PHI node.
         TODO: we could do it with more than one iff the other PHI nodes
         have the same elements for these two edges.  */
      if (!phi || PHI_CHAIN (phi) != NULL)
        continue;

      arg0 = PHI_ARG_DEF_TREE (phi, e1->dest_idx);
      arg1 = PHI_ARG_DEF_TREE (phi, e2->dest_idx);

      /* Something is wrong if we cannot find the arguments in the PHI
         node.  */
      gcc_assert (arg0 != NULL && arg1 != NULL);

      /* Do the replacement of conditional if it can be done.  */
      if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
        cfgchanged = true;
      else if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
        cfgchanged = true;
      else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
        cfgchanged = true;
      else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
        cfgchanged = true;
    }

  free (bb_order);
  
  /* If the CFG has changed, we should cleanup the CFG. */
  return cfgchanged ? TODO_cleanup_cfg : 0;
}