Example #1
static unsigned int on_execute_pass(void)
{
    basic_block bb;
    gimple_stmt_iterator gsi;
    const char* name;
    const char* file = EXPR_FILENAME(cfun->decl);
    const unsigned int line = EXPR_LINENO(cfun->decl);

    TRACE();

    if (DECL_ASSEMBLER_NAME(cfun->decl) == NULL)
    {
        printf("--- skipping anonymous function\n");
        return 0;
    }

    name = IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(cfun->decl));

#if 0 /* debug */
    printf("--- passing on function: %s\n", name);
#endif

    track_pragmed_func(file, line, name);

    FOR_EACH_BB(bb)
    {
        for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi))
        {
            const_gimple stmt = gsi_stmt(gsi);
            const enum gimple_code code = gimple_code(stmt);

            if (code == GIMPLE_CALL)
            {
                const char* const name = get_called_name(stmt);
                const tracked_func_t* const tf = find_tracked_func(name);

                printf("CALL%s: %s()\n", tf ? "_TASK" : "", name);

                if (tf != NULL) handle_task_call(gsi, tf);
            }

#if 0 /* debug */
            if (gimple_has_location(stmt))
            {
                const location_t loc = gimple_location(stmt);

                const char* type = "STMT";
            if (code == GIMPLE_CALL)
                printf
                    (
                        "%s locus: .%s/%u.\n",
                        type, LOCATION_FILE(loc), LOCATION_LINE(loc)
                    );
            }
#endif /* debug */
        }
    }

    return 0;
}
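Most of the examples below share the same traversal skeleton; as a minimal sketch (GCC 4.x-era API, where FOR_EACH_BB implicitly walks the current function in cfun):

static void
visit_all_stmts (void)
{
    basic_block bb;
    gimple_stmt_iterator gsi;

    FOR_EACH_BB (bb)    /* every basic block of the current function */
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
            gimple stmt = gsi_stmt (gsi);   /* inspect or rewrite STMT here */
            (void) stmt;
        }
}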
Example #2
static void
lower_builtin_setjmp (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);
  tree cont_label = create_artificial_label (loc);
  tree next_label = create_artificial_label (loc);
  tree dest, t, arg;
  gimple g;

  /* __builtin_setjmp_{setup,receiver} aren't ECF_RETURNS_TWICE and for RTL
     these builtins are modelled as non-local label jumps to the label
     that is passed to these two builtins, so pretend we have a non-local
     label during GIMPLE passes too.  See PR60003.  */ 
  cfun->has_nonlocal_label = 1;

  /* NEXT_LABEL is the label __builtin_longjmp will jump to.  Its address is
     passed to both __builtin_setjmp_setup and __builtin_setjmp_receiver.  */
  FORCED_LABEL (next_label) = 1;

  dest = gimple_call_lhs (stmt);

  /* Build '__builtin_setjmp_setup (BUF, NEXT_LABEL)' and insert.  */
  arg = build_addr (next_label, current_function_decl);
  t = builtin_decl_implicit (BUILT_IN_SETJMP_SETUP);
  g = gimple_build_call (t, 2, gimple_call_arg (stmt, 0), arg);
  gimple_set_location (g, loc);
  gimple_set_block (g, gimple_block (stmt));
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build 'DEST = 0' and insert.  */
  if (dest)
    {
      g = gimple_build_assign (dest, build_zero_cst (TREE_TYPE (dest)));
      gimple_set_location (g, loc);
      gimple_set_block (g, gimple_block (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  /* Build 'goto CONT_LABEL' and insert.  */
  g = gimple_build_goto (cont_label);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build 'NEXT_LABEL:' and insert.  */
  g = gimple_build_label (next_label);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build '__builtin_setjmp_receiver (NEXT_LABEL)' and insert.  */
  arg = build_addr (next_label, current_function_decl);
  t = builtin_decl_implicit (BUILT_IN_SETJMP_RECEIVER);
  g = gimple_build_call (t, 1, arg);
  gimple_set_location (g, loc);
  gimple_set_block (g, gimple_block (stmt));
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build 'DEST = 1' and insert.  */
  if (dest)
    {
      g = gimple_build_assign (dest, fold_convert_loc (loc, TREE_TYPE (dest),
						       integer_one_node));
      gimple_set_location (g, loc);
      gimple_set_block (g, gimple_block (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  /* Build 'CONT_LABEL:' and insert.  */
  g = gimple_build_label (cont_label);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Remove the call to __builtin_setjmp.  */
  gsi_remove (gsi, false);
}
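Taken together, the inserted statements replace the single __builtin_setjmp call; the net effect, sketched as pseudo-GIMPLE (DEST appears only when the setjmp result is actually used):

  __builtin_setjmp_setup (BUF, &NEXT_LABEL);   /* BUF = original argument 0 */
  DEST = 0;
  goto CONT_LABEL;
NEXT_LABEL:
  __builtin_setjmp_receiver (&NEXT_LABEL);     /* a longjmp lands here */
  DEST = 1;
CONT_LABEL:
  ;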
Example #3
static void
find_tail_calls (basic_block bb, struct tailcall **ret)
{
  tree ass_var = NULL_TREE, ret_var, func, param;
  gimple *stmt;
  gcall *call = NULL;
  gimple_stmt_iterator gsi, agsi;
  bool tail_recursion;
  struct tailcall *nw;
  edge e;
  tree m, a;
  basic_block abb;
  size_t idx;
  tree var;

  if (!single_succ_p (bb))
    return;

  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      stmt = gsi_stmt (gsi);

      /* Ignore labels, returns, nops, clobbers and debug stmts.  */
      if (gimple_code (stmt) == GIMPLE_LABEL
	  || gimple_code (stmt) == GIMPLE_RETURN
	  || gimple_code (stmt) == GIMPLE_NOP
	  || gimple_clobber_p (stmt)
	  || is_gimple_debug (stmt))
	continue;

      /* Check for a call.  */
      if (is_gimple_call (stmt))
	{
	  call = as_a <gcall *> (stmt);
	  ass_var = gimple_call_lhs (call);
	  break;
	}

      /* If the statement references memory or volatile operands, fail.  */
      if (gimple_references_memory_p (stmt)
	  || gimple_has_volatile_ops (stmt))
	return;
    }

  if (gsi_end_p (gsi))
    {
      edge_iterator ei;
      /* Recurse to the predecessors.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
	find_tail_calls (e->src, ret);

      return;
    }

  /* If the LHS of our call is not just a simple register, we can't
     transform this into a tail or sibling call.  This situation happens,
     in (e.g.) "*p = foo()" where foo returns a struct.  In this case
     we won't have a temporary here, but we need to carry out the side
     effect anyway, so tailcall is impossible.

     ??? In some situations (when the struct is returned in memory via
     invisible argument) we could deal with this, e.g. by passing 'p'
     itself as that argument to foo, but it's too early to do this here,
     and expand_call() will not handle it anyway.  If it ever can, then
     we need to revisit this here, to allow that situation.  */
  if (ass_var && !is_gimple_reg (ass_var))
    return;

  /* We found the call, check whether it is suitable.  */
  tail_recursion = false;
  func = gimple_call_fndecl (call);
  if (func
      && !DECL_BUILT_IN (func)
      && recursive_call_p (current_function_decl, func))
    {
      tree arg;

      for (param = DECL_ARGUMENTS (func), idx = 0;
	   param && idx < gimple_call_num_args (call);
	   param = DECL_CHAIN (param), idx ++)
	{
	  arg = gimple_call_arg (call, idx);
	  if (param != arg)
	    {
	      /* Make sure there are no problems with copying.  The parameter
	         must have a copyable type and the two arguments must have reasonably
	         equivalent types.  The latter requirement could be relaxed if
	         we emitted a suitable type conversion statement.  */
	      if (!is_gimple_reg_type (TREE_TYPE (param))
		  || !useless_type_conversion_p (TREE_TYPE (param),
					         TREE_TYPE (arg)))
		break;

	      /* The parameter should be a real operand, so that the phi node
		 created for it at the start of the function has the meaning
		 of copying the value.  This test implies is_gimple_reg_type
		 from the previous condition, however this one could be
		 relaxed by being more careful with copying the new value
		 of the parameter (emitting appropriate GIMPLE_ASSIGN and
		 updating the virtual operands).  */
	      if (!is_gimple_reg (param))
		break;
	    }
	}
      if (idx == gimple_call_num_args (call) && !param)
	tail_recursion = true;
    }

  /* Make sure the tail invocation of this function does not refer
     to local variables.  */
  FOR_EACH_LOCAL_DECL (cfun, idx, var)
    {
      if (TREE_CODE (var) != PARM_DECL
	  && auto_var_in_fn_p (var, cfun->decl)
	  && (ref_maybe_used_by_stmt_p (call, var)
	      || call_may_clobber_ref_p (call, var)))
	return;
    }

  /* Now check the statements after the call.  None of them has virtual
     operands, so they may only depend on the call through its return
     value.  The return value should also be dependent on each of them,
     since we are running after dce.  */
  m = NULL_TREE;
  a = NULL_TREE;

  abb = bb;
  agsi = gsi;
  while (1)
    {
      tree tmp_a = NULL_TREE;
      tree tmp_m = NULL_TREE;
      gsi_next (&agsi);

      while (gsi_end_p (agsi))
	{
	  ass_var = propagate_through_phis (ass_var, single_succ_edge (abb));
	  abb = single_succ (abb);
	  agsi = gsi_start_bb (abb);
	}

      stmt = gsi_stmt (agsi);

      if (gimple_code (stmt) == GIMPLE_LABEL
	  || gimple_code (stmt) == GIMPLE_NOP)
	continue;

      if (gimple_code (stmt) == GIMPLE_RETURN)
	break;

      if (gimple_clobber_p (stmt))
	continue;

      if (is_gimple_debug (stmt))
	continue;

      if (gimple_code (stmt) != GIMPLE_ASSIGN)
	return;

      /* This is a gimple assign. */
      if (! process_assignment (as_a <gassign *> (stmt), gsi, &tmp_m,
				&tmp_a, &ass_var))
	return;

      if (tmp_a)
	{
	  tree type = TREE_TYPE (tmp_a);
	  if (a)
	    a = fold_build2 (PLUS_EXPR, type, fold_convert (type, a), tmp_a);
	  else
	    a = tmp_a;
	}
      if (tmp_m)
	{
	  tree type = TREE_TYPE (tmp_m);
	  if (m)
	    m = fold_build2 (MULT_EXPR, type, fold_convert (type, m), tmp_m);
	  else
	    m = tmp_m;

	  if (a)
	    a = fold_build2 (MULT_EXPR, type, fold_convert (type, a), tmp_m);
	}
    }

  /* See if this is a tail call we can handle.  */
  ret_var = gimple_return_retval (as_a <greturn *> (stmt));

  /* We may proceed if there either is no return value, or the return value
     is identical to the call's return.  */
  if (ret_var
      && (ret_var != ass_var))
    return;

  /* If this is not a tail recursive call, we cannot handle addends or
     multiplicands.  */
  if (!tail_recursion && (m || a))
    return;

  /* For pointers only allow additions.  */
  if (m && POINTER_TYPE_P (TREE_TYPE (DECL_RESULT (current_function_decl))))
    return;

  nw = XNEW (struct tailcall);

  nw->call_gsi = gsi;

  nw->tail_recursion = tail_recursion;

  nw->mult = m;
  nw->add = a;

  nw->next = *ret;
  *ret = nw;
}
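The M (mult) and A (add) accumulators record arithmetic applied between the recursive call and the return; the classic shape they capture, as a sketch:

int
fact (int n)
{
  if (n == 0)
    return 1;
  return n * fact (n - 1);   /* recorded as mult = n, add = NULL; the later
                                transform carries the product in an
                                accumulator and turns this into a loop */
}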
Example #4
/* Examine the statement that GSIP points to and figure out what interesting
   side effects it has.  */
static void
check_stmt (gimple_stmt_iterator *gsip, funct_state local, bool ipa)
{
  gimple stmt = gsi_stmt (*gsip);

  if (is_gimple_debug (stmt))
    return;

  if (dump_file)
    {
      fprintf (dump_file, "  scanning: ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
    }

  if (gimple_has_volatile_ops (stmt)
      && !gimple_clobber_p (stmt))
    {
      local->pure_const_state = IPA_NEITHER;
      if (dump_file)
	fprintf (dump_file, "    Volatile stmt is not const/pure\n");
    }

  /* Look for loads and stores.  */
  walk_stmt_load_store_ops (stmt, local,
			    ipa ? check_ipa_load : check_load,
			    ipa ? check_ipa_store : check_store);

  if (gimple_code (stmt) != GIMPLE_CALL
      && stmt_could_throw_p (stmt))
    {
      if (cfun->can_throw_non_call_exceptions)
	{
	  if (dump_file)
	    fprintf (dump_file, "    can throw; looping\n");
	  local->looping = true;
	}
      if (stmt_can_throw_external (stmt))
	{
	  if (dump_file)
	    fprintf (dump_file, "    can throw externally\n");
	  local->can_throw = true;
	}
      else
	if (dump_file)
	  fprintf (dump_file, "    can throw\n");
    }
  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      check_call (local, stmt, ipa);
      break;
    case GIMPLE_LABEL:
      if (DECL_NONLOCAL (gimple_label_label (stmt)))
	/* Target of long jump. */
	{
          if (dump_file)
            fprintf (dump_file, "    nonlocal label is not const/pure\n");
	  local->pure_const_state = IPA_NEITHER;
	}
      break;
    case GIMPLE_ASM:
      if (gimple_asm_clobbers_memory_p (stmt))
	{
	  if (dump_file)
	    fprintf (dump_file, "    memory asm clobber is not const/pure\n");
	  /* Abandon all hope, ye who enter here. */
	  local->pure_const_state = IPA_NEITHER;
	}
      if (gimple_asm_volatile_p (stmt))
	{
	  if (dump_file)
	    fprintf (dump_file, "    volatile is not const/pure\n");
	  /* Abandon all hope, ye who enter here. */
	  local->pure_const_state = IPA_NEITHER;
          local->looping = true;
	}
      return;
    default:
      break;
    }
}
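For reference, the three-point lattice that check_stmt only ever lowers (the real declaration lives in ipa-pure-const.c and may differ slightly between releases):

/* IPA_CONST: no observable global reads or writes; IPA_PURE: may read but
   not write global memory; IPA_NEITHER: arbitrary side effects.  */
enum pure_const_state_e { IPA_CONST, IPA_PURE, IPA_NEITHER };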
Example #5
static bool
generate_builtin (struct loop *loop, bitmap partition, bool copy_p)
{
  bool res = false;
  unsigned i, x = 0;
  basic_block *bbs;
  gimple write = NULL;
  gimple_stmt_iterator bsi;
  tree nb_iter = number_of_exit_cond_executions (loop);

  if (!nb_iter || nb_iter == chrec_dont_know)
    return false;

  bbs = get_loop_body_in_dom_order (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	x++;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  gimple stmt = gsi_stmt (bsi);

	  if (gimple_code (stmt) == GIMPLE_LABEL
	      || is_gimple_debug (stmt))
	    continue;

	  if (!bitmap_bit_p (partition, x++))
	    continue;

	  /* If the stmt has uses outside of the loop fail.  */
	  if (stmt_has_scalar_dependences_outside_loop (stmt))
	    goto end;

	  if (is_gimple_assign (stmt)
	      && !is_gimple_reg (gimple_assign_lhs (stmt)))
	    {
	      /* Don't generate the builtins when there are more than
		 one memory write.  */
	      if (write != NULL)
		goto end;

	      write = stmt;
	      if (bb == loop->latch)
		nb_iter = number_of_latch_executions (loop);
	    }
	}
    }

  if (!stmt_with_adjacent_zero_store_dr_p (write))
    goto end;

  /* The new statements will be placed before LOOP.  */
  bsi = gsi_last_bb (loop_preheader_edge (loop)->src);
  generate_memset_zero (write, gimple_assign_lhs (write), nb_iter, bsi);
  res = true;

  /* If this is the last partition for which we generate code, we have
     to destroy the loop.  */
  if (!copy_p)
    {
      unsigned nbbs = loop->num_nodes;
      edge exit = single_exit (loop);
      basic_block src = loop_preheader_edge (loop)->src, dest = exit->dest;
      redirect_edge_pred (exit, src);
      exit->flags &= ~(EDGE_TRUE_VALUE|EDGE_FALSE_VALUE);
      exit->flags |= EDGE_FALLTHRU;
      cancel_loop_tree (loop);
      rescan_loop_exit (exit, false, true);

      for (i = 0; i < nbbs; i++)
	delete_basic_block (bbs[i]);

      set_immediate_dominator (CDI_DOMINATORS, dest,
			       recompute_dominator (CDI_DOMINATORS, dest));
    }

 end:
  free (bbs);
  return res;
}
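The partition shape this targets, sketched: a loop whose single memory write stores zeros to adjacent locations, so the whole partition can be replaced by one library call in the preheader (assuming the exit-condition execution count NB_ITER is computable):

for (i = 0; i < n; i++)
  a[i] = 0;                        /* the only store in the partition */

/* becomes, via generate_memset_zero, roughly: */
memset (a, 0, n * sizeof (a[0]));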
Example #6
/* The core routine of conditional store replacement
   phi optimizations.  Both share much of the infrastructure in how
   to match applicable basic block patterns.  DO_STORE_ELIM is true
   when we want to do conditional store replacement, false otherwise.  */
static unsigned int
tree_ssa_phiopt_worker (bool do_store_elim)
{
  basic_block bb;
  basic_block *bb_order;
  unsigned n, i;
  bool cfgchanged = false;
  struct pointer_set_t *nontrap = 0;

  if (do_store_elim)
    {
      condstoretemp = NULL_TREE;
      /* Calculate the set of non-trapping memory accesses.  */
      nontrap = get_non_trapping ();
    }

  /* Search every basic block for COND_EXPR we may be able to optimize.

     We walk the blocks in order that guarantees that a block with
     a single predecessor is processed before the predecessor.
     This ensures that we collapse inner ifs before visiting the
     outer ones, and also that we do not try to visit a removed
     block.  */
  bb_order = blocks_in_phiopt_order ();
  n = n_basic_blocks - NUM_FIXED_BLOCKS;

  for (i = 0; i < n; i++) 
    {
      gimple cond_stmt, phi;
      basic_block bb1, bb2;
      edge e1, e2;
      tree arg0, arg1;

      bb = bb_order[i];

      cond_stmt = last_stmt (bb);
      /* Check to see if the last statement is a GIMPLE_COND.  */
      if (!cond_stmt
          || gimple_code (cond_stmt) != GIMPLE_COND)
        continue;

      e1 = EDGE_SUCC (bb, 0);
      bb1 = e1->dest;
      e2 = EDGE_SUCC (bb, 1);
      bb2 = e2->dest;

      /* We cannot do the optimization on abnormal edges.  */
      if ((e1->flags & EDGE_ABNORMAL) != 0
          || (e2->flags & EDGE_ABNORMAL) != 0)
       continue;

      /* Skip if bb1 has no successors, or if bb2 is missing or has none.  */
      if (EDGE_COUNT (bb1->succs) == 0
          || bb2 == NULL
	  || EDGE_COUNT (bb2->succs) == 0)
        continue;

      /* Find which bb falls through to the other; canonicalize so it is bb1.  */
      if (EDGE_SUCC (bb1, 0)->dest == bb2)
        ;
      else if (EDGE_SUCC (bb2, 0)->dest == bb1)
        {
	  basic_block bb_tmp = bb1;
	  edge e_tmp = e1;
	  bb1 = bb2;
	  bb2 = bb_tmp;
	  e1 = e2;
	  e2 = e_tmp;
	}
      else
        continue;

      e1 = EDGE_SUCC (bb1, 0);

      /* Make sure that bb1 is just a fall through.  */
      if (!single_succ_p (bb1)
	  || (e1->flags & EDGE_FALLTHRU) == 0)
        continue;

      /* Also make sure that bb1 has only one predecessor and that it
	 is bb.  */
      if (!single_pred_p (bb1)
          || single_pred (bb1) != bb)
	continue;

      if (do_store_elim)
	{
	  /* bb1 is the middle block, bb2 the join block, bb the split block,
	     e1 the fallthrough edge from bb1 to bb2.  We can't do the
	     optimization if the join block has more than two predecessors.  */
	  if (EDGE_COUNT (bb2->preds) > 2)
	    continue;
	  if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
	    cfgchanged = true;
	}
      else
	{
	  gimple_seq phis = phi_nodes (bb2);

	  /* Check to make sure that there is only one PHI node.
	     TODO: we could do it with more than one iff the other PHI nodes
	     have the same elements for these two edges.  */
	  if (! gimple_seq_singleton_p (phis))
	    continue;

	  phi = gsi_stmt (gsi_start (phis));
	  arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
	  arg1 = gimple_phi_arg_def (phi, e2->dest_idx);

	  /* Something is wrong if we cannot find the arguments in the PHI
	     node.  */
	  gcc_assert (arg0 != NULL && arg1 != NULL);

	  /* Do the replacement of conditional if it can be done.  */
	  if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	  else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
	    cfgchanged = true;
	}
    }

  free (bb_order);
  
  if (do_store_elim)
    pointer_set_destroy (nontrap);
  /* If the CFG has changed, we should cleanup the CFG.  */
  if (cfgchanged && do_store_elim)
    {
      /* In cond-store replacement we have added some loads on edges
         and new VOPS (as we moved the store, and created a load).  */
      gsi_commit_edge_inserts ();
      return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
    }
  else if (cfgchanged)
    return TODO_cleanup_cfg;
  return 0;
}
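The shape handed to the replacement routines, sketched as pseudo-GIMPLE; e.g. conditional_replacement rewrites the diamond below into 'x_3 = a_1 != 0;', and value/abs/minmax replacement match analogous single-PHI diamonds:

<bb>:  if (a_1 != 0) goto <bb1>; else goto <bb2>;
<bb1>: ;                                 /* empty middle block */
<bb2>: x_3 = PHI <1 (bb1), 0 (bb)>;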
Example #7
static void
init_copy_prop (void)
{
  basic_block bb;

  copy_of = XCNEWVEC (prop_value_t, num_ssa_names);

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator si;
      int depth = bb->loop_depth;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple stmt = gsi_stmt (si);
	  ssa_op_iter iter;
          tree def;

	  /* The only statements that we care about are those that may
	     generate useful copies.  We also need to mark conditional
	     jumps so that their outgoing edges are added to the work
	     lists of the propagator.

	     Avoid copy propagation from an inner into an outer loop.
	     Otherwise, this may move loop variant variables outside of
	     their loops and prevent coalescing opportunities.  If the
	     value was loop invariant, it will be hoisted by LICM and
	     exposed for copy propagation.
	     ???  This doesn't make sense.  */
	  if (stmt_ends_bb_p (stmt))
            prop_set_simulate_again (stmt, true);
	  else if (stmt_may_generate_copy (stmt)
                   /* Since we are iterating over the statements in
                      BB, not the phi nodes, STMT will always be an
                      assignment.  */
                   && loop_depth_of_name (gimple_assign_rhs1 (stmt)) <= depth)
            prop_set_simulate_again (stmt, true);
	  else
            prop_set_simulate_again (stmt, false);

	  /* Mark all the outputs of this statement as not being
	     the copy of anything.  */
	  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
            if (!prop_simulate_again_p (stmt))
	      set_copy_of_val (def, def);
	}

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
          gimple phi = gsi_stmt (si);
          tree def;

	  def = gimple_phi_result (phi);
	  if (!is_gimple_reg (def))
            prop_set_simulate_again (phi, false);
	  else
            prop_set_simulate_again (phi, true);

	  if (!prop_simulate_again_p (phi))
	    set_copy_of_val (def, def);
	}
    }
}
Example #8
static void
mf_build_check_statement_for (tree base, tree limit,
                              gimple_stmt_iterator *instr_gsi,
                              location_t location, tree dirflag)
{
  gimple_stmt_iterator gsi;
  basic_block cond_bb, then_bb, join_bb;
  edge e;
  tree cond, t, u, v;
  tree mf_base;
  tree mf_elem;
  tree mf_limit;
  gimple g;
  gimple_seq seq, stmts;

  /* We first need to split the current basic block, and start altering
     the CFG.  This allows us to insert the statements we're about to
     construct into the right basic blocks.  */

  cond_bb = gimple_bb (gsi_stmt (*instr_gsi));
  gsi = *instr_gsi;
  gsi_prev (&gsi);
  if (! gsi_end_p (gsi))
    e = split_block (cond_bb, gsi_stmt (gsi));
  else
    e = split_block_after_labels (cond_bb);
  cond_bb = e->src;
  join_bb = e->dest;

  /* A recap at this point: join_bb is the basic block at whose head
     is the gimple statement for which this check expression is being
     built.  cond_bb is the (possibly new, synthetic) basic block the
     end of which will contain the cache-lookup code, and a
     conditional that jumps to the cache-miss code or, much more
     likely, over to join_bb.  */

  /* Create the bb that contains the cache-miss fallback block (mf_check).  */
  then_bb = create_empty_bb (cond_bb);
  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
  make_single_succ_edge (then_bb, join_bb, EDGE_FALLTHRU);

  /* Mark the pseudo-fallthrough edge from cond_bb to join_bb.  */
  e = find_edge (cond_bb, join_bb);
  e->flags = EDGE_FALSE_VALUE;
  e->count = cond_bb->count;
  e->probability = REG_BR_PROB_BASE;

  /* Update dominance info.  Note that bb_join's data was
     updated by split_block.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
      set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb);
    }

  /* Update loop info.  */
  if (current_loops)
    add_bb_to_loop (then_bb, cond_bb->loop_father);

  /* Build our local variables.  */
  mf_elem = create_tmp_reg (mf_cache_structptr_type, "__mf_elem");
  mf_base = create_tmp_reg (mf_uintptr_type, "__mf_base");
  mf_limit = create_tmp_reg (mf_uintptr_type, "__mf_limit");

  /* Build: __mf_base = (uintptr_t) <base address expression>.  */
  seq = NULL;
  t = fold_convert_loc (location, mf_uintptr_type,
			unshare_expr (base));
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_base, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build: __mf_limit = (uintptr_t) <limit address expression>.  */
  t = fold_convert_loc (location, mf_uintptr_type,
			unshare_expr (limit));
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_limit, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build: __mf_elem = &__mf_lookup_cache [(__mf_base >> __mf_shift)
                                            & __mf_mask].  */
  t = build2 (RSHIFT_EXPR, mf_uintptr_type, mf_base,
              flag_mudflap_threads ? mf_cache_shift_decl
	       : mf_cache_shift_decl_l);
  t = build2 (BIT_AND_EXPR, mf_uintptr_type, t,
              flag_mudflap_threads ? mf_cache_mask_decl
	       : mf_cache_mask_decl_l);
  t = build4 (ARRAY_REF,
              TREE_TYPE (TREE_TYPE (mf_cache_array_decl)),
              mf_cache_array_decl, t, NULL_TREE, NULL_TREE);
  t = build1 (ADDR_EXPR, mf_cache_structptr_type, t);
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_elem, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Quick validity check.

     if (__mf_elem->low > __mf_base
         || (__mf_elem->high < __mf_limit))
        {
          __mf_check ();
          ... and only if single-threaded:
          __mf_lookup_shift_l = ...;
          __mf_lookup_mask_l = ...;
        }

     It is expected that this body of code is rarely executed so we mark
     the edge to the THEN clause of the conditional jump as unlikely.  */

  /* Construct t <-- '__mf_elem->low  > __mf_base'.  */
  t = build3 (COMPONENT_REF, mf_uintptr_type,
              build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem),
              TYPE_FIELDS (mf_cache_struct_type), NULL_TREE);
  t = build2 (GT_EXPR, boolean_type_node, t, mf_base);

  /* Construct '__mf_elem->high < __mf_limit'.

     First build:
        1) u <--  '__mf_elem->high'
        2) v <--  '__mf_limit'.

     Then build 'u <-- (u < v)'.  */

  u = build3 (COMPONENT_REF, mf_uintptr_type,
              build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem),
              DECL_CHAIN (TYPE_FIELDS (mf_cache_struct_type)), NULL_TREE);

  v = mf_limit;

  u = build2 (LT_EXPR, boolean_type_node, u, v);

  /* Build the composed conditional: t <-- 't || u'.  Then store the
     result of the evaluation of 't' in a temporary variable which we
     can use as the condition for the conditional jump.  */
  t = build2 (TRUTH_OR_EXPR, boolean_type_node, t, u);
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  cond = create_tmp_reg (boolean_type_node, "__mf_unlikely_cond");
  g = gimple_build_assign  (cond, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build the conditional jump.  'cond' is just a temporary so we can
     simply build a void COND_EXPR.  We do need labels in both arms though.  */
  g = gimple_build_cond (NE_EXPR, cond, boolean_false_node, NULL_TREE,
			 NULL_TREE);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* At this point, after so much hard work, we have only constructed
     the conditional jump,

     if (__mf_elem->low > __mf_base
         || (__mf_elem->high < __mf_limit))

     The lowered GIMPLE tree representing this code is in the statement
     sequence 'seq'.

     We can insert this now in the current basic block, i.e. the one that
     the statement we're instrumenting was originally in.  */
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);

  /*  Now build up the body of the cache-miss handling:

     __mf_check();
     refresh *_l vars.

     This is the body of the conditional.  */

  seq = NULL;
  /* u is a string, so it is already a gimple value.  */
  u = mf_file_function_line_tree (location);
  /* NB: we pass the overall [base..limit] range to mf_check.  */
  v = fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type,
		   fold_build2_loc (location,
				MINUS_EXPR, mf_uintptr_type, mf_limit, mf_base),
		   build_int_cst (mf_uintptr_type, 1));
  v = force_gimple_operand (v, &stmts, true, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_call (mf_check_fndecl, 4, mf_base, v, dirflag, u);
  gimple_seq_add_stmt (&seq, g);

  if (! flag_mudflap_threads)
    {
      if (stmt_ends_bb_p (g))
	{
	  gsi = gsi_start_bb (then_bb);
	  gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);
	  e = split_block (then_bb, g);
	  then_bb = e->dest;
	  seq = NULL;
	}

      g = gimple_build_assign (mf_cache_shift_decl_l, mf_cache_shift_decl);
      gimple_seq_add_stmt (&seq, g);

      g = gimple_build_assign (mf_cache_mask_decl_l, mf_cache_mask_decl);
      gimple_seq_add_stmt (&seq, g);
    }

  /* Insert the check code in the THEN block.  */
  gsi = gsi_start_bb (then_bb);
  gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);

  *instr_gsi = gsi_start_bb (join_bb);
}
Example #9
tree
walk_gimple_stmt (gimple_stmt_iterator *gsi, walk_stmt_fn callback_stmt,
		  walk_tree_fn callback_op, struct walk_stmt_info *wi)
{
  gimple ret;
  tree tree_ret;
  gimple stmt = gsi_stmt (*gsi);

  if (wi)
    {
      wi->gsi = *gsi;
      wi->removed_stmt = false;

      if (wi->want_locations && gimple_has_location (stmt))
	input_location = gimple_location (stmt);
    }

  ret = NULL;

  /* Invoke the statement callback.  Return if the callback handled
     all of STMT operands by itself.  */
  if (callback_stmt)
    {
      bool handled_ops = false;
      tree_ret = callback_stmt (gsi, &handled_ops, wi);
      if (handled_ops)
	return tree_ret;

      /* If CALLBACK_STMT did not handle operands, it should not have
	 a value to return.  */
      gcc_assert (tree_ret == NULL);

      if (wi && wi->removed_stmt)
	return NULL;

      /* Re-read stmt in case the callback changed it.  */
      stmt = gsi_stmt (*gsi);
    }

  /* If CALLBACK_OP is defined, invoke it on every operand of STMT.  */
  if (callback_op)
    {
      tree_ret = walk_gimple_op (stmt, callback_op, wi);
      if (tree_ret)
	return tree_ret;
    }

  /* If STMT can have statements inside (e.g. GIMPLE_BIND), walk them.  */
  switch (gimple_code (stmt))
    {
    case GIMPLE_BIND:
      ret = walk_gimple_seq_mod (gimple_bind_body_ptr (stmt), callback_stmt,
				 callback_op, wi);
      if (ret)
	return wi->callback_result;
      break;

    case GIMPLE_CATCH:
      ret = walk_gimple_seq_mod (gimple_catch_handler_ptr (stmt), callback_stmt,
				 callback_op, wi);
      if (ret)
	return wi->callback_result;
      break;

    case GIMPLE_EH_FILTER:
      ret = walk_gimple_seq_mod (gimple_eh_filter_failure_ptr (stmt), callback_stmt,
		             callback_op, wi);
      if (ret)
	return wi->callback_result;
      break;

    case GIMPLE_EH_ELSE:
      ret = walk_gimple_seq_mod (gimple_eh_else_n_body_ptr (stmt),
			     callback_stmt, callback_op, wi);
      if (ret)
	return wi->callback_result;
      ret = walk_gimple_seq_mod (gimple_eh_else_e_body_ptr (stmt),
			     callback_stmt, callback_op, wi);
      if (ret)
	return wi->callback_result;
      break;

    case GIMPLE_TRY:
      ret = walk_gimple_seq_mod (gimple_try_eval_ptr (stmt), callback_stmt, callback_op,
	                     wi);
      if (ret)
	return wi->callback_result;

      ret = walk_gimple_seq_mod (gimple_try_cleanup_ptr (stmt), callback_stmt,
	                     callback_op, wi);
      if (ret)
	return wi->callback_result;
      break;

    case GIMPLE_OMP_FOR:
      ret = walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt), callback_stmt,
		             callback_op, wi);
      if (ret)
	return wi->callback_result;

      /* FALL THROUGH.  */
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_TASKGROUP:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_TARGET:
    case GIMPLE_OMP_TEAMS:
      ret = walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), callback_stmt,
			     callback_op, wi);
      if (ret)
	return wi->callback_result;
      break;

    case GIMPLE_WITH_CLEANUP_EXPR:
      ret = walk_gimple_seq_mod (gimple_wce_cleanup_ptr (stmt), callback_stmt,
			     callback_op, wi);
      if (ret)
	return wi->callback_result;
      break;

    case GIMPLE_TRANSACTION:
      ret = walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
			     callback_stmt, callback_op, wi);
      if (ret)
	return wi->callback_result;
      break;

    default:
      gcc_assert (!gimple_has_substatements (stmt));
      break;
    }

  return NULL;
}
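A minimal usage sketch (the callback, counter and FNDECL are hypothetical; walk_gimple_seq drives walk_gimple_stmt over every statement of a sequence):

static tree
count_calls_cb (gimple_stmt_iterator *gsi, bool *handled_ops,
                struct walk_stmt_info *wi)
{
  if (is_gimple_call (gsi_stmt (*gsi)))
    ++*(int *) wi->info;
  *handled_ops = false;        /* let the walker visit operands too */
  return NULL_TREE;            /* a non-NULL return would stop the walk */
}

/* ... at a call site: */
struct walk_stmt_info wi;
int ncalls = 0;
memset (&wi, 0, sizeof (wi));
wi.info = &ncalls;
walk_gimple_seq (gimple_body (fndecl), count_calls_cb, NULL, &wi);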
Example #10
static bool
tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel, struct loop_size *size,
			 int upper_bound)
{
  basic_block *body = get_loop_body (loop);
  gimple_stmt_iterator gsi;
  unsigned int i;
  bool after_exit;
  vec<basic_block> path = get_loop_hot_path (loop);

  size->overall = 0;
  size->eliminated_by_peeling = 0;
  size->last_iteration = 0;
  size->last_iteration_eliminated_by_peeling = 0;
  size->num_pure_calls_on_hot_path = 0;
  size->num_non_pure_calls_on_hot_path = 0;
  size->non_call_stmts_on_hot_path = 0;
  size->num_branches_on_hot_path = 0;
  size->constant_iv = 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num);
  for (i = 0; i < loop->num_nodes; i++)
    {
      if (edge_to_cancel && body[i] != edge_to_cancel->src
	  && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src))
	after_exit = true;
      else
	after_exit = false;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index, after_exit);

      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  int num = estimate_num_insns (stmt, &eni_size_weights);
	  bool likely_eliminated = false;
	  bool likely_eliminated_last = false;
	  bool likely_eliminated_peeled = false;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "  size: %3i ", num);
	      print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0);
	    }

	  /* Look for reasons why we might optimize this stmt away. */

	  if (gimple_has_side_effects (stmt))
	    ;
	  /* Exit conditional.  */
	  else if (exit && body[i] == exit->src
		   && stmt == last_stmt (exit->src))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
	        fprintf (dump_file, "   Exit condition will be eliminated "
			 "in peeled copies.\n");
	      likely_eliminated_peeled = true;
	    }
	  else if (edge_to_cancel && body[i] == edge_to_cancel->src
		   && stmt == last_stmt (edge_to_cancel->src))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
	        fprintf (dump_file, "   Exit condition will be eliminated "
			 "in last copy.\n");
	      likely_eliminated_last = true;
	    }
	  /* Sets of IV variables  */
	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
	      && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
	        fprintf (dump_file, "   Induction variable computation will"
			 " be folded away.\n");
	      likely_eliminated = true;
	    }
	  /* Assignments of IV variables.  */
	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
		   && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
		   && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt, loop)
		   && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
		       || constant_after_peeling (gimple_assign_rhs2 (stmt),
		       				  stmt, loop)))
	    {
	      size->constant_iv = true;
	      if (dump_file && (dump_flags & TDF_DETAILS))
	        fprintf (dump_file, "   Constant expression will be folded away.\n");
	      likely_eliminated = true;
	    }
	  /* Conditionals.  */
	  else if ((gimple_code (stmt) == GIMPLE_COND
		    && constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
		    && constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop))
		   || (gimple_code (stmt) == GIMPLE_SWITCH
		       && constant_after_peeling (gimple_switch_index (
						    as_a <gswitch *> (stmt)),
						  stmt, loop)))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
	        fprintf (dump_file, "   Constant conditional.\n");
	      likely_eliminated = true;
	    }

	  size->overall += num;
	  if (likely_eliminated || likely_eliminated_peeled)
	    size->eliminated_by_peeling += num;
	  if (!after_exit)
	    {
	      size->last_iteration += num;
	      if (likely_eliminated || likely_eliminated_last)
		size->last_iteration_eliminated_by_peeling += num;
	    }
	  if ((size->overall * 3 / 2 - size->eliminated_by_peeling
	      - size->last_iteration_eliminated_by_peeling) > upper_bound)
	    {
              free (body);
	      path.release ();
	      return true;
	    }
	}
    }
  while (path.length ())
    {
      basic_block bb = path.pop ();
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  if (gimple_code (stmt) == GIMPLE_CALL)
	    {
	      int flags = gimple_call_flags (stmt);
	      tree decl = gimple_call_fndecl (stmt);

	      if (decl && DECL_IS_BUILTIN (decl)
		  && is_inexpensive_builtin (decl))
		;
	      else if (flags & (ECF_PURE | ECF_CONST))
		size->num_pure_calls_on_hot_path++;
	      else
		size->num_non_pure_calls_on_hot_path++;
	      size->num_branches_on_hot_path ++;
	    }
	  else if (gimple_code (stmt) != GIMPLE_CALL
		   && gimple_code (stmt) != GIMPLE_DEBUG)
	    size->non_call_stmts_on_hot_path++;
	  if (((gimple_code (stmt) == GIMPLE_COND
	        && (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
		    || !constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop)))
	       || (gimple_code (stmt) == GIMPLE_SWITCH
		   && !constant_after_peeling (gimple_switch_index (
						 as_a <gswitch *> (stmt)),
					       stmt, loop)))
	      && (!exit || bb != exit->src))
	    size->num_branches_on_hot_path++;
	}
    }
  path.release ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
    	     size->eliminated_by_peeling, size->last_iteration,
	     size->last_iteration_eliminated_by_peeling);

  free (body);
  return false;
}
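The early exit inside the statement loop is the whole contract: the function returns true ("too big, give up") as soon as the optimistic post-unroll estimate exceeds UPPER_BOUND. For example, with overall = 100, eliminated_by_peeling = 30 and last_iteration_eliminated_by_peeling = 10, it compares 100 * 3/2 - 30 - 10 = 110 against UPPER_BOUND and bails out when 110 is larger.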
Example #11
      gimple_seq_add_stmt (&new_seq, stmt);
      return new_seq;
    }
   else
    return seq;
}


/* Process every variable mentioned in BIND_EXPRs.  */
static tree
mx_xfn_xform_decls (gimple_stmt_iterator *gsi,
		    bool *handled_operands_p ATTRIBUTE_UNUSED,
		    struct walk_stmt_info *wi)
{
  struct mf_xform_decls_data *d = (struct mf_xform_decls_data *) wi->info;
  gimple stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_BIND:
      {
        /* Process function parameters now (but only once).  */
	if (d->param_decls)
	  {
	    gimple_bind_set_body (stmt,
				  mx_register_decls (d->param_decls,
						     gimple_bind_body (stmt),
						     gimple_location (stmt)));
	    d->param_decls = NULL_TREE;
	  }
Example #12
File: ubsan.c Project: didemoto/gcc
static void
instrument_bool_enum_load (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree rhs = gimple_assign_rhs1 (stmt);
  tree type = TREE_TYPE (rhs);
  tree minv = NULL_TREE, maxv = NULL_TREE;

  if (TREE_CODE (type) == BOOLEAN_TYPE && (flag_sanitize & SANITIZE_BOOL))
    {
      minv = boolean_false_node;
      maxv = boolean_true_node;
    }
  else if (TREE_CODE (type) == ENUMERAL_TYPE
	   && (flag_sanitize & SANITIZE_ENUM)
	   && TREE_TYPE (type) != NULL_TREE
	   && TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE
	   && (TYPE_PRECISION (TREE_TYPE (type))
	       < GET_MODE_PRECISION (TYPE_MODE (type))))
    {
      minv = TYPE_MIN_VALUE (TREE_TYPE (type));
      maxv = TYPE_MAX_VALUE (TREE_TYPE (type));
    }
  else
    return;

  int modebitsize = GET_MODE_BITSIZE (TYPE_MODE (type));
  HOST_WIDE_INT bitsize, bitpos;
  tree offset;
  enum machine_mode mode;
  int volatilep = 0, unsignedp = 0;
  tree base = get_inner_reference (rhs, &bitsize, &bitpos, &offset, &mode,
				   &unsignedp, &volatilep, false);
  tree utype = build_nonstandard_integer_type (modebitsize, 1);

  if ((TREE_CODE (base) == VAR_DECL && DECL_HARD_REGISTER (base))
      || (bitpos % modebitsize) != 0
      || bitsize != modebitsize
      || GET_MODE_BITSIZE (TYPE_MODE (utype)) != modebitsize
      || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return;

  location_t loc = gimple_location (stmt);
  tree ptype = build_pointer_type (TREE_TYPE (rhs));
  tree atype = reference_alias_ptr_type (rhs);
  gimple g = gimple_build_assign (make_ssa_name (ptype, NULL),
				  build_fold_addr_expr (rhs));
  gimple_set_location (g, loc);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  tree mem = build2 (MEM_REF, utype, gimple_assign_lhs (g),
		     build_int_cst (atype, 0));
  tree urhs = make_ssa_name (utype, NULL);
  g = gimple_build_assign (urhs, mem);
  gimple_set_location (g, loc);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  minv = fold_convert (utype, minv);
  maxv = fold_convert (utype, maxv);
  if (!integer_zerop (minv))
    {
      g = gimple_build_assign_with_ops (MINUS_EXPR,
					make_ssa_name (utype, NULL),
					urhs, minv);
      gimple_set_location (g, loc);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  gimple_stmt_iterator gsi2 = *gsi;
  basic_block then_bb, fallthru_bb;
  *gsi = create_cond_insert_point (gsi, true, false, true,
				   &then_bb, &fallthru_bb);
  g = gimple_build_cond (GT_EXPR, gimple_assign_lhs (g),
			 int_const_binop (MINUS_EXPR, maxv, minv),
			 NULL_TREE, NULL_TREE);
  gimple_set_location (g, loc);
  gsi_insert_after (gsi, g, GSI_NEW_STMT);

  gimple_assign_set_rhs_with_ops (&gsi2, NOP_EXPR, urhs, NULL_TREE);
  update_stmt (stmt);

  tree data = ubsan_create_data ("__ubsan_invalid_value_data",
				 loc, NULL,
				 ubsan_type_descriptor (type, false),
				 NULL_TREE);
  data = build_fold_addr_expr_loc (loc, data);
  tree fn = builtin_decl_explicit (BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE);

  gsi2 = gsi_after_labels (then_bb);
  tree val = force_gimple_operand_gsi (&gsi2, ubsan_encode_value (urhs),
				       true, NULL_TREE, true, GSI_SAME_STMT);
  g = gimple_build_call (fn, 2, data, val);
  gimple_set_location (g, loc);
  gsi_insert_before (&gsi2, g, GSI_SAME_STMT);
}
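Net effect on the instrumented load, sketched (UTYPE is the same-width unsigned integer type built above; names are the SSA temporaries the code creates, T the original type):

  lhs = rhs;                       /* original bool/enum load */

/* becomes: */
  urhs = *(UTYPE *) &rhs;          /* reload through a MEM_REF of UTYPE */
  if (urhs - minv > maxv - minv)   /* one unsigned compare checks both bounds */
    __ubsan_handle_load_invalid_value (&data, urhs);
  lhs = (T) urhs;                  /* original stmt rewritten as a NOP_EXPR */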
Example #13
File: ubsan.c Project: didemoto/gcc
void
ubsan_expand_null_ifn (gimple_stmt_iterator gsi)
{
  gimple stmt = gsi_stmt (gsi);
  location_t loc = gimple_location (stmt);
  gcc_assert (gimple_call_num_args (stmt) == 2);
  tree ptr = gimple_call_arg (stmt, 0);
  tree ckind = gimple_call_arg (stmt, 1);

  basic_block cur_bb = gsi_bb (gsi);

  /* Split the original block holding the pointer dereference.  */
  edge e = split_block (cur_bb, stmt);

  /* Get a hold on the 'condition block', the 'then block' and the
     'else block'.  */
  basic_block cond_bb = e->src;
  basic_block fallthru_bb = e->dest;
  basic_block then_bb = create_empty_bb (cond_bb);
  if (current_loops)
    {
      add_bb_to_loop (then_bb, cond_bb->loop_father);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  /* Make an edge coming from the 'cond block' into the 'then block';
     this edge is unlikely taken, so set up the probability accordingly.  */
  e = make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
  e->probability = PROB_VERY_UNLIKELY;

  /* Connect 'then block' with the 'else block'.  This is needed
     as the ubsan routines we call in the 'then block' are not noreturn.
     The 'then block' only has one outcoming edge.  */
  make_single_succ_edge (then_bb, fallthru_bb, EDGE_FALLTHRU);

  /* Set up the fallthrough basic block.  */
  e = find_edge (cond_bb, fallthru_bb);
  e->flags = EDGE_FALSE_VALUE;
  e->count = cond_bb->count;
  e->probability = REG_BR_PROB_BASE - PROB_VERY_UNLIKELY;

  /* Update dominance info for the newly created then_bb; note that
     fallthru_bb's dominance info has already been updated by
     split_block.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);

  /* Put the ubsan builtin call into the newly created BB.  */
  tree fn = builtin_decl_implicit (BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH);
  const struct ubsan_mismatch_data m
    = { build_zero_cst (pointer_sized_int_node), ckind };
  tree data = ubsan_create_data ("__ubsan_null_data",
				 loc, &m,
				 ubsan_type_descriptor (TREE_TYPE (ptr), true),
				 NULL_TREE);
  data = build_fold_addr_expr_loc (loc, data);
  gimple g = gimple_build_call (fn, 2, data,
				build_zero_cst (pointer_sized_int_node));
  gimple_set_location (g, loc);
  gimple_stmt_iterator gsi2 = gsi_start_bb (then_bb);
  gsi_insert_after (&gsi2, g, GSI_NEW_STMT);

  /* Unlink the UBSAN_NULL's vops before replacing it.  */
  unlink_stmt_vdef (stmt);

  g = gimple_build_cond (EQ_EXPR, ptr, build_int_cst (TREE_TYPE (ptr), 0),
			 NULL_TREE, NULL_TREE);
  gimple_set_location (g, loc);

  /* Replace the UBSAN_NULL with a GIMPLE_COND stmt.  */
  gsi_replace (&gsi, g, false);
}
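The rewritten control flow, sketched:

cond_bb:      if (ptr == 0) goto then_bb; else goto fallthru_bb;  /* replaces UBSAN_NULL */
then_bb:      __ubsan_handle_type_mismatch (&data, 0);            /* PROB_VERY_UNLIKELY */
fallthru_bb:  /* ...the original dereference of ptr continues here... */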
Example #14
static void
build_constructors (gimple swtch)
{
  unsigned i, branch_num = gimple_switch_num_labels (swtch);
  tree pos = info.range_min;

  for (i = 1; i < branch_num; i++)
    {
      tree cs = gimple_switch_label (swtch, i);
      basic_block bb = label_to_block (CASE_LABEL (cs));
      edge e;
      tree high;
      gimple_stmt_iterator gsi;
      int j;

      if (bb == info.final_bb)
	e = find_edge (info.switch_bb, bb);
      else
	e = single_succ_edge (bb);
      gcc_assert (e);

      while (tree_int_cst_lt (pos, CASE_LOW (cs)))
	{
	  int k;
	  for (k = 0; k < info.phi_count; k++)
	    {
	      constructor_elt *elt;

	      elt = VEC_quick_push (constructor_elt,
				    info.constructors[k], NULL);
	      elt->index = int_const_binop (MINUS_EXPR, pos,
					    info.range_min);
	      elt->value = info.default_values[k];
	    }

	  pos = int_const_binop (PLUS_EXPR, pos, integer_one_node);
	}
      gcc_assert (tree_int_cst_equal (pos, CASE_LOW (cs)));

      j = 0;
      if (CASE_HIGH (cs))
	high = CASE_HIGH (cs);
      else
	high = CASE_LOW (cs);
      for (gsi = gsi_start_phis (info.final_bb);
	   !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple phi = gsi_stmt (gsi);
	  tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
	  tree low = CASE_LOW (cs);
	  pos = CASE_LOW (cs);

	  do
	    {
	      constructor_elt *elt;

	      elt = VEC_quick_push (constructor_elt,
				    info.constructors[j], NULL);
	      elt->index = int_const_binop (MINUS_EXPR, pos, info.range_min);
	      elt->value = val;

	      pos = int_const_binop (PLUS_EXPR, pos, integer_one_node);
	    } while (!tree_int_cst_lt (high, pos)
		     && tree_int_cst_lt (low, pos));
	  j++;
	}
    }
}
Example #15
bool
aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
{
  bool changed = false;
  gimple stmt = gsi_stmt (*gsi);
  tree call = gimple_call_fn (stmt);
  tree fndecl;
  gimple new_stmt = NULL;

  if (call)
    {
      fndecl = gimple_call_fndecl (stmt);
      if (fndecl)
	{
	  int fcode = DECL_FUNCTION_CODE (fndecl);
	  int nargs = gimple_call_num_args (stmt);
	  tree *args = (nargs > 0
			? gimple_call_arg_ptr (stmt, 0)
			: &error_mark_node);

	  /* We use gimple's REDUC_(PLUS|MIN|MAX)_EXPRs for float, signed int
	     and unsigned int; it will distinguish according to the types of
	     the arguments to the __builtin.  */
	  switch (fcode)
	    {
	      BUILTIN_VALL (UNOP, reduc_plus_scal_, 10)
	        new_stmt = gimple_build_assign_with_ops (
						REDUC_PLUS_EXPR,
						gimple_call_lhs (stmt),
						args[0],
						NULL_TREE);
		break;
	      BUILTIN_VDQIF (UNOP, reduc_smax_scal_, 10)
	      BUILTIN_VDQ_BHSI (UNOPU, reduc_umax_scal_, 10)
		new_stmt = gimple_build_assign_with_ops (
						REDUC_MAX_EXPR,
						gimple_call_lhs (stmt),
						args[0],
						NULL_TREE);
		break;
	      BUILTIN_VDQIF (UNOP, reduc_smin_scal_, 10)
	      BUILTIN_VDQ_BHSI (UNOPU, reduc_umin_scal_, 10)
		new_stmt = gimple_build_assign_with_ops (
						REDUC_MIN_EXPR,
						gimple_call_lhs (stmt),
						args[0],
						NULL_TREE);
		break;

	    default:
	      break;
	    }
	}
    }

  if (new_stmt)
    {
      gsi_replace (gsi, new_stmt, true);
      changed = true;
    }

  return changed;
}
Example #16
static void
vect_pattern_recog_1 (
	gimple (* vect_recog_func) (gimple, tree *, tree *),
	gimple_stmt_iterator si)
{
  gimple stmt = gsi_stmt (si), pattern_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info pattern_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree pattern_vectype;
  tree type_in, type_out;
  enum tree_code code;

  pattern_stmt = (* vect_recog_func) (stmt, &type_in, &type_out);
  if (!pattern_stmt)
    return;

  if (VECTOR_MODE_P (TYPE_MODE (type_in)))
    {
      /* No need to check target support (already checked by the pattern
         recognition function).  */
      pattern_vectype = type_in;
    }
  else
    {
      enum machine_mode vec_mode;
      enum insn_code icode;
      optab optab;

      /* Check target support  */
      pattern_vectype = get_vectype_for_scalar_type (type_in);
      if (!pattern_vectype)
        return;

      if (is_gimple_assign (pattern_stmt))
	code = gimple_assign_rhs_code (pattern_stmt);
      else
        {
	  gcc_assert (is_gimple_call (pattern_stmt));
	  code = CALL_EXPR;
	}

      optab = optab_for_tree_code (code, pattern_vectype, optab_default);
      vec_mode = TYPE_MODE (pattern_vectype);
      if (!optab
          || (icode = optab_handler (optab, vec_mode)->insn_code) ==
              CODE_FOR_nothing
          || (type_out
              && (!get_vectype_for_scalar_type (type_out)
                  || (insn_data[icode].operand[0].mode !=
                      TYPE_MODE (get_vectype_for_scalar_type (type_out))))))
	return;
    }

  /* Found a vectorizable pattern.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "pattern recognized: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* Mark the stmts that are involved in the pattern. */
  gsi_insert_before (&si, pattern_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (pattern_stmt,
		      new_stmt_vec_info (pattern_stmt, loop_vinfo, NULL));
  pattern_stmt_info = vinfo_for_stmt (pattern_stmt);

  STMT_VINFO_RELATED_STMT (pattern_stmt_info) = stmt;
  STMT_VINFO_DEF_TYPE (pattern_stmt_info) = STMT_VINFO_DEF_TYPE (stmt_info);
  STMT_VINFO_VECTYPE (pattern_stmt_info) = pattern_vectype;
  STMT_VINFO_IN_PATTERN_P (stmt_info) = true;
  STMT_VINFO_RELATED_STMT (stmt_info) = pattern_stmt;

  return;
}
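A typical recognizer this driver invokes, sketched: the widening-multiply pattern replaces a multiply of widened narrow values with a single WIDEN_MULT_EXPR statement that vectorizes more cheaply (provided the target supplies the optab, as checked above):

  a_int = (int) a_short;
  b_int = (int) b_short;
  prod  = a_int * b_int;     /* recognized as: prod = a_short w* b_short */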
Example #17
/* Find memcpy, mempcpy, memmove and memset calls, perform
   checks before call and then call no_chk version of
   functions.  We do it on O2 to enable inlining of these
   functions during expand.

   Also try to find memcpy, mempcpy, memmove and memset calls
   which are known to not write pointers to memory and use
   faster function versions for them.  */
static void
chkp_optimize_string_function_calls (void)
{
  basic_block bb;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Searching for replaceable string function calls...\n");

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
        {
	  gimple *stmt = gsi_stmt (i);
	  tree fndecl;

	  if (gimple_code (stmt) != GIMPLE_CALL
	      || !gimple_call_with_bounds_p (stmt))
	    continue;

	  fndecl = gimple_call_fndecl (stmt);

	  if (!fndecl || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
	    continue;

	  if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMCPY_CHKP
	      || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMPCPY_CHKP
	      || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMMOVE_CHKP
	      || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMSET_CHKP)
	    {
	      tree dst = gimple_call_arg (stmt, 0);
	      tree dst_bnd = gimple_call_arg (stmt, 1);
	      bool is_memset = DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMSET_CHKP;
	      tree size = gimple_call_arg (stmt, is_memset ? 3 : 4);
	      tree fndecl_nochk;
	      gimple_stmt_iterator j;
	      basic_block check_bb;
	      address_t size_val;
	      int sign;
	      bool known;

	      /* We may replace the call with the corresponding __chkp_*_nobnd
		 call when the destination's pointee type is not void and
		 contains no pointers.  */
	      if (POINTER_TYPE_P (TREE_TYPE (dst))
		  && !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (dst)))
		  && !chkp_type_has_pointer (TREE_TYPE (TREE_TYPE (dst))))
		{
		  tree fndecl_nobnd
		    = chkp_get_nobnd_fndecl (DECL_FUNCTION_CODE (fndecl));

		  if (fndecl_nobnd)
		    fndecl = fndecl_nobnd;
		}

	      fndecl_nochk = chkp_get_nochk_fndecl (DECL_FUNCTION_CODE (fndecl));

	      if (fndecl_nochk)
		fndecl = fndecl_nochk;

	      if (fndecl != gimple_call_fndecl (stmt))
		{
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    {
		      fprintf (dump_file, "Replacing call: ");
		      print_gimple_stmt (dump_file, stmt, 0,
					 TDF_VOPS|TDF_MEMSYMS);
		    }

		  gimple_call_set_fndecl (stmt, fndecl);

		  if (dump_file && (dump_flags & TDF_DETAILS))
		    {
		      fprintf (dump_file, "With a new call: ");
		      print_gimple_stmt (dump_file, stmt, 0,
					 TDF_VOPS|TDF_MEMSYMS);
		    }
		}

	      /* If there is no nochk version of function then
		 do nothing.  Otherwise insert checks before
		 the call.  */
	      if (!fndecl_nochk)
		continue;

	      /* If size passed to call is known and > 0
		 then we may insert checks unconditionally.  */
	      size_val.pol.create (0);
	      chkp_collect_value (size, size_val);
	      known = chkp_is_constant_addr (size_val, &sign);
	      size_val.pol.release ();

	      /* If we are not sure size is not zero then we have
		 to perform runtime check for size and perform
		 checks only when size is not zero.  */
	      if (!known)
		{
		  gimple *check = gimple_build_cond (NE_EXPR,
						     size,
						     size_zero_node,
						     NULL_TREE,
						     NULL_TREE);

		  /* Split block before string function call.  */
		  gsi_prev (&i);
		  check_bb = insert_cond_bb (bb, gsi_stmt (i), check);

		  /* Set position for checks.  */
		  j = gsi_last_bb (check_bb);

		  /* The block was split and therefore we
		     need to set the iterator to its end.  */
		  i = gsi_last_bb (bb);
		}
	      /* If size is known to be zero then no checks
		 should be performed.  */
	      else if (!sign)
		continue;
	      else
		j = i;

	      size = size_binop (MINUS_EXPR, size, size_one_node);
	      if (!is_memset)
		{
		  tree src = gimple_call_arg (stmt, 2);
		  tree src_bnd = gimple_call_arg (stmt, 3);

		  chkp_check_mem_access (src, fold_build_pointer_plus (src, size),
					 src_bnd, j, gimple_location (stmt),
					 integer_zero_node);
		}

	      chkp_check_mem_access (dst, fold_build_pointer_plus (dst, size),
				     dst_bnd, j, gimple_location (stmt),
				     integer_one_node);

	    }
	}
    }
Example #18
static void
lower_gimple_bind (gimple_stmt_iterator *gsi, struct lower_data *data)
{
  tree old_block = data->block;
  gbind *stmt = as_a <gbind *> (gsi_stmt (*gsi));
  tree new_block = gimple_bind_block (stmt);

  if (new_block)
    {
      if (new_block == old_block)
	{
	  /* The outermost block of the original function may not be the
	     outermost statement chain of the gimplified function.  So we
	     may see the outermost block just inside the function.  */
	  gcc_assert (new_block == DECL_INITIAL (current_function_decl));
	  new_block = NULL;
	}
      else
	{
	  /* We do not expect to handle duplicate blocks.  */
	  gcc_assert (!TREE_ASM_WRITTEN (new_block));
	  TREE_ASM_WRITTEN (new_block) = 1;

	  /* Block tree may get clobbered by inlining.  Normally this would
	     be fixed in rest_of_decl_compilation using block notes, but
	     since we are not going to emit them, it is up to us.  */
	  BLOCK_CHAIN (new_block) = BLOCK_SUBBLOCKS (old_block);
	  BLOCK_SUBBLOCKS (old_block) = new_block;
	  BLOCK_SUBBLOCKS (new_block) = NULL_TREE;
	  BLOCK_SUPERCONTEXT (new_block) = old_block;

	  data->block = new_block;
	}
    }

  record_vars (gimple_bind_vars (stmt));

  /* Scrap DECL_CHAIN up to BLOCK_VARS to ease GC after we no longer
     need gimple_bind_vars.  */
  tree next;
  /* BLOCK_VARS and gimple_bind_vars share a common sub-chain.  Find
     it by marking all BLOCK_VARS.  */
  if (gimple_bind_block (stmt))
    for (tree t = BLOCK_VARS (gimple_bind_block (stmt)); t; t = DECL_CHAIN (t))
      TREE_VISITED (t) = 1;
  for (tree var = gimple_bind_vars (stmt);
       var && ! TREE_VISITED (var); var = next)
    {
      next = DECL_CHAIN (var);
      DECL_CHAIN (var) = NULL_TREE;
    }
  /* Unmark BLOCK_VARS.  */
  if (gimple_bind_block (stmt))
    for (tree t = BLOCK_VARS (gimple_bind_block (stmt)); t; t = DECL_CHAIN (t))
      TREE_VISITED (t) = 0;

  lower_sequence (gimple_bind_body_ptr (stmt), data);

  if (new_block)
    {
      gcc_assert (data->block == new_block);

      BLOCK_SUBBLOCKS (new_block)
	= blocks_nreverse (BLOCK_SUBBLOCKS (new_block));
      data->block = old_block;
    }

  /* The GIMPLE_BIND no longer carries any useful information -- kill it.  */
  gsi_insert_seq_before (gsi, gimple_bind_body (stmt), GSI_SAME_STMT);
  gsi_remove (gsi, false);
}
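lower_gimple_bind is invoked from a walk over the whole statement sequence. A sketch of that driver, simplified from GCC's gimple-low.c: lower_stmt consumes or replaces statements and advances the iterator itself, so the loop body is a single call.

static void
lower_sequence (gimple_seq *seq_p, struct lower_data *data)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (*seq_p); !gsi_end_p (gsi); )
    lower_stmt (&gsi, data);
}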
Example No. 19
/* Verify cgraph nodes of given cgraph node.  */
void
verify_cgraph_node (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  struct cgraph_node *main_clone;
  struct function *this_cfun = DECL_STRUCT_FUNCTION (node->decl);
  struct function *saved_cfun = cfun;
  basic_block this_block;
  gimple_stmt_iterator gsi;
  bool error_found = false;

  if (errorcount || sorrycount)
    return;

  timevar_push (TV_CGRAPH_VERIFY);
  /* debug_generic_stmt needs correct cfun.  */
  set_cfun (this_cfun);
  for (e = node->callees; e; e = e->next_callee)
    if (e->aux)
      {
	error ("aux field set for edge %s->%s",
	       cgraph_node_name (e->caller), cgraph_node_name (e->callee));
	error_found = true;
      }
  if (node->count < 0)
    {
      error ("Execution count is negative");
      error_found = true;
    }
  for (e = node->callers; e; e = e->next_caller)
    {
      if (e->count < 0)
	{
	  error ("caller edge count is negative");
	  error_found = true;
	}
      if (e->frequency < 0)
	{
	  error ("caller edge frequency is negative");
	  error_found = true;
	}
      if (e->frequency > CGRAPH_FREQ_MAX)
	{
	  error ("caller edge frequency is too large");
	  error_found = true;
	}
      if (!e->inline_failed)
	{
	  if (node->global.inlined_to
	      != (e->caller->global.inlined_to
		  ? e->caller->global.inlined_to : e->caller))
	    {
	      error ("inlined_to pointer is wrong");
	      error_found = true;
	    }
	  if (node->callers->next_caller)
	    {
	      error ("multiple inline callers");
	      error_found = true;
	    }
	}
      else
	if (node->global.inlined_to)
	  {
	    error ("inlined_to pointer set for noninline callers");
	    error_found = true;
	  }
    }
  if (!node->callers && node->global.inlined_to)
    {
      error ("inlined_to pointer is set but no predecessors found");
      error_found = true;
    }
  if (node->global.inlined_to == node)
    {
      error ("inlined_to pointer refers to itself");
      error_found = true;
    }

  for (main_clone = cgraph_node (node->decl); main_clone;
       main_clone = main_clone->next_clone)
    if (main_clone == node)
      break;
  if (!cgraph_node (node->decl))
    {
      error ("node not found in cgraph_hash");
      error_found = true;
    }

  if (node->analyzed
      && !TREE_ASM_WRITTEN (node->decl)
      && (!DECL_EXTERNAL (node->decl) || node->global.inlined_to))
    {
      if (this_cfun->cfg)
	{
	  /* The nodes we're interested in are never shared, so walk
	     the tree ignoring duplicates.  */
	  struct pointer_set_t *visited_nodes = pointer_set_create ();
	  /* Reach the trees by walking over the CFG, and note the
	     enclosing basic-blocks in the call edges.  */
	  FOR_EACH_BB_FN (this_block, this_cfun)
	    for (gsi = gsi_start_bb (this_block);
                 !gsi_end_p (gsi);
                 gsi_next (&gsi))
	      {
		gimple stmt = gsi_stmt (gsi);
		tree decl;
		if (is_gimple_call (stmt) && (decl = gimple_call_fndecl (stmt)))
		  {
		    struct cgraph_edge *e = cgraph_edge (node, stmt);
		    if (e)
		      {
			if (e->aux)
			  {
			    error ("shared call_stmt:");
			    debug_gimple_stmt (stmt);
			    error_found = true;
			  }
			if (e->callee->decl != cgraph_node (decl)->decl
			    && e->inline_failed)
			  {
			    error ("edge points to wrong declaration:");
			    debug_tree (e->callee->decl);
			    fprintf (stderr, " Instead of:");
			    debug_tree (decl);
			    error_found = true;
			  }
			e->aux = (void *)1;
		      }
		    else
		      {
			error ("missing callgraph edge for call stmt:");
			debug_gimple_stmt (stmt);
			error_found = true;
		      }
		  }
	      }
	  pointer_set_destroy (visited_nodes);
	}
      else
	/* No CFG available?!  */
	gcc_unreachable ();

      for (e = node->callees; e; e = e->next_callee)
	{
	  if (!e->aux && !e->indirect_call)
	    {
	      error ("edge %s->%s has no corresponding call_stmt",
		     cgraph_node_name (e->caller),
		     cgraph_node_name (e->callee));
	      debug_gimple_stmt (e->call_stmt);
	      error_found = true;
	    }
	  e->aux = 0;
	}
    }
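The excerpt stops short of the verifier's final step. In GCC sources of this vintage the function ends roughly as follows (a reconstruction, not a verbatim quote): report a fatal internal error if anything was flagged, then restore the saved state symmetrically with the setup at the top.

  if (error_found)
    {
      dump_cgraph_node (stderr, node);
      internal_error ("verify_cgraph_node failed");
    }
  set_cfun (saved_cfun);
  timevar_pop (TV_CGRAPH_VERIFY);
}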
Example No. 20
static void
lower_try_catch (gimple_stmt_iterator *gsi, struct lower_data *data)
{
  bool cannot_fallthru;
  gimple *stmt = gsi_stmt (*gsi);
  gimple_stmt_iterator i;

  /* We don't handle GIMPLE_TRY_FINALLY.  */
  gcc_assert (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH);

  lower_sequence (gimple_try_eval_ptr (stmt), data);
  cannot_fallthru = data->cannot_fallthru;

  i = gsi_start (*gimple_try_cleanup_ptr (stmt));
  switch (gimple_code (gsi_stmt (i)))
    {
    case GIMPLE_CATCH:
      /* We expect to see a sequence of GIMPLE_CATCH stmts, each with a
	 catch expression and a body.  The whole try/catch may fall
	 through iff any of the catch bodies falls through.  */
      for (; !gsi_end_p (i); gsi_next (&i))
	{
	  data->cannot_fallthru = false;
	  lower_sequence (gimple_catch_handler_ptr (
                            as_a <gcatch *> (gsi_stmt (i))),
			  data);
	  if (!data->cannot_fallthru)
	    cannot_fallthru = false;
	}
      break;

    case GIMPLE_EH_FILTER:
      /* The exception filter expression only matters if there is an
	 exception.  If the exception does not match EH_FILTER_TYPES,
	 we will execute EH_FILTER_FAILURE, and we will fall through
	 if that falls through.  If the exception does match
	 EH_FILTER_TYPES, the stack unwinder will continue up the
	 stack, so we will not fall through.  We don't know whether we
	 will throw an exception which matches EH_FILTER_TYPES or not,
	 so we just ignore EH_FILTER_TYPES and assume that we might
	 throw an exception which doesn't match.  */
      data->cannot_fallthru = false;
      lower_sequence (gimple_eh_filter_failure_ptr (gsi_stmt (i)), data);
      if (!data->cannot_fallthru)
	cannot_fallthru = false;
      break;

    case GIMPLE_DEBUG:
      gcc_checking_assert (gimple_debug_begin_stmt_p (stmt));
      break;

    default:
      /* This case represents statements to be executed when an
	 exception occurs.  Those statements are implicitly followed
	 by a GIMPLE_RESX to resume execution after the exception.  So
	 in this case the try/catch never falls through.  */
      data->cannot_fallthru = false;
      lower_sequence (gimple_try_cleanup_ptr (stmt), data);
      break;
    }

  data->cannot_fallthru = cannot_fallthru;
  gsi_next (gsi);
}
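The bookkeeping above boils down to one rule: a lowered try/catch can fall through iff its protected sequence can, or any catch body can. A standalone restatement in plain C (the boolean inputs are assumed to come from lowering each sequence, as in the function above):

#include <stdbool.h>
#include <stddef.h>

static bool
try_catch_falls_through (bool eval_falls_through,
			 const bool *handler_falls_through, size_t n)
{
  size_t i;

  if (eval_falls_through)
    return true;

  for (i = 0; i < n; i++)
    if (handler_falls_through[i])
      return true;

  return false;
}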
Example No. 21
static unsigned int
tree_profiling (void)
{
  struct cgraph_node *node;

  /* Don't profile functions produced at destruction time, particularly
     the gcov datastructure initializer.  Don't profile if it has been
     already instrumented either (when OpenMP expansion creates
     child function from already instrumented body).  */
  if (cgraph_state == CGRAPH_STATE_FINISHED)
    return 0;

  init_node_map();

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->analyzed
	  || !gimple_has_body_p (node->decl))
	continue;

      /* Don't profile functions produced for builtin stuff.  */
      if (DECL_SOURCE_LOCATION (node->decl) == BUILTINS_LOCATION
	  || DECL_STRUCT_FUNCTION (node->decl)->after_tree_profile)
	continue;

      push_cfun (DECL_STRUCT_FUNCTION (node->decl));
      current_function_decl = node->decl;

      /* Re-set global shared temporary variable for edge-counters.  */
      gcov_type_tmp_var = NULL_TREE;

      /* Local pure-const may imply need to fixup the cfg.  */
      execute_fixup_cfg ();
      branch_prob ();

      if (! flag_branch_probabilities
	  && flag_profile_values)
	gimple_gen_ic_func_profiler ();

      if (flag_branch_probabilities
	  && flag_profile_values
	  && flag_value_profile_transformations)
	gimple_value_profile_transformations ();

      /* The above could hose dominator info.  Currently there is
	 none coming in, this is a safety valve.  It should be
	 easy to adjust it, if and when there is some.  */
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);

      current_function_decl = NULL;
      pop_cfun ();
    }

  /* Drop pure/const flags from instrumented functions.  */
  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->analyzed
	  || !gimple_has_body_p (node->decl)
	  || !(!node->clone_of || node->decl != node->clone_of->decl))
	continue;

      /* Don't profile functions produced for builtin stuff.  */
      if (DECL_SOURCE_LOCATION (node->decl) == BUILTINS_LOCATION
	  || DECL_STRUCT_FUNCTION (node->decl)->after_tree_profile)
	continue;

      cgraph_set_const_flag (node, false, false);
      cgraph_set_pure_flag (node, false, false);
    }

  /* Update call statements and rebuild the cgraph.  */
  for (node = cgraph_nodes; node; node = node->next)
    {
      basic_block bb;

      if (!node->analyzed
	  || !gimple_has_body_p (node->decl)
	  || !(!node->clone_of || node->decl != node->clone_of->decl))
	continue;

      /* Don't profile functions produced for builtin stuff.  */
      if (DECL_SOURCE_LOCATION (node->decl) == BUILTINS_LOCATION
	  || DECL_STRUCT_FUNCTION (node->decl)->after_tree_profile)
	continue;

      push_cfun (DECL_STRUCT_FUNCTION (node->decl));
      current_function_decl = node->decl;

      FOR_EACH_BB (bb)
	{
	  gimple_stmt_iterator gsi;
	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple stmt = gsi_stmt (gsi);
	      if (is_gimple_call (stmt))
		update_stmt (stmt);
	    }
	}

      cfun->after_tree_profile = 1;
      update_ssa (TODO_update_ssa);

      rebuild_cgraph_edges ();

      current_function_decl = NULL;
      pop_cfun ();
    }

  del_node_map();
  return 0;
}
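Each of the three loops above repeats the same set-up dance before touching a function body. A sketch of that pattern factored out, reusing only calls that appear in the function; the callback parameter is an assumption made for illustration.

/* Hypothetical helper: run FN inside the context of every analyzed
   function that has a GIMPLE body.  */
static void
for_each_function_with_body (void (*fn) (void))
{
  struct cgraph_node *node;

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->analyzed || !gimple_has_body_p (node->decl))
	continue;

      push_cfun (DECL_STRUCT_FUNCTION (node->decl));
      current_function_decl = node->decl;

      fn ();

      current_function_decl = NULL;
      pop_cfun ();
    }
}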
Example No. 22
static unsigned int
tree_profiling (void)
{
  struct cgraph_node *node;

  /* This is a small-ipa pass that gets called only once, from
     cgraphunit.c:ipa_passes().  */
  gcc_assert (cgraph_state == CGRAPH_STATE_IPA_SSA);

  init_node_map();

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      if (!gimple_has_body_p (node->symbol.decl))
	continue;

      /* Don't profile functions produced for builtin stuff.  */
      if (DECL_SOURCE_LOCATION (node->symbol.decl) == BUILTINS_LOCATION)
	continue;

      push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl));

      /* Local pure-const may imply need to fixup the cfg.  */
      if (execute_fixup_cfg () & TODO_cleanup_cfg)
	cleanup_tree_cfg ();

      branch_prob ();

      if (! flag_branch_probabilities
	  && flag_profile_values)
	gimple_gen_ic_func_profiler ();

      if (flag_branch_probabilities
	  && flag_profile_values
	  && flag_value_profile_transformations)
	gimple_value_profile_transformations ();

      /* The above could hose dominator info.  Currently there is
	 none coming in, this is a safety valve.  It should be
	 easy to adjust it, if and when there is some.  */
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);
      pop_cfun ();
    }

  /* Drop pure/const flags from instrumented functions.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      if (!gimple_has_body_p (node->symbol.decl)
	  || !(!node->clone_of
	       || node->symbol.decl != node->clone_of->symbol.decl))
	continue;

      /* Don't profile functions produced for builtin stuff.  */
      if (DECL_SOURCE_LOCATION (node->symbol.decl) == BUILTINS_LOCATION)
	continue;

      cgraph_set_const_flag (node, false, false);
      cgraph_set_pure_flag (node, false, false);
    }

  /* Update call statements and rebuild the cgraph.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      basic_block bb;

      if (!gimple_has_body_p (node->symbol.decl)
	  || !(!node->clone_of
	       || node->symbol.decl != node->clone_of->symbol.decl))
	continue;

      /* Don't profile functions produced for builtin stuff.  */
      if (DECL_SOURCE_LOCATION (node->symbol.decl) == BUILTINS_LOCATION)
	continue;

      push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl));

      FOR_EACH_BB (bb)
	{
	  gimple_stmt_iterator gsi;
	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple stmt = gsi_stmt (gsi);
	      if (is_gimple_call (stmt))
		update_stmt (stmt);
	    }
	}

      update_ssa (TODO_update_ssa);

      rebuild_cgraph_edges ();

      pop_cfun ();
    }

  del_node_map();
  return 0;
}
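The double-negated clone test in the later loops is easy to misread; unfolded, it skips exactly those clones that share a decl with their original. A sketch of the same predicate (symbol layout as in this GCC vintage):

static bool
is_decl_sharing_clone (struct cgraph_node *node)
{
  return node->clone_of
	 && node->symbol.decl == node->clone_of->symbol.decl;
}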
Example No. 23
static bool
generate_loops_for_partition (struct loop *loop, bitmap partition, bool copy_p)
{
  unsigned i, x;
  gimple_stmt_iterator bsi;
  basic_block *bbs;

  if (copy_p)
    {
      loop = copy_loop_before (loop);
      create_preheader (loop, CP_SIMPLE_PREHEADERS);
      create_bb_after_loop (loop);
    }

  if (loop == NULL)
    return false;

  /* Remove stmts not in the PARTITION bitmap.  The order in which we
     visit the phi nodes and the statements is exactly as in
     stmts_from_loop.  */
  bbs = get_loop_body_in_dom_order (loop);

  if (MAY_HAVE_DEBUG_STMTS)
    for (x = 0, i = 0; i < loop->num_nodes; i++)
      {
	basic_block bb = bbs[i];

	for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	  if (!bitmap_bit_p (partition, x++))
	    reset_debug_uses (gsi_stmt (bsi));

	for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	  {
	    gimple stmt = gsi_stmt (bsi);
	    if (gimple_code (stmt) != GIMPLE_LABEL
		&& !is_gimple_debug (stmt)
		&& !bitmap_bit_p (partition, x++))
	      reset_debug_uses (stmt);
	  }
      }

  for (x = 0, i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi);)
	if (!bitmap_bit_p (partition, x++))
	  {
	    gimple phi = gsi_stmt (bsi);
	    if (!is_gimple_reg (gimple_phi_result (phi)))
	      mark_virtual_phi_result_for_renaming (phi);
	    remove_phi_node (&bsi, true);
	  }
	else
	  gsi_next (&bsi);

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi);)
	{
	  gimple stmt = gsi_stmt (bsi);
	  if (gimple_code (stmt) != GIMPLE_LABEL
	      && !is_gimple_debug (stmt)
	      && !bitmap_bit_p (partition, x++))
	    {
	      unlink_stmt_vdef (stmt);
	      gsi_remove (&bsi, true);
	      release_defs (stmt);
	    }
	  else
	    gsi_next (&bsi);
	}
    }

  free (bbs);
  return true;
}
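Correctness here hinges on the index X advancing exactly as in stmts_from_loop: per block, first the PHI nodes, then every statement that is neither a label nor a debug statement. A sketch of the predicate the two walks above implicitly share:

static bool
stmt_has_partition_index (gimple stmt)
{
  return gimple_code (stmt) != GIMPLE_LABEL
	 && !is_gimple_debug (stmt);
}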
Example No. 24
static bool
should_duplicate_loop_header_p (basic_block header, struct loop *loop,
				int *limit)
{
  gimple_stmt_iterator bsi;
  gimple *last;

  gcc_assert (!header->aux);

  /* Loop header copying usually increases size of the code.  This used not to
     be true, since quite often it is possible to verify that the condition is
     satisfied in the first iteration and therefore to eliminate it.  Jump
     threading handles these cases now.  */
  if (optimize_loop_for_size_p (loop))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "  Not duplicating bb %i: optimizing for size.\n",
		 header->index);
      return false;
    }

  gcc_assert (EDGE_COUNT (header->succs) > 0);
  if (single_succ_p (header))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "  Not duplicating bb %i: it is single succ.\n",
		 header->index);
      return false;
    }

  if (flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 0)->dest)
      && flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 1)->dest))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "  Not duplicating bb %i: both sucessors are in loop.\n",
		 loop->num);
      return false;
    }

  /* If this is not the original loop header, we want it to have just
     one predecessor in order to match the && pattern.  */
  if (header != loop->header && !single_pred_p (header))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "  Not duplicating bb %i: it has mutiple predecestors.\n",
		 header->index);
      return false;
    }

  last = last_stmt (header);
  if (gimple_code (last) != GIMPLE_COND)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "  Not duplicating bb %i: it does not end by conditional.\n",
		 header->index);
      return false;
    }

  /* Count number of instructions and punt on calls.  */
  for (bsi = gsi_start_bb (header); !gsi_end_p (bsi); gsi_next (&bsi))
    {
      last = gsi_stmt (bsi);

      if (gimple_code (last) == GIMPLE_LABEL)
	continue;

      if (is_gimple_debug (last))
	continue;

      if (gimple_code (last) == GIMPLE_CALL
	  && !gimple_inexpensive_call_p (as_a <gcall *> (last)))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "  Not duplicating bb %i: it contains call.\n",
		     header->index);
	  return false;
	}

      *limit -= estimate_num_insns (last, &eni_size_weights);
      if (*limit < 0)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "  Not duplicating bb %i contains too many insns.\n",
		     header->index);
	  return false;
	}
    }
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "    Will duplicate bb %i\n", header->index); 
  return true;
}
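A usage sketch, loosely following how tree-ssa-loop-ch.c drives this heuristic: walk candidate header blocks while the check approves, spending one shared instruction budget. MAX_HEADER_INSNS is a hypothetical stand-in for the real --param limit, and choosing the successor that stays inside the loop is elided.

#define MAX_HEADER_INSNS 20	/* hypothetical budget */

static void
try_copy_headers (struct loop *loop)
{
  basic_block header = loop->header;
  int limit = MAX_HEADER_INSNS;

  while (should_duplicate_loop_header_p (header, loop, &limit))
    /* The real pass picks whichever successor stays inside LOOP.  */
    header = EDGE_SUCC (header, 0)->dest;
}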
Example No. 25
static void
lower_stmt (gimple_stmt_iterator *gsi, struct lower_data *data)
{
  gimple stmt = gsi_stmt (*gsi);

  gimple_set_block (stmt, data->block);

  switch (gimple_code (stmt))
    {
    case GIMPLE_BIND:
      lower_gimple_bind (gsi, data);
      /* Propagate fallthruness.  */
      return;

    case GIMPLE_COND:
    case GIMPLE_GOTO:
    case GIMPLE_SWITCH:
      data->cannot_fallthru = true;
      gsi_next (gsi);
      return;

    case GIMPLE_RETURN:
      if (data->cannot_fallthru)
	{
	  gsi_remove (gsi, false);
	  /* Propagate fallthruness.  */
	}
      else
	{
	  lower_gimple_return (gsi, data);
	  data->cannot_fallthru = true;
	}
      return;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
	lower_try_catch (gsi, data);
      else
	{
	  /* It must be a GIMPLE_TRY_FINALLY.  */
	  bool cannot_fallthru;
	  lower_sequence (gimple_try_eval_ptr (stmt), data);
	  cannot_fallthru = data->cannot_fallthru;

	  /* The finally clause is always executed after the try clause,
	     so if it does not fall through, then the try-finally will not
	     fall through.  Otherwise, if the try clause does not fall
	     through, then when the finally clause falls through it will
	     resume execution wherever the try clause was going.  So the
	     whole try-finally will only fall through if both the try
	     clause and the finally clause fall through.  */
	  data->cannot_fallthru = false;
	  lower_sequence (gimple_try_cleanup_ptr (stmt), data);
	  data->cannot_fallthru |= cannot_fallthru;
	  gsi_next (gsi);
	}
      return;

    case GIMPLE_EH_ELSE:
      {
	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
	lower_sequence (gimple_eh_else_n_body_ptr (eh_else_stmt), data);
	lower_sequence (gimple_eh_else_e_body_ptr (eh_else_stmt), data);
      }
      break;

    case GIMPLE_NOP:
    case GIMPLE_ASM:
    case GIMPLE_ASSIGN:
    case GIMPLE_PREDICT:
    case GIMPLE_LABEL:
    case GIMPLE_EH_MUST_NOT_THROW:
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SECTIONS_SWITCH:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_TASKGROUP:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_RETURN:
    case GIMPLE_OMP_ATOMIC_LOAD:
    case GIMPLE_OMP_ATOMIC_STORE:
    case GIMPLE_OMP_CONTINUE:
      break;

    case GIMPLE_CALL:
      {
	tree decl = gimple_call_fndecl (stmt);
	unsigned i;

	for (i = 0; i < gimple_call_num_args (stmt); i++)
	  {
	    tree arg = gimple_call_arg (stmt, i);
	    if (EXPR_P (arg))
	      TREE_SET_BLOCK (arg, data->block);
	  }

	if (decl
	    && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
	  {
	    if (DECL_FUNCTION_CODE (decl) == BUILT_IN_SETJMP)
	      {
		lower_builtin_setjmp (gsi);
		data->cannot_fallthru = false;
		return;
	      }
	    else if (DECL_FUNCTION_CODE (decl) == BUILT_IN_POSIX_MEMALIGN
		     && flag_tree_bit_ccp)
	      {
		lower_builtin_posix_memalign (gsi);
		return;
	      }
	  }

	if (decl && (flags_from_decl_or_type (decl) & ECF_NORETURN))
	  {
	    data->cannot_fallthru = true;
	    gsi_next (gsi);
	    return;
	  }
      }
      break;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_TARGET:
    case GIMPLE_OMP_TEAMS:
      data->cannot_fallthru = false;
      lower_omp_directive (gsi, data);
      data->cannot_fallthru = false;
      return;

    case GIMPLE_TRANSACTION:
      lower_sequence (gimple_transaction_body_ptr (
			as_a <gtransaction *> (stmt)),
		      data);
      break;

    default:
      gcc_unreachable ();
    }

  data->cannot_fallthru = false;
  gsi_next (gsi);
}
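For reference, the state threaded through this walk. Only the two fields lower_stmt touches directly are shown; the real struct in gimple-low.c also collects return statements so they can be unified at the end of the function.

struct lower_data
{
  /* Block the current statement belongs to.  */
  tree block;

  /* True if the current statement cannot fall through.  */
  bool cannot_fallthru;
};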
Example No. 26
static unsigned int
rename_ssa_copies (void)
{
  var_map map;
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree var, part_var;
  gimple stmt, phi;
  unsigned x;
  FILE *debug;

  memset (&stats, 0, sizeof (stats));

  if (dump_file && (dump_flags & TDF_DETAILS))
    debug = dump_file;
  else
    debug = NULL;

  map = init_var_map (num_ssa_names);

  FOR_EACH_BB (bb)
    {
      /* Scan for real copies.  */
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  stmt = gsi_stmt (gsi);
	  if (gimple_assign_ssa_name_copy_p (stmt))
	    {
	      tree lhs = gimple_assign_lhs (stmt);
	      tree rhs = gimple_assign_rhs1 (stmt);

	      copy_rename_partition_coalesce (map, lhs, rhs, debug);
	    }
	}
    }

  FOR_EACH_BB (bb)
    {
      /* Treat PHI nodes as copies between the result and each argument.  */
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          size_t i;
	  tree res;

	  phi = gsi_stmt (gsi);
	  res = gimple_phi_result (phi);

	  /* Do not process virtual SSA_NAMES.  */
	  if (virtual_operand_p (res))
	    continue;

	  /* Make sure to only use the same partition for an argument
	     as the result but never the other way around.  */
	  if (SSA_NAME_VAR (res)
	      && !DECL_IGNORED_P (SSA_NAME_VAR (res)))
	    for (i = 0; i < gimple_phi_num_args (phi); i++)
	      {
		tree arg = PHI_ARG_DEF (phi, i);
		if (TREE_CODE (arg) == SSA_NAME)
		  copy_rename_partition_coalesce (map, res, arg,
						  debug);
	      }
	  /* Otherwise, if all arguments are in the same partition, try
	     to merge it with the result.  */
	  else
	    {
	      int all_p_same = -1;
	      int p = -1;
	      for (i = 0; i < gimple_phi_num_args (phi); i++)
		{
		  tree arg = PHI_ARG_DEF (phi, i);
		  if (TREE_CODE (arg) != SSA_NAME)
		    {
		      all_p_same = 0;
		      break;
		    }
		  else if (all_p_same == -1)
		    {
		      p = partition_find (map->var_partition,
					  SSA_NAME_VERSION (arg));
		      all_p_same = 1;
		    }
		  else if (all_p_same == 1
			   && p != partition_find (map->var_partition,
						   SSA_NAME_VERSION (arg)))
		    {
		      all_p_same = 0;
		      break;
		    }
		}
	      if (all_p_same == 1)
		copy_rename_partition_coalesce (map, res,
						PHI_ARG_DEF (phi, 0),
						debug);
	    }
        }
    }

  if (debug)
    dump_var_map (debug, map);

  /* Now one more pass to make all elements of a partition share the same
     root variable.  */

  for (x = 1; x < num_ssa_names; x++)
    {
      part_var = partition_to_var (map, x);
      if (!part_var)
        continue;
      var = ssa_name (x);
      if (SSA_NAME_VAR (var) == SSA_NAME_VAR (part_var))
	continue;
      if (debug)
        {
	  fprintf (debug, "Coalesced ");
	  print_generic_expr (debug, var, TDF_SLIM);
	  fprintf (debug, " to ");
	  print_generic_expr (debug, part_var, TDF_SLIM);
	  fprintf (debug, "\n");
	}
      stats.coalesced++;
      replace_ssa_name_symbol (var, SSA_NAME_VAR (part_var));
    }

  statistics_counter_event (cfun, "copies coalesced",
			    stats.coalesced);
  delete_var_map (map);
  return 0;
}
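The all_p_same scan inside the PHI loop answers a single question: do all arguments already sit in one partition? The same test unfolded into a predicate, reusing only names from the function above:

static bool
phi_args_in_one_partition (var_map map, gimple phi)
{
  int p = -1;
  size_t i;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = PHI_ARG_DEF (phi, i);
      int q;

      if (TREE_CODE (arg) != SSA_NAME)
	return false;

      q = partition_find (map->var_partition, SSA_NAME_VERSION (arg));
      if (p == -1)
	p = q;
      else if (p != q)
	return false;
    }

  return true;
}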
Example No. 27
static tree
independent_of_stmt_p (tree expr, gimple *at, gimple_stmt_iterator gsi)
{
  basic_block bb, call_bb, at_bb;
  edge e;
  edge_iterator ei;

  if (is_gimple_min_invariant (expr))
    return expr;

  if (TREE_CODE (expr) != SSA_NAME)
    return NULL_TREE;

  /* Mark the blocks in the chain leading to the end.  */
  at_bb = gimple_bb (at);
  call_bb = gimple_bb (gsi_stmt (gsi));
  for (bb = call_bb; bb != at_bb; bb = single_succ (bb))
    bb->aux = &bb->aux;
  bb->aux = &bb->aux;

  while (1)
    {
      at = SSA_NAME_DEF_STMT (expr);
      bb = gimple_bb (at);

      /* The default definition or defined before the chain.  */
      if (!bb || !bb->aux)
	break;

      if (bb == call_bb)
	{
	  for (; !gsi_end_p (gsi); gsi_next (&gsi))
	    if (gsi_stmt (gsi) == at)
	      break;

	  if (!gsi_end_p (gsi))
	    expr = NULL_TREE;
	  break;
	}

      if (gimple_code (at) != GIMPLE_PHI)
	{
	  expr = NULL_TREE;
	  break;
	}

      FOR_EACH_EDGE (e, ei, bb->preds)
	if (e->src->aux)
	  break;
      gcc_assert (e);

      expr = PHI_ARG_DEF_FROM_EDGE (at, e);
      if (TREE_CODE (expr) != SSA_NAME)
	{
	  /* The value is a constant.  */
	  break;
	}
    }

  /* Unmark the blocks.  */
  for (bb = call_bb; bb != at_bb; bb = single_succ (bb))
    bb->aux = NULL;
  bb->aux = NULL;

  return expr;
}
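The bb->aux pointer serves as a cheap visited mark here: every block on the single-successor chain from the call block up to and including AT's block gets a non-NULL cookie, and the same walk clears it afterwards. The idiom in isolation (a sketch; it assumes the chain really is connected by single successors):

/* Mark or unmark the single-successor chain FROM .. TO, inclusive.  */
static void
mark_chain (basic_block from, basic_block to, bool mark)
{
  basic_block bb;

  for (bb = from; bb != to; bb = single_succ (bb))
    bb->aux = mark ? (void *) &bb->aux : NULL;

  /* Include TO itself, as the function above does.  */
  bb->aux = mark ? (void *) &bb->aux : NULL;
}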
Example No. 28
static bool
init_dont_simulate_again (void)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  gimple phi;
  bool saw_a_complex_op = false;

  FOR_EACH_BB (bb)
    {
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  phi = gsi_stmt (gsi);
	  prop_set_simulate_again (phi,
				   is_complex_reg (gimple_phi_result (phi)));
	}

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt;
	  tree op0, op1;
	  bool sim_again_p;

	  stmt = gsi_stmt (gsi);
	  op0 = op1 = NULL_TREE;

	  /* Most control-altering statements must be initially 
	     simulated, else we won't cover the entire cfg.  */
	  sim_again_p = stmt_ends_bb_p (stmt);

	  switch (gimple_code (stmt))
	    {
	    case GIMPLE_CALL:
	      if (gimple_call_lhs (stmt))
	        sim_again_p = is_complex_reg (gimple_call_lhs (stmt));
	      break;

	    case GIMPLE_ASSIGN:
	      sim_again_p = is_complex_reg (gimple_assign_lhs (stmt));
	      if (gimple_assign_rhs_code (stmt) == REALPART_EXPR
		  || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
		op0 = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
	      else
		op0 = gimple_assign_rhs1 (stmt);
	      if (gimple_num_ops (stmt) > 2)
		op1 = gimple_assign_rhs2 (stmt);
	      break;

	    case GIMPLE_COND:
	      op0 = gimple_cond_lhs (stmt);
	      op1 = gimple_cond_rhs (stmt);
	      break;

	    default:
	      break;
	    }

	  if (op0 || op1)
	    switch (gimple_expr_code (stmt))
	      {
	      case EQ_EXPR:
	      case NE_EXPR:
	      case PLUS_EXPR:
	      case MINUS_EXPR:
	      case MULT_EXPR:
	      case TRUNC_DIV_EXPR:
	      case CEIL_DIV_EXPR:
	      case FLOOR_DIV_EXPR:
	      case ROUND_DIV_EXPR:
	      case RDIV_EXPR:
		if (TREE_CODE (TREE_TYPE (op0)) == COMPLEX_TYPE
		    || TREE_CODE (TREE_TYPE (op1)) == COMPLEX_TYPE)
		  saw_a_complex_op = true;
		break;

	      case NEGATE_EXPR:
	      case CONJ_EXPR:
		if (TREE_CODE (TREE_TYPE (op0)) == COMPLEX_TYPE)
		  saw_a_complex_op = true;
		break;

	      case REALPART_EXPR:
	      case IMAGPART_EXPR:
		/* The total store transformation performed during
		  gimplification creates such uninitialized loads
		  and we need to lower the statement to be able
		  to fix things up.  */
		if (TREE_CODE (op0) == SSA_NAME
		    && ssa_undefined_value_p (op0))
		  saw_a_complex_op = true;
		break;

	      default:
		break;
	      }

	  prop_set_simulate_again (stmt, sim_again_p);
	}
    }

  return saw_a_complex_op;
}
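is_complex_reg is the predicate everything above leans on; in the same file it is presumably defined along these lines (a sketch, not quoted from the source): a value participates in lowering only if it is a complex-typed GIMPLE register.

static bool
is_complex_reg (tree lhs)
{
  return TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE
	 && is_gimple_reg (lhs);
}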
Example No. 29
static void
eliminate_tail_call (struct tailcall *t)
{
  tree param, rslt;
  gimple *stmt, *call;
  tree arg;
  size_t idx;
  basic_block bb, first;
  edge e;
  gphi *phi;
  gphi_iterator gpi;
  gimple_stmt_iterator gsi;
  gimple *orig_stmt;

  stmt = orig_stmt = gsi_stmt (t->call_gsi);
  bb = gsi_bb (t->call_gsi);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Eliminated tail recursion in bb %d : ",
	       bb->index);
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
      fprintf (dump_file, "\n");
    }

  gcc_assert (is_gimple_call (stmt));

  first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));

  /* Remove the code after call_gsi that will become unreachable.  The
     possibly unreachable code in other blocks is removed later in
     cfg cleanup.  */
  gsi = t->call_gsi;
  gimple_stmt_iterator gsi2 = gsi_last_bb (gimple_bb (gsi_stmt (gsi)));
  while (gsi_stmt (gsi2) != gsi_stmt (gsi))
    {
      gimple *t = gsi_stmt (gsi2);
      /* Do not remove the return statement, so that redirect_edge_and_branch
	 sees how the block ends.  */
      if (gimple_code (t) != GIMPLE_RETURN)
	{
	  gimple_stmt_iterator gsi3 = gsi2;
	  gsi_prev (&gsi2);
	  gsi_remove (&gsi3, true);
	  release_defs (t);
	}
      else
	gsi_prev (&gsi2);
    }

  /* The number of executions of the function is reduced by the tailcall.  */
  e = single_succ_edge (gsi_bb (t->call_gsi));
  decrease_profile (EXIT_BLOCK_PTR_FOR_FN (cfun), e->count, EDGE_FREQUENCY (e));
  decrease_profile (ENTRY_BLOCK_PTR_FOR_FN (cfun), e->count,
		    EDGE_FREQUENCY (e));
  if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
    decrease_profile (e->dest, e->count, EDGE_FREQUENCY (e));

  /* Replace the call by a jump to the start of function.  */
  e = redirect_edge_and_branch (single_succ_edge (gsi_bb (t->call_gsi)),
				first);
  gcc_assert (e);
  PENDING_STMT (e) = NULL;

  /* Add phi node entries for arguments.  The ordering of the phi nodes should
     be the same as the ordering of the arguments.  */
  for (param = DECL_ARGUMENTS (current_function_decl),
	 idx = 0, gpi = gsi_start_phis (first);
       param;
       param = DECL_CHAIN (param), idx++)
    {
      if (!arg_needs_copy_p (param))
	continue;

      arg = gimple_call_arg (stmt, idx);
      phi = gpi.phi ();
      gcc_assert (param == SSA_NAME_VAR (PHI_RESULT (phi)));

      add_phi_arg (phi, arg, e, gimple_location (stmt));
      gsi_next (&gpi);
    }

  /* Update the values of accumulators.  */
  adjust_accumulator_values (t->call_gsi, t->mult, t->add, e);

  call = gsi_stmt (t->call_gsi);
  rslt = gimple_call_lhs (call);
  if (rslt != NULL_TREE)
    {
      /* Result of the call will no longer be defined.  So adjust the
	 SSA_NAME_DEF_STMT accordingly.  */
      SSA_NAME_DEF_STMT (rslt) = gimple_build_nop ();
    }

  gsi_remove (&t->call_gsi, true);
  release_defs (call);
}
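At the source level the net effect is the classic accumulator rewrite: the tail call becomes a jump back to the top of the function, with values carried on back-edge PHI nodes. A plain-C illustration (ordinary compilable C, not GCC internals):

/* Before: the recursive call is in tail position.  */
static int
sum_rec (int n, int acc)
{
  if (n == 0)
    return acc;
  return sum_rec (n - 1, acc + n);	/* becomes a jump */
}

/* After: the loop form the pass effectively produces.  */
static int
sum_loop (int n, int acc)
{
  while (n != 0)
    {
      acc += n;
      n -= 1;
    }
  return acc;
}
Example No. 30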
static unsigned int
rename_ssa_copies (void)
{
  var_map map;
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree var, part_var;
  gimple stmt, phi;
  unsigned x;
  FILE *debug;
  bool updated = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    debug = dump_file;
  else
    debug = NULL;

  map = init_var_map (num_ssa_names);

  FOR_EACH_BB (bb)
    {
      /* Scan for real copies.  */
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  stmt = gsi_stmt (gsi);
	  if (gimple_assign_ssa_name_copy_p (stmt))
	    {
	      tree lhs = gimple_assign_lhs (stmt);
	      tree rhs = gimple_assign_rhs1 (stmt);

	      updated |= copy_rename_partition_coalesce (map, lhs, rhs, debug);
	    }
	}
    }

  FOR_EACH_BB (bb)
    {
      /* Treat PHI nodes as copies between the result and each argument.  */
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          size_t i;
	  tree res;

	  phi = gsi_stmt (gsi);
	  res = gimple_phi_result (phi);

	  /* Do not process virtual SSA_NAMES.  */
	  if (!is_gimple_reg (SSA_NAME_VAR (res)))
	    continue;

          for (i = 0; i < gimple_phi_num_args (phi); i++)
            {
              tree arg = gimple_phi_arg (phi, i)->def;
              if (TREE_CODE (arg) == SSA_NAME)
		updated |= copy_rename_partition_coalesce (map, res, arg, debug);
            }
        }
    }

  if (debug)
    dump_var_map (debug, map);

  /* Now one more pass to make all elements of a partition share the same
     root variable.  */

  for (x = 1; x < num_ssa_names; x++)
    {
      part_var = partition_to_var (map, x);
      if (!part_var)
        continue;
      var = ssa_name (x);
      if (debug)
        {
	  if (SSA_NAME_VAR (var) != SSA_NAME_VAR (part_var))
	    {
	      fprintf (debug, "Coalesced ");
	      print_generic_expr (debug, var, TDF_SLIM);
	      fprintf (debug, " to ");
	      print_generic_expr (debug, part_var, TDF_SLIM);
	      fprintf (debug, "\n");
	    }
	}
      replace_ssa_name_symbol (var, SSA_NAME_VAR (part_var));
    }

  delete_var_map (map);
  return updated ? TODO_remove_unused_locals : 0;
}