Example #1
static tree
adjust_return_value_with_ops (enum tree_code code, const char *label,
			      tree acc, tree op1, gimple_stmt_iterator gsi)
{

  tree ret_type = TREE_TYPE (DECL_RESULT (current_function_decl));
  tree result = make_temp_ssa_name (ret_type, NULL, label);
  gimple stmt;

  if (POINTER_TYPE_P (ret_type))
    {
      gcc_assert (code == PLUS_EXPR && TREE_TYPE (acc) == sizetype);
      code = POINTER_PLUS_EXPR;
    }
  if (types_compatible_p (TREE_TYPE (acc), TREE_TYPE (op1))
      && code != POINTER_PLUS_EXPR)
    stmt = gimple_build_assign_with_ops (code, result, acc, op1);
  else
    {
      tree tem;
      if (code == POINTER_PLUS_EXPR)
	tem = fold_build2 (code, TREE_TYPE (op1), op1, acc);
      else
	tem = fold_build2 (code, TREE_TYPE (op1),
			   fold_convert (TREE_TYPE (op1), acc), op1);
      tree rhs = fold_convert (ret_type, tem);
      rhs = force_gimple_operand_gsi (&gsi, rhs,
				      false, NULL, true, GSI_SAME_STMT);
      stmt = gimple_build_assign (result, rhs);
    }

  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
  return result;
}
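For orientation, a minimal sketch of the pattern the examples in this listing share, on GCC 4.x-era internals (an illustration under stated assumptions, not code from the excerpt above; the exact signatures of these helpers vary between GCC releases):

static tree
emit_plus_before (gimple_stmt_iterator *gsi, tree a, tree b)
{
  /* Build "tmp = a + b" and insert it before the statement GSI points at.  */
  tree tmp = make_temp_ssa_name (TREE_TYPE (a), NULL, "addtmp");
  gimple stmt = gimple_build_assign_with_ops (PLUS_EXPR, tmp, a, b);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return tmp;
}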
Example #2
tree
maybe_push_res_to_seq (code_helper rcode, tree type, tree *ops,
		       gimple_seq *seq, tree res)
{
  if (rcode.is_tree_code ())
    {
      if (!res
	  && (TREE_CODE_LENGTH ((tree_code) rcode) == 0
	      || ((tree_code) rcode) == ADDR_EXPR)
	  && is_gimple_val (ops[0]))
	return ops[0];
      if (!seq)
	return NULL_TREE;
      /* Play safe and do not allow abnormals to be mentioned in
         newly created statements.  */
      if ((TREE_CODE (ops[0]) == SSA_NAME
	   && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (ops[0]))
	  || (ops[1]
	      && TREE_CODE (ops[1]) == SSA_NAME
	      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (ops[1]))
	  || (ops[2]
	      && TREE_CODE (ops[2]) == SSA_NAME
	      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (ops[2])))
	return NULL_TREE;
      if (!res)
	res = make_ssa_name (type, NULL);
      maybe_build_generic_op (rcode, type, &ops[0], ops[1], ops[2]);
      gimple new_stmt = gimple_build_assign_with_ops (rcode, res,
						      ops[0], ops[1], ops[2]);
      gimple_seq_add_stmt_without_update (seq, new_stmt);
      return res;
    }
  else
    {
      if (!seq)
	return NULL_TREE;
      tree decl = builtin_decl_implicit (rcode);
      if (!decl)
	return NULL_TREE;
      unsigned nargs = type_num_arguments (TREE_TYPE (decl));
      gcc_assert (nargs <= 3);
      /* Play safe and do not allow abnormals to be mentioned in
         newly created statements.  */
      if ((TREE_CODE (ops[0]) == SSA_NAME
	   && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (ops[0]))
	  || (nargs >= 2
	      && TREE_CODE (ops[1]) == SSA_NAME
	      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (ops[1]))
	  || (nargs == 3
	      && TREE_CODE (ops[2]) == SSA_NAME
	      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (ops[2])))
	return NULL_TREE;
      if (!res)
	res = make_ssa_name (type, NULL);
      gimple new_stmt = gimple_build_call (decl, nargs, ops[0], ops[1], ops[2]);
      gimple_call_set_lhs (new_stmt, res);
      gimple_seq_add_stmt_without_update (seq, new_stmt);
      return res;
    }
}
Example #3
static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, int threshold)
{
  tree type;
  gimple new_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      && occ->num_divisions >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = make_rename_temp (type, "reciptmp");
      new_stmt = gimple_build_assign_with_ops (RDIV_EXPR, recip_def,
					       build_one_cst (type), def);
  
      if (occ->bb_has_division)
        {
          /* Case 1: insert before an existing division.  */
          gsi = gsi_after_labels (occ->bb);
          while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
	    gsi_next (&gsi);

          gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
        }
      else if (def_gsi && occ->bb == def_gsi->bb)
        {
          /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
          gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
        }
      else
        {
          /* Case 3: insert in a basic block not containing defs/uses.  */
          gsi = gsi_after_labels (occ->bb);
          gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
        }

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
}
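For context (an assumption about how this pass is exercised, not part of the excerpt): insert_reciprocals implements the reciprocal-CSE optimization in tree-ssa-math-opts, which under -freciprocal-math replaces repeated divisions by the same operand with one reciprocal and several multiplies. A tiny kernel it targets could look like:

/* With enough divisions by d, GCC emits reciptmp = 1.0 / d once and
   rewrites each division as a multiply by reciptmp.  */
void
scale2 (double *a, double *b, double d)
{
  *a = *a / d;
  *b = *b / d;
}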
Example #4
void
gimple_gen_edge_profiler (int edgeno, edge e)
{
  tree ref, one;
  gimple stmt1, stmt2, stmt3;

  /* We share one temporary variable declaration per function.  This
     gets re-set in tree_profiling.  */
  if (gcov_type_tmp_var == NULL_TREE)
    gcov_type_tmp_var = create_tmp_reg (gcov_type_node, "PROF_edge_counter");

  if (PROFILE_GEN_EDGE_ATOMIC)
    ref = tree_coverage_counter_addr (GCOV_COUNTER_ARCS, edgeno);
  else
    ref = tree_coverage_counter_ref (GCOV_COUNTER_ARCS, edgeno);

  one = build_int_cst (gcov_type_node, 1);
  if (PROFILE_GEN_EDGE_ATOMIC)
    {
      /* __atomic_fetch_add (&counter, 1, MEMMODEL_RELAXED); */
      stmt3 = gimple_build_call (builtin_decl_explicit (
                                   GCOV_TYPE_ATOMIC_FETCH_ADD),
                                 3, ref, one,
                                 build_int_cst (integer_type_node,
                                   MEMMODEL_RELAXED));
      find_referenced_vars_in (stmt3);
    }
  else
    {
      stmt1 = gimple_build_assign (gcov_type_tmp_var, ref);
      gimple_assign_set_lhs (stmt1, make_ssa_name (gcov_type_tmp_var, stmt1));
      find_referenced_vars_in (stmt1);
      stmt2 = gimple_build_assign_with_ops (PLUS_EXPR, gcov_type_tmp_var,
            				gimple_assign_lhs (stmt1), one);
      gimple_assign_set_lhs (stmt2, make_ssa_name (gcov_type_tmp_var, stmt2));
      stmt3 = gimple_build_assign (unshare_expr (ref), gimple_assign_lhs (stmt2));

      gsi_insert_on_edge (e, stmt1);
      gsi_insert_on_edge (e, stmt2);
    }
  gsi_insert_on_edge (e, stmt3);
}
Example #5
void
gimple_gen_edge_profiler (int edgeno, edge e)
{
  tree ref, one, gcov_type_tmp_var;
  gassign *stmt1, *stmt2, *stmt3;

  ref = tree_coverage_counter_ref (GCOV_COUNTER_ARCS, edgeno);
  one = build_int_cst (gcov_type_node, 1);
  gcov_type_tmp_var = make_temp_ssa_name (gcov_type_node,
					  NULL, "PROF_edge_counter");
  stmt1 = gimple_build_assign (gcov_type_tmp_var, ref);
  gcov_type_tmp_var = make_temp_ssa_name (gcov_type_node,
					  NULL, "PROF_edge_counter");
  stmt2 = gimple_build_assign_with_ops (PLUS_EXPR, gcov_type_tmp_var,
					gimple_assign_lhs (stmt1), one);
  stmt3 = gimple_build_assign (unshare_expr (ref), gimple_assign_lhs (stmt2));
  gsi_insert_on_edge (e, stmt1);
  gsi_insert_on_edge (e, stmt2);
  gsi_insert_on_edge (e, stmt3);
}
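Example #5 already uses the typed gassign * statements of newer GCC. On those releases gimple_build_assign_with_ops was, as far as I know, folded into overloads of gimple_build_assign that take a tree code directly, so the PLUS_EXPR statement above would typically be written as (a sketch assuming a GCC 5+ API):

  /* Newer-API equivalent of stmt2 above.  */
  gassign *stmt2 = gimple_build_assign (gcov_type_tmp_var, PLUS_EXPR,
					gimple_assign_lhs (stmt1), one);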
Example #6
static tree
update_accumulator_with_ops (enum tree_code code, tree acc, tree op1,
			     gimple_stmt_iterator gsi)
{
  gimple stmt;
  tree var = copy_ssa_name (acc, NULL);
  if (types_compatible_p (TREE_TYPE (acc), TREE_TYPE (op1)))
    stmt = gimple_build_assign_with_ops (code, var, acc, op1);
  else
    {
      tree rhs = fold_convert (TREE_TYPE (acc),
			       fold_build2 (code,
					    TREE_TYPE (op1),
					    fold_convert (TREE_TYPE (op1), acc),
					    op1));
      rhs = force_gimple_operand_gsi (&gsi, rhs,
				      false, NULL, false, GSI_CONTINUE_LINKING);
      stmt = gimple_build_assign (var, rhs);
    }
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
  return var;
}
void
gimple_gen_edge_profiler (int edgeno, edge e)
{
  tree ref, one;
  gimple stmt1, stmt2, stmt3;

  /* We share one temporary variable declaration per function.  This
     gets re-set in tree_profiling.  */
  if (gcov_type_tmp_var == NULL_TREE)
    gcov_type_tmp_var = create_tmp_reg (gcov_type_node, "PROF_edge_counter");
  ref = tree_coverage_counter_ref (GCOV_COUNTER_ARCS, edgeno);
  one = build_int_cst (gcov_type_node, 1);
  stmt1 = gimple_build_assign (gcov_type_tmp_var, ref);
  gimple_assign_set_lhs (stmt1, make_ssa_name (gcov_type_tmp_var, stmt1));
  stmt2 = gimple_build_assign_with_ops (PLUS_EXPR, gcov_type_tmp_var,
					gimple_assign_lhs (stmt1), one);
  gimple_assign_set_lhs (stmt2, make_ssa_name (gcov_type_tmp_var, stmt2));
  stmt3 = gimple_build_assign (unshare_expr (ref), gimple_assign_lhs (stmt2));
  gsi_insert_on_edge (e, stmt1);
  gsi_insert_on_edge (e, stmt2);
  gsi_insert_on_edge (e, stmt3);
}
Example #8
bool
aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
{
  bool changed = false;
  gimple stmt = gsi_stmt (*gsi);
  tree call = gimple_call_fn (stmt);
  tree fndecl;
  gimple new_stmt = NULL;

  if (call)
    {
      fndecl = gimple_call_fndecl (stmt);
      if (fndecl)
	{
	  int fcode = DECL_FUNCTION_CODE (fndecl);
	  int nargs = gimple_call_num_args (stmt);
	  tree *args = (nargs > 0
			? gimple_call_arg_ptr (stmt, 0)
			: &error_mark_node);

	  /* We use gimple's REDUC_(PLUS|MIN|MAX)_EXPRs for float, signed int
	     and unsigned int; it will distinguish according to the types of
	     the arguments to the __builtin.  */
	  switch (fcode)
	    {
	      BUILTIN_VALL (UNOP, reduc_plus_scal_, 10)
	        new_stmt = gimple_build_assign_with_ops (
						REDUC_PLUS_EXPR,
						gimple_call_lhs (stmt),
						args[0],
						NULL_TREE);
		break;
	      BUILTIN_VDQIF (UNOP, reduc_smax_scal_, 10)
	      BUILTIN_VDQ_BHSI (UNOPU, reduc_umax_scal_, 10)
		new_stmt = gimple_build_assign_with_ops (
						REDUC_MAX_EXPR,
						gimple_call_lhs (stmt),
						args[0],
						NULL_TREE);
		break;
	      BUILTIN_VDQIF (UNOP, reduc_smin_scal_, 10)
	      BUILTIN_VDQ_BHSI (UNOPU, reduc_umin_scal_, 10)
		new_stmt = gimple_build_assign_with_ops (
						REDUC_MIN_EXPR,
						gimple_call_lhs (stmt),
						args[0],
						NULL_TREE);
		break;

	    default:
	      break;
	    }
	}
    }

  if (new_stmt)
    {
      gsi_replace (gsi, new_stmt, true);
      changed = true;
    }

  return changed;
}
Example #9
static void
instrument_bool_enum_load (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree rhs = gimple_assign_rhs1 (stmt);
  tree type = TREE_TYPE (rhs);
  tree minv = NULL_TREE, maxv = NULL_TREE;

  if (TREE_CODE (type) == BOOLEAN_TYPE && (flag_sanitize & SANITIZE_BOOL))
    {
      minv = boolean_false_node;
      maxv = boolean_true_node;
    }
  else if (TREE_CODE (type) == ENUMERAL_TYPE
	   && (flag_sanitize & SANITIZE_ENUM)
	   && TREE_TYPE (type) != NULL_TREE
	   && TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE
	   && (TYPE_PRECISION (TREE_TYPE (type))
	       < GET_MODE_PRECISION (TYPE_MODE (type))))
    {
      minv = TYPE_MIN_VALUE (TREE_TYPE (type));
      maxv = TYPE_MAX_VALUE (TREE_TYPE (type));
    }
  else
    return;

  int modebitsize = GET_MODE_BITSIZE (TYPE_MODE (type));
  HOST_WIDE_INT bitsize, bitpos;
  tree offset;
  enum machine_mode mode;
  int volatilep = 0, unsignedp = 0;
  tree base = get_inner_reference (rhs, &bitsize, &bitpos, &offset, &mode,
				   &unsignedp, &volatilep, false);
  tree utype = build_nonstandard_integer_type (modebitsize, 1);

  if ((TREE_CODE (base) == VAR_DECL && DECL_HARD_REGISTER (base))
      || (bitpos % modebitsize) != 0
      || bitsize != modebitsize
      || GET_MODE_BITSIZE (TYPE_MODE (utype)) != modebitsize
      || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return;

  location_t loc = gimple_location (stmt);
  tree ptype = build_pointer_type (TREE_TYPE (rhs));
  tree atype = reference_alias_ptr_type (rhs);
  gimple g = gimple_build_assign (make_ssa_name (ptype, NULL),
				  build_fold_addr_expr (rhs));
  gimple_set_location (g, loc);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  tree mem = build2 (MEM_REF, utype, gimple_assign_lhs (g),
		     build_int_cst (atype, 0));
  tree urhs = make_ssa_name (utype, NULL);
  g = gimple_build_assign (urhs, mem);
  gimple_set_location (g, loc);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  minv = fold_convert (utype, minv);
  maxv = fold_convert (utype, maxv);
  if (!integer_zerop (minv))
    {
      g = gimple_build_assign_with_ops (MINUS_EXPR,
					make_ssa_name (utype, NULL),
					urhs, minv);
      gimple_set_location (g, loc);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  gimple_stmt_iterator gsi2 = *gsi;
  basic_block then_bb, fallthru_bb;
  *gsi = create_cond_insert_point (gsi, true, false, true,
				   &then_bb, &fallthru_bb);
  g = gimple_build_cond (GT_EXPR, gimple_assign_lhs (g),
			 int_const_binop (MINUS_EXPR, maxv, minv),
			 NULL_TREE, NULL_TREE);
  gimple_set_location (g, loc);
  gsi_insert_after (gsi, g, GSI_NEW_STMT);

  gimple_assign_set_rhs_with_ops (&gsi2, NOP_EXPR, urhs, NULL_TREE);
  update_stmt (stmt);

  gsi2 = gsi_after_labels (then_bb);
  if (flag_sanitize_undefined_trap_on_error)
    g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
  else
    {
      tree data = ubsan_create_data ("__ubsan_invalid_value_data", &loc, NULL,
				     ubsan_type_descriptor (type), NULL_TREE);
      data = build_fold_addr_expr_loc (loc, data);
      enum built_in_function bcode
	= flag_sanitize_recover
	  ? BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE
	  : BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE_ABORT;
      tree fn = builtin_decl_explicit (bcode);

      tree val = force_gimple_operand_gsi (&gsi2, ubsan_encode_value (urhs),
					   true, NULL_TREE, true,
					   GSI_SAME_STMT);
      g = gimple_build_call (fn, 2, data, val);
    }
  gimple_set_location (g, loc);
  gsi_insert_before (&gsi2, g, GSI_SAME_STMT);
}
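Reading the control flow above, the instrumentation corresponds roughly to the following source-level check (a sketch of the intent only; names such as p, b and data are placeholders, and the real code is emitted as GIMPLE rather than C):

/* Rough picture for "bool b = *p;" under -fsanitize=bool.  */
unsigned char tmp = *(unsigned char *) p;          /* reload in the mode's width */
if (tmp > 1)                                       /* outside the valid range [0, 1] */
  __ubsan_handle_load_invalid_value (&data, tmp);  /* or __builtin_trap () */
b = (_Bool) tmp;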
Example #10
static gimple
vect_recog_widen_sum_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  gimple stmt;
  tree oprnd0, oprnd1;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);
  tree var;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE) X;
          sum_1 = DX + sum_0;
     In which DX is at least double the size of X, and sum_1 has been
     recognized as a reduction variable.
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* So far so good. Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a cast from type 'type' to type
     'TYPE'.  */

  if (!widened_name_p (oprnd0, last_stmt, &half_type, &stmt))
    return NULL;

  oprnd0 = gimple_assign_rhs1 (stmt);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected. Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var,
					       oprnd0, oprnd1);
  SSA_NAME_DEF_STMT (var) = pattern_stmt;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_recog_widen_sum_pattern: detected: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));

  return pattern_stmt;
}
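A source loop this pattern matcher is meant to catch (an illustrative guess at a matching input): the cast from the narrow element type feeds the reduction add, so the vectorizer can use WIDEN_SUM_EXPR instead of widening every element first.

int
sum_bytes (const signed char *p, int n)
{
  int sum = 0;
  for (int i = 0; i < n; i++)
    sum += p[i];   /* IL shape: DX = (int) p[i];  sum_1 = DX + sum_0;  */
  return sum;
}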
Example #11
static gimple
vect_recog_pow_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  tree fn, base, exp = NULL;
  gimple stmt;
  tree var;

  if (!is_gimple_call (last_stmt) || gimple_call_lhs (last_stmt) == NULL)
    return NULL;

  fn = gimple_call_fndecl (last_stmt);
  if (fn == NULL_TREE || DECL_BUILT_IN_CLASS (fn) != BUILT_IN_NORMAL)
   return NULL;

  switch (DECL_FUNCTION_CODE (fn))
    {
    case BUILT_IN_POWIF:
    case BUILT_IN_POWI:
    case BUILT_IN_POWF:
    case BUILT_IN_POW:
      base = gimple_call_arg (last_stmt, 0);
      exp = gimple_call_arg (last_stmt, 1);
      if (TREE_CODE (exp) != REAL_CST
	  && TREE_CODE (exp) != INTEGER_CST)
        return NULL;
      break;

    default:
      return NULL;
    }

  /* We now have a pow or powi builtin function call with a constant
     exponent.  */

  *type_out = NULL_TREE;

  /* Catch squaring.  */
  if ((host_integerp (exp, 0)
       && tree_low_cst (exp, 0) == 2)
      || (TREE_CODE (exp) == REAL_CST
          && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconst2)))
    {
      *type_in = TREE_TYPE (base);

      var = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
      stmt = gimple_build_assign_with_ops (MULT_EXPR, var, base, base);
      SSA_NAME_DEF_STMT (var) = stmt;
      return stmt;
    }

  /* Catch square root.  */
  if (TREE_CODE (exp) == REAL_CST
      && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconsthalf))
    {
      tree newfn = mathfn_built_in (TREE_TYPE (base), BUILT_IN_SQRT);
      *type_in = get_vectype_for_scalar_type (TREE_TYPE (base));
      if (*type_in)
	{
	  gimple stmt = gimple_build_call (newfn, 1, base);
	  if (vectorizable_function (stmt, *type_in, *type_in)
	      != NULL_TREE)
	    {
	      var = vect_recog_temp_ssa_var (TREE_TYPE (base), stmt);
	      gimple_call_set_lhs (stmt, var);
	      return stmt;
	    }
	}
    }

  return NULL;
}
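The two special cases handled above correspond to pow calls with a literal exponent, roughly like the following (a hedged illustration, not taken from GCC's testsuite): an exponent of 2 is strength-reduced to a multiply and 0.5 to a sqrt call, both of which vectorize where the pow call itself might not.

#include <math.h>

void
square_and_root (double *x, double *y, int n)
{
  for (int i = 0; i < n; i++)
    {
      x[i] = pow (x[i], 2.0);   /* recognized as x[i] * x[i] */
      y[i] = pow (y[i], 0.5);   /* recognized as sqrt (y[i]) */
    }
}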
Example #12
static gimple
vect_recog_widen_mult_pattern (gimple last_stmt,
			       tree *type_in,
			       tree *type_out)
{
  gimple def_stmt0, def_stmt1;
  tree oprnd0, oprnd1;
  tree type, half_type0, half_type1;
  gimple pattern_stmt;
  tree vectype, vectype_out;
  tree dummy;
  tree var;
  enum tree_code dummy_code;
  int dummy_int;
  VEC (tree, heap) *dummy_vec;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != MULT_EXPR)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* Check argument 0 */
  if (!widened_name_p (oprnd0, last_stmt, &half_type0, &def_stmt0))
    return NULL;
  oprnd0 = gimple_assign_rhs1 (def_stmt0);

  /* Check argument 1 */
  if (!widened_name_p (oprnd1, last_stmt, &half_type1, &def_stmt1))
    return NULL;
  oprnd1 = gimple_assign_rhs1 (def_stmt1);

  if (!types_compatible_p (half_type0, half_type1))
    return NULL;

  /* Pattern detected.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vect_recog_widen_mult_pattern: detected: ");

  /* Check target support  */
  vectype = get_vectype_for_scalar_type (half_type0);
  vectype_out = get_vectype_for_scalar_type (type);
  if (!vectype
      || !vectype_out
      || !supportable_widening_operation (WIDEN_MULT_EXPR, last_stmt,
					  vectype_out, vectype,
					  &dummy, &dummy, &dummy_code,
					  &dummy_code, &dummy_int, &dummy_vec))
    return NULL;

  *type_in = vectype;
  *type_out = vectype_out;

  /* Pattern supported. Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_MULT_EXPR, var, oprnd0,
					       oprnd1);
  SSA_NAME_DEF_STMT (var) = pattern_stmt;

  if (vect_print_dump_info (REPORT_DETAILS))
    print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);

  return pattern_stmt;
}
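For completeness, the statement shape vect_recog_widen_mult_pattern looks for (an assumed example; the essential point is that both multiplicands are widened from the same narrower type):

void
widen_mult (int *out, const short *a, const short *b, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = (int) a[i] * (int) b[i];   /* candidate for WIDEN_MULT_EXPR */
}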
static bool
abs_replacement (basic_block cond_bb, basic_block middle_bb,
                 edge e0 ATTRIBUTE_UNUSED, edge e1,
                 gimple phi, tree arg0, tree arg1)
{
    tree result;
    gimple new_stmt, cond;
    gimple_stmt_iterator gsi;
    edge true_edge, false_edge;
    gimple assign;
    edge e;
    tree rhs, lhs;
    bool negate;
    enum tree_code cond_code;

    /* If the type says honor signed zeros we cannot do this
       optimization.  */
    if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
        return false;

    /* OTHER_BLOCK must have only one executable statement which must have the
       form arg0 = -arg1 or arg1 = -arg0.  */

    assign = last_and_only_stmt (middle_bb);
    /* If we did not find the proper negation assignment, then we can not
       optimize.  */
    if (assign == NULL)
        return false;

    /* If we got here, then we have found the only executable statement
       in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
       arg1 = -arg0, then we can not optimize.  */
    if (gimple_code (assign) != GIMPLE_ASSIGN)
        return false;

    lhs = gimple_assign_lhs (assign);

    if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
        return false;

    rhs = gimple_assign_rhs1 (assign);

    /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
    if (!(lhs == arg0 && rhs == arg1)
            && !(lhs == arg1 && rhs == arg0))
        return false;

    cond = last_stmt (cond_bb);
    result = PHI_RESULT (phi);

    /* Only relationals comparing arg[01] against zero are interesting.  */
    cond_code = gimple_cond_code (cond);
    if (cond_code != GT_EXPR && cond_code != GE_EXPR
            && cond_code != LT_EXPR && cond_code != LE_EXPR)
        return false;

    /* Make sure the conditional is arg[01] OP y.  */
    if (gimple_cond_lhs (cond) != rhs)
        return false;

    if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
            ? real_zerop (gimple_cond_rhs (cond))
            : integer_zerop (gimple_cond_rhs (cond)))
        ;
    else
        return false;

    /* We need to know which is the true edge and which is the false
       edge so that we know if we have abs or negative abs.  */
    extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

    /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
       will need to negate the result.  Similarly for LT_EXPR/LE_EXPR if
       the false edge goes to OTHER_BLOCK.  */
    if (cond_code == GT_EXPR || cond_code == GE_EXPR)
        e = true_edge;
    else
        e = false_edge;

    if (e->dest == middle_bb)
        negate = true;
    else
        negate = false;

    result = duplicate_ssa_name (result, NULL);

    if (negate)
    {
        tree tmp = create_tmp_var (TREE_TYPE (result), NULL);
        add_referenced_var (tmp);
        lhs = make_ssa_name (tmp, NULL);
    }
    else
        lhs = result;

    /* Build the modify expression with abs expression.  */
    new_stmt = gimple_build_assign_with_ops (ABS_EXPR, lhs, rhs, NULL);

    gsi = gsi_last_bb (cond_bb);
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

    if (negate)
    {
        /* Get the right GSI.  We want to insert after the recently
           added ABS_EXPR statement (which we know is the first statement
           in the block).  */
        new_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, result, lhs, NULL);

        gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }

    replace_phi_edge_with_variable (cond_bb, e1, phi, result);

    /* Note that we optimized this PHI.  */
    return true;
}
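The source shape this phiopt transformation rewrites, for orientation (a sketch; for floating-point types it additionally requires signed zeros to be ignorable, per the HONOR_SIGNED_ZEROS check above):

int
my_abs (int a)
{
  int r;
  if (a < 0)
    r = -a;        /* middle block: arg0 = -arg1 */
  else
    r = a;
  return r;        /* PHI <-a, a> is rewritten as r = ABS_EXPR <a> */
}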
static bool
minmax_replacement (basic_block cond_bb, basic_block middle_bb,
                    edge e0, edge e1, gimple phi,
                    tree arg0, tree arg1)
{
    tree result, type;
    gimple cond, new_stmt;
    edge true_edge, false_edge;
    enum tree_code cmp, minmax, ass_code;
    tree smaller, larger, arg_true, arg_false;
    gimple_stmt_iterator gsi, gsi_from;

    type = TREE_TYPE (PHI_RESULT (phi));

    /* The optimization may be unsafe due to NaNs.  */
    if (HONOR_NANS (TYPE_MODE (type)))
        return false;

    cond = last_stmt (cond_bb);
    cmp = gimple_cond_code (cond);
    result = PHI_RESULT (phi);

    /* This transformation is only valid for order comparisons.  Record which
       operand is smaller/larger if the result of the comparison is true.  */
    if (cmp == LT_EXPR || cmp == LE_EXPR)
    {
        smaller = gimple_cond_lhs (cond);
        larger = gimple_cond_rhs (cond);
    }
    else if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
        smaller = gimple_cond_rhs (cond);
        larger = gimple_cond_lhs (cond);
    }
    else
        return false;

    /* We need to know which is the true edge and which is the false
       edge so that we know if we have abs or negative abs.  */
    extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

    /* Forward the edges over the middle basic block.  */
    if (true_edge->dest == middle_bb)
        true_edge = EDGE_SUCC (true_edge->dest, 0);
    if (false_edge->dest == middle_bb)
        false_edge = EDGE_SUCC (false_edge->dest, 0);

    if (true_edge == e0)
    {
        gcc_assert (false_edge == e1);
        arg_true = arg0;
        arg_false = arg1;
    }
    else
    {
        gcc_assert (false_edge == e0);
        gcc_assert (true_edge == e1);
        arg_true = arg1;
        arg_false = arg0;
    }

    if (empty_block_p (middle_bb))
    {
        if (operand_equal_for_phi_arg_p (arg_true, smaller)
                && operand_equal_for_phi_arg_p (arg_false, larger))
        {
            /* Case

               if (smaller < larger)
               rslt = smaller;
               else
               rslt = larger;  */
            minmax = MIN_EXPR;
        }
        else if (operand_equal_for_phi_arg_p (arg_false, smaller)
                 && operand_equal_for_phi_arg_p (arg_true, larger))
            minmax = MAX_EXPR;
        else
            return false;
    }
    else
    {
        /* Recognize the following case, assuming d <= u:

        if (a <= u)
           b = MAX (a, d);
         x = PHI <b, u>

         This is equivalent to

         b = MAX (a, d);
         x = MIN (b, u);  */

        gimple assign = last_and_only_stmt (middle_bb);
        tree lhs, op0, op1, bound;

        if (!assign
                || gimple_code (assign) != GIMPLE_ASSIGN)
            return false;

        lhs = gimple_assign_lhs (assign);
        ass_code = gimple_assign_rhs_code (assign);
        if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
            return false;
        op0 = gimple_assign_rhs1 (assign);
        op1 = gimple_assign_rhs2 (assign);

        if (true_edge->src == middle_bb)
        {
            /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
            if (!operand_equal_for_phi_arg_p (lhs, arg_true))
                return false;

            if (operand_equal_for_phi_arg_p (arg_false, larger))
            {
                /* Case

                if (smaller < larger)
                   {
                     r' = MAX_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
                if (ass_code != MAX_EXPR)
                    return false;

                minmax = MIN_EXPR;
                if (operand_equal_for_phi_arg_p (op0, smaller))
                    bound = op1;
                else if (operand_equal_for_phi_arg_p (op1, smaller))
                    bound = op0;
                else
                    return false;

                /* We need BOUND <= LARGER.  */
                if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                    bound, larger)))
                    return false;
            }
            else if (operand_equal_for_phi_arg_p (arg_false, smaller))
            {
                /* Case

                if (smaller < larger)
                   {
                     r' = MIN_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
                if (ass_code != MIN_EXPR)
                    return false;

                minmax = MAX_EXPR;
                if (operand_equal_for_phi_arg_p (op0, larger))
                    bound = op1;
                else if (operand_equal_for_phi_arg_p (op1, larger))
                    bound = op0;
                else
                    return false;

                /* We need BOUND >= SMALLER.  */
                if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                    bound, smaller)))
                    return false;
            }
            else
                return false;
        }
        else
        {
            /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
            if (!operand_equal_for_phi_arg_p (lhs, arg_false))
                return false;

            if (operand_equal_for_phi_arg_p (arg_true, larger))
            {
                /* Case

                if (smaller > larger)
                   {
                     r' = MIN_EXPR (smaller, bound)
                   }
                 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
                if (ass_code != MIN_EXPR)
                    return false;

                minmax = MAX_EXPR;
                if (operand_equal_for_phi_arg_p (op0, smaller))
                    bound = op1;
                else if (operand_equal_for_phi_arg_p (op1, smaller))
                    bound = op0;
                else
                    return false;

                /* We need BOUND >= LARGER.  */
                if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
                                                    bound, larger)))
                    return false;
            }
            else if (operand_equal_for_phi_arg_p (arg_true, smaller))
            {
                /* Case

                if (smaller > larger)
                   {
                     r' = MAX_EXPR (larger, bound)
                   }
                 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
                if (ass_code != MAX_EXPR)
                    return false;

                minmax = MIN_EXPR;
                if (operand_equal_for_phi_arg_p (op0, larger))
                    bound = op1;
                else if (operand_equal_for_phi_arg_p (op1, larger))
                    bound = op0;
                else
                    return false;

                /* We need BOUND <= SMALLER.  */
                if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
                                                    bound, smaller)))
                    return false;
            }
            else
                return false;
        }

        /* Move the statement from the middle block.  */
        gsi = gsi_last_bb (cond_bb);
        gsi_from = gsi_last_bb (middle_bb);
        gsi_move_before (&gsi_from, &gsi);
    }

    /* Emit the statement to compute min/max.  */
    result = duplicate_ssa_name (PHI_RESULT (phi), NULL);
    new_stmt = gimple_build_assign_with_ops (minmax, result, arg0, arg1);
    gsi = gsi_last_bb (cond_bb);
    gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

    replace_phi_edge_with_variable (cond_bb, e1, phi, result);
    return true;
}
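The simplest input minmax_replacement handles, as a sketch (floating-point versions additionally need NaNs to be ignorable, per the HONOR_NANS check above):

int
my_min (int a, int b)
{
  int r;
  if (a < b)
    r = a;
  else
    r = b;
  return r;        /* PHI <a, b> is rewritten as r = MIN_EXPR <a, b> */
}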
static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
                         edge e0, edge e1, gimple phi,
                         tree arg0, tree arg1)
{
    tree result;
    gimple stmt, new_stmt;
    tree cond;
    gimple_stmt_iterator gsi;
    edge true_edge, false_edge;
    tree new_var, new_var2;

    /* FIXME: Gimplification of complex type is too hard for now.  */
    if (TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
            || TREE_CODE (TREE_TYPE (arg1)) == COMPLEX_TYPE)
        return false;

    /* If the PHI arguments are the constants 0 and 1, convert the PHI
       into the conditional.  */
    if ((integer_zerop (arg0) && integer_onep (arg1))
            || (integer_zerop (arg1) && integer_onep (arg0)))
        ;
    else
        return false;

    if (!empty_block_p (middle_bb))
        return false;

    /* At this point we know we have a GIMPLE_COND with two successors.
       One successor is BB, the other successor is an empty block which
       falls through into BB.

       There is a single PHI node at the join point (BB) and its arguments
       are constants (0, 1).

       So, given the condition COND, and the two PHI arguments, we can
       rewrite this PHI into non-branching code:

         dest = (COND) or dest = COND'

       We use the condition as-is if the argument associated with the
       true edge has the value one or the argument associated with the
       false edge has the value zero.  Note that those conditions are not
       the same since only one of the outgoing edges from the GIMPLE_COND
       will directly reach BB and thus be associated with an argument.  */

    stmt = last_stmt (cond_bb);
    result = PHI_RESULT (phi);

    /* To handle special cases like floating point comparison, it is easier and
       less error-prone to build a tree and gimplify it on the fly though it is
       less efficient.  */
    cond = fold_build2 (gimple_cond_code (stmt), boolean_type_node,
                        gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

    /* We need to know which is the true edge and which is the false
       edge so that we know when to invert the condition below.  */
    extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
    if ((e0 == true_edge && integer_zerop (arg0))
            || (e0 == false_edge && integer_onep (arg0))
            || (e1 == true_edge && integer_zerop (arg1))
            || (e1 == false_edge && integer_onep (arg1)))
        cond = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

    /* Insert our new statements at the end of conditional block before the
       COND_STMT.  */
    gsi = gsi_for_stmt (stmt);
    new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
                                        GSI_SAME_STMT);

    if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
        new_var2 = create_tmp_var (TREE_TYPE (result), NULL);
        add_referenced_var (new_var2);
        new_stmt = gimple_build_assign_with_ops (CONVERT_EXPR, new_var2,
                   new_var, NULL);
        new_var2 = make_ssa_name (new_var2, new_stmt);
        gimple_assign_set_lhs (new_stmt, new_var2);
        gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
        new_var = new_var2;
    }

    replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);

    /* Note that we optimized this PHI.  */
    return true;
}
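And the canonical input for conditional_replacement, again as a sketch: a PHI of the constants 0 and 1 is replaced by the comparison itself, converted to the PHI's type when necessary.

int
is_negative (int a)
{
  int r;
  if (a < 0)
    r = 1;
  else
    r = 0;
  return r;        /* becomes r = (a < 0), possibly via a conversion */
}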
Example #16
static void insert_call_to_junk_fn(gimple stmt)
{
    tree tv, rv, fn, rhs, tmp;
    gimple gimp;
    gimple_stmt_iterator gsi;
    static bool has_initted;
    static tree decl, proto, decl_get_funcs, proto_get_funcs, fn_ptr_type;
    
    printf("slimer: Inserting junk function call at line: %d\n",
            gimple_lineno(stmt));

    /* Get random value modulo n_funcs for index into runtime __funcs array of
     * junk functions:
     *     rv = time % n_funcs;
     *     fn = __funcs + rv;
     *     call fn
     */

    /* Build instances */
    if (!has_initted)
    {
        proto = build_function_type_list(
            uint64_type_node, ptr_type_node, NULL_TREE);
        decl = build_fn_decl("time", proto);
        DECL_EXTERNAL(decl) = 1;

        proto_get_funcs = build_function_type_list(ptr_type_node, NULL_TREE);
        decl_get_funcs = build_fn_decl("__slimer_get_funcs", proto_get_funcs);

        fn_ptr_type = build_function_type_list(
            void_type_node, void_type_node, NULL_TREE);

        has_initted = true;
    }

    /* time_tmp = time(NULL); */
    tv = create_tmp_var(uint64_type_node, "time_tmp");
    tv = make_ssa_name(tv, NULL);
    gimp = gimple_build_call(decl, 1, null_pointer_node);
    gimple_set_lhs(gimp, tv);
    gsi = gsi_for_stmt(stmt);
    gsi_insert_before(&gsi, gimp, GSI_SAME_STMT);

    /* rv_tmp = time_tmp % n_funcs */
    rv = create_tmp_var(uint64_type_node, "rv_tmp");
    rv = make_ssa_name(rv, NULL);
    rhs = build_int_cst(integer_type_node, n_funcs);
    gimp = gimple_build_assign_with_ops(TRUNC_MOD_EXPR, rv, tv, rhs);
    gsi_insert_before(&gsi, gimp, GSI_SAME_STMT);

    /* tmp = __slimer_get_funcs(); TODO: Get rid of __slimer_get_funcs()
     * rv = rv * sizeof(void *)
     * fn_tmp = tmp + rv
     */
    tree pp_type = build_pointer_type(ptr_type_node);
    tmp = create_tmp_var(pp_type, "tmp");
    tmp = make_ssa_name(tmp, NULL);
    gimp = gimple_build_call(decl_get_funcs, 0);
    gimple_set_lhs(gimp, tmp);
    gsi_insert_before(&gsi, gimp, GSI_SAME_STMT);

    /* rv = rv * sizeof(void *)
     * FIXME: THIS IS NOT SUFFICIENT FOR CROSS COMPILING FOR ARCHITECTURES THAT
     * HAVE ADDRESS SIZES OTHER THAN sizeof(void *)
     */
    tree addr_size = build_int_cst(integer_type_node, sizeof(void *));
    gimp = gimple_build_assign_with_ops(MULT_EXPR, rv, rv, addr_size);
    gsi_insert_before(&gsi, gimp, GSI_SAME_STMT);

    fn = create_tmp_var(pp_type, "fn_tmp");
    fn = make_ssa_name(fn, NULL);
    gimp = gimple_build_assign_with_ops(PLUS_EXPR, fn, tmp, rv);
    gsi_insert_before(&gsi, gimp, GSI_SAME_STMT);

    /* the_fn = *fn */
    tree f = build_pointer_type(fn_ptr_type); 
    tree the_fn = create_tmp_var(f, "the_func_ptr");
    the_fn = make_ssa_name(the_fn, NULL);
    gimp = gimple_build_assign(the_fn, build_simple_mem_ref(fn));
    gsi_insert_before(&gsi, gimp, GSI_SAME_STMT);

    /* call the_fn */
    gimp = gimple_build_call(the_fn, 0);
    gsi_insert_before(&gsi, gimp, GSI_SAME_STMT);

#ifdef GOAT_DEBUG
    debug_function(cfun->decl, 0);
#endif
}