Example #1
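/* Adjust *COND_CODE and *N2 so that the loop condition is one of
   LT_EXPR, GT_EXPR or NE_EXPR: LE_EXPR is rewritten as LT_EXPR
   against N2 + 1, and GE_EXPR as GT_EXPR against N2 - 1.  */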
void
omp_adjust_for_condition (location_t loc, enum tree_code *cond_code, tree *n2)
{
  switch (*cond_code)
    {
    case LT_EXPR:
    case GT_EXPR:
    case NE_EXPR:
      break;
    case LE_EXPR:
      if (POINTER_TYPE_P (TREE_TYPE (*n2)))
	*n2 = fold_build_pointer_plus_hwi_loc (loc, *n2, 1);
      else
	*n2 = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (*n2), *n2,
			       build_int_cst (TREE_TYPE (*n2), 1));
      *cond_code = LT_EXPR;
      break;
    case GE_EXPR:
      if (POINTER_TYPE_P (TREE_TYPE (*n2)))
	*n2 = fold_build_pointer_plus_hwi_loc (loc, *n2, -1);
      else
	*n2 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (*n2), *n2,
			       build_int_cst (TREE_TYPE (*n2), 1));
      *cond_code = GT_EXPR;
      break;
    default:
      gcc_unreachable ();
    }
}
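
The normalization above only touches the comparison operands, so on plain integers it reduces to adding or subtracting one. A minimal stand-alone sketch in C of the same rewrite (the enum and helper name are hypothetical, not GCC APIs):

#include <assert.h>

enum cond_code { LT, GT, NE, LE, GE };

/* Rewrite "i <= n2" as "i < n2 + 1" and "i >= n2" as "i > n2 - 1",
   so that later phases only need to handle <, > and !=.  */
static void
adjust_for_condition (enum cond_code *cond, long *n2)
{
  switch (*cond)
    {
    case LT:
    case GT:
    case NE:
      break;
    case LE:
      *n2 += 1;		/* i <= n2  ==>  i < n2 + 1 */
      *cond = LT;
      break;
    case GE:
      *n2 -= 1;		/* i >= n2  ==>  i > n2 - 1 */
      *cond = GT;
      break;
    }
}

int
main (void)
{
  enum cond_code cond = LE;
  long n2 = 9;
  adjust_for_condition (&cond, &n2);
  assert (cond == LT && n2 == 10);	/* i <= 9 became i < 10.  */
  return 0;
}
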
Example #2
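/* Instrument the memory reference *TP, located at LOCATION, for a
   mudflap bounds check: compute the inclusive [BASE, LIMIT] address
   range of the accessed object and emit a check before the statement
   at ITER.  DIRFLAG distinguishes reads (integer_zero_node) from
   writes.  */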
static void
mf_xform_derefs_1 (gimple_stmt_iterator *iter, tree *tp,
                   location_t location, tree dirflag)
{
  tree type, base, limit, addr, size, t;

  /* Don't instrument read operations.  */
  if (dirflag == integer_zero_node && flag_mudflap_ignore_reads)
    return;

  /* Don't instrument marked nodes.  */
  if (mf_marked_p (*tp))
    return;

  t = *tp;
  type = TREE_TYPE (t);

  if (type == error_mark_node)
    return;

  size = TYPE_SIZE_UNIT (type);

  switch (TREE_CODE (t))
    {
    case ARRAY_REF:
    case COMPONENT_REF:
      {
        /* This is trickier than it may first appear.  The reason is
           that we are looking at expressions from the "inside out" at
           this point.  We may have a complex nested aggregate/array
           expression (e.g. "a.b[i].c"), maybe with an indirection as
           the leftmost operator ("p->a.b.d"), where instrumentation
           is necessary.  Or we may have an innocent "a.b.c"
           expression that must not be instrumented.  We need to
           recurse all the way down the nesting structure to figure it
           out: looking just at the outer node is not enough.  */
        tree var;
        int component_ref_only = (TREE_CODE (t) == COMPONENT_REF);
	/* If we have a bitfield component reference, we must note the
	   innermost addressable object in ELT, from which we will
	   construct the byte-addressable bounds of the bitfield.  */
	tree elt = NULL_TREE;
	int bitfield_ref_p = (TREE_CODE (t) == COMPONENT_REF
			      && DECL_BIT_FIELD_TYPE (TREE_OPERAND (t, 1)));

        /* Iterate to the top of the ARRAY_REF/COMPONENT_REF
           containment hierarchy to find the outermost VAR_DECL.  */
        var = TREE_OPERAND (t, 0);
        while (1)
          {
	    if (bitfield_ref_p && elt == NULL_TREE
		&& (TREE_CODE (var) == ARRAY_REF
		    || TREE_CODE (var) == COMPONENT_REF))
	      elt = var;

            if (TREE_CODE (var) == ARRAY_REF)
              {
                component_ref_only = 0;
                var = TREE_OPERAND (var, 0);
              }
            else if (TREE_CODE (var) == COMPONENT_REF)
              var = TREE_OPERAND (var, 0);
            else if (INDIRECT_REF_P (var)
		     || TREE_CODE (var) == MEM_REF)
              {
		base = TREE_OPERAND (var, 0);
                break;
              }
            else if (TREE_CODE (var) == VIEW_CONVERT_EXPR)
	      {
		var = TREE_OPERAND (var, 0);
		if (CONSTANT_CLASS_P (var)
		    && TREE_CODE (var) != STRING_CST)
		  return;
	      }
            else
              {
                gcc_assert (TREE_CODE (var) == VAR_DECL
                            || TREE_CODE (var) == PARM_DECL
                            || TREE_CODE (var) == RESULT_DECL
                            || TREE_CODE (var) == STRING_CST);
                /* Don't instrument this access if the underlying
                   variable is not "eligible".  This test matches
                   those arrays that have only known-valid indexes,
                   and thus are not labeled TREE_ADDRESSABLE.  */
                if (! mf_decl_eligible_p (var) || component_ref_only)
                  return;
                else
		  {
		    base = build1 (ADDR_EXPR,
				   build_pointer_type (TREE_TYPE (var)), var);
		    break;
		  }
              }
          }

        /* Handle the case of ordinary non-indirection structure
           accesses.  These have only nested COMPONENT_REF nodes (no
           INDIRECT_REF), but pass through the above filter loop.
           Note that it's possible for such a struct variable to match
           the eligible_p test because someone else might take its
           address sometime.  */

        /* We need special processing for bitfield components, because
           their addresses cannot be taken.  */
        if (bitfield_ref_p)
          {
            tree field = TREE_OPERAND (t, 1);

            if (TREE_CODE (DECL_SIZE_UNIT (field)) == INTEGER_CST)
              size = DECL_SIZE_UNIT (field);

	    if (elt)
	      elt = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (elt)),
			    elt);
            addr = fold_convert_loc (location, ptr_type_node, elt ? elt : base);
            addr = fold_build_pointer_plus_loc (location,
						addr, byte_position (field));
          }
        else
          addr = build1 (ADDR_EXPR, build_pointer_type (type), t);

        limit = fold_build2_loc (location, MINUS_EXPR, mf_uintptr_type,
                             fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type,
					  fold_convert (mf_uintptr_type, addr),
					  size),
                             integer_one_node);
      }
      break;

    case INDIRECT_REF:
      addr = TREE_OPERAND (t, 0);
      base = addr;
      limit = fold_build_pointer_plus_hwi_loc
	(location, fold_build_pointer_plus_loc (location, base, size), -1);
      break;

    case MEM_REF:
      if (addr_expr_of_non_mem_decl_p (TREE_OPERAND (t, 0)))
	return;

      addr = fold_build_pointer_plus_loc (location, TREE_OPERAND (t, 0),
					  TREE_OPERAND (t, 1));
      base = addr;
      limit = fold_build_pointer_plus_hwi_loc (location,
			   fold_build_pointer_plus_loc (location,
							base, size), -1);
      break;

    case TARGET_MEM_REF:
      if (addr_expr_of_non_mem_decl_p (TMR_BASE (t)))
	return;

      addr = tree_mem_ref_addr (ptr_type_node, t);
      base = addr;
      limit = fold_build_pointer_plus_hwi_loc (location,
			   fold_build_pointer_plus_loc (location,
							base, size), -1);
      break;

    case ARRAY_RANGE_REF:
      warning (OPT_Wmudflap,
	       "mudflap checking not yet implemented for ARRAY_RANGE_REF");
      return;

    case BIT_FIELD_REF:
      /* ??? merge with COMPONENT_REF code above? */
      {
        tree ofs, rem, bpu;

        /* If we're not dereferencing something, then the access
           must be ok.  */
        if (TREE_CODE (TREE_OPERAND (t, 0)) != INDIRECT_REF)
          return;

        bpu = bitsize_int (BITS_PER_UNIT);
        ofs = fold_convert (bitsizetype, TREE_OPERAND (t, 2));
        rem = size_binop_loc (location, TRUNC_MOD_EXPR, ofs, bpu);
        ofs = size_binop_loc (location, TRUNC_DIV_EXPR, ofs, bpu);

        size = fold_convert (bitsizetype, TREE_OPERAND (t, 1));
        size = size_binop_loc (location, PLUS_EXPR, size, rem);
        size = size_binop_loc (location, CEIL_DIV_EXPR, size, bpu);
        size = fold_convert (sizetype, size);

        addr = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
        addr = fold_convert (ptr_type_node, addr);
        addr = fold_build_pointer_plus_loc (location, addr, ofs);

        base = addr;
        limit = fold_build_pointer_plus_hwi_loc (location,
                             fold_build_pointer_plus_loc (location,
							  base, size), -1);
      }
      break;

    default:
      return;
    }

  mf_build_check_statement_for (base, limit, iter, location, dirflag);
}
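
For the plain dereference cases the checked range is simply the inclusive byte interval of the access, and the BIT_FIELD_REF case rounds a bit range up to whole bytes. A minimal stand-alone model in C, with hypothetical helper names rather than GCC internals:

#include <assert.h>
#include <stdint.h>

/* Model of the interval built by the INDIRECT_REF / MEM_REF /
   TARGET_MEM_REF cases: an access of SIZE bytes at ADDR is checked
   against the inclusive range [addr, addr + size - 1] (note the
   trailing -1 added via fold_build_pointer_plus_hwi_loc above).  */
static void
deref_bounds (uintptr_t addr, size_t size,
	      uintptr_t *base, uintptr_t *limit)
{
  *base = addr;
  *limit = addr + size - 1;
}

/* Model of the BIT_FIELD_REF size computation: split the bit offset
   into whole bytes plus a remainder REM, then round the access up to
   whole bytes, i.e. size = ceil ((bitsize + rem) / BITS_PER_UNIT),
   assuming BITS_PER_UNIT == 8.  */
static size_t
bitfield_byte_size (size_t bit_ofs, size_t bit_size)
{
  size_t rem = bit_ofs % 8;		/* bits inside the first byte */
  return (bit_size + rem + 7) / 8;	/* CEIL_DIV_EXPR by BPU */
}

int
main (void)
{
  uintptr_t base, limit;
  deref_bounds (0x1000, 8, &base, &limit);	/* e.g. *(double *) p */
  assert (base == 0x1000 && limit == 0x1007);
  assert (bitfield_byte_size (3, 7) == 2);	/* bits 3..9: 2 bytes */
  return 0;
}
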
Example #3
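/* Return an expression computing the number of iterations of the
   _Cilk_for loop CILK_FOR, derived from its OMP_FOR_INIT,
   OMP_FOR_COND and OMP_FOR_INCR trees.  */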
tree
cilk_for_number_of_iterations (tree cilk_for)
{
  tree t, v, n1, n2, step, type, init, cond, incr, itype;
  enum tree_code cond_code;
  location_t loc = EXPR_LOCATION (cilk_for);

  init = TREE_VEC_ELT (OMP_FOR_INIT (cilk_for), 0);
  v = TREE_OPERAND (init, 0);
  cond = TREE_VEC_ELT (OMP_FOR_COND (cilk_for), 0);
  incr = TREE_VEC_ELT (OMP_FOR_INCR (cilk_for), 0);
  type = TREE_TYPE (v);

  gcc_assert (TREE_CODE (TREE_TYPE (v)) == INTEGER_TYPE
	      || TREE_CODE (TREE_TYPE (v)) == POINTER_TYPE);
  n1 = TREE_OPERAND (init, 1);
  cond_code = TREE_CODE (cond);
  n2 = TREE_OPERAND (cond, 1);
  switch (cond_code)
    {
    case LT_EXPR:
    case GT_EXPR:
    case NE_EXPR:
      break;
    case LE_EXPR:
      if (POINTER_TYPE_P (TREE_TYPE (n2)))
	n2 = fold_build_pointer_plus_hwi_loc (loc, n2, 1);
      else
	n2 = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (n2), n2,
			      build_int_cst (TREE_TYPE (n2), 1));
      cond_code = LT_EXPR;
      break;
    case GE_EXPR:
      if (POINTER_TYPE_P (TREE_TYPE (n2)))
	n2 = fold_build_pointer_plus_hwi_loc (loc, n2, -1);
      else
	n2 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (n2), n2,
			      build_int_cst (TREE_TYPE (n2), 1));
      cond_code = GT_EXPR;
      break;
    default:
      gcc_unreachable ();
    }

  step = NULL_TREE;
  switch (TREE_CODE (incr))
    {
    case PREINCREMENT_EXPR:
    case POSTINCREMENT_EXPR:
      step = build_int_cst (TREE_TYPE (v), 1);
      break;
    case PREDECREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      step = build_int_cst (TREE_TYPE (v), -1);
      break;
    case MODIFY_EXPR:
      t = TREE_OPERAND (incr, 1);
      gcc_assert (TREE_OPERAND (t, 0) == v);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  step = TREE_OPERAND (t, 1);
	  break;
	case POINTER_PLUS_EXPR:
	  step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
	  break;
	case MINUS_EXPR:
	  step = TREE_OPERAND (t, 1);
	  step = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (step), step);
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    default:
      gcc_unreachable ();
    }

  itype = type;
  if (POINTER_TYPE_P (itype))
    itype = signed_type_for (itype);
  if (cond_code == NE_EXPR)
    {
      /* For NE_EXPR, whether the iterator increases or decreases
	 must be deduced from the sign of STEP.  */
      tree stype = itype;
      if (TYPE_UNSIGNED (stype))
	stype = signed_type_for (stype);
      cond = fold_build2_loc (loc, GE_EXPR, boolean_type_node,
			      fold_convert_loc (loc, stype, step),
			      build_int_cst (stype, 0));
      t = fold_build3_loc (loc, COND_EXPR, itype, cond,
			   build_int_cst (itype, -1),
			   build_int_cst (itype, 1));
    }
  else
    t = build_int_cst (itype, (cond_code == LT_EXPR ? -1 : 1));
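  /* Below, t becomes (n2 - n1 + step - 1) / step when iterating upward
     (LT_EXPR) and (n2 - n1 + step + 1) / step when iterating downward
     (GT_EXPR); for NE_EXPR the sign of STEP selects between the two.  */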
  t = fold_build2_loc (loc, PLUS_EXPR, itype,
		       fold_convert_loc (loc, itype, step), t);
  t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
		       fold_convert_loc (loc, itype, n2));
  t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
		       fold_convert_loc (loc, itype, n1));
  if (TYPE_UNSIGNED (itype) && cond_code == GT_EXPR)
    t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
			 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
			 fold_build1_loc (loc, NEGATE_EXPR, itype,
					  fold_convert_loc (loc, itype,
							    step)));
  else if (TYPE_UNSIGNED (itype) && cond_code == NE_EXPR)
    {
      tree t1
	= fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
			   fold_convert_loc (loc, itype, step));
      tree t2
	= fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
			   fold_build1_loc (loc, NEGATE_EXPR, itype, t),
			   fold_build1_loc (loc, NEGATE_EXPR, itype,
					    fold_convert_loc (loc, itype,
							      step)));
      t = fold_build3_loc (loc, COND_EXPR, itype, cond, t1, t2);
    }
  else
    t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
			 fold_convert_loc (loc, itype, step));
  cond = fold_build2_loc (loc, cond_code, boolean_type_node, n1, n2);
  t = fold_build3_loc (loc, COND_EXPR, itype, cond, t,
		       build_int_cst (itype, 0));
  return t;
}