Example #1
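/* Pack the streamable fields of OMP clause EXPR into bitpack BP: its
   location and, depending on the clause code, the clause-specific kind
   or reduction operation.  */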
static void
pack_ts_omp_clause_value_fields (struct output_block *ob,
				 struct bitpack_d *bp, tree expr)
{
  stream_output_location (ob, bp, OMP_CLAUSE_LOCATION (expr));
  switch (OMP_CLAUSE_CODE (expr))
    {
    case OMP_CLAUSE_DEFAULT:
      bp_pack_enum (bp, omp_clause_default_kind, OMP_CLAUSE_DEFAULT_LAST,
		    OMP_CLAUSE_DEFAULT_KIND (expr));
      break;
    case OMP_CLAUSE_SCHEDULE:
      bp_pack_enum (bp, omp_clause_schedule_kind, OMP_CLAUSE_SCHEDULE_LAST,
		    OMP_CLAUSE_SCHEDULE_KIND (expr));
      break;
    case OMP_CLAUSE_DEPEND:
      bp_pack_enum (bp, omp_clause_depend_kind, OMP_CLAUSE_DEPEND_LAST,
		    OMP_CLAUSE_DEPEND_KIND (expr));
      break;
    case OMP_CLAUSE_MAP:
      bp_pack_enum (bp, gomp_map_kind, GOMP_MAP_LAST,
		    OMP_CLAUSE_MAP_KIND (expr));
      break;
    case OMP_CLAUSE_PROC_BIND:
      bp_pack_enum (bp, omp_clause_proc_bind_kind, OMP_CLAUSE_PROC_BIND_LAST,
		    OMP_CLAUSE_PROC_BIND_KIND (expr));
      break;
    case OMP_CLAUSE_REDUCTION:
      bp_pack_enum (bp, tree_code, MAX_TREE_CODES,
		    OMP_CLAUSE_REDUCTION_CODE (expr));
      break;
    default:
      break;
    }
}
Example #2
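/* Unpack the streamable fields of OMP clause EXPR from bitpack BP.  This
   mirrors pack_ts_omp_clause_value_fields above and must read the values
   back in the same order.  */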
static void
unpack_ts_omp_clause_value_fields (struct data_in *data_in,
				   struct bitpack_d *bp, tree expr)
{
  stream_input_location (&OMP_CLAUSE_LOCATION (expr), bp, data_in);
  switch (OMP_CLAUSE_CODE (expr))
    {
    case OMP_CLAUSE_DEFAULT:
      OMP_CLAUSE_DEFAULT_KIND (expr)
	= bp_unpack_enum (bp, omp_clause_default_kind,
			  OMP_CLAUSE_DEFAULT_LAST);
      break;
    case OMP_CLAUSE_SCHEDULE:
      OMP_CLAUSE_SCHEDULE_KIND (expr)
	= bp_unpack_enum (bp, omp_clause_schedule_kind,
			  OMP_CLAUSE_SCHEDULE_LAST);
      break;
    case OMP_CLAUSE_DEPEND:
      OMP_CLAUSE_DEPEND_KIND (expr)
	= bp_unpack_enum (bp, omp_clause_depend_kind, OMP_CLAUSE_DEPEND_LAST);
      break;
    case OMP_CLAUSE_MAP:
      OMP_CLAUSE_SET_MAP_KIND (expr, bp_unpack_enum (bp, gomp_map_kind,
						     GOMP_MAP_LAST));
      break;
    case OMP_CLAUSE_PROC_BIND:
      OMP_CLAUSE_PROC_BIND_KIND (expr)
	= bp_unpack_enum (bp, omp_clause_proc_bind_kind,
			  OMP_CLAUSE_PROC_BIND_LAST);
      break;
    case OMP_CLAUSE_REDUCTION:
      OMP_CLAUSE_REDUCTION_CODE (expr)
	= bp_unpack_enum (bp, tree_code, MAX_TREE_CODES);
      break;
    default:
      break;
    }
}
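The two streamers above must stay symmetric: whatever pack_ts_omp_clause_value_fields writes, unpack_ts_omp_clause_value_fields reads back in the same order and with the same enum ranges. The following minimal sketch shows that round-trip property with a toy bit packer; it is plain C for illustration only, and the toy_* names are made up rather than GCC's bitpack API.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* A toy bit packer: values are appended at increasing bit positions.  */
struct toy_bp
{
  uint64_t word;
  unsigned pos;
};

static void
toy_pack (struct toy_bp *bp, unsigned val, unsigned nbits)
{
  bp->word |= (uint64_t) val << bp->pos;
  bp->pos += nbits;
}

static unsigned
toy_unpack (struct toy_bp *bp, unsigned nbits)
{
  unsigned val = (unsigned) (bp->word >> bp->pos) & ((1u << nbits) - 1);
  bp->pos += nbits;
  return val;
}

int
main (void)
{
  struct toy_bp bp = { 0, 0 };

  /* "Pack": a 4-bit kind followed by a 2-bit kind.  */
  toy_pack (&bp, 5, 4);
  toy_pack (&bp, 2, 2);

  /* "Unpack": read the values back in the same order and widths.  */
  bp.pos = 0;
  assert (toy_unpack (&bp, 4) == 5);
  assert (toy_unpack (&bp, 2) == 2);
  puts ("round-trip ok");
  return 0;
}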
Example #3
File: c-omp.c Project: ymgcmstk/gcc
void
c_omp_split_clauses (location_t loc, enum tree_code code,
		     omp_clause_mask mask, tree clauses, tree *cclauses)
{
  tree next, c;
  enum c_omp_clause_split s;
  int i;

  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    cclauses[i] = NULL;
  /* Add implicit nowait clause on
     #pragma omp parallel {for,for simd,sections}.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    switch (code)
      {
      case OMP_FOR:
      case OMP_SIMD:
        cclauses[C_OMP_CLAUSE_SPLIT_FOR]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      case OMP_SECTIONS:
	cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      default:
	break;
      }

  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	/* First the clauses that are unique to some constructs.  */
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_MAP:
	case OMP_CLAUSE_IS_DEVICE_PTR:
	case OMP_CLAUSE_DEFAULTMAP:
	  s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	  s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_DIST_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_PROC_BIND:
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_NOWAIT:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  if (code != OMP_SIMD)
	    OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0;
	  break;
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_SIMDLEN:
	case OMP_CLAUSE_ALIGNED:
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	case OMP_CLAUSE_GRAINSIZE:
	case OMP_CLAUSE_NUM_TASKS:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_NOGROUP:
	case OMP_CLAUSE_PRIORITY:
	  s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  break;
	/* Duplicate this to all of taskloop, distribute, for and simd.  */
	case OMP_CLAUSE_COLLAPSE:
	  if (code == OMP_SIMD)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      else
		{
		  /* This must be #pragma omp target simd.  */
		  s = C_OMP_CLAUSE_SPLIT_SIMD;
		  break;
		}
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
		  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
		   != 0)
	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  else
	    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	/* Private clause is supported on all constructs,
	   it is enough to put it on the innermost one.  For
	   #pragma omp {for,sections} put it on parallel though,
	   as that's what we did for OpenMP 3.1.  */
	case OMP_CLAUSE_PRIVATE:
	  switch (code)
	    {
	    case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
	    case OMP_FOR: case OMP_SECTIONS:
	    case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
	    case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
	    case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
	    default: gcc_unreachable ();
	    }
	  break;
	/* Firstprivate clause is supported on all constructs but
	   simd.  Put it on the outermost of those and duplicate on teams
	   and parallel.  */
	case OMP_CLAUSE_FIRSTPRIVATE:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
	      != 0)
	    {
	      if (code == OMP_SIMD
		  && (mask & ((OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_THREADS)
			      | (OMP_CLAUSE_MASK_1
				 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0)
		{
		  /* This must be #pragma omp target simd.  */
		  s = C_OMP_CLAUSE_SPLIT_TARGET;
		  break;
		}
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_FIRSTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
	      cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_FIRSTPRIVATE);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  else
		    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		/* This must be
		   #pragma omp parallel{, for{, simd}, sections}
		   or
		   #pragma omp target parallel.  */
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		   != 0)
	    {
	      /* This must be one of
		 #pragma omp {,target }teams distribute
		 #pragma omp target teams
		 #pragma omp {,target }teams distribute simd.  */
	      gcc_assert (code == OMP_DISTRIBUTE
			  || code == OMP_TEAMS
			  || code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      /* This must be #pragma omp distribute simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
	    {
	      /* This must be #pragma omp taskloop simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	    }
	  else
	    {
	      /* This must be #pragma omp for simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  break;
	/* Lastprivate is allowed on for, sections and simd.  In
	   parallel {for{, simd},sections} we actually want to put it on
	   parallel rather than for or sections.  */
	case OMP_CLAUSE_LASTPRIVATE:
	  if (code == OMP_FOR || code == OMP_SECTIONS)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      break;
	    }
	  gcc_assert (code == OMP_SIMD);
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      OMP_CLAUSE_CHAIN (c) = cclauses[s];
	      cclauses[s] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Shared and default clauses are allowed on parallel, teams and
	   taskloop.  */
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_DEFAULT:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
	      != 0)
	    {
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
	      != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  == 0)
		{
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  break;
		}
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_CODE (clauses));
	      if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
		OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      else
		OMP_CLAUSE_DEFAULT_KIND (c)
		  = OMP_CLAUSE_DEFAULT_KIND (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
	      cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	/* Reduction is allowed on simd, for, parallel, sections and teams.
	   Duplicate it on all of them, but omit on for or sections if
	   parallel is present.  */
	case OMP_CLAUSE_REDUCTION:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if (code == OMP_SIMD)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		}
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if (code == OMP_SECTIONS || code == OMP_PARALLEL)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else if (code == OMP_SIMD)
	    s = C_OMP_CLAUSE_SPLIT_SIMD;
	  else
	    s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_IF:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
	      != 0)
	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_MAP)) != 0)
		{
		  if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_PARALLEL)
		    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		  else if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_TARGET)
		    s = C_OMP_CLAUSE_SPLIT_TARGET;
		  else if (OMP_CLAUSE_IF_MODIFIER (clauses) == ERROR_MARK)
		    {
		      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					    OMP_CLAUSE_IF);
		      OMP_CLAUSE_IF_MODIFIER (c)
			= OMP_CLAUSE_IF_MODIFIER (clauses);
		      OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
		      OMP_CLAUSE_CHAIN (c)
			= cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
		      cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
		      s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		    }
		  else
		    {
		      error_at (OMP_CLAUSE_LOCATION (clauses),
				"expected %<parallel%> or %<target%> %<if%> "
				"clause modifier");
		      continue;
		    }
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_LINEAR:
	  /* Linear clause is allowed on simd and for.  Put it on the
	     innermost construct.  */
	  if (code == OMP_SIMD)
	    s = C_OMP_CLAUSE_SPLIT_SIMD;
	  else
	    s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	default:
	  gcc_unreachable ();
	}
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }
#ifdef ENABLE_CHECKING
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
  if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
	       | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0
      && code != OMP_SECTIONS)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE);
  if (code != OMP_SIMD)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE);
#endif
}
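As a rough illustration of what this routine splits, here is a user-level OpenMP C example (not GCC internals; it assumes a compiler invoked with -fopenmp and, for the target part, offloading support). The comments mark where each clause lands according to the switch cases above.

#include <stdio.h>

#define N 1024

int
main (void)
{
  double a[N], sum = 0.0;
  int i;

  for (i = 0; i < N; i++)
    a[i] = i;

  /* map -> C_OMP_CLAUSE_SPLIT_TARGET, num_teams -> ..._TEAMS,
     num_threads and proc_bind -> ..._PARALLEL, schedule -> ..._FOR,
     safelen -> ..._SIMD; reduction is duplicated as in the code above.  */
  #pragma omp target teams distribute parallel for simd \
	map(to: a) num_teams(4) num_threads(8) proc_bind(close) \
	schedule(static, 64) safelen(8) reduction(+: sum)
  for (i = 0; i < N; i++)
    sum += a[i];

  printf ("%g\n", sum);
  return 0;
}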
Example #4
static void
gfc_trans_omp_array_reduction (tree c, gfc_symbol *sym, locus where)
{
  gfc_symtree *root1 = NULL, *root2 = NULL, *root3 = NULL, *root4 = NULL;
  gfc_symtree *symtree1, *symtree2, *symtree3, *symtree4 = NULL;
  gfc_symbol init_val_sym, outer_sym, intrinsic_sym;
  gfc_expr *e1, *e2, *e3, *e4;
  gfc_ref *ref;
  tree decl, backend_decl, stmt;
  locus old_loc = gfc_current_locus;
  const char *iname;
  try t;

  decl = OMP_CLAUSE_DECL (c);
  gfc_current_locus = where;

  /* Create a fake symbol for init value.  */
  memset (&init_val_sym, 0, sizeof (init_val_sym));
  init_val_sym.ns = sym->ns;
  init_val_sym.name = sym->name;
  init_val_sym.ts = sym->ts;
  init_val_sym.attr.referenced = 1;
  init_val_sym.declared_at = where;
  init_val_sym.attr.flavor = FL_VARIABLE;
  backend_decl = omp_reduction_init (c, gfc_sym_type (&init_val_sym));
  init_val_sym.backend_decl = backend_decl;

  /* Create a fake symbol for the outer array reference.  */
  outer_sym = *sym;
  outer_sym.as = gfc_copy_array_spec (sym->as);
  outer_sym.attr.dummy = 0;
  outer_sym.attr.result = 0;
  outer_sym.attr.flavor = FL_VARIABLE;
  outer_sym.backend_decl = create_tmp_var_raw (TREE_TYPE (decl), NULL);

  /* Create fake symtrees for it.  */
  symtree1 = gfc_new_symtree (&root1, sym->name);
  symtree1->n.sym = sym;
  gcc_assert (symtree1 == root1);

  symtree2 = gfc_new_symtree (&root2, sym->name);
  symtree2->n.sym = &init_val_sym;
  gcc_assert (symtree2 == root2);

  symtree3 = gfc_new_symtree (&root3, sym->name);
  symtree3->n.sym = &outer_sym;
  gcc_assert (symtree3 == root3);

  /* Create expressions.  */
  e1 = gfc_get_expr ();
  e1->expr_type = EXPR_VARIABLE;
  e1->where = where;
  e1->symtree = symtree1;
  e1->ts = sym->ts;
  e1->ref = ref = gfc_get_ref ();
  ref->u.ar.where = where;
  ref->u.ar.as = sym->as;
  ref->u.ar.type = AR_FULL;
  ref->u.ar.dimen = 0;
  t = gfc_resolve_expr (e1);
  gcc_assert (t == SUCCESS);

  e2 = gfc_get_expr ();
  e2->expr_type = EXPR_VARIABLE;
  e2->where = where;
  e2->symtree = symtree2;
  e2->ts = sym->ts;
  t = gfc_resolve_expr (e2);
  gcc_assert (t == SUCCESS);

  e3 = gfc_copy_expr (e1);
  e3->symtree = symtree3;
  t = gfc_resolve_expr (e3);
  gcc_assert (t == SUCCESS);

  iname = NULL;
  switch (OMP_CLAUSE_REDUCTION_CODE (c))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
      e4 = gfc_add (e3, e1);
      break;
    case MULT_EXPR:
      e4 = gfc_multiply (e3, e1);
      break;
    case TRUTH_ANDIF_EXPR:
      e4 = gfc_and (e3, e1);
      break;
    case TRUTH_ORIF_EXPR:
      e4 = gfc_or (e3, e1);
      break;
    case EQ_EXPR:
      e4 = gfc_eqv (e3, e1);
      break;
    case NE_EXPR:
      e4 = gfc_neqv (e3, e1);
      break;
    case MIN_EXPR:
      iname = "min";
      break;
    case MAX_EXPR:
      iname = "max";
      break;
    case BIT_AND_EXPR:
      iname = "iand";
      break;
    case BIT_IOR_EXPR:
      iname = "ior";
      break;
    case BIT_XOR_EXPR:
      iname = "ieor";
      break;
    default:
      gcc_unreachable ();
    }
  if (iname != NULL)
    {
      memset (&intrinsic_sym, 0, sizeof (intrinsic_sym));
      intrinsic_sym.ns = sym->ns;
      intrinsic_sym.name = iname;
      intrinsic_sym.ts = sym->ts;
      intrinsic_sym.attr.referenced = 1;
      intrinsic_sym.attr.intrinsic = 1;
      intrinsic_sym.attr.function = 1;
      intrinsic_sym.result = &intrinsic_sym;
      intrinsic_sym.declared_at = where;

      symtree4 = gfc_new_symtree (&root4, iname);
      symtree4->n.sym = &intrinsic_sym;
      gcc_assert (symtree4 == root4);

      e4 = gfc_get_expr ();
      e4->expr_type = EXPR_FUNCTION;
      e4->where = where;
      e4->symtree = symtree4;
      e4->value.function.isym = gfc_find_function (iname);
      e4->value.function.actual = gfc_get_actual_arglist ();
      e4->value.function.actual->expr = e3;
      e4->value.function.actual->next = gfc_get_actual_arglist ();
      e4->value.function.actual->next->expr = e1;
    }
  /* e1 and e3 have been stored as arguments of e4, avoid sharing.  */
  e1 = gfc_copy_expr (e1);
  e3 = gfc_copy_expr (e3);
  t = gfc_resolve_expr (e4);
  gcc_assert (t == SUCCESS);

  /* Create the init statement list.  */
  pushlevel (0);
  stmt = gfc_trans_assignment (e1, e2, false);
  if (TREE_CODE (stmt) != BIND_EXPR)
    stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0, 0));
  else
    poplevel (0, 0, 0);
  OMP_CLAUSE_REDUCTION_INIT (c) = stmt;

  /* Create the merge statement list.  */
  pushlevel (0);
  stmt = gfc_trans_assignment (e3, e4, false);
  if (TREE_CODE (stmt) != BIND_EXPR)
    stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0, 0));
  else
    poplevel (0, 0, 0);
  OMP_CLAUSE_REDUCTION_MERGE (c) = stmt;

  /* And stick the placeholder VAR_DECL into the clause as well.  */
  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = outer_sym.backend_decl;

  gfc_current_locus = old_loc;

  gfc_free_expr (e1);
  gfc_free_expr (e2);
  gfc_free_expr (e3);
  gfc_free_expr (e4);
  gfc_free (symtree1);
  gfc_free (symtree2);
  gfc_free (symtree3);
  if (symtree4)
    gfc_free (symtree4);
  gfc_free_array_spec (outer_sym.as);
}

static tree
gfc_trans_omp_reduction_list (gfc_namelist *namelist, tree list, 
			      enum tree_code reduction_code, locus where)
{
  for (; namelist != NULL; namelist = namelist->next)
    if (namelist->sym->attr.referenced)
      {
	tree t = gfc_trans_omp_variable (namelist->sym);
	if (t != error_mark_node)
	  {
	    tree node = build_omp_clause (OMP_CLAUSE_REDUCTION);
	    OMP_CLAUSE_DECL (node) = t;
	    OMP_CLAUSE_REDUCTION_CODE (node) = reduction_code;
	    if (namelist->sym->attr.dimension)
	      gfc_trans_omp_array_reduction (node, namelist->sym, where);
	    list = gfc_trans_add_clause (node, list);
	  }
      }
  return list;
}

static tree
gfc_trans_omp_clauses (stmtblock_t *block, gfc_omp_clauses *clauses,
		       locus where)
{
  tree omp_clauses = NULL_TREE, chunk_size, c, old_clauses;
  int list;
  enum omp_clause_code clause_code;
  gfc_se se;

  if (clauses == NULL)
    return NULL_TREE;

  for (list = 0; list < OMP_LIST_NUM; list++)
    {
      gfc_namelist *n = clauses->lists[list];

      if (n == NULL)
	continue;
      if (list >= OMP_LIST_REDUCTION_FIRST
	  && list <= OMP_LIST_REDUCTION_LAST)
	{
	  enum tree_code reduction_code;
	  switch (list)
	    {
	    case OMP_LIST_PLUS:
	      reduction_code = PLUS_EXPR;
	      break;
	    case OMP_LIST_MULT:
	      reduction_code = MULT_EXPR;
	      break;
	    case OMP_LIST_SUB:
	      reduction_code = MINUS_EXPR;
	      break;
	    case OMP_LIST_AND:
	      reduction_code = TRUTH_ANDIF_EXPR;
	      break;
	    case OMP_LIST_OR:
	      reduction_code = TRUTH_ORIF_EXPR;
	      break;
	    case OMP_LIST_EQV:
	      reduction_code = EQ_EXPR;
	      break;
	    case OMP_LIST_NEQV:
	      reduction_code = NE_EXPR;
	      break;
	    case OMP_LIST_MAX:
	      reduction_code = MAX_EXPR;
	      break;
	    case OMP_LIST_MIN:
	      reduction_code = MIN_EXPR;
	      break;
	    case OMP_LIST_IAND:
	      reduction_code = BIT_AND_EXPR;
	      break;
	    case OMP_LIST_IOR:
	      reduction_code = BIT_IOR_EXPR;
	      break;
	    case OMP_LIST_IEOR:
	      reduction_code = BIT_XOR_EXPR;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  old_clauses = omp_clauses;
	  omp_clauses
	    = gfc_trans_omp_reduction_list (n, omp_clauses, reduction_code,
					    where);
	  continue;
	}
      switch (list)
	{
	case OMP_LIST_PRIVATE:
	  clause_code = OMP_CLAUSE_PRIVATE;
	  goto add_clause;
	case OMP_LIST_SHARED:
	  clause_code = OMP_CLAUSE_SHARED;
	  goto add_clause;
	case OMP_LIST_FIRSTPRIVATE:
	  clause_code = OMP_CLAUSE_FIRSTPRIVATE;
	  goto add_clause;
	case OMP_LIST_LASTPRIVATE:
	  clause_code = OMP_CLAUSE_LASTPRIVATE;
	  goto add_clause;
	case OMP_LIST_COPYIN:
	  clause_code = OMP_CLAUSE_COPYIN;
	  goto add_clause;
	case OMP_LIST_COPYPRIVATE:
	  clause_code = OMP_CLAUSE_COPYPRIVATE;
	  /* FALLTHROUGH */
	add_clause:
	  omp_clauses
	    = gfc_trans_omp_variable_list (clause_code, n, omp_clauses);
	  break;
	default:
	  break;
	}
    }

  if (clauses->if_expr)
    {
      tree if_var;

      gfc_init_se (&se, NULL);
      gfc_conv_expr (&se, clauses->if_expr);
      gfc_add_block_to_block (block, &se.pre);
      if_var = gfc_evaluate_now (se.expr, block);
      gfc_add_block_to_block (block, &se.post);

      c = build_omp_clause (OMP_CLAUSE_IF);
      OMP_CLAUSE_IF_EXPR (c) = if_var;
      omp_clauses = gfc_trans_add_clause (c, omp_clauses);
    }

  if (clauses->num_threads)
    {
      tree num_threads;

      gfc_init_se (&se, NULL);
      gfc_conv_expr (&se, clauses->num_threads);
      gfc_add_block_to_block (block, &se.pre);
      num_threads = gfc_evaluate_now (se.expr, block);
      gfc_add_block_to_block (block, &se.post);

      c = build_omp_clause (OMP_CLAUSE_NUM_THREADS);
      OMP_CLAUSE_NUM_THREADS_EXPR (c) = num_threads;
      omp_clauses = gfc_trans_add_clause (c, omp_clauses);
    }

  chunk_size = NULL_TREE;
  if (clauses->chunk_size)
    {
      gfc_init_se (&se, NULL);
      gfc_conv_expr (&se, clauses->chunk_size);
      gfc_add_block_to_block (block, &se.pre);
      chunk_size = gfc_evaluate_now (se.expr, block);
      gfc_add_block_to_block (block, &se.post);
    }

  if (clauses->sched_kind != OMP_SCHED_NONE)
    {
      c = build_omp_clause (OMP_CLAUSE_SCHEDULE);
      OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = chunk_size;
      switch (clauses->sched_kind)
	{
	case OMP_SCHED_STATIC:
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC;
	  break;
	case OMP_SCHED_DYNAMIC:
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
	  break;
	case OMP_SCHED_GUIDED:
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED;
	  break;
	case OMP_SCHED_RUNTIME:
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME;
	  break;
	default:
	  gcc_unreachable ();
	}
      omp_clauses = gfc_trans_add_clause (c, omp_clauses);
    }

  if (clauses->default_sharing != OMP_DEFAULT_UNKNOWN)
    {
      c = build_omp_clause (OMP_CLAUSE_DEFAULT);
      switch (clauses->default_sharing)
	{
	case OMP_DEFAULT_NONE:
	  OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_NONE;
	  break;
	case OMP_DEFAULT_SHARED:
	  OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_SHARED;
	  break;
	case OMP_DEFAULT_PRIVATE:
	  OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_PRIVATE;
	  break;
	default:
	  gcc_unreachable ();
	}
      omp_clauses = gfc_trans_add_clause (c, omp_clauses);
    }

  if (clauses->nowait)
    {
      c = build_omp_clause (OMP_CLAUSE_NOWAIT);
      omp_clauses = gfc_trans_add_clause (c, omp_clauses);
    }

  if (clauses->ordered)
    {
      c = build_omp_clause (OMP_CLAUSE_ORDERED);
      omp_clauses = gfc_trans_add_clause (c, omp_clauses);
    }

  return omp_clauses;
}
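Conceptually, the REDUCTION_INIT and REDUCTION_MERGE statement lists built by gfc_trans_omp_array_reduction amount to something like the following C sketch. This is illustrative only, not the generated trees; the reduction_init/reduction_merge names are invented. The private copy starts at the identity value supplied by omp_reduction_init and is combined element-wise into the outer array with the reduction operator.

#include <stdio.h>

#define N 8

/* INIT: every element of the private copy gets the identity value
   (0 for a PLUS_EXPR reduction).  */
static void
reduction_init (double priv[N])
{
  int i;
  for (i = 0; i < N; i++)
    priv[i] = 0.0;
}

/* MERGE: outer = outer + priv, i.e. the e4 expression for PLUS_EXPR.  */
static void
reduction_merge (double outer[N], const double priv[N])
{
  int i;
  for (i = 0; i < N; i++)
    outer[i] = outer[i] + priv[i];
}

int
main (void)
{
  double outer[N] = { 0 }, priv[N];
  int i;

  reduction_init (priv);
  for (i = 0; i < N; i++)
    priv[i] += i;		/* a thread's local accumulation */
  reduction_merge (outer, priv);
  printf ("%g\n", outer[N - 1]);
  return 0;
}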
Example #5
File: c-omp.c Project: Nodplus/gcc
void
c_omp_split_clauses (location_t loc, enum tree_code code,
		     omp_clause_mask mask, tree clauses, tree *cclauses)
{
  tree next, c;
  enum c_omp_clause_split s;
  int i;

  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    cclauses[i] = NULL;
  /* Add implicit nowait clause on
     #pragma omp parallel {for,for simd,sections}.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    switch (code)
      {
      case OMP_FOR:
      case OMP_SIMD:
        cclauses[C_OMP_CLAUSE_SPLIT_FOR]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      case OMP_SECTIONS:
	cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      default:
	break;
      }

  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	/* First the clauses that are unique to some constructs.  */
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_MAP:
	  s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	  s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_DIST_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_PROC_BIND:
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_LINEAR:
	case OMP_CLAUSE_ALIGNED:
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Duplicate this to all of distribute, for and simd.  */
	case OMP_CLAUSE_COLLAPSE:
	  if (code == OMP_SIMD)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_COLLAPSE);
	      OMP_CLAUSE_COLLAPSE_EXPR (c)
		= OMP_CLAUSE_COLLAPSE_EXPR (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
	      cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
		  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else
	    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	/* Private clause is supported on all constructs but target,
	   it is enough to put it on the innermost one.  For
	   #pragma omp {for,sections} put it on parallel though,
	   as that's what we did for OpenMP 3.1.  */
	case OMP_CLAUSE_PRIVATE:
	  switch (code)
	    {
	    case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
	    case OMP_FOR: case OMP_SECTIONS:
	    case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
	    case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
	    case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
	    default: gcc_unreachable ();
	    }
	  break;
	/* Firstprivate clause is supported on all constructs but
	   target and simd.  Put it on the outermost of those and
	   duplicate on parallel.  */
	case OMP_CLAUSE_FIRSTPRIVATE:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_FIRSTPRIVATE);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  else
		    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		/* This must be
		   #pragma omp parallel{, for{, simd}, sections}.  */
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		   != 0)
	    {
	      /* This must be #pragma omp {,target }teams distribute.  */
	      gcc_assert (code == OMP_DISTRIBUTE);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      /* This must be #pragma omp distribute simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else
	    {
	      /* This must be #pragma omp for simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  break;
	/* Lastprivate is allowed on for, sections and simd.  In
	   parallel {for{, simd},sections} we actually want to put it on
	   parallel rather than for or sections.  */
	case OMP_CLAUSE_LASTPRIVATE:
	  if (code == OMP_FOR || code == OMP_SECTIONS)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      break;
	    }
	  gcc_assert (code == OMP_SIMD);
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      OMP_CLAUSE_CHAIN (c) = cclauses[s];
	      cclauses[s] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Shared and default clauses are allowed on parallel and teams.  */
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_DEFAULT:
	  if (code == OMP_TEAMS)
	    {
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
	      != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_CODE (clauses));
	      if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
		OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      else
		OMP_CLAUSE_DEFAULT_KIND (c)
		  = OMP_CLAUSE_DEFAULT_KIND (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
	      cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
	      
	    }
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	/* Reduction is allowed on simd, for, parallel, sections and teams.
	   Duplicate it on all of them, but omit on for or sections if
	   parallel is present.  */
	case OMP_CLAUSE_REDUCTION:
	  if (code == OMP_SIMD)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_REDUCTION);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_REDUCTION_CODE (c)
		= OMP_CLAUSE_REDUCTION_CODE (clauses);
	      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		= OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
	      cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		}
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if (code == OMP_SECTIONS)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else
	    s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_IF:
	  /* FIXME: This is currently being discussed.  */
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	default:
	  gcc_unreachable ();
	}
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }
}
Example #6
static tree
cp_genericize_r (tree *stmt_p, int *walk_subtrees, void *data)
{
  tree stmt = *stmt_p;
  struct cp_genericize_data *wtd = (struct cp_genericize_data *) data;
  struct pointer_set_t *p_set = wtd->p_set;

  /* If in an OpenMP context, note var uses.  */
  if (__builtin_expect (wtd->omp_ctx != NULL, 0)
      && (VAR_P (stmt)
	  || TREE_CODE (stmt) == PARM_DECL
	  || TREE_CODE (stmt) == RESULT_DECL)
      && omp_var_to_track (stmt))
    omp_cxx_notice_variable (wtd->omp_ctx, stmt);

  if (is_invisiref_parm (stmt)
      /* Don't dereference parms in a thunk, pass the references through. */
      && !(DECL_THUNK_P (current_function_decl)
	   && TREE_CODE (stmt) == PARM_DECL))
    {
      *stmt_p = convert_from_reference (stmt);
      *walk_subtrees = 0;
      return NULL;
    }

  /* Map block scope extern declarations to visible declarations with the
     same name and type in outer scopes if any.  */
  if (cp_function_chain->extern_decl_map
      && VAR_OR_FUNCTION_DECL_P (stmt)
      && DECL_EXTERNAL (stmt))
    {
      struct cxx_int_tree_map *h, in;
      in.uid = DECL_UID (stmt);
      h = (struct cxx_int_tree_map *)
	  htab_find_with_hash (cp_function_chain->extern_decl_map,
			       &in, in.uid);
      if (h)
	{
	  *stmt_p = h->to;
	  *walk_subtrees = 0;
	  return NULL;
	}
    }

  /* Other than invisiref parms, don't walk the same tree twice.  */
  if (pointer_set_contains (p_set, stmt))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  if (TREE_CODE (stmt) == ADDR_EXPR
      && is_invisiref_parm (TREE_OPERAND (stmt, 0)))
    {
      /* If in an OpenMP context, note var uses.  */
      if (__builtin_expect (wtd->omp_ctx != NULL, 0)
	  && omp_var_to_track (TREE_OPERAND (stmt, 0)))
	omp_cxx_notice_variable (wtd->omp_ctx, TREE_OPERAND (stmt, 0));
      *stmt_p = convert (TREE_TYPE (stmt), TREE_OPERAND (stmt, 0));
      *walk_subtrees = 0;
    }
  else if (TREE_CODE (stmt) == RETURN_EXPR
	   && TREE_OPERAND (stmt, 0)
	   && is_invisiref_parm (TREE_OPERAND (stmt, 0)))
    /* Don't dereference an invisiref RESULT_DECL inside a RETURN_EXPR.  */
    *walk_subtrees = 0;
  else if (TREE_CODE (stmt) == OMP_CLAUSE)
    switch (OMP_CLAUSE_CODE (stmt))
      {
      case OMP_CLAUSE_LASTPRIVATE:
	/* Don't dereference an invisiref in OpenMP clauses.  */
	if (is_invisiref_parm (OMP_CLAUSE_DECL (stmt)))
	  {
	    *walk_subtrees = 0;
	    if (OMP_CLAUSE_LASTPRIVATE_STMT (stmt))
	      cp_walk_tree (&OMP_CLAUSE_LASTPRIVATE_STMT (stmt),
			    cp_genericize_r, data, NULL);
	  }
	break;
      case OMP_CLAUSE_PRIVATE:
	/* Don't dereference an invisiref in OpenMP clauses.  */
	if (is_invisiref_parm (OMP_CLAUSE_DECL (stmt)))
	  *walk_subtrees = 0;
	else if (wtd->omp_ctx != NULL)
	  {
	    /* Private clause doesn't cause any references to the
	       var in outer contexts, avoid calling
	       omp_cxx_notice_variable for it.  */
	    struct cp_genericize_omp_taskreg *old = wtd->omp_ctx;
	    wtd->omp_ctx = NULL;
	    cp_walk_tree (&OMP_CLAUSE_DECL (stmt), cp_genericize_r,
			  data, NULL);
	    wtd->omp_ctx = old;
	    *walk_subtrees = 0;
	  }
	break;
      case OMP_CLAUSE_SHARED:
      case OMP_CLAUSE_FIRSTPRIVATE:
      case OMP_CLAUSE_COPYIN:
      case OMP_CLAUSE_COPYPRIVATE:
	/* Don't dereference an invisiref in OpenMP clauses.  */
	if (is_invisiref_parm (OMP_CLAUSE_DECL (stmt)))
	  *walk_subtrees = 0;
	break;
      case OMP_CLAUSE_REDUCTION:
	/* Don't dereference an invisiref in reduction clause's
	   OMP_CLAUSE_DECL either.  OMP_CLAUSE_REDUCTION_{INIT,MERGE}
	   still needs to be genericized.  */
	if (is_invisiref_parm (OMP_CLAUSE_DECL (stmt)))
	  {
	    *walk_subtrees = 0;
	    if (OMP_CLAUSE_REDUCTION_INIT (stmt))
	      cp_walk_tree (&OMP_CLAUSE_REDUCTION_INIT (stmt),
			    cp_genericize_r, data, NULL);
	    if (OMP_CLAUSE_REDUCTION_MERGE (stmt))
	      cp_walk_tree (&OMP_CLAUSE_REDUCTION_MERGE (stmt),
			    cp_genericize_r, data, NULL);
	  }
	break;
      default:
	break;
      }
  else if (IS_TYPE_OR_DECL_P (stmt))
    *walk_subtrees = 0;

  /* Due to the way voidify_wrapper_expr is written, we don't get a chance
     to lower this construct before scanning it, so we need to lower these
     before doing anything else.  */
  else if (TREE_CODE (stmt) == CLEANUP_STMT)
    *stmt_p = build2_loc (EXPR_LOCATION (stmt),
			  CLEANUP_EH_ONLY (stmt) ? TRY_CATCH_EXPR
						 : TRY_FINALLY_EXPR,
			  void_type_node,
			  CLEANUP_BODY (stmt),
			  CLEANUP_EXPR (stmt));

  else if (TREE_CODE (stmt) == IF_STMT)
    {
      genericize_if_stmt (stmt_p);
      /* *stmt_p has changed, tail recurse to handle it again.  */
      return cp_genericize_r (stmt_p, walk_subtrees, data);
    }

  /* COND_EXPR might have incompatible types in branches if one or both
     arms are bitfields.  Fix it up now.  */
  else if (TREE_CODE (stmt) == COND_EXPR)
    {
      tree type_left
	= (TREE_OPERAND (stmt, 1)
	   ? is_bitfield_expr_with_lowered_type (TREE_OPERAND (stmt, 1))
	   : NULL_TREE);
      tree type_right
	= (TREE_OPERAND (stmt, 2)
	   ? is_bitfield_expr_with_lowered_type (TREE_OPERAND (stmt, 2))
	   : NULL_TREE);
      if (type_left
	  && !useless_type_conversion_p (TREE_TYPE (stmt),
					 TREE_TYPE (TREE_OPERAND (stmt, 1))))
	{
	  TREE_OPERAND (stmt, 1)
	    = fold_convert (type_left, TREE_OPERAND (stmt, 1));
	  gcc_assert (useless_type_conversion_p (TREE_TYPE (stmt),
						 type_left));
	}
      if (type_right
	  && !useless_type_conversion_p (TREE_TYPE (stmt),
					 TREE_TYPE (TREE_OPERAND (stmt, 2))))
	{
	  TREE_OPERAND (stmt, 2)
	    = fold_convert (type_right, TREE_OPERAND (stmt, 2));
	  gcc_assert (useless_type_conversion_p (TREE_TYPE (stmt),
						 type_right));
	}
    }

  else if (TREE_CODE (stmt) == BIND_EXPR)
    {
      if (__builtin_expect (wtd->omp_ctx != NULL, 0))
	{
	  tree decl;
	  for (decl = BIND_EXPR_VARS (stmt); decl; decl = DECL_CHAIN (decl))
	    if (VAR_P (decl)
		&& !DECL_EXTERNAL (decl)
		&& omp_var_to_track (decl))
	      {
		splay_tree_node n
		  = splay_tree_lookup (wtd->omp_ctx->variables,
				       (splay_tree_key) decl);
		if (n == NULL)
		  splay_tree_insert (wtd->omp_ctx->variables,
				     (splay_tree_key) decl,
				     TREE_STATIC (decl)
				     ? OMP_CLAUSE_DEFAULT_SHARED
				     : OMP_CLAUSE_DEFAULT_PRIVATE);
	      }
	}
      wtd->bind_expr_stack.safe_push (stmt);
      cp_walk_tree (&BIND_EXPR_BODY (stmt),
		    cp_genericize_r, data, NULL);
      wtd->bind_expr_stack.pop ();
    }

  else if (TREE_CODE (stmt) == USING_STMT)
    {
      tree block = NULL_TREE;

      /* Get the innermost inclosing GIMPLE_BIND that has a non NULL
         BLOCK, and append an IMPORTED_DECL to its
	 BLOCK_VARS chained list.  */
      if (wtd->bind_expr_stack.exists ())
	{
	  int i;
	  for (i = wtd->bind_expr_stack.length () - 1; i >= 0; i--)
	    if ((block = BIND_EXPR_BLOCK (wtd->bind_expr_stack[i])))
	      break;
	}
      if (block)
	{
	  tree using_directive;
	  gcc_assert (TREE_OPERAND (stmt, 0));

	  using_directive = make_node (IMPORTED_DECL);
	  TREE_TYPE (using_directive) = void_type_node;

	  IMPORTED_DECL_ASSOCIATED_DECL (using_directive)
	    = TREE_OPERAND (stmt, 0);
	  DECL_CHAIN (using_directive) = BLOCK_VARS (block);
	  BLOCK_VARS (block) = using_directive;
	}
      /* The USING_STMT won't appear in GENERIC.  */
      *stmt_p = build1 (NOP_EXPR, void_type_node, integer_zero_node);
      *walk_subtrees = 0;
    }

  else if (TREE_CODE (stmt) == DECL_EXPR
	   && TREE_CODE (DECL_EXPR_DECL (stmt)) == USING_DECL)
    {
      /* Using decls inside DECL_EXPRs are just dropped on the floor.  */
      *stmt_p = build1 (NOP_EXPR, void_type_node, integer_zero_node);
      *walk_subtrees = 0;
    }
  else if (TREE_CODE (stmt) == OMP_PARALLEL || TREE_CODE (stmt) == OMP_TASK)
    {
      struct cp_genericize_omp_taskreg omp_ctx;
      tree c, decl;
      splay_tree_node n;

      *walk_subtrees = 0;
      cp_walk_tree (&OMP_CLAUSES (stmt), cp_genericize_r, data, NULL);
      omp_ctx.is_parallel = TREE_CODE (stmt) == OMP_PARALLEL;
      omp_ctx.default_shared = omp_ctx.is_parallel;
      omp_ctx.outer = wtd->omp_ctx;
      omp_ctx.variables = splay_tree_new (splay_tree_compare_decl_uid, 0, 0);
      wtd->omp_ctx = &omp_ctx;
      for (c = OMP_CLAUSES (stmt); c; c = OMP_CLAUSE_CHAIN (c))
	switch (OMP_CLAUSE_CODE (c))
	  {
	  case OMP_CLAUSE_SHARED:
	  case OMP_CLAUSE_PRIVATE:
	  case OMP_CLAUSE_FIRSTPRIVATE:
	  case OMP_CLAUSE_LASTPRIVATE:
	    decl = OMP_CLAUSE_DECL (c);
	    if (decl == error_mark_node || !omp_var_to_track (decl))
	      break;
	    n = splay_tree_lookup (omp_ctx.variables, (splay_tree_key) decl);
	    if (n != NULL)
	      break;
	    splay_tree_insert (omp_ctx.variables, (splay_tree_key) decl,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
			       ? OMP_CLAUSE_DEFAULT_SHARED
			       : OMP_CLAUSE_DEFAULT_PRIVATE);
	    if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE
		&& omp_ctx.outer)
	      omp_cxx_notice_variable (omp_ctx.outer, decl);
	    break;
	  case OMP_CLAUSE_DEFAULT:
	    if (OMP_CLAUSE_DEFAULT_KIND (c) == OMP_CLAUSE_DEFAULT_SHARED)
	      omp_ctx.default_shared = true;
	  default:
	    break;
	  }
      cp_walk_tree (&OMP_BODY (stmt), cp_genericize_r, data, NULL);
      wtd->omp_ctx = omp_ctx.outer;
      splay_tree_delete (omp_ctx.variables);
    }
  else if (TREE_CODE (stmt) == CONVERT_EXPR)
    gcc_assert (!CONVERT_EXPR_VBASE_PATH (stmt));
  else if (TREE_CODE (stmt) == FOR_STMT)
    genericize_for_stmt (stmt_p, walk_subtrees, data);
  else if (TREE_CODE (stmt) == WHILE_STMT)
    genericize_while_stmt (stmt_p, walk_subtrees, data);
  else if (TREE_CODE (stmt) == DO_STMT)
    genericize_do_stmt (stmt_p, walk_subtrees, data);
  else if (TREE_CODE (stmt) == SWITCH_STMT)
    genericize_switch_stmt (stmt_p, walk_subtrees, data);
  else if (TREE_CODE (stmt) == CONTINUE_STMT)
    genericize_continue_stmt (stmt_p);
  else if (TREE_CODE (stmt) == BREAK_STMT)
    genericize_break_stmt (stmt_p);
  else if (TREE_CODE (stmt) == OMP_FOR
	   || TREE_CODE (stmt) == OMP_SIMD
	   || TREE_CODE (stmt) == OMP_DISTRIBUTE)
    genericize_omp_for_stmt (stmt_p, walk_subtrees, data);
  else if (TREE_CODE (stmt) == SIZEOF_EXPR)
    {
      if (SIZEOF_EXPR_TYPE_P (stmt))
	*stmt_p
	  = cxx_sizeof_or_alignof_type (TREE_TYPE (TREE_OPERAND (stmt, 0)),
					SIZEOF_EXPR, false);
      else if (TYPE_P (TREE_OPERAND (stmt, 0)))
	*stmt_p = cxx_sizeof_or_alignof_type (TREE_OPERAND (stmt, 0),
					      SIZEOF_EXPR, false);
      else
	*stmt_p = cxx_sizeof_or_alignof_expr (TREE_OPERAND (stmt, 0),
					      SIZEOF_EXPR, false);
      if (*stmt_p == error_mark_node)
	*stmt_p = size_one_node;
      return NULL;
    }    

  pointer_set_insert (p_set, *stmt_p);

  return NULL;
}
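The repeated *walk_subtrees = 0 assignments above follow the usual walk_tree pruning convention: the callback clears the flag when it has already handled, or wants to skip, the children of the current node, and the walker then does not recurse into them. A minimal self-contained sketch of that pattern is shown below; the toy_node/toy_walk names are invented and this is not GCC's cp_walk_tree API.

#include <stdio.h>

struct toy_node
{
  const char *name;
  int skip_children;
  struct toy_node *kids[2];
};

typedef void (*toy_callback) (struct toy_node *node, int *walk_subtrees);

static void
toy_walk (struct toy_node *node, toy_callback cb)
{
  int walk_subtrees = 1;
  int i;

  if (node == NULL)
    return;
  cb (node, &walk_subtrees);
  if (!walk_subtrees)
    return;			/* the callback pruned the children */
  for (i = 0; i < 2; i++)
    toy_walk (node->kids[i], cb);
}

static void
print_cb (struct toy_node *node, int *walk_subtrees)
{
  printf ("%s\n", node->name);
  if (node->skip_children)
    *walk_subtrees = 0;
}

int
main (void)
{
  struct toy_node leaf1 = { "leaf1", 0, { NULL, NULL } };
  struct toy_node leaf2 = { "leaf2", 0, { NULL, NULL } };
  struct toy_node pruned = { "pruned", 1, { &leaf1, &leaf2 } };
  struct toy_node root = { "root", 0, { &pruned, NULL } };

  toy_walk (&root, print_cb);	/* prints root and pruned; the leaves are skipped */
  return 0;
}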
Example #7
static tree
gfc_trans_omp_clauses (stmtblock_t *block, gfc_omp_clauses *clauses,
		       locus where)
{
  tree omp_clauses = NULL_TREE, chunk_size, c;
  int list;
  enum omp_clause_code clause_code;
  gfc_se se;

  if (clauses == NULL)
    return NULL_TREE;

  for (list = 0; list < OMP_LIST_NUM; list++)
    {
      gfc_namelist *n = clauses->lists[list];

      if (n == NULL)
	continue;
      if (list >= OMP_LIST_REDUCTION_FIRST
	  && list <= OMP_LIST_REDUCTION_LAST)
	{
	  enum tree_code reduction_code;
	  switch (list)
	    {
	    case OMP_LIST_PLUS:
	      reduction_code = PLUS_EXPR;
	      break;
	    case OMP_LIST_MULT:
	      reduction_code = MULT_EXPR;
	      break;
	    case OMP_LIST_SUB:
	      reduction_code = MINUS_EXPR;
	      break;
	    case OMP_LIST_AND:
	      reduction_code = TRUTH_ANDIF_EXPR;
	      break;
	    case OMP_LIST_OR:
	      reduction_code = TRUTH_ORIF_EXPR;
	      break;
	    case OMP_LIST_EQV:
	      reduction_code = EQ_EXPR;
	      break;
	    case OMP_LIST_NEQV:
	      reduction_code = NE_EXPR;
	      break;
	    case OMP_LIST_MAX:
	      reduction_code = MAX_EXPR;
	      break;
	    case OMP_LIST_MIN:
	      reduction_code = MIN_EXPR;
	      break;
	    case OMP_LIST_IAND:
	      reduction_code = BIT_AND_EXPR;
	      break;
	    case OMP_LIST_IOR:
	      reduction_code = BIT_IOR_EXPR;
	      break;
	    case OMP_LIST_IEOR:
	      reduction_code = BIT_XOR_EXPR;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  omp_clauses
	    = gfc_trans_omp_reduction_list (n, omp_clauses, reduction_code,
					    where);
	  continue;
	}
      switch (list)
	{
	case OMP_LIST_PRIVATE:
	  clause_code = OMP_CLAUSE_PRIVATE;
	  goto add_clause;
	case OMP_LIST_SHARED:
	  clause_code = OMP_CLAUSE_SHARED;
	  goto add_clause;
	case OMP_LIST_FIRSTPRIVATE:
	  clause_code = OMP_CLAUSE_FIRSTPRIVATE;
	  goto add_clause;
	case OMP_LIST_LASTPRIVATE:
	  clause_code = OMP_CLAUSE_LASTPRIVATE;
	  goto add_clause;
	case OMP_LIST_COPYIN:
	  clause_code = OMP_CLAUSE_COPYIN;
	  goto add_clause;
	case OMP_LIST_COPYPRIVATE:
	  clause_code = OMP_CLAUSE_COPYPRIVATE;
	  /* FALLTHROUGH */
	add_clause:
	  omp_clauses
	    = gfc_trans_omp_variable_list (clause_code, n, omp_clauses);
	  break;
	default:
	  break;
	}
    }

  if (clauses->if_expr)
    {
      tree if_var;

      gfc_init_se (&se, NULL);
      gfc_conv_expr (&se, clauses->if_expr);
      gfc_add_block_to_block (block, &se.pre);
      if_var = gfc_evaluate_now (se.expr, block);
      gfc_add_block_to_block (block, &se.post);

      c = build_omp_clause (where.lb->location, OMP_CLAUSE_IF);
      OMP_CLAUSE_IF_EXPR (c) = if_var;
      omp_clauses = gfc_trans_add_clause (c, omp_clauses);
    }

  if (clauses->num_threads)
    {
      tree num_threads;

      gfc_init_se (&se, NULL);
      gfc_conv_expr (&se, clauses->num_threads);
      gfc_add_block_to_block (block, &se.pre);
      num_threads = gfc_evaluate_now (se.expr, block);
      gfc_add_block_to_block (block, &se.post);

      c = build_omp_clause (where.lb->location, OMP_CLAUSE_NUM_THREADS);
      OMP_CLAUSE_NUM_THREADS_EXPR (c) = num_threads;
      omp_clauses = gfc_trans_add_clause (c, omp_clauses);
    }

  chunk_size = NULL_TREE;
  if (clauses->chunk_size)
    {
      gfc_init_se (&se, NULL);
      gfc_conv_expr (&se, clauses->chunk_size);
      gfc_add_block_to_block (block, &se.pre);
      chunk_size = gfc_evaluate_now (se.expr, block);
      gfc_add_block_to_block (block, &se.post);
    }

  if (clauses->sched_kind != OMP_SCHED_NONE)
    {
      c = build_omp_clause (where.lb->location, OMP_CLAUSE_SCHEDULE);
      OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = chunk_size;
      switch (clauses->sched_kind)
	{
	case OMP_SCHED_STATIC:
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC;
	  break;
	case OMP_SCHED_DYNAMIC:
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
	  break;
	case OMP_SCHED_GUIDED:
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED;
	  break;
	case OMP_SCHED_RUNTIME:
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME;
	  break;
	case OMP_SCHED_AUTO:
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO;
	  break;
	default:
	  gcc_unreachable ();
	}
      omp_clauses = gfc_trans_add_clause (c, omp_clauses);
    }

  if (clauses->default_sharing != OMP_DEFAULT_UNKNOWN)
    {
      c = build_omp_clause (where.lb->location, OMP_CLAUSE_DEFAULT);
      switch (clauses->default_sharing)
	{
	case OMP_DEFAULT_NONE:
	  OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_NONE;
	  break;
	case OMP_DEFAULT_SHARED:
	  OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_SHARED;
	  break;
	case OMP_DEFAULT_PRIVATE:
	  OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_PRIVATE;
	  break;
	case OMP_DEFAULT_FIRSTPRIVATE:
	  OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_FIRSTPRIVATE;
	  break;
	default:
	  gcc_unreachable ();
	}
      omp_clauses = gfc_trans_add_clause (c, omp_clauses);
    }

  if (clauses->nowait)
    {
      c = build_omp_clause (where.lb->location, OMP_CLAUSE_NOWAIT);
      omp_clauses = gfc_trans_add_clause (c, omp_clauses);
    }

  if (clauses->ordered)
    {
      c = build_omp_clause (where.lb->location, OMP_CLAUSE_ORDERED);
      omp_clauses = gfc_trans_add_clause (c, omp_clauses);
    }

  if (clauses->untied)
    {
      c = build_omp_clause (where.lb->location, OMP_CLAUSE_UNTIED);
      omp_clauses = gfc_trans_add_clause (c, omp_clauses);
    }

  if (clauses->collapse)
    {
      c = build_omp_clause (where.lb->location, OMP_CLAUSE_COLLAPSE);
      OMP_CLAUSE_COLLAPSE_EXPR (c) = build_int_cst (NULL, clauses->collapse);
      omp_clauses = gfc_trans_add_clause (c, omp_clauses);
    }

  return omp_clauses;
}