tree
c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
{
  tree c;
  vec<tree> clvec = vNULL;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
          && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
          && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
        {
          tree decl = OMP_CLAUSE_DECL (c);
          tree arg;
          int idx;
          for (arg = parms, idx = 0; arg;
               arg = TREE_CHAIN (arg), idx++)
            if (arg == decl)
              break;
          if (arg == NULL_TREE)
            {
              error_at (OMP_CLAUSE_LOCATION (c),
                        "%qD is not a function argument", decl);
              continue;
            }
          OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
              && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
            {
              decl = OMP_CLAUSE_LINEAR_STEP (c);
              for (arg = parms, idx = 0; arg;
                   arg = TREE_CHAIN (arg), idx++)
                if (arg == decl)
                  break;
              if (arg == NULL_TREE)
                {
                  error_at (OMP_CLAUSE_LOCATION (c),
                            "%qD is not a function argument", decl);
                  continue;
                }
              OMP_CLAUSE_LINEAR_STEP (c)
                = build_int_cst (integer_type_node, idx);
            }
        }
      clvec.safe_push (c);
    }
  if (!clvec.is_empty ())
    {
      unsigned int len = clvec.length (), i;
      clvec.qsort (c_omp_declare_simd_clause_cmp);
      clauses = clvec[0];
      for (i = 0; i < len; i++)
        OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
    }
  else
    clauses = NULL_TREE;
  clvec.release ();
  return clauses;
}
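/* Illustrative sketch (not GCC API): the sort-then-relink pattern used above,
   shown on a plain singly linked list.  struct node, by_key and sort_list are
   hypothetical names.  Pointers to the nodes are collected into an array, the
   array is sorted with qsort, and the chain links are rewritten to follow the
   sorted order, exactly as the clause chain is relinked from clvec above.  */

#include <stdio.h>
#include <stdlib.h>

struct node { int key; struct node *next; };

static int
by_key (const void *a, const void *b)
{
  const struct node *x = *(const struct node *const *) a;
  const struct node *y = *(const struct node *const *) b;
  return (x->key > y->key) - (x->key < y->key);
}

static struct node *
sort_list (struct node *head, size_t len)
{
  if (len == 0)
    return NULL;
  struct node **vec = malloc (len * sizeof *vec);
  size_t i = 0;
  for (struct node *p = head; p; p = p->next)
    vec[i++] = p;
  qsort (vec, len, sizeof *vec, by_key);
  /* Relink the list in sorted order, terminating the last node.  */
  for (i = 0; i < len; i++)
    vec[i]->next = (i < len - 1) ? vec[i + 1] : NULL;
  head = vec[0];
  free (vec);
  return head;
}

int
main (void)
{
  struct node n2 = { 2, NULL }, n0 = { 0, &n2 }, n1 = { 1, &n0 };
  struct node *head = sort_list (&n1, 3);   /* 1 -> 0 -> 2 becomes 0 -> 1 -> 2 */
  for (struct node *p = head; p; p = p->next)
    printf ("%d\n", p->key);
  return 0;
}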
static void
pack_ts_omp_clause_value_fields (struct output_block *ob,
                                 struct bitpack_d *bp, tree expr)
{
  stream_output_location (ob, bp, OMP_CLAUSE_LOCATION (expr));
  switch (OMP_CLAUSE_CODE (expr))
    {
    case OMP_CLAUSE_DEFAULT:
      bp_pack_enum (bp, omp_clause_default_kind, OMP_CLAUSE_DEFAULT_LAST,
                    OMP_CLAUSE_DEFAULT_KIND (expr));
      break;
    case OMP_CLAUSE_SCHEDULE:
      bp_pack_enum (bp, omp_clause_schedule_kind, OMP_CLAUSE_SCHEDULE_LAST,
                    OMP_CLAUSE_SCHEDULE_KIND (expr));
      break;
    case OMP_CLAUSE_DEPEND:
      bp_pack_enum (bp, omp_clause_depend_kind, OMP_CLAUSE_DEPEND_LAST,
                    OMP_CLAUSE_DEPEND_KIND (expr));
      break;
    case OMP_CLAUSE_MAP:
      bp_pack_enum (bp, gomp_map_kind, GOMP_MAP_LAST,
                    OMP_CLAUSE_MAP_KIND (expr));
      break;
    case OMP_CLAUSE_PROC_BIND:
      bp_pack_enum (bp, omp_clause_proc_bind_kind, OMP_CLAUSE_PROC_BIND_LAST,
                    OMP_CLAUSE_PROC_BIND_KIND (expr));
      break;
    case OMP_CLAUSE_REDUCTION:
      bp_pack_enum (bp, tree_code, MAX_TREE_CODES,
                    OMP_CLAUSE_REDUCTION_CODE (expr));
      break;
    default:
      break;
    }
}
tree
oacc_build_routine_dims (tree clauses)
{
  /* Must match GOMP_DIM ordering.  */
  static const omp_clause_code ids[]
    = {OMP_CLAUSE_GANG, OMP_CLAUSE_WORKER, OMP_CLAUSE_VECTOR, OMP_CLAUSE_SEQ};
  int ix;
  int level = -1;

  for (; clauses; clauses = OMP_CLAUSE_CHAIN (clauses))
    for (ix = GOMP_DIM_MAX + 1; ix--;)
      if (OMP_CLAUSE_CODE (clauses) == ids[ix])
        {
          if (level >= 0)
            error_at (OMP_CLAUSE_LOCATION (clauses),
                      "multiple loop axes specified for routine");
          level = ix;
          break;
        }

  /* Default to SEQ.  */
  if (level < 0)
    level = GOMP_DIM_MAX;

  tree dims = NULL_TREE;

  for (ix = GOMP_DIM_MAX; ix--;)
    dims = tree_cons (build_int_cst (boolean_type_node, ix >= level),
                      build_int_cst (integer_type_node, ix < level), dims);

  return dims;
}
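/* Illustrative sketch (not GCC code): how the "level" chosen above expands
   into one flag pair per dimension.  DIM_MAX, struct dim_flag and routine_dims
   are hypothetical names.  In the real function the pairs are built
   back-to-front with tree_cons so the list comes out in GOMP_DIM order, the
   TREE_PURPOSE operand holding ix >= level and the TREE_VALUE operand holding
   ix < level for each dimension ix.  */

#include <stdio.h>

#define DIM_MAX 3   /* gang, worker, vector */

struct dim_flag { int purpose; int value; };

static void
routine_dims (int level, struct dim_flag dims[DIM_MAX])
{
  for (int ix = DIM_MAX; ix--;)
    {
      dims[ix].purpose = ix >= level;   /* the boolean operand above */
      dims[ix].value = ix < level;      /* the integer operand above */
    }
}

int
main (void)
{
  static const char *const names[DIM_MAX] = { "gang", "worker", "vector" };
  struct dim_flag dims[DIM_MAX];
  routine_dims (1, dims);       /* e.g. a routine at the "worker" level */
  for (int ix = 0; ix < DIM_MAX; ix++)
    printf ("%s: purpose=%d value=%d\n", names[ix],
            dims[ix].purpose, dims[ix].value);
  return 0;
}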
tree
c_finish_cilk_clauses (tree clauses)
{
  for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      tree prev = clauses;

      /* If a variable appears in a linear clause it cannot appear in
         any other OMP clause.  */
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
        for (tree c2 = clauses; c2; c2 = OMP_CLAUSE_CHAIN (c2))
          {
            if (c == c2)
              continue;
            enum omp_clause_code code = OMP_CLAUSE_CODE (c2);

            switch (code)
              {
              case OMP_CLAUSE_LINEAR:
              case OMP_CLAUSE_PRIVATE:
              case OMP_CLAUSE_FIRSTPRIVATE:
              case OMP_CLAUSE_LASTPRIVATE:
              case OMP_CLAUSE_REDUCTION:
                break;

              case OMP_CLAUSE_SAFELEN:
                goto next;

              default:
                gcc_unreachable ();
              }

            if (OMP_CLAUSE_DECL (c) == OMP_CLAUSE_DECL (c2))
              {
                error_at (OMP_CLAUSE_LOCATION (c2),
                          "variable appears in more than one clause");
                inform (OMP_CLAUSE_LOCATION (c),
                        "other clause defined here");
                /* Remove problematic clauses.  */
                OMP_CLAUSE_CHAIN (prev) = OMP_CLAUSE_CHAIN (c2);
              }
          next:
            prev = c2;
          }
    }
  return clauses;
}
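/* Illustrative sketch (not GCC code): the trailing prev-pointer removal
   pattern used above, on a plain singly linked list.  struct node and
   remove_matching are hypothetical names.  prev trails the scan cursor, so
   splicing out the current node is a single pointer rewrite, and prev is not
   advanced when a node has just been removed.  */

#include <stdio.h>

struct node { int key; struct node *next; };

static struct node *
remove_matching (struct node *head, int key)
{
  struct node *prev = NULL;
  for (struct node *p = head; p; p = p->next)
    {
      if (p->key == key)
        {
          if (prev)
            prev->next = p->next;   /* splice out of the middle */
          else
            head = p->next;         /* removing the current head */
          continue;                 /* prev stays where it was */
        }
      prev = p;
    }
  return head;
}

int
main (void)
{
  struct node c = { 3, NULL }, b = { 2, &c }, a = { 2, &b };
  struct node *head = remove_matching (&a, 2);  /* drops both 2s */
  printf ("%d\n", head->key);                   /* prints 3 */
  return 0;
}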
static void
unpack_ts_omp_clause_value_fields (struct data_in *data_in,
                                   struct bitpack_d *bp, tree expr)
{
  stream_input_location (&OMP_CLAUSE_LOCATION (expr), bp, data_in);
  switch (OMP_CLAUSE_CODE (expr))
    {
    case OMP_CLAUSE_DEFAULT:
      OMP_CLAUSE_DEFAULT_KIND (expr)
        = bp_unpack_enum (bp, omp_clause_default_kind,
                          OMP_CLAUSE_DEFAULT_LAST);
      break;
    case OMP_CLAUSE_SCHEDULE:
      OMP_CLAUSE_SCHEDULE_KIND (expr)
        = bp_unpack_enum (bp, omp_clause_schedule_kind,
                          OMP_CLAUSE_SCHEDULE_LAST);
      break;
    case OMP_CLAUSE_DEPEND:
      OMP_CLAUSE_DEPEND_KIND (expr)
        = bp_unpack_enum (bp, omp_clause_depend_kind,
                          OMP_CLAUSE_DEPEND_LAST);
      break;
    case OMP_CLAUSE_MAP:
      OMP_CLAUSE_SET_MAP_KIND (expr, bp_unpack_enum (bp, gomp_map_kind,
                                                     GOMP_MAP_LAST));
      break;
    case OMP_CLAUSE_PROC_BIND:
      OMP_CLAUSE_PROC_BIND_KIND (expr)
        = bp_unpack_enum (bp, omp_clause_proc_bind_kind,
                          OMP_CLAUSE_PROC_BIND_LAST);
      break;
    case OMP_CLAUSE_REDUCTION:
      OMP_CLAUSE_REDUCTION_CODE (expr)
        = bp_unpack_enum (bp, tree_code, MAX_TREE_CODES);
      break;
    default:
      break;
    }
}
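/* Illustrative sketch (not the GCC bitpack API): the key property of the
   pack/unpack pair above is symmetry -- every value the packer writes, in a
   given order and with a given width, the unpacker reads back in the same
   order and width.  struct bitbuf, put_bits and get_bits are hypothetical
   names; values are written LSB-first into a byte array.  */

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct bitbuf { uint8_t bytes[16]; unsigned pos; };

static void
put_bits (struct bitbuf *b, unsigned val, unsigned nbits)
{
  for (unsigned i = 0; i < nbits; i++, b->pos++)
    if ((val >> i) & 1)
      b->bytes[b->pos / 8] |= (uint8_t) (1u << (b->pos % 8));
}

static unsigned
get_bits (struct bitbuf *b, unsigned nbits)
{
  unsigned val = 0;
  for (unsigned i = 0; i < nbits; i++, b->pos++)
    val |= ((b->bytes[b->pos / 8] >> (b->pos % 8)) & 1u) << i;
  return val;
}

int
main (void)
{
  enum sched_kind { SCHED_STATIC, SCHED_DYNAMIC, SCHED_GUIDED, SCHED_LAST };
  struct bitbuf b;
  memset (&b, 0, sizeof b);
  put_bits (&b, SCHED_GUIDED, 2);   /* pack: 2 bits suffice up to SCHED_LAST */
  put_bits (&b, 1, 1);              /* pack a flag after it */
  b.pos = 0;                        /* rewind and unpack in the same order */
  assert (get_bits (&b, 2) == SCHED_GUIDED);
  assert (get_bits (&b, 1) == 1);
  return 0;
}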
void
c_omp_split_clauses (location_t loc, enum tree_code code,
                     omp_clause_mask mask, tree clauses, tree *cclauses)
{
  tree next, c;
  enum c_omp_clause_split s;
  int i;

  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    cclauses[i] = NULL;
  /* Add implicit nowait clause on
     #pragma omp parallel {for,for simd,sections}.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    switch (code)
      {
      case OMP_FOR:
      case OMP_SIMD:
        cclauses[C_OMP_CLAUSE_SPLIT_FOR]
          = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
        break;
      case OMP_SECTIONS:
        cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
          = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
        break;
      default:
        break;
      }

  for (; clauses; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
        {
        /* First the clauses that are unique to some constructs.  */
        case OMP_CLAUSE_DEVICE:
        case OMP_CLAUSE_MAP:
        case OMP_CLAUSE_IS_DEVICE_PTR:
        case OMP_CLAUSE_DEFAULTMAP:
          s = C_OMP_CLAUSE_SPLIT_TARGET;
          break;
        case OMP_CLAUSE_NUM_TEAMS:
        case OMP_CLAUSE_THREAD_LIMIT:
          s = C_OMP_CLAUSE_SPLIT_TEAMS;
          break;
        case OMP_CLAUSE_DIST_SCHEDULE:
          s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
          break;
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_PROC_BIND:
          s = C_OMP_CLAUSE_SPLIT_PARALLEL;
          break;
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_NOWAIT:
          s = C_OMP_CLAUSE_SPLIT_FOR;
          break;
        case OMP_CLAUSE_SCHEDULE:
          s = C_OMP_CLAUSE_SPLIT_FOR;
          if (code != OMP_SIMD)
            OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0;
          break;
        case OMP_CLAUSE_SAFELEN:
        case OMP_CLAUSE_SIMDLEN:
        case OMP_CLAUSE_ALIGNED:
          s = C_OMP_CLAUSE_SPLIT_SIMD;
          break;
        case OMP_CLAUSE_GRAINSIZE:
        case OMP_CLAUSE_NUM_TASKS:
        case OMP_CLAUSE_FINAL:
        case OMP_CLAUSE_UNTIED:
        case OMP_CLAUSE_MERGEABLE:
        case OMP_CLAUSE_NOGROUP:
        case OMP_CLAUSE_PRIORITY:
          s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
          break;
        /* Duplicate this to all of taskloop, distribute, for and simd.  */
        case OMP_CLAUSE_COLLAPSE:
          if (code == OMP_SIMD)
            {
              if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
                           | (OMP_CLAUSE_MASK_1
                              << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
                           | (OMP_CLAUSE_MASK_1
                              << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_COLLAPSE);
                  OMP_CLAUSE_COLLAPSE_EXPR (c)
                    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
                  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
                  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
                }
              else
                {
                  /* This must be #pragma omp target simd.  */
                  s = C_OMP_CLAUSE_SPLIT_SIMD;
                  break;
                }
            }
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
            {
              if ((mask & (OMP_CLAUSE_MASK_1
                           << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_COLLAPSE);
                  OMP_CLAUSE_COLLAPSE_EXPR (c)
                    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
                  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
                  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
                  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
                }
              else
                s = C_OMP_CLAUSE_SPLIT_FOR;
            }
          else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
                   != 0)
            s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
          else
            s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
          break;
        /* Private clause is supported on all constructs, it is enough to
           put it on the innermost one.  For #pragma omp {for,sections}
           put it on parallel though, as that's what we did for
           OpenMP 3.1.  */
        case OMP_CLAUSE_PRIVATE:
          switch (code)
            {
            case OMP_SIMD:
              s = C_OMP_CLAUSE_SPLIT_SIMD;
              break;
            case OMP_FOR:
            case OMP_SECTIONS:
            case OMP_PARALLEL:
              s = C_OMP_CLAUSE_SPLIT_PARALLEL;
              break;
            case OMP_DISTRIBUTE:
              s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
              break;
            case OMP_TEAMS:
              s = C_OMP_CLAUSE_SPLIT_TEAMS;
              break;
            default:
              gcc_unreachable ();
            }
          break;
        /* Firstprivate clause is supported on all constructs but simd.
           Put it on the outermost of those and duplicate on teams and
           parallel.  */
        case OMP_CLAUSE_FIRSTPRIVATE:
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0)
            {
              if (code == OMP_SIMD
                  && (mask & ((OMP_CLAUSE_MASK_1
                               << PRAGMA_OMP_CLAUSE_NUM_THREADS)
                              | (OMP_CLAUSE_MASK_1
                                 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0)
                {
                  /* This must be #pragma omp target simd.  */
                  s = C_OMP_CLAUSE_SPLIT_TARGET;
                  break;
                }
              c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                    OMP_CLAUSE_FIRSTPRIVATE);
              OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
              OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
              cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
            }
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
              != 0)
            {
              if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
                           | (OMP_CLAUSE_MASK_1
                              << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_FIRSTPRIVATE);
                  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
                  OMP_CLAUSE_CHAIN (c)
                    = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
                  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
                  if ((mask & (OMP_CLAUSE_MASK_1
                               << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
                    s = C_OMP_CLAUSE_SPLIT_TEAMS;
                  else
                    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
                }
              else
                /* This must be
                   #pragma omp parallel{, for{, simd}, sections}
                   or #pragma omp target parallel.  */
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
            }
          else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
                   != 0)
            {
              /* This must be one of
                 #pragma omp {,target }teams distribute
                 #pragma omp target teams
                 #pragma omp {,target }teams distribute simd.  */
              gcc_assert (code == OMP_DISTRIBUTE
                          || code == OMP_TEAMS
                          || code == OMP_SIMD);
              s = C_OMP_CLAUSE_SPLIT_TEAMS;
            }
          else if ((mask & (OMP_CLAUSE_MASK_1
                            << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
            {
              /* This must be #pragma omp distribute simd.  */
              gcc_assert (code == OMP_SIMD);
              s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
            }
          else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
                   != 0)
            {
              /* This must be #pragma omp taskloop simd.  */
              gcc_assert (code == OMP_SIMD);
              s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
            }
          else
            {
              /* This must be #pragma omp for simd.  */
              gcc_assert (code == OMP_SIMD);
              s = C_OMP_CLAUSE_SPLIT_FOR;
            }
          break;
        /* Lastprivate is allowed on for, sections and simd.  In
           parallel {for{, simd},sections} we actually want to put it on
           parallel rather than for or sections.  */
        case OMP_CLAUSE_LASTPRIVATE:
          if (code == OMP_FOR || code == OMP_SECTIONS)
            {
              if ((mask & (OMP_CLAUSE_MASK_1
                           << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
              else
                s = C_OMP_CLAUSE_SPLIT_FOR;
              break;
            }
          gcc_assert (code == OMP_SIMD);
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
            {
              c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                    OMP_CLAUSE_LASTPRIVATE);
              OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
              if ((mask & (OMP_CLAUSE_MASK_1
                           << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
              else
                s = C_OMP_CLAUSE_SPLIT_FOR;
              OMP_CLAUSE_CHAIN (c) = cclauses[s];
              cclauses[s] = c;
            }
          s = C_OMP_CLAUSE_SPLIT_SIMD;
          break;
        /* Shared and default clauses are allowed on parallel, teams and
           taskloop.  */
        case OMP_CLAUSE_SHARED:
        case OMP_CLAUSE_DEFAULT:
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
            {
              s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
              break;
            }
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
            {
              if ((mask & (OMP_CLAUSE_MASK_1
                           << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
                {
                  s = C_OMP_CLAUSE_SPLIT_TEAMS;
                  break;
                }
              c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                    OMP_CLAUSE_CODE (clauses));
              if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
                OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
              else
                OMP_CLAUSE_DEFAULT_KIND (c)
                  = OMP_CLAUSE_DEFAULT_KIND (clauses);
              OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
              cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
            }
          s = C_OMP_CLAUSE_SPLIT_PARALLEL;
          break;
        /* Reduction is allowed on simd, for, parallel, sections and teams.
           Duplicate it on all of them, but omit on for or sections if
           parallel is present.  */
        case OMP_CLAUSE_REDUCTION:
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
            {
              if (code == OMP_SIMD)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_REDUCTION);
                  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
                  OMP_CLAUSE_REDUCTION_CODE (c)
                    = OMP_CLAUSE_REDUCTION_CODE (clauses);
                  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
                    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
                  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
                    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
                  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
                  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
                }
              if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
                  != 0)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_REDUCTION);
                  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
                  OMP_CLAUSE_REDUCTION_CODE (c)
                    = OMP_CLAUSE_REDUCTION_CODE (clauses);
                  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
                    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
                  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
                    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
                  OMP_CLAUSE_CHAIN (c)
                    = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
                  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
                  s = C_OMP_CLAUSE_SPLIT_TEAMS;
                }
              else if ((mask & (OMP_CLAUSE_MASK_1
                                << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
              else
                s = C_OMP_CLAUSE_SPLIT_FOR;
            }
          else if (code == OMP_SECTIONS || code == OMP_PARALLEL)
            s = C_OMP_CLAUSE_SPLIT_PARALLEL;
          else if (code == OMP_SIMD)
            s = C_OMP_CLAUSE_SPLIT_SIMD;
          else
            s = C_OMP_CLAUSE_SPLIT_TEAMS;
          break;
        case OMP_CLAUSE_IF:
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
            s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
          else if ((mask & (OMP_CLAUSE_MASK_1
                            << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
            {
              if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0)
                {
                  if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_PARALLEL)
                    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
                  else if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_TARGET)
                    s = C_OMP_CLAUSE_SPLIT_TARGET;
                  else if (OMP_CLAUSE_IF_MODIFIER (clauses) == ERROR_MARK)
                    {
                      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                            OMP_CLAUSE_IF);
                      OMP_CLAUSE_IF_MODIFIER (c)
                        = OMP_CLAUSE_IF_MODIFIER (clauses);
                      OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
                      OMP_CLAUSE_CHAIN (c)
                        = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
                      cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
                      s = C_OMP_CLAUSE_SPLIT_PARALLEL;
                    }
                  else
                    {
                      error_at (OMP_CLAUSE_LOCATION (clauses),
                                "expected %<parallel%> or %<target%> %<if%> "
                                "clause modifier");
                      continue;
                    }
                }
              else
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
            }
          else
            s = C_OMP_CLAUSE_SPLIT_TARGET;
          break;
        /* Linear clause is allowed on simd and for.  Put it on the
           innermost construct.  */
        case OMP_CLAUSE_LINEAR:
          if (code == OMP_SIMD)
            s = C_OMP_CLAUSE_SPLIT_SIMD;
          else
            s = C_OMP_CLAUSE_SPLIT_FOR;
          break;
        default:
          gcc_unreachable ();
        }
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }
#ifdef ENABLE_CHECKING
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
  if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
               | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0
      && code != OMP_SECTIONS)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE);
  if (code != OMP_SIMD)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE);
#endif
}
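/* Illustrative sketch (not GCC code): the bucket-splitting pattern used by
   c_omp_split_clauses, on a plain singly linked list.  struct clause,
   split_kind, bucket_for and split_clauses are hypothetical names.  Each node
   is detached from the combined chain and pushed onto the list for the
   construct that accepts it; the real function switches on the OMP_CLAUSE_*
   code and consults the construct mask instead of the toy bucket_for below.  */

#include <stddef.h>

enum split_kind { SPLIT_TARGET, SPLIT_TEAMS, SPLIT_PARALLEL, SPLIT_FOR,
                  SPLIT_SIMD, SPLIT_COUNT };

struct clause { int code; struct clause *next; };

/* Map a toy clause code to its bucket.  */
static enum split_kind
bucket_for (int code)
{
  switch (code)
    {
    case 0: return SPLIT_TARGET;      /* e.g. map */
    case 1: return SPLIT_PARALLEL;    /* e.g. num_threads */
    case 2: return SPLIT_FOR;         /* e.g. schedule */
    default: return SPLIT_SIMD;       /* e.g. safelen */
    }
}

static void
split_clauses (struct clause *clauses, struct clause *out[SPLIT_COUNT])
{
  for (int i = 0; i < SPLIT_COUNT; i++)
    out[i] = NULL;
  for (struct clause *next; clauses; clauses = next)
    {
      next = clauses->next;
      enum split_kind s = bucket_for (clauses->code);
      clauses->next = out[s];   /* push onto the chosen bucket */
      out[s] = clauses;
    }
}

int
main (void)
{
  struct clause c2 = { 2, NULL }, c1 = { 1, &c2 }, c0 = { 0, &c1 };
  struct clause *out[SPLIT_COUNT];
  split_clauses (&c0, out);
  /* out[SPLIT_TARGET] is &c0, out[SPLIT_PARALLEL] is &c1,
     out[SPLIT_FOR] is &c2.  */
  return 0;
}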
void
c_omp_split_clauses (location_t loc, enum tree_code code,
                     omp_clause_mask mask, tree clauses, tree *cclauses)
{
  tree next, c;
  enum c_omp_clause_split s;
  int i;

  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    cclauses[i] = NULL;
  /* Add implicit nowait clause on
     #pragma omp parallel {for,for simd,sections}.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    switch (code)
      {
      case OMP_FOR:
      case OMP_SIMD:
        cclauses[C_OMP_CLAUSE_SPLIT_FOR]
          = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
        break;
      case OMP_SECTIONS:
        cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
          = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
        break;
      default:
        break;
      }

  for (; clauses; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
        {
        /* First the clauses that are unique to some constructs.  */
        case OMP_CLAUSE_DEVICE:
        case OMP_CLAUSE_MAP:
          s = C_OMP_CLAUSE_SPLIT_TARGET;
          break;
        case OMP_CLAUSE_NUM_TEAMS:
        case OMP_CLAUSE_THREAD_LIMIT:
          s = C_OMP_CLAUSE_SPLIT_TEAMS;
          break;
        case OMP_CLAUSE_DIST_SCHEDULE:
          s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
          break;
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_PROC_BIND:
          s = C_OMP_CLAUSE_SPLIT_PARALLEL;
          break;
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_SCHEDULE:
        case OMP_CLAUSE_NOWAIT:
          s = C_OMP_CLAUSE_SPLIT_FOR;
          break;
        case OMP_CLAUSE_SAFELEN:
        case OMP_CLAUSE_LINEAR:
        case OMP_CLAUSE_ALIGNED:
          s = C_OMP_CLAUSE_SPLIT_SIMD;
          break;
        /* Duplicate this to all of distribute, for and simd.  */
        case OMP_CLAUSE_COLLAPSE:
          if (code == OMP_SIMD)
            {
              c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                    OMP_CLAUSE_COLLAPSE);
              OMP_CLAUSE_COLLAPSE_EXPR (c)
                = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
              OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
              cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
            }
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
            {
              if ((mask & (OMP_CLAUSE_MASK_1
                           << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_COLLAPSE);
                  OMP_CLAUSE_COLLAPSE_EXPR (c)
                    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
                  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
                  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
                  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
                }
              else
                s = C_OMP_CLAUSE_SPLIT_FOR;
            }
          else
            s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
          break;
        /* Private clause is supported on all constructs but target, it is
           enough to put it on the innermost one.  For
           #pragma omp {for,sections} put it on parallel though, as that's
           what we did for OpenMP 3.1.  */
        case OMP_CLAUSE_PRIVATE:
          switch (code)
            {
            case OMP_SIMD:
              s = C_OMP_CLAUSE_SPLIT_SIMD;
              break;
            case OMP_FOR:
            case OMP_SECTIONS:
            case OMP_PARALLEL:
              s = C_OMP_CLAUSE_SPLIT_PARALLEL;
              break;
            case OMP_DISTRIBUTE:
              s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
              break;
            case OMP_TEAMS:
              s = C_OMP_CLAUSE_SPLIT_TEAMS;
              break;
            default:
              gcc_unreachable ();
            }
          break;
        /* Firstprivate clause is supported on all constructs but target
           and simd.  Put it on the outermost of those and duplicate on
           parallel.  */
        case OMP_CLAUSE_FIRSTPRIVATE:
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
              != 0)
            {
              if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
                           | (OMP_CLAUSE_MASK_1
                              << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_FIRSTPRIVATE);
                  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
                  OMP_CLAUSE_CHAIN (c)
                    = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
                  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
                  if ((mask & (OMP_CLAUSE_MASK_1
                               << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
                    s = C_OMP_CLAUSE_SPLIT_TEAMS;
                  else
                    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
                }
              else
                /* This must be
                   #pragma omp parallel{, for{, simd}, sections}.  */
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
            }
          else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
                   != 0)
            {
              /* This must be #pragma omp {,target }teams distribute.  */
              gcc_assert (code == OMP_DISTRIBUTE);
              s = C_OMP_CLAUSE_SPLIT_TEAMS;
            }
          else if ((mask & (OMP_CLAUSE_MASK_1
                            << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
            {
              /* This must be #pragma omp distribute simd.  */
              gcc_assert (code == OMP_SIMD);
              s = C_OMP_CLAUSE_SPLIT_TEAMS;
            }
          else
            {
              /* This must be #pragma omp for simd.  */
              gcc_assert (code == OMP_SIMD);
              s = C_OMP_CLAUSE_SPLIT_FOR;
            }
          break;
        /* Lastprivate is allowed on for, sections and simd.  In
           parallel {for{, simd},sections} we actually want to put it on
           parallel rather than for or sections.  */
        case OMP_CLAUSE_LASTPRIVATE:
          if (code == OMP_FOR || code == OMP_SECTIONS)
            {
              if ((mask & (OMP_CLAUSE_MASK_1
                           << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
              else
                s = C_OMP_CLAUSE_SPLIT_FOR;
              break;
            }
          gcc_assert (code == OMP_SIMD);
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
            {
              c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                    OMP_CLAUSE_LASTPRIVATE);
              OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
              if ((mask & (OMP_CLAUSE_MASK_1
                           << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
              else
                s = C_OMP_CLAUSE_SPLIT_FOR;
              OMP_CLAUSE_CHAIN (c) = cclauses[s];
              cclauses[s] = c;
            }
          s = C_OMP_CLAUSE_SPLIT_SIMD;
          break;
        /* Shared and default clauses are allowed on parallel and teams.  */
        case OMP_CLAUSE_SHARED:
        case OMP_CLAUSE_DEFAULT:
          if (code == OMP_TEAMS)
            {
              s = C_OMP_CLAUSE_SPLIT_TEAMS;
              break;
            }
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
            {
              c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                    OMP_CLAUSE_CODE (clauses));
              if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
                OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
              else
                OMP_CLAUSE_DEFAULT_KIND (c)
                  = OMP_CLAUSE_DEFAULT_KIND (clauses);
              OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
              cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
            }
          s = C_OMP_CLAUSE_SPLIT_PARALLEL;
          break;
        /* Reduction is allowed on simd, for, parallel, sections and teams.
           Duplicate it on all of them, but omit on for or sections if
           parallel is present.  */
        case OMP_CLAUSE_REDUCTION:
          if (code == OMP_SIMD)
            {
              c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                    OMP_CLAUSE_REDUCTION);
              OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
              OMP_CLAUSE_REDUCTION_CODE (c)
                = OMP_CLAUSE_REDUCTION_CODE (clauses);
              OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
                = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
              OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
              cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
            }
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
            {
              if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
                  != 0)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_REDUCTION);
                  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
                  OMP_CLAUSE_REDUCTION_CODE (c)
                    = OMP_CLAUSE_REDUCTION_CODE (clauses);
                  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
                    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
                  OMP_CLAUSE_CHAIN (c)
                    = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
                  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
                  s = C_OMP_CLAUSE_SPLIT_TEAMS;
                }
              else if ((mask & (OMP_CLAUSE_MASK_1
                                << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
              else
                s = C_OMP_CLAUSE_SPLIT_FOR;
            }
          else if (code == OMP_SECTIONS)
            s = C_OMP_CLAUSE_SPLIT_PARALLEL;
          else
            s = C_OMP_CLAUSE_SPLIT_TEAMS;
          break;
        case OMP_CLAUSE_IF:
          /* FIXME: This is currently being discussed.  */
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
              != 0)
            s = C_OMP_CLAUSE_SPLIT_PARALLEL;
          else
            s = C_OMP_CLAUSE_SPLIT_TARGET;
          break;
        default:
          gcc_unreachable ();
        }
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }
}