-rw-r--r--  gcc/ChangeLog                                 |  12
-rw-r--r--  gcc/c-family/ChangeLog                        |   7
-rw-r--r--  gcc/c-family/c-omp.c                          |   9
-rw-r--r--  gcc/gimplify.c                                |   9
-rw-r--r--  gcc/omp-low.c                                 |  12
-rw-r--r--  gcc/testsuite/ChangeLog                       |   6
-rw-r--r--  gcc/testsuite/c-c++-common/gomp/pr61486-1.c   |  13
-rw-r--r--  gcc/testsuite/c-c++-common/gomp/pr61486-2.c   | 458
8 files changed, 521 insertions(+), 5 deletions(-)
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 47d414190a3..6e8da453ff5 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,15 @@
+2014-06-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/61486
+ * gimplify.c (struct gimplify_omp_ctx): Add distribute field.
+ (gimplify_adjust_omp_clauses): Don't or in GOVD_LASTPRIVATE
+ if outer combined construct is distribute.
+ (gimplify_omp_for): For OMP_DISTRIBUTE set
+ gimplify_omp_ctxp->distribute.
+ * omp-low.c (scan_sharing_clauses) <case OMP_CLAUSE_SHARED>: For
+ GIMPLE_OMP_TEAMS, if decl isn't global in outer context, record
+ mapping into decl map.
+
2014-06-12 Jason Merrill <jason@redhat.com>

* common.opt (fabi-version): Change default to 0.
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index dd91f6b51c1..0348310f094 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,10 @@
+2014-06-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/61486
+ * c-omp.c (c_omp_split_clauses): Don't crash on firstprivate in
+ #pragma omp target teams or
+ #pragma omp {,target }teams distribute simd.
+
2014-06-12 Jason Merrill <jason@redhat.com>

* c.opt (Wabi=, fabi-compat-version): New.
diff --git a/gcc/c-family/c-omp.c b/gcc/c-family/c-omp.c
index dd0a45d968a..6a0e41988a7 100644
--- a/gcc/c-family/c-omp.c
+++ b/gcc/c-family/c-omp.c
@@ -789,8 +789,13 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
!= 0)
{
- /* This must be #pragma omp {,target }teams distribute. */
- gcc_assert (code == OMP_DISTRIBUTE);
+ /* This must be one of
+ #pragma omp {,target }teams distribute
+ #pragma omp target teams
+ #pragma omp {,target }teams distribute simd. */
+ gcc_assert (code == OMP_DISTRIBUTE
+ || code == OMP_TEAMS
+ || code == OMP_SIMD);
s = C_OMP_CLAUSE_SPLIT_TEAMS;
}
else if ((mask & (OMP_CLAUSE_MASK_1
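
Note: the old assert in this branch accepted only OMP_DISTRIBUTE, so clause splitting ICE'd when a firstprivate clause reached the num_teams bucket of #pragma omp target teams or #pragma omp {,target }teams distribute simd. A minimal sketch of such a construct (function and variable names are illustrative, not taken from the testsuite):

  void
  sketch (int q, int *a)
  {
    int i;
    /* firstprivate (q) must be split onto the teams leaf; the old
       assert expected only a combined distribute here.  */
    #pragma omp target teams distribute simd firstprivate (q) map (a[:10])
    for (i = 0; i < 10; i++)
      a[i] = q;
  }
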
diff --git a/gcc/gimplify.c b/gcc/gimplify.c
index 1635b96cd69..10f8ac6d0e0 100644
--- a/gcc/gimplify.c
+++ b/gcc/gimplify.c
@@ -139,6 +139,7 @@ struct gimplify_omp_ctx
enum omp_clause_default_kind default_kind;
enum omp_region_type region_type;
bool combined_loop;
+ bool distribute;
};

static struct gimplify_ctx *gimplify_ctxp;
@@ -6359,7 +6360,11 @@ gimplify_adjust_omp_clauses (tree *list_p)
if (n == NULL
|| (n->value & GOVD_DATA_SHARE_CLASS) == 0)
{
- int flags = GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE;
+ int flags = GOVD_FIRSTPRIVATE;
+ /* #pragma omp distribute does not allow
+ lastprivate clause. */
+ if (!ctx->outer_context->distribute)
+ flags |= GOVD_LASTPRIVATE;
if (n == NULL)
omp_add_variable (ctx->outer_context, decl,
flags | GOVD_SEEN);
@@ -6640,6 +6645,8 @@ gimplify_omp_for (tree *expr_p, gimple_seq *pre_p)
|| TREE_CODE (for_stmt) == CILK_SIMD);
gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (for_stmt), pre_p,
simd ? ORT_SIMD : ORT_WORKSHARE);
+ if (TREE_CODE (for_stmt) == OMP_DISTRIBUTE)
+ gimplify_omp_ctxp->distribute = true;
/* Handle OMP_FOR_INIT. */
for_pre_body = NULL;
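
The new distribute flag feeds the gimplify_adjust_omp_clauses hunk above: OpenMP 4.0 does not allow a lastprivate clause on #pragma omp distribute, so when the enclosing combined construct is a distribute, implicitly determined variables may only get GOVD_FIRSTPRIVATE. A minimal sketch, mirroring the new pr61486-1.c test (names illustrative):

  void
  sketch (int *a)
  {
    int i, j = 0;
    /* i and j are linear on the simd leaf; in the enclosing
       distribute context they are now made firstprivate only,
       never firstprivate+lastprivate.  */
    #pragma omp target teams distribute simd linear (i, j) map (a[:10])
    for (i = 0; i < 10; i++)
      a[i] = j++;
  }
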
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index ddb049d3ea1..67254cc5bdf 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -1509,11 +1509,19 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
break;
case OMP_CLAUSE_SHARED:
+ decl = OMP_CLAUSE_DECL (c);
/* Ignore shared directives in teams construct. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
- break;
+ {
+ /* Global variables don't need to be copied,
+ the receiver side will use them directly. */
+ tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
+ if (is_global_var (odecl))
+ break;
+ insert_decl_map (&ctx->cb, decl, odecl);
+ break;
+ }
gcc_assert (is_taskreg_ctx (ctx));
- decl = OMP_CLAUSE_DECL (c);
gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
|| !is_variable_sized (decl));
/* Global variables don't need to be copied,
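
Previously a shared clause on a teams construct was skipped without recording anything, which left function-local variables unmapped on the receiver side. With this hunk, a decl that is not global in the outer context is entered into the context's decl map, so references in the teams body resolve to the outer variable. A hedged sketch of a clause taking this path (adapted from the new pr61486-2.c test; names illustrative):

  void
  sketch (int n, int r)
  {
    /* n and r are function-local, hence not global in the outer
       context: scan_sharing_clauses now records the decl -> outer
       decl mapping in ctx->cb instead of silently dropping it.  */
    #pragma omp target teams map (tofrom: n, r) shared (n) reduction (+: r)
    {
      r = r + n;
    }
  }
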
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 464c823b80d..497b979b1f5 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,9 @@
+2014-06-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/61486
+ * c-c++-common/gomp/pr61486-1.c: New test.
+ * c-c++-common/gomp/pr61486-2.c: New test.
+
2014-06-10 Alan Lawrence <alan.lawrence@arm.com>

PR target/59843
diff --git a/gcc/testsuite/c-c++-common/gomp/pr61486-1.c b/gcc/testsuite/c-c++-common/gomp/pr61486-1.c
new file mode 100644
index 00000000000..9ada58c8ccf
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/gomp/pr61486-1.c
@@ -0,0 +1,13 @@
+/* PR middle-end/61486 */
+/* { dg-do compile } */
+/* { dg-options "-fopenmp" } */
+
+int
+foo (int *a)
+{
+ int i, j = 0;
+ #pragma omp target teams distribute simd linear(i, j) map(a[:10])
+ for (i = 0; i < 10; i++)
+ a[i] = j++;
+ return i + j;
+}
diff --git a/gcc/testsuite/c-c++-common/gomp/pr61486-2.c b/gcc/testsuite/c-c++-common/gomp/pr61486-2.c
new file mode 100644
index 00000000000..729438101e2
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/gomp/pr61486-2.c
@@ -0,0 +1,458 @@
+/* PR middle-end/61486 */
+/* { dg-do compile } */
+/* { dg-options "-fopenmp" } */
+
+#pragma omp declare target
+void dosomething (int *a, int n, int m);
+#pragma omp end declare target
+
+void
+test (int n, int o, int p, int q, int r, int s, int *pp)
+{
+ int a[o], i, j;
+ #pragma omp target data device (n + 1) if (n != 6) map (tofrom: n, r)
+ {
+ #pragma omp target device (n + 1) if (n != 6) map (from: n) map (alloc: a[2:o-2])
+ dosomething (a, n, 0);
+ #pragma omp target teams device (n + 1) num_teams (n + 4) thread_limit (n * 2) \
+ if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ }
+ #pragma omp target teams distribute device (n + 1) num_teams (n + 4) collapse (2) \
+ if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ thread_limit (n * 2) dist_schedule (static, 4)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ }
+ #pragma omp target teams distribute device (n + 1) num_teams (n + 4) \
+ if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ thread_limit (n * 2) dist_schedule (static, 4)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ }
+ #pragma omp target teams distribute parallel for device (n + 1) num_teams (n + 4) \
+ if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
+ num_threads (n + 4) proc_bind (spread) lastprivate (s) \
+ ordered schedule (static, 8)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ #pragma omp ordered
+ p = q;
+ s = i * 10 + j;
+ }
+ #pragma omp target teams distribute parallel for device (n + 1) num_teams (n + 4) \
+ if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
+ proc_bind (master) lastprivate (s) ordered schedule (static, 8)
+ for (i = 0; i < 10; i++)
+ {
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ }
+ #pragma omp ordered
+ p = q;
+ s = i * 10;
+ }
+ #pragma omp target teams distribute parallel for simd device (n + 1) \
+ if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
+ num_threads (n + 4) proc_bind (spread) lastprivate (s) \
+ schedule (static, 8) num_teams (n + 4) safelen(8)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ a[2+i*10+j] = p + q;
+ s = i * 10 + j;
+ }
+ #pragma omp target teams distribute parallel for simd device (n + 1) \
+ if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
+ proc_bind (master) lastprivate (s) schedule (static, 8) \
+ num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
+ for (i = 0; i < 10; i++)
+ {
+ r = r + 1;
+ p = q;
+ a[2+i] = p + q;
+ s = i * 10;
+ }
+ #pragma omp target teams distribute simd device (n + 1) \
+ if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
+ lastprivate (s) num_teams (n + 4) safelen(8)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ a[2+i*10+j] = p + q;
+ s = i * 10 + j;
+ }
+ #pragma omp target teams distribute simd device (n + 1) \
+ if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ thread_limit (n * 2) dist_schedule (static, 4) lastprivate (s) \
+ num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
+ for (i = 0; i < 10; i++)
+ {
+ r = r + 1;
+ p = q;
+ a[2+i] = p + q;
+ s = i * 10;
+ }
+ #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+ #pragma omp teams num_teams (n + 4) thread_limit (n * 2) default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ }
+ #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+ #pragma omp teams distribute num_teams (n + 4) collapse (2) default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ thread_limit (n * 2) dist_schedule (static, 4)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ }
+ #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+ #pragma omp teams distribute num_teams (n + 4) default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ thread_limit (n * 2) dist_schedule (static, 4)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ }
+ #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+ #pragma omp teams distribute parallel for num_teams (n + 4) if (n != 6) \
+ default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
+ thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
+ num_threads (n + 4) proc_bind (spread) lastprivate (s) \
+ ordered schedule (static, 8)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ #pragma omp ordered
+ p = q;
+ s = i * 10 + j;
+ }
+ #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+ #pragma omp teams distribute parallel for num_teams (n + 4) if (n != 6) \
+ default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
+ thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
+ proc_bind (master) lastprivate (s) ordered schedule (static, 8)
+ for (i = 0; i < 10; i++)
+ {
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ }
+ #pragma omp ordered
+ p = q;
+ s = i * 10;
+ }
+ #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+ #pragma omp teams distribute parallel for simd if (n != 6)default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
+ num_threads (n + 4) proc_bind (spread) lastprivate (s) \
+ schedule (static, 8) num_teams (n + 4) safelen(8)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ a[2+i*10+j] = p + q;
+ s = i * 10 + j;
+ }
+ #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+ #pragma omp teams distribute parallel for simd if (n != 6)default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
+ proc_bind (master) lastprivate (s) schedule (static, 8) \
+ num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
+ for (i = 0; i < 10; i++)
+ {
+ r = r + 1;
+ p = q;
+ a[2+i] = p + q;
+ s = i * 10;
+ }
+ #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+ #pragma omp teams distribute simd default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
+ lastprivate (s) num_teams (n + 4) safelen(8)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ a[2+i*10+j] = p + q;
+ s = i * 10 + j;
+ }
+ #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+ #pragma omp teams distribute simd default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ thread_limit (n * 2) dist_schedule (static, 4) lastprivate (s) \
+ num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
+ for (i = 0; i < 10; i++)
+ {
+ r = r + 1;
+ p = q;
+ a[2+i] = p + q;
+ s = i * 10;
+ }
+ #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
+ num_teams (n + 4) thread_limit (n * 2)default(shared) shared(n) \
+ private (p) reduction (+: r)
+ #pragma omp distribute collapse (2) dist_schedule (static, 4) firstprivate (q)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ }
+ #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
+ num_teams (n + 4) thread_limit (n * 2) shared(n) private(p) reduction (+ : r) \
+ default(shared)
+ #pragma omp distribute dist_schedule (static, 4) firstprivate (q)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ }
+ #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
+ num_teams (n + 4) thread_limit (n * 2)
+ #pragma omp distribute parallel for if (n != 6) \
+ default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
+ collapse (2) dist_schedule (static, 4) \
+ num_threads (n + 4) proc_bind (spread) lastprivate (s) \
+ ordered schedule (static, 8)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ #pragma omp ordered
+ p = q;
+ s = i * 10 + j;
+ }
+ #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
+ num_teams (n + 4) thread_limit (n * 2)
+ #pragma omp distribute parallel for if (n != 6) \
+ default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
+ num_threads (n + 4) dist_schedule (static, 4) \
+ proc_bind (master) lastprivate (s) ordered schedule (static, 8)
+ for (i = 0; i < 10; i++)
+ {
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ }
+ #pragma omp ordered
+ p = q;
+ s = i * 10;
+ }
+ #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
+ num_teams (n + 4) thread_limit (n * 2)
+ #pragma omp distribute parallel for simd if (n != 6)default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ collapse (2) dist_schedule (static, 4) \
+ num_threads (n + 4) proc_bind (spread) lastprivate (s) \
+ schedule (static, 8) safelen(8)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ a[2+i*10+j] = p + q;
+ s = i * 10 + j;
+ }
+ #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
+ num_teams (n + 4) thread_limit (n * 2)
+ #pragma omp distribute parallel for simd if (n != 6)default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ num_threads (n + 4) dist_schedule (static, 4) \
+ proc_bind (master) lastprivate (s) schedule (static, 8) \
+ safelen(16) linear(i:1) aligned (pp:4)
+ for (i = 0; i < 10; i++)
+ {
+ r = r + 1;
+ p = q;
+ a[2+i] = p + q;
+ s = i * 10;
+ }
+ #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
+ num_teams (n + 4) thread_limit (n * 2) default(shared) shared(n) private(p) \
+ reduction(+:r)
+ #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
+ collapse (2) dist_schedule (static, 4) lastprivate (s) safelen(8)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ a[2+i*10+j] = p + q;
+ s = i * 10 + j;
+ }
+ #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
+ num_teams (n + 4) thread_limit (n * 2) default(shared) shared(n) private(p) \
+ reduction(+:r)
+ #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
+ lastprivate (s) dist_schedule (static, 4) safelen(16) linear(i:1) aligned (pp:4)
+ for (i = 0; i < 10; i++)
+ {
+ r = r + 1;
+ p = q;
+ a[2+i] = p + q;
+ s = i * 10;
+ }
+ }
+}
+
+int q, i, j;
+
+void
+test2 (int n, int o, int p, int r, int s, int *pp)
+{
+ int a[o];
+ #pragma omp distribute collapse (2) dist_schedule (static, 4) firstprivate (q)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ }
+ #pragma omp distribute dist_schedule (static, 4) firstprivate (q)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ }
+ #pragma omp distribute parallel for if (n != 6) \
+ default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
+ collapse (2) dist_schedule (static, 4) \
+ num_threads (n + 4) proc_bind (spread) lastprivate (s) \
+ ordered schedule (static, 8)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ #pragma omp ordered
+ p = q;
+ s = i * 10 + j;
+ }
+ #pragma omp distribute parallel for if (n != 6) \
+ default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
+ num_threads (n + 4) dist_schedule (static, 4) \
+ proc_bind (master) lastprivate (s) ordered schedule (static, 8)
+ for (i = 0; i < 10; i++)
+ {
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ dosomething (a, n, p + q);
+ }
+ #pragma omp ordered
+ p = q;
+ s = i * 10;
+ }
+ #pragma omp distribute parallel for simd if (n != 6)default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ collapse (2) dist_schedule (static, 4) \
+ num_threads (n + 4) proc_bind (spread) lastprivate (s) \
+ schedule (static, 8) safelen(8)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ a[2+i*10+j] = p + q;
+ s = i * 10 + j;
+ }
+ #pragma omp distribute parallel for simd if (n != 6)default(shared) \
+ private (p) firstprivate (q) shared (n) reduction (+: r) \
+ num_threads (n + 4) dist_schedule (static, 4) \
+ proc_bind (master) lastprivate (s) schedule (static, 8) \
+ safelen(16) linear(i:1) aligned (pp:4)
+ for (i = 0; i < 10; i++)
+ {
+ r = r + 1;
+ p = q;
+ a[2+i] = p + q;
+ s = i * 10;
+ }
+ #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
+ collapse (2) dist_schedule (static, 4) lastprivate (s) safelen(8)
+ for (i = 0; i < 10; i++)
+ for (j = 0; j < 10; j++)
+ {
+ r = r + 1;
+ p = q;
+ a[2+i*10+j] = p + q;
+ s = i * 10 + j;
+ }
+ #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
+ lastprivate (s) dist_schedule (static, 4) safelen(16) linear(i:1) aligned (pp:4)
+ for (i = 0; i < 10; i++)
+ {
+ r = r + 1;
+ p = q;
+ a[2+i] = p + q;
+ s = i * 10;
+ }
+}